diff options
Diffstat (limited to 'drivers/mtd')
141 files changed, 8882 insertions, 3552 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 5fab4e6e830..94b821042d9 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig @@ -150,17 +150,18 @@ config MTD_BCM63XX_PARTS  config MTD_BCM47XX_PARTS  	tristate "BCM47XX partitioning support" -	depends on BCM47XX +	depends on BCM47XX || ARCH_BCM_5301X  	help  	  This provides partitions parser for devices based on BCM47xx  	  boards.  comment "User Modules And Translation Layers" +# +# MTD block device support is select'ed if needed +#  config MTD_BLKDEVS -	tristate "Common interface to block layer for MTD 'translation layers'" -	depends on BLOCK -	default n +	tristate  config MTD_BLOCK  	tristate "Caching block device access to MTD devices" @@ -320,6 +321,8 @@ source "drivers/mtd/onenand/Kconfig"  source "drivers/mtd/lpddr/Kconfig" +source "drivers/mtd/spi-nor/Kconfig" +  source "drivers/mtd/ubi/Kconfig"  endif # MTD diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index 4cfb31e6c96..99bb9a1f6e1 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile @@ -32,4 +32,5 @@ inftl-objs		:= inftlcore.o inftlmount.o  obj-y		+= chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/ +obj-$(CONFIG_MTD_SPI_NOR)	+= spi-nor/  obj-$(CONFIG_MTD_UBI)		+= ubi/ diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c index 5a3942bf109..96a33e3f7b0 100644 --- a/drivers/mtd/afs.c +++ b/drivers/mtd/afs.c @@ -264,7 +264,8 @@ static struct mtd_part_parser afs_parser = {  static int __init afs_parser_init(void)  { -	return register_mtd_parser(&afs_parser); +	register_mtd_parser(&afs_parser); +	return 0;  }  static void __exit afs_parser_exit(void) diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c index ddc0a4287a4..7c9172ad262 100644 --- a/drivers/mtd/ar7part.c +++ b/drivers/mtd/ar7part.c @@ -139,7 +139,8 @@ static struct mtd_part_parser ar7_parser = {  static int __init ar7_parser_init(void)  { -	return register_mtd_parser(&ar7_parser); +	register_mtd_parser(&ar7_parser); +	return 0;  }  static void __exit 
ar7_parser_exit(void) diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c index 9279a9174f8..adfa74c1bc4 100644 --- a/drivers/mtd/bcm47xxpart.c +++ b/drivers/mtd/bcm47xxpart.c @@ -14,7 +14,6 @@  #include <linux/slab.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/partitions.h> -#include <bcm47xx_nvram.h>  /* 10 parts were found on sflash on Netgear WNDR4500 */  #define BCM47XXPART_MAX_PARTS		12 @@ -23,15 +22,20 @@   * Amount of bytes we read when analyzing each block of flash memory.   * Set it big enough to allow detecting partition and reading important data.   */ -#define BCM47XXPART_BYTES_TO_READ	0x404 +#define BCM47XXPART_BYTES_TO_READ	0x4e8  /* Magics */  #define BOARD_DATA_MAGIC		0x5246504D	/* MPFR */ +#define BOARD_DATA_MAGIC2		0xBD0D0BBD +#define CFE_MAGIC			0x43464531	/* 1EFC */ +#define FACTORY_MAGIC			0x59544346	/* FCTY */ +#define NVRAM_HEADER			0x48534C46	/* FLSH */  #define POT_MAGIC1			0x54544f50	/* POTT */  #define POT_MAGIC2			0x504f		/* OP */  #define ML_MAGIC1			0x39685a42  #define ML_MAGIC2			0x26594131  #define TRX_MAGIC			0x30524448 +#define SQSH_MAGIC			0x71736873	/* shsq */  struct trx_header {  	uint32_t magic; @@ -71,7 +75,14 @@ static int bcm47xxpart_parse(struct mtd_info *master,  	/* Alloc */  	parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,  			GFP_KERNEL); +	if (!parts) +		return -ENOMEM; +  	buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL); +	if (!buf) { +		kfree(parts); +		return -ENOMEM; +	}  	/* Parse block by block looking for magics */  	for (offset = 0; offset <= master->size - blocksize; @@ -80,7 +91,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,  		if (offset >= 0x2000000)  			break; -		if (curr_part > BCM47XXPART_MAX_PARTS) { +		if (curr_part >= BCM47XXPART_MAX_PARTS) {  			pr_warn("Reached maximum number of partitions, scanning stopped!\n");  			break;  		} @@ -93,8 +104,9 @@ static int bcm47xxpart_parse(struct mtd_info *master,  			continue;  		} -		/* CFE has small 
NVRAM at 0x400 */ -		if (buf[0x400 / 4] == NVRAM_HEADER) { +		/* Magic or small NVRAM at 0x400 */ +		if ((buf[0x4e0 / 4] == CFE_MAGIC && buf[0x4e4 / 4] == CFE_MAGIC) || +		    (buf[0x400 / 4] == NVRAM_HEADER)) {  			bcm47xxpart_add_part(&parts[curr_part++], "boot",  					     offset, MTD_WRITEABLE);  			continue; @@ -110,6 +122,13 @@ static int bcm47xxpart_parse(struct mtd_info *master,  			continue;  		} +		/* Found on Huawei E970 */ +		if (buf[0x000 / 4] == FACTORY_MAGIC) { +			bcm47xxpart_add_part(&parts[curr_part++], "factory", +					     offset, MTD_WRITEABLE); +			continue; +		} +  		/* POT(TOP) */  		if (buf[0x000 / 4] == POT_MAGIC1 &&  		    (buf[0x004 / 4] & 0xFFFF) == POT_MAGIC2) { @@ -128,6 +147,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,  		/* TRX */  		if (buf[0x000 / 4] == TRX_MAGIC) { +			if (BCM47XXPART_MAX_PARTS - curr_part < 4) { +				pr_warn("Not enough partitions left to register trx, scanning stopped!\n"); +				break; +			} +  			trx = (struct trx_header *)buf;  			trx_part = curr_part; @@ -167,11 +191,33 @@ static int bcm47xxpart_parse(struct mtd_info *master,  			offset = rounddown(offset + trx->length, blocksize);  			continue;  		} + +		/* Squashfs on devices not using TRX */ +		if (buf[0x000 / 4] == SQSH_MAGIC) { +			bcm47xxpart_add_part(&parts[curr_part++], "rootfs", +					     offset, 0); +			continue; +		} + +		/* Read middle of the block */ +		if (mtd_read(master, offset + 0x8000, 0x4, +			     &bytes_read, (uint8_t *)buf) < 0) { +			pr_err("mtd_read error while parsing (offset: 0x%X)!\n", +			       offset); +			continue; +		} + +		/* Some devices (ex. WNDR3700v3) don't have a standard 'MPFR' */ +		if (buf[0x000 / 4] == BOARD_DATA_MAGIC2) { +			bcm47xxpart_add_part(&parts[curr_part++], "board_data", +					     offset, MTD_WRITEABLE); +			continue; +		}  	}  	/* Look for NVRAM at the end of the last block. 
*/  	for (i = 0; i < ARRAY_SIZE(possible_nvram_sizes); i++) { -		if (curr_part > BCM47XXPART_MAX_PARTS) { +		if (curr_part >= BCM47XXPART_MAX_PARTS) {  			pr_warn("Reached maximum number of partitions, scanning stopped!\n");  			break;  		} @@ -220,7 +266,8 @@ static struct mtd_part_parser bcm47xxpart_mtd_parser = {  static int __init bcm47xxpart_init(void)  { -	return register_mtd_parser(&bcm47xxpart_mtd_parser); +	register_mtd_parser(&bcm47xxpart_mtd_parser); +	return 0;  }  static void __exit bcm47xxpart_exit(void) diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c index 5c813907661..b2443f7031c 100644 --- a/drivers/mtd/bcm63xxpart.c +++ b/drivers/mtd/bcm63xxpart.c @@ -221,7 +221,8 @@ static struct mtd_part_parser bcm63xx_cfe_parser = {  static int __init bcm63xx_cfe_parser_init(void)  { -	return register_mtd_parser(&bcm63xx_cfe_parser); +	register_mtd_parser(&bcm63xx_cfe_parser); +	return 0;  }  static void __exit bcm63xx_cfe_parser_exit(void) diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig index e4696b37f3d..9f02c28c020 100644 --- a/drivers/mtd/chips/Kconfig +++ b/drivers/mtd/chips/Kconfig @@ -169,33 +169,33 @@ config MTD_OTP  	  in the programming of OTP bits will waste them.  config MTD_CFI_INTELEXT -	tristate "Support for Intel/Sharp flash chips" +	tristate "Support for CFI command set 0001 (Intel/Sharp chips)"  	depends on MTD_GEN_PROBE  	select MTD_CFI_UTIL  	help  	  The Common Flash Interface defines a number of different command  	  sets which a CFI-compliant chip may claim to implement. This code -	  provides support for one of those command sets, used on Intel -	  StrataFlash and other parts. +	  provides support for command set 0001, used on Intel StrataFlash +	  and other parts.  
config MTD_CFI_AMDSTD -	tristate "Support for AMD/Fujitsu/Spansion flash chips" +	tristate "Support for CFI command set 0002 (AMD/Fujitsu/Spansion chips)"  	depends on MTD_GEN_PROBE  	select MTD_CFI_UTIL  	help  	  The Common Flash Interface defines a number of different command  	  sets which a CFI-compliant chip may claim to implement. This code -	  provides support for one of those command sets, used on chips -	  including the AMD Am29LV320. +	  provides support for command set 0002, used on chips including +	  the AMD Am29LV320.  config MTD_CFI_STAA -	tristate "Support for ST (Advanced Architecture) flash chips" +	tristate "Support for CFI command set 0020 (ST (Advanced Architecture) chips)"  	depends on MTD_GEN_PROBE  	select MTD_CFI_UTIL  	help  	  The Common Flash Interface defines a number of different command  	  sets which a CFI-compliant chip may claim to implement. This code -	  provides support for one of those command sets. +	  provides support for command set 0020.  config MTD_CFI_UTIL  	tristate diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 77514430f1f..a7543ba3e19 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -21,7 +21,6 @@  #include <linux/types.h>  #include <linux/kernel.h>  #include <linux/sched.h> -#include <linux/init.h>  #include <asm/io.h>  #include <asm/byteorder.h> @@ -53,6 +52,11 @@  /* Atmel chips */  #define AT49BV640D	0x02de  #define AT49BV640DT	0x02db +/* Sharp chips */ +#define LH28F640BFHE_PTTL90	0x00b0 +#define LH28F640BFHE_PBTL90	0x00b1 +#define LH28F640BFHE_PTTL70A	0x00b2 +#define LH28F640BFHE_PBTL70A	0x00b3  static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);  static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); @@ -69,10 +73,10 @@ static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, s  static int cfi_intelext_read_user_prot_reg 
(struct mtd_info *, loff_t, size_t, size_t *, u_char *);  static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);  static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t); -static int cfi_intelext_get_fact_prot_info (struct mtd_info *, -					    struct otp_info *, size_t); -static int cfi_intelext_get_user_prot_info (struct mtd_info *, -					    struct otp_info *, size_t); +static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t, +					   size_t *, struct otp_info *); +static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t, +					   size_t *, struct otp_info *);  #endif  static int cfi_intelext_suspend (struct mtd_info *);  static void cfi_intelext_resume (struct mtd_info *); @@ -259,6 +263,36 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd)  		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;  }; +static int is_LH28F640BF(struct cfi_private *cfi) +{ +	/* Sharp LH28F640BF Family */ +	if (cfi->mfr == CFI_MFR_SHARP && ( +	    cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 || +	    cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A)) +		return 1; +	return 0; +} + +static void fixup_LH28F640BF(struct mtd_info *mtd) +{ +	struct map_info *map = mtd->priv; +	struct cfi_private *cfi = map->fldrv_priv; +	struct cfi_pri_intelext *extp = cfi->cmdset_priv; + +	/* Reset the Partition Configuration Register on LH28F640BF +	 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */ +	if (is_LH28F640BF(cfi)) { +		printk(KERN_INFO "Reset Partition Config. 
Register: 1 Partition of 4 planes\n"); +		map_write(map, CMD(0x60), 0); +		map_write(map, CMD(0x04), 0); + +		/* We have set one single partition thus +		 * Simultaneous Operations are not allowed */ +		printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n"); +		extp->FeatureSupport &= ~512; +	} +} +  static void fixup_use_point(struct mtd_info *mtd)  {  	struct map_info *map = mtd->priv; @@ -310,6 +344,8 @@ static struct cfi_fixup cfi_fixup_table[] = {  	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },  	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },  	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock }, +	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock }, +	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },  	{ 0, 0, NULL }  }; @@ -435,10 +471,8 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)  	int i;  	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); -	if (!mtd) { -		printk(KERN_ERR "Failed to allocate memory for MTD device\n"); +	if (!mtd)  		return NULL; -	}  	mtd->priv = map;  	mtd->type = MTD_NORFLASH; @@ -564,10 +598,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)  	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;  	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)  			* mtd->numeraseregions, GFP_KERNEL); -	if (!mtd->eraseregions) { -		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n"); +	if (!mtd->eraseregions)  		goto setup_err; -	}  	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {  		unsigned long ernum, ersize; @@ -1654,6 +1686,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,  	initial_adr = adr;  	cmd_adr = adr & ~(wbufsize-1); +	/* Sharp LH28F640BF chips need the first address for the +	 * Page Buffer Program command. 
See Table 5 of +	 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */ +	if (is_LH28F640BF(cfi)) +		cmd_adr = adr; +  	/* Let's determine this according to the interleave only once */  	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9); @@ -2399,24 +2437,19 @@ static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,  				     NULL, do_otp_lock, 1);  } -static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, -					   struct otp_info *buf, size_t len) -{ -	size_t retlen; -	int ret; +static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len, +					   size_t *retlen, struct otp_info *buf) -	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0); -	return ret ? : retlen; +{ +	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf, +				     NULL, 0);  } -static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, -					   struct otp_info *buf, size_t len) +static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len, +					   size_t *retlen, struct otp_info *buf)  { -	size_t retlen; -	int ret; - -	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1); -	return ret ? 
: retlen; +	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf, +				     NULL, 1);  }  #endif diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 89b9d689153..e21fde9d4d7 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -24,7 +24,6 @@  #include <linux/types.h>  #include <linux/kernel.h>  #include <linux/sched.h> -#include <linux/init.h>  #include <asm/io.h>  #include <asm/byteorder.h> @@ -507,10 +506,8 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)  	int i;  	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); -	if (!mtd) { -		printk(KERN_WARNING "Failed to allocate memory for MTD device\n"); +	if (!mtd)  		return NULL; -	}  	mtd->priv = map;  	mtd->type = MTD_NORFLASH; @@ -661,10 +658,8 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)  	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;  	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)  				    * mtd->numeraseregions, GFP_KERNEL); -	if (!mtd->eraseregions) { -		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n"); +	if (!mtd->eraseregions)  		goto setup_err; -	}  	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {  		unsigned long ernum, ersize; diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c index 096993f9711..423666b51ef 100644 --- a/drivers/mtd/chips/cfi_cmdset_0020.c +++ b/drivers/mtd/chips/cfi_cmdset_0020.c @@ -22,7 +22,6 @@  #include <linux/types.h>  #include <linux/kernel.h>  #include <linux/sched.h> -#include <linux/init.h>  #include <asm/io.h>  #include <asm/byteorder.h> @@ -176,7 +175,6 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)  	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);  	if (!mtd) { -		printk(KERN_ERR "Failed to allocate memory for MTD device\n");  		kfree(cfi->cmdset_priv);  		return NULL;  	} @@ -189,7 +187,6 @@ static struct mtd_info 
*cfi_staa_setup(struct map_info *map)  	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)  			* mtd->numeraseregions, GFP_KERNEL);  	if (!mtd->eraseregions) { -		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");  		kfree(cfi->cmdset_priv);  		kfree(mtd);  		return NULL; @@ -964,7 +961,7 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,  			chipnum++;  			if (chipnum >= cfi->numchips) -			break; +				break;  		}  	} @@ -1173,7 +1170,7 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)  			chipnum++;  			if (chipnum >= cfi->numchips) -			break; +				break;  		}  	}  	return 0; diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c index d2553527940..e8d0164498b 100644 --- a/drivers/mtd/chips/cfi_probe.c +++ b/drivers/mtd/chips/cfi_probe.c @@ -168,10 +168,8 @@ static int __xipram cfi_chip_setup(struct map_info *map,  		return 0;  	cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); -	if (!cfi->cfiq) { -		printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); +	if (!cfi->cfiq)  		return 0; -	}  	memset(cfi->cfiq,0,sizeof(struct cfi_ident)); diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c index f992418f40a..09c79bd0b4f 100644 --- a/drivers/mtd/chips/cfi_util.c +++ b/drivers/mtd/chips/cfi_util.c @@ -116,10 +116,8 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n  	printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);  	extp = kmalloc(size, GFP_KERNEL); -	if (!extp) { -		printk(KERN_ERR "Failed to allocate memory\n"); +	if (!extp)  		goto out; -	}  #ifdef CONFIG_MTD_XIP  	local_irq_disable(); @@ -241,7 +239,7 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,  			chipnum++;  			if (chipnum >= cfi->numchips) -			break; +				break;  		}  	} diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c index 
ffb36ba8a6e..b57ceea2151 100644 --- a/drivers/mtd/chips/gen_probe.c +++ b/drivers/mtd/chips/gen_probe.c @@ -114,7 +114,6 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi  	mapsize = sizeof(long) * DIV_ROUND_UP(max_chips, BITS_PER_LONG);  	chip_map = kzalloc(mapsize, GFP_KERNEL);  	if (!chip_map) { -		printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);  		kfree(cfi.cfiq);  		return NULL;  	} @@ -139,7 +138,6 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi  	retcfi = kmalloc(sizeof(struct cfi_private) + cfi.numchips * sizeof(struct flchip), GFP_KERNEL);  	if (!retcfi) { -		printk(KERN_WARNING "%s: kmalloc failed for CFI private structure\n", map->name);  		kfree(cfi.cfiq);  		kfree(chip_map);  		return NULL; diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c index 721caebbc5c..3e829b37af8 100644 --- a/drivers/mtd/cmdlinepart.c +++ b/drivers/mtd/cmdlinepart.c @@ -395,7 +395,8 @@ static int __init cmdline_parser_init(void)  {  	if (mtdparts)  		mtdpart_setup(mtdparts); -	return register_mtd_parser(&cmdline_parser); +	register_mtd_parser(&cmdline_parser); +	return 0;  }  static void __exit cmdline_parser_exit(void) diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 74ab4b7e523..c49d0b127fe 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -80,7 +80,7 @@ config MTD_DATAFLASH_OTP  config MTD_M25P80  	tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)" -	depends on SPI_MASTER +	depends on SPI_MASTER && MTD_SPI_NOR  	help  	  This enables access to most modern SPI flash chips, used for  	  program and data storage.   Series supported include Atmel AT26DF, @@ -95,13 +95,6 @@ config MTD_M25P80  	  if you want to specify device partitioning or to use a device which  	  doesn't support the JEDEC ID instruction. 
-config M25PXX_USE_FAST_READ -	bool "Use FAST_READ OPCode allowing SPI CLK >= 50MHz" -	depends on MTD_M25P80 -	default y -	help -	  This option enables FAST_READ access supported by ST M25Pxx. -  config MTD_SPEAR_SMI  	tristate "SPEAR MTD NOR Support through SMI controller"  	depends on PLAT_SPEAR @@ -217,6 +210,14 @@ config MTD_DOCG3  	  M-Systems and now Sandisk. The support is very experimental,  	  and doesn't give access to any write operations. +config MTD_ST_SPI_FSM +	tristate "ST Microelectronics SPI FSM Serial Flash Controller" +	depends on ARCH_STI +	help +	  This provides an MTD device driver for the ST Microelectronics +	  SPI Fast Sequence Mode (FSM) Serial Flash Controller and support +	  for a subset of connected Serial Flash devices. +  if MTD_DOCG3  config BCH_CONST_M  	default 14 diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index d83bd73096f..c68868f6058 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_MTD_NAND_OMAP_BCH)	+= elm.o  obj-$(CONFIG_MTD_SPEAR_SMI)	+= spear_smi.o  obj-$(CONFIG_MTD_SST25L)	+= sst25l.o  obj-$(CONFIG_MTD_BCM47XXSFLASH)	+= bcm47xxsflash.o +obj-$(CONFIG_MTD_ST_SPI_FSM)    += st_spi_fsm.o  CFLAGS_docg3.o			+= -I$(src) diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index 5cb4c04726b..66f0405f7e5 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c @@ -20,6 +20,7 @@  #include <linux/mutex.h>  #include <linux/mount.h>  #include <linux/slab.h> +#include <linux/major.h>  /* Info for the block device */  struct block2mtd_dev { @@ -208,7 +209,6 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)  } -/* FIXME: ensure that mtd->size % erase_size == 0 */  static struct block2mtd_dev *add_device(char *devname, int erase_size)  {  	const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL; @@ -239,13 +239,18 @@ static struct block2mtd_dev *add_device(char *devname, int 
erase_size)  	if (IS_ERR(bdev)) {  		pr_err("error: cannot open device %s\n", devname); -		goto devinit_err; +		goto err_free_block2mtd;  	}  	dev->blkdev = bdev;  	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {  		pr_err("attempting to use an MTD device as a block device\n"); -		goto devinit_err; +		goto err_free_block2mtd; +	} + +	if ((long)dev->blkdev->bd_inode->i_size % erase_size) { +		pr_err("erasesize must be a divisor of device size\n"); +		goto err_free_block2mtd;  	}  	mutex_init(&dev->write_mutex); @@ -254,7 +259,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)  	/* make the name contain the block device in */  	name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);  	if (!name) -		goto devinit_err; +		goto err_destroy_mutex;  	dev->mtd.name = name; @@ -273,7 +278,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)  	if (mtd_device_register(&dev->mtd, NULL, 0)) {  		/* Device didn't get added, so free the entry */ -		goto devinit_err; +		goto err_destroy_mutex;  	}  	list_add(&dev->list, &blkmtd_device_list);  	pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n", @@ -282,7 +287,9 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)  		dev->mtd.erasesize >> 10, dev->mtd.erasesize);  	return dev; -devinit_err: +err_destroy_mutex: +	mutex_destroy(&dev->write_mutex); +err_free_block2mtd:  	block2mtd_free_device(dev);  	return NULL;  } @@ -447,6 +454,7 @@ static void block2mtd_exit(void)  		struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);  		block2mtd_sync(&dev->mtd);  		mtd_device_unregister(&dev->mtd); +		mutex_destroy(&dev->write_mutex);  		pr_info("mtd%d: [%s] removed\n",  			dev->mtd.index,  			dev->mtd.name + strlen("block2mtd: ")); diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index 3e1b0a0ef4d..91a169c44b3 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -1608,8 +1608,8 @@ static ssize_t dps1_insert_key(struct 
device *dev,  #define FLOOR_SYSFS(id) { \  	__ATTR(f##id##_dps0_is_keylocked, S_IRUGO, dps0_is_key_locked, NULL), \  	__ATTR(f##id##_dps1_is_keylocked, S_IRUGO, dps1_is_key_locked, NULL), \ -	__ATTR(f##id##_dps0_protection_key, S_IWUGO, NULL, dps0_insert_key), \ -	__ATTR(f##id##_dps1_protection_key, S_IWUGO, NULL, dps1_insert_key), \ +	__ATTR(f##id##_dps0_protection_key, S_IWUSR|S_IWGRP, NULL, dps0_insert_key), \ +	__ATTR(f##id##_dps1_protection_key, S_IWUSR|S_IWGRP, NULL, dps1_insert_key), \  }  static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = { @@ -2047,21 +2047,21 @@ static int __init docg3_probe(struct platform_device *pdev)  	ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);  	if (!ress) {  		dev_err(dev, "No I/O memory resource defined\n"); -		goto noress; +		return ret;  	} -	base = ioremap(ress->start, DOC_IOSPACE_SIZE); +	base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);  	ret = -ENOMEM; -	cascade = kzalloc(sizeof(*cascade) * DOC_MAX_NBFLOORS, -			  GFP_KERNEL); +	cascade = devm_kzalloc(dev, sizeof(*cascade) * DOC_MAX_NBFLOORS, +			       GFP_KERNEL);  	if (!cascade) -		goto nomem1; +		return ret;  	cascade->base = base;  	mutex_init(&cascade->lock);  	cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,  			     DOC_ECC_BCH_PRIMPOLY);  	if (!cascade->bch) -		goto nomem2; +		return ret;  	for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {  		mtd = doc_probe_device(cascade, floor, dev); @@ -2097,15 +2097,10 @@ notfound:  	ret = -ENODEV;  	dev_info(dev, "No supported DiskOnChip found\n");  err_probe: -	kfree(cascade->bch); +	free_bch(cascade->bch);  	for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)  		if (cascade->floors[floor])  			doc_release_device(cascade->floors[floor]); -nomem2: -	kfree(cascade); -nomem1: -	iounmap(base); -noress:  	return ret;  } @@ -2119,7 +2114,6 @@ static int __exit docg3_release(struct platform_device *pdev)  {  	struct docg3_cascade *cascade = platform_get_drvdata(pdev);  	struct docg3 *docg3 = 
cascade->floors[0]->priv; -	void __iomem *base = cascade->base;  	int floor;  	doc_unregister_sysfs(pdev, cascade); @@ -2129,8 +2123,6 @@ static int __exit docg3_release(struct platform_device *pdev)  			doc_release_device(cascade->floors[floor]);  	free_bch(docg3->cascade->bch); -	kfree(cascade); -	iounmap(base);  	return 0;  } diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c index d1dd6a33a05..b4f61c7fc16 100644 --- a/drivers/mtd/devices/elm.c +++ b/drivers/mtd/devices/elm.c @@ -15,6 +15,8 @@   *   */ +#define DRIVER_NAME	"omap-elm" +  #include <linux/platform_device.h>  #include <linux/module.h>  #include <linux/interrupt.h> @@ -84,6 +86,8 @@ struct elm_info {  	struct list_head list;  	enum bch_ecc bch_type;  	struct elm_registers elm_regs; +	int ecc_steps; +	int ecc_syndrome_size;  };  static LIST_HEAD(elm_devices); @@ -103,7 +107,8 @@ static u32 elm_read_reg(struct elm_info *info, int offset)   * @dev:	ELM device   * @bch_type:	Type of BCH ecc   */ -int elm_config(struct device *dev, enum bch_ecc bch_type) +int elm_config(struct device *dev, enum bch_ecc bch_type, +	int ecc_steps, int ecc_step_size, int ecc_syndrome_size)  {  	u32 reg_val;  	struct elm_info *info = dev_get_drvdata(dev); @@ -112,10 +117,22 @@ int elm_config(struct device *dev, enum bch_ecc bch_type)  		dev_err(dev, "Unable to configure elm - device not probed?\n");  		return -ENODEV;  	} +	/* ELM cannot detect ECC errors for chunks > 1KB */ +	if (ecc_step_size > ((ELM_ECC_SIZE + 1) / 2)) { +		dev_err(dev, "unsupported config ecc-size=%d\n", ecc_step_size); +		return -EINVAL; +	} +	/* ELM support 8 error syndrome process */ +	if (ecc_steps > ERROR_VECTOR_MAX) { +		dev_err(dev, "unsupported config ecc-step=%d\n", ecc_steps); +		return -EINVAL; +	}  	reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);  	elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val); -	info->bch_type = bch_type; +	info->bch_type		= bch_type; +	info->ecc_steps		= ecc_steps; +	
info->ecc_syndrome_size	= ecc_syndrome_size;  	return 0;  } @@ -157,17 +174,15 @@ static void elm_load_syndrome(struct elm_info *info,  	int i, offset;  	u32 val; -	for (i = 0; i < ERROR_VECTOR_MAX; i++) { +	for (i = 0; i < info->ecc_steps; i++) {  		/* Check error reported */  		if (err_vec[i].error_reported) {  			elm_configure_page_mode(info, i, true);  			offset = ELM_SYNDROME_FRAGMENT_0 +  				SYNDROME_FRAGMENT_REG_SIZE * i; - -			/* BCH8 */ -			if (info->bch_type) { - +			switch (info->bch_type) { +			case BCH8_ECC:  				/* syndrome fragment 0 = ecc[9-12B] */  				val = cpu_to_be32(*(u32 *) &ecc[9]);  				elm_write_reg(info, offset, val); @@ -186,7 +201,8 @@ static void elm_load_syndrome(struct elm_info *info,  				offset += 4;  				val = ecc[0];  				elm_write_reg(info, offset, val); -			} else { +				break; +			case BCH4_ECC:  				/* syndrome fragment 0 = ecc[20-52b] bits */  				val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |  					((ecc[2] & 0xf) << 28); @@ -196,11 +212,36 @@ static void elm_load_syndrome(struct elm_info *info,  				offset += 4;  				val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;  				elm_write_reg(info, offset, val); +				break; +			case BCH16_ECC: +				val = cpu_to_be32(*(u32 *) &ecc[22]); +				elm_write_reg(info, offset, val); +				offset += 4; +				val = cpu_to_be32(*(u32 *) &ecc[18]); +				elm_write_reg(info, offset, val); +				offset += 4; +				val = cpu_to_be32(*(u32 *) &ecc[14]); +				elm_write_reg(info, offset, val); +				offset += 4; +				val = cpu_to_be32(*(u32 *) &ecc[10]); +				elm_write_reg(info, offset, val); +				offset += 4; +				val = cpu_to_be32(*(u32 *) &ecc[6]); +				elm_write_reg(info, offset, val); +				offset += 4; +				val = cpu_to_be32(*(u32 *) &ecc[2]); +				elm_write_reg(info, offset, val); +				offset += 4; +				val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16; +				elm_write_reg(info, offset, val); +				break; +			default: +				pr_err("invalid config bch_type\n");  			}  		}  		/* Update ecc pointer with ecc byte size */ -	
	ecc += info->bch_type ? BCH8_SIZE : BCH4_SIZE; +		ecc += info->ecc_syndrome_size;  	}  } @@ -223,7 +264,7 @@ static void elm_start_processing(struct elm_info *info,  	 * Set syndrome vector valid, so that ELM module  	 * will process it for vectors error is reported  	 */ -	for (i = 0; i < ERROR_VECTOR_MAX; i++) { +	for (i = 0; i < info->ecc_steps; i++) {  		if (err_vec[i].error_reported) {  			offset = ELM_SYNDROME_FRAGMENT_6 +  				SYNDROME_FRAGMENT_REG_SIZE * i; @@ -252,7 +293,7 @@ static void elm_error_correction(struct elm_info *info,  	int offset;  	u32 reg_val; -	for (i = 0; i < ERROR_VECTOR_MAX; i++) { +	for (i = 0; i < info->ecc_steps; i++) {  		/* Check error reported */  		if (err_vec[i].error_reported) { @@ -354,10 +395,8 @@ static int elm_probe(struct platform_device *pdev)  	struct elm_info *info;  	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); -	if (!info) { -		dev_err(&pdev->dev, "failed to allocate memory\n"); +	if (!info)  		return -ENOMEM; -	}  	info->dev = &pdev->dev; @@ -380,7 +419,7 @@ static int elm_probe(struct platform_device *pdev)  	}  	pm_runtime_enable(&pdev->dev); -	if (pm_runtime_get_sync(&pdev->dev)) { +	if (pm_runtime_get_sync(&pdev->dev) < 0) {  		ret = -EINVAL;  		pm_runtime_disable(&pdev->dev);  		dev_err(&pdev->dev, "can't enable clock\n"); @@ -401,6 +440,7 @@ static int elm_remove(struct platform_device *pdev)  	return 0;  } +#ifdef CONFIG_PM_SLEEP  /**   * elm_context_save   * saves ELM configurations to preserve them across Hardware powered-down @@ -418,6 +458,13 @@ static int elm_context_save(struct elm_info *info)  	for (i = 0; i < ERROR_VECTOR_MAX; i++) {  		offset = i * SYNDROME_FRAGMENT_REG_SIZE;  		switch (bch_type) { +		case BCH16_ECC: +			regs->elm_syndrome_fragment_6[i] = elm_read_reg(info, +					ELM_SYNDROME_FRAGMENT_6 + offset); +			regs->elm_syndrome_fragment_5[i] = elm_read_reg(info, +					ELM_SYNDROME_FRAGMENT_5 + offset); +			regs->elm_syndrome_fragment_4[i] = elm_read_reg(info, +					
ELM_SYNDROME_FRAGMENT_4 + offset);  		case BCH8_ECC:  			regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,  					ELM_SYNDROME_FRAGMENT_3 + offset); @@ -428,6 +475,7 @@ static int elm_context_save(struct elm_info *info)  					ELM_SYNDROME_FRAGMENT_1 + offset);  			regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,  					ELM_SYNDROME_FRAGMENT_0 + offset); +			break;  		default:  			return -EINVAL;  		} @@ -456,6 +504,13 @@ static int elm_context_restore(struct elm_info *info)  	for (i = 0; i < ERROR_VECTOR_MAX; i++) {  		offset = i * SYNDROME_FRAGMENT_REG_SIZE;  		switch (bch_type) { +		case BCH16_ECC: +			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset, +					regs->elm_syndrome_fragment_6[i]); +			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_5 + offset, +					regs->elm_syndrome_fragment_5[i]); +			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset, +					regs->elm_syndrome_fragment_4[i]);  		case BCH8_ECC:  			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,  					regs->elm_syndrome_fragment_3[i]); @@ -466,6 +521,7 @@ static int elm_context_restore(struct elm_info *info)  					regs->elm_syndrome_fragment_1[i]);  			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,  					regs->elm_syndrome_fragment_0[i]); +			break;  		default:  			return -EINVAL;  		} @@ -492,6 +548,7 @@ static int elm_resume(struct device *dev)  	elm_context_restore(info);  	return 0;  } +#endif  static SIMPLE_DEV_PM_OPS(elm_pm_ops, elm_suspend, elm_resume); @@ -505,7 +562,7 @@ MODULE_DEVICE_TABLE(of, elm_of_match);  static struct platform_driver elm_driver = {  	.driver	= { -		.name	= "elm", +		.name	= DRIVER_NAME,  		.owner	= THIS_MODULE,  		.of_match_table = of_match_ptr(elm_of_match),  		.pm	= &elm_pm_ops, diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 26b14f9fcac..ed7e0a1bed3 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -15,905 +15,175 @@   *   */ -#include <linux/init.h>  #include <linux/err.h>  #include 
<linux/errno.h>  #include <linux/module.h>  #include <linux/device.h> -#include <linux/interrupt.h> -#include <linux/mutex.h> -#include <linux/math64.h> -#include <linux/slab.h> -#include <linux/sched.h> -#include <linux/mod_devicetable.h> -#include <linux/mtd/cfi.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/partitions.h> -#include <linux/of_platform.h>  #include <linux/spi/spi.h>  #include <linux/spi/flash.h> +#include <linux/mtd/spi-nor.h> -/* Flash opcodes. */ -#define	OPCODE_WREN		0x06	/* Write enable */ -#define	OPCODE_RDSR		0x05	/* Read status register */ -#define	OPCODE_WRSR		0x01	/* Write status register 1 byte */ -#define	OPCODE_NORM_READ	0x03	/* Read data bytes (low frequency) */ -#define	OPCODE_FAST_READ	0x0b	/* Read data bytes (high frequency) */ -#define	OPCODE_PP		0x02	/* Page program (up to 256 bytes) */ -#define	OPCODE_BE_4K		0x20	/* Erase 4KiB block */ -#define	OPCODE_BE_4K_PMC	0xd7	/* Erase 4KiB block on PMC chips */ -#define	OPCODE_BE_32K		0x52	/* Erase 32KiB block */ -#define	OPCODE_CHIP_ERASE	0xc7	/* Erase whole flash chip */ -#define	OPCODE_SE		0xd8	/* Sector erase (usually 64KiB) */ -#define	OPCODE_RDID		0x9f	/* Read JEDEC ID */ - -/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ -#define	OPCODE_NORM_READ_4B	0x13	/* Read data bytes (low frequency) */ -#define	OPCODE_FAST_READ_4B	0x0c	/* Read data bytes (high frequency) */ -#define	OPCODE_PP_4B		0x12	/* Page program (up to 256 bytes) */ -#define	OPCODE_SE_4B		0xdc	/* Sector erase (usually 64KiB) */ - -/* Used for SST flashes only. */ -#define	OPCODE_BP		0x02	/* Byte program */ -#define	OPCODE_WRDI		0x04	/* Write disable */ -#define	OPCODE_AAI_WP		0xad	/* Auto address increment word program */ - -/* Used for Macronix and Winbond flashes. */ -#define	OPCODE_EN4B		0xb7	/* Enter 4-byte mode */ -#define	OPCODE_EX4B		0xe9	/* Exit 4-byte mode */ - -/* Used for Spansion flashes only. */ -#define	OPCODE_BRWR		0x17	/* Bank register write */ - -/* Status Register bits. 
*/ -#define	SR_WIP			1	/* Write in progress */ -#define	SR_WEL			2	/* Write enable latch */ -/* meaning of other SR_* bits may differ between vendors */ -#define	SR_BP0			4	/* Block protect 0 */ -#define	SR_BP1			8	/* Block protect 1 */ -#define	SR_BP2			0x10	/* Block protect 2 */ -#define	SR_SRWD			0x80	/* SR write protect */ - -/* Define max times to check status register before we give up. */ -#define	MAX_READY_WAIT_JIFFIES	(40 * HZ)	/* M25P16 specs 40s max chip erase */ -#define	MAX_CMD_SIZE		5 - -#define JEDEC_MFR(_jedec_id)	((_jedec_id) >> 16) - -/****************************************************************************/ - +#define	MAX_CMD_SIZE		6  struct m25p {  	struct spi_device	*spi; -	struct mutex		lock; +	struct spi_nor		spi_nor;  	struct mtd_info		mtd; -	u16			page_size; -	u16			addr_width; -	u8			erase_opcode; -	u8			read_opcode; -	u8			program_opcode; -	u8			*command; -	bool			fast_read; +	u8			command[MAX_CMD_SIZE];  }; -static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd) +static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)  { -	return container_of(mtd, struct m25p, mtd); -} +	struct m25p *flash = nor->priv; +	struct spi_device *spi = flash->spi; +	int ret; -/****************************************************************************/ +	ret = spi_write_then_read(spi, &code, 1, val, len); +	if (ret < 0) +		dev_err(&spi->dev, "error %d reading %x\n", ret, code); -/* - * Internal helper functions - */ - -/* - * Read the status register, returning its value in the location - * Return the status register value. - * Returns negative if error occurred. 
- */ -static int read_sr(struct m25p *flash) -{ -	ssize_t retval; -	u8 code = OPCODE_RDSR; -	u8 val; - -	retval = spi_write_then_read(flash->spi, &code, 1, &val, 1); - -	if (retval < 0) { -		dev_err(&flash->spi->dev, "error %d reading SR\n", -				(int) retval); -		return retval; -	} - -	return val; +	return ret;  } -/* - * Write status register 1 byte - * Returns negative if error occurred. - */ -static int write_sr(struct m25p *flash, u8 val) +static void m25p_addr2cmd(struct spi_nor *nor, unsigned int addr, u8 *cmd)  { -	flash->command[0] = OPCODE_WRSR; -	flash->command[1] = val; - -	return spi_write(flash->spi, flash->command, 2); +	/* opcode is in cmd[0] */ +	cmd[1] = addr >> (nor->addr_width * 8 -  8); +	cmd[2] = addr >> (nor->addr_width * 8 - 16); +	cmd[3] = addr >> (nor->addr_width * 8 - 24); +	cmd[4] = addr >> (nor->addr_width * 8 - 32);  } -/* - * Set write enable latch with Write Enable command. - * Returns negative if error occurred. - */ -static inline int write_enable(struct m25p *flash) +static int m25p_cmdsz(struct spi_nor *nor)  { -	u8	code = OPCODE_WREN; - -	return spi_write_then_read(flash->spi, &code, 1, NULL, 0); +	return 1 + nor->addr_width;  } -/* - * Send write disble instruction to the chip. - */ -static inline int write_disable(struct m25p *flash) +static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len, +			int wr_en)  { -	u8	code = OPCODE_WRDI; +	struct m25p *flash = nor->priv; +	struct spi_device *spi = flash->spi; -	return spi_write_then_read(flash->spi, &code, 1, NULL, 0); -} - -/* - * Enable/disable 4-byte addressing mode. - */ -static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable) -{ -	switch (JEDEC_MFR(jedec_id)) { -	case CFI_MFR_MACRONIX: -	case CFI_MFR_ST: /* Micron, actually */ -	case 0xEF /* winbond */: -		flash->command[0] = enable ? 
OPCODE_EN4B : OPCODE_EX4B; -		return spi_write(flash->spi, flash->command, 1); -	default: -		/* Spansion style */ -		flash->command[0] = OPCODE_BRWR; -		flash->command[1] = enable << 7; -		return spi_write(flash->spi, flash->command, 2); -	} -} - -/* - * Service routine to read status register until ready, or timeout occurs. - * Returns non-zero if error. - */ -static int wait_till_ready(struct m25p *flash) -{ -	unsigned long deadline; -	int sr; - -	deadline = jiffies + MAX_READY_WAIT_JIFFIES; - -	do { -		if ((sr = read_sr(flash)) < 0) -			break; -		else if (!(sr & SR_WIP)) -			return 0; - -		cond_resched(); - -	} while (!time_after_eq(jiffies, deadline)); +	flash->command[0] = opcode; +	if (buf) +		memcpy(&flash->command[1], buf, len); -	return 1; +	return spi_write(spi, flash->command, len + 1);  } -/* - * Erase the whole flash memory - * - * Returns 0 if successful, non-zero otherwise. - */ -static int erase_chip(struct m25p *flash) +static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len, +			size_t *retlen, const u_char *buf)  { -	pr_debug("%s: %s %lldKiB\n", dev_name(&flash->spi->dev), __func__, -			(long long)(flash->mtd.size >> 10)); - -	/* Wait until finished previous write command. */ -	if (wait_till_ready(flash)) -		return 1; - -	/* Send write enable, then erase commands. */ -	write_enable(flash); - -	/* Set up command buffer. 
*/ -	flash->command[0] = OPCODE_CHIP_ERASE; - -	spi_write(flash->spi, flash->command, 1); - -	return 0; -} +	struct m25p *flash = nor->priv; +	struct spi_device *spi = flash->spi; +	struct spi_transfer t[2] = {}; +	struct spi_message m; +	int cmd_sz = m25p_cmdsz(nor); -static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd) -{ -	/* opcode is in cmd[0] */ -	cmd[1] = addr >> (flash->addr_width * 8 -  8); -	cmd[2] = addr >> (flash->addr_width * 8 - 16); -	cmd[3] = addr >> (flash->addr_width * 8 - 24); -	cmd[4] = addr >> (flash->addr_width * 8 - 32); -} +	spi_message_init(&m); -static int m25p_cmdsz(struct m25p *flash) -{ -	return 1 + flash->addr_width; -} +	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second) +		cmd_sz = 1; -/* - * Erase one sector of flash memory at offset ``offset'' which is any - * address within the sector which should be erased. - * - * Returns 0 if successful, non-zero otherwise. - */ -static int erase_sector(struct m25p *flash, u32 offset) -{ -	pr_debug("%s: %s %dKiB at 0x%08x\n", dev_name(&flash->spi->dev), -			__func__, flash->mtd.erasesize / 1024, offset); +	flash->command[0] = nor->program_opcode; +	m25p_addr2cmd(nor, to, flash->command); -	/* Wait until finished previous write command. */ -	if (wait_till_ready(flash)) -		return 1; - -	/* Send write enable, then erase commands. */ -	write_enable(flash); +	t[0].tx_buf = flash->command; +	t[0].len = cmd_sz; +	spi_message_add_tail(&t[0], &m); -	/* Set up command buffer. */ -	flash->command[0] = flash->erase_opcode; -	m25p_addr2cmd(flash, offset, flash->command); +	t[1].tx_buf = buf; +	t[1].len = len; +	spi_message_add_tail(&t[1], &m); -	spi_write(flash->spi, flash->command, m25p_cmdsz(flash)); +	spi_sync(spi, &m); -	return 0; +	*retlen += m.actual_length - cmd_sz;  } -/****************************************************************************/ - -/* - * MTD implementation - */ - -/* - * Erase an address range on the flash chip.  
The address range may extend - * one or more erase sectors.  Return an error is there is a problem erasing. - */ -static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr) +static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor)  { -	struct m25p *flash = mtd_to_m25p(mtd); -	u32 addr,len; -	uint32_t rem; - -	pr_debug("%s: %s at 0x%llx, len %lld\n", dev_name(&flash->spi->dev), -			__func__, (long long)instr->addr, -			(long long)instr->len); - -	div_u64_rem(instr->len, mtd->erasesize, &rem); -	if (rem) -		return -EINVAL; - -	addr = instr->addr; -	len = instr->len; - -	mutex_lock(&flash->lock); - -	/* whole-chip erase? */ -	if (len == flash->mtd.size) { -		if (erase_chip(flash)) { -			instr->state = MTD_ERASE_FAILED; -			mutex_unlock(&flash->lock); -			return -EIO; -		} - -	/* REVISIT in some cases we could speed up erasing large regions -	 * by using OPCODE_SE instead of OPCODE_BE_4K.  We may have set up -	 * to use "small sector erase", but that's not always optimal. -	 */ - -	/* "sector"-at-a-time erase */ -	} else { -		while (len) { -			if (erase_sector(flash, addr)) { -				instr->state = MTD_ERASE_FAILED; -				mutex_unlock(&flash->lock); -				return -EIO; -			} - -			addr += mtd->erasesize; -			len -= mtd->erasesize; -		} +	switch (nor->flash_read) { +	case SPI_NOR_DUAL: +		return 2; +	case SPI_NOR_QUAD: +		return 4; +	default: +		return 0;  	} - -	mutex_unlock(&flash->lock); - -	instr->state = MTD_ERASE_DONE; -	mtd_erase_callback(instr); - -	return 0;  }  /* - * Read an address range from the flash chip.  The address range + * Read an address range from the nor chip.  The address range   * may be any size provided it is within the physical boundaries.   
*/ -static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len, -	size_t *retlen, u_char *buf) +static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len, +			size_t *retlen, u_char *buf)  { -	struct m25p *flash = mtd_to_m25p(mtd); +	struct m25p *flash = nor->priv; +	struct spi_device *spi = flash->spi;  	struct spi_transfer t[2];  	struct spi_message m; -	uint8_t opcode; +	int dummy = nor->read_dummy; +	int ret; -	pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev), -			__func__, (u32)from, len); +	/* Wait till previous write/erase is done. */ +	ret = nor->wait_till_ready(nor); +	if (ret) +		return ret;  	spi_message_init(&m);  	memset(t, 0, (sizeof t)); -	/* NOTE: -	 * OPCODE_FAST_READ (if available) is faster. -	 * Should add 1 byte DUMMY_BYTE. -	 */ +	flash->command[0] = nor->read_opcode; +	m25p_addr2cmd(nor, from, flash->command); +  	t[0].tx_buf = flash->command; -	t[0].len = m25p_cmdsz(flash) + (flash->fast_read ? 1 : 0); +	t[0].len = m25p_cmdsz(nor) + dummy;  	spi_message_add_tail(&t[0], &m);  	t[1].rx_buf = buf; +	t[1].rx_nbits = m25p80_rx_nbits(nor);  	t[1].len = len;  	spi_message_add_tail(&t[1], &m); -	mutex_lock(&flash->lock); - -	/* Wait till previous write/erase is done. */ -	if (wait_till_ready(flash)) { -		/* REVISIT status return?? */ -		mutex_unlock(&flash->lock); -		return 1; -	} - -	/* FIXME switch to OPCODE_FAST_READ.  It's required for higher -	 * clocks; and at this writing, every chip this driver handles -	 * supports that opcode. -	 */ - -	/* Set up the write data buffer. */ -	opcode = flash->read_opcode; -	flash->command[0] = opcode; -	m25p_addr2cmd(flash, from, flash->command); - -	spi_sync(flash->spi, &m); - -	*retlen = m.actual_length - m25p_cmdsz(flash) - -			(flash->fast_read ? 1 : 0); - -	mutex_unlock(&flash->lock); - -	return 0; -} - -/* - * Write an address range to the flash chip.  Data must be written in - * FLASH_PAGESIZE chunks.  
The address range may be any size provided - * it is within the physical boundaries. - */ -static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len, -	size_t *retlen, const u_char *buf) -{ -	struct m25p *flash = mtd_to_m25p(mtd); -	u32 page_offset, page_size; -	struct spi_transfer t[2]; -	struct spi_message m; - -	pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), -			__func__, (u32)to, len); - -	spi_message_init(&m); -	memset(t, 0, (sizeof t)); - -	t[0].tx_buf = flash->command; -	t[0].len = m25p_cmdsz(flash); -	spi_message_add_tail(&t[0], &m); - -	t[1].tx_buf = buf; -	spi_message_add_tail(&t[1], &m); - -	mutex_lock(&flash->lock); - -	/* Wait until finished previous write command. */ -	if (wait_till_ready(flash)) { -		mutex_unlock(&flash->lock); -		return 1; -	} - -	write_enable(flash); - -	/* Set up the opcode in the write buffer. */ -	flash->command[0] = flash->program_opcode; -	m25p_addr2cmd(flash, to, flash->command); - -	page_offset = to & (flash->page_size - 1); - -	/* do all the bytes fit onto one page? 
*/ -	if (page_offset + len <= flash->page_size) { -		t[1].len = len; - -		spi_sync(flash->spi, &m); - -		*retlen = m.actual_length - m25p_cmdsz(flash); -	} else { -		u32 i; - -		/* the size of data remaining on the first page */ -		page_size = flash->page_size - page_offset; - -		t[1].len = page_size; -		spi_sync(flash->spi, &m); - -		*retlen = m.actual_length - m25p_cmdsz(flash); - -		/* write everything in flash->page_size chunks */ -		for (i = page_size; i < len; i += page_size) { -			page_size = len - i; -			if (page_size > flash->page_size) -				page_size = flash->page_size; - -			/* write the next page to flash */ -			m25p_addr2cmd(flash, to + i, flash->command); - -			t[1].tx_buf = buf + i; -			t[1].len = page_size; - -			wait_till_ready(flash); - -			write_enable(flash); - -			spi_sync(flash->spi, &m); - -			*retlen += m.actual_length - m25p_cmdsz(flash); -		} -	} - -	mutex_unlock(&flash->lock); +	spi_sync(spi, &m); +	*retlen = m.actual_length - m25p_cmdsz(nor) - dummy;  	return 0;  } -static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, -		size_t *retlen, const u_char *buf) +static int m25p80_erase(struct spi_nor *nor, loff_t offset)  { -	struct m25p *flash = mtd_to_m25p(mtd); -	struct spi_transfer t[2]; -	struct spi_message m; -	size_t actual; -	int cmd_sz, ret; +	struct m25p *flash = nor->priv; +	int ret; -	pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), -			__func__, (u32)to, len); - -	spi_message_init(&m); -	memset(t, 0, (sizeof t)); - -	t[0].tx_buf = flash->command; -	t[0].len = m25p_cmdsz(flash); -	spi_message_add_tail(&t[0], &m); - -	t[1].tx_buf = buf; -	spi_message_add_tail(&t[1], &m); - -	mutex_lock(&flash->lock); +	dev_dbg(nor->dev, "%dKiB at 0x%08x\n", +		flash->mtd.erasesize / 1024, (u32)offset);  	/* Wait until finished previous write command. 
*/ -	ret = wait_till_ready(flash); +	ret = nor->wait_till_ready(nor);  	if (ret) -		goto time_out; - -	write_enable(flash); - -	actual = to % 2; -	/* Start write from odd address. */ -	if (actual) { -		flash->command[0] = OPCODE_BP; -		m25p_addr2cmd(flash, to, flash->command); - -		/* write one byte. */ -		t[1].len = 1; -		spi_sync(flash->spi, &m); -		ret = wait_till_ready(flash); -		if (ret) -			goto time_out; -		*retlen += m.actual_length - m25p_cmdsz(flash); -	} -	to += actual; +		return ret; -	flash->command[0] = OPCODE_AAI_WP; -	m25p_addr2cmd(flash, to, flash->command); - -	/* Write out most of the data here. */ -	cmd_sz = m25p_cmdsz(flash); -	for (; actual < len - 1; actual += 2) { -		t[0].len = cmd_sz; -		/* write two bytes. */ -		t[1].len = 2; -		t[1].tx_buf = buf + actual; - -		spi_sync(flash->spi, &m); -		ret = wait_till_ready(flash); -		if (ret) -			goto time_out; -		*retlen += m.actual_length - cmd_sz; -		cmd_sz = 1; -		to += 2; -	} -	write_disable(flash); -	ret = wait_till_ready(flash); +	/* Send write enable, then erase commands. */ +	ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);  	if (ret) -		goto time_out; +		return ret; -	/* Write out trailing byte if it exists. 
*/ -	if (actual != len) { -		write_enable(flash); -		flash->command[0] = OPCODE_BP; -		m25p_addr2cmd(flash, to, flash->command); -		t[0].len = m25p_cmdsz(flash); -		t[1].len = 1; -		t[1].tx_buf = buf + actual; - -		spi_sync(flash->spi, &m); -		ret = wait_till_ready(flash); -		if (ret) -			goto time_out; -		*retlen += m.actual_length - m25p_cmdsz(flash); -		write_disable(flash); -	} - -time_out: -	mutex_unlock(&flash->lock); -	return ret; -} - -static int m25p80_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) -{ -	struct m25p *flash = mtd_to_m25p(mtd); -	uint32_t offset = ofs; -	uint8_t status_old, status_new; -	int res = 0; - -	mutex_lock(&flash->lock); -	/* Wait until finished previous command */ -	if (wait_till_ready(flash)) { -		res = 1; -		goto err; -	} - -	status_old = read_sr(flash); - -	if (offset < flash->mtd.size-(flash->mtd.size/2)) -		status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0; -	else if (offset < flash->mtd.size-(flash->mtd.size/4)) -		status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1; -	else if (offset < flash->mtd.size-(flash->mtd.size/8)) -		status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0; -	else if (offset < flash->mtd.size-(flash->mtd.size/16)) -		status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2; -	else if (offset < flash->mtd.size-(flash->mtd.size/32)) -		status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0; -	else if (offset < flash->mtd.size-(flash->mtd.size/64)) -		status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1; -	else -		status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0; - -	/* Only modify protection if it will not unlock other areas */ -	if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) > -					(status_old&(SR_BP2|SR_BP1|SR_BP0))) { -		write_enable(flash); -		if (write_sr(flash, status_new) < 0) { -			res = 1; -			goto err; -		} -	} - -err:	mutex_unlock(&flash->lock); -	return res; -} - -static int m25p80_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) -{ -	struct m25p *flash = mtd_to_m25p(mtd); -	uint32_t 
offset = ofs; -	uint8_t status_old, status_new; -	int res = 0; - -	mutex_lock(&flash->lock); -	/* Wait until finished previous command */ -	if (wait_till_ready(flash)) { -		res = 1; -		goto err; -	} - -	status_old = read_sr(flash); - -	if (offset+len > flash->mtd.size-(flash->mtd.size/64)) -		status_new = status_old & ~(SR_BP2|SR_BP1|SR_BP0); -	else if (offset+len > flash->mtd.size-(flash->mtd.size/32)) -		status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0; -	else if (offset+len > flash->mtd.size-(flash->mtd.size/16)) -		status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1; -	else if (offset+len > flash->mtd.size-(flash->mtd.size/8)) -		status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0; -	else if (offset+len > flash->mtd.size-(flash->mtd.size/4)) -		status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2; -	else if (offset+len > flash->mtd.size-(flash->mtd.size/2)) -		status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0; -	else -		status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1; - -	/* Only modify protection if it will not lock other areas */ -	if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) < -					(status_old&(SR_BP2|SR_BP1|SR_BP0))) { -		write_enable(flash); -		if (write_sr(flash, status_new) < 0) { -			res = 1; -			goto err; -		} -	} - -err:	mutex_unlock(&flash->lock); -	return res; -} - -/****************************************************************************/ - -/* - * SPI device driver setup and teardown - */ - -struct flash_info { -	/* JEDEC id zero means "no ID" (most older chips); otherwise it has -	 * a high byte of zero plus three data bytes: the manufacturer id, -	 * then a two byte device id. -	 */ -	u32		jedec_id; -	u16             ext_id; - -	/* The size listed here is what works with OPCODE_SE, which isn't -	 * necessarily called a "sector" by the vendor. 
-	 */ -	unsigned	sector_size; -	u16		n_sectors; - -	u16		page_size; -	u16		addr_width; - -	u16		flags; -#define	SECT_4K		0x01		/* OPCODE_BE_4K works uniformly */ -#define	M25P_NO_ERASE	0x02		/* No erase command needed */ -#define	SST_WRITE	0x04		/* use SST byte programming */ -#define	M25P_NO_FR	0x08		/* Can't do fastread */ -#define	SECT_4K_PMC	0x10		/* OPCODE_BE_4K_PMC works uniformly */ -}; - -#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\ -	((kernel_ulong_t)&(struct flash_info) {				\ -		.jedec_id = (_jedec_id),				\ -		.ext_id = (_ext_id),					\ -		.sector_size = (_sector_size),				\ -		.n_sectors = (_n_sectors),				\ -		.page_size = 256,					\ -		.flags = (_flags),					\ -	}) - -#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags)	\ -	((kernel_ulong_t)&(struct flash_info) {				\ -		.sector_size = (_sector_size),				\ -		.n_sectors = (_n_sectors),				\ -		.page_size = (_page_size),				\ -		.addr_width = (_addr_width),				\ -		.flags = (_flags),					\ -	}) - -/* NOTE: double check command sets and memory organization when you add - * more flash chips.  This current list focusses on newer chips, which - * have been converging on command sets which including JEDEC ID. 
- */ -static const struct spi_device_id m25p_ids[] = { -	/* Atmel -- some are (confusingly) marketed as "DataFlash" */ -	{ "at25fs010",  INFO(0x1f6601, 0, 32 * 1024,   4, SECT_4K) }, -	{ "at25fs040",  INFO(0x1f6604, 0, 64 * 1024,   8, SECT_4K) }, - -	{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024,   8, SECT_4K) }, -	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024,  64, SECT_4K) }, -	{ "at25df641",  INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) }, - -	{ "at26f004",   INFO(0x1f0400, 0, 64 * 1024,  8, SECT_4K) }, -	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) }, -	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, -	{ "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, - -	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) }, - -	/* EON -- en25xxx */ -	{ "en25f32", INFO(0x1c3116, 0, 64 * 1024,  64, SECT_4K) }, -	{ "en25p32", INFO(0x1c2016, 0, 64 * 1024,  64, 0) }, -	{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024,  64, 0) }, -	{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, -	{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, -	{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) }, - -	/* Everspin */ -	{ "mr25h256", CAT25_INFO(  32 * 1024, 1, 256, 2, M25P_NO_ERASE | M25P_NO_FR) }, -	{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, M25P_NO_ERASE | M25P_NO_FR) }, - -	/* GigaDevice */ -	{ "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64, SECT_4K) }, -	{ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) }, - -	/* Intel/Numonyx -- xxxs33b */ -	{ "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) }, -	{ "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) }, -	{ "640s33b",  INFO(0x898913, 0, 64 * 1024, 128, 0) }, - -	/* Macronix */ -	{ "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) }, -	{ "mx25l4005a",  INFO(0xc22013, 0, 64 * 1024,   8, SECT_4K) }, -	{ "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16, 0) }, -	{ "mx25l1606e",  INFO(0xc22015, 0, 64 * 1024,  32, SECT_4K) }, -	{ "mx25l3205d",  INFO(0xc22016, 0, 64 * 1024,  64, 0) }, -	{ 
"mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128, 0) }, -	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, -	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, -	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, -	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, -	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, 0) }, - -	/* Micron */ -	{ "n25q064",  INFO(0x20ba17, 0, 64 * 1024, 128, 0) }, -	{ "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024, 256, 0) }, -	{ "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024, 256, 0) }, -	{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) }, - -	/* PMC */ -	{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) }, -	{ "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) }, -	{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024,  64, SECT_4K) }, - -	/* Spansion -- single (large) sector size only, at least -	 * for the chips listed here (without boot sectors). -	 */ -	{ "s25sl032p",  INFO(0x010215, 0x4d00,  64 * 1024,  64, 0) }, -	{ "s25sl064p",  INFO(0x010216, 0x4d00,  64 * 1024, 128, 0) }, -	{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) }, -	{ "s25fl256s1", INFO(0x010219, 0x4d01,  64 * 1024, 512, 0) }, -	{ "s25fl512s",  INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) }, -	{ "s70fl01gs",  INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) }, -	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) }, -	{ "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) }, -	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64, 0) }, -	{ "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, 0) }, -	{ "s25sl004a",  INFO(0x010212,      0,  64 * 1024,   8, 0) }, -	{ "s25sl008a",  INFO(0x010213,      0,  64 * 1024,  16, 0) }, -	{ "s25sl016a",  INFO(0x010214,      0,  64 * 1024,  32, 0) }, -	{ "s25sl032a",  INFO(0x010215,      0,  64 * 1024,  64, 0) }, -	{ "s25sl064a",  INFO(0x010216,      0,  64 * 1024, 128, 0) }, -	{ "s25fl016k",  INFO(0xef4015,      0,  64 * 1024,  32, SECT_4K) }, -	{ "s25fl064k",  INFO(0xef4017,      0,  64 * 
1024, 128, SECT_4K) }, - -	/* SST -- large erase sizes are "overlays", "sectors" are 4K */ -	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) }, -	{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) }, -	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) }, -	{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) }, -	{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) }, -	{ "sst25wf512",  INFO(0xbf2501, 0, 64 * 1024,  1, SECT_4K | SST_WRITE) }, -	{ "sst25wf010",  INFO(0xbf2502, 0, 64 * 1024,  2, SECT_4K | SST_WRITE) }, -	{ "sst25wf020",  INFO(0xbf2503, 0, 64 * 1024,  4, SECT_4K | SST_WRITE) }, -	{ "sst25wf040",  INFO(0xbf2504, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) }, - -	/* ST Microelectronics -- newer production may have feature updates */ -	{ "m25p05",  INFO(0x202010,  0,  32 * 1024,   2, 0) }, -	{ "m25p10",  INFO(0x202011,  0,  32 * 1024,   4, 0) }, -	{ "m25p20",  INFO(0x202012,  0,  64 * 1024,   4, 0) }, -	{ "m25p40",  INFO(0x202013,  0,  64 * 1024,   8, 0) }, -	{ "m25p80",  INFO(0x202014,  0,  64 * 1024,  16, 0) }, -	{ "m25p16",  INFO(0x202015,  0,  64 * 1024,  32, 0) }, -	{ "m25p32",  INFO(0x202016,  0,  64 * 1024,  64, 0) }, -	{ "m25p64",  INFO(0x202017,  0,  64 * 1024, 128, 0) }, -	{ "m25p128", INFO(0x202018,  0, 256 * 1024,  64, 0) }, -	{ "n25q032", INFO(0x20ba16,  0,  64 * 1024,  64, 0) }, - -	{ "m25p05-nonjedec",  INFO(0, 0,  32 * 1024,   2, 0) }, -	{ "m25p10-nonjedec",  INFO(0, 0,  32 * 1024,   4, 0) }, -	{ "m25p20-nonjedec",  INFO(0, 0,  64 * 1024,   4, 0) }, -	{ "m25p40-nonjedec",  INFO(0, 0,  64 * 1024,   8, 0) }, -	{ "m25p80-nonjedec",  INFO(0, 0,  64 * 1024,  16, 0) }, -	{ "m25p16-nonjedec",  INFO(0, 0,  64 * 1024,  32, 0) }, -	{ "m25p32-nonjedec",  INFO(0, 0,  64 * 1024,  64, 0) }, -	{ "m25p64-nonjedec",  INFO(0, 0,  64 * 1024, 128, 0) }, -	{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024,  64, 0) }, - -	{ "m45pe10", INFO(0x204011,  0, 64 * 1024,    2, 0) }, -	{ 
"m45pe80", INFO(0x204014,  0, 64 * 1024,   16, 0) }, -	{ "m45pe16", INFO(0x204015,  0, 64 * 1024,   32, 0) }, - -	{ "m25pe20", INFO(0x208012,  0, 64 * 1024,  4,       0) }, -	{ "m25pe80", INFO(0x208014,  0, 64 * 1024, 16,       0) }, -	{ "m25pe16", INFO(0x208015,  0, 64 * 1024, 32, SECT_4K) }, - -	{ "m25px32",    INFO(0x207116,  0, 64 * 1024, 64, SECT_4K) }, -	{ "m25px32-s0", INFO(0x207316,  0, 64 * 1024, 64, SECT_4K) }, -	{ "m25px32-s1", INFO(0x206316,  0, 64 * 1024, 64, SECT_4K) }, -	{ "m25px64",    INFO(0x207117,  0, 64 * 1024, 128, 0) }, - -	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ -	{ "w25x10", INFO(0xef3011, 0, 64 * 1024,  2,  SECT_4K) }, -	{ "w25x20", INFO(0xef3012, 0, 64 * 1024,  4,  SECT_4K) }, -	{ "w25x40", INFO(0xef3013, 0, 64 * 1024,  8,  SECT_4K) }, -	{ "w25x80", INFO(0xef3014, 0, 64 * 1024,  16, SECT_4K) }, -	{ "w25x16", INFO(0xef3015, 0, 64 * 1024,  32, SECT_4K) }, -	{ "w25x32", INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) }, -	{ "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) }, -	{ "w25q32dw", INFO(0xef6016, 0, 64 * 1024,  64, SECT_4K) }, -	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, -	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, -	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, -	{ "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) }, -	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) }, -	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, -	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) }, - -	/* Catalyst / On Semiconductor -- non-JEDEC */ -	{ "cat25c11", CAT25_INFO(  16, 8, 16, 1, M25P_NO_ERASE | M25P_NO_FR) }, -	{ "cat25c03", CAT25_INFO(  32, 8, 16, 2, M25P_NO_ERASE | M25P_NO_FR) }, -	{ "cat25c09", CAT25_INFO( 128, 8, 32, 2, M25P_NO_ERASE | M25P_NO_FR) }, -	{ "cat25c17", CAT25_INFO( 256, 8, 32, 2, M25P_NO_ERASE | M25P_NO_FR) }, -	{ "cat25128", CAT25_INFO(2048, 8, 64, 2, M25P_NO_ERASE | M25P_NO_FR) }, -	{ }, -}; -MODULE_DEVICE_TABLE(spi, m25p_ids); - -static 
const struct spi_device_id *jedec_probe(struct spi_device *spi) -{ -	int			tmp; -	u8			code = OPCODE_RDID; -	u8			id[5]; -	u32			jedec; -	u16                     ext_jedec; -	struct flash_info	*info; - -	/* JEDEC also defines an optional "extended device information" -	 * string for after vendor-specific data, after the three bytes -	 * we use here.  Supporting some chips might require using it. -	 */ -	tmp = spi_write_then_read(spi, &code, 1, id, 5); -	if (tmp < 0) { -		pr_debug("%s: error %d reading JEDEC ID\n", -				dev_name(&spi->dev), tmp); -		return ERR_PTR(tmp); -	} -	jedec = id[0]; -	jedec = jedec << 8; -	jedec |= id[1]; -	jedec = jedec << 8; -	jedec |= id[2]; +	/* Set up command buffer. */ +	flash->command[0] = nor->erase_opcode; +	m25p_addr2cmd(nor, offset, flash->command); -	ext_jedec = id[3] << 8 | id[4]; +	spi_write(flash->spi, flash->command, m25p_cmdsz(nor)); -	for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) { -		info = (void *)m25p_ids[tmp].driver_data; -		if (info->jedec_id == jedec) { -			if (info->ext_id != 0 && info->ext_id != ext_jedec) -				continue; -			return &m25p_ids[tmp]; -		} -	} -	dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec); -	return ERR_PTR(-ENODEV); +	return 0;  } -  /*   * board specific setup should have ensured the SPI clock used here   * matches what the READ command supports, at least until this driver @@ -921,196 +191,45 @@ static const struct spi_device_id *jedec_probe(struct spi_device *spi)   */  static int m25p_probe(struct spi_device *spi)  { -	const struct spi_device_id	*id = spi_get_device_id(spi); -	struct flash_platform_data	*data; -	struct m25p			*flash; -	struct flash_info		*info; -	unsigned			i;  	struct mtd_part_parser_data	ppdata; -	struct device_node __maybe_unused *np = spi->dev.of_node; - -#ifdef CONFIG_MTD_OF_PARTS -	if (!of_device_is_available(np)) -		return -ENODEV; -#endif - -	/* Platform data helps sort out which chip type we have, as -	 * well as how this board partitions it.  
If we don't have -	 * a chip ID, try the JEDEC id commands; they'll work for most -	 * newer chips, even if we don't recognize the particular chip. -	 */ -	data = dev_get_platdata(&spi->dev); -	if (data && data->type) { -		const struct spi_device_id *plat_id; - -		for (i = 0; i < ARRAY_SIZE(m25p_ids) - 1; i++) { -			plat_id = &m25p_ids[i]; -			if (strcmp(data->type, plat_id->name)) -				continue; -			break; -		} - -		if (i < ARRAY_SIZE(m25p_ids) - 1) -			id = plat_id; -		else -			dev_warn(&spi->dev, "unrecognized id %s\n", data->type); -	} - -	info = (void *)id->driver_data; - -	if (info->jedec_id) { -		const struct spi_device_id *jid; - -		jid = jedec_probe(spi); -		if (IS_ERR(jid)) { -			return PTR_ERR(jid); -		} else if (jid != id) { -			/* -			 * JEDEC knows better, so overwrite platform ID. We -			 * can't trust partitions any longer, but we'll let -			 * mtd apply them anyway, since some partitions may be -			 * marked read-only, and we don't want to lose that -			 * information, even if it's not 100% accurate. -			 */ -			dev_warn(&spi->dev, "found %s, expected %s\n", -				 jid->name, id->name); -			id = jid; -			info = (void *)jid->driver_data; -		} -	} +	struct flash_platform_data	*data; +	struct m25p *flash; +	struct spi_nor *nor; +	enum read_mode mode = SPI_NOR_NORMAL; +	int ret; -	flash = kzalloc(sizeof *flash, GFP_KERNEL); +	flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);  	if (!flash)  		return -ENOMEM; -	flash->command = kmalloc(MAX_CMD_SIZE + (flash->fast_read ? 
1 : 0), -					GFP_KERNEL); -	if (!flash->command) { -		kfree(flash); -		return -ENOMEM; -	} -	flash->spi = spi; -	mutex_init(&flash->lock); -	spi_set_drvdata(spi, flash); +	nor = &flash->spi_nor; -	/* -	 * Atmel, SST and Intel/Numonyx serial flash tend to power -	 * up with the software protection bits set -	 */ - -	if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL || -	    JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL || -	    JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) { -		write_enable(flash); -		write_sr(flash, 0); -	} - -	if (data && data->name) -		flash->mtd.name = data->name; -	else -		flash->mtd.name = dev_name(&spi->dev); - -	flash->mtd.type = MTD_NORFLASH; -	flash->mtd.writesize = 1; -	flash->mtd.flags = MTD_CAP_NORFLASH; -	flash->mtd.size = info->sector_size * info->n_sectors; -	flash->mtd._erase = m25p80_erase; -	flash->mtd._read = m25p80_read; - -	/* flash protection support for STmicro chips */ -	if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) { -		flash->mtd._lock = m25p80_lock; -		flash->mtd._unlock = m25p80_unlock; -	} +	/* install the hooks */ +	nor->read = m25p80_read; +	nor->write = m25p80_write; +	nor->erase = m25p80_erase; +	nor->write_reg = m25p80_write_reg; +	nor->read_reg = m25p80_read_reg; -	/* sst flash chips use AAI word program */ -	if (info->flags & SST_WRITE) -		flash->mtd._write = sst_write; -	else -		flash->mtd._write = m25p80_write; +	nor->dev = &spi->dev; +	nor->mtd = &flash->mtd; +	nor->priv = flash; -	/* prefer "small sector" erase if possible */ -	if (info->flags & SECT_4K) { -		flash->erase_opcode = OPCODE_BE_4K; -		flash->mtd.erasesize = 4096; -	} else if (info->flags & SECT_4K_PMC) { -		flash->erase_opcode = OPCODE_BE_4K_PMC; -		flash->mtd.erasesize = 4096; -	} else { -		flash->erase_opcode = OPCODE_SE; -		flash->mtd.erasesize = info->sector_size; -	} +	spi_set_drvdata(spi, flash); +	flash->mtd.priv = nor; +	flash->spi = spi; -	if (info->flags & M25P_NO_ERASE) -		flash->mtd.flags |= MTD_NO_ERASE; +	if (spi->mode & SPI_RX_QUAD) +		mode 
= SPI_NOR_QUAD; +	else if (spi->mode & SPI_RX_DUAL) +		mode = SPI_NOR_DUAL; +	ret = spi_nor_scan(nor, spi_get_device_id(spi), mode); +	if (ret) +		return ret; +	data = dev_get_platdata(&spi->dev);  	ppdata.of_node = spi->dev.of_node; -	flash->mtd.dev.parent = &spi->dev; -	flash->page_size = info->page_size; -	flash->mtd.writebufsize = flash->page_size; - -	flash->fast_read = false; -	if (np && of_property_read_bool(np, "m25p,fast-read")) -		flash->fast_read = true; - -#ifdef CONFIG_M25PXX_USE_FAST_READ -	flash->fast_read = true; -#endif -	if (info->flags & M25P_NO_FR) -		flash->fast_read = false; - -	/* Default commands */ -	if (flash->fast_read) -		flash->read_opcode = OPCODE_FAST_READ; -	else -		flash->read_opcode = OPCODE_NORM_READ; - -	flash->program_opcode = OPCODE_PP; -	if (info->addr_width) -		flash->addr_width = info->addr_width; -	else if (flash->mtd.size > 0x1000000) { -		/* enable 4-byte addressing if the device exceeds 16MiB */ -		flash->addr_width = 4; -		if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) { -			/* Dedicated 4-byte command set */ -			flash->read_opcode = flash->fast_read ? 
-				OPCODE_FAST_READ_4B : -				OPCODE_NORM_READ_4B; -			flash->program_opcode = OPCODE_PP_4B; -			/* No small sector erase for 4-byte command set */ -			flash->erase_opcode = OPCODE_SE_4B; -			flash->mtd.erasesize = info->sector_size; -		} else -			set_4byte(flash, info->jedec_id, 1); -	} else { -		flash->addr_width = 3; -	} - -	dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name, -			(long long)flash->mtd.size >> 10); - -	pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) " -			".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", -		flash->mtd.name, -		(long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), -		flash->mtd.erasesize, flash->mtd.erasesize / 1024, -		flash->mtd.numeraseregions); - -	if (flash->mtd.numeraseregions) -		for (i = 0; i < flash->mtd.numeraseregions; i++) -			pr_debug("mtd.eraseregions[%d] = { .offset = 0x%llx, " -				".erasesize = 0x%.8x (%uKiB), " -				".numblocks = %d }\n", -				i, (long long)flash->mtd.eraseregions[i].offset, -				flash->mtd.eraseregions[i].erasesize, -				flash->mtd.eraseregions[i].erasesize / 1024, -				flash->mtd.eraseregions[i].numblocks); - - -	/* partitions should match sector boundaries; and it may be good to -	 * use readonly partitions for writeprotected sectors (BP2..BP0). -	 */  	return mtd_device_parse_register(&flash->mtd, NULL, &ppdata,  			data ? data->parts : NULL,  			data ? data->nr_parts : 0); @@ -1120,15 +239,9 @@ static int m25p_probe(struct spi_device *spi)  static int m25p_remove(struct spi_device *spi)  {  	struct m25p	*flash = spi_get_drvdata(spi); -	int		status;  	/* Clean up MTD stuff. 
*/ -	status = mtd_device_unregister(&flash->mtd); -	if (status == 0) { -		kfree(flash->command); -		kfree(flash); -	} -	return 0; +	return mtd_device_unregister(&flash->mtd);  } @@ -1137,7 +250,7 @@ static struct spi_driver m25p80_driver = {  		.name	= "m25p80",  		.owner	= THIS_MODULE,  	}, -	.id_table	= m25p_ids, +	.id_table	= spi_nor_ids,  	.probe	= m25p_probe,  	.remove	= m25p_remove, diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c index 182849d39c6..5c8b322ba90 100644 --- a/drivers/mtd/devices/ms02-nv.c +++ b/drivers/mtd/devices/ms02-nv.c @@ -205,7 +205,7 @@ static int __init ms02nv_init_one(ulong addr)  	mtd->type = MTD_RAM;  	mtd->flags = MTD_CAP_RAM;  	mtd->size = fixsize; -	mtd->name = (char *)ms02nv_name; +	mtd->name = ms02nv_name;  	mtd->owner = THIS_MODULE;  	mtd->_read = ms02nv_read;  	mtd->_write = ms02nv_write; diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 0e8cbfeba11..dd22ce2cc9a 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -10,7 +10,6 @@   * 2 of the License, or (at your option) any later version.  */  #include <linux/module.h> -#include <linux/init.h>  #include <linux/slab.h>  #include <linux/delay.h>  #include <linux/device.h> @@ -88,8 +87,6 @@ struct dataflash {  	uint8_t			command[4];  	char			name[24]; -	unsigned		partitioned:1; -  	unsigned short		page_offset;	/* offset in flash address */  	unsigned int		page_size;	/* of bytes per page */ @@ -442,8 +439,8 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,  #ifdef CONFIG_MTD_DATAFLASH_OTP -static int dataflash_get_otp_info(struct mtd_info *mtd, -		struct otp_info *info, size_t len) +static int dataflash_get_otp_info(struct mtd_info *mtd, size_t len, +				  size_t *retlen, struct otp_info *info)  {  	/* Report both blocks as identical:  bytes 0..64, locked.  	 
* Unless the user block changed from all-ones, we can't @@ -452,7 +449,8 @@ static int dataflash_get_otp_info(struct mtd_info *mtd,  	info->start = 0;  	info->length = 64;  	info->locked = 1; -	return sizeof(*info); +	*retlen = sizeof(*info); +	return 0;  }  static ssize_t otp_read(struct spi_device *spi, unsigned base, @@ -544,14 +542,18 @@ static int dataflash_write_user_otp(struct mtd_info *mtd,  	struct dataflash	*priv = mtd->priv;  	int			status; -	if (len > 64) -		return -EINVAL; +	if (from >= 64) { +		/* +		 * Attempting to write beyond the end of OTP memory, +		 * no data can be written. +		 */ +		*retlen = 0; +		return 0; +	} -	/* Strictly speaking, we *could* truncate the write ... but -	 * let's not do that for the only write that's ever possible. -	 */ +	/* Truncate the write to fit into OTP memory. */  	if ((from + len) > 64) -		return -EINVAL; +		len = 64 - from;  	/* OUT: OP_WRITE_SECURITY, 3 zeroes, 64 data-or-zero bytes  	 * IN:  ignore all @@ -671,7 +673,6 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,  	if (!err)  		return 0; -	spi_set_drvdata(spi, NULL);  	kfree(priv);  	return err;  } @@ -881,7 +882,7 @@ static int dataflash_probe(struct spi_device *spi)  		break;  	/* obsolete AT45DB1282 not (yet?) 
supported */  	default: -		pr_debug("%s: unsupported device (%x)\n", dev_name(&spi->dev), +		dev_info(&spi->dev, "unsupported device (%x)\n",  				status & 0x3c);  		status = -ENODEV;  	} @@ -901,10 +902,8 @@ static int dataflash_remove(struct spi_device *spi)  	pr_debug("%s: remove\n", dev_name(&spi->dev));  	status = mtd_device_unregister(&flash->mtd); -	if (status == 0) { -		spi_set_drvdata(spi, NULL); +	if (status == 0)  		kfree(flash); -	}  	return status;  } diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c index ec59d65897f..8e285089229 100644 --- a/drivers/mtd/devices/mtdram.c +++ b/drivers/mtd/devices/mtdram.c @@ -92,7 +92,7 @@ static void __exit cleanup_mtdram(void)  }  int mtdram_init_device(struct mtd_info *mtd, void *mapped_address, -		unsigned long size, char *name) +		unsigned long size, const char *name)  {  	memset(mtd, 0, sizeof(*mtd)); diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 67823de68db..2cceebfb251 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c @@ -94,7 +94,7 @@ static void unregister_devices(void)  	}  } -static int register_device(char *name, unsigned long start, unsigned long len) +static int register_device(char *name, phys_addr_t start, size_t len)  {  	struct phram_mtd_list *new;  	int ret = -ENOMEM; @@ -141,35 +141,35 @@ out0:  	return ret;  } -static int ustrtoul(const char *cp, char **endp, unsigned int base) +static int parse_num64(uint64_t *num64, char *token)  { -	unsigned long result = simple_strtoul(cp, endp, base); - -	switch (**endp) { -	case 'G': -		result *= 1024; -	case 'M': -		result *= 1024; -	case 'k': -		result *= 1024; +	size_t len; +	int shift = 0; +	int ret; + +	len = strlen(token);  	/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. 
*/ -		if ((*endp)[1] == 'i') -			(*endp) += 2; +	if (len > 2) { +		if (token[len - 1] == 'i') { +			switch (token[len - 2]) { +			case 'G': +				shift += 10; +			case 'M': +				shift += 10; +			case 'k': +				shift += 10; +				token[len - 2] = 0; +				break; +			default: +				return -EINVAL; +			} +		}  	} -	return result; -} - -static int parse_num32(uint32_t *num32, const char *token) -{ -	char *endp; -	unsigned long n; -	n = ustrtoul(token, &endp, 0); -	if (*endp) -		return -EINVAL; +	ret = kstrtou64(token, 0, num64); +	*num64 <<= shift; -	*num32 = n; -	return 0; +	return ret;  }  static int parse_name(char **pname, const char *token) @@ -205,23 +205,26 @@ static inline void kill_final_newline(char *str)  	return 1;		\  } while (0) +#ifndef MODULE +static int phram_init_called;  /*   * This shall contain the module parameter if any. It is of the form:   * - phram=<device>,<address>,<size> for module case   * - phram.phram=<device>,<address>,<size> for built-in case - * We leave 64 bytes for the device name, 12 for the address and 12 for the + * We leave 64 bytes for the device name, 20 for the address and 20 for the   * size.   
* Example: phram.phram=rootfs,0xa0000000,512Mi   */ -static __initdata char phram_paramline[64+12+12]; +static char phram_paramline[64 + 20 + 20]; +#endif -static int __init phram_setup(const char *val) +static int phram_setup(const char *val)  { -	char buf[64+12+12], *str = buf; +	char buf[64 + 20 + 20], *str = buf;  	char *token[3];  	char *name; -	uint32_t start; -	uint32_t len; +	uint64_t start; +	uint64_t len;  	int i, ret;  	if (strnlen(val, sizeof(buf)) >= sizeof(buf)) @@ -243,13 +246,13 @@ static int __init phram_setup(const char *val)  	if (ret)  		return ret; -	ret = parse_num32(&start, token[1]); +	ret = parse_num64(&start, token[1]);  	if (ret) {  		kfree(name);  		parse_err("illegal start address\n");  	} -	ret = parse_num32(&len, token[2]); +	ret = parse_num64(&len, token[2]);  	if (ret) {  		kfree(name);  		parse_err("illegal device length\n"); @@ -257,24 +260,43 @@ static int __init phram_setup(const char *val)  	ret = register_device(name, start, len);  	if (!ret) -		pr_info("%s device: %#x at %#x\n", name, len, start); +		pr_info("%s device: %#llx at %#llx\n", name, len, start);  	else  		kfree(name);  	return ret;  } -static int __init phram_param_call(const char *val, struct kernel_param *kp) +static int phram_param_call(const char *val, struct kernel_param *kp)  { +#ifdef MODULE +	return phram_setup(val); +#else  	/* -	 * This function is always called before 'init_phram()', whether -	 * built-in or module. +	 * If more parameters are later passed in via +	 * /sys/module/phram/parameters/phram +	 * and init_phram() has already been called, +	 * we can parse the argument now.  	 */ + +	if (phram_init_called) +		return phram_setup(val); + +	/* +	 * During early boot stage, we only save the parameters +	 * here. We must parse them later: if the param passed +	 * from kernel boot command line, phram_param_call() is +	 * called so early that it is not possible to resolve +	 * the device (even kmalloc() fails). Defer that work to +	 * phram_setup(). 
+	 */ +  	if (strlen(val) >= sizeof(phram_paramline))  		return -ENOSPC;  	strcpy(phram_paramline, val);  	return 0; +#endif  }  module_param_call(phram, phram_param_call, NULL, NULL, 000); @@ -283,10 +305,15 @@ MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\"  static int __init init_phram(void)  { +	int ret = 0; + +#ifndef MODULE  	if (phram_paramline[0]) -		return phram_setup(phram_paramline); +		ret = phram_setup(phram_paramline); +	phram_init_called = 1; +#endif -	return 0; +	return ret;  }  static void __exit cleanup_phram(void) diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c index 0c51b988e1f..f02603e1bfe 100644 --- a/drivers/mtd/devices/pmc551.c +++ b/drivers/mtd/devices/pmc551.c @@ -725,16 +725,11 @@ static int __init init_pmc551(void)  		}  		mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL); -		if (!mtd) { -			printk(KERN_NOTICE "pmc551: Cannot allocate new MTD " -				"device.\n"); +		if (!mtd)  			break; -		}  		priv = kzalloc(sizeof(struct mypriv), GFP_KERNEL);  		if (!priv) { -			printk(KERN_NOTICE "pmc551: Cannot allocate new MTD " -				"device.\n");  			kfree(mtd);  			break;  		} diff --git a/drivers/mtd/devices/serial_flash_cmds.h b/drivers/mtd/devices/serial_flash_cmds.h new file mode 100644 index 00000000000..f59a125295d --- /dev/null +++ b/drivers/mtd/devices/serial_flash_cmds.h @@ -0,0 +1,61 @@ +/* + * Generic/SFDP Flash Commands and Device Capabilities + * + * Copyright (C) 2013 Lee Jones <lee.jones@lianro.org> + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef _MTD_SERIAL_FLASH_CMDS_H +#define _MTD_SERIAL_FLASH_CMDS_H + +/* Generic Flash Commands/OPCODEs */ +#define SPINOR_OP_RDSR2		0x35 +#define SPINOR_OP_WRVCR		0x81 +#define SPINOR_OP_RDVCR		0x85 + +/* JEDEC Standard - Serial Flash Discoverable Parmeters (SFDP) Commands */ +#define SPINOR_OP_READ_1_2_2	0xbb	/* DUAL I/O READ */ +#define SPINOR_OP_READ_1_4_4	0xeb	/* QUAD I/O READ */ + +#define SPINOR_OP_WRITE		0x02	/* PAGE PROGRAM */ +#define SPINOR_OP_WRITE_1_1_2	0xa2	/* DUAL INPUT PROGRAM */ +#define SPINOR_OP_WRITE_1_2_2	0xd2	/* DUAL INPUT EXT PROGRAM */ +#define SPINOR_OP_WRITE_1_1_4	0x32	/* QUAD INPUT PROGRAM */ +#define SPINOR_OP_WRITE_1_4_4	0x12	/* QUAD INPUT EXT PROGRAM */ + +/* READ commands with 32-bit addressing */ +#define SPINOR_OP_READ4_1_2_2	0xbc +#define SPINOR_OP_READ4_1_4_4	0xec + +/* Configuration flags */ +#define FLASH_FLAG_SINGLE	0x000000ff +#define FLASH_FLAG_READ_WRITE	0x00000001 +#define FLASH_FLAG_READ_FAST	0x00000002 +#define FLASH_FLAG_SE_4K	0x00000004 +#define FLASH_FLAG_SE_32K	0x00000008 +#define FLASH_FLAG_CE		0x00000010 +#define FLASH_FLAG_32BIT_ADDR	0x00000020 +#define FLASH_FLAG_RESET	0x00000040 +#define FLASH_FLAG_DYB_LOCKING	0x00000080 + +#define FLASH_FLAG_DUAL		0x0000ff00 +#define FLASH_FLAG_READ_1_1_2	0x00000100 +#define FLASH_FLAG_READ_1_2_2	0x00000200 +#define FLASH_FLAG_READ_2_2_2	0x00000400 +#define FLASH_FLAG_WRITE_1_1_2	0x00001000 +#define FLASH_FLAG_WRITE_1_2_2	0x00002000 +#define FLASH_FLAG_WRITE_2_2_2	0x00004000 + +#define FLASH_FLAG_QUAD		0x00ff0000 +#define FLASH_FLAG_READ_1_1_4	0x00010000 +#define FLASH_FLAG_READ_1_4_4	0x00020000 +#define FLASH_FLAG_READ_4_4_4	0x00040000 +#define FLASH_FLAG_WRITE_1_1_4	0x00100000 +#define FLASH_FLAG_WRITE_1_4_4	0x00200000 +#define FLASH_FLAG_WRITE_4_4_4	0x00400000 + +#endif /* _MTD_SERIAL_FLASH_CMDS_H */ diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index 5a5cd2ace4a..2fc4957cbe7 100644 --- a/drivers/mtd/devices/slram.c +++ 
b/drivers/mtd/devices/slram.c @@ -280,14 +280,11 @@ __setup("slram=", mtd_slram_setup);  static int __init init_slram(void)  {  	char *devname; -	int i;  #ifndef MODULE  	char *devstart;  	char *devlength; -	i = 0; -  	if (!map) {  		E("slram: not enough parameters.\n");  		return(-EINVAL); @@ -314,6 +311,7 @@ static int __init init_slram(void)  	}  #else  	int count; +	int i;  	for (count = 0; count < SLRAM_MAX_DEVICES_PARAMS && map[count];  			count++) { diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c index 42382141206..c4176b0f382 100644 --- a/drivers/mtd/devices/spear_smi.c +++ b/drivers/mtd/devices/spear_smi.c @@ -6,7 +6,7 @@   *   * Copyright © 2010 STMicroelectronics.   * Ashish Priyadarshi - * Shiraz Hashim <shiraz.hashim@st.com> + * Shiraz Hashim <shiraz.linux.kernel@gmail.com>   *   * This file is licensed under the terms of the GNU General Public   * License version 2. This program is licensed "as is" without any @@ -913,7 +913,6 @@ static int spear_smi_probe(struct platform_device *pdev)  	if (np) {  		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);  		if (!pdata) { -			pr_err("%s: ERROR: no memory", __func__);  			ret = -ENOMEM;  			goto err;  		} @@ -943,7 +942,6 @@ static int spear_smi_probe(struct platform_device *pdev)  	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_ATOMIC);  	if (!dev) {  		ret = -ENOMEM; -		dev_err(&pdev->dev, "mem alloc fail\n");  		goto err;  	} @@ -1091,5 +1089,5 @@ static struct platform_driver spear_smi_driver = {  module_platform_driver(spear_smi_driver);  MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.hashim@st.com>"); +MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.linux.kernel@gmail.com>");  MODULE_DESCRIPTION("MTD SMI driver for serial nor flash chips"); diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c index a42f1f0e728..c63ecbcad0b 100644 --- a/drivers/mtd/devices/sst25l.c +++ 
b/drivers/mtd/devices/sst25l.c @@ -15,7 +15,6 @@   *   */ -#include <linux/init.h>  #include <linux/module.h>  #include <linux/device.h>  #include <linux/mutex.h> @@ -364,7 +363,7 @@ static int sst25l_probe(struct spi_device *spi)  	if (!flash_info)  		return -ENODEV; -	flash = kzalloc(sizeof(struct sst25l_flash), GFP_KERNEL); +	flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);  	if (!flash)  		return -ENOMEM; @@ -402,11 +401,8 @@ static int sst25l_probe(struct spi_device *spi)  	ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,  					data ? data->parts : NULL,  					data ? data->nr_parts : 0); -	if (ret) { -		kfree(flash); -		spi_set_drvdata(spi, NULL); +	if (ret)  		return -ENODEV; -	}  	return 0;  } @@ -414,12 +410,8 @@ static int sst25l_probe(struct spi_device *spi)  static int sst25l_remove(struct spi_device *spi)  {  	struct sst25l_flash *flash = spi_get_drvdata(spi); -	int ret; -	ret = mtd_device_unregister(&flash->mtd); -	if (ret == 0) -		kfree(flash); -	return ret; +	return mtd_device_unregister(&flash->mtd);  }  static struct spi_driver sst25l_driver = { diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c new file mode 100644 index 00000000000..d252514d3e9 --- /dev/null +++ b/drivers/mtd/devices/st_spi_fsm.c @@ -0,0 +1,2080 @@ +/* + * st_spi_fsm.c	- ST Fast Sequence Mode (FSM) Serial Flash Controller + * + * Author: Angus Clark <angus.clark@st.com> + * + * Copyright (C) 2010-2014 STMicroelectronics Limited + * + * JEDEC probe based on drivers/mtd/devices/m25p80.c + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/platform_device.h> +#include <linux/mfd/syscon.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/spi-nor.h> +#include <linux/sched.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/of.h> + +#include "serial_flash_cmds.h" + +/* + * FSM SPI Controller Registers + */ +#define SPI_CLOCKDIV			0x0010 +#define SPI_MODESELECT			0x0018 +#define SPI_CONFIGDATA			0x0020 +#define SPI_STA_MODE_CHANGE		0x0028 +#define SPI_FAST_SEQ_TRANSFER_SIZE	0x0100 +#define SPI_FAST_SEQ_ADD1		0x0104 +#define SPI_FAST_SEQ_ADD2		0x0108 +#define SPI_FAST_SEQ_ADD_CFG		0x010c +#define SPI_FAST_SEQ_OPC1		0x0110 +#define SPI_FAST_SEQ_OPC2		0x0114 +#define SPI_FAST_SEQ_OPC3		0x0118 +#define SPI_FAST_SEQ_OPC4		0x011c +#define SPI_FAST_SEQ_OPC5		0x0120 +#define SPI_MODE_BITS			0x0124 +#define SPI_DUMMY_BITS			0x0128 +#define SPI_FAST_SEQ_FLASH_STA_DATA	0x012c +#define SPI_FAST_SEQ_1			0x0130 +#define SPI_FAST_SEQ_2			0x0134 +#define SPI_FAST_SEQ_3			0x0138 +#define SPI_FAST_SEQ_4			0x013c +#define SPI_FAST_SEQ_CFG		0x0140 +#define SPI_FAST_SEQ_STA		0x0144 +#define SPI_QUAD_BOOT_SEQ_INIT_1	0x0148 +#define SPI_QUAD_BOOT_SEQ_INIT_2	0x014c +#define SPI_QUAD_BOOT_READ_SEQ_1	0x0150 +#define SPI_QUAD_BOOT_READ_SEQ_2	0x0154 +#define SPI_PROGRAM_ERASE_TIME		0x0158 +#define SPI_MULT_PAGE_REPEAT_SEQ_1	0x015c +#define SPI_MULT_PAGE_REPEAT_SEQ_2	0x0160 +#define SPI_STATUS_WR_TIME_REG		0x0164 +#define SPI_FAST_SEQ_DATA_REG		0x0300 + +/* + * Register: SPI_MODESELECT + */ +#define SPI_MODESELECT_CONTIG		0x01 +#define SPI_MODESELECT_FASTREAD		0x02 +#define SPI_MODESELECT_DUALIO		0x04 +#define SPI_MODESELECT_FSM		0x08 +#define SPI_MODESELECT_QUADBOOT		0x10 + +/* + * Register: SPI_CONFIGDATA + */ +#define SPI_CFG_DEVICE_ST		0x1 +#define SPI_CFG_DEVICE_ATMEL		0x4 +#define SPI_CFG_MIN_CS_HIGH(x)		(((x) & 0xfff) << 4) +#define SPI_CFG_CS_SETUPHOLD(x)		(((x) & 
0xff) << 16) +#define SPI_CFG_DATA_HOLD(x)		(((x) & 0xff) << 24) + +#define SPI_CFG_DEFAULT_MIN_CS_HIGH    SPI_CFG_MIN_CS_HIGH(0x0AA) +#define SPI_CFG_DEFAULT_CS_SETUPHOLD   SPI_CFG_CS_SETUPHOLD(0xA0) +#define SPI_CFG_DEFAULT_DATA_HOLD      SPI_CFG_DATA_HOLD(0x00) + +/* + * Register: SPI_FAST_SEQ_TRANSFER_SIZE + */ +#define TRANSFER_SIZE(x)		((x) * 8) + +/* + * Register: SPI_FAST_SEQ_ADD_CFG + */ +#define ADR_CFG_CYCLES_ADD1(x)		((x) << 0) +#define ADR_CFG_PADS_1_ADD1		(0x0 << 6) +#define ADR_CFG_PADS_2_ADD1		(0x1 << 6) +#define ADR_CFG_PADS_4_ADD1		(0x3 << 6) +#define ADR_CFG_CSDEASSERT_ADD1		(1   << 8) +#define ADR_CFG_CYCLES_ADD2(x)		((x) << (0+16)) +#define ADR_CFG_PADS_1_ADD2		(0x0 << (6+16)) +#define ADR_CFG_PADS_2_ADD2		(0x1 << (6+16)) +#define ADR_CFG_PADS_4_ADD2		(0x3 << (6+16)) +#define ADR_CFG_CSDEASSERT_ADD2		(1   << (8+16)) + +/* + * Register: SPI_FAST_SEQ_n + */ +#define SEQ_OPC_OPCODE(x)		((x) << 0) +#define SEQ_OPC_CYCLES(x)		((x) << 8) +#define SEQ_OPC_PADS_1			(0x0 << 14) +#define SEQ_OPC_PADS_2			(0x1 << 14) +#define SEQ_OPC_PADS_4			(0x3 << 14) +#define SEQ_OPC_CSDEASSERT		(1   << 16) + +/* + * Register: SPI_FAST_SEQ_CFG + */ +#define SEQ_CFG_STARTSEQ		(1 << 0) +#define SEQ_CFG_SWRESET			(1 << 5) +#define SEQ_CFG_CSDEASSERT		(1 << 6) +#define SEQ_CFG_READNOTWRITE		(1 << 7) +#define SEQ_CFG_ERASE			(1 << 8) +#define SEQ_CFG_PADS_1			(0x0 << 16) +#define SEQ_CFG_PADS_2			(0x1 << 16) +#define SEQ_CFG_PADS_4			(0x3 << 16) + +/* + * Register: SPI_MODE_BITS + */ +#define MODE_DATA(x)			(x & 0xff) +#define MODE_CYCLES(x)			((x & 0x3f) << 16) +#define MODE_PADS_1			(0x0 << 22) +#define MODE_PADS_2			(0x1 << 22) +#define MODE_PADS_4			(0x3 << 22) +#define DUMMY_CSDEASSERT		(1   << 24) + +/* + * Register: SPI_DUMMY_BITS + */ +#define DUMMY_CYCLES(x)			((x & 0x3f) << 16) +#define DUMMY_PADS_1			(0x0 << 22) +#define DUMMY_PADS_2			(0x1 << 22) +#define DUMMY_PADS_4			(0x3 << 22) +#define DUMMY_CSDEASSERT		(1   << 24) + +/* + * Register: 
SPI_FAST_SEQ_FLASH_STA_DATA + */ +#define STA_DATA_BYTE1(x)		((x & 0xff) << 0) +#define STA_DATA_BYTE2(x)		((x & 0xff) << 8) +#define STA_PADS_1			(0x0 << 16) +#define STA_PADS_2			(0x1 << 16) +#define STA_PADS_4			(0x3 << 16) +#define STA_CSDEASSERT			(0x1 << 20) +#define STA_RDNOTWR			(0x1 << 21) + +/* + * FSM SPI Instruction Opcodes + */ +#define STFSM_OPC_CMD			0x1 +#define STFSM_OPC_ADD			0x2 +#define STFSM_OPC_STA			0x3 +#define STFSM_OPC_MODE			0x4 +#define STFSM_OPC_DUMMY		0x5 +#define STFSM_OPC_DATA			0x6 +#define STFSM_OPC_WAIT			0x7 +#define STFSM_OPC_JUMP			0x8 +#define STFSM_OPC_GOTO			0x9 +#define STFSM_OPC_STOP			0xF + +/* + * FSM SPI Instructions (== opcode + operand). + */ +#define STFSM_INSTR(cmd, op)		((cmd) | ((op) << 4)) + +#define STFSM_INST_CMD1			STFSM_INSTR(STFSM_OPC_CMD,	1) +#define STFSM_INST_CMD2			STFSM_INSTR(STFSM_OPC_CMD,	2) +#define STFSM_INST_CMD3			STFSM_INSTR(STFSM_OPC_CMD,	3) +#define STFSM_INST_CMD4			STFSM_INSTR(STFSM_OPC_CMD,	4) +#define STFSM_INST_CMD5			STFSM_INSTR(STFSM_OPC_CMD,	5) +#define STFSM_INST_ADD1			STFSM_INSTR(STFSM_OPC_ADD,	1) +#define STFSM_INST_ADD2			STFSM_INSTR(STFSM_OPC_ADD,	2) + +#define STFSM_INST_DATA_WRITE		STFSM_INSTR(STFSM_OPC_DATA,	1) +#define STFSM_INST_DATA_READ		STFSM_INSTR(STFSM_OPC_DATA,	2) + +#define STFSM_INST_STA_RD1		STFSM_INSTR(STFSM_OPC_STA,	0x1) +#define STFSM_INST_STA_WR1		STFSM_INSTR(STFSM_OPC_STA,	0x1) +#define STFSM_INST_STA_RD2		STFSM_INSTR(STFSM_OPC_STA,	0x2) +#define STFSM_INST_STA_WR1_2		STFSM_INSTR(STFSM_OPC_STA,	0x3) + +#define STFSM_INST_MODE			STFSM_INSTR(STFSM_OPC_MODE,	0) +#define STFSM_INST_DUMMY		STFSM_INSTR(STFSM_OPC_DUMMY,	0) +#define STFSM_INST_WAIT			STFSM_INSTR(STFSM_OPC_WAIT,	0) +#define STFSM_INST_STOP			STFSM_INSTR(STFSM_OPC_STOP,	0) + +#define STFSM_DEFAULT_EMI_FREQ 100000000UL                        /* 100 MHz */ +#define STFSM_DEFAULT_WR_TIME  (STFSM_DEFAULT_EMI_FREQ * (15/1000)) /* 15ms */ + +#define STFSM_FLASH_SAFE_FREQ  10000000UL                         /* 
10 MHz */ + +#define STFSM_MAX_WAIT_SEQ_MS  1000     /* FSM execution time */ + +/* S25FLxxxS commands */ +#define S25FL_CMD_WRITE4_1_1_4 0x34 +#define S25FL_CMD_SE4          0xdc +#define S25FL_CMD_CLSR         0x30 +#define S25FL_CMD_DYBWR                0xe1 +#define S25FL_CMD_DYBRD                0xe0 +#define S25FL_CMD_WRITE4       0x12    /* Note, opcode clashes with +					* 'SPINOR_OP_WRITE_1_4_4' +					* as found on N25Qxxx devices! */ + +/* Status register */ +#define FLASH_STATUS_BUSY      0x01 +#define FLASH_STATUS_WEL       0x02 +#define FLASH_STATUS_BP0       0x04 +#define FLASH_STATUS_BP1       0x08 +#define FLASH_STATUS_BP2       0x10 +#define FLASH_STATUS_SRWP0     0x80 +#define FLASH_STATUS_TIMEOUT   0xff +/* S25FL Error Flags */ +#define S25FL_STATUS_E_ERR     0x20 +#define S25FL_STATUS_P_ERR     0x40 + +#define N25Q_CMD_WRVCR         0x81 +#define N25Q_CMD_RDVCR         0x85 +#define N25Q_CMD_RDVECR        0x65 +#define N25Q_CMD_RDNVCR        0xb5 +#define N25Q_CMD_WRNVCR        0xb1 + +#define FLASH_PAGESIZE         256			/* In Bytes    */ +#define FLASH_PAGESIZE_32      (FLASH_PAGESIZE / 4)	/* In uint32_t */ +#define FLASH_MAX_BUSY_WAIT    (300 * HZ)	/* Maximum 'CHIPERASE' time */ + +/* + * Flags to tweak operation of default read/write/erase routines + */ +#define CFG_READ_TOGGLE_32BIT_ADDR     0x00000001 +#define CFG_WRITE_TOGGLE_32BIT_ADDR    0x00000002 +#define CFG_ERASESEC_TOGGLE_32BIT_ADDR 0x00000008 +#define CFG_S25FL_CHECK_ERROR_FLAGS    0x00000010 + +struct stfsm_seq { +	uint32_t data_size; +	uint32_t addr1; +	uint32_t addr2; +	uint32_t addr_cfg; +	uint32_t seq_opc[5]; +	uint32_t mode; +	uint32_t dummy; +	uint32_t status; +	uint8_t  seq[16]; +	uint32_t seq_cfg; +} __packed __aligned(4); + +struct stfsm { +	struct device		*dev; +	void __iomem		*base; +	struct resource		*region; +	struct mtd_info		mtd; +	struct mutex		lock; +	struct flash_info       *info; + +	uint32_t                configuration; +	uint32_t                
fifo_dir_delay; +	bool                    booted_from_spi; +	bool                    reset_signal; +	bool                    reset_por; + +	struct stfsm_seq stfsm_seq_read; +	struct stfsm_seq stfsm_seq_write; +	struct stfsm_seq stfsm_seq_en_32bit_addr; +}; + +/* Parameters to configure a READ or WRITE FSM sequence */ +struct seq_rw_config { +	uint32_t        flags;          /* flags to support config */ +	uint8_t         cmd;            /* FLASH command */ +	int             write;          /* Write Sequence */ +	uint8_t         addr_pads;      /* No. of addr pads (MODE & DUMMY) */ +	uint8_t         data_pads;      /* No. of data pads */ +	uint8_t         mode_data;      /* MODE data */ +	uint8_t         mode_cycles;    /* No. of MODE cycles */ +	uint8_t         dummy_cycles;   /* No. of DUMMY cycles */ +}; + +/* SPI Flash Device Table */ +struct flash_info { +	char            *name; +	/* +	 * JEDEC id zero means "no ID" (most older chips); otherwise it has +	 * a high byte of zero plus three data bytes: the manufacturer id, +	 * then a two byte device id. +	 */ +	u32             jedec_id; +	u16             ext_id; +	/* +	 * The size listed here is what works with SPINOR_OP_SE, which isn't +	 * necessarily called a "sector" by the vendor. +	 */ +	unsigned        sector_size; +	u16             n_sectors; +	u32             flags; +	/* +	 * Note, where FAST_READ is supported, freq_max specifies the +	 * FAST_READ frequency, not the READ frequency. 
+	 */ +	u32             max_freq; +	int             (*config)(struct stfsm *); +}; + +static int stfsm_n25q_config(struct stfsm *fsm); +static int stfsm_mx25_config(struct stfsm *fsm); +static int stfsm_s25fl_config(struct stfsm *fsm); +static int stfsm_w25q_config(struct stfsm *fsm); + +static struct flash_info flash_types[] = { +	/* +	 * ST Microelectronics/Numonyx -- +	 * (newer production versions may have feature updates +	 * (eg faster operating frequency) +	 */ +#define M25P_FLAG (FLASH_FLAG_READ_WRITE | FLASH_FLAG_READ_FAST) +	{ "m25p40",  0x202013, 0,  64 * 1024,   8, M25P_FLAG, 25, NULL }, +	{ "m25p80",  0x202014, 0,  64 * 1024,  16, M25P_FLAG, 25, NULL }, +	{ "m25p16",  0x202015, 0,  64 * 1024,  32, M25P_FLAG, 25, NULL }, +	{ "m25p32",  0x202016, 0,  64 * 1024,  64, M25P_FLAG, 50, NULL }, +	{ "m25p64",  0x202017, 0,  64 * 1024, 128, M25P_FLAG, 50, NULL }, +	{ "m25p128", 0x202018, 0, 256 * 1024,  64, M25P_FLAG, 50, NULL }, + +#define M25PX_FLAG (FLASH_FLAG_READ_WRITE      |	\ +		    FLASH_FLAG_READ_FAST        |	\ +		    FLASH_FLAG_READ_1_1_2       |	\ +		    FLASH_FLAG_WRITE_1_1_2) +	{ "m25px32", 0x207116, 0,  64 * 1024,  64, M25PX_FLAG, 75, NULL }, +	{ "m25px64", 0x207117, 0,  64 * 1024, 128, M25PX_FLAG, 75, NULL }, + +	/* Macronix MX25xxx +	 *     - Support for 'FLASH_FLAG_WRITE_1_4_4' is omitted for devices +	 *       where operating frequency must be reduced. 
+	 */ +#define MX25_FLAG (FLASH_FLAG_READ_WRITE       |	\ +		   FLASH_FLAG_READ_FAST         |	\ +		   FLASH_FLAG_READ_1_1_2        |	\ +		   FLASH_FLAG_READ_1_2_2        |	\ +		   FLASH_FLAG_READ_1_1_4        |	\ +		   FLASH_FLAG_SE_4K             |	\ +		   FLASH_FLAG_SE_32K) +	{ "mx25l3255e",  0xc29e16, 0, 64 * 1024, 64, +	  (MX25_FLAG | FLASH_FLAG_WRITE_1_4_4), 86, +	  stfsm_mx25_config}, +	{ "mx25l25635e", 0xc22019, 0, 64*1024, 512, +	  (MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70, +	  stfsm_mx25_config }, +	{ "mx25l25655e", 0xc22619, 0, 64*1024, 512, +	  (MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70, +	  stfsm_mx25_config}, + +#define N25Q_FLAG (FLASH_FLAG_READ_WRITE       |	\ +		   FLASH_FLAG_READ_FAST         |	\ +		   FLASH_FLAG_READ_1_1_2        |	\ +		   FLASH_FLAG_READ_1_2_2        |	\ +		   FLASH_FLAG_READ_1_1_4        |	\ +		   FLASH_FLAG_READ_1_4_4        |	\ +		   FLASH_FLAG_WRITE_1_1_2       |	\ +		   FLASH_FLAG_WRITE_1_2_2       |	\ +		   FLASH_FLAG_WRITE_1_1_4       |	\ +		   FLASH_FLAG_WRITE_1_4_4) +	{ "n25q128", 0x20ba18, 0, 64 * 1024,  256, N25Q_FLAG, 108, +	  stfsm_n25q_config }, +	{ "n25q256", 0x20ba19, 0, 64 * 1024,  512, +	  N25Q_FLAG | FLASH_FLAG_32BIT_ADDR, 108, stfsm_n25q_config }, + +	/* +	 * Spansion S25FLxxxP +	 *     - 256KiB and 64KiB sector variants (identified by ext. JEDEC) +	 */ +#define S25FLXXXP_FLAG (FLASH_FLAG_READ_WRITE  |	\ +			FLASH_FLAG_READ_1_1_2   |	\ +			FLASH_FLAG_READ_1_2_2   |	\ +			FLASH_FLAG_READ_1_1_4   |	\ +			FLASH_FLAG_READ_1_4_4   |	\ +			FLASH_FLAG_WRITE_1_1_4  |	\ +			FLASH_FLAG_READ_FAST) +	{ "s25fl032p",  0x010215, 0x4d00,  64 * 1024,  64, S25FLXXXP_FLAG, 80, +	  stfsm_s25fl_config}, +	{ "s25fl129p0", 0x012018, 0x4d00, 256 * 1024,  64, S25FLXXXP_FLAG, 80, +	  stfsm_s25fl_config }, +	{ "s25fl129p1", 0x012018, 0x4d01,  64 * 1024, 256, S25FLXXXP_FLAG, 80, +	  stfsm_s25fl_config }, + +	/* +	 * Spansion S25FLxxxS +	 *     - 256KiB and 64KiB sector variants (identified by ext. 
JEDEC) +	 *     - RESET# signal supported by die but not bristled out on all +	 *       package types.  The package type is a function of board design, +	 *       so this information is captured in the board's flags. +	 *     - Supports 'DYB' sector protection. Depending on variant, sectors +	 *       may default to locked state on power-on. +	 */ +#define S25FLXXXS_FLAG (S25FLXXXP_FLAG         |	\ +			FLASH_FLAG_RESET        |	\ +			FLASH_FLAG_DYB_LOCKING) +	{ "s25fl128s0", 0x012018, 0x0300,  256 * 1024, 64, S25FLXXXS_FLAG, 80, +	  stfsm_s25fl_config }, +	{ "s25fl128s1", 0x012018, 0x0301,  64 * 1024, 256, S25FLXXXS_FLAG, 80, +	  stfsm_s25fl_config }, +	{ "s25fl256s0", 0x010219, 0x4d00, 256 * 1024, 128, +	  S25FLXXXS_FLAG | FLASH_FLAG_32BIT_ADDR, 80, stfsm_s25fl_config }, +	{ "s25fl256s1", 0x010219, 0x4d01,  64 * 1024, 512, +	  S25FLXXXS_FLAG | FLASH_FLAG_32BIT_ADDR, 80, stfsm_s25fl_config }, + +	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ +#define W25X_FLAG (FLASH_FLAG_READ_WRITE       |	\ +		   FLASH_FLAG_READ_FAST         |	\ +		   FLASH_FLAG_READ_1_1_2        |	\ +		   FLASH_FLAG_WRITE_1_1_2) +	{ "w25x40",  0xef3013, 0,  64 * 1024,   8, W25X_FLAG, 75, NULL }, +	{ "w25x80",  0xef3014, 0,  64 * 1024,  16, W25X_FLAG, 75, NULL }, +	{ "w25x16",  0xef3015, 0,  64 * 1024,  32, W25X_FLAG, 75, NULL }, +	{ "w25x32",  0xef3016, 0,  64 * 1024,  64, W25X_FLAG, 75, NULL }, +	{ "w25x64",  0xef3017, 0,  64 * 1024, 128, W25X_FLAG, 75, NULL }, + +	/* Winbond -- w25q "blocks" are 64K, "sectors" are 4KiB */ +#define W25Q_FLAG (FLASH_FLAG_READ_WRITE       |	\ +		   FLASH_FLAG_READ_FAST         |	\ +		   FLASH_FLAG_READ_1_1_2        |	\ +		   FLASH_FLAG_READ_1_2_2        |	\ +		   FLASH_FLAG_READ_1_1_4        |	\ +		   FLASH_FLAG_READ_1_4_4        |	\ +		   FLASH_FLAG_WRITE_1_1_4) +	{ "w25q80",  0xef4014, 0,  64 * 1024,  16, W25Q_FLAG, 80, +	  stfsm_w25q_config }, +	{ "w25q16",  0xef4015, 0,  64 * 1024,  32, W25Q_FLAG, 80, +	  stfsm_w25q_config }, +	{ "w25q32",  
0xef4016, 0,  64 * 1024,  64, W25Q_FLAG, 80, +	  stfsm_w25q_config }, +	{ "w25q64",  0xef4017, 0,  64 * 1024, 128, W25Q_FLAG, 80, +	  stfsm_w25q_config }, + +	/* Sentinel */ +	{ NULL, 0x000000, 0, 0, 0, 0, 0, NULL }, +}; + +/* + * FSM message sequence configurations: + * + * All configs are presented in order of preference + */ + +/* Default READ configurations, in order of preference */ +static struct seq_rw_config default_read_configs[] = { +	{FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4,	0, 4, 4, 0x00, 2, 4}, +	{FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4,	0, 1, 4, 0x00, 4, 0}, +	{FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2,	0, 2, 2, 0x00, 4, 0}, +	{FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2,	0, 1, 2, 0x00, 0, 8}, +	{FLASH_FLAG_READ_FAST,	SPINOR_OP_READ_FAST,	0, 1, 1, 0x00, 0, 8}, +	{FLASH_FLAG_READ_WRITE, SPINOR_OP_READ,		0, 1, 1, 0x00, 0, 0}, +	{0x00,			0,			0, 0, 0, 0x00, 0, 0}, +}; + +/* Default WRITE configurations */ +static struct seq_rw_config default_write_configs[] = { +	{FLASH_FLAG_WRITE_1_4_4, SPINOR_OP_WRITE_1_4_4, 1, 4, 4, 0x00, 0, 0}, +	{FLASH_FLAG_WRITE_1_1_4, SPINOR_OP_WRITE_1_1_4, 1, 1, 4, 0x00, 0, 0}, +	{FLASH_FLAG_WRITE_1_2_2, SPINOR_OP_WRITE_1_2_2, 1, 2, 2, 0x00, 0, 0}, +	{FLASH_FLAG_WRITE_1_1_2, SPINOR_OP_WRITE_1_1_2, 1, 1, 2, 0x00, 0, 0}, +	{FLASH_FLAG_READ_WRITE,  SPINOR_OP_WRITE,       1, 1, 1, 0x00, 0, 0}, +	{0x00,			 0,			0, 0, 0, 0x00, 0, 0}, +}; + +/* + * [N25Qxxx] Configuration + */ +#define N25Q_VCR_DUMMY_CYCLES(x)	(((x) & 0xf) << 4) +#define N25Q_VCR_XIP_DISABLED		((uint8_t)0x1 << 3) +#define N25Q_VCR_WRAP_CONT		0x3 + +/* N25Q 3-byte Address READ configurations + *	- 'FAST' variants configured for 8 dummy cycles. + * + * Note, the number of dummy cycles used for 'FAST' READ operations is + * configurable and would normally be tuned according to the READ command and + * operating frequency.  
However, this applies universally to all 'FAST' READ + * commands, including those used by the SPIBoot controller, and remains in + * force until the device is power-cycled.  Since the SPIBoot controller is + * hard-wired to use 8 dummy cycles, we must configure the device to also use 8 + * cycles. + */ +static struct seq_rw_config n25q_read3_configs[] = { +	{FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4,	0, 4, 4, 0x00, 0, 8}, +	{FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4,	0, 1, 4, 0x00, 0, 8}, +	{FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2,	0, 2, 2, 0x00, 0, 8}, +	{FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2,	0, 1, 2, 0x00, 0, 8}, +	{FLASH_FLAG_READ_FAST,	SPINOR_OP_READ_FAST,	0, 1, 1, 0x00, 0, 8}, +	{FLASH_FLAG_READ_WRITE, SPINOR_OP_READ,	        0, 1, 1, 0x00, 0, 0}, +	{0x00,			0,			0, 0, 0, 0x00, 0, 0}, +}; + +/* N25Q 4-byte Address READ configurations + *	- use special 4-byte address READ commands (reduces overheads, and + *        reduces risk of hitting watchdog reset issues). + *	- 'FAST' variants configured for 8 dummy cycles (see note above.) 
+ */ +static struct seq_rw_config n25q_read4_configs[] = { +	{FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ4_1_4_4,	0, 4, 4, 0x00, 0, 8}, +	{FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ4_1_1_4,	0, 1, 4, 0x00, 0, 8}, +	{FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ4_1_2_2,	0, 2, 2, 0x00, 0, 8}, +	{FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ4_1_1_2,	0, 1, 2, 0x00, 0, 8}, +	{FLASH_FLAG_READ_FAST,	SPINOR_OP_READ4_FAST,	0, 1, 1, 0x00, 0, 8}, +	{FLASH_FLAG_READ_WRITE, SPINOR_OP_READ4,	0, 1, 1, 0x00, 0, 0}, +	{0x00,			0,			0, 0, 0, 0x00, 0, 0}, +}; + +/* + * [MX25xxx] Configuration + */ +#define MX25_STATUS_QE			(0x1 << 6) + +static int stfsm_mx25_en_32bit_addr_seq(struct stfsm_seq *seq) +{ +	seq->seq_opc[0] = (SEQ_OPC_PADS_1 | +			   SEQ_OPC_CYCLES(8) | +			   SEQ_OPC_OPCODE(SPINOR_OP_EN4B) | +			   SEQ_OPC_CSDEASSERT); + +	seq->seq[0] = STFSM_INST_CMD1; +	seq->seq[1] = STFSM_INST_WAIT; +	seq->seq[2] = STFSM_INST_STOP; + +	seq->seq_cfg = (SEQ_CFG_PADS_1 | +			SEQ_CFG_ERASE | +			SEQ_CFG_READNOTWRITE | +			SEQ_CFG_CSDEASSERT | +			SEQ_CFG_STARTSEQ); + +	return 0; +} + +/* + * [S25FLxxx] Configuration + */ +#define STFSM_S25FL_CONFIG_QE		(0x1 << 1) + +/* + * S25FLxxxS devices provide three ways of supporting 32-bit addressing: Bank + * Register, Extended Address Modes, and a 32-bit address command set.  The + * 32-bit address command set is used here, since it avoids any problems with + * entering a state that is incompatible with the SPIBoot Controller. 
+ */ +static struct seq_rw_config stfsm_s25fl_read4_configs[] = { +	{FLASH_FLAG_READ_1_4_4,  SPINOR_OP_READ4_1_4_4,  0, 4, 4, 0x00, 2, 4}, +	{FLASH_FLAG_READ_1_1_4,  SPINOR_OP_READ4_1_1_4,  0, 1, 4, 0x00, 0, 8}, +	{FLASH_FLAG_READ_1_2_2,  SPINOR_OP_READ4_1_2_2,  0, 2, 2, 0x00, 4, 0}, +	{FLASH_FLAG_READ_1_1_2,  SPINOR_OP_READ4_1_1_2,  0, 1, 2, 0x00, 0, 8}, +	{FLASH_FLAG_READ_FAST,   SPINOR_OP_READ4_FAST,   0, 1, 1, 0x00, 0, 8}, +	{FLASH_FLAG_READ_WRITE,  SPINOR_OP_READ4,        0, 1, 1, 0x00, 0, 0}, +	{0x00,                   0,                      0, 0, 0, 0x00, 0, 0}, +}; + +static struct seq_rw_config stfsm_s25fl_write4_configs[] = { +	{FLASH_FLAG_WRITE_1_1_4, S25FL_CMD_WRITE4_1_1_4, 1, 1, 4, 0x00, 0, 0}, +	{FLASH_FLAG_READ_WRITE,  S25FL_CMD_WRITE4,       1, 1, 1, 0x00, 0, 0}, +	{0x00,                   0,                      0, 0, 0, 0x00, 0, 0}, +}; + +/* + * [W25Qxxx] Configuration + */ +#define W25Q_STATUS_QE			(0x1 << 1) + +static struct stfsm_seq stfsm_seq_read_jedec = { +	.data_size = TRANSFER_SIZE(8), +	.seq_opc[0] = (SEQ_OPC_PADS_1 | +		       SEQ_OPC_CYCLES(8) | +		       SEQ_OPC_OPCODE(SPINOR_OP_RDID)), +	.seq = { +		STFSM_INST_CMD1, +		STFSM_INST_DATA_READ, +		STFSM_INST_STOP, +	}, +	.seq_cfg = (SEQ_CFG_PADS_1 | +		    SEQ_CFG_READNOTWRITE | +		    SEQ_CFG_CSDEASSERT | +		    SEQ_CFG_STARTSEQ), +}; + +static struct stfsm_seq stfsm_seq_read_status_fifo = { +	.data_size = TRANSFER_SIZE(4), +	.seq_opc[0] = (SEQ_OPC_PADS_1 | +		       SEQ_OPC_CYCLES(8) | +		       SEQ_OPC_OPCODE(SPINOR_OP_RDSR)), +	.seq = { +		STFSM_INST_CMD1, +		STFSM_INST_DATA_READ, +		STFSM_INST_STOP, +	}, +	.seq_cfg = (SEQ_CFG_PADS_1 | +		    SEQ_CFG_READNOTWRITE | +		    SEQ_CFG_CSDEASSERT | +		    SEQ_CFG_STARTSEQ), +}; + +static struct stfsm_seq stfsm_seq_erase_sector = { +	/* 'addr_cfg' configured during initialisation */ +	.seq_opc = { +		(SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | +		 SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT), + +		(SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | +		
 SEQ_OPC_OPCODE(SPINOR_OP_SE)), +	}, +	.seq = { +		STFSM_INST_CMD1, +		STFSM_INST_CMD2, +		STFSM_INST_ADD1, +		STFSM_INST_ADD2, +		STFSM_INST_STOP, +	}, +	.seq_cfg = (SEQ_CFG_PADS_1 | +		    SEQ_CFG_READNOTWRITE | +		    SEQ_CFG_CSDEASSERT | +		    SEQ_CFG_STARTSEQ), +}; + +static struct stfsm_seq stfsm_seq_erase_chip = { +	.seq_opc = { +		(SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | +		 SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT), + +		(SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | +		 SEQ_OPC_OPCODE(SPINOR_OP_CHIP_ERASE) | SEQ_OPC_CSDEASSERT), +	}, +	.seq = { +		STFSM_INST_CMD1, +		STFSM_INST_CMD2, +		STFSM_INST_WAIT, +		STFSM_INST_STOP, +	}, +	.seq_cfg = (SEQ_CFG_PADS_1 | +		    SEQ_CFG_ERASE | +		    SEQ_CFG_READNOTWRITE | +		    SEQ_CFG_CSDEASSERT | +		    SEQ_CFG_STARTSEQ), +}; + +static struct stfsm_seq stfsm_seq_write_status = { +	.seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | +		       SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT), +	.seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | +		       SEQ_OPC_OPCODE(SPINOR_OP_WRSR)), +	.seq = { +		STFSM_INST_CMD1, +		STFSM_INST_CMD2, +		STFSM_INST_STA_WR1, +		STFSM_INST_STOP, +	}, +	.seq_cfg = (SEQ_CFG_PADS_1 | +		    SEQ_CFG_READNOTWRITE | +		    SEQ_CFG_CSDEASSERT | +		    SEQ_CFG_STARTSEQ), +}; + +static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq) +{ +	seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | +			   SEQ_OPC_OPCODE(SPINOR_OP_EN4B)); +	seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | +			   SEQ_OPC_OPCODE(SPINOR_OP_WREN) | +			   SEQ_OPC_CSDEASSERT); + +	seq->seq[0] = STFSM_INST_CMD2; +	seq->seq[1] = STFSM_INST_CMD1; +	seq->seq[2] = STFSM_INST_WAIT; +	seq->seq[3] = STFSM_INST_STOP; + +	seq->seq_cfg = (SEQ_CFG_PADS_1 | +			SEQ_CFG_ERASE | +			SEQ_CFG_READNOTWRITE | +			SEQ_CFG_CSDEASSERT | +			SEQ_CFG_STARTSEQ); + +	return 0; +} + +static inline int stfsm_is_idle(struct stfsm *fsm) +{ +	return readl(fsm->base + SPI_FAST_SEQ_STA) & 0x10; +} + +static inline uint32_t 
stfsm_fifo_available(struct stfsm *fsm) +{ +	return (readl(fsm->base + SPI_FAST_SEQ_STA) >> 5) & 0x7f; +} + +static void stfsm_clear_fifo(struct stfsm *fsm) +{ +	uint32_t avail; + +	for (;;) { +		avail = stfsm_fifo_available(fsm); +		if (!avail) +			break; + +		while (avail) { +			readl(fsm->base + SPI_FAST_SEQ_DATA_REG); +			avail--; +		} +	} +} + +static inline void stfsm_load_seq(struct stfsm *fsm, +				  const struct stfsm_seq *seq) +{ +	void __iomem *dst = fsm->base + SPI_FAST_SEQ_TRANSFER_SIZE; +	const uint32_t *src = (const uint32_t *)seq; +	int words = sizeof(*seq) / sizeof(*src); + +	BUG_ON(!stfsm_is_idle(fsm)); + +	while (words--) { +		writel(*src, dst); +		src++; +		dst += 4; +	} +} + +static void stfsm_wait_seq(struct stfsm *fsm) +{ +	unsigned long deadline; +	int timeout = 0; + +	deadline = jiffies + msecs_to_jiffies(STFSM_MAX_WAIT_SEQ_MS); + +	while (!timeout) { +		if (time_after_eq(jiffies, deadline)) +			timeout = 1; + +		if (stfsm_is_idle(fsm)) +			return; + +		cond_resched(); +	} + +	dev_err(fsm->dev, "timeout on sequence completion\n"); +} + +static void stfsm_read_fifo(struct stfsm *fsm, uint32_t *buf, uint32_t size) +{ +	uint32_t remaining = size >> 2; +	uint32_t avail; +	uint32_t words; + +	dev_dbg(fsm->dev, "Reading %d bytes from FIFO\n", size); + +	BUG_ON((((uintptr_t)buf) & 0x3) || (size & 0x3)); + +	while (remaining) { +		for (;;) { +			avail = stfsm_fifo_available(fsm); +			if (avail) +				break; +			udelay(1); +		} +		words = min(avail, remaining); +		remaining -= words; + +		readsl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words); +		buf += words; +	} +} + +static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf, +			    uint32_t size) +{ +	uint32_t words = size >> 2; + +	dev_dbg(fsm->dev, "writing %d bytes to FIFO\n", size); + +	BUG_ON((((uintptr_t)buf) & 0x3) || (size & 0x3)); + +	writesl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words); + +	return size; +} + +static int stfsm_enter_32bit_addr(struct stfsm *fsm, int enter) +{ +	
struct stfsm_seq *seq = &fsm->stfsm_seq_en_32bit_addr; +	uint32_t cmd = enter ? SPINOR_OP_EN4B : SPINOR_OP_EX4B; + +	seq->seq_opc[0] = (SEQ_OPC_PADS_1 | +			   SEQ_OPC_CYCLES(8) | +			   SEQ_OPC_OPCODE(cmd) | +			   SEQ_OPC_CSDEASSERT); + +	stfsm_load_seq(fsm, seq); + +	stfsm_wait_seq(fsm); + +	return 0; +} + +static uint8_t stfsm_wait_busy(struct stfsm *fsm) +{ +	struct stfsm_seq *seq = &stfsm_seq_read_status_fifo; +	unsigned long deadline; +	uint32_t status; +	int timeout = 0; + +	/* Use RDRS1 */ +	seq->seq_opc[0] = (SEQ_OPC_PADS_1 | +			   SEQ_OPC_CYCLES(8) | +			   SEQ_OPC_OPCODE(SPINOR_OP_RDSR)); + +	/* Load read_status sequence */ +	stfsm_load_seq(fsm, seq); + +	/* +	 * Repeat until busy bit is deasserted, or timeout, or error (S25FLxxxS) +	 */ +	deadline = jiffies + FLASH_MAX_BUSY_WAIT; +	while (!timeout) { +		if (time_after_eq(jiffies, deadline)) +			timeout = 1; + +		stfsm_wait_seq(fsm); + +		stfsm_read_fifo(fsm, &status, 4); + +		if ((status & FLASH_STATUS_BUSY) == 0) +			return 0; + +		if ((fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS) && +		    ((status & S25FL_STATUS_P_ERR) || +		     (status & S25FL_STATUS_E_ERR))) +			return (uint8_t)(status & 0xff); + +		if (!timeout) +			/* Restart */ +			writel(seq->seq_cfg, fsm->base + SPI_FAST_SEQ_CFG); + +		cond_resched(); +	} + +	dev_err(fsm->dev, "timeout on wait_busy\n"); + +	return FLASH_STATUS_TIMEOUT; +} + +static int stfsm_read_status(struct stfsm *fsm, uint8_t cmd, +			     uint8_t *data, int bytes) +{ +	struct stfsm_seq *seq = &stfsm_seq_read_status_fifo; +	uint32_t tmp; +	uint8_t *t = (uint8_t *)&tmp; +	int i; + +	dev_dbg(fsm->dev, "read 'status' register [0x%02x], %d byte(s)\n", +		cmd, bytes); + +	BUG_ON(bytes != 1 && bytes != 2); + +	seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | +			   SEQ_OPC_OPCODE(cmd)), + +	stfsm_load_seq(fsm, seq); + +	stfsm_read_fifo(fsm, &tmp, 4); + +	for (i = 0; i < bytes; i++) +		data[i] = t[i]; + +	stfsm_wait_seq(fsm); + +	return 0; +} + +static int 
stfsm_write_status(struct stfsm *fsm, uint8_t cmd, +			    uint16_t data, int bytes, int wait_busy) +{ +	struct stfsm_seq *seq = &stfsm_seq_write_status; + +	dev_dbg(fsm->dev, +		"write 'status' register [0x%02x], %d byte(s), 0x%04x\n" +		" %s wait-busy\n", cmd, bytes, data, wait_busy ? "with" : "no"); + +	BUG_ON(bytes != 1 && bytes != 2); + +	seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | +			   SEQ_OPC_OPCODE(cmd)); + +	seq->status = (uint32_t)data | STA_PADS_1 | STA_CSDEASSERT; +	seq->seq[2] = (bytes == 1) ? STFSM_INST_STA_WR1 : STFSM_INST_STA_WR1_2; + +	stfsm_load_seq(fsm, seq); + +	stfsm_wait_seq(fsm); + +	if (wait_busy) +		stfsm_wait_busy(fsm); + +	return 0; +} + +/* + * SoC reset on 'boot-from-spi' systems + * + * Certain modes of operation cause the Flash device to enter a particular state + * for a period of time (e.g. 'Erase Sector', 'Quad Enable', and 'Enter 32-bit + * Addr' commands).  On boot-from-spi systems, it is important to consider what + * happens if a warm reset occurs during this period.  The SPIBoot controller + * assumes that Flash device is in its default reset state, 24-bit address mode, + * and ready to accept commands.  This can be achieved using some form of + * on-board logic/controller to force a device POR in response to a SoC-level + * reset or by making use of the device reset signal if available (limited + * number of devices only). + * + * Failure to take such precautions can cause problems following a warm reset. + * For some operations (e.g. ERASE), there is little that can be done.  For + * other modes of operation (e.g. 32-bit addressing), options are often + * available that can help minimise the window in which a reset could cause a + * problem. 
+ * + */ +static bool stfsm_can_handle_soc_reset(struct stfsm *fsm) +{ +	/* Reset signal is available on the board and supported by the device */ +	if (fsm->reset_signal && fsm->info->flags & FLASH_FLAG_RESET) +		return true; + +	/* Board-level logic forces a power-on-reset */ +	if (fsm->reset_por) +		return true; + +	/* Reset is not properly handled and may result in failure to reboot */ +	return false; +} + +/* Configure 'addr_cfg' according to addressing mode */ +static void stfsm_prepare_erasesec_seq(struct stfsm *fsm, +				       struct stfsm_seq *seq) +{ +	int addr1_cycles = fsm->info->flags & FLASH_FLAG_32BIT_ADDR ? 16 : 8; + +	seq->addr_cfg = (ADR_CFG_CYCLES_ADD1(addr1_cycles) | +			 ADR_CFG_PADS_1_ADD1 | +			 ADR_CFG_CYCLES_ADD2(16) | +			 ADR_CFG_PADS_1_ADD2 | +			 ADR_CFG_CSDEASSERT_ADD2); +} + +/* Search for preferred configuration based on available flags */ +static struct seq_rw_config * +stfsm_search_seq_rw_configs(struct stfsm *fsm, +			    struct seq_rw_config cfgs[]) +{ +	struct seq_rw_config *config; +	int flags = fsm->info->flags; + +	for (config = cfgs; config->cmd != 0; config++) +		if ((config->flags & flags) == config->flags) +			return config; + +	return NULL; +} + +/* Prepare a READ/WRITE sequence according to configuration parameters */ +static void stfsm_prepare_rw_seq(struct stfsm *fsm, +				 struct stfsm_seq *seq, +				 struct seq_rw_config *cfg) +{ +	int addr1_cycles, addr2_cycles; +	int i = 0; + +	memset(seq, 0, sizeof(*seq)); + +	/* Add READ/WRITE OPC  */ +	seq->seq_opc[i++] = (SEQ_OPC_PADS_1 | +			     SEQ_OPC_CYCLES(8) | +			     SEQ_OPC_OPCODE(cfg->cmd)); + +	/* Add WREN OPC for a WRITE sequence */ +	if (cfg->write) +		seq->seq_opc[i++] = (SEQ_OPC_PADS_1 | +				     SEQ_OPC_CYCLES(8) | +				     SEQ_OPC_OPCODE(SPINOR_OP_WREN) | +				     SEQ_OPC_CSDEASSERT); + +	/* Address configuration (24 or 32-bit addresses) */ +	addr1_cycles  = (fsm->info->flags & FLASH_FLAG_32BIT_ADDR) ? 
16 : 8; +	addr1_cycles /= cfg->addr_pads; +	addr2_cycles  = 16 / cfg->addr_pads; +	seq->addr_cfg = ((addr1_cycles & 0x3f) << 0 |	/* ADD1 cycles */ +			 (cfg->addr_pads - 1) << 6 |	/* ADD1 pads */ +			 (addr2_cycles & 0x3f) << 16 |	/* ADD2 cycles */ +			 ((cfg->addr_pads - 1) << 22));	/* ADD2 pads */ + +	/* Data/Sequence configuration */ +	seq->seq_cfg = ((cfg->data_pads - 1) << 16 | +			SEQ_CFG_STARTSEQ | +			SEQ_CFG_CSDEASSERT); +	if (!cfg->write) +		seq->seq_cfg |= SEQ_CFG_READNOTWRITE; + +	/* Mode configuration (no. of pads taken from addr cfg) */ +	seq->mode = ((cfg->mode_data & 0xff) << 0 |	/* data */ +		     (cfg->mode_cycles & 0x3f) << 16 |	/* cycles */ +		     (cfg->addr_pads - 1) << 22);	/* pads */ + +	/* Dummy configuration (no. of pads taken from addr cfg) */ +	seq->dummy = ((cfg->dummy_cycles & 0x3f) << 16 |	/* cycles */ +		      (cfg->addr_pads - 1) << 22);		/* pads */ + + +	/* Instruction sequence */ +	i = 0; +	if (cfg->write) +		seq->seq[i++] = STFSM_INST_CMD2; + +	seq->seq[i++] = STFSM_INST_CMD1; + +	seq->seq[i++] = STFSM_INST_ADD1; +	seq->seq[i++] = STFSM_INST_ADD2; + +	if (cfg->mode_cycles) +		seq->seq[i++] = STFSM_INST_MODE; + +	if (cfg->dummy_cycles) +		seq->seq[i++] = STFSM_INST_DUMMY; + +	seq->seq[i++] = +		cfg->write ? 
STFSM_INST_DATA_WRITE : STFSM_INST_DATA_READ; +	seq->seq[i++] = STFSM_INST_STOP; +} + +static int stfsm_search_prepare_rw_seq(struct stfsm *fsm, +				       struct stfsm_seq *seq, +				       struct seq_rw_config *cfgs) +{ +	struct seq_rw_config *config; + +	config = stfsm_search_seq_rw_configs(fsm, cfgs); +	if (!config) { +		dev_err(fsm->dev, "failed to find suitable config\n"); +		return -EINVAL; +	} + +	stfsm_prepare_rw_seq(fsm, seq, config); + +	return 0; +} + +/* Prepare a READ/WRITE/ERASE 'default' sequences */ +static int stfsm_prepare_rwe_seqs_default(struct stfsm *fsm) +{ +	uint32_t flags = fsm->info->flags; +	int ret; + +	/* Configure 'READ' sequence */ +	ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read, +					  default_read_configs); +	if (ret) { +		dev_err(fsm->dev, +			"failed to prep READ sequence with flags [0x%08x]\n", +			flags); +		return ret; +	} + +	/* Configure 'WRITE' sequence */ +	ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write, +					  default_write_configs); +	if (ret) { +		dev_err(fsm->dev, +			"failed to prep WRITE sequence with flags [0x%08x]\n", +			flags); +		return ret; +	} + +	/* Configure 'ERASE_SECTOR' sequence */ +	stfsm_prepare_erasesec_seq(fsm, &stfsm_seq_erase_sector); + +	return 0; +} + +static int stfsm_mx25_config(struct stfsm *fsm) +{ +	uint32_t flags = fsm->info->flags; +	uint32_t data_pads; +	uint8_t sta; +	int ret; +	bool soc_reset; + +	/* +	 * Use default READ/WRITE sequences +	 */ +	ret = stfsm_prepare_rwe_seqs_default(fsm); +	if (ret) +		return ret; + +	/* +	 * Configure 32-bit Address Support +	 */ +	if (flags & FLASH_FLAG_32BIT_ADDR) { +		/* Configure 'enter_32bitaddr' FSM sequence */ +		stfsm_mx25_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr); + +		soc_reset = stfsm_can_handle_soc_reset(fsm); +		if (soc_reset || !fsm->booted_from_spi) +			/* If we can handle SoC resets, we enable 32-bit address +			 * mode pervasively */ +			stfsm_enter_32bit_addr(fsm, 1); + +		else +			/* Else, 
enable/disable 32-bit addressing before/after +			 * each operation */ +			fsm->configuration = (CFG_READ_TOGGLE_32BIT_ADDR | +					      CFG_WRITE_TOGGLE_32BIT_ADDR | +					      CFG_ERASESEC_TOGGLE_32BIT_ADDR); +	} + +	/* Check status of 'QE' bit, update if required. */ +	stfsm_read_status(fsm, SPINOR_OP_RDSR, &sta, 1); +	data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1; +	if (data_pads == 4) { +		if (!(sta & MX25_STATUS_QE)) { +			/* Set 'QE' */ +			sta |= MX25_STATUS_QE; + +			stfsm_write_status(fsm, SPINOR_OP_WRSR, sta, 1, 1); +		} +	} else { +		if (sta & MX25_STATUS_QE) { +			/* Clear 'QE' */ +			sta &= ~MX25_STATUS_QE; + +			stfsm_write_status(fsm, SPINOR_OP_WRSR, sta, 1, 1); +		} +	} + +	return 0; +} + +static int stfsm_n25q_config(struct stfsm *fsm) +{ +	uint32_t flags = fsm->info->flags; +	uint8_t vcr; +	int ret = 0; +	bool soc_reset; + +	/* Configure 'READ' sequence */ +	if (flags & FLASH_FLAG_32BIT_ADDR) +		ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read, +						  n25q_read4_configs); +	else +		ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read, +						  n25q_read3_configs); +	if (ret) { +		dev_err(fsm->dev, +			"failed to prepare READ sequence with flags [0x%08x]\n", +			flags); +		return ret; +	} + +	/* Configure 'WRITE' sequence (default configs) */ +	ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write, +					  default_write_configs); +	if (ret) { +		dev_err(fsm->dev, +			"preparing WRITE sequence using flags [0x%08x] failed\n", +			flags); +		return ret; +	} + +	/* * Configure 'ERASE_SECTOR' sequence */ +	stfsm_prepare_erasesec_seq(fsm, &stfsm_seq_erase_sector); + +	/* Configure 32-bit address support */ +	if (flags & FLASH_FLAG_32BIT_ADDR) { +		stfsm_n25q_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr); + +		soc_reset = stfsm_can_handle_soc_reset(fsm); +		if (soc_reset || !fsm->booted_from_spi) { +			/* +			 * If we can handle SoC resets, we enable 32-bit +			 * address mode pervasively +			 */ +			
stfsm_enter_32bit_addr(fsm, 1); +		} else { +			/* +			 * If not, enable/disable for WRITE and ERASE +			 * operations (READ uses special commands) +			 */ +			fsm->configuration = (CFG_WRITE_TOGGLE_32BIT_ADDR | +					      CFG_ERASESEC_TOGGLE_32BIT_ADDR); +		} +	} + +	/* +	 * Configure device to use 8 dummy cycles +	 */ +	vcr = (N25Q_VCR_DUMMY_CYCLES(8) | N25Q_VCR_XIP_DISABLED | +	       N25Q_VCR_WRAP_CONT); +	stfsm_write_status(fsm, N25Q_CMD_WRVCR, vcr, 1, 0); + +	return 0; +} + +static void stfsm_s25fl_prepare_erasesec_seq_32(struct stfsm_seq *seq) +{ +	seq->seq_opc[1] = (SEQ_OPC_PADS_1 | +			   SEQ_OPC_CYCLES(8) | +			   SEQ_OPC_OPCODE(S25FL_CMD_SE4)); + +	seq->addr_cfg = (ADR_CFG_CYCLES_ADD1(16) | +			 ADR_CFG_PADS_1_ADD1 | +			 ADR_CFG_CYCLES_ADD2(16) | +			 ADR_CFG_PADS_1_ADD2 | +			 ADR_CFG_CSDEASSERT_ADD2); +} + +static void stfsm_s25fl_read_dyb(struct stfsm *fsm, uint32_t offs, uint8_t *dby) +{ +	uint32_t tmp; +	struct stfsm_seq seq = { +		.data_size = TRANSFER_SIZE(4), +		.seq_opc[0] = (SEQ_OPC_PADS_1 | +			       SEQ_OPC_CYCLES(8) | +			       SEQ_OPC_OPCODE(S25FL_CMD_DYBRD)), +		.addr_cfg = (ADR_CFG_CYCLES_ADD1(16) | +			     ADR_CFG_PADS_1_ADD1 | +			     ADR_CFG_CYCLES_ADD2(16) | +			     ADR_CFG_PADS_1_ADD2), +		.addr1 = (offs >> 16) & 0xffff, +		.addr2 = offs & 0xffff, +		.seq = { +			STFSM_INST_CMD1, +			STFSM_INST_ADD1, +			STFSM_INST_ADD2, +			STFSM_INST_DATA_READ, +			STFSM_INST_STOP, +		}, +		.seq_cfg = (SEQ_CFG_PADS_1 | +			    SEQ_CFG_READNOTWRITE | +			    SEQ_CFG_CSDEASSERT | +			    SEQ_CFG_STARTSEQ), +	}; + +	stfsm_load_seq(fsm, &seq); + +	stfsm_read_fifo(fsm, &tmp, 4); + +	*dby = (uint8_t)(tmp >> 24); + +	stfsm_wait_seq(fsm); +} + +static void stfsm_s25fl_write_dyb(struct stfsm *fsm, uint32_t offs, uint8_t dby) +{ +	struct stfsm_seq seq = { +		.seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | +			       SEQ_OPC_OPCODE(SPINOR_OP_WREN) | +			       SEQ_OPC_CSDEASSERT), +		.seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | +			       
SEQ_OPC_OPCODE(S25FL_CMD_DYBWR)), +		.addr_cfg = (ADR_CFG_CYCLES_ADD1(16) | +			     ADR_CFG_PADS_1_ADD1 | +			     ADR_CFG_CYCLES_ADD2(16) | +			     ADR_CFG_PADS_1_ADD2), +		.status = (uint32_t)dby | STA_PADS_1 | STA_CSDEASSERT, +		.addr1 = (offs >> 16) & 0xffff, +		.addr2 = offs & 0xffff, +		.seq = { +			STFSM_INST_CMD1, +			STFSM_INST_CMD2, +			STFSM_INST_ADD1, +			STFSM_INST_ADD2, +			STFSM_INST_STA_WR1, +			STFSM_INST_STOP, +		}, +		.seq_cfg = (SEQ_CFG_PADS_1 | +			    SEQ_CFG_READNOTWRITE | +			    SEQ_CFG_CSDEASSERT | +			    SEQ_CFG_STARTSEQ), +	}; + +	stfsm_load_seq(fsm, &seq); +	stfsm_wait_seq(fsm); + +	stfsm_wait_busy(fsm); +} + +static int stfsm_s25fl_clear_status_reg(struct stfsm *fsm) +{ +	struct stfsm_seq seq = { +		.seq_opc[0] = (SEQ_OPC_PADS_1 | +			       SEQ_OPC_CYCLES(8) | +			       SEQ_OPC_OPCODE(S25FL_CMD_CLSR) | +			       SEQ_OPC_CSDEASSERT), +		.seq_opc[1] = (SEQ_OPC_PADS_1 | +			       SEQ_OPC_CYCLES(8) | +			       SEQ_OPC_OPCODE(SPINOR_OP_WRDI) | +			       SEQ_OPC_CSDEASSERT), +		.seq = { +			STFSM_INST_CMD1, +			STFSM_INST_CMD2, +			STFSM_INST_WAIT, +			STFSM_INST_STOP, +		}, +		.seq_cfg = (SEQ_CFG_PADS_1 | +			    SEQ_CFG_ERASE | +			    SEQ_CFG_READNOTWRITE | +			    SEQ_CFG_CSDEASSERT | +			    SEQ_CFG_STARTSEQ), +	}; + +	stfsm_load_seq(fsm, &seq); + +	stfsm_wait_seq(fsm); + +	return 0; +} + +static int stfsm_s25fl_config(struct stfsm *fsm) +{ +	struct flash_info *info = fsm->info; +	uint32_t flags = info->flags; +	uint32_t data_pads; +	uint32_t offs; +	uint16_t sta_wr; +	uint8_t sr1, cr1, dyb; +	int update_sr = 0; +	int ret; + +	if (flags & FLASH_FLAG_32BIT_ADDR) { +		/* +		 * Prepare Read/Write/Erase sequences according to S25FLxxx +		 * 32-bit address command set +		 */ +		ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read, +						  stfsm_s25fl_read4_configs); +		if (ret) +			return ret; + +		ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write, +						  stfsm_s25fl_write4_configs); +		if (ret) +			return ret; 
+ +		stfsm_s25fl_prepare_erasesec_seq_32(&stfsm_seq_erase_sector); + +	} else { +		/* Use default configurations for 24-bit addressing */ +		ret = stfsm_prepare_rwe_seqs_default(fsm); +		if (ret) +			return ret; +	} + +	/* +	 * For devices that support 'DYB' sector locking, check lock status and +	 * unlock sectors if necessary (some variants power-on with sectors +	 * locked by default) +	 */ +	if (flags & FLASH_FLAG_DYB_LOCKING) { +		offs = 0; +		for (offs = 0; offs < info->sector_size * info->n_sectors;) { +			stfsm_s25fl_read_dyb(fsm, offs, &dyb); +			if (dyb == 0x00) +				stfsm_s25fl_write_dyb(fsm, offs, 0xff); + +			/* Handle bottom/top 4KiB parameter sectors */ +			if ((offs < info->sector_size * 2) || +			    (offs >= (info->sector_size - info->n_sectors * 4))) +				offs += 0x1000; +			else +				offs += 0x10000; +		} +	} + +	/* Check status of 'QE' bit, update if required. */ +	stfsm_read_status(fsm, SPINOR_OP_RDSR2, &cr1, 1); +	data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1; +	if (data_pads == 4) { +		if (!(cr1 & STFSM_S25FL_CONFIG_QE)) { +			/* Set 'QE' */ +			cr1 |= STFSM_S25FL_CONFIG_QE; + +			update_sr = 1; +		} +	} else { +		if (cr1 & STFSM_S25FL_CONFIG_QE) { +			/* Clear 'QE' */ +			cr1 &= ~STFSM_S25FL_CONFIG_QE; + +			update_sr = 1; +		} +	} +	if (update_sr) { +		stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1); +		sta_wr = ((uint16_t)cr1  << 8) | sr1; +		stfsm_write_status(fsm, SPINOR_OP_WRSR, sta_wr, 2, 1); +	} + +	/* +	 * S25FLxxx devices support Program and Error error flags. +	 * Configure driver to check flags and clear if necessary. +	 */ +	fsm->configuration |= CFG_S25FL_CHECK_ERROR_FLAGS; + +	return 0; +} + +static int stfsm_w25q_config(struct stfsm *fsm) +{ +	uint32_t data_pads; +	uint8_t sr1, sr2; +	uint16_t sr_wr; +	int update_sr = 0; +	int ret; + +	ret = stfsm_prepare_rwe_seqs_default(fsm); +	if (ret) +		return ret; + +	/* Check status of 'QE' bit, update if required. 
*/ +	stfsm_read_status(fsm, SPINOR_OP_RDSR2, &sr2, 1); +	data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1; +	if (data_pads == 4) { +		if (!(sr2 & W25Q_STATUS_QE)) { +			/* Set 'QE' */ +			sr2 |= W25Q_STATUS_QE; +			update_sr = 1; +		} +	} else { +		if (sr2 & W25Q_STATUS_QE) { +			/* Clear 'QE' */ +			sr2 &= ~W25Q_STATUS_QE; +			update_sr = 1; +		} +	} +	if (update_sr) { +		/* Write status register */ +		stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1); +		sr_wr = ((uint16_t)sr2 << 8) | sr1; +		stfsm_write_status(fsm, SPINOR_OP_WRSR, sr_wr, 2, 1); +	} + +	return 0; +} + +static int stfsm_read(struct stfsm *fsm, uint8_t *buf, uint32_t size, +		      uint32_t offset) +{ +	struct stfsm_seq *seq = &fsm->stfsm_seq_read; +	uint32_t data_pads; +	uint32_t read_mask; +	uint32_t size_ub; +	uint32_t size_lb; +	uint32_t size_mop; +	uint32_t tmp[4]; +	uint32_t page_buf[FLASH_PAGESIZE_32]; +	uint8_t *p; + +	dev_dbg(fsm->dev, "reading %d bytes from 0x%08x\n", size, offset); + +	/* Enter 32-bit address mode, if required */ +	if (fsm->configuration & CFG_READ_TOGGLE_32BIT_ADDR) +		stfsm_enter_32bit_addr(fsm, 1); + +	/* Must read in multiples of 32 cycles (or 32*pads/8 Bytes) */ +	data_pads = ((seq->seq_cfg >> 16) & 0x3) + 1; +	read_mask = (data_pads << 2) - 1; + +	/* Handle non-aligned buf */ +	p = ((uintptr_t)buf & 0x3) ? 
(uint8_t *)page_buf : buf; + +	/* Handle non-aligned size */ +	size_ub = (size + read_mask) & ~read_mask; +	size_lb = size & ~read_mask; +	size_mop = size & read_mask; + +	seq->data_size = TRANSFER_SIZE(size_ub); +	seq->addr1 = (offset >> 16) & 0xffff; +	seq->addr2 = offset & 0xffff; + +	stfsm_load_seq(fsm, seq); + +	if (size_lb) +		stfsm_read_fifo(fsm, (uint32_t *)p, size_lb); + +	if (size_mop) { +		stfsm_read_fifo(fsm, tmp, read_mask + 1); +		memcpy(p + size_lb, &tmp, size_mop); +	} + +	/* Handle non-aligned buf */ +	if ((uintptr_t)buf & 0x3) +		memcpy(buf, page_buf, size); + +	/* Wait for sequence to finish */ +	stfsm_wait_seq(fsm); + +	stfsm_clear_fifo(fsm); + +	/* Exit 32-bit address mode, if required */ +	if (fsm->configuration & CFG_READ_TOGGLE_32BIT_ADDR) +		stfsm_enter_32bit_addr(fsm, 0); + +	return 0; +} + +static int stfsm_write(struct stfsm *fsm, const uint8_t *buf, +		       uint32_t size, uint32_t offset) +{ +	struct stfsm_seq *seq = &fsm->stfsm_seq_write; +	uint32_t data_pads; +	uint32_t write_mask; +	uint32_t size_ub; +	uint32_t size_lb; +	uint32_t size_mop; +	uint32_t tmp[4]; +	uint32_t page_buf[FLASH_PAGESIZE_32]; +	uint8_t *t = (uint8_t *)&tmp; +	const uint8_t *p; +	int ret; +	int i; + +	dev_dbg(fsm->dev, "writing %d bytes to 0x%08x\n", size, offset); + +	/* Enter 32-bit address mode, if required */ +	if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR) +		stfsm_enter_32bit_addr(fsm, 1); + +	/* Must write in multiples of 32 cycles (or 32*pads/8 bytes) */ +	data_pads = ((seq->seq_cfg >> 16) & 0x3) + 1; +	write_mask = (data_pads << 2) - 1; + +	/* Handle non-aligned buf */ +	if ((uintptr_t)buf & 0x3) { +		memcpy(page_buf, buf, size); +		p = (uint8_t *)page_buf; +	} else { +		p = buf; +	} + +	/* Handle non-aligned size */ +	size_ub = (size + write_mask) & ~write_mask; +	size_lb = size & ~write_mask; +	size_mop = size & write_mask; + +	seq->data_size = TRANSFER_SIZE(size_ub); +	seq->addr1 = (offset >> 16) & 0xffff; +	seq->addr2 = offset & 0xffff; + +	
/* Need to set FIFO to write mode, before writing data to FIFO (see +	 * GNBvb79594) +	 */ +	writel(0x00040000, fsm->base + SPI_FAST_SEQ_CFG); + +	/* +	 * Before writing data to the FIFO, apply a small delay to allow a +	 * potential change of FIFO direction to complete. +	 */ +	if (fsm->fifo_dir_delay == 0) +		readl(fsm->base + SPI_FAST_SEQ_CFG); +	else +		udelay(fsm->fifo_dir_delay); + + +	/* Write data to FIFO, before starting sequence (see GNBvd79593) */ +	if (size_lb) { +		stfsm_write_fifo(fsm, (uint32_t *)p, size_lb); +		p += size_lb; +	} + +	/* Handle non-aligned size */ +	if (size_mop) { +		memset(t, 0xff, write_mask + 1);	/* fill with 0xff's */ +		for (i = 0; i < size_mop; i++) +			t[i] = *p++; + +		stfsm_write_fifo(fsm, tmp, write_mask + 1); +	} + +	/* Start sequence */ +	stfsm_load_seq(fsm, seq); + +	/* Wait for sequence to finish */ +	stfsm_wait_seq(fsm); + +	/* Wait for completion */ +	ret = stfsm_wait_busy(fsm); +	if (ret && fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS) +		stfsm_s25fl_clear_status_reg(fsm); + +	/* Exit 32-bit address mode, if required */ +	if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR) +		stfsm_enter_32bit_addr(fsm, 0); + +	return 0; +} + +/* + * Read an address range from the flash chip. The address range + * may be any size provided it is within the physical boundaries. 
+ */ +static int stfsm_mtd_read(struct mtd_info *mtd, loff_t from, size_t len, +			  size_t *retlen, u_char *buf) +{ +	struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent); +	uint32_t bytes; + +	dev_dbg(fsm->dev, "%s from 0x%08x, len %zd\n", +		__func__, (u32)from, len); + +	mutex_lock(&fsm->lock); + +	while (len > 0) { +		bytes = min_t(size_t, len, FLASH_PAGESIZE); + +		stfsm_read(fsm, buf, bytes, from); + +		buf += bytes; +		from += bytes; +		len -= bytes; + +		*retlen += bytes; +	} + +	mutex_unlock(&fsm->lock); + +	return 0; +} + +static int stfsm_erase_sector(struct stfsm *fsm, uint32_t offset) +{ +	struct stfsm_seq *seq = &stfsm_seq_erase_sector; +	int ret; + +	dev_dbg(fsm->dev, "erasing sector at 0x%08x\n", offset); + +	/* Enter 32-bit address mode, if required */ +	if (fsm->configuration & CFG_ERASESEC_TOGGLE_32BIT_ADDR) +		stfsm_enter_32bit_addr(fsm, 1); + +	seq->addr1 = (offset >> 16) & 0xffff; +	seq->addr2 = offset & 0xffff; + +	stfsm_load_seq(fsm, seq); + +	stfsm_wait_seq(fsm); + +	/* Wait for completion */ +	ret = stfsm_wait_busy(fsm); +	if (ret && fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS) +		stfsm_s25fl_clear_status_reg(fsm); + +	/* Exit 32-bit address mode, if required */ +	if (fsm->configuration & CFG_ERASESEC_TOGGLE_32BIT_ADDR) +		stfsm_enter_32bit_addr(fsm, 0); + +	return ret; +} + +static int stfsm_erase_chip(struct stfsm *fsm) +{ +	const struct stfsm_seq *seq = &stfsm_seq_erase_chip; + +	dev_dbg(fsm->dev, "erasing chip\n"); + +	stfsm_load_seq(fsm, seq); + +	stfsm_wait_seq(fsm); + +	return stfsm_wait_busy(fsm); +} + +/* + * Write an address range to the flash chip.  Data must be written in + * FLASH_PAGESIZE chunks.  The address range may be any size provided + * it is within the physical boundaries. 
+ */ +static int stfsm_mtd_write(struct mtd_info *mtd, loff_t to, size_t len, +			   size_t *retlen, const u_char *buf) +{ +	struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent); + +	u32 page_offs; +	u32 bytes; +	uint8_t *b = (uint8_t *)buf; +	int ret = 0; + +	dev_dbg(fsm->dev, "%s to 0x%08x, len %zd\n", __func__, (u32)to, len); + +	/* Offset within page */ +	page_offs = to % FLASH_PAGESIZE; + +	mutex_lock(&fsm->lock); + +	while (len) { +		/* Write up to page boundary */ +		bytes = min_t(size_t, FLASH_PAGESIZE - page_offs, len); + +		ret = stfsm_write(fsm, b, bytes, to); +		if (ret) +			goto out1; + +		b += bytes; +		len -= bytes; +		to += bytes; + +		/* We are now page-aligned */ +		page_offs = 0; + +		*retlen += bytes; + +	} + +out1: +	mutex_unlock(&fsm->lock); + +	return ret; +} + +/* + * Erase an address range on the flash chip. The address range may extend + * over one or more erase sectors.  Return an error if there is a problem erasing. + */ +static int stfsm_mtd_erase(struct mtd_info *mtd, struct erase_info *instr) +{ +	struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent); +	u32 addr, len; +	int ret; + +	dev_dbg(fsm->dev, "%s at 0x%llx, len %lld\n", __func__, +		(long long)instr->addr, (long long)instr->len); + +	addr = instr->addr; +	len = instr->len; + +	mutex_lock(&fsm->lock); + +	/* Whole-chip erase? 
*/ +	if (len == mtd->size) { +		ret = stfsm_erase_chip(fsm); +		if (ret) +			goto out1; +	} else { +		while (len) { +			ret = stfsm_erase_sector(fsm, addr); +			if (ret) +				goto out1; + +			addr += mtd->erasesize; +			len -= mtd->erasesize; +		} +	} + +	mutex_unlock(&fsm->lock); + +	instr->state = MTD_ERASE_DONE; +	mtd_erase_callback(instr); + +	return 0; + +out1: +	instr->state = MTD_ERASE_FAILED; +	mutex_unlock(&fsm->lock); + +	return ret; +} + +static void stfsm_read_jedec(struct stfsm *fsm, uint8_t *jedec) +{ +	const struct stfsm_seq *seq = &stfsm_seq_read_jedec; +	uint32_t tmp[2]; + +	stfsm_load_seq(fsm, seq); + +	stfsm_read_fifo(fsm, tmp, 8); + +	memcpy(jedec, tmp, 5); + +	stfsm_wait_seq(fsm); +} + +static struct flash_info *stfsm_jedec_probe(struct stfsm *fsm) +{ +	struct flash_info	*info; +	u16                     ext_jedec; +	u32			jedec; +	u8			id[5]; + +	stfsm_read_jedec(fsm, id); + +	jedec     = id[0] << 16 | id[1] << 8 | id[2]; +	/* +	 * JEDEC also defines an optional "extended device information" +	 * string for after vendor-specific data, after the three bytes +	 * we use here. Supporting some chips might require using it. 
+	 */ +	ext_jedec = id[3] << 8  | id[4]; + +	dev_dbg(fsm->dev, "JEDEC =  0x%08x [%02x %02x %02x %02x %02x]\n", +		jedec, id[0], id[1], id[2], id[3], id[4]); + +	for (info = flash_types; info->name; info++) { +		if (info->jedec_id == jedec) { +			if (info->ext_id && info->ext_id != ext_jedec) +				continue; +			return info; +		} +	} +	dev_err(fsm->dev, "Unrecognized JEDEC id %06x\n", jedec); + +	return NULL; +} + +static int stfsm_set_mode(struct stfsm *fsm, uint32_t mode) +{ +	int ret, timeout = 10; + +	/* Wait for controller to accept mode change */ +	while (--timeout) { +		ret = readl(fsm->base + SPI_STA_MODE_CHANGE); +		if (ret & 0x1) +			break; +		udelay(1); +	} + +	if (!timeout) +		return -EBUSY; + +	writel(mode, fsm->base + SPI_MODESELECT); + +	return 0; +} + +static void stfsm_set_freq(struct stfsm *fsm, uint32_t spi_freq) +{ +	uint32_t emi_freq; +	uint32_t clk_div; + +	/* TODO: Make this dynamic */ +	emi_freq = STFSM_DEFAULT_EMI_FREQ; + +	/* +	 * Calculate clk_div - values between 2 and 128 +	 * Multiple of 2, rounded up +	 */ +	clk_div = 2 * DIV_ROUND_UP(emi_freq, 2 * spi_freq); +	if (clk_div < 2) +		clk_div = 2; +	else if (clk_div > 128) +		clk_div = 128; + +	/* +	 * Determine a suitable delay for the IP to complete a change of +	 * direction of the FIFO. The required delay is related to the clock +	 * divider used. The following heuristics are based on empirical tests, +	 * using a 100MHz EMI clock. 
+	 */ +	if (clk_div <= 4) +		fsm->fifo_dir_delay = 0; +	else if (clk_div <= 10) +		fsm->fifo_dir_delay = 1; +	else +		fsm->fifo_dir_delay = DIV_ROUND_UP(clk_div, 10); + +	dev_dbg(fsm->dev, "emi_clk = %uHZ, spi_freq = %uHZ, clk_div = %u\n", +		emi_freq, spi_freq, clk_div); + +	writel(clk_div, fsm->base + SPI_CLOCKDIV); +} + +static int stfsm_init(struct stfsm *fsm) +{ +	int ret; + +	/* Perform a soft reset of the FSM controller */ +	writel(SEQ_CFG_SWRESET, fsm->base + SPI_FAST_SEQ_CFG); +	udelay(1); +	writel(0, fsm->base + SPI_FAST_SEQ_CFG); + +	/* Set clock to 'safe' frequency initially */ +	stfsm_set_freq(fsm, STFSM_FLASH_SAFE_FREQ); + +	/* Switch to FSM */ +	ret = stfsm_set_mode(fsm, SPI_MODESELECT_FSM); +	if (ret) +		return ret; + +	/* Set timing parameters */ +	writel(SPI_CFG_DEVICE_ST            | +	       SPI_CFG_DEFAULT_MIN_CS_HIGH  | +	       SPI_CFG_DEFAULT_CS_SETUPHOLD | +	       SPI_CFG_DEFAULT_DATA_HOLD, +	       fsm->base + SPI_CONFIGDATA); +	writel(STFSM_DEFAULT_WR_TIME, fsm->base + SPI_STATUS_WR_TIME_REG); + +	/* +	 * Set the FSM 'WAIT' delay to the minimum workable value.  Note, for +	 * our purposes, the WAIT instruction is used purely to achieve +	 * "sequence validity" rather than actually implement a delay. 
+	 */ +	writel(0x00000001, fsm->base + SPI_PROGRAM_ERASE_TIME); + +	/* Clear FIFO, just in case */ +	stfsm_clear_fifo(fsm); + +	return 0; +} + +static void stfsm_fetch_platform_configs(struct platform_device *pdev) +{ +	struct stfsm *fsm = platform_get_drvdata(pdev); +	struct device_node *np = pdev->dev.of_node; +	struct regmap *regmap; +	uint32_t boot_device_reg; +	uint32_t boot_device_spi; +	uint32_t boot_device;     /* Value we read from *boot_device_reg */ +	int ret; + +	/* Booting from SPI NOR Flash is the default */ +	fsm->booted_from_spi = true; + +	regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg"); +	if (IS_ERR(regmap)) +		goto boot_device_fail; + +	fsm->reset_signal = of_property_read_bool(np, "st,reset-signal"); + +	fsm->reset_por = of_property_read_bool(np, "st,reset-por"); + +	/* Where in the syscon the boot device information lives */ +	ret = of_property_read_u32(np, "st,boot-device-reg", &boot_device_reg); +	if (ret) +		goto boot_device_fail; + +	/* Boot device value when booted from SPI NOR */ +	ret = of_property_read_u32(np, "st,boot-device-spi", &boot_device_spi); +	if (ret) +		goto boot_device_fail; + +	ret = regmap_read(regmap, boot_device_reg, &boot_device); +	if (ret) +		goto boot_device_fail; + +	if (boot_device != boot_device_spi) +		fsm->booted_from_spi = false; + +	return; + +boot_device_fail: +	dev_warn(&pdev->dev, +		 "failed to fetch boot device, assuming boot from SPI\n"); +} + +static int stfsm_probe(struct platform_device *pdev) +{ +	struct device_node *np = pdev->dev.of_node; +	struct mtd_part_parser_data ppdata; +	struct flash_info *info; +	struct resource *res; +	struct stfsm *fsm; +	int ret; + +	if (!np) { +		dev_err(&pdev->dev, "No DT found\n"); +		return -EINVAL; +	} +	ppdata.of_node = np; + +	fsm = devm_kzalloc(&pdev->dev, sizeof(*fsm), GFP_KERNEL); +	if (!fsm) +		return -ENOMEM; + +	fsm->dev = &pdev->dev; + +	platform_set_drvdata(pdev, fsm); + +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	if (!res) { +		
dev_err(&pdev->dev, "Resource not found\n"); +		return -ENODEV; +	} + +	fsm->base = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(fsm->base)) { +		dev_err(&pdev->dev, +			"Failed to reserve memory region %pR\n", res); +		return PTR_ERR(fsm->base); +	} + +	mutex_init(&fsm->lock); + +	ret = stfsm_init(fsm); +	if (ret) { +		dev_err(&pdev->dev, "Failed to initialise FSM Controller\n"); +		return ret; +	} + +	stfsm_fetch_platform_configs(pdev); + +	/* Detect SPI FLASH device */ +	info = stfsm_jedec_probe(fsm); +	if (!info) +		return -ENODEV; +	fsm->info = info; + +	/* Use device size to determine address width */ +	if (info->sector_size * info->n_sectors > 0x1000000) +		info->flags |= FLASH_FLAG_32BIT_ADDR; + +	/* +	 * Configure READ/WRITE/ERASE sequences according to platform and +	 * device flags. +	 */ +	if (info->config) { +		ret = info->config(fsm); +		if (ret) +			return ret; +	} else { +		ret = stfsm_prepare_rwe_seqs_default(fsm); +		if (ret) +			return ret; +	} + +	fsm->mtd.name		= info->name; +	fsm->mtd.dev.parent	= &pdev->dev; +	fsm->mtd.type		= MTD_NORFLASH; +	fsm->mtd.writesize	= 4; +	fsm->mtd.writebufsize	= fsm->mtd.writesize; +	fsm->mtd.flags		= MTD_CAP_NORFLASH; +	fsm->mtd.size		= info->sector_size * info->n_sectors; +	fsm->mtd.erasesize	= info->sector_size; + +	fsm->mtd._read  = stfsm_mtd_read; +	fsm->mtd._write = stfsm_mtd_write; +	fsm->mtd._erase = stfsm_mtd_erase; + +	dev_info(&pdev->dev, +		"Found serial flash device: %s\n" +		" size = %llx (%lldMiB) erasesize = 0x%08x (%uKiB)\n", +		info->name, +		(long long)fsm->mtd.size, (long long)(fsm->mtd.size >> 20), +		fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10)); + +	return mtd_device_parse_register(&fsm->mtd, NULL, &ppdata, NULL, 0); +} + +static int stfsm_remove(struct platform_device *pdev) +{ +	struct stfsm *fsm = platform_get_drvdata(pdev); + +	return mtd_device_unregister(&fsm->mtd); +} + +static const struct of_device_id stfsm_match[] = { +	{ .compatible = "st,spi-fsm", }, +	{}, +}; 
+MODULE_DEVICE_TABLE(of, stfsm_match); + +static struct platform_driver stfsm_driver = { +	.probe		= stfsm_probe, +	.remove		= stfsm_remove, +	.driver		= { +		.name	= "st-spi-fsm", +		.owner	= THIS_MODULE, +		.of_match_table = stfsm_match, +	}, +}; +module_platform_driver(stfsm_driver); + +MODULE_AUTHOR("Angus Clark <angus.clark@st.com>"); +MODULE_DESCRIPTION("ST SPI FSM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c index 3af35148409..b66b541877f 100644 --- a/drivers/mtd/inftlcore.c +++ b/drivers/mtd/inftlcore.c @@ -50,7 +50,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)  	struct INFTLrecord *inftl;  	unsigned long temp; -	if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX) +	if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)  		return;  	/* OK, this is moderately ugly.  But probably safe.  Alternatives? */  	if (memcmp(mtd->name, "DiskOnChip", 10)) diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c index 4adc0374fb6..487e64f411a 100644 --- a/drivers/mtd/inftlmount.c +++ b/drivers/mtd/inftlmount.c @@ -30,7 +30,6 @@  #include <asm/uaccess.h>  #include <linux/delay.h>  #include <linux/slab.h> -#include <linux/init.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/nftl.h>  #include <linux/mtd/inftl.h> diff --git a/drivers/mtd/lpddr/Kconfig b/drivers/mtd/lpddr/Kconfig index 265f969817e..3a19cbee24d 100644 --- a/drivers/mtd/lpddr/Kconfig +++ b/drivers/mtd/lpddr/Kconfig @@ -1,5 +1,5 @@ -menu "LPDDR flash memory drivers" -	depends on MTD!=n +menu "LPDDR & LPDDR2 PCM memory drivers" +	depends on MTD  config MTD_LPDDR  	tristate "Support for LPDDR flash chips" @@ -17,4 +17,13 @@ config MTD_QINFO_PROBE  	    Window QINFO interface, permits software to be used for entire  	    families of devices. 
This serves similar purpose of CFI on legacy  	    Flash products + +config MTD_LPDDR2_NVM +	# ARM dependency is only for writel_relaxed() +	depends on MTD && ARM +	tristate "Support for LPDDR2-NVM flash chips" +	help +	  This option enables support of PCM memories with a LPDDR2-NVM +	  (Low power double data rate 2) interface. +  endmenu diff --git a/drivers/mtd/lpddr/Makefile b/drivers/mtd/lpddr/Makefile index da48e46b581..881d440d483 100644 --- a/drivers/mtd/lpddr/Makefile +++ b/drivers/mtd/lpddr/Makefile @@ -4,3 +4,4 @@  obj-$(CONFIG_MTD_QINFO_PROBE)	+= qinfo_probe.o  obj-$(CONFIG_MTD_LPDDR)	+= lpddr_cmds.o +obj-$(CONFIG_MTD_LPDDR2_NVM) += lpddr2_nvm.o diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c new file mode 100644 index 00000000000..063cec40d0a --- /dev/null +++ b/drivers/mtd/lpddr/lpddr2_nvm.c @@ -0,0 +1,507 @@ +/* + * LPDDR2-NVM MTD driver. This module provides read, write, erase, lock/unlock + * support for LPDDR2-NVM PCM memories + * + * Copyright © 2012 Micron Technology, Inc. + * + * Vincenzo Aliberti <vincenzo.aliberti@gmail.com> + * Domenico Manna <domenico.manna@gmail.com> + * Many thanks to Andrea Vigilante for initial enabling + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ + +#include <linux/init.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/mtd/map.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/ioport.h> +#include <linux/err.h> + +/* Parameters */ +#define ERASE_BLOCKSIZE			(0x00020000/2)	/* in Word */ +#define WRITE_BUFFSIZE			(0x00000400/2)	/* in Word */ +#define OW_BASE_ADDRESS			0x00000000	/* OW offset */ +#define BUS_WIDTH			0x00000020	/* x32 devices */ + +/* PFOW symbols address offset */ +#define PFOW_QUERY_STRING_P		(0x0000/2)	/* in Word */ +#define PFOW_QUERY_STRING_F		(0x0002/2)	/* in Word */ +#define PFOW_QUERY_STRING_O		(0x0004/2)	/* in Word */ +#define PFOW_QUERY_STRING_W		(0x0006/2)	/* in Word */ + +/* OW registers address */ +#define CMD_CODE_OFS			(0x0080/2)	/* in Word */ +#define CMD_DATA_OFS			(0x0084/2)	/* in Word */ +#define CMD_ADD_L_OFS			(0x0088/2)	/* in Word */ +#define CMD_ADD_H_OFS			(0x008A/2)	/* in Word */ +#define MPR_L_OFS			(0x0090/2)	/* in Word */ +#define MPR_H_OFS			(0x0092/2)	/* in Word */ +#define CMD_EXEC_OFS			(0x00C0/2)	/* in Word */ +#define STATUS_REG_OFS			(0x00CC/2)	/* in Word */ +#define PRG_BUFFER_OFS			(0x0010/2)	/* in Word */ + +/* Datamask */ +#define MR_CFGMASK			0x8000 +#define SR_OK_DATAMASK			0x0080 + +/* LPDDR2-NVM Commands */ +#define LPDDR2_NVM_LOCK			0x0061 +#define LPDDR2_NVM_UNLOCK		0x0062 +#define LPDDR2_NVM_SW_PROGRAM		0x0041 +#define LPDDR2_NVM_SW_OVERWRITE		0x0042 +#define LPDDR2_NVM_BUF_PROGRAM		0x00E9 +#define LPDDR2_NVM_BUF_OVERWRITE	0x00EA +#define LPDDR2_NVM_ERASE		0x0020 + +/* LPDDR2-NVM Registers offset */ +#define LPDDR2_MODE_REG_DATA		0x0040 +#define LPDDR2_MODE_REG_CFG		0x0050 + +/* + * Internal Type Definitions + * pcm_int_data contains memory controller details: + * @reg_data : LPDDR2_MODE_REG_DATA register address after remapping + * @reg_cfg  : 
LPDDR2_MODE_REG_CFG register address after remapping + * @bus_width: memory bus-width (eg: x16 2 Bytes, x32 4 Bytes) + */ +struct pcm_int_data { +	void __iomem *ctl_regs; +	int bus_width; +}; + +static DEFINE_MUTEX(lpdd2_nvm_mutex); + +/* + * Build a map_word starting from a u_long + */ +static inline map_word build_map_word(u_long myword) +{ +	map_word val = { {0} }; +	val.x[0] = myword; +	return val; +} + +/* + * Build Mode Register Configuration DataMask based on device bus-width + */ +static inline u_int build_mr_cfgmask(u_int bus_width) +{ +	u_int val = MR_CFGMASK; + +	if (bus_width == 0x0004)		/* x32 device */ +		val = val << 16; + +	return val; +} + +/* + * Build Status Register OK DataMask based on device bus-width + */ +static inline u_int build_sr_ok_datamask(u_int bus_width) +{ +	u_int val = SR_OK_DATAMASK; + +	if (bus_width == 0x0004)		/* x32 device */ +		val = (val << 16)+val; + +	return val; +} + +/* + * Evaluates Overlay Window Control Registers address + */ +static inline u_long ow_reg_add(struct map_info *map, u_long offset) +{ +	u_long val = 0; +	struct pcm_int_data *pcm_data = map->fldrv_priv; + +	val = map->pfow_base + offset*pcm_data->bus_width; + +	return val; +} + +/* + * Enable lpddr2-nvm Overlay Window + * Overlay Window is a memory mapped area containing all LPDDR2-NVM registers + * used by device commands as well as user-visible resources like Device Status + * Register, Device ID, etc + */ +static inline void ow_enable(struct map_info *map) +{ +	struct pcm_int_data *pcm_data = map->fldrv_priv; + +	writel_relaxed(build_mr_cfgmask(pcm_data->bus_width) | 0x18, +		pcm_data->ctl_regs + LPDDR2_MODE_REG_CFG); +	writel_relaxed(0x01, pcm_data->ctl_regs + LPDDR2_MODE_REG_DATA); +} + +/* + * Disable lpddr2-nvm Overlay Window + * Overlay Window is a memory mapped area containing all LPDDR2-NVM registers + * used by device commands as well as user-visible resources like Device Status + * Register, Device ID, etc + */ +static inline void 
ow_disable(struct map_info *map) +{ +	struct pcm_int_data *pcm_data = map->fldrv_priv; + +	writel_relaxed(build_mr_cfgmask(pcm_data->bus_width) | 0x18, +		pcm_data->ctl_regs + LPDDR2_MODE_REG_CFG); +	writel_relaxed(0x02, pcm_data->ctl_regs + LPDDR2_MODE_REG_DATA); +} + +/* + * Execute lpddr2-nvm operations + */ +static int lpddr2_nvm_do_op(struct map_info *map, u_long cmd_code, +	u_long cmd_data, u_long cmd_add, u_long cmd_mpr, u_char *buf) +{ +	map_word add_l = { {0} }, add_h = { {0} }, mpr_l = { {0} }, +		mpr_h = { {0} }, data_l = { {0} }, cmd = { {0} }, +		exec_cmd = { {0} }, sr; +	map_word data_h = { {0} };	/* only for 2x x16 devices stacked */ +	u_long i, status_reg, prg_buff_ofs; +	struct pcm_int_data *pcm_data = map->fldrv_priv; +	u_int sr_ok_datamask = build_sr_ok_datamask(pcm_data->bus_width); + +	/* Builds low and high words for OW Control Registers */ +	add_l.x[0]	= cmd_add & 0x0000FFFF; +	add_h.x[0]	= (cmd_add >> 16) & 0x0000FFFF; +	mpr_l.x[0]	= cmd_mpr & 0x0000FFFF; +	mpr_h.x[0]	= (cmd_mpr >> 16) & 0x0000FFFF; +	cmd.x[0]	= cmd_code & 0x0000FFFF; +	exec_cmd.x[0]	= 0x0001; +	data_l.x[0]	= cmd_data & 0x0000FFFF; +	data_h.x[0]	= (cmd_data >> 16) & 0x0000FFFF; /* only for 2x x16 */ + +	/* Set Overlay Window Control Registers */ +	map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS)); +	map_write(map, data_l, ow_reg_add(map, CMD_DATA_OFS)); +	map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS)); +	map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS)); +	map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS)); +	map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS)); +	if (pcm_data->bus_width == 0x0004) {	/* 2x16 devices stacked */ +		map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS) + 2); +		map_write(map, data_h, ow_reg_add(map, CMD_DATA_OFS) + 2); +		map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS) + 2); +		map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS) + 2); +		map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS) + 2); +		map_write(map, mpr_h, ow_reg_add(map, 
MPR_H_OFS) + 2); +	} + +	/* Fill Program Buffer */ +	if ((cmd_code == LPDDR2_NVM_BUF_PROGRAM) || +		(cmd_code == LPDDR2_NVM_BUF_OVERWRITE)) { +		prg_buff_ofs = (map_read(map, +			ow_reg_add(map, PRG_BUFFER_OFS))).x[0]; +		for (i = 0; i < cmd_mpr; i++) { +			map_write(map, build_map_word(buf[i]), map->pfow_base + +			prg_buff_ofs + i); +		} +	} + +	/* Command Execute */ +	map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS)); +	if (pcm_data->bus_width == 0x0004)	/* 2x16 devices stacked */ +		map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS) + 2); + +	/* Status Register Check */ +	do { +		sr = map_read(map, ow_reg_add(map, STATUS_REG_OFS)); +		status_reg = sr.x[0]; +		if (pcm_data->bus_width == 0x0004) {/* 2x16 devices stacked */ +			sr = map_read(map, ow_reg_add(map, +				STATUS_REG_OFS) + 2); +			status_reg += sr.x[0] << 16; +		} +	} while ((status_reg & sr_ok_datamask) != sr_ok_datamask); + +	return (((status_reg & sr_ok_datamask) == sr_ok_datamask) ? 0 : -EIO); +} + +/* + * Execute lpddr2-nvm operations @ block level + */ +static int lpddr2_nvm_do_block_op(struct mtd_info *mtd, loff_t start_add, +	uint64_t len, u_char block_op) +{ +	struct map_info *map = mtd->priv; +	u_long add, end_add; +	int ret = 0; + +	mutex_lock(&lpdd2_nvm_mutex); + +	ow_enable(map); + +	add = start_add; +	end_add = add + len; + +	do { +		ret = lpddr2_nvm_do_op(map, block_op, 0x00, add, add, NULL); +		if (ret) +			goto out; +		add += mtd->erasesize; +	} while (add < end_add); + +out: +	ow_disable(map); +	mutex_unlock(&lpdd2_nvm_mutex); +	return ret; +} + +/* + * verify presence of PFOW string + */ +static int lpddr2_nvm_pfow_present(struct map_info *map) +{ +	map_word pfow_val[4]; +	unsigned int found = 1; + +	mutex_lock(&lpdd2_nvm_mutex); + +	ow_enable(map); + +	/* Load string from array */ +	pfow_val[0] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_P)); +	pfow_val[1] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_F)); +	pfow_val[2] = map_read(map, ow_reg_add(map, 
PFOW_QUERY_STRING_O)); +	pfow_val[3] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_W)); + +	/* Verify the string loaded vs expected */ +	if (!map_word_equal(map, build_map_word('P'), pfow_val[0])) +		found = 0; +	if (!map_word_equal(map, build_map_word('F'), pfow_val[1])) +		found = 0; +	if (!map_word_equal(map, build_map_word('O'), pfow_val[2])) +		found = 0; +	if (!map_word_equal(map, build_map_word('W'), pfow_val[3])) +		found = 0; + +	ow_disable(map); + +	mutex_unlock(&lpdd2_nvm_mutex); + +	return found; +} + +/* + * lpddr2_nvm driver read method + */ +static int lpddr2_nvm_read(struct mtd_info *mtd, loff_t start_add, +				size_t len, size_t *retlen, u_char *buf) +{ +	struct map_info *map = mtd->priv; + +	mutex_lock(&lpdd2_nvm_mutex); + +	*retlen = len; + +	map_copy_from(map, buf, start_add, *retlen); + +	mutex_unlock(&lpdd2_nvm_mutex); +	return 0; +} + +/* + * lpddr2_nvm driver write method + */ +static int lpddr2_nvm_write(struct mtd_info *mtd, loff_t start_add, +				size_t len, size_t *retlen, const u_char *buf) +{ +	struct map_info *map = mtd->priv; +	struct pcm_int_data *pcm_data = map->fldrv_priv; +	u_long add, current_len, tot_len, target_len, my_data; +	u_char *write_buf = (u_char *)buf; +	int ret = 0; + +	mutex_lock(&lpdd2_nvm_mutex); + +	ow_enable(map); + +	/* Set start value for the variables */ +	add = start_add; +	target_len = len; +	tot_len = 0; + +	while (tot_len < target_len) { +		if (!(IS_ALIGNED(add, mtd->writesize))) { /* do sw program */ +			my_data = write_buf[tot_len]; +			my_data += (write_buf[tot_len+1]) << 8; +			if (pcm_data->bus_width == 0x0004) {/* 2x16 devices */ +				my_data += (write_buf[tot_len+2]) << 16; +				my_data += (write_buf[tot_len+3]) << 24; +			} +			ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_SW_OVERWRITE, +				my_data, add, 0x00, NULL); +			if (ret) +				goto out; + +			add += pcm_data->bus_width; +			tot_len += pcm_data->bus_width; +		} else {		/* do buffer program */ +			current_len = min(target_len - tot_len, +				
(u_long) mtd->writesize); +			ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_BUF_OVERWRITE, +				0x00, add, current_len, write_buf + tot_len); +			if (ret) +				goto out; + +			add += current_len; +			tot_len += current_len; +		} +	} + +out: +	*retlen = tot_len; +	ow_disable(map); +	mutex_unlock(&lpdd2_nvm_mutex); +	return ret; +} + +/* + * lpddr2_nvm driver erase method + */ +static int lpddr2_nvm_erase(struct mtd_info *mtd, struct erase_info *instr) +{ +	int ret = lpddr2_nvm_do_block_op(mtd, instr->addr, instr->len, +		LPDDR2_NVM_ERASE); +	if (!ret) { +		instr->state = MTD_ERASE_DONE; +		mtd_erase_callback(instr); +	} + +	return ret; +} + +/* + * lpddr2_nvm driver unlock method + */ +static int lpddr2_nvm_unlock(struct mtd_info *mtd, loff_t start_add, +	uint64_t len) +{ +	return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_UNLOCK); +} + +/* + * lpddr2_nvm driver lock method + */ +static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add, +	uint64_t len) +{ +	return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK); +} + +/* + * lpddr2_nvm driver probe method + */ +static int lpddr2_nvm_probe(struct platform_device *pdev) +{ +	struct map_info *map; +	struct mtd_info *mtd; +	struct resource *add_range; +	struct resource *control_regs; +	struct pcm_int_data *pcm_data; + +	/* Allocate memory control_regs data structures */ +	pcm_data = devm_kzalloc(&pdev->dev, sizeof(*pcm_data), GFP_KERNEL); +	if (!pcm_data) +		return -ENOMEM; + +	pcm_data->bus_width = BUS_WIDTH; + +	/* Allocate memory for map_info & mtd_info data structures */ +	map = devm_kzalloc(&pdev->dev, sizeof(*map), GFP_KERNEL); +	if (!map) +		return -ENOMEM; + +	mtd = devm_kzalloc(&pdev->dev, sizeof(*mtd), GFP_KERNEL); +	if (!mtd) +		return -ENOMEM; + +	/* lpddr2_nvm address range */ +	add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0); + +	/* Populate map_info data structure */ +	*map = (struct map_info) { +		.virt		= devm_ioremap_resource(&pdev->dev, add_range), +		.name		= 
pdev->dev.init_name, +		.phys		= add_range->start, +		.size		= resource_size(add_range), +		.bankwidth	= pcm_data->bus_width / 2, +		.pfow_base	= OW_BASE_ADDRESS, +		.fldrv_priv	= pcm_data, +	}; +	if (IS_ERR(map->virt)) +		return PTR_ERR(map->virt); + +	simple_map_init(map);	/* fill with default methods */ + +	control_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); +	pcm_data->ctl_regs = devm_ioremap_resource(&pdev->dev, control_regs); +	if (IS_ERR(pcm_data->ctl_regs)) +		return PTR_ERR(pcm_data->ctl_regs); + +	/* Populate mtd_info data structure */ +	*mtd = (struct mtd_info) { +		.name		= pdev->dev.init_name, +		.type		= MTD_RAM, +		.priv		= map, +		.size		= resource_size(add_range), +		.erasesize	= ERASE_BLOCKSIZE * pcm_data->bus_width, +		.writesize	= 1, +		.writebufsize	= WRITE_BUFFSIZE * pcm_data->bus_width, +		.flags		= (MTD_CAP_NVRAM | MTD_POWERUP_LOCK), +		._read		= lpddr2_nvm_read, +		._write		= lpddr2_nvm_write, +		._erase		= lpddr2_nvm_erase, +		._unlock	= lpddr2_nvm_unlock, +		._lock		= lpddr2_nvm_lock, +	}; + +	/* Verify the presence of the device looking for PFOW string */ +	if (!lpddr2_nvm_pfow_present(map)) { +		pr_err("device not recognized\n"); +		return -EINVAL; +	} +	/* Parse partitions and register the MTD device */ +	return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0); +} + +/* + * lpddr2_nvm driver remove method + */ +static int lpddr2_nvm_remove(struct platform_device *pdev) +{ +	return mtd_device_unregister(dev_get_drvdata(&pdev->dev)); +} + +/* Initialize platform_driver data structure for lpddr2_nvm */ +static struct platform_driver lpddr2_nvm_drv = { +	.driver		= { +		.name	= "lpddr2_nvm", +	}, +	.probe		= lpddr2_nvm_probe, +	.remove		= lpddr2_nvm_remove, +}; + +module_platform_driver(lpddr2_nvm_drv); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Vincenzo Aliberti <vincenzo.aliberti@gmail.com>"); +MODULE_DESCRIPTION("MTD driver for LPDDR2-NVM PCM memories"); diff --git a/drivers/mtd/lpddr/lpddr_cmds.c 
b/drivers/mtd/lpddr/lpddr_cmds.c index d3cfe26beea..018c75faadb 100644 --- a/drivers/mtd/lpddr/lpddr_cmds.c +++ b/drivers/mtd/lpddr/lpddr_cmds.c @@ -55,10 +55,8 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)  	int i, j;  	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); -	if (!mtd) { -		printk(KERN_ERR "Failed to allocate memory for MTD device\n"); +	if (!mtd)  		return NULL; -	}  	mtd->priv = map;  	mtd->type = MTD_NORFLASH; @@ -388,7 +386,7 @@ static void put_chip(struct map_info *map, struct flchip *chip)  	wake_up(&chip->wq);  } -int do_write_buffer(struct map_info *map, struct flchip *chip, +static int do_write_buffer(struct map_info *map, struct flchip *chip,  			unsigned long adr, const struct kvec **pvec,  			unsigned long *pvec_seek, int len)  { @@ -469,7 +467,7 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,  	return ret;  } -int do_erase_oneblock(struct mtd_info *mtd, loff_t adr) +static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)  {  	struct map_info *map = mtd->priv;  	struct lpddr_private *lpddr = map->fldrv_priv; @@ -703,7 +701,7 @@ static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)  #define DO_XXLOCK_LOCK		1  #define DO_XXLOCK_UNLOCK	2 -int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk) +static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)  {  	int ret = 0;  	struct map_info *map = mtd->priv; @@ -748,34 +746,6 @@ static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)  	return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);  } -int word_program(struct map_info *map, loff_t adr, uint32_t curval) -{ -    int ret; -	struct lpddr_private *lpddr = map->fldrv_priv; -	int chipnum = adr >> lpddr->chipshift; -	struct flchip *chip = &lpddr->chips[chipnum]; - -	mutex_lock(&chip->mutex); -	ret = get_chip(map, chip, FL_WRITING); -	if (ret) { -		mutex_unlock(&chip->mutex); -		return ret; -	} - -	send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, 
(map_word *)&curval); - -	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime)); -	if (ret)	{ -		printk(KERN_WARNING"%s word_program error at: %llx; val: %x\n", -			map->name, adr, curval); -		goto out; -	} - -out:	put_chip(map, chip); -	mutex_unlock(&chip->mutex); -	return ret; -} -  MODULE_LICENSE("GPL");  MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");  MODULE_DESCRIPTION("MTD driver for LPDDR flash chips"); diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c index 45abed67f1e..69f2112340b 100644 --- a/drivers/mtd/lpddr/qinfo_probe.c +++ b/drivers/mtd/lpddr/qinfo_probe.c @@ -135,11 +135,8 @@ static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)  {  	lpddr->qinfo = kzalloc(sizeof(struct qinfo_chip), GFP_KERNEL); -	if (!lpddr->qinfo) { -		printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n", -				map->name); +	if (!lpddr->qinfo)  		return 0; -	}  	/* Get the ManuID */  	lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID)); diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 310dc7c9342..21b2874a303 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -66,11 +66,11 @@ config MTD_PHYSMAP_BANKWIDTH  	  used internally by the CFI drivers.  config MTD_PHYSMAP_OF -	tristate "Flash device in physical memory map based on OF description" -	depends on OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM) +	tristate "Memory device in physical memory map based on OF description" +	depends on OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_RAM)  	help -	  This provides a 'mapping' driver which allows the NOR Flash and -	  ROM driver code to communicate with chips which are mapped +	  This provides a 'mapping' driver which allows the NOR Flash, ROM +	  and RAM driver code to communicate with chips which are mapped  	  physically into the CPU's memory. The mapping description here is  	  taken from OF device tree. 
@@ -108,7 +108,7 @@ config MTD_SUN_UFLASH  config MTD_SC520CDP  	tristate "CFI Flash device mapped on AMD SC520 CDP" -	depends on X86 && MTD_CFI +	depends on (MELAN || COMPILE_TEST) && MTD_CFI  	help  	  The SC520 CDP board has two banks of CFI-compliant chips and one  	  Dual-in-line JEDEC chip. This 'mapping' driver supports that @@ -116,7 +116,7 @@ config MTD_SC520CDP  config MTD_NETSC520  	tristate "CFI Flash device mapped on AMD NetSc520" -	depends on X86 && MTD_CFI +	depends on (MELAN || COMPILE_TEST) && MTD_CFI  	help  	  This enables access routines for the flash chips on the AMD NetSc520  	  demonstration board. If you have one of these boards and would like @@ -124,7 +124,7 @@ config MTD_NETSC520  config MTD_TS5500  	tristate "JEDEC Flash device mapped on Technologic Systems TS-5500" -	depends on X86 +	depends on TS5500 || COMPILE_TEST  	select MTD_JEDECPROBE  	select MTD_CFI_AMDSTD  	help diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c index 5434d8ded01..6ea51e54904 100644 --- a/drivers/mtd/maps/bfin-async-flash.c +++ b/drivers/mtd/maps/bfin-async-flash.c @@ -14,7 +14,6 @@   * Licensed under the GPL-2 or later.   
*/ -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/module.h>  #include <linux/mtd/mtd.h> diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c index 1adba86474a..a4c477b9fdd 100644 --- a/drivers/mtd/maps/gpio-addr-flash.c +++ b/drivers/mtd/maps/gpio-addr-flash.c @@ -14,7 +14,6 @@   */  #include <linux/gpio.h> -#include <linux/init.h>  #include <linux/io.h>  #include <linux/kernel.h>  #include <linux/module.h> diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c index f581ac1cf02..5ab71f0e1bc 100644 --- a/drivers/mtd/maps/intel_vr_nor.c +++ b/drivers/mtd/maps/intel_vr_nor.c @@ -31,7 +31,6 @@  #include <linux/kernel.h>  #include <linux/slab.h>  #include <linux/pci.h> -#include <linux/init.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/map.h>  #include <linux/mtd/partitions.h> @@ -180,7 +179,6 @@ static void vr_nor_pci_remove(struct pci_dev *dev)  {  	struct vr_nor_mtd *p = pci_get_drvdata(dev); -	pci_set_drvdata(dev, NULL);  	vr_nor_destroy_partitions(p);  	vr_nor_destroy_mtd_setup(p);  	vr_nor_destroy_maps(p); diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c index 10debfea81e..6a589f1e288 100644 --- a/drivers/mtd/maps/ixp4xx.c +++ b/drivers/mtd/maps/ixp4xx.c @@ -13,9 +13,9 @@   *   */ +#include <linux/err.h>  #include <linux/module.h>  #include <linux/types.h> -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/string.h>  #include <linux/slab.h> @@ -162,13 +162,6 @@ static int ixp4xx_flash_remove(struct platform_device *dev)  		mtd_device_unregister(info->mtd);  		map_destroy(info->mtd);  	} -	if (info->map.virt) -		iounmap(info->map.virt); - -	if (info->res) { -		release_resource(info->res); -		kfree(info->res); -	}  	if (plat->exit)  		plat->exit(); @@ -194,7 +187,8 @@ static int ixp4xx_flash_probe(struct platform_device *dev)  			return err;  	} -	info = kzalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL); +	info = devm_kzalloc(&dev->dev, 
sizeof(struct ixp4xx_flash_info), +			    GFP_KERNEL);  	if(!info) {  		err = -ENOMEM;  		goto Error; @@ -220,20 +214,9 @@ static int ixp4xx_flash_probe(struct platform_device *dev)  	info->map.write = ixp4xx_probe_write16;  	info->map.copy_from = ixp4xx_copy_from; -	info->res = request_mem_region(dev->resource->start, -			resource_size(dev->resource), -			"IXP4XXFlash"); -	if (!info->res) { -		printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n"); -		err = -ENOMEM; -		goto Error; -	} - -	info->map.virt = ioremap(dev->resource->start, -				 resource_size(dev->resource)); -	if (!info->map.virt) { -		printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n"); -		err = -EIO; +	info->map.virt = devm_ioremap_resource(&dev->dev, dev->resource); +	if (IS_ERR(info->map.virt)) { +		err = PTR_ERR(info->map.virt);  		goto Error;  	} diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c index d7ac65d1d56..7aa682cd4d7 100644 --- a/drivers/mtd/maps/lantiq-flash.c +++ b/drivers/mtd/maps/lantiq-flash.c @@ -13,7 +13,6 @@  #include <linux/kernel.h>  #include <linux/io.h>  #include <linux/slab.h> -#include <linux/init.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/map.h>  #include <linux/mtd/partitions.h> @@ -123,24 +122,28 @@ ltq_mtd_probe(struct platform_device *pdev)  		return -ENODEV;  	} -	ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL); +	ltq_mtd = devm_kzalloc(&pdev->dev, sizeof(struct ltq_mtd), GFP_KERNEL); +	if (!ltq_mtd) +		return -ENOMEM; +  	platform_set_drvdata(pdev, ltq_mtd);  	ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);  	if (!ltq_mtd->res) {  		dev_err(&pdev->dev, "failed to get memory resource\n"); -		err = -ENOENT; -		goto err_out; +		return -ENOENT;  	} -	ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL); +	ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info), +				    GFP_KERNEL); +	if (!ltq_mtd->map) +		return -ENOMEM; +  	ltq_mtd->map->phys = ltq_mtd->res->start;  	
ltq_mtd->map->size = resource_size(ltq_mtd->res);  	ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res); -	if (IS_ERR(ltq_mtd->map->virt)) { -		err = PTR_ERR(ltq_mtd->map->virt); -		goto err_out; -	} +	if (IS_ERR(ltq_mtd->map->virt)) +		return PTR_ERR(ltq_mtd->map->virt);  	ltq_mtd->map->name = ltq_map_name;  	ltq_mtd->map->bankwidth = 2; @@ -155,8 +158,7 @@ ltq_mtd_probe(struct platform_device *pdev)  	if (!ltq_mtd->mtd) {  		dev_err(&pdev->dev, "probing failed\n"); -		err = -ENXIO; -		goto err_free; +		return -ENXIO;  	}  	ltq_mtd->mtd->owner = THIS_MODULE; @@ -177,10 +179,6 @@ ltq_mtd_probe(struct platform_device *pdev)  err_destroy:  	map_destroy(ltq_mtd->mtd); -err_free: -	kfree(ltq_mtd->map); -err_out: -	kfree(ltq_mtd);  	return err;  } @@ -189,13 +187,9 @@ ltq_mtd_remove(struct platform_device *pdev)  {  	struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev); -	if (ltq_mtd) { -		if (ltq_mtd->mtd) { -			mtd_device_unregister(ltq_mtd->mtd); -			map_destroy(ltq_mtd->mtd); -		} -		kfree(ltq_mtd->map); -		kfree(ltq_mtd); +	if (ltq_mtd && ltq_mtd->mtd) { +		mtd_device_unregister(ltq_mtd->mtd); +		map_destroy(ltq_mtd->mtd);  	}  	return 0;  } diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c index 98bb5d5375d..cadfbe05187 100644 --- a/drivers/mtd/maps/latch-addr-flash.c +++ b/drivers/mtd/maps/latch-addr-flash.c @@ -10,7 +10,6 @@   * kind, whether express or implied.   
*/ -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/module.h>  #include <linux/mtd/mtd.h> diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c index c2604f8b2a5..eb0242e0b2d 100644 --- a/drivers/mtd/maps/pci.c +++ b/drivers/mtd/maps/pci.c @@ -14,7 +14,6 @@  #include <linux/module.h>  #include <linux/kernel.h>  #include <linux/pci.h> -#include <linux/init.h>  #include <linux/slab.h>  #include <linux/mtd/mtd.h> @@ -316,7 +315,6 @@ static void mtd_pci_remove(struct pci_dev *dev)  	map->exit(dev, map);  	kfree(map); -	pci_set_drvdata(dev, NULL);  	pci_release_regions(dev);  } diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index d11109762ac..217c25d7381 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -15,7 +15,6 @@  #include <linux/module.h>  #include <linux/types.h> -#include <linux/init.h>  #include <linux/device.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/map.h> diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c index 676271659b3..d597e89f269 100644 --- a/drivers/mtd/maps/plat-ram.c +++ b/drivers/mtd/maps/plat-ram.c @@ -23,7 +23,6 @@  #include <linux/module.h>  #include <linux/types.h> -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/string.h>  #include <linux/ioport.h> @@ -55,7 +54,7 @@ struct platram_info {  static inline struct platram_info *to_platram_info(struct platform_device *dev)  { -	return (struct platram_info *)platform_get_drvdata(dev); +	return platform_get_drvdata(dev);  }  /* platram_setrw @@ -138,7 +137,6 @@ static int platram_probe(struct platform_device *pdev)  	info = kzalloc(sizeof(*info), GFP_KERNEL);  	if (info == NULL) { -		dev_err(&pdev->dev, "no memory for flash info\n");  		err = -ENOMEM;  		goto exit_error;  	} @@ -257,21 +255,7 @@ static struct platform_driver platram_driver = {  	},  }; -/* module init/exit */ - -static int __init platram_init(void) -{ -	printk("Generic platform RAM MTD, (c) 2004 
Simtec Electronics\n"); -	return platform_driver_register(&platram_driver); -} - -static void __exit platram_exit(void) -{ -	platform_driver_unregister(&platram_driver); -} - -module_init(platram_init); -module_exit(platram_exit); +module_platform_driver(platram_driver);  MODULE_LICENSE("GPL");  MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c index d210d131fef..cb4d92eea9f 100644 --- a/drivers/mtd/maps/pxa2xx-flash.c +++ b/drivers/mtd/maps/pxa2xx-flash.c @@ -13,7 +13,6 @@  #include <linux/types.h>  #include <linux/slab.h>  #include <linux/kernel.h> -#include <linux/init.h>  #include <linux/platform_device.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/map.h> @@ -61,7 +60,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)  	if (!info)  		return -ENOMEM; -	info->map.name = (char *) flash->name; +	info->map.name = flash->name;  	info->map.bankwidth = flash->width;  	info->map.phys = res->start;  	info->map.size = resource_size(res); @@ -73,7 +72,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)  		return -ENOMEM;  	}  	info->map.cached = -		ioremap_cached(info->map.phys, info->map.size); +		ioremap_cache(info->map.phys, info->map.size);  	if (!info->map.cached)  		printk(KERN_WARNING "Failed to ioremap cached %s\n",  		       info->map.name); diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c index 93525121d69..146b6047ed2 100644 --- a/drivers/mtd/maps/rbtx4939-flash.c +++ b/drivers/mtd/maps/rbtx4939-flash.c @@ -13,7 +13,6 @@  #include <linux/module.h>  #include <linux/types.h>  #include <linux/kernel.h> -#include <linux/init.h>  #include <linux/slab.h>  #include <linux/device.h>  #include <linux/platform_device.h> diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c index 8fead8e46bc..093edd51bdc 100644 --- a/drivers/mtd/maps/sc520cdp.c +++ b/drivers/mtd/maps/sc520cdp.c @@ -183,7 +183,7 @@ static const 
struct sc520_par_table par_table[NUM_FLASH_BANKS] =  static void sc520cdp_setup_par(void)  { -	volatile unsigned long __iomem *mmcr; +	unsigned long __iomem *mmcr;  	unsigned long mmcr_val;  	int i, j; @@ -203,11 +203,11 @@ static void sc520cdp_setup_par(void)  	*/  	for(i = 0; i < NUM_FLASH_BANKS; i++) {		/* for each par_table entry  */  		for(j = 0; j < NUM_SC520_PAR; j++) {	/* for each PAR register     */ -			mmcr_val = mmcr[SC520_PAR(j)]; +			mmcr_val = readl(&mmcr[SC520_PAR(j)]);  			/* if target device field matches, reprogram the PAR */  			if((mmcr_val & SC520_PAR_TRGDEV) == par_table[i].trgdev)  			{ -				mmcr[SC520_PAR(j)] = par_table[i].new_par; +				writel(par_table[i].new_par, &mmcr[SC520_PAR(j)]);  				break;  			}  		} diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c index c77b68c9412..b7a22a612a4 100644 --- a/drivers/mtd/maps/scb2_flash.c +++ b/drivers/mtd/maps/scb2_flash.c @@ -47,7 +47,6 @@  #include <linux/module.h>  #include <linux/types.h>  #include <linux/kernel.h> -#include <linux/init.h>  #include <asm/io.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/map.h> @@ -212,7 +211,6 @@ static void scb2_flash_remove(struct pci_dev *dev)  	if (!region_fail)  		release_mem_region(SCB2_ADDR, SCB2_WINDOW); -	pci_set_drvdata(dev, NULL);  }  static struct pci_device_id scb2_flash_pci_ids[] = { diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c index 83a7a709156..bb580bc1644 100644 --- a/drivers/mtd/maps/solutionengine.c +++ b/drivers/mtd/maps/solutionengine.c @@ -33,28 +33,6 @@ struct map_info soleng_flash_map = {  static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL }; -#ifdef CONFIG_MTD_SUPERH_RESERVE -static struct mtd_partition superh_se_partitions[] = { -	/* Reserved for boot code, read-only */ -	{ -		.name = "flash_boot", -		.offset = 0x00000000, -		.size = CONFIG_MTD_SUPERH_RESERVE, -		.mask_flags = MTD_WRITEABLE, -	}, -	/* All else is writable (e.g. 
JFFS) */ -	{ -		.name = "Flash FS", -		.offset = MTDPART_OFS_NXTBLK, -		.size = MTDPART_SIZ_FULL, -	} -}; -#define NUM_PARTITIONS ARRAY_SIZE(superh_se_partitions) -#else -#define superh_se_partitions NULL -#define NUM_PARTITIONS 0 -#endif /* CONFIG_MTD_SUPERH_RESERVE */ -  static int __init init_soleng_maps(void)  {  	/* First probe at offset 0 */ @@ -92,8 +70,7 @@ static int __init init_soleng_maps(void)  		mtd_device_register(eprom_mtd, NULL, 0);  	} -	mtd_device_parse_register(flash_mtd, probes, NULL, -				  superh_se_partitions, NUM_PARTITIONS); +	mtd_device_parse_register(flash_mtd, probes, NULL, NULL, 0);  	return 0;  } diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c index d467f3b11c9..b6f1aac3510 100644 --- a/drivers/mtd/maps/sun_uflash.c +++ b/drivers/mtd/maps/sun_uflash.c @@ -11,7 +11,6 @@  #include <linux/module.h>  #include <linux/fs.h>  #include <linux/errno.h> -#include <linux/init.h>  #include <linux/ioport.h>  #include <linux/of.h>  #include <linux/of_device.h> @@ -75,7 +74,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)  	up->name = of_get_property(dp, "model", NULL);  	if (up->name && 0 < strlen(up->name)) -		up->map.name = (char *)up->name; +		up->map.name = up->name;  	up->map.phys = op->resource[0].start; diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 5073cbc796d..43e30992a36 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -30,7 +30,6 @@  #include <linux/blkpg.h>  #include <linux/spinlock.h>  #include <linux/hdreg.h> -#include <linux/init.h>  #include <linux/mutex.h>  #include <asm/uaccess.h> @@ -83,12 +82,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,  	block = blk_rq_pos(req) << 9 >> tr->blkshift;  	nsect = blk_rq_cur_bytes(req) >> tr->blkshift; - -	buf = req->buffer; +	buf = bio_data(req->bio);  	if (req->cmd_type != REQ_TYPE_FS)  		return -EIO; +	if (req->cmd_flags & REQ_FLUSH) +		return tr->flush(dev); +  	if 
(blk_rq_pos(req) + blk_rq_cur_sectors(req) >  	    get_capacity(req->rq_disk))  		return -EIO; @@ -409,6 +410,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)  	if (!new->rq)  		goto error3; +	if (tr->flush) +		blk_queue_flush(new->rq, REQ_FLUSH); +  	new->rq->queuedata = new;  	blk_queue_logical_block_size(new->rq, tr->blksize); diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c index 2aef5dda522..485ea751c7f 100644 --- a/drivers/mtd/mtdblock.c +++ b/drivers/mtd/mtdblock.c @@ -32,6 +32,7 @@  #include <linux/mtd/mtd.h>  #include <linux/mtd/blktrans.h>  #include <linux/mutex.h> +#include <linux/major.h>  struct mtdblk_dev { @@ -373,7 +374,7 @@ static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)  static struct mtd_blktrans_ops mtdblock_tr = {  	.name		= "mtdblock", -	.major		= 31, +	.major		= MTD_BLOCK_MAJOR,  	.part_bits	= 0,  	.blksize 	= 512,  	.open		= mtdblock_open, diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c index 92759a9d298..fb5dc89369d 100644 --- a/drivers/mtd/mtdblock_ro.c +++ b/drivers/mtd/mtdblock_ro.c @@ -24,6 +24,7 @@  #include <linux/mtd/mtd.h>  #include <linux/mtd/blktrans.h>  #include <linux/module.h> +#include <linux/major.h>  static int mtdblock_readsect(struct mtd_blktrans_dev *dev,  			      unsigned long block, char *buf) @@ -70,7 +71,7 @@ static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)  static struct mtd_blktrans_ops mtdblock_tr = {  	.name		= "mtdblock", -	.major		= 31, +	.major		= MTD_BLOCK_MAJOR,  	.part_bits	= 0,  	.blksize 	= 512,  	.readsect	= mtdblock_readsect, diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 684bfa39e4e..a0f54e80670 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -32,6 +32,7 @@  #include <linux/mount.h>  #include <linux/blkpg.h>  #include <linux/magic.h> +#include <linux/major.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/partitions.h>  #include <linux/mtd/map.h> @@ -323,6 +324,15 @@ static ssize_t 
mtdchar_write(struct file *file, const char __user *buf, size_t c  		default:  			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);  		} + +		/* +		 * Return -ENOSPC only if no data could be written at all. +		 * Otherwise just return the number of bytes that actually +		 * have been written. +		 */ +		if ((ret == -ENOSPC) && (total_retlen)) +			break; +  		if (!ret) {  			*ppos += retlen;  			total_retlen += retlen; @@ -558,13 +568,18 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,  {  	struct mtd_write_req req;  	struct mtd_oob_ops ops; -	void __user *usr_data, *usr_oob; +	const void __user *usr_data, *usr_oob;  	int ret; -	if (copy_from_user(&req, argp, sizeof(req)) || -			!access_ok(VERIFY_READ, req.usr_data, req.len) || -			!access_ok(VERIFY_READ, req.usr_oob, req.ooblen)) +	if (copy_from_user(&req, argp, sizeof(req)))  		return -EFAULT; + +	usr_data = (const void __user *)(uintptr_t)req.usr_data; +	usr_oob = (const void __user *)(uintptr_t)req.usr_oob; +	if (!access_ok(VERIFY_READ, usr_data, req.len) || +	    !access_ok(VERIFY_READ, usr_oob, req.ooblen)) +		return -EFAULT; +  	if (!mtd->_write_oob)  		return -EOPNOTSUPP; @@ -573,10 +588,7 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,  	ops.ooblen = (size_t)req.ooblen;  	ops.ooboffs = 0; -	usr_data = (void __user *)(uintptr_t)req.usr_data; -	usr_oob = (void __user *)(uintptr_t)req.usr_oob; - -	if (req.usr_data) { +	if (usr_data) {  		ops.datbuf = memdup_user(usr_data, ops.len);  		if (IS_ERR(ops.datbuf))  			return PTR_ERR(ops.datbuf); @@ -584,7 +596,7 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,  		ops.datbuf = NULL;  	} -	if (req.usr_oob) { +	if (usr_oob) {  		ops.oobbuf = memdup_user(usr_oob, ops.ooblen);  		if (IS_ERR(ops.oobbuf)) {  			kfree(ops.datbuf); @@ -888,25 +900,26 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)  	case OTPGETREGIONINFO:  	{  		struct otp_info *buf = kmalloc(4096, GFP_KERNEL); +		size_t retlen;  		if (!buf)  			return -ENOMEM;  		
switch (mfi->mode) {  		case MTD_FILE_MODE_OTP_FACTORY: -			ret = mtd_get_fact_prot_info(mtd, buf, 4096); +			ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);  			break;  		case MTD_FILE_MODE_OTP_USER: -			ret = mtd_get_user_prot_info(mtd, buf, 4096); +			ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);  			break;  		default:  			ret = -EINVAL;  			break;  		} -		if (ret >= 0) { +		if (!ret) {  			if (cmd == OTPGETREGIONCOUNT) { -				int nbr = ret / sizeof(struct otp_info); +				int nbr = retlen / sizeof(struct otp_info);  				ret = copy_to_user(argp, &nbr, sizeof(int));  			} else -				ret = copy_to_user(argp, buf, ret); +				ret = copy_to_user(argp, buf, retlen);  			if (ret)  				ret = -EFAULT;  		} @@ -1099,7 +1112,7 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,  		return (unsigned long) -EINVAL;  	ret = mtd_get_unmapped_area(mtd, len, offset, flags); -	return ret == -EOPNOTSUPP ? -ENOSYS : ret; +	return ret == -EOPNOTSUPP ? -ENODEV : ret;  }  #endif @@ -1124,9 +1137,9 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)  #endif  		return vm_iomap_memory(vma, map->phys, map->size);  	} -	return -ENOSYS; +	return -ENODEV;  #else -	return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS; +	return vma->vm_flags & VM_SHARED ? 
0 : -EACCES;  #endif  } diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 5e14d540ba2..d201feeb3ca 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -157,6 +157,9 @@ static ssize_t mtd_type_show(struct device *dev,  	case MTD_UBIVOLUME:  		type = "ubi";  		break; +	case MTD_MLCNANDFLASH: +		type = "mlc-nand"; +		break;  	default:  		type = "unknown";  	} @@ -310,15 +313,7 @@ static struct attribute *mtd_attrs[] = {  	&dev_attr_bitflip_threshold.attr,  	NULL,  }; - -static struct attribute_group mtd_group = { -	.attrs		= mtd_attrs, -}; - -static const struct attribute_group *mtd_groups[] = { -	&mtd_group, -	NULL, -}; +ATTRIBUTE_GROUPS(mtd);  static struct device_type mtd_devtype = {  	.name		= "mtd", @@ -888,14 +883,14 @@ EXPORT_SYMBOL_GPL(mtd_read_oob);   * devices. The user data is one time programmable but the factory data is read   * only.   */ -int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, -			   size_t len) +int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, +			   struct otp_info *buf)  {  	if (!mtd->_get_fact_prot_info)  		return -EOPNOTSUPP;  	if (!len)  		return 0; -	return mtd->_get_fact_prot_info(mtd, buf, len); +	return mtd->_get_fact_prot_info(mtd, len, retlen, buf);  }  EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info); @@ -911,14 +906,14 @@ int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,  }  EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg); -int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf, -			   size_t len) +int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, +			   struct otp_info *buf)  {  	if (!mtd->_get_user_prot_info)  		return -EOPNOTSUPP;  	if (!len)  		return 0; -	return mtd->_get_user_prot_info(mtd, buf, len); +	return mtd->_get_user_prot_info(mtd, len, retlen, buf);  }  EXPORT_SYMBOL_GPL(mtd_get_user_prot_info); @@ -937,12 +932,22 @@ EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);  int 
mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,  			    size_t *retlen, u_char *buf)  { +	int ret; +  	*retlen = 0;  	if (!mtd->_write_user_prot_reg)  		return -EOPNOTSUPP;  	if (!len)  		return 0; -	return mtd->_write_user_prot_reg(mtd, to, len, retlen, buf); +	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf); +	if (ret) +		return ret; + +	/* +	 * If no data could be written at all, we are out of memory and +	 * must return -ENOSPC. +	 */ +	return (*retlen) ? 0 : -ENOSPC;  }  EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg); diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 6e732c3820c..1ca9aec141f 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -150,11 +150,12 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,  						 retlen, buf);  } -static int part_get_user_prot_info(struct mtd_info *mtd, -		struct otp_info *buf, size_t len) +static int part_get_user_prot_info(struct mtd_info *mtd, size_t len, +				   size_t *retlen, struct otp_info *buf)  {  	struct mtd_part *part = PART(mtd); -	return part->master->_get_user_prot_info(part->master, buf, len); +	return part->master->_get_user_prot_info(part->master, len, retlen, +						 buf);  }  static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, @@ -165,11 +166,12 @@ static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,  						 retlen, buf);  } -static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, -		size_t len) +static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len, +				   size_t *retlen, struct otp_info *buf)  {  	struct mtd_part *part = PART(mtd); -	return part->master->_get_fact_prot_info(part->master, buf, len); +	return part->master->_get_fact_prot_info(part->master, len, retlen, +						 buf);  }  static int part_write(struct mtd_info *mtd, loff_t to, size_t len, @@ -534,7 +536,7 @@ out_register:  	return slave;  } -int mtd_add_partition(struct mtd_info *master, 
char *name, +int mtd_add_partition(struct mtd_info *master, const char *name,  		      long long offset, long long length)  {  	struct mtd_partition part; @@ -672,22 +674,19 @@ static struct mtd_part_parser *get_partition_parser(const char *name)  #define put_partition_parser(p) do { module_put((p)->owner); } while (0) -int register_mtd_parser(struct mtd_part_parser *p) +void register_mtd_parser(struct mtd_part_parser *p)  {  	spin_lock(&part_parser_lock);  	list_add(&p->list, &part_parsers);  	spin_unlock(&part_parser_lock); - -	return 0;  }  EXPORT_SYMBOL_GPL(register_mtd_parser); -int deregister_mtd_parser(struct mtd_part_parser *p) +void deregister_mtd_parser(struct mtd_part_parser *p)  {  	spin_lock(&part_parser_lock);  	list_del(&p->list);  	spin_unlock(&part_parser_lock); -	return 0;  }  EXPORT_SYMBOL_GPL(deregister_mtd_parser); diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c index 334da5f583c..20c02a3b741 100644 --- a/drivers/mtd/mtdsuper.c +++ b/drivers/mtd/mtdsuper.c @@ -17,6 +17,7 @@  #include <linux/export.h>  #include <linux/ctype.h>  #include <linux/slab.h> +#include <linux/major.h>  /*   * compare superblocks to see if they're equivalent diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index d88529841d3..f1cf503517f 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -95,44 +95,16 @@ config MTD_NAND_OMAP2  	  platforms.  config MTD_NAND_OMAP_BCH -	depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3 -	tristate "Enable support for hardware BCH error correction" +	depends on MTD_NAND_OMAP2 +	tristate "Support hardware based BCH error correction"  	default n  	select BCH -	select BCH_CONST_PARAMS  	help -	 Support for hardware BCH error correction. - -choice -	prompt "BCH error correction capability" -	depends on MTD_NAND_OMAP_BCH - -config MTD_NAND_OMAP_BCH8 -	bool "8 bits / 512 bytes (recommended)" -	help -	 Support correcting up to 8 bitflips per 512-byte block. 
-	 This will use 13 bytes of spare area per 512 bytes of page data. -	 This is the recommended mode, as 4-bit mode does not work -	 on some OMAP3 revisions, due to a hardware bug. - -config MTD_NAND_OMAP_BCH4 -	bool "4 bits / 512 bytes" -	help -	 Support correcting up to 4 bitflips per 512-byte block. -	 This will use 7 bytes of spare area per 512 bytes of page data. -	 Note that this mode does not work on some OMAP3 revisions, due to a -	 hardware bug. Please check your OMAP datasheet before selecting this -	 mode. - -endchoice - -if MTD_NAND_OMAP_BCH -config BCH_CONST_M -	default 13 -config BCH_CONST_T -	default 4 if MTD_NAND_OMAP_BCH4 -	default 8 if MTD_NAND_OMAP_BCH8 -endif +	  This config enables the ELM hardware engine, which can be used to +	  locate and correct errors when using BCH ECC scheme. This offloads +	  the cpu from doing ECC error searching and correction. However some +	  legacy OMAP families like OMAP2xxx, OMAP3xxx do not have ELM engine +	  so they should not enable this config symbol.  config MTD_NAND_IDS  	tristate @@ -354,11 +326,11 @@ config MTD_NAND_ATMEL  	  on Atmel AT91 and AVR32 processors.  config MTD_NAND_PXA3xx -	tristate "Support for NAND flash devices on PXA3xx" +	tristate "NAND support on PXA3xx and Armada 370/XP"  	depends on PXA3xx || ARCH_MMP || PLAT_ORION  	help  	  This enables the driver for the NAND flash device found on -	  PXA3xx processors +	  PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).  config MTD_NAND_SLC_LPC32XX  	tristate "NXP LPC32xx SLC Controller" @@ -456,6 +428,7 @@ config MTD_NAND_FSL_IFC  	tristate "NAND support for Freescale IFC controller"  	depends on MTD_NAND && FSL_SOC  	select FSL_IFC +	select MEMORY  	help  	  Various Freescale chips e.g P1010, include a NAND Flash machine  	  with built-in hardware ECC capabilities. 
@@ -486,17 +459,19 @@ config MTD_NAND_MXC  config MTD_NAND_SH_FLCTL  	tristate "Support for NAND on Renesas SuperH FLCTL" -	depends on SUPERH || ARCH_SHMOBILE +	depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST +	depends on HAS_IOMEM +	depends on HAS_DMA  	help  	  Several Renesas SuperH CPU has FLCTL. This option enables support  	  for NAND Flash using FLCTL.  config MTD_NAND_DAVINCI -        tristate "Support NAND on DaVinci SoC" -        depends on ARCH_DAVINCI +        tristate "Support NAND on DaVinci/Keystone SoC" +        depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF)          help  	  Enable the driver for NAND flash chips on Texas Instruments -	  DaVinci processors. +	  DaVinci/Keystone processors.  config MTD_NAND_TXX9NDFMC  	tristate "NAND Flash support for TXx9 SoC" diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c index 8611eb4b45f..4936e9e0002 100644 --- a/drivers/mtd/nand/ams-delta.c +++ b/drivers/mtd/nand/ams-delta.c @@ -17,7 +17,6 @@   */  #include <linux/slab.h> -#include <linux/init.h>  #include <linux/module.h>  #include <linux/delay.h>  #include <linux/mtd/mtd.h> diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 060feeaf6b3..4ce181a35bc 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -375,8 +375,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,  	dma_dev = host->dma_chan->device; -	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | -		DMA_COMPL_SKIP_DEST_UNMAP; +	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;  	phys_addr = dma_map_single(dma_dev->dev, p, len, dir);  	if (dma_mapping_error(dma_dev->dev, phys_addr)) { @@ -431,7 +430,7 @@ err_dma:  	dma_unmap_single(dma_dev->dev, phys_addr, len, dir);  err_buf:  	if (err != 0) -		dev_warn(host->dev, "Fall back to CPU I/O\n"); +		dev_dbg(host->dev, "Fall back to CPU I/O\n");  	return err;  } @@ -1062,56 +1061,28 @@ static void atmel_pmecc_core_init(struct 
mtd_info *mtd)  }  /* - * Get ECC requirement in ONFI parameters, returns -1 if ONFI - * parameters is not supported. - * return 0 if success to get the ECC requirement. - */ -static int get_onfi_ecc_param(struct nand_chip *chip, -		int *ecc_bits, int *sector_size) -{ -	*ecc_bits = *sector_size = 0; - -	if (chip->onfi_params.ecc_bits == 0xff) -		/* TODO: the sector_size and ecc_bits need to be find in -		 * extended ecc parameter, currently we don't support it. -		 */ -		return -1; - -	*ecc_bits = chip->onfi_params.ecc_bits; - -	/* The default sector size (ecc codeword size) is 512 */ -	*sector_size = 512; - -	return 0; -} - -/* - * Get ecc requirement from ONFI parameters ecc requirement. + * Get minimum ecc requirements from NAND.   * If pmecc-cap, pmecc-sector-size in DTS are not specified, this function - * will set them according to ONFI ecc requirement. Otherwise, use the + * will set them according to minimum ecc requirement. Otherwise, use the   * value in DTS file.   * return 0 if success. otherwise return error code.   
*/  static int pmecc_choose_ecc(struct atmel_nand_host *host,  		int *cap, int *sector_size)  { -	/* Get ECC requirement from ONFI parameters */ -	*cap = *sector_size = 0; -	if (host->nand_chip.onfi_version) { -		if (!get_onfi_ecc_param(&host->nand_chip, cap, sector_size)) -			dev_info(host->dev, "ONFI params, minimum required ECC: %d bits in %d bytes\n", +	/* Get minimum ECC requirements */ +	if (host->nand_chip.ecc_strength_ds) { +		*cap = host->nand_chip.ecc_strength_ds; +		*sector_size = host->nand_chip.ecc_step_ds; +		dev_info(host->dev, "minimum ECC: %d bits in %d bytes\n",  				*cap, *sector_size); -		else -			dev_info(host->dev, "NAND chip ECC reqirement is in Extended ONFI parameter, we don't support yet.\n");  	} else { -		dev_info(host->dev, "NAND chip is not ONFI compliant, assume ecc_bits is 2 in 512 bytes"); -	} -	if (*cap == 0 && *sector_size == 0) {  		*cap = 2;  		*sector_size = 512; +		dev_info(host->dev, "can't detect min. ECC, assume 2 bits in 512 bytes\n");  	} -	/* If dts file doesn't specify then use the one in ONFI parameters */ +	/* If device tree doesn't specify, use NAND's minimum ECC parameters */  	if (host->pmecc_corr_cap == 0) {  		/* use the most fitable ecc bits (the near bigger one ) */  		if (*cap <= 2) @@ -1139,7 +1110,7 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host,  	return 0;  } -static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev, +static int atmel_pmecc_nand_init_params(struct platform_device *pdev,  					 struct atmel_nand_host *host)  {  	struct mtd_info *mtd = &host->mtd; @@ -1249,6 +1220,7 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,  		goto err;  	} +	nand_chip->options |= NAND_NO_SUBPAGE_WRITE;  	nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;  	nand_chip->ecc.write_page = atmel_nand_pmecc_write_page; @@ -1449,7 +1421,6 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)  		ecc_writel(host->ecc, CR, ATMEL_ECC_RST);  } -#if 
defined(CONFIG_OF)  static int atmel_of_init_port(struct atmel_nand_host *host,  			      struct device_node *np)  { @@ -1457,7 +1428,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,  	u32 offset[2];  	int ecc_mode;  	struct atmel_nand_data *board = &host->board; -	enum of_gpio_flags flags; +	enum of_gpio_flags flags = 0;  	if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {  		if (val >= 32) { @@ -1540,15 +1511,8 @@ static int atmel_of_init_port(struct atmel_nand_host *host,  	return 0;  } -#else -static int atmel_of_init_port(struct atmel_nand_host *host, -			      struct device_node *np) -{ -	return -EINVAL; -} -#endif -static int __init atmel_hw_nand_init_params(struct platform_device *pdev, +static int atmel_hw_nand_init_params(struct platform_device *pdev,  					 struct atmel_nand_host *host)  {  	struct mtd_info *mtd = &host->mtd; @@ -1696,8 +1660,8 @@ static void nfc_select_chip(struct mtd_info *mtd, int chip)  		nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_ENABLE);  } -static int nfc_make_addr(struct mtd_info *mtd, int column, int page_addr, -		unsigned int *addr1234, unsigned int *cycle0) +static int nfc_make_addr(struct mtd_info *mtd, int command, int column, +		int page_addr, unsigned int *addr1234, unsigned int *cycle0)  {  	struct nand_chip *chip = mtd->priv; @@ -1711,7 +1675,8 @@ static int nfc_make_addr(struct mtd_info *mtd, int column, int page_addr,  	*addr1234 = 0;  	if (column != -1) { -		if (chip->options & NAND_BUSWIDTH_16) +		if (chip->options & NAND_BUSWIDTH_16 && +				!nand_opcode_8bits(command))  			column >>= 1;  		addr_bytes[acycle++] = column & 0xff;  		if (mtd->writesize > 512) @@ -1824,8 +1789,8 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,  	}  	if (do_addr) -		acycle = nfc_make_addr(mtd, column, page_addr, &addr1234, -				&cycle0); +		acycle = nfc_make_addr(mtd, command, column, page_addr, +				&addr1234, &cycle0);  	nfc_addr_cmd = cmd1 | cmd2 | vcmd2 | acycle | csid 
| dataen | nfcwr;  	nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0); @@ -1987,7 +1952,7 @@ static struct platform_driver atmel_nand_nfc_driver;  /*   * Probe for the NAND device.   */ -static int __init atmel_nand_probe(struct platform_device *pdev) +static int atmel_nand_probe(struct platform_device *pdev)  {  	struct atmel_nand_host *host;  	struct mtd_info *mtd; @@ -1998,10 +1963,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)  	/* Allocate memory for the device structure (and zero it) */  	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); -	if (!host) { -		printk(KERN_ERR "atmel_nand: failed to allocate device structure.\n"); +	if (!host)  		return -ENOMEM; -	}  	res = platform_driver_register(&atmel_nand_nfc_driver);  	if (res) @@ -2019,7 +1982,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)  	mtd = &host->mtd;  	nand_chip = &host->nand_chip;  	host->dev = &pdev->dev; -	if (pdev->dev.of_node) { +	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) { +		/* Only when CONFIG_OF is enabled of_node can be parsed */  		res = atmel_of_init_port(host, pdev->dev.of_node);  		if (res)  			goto err_nand_ioremap; @@ -2098,14 +2062,14 @@ static int __init atmel_nand_probe(struct platform_device *pdev)  		}  		if (gpio_get_value(host->board.det_pin)) { -			printk(KERN_INFO "No SmartMedia card inserted.\n"); +			dev_info(&pdev->dev, "No SmartMedia card inserted.\n");  			res = -ENXIO;  			goto err_no_card;  		}  	}  	if (host->board.on_flash_bbt || on_flash_bbt) { -		printk(KERN_INFO "atmel_nand: Use On Flash BBT\n"); +		dev_info(&pdev->dev, "Use On Flash BBT\n");  		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;  	} @@ -2177,14 +2141,13 @@ err_no_card:  	if (host->dma_chan)  		dma_release_channel(host->dma_chan);  err_nand_ioremap: -	platform_driver_unregister(&atmel_nand_nfc_driver);  	return res;  }  /*   * Remove a NAND device.   
*/ -static int __exit atmel_nand_remove(struct platform_device *pdev) +static int atmel_nand_remove(struct platform_device *pdev)  {  	struct atmel_nand_host *host = platform_get_drvdata(pdev);  	struct mtd_info *mtd = &host->mtd; @@ -2207,14 +2170,12 @@ static int __exit atmel_nand_remove(struct platform_device *pdev)  	return 0;  } -#if defined(CONFIG_OF)  static const struct of_device_id atmel_nand_dt_ids[] = {  	{ .compatible = "atmel,at91rm9200-nand" },  	{ /* sentinel */ }  };  MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids); -#endif  static int atmel_nand_nfc_probe(struct platform_device *pdev)  { @@ -2253,12 +2214,11 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)  	return 0;  } -#if defined(CONFIG_OF) -static struct of_device_id atmel_nand_nfc_match[] = { +static const struct of_device_id atmel_nand_nfc_match[] = {  	{ .compatible = "atmel,sama5d3-nfc" },  	{ /* sentinel */ }  }; -#endif +MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);  static struct platform_driver atmel_nand_nfc_driver = {  	.driver = { @@ -2270,7 +2230,8 @@ static struct platform_driver atmel_nand_nfc_driver = {  };  static struct platform_driver atmel_nand_driver = { -	.remove		= __exit_p(atmel_nand_remove), +	.probe		= atmel_nand_probe, +	.remove		= atmel_nand_remove,  	.driver		= {  		.name	= "atmel_nand",  		.owner	= THIS_MODULE, @@ -2278,7 +2239,7 @@ static struct platform_driver atmel_nand_driver = {  	},  }; -module_platform_driver_probe(atmel_nand_driver, atmel_nand_probe); +module_platform_driver(atmel_nand_driver);  MODULE_LICENSE("GPL");  MODULE_AUTHOR("Rick Bronson"); diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index ae8dd7c4103..bc5c518828d 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c @@ -11,7 +11,6 @@  #include <linux/slab.h>  #include <linux/gpio.h> -#include <linux/init.h>  #include <linux/module.h>  #include <linux/interrupt.h>  #include <linux/mtd/mtd.h> @@ -308,7 +307,8 @@ static void 
au1550_command(struct mtd_info *mtd, unsigned command, int column, i  		/* Serially input address */  		if (column != -1) {  			/* Adjust columns for 16 bit buswidth */ -			if (this->options & NAND_BUSWIDTH_16) +			if (this->options & NAND_BUSWIDTH_16 && +					!nand_opcode_8bits(command))  				column >>= 1;  			ctx->write_byte(mtd, column);  		} @@ -418,10 +418,8 @@ static int au1550nd_probe(struct platform_device *pdev)  	}  	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); -	if (!ctx) { -		dev_err(&pdev->dev, "no memory for NAND context\n"); +	if (!ctx)  		return -ENOMEM; -	}  	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);  	if (!r) { @@ -480,6 +478,8 @@ static int au1550nd_probe(struct platform_device *pdev)  	mtd_device_register(&ctx->info, pd->parts, pd->num_parts); +	platform_set_drvdata(pdev, ctx); +  	return 0;  out3: diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c index 7bae569fdc7..10744591131 100644 --- a/drivers/mtd/nand/bcm47xxnflash/main.c +++ b/drivers/mtd/nand/bcm47xxnflash/main.c @@ -29,11 +29,9 @@ static int bcm47xxnflash_probe(struct platform_device *pdev)  	struct bcm47xxnflash *b47n;  	int err = 0; -	b47n = kzalloc(sizeof(*b47n), GFP_KERNEL); -	if (!b47n) { -		err = -ENOMEM; -		goto out; -	} +	b47n = devm_kzalloc(&pdev->dev, sizeof(*b47n), GFP_KERNEL); +	if (!b47n) +		return -ENOMEM;  	b47n->nand_chip.priv = b47n;  	b47n->mtd.owner = THIS_MODULE; @@ -48,22 +46,16 @@ static int bcm47xxnflash_probe(struct platform_device *pdev)  	}  	if (err) {  		pr_err("Initialization failed: %d\n", err); -		goto err_init; +		return err;  	}  	err = mtd_device_parse_register(&b47n->mtd, probes, NULL, NULL, 0);  	if (err) {  		pr_err("Failed to register MTD device: %d\n", err); -		goto err_dev_reg; +		return err;  	}  	return 0; - -err_dev_reg: -err_init: -	kfree(b47n); -out: -	return err;  }  static int bcm47xxnflash_remove(struct platform_device *pdev) @@ -85,22 +77,4 @@ static struct platform_driver 
bcm47xxnflash_driver = {  	},  }; -static int __init bcm47xxnflash_init(void) -{ -	int err; - -	err = platform_driver_register(&bcm47xxnflash_driver); -	if (err) -		pr_err("Failed to register bcm47xx nand flash driver: %d\n", -		       err); - -	return err; -} - -static void __exit bcm47xxnflash_exit(void) -{ -	platform_driver_unregister(&bcm47xxnflash_driver); -} - -module_init(bcm47xxnflash_init); -module_exit(bcm47xxnflash_exit); +module_platform_driver(bcm47xxnflash_driver); diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c index 2c42e125720..722898aea7a 100644 --- a/drivers/mtd/nand/bf5xx_nand.c +++ b/drivers/mtd/nand/bf5xx_nand.c @@ -37,7 +37,6 @@  #include <linux/module.h>  #include <linux/types.h> -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/string.h>  #include <linux/ioport.h> @@ -680,9 +679,6 @@ static int bf5xx_nand_remove(struct platform_device *pdev)  	peripheral_free_list(bfin_nfc_pin_req);  	bf5xx_nand_dma_remove(info); -	/* free the common resources */ -	kfree(info); -  	return 0;  } @@ -743,11 +739,10 @@ static int bf5xx_nand_probe(struct platform_device *pdev)  		return -EFAULT;  	} -	info = kzalloc(sizeof(*info), GFP_KERNEL); +	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);  	if (info == NULL) { -		dev_err(&pdev->dev, "no memory for flash info\n");  		err = -ENOMEM; -		goto out_err_kzalloc; +		goto out_err;  	}  	platform_set_drvdata(pdev, info); @@ -792,7 +787,7 @@ static int bf5xx_nand_probe(struct platform_device *pdev)  	/* initialise the hardware */  	err = bf5xx_nand_hw_init(info);  	if (err) -		goto out_err_hw_init; +		goto out_err;  	/* setup hardware ECC data struct */  	if (hardware_ecc) { @@ -829,9 +824,7 @@ static int bf5xx_nand_probe(struct platform_device *pdev)  out_err_nand_scan:  	bf5xx_nand_dma_remove(info); -out_err_hw_init: -	kfree(info); -out_err_kzalloc: +out_err:  	peripheral_free_list(bfin_nfc_pin_req);  	return err; diff --git a/drivers/mtd/nand/cafe_nand.c 
b/drivers/mtd/nand/cafe_nand.c index c34985a5510..4e66726da9a 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c @@ -627,6 +627,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,  	struct cafe_priv *cafe;  	uint32_t ctrl;  	int err = 0; +	int old_dma; +	struct nand_buffers *nbuf;  	/* Very old versions shared the same PCI ident for all three  	   functions on the chip. Verify the class too... */ @@ -640,10 +642,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,  	pci_set_master(pdev);  	mtd = kzalloc(sizeof(*mtd) + sizeof(struct cafe_priv), GFP_KERNEL); -	if (!mtd) { -		dev_warn(&pdev->dev, "failed to alloc mtd_info\n"); +	if (!mtd)  		return  -ENOMEM; -	}  	cafe = (void *)(&mtd[1]);  	mtd->dev.parent = &pdev->dev; @@ -657,13 +657,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,  		err = -ENOMEM;  		goto out_free_mtd;  	} -	cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112 + sizeof(struct nand_buffers), -					  &cafe->dmaaddr, GFP_KERNEL); -	if (!cafe->dmabuf) { -		err = -ENOMEM; -		goto out_ior; -	} -	cafe->nand.buffers = (void *)cafe->dmabuf + 2112;  	cafe->rs = init_rs_non_canonical(12, &cafe_mul, 0, 1, 8);  	if (!cafe->rs) { @@ -723,7 +716,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,  			  "CAFE NAND", mtd);  	if (err) {  		dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq); -		goto out_free_dma; +		goto out_ior;  	}  	/* Disable master reset, enable NAND clock */ @@ -737,6 +730,32 @@ static int cafe_nand_probe(struct pci_dev *pdev,  	cafe_writel(cafe, 0x7006, GLOBAL_CTRL);  	cafe_writel(cafe, 0x700a, GLOBAL_CTRL); +	/* Enable NAND IRQ in global IRQ mask register */ +	cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK); +	cafe_dev_dbg(&cafe->pdev->dev, "Control %x, IRQ mask %x\n", +		cafe_readl(cafe, GLOBAL_CTRL), +		cafe_readl(cafe, GLOBAL_IRQ_MASK)); + +	/* Do not use the DMA for the nand_scan_ident() */ +	old_dma = usedma; +	usedma = 0; + +	/* Scan to find existence of the device */ +	if 
(nand_scan_ident(mtd, 2, NULL)) { +		err = -ENXIO; +		goto out_irq; +	} + +	cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, +				2112 + sizeof(struct nand_buffers) + +				mtd->writesize + mtd->oobsize, +				&cafe->dmaaddr, GFP_KERNEL); +	if (!cafe->dmabuf) { +		err = -ENOMEM; +		goto out_irq; +	} +	cafe->nand.buffers = nbuf = (void *)cafe->dmabuf + 2112; +  	/* Set up DMA address */  	cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);  	if (sizeof(cafe->dmaaddr) > 4) @@ -748,16 +767,13 @@ static int cafe_nand_probe(struct pci_dev *pdev,  	cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",  		cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf); -	/* Enable NAND IRQ in global IRQ mask register */ -	cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK); -	cafe_dev_dbg(&cafe->pdev->dev, "Control %x, IRQ mask %x\n", -		cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK)); +	/* this driver does not need the @ecccalc and @ecccode */ +	nbuf->ecccalc = NULL; +	nbuf->ecccode = NULL; +	nbuf->databuf = (uint8_t *)(nbuf + 1); -	/* Scan to find existence of the device */ -	if (nand_scan_ident(mtd, 2, NULL)) { -		err = -ENXIO; -		goto out_irq; -	} +	/* Restore the DMA flag */ +	usedma = old_dma;  	cafe->ctl2 = 1<<27; /* Reed-Solomon ECC */  	if (mtd->writesize == 2048) @@ -775,7 +791,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,  	} else {  		printk(KERN_WARNING "Unexpected NAND flash writesize %d. 
Aborting\n",  		       mtd->writesize); -		goto out_irq; +		goto out_free_dma;  	}  	cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;  	cafe->nand.ecc.size = mtd->writesize; @@ -792,7 +808,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,  	err = nand_scan_tail(mtd);  	if (err) -		goto out_irq; +		goto out_free_dma;  	pci_set_drvdata(pdev, mtd); @@ -801,12 +817,15 @@ static int cafe_nand_probe(struct pci_dev *pdev,  	goto out; + out_free_dma: +	dma_free_coherent(&cafe->pdev->dev, +			2112 + sizeof(struct nand_buffers) + +			mtd->writesize + mtd->oobsize, +			cafe->dmabuf, cafe->dmaaddr);   out_irq:  	/* Disable NAND IRQ in global IRQ mask register */  	cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);  	free_irq(pdev->irq, mtd); - out_free_dma: -	dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);   out_ior:  	pci_iounmap(pdev, cafe->mmio);   out_free_mtd: @@ -826,7 +845,10 @@ static void cafe_nand_remove(struct pci_dev *pdev)  	nand_release(mtd);  	free_rs(cafe->rs);  	pci_iounmap(pdev, cafe->mmio); -	dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr); +	dma_free_coherent(&cafe->pdev->dev, +			2112 + sizeof(struct nand_buffers) + +			mtd->writesize + mtd->oobsize, +			cafe->dmabuf, cafe->dmaaddr);  	kfree(mtd);  } diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c index 39b2ef84881..66ec95e6ca6 100644 --- a/drivers/mtd/nand/cmx270_nand.c +++ b/drivers/mtd/nand/cmx270_nand.c @@ -164,7 +164,6 @@ static int __init cmx270_init(void)  				  sizeof(struct nand_chip),  				  GFP_KERNEL);  	if (!cmx270_nand_mtd) { -		pr_debug("Unable to allocate CM-X270 NAND MTD device structure.\n");  		ret = -ENOMEM;  		goto err_kzalloc;  	} diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c index d469a9a1dea..88109d375ae 100644 --- a/drivers/mtd/nand/cs553x_nand.c +++ b/drivers/mtd/nand/cs553x_nand.c @@ -199,7 +199,6 @@ static int __init cs553x_init_one(int cs, int mmio, 
unsigned long adr)  	/* Allocate memory for MTD device structure and private data */  	new_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);  	if (!new_mtd) { -		printk(KERN_WARNING "Unable to allocate CS553X NAND MTD device structure.\n");  		err = -ENOMEM;  		goto out;  	} diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index b77a01efb48..b922c8efcf4 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -24,7 +24,6 @@   */  #include <linux/kernel.h> -#include <linux/init.h>  #include <linux/module.h>  #include <linux/platform_device.h>  #include <linux/err.h> @@ -35,6 +34,7 @@  #include <linux/slab.h>  #include <linux/of_device.h>  #include <linux/of.h> +#include <linux/of_mtd.h>  #include <linux/platform_data/mtd-davinci.h>  #include <linux/platform_data/mtd-davinci-aemif.h> @@ -487,7 +487,7 @@ static int nand_davinci_dev_ready(struct mtd_info *mtd)   * ten ECC bytes plus the manufacturer's bad block marker byte, and   * and not overlapping the default BBT markers.   */ -static struct nand_ecclayout hwecc4_small __initconst = { +static struct nand_ecclayout hwecc4_small = {  	.eccbytes = 10,  	.eccpos = { 0, 1, 2, 3, 4,  		/* offset 5 holds the badblock marker */ @@ -503,7 +503,7 @@ static struct nand_ecclayout hwecc4_small __initconst = {   * storing ten ECC bytes plus the manufacturer's bad block marker byte,   * and not overlapping the default BBT markers.   
*/ -static struct nand_ecclayout hwecc4_2048 __initconst = { +static struct nand_ecclayout hwecc4_2048 = {  	.eccbytes = 40,  	.eccpos = {  		/* at the end of spare sector */ @@ -523,6 +523,7 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {  #if defined(CONFIG_OF)  static const struct of_device_id davinci_nand_of_match[] = {  	{.compatible = "ti,davinci-nand", }, +	{.compatible = "ti,keystone-nand", },  	{},  };  MODULE_DEVICE_TABLE(of, davinci_nand_of_match); @@ -534,17 +535,19 @@ static struct davinci_nand_pdata  		struct davinci_nand_pdata *pdata;  		const char *mode;  		u32 prop; -		int len;  		pdata =  devm_kzalloc(&pdev->dev,  				sizeof(struct davinci_nand_pdata),  				GFP_KERNEL);  		pdev->dev.platform_data = pdata;  		if (!pdata) -			return NULL; +			return ERR_PTR(-ENOMEM);  		if (!of_property_read_u32(pdev->dev.of_node,  			"ti,davinci-chipselect", &prop))  			pdev->id = prop; +		else +			return ERR_PTR(-EINVAL); +  		if (!of_property_read_u32(pdev->dev.of_node,  			"ti,davinci-mask-ale", &prop))  			pdata->mask_ale = prop; @@ -555,6 +558,8 @@ static struct davinci_nand_pdata  			"ti,davinci-mask-chipsel", &prop))  			pdata->mask_chipsel = prop;  		if (!of_property_read_string(pdev->dev.of_node, +			"nand-ecc-mode", &mode) || +		    !of_property_read_string(pdev->dev.of_node,  			"ti,davinci-ecc-mode", &mode)) {  			if (!strncmp("none", mode, 4))  				pdata->ecc_mode = NAND_ECC_NONE; @@ -566,13 +571,22 @@ static struct davinci_nand_pdata  		if (!of_property_read_u32(pdev->dev.of_node,  			"ti,davinci-ecc-bits", &prop))  			pdata->ecc_bits = prop; -		if (!of_property_read_u32(pdev->dev.of_node, + +		prop = of_get_nand_bus_width(pdev->dev.of_node); +		if (0 < prop || !of_property_read_u32(pdev->dev.of_node,  			"ti,davinci-nand-buswidth", &prop))  			if (prop == 16)  				pdata->options |= NAND_BUSWIDTH_16; -		if (of_find_property(pdev->dev.of_node, -			"ti,davinci-nand-use-bbt", &len)) +		if (of_property_read_bool(pdev->dev.of_node, +			
"nand-on-flash-bbt") || +		    of_property_read_bool(pdev->dev.of_node, +			"ti,davinci-nand-use-bbt"))  			pdata->bbt_options = NAND_BBT_USE_FLASH; + +		if (of_device_is_compatible(pdev->dev.of_node, +					    "ti,keystone-nand")) { +			pdata->options |= NAND_NO_SUBPAGE_WRITE; +		}  	}  	return dev_get_platdata(&pdev->dev); @@ -585,7 +599,7 @@ static struct davinci_nand_pdata  }  #endif -static int __init nand_davinci_probe(struct platform_device *pdev) +static int nand_davinci_probe(struct platform_device *pdev)  {  	struct davinci_nand_pdata	*pdata;  	struct davinci_nand_info	*info; @@ -598,6 +612,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)  	nand_ecc_modes_t		ecc_mode;  	pdata = nand_davinci_get_pdata(pdev); +	if (IS_ERR(pdata)) +		return PTR_ERR(pdata); +  	/* insist on board-specific configuration */  	if (!pdata)  		return -ENODEV; @@ -607,11 +624,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)  		return -ENODEV;  	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); -	if (!info) { -		dev_err(&pdev->dev, "unable to allocate memory\n"); -		ret = -ENOMEM; -		goto err_nomem; -	} +	if (!info) +		return -ENOMEM;  	platform_set_drvdata(pdev, info); @@ -619,19 +633,23 @@ static int __init nand_davinci_probe(struct platform_device *pdev)  	res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);  	if (!res1 || !res2) {  		dev_err(&pdev->dev, "resource missing\n"); -		ret = -EINVAL; -		goto err_nomem; +		return -EINVAL;  	}  	vaddr = devm_ioremap_resource(&pdev->dev, res1); -	if (IS_ERR(vaddr)) { -		ret = PTR_ERR(vaddr); -		goto err_ioremap; -	} -	base = devm_ioremap_resource(&pdev->dev, res2); -	if (IS_ERR(base)) { -		ret = PTR_ERR(base); -		goto err_ioremap; +	if (IS_ERR(vaddr)) +		return PTR_ERR(vaddr); + +	/* +	 * This registers range is used to setup NAND settings. 
In case with +	 * TI AEMIF driver, the same memory address range is requested already +	 * by AEMIF, so we cannot request it twice, just ioremap. +	 * The AEMIF and NAND drivers not use the same registers in this range. +	 */ +	base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2)); +	if (!base) { +		dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2); +		return -EADDRNOTAVAIL;  	}  	info->dev		= &pdev->dev; @@ -699,7 +717,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)  			spin_unlock_irq(&davinci_nand_lock);  			if (ret == -EBUSY) -				goto err_ecc; +				return ret;  			info->chip.ecc.calculate = nand_davinci_calculate_4bit;  			info->chip.ecc.correct = nand_davinci_correct_4bit; @@ -715,8 +733,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)  		info->chip.ecc.strength = pdata->ecc_bits;  		break;  	default: -		ret = -EINVAL; -		goto err_ecc; +		return -EINVAL;  	}  	info->chip.ecc.mode = ecc_mode; @@ -724,7 +741,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)  	if (IS_ERR(info->clk)) {  		ret = PTR_ERR(info->clk);  		dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret); -		goto err_clk; +		return ret;  	}  	ret = clk_prepare_enable(info->clk); @@ -734,28 +751,6 @@ static int __init nand_davinci_probe(struct platform_device *pdev)  		goto err_clk_enable;  	} -	/* -	 * Setup Async configuration register in case we did not boot from -	 * NAND and so bootloader did not bother to set it up. 
-	 */ -	val = davinci_nand_readl(info, A1CR_OFFSET + info->core_chipsel * 4); - -	/* Extended Wait is not valid and Select Strobe mode is not used */ -	val &= ~(ACR_ASIZE_MASK | ACR_EW_MASK | ACR_SS_MASK); -	if (info->chip.options & NAND_BUSWIDTH_16) -		val |= 0x1; - -	davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val); - -	ret = 0; -	if (info->timing) -		ret = davinci_aemif_setup_timing(info->timing, info->base, -							info->core_chipsel); -	if (ret < 0) { -		dev_dbg(&pdev->dev, "NAND timing values setup fail\n"); -		goto err_timing; -	} -  	spin_lock_irq(&davinci_nand_lock);  	/* put CSxNAND into NAND mode */ @@ -769,7 +764,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)  	ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);  	if (ret < 0) {  		dev_dbg(&pdev->dev, "no NAND chip(s) found\n"); -		goto err_scan; +		goto err;  	}  	/* Update ECC layout if needed ... for 1-bit HW ECC, the default @@ -783,7 +778,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)  		if (!chunks || info->mtd.oobsize < 16) {  			dev_dbg(&pdev->dev, "too small\n");  			ret = -EINVAL; -			goto err_scan; +			goto err;  		}  		/* For small page chips, preserve the manufacturer's @@ -814,7 +809,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)  		dev_warn(&pdev->dev, "no 4-bit ECC support yet "  				"for 4KiB-page NAND\n");  		ret = -EIO; -		goto err_scan; +		goto err;  syndrome_done:  		info->chip.ecc.layout = &info->ecclayout; @@ -822,7 +817,7 @@ syndrome_done:  	ret = nand_scan_tail(&info->mtd);  	if (ret < 0) -		goto err_scan; +		goto err;  	if (pdata->parts)  		ret = mtd_device_parse_register(&info->mtd, NULL, NULL, @@ -835,7 +830,7 @@ syndrome_done:  						NULL, 0);  	}  	if (ret < 0) -		goto err_scan; +		goto err;  	val = davinci_nand_readl(info, NRCSR_OFFSET);  	dev_info(&pdev->dev, "controller rev. 
%d.%d\n", @@ -843,8 +838,7 @@ syndrome_done:  	return 0; -err_scan: -err_timing: +err:  	clk_disable_unprepare(info->clk);  err_clk_enable: @@ -852,15 +846,10 @@ err_clk_enable:  	if (ecc_mode == NAND_ECC_HW_SYNDROME)  		ecc4_busy = false;  	spin_unlock_irq(&davinci_nand_lock); - -err_ecc: -err_clk: -err_ioremap: -err_nomem:  	return ret;  } -static int __exit nand_davinci_remove(struct platform_device *pdev) +static int nand_davinci_remove(struct platform_device *pdev)  {  	struct davinci_nand_info *info = platform_get_drvdata(pdev); @@ -877,7 +866,8 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)  }  static struct platform_driver nand_davinci_driver = { -	.remove		= __exit_p(nand_davinci_remove), +	.probe		= nand_davinci_probe, +	.remove		= nand_davinci_remove,  	.driver		= {  		.name	= "davinci_nand",  		.owner	= THIS_MODULE, @@ -886,7 +876,7 @@ static struct platform_driver nand_davinci_driver = {  };  MODULE_ALIAS("platform:davinci_nand"); -module_platform_driver_probe(nand_davinci_driver, nand_davinci_probe); +module_platform_driver(nand_davinci_driver);  MODULE_LICENSE("GPL");  MODULE_AUTHOR("Texas Instruments"); diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 2ed2bb33a6e..9f2012a3e76 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -125,7 +125,6 @@ static void reset_buf(struct denali_nand_info *denali)  static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)  { -	BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));  	denali->buf.buf[denali->buf.tail++] = byte;  } @@ -897,7 +896,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)  /* this function examines buffers to see if they contain data that   * indicate that the buffer is part of an erased region of flash.   
*/ -bool is_erased(uint8_t *buf, int len) +static bool is_erased(uint8_t *buf, int len)  {  	int i = 0;  	for (i = 0; i < len; i++) @@ -1234,7 +1233,7 @@ static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)  	return status;  } -static void denali_erase(struct mtd_info *mtd, int page) +static int denali_erase(struct mtd_info *mtd, int page)  {  	struct denali_nand_info *denali = mtd_to_denali(mtd); @@ -1251,8 +1250,7 @@ static void denali_erase(struct mtd_info *mtd, int page)  	irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |  					INTR_STATUS__ERASE_FAIL); -	denali->status = (irq_status & INTR_STATUS__ERASE_FAIL) ? -						NAND_STATUS_FAIL : PASS; +	return (irq_status & INTR_STATUS__ERASE_FAIL) ? NAND_STATUS_FAIL : PASS;  }  static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, @@ -1394,7 +1392,7 @@ static struct nand_bbt_descr bbt_mirror_descr = {  };  /* initialize driver data structures */ -void denali_drv_init(struct denali_nand_info *denali) +static void denali_drv_init(struct denali_nand_info *denali)  {  	denali->idx = 0; @@ -1429,20 +1427,12 @@ int denali_init(struct denali_nand_info *denali)  		}  	} -	/* Is 32-bit DMA supported? 
*/ -	ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32)); -	if (ret) { -		pr_err("Spectra: no usable DMA configuration\n"); -		return ret; -	} -	denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf, -					     DENALI_BUF_SIZE, -					     DMA_BIDIRECTIONAL); +	/* allocate a temporary buffer for nand_scan_ident() */ +	denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE, +					GFP_DMA | GFP_KERNEL); +	if (!denali->buf.buf) +		return -ENOMEM; -	if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) { -		dev_err(denali->dev, "Spectra: failed to map DMA buffer\n"); -		return -EIO; -	}  	denali->mtd.dev.parent = denali->dev;  	denali_hw_init(denali);  	denali_drv_init(denali); @@ -1475,12 +1465,29 @@ int denali_init(struct denali_nand_info *denali)  		goto failed_req_irq;  	} -	/* MTD supported page sizes vary by kernel. We validate our -	 * kernel supports the device here. -	 */ -	if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) { -		ret = -ENODEV; -		pr_err("Spectra: device size not supported by this version of MTD."); +	/* allocate the right size buffer now */ +	devm_kfree(denali->dev, denali->buf.buf); +	denali->buf.buf = devm_kzalloc(denali->dev, +			     denali->mtd.writesize + denali->mtd.oobsize, +			     GFP_KERNEL); +	if (!denali->buf.buf) { +		ret = -ENOMEM; +		goto failed_req_irq; +	} + +	/* Is 32-bit DMA supported? 
*/ +	ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32)); +	if (ret) { +		pr_err("Spectra: no usable DMA configuration\n"); +		goto failed_req_irq; +	} + +	denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf, +			     denali->mtd.writesize + denali->mtd.oobsize, +			     DMA_BIDIRECTIONAL); +	if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) { +		dev_err(denali->dev, "Spectra: failed to map DMA buffer\n"); +		ret = -EIO;  		goto failed_req_irq;  	} @@ -1520,7 +1527,7 @@ int denali_init(struct denali_nand_info *denali)  	 * so just let controller do 15bit ECC for MLC and 8bit ECC for  	 * SLC if possible.  	 * */ -	if (denali->nand.cellinfo & NAND_CI_CELLTYPE_MSK && +	if (!nand_is_slc(&denali->nand) &&  			(denali->mtd.oobsize > (denali->bbtskipbytes +  			ECC_15BITS * (denali->mtd.writesize /  			ECC_SECTOR_SIZE)))) { @@ -1576,7 +1583,7 @@ int denali_init(struct denali_nand_info *denali)  	denali->nand.ecc.write_page_raw = denali_write_page_raw;  	denali->nand.ecc.read_oob = denali_read_oob;  	denali->nand.ecc.write_oob = denali_write_oob; -	denali->nand.erase_cmd = denali_erase; +	denali->nand.erase = denali_erase;  	if (nand_scan_tail(&denali->mtd)) {  		ret = -ENXIO; @@ -1602,7 +1609,8 @@ EXPORT_SYMBOL(denali_init);  void denali_remove(struct denali_nand_info *denali)  {  	denali_irq_cleanup(denali->irq, denali); -	dma_unmap_single(denali->dev, denali->buf.dma_buf, DENALI_BUF_SIZE, +	dma_unmap_single(denali->dev, denali->buf.dma_buf, +			denali->mtd.writesize + denali->mtd.oobsize,  			DMA_BIDIRECTIONAL);  }  EXPORT_SYMBOL(denali_remove); diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h index cec5712862c..96681746242 100644 --- a/drivers/mtd/nand/denali.h +++ b/drivers/mtd/nand/denali.h @@ -455,12 +455,10 @@  #define ECC_SECTOR_SIZE     512 -#define DENALI_BUF_SIZE		(NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) -  struct nand_buf {  	int head;  	int tail; -	uint8_t buf[DENALI_BUF_SIZE]; +	uint8_t *buf;  	dma_addr_t dma_buf;  }; diff 
--git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c index 92530244e2c..35cb17f5780 100644 --- a/drivers/mtd/nand/denali_dt.c +++ b/drivers/mtd/nand/denali_dt.c @@ -30,24 +30,6 @@ struct denali_dt {  	struct clk		*clk;  }; -static void __iomem *request_and_map(struct device *dev, -				     const struct resource *res) -{ -	void __iomem *ptr; - -	if (!devm_request_mem_region(dev, res->start, resource_size(res), -				     "denali-dt")) { -		dev_err(dev, "unable to request %s\n", res->name); -		return NULL; -	} - -	ptr = devm_ioremap_nocache(dev, res->start, resource_size(res)); -	if (!ptr) -		dev_err(dev, "ioremap_nocache of %s failed!", res->name); - -	return ptr; -} -  static const struct of_device_id denali_nand_dt_ids[] = {  		{ .compatible = "denali,denali-nand-dt" },  		{ /* sentinel */ } @@ -78,13 +60,6 @@ static int denali_dt_probe(struct platform_device *ofdev)  		return -ENOMEM;  	denali = &dt->denali; -	denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg"); -	nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data"); -	if (!denali_reg || !nand_data) { -		dev_err(&ofdev->dev, "resources not completely defined\n"); -		return -EINVAL; -	} -  	denali->platform = DT;  	denali->dev = &ofdev->dev;  	denali->irq = platform_get_irq(ofdev, 0); @@ -93,13 +68,15 @@ static int denali_dt_probe(struct platform_device *ofdev)  		return denali->irq;  	} -	denali->flash_reg = request_and_map(&ofdev->dev, denali_reg); -	if (!denali->flash_reg) -		return -ENOMEM; +	denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg"); +	denali->flash_reg = devm_ioremap_resource(&ofdev->dev, denali_reg); +	if (IS_ERR(denali->flash_reg)) +		return PTR_ERR(denali->flash_reg); -	denali->flash_mem = request_and_map(&ofdev->dev, nand_data); -	if (!denali->flash_mem) -		return -ENOMEM; +	nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data"); +	denali->flash_mem = 
devm_ioremap_resource(&ofdev->dev, nand_data); +	if (IS_ERR(denali->flash_mem)) +		return PTR_ERR(denali->flash_mem);  	if (!of_property_read_u32(ofdev->dev.of_node,  		"dma-mask", (u32 *)&denali_dma_mask)) { @@ -108,7 +85,7 @@ static int denali_dt_probe(struct platform_device *ofdev)  		denali->dev->dma_mask = NULL;  	} -	dt->clk = clk_get(&ofdev->dev, NULL); +	dt->clk = devm_clk_get(&ofdev->dev, NULL);  	if (IS_ERR(dt->clk)) {  		dev_err(&ofdev->dev, "no clk available\n");  		return PTR_ERR(dt->clk); @@ -124,7 +101,6 @@ static int denali_dt_probe(struct platform_device *ofdev)  out_disable_clk:  	clk_disable_unprepare(dt->clk); -	clk_put(dt->clk);  	return ret;  } @@ -135,7 +111,6 @@ static int denali_dt_remove(struct platform_device *ofdev)  	denali_remove(&dt->denali);  	clk_disable(dt->clk); -	clk_put(dt->clk);  	return 0;  } diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c index e3e46623b2b..6e2f387b823 100644 --- a/drivers/mtd/nand/denali_pci.c +++ b/drivers/mtd/nand/denali_pci.c @@ -21,7 +21,7 @@  #define DENALI_NAND_NAME    "denali-nand-pci"  /* List of platforms this NAND controller has be integrated into */ -static DEFINE_PCI_DEVICE_TABLE(denali_pci_ids) = { +static const struct pci_device_id denali_pci_ids[] = {  	{ PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },  	{ PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },  	{ /* end: all zeroes */ } @@ -119,7 +119,6 @@ static void denali_pci_remove(struct pci_dev *dev)  	iounmap(denali->flash_mem);  	pci_release_regions(dev);  	pci_disable_device(dev); -	pci_set_drvdata(dev, NULL);  	kfree(denali);  } @@ -132,7 +131,6 @@ static struct pci_driver denali_pci_driver = {  static int denali_init_pci(void)  { -	pr_info("Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);  	return pci_register_driver(&denali_pci_driver);  }  module_init(denali_init_pci); diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c index eaa3c29ad86..f68a7bccecd 100644 --- 
a/drivers/mtd/nand/diskonchip.c +++ b/drivers/mtd/nand/diskonchip.c @@ -38,7 +38,7 @@  #define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0  #endif -static unsigned long __initdata doc_locations[] = { +static unsigned long doc_locations[] __initdata = {  #if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)  #ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH  	0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000, @@ -698,7 +698,8 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu  		/* Serially input address */  		if (column != -1) {  			/* Adjust columns for 16 bit buswidth */ -			if (this->options & NAND_BUSWIDTH_16) +			if (this->options & NAND_BUSWIDTH_16 && +					!nand_opcode_8bits(command))  				column >>= 1;  			WriteDOC(column, docptr, Mplus_FlashAddress);  		} @@ -1058,7 +1059,6 @@ static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partitio  	buf = kmalloc(mtd->writesize, GFP_KERNEL);  	if (!buf) { -		printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");  		return 0;  	}  	if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1))) @@ -1166,7 +1166,6 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti  	buf = kmalloc(mtd->writesize, GFP_KERNEL);  	if (!buf) { -		printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");  		return 0;  	} @@ -1440,10 +1439,13 @@ static int __init doc_probe(unsigned long physadr)  	int reg, len, numchips;  	int ret = 0; +	if (!request_mem_region(physadr, DOC_IOREMAP_LEN, "DiskOnChip")) +		return -EBUSY;  	virtadr = ioremap(physadr, DOC_IOREMAP_LEN);  	if (!virtadr) {  		printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr); -		return -EIO; +		ret = -EIO; +		goto error_ioremap;  	}  	/* It's not possible to cleanly detect the DiskOnChip - the @@ -1561,7 +1563,6 @@ static int __init doc_probe(unsigned long physadr)  	    sizeof(struct nand_chip) + sizeof(struct doc_priv) + (2 * sizeof(struct 
nand_bbt_descr));  	mtd = kzalloc(len, GFP_KERNEL);  	if (!mtd) { -		printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);  		ret = -ENOMEM;  		goto fail;  	} @@ -1629,6 +1630,10 @@ static int __init doc_probe(unsigned long physadr)  	WriteDOC(save_control, virtadr, DOCControl);   fail:  	iounmap(virtadr); + +error_ioremap: +	release_mem_region(physadr, DOC_IOREMAP_LEN); +  	return ret;  } @@ -1645,6 +1650,7 @@ static void release_nanddoc(void)  		nextmtd = doc->nextdoc;  		nand_release(mtd);  		iounmap(doc->virtadr); +		release_mem_region(doc->physadr, DOC_IOREMAP_LEN);  		kfree(mtd);  	}  } diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c index 548db2389fa..ce24637e14f 100644 --- a/drivers/mtd/nand/docg4.c +++ b/drivers/mtd/nand/docg4.c @@ -44,6 +44,7 @@  #include <linux/mtd/nand.h>  #include <linux/bch.h>  #include <linux/bitrev.h> +#include <linux/jiffies.h>  /*   * In "reliable mode" consecutive 2k pages are used in parallel (in some @@ -269,7 +270,7 @@ static int poll_status(struct docg4_priv *doc)  	 */  	uint16_t flash_status; -	unsigned int timeo; +	unsigned long timeo;  	void __iomem *docptr = doc->virtadr;  	dev_dbg(doc->dev, "%s...\n", __func__); @@ -277,22 +278,18 @@ static int poll_status(struct docg4_priv *doc)  	/* hardware quirk requires reading twice initially */  	flash_status = readw(docptr + DOC_FLASHCONTROL); -	timeo = 1000; +	timeo = jiffies + msecs_to_jiffies(200); /* generous timeout */  	do {  		cpu_relax();  		flash_status = readb(docptr + DOC_FLASHCONTROL); -	} while (!(flash_status & DOC_CTRL_FLASHREADY) && --timeo); +	} while (!(flash_status & DOC_CTRL_FLASHREADY) && +		 time_before(jiffies, timeo)); - -	if (!timeo) { +	if (unlikely(!(flash_status & DOC_CTRL_FLASHREADY))) {  		dev_err(doc->dev, "%s: timed out!\n", __func__);  		return NAND_STATUS_FAIL;  	} -	if (unlikely(timeo < 50)) -		dev_warn(doc->dev, "%s: nearly timed out; %d remaining\n", -			 __func__, timeo); -  	return 0;  } @@ -494,7 +491,7 @@ 
static uint8_t docg4_read_byte(struct mtd_info *mtd)  		return status;  	} -	dev_warn(doc->dev, "unexpectd call to read_byte()\n"); +	dev_warn(doc->dev, "unexpected call to read_byte()\n");  	return 0;  } @@ -875,7 +872,7 @@ static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,  	return 0;  } -static void docg4_erase_block(struct mtd_info *mtd, int page) +static int docg4_erase_block(struct mtd_info *mtd, int page)  {  	struct nand_chip *nand = mtd->priv;  	struct docg4_priv *doc = nand->priv; @@ -919,6 +916,8 @@ static void docg4_erase_block(struct mtd_info *mtd, int page)  	write_nop(docptr);  	poll_status(doc);  	write_nop(docptr); + +	return nand->waitfunc(mtd, nand);  }  static int write_page(struct mtd_info *mtd, struct nand_chip *nand, @@ -1239,8 +1238,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)  	nand->block_markbad = docg4_block_markbad;  	nand->read_buf = docg4_read_buf;  	nand->write_buf = docg4_write_buf16; -	nand->scan_bbt = nand_default_bbt; -	nand->erase_cmd = docg4_erase_block; +	nand->erase = docg4_erase_block;  	nand->ecc.read_page = docg4_read_page;  	nand->ecc.write_page = docg4_write_page;  	nand->ecc.read_page_raw = docg4_read_page_raw; diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index 20657209a47..545a5c002f0 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c @@ -24,10 +24,10 @@  #include <linux/module.h>  #include <linux/types.h> -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/string.h>  #include <linux/ioport.h> +#include <linux/of_address.h>  #include <linux/of_platform.h>  #include <linux/platform_device.h>  #include <linux/slab.h> @@ -650,8 +650,6 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)  	        chip->page_shift);  	dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",  	        chip->phys_erase_shift); -	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecclayout = %p\n", -	        
chip->ecclayout);  	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",  	        chip->ecc.mode);  	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n", @@ -725,6 +723,19 @@ static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,  	return 0;  } +/* ECC will be calculated automatically, and errors will be detected in + * waitfunc. + */ +static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip, +				uint32_t offset, uint32_t data_len, +				const uint8_t *buf, int oob_required) +{ +	fsl_elbc_write_buf(mtd, buf, mtd->writesize); +	fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); + +	return 0; +} +  static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)  {  	struct fsl_lbc_ctrl *ctrl = priv->ctrl; @@ -763,6 +774,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)  	chip->ecc.read_page = fsl_elbc_read_page;  	chip->ecc.write_page = fsl_elbc_write_page; +	chip->ecc.write_subpage = fsl_elbc_write_subpage;  	/* If CS Base Register selects full hardware ECC then use it */  	if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == @@ -848,7 +860,6 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)  	if (!fsl_lbc_ctrl_dev->nand) {  		elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL);  		if (!elbc_fcm_ctrl) { -			dev_err(dev, "failed to allocate memory\n");  			mutex_unlock(&fsl_elbc_nand_mutex);  			ret = -ENOMEM;  			goto err; @@ -876,7 +887,7 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)  		goto err;  	} -	priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start); +	priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);  	if (!priv->mtd.name) {  		ret = -ENOMEM;  		goto err; diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index 317a771f158..2338124dd05 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ b/drivers/mtd/nand/fsl_ifc_nand.c @@ -22,14 +22,14 @@  #include <linux/module.h>  #include <linux/types.h> 
-#include <linux/init.h>  #include <linux/kernel.h> +#include <linux/of_address.h>  #include <linux/slab.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/nand.h>  #include <linux/mtd/partitions.h>  #include <linux/mtd/nand_ecc.h> -#include <asm/fsl_ifc.h> +#include <linux/fsl_ifc.h>  #define FSL_IFC_V1_1_0	0x01010000  #define ERR_BYTE		0xFF /* Value returned for read @@ -56,7 +56,7 @@ struct fsl_ifc_nand_ctrl {  	struct nand_hw_control controller;  	struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT]; -	u8 __iomem *addr;	/* Address of assigned IFC buffer	*/ +	void __iomem *addr;	/* Address of assigned IFC buffer	*/  	unsigned int page;	/* Last page written to / read from	*/  	unsigned int read_bytes;/* Number of bytes read during command	*/  	unsigned int column;	/* Saved column from SEQIN		*/ @@ -135,6 +135,69 @@ static struct nand_ecclayout oob_4096_ecc8 = {  	.oobfree = { {2, 6}, {136, 82} },  }; +/* 8192-byte page size with 4-bit ECC */ +static struct nand_ecclayout oob_8192_ecc4 = { +	.eccbytes = 128, +	.eccpos = { +		8, 9, 10, 11, 12, 13, 14, 15, +		16, 17, 18, 19, 20, 21, 22, 23, +		24, 25, 26, 27, 28, 29, 30, 31, +		32, 33, 34, 35, 36, 37, 38, 39, +		40, 41, 42, 43, 44, 45, 46, 47, +		48, 49, 50, 51, 52, 53, 54, 55, +		56, 57, 58, 59, 60, 61, 62, 63, +		64, 65, 66, 67, 68, 69, 70, 71, +		72, 73, 74, 75, 76, 77, 78, 79, +		80, 81, 82, 83, 84, 85, 86, 87, +		88, 89, 90, 91, 92, 93, 94, 95, +		96, 97, 98, 99, 100, 101, 102, 103, +		104, 105, 106, 107, 108, 109, 110, 111, +		112, 113, 114, 115, 116, 117, 118, 119, +		120, 121, 122, 123, 124, 125, 126, 127, +		128, 129, 130, 131, 132, 133, 134, 135, +	}, +	.oobfree = { {2, 6}, {136, 208} }, +}; + +/* 8192-byte page size with 8-bit ECC -- requires 218-byte OOB */ +static struct nand_ecclayout oob_8192_ecc8 = { +	.eccbytes = 256, +	.eccpos = { +		8, 9, 10, 11, 12, 13, 14, 15, +		16, 17, 18, 19, 20, 21, 22, 23, +		24, 25, 26, 27, 28, 29, 30, 31, +		32, 33, 34, 35, 36, 37, 38, 39, +		40, 41, 42, 43, 44, 45, 46, 47, +		
48, 49, 50, 51, 52, 53, 54, 55, +		56, 57, 58, 59, 60, 61, 62, 63, +		64, 65, 66, 67, 68, 69, 70, 71, +		72, 73, 74, 75, 76, 77, 78, 79, +		80, 81, 82, 83, 84, 85, 86, 87, +		88, 89, 90, 91, 92, 93, 94, 95, +		96, 97, 98, 99, 100, 101, 102, 103, +		104, 105, 106, 107, 108, 109, 110, 111, +		112, 113, 114, 115, 116, 117, 118, 119, +		120, 121, 122, 123, 124, 125, 126, 127, +		128, 129, 130, 131, 132, 133, 134, 135, +		136, 137, 138, 139, 140, 141, 142, 143, +		144, 145, 146, 147, 148, 149, 150, 151, +		152, 153, 154, 155, 156, 157, 158, 159, +		160, 161, 162, 163, 164, 165, 166, 167, +		168, 169, 170, 171, 172, 173, 174, 175, +		176, 177, 178, 179, 180, 181, 182, 183, +		184, 185, 186, 187, 188, 189, 190, 191, +		192, 193, 194, 195, 196, 197, 198, 199, +		200, 201, 202, 203, 204, 205, 206, 207, +		208, 209, 210, 211, 212, 213, 214, 215, +		216, 217, 218, 219, 220, 221, 222, 223, +		224, 225, 226, 227, 228, 229, 230, 231, +		232, 233, 234, 235, 236, 237, 238, 239, +		240, 241, 242, 243, 244, 245, 246, 247, +		248, 249, 250, 251, 252, 253, 254, 255, +		256, 257, 258, 259, 260, 261, 262, 263, +	}, +	.oobfree = { {2, 6}, {264, 80} }, +};  /*   * Generic flash bbt descriptors @@ -441,20 +504,29 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,  		if (mtd->writesize > 512) {  			nand_fcr0 =  				(NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) | -				(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT); +				(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) | +				(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);  			iowrite32be( -				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | -				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | -				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) | -				(IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT), -				&ifc->ifc_nand.nand_fir0); +				 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +				 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | +				 (IFC_FIR_OP_RA0 << 
IFC_NAND_FIR0_OP2_SHIFT) | +				 (IFC_FIR_OP_WBCD  << IFC_NAND_FIR0_OP3_SHIFT) | +				 (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT), +				 &ifc->ifc_nand.nand_fir0); +			iowrite32be( +				 (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) | +				 (IFC_FIR_OP_RDSTAT << +					IFC_NAND_FIR1_OP6_SHIFT) | +				 (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT), +				 &ifc->ifc_nand.nand_fir1);  		} else {  			nand_fcr0 = ((NAND_CMD_PAGEPROG <<  					IFC_NAND_FCR0_CMD1_SHIFT) |  				    (NAND_CMD_SEQIN << -					IFC_NAND_FCR0_CMD2_SHIFT)); +					IFC_NAND_FCR0_CMD2_SHIFT) | +				    (NAND_CMD_STATUS << +					IFC_NAND_FCR0_CMD3_SHIFT));  			iowrite32be(  				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | @@ -463,8 +535,13 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,  				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |  				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),  				&ifc->ifc_nand.nand_fir0); -			iowrite32be(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT, -				    &ifc->ifc_nand.nand_fir1); +			iowrite32be( +				 (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) | +				 (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) | +				 (IFC_FIR_OP_RDSTAT << +					IFC_NAND_FIR1_OP7_SHIFT) | +				 (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT), +				  &ifc->ifc_nand.nand_fir1);  			if (column >= mtd->writesize)  				nand_fcr0 |= @@ -514,7 +591,10 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,  		 * The chip always seems to report that it is  		 * write-protected, even when it is not.  		 
*/ -		setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP); +		if (chip->options & NAND_BUSWIDTH_16) +			setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP); +		else +			setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);  		return;  	case NAND_CMD_RESET: @@ -559,7 +639,7 @@ static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)  		len = bufsize - ifc_nand_ctrl->index;  	} -	memcpy_toio(&ifc_nand_ctrl->addr[ifc_nand_ctrl->index], buf, len); +	memcpy_toio(ifc_nand_ctrl->addr + ifc_nand_ctrl->index, buf, len);  	ifc_nand_ctrl->index += len;  } @@ -571,13 +651,16 @@ static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)  {  	struct nand_chip *chip = mtd->priv;  	struct fsl_ifc_mtd *priv = chip->priv; +	unsigned int offset;  	/*  	 * If there are still bytes in the IFC buffer, then use the  	 * next byte.  	 */ -	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) -		return in_8(&ifc_nand_ctrl->addr[ifc_nand_ctrl->index++]); +	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) { +		offset = ifc_nand_ctrl->index++; +		return in_8(ifc_nand_ctrl->addr + offset); +	}  	dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);  	return ERR_BYTE; @@ -598,8 +681,7 @@ static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)  	 * next byte.  	 
*/  	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) { -		data = in_be16((uint16_t __iomem *)&ifc_nand_ctrl-> -			       addr[ifc_nand_ctrl->index]); +		data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);  		ifc_nand_ctrl->index += 2;  		return (uint8_t) data;  	} @@ -624,7 +706,7 @@ static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)  	avail = min((unsigned int)len,  			ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index); -	memcpy_fromio(buf, &ifc_nand_ctrl->addr[ifc_nand_ctrl->index], avail); +	memcpy_fromio(buf, ifc_nand_ctrl->addr + ifc_nand_ctrl->index, avail);  	ifc_nand_ctrl->index += avail;  	if (len > avail) @@ -718,8 +800,6 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)  							chip->page_shift);  	dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,  							chip->phys_erase_shift); -	dev_dbg(priv->dev, "%s: nand->ecclayout = %p\n", __func__, -							chip->ecclayout);  	dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,  							chip->ecc.mode);  	dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__, @@ -872,11 +952,25 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)  		} else {  			layout = &oob_4096_ecc8;  			chip->ecc.bytes = 16; +			chip->ecc.strength = 8;  		}  		priv->bufnum_mask = 1;  		break; +	case CSOR_NAND_PGS_8K: +		if ((csor & CSOR_NAND_ECC_MODE_MASK) == +		    CSOR_NAND_ECC_MODE_4) { +			layout = &oob_8192_ecc4; +		} else { +			layout = &oob_8192_ecc8; +			chip->ecc.bytes = 16; +			chip->ecc.strength = 8; +		} + +		priv->bufnum_mask = 0; +	break; +  	default:  		dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);  		return -ENODEV; @@ -907,7 +1001,6 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)  		iounmap(priv->vbase);  	ifc_nand_ctrl->chips[priv->bank] = NULL; -	dev_set_drvdata(priv->dev, NULL);  	return 0;  } @@ -971,7 +1064,6 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)  	if (!fsl_ifc_ctrl_dev->nand) {  		ifc_nand_ctrl = 
kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);  		if (!ifc_nand_ctrl) { -			dev_err(&dev->dev, "failed to allocate memory\n");  			mutex_unlock(&fsl_ifc_nand_mutex);  			return -ENOMEM;  		} @@ -1012,7 +1104,7 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)  		    IFC_NAND_EVTER_INTR_FTOERIR_EN |  		    IFC_NAND_EVTER_INTR_WPERIR_EN,  		    &ifc->ifc_nand.nand_evter_intr_en); -	priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start); +	priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);  	if (!priv->mtd.name) {  		ret = -ENOMEM;  		goto err; @@ -1082,25 +1174,7 @@ static struct platform_driver fsl_ifc_nand_driver = {  	.remove      = fsl_ifc_nand_remove,  }; -static int __init fsl_ifc_nand_init(void) -{ -	int ret; - -	ret = platform_driver_register(&fsl_ifc_nand_driver); -	if (ret) -		printk(KERN_ERR "fsl-ifc: Failed to register platform" -				"driver\n"); - -	return ret; -} - -static void __exit fsl_ifc_nand_exit(void) -{ -	platform_driver_unregister(&fsl_ifc_nand_driver); -} - -module_init(fsl_ifc_nand_init); -module_exit(fsl_ifc_nand_exit); +module_platform_driver(fsl_ifc_nand_driver);  MODULE_LICENSE("GPL");  MODULE_AUTHOR("Freescale"); diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c index 04e07252d74..4d203e84e8c 100644 --- a/drivers/mtd/nand/fsl_upm.c +++ b/drivers/mtd/nand/fsl_upm.c @@ -18,6 +18,7 @@  #include <linux/mtd/nand_ecc.h>  #include <linux/mtd/partitions.h>  #include <linux/mtd/mtd.h> +#include <linux/of_address.h>  #include <linux/of_platform.h>  #include <linux/of_gpio.h>  #include <linux/io.h> diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 3dc1a7564d8..1550692973d 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c @@ -573,8 +573,6 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,  	dma_dev = chan->device;  	dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); -	flags |= 
DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; -  	if (direction == DMA_TO_DEVICE) {  		dma_src = dma_addr;  		dma_dst = host->data_pa; @@ -891,10 +889,8 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,  	pdata->nand_timings = devm_kzalloc(&pdev->dev,  				sizeof(*pdata->nand_timings), GFP_KERNEL); -	if (!pdata->nand_timings) { -		dev_err(&pdev->dev, "no memory for nand_timing\n"); +	if (!pdata->nand_timings)  		return -ENOMEM; -	}  	of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,  						sizeof(*pdata->nand_timings)); @@ -952,10 +948,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)  	/* Allocate memory for the device structure (and zero it) */  	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); -	if (!host) { -		dev_err(&pdev->dev, "failed to allocate device structure\n"); +	if (!host)  		return -ENOMEM; -	}  	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");  	host->data_va = devm_ioremap_resource(&pdev->dev, res); @@ -1110,8 +1104,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)  			host->ecc_place = &fsmc_ecc4_lp_place;  			break;  		default: -			printk(KERN_WARNING "No oob scheme defined for " -			       "oobsize %d\n", mtd->oobsize); +			dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n", +				 mtd->oobsize);  			BUG();  		}  	} else { @@ -1126,8 +1120,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)  			nand->ecc.layout = &fsmc_ecc1_128_layout;  			break;  		default: -			printk(KERN_WARNING "No oob scheme defined for " -			       "oobsize %d\n", mtd->oobsize); +			dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n", +				 mtd->oobsize);  			BUG();  		}  	} diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c index e826f898241..117ce333fdd 100644 --- a/drivers/mtd/nand/gpio.c +++ b/drivers/mtd/nand/gpio.c @@ -18,7 +18,6 @@  #include <linux/kernel.h>  #include <linux/err.h> -#include 
<linux/init.h>  #include <linux/slab.h>  #include <linux/module.h>  #include <linux/platform_device.h> @@ -132,13 +131,17 @@ static int gpio_nand_get_config_of(const struct device *dev,  static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)  { -	struct resource *r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL); +	struct resource *r;  	u64 addr; -	if (!r || of_property_read_u64(pdev->dev.of_node, +	if (of_property_read_u64(pdev->dev.of_node,  				       "gpio-control-nand,io-sync-reg", &addr))  		return NULL; +	r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL); +	if (!r) +		return NULL; +  	r->start = addr;  	r->end = r->start + 0x3;  	r->flags = IORESOURCE_MEM; @@ -211,10 +214,8 @@ static int gpio_nand_probe(struct platform_device *pdev)  		return -EINVAL;  	gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL); -	if (!gpiomtd) { -		dev_err(&pdev->dev, "failed to create NAND MTD\n"); +	if (!gpiomtd)  		return -ENOMEM; -	}  	chip = &gpiomtd->nand_chip; diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h index 588f5374047..05bb91f2f4c 100644 --- a/drivers/mtd/nand/gpmi-nand/bch-regs.h +++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h @@ -54,7 +54,7 @@  #define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0		11  #define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0	(0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)  #define BF_BCH_FLASH0LAYOUT0_ECC0(v, x)				\ -	(GPMI_IS_MX6Q(x)					\ +	(GPMI_IS_MX6(x)					\  		? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)	\  			& MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0)	\  		: (((v) << BP_BCH_FLASH0LAYOUT0_ECC0)		\ @@ -65,7 +65,7 @@  #define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14			\  				(0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)  #define BF_BCH_FLASH0LAYOUT0_GF(v, x)				\ -	((GPMI_IS_MX6Q(x) && ((v) == 14))			\ +	((GPMI_IS_MX6(x) && ((v) == 14))			\  		? 
(((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)	\  			& MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14)	\  		: 0						\ @@ -77,7 +77,7 @@  #define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE	\  			(0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)  #define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x)				\ -	(GPMI_IS_MX6Q(x)						\ +	(GPMI_IS_MX6(x)						\  		? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)	\  		: ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)		\  	) @@ -96,7 +96,7 @@  #define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN		11  #define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN	(0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)  #define BF_BCH_FLASH0LAYOUT1_ECCN(v, x)				\ -	(GPMI_IS_MX6Q(x)					\ +	(GPMI_IS_MX6(x)					\  		? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)	\  			& MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN)	\  		: (((v) << BP_BCH_FLASH0LAYOUT1_ECCN)		\ @@ -107,7 +107,7 @@  #define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14			\  				(0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)  #define BF_BCH_FLASH0LAYOUT1_GF(v, x)				\ -	((GPMI_IS_MX6Q(x) && ((v) == 14))			\ +	((GPMI_IS_MX6(x) && ((v) == 14))			\  		? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)	\  			& MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14)	\  		: 0						\ @@ -119,7 +119,7 @@  #define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE	\  			(0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)  #define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x)				\ -	(GPMI_IS_MX6Q(x)						\ +	(GPMI_IS_MX6(x)						\  		? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)	\  		: ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)		\  	) diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c index 4f8857fa48a..87e658ce23e 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c @@ -20,6 +20,7 @@   */  #include <linux/delay.h>  #include <linux/clk.h> +#include <linux/slab.h>  #include "gpmi-nand.h"  #include "gpmi-regs.h" @@ -187,6 +188,12 @@ int gpmi_init(struct gpmi_nand_data *this)  	/* Select BCH ECC. 
*/  	writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET); +	/* +	 * Decouple the chip select from dma channel. We use dma0 for all +	 * the chips. +	 */ +	writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET); +  	gpmi_disable_clk(this);  	return 0;  err_out: @@ -201,30 +208,41 @@ void gpmi_dump_info(struct gpmi_nand_data *this)  	u32 reg;  	int i; -	pr_err("Show GPMI registers :\n"); +	dev_err(this->dev, "Show GPMI registers :\n");  	for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {  		reg = readl(r->gpmi_regs + i * 0x10); -		pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg); +		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);  	}  	/* start to print out the BCH info */ -	pr_err("Show BCH registers :\n"); +	dev_err(this->dev, "Show BCH registers :\n");  	for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {  		reg = readl(r->bch_regs + i * 0x10); -		pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg); +		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);  	} -	pr_err("BCH Geometry :\n"); -	pr_err("GF length              : %u\n", geo->gf_len); -	pr_err("ECC Strength           : %u\n", geo->ecc_strength); -	pr_err("Page Size in Bytes     : %u\n", geo->page_size); -	pr_err("Metadata Size in Bytes : %u\n", geo->metadata_size); -	pr_err("ECC Chunk Size in Bytes: %u\n", geo->ecc_chunk_size); -	pr_err("ECC Chunk Count        : %u\n", geo->ecc_chunk_count); -	pr_err("Payload Size in Bytes  : %u\n", geo->payload_size); -	pr_err("Auxiliary Size in Bytes: %u\n", geo->auxiliary_size); -	pr_err("Auxiliary Status Offset: %u\n", geo->auxiliary_status_offset); -	pr_err("Block Mark Byte Offset : %u\n", geo->block_mark_byte_offset); -	pr_err("Block Mark Bit Offset  : %u\n", geo->block_mark_bit_offset); +	dev_err(this->dev, "BCH Geometry :\n" +		"GF length              : %u\n" +		"ECC Strength           : %u\n" +		"Page Size in Bytes     : %u\n" +		"Metadata Size in Bytes : %u\n" +		"ECC Chunk Size in Bytes: %u\n" +		"ECC Chunk Count   
     : %u\n" +		"Payload Size in Bytes  : %u\n" +		"Auxiliary Size in Bytes: %u\n" +		"Auxiliary Status Offset: %u\n" +		"Block Mark Byte Offset : %u\n" +		"Block Mark Bit Offset  : %u\n", +		geo->gf_len, +		geo->ecc_strength, +		geo->page_size, +		geo->metadata_size, +		geo->ecc_chunk_size, +		geo->ecc_chunk_count, +		geo->payload_size, +		geo->auxiliary_size, +		geo->auxiliary_status_offset, +		geo->block_mark_byte_offset, +		geo->block_mark_bit_offset);  }  /* Configures the geometry for BCH.  */ @@ -259,8 +277,8 @@ int bch_set_geometry(struct gpmi_nand_data *this)  	* chip, otherwise it will lock up. So we skip resetting BCH on the MX23.  	* On the other hand, the MX28 needs the reset, because one case has been  	* seen where the BCH produced ECC errors constantly after 10000 -	* consecutive reboots. The latter case has not been seen on the MX23 yet, -	* still we don't know if it could happen there as well. +	* consecutive reboots. The latter case has not been seen on the MX23 +	* yet, still we don't know if it could happen there as well.  	*/  	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));  	if (ret) @@ -347,7 +365,7 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,  	improved_timing_is_available =  		(target.tREA_in_ns  >= 0) &&  		(target.tRLOH_in_ns >= 0) && -		(target.tRHOH_in_ns >= 0) ; +		(target.tRHOH_in_ns >= 0);  	/* Inspect the clock. 
*/  	nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]); @@ -843,7 +861,7 @@ static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,  	struct resources *r = &this->resources;  	unsigned long rate = clk_get_rate(r->clock[0]);  	int mode = this->timing_mode; -	int dll_threshold = 16; /* in ns */ +	int dll_threshold = this->devdata->max_chain_delay;  	unsigned long delay;  	unsigned long clk_period;  	int t_rea; @@ -868,9 +886,6 @@ static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,  	/* [3] for GPMI_HW_GPMI_CTRL1 */  	hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; -	if (GPMI_IS_MX6Q(this)) -		dll_threshold = 12; -  	/*  	 * Enlarge 10 times for the numerator and denominator in {3}.  	 * This make us to get more accurate result. @@ -905,10 +920,14 @@ static int enable_edo_mode(struct gpmi_nand_data *this, int mode)  	struct resources  *r = &this->resources;  	struct nand_chip *nand = &this->nand;  	struct mtd_info	 *mtd = &this->mtd; -	uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {}; +	uint8_t *feature;  	unsigned long rate;  	int ret; +	feature = kzalloc(ONFI_SUBFEATURE_PARAM_LEN, GFP_KERNEL); +	if (!feature) +		return -ENOMEM; +  	nand->select_chip(mtd, 0);  	/* [1] send SET FEATURE commond to NAND */ @@ -936,11 +955,13 @@ static int enable_edo_mode(struct gpmi_nand_data *this, int mode)  	this->flags |= GPMI_ASYNC_EDO_ENABLED;  	this->timing_mode = mode; +	kfree(feature);  	dev_info(this->dev, "enable the asynchronous EDO mode %d\n", mode);  	return 0;  err_out:  	nand->select_chip(mtd, -1); +	kfree(feature);  	dev_err(this->dev, "mode:%d ,failed in set feature.\n", mode);  	return -EINVAL;  } @@ -950,7 +971,7 @@ int gpmi_extra_init(struct gpmi_nand_data *this)  	struct nand_chip *chip = &this->nand;  	/* Enable the asynchronous EDO feature. 
*/ -	if (GPMI_IS_MX6Q(this) && chip->onfi_version) { +	if (GPMI_IS_MX6(this) && chip->onfi_version) {  		int mode = onfi_get_async_timing_mode(chip);  		/* We only support the timing mode 4 and mode 5. */ @@ -980,7 +1001,7 @@ void gpmi_begin(struct gpmi_nand_data *this)  	/* Enable the clock. */  	ret = gpmi_enable_clk(this);  	if (ret) { -		pr_err("We failed in enable the clk\n"); +		dev_err(this->dev, "We failed in enable the clk\n");  		goto err_out;  	} @@ -997,7 +1018,7 @@ void gpmi_begin(struct gpmi_nand_data *this)  	/* [1] Set HW_GPMI_TIMING0 */  	reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |  		BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles)         | -		BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles)       ; +		BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles);  	writel(reg, gpmi_regs + HW_GPMI_TIMING0); @@ -1072,12 +1093,19 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)  	if (GPMI_IS_MX23(this)) {  		mask = MX23_BM_GPMI_DEBUG_READY0 << chip;  		reg = readl(r->gpmi_regs + HW_GPMI_DEBUG); -	} else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) { +	} else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) { +		/* +		 * In the imx6, all the ready/busy pins are bound +		 * together. So we only need to check chip 0. +		 */ +		if (GPMI_IS_MX6(this)) +			chip = 0; +  		/* MX28 shares the same R/B register as MX6Q. 
*/  		mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);  		reg = readl(r->gpmi_regs + HW_GPMI_STAT);  	} else -		pr_err("unknow arch.\n"); +		dev_err(this->dev, "unknow arch.\n");  	return reg & mask;  } @@ -1108,10 +1136,8 @@ int gpmi_send_command(struct gpmi_nand_data *this)  	desc = dmaengine_prep_slave_sg(channel,  					(struct scatterlist *)pio,  					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); -	if (!desc) { -		pr_err("step 1 error\n"); -		return -1; -	} +	if (!desc) +		return -EINVAL;  	/* [2] send out the COMMAND + ADDRESS string stored in @buffer */  	sgl = &this->cmd_sgl; @@ -1121,11 +1147,8 @@ int gpmi_send_command(struct gpmi_nand_data *this)  	desc = dmaengine_prep_slave_sg(channel,  				sgl, 1, DMA_MEM_TO_DEV,  				DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - -	if (!desc) { -		pr_err("step 2 error\n"); -		return -1; -	} +	if (!desc) +		return -EINVAL;  	/* [3] submit the DMA */  	set_dma_type(this, DMA_FOR_COMMAND); @@ -1154,20 +1177,17 @@ int gpmi_send_data(struct gpmi_nand_data *this)  	pio[1] = 0;  	desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,  					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); -	if (!desc) { -		pr_err("step 1 error\n"); -		return -1; -	} +	if (!desc) +		return -EINVAL;  	/* [2] send DMA request */  	prepare_data_dma(this, DMA_TO_DEVICE);  	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,  					1, DMA_MEM_TO_DEV,  					DMA_PREP_INTERRUPT | DMA_CTRL_ACK); -	if (!desc) { -		pr_err("step 2 error\n"); -		return -1; -	} +	if (!desc) +		return -EINVAL; +  	/* [3] submit the DMA */  	set_dma_type(this, DMA_FOR_WRITE_DATA);  	return start_dma_without_bch_irq(this, desc); @@ -1191,20 +1211,16 @@ int gpmi_read_data(struct gpmi_nand_data *this)  	desc = dmaengine_prep_slave_sg(channel,  					(struct scatterlist *)pio,  					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); -	if (!desc) { -		pr_err("step 1 error\n"); -		return -1; -	} +	if (!desc) +		return -EINVAL;  	/* [2] : send DMA request */  	prepare_data_dma(this, DMA_FROM_DEVICE);  	desc = 
dmaengine_prep_slave_sg(channel, &this->data_sgl,  					1, DMA_DEV_TO_MEM,  					DMA_PREP_INTERRUPT | DMA_CTRL_ACK); -	if (!desc) { -		pr_err("step 2 error\n"); -		return -1; -	} +	if (!desc) +		return -EINVAL;  	/* [3] : submit the DMA */  	set_dma_type(this, DMA_FOR_READ_DATA); @@ -1249,10 +1265,9 @@ int gpmi_send_page(struct gpmi_nand_data *this,  					(struct scatterlist *)pio,  					ARRAY_SIZE(pio), DMA_TRANS_NONE,  					DMA_CTRL_ACK); -	if (!desc) { -		pr_err("step 2 error\n"); -		return -1; -	} +	if (!desc) +		return -EINVAL; +  	set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);  	return start_dma_with_bch_irq(this, desc);  } @@ -1284,10 +1299,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,  	desc = dmaengine_prep_slave_sg(channel,  				(struct scatterlist *)pio, 2,  				DMA_TRANS_NONE, 0); -	if (!desc) { -		pr_err("step 1 error\n"); -		return -1; -	} +	if (!desc) +		return -EINVAL;  	/* [2] Enable the BCH block and read. */  	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ; @@ -1314,10 +1327,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,  					(struct scatterlist *)pio,  					ARRAY_SIZE(pio), DMA_TRANS_NONE,  					DMA_PREP_INTERRUPT | DMA_CTRL_ACK); -	if (!desc) { -		pr_err("step 2 error\n"); -		return -1; -	} +	if (!desc) +		return -EINVAL;  	/* [3] Disable the BCH block */  	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY; @@ -1335,10 +1346,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,  				(struct scatterlist *)pio, 3,  				DMA_TRANS_NONE,  				DMA_PREP_INTERRUPT | DMA_CTRL_ACK); -	if (!desc) { -		pr_err("step 3 error\n"); -		return -1; -	} +	if (!desc) +		return -EINVAL;  	/* [4] submit the DMA */  	set_dma_type(this, DMA_FOR_READ_ECC_PAGE); diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index 59ab0692f0b..f638cd8077c 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -18,9 +18,6 @@   * with this program; if not, write to the Free 
Software Foundation, Inc.,   * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.   */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -  #include <linux/clk.h>  #include <linux/slab.h>  #include <linux/interrupt.h> @@ -30,6 +27,7 @@  #include <linux/of_device.h>  #include <linux/of_mtd.h>  #include "gpmi-nand.h" +#include "bch-regs.h"  /* Resource names for the GPMI NAND driver. */  #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand" @@ -45,13 +43,40 @@ static struct nand_bbt_descr gpmi_bbt_descr = {  	.pattern	= scan_ff_pattern  }; -/*  We will use all the (page + OOB). */ +/* + * We may change the layout if we can get the ECC info from the datasheet, + * else we will use all the (page + OOB). + */  static struct nand_ecclayout gpmi_hw_ecclayout = {  	.eccbytes = 0,  	.eccpos = { 0, },  	.oobfree = { {.offset = 0, .length = 0} }  }; +static const struct gpmi_devdata gpmi_devdata_imx23 = { +	.type = IS_MX23, +	.bch_max_ecc_strength = 20, +	.max_chain_delay = 16, +}; + +static const struct gpmi_devdata gpmi_devdata_imx28 = { +	.type = IS_MX28, +	.bch_max_ecc_strength = 20, +	.max_chain_delay = 16, +}; + +static const struct gpmi_devdata gpmi_devdata_imx6q = { +	.type = IS_MX6Q, +	.bch_max_ecc_strength = 40, +	.max_chain_delay = 12, +}; + +static const struct gpmi_devdata gpmi_devdata_imx6sx = { +	.type = IS_MX6SX, +	.bch_max_ecc_strength = 62, +	.max_chain_delay = 12, +}; +  static irqreturn_t bch_irq(int irq, void *cookie)  {  	struct gpmi_nand_data *this = cookie; @@ -101,14 +126,8 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)  		/* The mx23/mx28 only support the GF13. 
*/  		if (geo->gf_len == 14)  			return false; - -		if (geo->ecc_strength > MXS_ECC_STRENGTH_MAX) -			return false; -	} else if (GPMI_IS_MX6Q(this)) { -		if (geo->ecc_strength > MX6_ECC_STRENGTH_MAX) -			return false;  	} -	return true; +	return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;  }  /* @@ -269,8 +288,7 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)  			"We can not support this nand chip."  			" Its required ecc strength(%d) is beyond our"  			" capability(%d).\n", geo->ecc_strength, -			(GPMI_IS_MX6Q(this) ? MX6_ECC_STRENGTH_MAX -					: MXS_ECC_STRENGTH_MAX)); +			this->devdata->bch_max_ecc_strength);  		return -EINVAL;  	} @@ -349,14 +367,16 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)  int common_nfc_set_geometry(struct gpmi_nand_data *this)  { -	return set_geometry_by_ecc_info(this) ? 0 : legacy_set_geometry(this); +	if (of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc") +		&& set_geometry_by_ecc_info(this)) +		return 0; +	return legacy_set_geometry(this);  }  struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)  { -	int chipnr = this->current_chip; - -	return this->dma_chans[chipnr]; +	/* We use the DMA channel 0 to access all the nand chips. */ +	return this->dma_chans[0];  }  /* Can we use the upper's buffer directly for DMA? */ @@ -365,25 +385,28 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)  	struct scatterlist *sgl = &this->data_sgl;  	int ret; -	this->direct_dma_map_ok = true; -  	/* first try to map the upper buffer directly */ -	sg_init_one(sgl, this->upper_buf, this->upper_len); -	ret = dma_map_sg(this->dev, sgl, 1, dr); -	if (ret == 0) { -		/* We have to use our own DMA buffer. 
*/ -		sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE); - -		if (dr == DMA_TO_DEVICE) -			memcpy(this->data_buffer_dma, this->upper_buf, -				this->upper_len); - +	if (virt_addr_valid(this->upper_buf) && +		!object_is_on_stack(this->upper_buf)) { +		sg_init_one(sgl, this->upper_buf, this->upper_len);  		ret = dma_map_sg(this->dev, sgl, 1, dr);  		if (ret == 0) -			pr_err("DMA mapping failed.\n"); +			goto map_fail; -		this->direct_dma_map_ok = false; +		this->direct_dma_map_ok = true; +		return;  	} + +map_fail: +	/* We have to use our own DMA buffer. */ +	sg_init_one(sgl, this->data_buffer_dma, this->upper_len); + +	if (dr == DMA_TO_DEVICE) +		memcpy(this->data_buffer_dma, this->upper_buf, this->upper_len); + +	dma_map_sg(this->dev, sgl, 1, dr); + +	this->direct_dma_map_ok = false;  }  /* This will be called after the DMA operation is finished. */ @@ -392,8 +415,6 @@ static void dma_irq_callback(void *param)  	struct gpmi_nand_data *this = param;  	struct completion *dma_c = &this->dma_done; -	complete(dma_c); -  	switch (this->dma_type) {  	case DMA_FOR_COMMAND:  		dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE); @@ -416,8 +437,10 @@ static void dma_irq_callback(void *param)  		break;  	default: -		pr_err("in wrong DMA operation.\n"); +		dev_err(this->dev, "in wrong DMA operation.\n");  	} + +	complete(dma_c);  }  int start_dma_without_bch_irq(struct gpmi_nand_data *this, @@ -436,7 +459,8 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this,  	/* Wait for the interrupt from the DMA block. */  	err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));  	if (!err) { -		pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type); +		dev_err(this->dev, "DMA timeout, last DMA :%d\n", +			this->last_dma_type);  		gpmi_dump_info(this);  		return -ETIMEDOUT;  	} @@ -465,7 +489,8 @@ int start_dma_with_bch_irq(struct gpmi_nand_data *this,  	/* Wait for the interrupt from the BCH block. 
*/  	err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));  	if (!err) { -		pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type); +		dev_err(this->dev, "BCH timeout, last DMA :%d\n", +			this->last_dma_type);  		gpmi_dump_info(this);  		return -ETIMEDOUT;  	} @@ -481,70 +506,38 @@ static int acquire_register_block(struct gpmi_nand_data *this,  	void __iomem *p;  	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); -	if (!r) { -		pr_err("Can't get resource for %s\n", res_name); -		return -ENODEV; -	} - -	p = ioremap(r->start, resource_size(r)); -	if (!p) { -		pr_err("Can't remap %s\n", res_name); -		return -ENOMEM; -	} +	p = devm_ioremap_resource(&pdev->dev, r); +	if (IS_ERR(p)) +		return PTR_ERR(p);  	if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))  		res->gpmi_regs = p;  	else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))  		res->bch_regs = p;  	else -		pr_err("unknown resource name : %s\n", res_name); +		dev_err(this->dev, "unknown resource name : %s\n", res_name);  	return 0;  } -static void release_register_block(struct gpmi_nand_data *this) -{ -	struct resources *res = &this->resources; -	if (res->gpmi_regs) -		iounmap(res->gpmi_regs); -	if (res->bch_regs) -		iounmap(res->bch_regs); -	res->gpmi_regs = NULL; -	res->bch_regs = NULL; -} -  static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)  {  	struct platform_device *pdev = this->pdev; -	struct resources *res = &this->resources;  	const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;  	struct resource *r;  	int err;  	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);  	if (!r) { -		pr_err("Can't get resource for %s\n", res_name); +		dev_err(this->dev, "Can't get resource for %s\n", res_name);  		return -ENODEV;  	} -	err = request_irq(r->start, irq_h, 0, res_name, this); -	if (err) { -		pr_err("Can't own %s\n", res_name); -		return err; -	} - -	res->bch_low_interrupt = r->start; -	res->bch_high_interrupt = r->end; -	
return 0; -} - -static void release_bch_irq(struct gpmi_nand_data *this) -{ -	struct resources *res = &this->resources; -	int i = res->bch_low_interrupt; +	err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this); +	if (err) +		dev_err(this->dev, "error requesting BCH IRQ\n"); -	for (; i <= res->bch_high_interrupt; i++) -		free_irq(i, this); +	return err;  }  static void release_dma_channels(struct gpmi_nand_data *this) @@ -565,7 +558,7 @@ static int acquire_dma_channels(struct gpmi_nand_data *this)  	/* request dma channel */  	dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");  	if (!dma_chan) { -		pr_err("Failed to request DMA channel.\n"); +		dev_err(this->dev, "Failed to request DMA channel.\n");  		goto acquire_err;  	} @@ -577,21 +570,6 @@ acquire_err:  	return -EINVAL;  } -static void gpmi_put_clks(struct gpmi_nand_data *this) -{ -	struct resources *r = &this->resources; -	struct clk *clk; -	int i; - -	for (i = 0; i < GPMI_CLK_MAX; i++) { -		clk = r->clock[i]; -		if (clk) { -			clk_put(clk); -			r->clock[i] = NULL; -		} -	} -} -  static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {  	"gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",  }; @@ -604,14 +582,14 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)  	int err, i;  	/* The main clock is stored in the first. 
*/ -	r->clock[0] = clk_get(this->dev, "gpmi_io"); +	r->clock[0] = devm_clk_get(this->dev, "gpmi_io");  	if (IS_ERR(r->clock[0])) {  		err = PTR_ERR(r->clock[0]);  		goto err_clock;  	}  	/* Get extra clocks */ -	if (GPMI_IS_MX6Q(this)) +	if (GPMI_IS_MX6(this))  		extra_clks = extra_clks_for_mx6q;  	if (!extra_clks)  		return 0; @@ -620,7 +598,7 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)  		if (extra_clks[i - 1] == NULL)  			break; -		clk = clk_get(this->dev, extra_clks[i - 1]); +		clk = devm_clk_get(this->dev, extra_clks[i - 1]);  		if (IS_ERR(clk)) {  			err = PTR_ERR(clk);  			goto err_clock; @@ -629,9 +607,9 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)  		r->clock[i] = clk;  	} -	if (GPMI_IS_MX6Q(this)) +	if (GPMI_IS_MX6(this))  		/* -		 * Set the default value for the gpmi clock in mx6q: +		 * Set the default value for the gpmi clock.  		 *  		 * If you want to use the ONFI nand which is in the  		 * Synchronous Mode, you should change the clock as you need. 
@@ -642,7 +620,6 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)  err_clock:  	dev_dbg(this->dev, "failed in finding the clocks.\n"); -	gpmi_put_clks(this);  	return err;  } @@ -664,7 +641,7 @@ static int acquire_resources(struct gpmi_nand_data *this)  	ret = acquire_dma_channels(this);  	if (ret) -		goto exit_dma_channels; +		goto exit_regs;  	ret = gpmi_get_clks(this);  	if (ret) @@ -673,18 +650,12 @@ static int acquire_resources(struct gpmi_nand_data *this)  exit_clock:  	release_dma_channels(this); -exit_dma_channels: -	release_bch_irq(this);  exit_regs: -	release_register_block(this);  	return ret;  }  static void release_resources(struct gpmi_nand_data *this)  { -	gpmi_put_clks(this); -	release_register_block(this); -	release_bch_irq(this);  	release_dma_channels(this);  } @@ -730,8 +701,7 @@ static int read_page_prepare(struct gpmi_nand_data *this,  						length, DMA_FROM_DEVICE);  		if (dma_mapping_error(dev, dest_phys)) {  			if (alt_size < length) { -				pr_err("%s, Alternate buffer is too small\n", -					__func__); +				dev_err(dev, "Alternate buffer is too small\n");  				return -ENOMEM;  			}  			goto map_failed; @@ -781,8 +751,7 @@ static int send_page_prepare(struct gpmi_nand_data *this,  						DMA_TO_DEVICE);  		if (dma_mapping_error(dev, source_phys)) {  			if (alt_size < length) { -				pr_err("%s, Alternate buffer is too small\n", -					__func__); +				dev_err(dev, "Alternate buffer is too small\n");  				return -ENOMEM;  			}  			goto map_failed; @@ -835,14 +804,23 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)  {  	struct bch_geometry *geo = &this->bch_geometry;  	struct device *dev = this->dev; +	struct mtd_info *mtd = &this->mtd;  	/* [1] Allocate a command buffer. PAGE_SIZE is enough. */  	this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);  	if (this->cmd_buffer == NULL)  		goto error_alloc; -	/* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. 
*/ -	this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL); +	/* +	 * [2] Allocate a read/write data buffer. +	 *     The gpmi_alloc_dma_buffer can be called twice. +	 *     We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer +	 *     is called before the nand_scan_ident; and we allocate a buffer +	 *     of the real NAND page size when the gpmi_alloc_dma_buffer is +	 *     called after the nand_scan_ident. +	 */ +	this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE, +					GFP_DMA | GFP_KERNEL);  	if (this->data_buffer_dma == NULL)  		goto error_alloc; @@ -870,7 +848,6 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)  error_alloc:  	gpmi_free_dma_buffer(this); -	pr_err("Error allocating DMA buffers!\n");  	return -ENOMEM;  } @@ -902,7 +879,8 @@ static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)  	ret = gpmi_send_command(this);  	if (ret) -		pr_err("Chip: %u, Error %d\n", this->current_chip, ret); +		dev_err(this->dev, "Chip: %u, Error %d\n", +			this->current_chip, ret);  	this->command_length = 0;  } @@ -933,7 +911,7 @@ static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)  	struct nand_chip *chip = mtd->priv;  	struct gpmi_nand_data *this = chip->priv; -	pr_debug("len is %d\n", len); +	dev_dbg(this->dev, "len is %d\n", len);  	this->upper_buf	= buf;  	this->upper_len	= len; @@ -945,7 +923,7 @@ static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)  	struct nand_chip *chip = mtd->priv;  	struct gpmi_nand_data *this = chip->priv; -	pr_debug("len is %d\n", len); +	dev_dbg(this->dev, "len is %d\n", len);  	this->upper_buf	= (uint8_t *)buf;  	this->upper_len	= len; @@ -1024,13 +1002,13 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,  	unsigned int  max_bitflips = 0;  	int           ret; -	pr_debug("page number is : %d\n", page); -	ret = read_page_prepare(this, buf, mtd->writesize, +	dev_dbg(this->dev, "page number is : %d\n", 
page); +	ret = read_page_prepare(this, buf, nfc_geo->payload_size,  					this->payload_virt, this->payload_phys,  					nfc_geo->payload_size,  					&payload_virt, &payload_phys);  	if (ret) { -		pr_err("Inadequate DMA buffer\n"); +		dev_err(this->dev, "Inadequate DMA buffer\n");  		ret = -ENOMEM;  		return ret;  	} @@ -1039,12 +1017,12 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,  	/* go! */  	ret = gpmi_read_page(this, payload_phys, auxiliary_phys); -	read_page_end(this, buf, mtd->writesize, +	read_page_end(this, buf, nfc_geo->payload_size,  			this->payload_virt, this->payload_phys,  			nfc_geo->payload_size,  			payload_virt, payload_phys);  	if (ret) { -		pr_err("Error in ECC-based read: %d\n", ret); +		dev_err(this->dev, "Error in ECC-based read: %d\n", ret);  		return ret;  	} @@ -1081,7 +1059,7 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,  		chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];  	} -	read_page_swap_end(this, buf, mtd->writesize, +	read_page_swap_end(this, buf, nfc_geo->payload_size,  			this->payload_virt, this->payload_phys,  			nfc_geo->payload_size,  			payload_virt, payload_phys); @@ -1089,6 +1067,90 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,  	return max_bitflips;  } +/* Fake a virtual small page for the subpage read */ +static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, +			uint32_t offs, uint32_t len, uint8_t *buf, int page) +{ +	struct gpmi_nand_data *this = chip->priv; +	void __iomem *bch_regs = this->resources.bch_regs; +	struct bch_geometry old_geo = this->bch_geometry; +	struct bch_geometry *geo = &this->bch_geometry; +	int size = chip->ecc.size; /* ECC chunk size */ +	int meta, n, page_size; +	u32 r1_old, r2_old, r1_new, r2_new; +	unsigned int max_bitflips; +	int first, last, marker_pos; +	int ecc_parity_size; +	int col = 0; + +	/* The size of ECC parity */ +	ecc_parity_size = geo->gf_len * 
geo->ecc_strength / 8; + +	/* Align it with the chunk size */ +	first = offs / size; +	last = (offs + len - 1) / size; + +	/* +	 * Find the chunk which contains the Block Marker. If this chunk is +	 * in the range of [first, last], we have to read out the whole page. +	 * Why? since we had swapped the data at the position of Block Marker +	 * to the metadata which is bound with the chunk 0. +	 */ +	marker_pos = geo->block_mark_byte_offset / size; +	if (last >= marker_pos && first <= marker_pos) { +		dev_dbg(this->dev, "page:%d, first:%d, last:%d, marker at:%d\n", +				page, first, last, marker_pos); +		return gpmi_ecc_read_page(mtd, chip, buf, 0, page); +	} + +	meta = geo->metadata_size; +	if (first) { +		col = meta + (size + ecc_parity_size) * first; +		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, col, -1); + +		meta = 0; +		buf = buf + first * size; +	} + +	/* Save the old environment */ +	r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0); +	r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1); + +	/* change the BCH registers and bch_geometry{} */ +	n = last - first + 1; +	page_size = meta + (size + ecc_parity_size) * n; + +	r1_new &= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS | +			BM_BCH_FLASH0LAYOUT0_META_SIZE); +	r1_new |= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) +			| BF_BCH_FLASH0LAYOUT0_META_SIZE(meta); +	writel(r1_new, bch_regs + HW_BCH_FLASH0LAYOUT0); + +	r2_new &= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE; +	r2_new |= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size); +	writel(r2_new, bch_regs + HW_BCH_FLASH0LAYOUT1); + +	geo->ecc_chunk_count = n; +	geo->payload_size = n * size; +	geo->page_size = page_size; +	geo->auxiliary_status_offset = ALIGN(meta, 4); + +	dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n", +		page, offs, len, col, first, n, page_size); + +	/* Read the subpage now */ +	this->swap_block_mark = false; +	max_bitflips = gpmi_ecc_read_page(mtd, chip, buf, 0, page); + +	/* Restore */ +	writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0); +	writel(r2_old, 
bch_regs + HW_BCH_FLASH0LAYOUT1); +	this->bch_geometry = old_geo; +	this->swap_block_mark = true; + +	return max_bitflips; +} +  static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,  				const uint8_t *buf, int oob_required)  { @@ -1100,7 +1162,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,  	dma_addr_t auxiliary_phys;  	int        ret; -	pr_debug("ecc write page.\n"); +	dev_dbg(this->dev, "ecc write page.\n");  	if (this->swap_block_mark) {  		/*  		 * If control arrives here, we're doing block mark swapping. @@ -1130,7 +1192,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,  				nfc_geo->payload_size,  				&payload_virt, &payload_phys);  		if (ret) { -			pr_err("Inadequate payload DMA buffer\n"); +			dev_err(this->dev, "Inadequate payload DMA buffer\n");  			return 0;  		} @@ -1140,7 +1202,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,  				nfc_geo->auxiliary_size,  				&auxiliary_virt, &auxiliary_phys);  		if (ret) { -			pr_err("Inadequate auxiliary DMA buffer\n"); +			dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");  			goto exit_auxiliary;  		}  	} @@ -1148,7 +1210,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,  	/* Ask the NFC. 
*/  	ret = gpmi_send_page(this, payload_phys, auxiliary_phys);  	if (ret) -		pr_err("Error in ECC-based write: %d\n", ret); +		dev_err(this->dev, "Error in ECC-based write: %d\n", ret);  	if (!this->swap_block_mark) {  		send_page_end(this, chip->oob_poi, mtd->oobsize, @@ -1238,7 +1300,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,  {  	struct gpmi_nand_data *this = chip->priv; -	pr_debug("page number is %d\n", page); +	dev_dbg(this->dev, "page number is %d\n", page);  	/* clear the OOB buffer */  	memset(chip->oob_poi, ~0, mtd->oobsize); @@ -1263,14 +1325,22 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,  static int  gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)  { -	/* -	 * The BCH will use all the (page + oob). -	 * Our gpmi_hw_ecclayout can only prohibit the JFFS2 to write the oob. -	 * But it can not stop some ioctls such MEMWRITEOOB which uses -	 * MTD_OPS_PLACE_OOB. So We have to implement this function to prohibit -	 * these ioctls too. -	 */ -	return -EPERM; +	struct nand_oobfree *of = mtd->ecclayout->oobfree; +	int status = 0; + +	/* Do we have available oob area? */ +	if (!of->length) +		return -EPERM; + +	if (!nand_is_slc(chip)) +		return -EPERM; + +	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of->offset, page); +	chip->write_buf(mtd, chip->oob_poi + of->offset, of->length); +	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); + +	status = chip->waitfunc(mtd, chip); +	return status & NAND_STATUS_FAIL ? -EIO : 0;  }  static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs) @@ -1443,7 +1513,6 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)  	/* Write the NCB fingerprint into the page buffer. */  	memset(buffer, ~0, mtd->writesize); -	memset(chip->oob_poi, ~0, mtd->oobsize);  	memcpy(buffer + 12, fingerprint, strlen(fingerprint));  	/* Loop through the first search area, writing NCB fingerprints. 
*/ @@ -1558,7 +1627,7 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)  	/* Set up the NFC geometry which is used by BCH. */  	ret = bch_set_geometry(this);  	if (ret) { -		pr_err("Error setting BCH geometry : %d\n", ret); +		dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);  		return ret;  	} @@ -1566,26 +1635,7 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)  	return gpmi_alloc_dma_buffer(this);  } -static int gpmi_pre_bbt_scan(struct gpmi_nand_data  *this) -{ -	int ret; - -	/* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ -	if (GPMI_IS_MX23(this)) -		this->swap_block_mark = false; -	else -		this->swap_block_mark = true; - -	/* Set up the medium geometry */ -	ret = gpmi_set_geometry(this); -	if (ret) -		return ret; - -	/* NAND boot init, depends on the gpmi_set_geometry(). */ -	return nand_boot_init(this); -} - -static void gpmi_nfc_exit(struct gpmi_nand_data *this) +static void gpmi_nand_exit(struct gpmi_nand_data *this)  {  	nand_release(&this->mtd);  	gpmi_free_dma_buffer(this); @@ -1599,8 +1649,11 @@ static int gpmi_init_last(struct gpmi_nand_data *this)  	struct bch_geometry *bch_geo = &this->bch_geometry;  	int ret; -	/* Prepare for the BBT scan. */ -	ret = gpmi_pre_bbt_scan(this); +	/* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ +	this->swap_block_mark = !GPMI_IS_MX23(this); + +	/* Set up the medium geometry */ +	ret = gpmi_set_geometry(this);  	if (ret)  		return ret; @@ -1615,6 +1668,17 @@ static int gpmi_init_last(struct gpmi_nand_data *this)  	ecc->layout	= &gpmi_hw_ecclayout;  	/* +	 * We only enable the subpage read when: +	 *  (1) the chip is imx6, and +	 *  (2) the size of the ECC parity is byte aligned. +	 */ +	if (GPMI_IS_MX6(this) && +		((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) { +		ecc->read_subpage = gpmi_ecc_read_subpage; +		chip->options |= NAND_SUBPAGE_READ; +	} + +	/*  	 * Can we enable the extra features? such as EDO or Sync mode.  	 
*  	 * We do not check the return value now. That's means if we fail in @@ -1625,7 +1689,7 @@ static int gpmi_init_last(struct gpmi_nand_data *this)  	return 0;  } -static int gpmi_nfc_init(struct gpmi_nand_data *this) +static int gpmi_nand_init(struct gpmi_nand_data *this)  {  	struct mtd_info  *mtd = &this->mtd;  	struct nand_chip *chip = &this->nand; @@ -1664,7 +1728,7 @@ static int gpmi_nfc_init(struct gpmi_nand_data *this)  	if (ret)  		goto err_out; -	ret = nand_scan_ident(mtd, 1, NULL); +	ret = nand_scan_ident(mtd, GPMI_IS_MX6(this) ? 2 : 1, NULL);  	if (ret)  		goto err_out; @@ -1672,10 +1736,16 @@ static int gpmi_nfc_init(struct gpmi_nand_data *this)  	if (ret)  		goto err_out; +	chip->options |= NAND_SKIP_BBTSCAN;  	ret = nand_scan_tail(mtd);  	if (ret)  		goto err_out; +	ret = nand_boot_init(this); +	if (ret) +		goto err_out; +	chip->scan_bbt(mtd); +  	ppdata.of_node = this->pdev->dev.of_node;  	ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);  	if (ret) @@ -1683,27 +1753,23 @@ static int gpmi_nfc_init(struct gpmi_nand_data *this)  	return 0;  err_out: -	gpmi_nfc_exit(this); +	gpmi_nand_exit(this);  	return ret;  } -static const struct platform_device_id gpmi_ids[] = { -	{ .name = "imx23-gpmi-nand", .driver_data = IS_MX23, }, -	{ .name = "imx28-gpmi-nand", .driver_data = IS_MX28, }, -	{ .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, }, -	{}, -}; -  static const struct of_device_id gpmi_nand_id_table[] = {  	{  		.compatible = "fsl,imx23-gpmi-nand", -		.data = (void *)&gpmi_ids[IS_MX23] +		.data = (void *)&gpmi_devdata_imx23,  	}, {  		.compatible = "fsl,imx28-gpmi-nand", -		.data = (void *)&gpmi_ids[IS_MX28] +		.data = (void *)&gpmi_devdata_imx28,  	}, {  		.compatible = "fsl,imx6q-gpmi-nand", -		.data = (void *)&gpmi_ids[IS_MX6Q] +		.data = (void *)&gpmi_devdata_imx6q, +	}, { +		.compatible = "fsl,imx6sx-gpmi-nand", +		.data = (void *)&gpmi_devdata_imx6sx,  	}, {}  };  MODULE_DEVICE_TABLE(of, gpmi_nand_id_table); @@ -1714,20 +1780,18 @@ 
static int gpmi_nand_probe(struct platform_device *pdev)  	const struct of_device_id *of_id;  	int ret; +	this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL); +	if (!this) +		return -ENOMEM; +  	of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);  	if (of_id) { -		pdev->id_entry = of_id->data; +		this->devdata = of_id->data;  	} else { -		pr_err("Failed to find the right device id.\n"); +		dev_err(&pdev->dev, "Failed to find the right device id.\n");  		return -ENODEV;  	} -	this = kzalloc(sizeof(*this), GFP_KERNEL); -	if (!this) { -		pr_err("Failed to allocate per-device memory\n"); -		return -ENOMEM; -	} -  	platform_set_drvdata(pdev, this);  	this->pdev  = pdev;  	this->dev   = &pdev->dev; @@ -1740,7 +1804,7 @@ static int gpmi_nand_probe(struct platform_device *pdev)  	if (ret)  		goto exit_nfc_init; -	ret = gpmi_nfc_init(this); +	ret = gpmi_nand_init(this);  	if (ret)  		goto exit_nfc_init; @@ -1752,7 +1816,6 @@ exit_nfc_init:  	release_resources(this);  exit_acquire_resources:  	dev_err(this->dev, "driver registration failed: %d\n", ret); -	kfree(this);  	return ret;  } @@ -1761,9 +1824,8 @@ static int gpmi_nand_remove(struct platform_device *pdev)  {  	struct gpmi_nand_data *this = platform_get_drvdata(pdev); -	gpmi_nfc_exit(this); +	gpmi_nand_exit(this);  	release_resources(this); -	kfree(this);  	return 0;  } @@ -1774,7 +1836,6 @@ static struct platform_driver gpmi_nand_driver = {  	},  	.probe   = gpmi_nand_probe,  	.remove  = gpmi_nand_remove, -	.id_table = gpmi_ids,  };  module_platform_driver(gpmi_nand_driver); diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h index a7685e3a874..32c6ba49f98 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h @@ -26,8 +26,6 @@  struct resources {  	void __iomem  *gpmi_regs;  	void __iomem  *bch_regs; -	unsigned int  bch_low_interrupt; -	unsigned int  bch_high_interrupt;  	unsigned int  dma_low_channel;  	unsigned int  
dma_high_channel;  	struct clk    *clock[GPMI_CLK_MAX]; @@ -121,11 +119,25 @@ struct nand_timing {  	int8_t  tRHOH_in_ns;  }; +enum gpmi_type { +	IS_MX23, +	IS_MX28, +	IS_MX6Q, +	IS_MX6SX +}; + +struct gpmi_devdata { +	enum gpmi_type type; +	int bch_max_ecc_strength; +	int max_chain_delay; /* See the async EDO mode */ +}; +  struct gpmi_nand_data {  	/* flags */  #define GPMI_ASYNC_EDO_ENABLED	(1 << 0)  #define GPMI_TIMING_INIT_OK	(1 << 1)  	int			flags; +	const struct gpmi_devdata *devdata;  	/* System Interface */  	struct device		*dev; @@ -283,15 +295,11 @@ extern int gpmi_read_page(struct gpmi_nand_data *,  #define STATUS_ERASED		0xff  #define STATUS_UNCORRECTABLE	0xfe -/* BCH's bit correction capability. */ -#define MXS_ECC_STRENGTH_MAX	20	/* mx23 and mx28 */ -#define MX6_ECC_STRENGTH_MAX	40 - -/* Use the platform_id to distinguish different Archs. */ -#define IS_MX23			0x0 -#define IS_MX28			0x1 -#define IS_MX6Q			0x2 -#define GPMI_IS_MX23(x)		((x)->pdev->id_entry->driver_data == IS_MX23) -#define GPMI_IS_MX28(x)		((x)->pdev->id_entry->driver_data == IS_MX28) -#define GPMI_IS_MX6Q(x)		((x)->pdev->id_entry->driver_data == IS_MX6Q) +/* Use the devdata to distinguish different Archs. 
*/ +#define GPMI_IS_MX23(x)		((x)->devdata->type == IS_MX23) +#define GPMI_IS_MX28(x)		((x)->devdata->type == IS_MX28) +#define GPMI_IS_MX6Q(x)		((x)->devdata->type == IS_MX6Q) +#define GPMI_IS_MX6SX(x)	((x)->devdata->type == IS_MX6SX) + +#define GPMI_IS_MX6(x)		(GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x))  #endif diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h index 53397cc290f..82114cdc833 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h +++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h @@ -108,6 +108,9 @@  #define HW_GPMI_CTRL1_CLR				0x00000068  #define HW_GPMI_CTRL1_TOG				0x0000006c +#define BP_GPMI_CTRL1_DECOUPLE_CS			24 +#define BM_GPMI_CTRL1_DECOUPLE_CS	(1 << BP_GPMI_CTRL1_DECOUPLE_CS) +  #define BP_GPMI_CTRL1_WRN_DLY_SEL			22  #define BM_GPMI_CTRL1_WRN_DLY_SEL	(0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL)  #define BF_GPMI_CTRL1_WRN_DLY_SEL(v)  \ diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c index a264b888c66..a2c804de156 100644 --- a/drivers/mtd/nand/jz4740_nand.c +++ b/drivers/mtd/nand/jz4740_nand.c @@ -416,10 +416,8 @@ static int jz_nand_probe(struct platform_device *pdev)  	uint8_t nand_maf_id = 0, nand_dev_id = 0;  	nand = kzalloc(sizeof(*nand), GFP_KERNEL); -	if (!nand) { -		dev_err(&pdev->dev, "Failed to allocate device structure.\n"); +	if (!nand)  		return -ENOMEM; -	}  	ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base);  	if (ret) diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index f4dd2a887ea..687478c9f09 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ b/drivers/mtd/nand/lpc32xx_mlc.c @@ -539,20 +539,6 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,  	return 0;  } -static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip, -			uint32_t offset, int data_len, const uint8_t *buf, -			int oob_required, int page, int cached, int raw) -{ -	int res; - -	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); -	res = 
lpc32xx_write_page_lowlevel(mtd, chip, buf, oob_required); -	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); -	lpc32xx_waitfunc(mtd, chip); - -	return res; -} -  static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,  			    int page)  { @@ -627,10 +613,8 @@ static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)  	struct device_node *np = dev->of_node;  	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL); -	if (!ncfg) { -		dev_err(dev, "could not allocate memory for platform data\n"); +	if (!ncfg)  		return NULL; -	}  	of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);  	of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay); @@ -666,10 +650,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)  	/* Allocate memory for the device structure (and zero it) */  	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); -	if (!host) { -		dev_err(&pdev->dev, "failed to allocate device structure.\n"); +	if (!host)  		return -ENOMEM; -	}  	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);  	host->io_base = devm_ioremap_resource(&pdev->dev, rc); @@ -732,9 +714,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)  	nand_chip->ecc.write_oob = lpc32xx_write_oob;  	nand_chip->ecc.read_oob = lpc32xx_read_oob;  	nand_chip->ecc.strength = 4; -	nand_chip->write_page = lpc32xx_write_page;  	nand_chip->waitfunc = lpc32xx_waitfunc; +	nand_chip->options = NAND_NO_SUBPAGE_WRITE;  	nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;  	nand_chip->bbt_td = &lpc32xx_nand_bbt;  	nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror; @@ -764,14 +746,12 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)  	host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);  	if (!host->dma_buf) { -		dev_err(&pdev->dev, "Error allocating dma_buf memory\n");  		res = -ENOMEM;  		goto err_exit3;  	}  	host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);  	if (!host->dummy_buf) { -	
	dev_err(&pdev->dev, "Error allocating dummy_buf memory\n");  		res = -ENOMEM;  		goto err_exit3;  	} @@ -905,7 +885,7 @@ static struct platform_driver lpc32xx_nand_driver = {  	.driver		= {  		.name	= DRV_NAME,  		.owner	= THIS_MODULE, -		.of_match_table = of_match_ptr(lpc32xx_nand_match), +		.of_match_table = lpc32xx_nand_match,  	},  }; diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c index add75709d41..53a6742e3da 100644 --- a/drivers/mtd/nand/lpc32xx_slc.c +++ b/drivers/mtd/nand/lpc32xx_slc.c @@ -725,10 +725,8 @@ static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)  	struct device_node *np = dev->of_node;  	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL); -	if (!ncfg) { -		dev_err(dev, "could not allocate memory for NAND config\n"); +	if (!ncfg)  		return NULL; -	}  	of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);  	of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth); @@ -772,10 +770,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)  	/* Allocate memory for the device structure (and zero it) */  	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); -	if (!host) { -		dev_err(&pdev->dev, "failed to allocate device structure\n"); +	if (!host)  		return -ENOMEM; -	}  	host->io_base_dma = rc->start;  	host->io_base = devm_ioremap_resource(&pdev->dev, rc); @@ -791,8 +787,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)  	}  	if (host->ncfg->wp_gpio == -EPROBE_DEFER)  		return -EPROBE_DEFER; -	if (gpio_is_valid(host->ncfg->wp_gpio) && -			gpio_request(host->ncfg->wp_gpio, "NAND WP")) { +	if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev, +			host->ncfg->wp_gpio, "NAND WP")) {  		dev_err(&pdev->dev, "GPIO not available\n");  		return -EBUSY;  	} @@ -808,7 +804,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)  	mtd->dev.parent = &pdev->dev;  	/* Get NAND clock */ -	host->clk = clk_get(&pdev->dev, NULL); +	host->clk = 
devm_clk_get(&pdev->dev, NULL);  	if (IS_ERR(host->clk)) {  		dev_err(&pdev->dev, "Clock failure\n");  		res = -ENOENT; @@ -858,7 +854,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)  	host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,  				      GFP_KERNEL);  	if (host->data_buf == NULL) { -		dev_err(&pdev->dev, "Error allocating memory\n");  		res = -ENOMEM;  		goto err_exit2;  	} @@ -893,7 +888,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)  	/* Avoid extra scan if using BBT, setup BBT support */  	if (host->ncfg->use_bbt) { -		chip->options |= NAND_SKIP_BBTSCAN;  		chip->bbt_options |= NAND_BBT_USE_FLASH;  		/* @@ -915,13 +909,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)  		goto err_exit3;  	} -	/* Standard layout in FLASH for bad block tables */ -	if (host->ncfg->use_bbt) { -		if (nand_default_bbt(mtd) < 0) -			dev_err(&pdev->dev, -			       "Error initializing default bad block tables\n"); -	} -  	mtd->name = "nxp_lpc3220_slc";  	ppdata.of_node = pdev->dev.of_node;  	res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts, @@ -935,10 +922,8 @@ err_exit3:  	dma_release_channel(host->dma_chan);  err_exit2:  	clk_disable(host->clk); -	clk_put(host->clk);  err_exit1:  	lpc32xx_wp_enable(host); -	gpio_free(host->ncfg->wp_gpio);  	return res;  } @@ -961,9 +946,7 @@ static int lpc32xx_nand_remove(struct platform_device *pdev)  	writel(tmp, SLC_CTRL(host->io_base));  	clk_disable(host->clk); -	clk_put(host->clk);  	lpc32xx_wp_enable(host); -	gpio_free(host->ncfg->wp_gpio);  	return 0;  } @@ -1023,7 +1006,7 @@ static struct platform_driver lpc32xx_nand_driver = {  	.driver		= {  		.name	= LPC32XX_MODNAME,  		.owner	= THIS_MODULE, -		.of_match_table = of_match_ptr(lpc32xx_nand_match), +		.of_match_table = lpc32xx_nand_match,  	},  }; diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c index 3c60a000b42..e78841a2dcc 100644 --- a/drivers/mtd/nand/mpc5121_nfc.c +++ 
b/drivers/mtd/nand/mpc5121_nfc.c @@ -30,13 +30,14 @@  #include <linux/gfp.h>  #include <linux/delay.h>  #include <linux/err.h> -#include <linux/init.h>  #include <linux/interrupt.h>  #include <linux/io.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/nand.h>  #include <linux/mtd/partitions.h> +#include <linux/of_address.h>  #include <linux/of_device.h> +#include <linux/of_irq.h>  #include <linux/of_platform.h>  #include <asm/mpc5121.h> @@ -651,10 +652,8 @@ static int mpc5121_nfc_probe(struct platform_device *op)  	}  	prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL); -	if (!prv) { -		dev_err(dev, "Memory exhausted!\n"); +	if (!prv)  		return -ENOMEM; -	}  	mtd = &prv->mtd;  	chip = &prv->chip; @@ -729,7 +728,7 @@ static int mpc5121_nfc_probe(struct platform_device *op)  	of_node_put(rootnode);  	/* Enable NFC clock */ -	clk = devm_clk_get(dev, "nfc_clk"); +	clk = devm_clk_get(dev, "ipg");  	if (IS_ERR(clk)) {  		dev_err(dev, "Unable to acquire NFC clock!\n");  		retval = PTR_ERR(clk); @@ -784,7 +783,6 @@ static int mpc5121_nfc_probe(struct platform_device *op)  	/* Detect NAND chips */  	if (nand_scan(mtd, be32_to_cpup(chips_no))) {  		dev_err(dev, "NAND Flash not found !\n"); -		devm_free_irq(dev, prv->irq, mtd);  		retval = -ENXIO;  		goto error;  	} @@ -809,7 +807,6 @@ static int mpc5121_nfc_probe(struct platform_device *op)  	default:  		dev_err(dev, "Unsupported NAND flash!\n"); -		devm_free_irq(dev, prv->irq, mtd);  		retval = -ENXIO;  		goto error;  	} @@ -820,7 +817,6 @@ static int mpc5121_nfc_probe(struct platform_device *op)  	retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);  	if (retval) {  		dev_err(dev, "Error adding MTD device!\n"); -		devm_free_irq(dev, prv->irq, mtd);  		goto error;  	} @@ -834,11 +830,8 @@ static int mpc5121_nfc_remove(struct platform_device *op)  {  	struct device *dev = &op->dev;  	struct mtd_info *mtd = dev_get_drvdata(dev); -	struct nand_chip *chip = mtd->priv; -	struct mpc5121_nfc_prv *prv = chip->priv;  	
nand_release(mtd); -	devm_free_irq(dev, prv->irq, mtd);  	mpc5121_nfc_free(dev, mtd);  	return 0; diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index ce8242b6c3e..dba262bf766 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -32,6 +32,7 @@  #include <linux/io.h>  #include <linux/irq.h>  #include <linux/completion.h> +#include <linux/of.h>  #include <linux/of_device.h>  #include <linux/of_mtd.h> @@ -395,7 +396,7 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)  	if (useirq) {  		if (!host->devtype_data->check_int(host)) { -			INIT_COMPLETION(host->op_completion); +			reinit_completion(&host->op_completion);  			irq_control(host, 1);  			wait_for_completion(&host->op_completion);  		} @@ -676,7 +677,6 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,  		ecc_stat >>= 4;  	} while (--no_subpages); -	mtd->ecc_stats.corrected += ret;  	pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);  	return ret; @@ -1399,12 +1399,15 @@ static int mxcnd_probe(struct platform_device *pdev)  	int err = 0;  	/* Allocate memory for MTD device structure and private data */ -	host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host) + -			NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE, GFP_KERNEL); +	host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host), +			GFP_KERNEL);  	if (!host)  		return -ENOMEM; -	host->data_buf = (uint8_t *)(host + 1); +	/* allocate a temporary buffer for the nand_scan_ident() */ +	host->data_buf = devm_kzalloc(&pdev->dev, PAGE_SIZE, GFP_KERNEL); +	if (!host->data_buf) +		return -ENOMEM;  	host->dev = &pdev->dev;  	/* structures must be linked */ @@ -1498,6 +1501,8 @@ static int mxcnd_probe(struct platform_device *pdev)  	init_completion(&host->op_completion);  	host->irq = platform_get_irq(pdev, 0); +	if (host->irq < 0) +		return host->irq;  	/*  	 * Use host->devtype_data->irq_control() here instead of irq_control() @@ -1507,11 +1512,13 @@ static int 
mxcnd_probe(struct platform_device *pdev)  	host->devtype_data->irq_control(host, 0);  	err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq, -			IRQF_DISABLED, DRIVER_NAME, host); +			0, DRIVER_NAME, host);  	if (err)  		return err; -	clk_prepare_enable(host->clk); +	err = clk_prepare_enable(host->clk); +	if (err) +		return err;  	host->clk_act = 1;  	/* @@ -1530,6 +1537,15 @@ static int mxcnd_probe(struct platform_device *pdev)  		goto escan;  	} +	/* allocate the right size buffer now */ +	devm_kfree(&pdev->dev, (void *)host->data_buf); +	host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize, +					GFP_KERNEL); +	if (!host->data_buf) { +		err = -ENOMEM; +		goto escan; +	} +  	/* Call preset again, with correct writesize this time */  	host->devtype_data->preset(mtd); @@ -1575,6 +1591,8 @@ static int mxcnd_remove(struct platform_device *pdev)  	struct mxc_nand_host *host = platform_get_drvdata(pdev);  	nand_release(&host->mtd); +	if (host->clk_act) +		clk_disable_unprepare(host->clk);  	return 0;  } diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 7ed4841327f..4f3e80c68a2 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -29,12 +29,15 @@   *   */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +  #include <linux/module.h>  #include <linux/delay.h>  #include <linux/errno.h>  #include <linux/err.h>  #include <linux/sched.h>  #include <linux/slab.h> +#include <linux/mm.h>  #include <linux/types.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/nand.h> @@ -202,6 +205,51 @@ static void nand_select_chip(struct mtd_info *mtd, int chipnr)  }  /** + * nand_write_byte - [DEFAULT] write single byte to chip + * @mtd: MTD device structure + * @byte: value to write + * + * Default function to write a byte to I/O[7:0] + */ +static void nand_write_byte(struct mtd_info *mtd, uint8_t byte) +{ +	struct nand_chip *chip = mtd->priv; + +	chip->write_buf(mtd, &byte, 1); +} + +/** + * nand_write_byte16 - 
[DEFAULT] write single byte to a chip with width 16 + * @mtd: MTD device structure + * @byte: value to write + * + * Default function to write a byte to I/O[7:0] on a 16-bit wide chip. + */ +static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte) +{ +	struct nand_chip *chip = mtd->priv; +	uint16_t word = byte; + +	/* +	 * It's not entirely clear what should happen to I/O[15:8] when writing +	 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads: +	 * +	 *    When the host supports a 16-bit bus width, only data is +	 *    transferred at the 16-bit width. All address and command line +	 *    transfers shall use only the lower 8-bits of the data bus. During +	 *    command transfers, the host may place any value on the upper +	 *    8-bits of the data bus. During address transfers, the host shall +	 *    set the upper 8-bits of the data bus to 00h. +	 * +	 * One user of the write_byte callback is nand_onfi_set_features. The +	 * four parameters are specified to be written to I/O[7:0], but this is +	 * neither an address nor a command transfer. Let's assume a 0 on the +	 * upper I/O lines is OK. 
+	 */ +	chip->write_buf(mtd, (uint8_t *)&word, 2); +} + +/**   * nand_write_buf - [DEFAULT] write buffer to chip   * @mtd: MTD device structure   * @buf: data buffer @@ -542,7 +590,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,  	/* Serially input address */  	if (column != -1) {  		/* Adjust columns for 16 bit buswidth */ -		if (chip->options & NAND_BUSWIDTH_16) +		if (chip->options & NAND_BUSWIDTH_16 && +				!nand_opcode_8bits(command))  			column >>= 1;  		chip->cmd_ctrl(mtd, column, ctrl);  		ctrl &= ~NAND_CTRL_CHANGE; @@ -633,7 +682,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,  		/* Serially input address */  		if (column != -1) {  			/* Adjust columns for 16 bit buswidth */ -			if (chip->options & NAND_BUSWIDTH_16) +			if (chip->options & NAND_BUSWIDTH_16 && +					!nand_opcode_8bits(command))  				column >>= 1;  			chip->cmd_ctrl(mtd, column, ctrl);  			ctrl &= ~NAND_CTRL_CHANGE; @@ -1113,9 +1163,11 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,   * @data_offs: offset of requested data within the page   * @readlen: data length   * @bufpoi: buffer to store read data + * @page: page number to read   */  static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, -			uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi) +			uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi, +			int page)  {  	int start_step, end_step, num_steps;  	uint32_t *eccpos = chip->ecc.layout->eccpos; @@ -1123,13 +1175,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,  	int data_col_addr, i, gaps = 0;  	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;  	int busw = (chip->options & NAND_BUSWIDTH_16) ? 
2 : 1; -	int index = 0; +	int index;  	unsigned int max_bitflips = 0;  	/* Column address within the page aligned to ECC size (256bytes) */  	start_step = data_offs / chip->ecc.size;  	end_step = (data_offs + readlen - 1) / chip->ecc.size;  	num_steps = end_step - start_step + 1; +	index = start_step * chip->ecc.bytes;  	/* Data size aligned to ECC ecc.size */  	datafrag_len = num_steps * chip->ecc.size; @@ -1152,8 +1205,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,  	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.  	 */  	for (i = 0; i < eccfrag_len - 1; i++) { -		if (eccpos[i + start_step * chip->ecc.bytes] + 1 != -			eccpos[i + start_step * chip->ecc.bytes + 1]) { +		if (eccpos[i + index] + 1 != eccpos[i + index + 1]) {  			gaps = 1;  			break;  		} @@ -1166,8 +1218,6 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,  		 * Send the command to read the particular ECC bytes take care  		 * about buswidth alignment in read_buf.  		 */ -		index = start_step * chip->ecc.bytes; -  		aligned_pos = eccpos[index] & ~(busw - 1);  		aligned_len = eccfrag_len;  		if (eccpos[index] & (busw - 1)) @@ -1408,6 +1458,30 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,  }  /** + * nand_setup_read_retry - [INTERN] Set the READ RETRY mode + * @mtd: MTD device structure + * @retry_mode: the retry mode to use + * + * Some vendors supply a special command to shift the Vt threshold, to be used + * when there are too many bitflips in a page (i.e., ECC error). After setting + * a new threshold, the host should retry reading the page. 
+ */ +static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode) +{ +	struct nand_chip *chip = mtd->priv; + +	pr_debug("setting READ RETRY mode %d\n", retry_mode); + +	if (retry_mode >= chip->read_retries) +		return -EINVAL; + +	if (!chip->setup_read_retry) +		return -EOPNOTSUPP; + +	return chip->setup_read_retry(mtd, retry_mode); +} + +/**   * nand_do_read_ops - [INTERN] Read data with ECC   * @mtd: MTD device structure   * @from: offset to read from @@ -1420,7 +1494,6 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,  {  	int chipnr, page, realpage, col, bytes, aligned, oob_required;  	struct nand_chip *chip = mtd->priv; -	struct mtd_ecc_stats stats;  	int ret = 0;  	uint32_t readlen = ops->len;  	uint32_t oobreadlen = ops->ooblen; @@ -1428,9 +1501,10 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,  		mtd->oobavail : mtd->oobsize;  	uint8_t *bufpoi, *oob, *buf; +	int use_bufpoi;  	unsigned int max_bitflips = 0; - -	stats = mtd->ecc_stats; +	int retry_mode = 0; +	bool ecc_fail = false;  	chipnr = (int)(from >> chip->chip_shift);  	chip->select_chip(mtd, chipnr); @@ -1445,13 +1519,27 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,  	oob_required = oob ? 1 : 0;  	while (1) { +		unsigned int ecc_failures = mtd->ecc_stats.failed; +  		bytes = min(mtd->writesize - col, readlen);  		aligned = (bytes == mtd->writesize); +		if (!aligned) +			use_bufpoi = 1; +		else if (chip->options & NAND_USE_BOUNCE_BUFFER) +			use_bufpoi = !virt_addr_valid(buf); +		else +			use_bufpoi = 0; +  		/* Is the current page in the buffer? */  		if (realpage != chip->pagebuf || oob) { -			bufpoi = aligned ? buf : chip->buffers->databuf; +			bufpoi = use_bufpoi ? 
chip->buffers->databuf : buf; + +			if (use_bufpoi && aligned) +				pr_debug("%s: using read bounce buffer for buf@%p\n", +						 __func__, buf); +read_retry:  			chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);  			/* @@ -1465,12 +1553,13 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,  			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&  				 !oob)  				ret = chip->ecc.read_subpage(mtd, chip, -							col, bytes, bufpoi); +							col, bytes, bufpoi, +							page);  			else  				ret = chip->ecc.read_page(mtd, chip, bufpoi,  							  oob_required, page);  			if (ret < 0) { -				if (!aligned) +				if (use_bufpoi)  					/* Invalidate page cache */  					chip->pagebuf = -1;  				break; @@ -1479,9 +1568,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,  			max_bitflips = max_t(unsigned int, max_bitflips, ret);  			/* Transfer not aligned data */ -			if (!aligned) { +			if (use_bufpoi) {  				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob && -				    !(mtd->ecc_stats.failed - stats.failed) && +				    !(mtd->ecc_stats.failed - ecc_failures) &&  				    (ops->mode != MTD_OPS_RAW)) {  					chip->pagebuf = realpage;  					chip->pagebuf_bitflips = ret; @@ -1492,8 +1581,6 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,  				memcpy(buf, chip->buffers->databuf + col, bytes);  			} -			buf += bytes; -  			if (unlikely(oob)) {  				int toread = min(oobreadlen, max_oobsize); @@ -1511,6 +1598,25 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,  				else  					nand_wait_ready(mtd);  			} + +			if (mtd->ecc_stats.failed - ecc_failures) { +				if (retry_mode + 1 < chip->read_retries) { +					retry_mode++; +					ret = nand_setup_read_retry(mtd, +							retry_mode); +					if (ret < 0) +						break; + +					/* Reset failures; retry */ +					mtd->ecc_stats.failed = ecc_failures; +					goto read_retry; +				} else { +					/* No more retry modes; real failure */ +					ecc_fail = true; +				} +			} + +			buf += bytes;  		} 
else {  			memcpy(buf, chip->buffers->databuf + col, bytes);  			buf += bytes; @@ -1520,6 +1626,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,  		readlen -= bytes; +		/* Reset to retry mode 0 */ +		if (retry_mode) { +			ret = nand_setup_read_retry(mtd, 0); +			if (ret < 0) +				break; +			retry_mode = 0; +		} +  		if (!readlen)  			break; @@ -1545,7 +1659,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,  	if (ret < 0)  		return ret; -	if (mtd->ecc_stats.failed - stats.failed) +	if (ecc_fail)  		return -EBADMSG;  	return max_bitflips; @@ -1902,7 +2016,7 @@ static int nand_write_page_raw_syndrome(struct mtd_info *mtd,  			oob += chip->ecc.prepad;  		} -		chip->read_buf(mtd, oob, eccbytes); +		chip->write_buf(mtd, oob, eccbytes);  		oob += eccbytes;  		if (chip->ecc.postpad) { @@ -2274,11 +2388,23 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,  		int bytes = mtd->writesize;  		int cached = writelen > bytes && page != blockmask;  		uint8_t *wbuf = buf; +		int use_bufpoi; +		int part_pagewr = (column || writelen < (mtd->writesize - 1)); + +		if (part_pagewr) +			use_bufpoi = 1; +		else if (chip->options & NAND_USE_BOUNCE_BUFFER) +			use_bufpoi = !virt_addr_valid(buf); +		else +			use_bufpoi = 0; -		/* Partial page write? 
*/ -		if (unlikely(column || writelen < (mtd->writesize - 1))) { +		/* Partial page write?, or need to use bounce buffer */ +		if (use_bufpoi) { +			pr_debug("%s: using write bounce buffer for buf@%p\n", +					 __func__, buf);  			cached = 0; -			bytes = min_t(int, bytes - column, (int) writelen); +			if (part_pagewr) +				bytes = min_t(int, bytes - column, writelen);  			chip->pagebuf = -1;  			memset(chip->buffers->databuf, 0xff, mtd->writesize);  			memcpy(&chip->buffers->databuf[column], buf, bytes); @@ -2516,18 +2642,20 @@ out:  }  /** - * single_erase_cmd - [GENERIC] NAND standard block erase command function + * single_erase - [GENERIC] NAND standard block erase command function   * @mtd: MTD device structure   * @page: the page address of the block which will be erased   * - * Standard erase command for NAND chips. + * Standard erase command for NAND chips. Returns NAND status.   */ -static void single_erase_cmd(struct mtd_info *mtd, int page) +static int single_erase(struct mtd_info *mtd, int page)  {  	struct nand_chip *chip = mtd->priv;  	/* Send commands to erase a block */  	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);  	chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); + +	return chip->waitfunc(mtd, chip);  }  /** @@ -2608,9 +2736,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,  		    (page + pages_per_block))  			chip->pagebuf = -1; -		chip->erase_cmd(mtd, page & chip->pagemask); - -		status = chip->waitfunc(mtd, chip); +		status = chip->erase(mtd, page & chip->pagemask);  		/*  		 * See if operation failed and additional status checks are @@ -2716,6 +2842,7 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,  			int addr, uint8_t *subfeature_param)  {  	int status; +	int i;  	if (!chip->onfi_version ||  	    !(le16_to_cpu(chip->onfi_params.opt_cmd) @@ -2723,7 +2850,9 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,  		return -EINVAL;  	chip->cmdfunc(mtd, 
NAND_CMD_SET_FEATURES, addr, -1); -	chip->write_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN); +	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i) +		chip->write_byte(mtd, subfeature_param[i]); +  	status = chip->waitfunc(mtd, chip);  	if (status & NAND_STATUS_FAIL)  		return -EIO; @@ -2740,6 +2869,8 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,  static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,  			int addr, uint8_t *subfeature_param)  { +	int i; +  	if (!chip->onfi_version ||  	    !(le16_to_cpu(chip->onfi_params.opt_cmd)  	      & ONFI_OPT_CMD_SET_GET_FEATURES)) @@ -2749,7 +2880,8 @@ static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,  	memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);  	chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1); -	chip->read_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN); +	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i) +		*subfeature_param++ = chip->read_byte(mtd);  	return 0;  } @@ -2812,6 +2944,8 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)  		chip->block_markbad = nand_default_block_markbad;  	if (!chip->write_buf || chip->write_buf == nand_write_buf)  		chip->write_buf = busw ? nand_write_buf16 : nand_write_buf; +	if (!chip->write_byte || chip->write_byte == nand_write_byte) +		chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;  	if (!chip->read_buf || chip->read_buf == nand_read_buf)  		chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;  	if (!chip->scan_bbt) @@ -2869,10 +3003,8 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,  	len = le16_to_cpu(p->ext_param_page_length) * 16;  	ep = kmalloc(len, GFP_KERNEL); -	if (!ep) { -		ret = -ENOMEM; -		goto ext_out; -	} +	if (!ep) +		return -ENOMEM;  	/* Send our own NAND_CMD_PARAM. 
*/  	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); @@ -2914,19 +3046,44 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,  	/* get the info we want. */  	ecc = (struct onfi_ext_ecc_info *)cursor; -	if (ecc->codeword_size) { -		chip->ecc_strength_ds = ecc->ecc_bits; -		chip->ecc_step_ds = 1 << ecc->codeword_size; +	if (!ecc->codeword_size) { +		pr_debug("Invalid codeword size\n"); +		goto ext_out;  	} -	pr_info("ONFI extended param page detected.\n"); -	return 0; +	chip->ecc_strength_ds = ecc->ecc_bits; +	chip->ecc_step_ds = 1 << ecc->codeword_size; +	ret = 0;  ext_out:  	kfree(ep);  	return ret;  } +static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode) +{ +	struct nand_chip *chip = mtd->priv; +	uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode}; + +	return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY, +			feature); +} + +/* + * Configure chip properties from Micron vendor-specific ONFI table + */ +static void nand_onfi_detect_micron(struct nand_chip *chip, +		struct nand_onfi_params *p) +{ +	struct nand_onfi_vendor_micron *micron = (void *)p->vendor; + +	if (le16_to_cpu(p->vendor_revision) < 1) +		return; + +	chip->read_retries = micron->read_retry_options; +	chip->setup_read_retry = nand_setup_read_retry_micron; +} +  /*   * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.   
*/ @@ -2934,14 +3091,9 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,  					int *busw)  {  	struct nand_onfi_params *p = &chip->onfi_params; -	int i; +	int i, j;  	int val; -	/* ONFI need to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */ -	if (chip->options & NAND_BUSWIDTH_16) { -		pr_err("Trying ONFI probe in 16 bits mode, aborting !\n"); -		return 0; -	}  	/* Try ONFI for unknown chip or LP */  	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);  	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || @@ -2950,16 +3102,18 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,  	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);  	for (i = 0; i < 3; i++) { -		chip->read_buf(mtd, (uint8_t *)p, sizeof(*p)); +		for (j = 0; j < sizeof(*p); j++) +			((uint8_t *)p)[j] = chip->read_byte(mtd);  		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==  				le16_to_cpu(p->crc)) { -			pr_info("ONFI param page %d valid\n", i);  			break;  		}  	} -	if (i == 3) +	if (i == 3) { +		pr_err("Could not find valid ONFI parameter page; aborting\n");  		return 0; +	}  	/* Check version */  	val = le16_to_cpu(p->revision); @@ -2975,7 +3129,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,  		chip->onfi_version = 10;  	if (!chip->onfi_version) { -		pr_info("%s: unsupported ONFI version: %d\n", __func__, val); +		pr_info("unsupported ONFI version: %d\n", val);  		return 0;  	} @@ -2983,11 +3137,23 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,  	sanitize_string(p->model, sizeof(p->model));  	if (!mtd->name)  		mtd->name = p->model; +  	mtd->writesize = le32_to_cpu(p->byte_per_page); -	mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize; + +	/* +	 * pages_per_block and blocks_per_lun may not be a power-of-2 size +	 * (don't ask me who thought of this...). 
MTD assumes that these +	 * dimensions will be power-of-2, so just truncate the remaining area. +	 */ +	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1); +	mtd->erasesize *= mtd->writesize; +  	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page); -	chip->chipsize = le32_to_cpu(p->blocks_per_lun); + +	/* See erasesize comment */ +	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);  	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count; +	chip->bits_per_cell = p->bits_per_cell;  	if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)  		*busw = NAND_BUSWIDTH_16; @@ -3011,10 +3177,95 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,  		/* The Extended Parameter Page is supported since ONFI 2.1. */  		if (nand_flash_detect_ext_param_page(mtd, chip, p)) -			pr_info("Failed to detect the extended param page.\n"); +			pr_warn("Failed to detect ONFI extended param page\n"); +	} else { +		pr_warn("Could not retrieve ONFI ECC requirements\n"); +	} + +	if (p->jedec_id == NAND_MFR_MICRON) +		nand_onfi_detect_micron(chip, p); + +	return 1; +} + +/* + * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise. 
+ */ +static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip, +					int *busw) +{ +	struct nand_jedec_params *p = &chip->jedec_params; +	struct jedec_ecc_info *ecc; +	int val; +	int i, j; + +	/* Try JEDEC for unknown chip or LP */ +	chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1); +	if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' || +		chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' || +		chip->read_byte(mtd) != 'C') +		return 0; + +	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1); +	for (i = 0; i < 3; i++) { +		for (j = 0; j < sizeof(*p); j++) +			((uint8_t *)p)[j] = chip->read_byte(mtd); + +		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) == +				le16_to_cpu(p->crc)) +			break; +	} + +	if (i == 3) { +		pr_err("Could not find valid JEDEC parameter page; aborting\n"); +		return 0; +	} + +	/* Check version */ +	val = le16_to_cpu(p->revision); +	if (val & (1 << 2)) +		chip->jedec_version = 10; +	else if (val & (1 << 1)) +		chip->jedec_version = 1; /* vendor specific version */ + +	if (!chip->jedec_version) { +		pr_info("unsupported JEDEC version: %d\n", val); +		return 0; +	} + +	sanitize_string(p->manufacturer, sizeof(p->manufacturer)); +	sanitize_string(p->model, sizeof(p->model)); +	if (!mtd->name) +		mtd->name = p->model; + +	mtd->writesize = le32_to_cpu(p->byte_per_page); + +	/* Please reference to the comment for nand_flash_detect_onfi. */ +	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1); +	mtd->erasesize *= mtd->writesize; + +	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page); + +	/* Please reference to the comment for nand_flash_detect_onfi. 
*/ +	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1); +	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count; +	chip->bits_per_cell = p->bits_per_cell; + +	if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS) +		*busw = NAND_BUSWIDTH_16; +	else +		*busw = 0; + +	/* ECC info */ +	ecc = &p->ecc_info[0]; + +	if (ecc->codeword_size >= 9) { +		chip->ecc_strength_ds = ecc->ecc_bits; +		chip->ecc_step_ds = 1 << ecc->codeword_size; +	} else { +		pr_warn("Invalid codeword size\n");  	} -	pr_info("ONFI flash detected\n");  	return 1;  } @@ -3077,6 +3328,16 @@ static int nand_id_len(u8 *id_data, int arrlen)  	return arrlen;  } +/* Extract the bits of per cell from the 3rd byte of the extended ID */ +static int nand_get_bits_per_cell(u8 cellinfo) +{ +	int bits; + +	bits = cellinfo & NAND_CI_CELLTYPE_MSK; +	bits >>= NAND_CI_CELLTYPE_SHIFT; +	return bits + 1; +} +  /*   * Many new NAND share similar device ID codes, which represent the size of the   * chip. The rest of the parameters must be decoded according to generic or @@ -3087,7 +3348,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,  {  	int extid, id_len;  	/* The 3rd id byte holds MLC / multichip data */ -	chip->cellinfo = id_data[2]; +	chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);  	/* The 4th id byte is the important one */  	extid = id_data[3]; @@ -3103,8 +3364,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,  	 * ID to decide what to do.  	 
*/  	if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG && -			(chip->cellinfo & NAND_CI_CELLTYPE_MSK) && -			id_data[5] != 0x00) { +			!nand_is_slc(chip) && id_data[5] != 0x00) {  		/* Calc pagesize */  		mtd->writesize = 2048 << (extid & 0x03);  		extid >>= 2; @@ -3126,9 +3386,12 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,  			mtd->oobsize = 512;  			break;  		case 6: -		default: /* Other cases are "reserved" (unknown) */  			mtd->oobsize = 640;  			break; +		case 7: +		default: /* Other cases are "reserved" (unknown) */ +			mtd->oobsize = 1024; +			break;  		}  		extid >>= 2;  		/* Calc blocksize */ @@ -3136,7 +3399,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,  			(((extid >> 1) & 0x04) | (extid & 0x03));  		*busw = 0;  	} else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX && -			(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) { +			!nand_is_slc(chip)) {  		unsigned int tmp;  		/* Calc pagesize */ @@ -3199,7 +3462,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,  		 * - ID byte 5, bit[7]:    1 -> BENAND, 0 -> raw SLC  		 */  		if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA && -				!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) && +				nand_is_slc(chip) &&  				(id_data[5] & 0x7) == 0x6 /* 24nm */ &&  				!(id_data[4] & 0x80) /* !BENAND */) {  			mtd->oobsize = 32 * mtd->writesize >> 9; @@ -3224,6 +3487,9 @@ static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,  	mtd->oobsize = mtd->writesize / 32;  	*busw = type->options & NAND_BUSWIDTH_16; +	/* All legacy ID NAND are small-page, SLC */ +	chip->bits_per_cell = 1; +  	/*  	 * Check for Spansion/AMD ID + repeating 5th, 6th byte since  	 * some Spansion chips have erasesize that conflicts with size @@ -3260,11 +3526,11 @@ static void nand_decode_bbm_options(struct mtd_info *mtd,  	 * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,  	 * AMD/Spansion, and Macronix.  
All others scan only the first page.  	 */ -	if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && +	if (!nand_is_slc(chip) &&  			(maf_id == NAND_MFR_SAMSUNG ||  			 maf_id == NAND_MFR_HYNIX))  		chip->bbt_options |= NAND_BBT_SCANLASTPAGE; -	else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) && +	else if ((nand_is_slc(chip) &&  				(maf_id == NAND_MFR_SAMSUNG ||  				 maf_id == NAND_MFR_HYNIX ||  				 maf_id == NAND_MFR_TOSHIBA || @@ -3288,7 +3554,7 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,  		mtd->erasesize = type->erasesize;  		mtd->oobsize = type->oobsize; -		chip->cellinfo = id_data[2]; +		chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);  		chip->chipsize = (uint64_t)type->chipsize << 20;  		chip->options |= type->options;  		chip->ecc_strength_ds = NAND_ECC_STRENGTH(type); @@ -3296,6 +3562,9 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,  		*busw = type->options & NAND_BUSWIDTH_16; +		if (!mtd->name) +			mtd->name = type->name; +  		return true;  	}  	return false; @@ -3306,10 +3575,10 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,   */  static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,  						  struct nand_chip *chip, -						  int busw,  						  int *maf_id, int *dev_id,  						  struct nand_flash_dev *type)  { +	int busw;  	int i, maf_idx;  	u8 id_data[8]; @@ -3343,8 +3612,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,  		id_data[i] = chip->read_byte(mtd);  	if (id_data[0] != *maf_id || id_data[1] != *dev_id) { -		pr_info("%s: second ID read did not match " -			"%02x,%02x against %02x,%02x\n", __func__, +		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",  			*maf_id, *dev_id, id_data[0], id_data[1]);  		return ERR_PTR(-ENODEV);  	} @@ -3363,9 +3631,13 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,  	chip->onfi_version = 0;  	if (!type->name || 
!type->pagesize) { -		/* Check is chip is ONFI compliant */ +		/* Check if the chip is ONFI compliant */  		if (nand_flash_detect_onfi(mtd, chip, &busw))  			goto ident_done; + +		/* Check if the chip is JEDEC compliant */ +		if (nand_flash_detect_jedec(mtd, chip, &busw)) +			goto ident_done;  	}  	if (!type->name) @@ -3411,10 +3683,10 @@ ident_done:  		 * Check, if buswidth is correct. Hardware drivers should set  		 * chip correct!  		 */ -		pr_info("NAND device: Manufacturer ID:" -			" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, -			*dev_id, nand_manuf_ids[maf_idx].name, mtd->name); -		pr_warn("NAND bus width %d instead %d bit\n", +		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", +			*maf_id, *dev_id); +		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name); +		pr_warn("bus width %d instead %d bit\n",  			   (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,  			   busw ? 16 : 8);  		return ERR_PTR(-EINVAL); @@ -3437,18 +3709,28 @@ ident_done:  	}  	chip->badblockbits = 8; -	chip->erase_cmd = single_erase_cmd; +	chip->erase = single_erase;  	/* Do not replace user supplied command function! */  	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)  		chip->cmdfunc = nand_command_lp; -	pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s)," -		" %dMiB, page size: %d, OOB size: %d\n", -		*maf_id, *dev_id, nand_manuf_ids[maf_idx].name, -		chip->onfi_version ? 
chip->onfi_params.model : type->name, -		(int)(chip->chipsize >> 20), mtd->writesize, mtd->oobsize); +	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", +		*maf_id, *dev_id); +	if (chip->onfi_version) +		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, +				chip->onfi_params.model); +	else if (chip->jedec_version) +		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, +				chip->jedec_params.model); +	else +		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, +				type->name); + +	pr_info("%dMiB, %s, page size: %d, OOB size: %d\n", +		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC", +		mtd->writesize, mtd->oobsize);  	return type;  } @@ -3466,18 +3748,16 @@ ident_done:  int nand_scan_ident(struct mtd_info *mtd, int maxchips,  		    struct nand_flash_dev *table)  { -	int i, busw, nand_maf_id, nand_dev_id; +	int i, nand_maf_id, nand_dev_id;  	struct nand_chip *chip = mtd->priv;  	struct nand_flash_dev *type; -	/* Get buswidth to select the correct functions */ -	busw = chip->options & NAND_BUSWIDTH_16;  	/* Set the default functions */ -	nand_set_defaults(chip, busw); +	nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);  	/* Read the flash type */ -	type = nand_get_flash_type(mtd, chip, busw, -				&nand_maf_id, &nand_dev_id, table); +	type = nand_get_flash_type(mtd, chip, &nand_maf_id, +				   &nand_dev_id, table);  	if (IS_ERR(type)) {  		if (!(chip->options & NAND_SCAN_SILENT_NODEV)) @@ -3504,7 +3784,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,  		chip->select_chip(mtd, -1);  	}  	if (i > 1) -		pr_info("%d NAND chips detected\n", i); +		pr_info("%d chips detected\n", i);  	/* Store the number of chips and calc total size for mtd */  	chip->numchips = i; @@ -3514,6 +3794,39 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,  }  EXPORT_SYMBOL(nand_scan_ident); +/* + * Check if the chip configuration meet the datasheet requirements. 
+ + * If our configuration corrects A bits per B bytes and the minimum + * required correction level is X bits per Y bytes, then we must ensure + * both of the following are true: + * + * (1) A / B >= X / Y + * (2) A >= X + * + * Requirement (1) ensures we can correct for the required bitflip density. + * Requirement (2) ensures we can correct even when all bitflips are clumped + * in the same sector. + */ +static bool nand_ecc_strength_good(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; +	struct nand_ecc_ctrl *ecc = &chip->ecc; +	int corr, ds_corr; + +	if (ecc->size == 0 || chip->ecc_step_ds == 0) +		/* Not enough information */ +		return true; + +	/* +	 * We get the number of corrected bits per page to compare +	 * the correction density. +	 */ +	corr = (mtd->writesize * ecc->strength) / ecc->size; +	ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds; + +	return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds; +}  /**   * nand_scan_tail - [NAND Interface] Scan for the NAND device @@ -3527,15 +3840,27 @@ int nand_scan_tail(struct mtd_info *mtd)  {  	int i;  	struct nand_chip *chip = mtd->priv; +	struct nand_ecc_ctrl *ecc = &chip->ecc; +	struct nand_buffers *nbuf;  	/* New bad blocks should be marked in OOB, flash-based BBT, or both */  	BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&  			!(chip->bbt_options & NAND_BBT_USE_FLASH)); -	if (!(chip->options & NAND_OWN_BUFFERS)) -		chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL); -	if (!chip->buffers) -		return -ENOMEM; +	if (!(chip->options & NAND_OWN_BUFFERS)) { +		nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize +				+ mtd->oobsize * 3, GFP_KERNEL); +		if (!nbuf) +			return -ENOMEM; +		nbuf->ecccalc = (uint8_t *)(nbuf + 1); +		nbuf->ecccode = nbuf->ecccalc + mtd->oobsize; +		nbuf->databuf = nbuf->ecccode + mtd->oobsize; + +		chip->buffers = nbuf; +	} else { +		if (!chip->buffers) +			return -ENOMEM; +	}  	/* Set the internal oob buffer location, just after 
the page data */  	chip->oob_poi = chip->buffers->databuf + mtd->writesize; @@ -3543,19 +3868,19 @@ int nand_scan_tail(struct mtd_info *mtd)  	/*  	 * If no default placement scheme is given, select an appropriate one.  	 */ -	if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) { +	if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {  		switch (mtd->oobsize) {  		case 8: -			chip->ecc.layout = &nand_oob_8; +			ecc->layout = &nand_oob_8;  			break;  		case 16: -			chip->ecc.layout = &nand_oob_16; +			ecc->layout = &nand_oob_16;  			break;  		case 64: -			chip->ecc.layout = &nand_oob_64; +			ecc->layout = &nand_oob_64;  			break;  		case 128: -			chip->ecc.layout = &nand_oob_128; +			ecc->layout = &nand_oob_128;  			break;  		default:  			pr_warn("No oob scheme defined for oobsize %d\n", @@ -3572,64 +3897,62 @@ int nand_scan_tail(struct mtd_info *mtd)  	 * selected and we have 256 byte pagesize fallback to software ECC  	 */ -	switch (chip->ecc.mode) { +	switch (ecc->mode) {  	case NAND_ECC_HW_OOB_FIRST:  		/* Similar to NAND_ECC_HW, but a separate read_page handle */ -		if (!chip->ecc.calculate || !chip->ecc.correct || -		     !chip->ecc.hwctl) { +		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {  			pr_warn("No ECC functions supplied; "  				   "hardware ECC not possible\n");  			BUG();  		} -		if (!chip->ecc.read_page) -			chip->ecc.read_page = nand_read_page_hwecc_oob_first; +		if (!ecc->read_page) +			ecc->read_page = nand_read_page_hwecc_oob_first;  	case NAND_ECC_HW:  		/* Use standard hwecc read page function? 
*/ -		if (!chip->ecc.read_page) -			chip->ecc.read_page = nand_read_page_hwecc; -		if (!chip->ecc.write_page) -			chip->ecc.write_page = nand_write_page_hwecc; -		if (!chip->ecc.read_page_raw) -			chip->ecc.read_page_raw = nand_read_page_raw; -		if (!chip->ecc.write_page_raw) -			chip->ecc.write_page_raw = nand_write_page_raw; -		if (!chip->ecc.read_oob) -			chip->ecc.read_oob = nand_read_oob_std; -		if (!chip->ecc.write_oob) -			chip->ecc.write_oob = nand_write_oob_std; -		if (!chip->ecc.read_subpage) -			chip->ecc.read_subpage = nand_read_subpage; -		if (!chip->ecc.write_subpage) -			chip->ecc.write_subpage = nand_write_subpage_hwecc; +		if (!ecc->read_page) +			ecc->read_page = nand_read_page_hwecc; +		if (!ecc->write_page) +			ecc->write_page = nand_write_page_hwecc; +		if (!ecc->read_page_raw) +			ecc->read_page_raw = nand_read_page_raw; +		if (!ecc->write_page_raw) +			ecc->write_page_raw = nand_write_page_raw; +		if (!ecc->read_oob) +			ecc->read_oob = nand_read_oob_std; +		if (!ecc->write_oob) +			ecc->write_oob = nand_write_oob_std; +		if (!ecc->read_subpage) +			ecc->read_subpage = nand_read_subpage; +		if (!ecc->write_subpage) +			ecc->write_subpage = nand_write_subpage_hwecc;  	case NAND_ECC_HW_SYNDROME: -		if ((!chip->ecc.calculate || !chip->ecc.correct || -		     !chip->ecc.hwctl) && -		    (!chip->ecc.read_page || -		     chip->ecc.read_page == nand_read_page_hwecc || -		     !chip->ecc.write_page || -		     chip->ecc.write_page == nand_write_page_hwecc)) { +		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) && +		    (!ecc->read_page || +		     ecc->read_page == nand_read_page_hwecc || +		     !ecc->write_page || +		     ecc->write_page == nand_write_page_hwecc)) {  			pr_warn("No ECC functions supplied; "  				   "hardware ECC not possible\n");  			BUG();  		}  		/* Use standard syndrome read/write page function? 
*/ -		if (!chip->ecc.read_page) -			chip->ecc.read_page = nand_read_page_syndrome; -		if (!chip->ecc.write_page) -			chip->ecc.write_page = nand_write_page_syndrome; -		if (!chip->ecc.read_page_raw) -			chip->ecc.read_page_raw = nand_read_page_raw_syndrome; -		if (!chip->ecc.write_page_raw) -			chip->ecc.write_page_raw = nand_write_page_raw_syndrome; -		if (!chip->ecc.read_oob) -			chip->ecc.read_oob = nand_read_oob_syndrome; -		if (!chip->ecc.write_oob) -			chip->ecc.write_oob = nand_write_oob_syndrome; - -		if (mtd->writesize >= chip->ecc.size) { -			if (!chip->ecc.strength) { +		if (!ecc->read_page) +			ecc->read_page = nand_read_page_syndrome; +		if (!ecc->write_page) +			ecc->write_page = nand_write_page_syndrome; +		if (!ecc->read_page_raw) +			ecc->read_page_raw = nand_read_page_raw_syndrome; +		if (!ecc->write_page_raw) +			ecc->write_page_raw = nand_write_page_raw_syndrome; +		if (!ecc->read_oob) +			ecc->read_oob = nand_read_oob_syndrome; +		if (!ecc->write_oob) +			ecc->write_oob = nand_write_oob_syndrome; + +		if (mtd->writesize >= ecc->size) { +			if (!ecc->strength) {  				pr_warn("Driver must set ecc.strength when using hardware ECC\n");  				BUG();  			} @@ -3637,112 +3960,112 @@ int nand_scan_tail(struct mtd_info *mtd)  		}  		pr_warn("%d byte HW ECC not possible on "  			   "%d byte page size, fallback to SW ECC\n", -			   chip->ecc.size, mtd->writesize); -		chip->ecc.mode = NAND_ECC_SOFT; +			   ecc->size, mtd->writesize); +		ecc->mode = NAND_ECC_SOFT;  	case NAND_ECC_SOFT: -		chip->ecc.calculate = nand_calculate_ecc; -		chip->ecc.correct = nand_correct_data; -		chip->ecc.read_page = nand_read_page_swecc; -		chip->ecc.read_subpage = nand_read_subpage; -		chip->ecc.write_page = nand_write_page_swecc; -		chip->ecc.read_page_raw = nand_read_page_raw; -		chip->ecc.write_page_raw = nand_write_page_raw; -		chip->ecc.read_oob = nand_read_oob_std; -		chip->ecc.write_oob = nand_write_oob_std; -		if (!chip->ecc.size) -			chip->ecc.size = 256; -		
chip->ecc.bytes = 3; -		chip->ecc.strength = 1; +		ecc->calculate = nand_calculate_ecc; +		ecc->correct = nand_correct_data; +		ecc->read_page = nand_read_page_swecc; +		ecc->read_subpage = nand_read_subpage; +		ecc->write_page = nand_write_page_swecc; +		ecc->read_page_raw = nand_read_page_raw; +		ecc->write_page_raw = nand_write_page_raw; +		ecc->read_oob = nand_read_oob_std; +		ecc->write_oob = nand_write_oob_std; +		if (!ecc->size) +			ecc->size = 256; +		ecc->bytes = 3; +		ecc->strength = 1;  		break;  	case NAND_ECC_SOFT_BCH:  		if (!mtd_nand_has_bch()) { -			pr_warn("CONFIG_MTD_ECC_BCH not enabled\n"); +			pr_warn("CONFIG_MTD_NAND_ECC_BCH not enabled\n");  			BUG();  		} -		chip->ecc.calculate = nand_bch_calculate_ecc; -		chip->ecc.correct = nand_bch_correct_data; -		chip->ecc.read_page = nand_read_page_swecc; -		chip->ecc.read_subpage = nand_read_subpage; -		chip->ecc.write_page = nand_write_page_swecc; -		chip->ecc.read_page_raw = nand_read_page_raw; -		chip->ecc.write_page_raw = nand_write_page_raw; -		chip->ecc.read_oob = nand_read_oob_std; -		chip->ecc.write_oob = nand_write_oob_std; +		ecc->calculate = nand_bch_calculate_ecc; +		ecc->correct = nand_bch_correct_data; +		ecc->read_page = nand_read_page_swecc; +		ecc->read_subpage = nand_read_subpage; +		ecc->write_page = nand_write_page_swecc; +		ecc->read_page_raw = nand_read_page_raw; +		ecc->write_page_raw = nand_write_page_raw; +		ecc->read_oob = nand_read_oob_std; +		ecc->write_oob = nand_write_oob_std;  		/*  		 * Board driver should supply ecc.size and ecc.bytes values to  		 * select how many bits are correctable; see nand_bch_init()  		 * for details. Otherwise, default to 4 bits for large page  		 * devices.  		 
*/ -		if (!chip->ecc.size && (mtd->oobsize >= 64)) { -			chip->ecc.size = 512; -			chip->ecc.bytes = 7; +		if (!ecc->size && (mtd->oobsize >= 64)) { +			ecc->size = 512; +			ecc->bytes = 7;  		} -		chip->ecc.priv = nand_bch_init(mtd, -					       chip->ecc.size, -					       chip->ecc.bytes, -					       &chip->ecc.layout); -		if (!chip->ecc.priv) { +		ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes, +					       &ecc->layout); +		if (!ecc->priv) {  			pr_warn("BCH ECC initialization failed!\n");  			BUG();  		} -		chip->ecc.strength = -			chip->ecc.bytes * 8 / fls(8 * chip->ecc.size); +		ecc->strength = ecc->bytes * 8 / fls(8 * ecc->size);  		break;  	case NAND_ECC_NONE:  		pr_warn("NAND_ECC_NONE selected by board driver. "  			   "This is not recommended!\n"); -		chip->ecc.read_page = nand_read_page_raw; -		chip->ecc.write_page = nand_write_page_raw; -		chip->ecc.read_oob = nand_read_oob_std; -		chip->ecc.read_page_raw = nand_read_page_raw; -		chip->ecc.write_page_raw = nand_write_page_raw; -		chip->ecc.write_oob = nand_write_oob_std; -		chip->ecc.size = mtd->writesize; -		chip->ecc.bytes = 0; -		chip->ecc.strength = 0; +		ecc->read_page = nand_read_page_raw; +		ecc->write_page = nand_write_page_raw; +		ecc->read_oob = nand_read_oob_std; +		ecc->read_page_raw = nand_read_page_raw; +		ecc->write_page_raw = nand_write_page_raw; +		ecc->write_oob = nand_write_oob_std; +		ecc->size = mtd->writesize; +		ecc->bytes = 0; +		ecc->strength = 0;  		break;  	default: -		pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode); +		pr_warn("Invalid NAND_ECC_MODE %d\n", ecc->mode);  		BUG();  	}  	/* For many systems, the standard OOB write also works for raw */ -	if (!chip->ecc.read_oob_raw) -		chip->ecc.read_oob_raw = chip->ecc.read_oob; -	if (!chip->ecc.write_oob_raw) -		chip->ecc.write_oob_raw = chip->ecc.write_oob; +	if (!ecc->read_oob_raw) +		ecc->read_oob_raw = ecc->read_oob; +	if (!ecc->write_oob_raw) +		ecc->write_oob_raw = ecc->write_oob;  	/*  	 * The number of 
bytes available for a client to place data into  	 * the out of band area.  	 */ -	chip->ecc.layout->oobavail = 0; -	for (i = 0; chip->ecc.layout->oobfree[i].length -			&& i < ARRAY_SIZE(chip->ecc.layout->oobfree); i++) -		chip->ecc.layout->oobavail += -			chip->ecc.layout->oobfree[i].length; -	mtd->oobavail = chip->ecc.layout->oobavail; +	ecc->layout->oobavail = 0; +	for (i = 0; ecc->layout->oobfree[i].length +			&& i < ARRAY_SIZE(ecc->layout->oobfree); i++) +		ecc->layout->oobavail += ecc->layout->oobfree[i].length; +	mtd->oobavail = ecc->layout->oobavail; + +	/* ECC sanity check: warn if it's too weak */ +	if (!nand_ecc_strength_good(mtd)) +		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n", +			mtd->name);  	/*  	 * Set the number of read / write steps for one page depending on ECC  	 * mode.  	 */ -	chip->ecc.steps = mtd->writesize / chip->ecc.size; -	if (chip->ecc.steps * chip->ecc.size != mtd->writesize) { +	ecc->steps = mtd->writesize / ecc->size; +	if (ecc->steps * ecc->size != mtd->writesize) {  		pr_warn("Invalid ECC parameters\n");  		BUG();  	} -	chip->ecc.total = chip->ecc.steps * chip->ecc.bytes; +	ecc->total = ecc->steps * ecc->bytes;  	/* Allow subpage writes up to ecc.steps. 
Not possible for MLC flash */ -	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && -	    !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) { -		switch (chip->ecc.steps) { +	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) { +		switch (ecc->steps) {  		case 2:  			mtd->subpage_sft = 1;  			break; @@ -3762,11 +4085,19 @@ int nand_scan_tail(struct mtd_info *mtd)  	chip->pagebuf = -1;  	/* Large page NAND with SOFT_ECC should support subpage reads */ -	if ((chip->ecc.mode == NAND_ECC_SOFT) && (chip->page_shift > 9)) -		chip->options |= NAND_SUBPAGE_READ; +	switch (ecc->mode) { +	case NAND_ECC_SOFT: +	case NAND_ECC_SOFT_BCH: +		if (chip->page_shift > 9) +			chip->options |= NAND_SUBPAGE_READ; +		break; + +	default: +		break; +	}  	/* Fill in remaining MTD driver data */ -	mtd->type = MTD_NANDFLASH; +	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;  	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :  						MTD_CAP_NANDFLASH;  	mtd->_erase = nand_erase; @@ -3787,9 +4118,9 @@ int nand_scan_tail(struct mtd_info *mtd)  	mtd->writebufsize = mtd->writesize;  	/* propagate ecc info to mtd_info */ -	mtd->ecclayout = chip->ecc.layout; -	mtd->ecc_strength = chip->ecc.strength; -	mtd->ecc_step_size = chip->ecc.size; +	mtd->ecclayout = ecc->layout; +	mtd->ecc_strength = ecc->strength; +	mtd->ecc_step_size = ecc->size;  	/*  	 * Initialize bitflip_threshold to its default prior scan_bbt() call.  	 
* scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index bc06196d573..7f0c3b4c2a4 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c @@ -412,25 +412,6 @@ static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,  	}  } -/* Scan a given block full */ -static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd, -			   loff_t offs, uint8_t *buf, size_t readlen, -			   int scanlen, int numpages) -{ -	int ret, j; - -	ret = scan_read_oob(mtd, buf, offs, readlen); -	/* Ignore ECC errors when checking for BBM */ -	if (ret && !mtd_is_bitflip_or_eccerr(ret)) -		return ret; - -	for (j = 0; j < numpages; j++, buf += scanlen) { -		if (check_pattern(buf, scanlen, mtd->writesize, bd)) -			return 1; -	} -	return 0; -} -  /* Scan a given block partially */  static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,  			   loff_t offs, uint8_t *buf, int numpages) @@ -477,24 +458,17 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,  	struct nand_bbt_descr *bd, int chip)  {  	struct nand_chip *this = mtd->priv; -	int i, numblocks, numpages, scanlen; +	int i, numblocks, numpages;  	int startblock;  	loff_t from; -	size_t readlen;  	pr_info("Scanning device for bad blocks\n"); -	if (bd->options & NAND_BBT_SCANALLPAGES) -		numpages = 1 << (this->bbt_erase_shift - this->page_shift); -	else if (bd->options & NAND_BBT_SCAN2NDPAGE) +	if (bd->options & NAND_BBT_SCAN2NDPAGE)  		numpages = 2;  	else  		numpages = 1; -	/* We need only read few bytes from the OOB area */ -	scanlen = 0; -	readlen = bd->len; -  	if (chip == -1) {  		numblocks = mtd->size >> this->bbt_erase_shift;  		startblock = 0; @@ -519,12 +493,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,  		BUG_ON(bd->options & NAND_BBT_NO_OOB); -		if (bd->options & NAND_BBT_SCANALLPAGES) -			ret = scan_block_full(mtd, bd, from, buf, readlen, -					      scanlen, 
numpages); -		else -			ret = scan_block_fast(mtd, bd, from, buf, numpages); - +		ret = scan_block_fast(mtd, bd, from, buf, numpages);  		if (ret < 0)  			return ret; @@ -559,7 +528,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr  {  	struct nand_chip *this = mtd->priv;  	int i, chips; -	int bits, startblock, block, dir; +	int startblock, block, dir;  	int scanlen = mtd->writesize + mtd->oobsize;  	int bbtblocks;  	int blocktopage = this->bbt_erase_shift - this->page_shift; @@ -583,9 +552,6 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr  		bbtblocks = mtd->size >> this->bbt_erase_shift;  	} -	/* Number of bits for each erase block in the bbt */ -	bits = td->options & NAND_BBT_NRBITS_MSK; -  	for (i = 0; i < chips; i++) {  		/* Reset version information */  		td->version[i] = 0; @@ -1316,6 +1282,7 @@ static int nand_create_badblock_pattern(struct nand_chip *this)  int nand_default_bbt(struct mtd_info *mtd)  {  	struct nand_chip *this = mtd->priv; +	int ret;  	/* Is a flash based bad block table requested? 
*/  	if (this->bbt_options & NAND_BBT_USE_FLASH) { @@ -1334,8 +1301,11 @@ int nand_default_bbt(struct mtd_info *mtd)  		this->bbt_md = NULL;  	} -	if (!this->badblock_pattern) -		nand_create_badblock_pattern(this); +	if (!this->badblock_pattern) { +		ret = nand_create_badblock_pattern(this); +		if (ret) +			return ret; +	}  	return nand_scan_bbt(mtd, this->badblock_pattern);  } @@ -1392,4 +1362,3 @@ int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)  }  EXPORT_SYMBOL(nand_scan_bbt); -EXPORT_SYMBOL(nand_default_bbt); diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c index 053c9a2d47c..97c4c0216c9 100644 --- a/drivers/mtd/nand/nand_ecc.c +++ b/drivers/mtd/nand/nand_ecc.c @@ -506,7 +506,7 @@ int __nand_correct_data(unsigned char *buf,  	if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)  		return 1;	/* error in ECC data; no action needed */ -	pr_err("%s: uncorrectable ECC error", __func__); +	pr_err("%s: uncorrectable ECC error\n", __func__);  	return -1;  }  EXPORT_SYMBOL(__nand_correct_data); diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index a87b0a3afa3..3d7c89fc103 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -43,6 +43,9 @@ struct nand_flash_dev nand_flash_ids[] = {  	{"TC58NVG6D2 64G 3.3V 8-bit",  		{ .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },  		  SZ_8K, SZ_8K, SZ_2M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) }, +	{"SDTNRGAMA 64G 3.3V 8-bit", +		{ .id = {0x45, 0xde, 0x94, 0x93, 0x76, 0x50} }, +		  SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },  	LEGACY_ID_NAND("NAND 4MiB 5V 8-bit",   0x6B, 4, SZ_8K, SP_OPTIONS),  	LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS), @@ -169,6 +172,8 @@ struct nand_manufacturers nand_manuf_ids[] = {  	{NAND_MFR_AMD, "AMD/Spansion"},  	{NAND_MFR_MACRONIX, "Macronix"},  	{NAND_MFR_EON, "Eon"}, +	{NAND_MFR_SANDISK, "SanDisk"}, +	{NAND_MFR_INTEL, "Intel"},  	{0x0, "Unknown"}  }; diff --git 
a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index bdc1d15369f..4f0d83648e5 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -575,12 +575,12 @@ static int alloc_device(struct nandsim *ns)  		cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);  		if (IS_ERR(cfile))  			return PTR_ERR(cfile); -		if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) { +		if (!(cfile->f_mode & FMODE_CAN_READ)) {  			NS_ERR("alloc_device: cache file not readable\n");  			err = -EINVAL;  			goto err_close;  		} -		if (!cfile->f_op->write && !cfile->f_op->aio_write) { +		if (!(cfile->f_mode & FMODE_CAN_WRITE)) {  			NS_ERR("alloc_device: cache file not writeable\n");  			err = -EINVAL;  			goto err_close; @@ -2372,7 +2372,7 @@ static int __init ns_init_module(void)  	if ((retval = init_nandsim(nsmtd)) != 0)  		goto err_exit; -	if ((retval = nand_default_bbt(nsmtd)) != 0) +	if ((retval = chip->scan_bbt(nsmtd)) != 0)  		goto err_exit;  	if ((retval = parse_badblocks(nand, nsmtd)) != 0) diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c index 8e148f1478f..69eaba690a9 100644 --- a/drivers/mtd/nand/ndfc.c +++ b/drivers/mtd/nand/ndfc.c @@ -30,6 +30,7 @@  #include <linux/mtd/ndfc.h>  #include <linux/slab.h>  #include <linux/mtd/mtd.h> +#include <linux/of_address.h>  #include <linux/of_platform.h>  #include <asm/io.h> diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c index 52115151e4a..e8a5fffd6ab 100644 --- a/drivers/mtd/nand/nuc900_nand.c +++ b/drivers/mtd/nand/nuc900_nand.c @@ -10,7 +10,6 @@   */  #include <linux/slab.h> -#include <linux/init.h>  #include <linux/module.h>  #include <linux/interrupt.h>  #include <linux/io.h> @@ -152,7 +151,8 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,  	if (column != -1 || page_addr != -1) {  		if (column != -1) { -			if (chip->options & NAND_BUSWIDTH_16) +			if (chip->options & NAND_BUSWIDTH_16 && +					
!nand_opcode_8bits(command))  				column >>= 1;  			write_addr_reg(nand, column);  			write_addr_reg(nand, column >> 8 | ENDADDR); @@ -225,7 +225,7 @@ static void nuc900_nand_enable(struct nuc900_nand *nand)  	val = __raw_readl(nand->reg + REG_FMICSR);  	if (!(val & NAND_EN)) -		__raw_writel(val | NAND_EN, REG_FMICSR); +		__raw_writel(val | NAND_EN, nand->reg + REG_FMICSR);  	val = __raw_readl(nand->reg + REG_SMCSR); @@ -241,12 +241,10 @@ static int nuc900_nand_probe(struct platform_device *pdev)  {  	struct nuc900_nand *nuc900_nand;  	struct nand_chip *chip; -	int retval;  	struct resource *res; -	retval = 0; - -	nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL); +	nuc900_nand = devm_kzalloc(&pdev->dev, sizeof(struct nuc900_nand), +				   GFP_KERNEL);  	if (!nuc900_nand)  		return -ENOMEM;  	chip = &(nuc900_nand->chip); @@ -255,11 +253,9 @@ static int nuc900_nand_probe(struct platform_device *pdev)  	nuc900_nand->mtd.owner	= THIS_MODULE;  	spin_lock_init(&nuc900_nand->lock); -	nuc900_nand->clk = clk_get(&pdev->dev, NULL); -	if (IS_ERR(nuc900_nand->clk)) { -		retval = -ENOENT; -		goto fail1; -	} +	nuc900_nand->clk = devm_clk_get(&pdev->dev, NULL); +	if (IS_ERR(nuc900_nand->clk)) +		return -ENOENT;  	clk_enable(nuc900_nand->clk);  	chip->cmdfunc		= nuc900_nand_command_lp; @@ -272,57 +268,29 @@ static int nuc900_nand_probe(struct platform_device *pdev)  	chip->ecc.mode		= NAND_ECC_SOFT;  	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	if (!res) { -		retval = -ENXIO; -		goto fail1; -	} - -	if (!request_mem_region(res->start, resource_size(res), pdev->name)) { -		retval = -EBUSY; -		goto fail1; -	} - -	nuc900_nand->reg = ioremap(res->start, resource_size(res)); -	if (!nuc900_nand->reg) { -		retval = -ENOMEM; -		goto fail2; -	} +	nuc900_nand->reg = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(nuc900_nand->reg)) +		return PTR_ERR(nuc900_nand->reg);  	nuc900_nand_enable(nuc900_nand); -	if (nand_scan(&(nuc900_nand->mtd), 1)) { -		retval = 
-ENXIO; -		goto fail3; -	} +	if (nand_scan(&(nuc900_nand->mtd), 1)) +		return -ENXIO;  	mtd_device_register(&(nuc900_nand->mtd), partitions,  			    ARRAY_SIZE(partitions));  	platform_set_drvdata(pdev, nuc900_nand); -	return retval; - -fail3:	iounmap(nuc900_nand->reg); -fail2:	release_mem_region(res->start, resource_size(res)); -fail1:	kfree(nuc900_nand); -	return retval; +	return 0;  }  static int nuc900_nand_remove(struct platform_device *pdev)  {  	struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); -	struct resource *res;  	nand_release(&nuc900_nand->mtd); -	iounmap(nuc900_nand->reg); - -	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	release_mem_region(res->start, resource_size(res)); -  	clk_disable(nuc900_nand->clk); -	clk_put(nuc900_nand->clk); - -	kfree(nuc900_nand);  	return 0;  } diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 4ecf0e5fd48..f0ed92e210a 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -25,10 +25,8 @@  #include <linux/of.h>  #include <linux/of_device.h> -#ifdef CONFIG_MTD_NAND_OMAP_BCH -#include <linux/bch.h> +#include <linux/mtd/nand_bch.h>  #include <linux/platform_data/elm.h> -#endif  #include <linux/platform_data/mtd-nand-omap2.h> @@ -120,14 +118,9 @@  #define OMAP24XX_DMA_GPMC		4 -#define BCH8_MAX_ERROR		8	/* upto 8 bit correctable */ -#define BCH4_MAX_ERROR		4	/* upto 4 bit correctable */ -  #define SECTOR_BYTES		512  /* 4 bit padding to make byte aligned, 56 = 52 + 4 */  #define BCH4_BIT_PAD		4 -#define BCH8_ECC_MAX		((SECTOR_BYTES + BCH8_ECC_OOB_BYTES) * 8) -#define BCH4_ECC_MAX		((SECTOR_BYTES + BCH4_ECC_OOB_BYTES) * 8)  /* GPMC ecc engine settings for read */  #define BCH_WRAPMODE_1		1	/* BCH wrap mode 1 */ @@ -141,7 +134,13 @@  #define BCH_ECC_SIZE0		0x0	/* ecc_size0 = 0, no oob protection */  #define BCH_ECC_SIZE1		0x20	/* ecc_size1 = 32 */ +#define BADBLOCK_MARKER_LENGTH		2 +  #ifdef CONFIG_MTD_NAND_OMAP_BCH +static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 
0xd0, 0x61, 0xb3, 0xf1, 0x55, +				0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78, +				0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93, +				0x07, 0x0e};  static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,  	0xac, 0x6b, 0xff, 0x99, 0x7b};  static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10}; @@ -149,17 +148,6 @@ static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};  /* oob info generated runtime depending on ecc algorithm and layout selected */  static struct nand_ecclayout omap_oobinfo; -/* Define some generic bad / good block scan pattern which are used - * while scanning a device for factory marked good / bad blocks - */ -static uint8_t scan_ff_pattern[] = { 0xff }; -static struct nand_bbt_descr bb_descrip_flashbased = { -	.options = NAND_BBT_SCANALLPAGES, -	.offs = 0, -	.len = 1, -	.pattern = scan_ff_pattern, -}; -  struct omap_nand_info {  	struct nand_hw_control		controller; @@ -170,7 +158,7 @@ struct omap_nand_info {  	int				gpmc_cs;  	unsigned long			phys_base; -	unsigned long			mem_size; +	enum omap_ecc			ecc_opt;  	struct completion		comp;  	struct dma_chan			*dma;  	int				gpmc_irq_fifo; @@ -182,14 +170,9 @@ struct omap_nand_info {  	u_char				*buf;  	int					buf_len;  	struct gpmc_nand_regs		reg; - -#ifdef CONFIG_MTD_NAND_OMAP_BCH -	struct bch_control             *bch; -	struct nand_ecclayout           ecclayout; -	bool				is_elm_used; +	/* fields specific for BCHx_HW ECC scheme */  	struct device			*elm_dev;  	struct device_node		*of_node; -#endif  };  /** @@ -1058,10 +1041,8 @@ static int omap_dev_ready(struct mtd_info *mtd)  	}  } -#ifdef CONFIG_MTD_NAND_OMAP_BCH -  /** - * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction + * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation   * @mtd: MTD device structure   * @mode: Read/Write mode   * @@ -1072,50 +1053,86 @@ static int omap_dev_ready(struct mtd_info *mtd)   * eccsize0 = 0  (no additional 
protected byte in spare area)   * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)   */ -static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode) +static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)  { -	int nerrors; +	unsigned int bch_type;  	unsigned int dev_width, nsectors;  	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,  						   mtd); +	enum omap_ecc ecc_opt = info->ecc_opt;  	struct nand_chip *chip = mtd->priv;  	u32 val, wr_mode;  	unsigned int ecc_size1, ecc_size0; -	/* Using wrapping mode 6 for writing */ -	wr_mode = BCH_WRAPMODE_6; - -	/* -	 * ECC engine enabled for valid ecc_size0 nibbles -	 * and disabled for ecc_size1 nibbles. -	 */ -	ecc_size0 = BCH_ECC_SIZE0; -	ecc_size1 = BCH_ECC_SIZE1; - -	/* Perform ecc calculation on 512-byte sector */ -	nsectors = 1; - -	/* Update number of error correction */ -	nerrors = info->nand.ecc.strength; - -	/* Multi sector reading/writing for NAND flash with page size < 4096 */ -	if (info->is_elm_used && (mtd->writesize <= 4096)) { +	/* GPMC configurations for calculating ECC */ +	switch (ecc_opt) { +	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: +		bch_type = 0; +		nsectors = 1;  		if (mode == NAND_ECC_READ) { -			/* Using wrapping mode 1 for reading */ -			wr_mode = BCH_WRAPMODE_1; - -			/* -			 * ECC engine enabled for ecc_size0 nibbles -			 * and disabled for ecc_size1 nibbles. -			 */ -			ecc_size0 = (nerrors == 8) ? -				BCH8R_ECC_SIZE0 : BCH4R_ECC_SIZE0; -			ecc_size1 = (nerrors == 8) ? 
-				BCH8R_ECC_SIZE1 : BCH4R_ECC_SIZE1; +			wr_mode	  = BCH_WRAPMODE_6; +			ecc_size0 = BCH_ECC_SIZE0; +			ecc_size1 = BCH_ECC_SIZE1; +		} else { +			wr_mode   = BCH_WRAPMODE_6; +			ecc_size0 = BCH_ECC_SIZE0; +			ecc_size1 = BCH_ECC_SIZE1;  		} - -		/* Perform ecc calculation for one page (< 4096) */ -		nsectors = info->nand.ecc.steps; +		break; +	case OMAP_ECC_BCH4_CODE_HW: +		bch_type = 0; +		nsectors = chip->ecc.steps; +		if (mode == NAND_ECC_READ) { +			wr_mode	  = BCH_WRAPMODE_1; +			ecc_size0 = BCH4R_ECC_SIZE0; +			ecc_size1 = BCH4R_ECC_SIZE1; +		} else { +			wr_mode   = BCH_WRAPMODE_6; +			ecc_size0 = BCH_ECC_SIZE0; +			ecc_size1 = BCH_ECC_SIZE1; +		} +		break; +	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: +		bch_type = 1; +		nsectors = 1; +		if (mode == NAND_ECC_READ) { +			wr_mode	  = BCH_WRAPMODE_6; +			ecc_size0 = BCH_ECC_SIZE0; +			ecc_size1 = BCH_ECC_SIZE1; +		} else { +			wr_mode   = BCH_WRAPMODE_6; +			ecc_size0 = BCH_ECC_SIZE0; +			ecc_size1 = BCH_ECC_SIZE1; +		} +		break; +	case OMAP_ECC_BCH8_CODE_HW: +		bch_type = 1; +		nsectors = chip->ecc.steps; +		if (mode == NAND_ECC_READ) { +			wr_mode	  = BCH_WRAPMODE_1; +			ecc_size0 = BCH8R_ECC_SIZE0; +			ecc_size1 = BCH8R_ECC_SIZE1; +		} else { +			wr_mode   = BCH_WRAPMODE_6; +			ecc_size0 = BCH_ECC_SIZE0; +			ecc_size1 = BCH_ECC_SIZE1; +		} +		break; +	case OMAP_ECC_BCH16_CODE_HW: +		bch_type = 0x2; +		nsectors = chip->ecc.steps; +		if (mode == NAND_ECC_READ) { +			wr_mode	  = 0x01; +			ecc_size0 = 52; /* ECC bits in nibbles per sector */ +			ecc_size1 = 0;  /* non-ECC bits in nibbles per sector */ +		} else { +			wr_mode	  = 0x01; +			ecc_size0 = 0;  /* extra bits in nibbles per sector */ +			ecc_size1 = 52; /* OOB bits in nibbles per sector */ +		} +		break; +	default: +		return;  	}  	writel(ECC1, info->reg.gpmc_ecc_control); @@ -1128,7 +1145,7 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)  	/* BCH configuration */  	val = ((1                        << 16) | /* enable BCH */ -	    
   (((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */ +	       (bch_type		 << 12) | /* BCH4/BCH8/BCH16 */  	       (wr_mode                  <<  8) | /* wrap mode */  	       (dev_width                <<  7) | /* bus width */  	       (((nsectors-1) & 0x7)     <<  4) | /* number of sectors */ @@ -1141,127 +1158,40 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)  	writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);  } -/** - * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes - * @mtd: MTD device structure - * @dat: The pointer to data on which ecc is computed - * @ecc_code: The ecc_code buffer - */ -static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat, -				    u_char *ecc_code) -{ -	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, -						   mtd); -	unsigned long nsectors, val1, val2; -	int i; - -	nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; - -	for (i = 0; i < nsectors; i++) { - -		/* Read hw-computed remainder */ -		val1 = readl(info->reg.gpmc_bch_result0[i]); -		val2 = readl(info->reg.gpmc_bch_result1[i]); - -		/* -		 * Add constant polynomial to remainder, in order to get an ecc -		 * sequence of 0xFFs for a buffer filled with 0xFFs; and -		 * left-justify the resulting polynomial. 
-		 */ -		*ecc_code++ = 0x28 ^ ((val2 >> 12) & 0xFF); -		*ecc_code++ = 0x13 ^ ((val2 >>  4) & 0xFF); -		*ecc_code++ = 0xcc ^ (((val2 & 0xF) << 4)|((val1 >> 28) & 0xF)); -		*ecc_code++ = 0x39 ^ ((val1 >> 20) & 0xFF); -		*ecc_code++ = 0x96 ^ ((val1 >> 12) & 0xFF); -		*ecc_code++ = 0xac ^ ((val1 >> 4) & 0xFF); -		*ecc_code++ = 0x7f ^ ((val1 & 0xF) << 4); -	} - -	return 0; -} - -/** - * omap3_calculate_ecc_bch8 - Generate 13 bytes of ECC bytes - * @mtd: MTD device structure - * @dat: The pointer to data on which ecc is computed - * @ecc_code: The ecc_code buffer - */ -static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat, -				    u_char *ecc_code) -{ -	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, -						   mtd); -	unsigned long nsectors, val1, val2, val3, val4; -	int i; - -	nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; - -	for (i = 0; i < nsectors; i++) { - -		/* Read hw-computed remainder */ -		val1 = readl(info->reg.gpmc_bch_result0[i]); -		val2 = readl(info->reg.gpmc_bch_result1[i]); -		val3 = readl(info->reg.gpmc_bch_result2[i]); -		val4 = readl(info->reg.gpmc_bch_result3[i]); - -		/* -		 * Add constant polynomial to remainder, in order to get an ecc -		 * sequence of 0xFFs for a buffer filled with 0xFFs. 
-		 */ -		*ecc_code++ = 0xef ^ (val4 & 0xFF); -		*ecc_code++ = 0x51 ^ ((val3 >> 24) & 0xFF); -		*ecc_code++ = 0x2e ^ ((val3 >> 16) & 0xFF); -		*ecc_code++ = 0x09 ^ ((val3 >> 8) & 0xFF); -		*ecc_code++ = 0xed ^ (val3 & 0xFF); -		*ecc_code++ = 0x93 ^ ((val2 >> 24) & 0xFF); -		*ecc_code++ = 0x9a ^ ((val2 >> 16) & 0xFF); -		*ecc_code++ = 0xc2 ^ ((val2 >> 8) & 0xFF); -		*ecc_code++ = 0x97 ^ (val2 & 0xFF); -		*ecc_code++ = 0x79 ^ ((val1 >> 24) & 0xFF); -		*ecc_code++ = 0xe5 ^ ((val1 >> 16) & 0xFF); -		*ecc_code++ = 0x24 ^ ((val1 >> 8) & 0xFF); -		*ecc_code++ = 0xb5 ^ (val1 & 0xFF); -	} - -	return 0; -} +static u8  bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f}; +static u8  bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2, +				0x97, 0x79, 0xe5, 0x24, 0xb5};  /** - * omap3_calculate_ecc_bch - Generate bytes of ECC bytes + * omap_calculate_ecc_bch - Generate bytes of ECC bytes   * @mtd:	MTD device structure   * @dat:	The pointer to data on which ecc is computed   * @ecc_code:	The ecc_code buffer   *   * Support calculating of BCH4/8 ecc vectors for the page   */ -static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat, -				    u_char *ecc_code) +static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd, +					const u_char *dat, u_char *ecc_calc)  {  	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,  						   mtd); +	int eccbytes	= info->nand.ecc.bytes; +	struct gpmc_nand_regs	*gpmc_regs = &info->reg; +	u8 *ecc_code;  	unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4; -	int i, eccbchtsel; +	u32 val; +	int i, j;  	nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; -	/* -	 * find BCH scheme used -	 * 0 -> BCH4 -	 * 1 -> BCH8 -	 */ -	eccbchtsel = ((readl(info->reg.gpmc_ecc_config) >> 12) & 0x3); -  	for (i = 0; i < nsectors; i++) { - -		/* Read hw-computed remainder */ -		bch_val1 = readl(info->reg.gpmc_bch_result0[i]); -		bch_val2 = 
readl(info->reg.gpmc_bch_result1[i]); -		if (eccbchtsel) { -			bch_val3 = readl(info->reg.gpmc_bch_result2[i]); -			bch_val4 = readl(info->reg.gpmc_bch_result3[i]); -		} - -		if (eccbchtsel) { -			/* BCH8 ecc scheme */ +		ecc_code = ecc_calc; +		switch (info->ecc_opt) { +		case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: +		case OMAP_ECC_BCH8_CODE_HW: +			bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); +			bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); +			bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]); +			bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);  			*ecc_code++ = (bch_val4 & 0xFF);  			*ecc_code++ = ((bch_val3 >> 24) & 0xFF);  			*ecc_code++ = ((bch_val3 >> 16) & 0xFF); @@ -1275,14 +1205,11 @@ static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat,  			*ecc_code++ = ((bch_val1 >> 16) & 0xFF);  			*ecc_code++ = ((bch_val1 >> 8) & 0xFF);  			*ecc_code++ = (bch_val1 & 0xFF); -			/* -			 * Setting 14th byte to zero to handle -			 * erased page & maintain compatibility -			 * with RBL -			 */ -			*ecc_code++ = 0x0; -		} else { -			/* BCH4 ecc scheme */ +			break; +		case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: +		case OMAP_ECC_BCH4_CODE_HW: +			bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); +			bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);  			*ecc_code++ = ((bch_val2 >> 12) & 0xFF);  			*ecc_code++ = ((bch_val2 >> 4) & 0xFF);  			*ecc_code++ = ((bch_val2 & 0xF) << 4) | @@ -1291,17 +1218,81 @@ static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat,  			*ecc_code++ = ((bch_val1 >> 12) & 0xFF);  			*ecc_code++ = ((bch_val1 >> 4) & 0xFF);  			*ecc_code++ = ((bch_val1 & 0xF) << 4); -			/* -			 * Setting 8th byte to zero to handle -			 * erased page -			 */ -			*ecc_code++ = 0x0; +			break; +		case OMAP_ECC_BCH16_CODE_HW: +			val = readl(gpmc_regs->gpmc_bch_result6[i]); +			ecc_code[0]  = ((val >>  8) & 0xFF); +			ecc_code[1]  = ((val >>  0) & 0xFF); +			val = readl(gpmc_regs->gpmc_bch_result5[i]); +			ecc_code[2]  = ((val 
>> 24) & 0xFF); +			ecc_code[3]  = ((val >> 16) & 0xFF); +			ecc_code[4]  = ((val >>  8) & 0xFF); +			ecc_code[5]  = ((val >>  0) & 0xFF); +			val = readl(gpmc_regs->gpmc_bch_result4[i]); +			ecc_code[6]  = ((val >> 24) & 0xFF); +			ecc_code[7]  = ((val >> 16) & 0xFF); +			ecc_code[8]  = ((val >>  8) & 0xFF); +			ecc_code[9]  = ((val >>  0) & 0xFF); +			val = readl(gpmc_regs->gpmc_bch_result3[i]); +			ecc_code[10] = ((val >> 24) & 0xFF); +			ecc_code[11] = ((val >> 16) & 0xFF); +			ecc_code[12] = ((val >>  8) & 0xFF); +			ecc_code[13] = ((val >>  0) & 0xFF); +			val = readl(gpmc_regs->gpmc_bch_result2[i]); +			ecc_code[14] = ((val >> 24) & 0xFF); +			ecc_code[15] = ((val >> 16) & 0xFF); +			ecc_code[16] = ((val >>  8) & 0xFF); +			ecc_code[17] = ((val >>  0) & 0xFF); +			val = readl(gpmc_regs->gpmc_bch_result1[i]); +			ecc_code[18] = ((val >> 24) & 0xFF); +			ecc_code[19] = ((val >> 16) & 0xFF); +			ecc_code[20] = ((val >>  8) & 0xFF); +			ecc_code[21] = ((val >>  0) & 0xFF); +			val = readl(gpmc_regs->gpmc_bch_result0[i]); +			ecc_code[22] = ((val >> 24) & 0xFF); +			ecc_code[23] = ((val >> 16) & 0xFF); +			ecc_code[24] = ((val >>  8) & 0xFF); +			ecc_code[25] = ((val >>  0) & 0xFF); +			break; +		default: +			return -EINVAL; +		} + +		/* ECC scheme specific syndrome customizations */ +		switch (info->ecc_opt) { +		case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: +			/* Add constant polynomial to remainder, so that +			 * ECC of blank pages results in 0x0 on reading back */ +			for (j = 0; j < eccbytes; j++) +				ecc_calc[j] ^= bch4_polynomial[j]; +			break; +		case OMAP_ECC_BCH4_CODE_HW: +			/* Set  8th ECC byte as 0x0 for ROM compatibility */ +			ecc_calc[eccbytes - 1] = 0x0; +			break; +		case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: +			/* Add constant polynomial to remainder, so that +			 * ECC of blank pages results in 0x0 on reading back */ +			for (j = 0; j < eccbytes; j++) +				ecc_calc[j] ^= bch8_polynomial[j]; +			break; +		case OMAP_ECC_BCH8_CODE_HW: +			/* Set 
14th ECC byte as 0x0 for ROM compatibility */ +			ecc_calc[eccbytes - 1] = 0x0; +			break; +		case OMAP_ECC_BCH16_CODE_HW: +			break; +		default: +			return -EINVAL;  		} + +	ecc_calc += eccbytes;  	}  	return 0;  } +#ifdef CONFIG_MTD_NAND_OMAP_BCH  /**   * erased_sector_bitflips - count bit flips   * @data:	data sector buffer @@ -1349,55 +1340,50 @@ static int erased_sector_bitflips(u_char *data, u_char *oob,   * @calc_ecc:	ecc read from HW ECC registers   *   * Calculated ecc vector reported as zero in case of non-error pages. - * In case of error/erased pages non-zero error vector is reported. - * In case of non-zero ecc vector, check read_ecc at fixed offset - * (x = 13/7 in case of BCH8/4 == 0) to find page programmed or not. - * To handle bit flips in this data, count the number of 0's in - * read_ecc[x] and check if it greater than 4. If it is less, it is - * programmed page, else erased page. - * - * 1. If page is erased, check with standard ecc vector (ecc vector - * for erased page to find any bit flip). If check fails, bit flip - * is present in erased page. Count the bit flips in erased page and - * if it falls under correctable level, report page with 0xFF and - * update the correctable bit information. - * 2. If error is reported on programmed page, update elm error - * vector and correct the page with ELM error correction routine. - * + * In case of non-zero ecc vector, first filter out erased-pages, and + * then process data via ELM to detect bit-flips.   
*/  static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,  				u_char *read_ecc, u_char *calc_ecc)  {  	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,  			mtd); +	struct nand_ecc_ctrl *ecc = &info->nand.ecc;  	int eccsteps = info->nand.ecc.steps;  	int i , j, stat = 0; -	int eccsize, eccflag, ecc_vector_size; +	int eccflag, actual_eccbytes;  	struct elm_errorvec err_vec[ERROR_VECTOR_MAX];  	u_char *ecc_vec = calc_ecc;  	u_char *spare_ecc = read_ecc;  	u_char *erased_ecc_vec; -	enum bch_ecc type; +	u_char *buf; +	int bitflip_count;  	bool is_error_reported = false; +	u32 bit_pos, byte_pos, error_max, pos; +	int err; -	/* Initialize elm error vector to zero */ -	memset(err_vec, 0, sizeof(err_vec)); - -	if (info->nand.ecc.strength == BCH8_MAX_ERROR) { -		type = BCH8_ECC; -		erased_ecc_vec = bch8_vector; -	} else { -		type = BCH4_ECC; +	switch (info->ecc_opt) { +	case OMAP_ECC_BCH4_CODE_HW: +		/* omit  7th ECC byte reserved for ROM code compatibility */ +		actual_eccbytes = ecc->bytes - 1;  		erased_ecc_vec = bch4_vector; +		break; +	case OMAP_ECC_BCH8_CODE_HW: +		/* omit 14th ECC byte reserved for ROM code compatibility */ +		actual_eccbytes = ecc->bytes - 1; +		erased_ecc_vec = bch8_vector; +		break; +	case OMAP_ECC_BCH16_CODE_HW: +		actual_eccbytes = ecc->bytes; +		erased_ecc_vec = bch16_vector; +		break; +	default: +		pr_err("invalid driver configuration\n"); +		return -EINVAL;  	} -	ecc_vector_size = info->nand.ecc.bytes; - -	/* -	 * Remove extra byte padding for BCH8 RBL -	 * compatibility and erased page handling -	 */ -	eccsize = ecc_vector_size - 1; +	/* Initialize elm error vector to zero */ +	memset(err_vec, 0, sizeof(err_vec));  	for (i = 0; i < eccsteps ; i++) {  		eccflag = 0;	/* initialize eccflag */ @@ -1406,8 +1392,7 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,  		 * Check any error reported,  		 * In case of error, non zero ecc reported.  		 
*/ - -		for (j = 0; (j < eccsize); j++) { +		for (j = 0; j < actual_eccbytes; j++) {  			if (calc_ecc[j] != 0) {  				eccflag = 1; /* non zero ecc, error present */  				break; @@ -1415,76 +1400,73 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,  		}  		if (eccflag == 1) { -			/* -			 * Set threshold to minimum of 4, half of ecc.strength/2 -			 * to allow max bit flip in byte to 4 -			 */ -			unsigned int threshold = min_t(unsigned int, 4, -					info->nand.ecc.strength / 2); - -			/* -			 * Check data area is programmed by counting -			 * number of 0's at fixed offset in spare area. -			 * Checking count of 0's against threshold. -			 * In case programmed page expects at least threshold -			 * zeros in byte. -			 * If zeros are less than threshold for programmed page/ -			 * zeros are more than threshold erased page, either -			 * case page reported as uncorrectable. -			 */ -			if (hweight8(~read_ecc[eccsize]) >= threshold) { +			if (memcmp(calc_ecc, erased_ecc_vec, +						actual_eccbytes) == 0) {  				/* -				 * Update elm error vector as -				 * data area is programmed +				 * calc_ecc[] matches pattern for ECC(all 0xff) +				 * so this is definitely an erased-page  				 */ -				err_vec[i].error_reported = true; -				is_error_reported = true;  			} else { -				/* Error reported in erased page */ -				int bitflip_count; -				u_char *buf = &data[info->nand.ecc.size * i]; - -				if (memcmp(calc_ecc, erased_ecc_vec, eccsize)) { -					bitflip_count = erased_sector_bitflips( -							buf, read_ecc, info); - -					if (bitflip_count) -						stat += bitflip_count; -					else -						return -EINVAL; +				buf = &data[info->nand.ecc.size * i]; +				/* +				 * count number of 0-bits in read_buf. 
+				 * This check can be removed once a similar +				 * check is introduced in generic NAND driver +				 */ +				bitflip_count = erased_sector_bitflips( +						buf, read_ecc, info); +				if (bitflip_count) { +					/* +					 * number of 0-bits within ECC limits +					 * So this may be an erased-page +					 */ +					stat += bitflip_count; +				} else { +					/* +					 * Too many 0-bits. It may be a +					 * - programmed-page, OR +					 * - erased-page with many bit-flips +					 * So this page requires check by ELM +					 */ +					err_vec[i].error_reported = true; +					is_error_reported = true;  				}  			}  		}  		/* Update the ecc vector */ -		calc_ecc += ecc_vector_size; -		read_ecc += ecc_vector_size; +		calc_ecc += ecc->bytes; +		read_ecc += ecc->bytes;  	}  	/* Check if any error reported */  	if (!is_error_reported) -		return 0; +		return stat;  	/* Decode BCH error using ELM module */  	elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec); +	err = 0;  	for (i = 0; i < eccsteps; i++) { -		if (err_vec[i].error_reported) { +		if (err_vec[i].error_uncorrectable) { +			pr_err("nand: uncorrectable bit-flips found\n"); +			err = -EBADMSG; +		} else if (err_vec[i].error_reported) {  			for (j = 0; j < err_vec[i].error_count; j++) { -				u32 bit_pos, byte_pos, error_max, pos; - -				if (type == BCH8_ECC) -					error_max = BCH8_ECC_MAX; -				else -					error_max = BCH4_ECC_MAX; - -				if (info->nand.ecc.strength == BCH8_MAX_ERROR) -					pos = err_vec[i].error_loc[j]; -				else -					/* Add 4 to take care 4 bit padding */ +				switch (info->ecc_opt) { +				case OMAP_ECC_BCH4_CODE_HW: +					/* Add 4 bits to take care of padding */  					pos = err_vec[i].error_loc[j] +  						BCH4_BIT_PAD; - +					break; +				case OMAP_ECC_BCH8_CODE_HW: +				case OMAP_ECC_BCH16_CODE_HW: +					pos = err_vec[i].error_loc[j]; +					break; +				default: +					return -EINVAL; +				} +				error_max = (ecc->size + actual_eccbytes) * 8;  				/* Calculate bit position of error */  				
bit_pos = pos % 8; @@ -1492,13 +1474,22 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,  				byte_pos = (error_max - pos - 1) / 8;  				if (pos < error_max) { -					if (byte_pos < 512) +					if (byte_pos < 512) { +						pr_debug("bitflip@dat[%d]=%x\n", +						     byte_pos, data[byte_pos]);  						data[byte_pos] ^= 1 << bit_pos; -					else +					} else { +						pr_debug("bitflip@oob[%d]=%x\n", +							(byte_pos - 512), +						     spare_ecc[byte_pos - 512]);  						spare_ecc[byte_pos - 512] ^=  							1 << bit_pos; +					} +				} else { +					pr_err("invalid bit-flip @ %d:%d\n", +							 byte_pos, bit_pos); +					err = -EBADMSG;  				} -				/* else, not interested to correct ecc */  			}  		} @@ -1506,48 +1497,11 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,  		stat += err_vec[i].error_count;  		/* Update page data with sector size */ -		data += info->nand.ecc.size; -		spare_ecc += ecc_vector_size; +		data += ecc->size; +		spare_ecc += ecc->bytes;  	} -	for (i = 0; i < eccsteps; i++) -		/* Return error if uncorrectable error present */ -		if (err_vec[i].error_uncorrectable) -			return -EINVAL; - -	return stat; -} - -/** - * omap3_correct_data_bch - Decode received data and correct errors - * @mtd: MTD device structure - * @data: page data - * @read_ecc: ecc read from nand flash - * @calc_ecc: ecc read from HW ECC registers - */ -static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data, -				  u_char *read_ecc, u_char *calc_ecc) -{ -	int i, count; -	/* cannot correct more than 8 errors */ -	unsigned int errloc[8]; -	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, -						   mtd); - -	count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL, -			   errloc); -	if (count > 0) { -		/* correct errors */ -		for (i = 0; i < count; i++) { -			/* correct data only, not ecc bytes */ -			if (errloc[i] < 8*512) -				data[errloc[i]/8] ^= 1 << (errloc[i] & 7); -			pr_debug("corrected 
bitflip %u\n", errloc[i]); -		} -	} else if (count < 0) { -		pr_err("ecc unrecoverable error\n"); -	} -	return count; +	return (err) ? err : stat;  }  /** @@ -1637,197 +1591,48 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,  }  /** - * omap3_free_bch - Release BCH ecc resources - * @mtd: MTD device structure - */ -static void omap3_free_bch(struct mtd_info *mtd) -{ -	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, -						   mtd); -	if (info->bch) { -		free_bch(info->bch); -		info->bch = NULL; -	} -} - -/** - * omap3_init_bch - Initialize BCH ECC - * @mtd: MTD device structure - * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW) - */ -static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt) -{ -	int max_errors; -	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, -						   mtd); -#ifdef CONFIG_MTD_NAND_OMAP_BCH8 -	const int hw_errors = BCH8_MAX_ERROR; -#else -	const int hw_errors = BCH4_MAX_ERROR; -#endif -	enum bch_ecc bch_type; -	const __be32 *parp; -	int lenp; -	struct device_node *elm_node; - -	info->bch = NULL; - -	max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 
-		BCH8_MAX_ERROR : BCH4_MAX_ERROR; -	if (max_errors != hw_errors) { -		pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported", -		       max_errors, hw_errors); -		goto fail; -	} - -	info->nand.ecc.size = 512; -	info->nand.ecc.hwctl = omap3_enable_hwecc_bch; -	info->nand.ecc.mode = NAND_ECC_HW; -	info->nand.ecc.strength = max_errors; - -	if (hw_errors == BCH8_MAX_ERROR) -		bch_type = BCH8_ECC; -	else -		bch_type = BCH4_ECC; - -	/* Detect availability of ELM module */ -	parp = of_get_property(info->of_node, "elm_id", &lenp); -	if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) { -		pr_err("Missing elm_id property, fall back to Software BCH\n"); -		info->is_elm_used = false; -	} else { -		struct platform_device *pdev; - -		elm_node = of_find_node_by_phandle(be32_to_cpup(parp)); -		pdev = of_find_device_by_node(elm_node); -		info->elm_dev = &pdev->dev; - -		if (elm_config(info->elm_dev, bch_type) == 0) -			info->is_elm_used = true; -	} - -	if (info->is_elm_used && (mtd->writesize <= 4096)) { - -		if (hw_errors == BCH8_MAX_ERROR) -			info->nand.ecc.bytes = BCH8_SIZE; -		else -			info->nand.ecc.bytes = BCH4_SIZE; - -		info->nand.ecc.correct = omap_elm_correct_data; -		info->nand.ecc.calculate = omap3_calculate_ecc_bch; -		info->nand.ecc.read_page = omap_read_page_bch; -		info->nand.ecc.write_page = omap_write_page_bch; -	} else { -		/* -		 * software bch library is only used to detect and -		 * locate errors -		 */ -		info->bch = init_bch(13, max_errors, -				0x201b /* hw polynomial */); -		if (!info->bch) -			goto fail; - -		info->nand.ecc.correct = omap3_correct_data_bch; - -		/* -		 * The number of corrected errors in an ecc block that will -		 * trigger block scrubbing defaults to the ecc strength (4 or 8) -		 * Set mtd->bitflip_threshold here to define a custom threshold. 
-		 */ - -		if (max_errors == 8) { -			info->nand.ecc.bytes = 13; -			info->nand.ecc.calculate = omap3_calculate_ecc_bch8; -		} else { -			info->nand.ecc.bytes = 7; -			info->nand.ecc.calculate = omap3_calculate_ecc_bch4; -		} -	} - -	pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors); -	return 0; -fail: -	omap3_free_bch(mtd); -	return -1; -} - -/** - * omap3_init_bch_tail - Build an oob layout for BCH ECC correction. - * @mtd: MTD device structure + * is_elm_present - checks for presence of ELM module by scanning DT nodes + * @omap_nand_info: NAND device structure containing platform data + * @bch_type: 0x0=BCH4, 0x1=BCH8, 0x2=BCH16   */ -static int omap3_init_bch_tail(struct mtd_info *mtd) +static int is_elm_present(struct omap_nand_info *info, +			struct device_node *elm_node, enum bch_ecc bch_type)  { -	int i, steps, offset; -	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, -						   mtd); -	struct nand_ecclayout *layout = &info->ecclayout; - -	/* build oob layout */ -	steps = mtd->writesize/info->nand.ecc.size; -	layout->eccbytes = steps*info->nand.ecc.bytes; - -	/* do not bother creating special oob layouts for small page devices */ -	if (mtd->oobsize < 64) { -		pr_err("BCH ecc is not supported on small page devices\n"); -		goto fail; +	struct platform_device *pdev; +	struct nand_ecc_ctrl *ecc = &info->nand.ecc; +	int err; +	/* check whether elm-id is passed via DT */ +	if (!elm_node) { +		pr_err("nand: error: ELM DT node not found\n"); +		return -ENODEV;  	} - -	/* reserve 2 bytes for bad block marker */ -	if (layout->eccbytes+2 > mtd->oobsize) { -		pr_err("no oob layout available for oobsize %d eccbytes %u\n", -		       mtd->oobsize, layout->eccbytes); -		goto fail; +	pdev = of_find_device_by_node(elm_node); +	/* check whether ELM device is registered */ +	if (!pdev) { +		pr_err("nand: error: ELM device not found\n"); +		return -ENODEV;  	} +	/* ELM module available, now configure it */ +	info->elm_dev = &pdev->dev; 
+	err = elm_config(info->elm_dev, bch_type, +		(info->mtd.writesize / ecc->size), ecc->size, ecc->bytes); -	/* ECC layout compatible with RBL for BCH8 */ -	if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE)) -		offset = 2; -	else -		offset = mtd->oobsize - layout->eccbytes; - -	/* put ecc bytes at oob tail */ -	for (i = 0; i < layout->eccbytes; i++) -		layout->eccpos[i] = offset + i; - -	if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE)) -		layout->oobfree[0].offset = 2 + layout->eccbytes * steps; -	else -		layout->oobfree[0].offset = 2; - -	layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes; -	info->nand.ecc.layout = layout; - -	if (!(info->nand.options & NAND_BUSWIDTH_16)) -		info->nand.badblock_pattern = &bb_descrip_flashbased; -	return 0; -fail: -	omap3_free_bch(mtd); -	return -1; -} - -#else -static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt) -{ -	pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n"); -	return -1; -} -static int omap3_init_bch_tail(struct mtd_info *mtd) -{ -	return -1; -} -static void omap3_free_bch(struct mtd_info *mtd) -{ +	return err;  } -#endif /* CONFIG_MTD_NAND_OMAP_BCH */ +#endif /* CONFIG_MTD_NAND_ECC_BCH */  static int omap_nand_probe(struct platform_device *pdev)  {  	struct omap_nand_info		*info;  	struct omap_nand_platform_data	*pdata; +	struct mtd_info			*mtd; +	struct nand_chip		*nand_chip; +	struct nand_ecclayout		*ecclayout;  	int				err; -	int				i, offset; -	dma_cap_mask_t mask; -	unsigned sig; +	int				i; +	dma_cap_mask_t			mask; +	unsigned			sig; +	unsigned			oob_index;  	struct resource			*res;  	struct mtd_part_parser_data	ppdata = {}; @@ -1837,7 +1642,8 @@ static int omap_nand_probe(struct platform_device *pdev)  		return -ENODEV;  	} -	info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL); +	info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info), +				GFP_KERNEL);  	if (!info)  		return -ENOMEM; @@ -1846,47 +1652,30 @@ static int omap_nand_probe(struct platform_device 
*pdev)  	spin_lock_init(&info->controller.lock);  	init_waitqueue_head(&info->controller.wq); -	info->pdev = pdev; - +	info->pdev		= pdev;  	info->gpmc_cs		= pdata->cs;  	info->reg		= pdata->reg; - -	info->mtd.priv		= &info->nand; -	info->mtd.name		= dev_name(&pdev->dev); -	info->mtd.owner		= THIS_MODULE; - -	info->nand.options	= pdata->devsize; -	info->nand.options	|= NAND_SKIP_BBTSCAN; -#ifdef CONFIG_MTD_NAND_OMAP_BCH  	info->of_node		= pdata->of_node; -#endif +	info->ecc_opt		= pdata->ecc_opt; +	mtd			= &info->mtd; +	mtd->priv		= &info->nand; +	mtd->name		= dev_name(&pdev->dev); +	mtd->owner		= THIS_MODULE; +	nand_chip		= &info->nand; +	nand_chip->ecc.priv	= NULL; +	nand_chip->options	|= NAND_SKIP_BBTSCAN;  	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	if (res == NULL) { -		err = -EINVAL; -		dev_err(&pdev->dev, "error getting memory resource\n"); -		goto out_free_info; -	} +	nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(nand_chip->IO_ADDR_R)) +		return PTR_ERR(nand_chip->IO_ADDR_R);  	info->phys_base = res->start; -	info->mem_size = resource_size(res); - -	if (!request_mem_region(info->phys_base, info->mem_size, -				pdev->dev.driver->name)) { -		err = -EBUSY; -		goto out_free_info; -	} - -	info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size); -	if (!info->nand.IO_ADDR_R) { -		err = -ENOMEM; -		goto out_release_mem_region; -	} -	info->nand.controller = &info->controller; +	nand_chip->controller = &info->controller; -	info->nand.IO_ADDR_W = info->nand.IO_ADDR_R; -	info->nand.cmd_ctrl  = omap_hwcontrol; +	nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R; +	nand_chip->cmd_ctrl  = omap_hwcontrol;  	/*  	 * If RDY/BSY line is connected to OMAP then use the omap ready @@ -1896,27 +1685,37 @@ static int omap_nand_probe(struct platform_device *pdev)  	 * device and read status register until you get a failure or success  	 */  	if (pdata->dev_ready) { -		info->nand.dev_ready = omap_dev_ready; -		info->nand.chip_delay = 0; +	
	nand_chip->dev_ready = omap_dev_ready; +		nand_chip->chip_delay = 0;  	} else { -		info->nand.waitfunc = omap_wait; -		info->nand.chip_delay = 50; +		nand_chip->waitfunc = omap_wait; +		nand_chip->chip_delay = 50; +	} + +	/* scan NAND device connected to chip controller */ +	nand_chip->options |= pdata->devsize & NAND_BUSWIDTH_16; +	if (nand_scan_ident(mtd, 1, NULL)) { +		pr_err("nand device scan failed, may be bus-width mismatch\n"); +		err = -ENXIO; +		goto return_error; +	} + +	/* check for small page devices */ +	if ((mtd->oobsize < 64) && (pdata->ecc_opt != OMAP_ECC_HAM1_CODE_HW)) { +		pr_err("small page devices are not supported\n"); +		err = -EINVAL; +		goto return_error;  	} +	/* re-populate low-level callbacks based on xfer modes */  	switch (pdata->xfer_type) {  	case NAND_OMAP_PREFETCH_POLLED: -		info->nand.read_buf   = omap_read_buf_pref; -		info->nand.write_buf  = omap_write_buf_pref; +		nand_chip->read_buf   = omap_read_buf_pref; +		nand_chip->write_buf  = omap_write_buf_pref;  		break;  	case NAND_OMAP_POLLED: -		if (info->nand.options & NAND_BUSWIDTH_16) { -			info->nand.read_buf   = omap_read_buf16; -			info->nand.write_buf  = omap_write_buf16; -		} else { -			info->nand.read_buf   = omap_read_buf8; -			info->nand.write_buf  = omap_write_buf8; -		} +		/* Use nand_base defaults for {read,write}_buf */  		break;  	case NAND_OMAP_PREFETCH_DMA: @@ -1927,7 +1726,7 @@ static int omap_nand_probe(struct platform_device *pdev)  		if (!info->dma) {  			dev_err(&pdev->dev, "DMA engine request failed\n");  			err = -ENXIO; -			goto out_release_mem_region; +			goto return_error;  		} else {  			struct dma_slave_config cfg; @@ -1942,10 +1741,10 @@ static int omap_nand_probe(struct platform_device *pdev)  			if (err) {  				dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",  					err); -				goto out_release_mem_region; +				goto return_error;  			} -			info->nand.read_buf   = omap_read_buf_dma_pref; -			info->nand.write_buf  = omap_write_buf_dma_pref; +	
		nand_chip->read_buf   = omap_read_buf_dma_pref; +			nand_chip->write_buf  = omap_write_buf_dma_pref;  		}  		break; @@ -1954,34 +1753,36 @@ static int omap_nand_probe(struct platform_device *pdev)  		if (info->gpmc_irq_fifo <= 0) {  			dev_err(&pdev->dev, "error getting fifo irq\n");  			err = -ENODEV; -			goto out_release_mem_region; +			goto return_error;  		} -		err = request_irq(info->gpmc_irq_fifo,	omap_nand_irq, -					IRQF_SHARED, "gpmc-nand-fifo", info); +		err = devm_request_irq(&pdev->dev, info->gpmc_irq_fifo, +					omap_nand_irq, IRQF_SHARED, +					"gpmc-nand-fifo", info);  		if (err) {  			dev_err(&pdev->dev, "requesting irq(%d) error:%d",  						info->gpmc_irq_fifo, err);  			info->gpmc_irq_fifo = 0; -			goto out_release_mem_region; +			goto return_error;  		}  		info->gpmc_irq_count = platform_get_irq(pdev, 1);  		if (info->gpmc_irq_count <= 0) {  			dev_err(&pdev->dev, "error getting count irq\n");  			err = -ENODEV; -			goto out_release_mem_region; +			goto return_error;  		} -		err = request_irq(info->gpmc_irq_count,	omap_nand_irq, -					IRQF_SHARED, "gpmc-nand-count", info); +		err = devm_request_irq(&pdev->dev, info->gpmc_irq_count, +					omap_nand_irq, IRQF_SHARED, +					"gpmc-nand-count", info);  		if (err) {  			dev_err(&pdev->dev, "requesting irq(%d) error:%d",  						info->gpmc_irq_count, err);  			info->gpmc_irq_count = 0; -			goto out_release_mem_region; +			goto return_error;  		} -		info->nand.read_buf  = omap_read_buf_irq_pref; -		info->nand.write_buf = omap_write_buf_irq_pref; +		nand_chip->read_buf  = omap_read_buf_irq_pref; +		nand_chip->write_buf = omap_write_buf_irq_pref;  		break; @@ -1989,117 +1790,275 @@ static int omap_nand_probe(struct platform_device *pdev)  		dev_err(&pdev->dev,  			"xfer_type(%d) not supported!\n", pdata->xfer_type);  		err = -EINVAL; -		goto out_release_mem_region; +		goto return_error;  	} -	/* select the ecc type */ -	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT) -		info->nand.ecc.mode = 
NAND_ECC_SOFT; -	else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) || -		(pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) { -		info->nand.ecc.bytes            = 3; -		info->nand.ecc.size             = 512; -		info->nand.ecc.strength         = 1; -		info->nand.ecc.calculate        = omap_calculate_ecc; -		info->nand.ecc.hwctl            = omap_enable_hwecc; -		info->nand.ecc.correct          = omap_correct_data; -		info->nand.ecc.mode             = NAND_ECC_HW; -	} else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) || -		   (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) { -		err = omap3_init_bch(&info->mtd, pdata->ecc_opt); -		if (err) { +	/* populate MTD interface based on ECC scheme */ +	nand_chip->ecc.layout	= &omap_oobinfo; +	ecclayout		= &omap_oobinfo; +	switch (info->ecc_opt) { +	case OMAP_ECC_HAM1_CODE_HW: +		pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n"); +		nand_chip->ecc.mode             = NAND_ECC_HW; +		nand_chip->ecc.bytes            = 3; +		nand_chip->ecc.size             = 512; +		nand_chip->ecc.strength         = 1; +		nand_chip->ecc.calculate        = omap_calculate_ecc; +		nand_chip->ecc.hwctl            = omap_enable_hwecc; +		nand_chip->ecc.correct          = omap_correct_data; +		/* define ECC layout */ +		ecclayout->eccbytes		= nand_chip->ecc.bytes * +							(mtd->writesize / +							nand_chip->ecc.size); +		if (nand_chip->options & NAND_BUSWIDTH_16) +			oob_index		= BADBLOCK_MARKER_LENGTH; +		else +			oob_index		= 1; +		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) +			ecclayout->eccpos[i]	= oob_index; +		/* no reserved-marker in ecclayout for this ecc-scheme */ +		ecclayout->oobfree->offset	= +				ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; +		break; + +	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: +#ifdef CONFIG_MTD_NAND_ECC_BCH +		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n"); +		nand_chip->ecc.mode		= NAND_ECC_HW; +		nand_chip->ecc.size		= 512; +		nand_chip->ecc.bytes		= 7; +		nand_chip->ecc.strength		= 4; +		
nand_chip->ecc.hwctl		= omap_enable_hwecc_bch; +		nand_chip->ecc.correct		= nand_bch_correct_data; +		nand_chip->ecc.calculate	= omap_calculate_ecc_bch; +		/* define ECC layout */ +		ecclayout->eccbytes		= nand_chip->ecc.bytes * +							(mtd->writesize / +							nand_chip->ecc.size); +		oob_index			= BADBLOCK_MARKER_LENGTH; +		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) { +			ecclayout->eccpos[i] = oob_index; +			if (((i + 1) % nand_chip->ecc.bytes) == 0) +				oob_index++; +		} +		/* include reserved-marker in ecclayout->oobfree calculation */ +		ecclayout->oobfree->offset	= 1 + +				ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; +		/* software bch library is used for locating errors */ +		nand_chip->ecc.priv		= nand_bch_init(mtd, +							nand_chip->ecc.size, +							nand_chip->ecc.bytes, +							&nand_chip->ecc.layout); +		if (!nand_chip->ecc.priv) { +			pr_err("nand: error: unable to use s/w BCH library\n");  			err = -EINVAL; -			goto out_release_mem_region;  		} -	} +		break; +#else +		pr_err("nand: error: CONFIG_MTD_NAND_ECC_BCH not enabled\n"); +		err = -EINVAL; +		goto return_error; +#endif -	/* DIP switches on some boards change between 8 and 16 bit -	 * bus widths for flash.  Try the other width if the first try fails. 
-	 */ -	if (nand_scan_ident(&info->mtd, 1, NULL)) { -		info->nand.options ^= NAND_BUSWIDTH_16; -		if (nand_scan_ident(&info->mtd, 1, NULL)) { -			err = -ENXIO; -			goto out_release_mem_region; +	case OMAP_ECC_BCH4_CODE_HW: +#ifdef CONFIG_MTD_NAND_OMAP_BCH +		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n"); +		nand_chip->ecc.mode		= NAND_ECC_HW; +		nand_chip->ecc.size		= 512; +		/* 14th bit is kept reserved for ROM-code compatibility */ +		nand_chip->ecc.bytes		= 7 + 1; +		nand_chip->ecc.strength		= 4; +		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch; +		nand_chip->ecc.correct		= omap_elm_correct_data; +		nand_chip->ecc.calculate	= omap_calculate_ecc_bch; +		nand_chip->ecc.read_page	= omap_read_page_bch; +		nand_chip->ecc.write_page	= omap_write_page_bch; +		/* define ECC layout */ +		ecclayout->eccbytes		= nand_chip->ecc.bytes * +							(mtd->writesize / +							nand_chip->ecc.size); +		oob_index			= BADBLOCK_MARKER_LENGTH; +		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) +			ecclayout->eccpos[i]	= oob_index; +		/* reserved marker already included in ecclayout->eccbytes */ +		ecclayout->oobfree->offset	= +				ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; +		/* This ECC scheme requires ELM H/W block */ +		if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) { +			pr_err("nand: error: could not initialize ELM\n"); +			err = -ENODEV; +			goto return_error;  		} -	} - -	/* rom code layout */ -	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) { +		break; +#else +		pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n"); +		err = -EINVAL; +		goto return_error; +#endif -		if (info->nand.options & NAND_BUSWIDTH_16) -			offset = 2; -		else { -			offset = 1; -			info->nand.badblock_pattern = &bb_descrip_flashbased; +	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: +#ifdef CONFIG_MTD_NAND_ECC_BCH +		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n"); +		nand_chip->ecc.mode		= NAND_ECC_HW; +		nand_chip->ecc.size		= 512; +		
nand_chip->ecc.bytes		= 13; +		nand_chip->ecc.strength		= 8; +		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch; +		nand_chip->ecc.correct		= nand_bch_correct_data; +		nand_chip->ecc.calculate	= omap_calculate_ecc_bch; +		/* define ECC layout */ +		ecclayout->eccbytes		= nand_chip->ecc.bytes * +							(mtd->writesize / +							nand_chip->ecc.size); +		oob_index			= BADBLOCK_MARKER_LENGTH; +		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) { +			ecclayout->eccpos[i] = oob_index; +			if (((i + 1) % nand_chip->ecc.bytes) == 0) +				oob_index++;  		} -		omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16); -		for (i = 0; i < omap_oobinfo.eccbytes; i++) -			omap_oobinfo.eccpos[i] = i+offset; - -		omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes; -		omap_oobinfo.oobfree->length = info->mtd.oobsize - -					(offset + omap_oobinfo.eccbytes); - -		info->nand.ecc.layout = &omap_oobinfo; -	} else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) || -		   (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) { -		/* build OOB layout for BCH ECC correction */ -		err = omap3_init_bch_tail(&info->mtd); -		if (err) { +		/* include reserved-marker in ecclayout->oobfree calculation */ +		ecclayout->oobfree->offset	= 1 + +				ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; +		/* software bch library is used for locating errors */ +		nand_chip->ecc.priv		= nand_bch_init(mtd, +							nand_chip->ecc.size, +							nand_chip->ecc.bytes, +							&nand_chip->ecc.layout); +		if (!nand_chip->ecc.priv) { +			pr_err("nand: error: unable to use s/w BCH library\n");  			err = -EINVAL; -			goto out_release_mem_region; +			goto return_error; +		} +		break; +#else +		pr_err("nand: error: CONFIG_MTD_NAND_ECC_BCH not enabled\n"); +		err = -EINVAL; +		goto return_error; +#endif + +	case OMAP_ECC_BCH8_CODE_HW: +#ifdef CONFIG_MTD_NAND_OMAP_BCH +		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n"); +		nand_chip->ecc.mode		= NAND_ECC_HW; +		nand_chip->ecc.size		= 512; +		/* 14th bit is kept reserved 
for ROM-code compatibility */ +		nand_chip->ecc.bytes		= 13 + 1; +		nand_chip->ecc.strength		= 8; +		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch; +		nand_chip->ecc.correct		= omap_elm_correct_data; +		nand_chip->ecc.calculate	= omap_calculate_ecc_bch; +		nand_chip->ecc.read_page	= omap_read_page_bch; +		nand_chip->ecc.write_page	= omap_write_page_bch; +		/* This ECC scheme requires ELM H/W block */ +		err = is_elm_present(info, pdata->elm_of_node, BCH8_ECC); +		if (err < 0) { +			pr_err("nand: error: could not initialize ELM\n"); +			goto return_error;  		} +		/* define ECC layout */ +		ecclayout->eccbytes		= nand_chip->ecc.bytes * +							(mtd->writesize / +							nand_chip->ecc.size); +		oob_index			= BADBLOCK_MARKER_LENGTH; +		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) +			ecclayout->eccpos[i]	= oob_index; +		/* reserved marker already included in ecclayout->eccbytes */ +		ecclayout->oobfree->offset	= +				ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; +		break; +#else +		pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n"); +		err = -EINVAL; +		goto return_error; +#endif + +	case OMAP_ECC_BCH16_CODE_HW: +#ifdef CONFIG_MTD_NAND_OMAP_BCH +		pr_info("using OMAP_ECC_BCH16_CODE_HW ECC scheme\n"); +		nand_chip->ecc.mode		= NAND_ECC_HW; +		nand_chip->ecc.size		= 512; +		nand_chip->ecc.bytes		= 26; +		nand_chip->ecc.strength		= 16; +		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch; +		nand_chip->ecc.correct		= omap_elm_correct_data; +		nand_chip->ecc.calculate	= omap_calculate_ecc_bch; +		nand_chip->ecc.read_page	= omap_read_page_bch; +		nand_chip->ecc.write_page	= omap_write_page_bch; +		/* This ECC scheme requires ELM H/W block */ +		err = is_elm_present(info, pdata->elm_of_node, BCH16_ECC); +		if (err < 0) { +			pr_err("ELM is required for this ECC scheme\n"); +			goto return_error; +		} +		/* define ECC layout */ +		ecclayout->eccbytes		= nand_chip->ecc.bytes * +							(mtd->writesize / +							nand_chip->ecc.size); +		oob_index			= 
BADBLOCK_MARKER_LENGTH; +		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) +			ecclayout->eccpos[i]	= oob_index; +		/* reserved marker already included in ecclayout->eccbytes */ +		ecclayout->oobfree->offset	= +				ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; +		break; +#else +		pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n"); +		err = -EINVAL; +		goto return_error; +#endif +	default: +		pr_err("nand: error: invalid or unsupported ECC scheme\n"); +		err = -EINVAL; +		goto return_error; +	} + +	/* all OOB bytes from oobfree->offset till end off OOB are free */ +	ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset; +	/* check if NAND device's OOB is enough to store ECC signatures */ +	if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) { +		pr_err("not enough OOB bytes required = %d, available=%d\n", +					   ecclayout->eccbytes, mtd->oobsize); +		err = -EINVAL; +		goto return_error;  	}  	/* second phase scan */ -	if (nand_scan_tail(&info->mtd)) { +	if (nand_scan_tail(mtd)) {  		err = -ENXIO; -		goto out_release_mem_region; +		goto return_error;  	}  	ppdata.of_node = pdata->of_node; -	mtd_device_parse_register(&info->mtd, NULL, &ppdata, pdata->parts, +	mtd_device_parse_register(mtd, NULL, &ppdata, pdata->parts,  				  pdata->nr_parts); -	platform_set_drvdata(pdev, &info->mtd); +	platform_set_drvdata(pdev, mtd);  	return 0; -out_release_mem_region: +return_error:  	if (info->dma)  		dma_release_channel(info->dma); -	if (info->gpmc_irq_count > 0) -		free_irq(info->gpmc_irq_count, info); -	if (info->gpmc_irq_fifo > 0) -		free_irq(info->gpmc_irq_fifo, info); -	release_mem_region(info->phys_base, info->mem_size); -out_free_info: -	kfree(info); - +	if (nand_chip->ecc.priv) { +		nand_bch_free(nand_chip->ecc.priv); +		nand_chip->ecc.priv = NULL; +	}  	return err;  }  static int omap_nand_remove(struct platform_device *pdev)  {  	struct mtd_info *mtd = platform_get_drvdata(pdev); +	struct nand_chip *nand_chip = 
mtd->priv;  	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,  							mtd); -	omap3_free_bch(&info->mtd); - +	if (nand_chip->ecc.priv) { +		nand_bch_free(nand_chip->ecc.priv); +		nand_chip->ecc.priv = NULL; +	}  	if (info->dma)  		dma_release_channel(info->dma); - -	if (info->gpmc_irq_count > 0) -		free_irq(info->gpmc_irq_count, info); -	if (info->gpmc_irq_fifo > 0) -		free_irq(info->gpmc_irq_fifo, info); - -	/* Release NAND device, its internal structures and partitions */ -	nand_release(&info->mtd); -	iounmap(info->nand.IO_ADDR_R); -	release_mem_region(info->phys_base, info->mem_size); -	kfree(info); +	nand_release(mtd);  	return 0;  } diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index a393a5b6ce1..471b4df3a5a 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c @@ -87,7 +87,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)  	nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);  	if (!nc) { -		printk(KERN_ERR "orion_nand: failed to allocate device structure.\n");  		ret = -ENOMEM;  		goto no_res;  	} @@ -101,7 +100,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)  	io_base = ioremap(res->start, resource_size(res));  	if (!io_base) { -		printk(KERN_ERR "orion_nand: ioremap failed\n"); +		dev_err(&pdev->dev, "ioremap failed\n");  		ret = -EIO;  		goto no_res;  	} @@ -110,7 +109,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)  		board = devm_kzalloc(&pdev->dev, sizeof(struct orion_nand_data),  					GFP_KERNEL);  		if (!board) { -			printk(KERN_ERR "orion_nand: failed to allocate board structure.\n");  			ret = -ENOMEM;  			goto no_res;  		} @@ -216,7 +214,7 @@ static int orion_nand_remove(struct platform_device *pdev)  }  #ifdef CONFIG_OF -static struct of_device_id orion_nand_of_match_table[] = { +static const struct of_device_id orion_nand_of_match_table[] = {  	{ .compatible = "marvell,orion-nand", 
},  	{},  }; diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c index 5a67082c07e..2c98f9da747 100644 --- a/drivers/mtd/nand/pasemi_nand.c +++ b/drivers/mtd/nand/pasemi_nand.c @@ -23,11 +23,12 @@  #undef DEBUG  #include <linux/slab.h> -#include <linux/init.h>  #include <linux/module.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/nand.h>  #include <linux/mtd/nand_ecc.h> +#include <linux/of_address.h> +#include <linux/of_irq.h>  #include <linux/of_platform.h>  #include <linux/platform_device.h>  #include <linux/pci.h> @@ -221,7 +222,7 @@ MODULE_DEVICE_TABLE(of, pasemi_nand_match);  static struct platform_driver pasemi_nand_driver =  {  	.driver = { -		.name = (char*)driver_name, +		.name = driver_name,  		.owner = THIS_MODULE,  		.of_match_table = pasemi_nand_match,  	}, diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c index cad4cdc9df3..0b068a5c0bf 100644 --- a/drivers/mtd/nand/plat_nand.c +++ b/drivers/mtd/nand/plat_nand.c @@ -9,6 +9,7 @@   *   */ +#include <linux/err.h>  #include <linux/io.h>  #include <linux/module.h>  #include <linux/platform_device.h> @@ -47,30 +48,16 @@ static int plat_nand_probe(struct platform_device *pdev)  		return -EINVAL;  	} -	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	if (!res) -		return -ENXIO; -  	/* Allocate memory for the device structure (and zero it) */ -	data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL); -	if (!data) { -		dev_err(&pdev->dev, "failed to allocate device structure.\n"); +	data = devm_kzalloc(&pdev->dev, sizeof(struct plat_nand_data), +			    GFP_KERNEL); +	if (!data)  		return -ENOMEM; -	} - -	if (!request_mem_region(res->start, resource_size(res), -				dev_name(&pdev->dev))) { -		dev_err(&pdev->dev, "request_mem_region failed\n"); -		err = -EBUSY; -		goto out_free; -	} -	data->io_base = ioremap(res->start, resource_size(res)); -	if (data->io_base == NULL) { -		dev_err(&pdev->dev, "ioremap failed\n"); -		err = -EIO; -		goto out_release_io; 
-	} +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	data->io_base = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(data->io_base)) +		return PTR_ERR(data->io_base);  	data->chip.priv = &data;  	data->mtd.priv = &data->chip; @@ -122,11 +109,6 @@ static int plat_nand_probe(struct platform_device *pdev)  out:  	if (pdata->ctrl.remove)  		pdata->ctrl.remove(pdev); -	iounmap(data->io_base); -out_release_io: -	release_mem_region(res->start, resource_size(res)); -out_free: -	kfree(data);  	return err;  } @@ -137,16 +119,10 @@ static int plat_nand_remove(struct platform_device *pdev)  {  	struct plat_nand_data *data = platform_get_drvdata(pdev);  	struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev); -	struct resource *res; - -	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);  	nand_release(&data->mtd);  	if (pdata->ctrl.remove)  		pdata->ctrl.remove(pdev); -	iounmap(data->io_base); -	release_mem_region(res->start, resource_size(res)); -	kfree(data);  	return 0;  } diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index dd03dfdfb0d..96b0b1d27df 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -7,6 +7,8 @@   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License version 2 as   * published by the Free Software Foundation. + * + * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.   */  #include <linux/kernel.h> @@ -24,6 +26,7 @@  #include <linux/slab.h>  #include <linux/of.h>  #include <linux/of_device.h> +#include <linux/of_mtd.h>  #if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)  #define ARCH_HAS_DMA @@ -39,6 +42,13 @@  #define NAND_STOP_DELAY		(2 * HZ/50)  #define PAGE_CHUNK_SIZE		(2048) +/* + * Define a buffer size for the initial command that detects the flash device: + * STATUS, READID and PARAM. The largest of these is the PARAM command, + * needing 256 bytes. 
+ */ +#define INIT_BUFFER_SIZE	256 +  /* registers and bit definitions */  #define NDCR		(0x00) /* Control register */  #define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */ @@ -47,6 +57,7 @@  #define NDPCR		(0x18) /* Page Count Register */  #define NDBDR0		(0x1C) /* Bad Block Register 0 */  #define NDBDR1		(0x20) /* Bad Block Register 1 */ +#define NDECCCTRL	(0x28) /* ECC control */  #define NDDB		(0x40) /* Data Buffer */  #define NDCB0		(0x48) /* Command Buffer0 */  #define NDCB1		(0x4C) /* Command Buffer1 */ @@ -73,6 +84,9 @@  #define NDCR_INT_MASK           (0xFFF)  #define NDSR_MASK		(0xfff) +#define NDSR_ERR_CNT_OFF	(16) +#define NDSR_ERR_CNT_MASK       (0x1f) +#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)  #define NDSR_RDY                (0x1 << 12)  #define NDSR_FLASH_RDY          (0x1 << 11)  #define NDSR_CS0_PAGED		(0x1 << 10) @@ -81,8 +95,8 @@  #define NDSR_CS1_CMDD		(0x1 << 7)  #define NDSR_CS0_BBD		(0x1 << 6)  #define NDSR_CS1_BBD		(0x1 << 5) -#define NDSR_DBERR		(0x1 << 4) -#define NDSR_SBERR		(0x1 << 3) +#define NDSR_UNCORERR		(0x1 << 4) +#define NDSR_CORERR		(0x1 << 3)  #define NDSR_WRDREQ		(0x1 << 2)  #define NDSR_RDDREQ		(0x1 << 1)  #define NDSR_WRCMDREQ		(0x1) @@ -91,6 +105,8 @@  #define NDCB0_ST_ROW_EN         (0x1 << 26)  #define NDCB0_AUTO_RS		(0x1 << 25)  #define NDCB0_CSEL		(0x1 << 24) +#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29) +#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)  #define NDCB0_CMD_TYPE_MASK	(0x7 << 21)  #define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)  #define NDCB0_NC		(0x1 << 20) @@ -101,21 +117,29 @@  #define NDCB0_CMD1_MASK		(0xff)  #define NDCB0_ADDR_CYC_SHIFT	(16) +#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */ +#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */ +#define EXT_CMD_TYPE_READ	4 /* Read */ +#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */ +#define EXT_CMD_TYPE_FINAL	3 /* Final command */ +#define 
EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */ +#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */ +  /* macros for registers read/write */  #define nand_writel(info, off, val)	\ -	__raw_writel((val), (info)->mmio_base + (off)) +	writel_relaxed((val), (info)->mmio_base + (off))  #define nand_readl(info, off)		\ -	__raw_readl((info)->mmio_base + (off)) +	readl_relaxed((info)->mmio_base + (off))  /* error code and state */  enum {  	ERR_NONE	= 0,  	ERR_DMABUSERR	= -1,  	ERR_SENDCMD	= -2, -	ERR_DBERR	= -3, +	ERR_UNCORERR	= -3,  	ERR_BBERR	= -4, -	ERR_SBERR	= -5, +	ERR_CORERR	= -5,  };  enum { @@ -142,7 +166,6 @@ struct pxa3xx_nand_host {  	void			*info_data;  	/* page size of attached chip */ -	unsigned int		page_size;  	int			use_ecc;  	int			cs; @@ -160,10 +183,13 @@ struct pxa3xx_nand_info {  	struct clk		*clk;  	void __iomem		*mmio_base;  	unsigned long		mmio_phys; -	struct completion	cmd_complete; +	struct completion	cmd_complete, dev_ready;  	unsigned int 		buf_start;  	unsigned int		buf_count; +	unsigned int		buf_size; +	unsigned int		data_buff_pos; +	unsigned int		oob_buff_pos;  	/* DMA information */  	int			drcmr_dat; @@ -187,13 +213,18 @@ struct pxa3xx_nand_info {  	int			cs;  	int			use_ecc;	/* use HW ECC ? */ +	int			ecc_bch;	/* using BCH ECC? */  	int			use_dma;	/* use DMA ? */  	int			use_spare;	/* use spare ? 
*/ -	int			is_ready; +	int			need_wait; -	unsigned int		page_size;	/* page size of attached chip */ -	unsigned int		data_size;	/* data size in FIFO */ +	unsigned int		data_size;	/* data to be read from FIFO */ +	unsigned int		chunk_size;	/* split commands chunk size */  	unsigned int		oob_size; +	unsigned int		spare_size; +	unsigned int		ecc_size; +	unsigned int		ecc_err_cnt; +	unsigned int		max_bitflips;  	int 			retcode;  	/* cached register value */ @@ -231,6 +262,64 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {  { "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },  }; +static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' }; +static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' }; + +static struct nand_bbt_descr bbt_main_descr = { +	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE +		| NAND_BBT_2BIT | NAND_BBT_VERSION, +	.offs =	8, +	.len = 6, +	.veroffs = 14, +	.maxblocks = 8,		/* Last 8 blocks in each chip */ +	.pattern = bbt_pattern +}; + +static struct nand_bbt_descr bbt_mirror_descr = { +	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE +		| NAND_BBT_2BIT | NAND_BBT_VERSION, +	.offs =	8, +	.len = 6, +	.veroffs = 14, +	.maxblocks = 8,		/* Last 8 blocks in each chip */ +	.pattern = bbt_mirror_pattern +}; + +static struct nand_ecclayout ecc_layout_2KB_bch4bit = { +	.eccbytes = 32, +	.eccpos = { +		32, 33, 34, 35, 36, 37, 38, 39, +		40, 41, 42, 43, 44, 45, 46, 47, +		48, 49, 50, 51, 52, 53, 54, 55, +		56, 57, 58, 59, 60, 61, 62, 63}, +	.oobfree = { {2, 30} } +}; + +static struct nand_ecclayout ecc_layout_4KB_bch4bit = { +	.eccbytes = 64, +	.eccpos = { +		32,  33,  34,  35,  36,  37,  38,  39, +		40,  41,  42,  43,  44,  45,  46,  47, +		48,  49,  50,  51,  52,  53,  54,  55, +		56,  57,  58,  59,  60,  61,  62,  63, +		96,  97,  98,  99,  100, 101, 102, 103, +		104, 105, 106, 107, 108, 109, 110, 111, +		112, 113, 114, 115, 116, 117, 118, 119, +		120, 121, 122, 123, 124, 125, 126, 127}, +	
/* Bootrom looks in bytes 0 & 5 for bad blocks */ +	.oobfree = { {6, 26}, { 64, 32} } +}; + +static struct nand_ecclayout ecc_layout_4KB_bch8bit = { +	.eccbytes = 128, +	.eccpos = { +		32,  33,  34,  35,  36,  37,  38,  39, +		40,  41,  42,  43,  44,  45,  46,  47, +		48,  49,  50,  51,  52,  53,  54,  55, +		56,  57,  58,  59,  60,  61,  62,  63}, +	.oobfree = { } +}; +  /* Define a default flash type setting serve as flash detecting only */  #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) @@ -248,6 +337,29 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {  /* convert nano-seconds to nand flash controller clock cycles */  #define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000) +static const struct of_device_id pxa3xx_nand_dt_ids[] = { +	{ +		.compatible = "marvell,pxa3xx-nand", +		.data       = (void *)PXA3XX_NAND_VARIANT_PXA, +	}, +	{ +		.compatible = "marvell,armada370-nand", +		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370, +	}, +	{} +}; +MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids); + +static enum pxa3xx_nand_variant +pxa3xx_nand_get_variant(struct platform_device *pdev) +{ +	const struct of_device_id *of_id = +			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev); +	if (!of_id) +		return PXA3XX_NAND_VARIANT_PXA; +	return (enum pxa3xx_nand_variant)of_id->data; +} +  static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,  				   const struct pxa3xx_nand_timing *t)  { @@ -272,25 +384,23 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,  	nand_writel(info, NDTR1CS0, ndtr1);  } -static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) +/* + * Set the data and OOB size, depending on the selected + * spare and ECC configuration. + * Only applicable to READ0, READOOB and PAGEPROG commands. 
+ */ +static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info, +				struct mtd_info *mtd)  { -	struct pxa3xx_nand_host *host = info->host[info->cs];  	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN; -	info->data_size = host->page_size; -	if (!oob_enable) { -		info->oob_size = 0; +	info->data_size = mtd->writesize; +	if (!oob_enable)  		return; -	} -	switch (host->page_size) { -	case 2048: -		info->oob_size = (info->use_ecc) ? 40 : 64; -		break; -	case 512: -		info->oob_size = (info->use_ecc) ? 8 : 16; -		break; -	} +	info->oob_size = info->spare_size; +	if (!info->use_ecc) +		info->oob_size += info->ecc_size;  }  /** @@ -305,10 +415,15 @@ static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)  	ndcr = info->reg_ndcr; -	if (info->use_ecc) +	if (info->use_ecc) {  		ndcr |= NDCR_ECC_EN; -	else +		if (info->ecc_bch) +			nand_writel(info, NDECCCTRL, 0x1); +	} else {  		ndcr &= ~NDCR_ECC_EN; +		if (info->ecc_bch) +			nand_writel(info, NDECCCTRL, 0x0); +	}  	if (info->use_dma)  		ndcr |= NDCR_DMA_EN; @@ -367,26 +482,39 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)  static void handle_data_pio(struct pxa3xx_nand_info *info)  { +	unsigned int do_bytes = min(info->data_size, info->chunk_size); +  	switch (info->state) {  	case STATE_PIO_WRITING: -		__raw_writesl(info->mmio_base + NDDB, info->data_buff, -				DIV_ROUND_UP(info->data_size, 4)); +		__raw_writesl(info->mmio_base + NDDB, +			      info->data_buff + info->data_buff_pos, +			      DIV_ROUND_UP(do_bytes, 4)); +  		if (info->oob_size > 0) -			__raw_writesl(info->mmio_base + NDDB, info->oob_buff, -					DIV_ROUND_UP(info->oob_size, 4)); +			__raw_writesl(info->mmio_base + NDDB, +				      info->oob_buff + info->oob_buff_pos, +				      DIV_ROUND_UP(info->oob_size, 4));  		break;  	case STATE_PIO_READING: -		__raw_readsl(info->mmio_base + NDDB, info->data_buff, -				DIV_ROUND_UP(info->data_size, 4)); +		__raw_readsl(info->mmio_base + NDDB, +			     info->data_buff + 
info->data_buff_pos, +			     DIV_ROUND_UP(do_bytes, 4)); +  		if (info->oob_size > 0) -			__raw_readsl(info->mmio_base + NDDB, info->oob_buff, -					DIV_ROUND_UP(info->oob_size, 4)); +			__raw_readsl(info->mmio_base + NDDB, +				     info->oob_buff + info->oob_buff_pos, +				     DIV_ROUND_UP(info->oob_size, 4));  		break;  	default:  		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,  				info->state);  		BUG();  	} + +	/* Update buffer pointers for multi-page read/write */ +	info->data_buff_pos += do_bytes; +	info->oob_buff_pos += info->oob_size; +	info->data_size -= do_bytes;  }  #ifdef ARCH_HAS_DMA @@ -444,7 +572,7 @@ static void start_data_dma(struct pxa3xx_nand_info *info)  static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)  {  	struct pxa3xx_nand_info *info = devid; -	unsigned int status, is_completed = 0; +	unsigned int status, is_completed = 0, is_ready = 0;  	unsigned int ready, cmd_done;  	if (info->cs == 0) { @@ -457,10 +585,25 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)  	status = nand_readl(info, NDSR); -	if (status & NDSR_DBERR) -		info->retcode = ERR_DBERR; -	if (status & NDSR_SBERR) -		info->retcode = ERR_SBERR; +	if (status & NDSR_UNCORERR) +		info->retcode = ERR_UNCORERR; +	if (status & NDSR_CORERR) { +		info->retcode = ERR_CORERR; +		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 && +		    info->ecc_bch) +			info->ecc_err_cnt = NDSR_ERR_CNT(status); +		else +			info->ecc_err_cnt = 1; + +		/* +		 * Each chunk composing a page is corrected independently, +		 * and we need to store maximum number of corrected bitflips +		 * to return it to the MTD layer in ecc.read_page(). 
+		 */ +		info->max_bitflips = max_t(unsigned int, +					   info->max_bitflips, +					   info->ecc_err_cnt); +	}  	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {  		/* whether use dma to transfer data */  		if (info->use_dma) { @@ -480,8 +623,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)  		is_completed = 1;  	}  	if (status & ready) { -		info->is_ready = 1;  		info->state = STATE_READY; +		is_ready = 1;  	}  	if (status & NDSR_WRCMDREQ) { @@ -510,6 +653,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)  	nand_writel(info, NDSR, status);  	if (is_completed)  		complete(&info->cmd_complete); +	if (is_ready) +		complete(&info->dev_ready);  NORMAL_IRQ_EXIT:  	return IRQ_HANDLED;  } @@ -522,52 +667,94 @@ static inline int is_buf_blank(uint8_t *buf, size_t len)  	return 1;  } -static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, -		uint16_t column, int page_addr) +static void set_command_address(struct pxa3xx_nand_info *info, +		unsigned int page_size, uint16_t column, int page_addr)  { -	int addr_cycle, exec_cmd; -	struct pxa3xx_nand_host *host; -	struct mtd_info *mtd; +	/* small page addr setting */ +	if (page_size < PAGE_CHUNK_SIZE) { +		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) +				| (column & 0xFF); -	host = info->host[info->cs]; -	mtd = host->mtd; -	addr_cycle = 0; -	exec_cmd = 1; +		info->ndcb2 = 0; +	} else { +		info->ndcb1 = ((page_addr & 0xFFFF) << 16) +				| (column & 0xFFFF); + +		if (page_addr & 0xFF0000) +			info->ndcb2 = (page_addr & 0xFF0000) >> 16; +		else +			info->ndcb2 = 0; +	} +} + +static void prepare_start_command(struct pxa3xx_nand_info *info, int command) +{ +	struct pxa3xx_nand_host *host = info->host[info->cs]; +	struct mtd_info *mtd = host->mtd;  	/* reset data and oob column point to handle data */  	info->buf_start		= 0;  	info->buf_count		= 0;  	info->oob_size		= 0; +	info->data_buff_pos	= 0; +	info->oob_buff_pos	= 0;  	info->use_ecc		= 0;  	info->use_spare		= 1; -	info->use_dma		= 
(use_dma) ? 1 : 0; -	info->is_ready		= 0;  	info->retcode		= ERR_NONE; -	if (info->cs != 0) -		info->ndcb0 = NDCB0_CSEL; -	else -		info->ndcb0 = 0; +	info->ecc_err_cnt	= 0; +	info->ndcb3		= 0; +	info->need_wait		= 0;  	switch (command) {  	case NAND_CMD_READ0:  	case NAND_CMD_PAGEPROG:  		info->use_ecc = 1;  	case NAND_CMD_READOOB: -		pxa3xx_set_datasize(info); +		pxa3xx_set_datasize(info, mtd);  		break;  	case NAND_CMD_PARAM:  		info->use_spare = 0;  		break; -	case NAND_CMD_SEQIN: -		exec_cmd = 0; -		break;  	default:  		info->ndcb1 = 0;  		info->ndcb2 = 0; -		info->ndcb3 = 0;  		break;  	} +	/* +	 * If we are about to issue a read command, or about to set +	 * the write address, then clean the data buffer. +	 */ +	if (command == NAND_CMD_READ0 || +	    command == NAND_CMD_READOOB || +	    command == NAND_CMD_SEQIN) { + +		info->buf_count = mtd->writesize + mtd->oobsize; +		memset(info->data_buff, 0xFF, info->buf_count); +	} + +} + +static int prepare_set_command(struct pxa3xx_nand_info *info, int command, +		int ext_cmd_type, uint16_t column, int page_addr) +{ +	int addr_cycle, exec_cmd; +	struct pxa3xx_nand_host *host; +	struct mtd_info *mtd; + +	host = info->host[info->cs]; +	mtd = host->mtd; +	addr_cycle = 0; +	exec_cmd = 1; + +	if (info->cs != 0) +		info->ndcb0 = NDCB0_CSEL; +	else +		info->ndcb0 = 0; + +	if (command == NAND_CMD_SEQIN) +		exec_cmd = 0; +  	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles  				    + host->col_addr_cycles); @@ -582,30 +769,42 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,  		if (command == NAND_CMD_READOOB)  			info->buf_start += mtd->writesize; -		/* Second command setting for large pages */ -		if (host->page_size >= PAGE_CHUNK_SIZE) +		/* +		 * Multiple page read needs an 'extended command type' field, +		 * which is either naked-read or last-read according to the +		 * state. 
+		 */ +		if (mtd->writesize == PAGE_CHUNK_SIZE) {  			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8); +		} else if (mtd->writesize > PAGE_CHUNK_SIZE) { +			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) +					| NDCB0_LEN_OVRD +					| NDCB0_EXT_CMD_TYPE(ext_cmd_type); +			info->ndcb3 = info->chunk_size + +				      info->oob_size; +		} + +		set_command_address(info, mtd->writesize, column, page_addr); +		break;  	case NAND_CMD_SEQIN: -		/* small page addr setting */ -		if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) { -			info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) -					| (column & 0xFF); -			info->ndcb2 = 0; -		} else { -			info->ndcb1 = ((page_addr & 0xFFFF) << 16) -					| (column & 0xFFFF); +		info->buf_start = column; +		set_command_address(info, mtd->writesize, 0, page_addr); -			if (page_addr & 0xFF0000) -				info->ndcb2 = (page_addr & 0xFF0000) >> 16; -			else -				info->ndcb2 = 0; +		/* +		 * Multiple page programming needs to execute the initial +		 * SEQIN command that sets the page address. +		 */ +		if (mtd->writesize > PAGE_CHUNK_SIZE) { +			info->ndcb0 |= NDCB0_CMD_TYPE(0x1) +				| NDCB0_EXT_CMD_TYPE(ext_cmd_type) +				| addr_cycle +				| command; +			/* No data transfer in this case */ +			info->data_size = 0; +			exec_cmd = 1;  		} - -		info->buf_count = mtd->writesize + mtd->oobsize; -		memset(info->data_buff, 0xFF, info->buf_count); -  		break;  	case NAND_CMD_PAGEPROG: @@ -615,13 +814,40 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,  			break;  		} -		info->ndcb0 |= NDCB0_CMD_TYPE(0x1) -				| NDCB0_AUTO_RS -				| NDCB0_ST_ROW_EN -				| NDCB0_DBC -				| (NAND_CMD_PAGEPROG << 8) -				| NAND_CMD_SEQIN -				| addr_cycle; +		/* Second command setting for large pages */ +		if (mtd->writesize > PAGE_CHUNK_SIZE) { +			/* +			 * Multiple page write uses the 'extended command' +			 * field. This can be used to issue a command dispatch +			 * or a naked-write depending on the current stage. 
+			 */ +			info->ndcb0 |= NDCB0_CMD_TYPE(0x1) +					| NDCB0_LEN_OVRD +					| NDCB0_EXT_CMD_TYPE(ext_cmd_type); +			info->ndcb3 = info->chunk_size + +				      info->oob_size; + +			/* +			 * This is the command dispatch that completes a chunked +			 * page program operation. +			 */ +			if (info->data_size == 0) { +				info->ndcb0 = NDCB0_CMD_TYPE(0x1) +					| NDCB0_EXT_CMD_TYPE(ext_cmd_type) +					| command; +				info->ndcb1 = 0; +				info->ndcb2 = 0; +				info->ndcb3 = 0; +			} +		} else { +			info->ndcb0 |= NDCB0_CMD_TYPE(0x1) +					| NDCB0_AUTO_RS +					| NDCB0_ST_ROW_EN +					| NDCB0_DBC +					| (NAND_CMD_PAGEPROG << 8) +					| NAND_CMD_SEQIN +					| addr_cycle; +		}  		break;  	case NAND_CMD_PARAM: @@ -684,8 +910,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,  	return exec_cmd;  } -static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, -				int column, int page_addr) +static void nand_cmdfunc(struct mtd_info *mtd, unsigned command, +			 int column, int page_addr)  {  	struct pxa3xx_nand_host *host = mtd->priv;  	struct pxa3xx_nand_info *info = host->info_data; @@ -710,10 +936,15 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,  		nand_writel(info, NDTR1CS0, info->ndtr1cs0);  	} +	prepare_start_command(info, command); +  	info->state = STATE_PREPARED; -	exec_cmd = prepare_command_pool(info, command, column, page_addr); +	exec_cmd = prepare_set_command(info, command, 0, column, page_addr); +  	if (exec_cmd) {  		init_completion(&info->cmd_complete); +		init_completion(&info->dev_ready); +		info->need_wait = 1;  		pxa3xx_nand_start(info);  		ret = wait_for_completion_timeout(&info->cmd_complete, @@ -727,6 +958,117 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,  	info->state = STATE_IDLE;  } +static void nand_cmdfunc_extended(struct mtd_info *mtd, +				  const unsigned command, +				  int column, int page_addr) +{ +	struct pxa3xx_nand_host *host = mtd->priv; 
+	struct pxa3xx_nand_info *info = host->info_data; +	int ret, exec_cmd, ext_cmd_type; + +	/* +	 * if this is a x16 device then convert the input +	 * "byte" address into a "word" address appropriate +	 * for indexing a word-oriented device +	 */ +	if (info->reg_ndcr & NDCR_DWIDTH_M) +		column /= 2; + +	/* +	 * There may be different NAND chip hooked to +	 * different chip select, so check whether +	 * chip select has been changed, if yes, reset the timing +	 */ +	if (info->cs != host->cs) { +		info->cs = host->cs; +		nand_writel(info, NDTR0CS0, info->ndtr0cs0); +		nand_writel(info, NDTR1CS0, info->ndtr1cs0); +	} + +	/* Select the extended command for the first command */ +	switch (command) { +	case NAND_CMD_READ0: +	case NAND_CMD_READOOB: +		ext_cmd_type = EXT_CMD_TYPE_MONO; +		break; +	case NAND_CMD_SEQIN: +		ext_cmd_type = EXT_CMD_TYPE_DISPATCH; +		break; +	case NAND_CMD_PAGEPROG: +		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW; +		break; +	default: +		ext_cmd_type = 0; +		break; +	} + +	prepare_start_command(info, command); + +	/* +	 * Prepare the "is ready" completion before starting a command +	 * transaction sequence. If the command is not executed the +	 * completion will be completed, see below. +	 * +	 * We can do that inside the loop because the command variable +	 * is invariant and thus so is the exec_cmd. 
+	 */ +	info->need_wait = 1; +	init_completion(&info->dev_ready); +	do { +		info->state = STATE_PREPARED; +		exec_cmd = prepare_set_command(info, command, ext_cmd_type, +					       column, page_addr); +		if (!exec_cmd) { +			info->need_wait = 0; +			complete(&info->dev_ready); +			break; +		} + +		init_completion(&info->cmd_complete); +		pxa3xx_nand_start(info); + +		ret = wait_for_completion_timeout(&info->cmd_complete, +				CHIP_DELAY_TIMEOUT); +		if (!ret) { +			dev_err(&info->pdev->dev, "Wait time out!!!\n"); +			/* Stop State Machine for next command cycle */ +			pxa3xx_nand_stop(info); +			break; +		} + +		/* Check if the sequence is complete */ +		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG) +			break; + +		/* +		 * After a splitted program command sequence has issued +		 * the command dispatch, the command sequence is complete. +		 */ +		if (info->data_size == 0 && +		    command == NAND_CMD_PAGEPROG && +		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH) +			break; + +		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) { +			/* Last read: issue a 'last naked read' */ +			if (info->data_size == info->chunk_size) +				ext_cmd_type = EXT_CMD_TYPE_LAST_RW; +			else +				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW; + +		/* +		 * If a splitted program command has no more data to transfer, +		 * the command dispatch must be issued to complete. 
+		 */ +		} else if (command == NAND_CMD_PAGEPROG && +			   info->data_size == 0) { +				ext_cmd_type = EXT_CMD_TYPE_DISPATCH; +		} +	} while (1); + +	info->state = STATE_IDLE; +} +  static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,  		struct nand_chip *chip, const uint8_t *buf, int oob_required)  { @@ -746,20 +1088,14 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,  	chip->read_buf(mtd, buf, mtd->writesize);  	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); -	if (info->retcode == ERR_SBERR) { -		switch (info->use_ecc) { -		case 1: -			mtd->ecc_stats.corrected++; -			break; -		case 0: -		default: -			break; -		} -	} else if (info->retcode == ERR_DBERR) { +	if (info->retcode == ERR_CORERR && info->use_ecc) { +		mtd->ecc_stats.corrected += info->ecc_err_cnt; + +	} else if (info->retcode == ERR_UNCORERR) {  		/*  		 * for blank page (all 0xff), HW will calculate its ECC as  		 * 0, which is different from the ECC information within -		 * OOB, ignore such double bit errors +		 * OOB, ignore such uncorrectable errors  		 */  		if (is_buf_blank(buf, mtd->writesize))  			info->retcode = ERR_NONE; @@ -767,7 +1103,7 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,  			mtd->ecc_stats.failed++;  	} -	return 0; +	return info->max_bitflips;  }  static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) @@ -826,21 +1162,27 @@ static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)  {  	struct pxa3xx_nand_host *host = mtd->priv;  	struct pxa3xx_nand_info *info = host->info_data; +	int ret; + +	if (info->need_wait) { +		ret = wait_for_completion_timeout(&info->dev_ready, +				CHIP_DELAY_TIMEOUT); +		info->need_wait = 0; +		if (!ret) { +			dev_err(&info->pdev->dev, "Ready time out!!!\n"); +			return NAND_STATUS_FAIL; +		} +	}  	/* pxa3xx_nand_send_command has waited for command complete */  	if (this->state == FL_WRITING || this->state == FL_ERASING) {  		if (info->retcode == ERR_NONE)  			return 0; -		else { -			/* 
-			 * any error make it return 0x01 which will tell -			 * the caller the erase and write fail -			 */ -			return 0x01; -		} +		else +			return NAND_STATUS_FAIL;  	} -	return 0; +	return NAND_STATUS_READY;  }  static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, @@ -862,7 +1204,6 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,  	}  	/* calculate flash information */ -	host->page_size = f->page_size;  	host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;  	/* calculate addressing information */ @@ -899,39 +1240,35 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)  	uint32_t ndcr = nand_readl(info, NDCR);  	if (ndcr & NDCR_PAGE_SZ) { -		host->page_size = 2048; +		/* Controller's FIFO size */ +		info->chunk_size = 2048;  		host->read_id_bytes = 4;  	} else { -		host->page_size = 512; +		info->chunk_size = 512;  		host->read_id_bytes = 2;  	} +	/* Set an initial chunk size */  	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;  	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);  	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);  	return 0;  } -/* the maximum possible buffer size for large page with OOB data - * is: 2048 + 64 = 2112 bytes, allocate a page here for both the - * data buffer and the DMA descriptor - */ -#define MAX_BUFF_SIZE	PAGE_SIZE -  #ifdef ARCH_HAS_DMA  static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)  {  	struct platform_device *pdev = info->pdev; -	int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc); +	int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);  	if (use_dma == 0) { -		info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL); +		info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);  		if (info->data_buff == NULL)  			return -ENOMEM;  		return 0;  	} -	info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE, +	info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,  				&info->data_buff_phys, GFP_KERNEL);  	if (info->data_buff == NULL) {  		
dev_err(&pdev->dev, "failed to allocate dma buffer\n"); @@ -945,20 +1282,25 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)  				pxa3xx_nand_data_dma_irq, info);  	if (info->data_dma_ch < 0) {  		dev_err(&pdev->dev, "failed to request data dma\n"); -		dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE, +		dma_free_coherent(&pdev->dev, info->buf_size,  				info->data_buff, info->data_buff_phys);  		return info->data_dma_ch;  	} +	/* +	 * Now that DMA buffers are allocated we turn on +	 * DMA proper for I/O operations. +	 */ +	info->use_dma = 1;  	return 0;  }  static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)  {  	struct platform_device *pdev = info->pdev; -	if (use_dma) { +	if (info->use_dma) {  		pxa_free_dma(info->data_dma_ch); -		dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE, +		dma_free_coherent(&pdev->dev, info->buf_size,  				  info->data_buff, info->data_buff_phys);  	} else {  		kfree(info->data_buff); @@ -967,7 +1309,7 @@ static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)  #else  static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)  { -	info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL); +	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);  	if (info->data_buff == NULL)  		return -ENOMEM;  	return 0; @@ -982,18 +1324,92 @@ static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)  static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)  {  	struct mtd_info *mtd; +	struct nand_chip *chip;  	int ret; +  	mtd = info->host[info->cs]->mtd; +	chip = mtd->priv; +  	/* use the common timing to make a try */  	ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);  	if (ret)  		return ret; -	pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0); -	if (info->is_ready) -		return 0; +	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0); +	ret = chip->waitfunc(mtd, chip); +	if (ret & NAND_STATUS_FAIL) +		return -ENODEV; -	return -ENODEV; +	return 0; +} + +static int pxa_ecc_init(struct pxa3xx_nand_info *info, +	
		struct nand_ecc_ctrl *ecc, +			int strength, int ecc_stepsize, int page_size) +{ +	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) { +		info->chunk_size = 2048; +		info->spare_size = 40; +		info->ecc_size = 24; +		ecc->mode = NAND_ECC_HW; +		ecc->size = 512; +		ecc->strength = 1; + +	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) { +		info->chunk_size = 512; +		info->spare_size = 8; +		info->ecc_size = 8; +		ecc->mode = NAND_ECC_HW; +		ecc->size = 512; +		ecc->strength = 1; + +	/* +	 * Required ECC: 4-bit correction per 512 bytes +	 * Select: 16-bit correction per 2048 bytes +	 */ +	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) { +		info->ecc_bch = 1; +		info->chunk_size = 2048; +		info->spare_size = 32; +		info->ecc_size = 32; +		ecc->mode = NAND_ECC_HW; +		ecc->size = info->chunk_size; +		ecc->layout = &ecc_layout_2KB_bch4bit; +		ecc->strength = 16; + +	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) { +		info->ecc_bch = 1; +		info->chunk_size = 2048; +		info->spare_size = 32; +		info->ecc_size = 32; +		ecc->mode = NAND_ECC_HW; +		ecc->size = info->chunk_size; +		ecc->layout = &ecc_layout_4KB_bch4bit; +		ecc->strength = 16; + +	/* +	 * Required ECC: 8-bit correction per 512 bytes +	 * Select: 16-bit correction per 1024 bytes +	 */ +	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) { +		info->ecc_bch = 1; +		info->chunk_size = 1024; +		info->spare_size = 0; +		info->ecc_size = 32; +		ecc->mode = NAND_ECC_HW; +		ecc->size = info->chunk_size; +		ecc->layout = &ecc_layout_4KB_bch8bit; +		ecc->strength = 16; +	} else { +		dev_err(&info->pdev->dev, +			"ECC strength %d at page size %d is not supported\n", +			strength, page_size); +		return -ENODEV; +	} + +	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n", +		 ecc->strength, ecc->size); +	return 0;  }  static int pxa3xx_nand_scan(struct mtd_info *mtd) @@ -1008,6 +1424,7 @@ static int 
pxa3xx_nand_scan(struct mtd_info *mtd)  	uint32_t id = -1;  	uint64_t chipsize;  	int i, ret, num; +	uint16_t ecc_strength, ecc_step;  	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))  		goto KEEP_CONFIG; @@ -1066,22 +1483,77 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)  	pxa3xx_flash_ids[1].name = NULL;  	def = pxa3xx_flash_ids;  KEEP_CONFIG: -	chip->ecc.mode = NAND_ECC_HW; -	chip->ecc.size = host->page_size; -	chip->ecc.strength = 1; -  	if (info->reg_ndcr & NDCR_DWIDTH_M)  		chip->options |= NAND_BUSWIDTH_16; +	/* Device detection must be done with ECC disabled */ +	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) +		nand_writel(info, NDECCCTRL, 0x0); +  	if (nand_scan_ident(mtd, 1, def))  		return -ENODEV; + +	if (pdata->flash_bbt) { +		/* +		 * We'll use a bad block table stored in-flash and don't +		 * allow writing the bad block marker to the flash. +		 */ +		chip->bbt_options |= NAND_BBT_USE_FLASH | +				     NAND_BBT_NO_OOB_BBM; +		chip->bbt_td = &bbt_main_descr; +		chip->bbt_md = &bbt_mirror_descr; +	} + +	/* +	 * If the page size is bigger than the FIFO size, let's check +	 * we are given the right variant and then switch to the extended +	 * (aka splitted) command handling, +	 */ +	if (mtd->writesize > PAGE_CHUNK_SIZE) { +		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) { +			chip->cmdfunc = nand_cmdfunc_extended; +		} else { +			dev_err(&info->pdev->dev, +				"unsupported page size on this variant\n"); +			return -ENODEV; +		} +	} + +	if (pdata->ecc_strength && pdata->ecc_step_size) { +		ecc_strength = pdata->ecc_strength; +		ecc_step = pdata->ecc_step_size; +	} else { +		ecc_strength = chip->ecc_strength_ds; +		ecc_step = chip->ecc_step_ds; +	} + +	/* Set default ECC strength requirements on non-ONFI devices */ +	if (ecc_strength < 1 && ecc_step < 1) { +		ecc_strength = 1; +		ecc_step = 512; +	} + +	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength, +			   ecc_step, mtd->writesize); +	if (ret) +		return ret; +  	/* 
calculate addressing information */  	if (mtd->writesize >= 2048)  		host->col_addr_cycles = 2;  	else  		host->col_addr_cycles = 1; +	/* release the initial buffer */ +	kfree(info->data_buff); + +	/* allocate the real data + oob buffer */ +	info->buf_size = mtd->writesize + mtd->oobsize; +	ret = pxa3xx_nand_init_buff(info); +	if (ret) +		return ret;  	info->oob_buff = info->data_buff + mtd->writesize; +  	if ((mtd->size >> chip->page_shift) > 65536)  		host->row_addr_cycles = 3;  	else @@ -1106,6 +1578,7 @@ static int alloc_nand_resource(struct platform_device *pdev)  		return -ENOMEM;  	info->pdev = pdev; +	info->variant = pxa3xx_nand_get_variant(pdev);  	for (cs = 0; cs < pdata->num_cs; cs++) {  		mtd = (struct mtd_info *)((unsigned int)&info[1] +  		      (sizeof(*mtd) + sizeof(*host)) * cs); @@ -1123,11 +1596,12 @@ static int alloc_nand_resource(struct platform_device *pdev)  		chip->controller        = &info->controller;  		chip->waitfunc		= pxa3xx_nand_waitfunc;  		chip->select_chip	= pxa3xx_nand_select_chip; -		chip->cmdfunc		= pxa3xx_nand_cmdfunc;  		chip->read_word		= pxa3xx_nand_read_word;  		chip->read_byte		= pxa3xx_nand_read_byte;  		chip->read_buf		= pxa3xx_nand_read_buf;  		chip->write_buf		= pxa3xx_nand_write_buf; +		chip->options		|= NAND_NO_SUBPAGE_WRITE; +		chip->cmdfunc		= nand_cmdfunc;  	}  	spin_lock_init(&chip->controller->lock); @@ -1187,15 +1661,18 @@ static int alloc_nand_resource(struct platform_device *pdev)  	}  	info->mmio_phys = r->start; -	ret = pxa3xx_nand_init_buff(info); -	if (ret) +	/* Allocate a buffer to allow flash detection */ +	info->buf_size = INIT_BUFFER_SIZE; +	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL); +	if (info->data_buff == NULL) { +		ret = -ENOMEM;  		goto fail_disable_clk; +	}  	/* initialize all interrupts to be disabled */  	disable_int(info, NDSR_MASK); -	ret = request_irq(irq, pxa3xx_nand_irq, IRQF_DISABLED, -			  pdev->name, info); +	ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info);  	
if (ret < 0) {  		dev_err(&pdev->dev, "failed to request IRQ\n");  		goto fail_free_buf; @@ -1207,7 +1684,7 @@ static int alloc_nand_resource(struct platform_device *pdev)  fail_free_buf:  	free_irq(irq, info); -	pxa3xx_nand_free_buff(info); +	kfree(info->data_buff);  fail_disable_clk:  	clk_disable_unprepare(info->clk);  	return ret; @@ -1236,29 +1713,6 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)  	return 0;  } -static struct of_device_id pxa3xx_nand_dt_ids[] = { -	{ -		.compatible = "marvell,pxa3xx-nand", -		.data       = (void *)PXA3XX_NAND_VARIANT_PXA, -	}, -	{ -		.compatible = "marvell,armada370-nand", -		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370, -	}, -	{} -}; -MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids); - -static enum pxa3xx_nand_variant -pxa3xx_nand_get_variant(struct platform_device *pdev) -{ -	const struct of_device_id *of_id = -			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev); -	if (!of_id) -		return PXA3XX_NAND_VARIANT_PXA; -	return (enum pxa3xx_nand_variant)of_id->data; -} -  static int pxa3xx_nand_probe_dt(struct platform_device *pdev)  {  	struct pxa3xx_nand_platform_data *pdata; @@ -1278,6 +1732,15 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)  	if (of_get_property(np, "marvell,nand-keep-config", NULL))  		pdata->keep_config = 1;  	of_property_read_u32(np, "num-cs", &pdata->num_cs); +	pdata->flash_bbt = of_get_nand_on_flash_bbt(np); + +	pdata->ecc_strength = of_get_nand_ecc_strength(np); +	if (pdata->ecc_strength < 0) +		pdata->ecc_strength = 0; + +	pdata->ecc_step_size = of_get_nand_ecc_step_size(np); +	if (pdata->ecc_step_size < 0) +		pdata->ecc_step_size = 0;  	pdev->dev.platform_data = pdata; @@ -1315,12 +1778,16 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)  	}  	info = platform_get_drvdata(pdev); -	info->variant = pxa3xx_nand_get_variant(pdev);  	probe_success = 0;  	for (cs = 0; cs < pdata->num_cs; cs++) {  		struct mtd_info *mtd = info->host[cs]->mtd; -		mtd->name = 
pdev->name; +		/* +		 * The mtd name matches the one used in 'mtdparts' kernel +		 * parameter. This name cannot be changed or otherwise +		 * user's mtd partitions configuration would get broken. +		 */ +		mtd->name = "pxa3xx_nand-0";  		info->cs = cs;  		ret = pxa3xx_nand_scan(mtd);  		if (ret) { @@ -1407,7 +1874,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)  static struct platform_driver pxa3xx_nand_driver = {  	.driver = {  		.name	= "pxa3xx-nand", -		.of_match_table = of_match_ptr(pxa3xx_nand_dt_ids), +		.of_match_table = pxa3xx_nand_dt_ids,  	},  	.probe		= pxa3xx_nand_probe,  	.remove		= pxa3xx_nand_remove, diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c index 9dcf02d22aa..baea83f4dea 100644 --- a/drivers/mtd/nand/r852.c +++ b/drivers/mtd/nand/r852.c @@ -181,7 +181,7 @@ static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)  	/* Set dma direction */  	dev->dma_dir = do_read;  	dev->dma_stage = 1; -	INIT_COMPLETION(dev->dma_done); +	reinit_completion(&dev->dma_done);  	dbg_verbose("doing dma %s ", do_read ? 
"read" : "write"); @@ -245,7 +245,7 @@ static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)  	}  	/* write DWORD chinks - faster */ -	while (len) { +	while (len >= 4) {  		reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;  		r852_write_reg_dword(dev, R852_DATALINE, reg);  		buf += 4; @@ -254,8 +254,10 @@ static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)  	}  	/* write rest */ -	while (len) +	while (len > 0) {  		r852_write_reg(dev, R852_DATALINE, *buf++); +		len--; +	}  }  /* diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index d65cbe903d4..79acbb8691b 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c @@ -29,7 +29,6 @@  #include <linux/module.h>  #include <linux/types.h> -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/string.h>  #include <linux/io.h> @@ -46,9 +45,43 @@  #include <linux/mtd/nand_ecc.h>  #include <linux/mtd/partitions.h> -#include <plat/regs-nand.h>  #include <linux/platform_data/mtd-nand-s3c2410.h> +#define S3C2410_NFREG(x) (x) + +#define S3C2410_NFCONF		S3C2410_NFREG(0x00) +#define S3C2410_NFCMD		S3C2410_NFREG(0x04) +#define S3C2410_NFADDR		S3C2410_NFREG(0x08) +#define S3C2410_NFDATA		S3C2410_NFREG(0x0C) +#define S3C2410_NFSTAT		S3C2410_NFREG(0x10) +#define S3C2410_NFECC		S3C2410_NFREG(0x14) +#define S3C2440_NFCONT		S3C2410_NFREG(0x04) +#define S3C2440_NFCMD		S3C2410_NFREG(0x08) +#define S3C2440_NFADDR		S3C2410_NFREG(0x0C) +#define S3C2440_NFDATA		S3C2410_NFREG(0x10) +#define S3C2440_NFSTAT		S3C2410_NFREG(0x20) +#define S3C2440_NFMECC0		S3C2410_NFREG(0x2C) +#define S3C2412_NFSTAT		S3C2410_NFREG(0x28) +#define S3C2412_NFMECC0		S3C2410_NFREG(0x34) +#define S3C2410_NFCONF_EN		(1<<15) +#define S3C2410_NFCONF_INITECC		(1<<12) +#define S3C2410_NFCONF_nFCE		(1<<11) +#define S3C2410_NFCONF_TACLS(x)		((x)<<8) +#define S3C2410_NFCONF_TWRPH0(x)	((x)<<4) +#define S3C2410_NFCONF_TWRPH1(x)	((x)<<0) +#define S3C2410_NFSTAT_BUSY		(1<<0) 
+#define S3C2440_NFCONF_TACLS(x)		((x)<<12) +#define S3C2440_NFCONF_TWRPH0(x)	((x)<<8) +#define S3C2440_NFCONF_TWRPH1(x)	((x)<<4) +#define S3C2440_NFCONT_INITECC		(1<<4) +#define S3C2440_NFCONT_nFCE		(1<<1) +#define S3C2440_NFCONT_ENABLE		(1<<0) +#define S3C2440_NFSTAT_READY		(1<<0) +#define S3C2412_NFCONF_NANDBOOT		(1<<31) +#define S3C2412_NFCONT_INIT_MAIN_ECC	(1<<5) +#define S3C2412_NFCONT_nFCE0		(1<<1) +#define S3C2412_NFSTAT_READY		(1<<0) +  /* new oob placement block for use with hardware ecc generation   */ @@ -919,7 +952,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)  	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);  	if (info == NULL) { -		dev_err(&pdev->dev, "no memory for flash info\n");  		err = -ENOMEM;  		goto exit_error;  	} @@ -974,7 +1006,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)  	size = nr_sets * sizeof(*info->mtds);  	info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);  	if (info->mtds == NULL) { -		dev_err(&pdev->dev, "failed to allocate mtd storage\n");  		err = -ENOMEM;  		goto exit_error;  	} diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index a3c84ebbe39..c0670237e7a 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c @@ -151,7 +151,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)  	dma_cap_set(DMA_SLAVE, mask);  	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter, -					    (void *)pdata->slave_id_fifo0_tx); +				(void *)(uintptr_t)pdata->slave_id_fifo0_tx);  	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,  		flctl->chan_fifo0_tx); @@ -168,7 +168,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)  		goto err;  	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter, -					    (void *)pdata->slave_id_fifo0_rx); +				(void *)(uintptr_t)pdata->slave_id_fifo0_rx);  	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,  		flctl->chan_fifo0_rx); @@ -897,7 +897,7 @@ static void 
flctl_select_chip(struct mtd_info *mtd, int chipnr)  		if (!flctl->qos_request) {  			ret = dev_pm_qos_add_request(&flctl->pdev->dev,  							&flctl->pm_qos, -							DEV_PM_QOS_LATENCY, +							DEV_PM_QOS_RESUME_LATENCY,  							100);  			if (ret < 0)  				dev_err(&flctl->pdev->dev, @@ -1021,7 +1021,6 @@ static irqreturn_t flctl_handle_flste(int irq, void *dev_id)  	return IRQ_HANDLED;  } -#ifdef CONFIG_OF  struct flctl_soc_config {  	unsigned long flcmncr_val;  	unsigned has_hwecc:1; @@ -1059,10 +1058,8 @@ static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)  	pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),  								GFP_KERNEL); -	if (!pdata) { -		dev_err(dev, "%s: failed to allocate config data\n", __func__); +	if (!pdata)  		return NULL; -	}  	/* set SoC specific options */  	pdata->flcmncr_val = config->flcmncr_val; @@ -1080,12 +1077,6 @@ static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)  	return pdata;  } -#else /* CONFIG_OF */ -static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev) -{ -	return NULL; -} -#endif /* CONFIG_OF */  static int flctl_probe(struct platform_device *pdev)  { @@ -1094,38 +1085,30 @@ static int flctl_probe(struct platform_device *pdev)  	struct mtd_info *flctl_mtd;  	struct nand_chip *nand;  	struct sh_flctl_platform_data *pdata; -	int ret = -ENXIO; +	int ret;  	int irq;  	struct mtd_part_parser_data ppdata = {}; -	flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL); -	if (!flctl) { -		dev_err(&pdev->dev, "failed to allocate driver data\n"); +	flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL); +	if (!flctl)  		return -ENOMEM; -	}  	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	if (!res) { -		dev_err(&pdev->dev, "failed to get I/O memory\n"); -		goto err_iomap; -	} - -	flctl->reg = ioremap(res->start, resource_size(res)); -	if (flctl->reg == NULL) { -		dev_err(&pdev->dev, "failed to remap I/O memory\n"); -		goto err_iomap; -	} +	
flctl->reg = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(flctl->reg)) +		return PTR_ERR(flctl->reg);  	irq = platform_get_irq(pdev, 0);  	if (irq < 0) {  		dev_err(&pdev->dev, "failed to get flste irq data\n"); -		goto err_flste; +		return -ENXIO;  	} -	ret = request_irq(irq, flctl_handle_flste, IRQF_SHARED, "flste", flctl); +	ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED, +			       "flste", flctl);  	if (ret) {  		dev_err(&pdev->dev, "request interrupt failed.\n"); -		goto err_flste; +		return ret;  	}  	if (pdev->dev.of_node) @@ -1135,8 +1118,7 @@ static int flctl_probe(struct platform_device *pdev)  	if (!pdata) {  		dev_err(&pdev->dev, "no setup data defined\n"); -		ret = -EINVAL; -		goto err_pdata; +		return -EINVAL;  	}  	platform_set_drvdata(pdev, flctl); @@ -1190,12 +1172,6 @@ static int flctl_probe(struct platform_device *pdev)  err_chip:  	flctl_release_dma(flctl);  	pm_runtime_disable(&pdev->dev); -err_pdata: -	free_irq(irq, flctl); -err_flste: -	iounmap(flctl->reg); -err_iomap: -	kfree(flctl);  	return ret;  } @@ -1206,9 +1182,6 @@ static int flctl_remove(struct platform_device *pdev)  	flctl_release_dma(flctl);  	nand_release(&flctl->mtd);  	pm_runtime_disable(&pdev->dev); -	free_irq(platform_get_irq(pdev, 0), flctl); -	iounmap(flctl->reg); -	kfree(flctl);  	return 0;  } diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c index 87908d760fe..e81059b5838 100644 --- a/drivers/mtd/nand/sharpsl.c +++ b/drivers/mtd/nand/sharpsl.c @@ -121,10 +121,8 @@ static int sharpsl_nand_probe(struct platform_device *pdev)  	/* Allocate memory for MTD device structure and private data */  	sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL); -	if (!sharpsl) { -		printk("Unable to allocate SharpSL NAND MTD device structure.\n"); +	if (!sharpsl)  		return -ENOMEM; -	}  	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);  	if (!r) { @@ -136,7 +134,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)  	
/* map physical address */  	sharpsl->io = ioremap(r->start, resource_size(r));  	if (!sharpsl->io) { -		printk("ioremap to access Sharp SL NAND chip failed\n"); +		dev_err(&pdev->dev, "ioremap to access Sharp SL NAND chip failed\n");  		err = -EIO;  		goto err_ioremap;  	} diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c index 09dde7d2717..fe8058a4505 100644 --- a/drivers/mtd/nand/socrates_nand.c +++ b/drivers/mtd/nand/socrates_nand.c @@ -15,6 +15,7 @@  #include <linux/mtd/mtd.h>  #include <linux/mtd/nand.h>  #include <linux/mtd/partitions.h> +#include <linux/of_address.h>  #include <linux/of_platform.h>  #include <linux/io.h> @@ -149,17 +150,13 @@ static int socrates_nand_probe(struct platform_device *ofdev)  	struct mtd_part_parser_data ppdata;  	/* Allocate memory for the device structure (and zero it) */ -	host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL); -	if (!host) { -		printk(KERN_ERR -		       "socrates_nand: failed to allocate device structure.\n"); +	host = devm_kzalloc(&ofdev->dev, sizeof(*host), GFP_KERNEL); +	if (!host)  		return -ENOMEM; -	}  	host->io_base = of_iomap(ofdev->dev.of_node, 0);  	if (host->io_base == NULL) { -		printk(KERN_ERR "socrates_nand: ioremap failed\n"); -		kfree(host); +		dev_err(&ofdev->dev, "ioremap failed\n");  		return -EIO;  	} @@ -211,9 +208,7 @@ static int socrates_nand_probe(struct platform_device *ofdev)  	nand_release(mtd);  out: -	dev_set_drvdata(&ofdev->dev, NULL);  	iounmap(host->io_base); -	kfree(host);  	return res;  } @@ -227,9 +222,7 @@ static int socrates_nand_remove(struct platform_device *ofdev)  	nand_release(mtd); -	dev_set_drvdata(&ofdev->dev, NULL);  	iounmap(host->io_base); -	kfree(host);  	return 0;  } diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c index 396530d87ec..fb8fd35fa66 100644 --- a/drivers/mtd/nand/tmio_nand.c +++ b/drivers/mtd/nand/tmio_nand.c @@ -371,11 +371,9 @@ static int tmio_probe(struct platform_device *dev)  	if 
(data == NULL)  		dev_warn(&dev->dev, "NULL platform data!\n"); -	tmio = kzalloc(sizeof *tmio, GFP_KERNEL); -	if (!tmio) { -		retval = -ENOMEM; -		goto err_kzalloc; -	} +	tmio = devm_kzalloc(&dev->dev, sizeof(*tmio), GFP_KERNEL); +	if (!tmio) +		return -ENOMEM;  	tmio->dev = dev; @@ -385,22 +383,18 @@ static int tmio_probe(struct platform_device *dev)  	mtd->priv = nand_chip;  	mtd->name = "tmio-nand"; -	tmio->ccr = ioremap(ccr->start, resource_size(ccr)); -	if (!tmio->ccr) { -		retval = -EIO; -		goto err_iomap_ccr; -	} +	tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr)); +	if (!tmio->ccr) +		return -EIO;  	tmio->fcr_base = fcr->start & 0xfffff; -	tmio->fcr = ioremap(fcr->start, resource_size(fcr)); -	if (!tmio->fcr) { -		retval = -EIO; -		goto err_iomap_fcr; -	} +	tmio->fcr = devm_ioremap(&dev->dev, fcr->start, resource_size(fcr)); +	if (!tmio->fcr) +		return -EIO;  	retval = tmio_hw_init(dev, tmio);  	if (retval) -		goto err_hwinit; +		return retval;  	/* Set address of NAND IO lines */  	nand_chip->IO_ADDR_R = tmio->fcr; @@ -428,8 +422,8 @@ static int tmio_probe(struct platform_device *dev)  	/* 15 us command delay time */  	nand_chip->chip_delay = 15; -	retval = request_irq(irq, &tmio_irq, -				IRQF_DISABLED, dev_name(&dev->dev), tmio); +	retval = devm_request_irq(&dev->dev, irq, &tmio_irq, 0, +				  dev_name(&dev->dev), tmio);  	if (retval) {  		dev_err(&dev->dev, "request_irq error %d\n", retval);  		goto err_irq; @@ -441,7 +435,7 @@ static int tmio_probe(struct platform_device *dev)  	/* Scan to find existence of the device */  	if (nand_scan(mtd, 1)) {  		retval = -ENODEV; -		goto err_scan; +		goto err_irq;  	}  	/* Register the partitions */  	retval = mtd_device_parse_register(mtd, NULL, NULL, @@ -452,18 +446,8 @@ static int tmio_probe(struct platform_device *dev)  	nand_release(mtd); -err_scan: -	if (tmio->irq) -		free_irq(tmio->irq, tmio);  err_irq:  	tmio_hw_stop(dev, tmio); -err_hwinit: -	iounmap(tmio->fcr); -err_iomap_fcr: -	
iounmap(tmio->ccr); -err_iomap_ccr: -	kfree(tmio); -err_kzalloc:  	return retval;  } @@ -472,12 +456,7 @@ static int tmio_remove(struct platform_device *dev)  	struct tmio_nand *tmio = platform_get_drvdata(dev);  	nand_release(&tmio->mtd); -	if (tmio->irq) -		free_irq(tmio->irq, tmio);  	tmio_hw_stop(dev, tmio); -	iounmap(tmio->fcr); -	iounmap(tmio->ccr); -	kfree(tmio);  	return 0;  } diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c index 235714a421d..c1622a5ba81 100644 --- a/drivers/mtd/nand/txx9ndfmc.c +++ b/drivers/mtd/nand/txx9ndfmc.c @@ -319,11 +319,8 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)  			continue;  		txx9_priv = kzalloc(sizeof(struct txx9ndfmc_priv),  				    GFP_KERNEL); -		if (!txx9_priv) { -			dev_err(&dev->dev, "Unable to allocate " -				"TXx9 NDFMC MTD device structure.\n"); +		if (!txx9_priv)  			continue; -		}  		chip = &txx9_priv->chip;  		mtd = &txx9_priv->mtd;  		mtd->owner = THIS_MODULE; diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c index c5f4ebf4b38..46f27de018c 100644 --- a/drivers/mtd/nftlcore.c +++ b/drivers/mtd/nftlcore.c @@ -50,7 +50,7 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)  	struct NFTLrecord *nftl;  	unsigned long temp; -	if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX) +	if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)  		return;  	/* OK, this is moderately ugly.  But probably safe.  Alternatives? 
*/  	if (memcmp(mtd->name, "DiskOnChip", 10)) diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index d64f8c30945..aa26c32e1bc 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c @@ -81,7 +81,7 @@ static int parse_ofpart_partitions(struct mtd_info *master,  		partname = of_get_property(pp, "label", &len);  		if (!partname)  			partname = of_get_property(pp, "name", &len); -		(*pparts)[i].name = (char *)partname; +		(*pparts)[i].name = partname;  		if (of_get_property(pp, "read-only", &len))  			(*pparts)[i].mask_flags |= MTD_WRITEABLE; @@ -152,7 +152,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master,  		if (names && (plen > 0)) {  			int len = strlen(names) + 1; -			(*pparts)[i].name = (char *)names; +			(*pparts)[i].name = names;  			plen -= len;  			names += len;  		} else { @@ -173,18 +173,9 @@ static struct mtd_part_parser ofoldpart_parser = {  static int __init ofpart_parser_init(void)  { -	int rc; -	rc = register_mtd_parser(&ofpart_parser); -	if (rc) -		goto out; - -	rc = register_mtd_parser(&ofoldpart_parser); -	if (!rc) -		return 0; - -	deregister_mtd_parser(&ofoldpart_parser); -out: -	return rc; +	register_mtd_parser(&ofpart_parser); +	register_mtd_parser(&ofoldpart_parser); +	return 0;  }  static void __exit ofpart_parser_exit(void) diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c index 63699fffc96..093c29ac1a1 100644 --- a/drivers/mtd/onenand/generic.c +++ b/drivers/mtd/onenand/generic.c @@ -13,7 +13,6 @@   */  #include <linux/module.h> -#include <linux/init.h>  #include <linux/slab.h>  #include <linux/platform_device.h>  #include <linux/mtd/mtd.h> @@ -58,7 +57,7 @@ static int generic_onenand_probe(struct platform_device *pdev)  		goto out_release_mem_region;  	} -	info->onenand.mmcontrol = pdata ? pdata->mmcontrol : 0; +	info->onenand.mmcontrol = pdata ? 
pdata->mmcontrol : NULL;  	info->onenand.irq = platform_get_irq(pdev, 0);  	info->mtd.name = dev_name(&pdev->dev); diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 558071bf92d..d945473c388 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -25,7 +25,6 @@  #include <linux/device.h>  #include <linux/module.h> -#include <linux/init.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/onenand.h>  #include <linux/mtd/partitions.h> @@ -159,7 +158,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)  				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);  		} -		INIT_COMPLETION(c->irq_done); +		reinit_completion(&c->irq_done);  		if (c->gpio_irq) {  			result = gpio_get_value(c->gpio_irq);  			if (result == -1) { @@ -349,7 +348,7 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,  	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,  				 dma_dst, 0, 0); -	INIT_COMPLETION(c->dma_done); +	reinit_completion(&c->dma_done);  	omap_start_dma(c->dma_channel);  	timeout = jiffies + msecs_to_jiffies(20); @@ -420,7 +419,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,  	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,  				 dma_dst, 0, 0); -	INIT_COMPLETION(c->dma_done); +	reinit_completion(&c->dma_done);  	omap_start_dma(c->dma_channel);  	timeout = jiffies + msecs_to_jiffies(20); @@ -499,7 +498,7 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,  	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,  				 dma_dst, 0, 0); -	INIT_COMPLETION(c->dma_done); +	reinit_completion(&c->dma_done);  	omap_start_dma(c->dma_channel);  	wait_for_completion(&c->dma_done); @@ -544,7 +543,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,  	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,  				 dma_dst, 0, 0); -	INIT_COMPLETION(c->dma_done); +	
reinit_completion(&c->dma_done);  	omap_start_dma(c->dma_channel);  	wait_for_completion(&c->dma_done); @@ -573,28 +572,6 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,  static struct platform_driver omap2_onenand_driver; -static int __adjust_timing(struct device *dev, void *data) -{ -	int ret = 0; -	struct omap2_onenand *c; - -	c = dev_get_drvdata(dev); - -	BUG_ON(c->setup == NULL); - -	/* DMA is not in use so this is all that is needed */ -	/* Revisit for OMAP3! */ -	ret = c->setup(c->onenand.base, &c->freq); - -	return ret; -} - -int omap2_onenand_rephase(void) -{ -	return driver_for_each_device(&omap2_onenand_driver.driver, NULL, -				      NULL, __adjust_timing); -} -  static void omap2_onenand_shutdown(struct platform_device *pdev)  {  	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev); diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index b3f41f20062..635ee002769 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -24,7 +24,6 @@  #include <linux/module.h>  #include <linux/moduleparam.h>  #include <linux/slab.h> -#include <linux/init.h>  #include <linux/sched.h>  #include <linux/delay.h>  #include <linux/interrupt.h> @@ -2556,10 +2555,6 @@ static int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)  {  	int ret; -	/* Check for invalid offset */ -	if (ofs > mtd->size) -		return -EINVAL; -  	onenand_get_device(mtd, FL_READING);  	ret = onenand_block_isbad_nolock(mtd, ofs, 0);  	onenand_release_device(mtd); @@ -3242,20 +3237,17 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,  /**   * onenand_get_fact_prot_info - [MTD Interface] Read factory OTP info   * @param mtd		MTD device structure - * @param buf		the databuffer to put/get data   * @param len		number of bytes to read + * @param retlen	pointer to variable to store the number of read bytes + * @param buf		the databuffer to put/get data   *   * Read factory OTP info.   
*/ -static int onenand_get_fact_prot_info(struct mtd_info *mtd, -			struct otp_info *buf, size_t len) +static int onenand_get_fact_prot_info(struct mtd_info *mtd, size_t len, +				      size_t *retlen, struct otp_info *buf)  { -	size_t retlen; -	int ret; - -	ret = onenand_otp_walk(mtd, 0, len, &retlen, (u_char *) buf, NULL, MTD_OTP_FACTORY); - -	return ret ? : retlen; +	return onenand_otp_walk(mtd, 0, len, retlen, (u_char *) buf, NULL, +				MTD_OTP_FACTORY);  }  /** @@ -3277,20 +3269,17 @@ static int onenand_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,  /**   * onenand_get_user_prot_info - [MTD Interface] Read user OTP info   * @param mtd		MTD device structure - * @param buf		the databuffer to put/get data + * @param retlen	pointer to variable to store the number of read bytes   * @param len		number of bytes to read + * @param buf		the databuffer to put/get data   *   * Read user OTP info.   */ -static int onenand_get_user_prot_info(struct mtd_info *mtd, -			struct otp_info *buf, size_t len) +static int onenand_get_user_prot_info(struct mtd_info *mtd, size_t len, +				      size_t *retlen, struct otp_info *buf)  { -	size_t retlen; -	int ret; - -	ret = onenand_otp_walk(mtd, 0, len, &retlen, (u_char *) buf, NULL, MTD_OTP_USER); - -	return ret ? 
: retlen; +	return onenand_otp_walk(mtd, 0, len, retlen, (u_char *) buf, NULL, +				MTD_OTP_USER);  }  /** @@ -3529,7 +3518,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)  {  	struct onenand_chip *this = mtd->priv;  	unsigned die, bdry; -	int ret, syscfg, locked; +	int syscfg, locked;  	/* Disable ECC */  	syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1); @@ -3540,7 +3529,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)  		this->wait(mtd, FL_SYNCING);  		this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0); -		ret = this->wait(mtd, FL_READING); +		this->wait(mtd, FL_READING);  		bdry = this->read_word(this->base + ONENAND_DATARAM);  		if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3) @@ -3550,7 +3539,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)  		this->boundary[die] = bdry & FLEXONENAND_PI_MASK;  		this->command(mtd, ONENAND_CMD_RESET, 0, 0); -		ret = this->wait(mtd, FL_RESETING); +		this->wait(mtd, FL_RESETING);  		printk(KERN_INFO "Die %d boundary: %d%s\n", die,  		       this->boundary[die], locked ? 
"(Locked)" : "(Unlocked)"); @@ -3734,7 +3723,7 @@ static int flexonenand_set_boundary(struct mtd_info *mtd, int die,  	/* Check is boundary is locked */  	this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0); -	ret = this->wait(mtd, FL_READING); +	this->wait(mtd, FL_READING);  	thisboundary = this->read_word(this->base + ONENAND_DATARAM);  	if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) { @@ -3835,7 +3824,7 @@ static int onenand_chip_probe(struct mtd_info *mtd)  static int onenand_probe(struct mtd_info *mtd)  {  	struct onenand_chip *this = mtd->priv; -	int maf_id, dev_id, ver_id; +	int dev_id, ver_id;  	int density;  	int ret; @@ -3843,8 +3832,7 @@ static int onenand_probe(struct mtd_info *mtd)  	if (ret)  		return ret; -	/* Read manufacturer and device IDs from Register */ -	maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID); +	/* Device and version IDs from Register */  	dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);  	ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);  	this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY); @@ -4000,11 +3988,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)  	/* Allocate buffers, if necessary */  	if (!this->page_buf) {  		this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL); -		if (!this->page_buf) { -			printk(KERN_ERR "%s: Can't allocate page_buf\n", -				__func__); +		if (!this->page_buf)  			return -ENOMEM; -		}  #ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE  		this->verify_buf = kzalloc(mtd->writesize, GFP_KERNEL);  		if (!this->verify_buf) { @@ -4017,8 +4002,6 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)  	if (!this->oob_buf) {  		this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL);  		if (!this->oob_buf) { -			printk(KERN_ERR "%s: Can't allocate oob_buf\n", -				__func__);  			if (this->options & ONENAND_PAGEBUF_ALLOC) {  				this->options &= ~ONENAND_PAGEBUF_ALLOC;  				kfree(this->page_buf); diff --git a/drivers/mtd/onenand/samsung.c 
b/drivers/mtd/onenand/samsung.c index df7400dd4df..efb819c3df2 100644 --- a/drivers/mtd/onenand/samsung.c +++ b/drivers/mtd/onenand/samsung.c @@ -537,9 +537,9 @@ static int onenand_write_bufferram(struct mtd_info *mtd, int area,  	return 0;  } -static int (*s5pc110_dma_ops)(void *dst, void *src, size_t count, int direction); +static int (*s5pc110_dma_ops)(dma_addr_t dst, dma_addr_t src, size_t count, int direction); -static int s5pc110_dma_poll(void *dst, void *src, size_t count, int direction) +static int s5pc110_dma_poll(dma_addr_t dst, dma_addr_t src, size_t count, int direction)  {  	void __iomem *base = onenand->dma_addr;  	int status; @@ -605,7 +605,7 @@ static irqreturn_t s5pc110_onenand_irq(int irq, void *data)  	return IRQ_HANDLED;  } -static int s5pc110_dma_irq(void *dst, void *src, size_t count, int direction) +static int s5pc110_dma_irq(dma_addr_t dst, dma_addr_t src, size_t count, int direction)  {  	void __iomem *base = onenand->dma_addr;  	int status; @@ -686,7 +686,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,  		dev_err(dev, "Couldn't map a %d byte buffer for DMA\n", count);  		goto normal;  	} -	err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src, +	err = s5pc110_dma_ops(dma_dst, dma_src,  			count, S5PC110_DMA_DIR_READ);  	if (page_dma) @@ -872,10 +872,8 @@ static int s3c_onenand_probe(struct platform_device *pdev)  	size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);  	mtd = kzalloc(size, GFP_KERNEL); -	if (!mtd) { -		dev_err(&pdev->dev, "failed to allocate memory\n"); +	if (!mtd)  		return -ENOMEM; -	}  	onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);  	if (!onenand) { diff --git a/drivers/mtd/onenand/samsung.h b/drivers/mtd/onenand/samsung.h index c4a80e67e43..9016dc0136a 100644 --- a/drivers/mtd/onenand/samsung.h +++ b/drivers/mtd/onenand/samsung.h @@ -1,6 +1,4 @@  /* - * linux/arch/arm/plat-s3c/include/plat/regs-onenand.h - *   *  Copyright (C) 2008-2010 Samsung Electronics   *  Kyungmin 
Park <kyungmin.park@samsung.com>   * diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c index 580035c803d..5da911ebdf4 100644 --- a/drivers/mtd/redboot.c +++ b/drivers/mtd/redboot.c @@ -300,7 +300,8 @@ MODULE_ALIAS("RedBoot");  static int __init redboot_parser_init(void)  { -	return register_mtd_parser(&redboot_parser); +	register_mtd_parser(&redboot_parser); +	return 0;  }  static void __exit redboot_parser_exit(void) diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c index 233b946e5d6..d1cbf26db2c 100644 --- a/drivers/mtd/rfd_ftl.c +++ b/drivers/mtd/rfd_ftl.c @@ -602,8 +602,7 @@ static int mark_sector_deleted(struct partition *part, u_long old_addr)  	if (rc) {  		printk(KERN_ERR PREFIX "error writing '%s' at "  			"0x%lx\n", part->mbd.mtd->name, addr); -		if (rc) -			goto err; +		goto err;  	}  	if (block == part->current_block)  		part->header_cache[offset + HEADER_MAP_OFFSET] = del; @@ -675,8 +674,7 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,  	if (rc) {  		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",  				part->mbd.mtd->name, addr); -		if (rc) -			goto err; +		goto err;  	}  	part->sector_map[sector] = addr; @@ -695,8 +693,7 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,  	if (rc) {  		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",  				part->mbd.mtd->name, addr); -		if (rc) -			goto err; +		goto err;  	}  	block->used_sectors++;  	block->free_sectors--; diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index 4b8e89583f2..cf49c22673b 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c @@ -59,15 +59,12 @@ static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)  	struct attribute_group *attr_group;  	struct attribute **attributes;  	struct sm_sysfs_attribute *vendor_attribute; +	char *vendor; -	int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, -					SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET); - -	char 
*vendor = kmalloc(vendor_len, GFP_KERNEL); +	vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, +			  SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);  	if (!vendor)  		goto error1; -	memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len); -	vendor[vendor_len] = 0;  	/* Initialize sysfs attributes */  	vendor_attribute = @@ -78,7 +75,7 @@ static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)  	sysfs_attr_init(&vendor_attribute->dev_attr.attr);  	vendor_attribute->data = vendor; -	vendor_attribute->len = vendor_len; +	vendor_attribute->len = strlen(vendor);  	vendor_attribute->dev_attr.attr.name = "vendor";  	vendor_attribute->dev_attr.attr.mode = S_IRUGO;  	vendor_attribute->dev_attr.show = sm_attr_show; diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig new file mode 100644 index 00000000000..f8acfa4310e --- /dev/null +++ b/drivers/mtd/spi-nor/Kconfig @@ -0,0 +1,17 @@ +menuconfig MTD_SPI_NOR +	tristate "SPI-NOR device support" +	depends on MTD +	help +	  This is the framework for the SPI NOR which can be used by the SPI +	  device drivers and the SPI-NOR device driver. + +if MTD_SPI_NOR + +config SPI_FSL_QUADSPI +	tristate "Freescale Quad SPI controller" +	depends on ARCH_MXC +	help +	  This enables support for the Quad SPI controller in master mode. +	  We only connect the NOR to this controller now. + +endif # MTD_SPI_NOR diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile new file mode 100644 index 00000000000..6a7ce146224 --- /dev/null +++ b/drivers/mtd/spi-nor/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_MTD_SPI_NOR)	+= spi-nor.o +obj-$(CONFIG_SPI_FSL_QUADSPI)	+= fsl-quadspi.o diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c new file mode 100644 index 00000000000..8d659a2888d --- /dev/null +++ b/drivers/mtd/spi-nor/fsl-quadspi.c @@ -0,0 +1,1009 @@ +/* + * Freescale QuadSPI driver. + * + * Copyright (C) 2013 Freescale Semiconductor, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/errno.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/timer.h> +#include <linux/jiffies.h> +#include <linux/completion.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/spi-nor.h> + +/* The registers */ +#define QUADSPI_MCR			0x00 +#define QUADSPI_MCR_RESERVED_SHIFT	16 +#define QUADSPI_MCR_RESERVED_MASK	(0xF << QUADSPI_MCR_RESERVED_SHIFT) +#define QUADSPI_MCR_MDIS_SHIFT		14 +#define QUADSPI_MCR_MDIS_MASK		(1 << QUADSPI_MCR_MDIS_SHIFT) +#define QUADSPI_MCR_CLR_TXF_SHIFT	11 +#define QUADSPI_MCR_CLR_TXF_MASK	(1 << QUADSPI_MCR_CLR_TXF_SHIFT) +#define QUADSPI_MCR_CLR_RXF_SHIFT	10 +#define QUADSPI_MCR_CLR_RXF_MASK	(1 << QUADSPI_MCR_CLR_RXF_SHIFT) +#define QUADSPI_MCR_DDR_EN_SHIFT	7 +#define QUADSPI_MCR_DDR_EN_MASK		(1 << QUADSPI_MCR_DDR_EN_SHIFT) +#define QUADSPI_MCR_END_CFG_SHIFT	2 +#define QUADSPI_MCR_END_CFG_MASK	(3 << QUADSPI_MCR_END_CFG_SHIFT) +#define QUADSPI_MCR_SWRSTHD_SHIFT	1 +#define QUADSPI_MCR_SWRSTHD_MASK	(1 << QUADSPI_MCR_SWRSTHD_SHIFT) +#define QUADSPI_MCR_SWRSTSD_SHIFT	0 +#define QUADSPI_MCR_SWRSTSD_MASK	(1 << QUADSPI_MCR_SWRSTSD_SHIFT) + +#define QUADSPI_IPCR			0x08 +#define QUADSPI_IPCR_SEQID_SHIFT	24 +#define QUADSPI_IPCR_SEQID_MASK		(0xF << QUADSPI_IPCR_SEQID_SHIFT) + +#define QUADSPI_BUF0CR			0x10 +#define QUADSPI_BUF1CR			0x14 +#define QUADSPI_BUF2CR			0x18 +#define QUADSPI_BUFXCR_INVALID_MSTRID	0xe + +#define QUADSPI_BUF3CR			0x1c +#define QUADSPI_BUF3CR_ALLMST_SHIFT	31 
+#define QUADSPI_BUF3CR_ALLMST		(1 << QUADSPI_BUF3CR_ALLMST_SHIFT) + +#define QUADSPI_BFGENCR			0x20 +#define QUADSPI_BFGENCR_PAR_EN_SHIFT	16 +#define QUADSPI_BFGENCR_PAR_EN_MASK	(1 << (QUADSPI_BFGENCR_PAR_EN_SHIFT)) +#define QUADSPI_BFGENCR_SEQID_SHIFT	12 +#define QUADSPI_BFGENCR_SEQID_MASK	(0xF << QUADSPI_BFGENCR_SEQID_SHIFT) + +#define QUADSPI_BUF0IND			0x30 +#define QUADSPI_BUF1IND			0x34 +#define QUADSPI_BUF2IND			0x38 +#define QUADSPI_SFAR			0x100 + +#define QUADSPI_SMPR			0x108 +#define QUADSPI_SMPR_DDRSMP_SHIFT	16 +#define QUADSPI_SMPR_DDRSMP_MASK	(7 << QUADSPI_SMPR_DDRSMP_SHIFT) +#define QUADSPI_SMPR_FSDLY_SHIFT	6 +#define QUADSPI_SMPR_FSDLY_MASK		(1 << QUADSPI_SMPR_FSDLY_SHIFT) +#define QUADSPI_SMPR_FSPHS_SHIFT	5 +#define QUADSPI_SMPR_FSPHS_MASK		(1 << QUADSPI_SMPR_FSPHS_SHIFT) +#define QUADSPI_SMPR_HSENA_SHIFT	0 +#define QUADSPI_SMPR_HSENA_MASK		(1 << QUADSPI_SMPR_HSENA_SHIFT) + +#define QUADSPI_RBSR			0x10c +#define QUADSPI_RBSR_RDBFL_SHIFT	8 +#define QUADSPI_RBSR_RDBFL_MASK		(0x3F << QUADSPI_RBSR_RDBFL_SHIFT) + +#define QUADSPI_RBCT			0x110 +#define QUADSPI_RBCT_WMRK_MASK		0x1F +#define QUADSPI_RBCT_RXBRD_SHIFT	8 +#define QUADSPI_RBCT_RXBRD_USEIPS	(0x1 << QUADSPI_RBCT_RXBRD_SHIFT) + +#define QUADSPI_TBSR			0x150 +#define QUADSPI_TBDR			0x154 +#define QUADSPI_SR			0x15c +#define QUADSPI_SR_IP_ACC_SHIFT		1 +#define QUADSPI_SR_IP_ACC_MASK		(0x1 << QUADSPI_SR_IP_ACC_SHIFT) +#define QUADSPI_SR_AHB_ACC_SHIFT	2 +#define QUADSPI_SR_AHB_ACC_MASK		(0x1 << QUADSPI_SR_AHB_ACC_SHIFT) + +#define QUADSPI_FR			0x160 +#define QUADSPI_FR_TFF_MASK		0x1 + +#define QUADSPI_SFA1AD			0x180 +#define QUADSPI_SFA2AD			0x184 +#define QUADSPI_SFB1AD			0x188 +#define QUADSPI_SFB2AD			0x18c +#define QUADSPI_RBDR			0x200 + +#define QUADSPI_LUTKEY			0x300 +#define QUADSPI_LUTKEY_VALUE		0x5AF05AF0 + +#define QUADSPI_LCKCR			0x304 +#define QUADSPI_LCKER_LOCK		0x1 +#define QUADSPI_LCKER_UNLOCK		0x2 + +#define QUADSPI_RSER			0x164 +#define QUADSPI_RSER_TFIE		(0x1 << 0) + +#define 
QUADSPI_LUT_BASE		0x310 + +/* + * The definition of the LUT register shows below: + * + *  --------------------------------------------------- + *  | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 | + *  --------------------------------------------------- + */ +#define OPRND0_SHIFT		0 +#define PAD0_SHIFT		8 +#define INSTR0_SHIFT		10 +#define OPRND1_SHIFT		16 + +/* Instruction set for the LUT register. */ +#define LUT_STOP		0 +#define LUT_CMD			1 +#define LUT_ADDR		2 +#define LUT_DUMMY		3 +#define LUT_MODE		4 +#define LUT_MODE2		5 +#define LUT_MODE4		6 +#define LUT_READ		7 +#define LUT_WRITE		8 +#define LUT_JMP_ON_CS		9 +#define LUT_ADDR_DDR		10 +#define LUT_MODE_DDR		11 +#define LUT_MODE2_DDR		12 +#define LUT_MODE4_DDR		13 +#define LUT_READ_DDR		14 +#define LUT_WRITE_DDR		15 +#define LUT_DATA_LEARN		16 + +/* + * The PAD definitions for LUT register. + * + * The pad stands for the lines number of IO[0:3]. + * For example, the Quad read need four IO lines, so you should + * set LUT_PAD4 which means we use four IO lines. + */ +#define LUT_PAD1		0 +#define LUT_PAD2		1 +#define LUT_PAD4		2 + +/* Oprands for the LUT register. */ +#define ADDR24BIT		0x18 +#define ADDR32BIT		0x20 + +/* Macros for constructing the LUT register. */ +#define LUT0(ins, pad, opr)						\ +		(((opr) << OPRND0_SHIFT) | ((LUT_##pad) << PAD0_SHIFT) | \ +		((LUT_##ins) << INSTR0_SHIFT)) + +#define LUT1(ins, pad, opr)	(LUT0(ins, pad, opr) << OPRND1_SHIFT) + +/* other macros for LUT register. */ +#define QUADSPI_LUT(x)          (QUADSPI_LUT_BASE + (x) * 4) +#define QUADSPI_LUT_NUM		64 + +/* SEQID -- we can have 16 seqids at most. 
*/ +#define SEQID_QUAD_READ		0 +#define SEQID_WREN		1 +#define SEQID_WRDI		2 +#define SEQID_RDSR		3 +#define SEQID_SE		4 +#define SEQID_CHIP_ERASE	5 +#define SEQID_PP		6 +#define SEQID_RDID		7 +#define SEQID_WRSR		8 +#define SEQID_RDCR		9 +#define SEQID_EN4B		10 +#define SEQID_BRWR		11 + +enum fsl_qspi_devtype { +	FSL_QUADSPI_VYBRID, +	FSL_QUADSPI_IMX6SX, +}; + +struct fsl_qspi_devtype_data { +	enum fsl_qspi_devtype devtype; +	int rxfifo; +	int txfifo; +}; + +static struct fsl_qspi_devtype_data vybrid_data = { +	.devtype = FSL_QUADSPI_VYBRID, +	.rxfifo = 128, +	.txfifo = 64 +}; + +static struct fsl_qspi_devtype_data imx6sx_data = { +	.devtype = FSL_QUADSPI_IMX6SX, +	.rxfifo = 128, +	.txfifo = 512 +}; + +#define FSL_QSPI_MAX_CHIP	4 +struct fsl_qspi { +	struct mtd_info mtd[FSL_QSPI_MAX_CHIP]; +	struct spi_nor nor[FSL_QSPI_MAX_CHIP]; +	void __iomem *iobase; +	void __iomem *ahb_base; /* Used when read from AHB bus */ +	u32 memmap_phy; +	struct clk *clk, *clk_en; +	struct device *dev; +	struct completion c; +	struct fsl_qspi_devtype_data *devtype_data; +	u32 nor_size; +	u32 nor_num; +	u32 clk_rate; +	unsigned int chip_base_addr; /* We may support two chips. */ +}; + +static inline int is_vybrid_qspi(struct fsl_qspi *q) +{ +	return q->devtype_data->devtype == FSL_QUADSPI_VYBRID; +} + +static inline int is_imx6sx_qspi(struct fsl_qspi *q) +{ +	return q->devtype_data->devtype == FSL_QUADSPI_IMX6SX; +} + +/* + * An IC bug makes us to re-arrange the 32-bit data. + * The following chips, such as IMX6SLX, have fixed this bug. + */ +static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a) +{ +	return is_vybrid_qspi(q) ? 
__swab32(a) : a; +} + +static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q) +{ +	writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); +	writel(QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR); +} + +static inline void fsl_qspi_lock_lut(struct fsl_qspi *q) +{ +	writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); +	writel(QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR); +} + +static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id) +{ +	struct fsl_qspi *q = dev_id; +	u32 reg; + +	/* clear interrupt */ +	reg = readl(q->iobase + QUADSPI_FR); +	writel(reg, q->iobase + QUADSPI_FR); + +	if (reg & QUADSPI_FR_TFF_MASK) +		complete(&q->c); + +	dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", q->chip_base_addr, reg); +	return IRQ_HANDLED; +} + +static void fsl_qspi_init_lut(struct fsl_qspi *q) +{ +	void __iomem *base = q->iobase; +	int rxfifo = q->devtype_data->rxfifo; +	u32 lut_base; +	u8 cmd, addrlen, dummy; +	int i; + +	fsl_qspi_unlock_lut(q); + +	/* Clear all the LUT table */ +	for (i = 0; i < QUADSPI_LUT_NUM; i++) +		writel(0, base + QUADSPI_LUT_BASE + i * 4); + +	/* Quad Read */ +	lut_base = SEQID_QUAD_READ * 4; + +	if (q->nor_size <= SZ_16M) { +		cmd = SPINOR_OP_READ_1_1_4; +		addrlen = ADDR24BIT; +		dummy = 8; +	} else { +		/* use the 4-byte address */ +		cmd = SPINOR_OP_READ_1_1_4; +		addrlen = ADDR32BIT; +		dummy = 8; +	} + +	writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), +			base + QUADSPI_LUT(lut_base)); +	writel(LUT0(DUMMY, PAD1, dummy) | LUT1(READ, PAD4, rxfifo), +			base + QUADSPI_LUT(lut_base + 1)); + +	/* Write enable */ +	lut_base = SEQID_WREN * 4; +	writel(LUT0(CMD, PAD1, SPINOR_OP_WREN), base + QUADSPI_LUT(lut_base)); + +	/* Page Program */ +	lut_base = SEQID_PP * 4; + +	if (q->nor_size <= SZ_16M) { +		cmd = SPINOR_OP_PP; +		addrlen = ADDR24BIT; +	} else { +		/* use the 4-byte address */ +		cmd = SPINOR_OP_PP; +		addrlen = ADDR32BIT; +	} + +	writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), +			base + 
QUADSPI_LUT(lut_base)); +	writel(LUT0(WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1)); + +	/* Read Status */ +	lut_base = SEQID_RDSR * 4; +	writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(READ, PAD1, 0x1), +			base + QUADSPI_LUT(lut_base)); + +	/* Erase a sector */ +	lut_base = SEQID_SE * 4; + +	if (q->nor_size <= SZ_16M) { +		cmd = SPINOR_OP_SE; +		addrlen = ADDR24BIT; +	} else { +		/* use the 4-byte address */ +		cmd = SPINOR_OP_SE; +		addrlen = ADDR32BIT; +	} + +	writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), +			base + QUADSPI_LUT(lut_base)); + +	/* Erase the whole chip */ +	lut_base = SEQID_CHIP_ERASE * 4; +	writel(LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE), +			base + QUADSPI_LUT(lut_base)); + +	/* READ ID */ +	lut_base = SEQID_RDID * 4; +	writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(READ, PAD1, 0x8), +			base + QUADSPI_LUT(lut_base)); + +	/* Write Register */ +	lut_base = SEQID_WRSR * 4; +	writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(WRITE, PAD1, 0x2), +			base + QUADSPI_LUT(lut_base)); + +	/* Read Configuration Register */ +	lut_base = SEQID_RDCR * 4; +	writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(READ, PAD1, 0x1), +			base + QUADSPI_LUT(lut_base)); + +	/* Write disable */ +	lut_base = SEQID_WRDI * 4; +	writel(LUT0(CMD, PAD1, SPINOR_OP_WRDI), base + QUADSPI_LUT(lut_base)); + +	/* Enter 4 Byte Mode (Micron) */ +	lut_base = SEQID_EN4B * 4; +	writel(LUT0(CMD, PAD1, SPINOR_OP_EN4B), base + QUADSPI_LUT(lut_base)); + +	/* Enter 4 Byte Mode (Spansion) */ +	lut_base = SEQID_BRWR * 4; +	writel(LUT0(CMD, PAD1, SPINOR_OP_BRWR), base + QUADSPI_LUT(lut_base)); + +	fsl_qspi_lock_lut(q); +} + +/* Get the SEQID for the command */ +static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd) +{ +	switch (cmd) { +	case SPINOR_OP_READ_1_1_4: +		return SEQID_QUAD_READ; +	case SPINOR_OP_WREN: +		return SEQID_WREN; +	case SPINOR_OP_WRDI: +		return SEQID_WRDI; +	case SPINOR_OP_RDSR: +		return SEQID_RDSR; +	case SPINOR_OP_SE: +		return SEQID_SE; +	case SPINOR_OP_CHIP_ERASE: 
+		return SEQID_CHIP_ERASE; +	case SPINOR_OP_PP: +		return SEQID_PP; +	case SPINOR_OP_RDID: +		return SEQID_RDID; +	case SPINOR_OP_WRSR: +		return SEQID_WRSR; +	case SPINOR_OP_RDCR: +		return SEQID_RDCR; +	case SPINOR_OP_EN4B: +		return SEQID_EN4B; +	case SPINOR_OP_BRWR: +		return SEQID_BRWR; +	default: +		dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd); +		break; +	} +	return -EINVAL; +} + +static int +fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len) +{ +	void __iomem *base = q->iobase; +	int seqid; +	u32 reg, reg2; +	int err; + +	init_completion(&q->c); +	dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len:%d, cmd:%.2x\n", +			q->chip_base_addr, addr, len, cmd); + +	/* save the reg */ +	reg = readl(base + QUADSPI_MCR); + +	writel(q->memmap_phy + q->chip_base_addr + addr, base + QUADSPI_SFAR); +	writel(QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS, +			base + QUADSPI_RBCT); +	writel(reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR); + +	do { +		reg2 = readl(base + QUADSPI_SR); +		if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) { +			udelay(1); +			dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2); +			continue; +		} +		break; +	} while (1); + +	/* trigger the LUT now */ +	seqid = fsl_qspi_get_seqid(q, cmd); +	writel((seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, base + QUADSPI_IPCR); + +	/* Wait for the interrupt. */ +	err = wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000)); +	if (!err) { +		dev_err(q->dev, +			"cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n", +			cmd, addr, readl(base + QUADSPI_FR), +			readl(base + QUADSPI_SR)); +		err = -ETIMEDOUT; +	} else { +		err = 0; +	} + +	/* restore the MCR */ +	writel(reg, base + QUADSPI_MCR); + +	return err; +} + +/* Read out the data from the QUADSPI_RBDR buffer registers. 
*/ +static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf) +{ +	u32 tmp; +	int i = 0; + +	while (len > 0) { +		tmp = readl(q->iobase + QUADSPI_RBDR + i * 4); +		tmp = fsl_qspi_endian_xchg(q, tmp); +		dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n", +				q->chip_base_addr, tmp); + +		if (len >= 4) { +			*((u32 *)rxbuf) = tmp; +			rxbuf += 4; +		} else { +			memcpy(rxbuf, &tmp, len); +			break; +		} + +		len -= 4; +		i++; +	} +} + +/* + * If we have changed the content of the flash by writing or erasing, + * we need to invalidate the AHB buffer. If we do not do so, we may read out + * the wrong data. The spec tells us reset the AHB domain and Serial Flash + * domain at the same time. + */ +static inline void fsl_qspi_invalid(struct fsl_qspi *q) +{ +	u32 reg; + +	reg = readl(q->iobase + QUADSPI_MCR); +	reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK; +	writel(reg, q->iobase + QUADSPI_MCR); + +	/* +	 * The minimum delay : 1 AHB + 2 SFCK clocks. +	 * Delay 1 us is enough. +	 */ +	udelay(1); + +	reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK); +	writel(reg, q->iobase + QUADSPI_MCR); +} + +static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, +				u8 opcode, unsigned int to, u32 *txbuf, +				unsigned count, size_t *retlen) +{ +	int ret, i, j; +	u32 tmp; + +	dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n", +		q->chip_base_addr, to, count); + +	/* clear the TX FIFO. 
*/ +	tmp = readl(q->iobase + QUADSPI_MCR); +	writel(tmp | QUADSPI_MCR_CLR_RXF_MASK, q->iobase + QUADSPI_MCR); + +	/* fill the TX data to the FIFO */ +	for (j = 0, i = ((count + 3) / 4); j < i; j++) { +		tmp = fsl_qspi_endian_xchg(q, *txbuf); +		writel(tmp, q->iobase + QUADSPI_TBDR); +		txbuf++; +	} + +	/* Trigger it */ +	ret = fsl_qspi_runcmd(q, opcode, to, count); + +	if (ret == 0 && retlen) +		*retlen += count; + +	return ret; +} + +static void fsl_qspi_set_map_addr(struct fsl_qspi *q) +{ +	int nor_size = q->nor_size; +	void __iomem *base = q->iobase; + +	writel(nor_size + q->memmap_phy, base + QUADSPI_SFA1AD); +	writel(nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD); +	writel(nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD); +	writel(nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD); +} + +/* + * There are two different ways to read out the data from the flash: + *  the "IP Command Read" and the "AHB Command Read". + * + * The IC guy suggests we use the "AHB Command Read" which is faster + * then the "IP Command Read". (What's more is that there is a bug in + * the "IP Command Read" in the Vybrid.) + * + * After we set up the registers for the "AHB Command Read", we can use + * the memcpy to read the data directly. A "missed" access to the buffer + * causes the controller to clear the buffer, and use the sequence pointed + * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash. 
+ */ +static void fsl_qspi_init_abh_read(struct fsl_qspi *q) +{ +	void __iomem *base = q->iobase; +	int seqid; + +	/* AHB configuration for access buffer 0/1/2 .*/ +	writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR); +	writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR); +	writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR); +	writel(QUADSPI_BUF3CR_ALLMST, base + QUADSPI_BUF3CR); + +	/* We only use the buffer3 */ +	writel(0, base + QUADSPI_BUF0IND); +	writel(0, base + QUADSPI_BUF1IND); +	writel(0, base + QUADSPI_BUF2IND); + +	/* Set the default lut sequence for AHB Read. */ +	seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode); +	writel(seqid << QUADSPI_BFGENCR_SEQID_SHIFT, +		q->iobase + QUADSPI_BFGENCR); +} + +/* We use this function to do some basic init for spi_nor_scan(). */ +static int fsl_qspi_nor_setup(struct fsl_qspi *q) +{ +	void __iomem *base = q->iobase; +	u32 reg; +	int ret; + +	/* the default frequency, we will change it in the future.*/ +	ret = clk_set_rate(q->clk, 66000000); +	if (ret) +		return ret; + +	/* Init the LUT table. */ +	fsl_qspi_init_lut(q); + +	/* Disable the module */ +	writel(QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK, +			base + QUADSPI_MCR); + +	reg = readl(base + QUADSPI_SMPR); +	writel(reg & ~(QUADSPI_SMPR_FSDLY_MASK +			| QUADSPI_SMPR_FSPHS_MASK +			| QUADSPI_SMPR_HSENA_MASK +			| QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR); + +	/* Enable the module */ +	writel(QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK, +			base + QUADSPI_MCR); + +	/* enable the interrupt */ +	writel(QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER); + +	return 0; +} + +static int fsl_qspi_nor_setup_last(struct fsl_qspi *q) +{ +	unsigned long rate = q->clk_rate; +	int ret; + +	if (is_imx6sx_qspi(q)) +		rate *= 4; + +	ret = clk_set_rate(q->clk, rate); +	if (ret) +		return ret; + +	/* Init the LUT table again. 
*/ +	fsl_qspi_init_lut(q); + +	/* Init for AHB read */ +	fsl_qspi_init_abh_read(q); + +	return 0; +} + +static struct of_device_id fsl_qspi_dt_ids[] = { +	{ .compatible = "fsl,vf610-qspi", .data = (void *)&vybrid_data, }, +	{ .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, }, +	{ /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids); + +static void fsl_qspi_set_base_addr(struct fsl_qspi *q, struct spi_nor *nor) +{ +	q->chip_base_addr = q->nor_size * (nor - q->nor); +} + +static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) +{ +	int ret; +	struct fsl_qspi *q = nor->priv; + +	ret = fsl_qspi_runcmd(q, opcode, 0, len); +	if (ret) +		return ret; + +	fsl_qspi_read_data(q, len, buf); +	return 0; +} + +static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len, +			int write_enable) +{ +	struct fsl_qspi *q = nor->priv; +	int ret; + +	if (!buf) { +		ret = fsl_qspi_runcmd(q, opcode, 0, 1); +		if (ret) +			return ret; + +		if (opcode == SPINOR_OP_CHIP_ERASE) +			fsl_qspi_invalid(q); + +	} else if (len > 0) { +		ret = fsl_qspi_nor_write(q, nor, opcode, 0, +					(u32 *)buf, len, NULL); +	} else { +		dev_err(q->dev, "invalid cmd %d\n", opcode); +		ret = -EINVAL; +	} + +	return ret; +} + +static void fsl_qspi_write(struct spi_nor *nor, loff_t to, +		size_t len, size_t *retlen, const u_char *buf) +{ +	struct fsl_qspi *q = nor->priv; + +	fsl_qspi_nor_write(q, nor, nor->program_opcode, to, +				(u32 *)buf, len, retlen); + +	/* invalid the data in the AHB buffer. */ +	fsl_qspi_invalid(q); +} + +static int fsl_qspi_read(struct spi_nor *nor, loff_t from, +		size_t len, size_t *retlen, u_char *buf) +{ +	struct fsl_qspi *q = nor->priv; +	u8 cmd = nor->read_opcode; +	int ret; + +	dev_dbg(q->dev, "cmd [%x],read from (0x%p, 0x%.8x, 0x%.8x),len:%d\n", +		cmd, q->ahb_base, q->chip_base_addr, (unsigned int)from, len); + +	/* Wait until the previous command is finished. 
*/ +	ret = nor->wait_till_ready(nor); +	if (ret) +		return ret; + +	/* Read out the data directly from the AHB buffer.*/ +	memcpy(buf, q->ahb_base + q->chip_base_addr + from, len); + +	*retlen += len; +	return 0; +} + +static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs) +{ +	struct fsl_qspi *q = nor->priv; +	int ret; + +	dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n", +		nor->mtd->erasesize / 1024, q->chip_base_addr, (u32)offs); + +	/* Wait until finished previous write command. */ +	ret = nor->wait_till_ready(nor); +	if (ret) +		return ret; + +	/* Send write enable, then erase commands. */ +	ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0); +	if (ret) +		return ret; + +	ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0); +	if (ret) +		return ret; + +	fsl_qspi_invalid(q); +	return 0; +} + +static int fsl_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops) +{ +	struct fsl_qspi *q = nor->priv; +	int ret; + +	ret = clk_enable(q->clk_en); +	if (ret) +		return ret; + +	ret = clk_enable(q->clk); +	if (ret) { +		clk_disable(q->clk_en); +		return ret; +	} + +	fsl_qspi_set_base_addr(q, nor); +	return 0; +} + +static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) +{ +	struct fsl_qspi *q = nor->priv; + +	clk_disable(q->clk); +	clk_disable(q->clk_en); +} + +static int fsl_qspi_probe(struct platform_device *pdev) +{ +	struct device_node *np = pdev->dev.of_node; +	struct mtd_part_parser_data ppdata; +	struct device *dev = &pdev->dev; +	struct fsl_qspi *q; +	struct resource *res; +	struct spi_nor *nor; +	struct mtd_info *mtd; +	int ret, i = 0; +	bool has_second_chip = false; +	const struct of_device_id *of_id = +			of_match_device(fsl_qspi_dt_ids, &pdev->dev); + +	q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL); +	if (!q) +		return -ENOMEM; + +	q->nor_num = of_get_child_count(dev->of_node); +	if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP) +		return -ENODEV; + +	/* find the resources */ +	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 
"QuadSPI"); +	q->iobase = devm_ioremap_resource(dev, res); +	if (IS_ERR(q->iobase)) { +		ret = PTR_ERR(q->iobase); +		goto map_failed; +	} + +	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, +					"QuadSPI-memory"); +	q->ahb_base = devm_ioremap_resource(dev, res); +	if (IS_ERR(q->ahb_base)) { +		ret = PTR_ERR(q->ahb_base); +		goto map_failed; +	} +	q->memmap_phy = res->start; + +	/* find the clocks */ +	q->clk_en = devm_clk_get(dev, "qspi_en"); +	if (IS_ERR(q->clk_en)) { +		ret = PTR_ERR(q->clk_en); +		goto map_failed; +	} + +	q->clk = devm_clk_get(dev, "qspi"); +	if (IS_ERR(q->clk)) { +		ret = PTR_ERR(q->clk); +		goto map_failed; +	} + +	ret = clk_prepare_enable(q->clk_en); +	if (ret) { +		dev_err(dev, "can not enable the qspi_en clock\n"); +		goto map_failed; +	} + +	ret = clk_prepare_enable(q->clk); +	if (ret) { +		clk_disable_unprepare(q->clk_en); +		dev_err(dev, "can not enable the qspi clock\n"); +		goto map_failed; +	} + +	/* find the irq */ +	ret = platform_get_irq(pdev, 0); +	if (ret < 0) { +		dev_err(dev, "failed to get the irq\n"); +		goto irq_failed; +	} + +	ret = devm_request_irq(dev, ret, +			fsl_qspi_irq_handler, 0, pdev->name, q); +	if (ret) { +		dev_err(dev, "failed to request irq.\n"); +		goto irq_failed; +	} + +	q->dev = dev; +	q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data; +	platform_set_drvdata(pdev, q); + +	ret = fsl_qspi_nor_setup(q); +	if (ret) +		goto irq_failed; + +	if (of_get_property(np, "fsl,qspi-has-second-chip", NULL)) +		has_second_chip = true; + +	/* iterate the subnodes. 
*/ +	for_each_available_child_of_node(dev->of_node, np) { +		const struct spi_device_id *id; +		char modalias[40]; + +		/* skip the holes */ +		if (!has_second_chip) +			i *= 2; + +		nor = &q->nor[i]; +		mtd = &q->mtd[i]; + +		nor->mtd = mtd; +		nor->dev = dev; +		nor->priv = q; +		mtd->priv = nor; + +		/* fill the hooks */ +		nor->read_reg = fsl_qspi_read_reg; +		nor->write_reg = fsl_qspi_write_reg; +		nor->read = fsl_qspi_read; +		nor->write = fsl_qspi_write; +		nor->erase = fsl_qspi_erase; + +		nor->prepare = fsl_qspi_prep; +		nor->unprepare = fsl_qspi_unprep; + +		if (of_modalias_node(np, modalias, sizeof(modalias)) < 0) +			goto map_failed; + +		id = spi_nor_match_id(modalias); +		if (!id) +			goto map_failed; + +		ret = of_property_read_u32(np, "spi-max-frequency", +				&q->clk_rate); +		if (ret < 0) +			goto map_failed; + +		/* set the chip address for READID */ +		fsl_qspi_set_base_addr(q, nor); + +		ret = spi_nor_scan(nor, id, SPI_NOR_QUAD); +		if (ret) +			goto map_failed; + +		ppdata.of_node = np; +		ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); +		if (ret) +			goto map_failed; + +		/* Set the correct NOR size now. */ +		if (q->nor_size == 0) { +			q->nor_size = mtd->size; + +			/* Map the SPI NOR to accessiable address */ +			fsl_qspi_set_map_addr(q); +		} + +		/* +		 * The TX FIFO is 64 bytes in the Vybrid, but the Page Program +		 * may writes 265 bytes per time. The write is working in the +		 * unit of the TX FIFO, not in the unit of the SPI NOR's page +		 * size. +		 * +		 * So shrink the spi_nor->page_size if it is larger then the +		 * TX FIFO. +		 */ +		if (nor->page_size > q->devtype_data->txfifo) +			nor->page_size = q->devtype_data->txfifo; + +		i++; +	} + +	/* finish the rest init. 
*/ +	ret = fsl_qspi_nor_setup_last(q); +	if (ret) +		goto last_init_failed; + +	clk_disable(q->clk); +	clk_disable(q->clk_en); +	dev_info(dev, "QuadSPI SPI NOR flash driver\n"); +	return 0; + +last_init_failed: +	for (i = 0; i < q->nor_num; i++) +		mtd_device_unregister(&q->mtd[i]); + +irq_failed: +	clk_disable_unprepare(q->clk); +	clk_disable_unprepare(q->clk_en); +map_failed: +	dev_err(dev, "Freescale QuadSPI probe failed\n"); +	return ret; +} + +static int fsl_qspi_remove(struct platform_device *pdev) +{ +	struct fsl_qspi *q = platform_get_drvdata(pdev); +	int i; + +	for (i = 0; i < q->nor_num; i++) +		mtd_device_unregister(&q->mtd[i]); + +	/* disable the hardware */ +	writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); +	writel(0x0, q->iobase + QUADSPI_RSER); + +	clk_unprepare(q->clk); +	clk_unprepare(q->clk_en); +	return 0; +} + +static struct platform_driver fsl_qspi_driver = { +	.driver = { +		.name	= "fsl-quadspi", +		.bus	= &platform_bus_type, +		.owner	= THIS_MODULE, +		.of_match_table = fsl_qspi_dt_ids, +	}, +	.probe          = fsl_qspi_probe, +	.remove		= fsl_qspi_remove, +}; +module_platform_driver(fsl_qspi_driver); + +MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver"); +MODULE_AUTHOR("Freescale Semiconductor Inc."); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c new file mode 100644 index 00000000000..c713c865671 --- /dev/null +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -0,0 +1,1107 @@ +/* + * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with + * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c + * + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/math64.h> + +#include <linux/mtd/cfi.h> +#include <linux/mtd/mtd.h> +#include <linux/of_platform.h> +#include <linux/spi/flash.h> +#include <linux/mtd/spi-nor.h> + +/* Define max times to check status register before we give up. */ +#define	MAX_READY_WAIT_JIFFIES	(40 * HZ) /* M25P16 specs 40s max chip erase */ + +#define JEDEC_MFR(_jedec_id)	((_jedec_id) >> 16) + +/* + * Read the status register, returning its value in the location + * Return the status register value. + * Returns negative if error occurred. + */ +static int read_sr(struct spi_nor *nor) +{ +	int ret; +	u8 val; + +	ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1); +	if (ret < 0) { +		pr_err("error %d reading SR\n", (int) ret); +		return ret; +	} + +	return val; +} + +/* + * Read configuration register, returning its value in the + * location. Return the configuration register value. + * Returns negative if error occured. + */ +static int read_cr(struct spi_nor *nor) +{ +	int ret; +	u8 val; + +	ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1); +	if (ret < 0) { +		dev_err(nor->dev, "error %d reading CR\n", ret); +		return ret; +	} + +	return val; +} + +/* + * Dummy Cycle calculation for different type of read. + * It can be used to support more commands with + * different dummy cycle requirements. + */ +static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor) +{ +	switch (nor->flash_read) { +	case SPI_NOR_FAST: +	case SPI_NOR_DUAL: +	case SPI_NOR_QUAD: +		return 1; +	case SPI_NOR_NORMAL: +		return 0; +	} +	return 0; +} + +/* + * Write status register 1 byte + * Returns negative if error occurred. + */ +static inline int write_sr(struct spi_nor *nor, u8 val) +{ +	nor->cmd_buf[0] = val; +	return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0); +} + +/* + * Set write enable latch with Write Enable command. 
+ * Returns negative if error occurred. + */ +static inline int write_enable(struct spi_nor *nor) +{ +	return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0); +} + +/* + * Send write disble instruction to the chip. + */ +static inline int write_disable(struct spi_nor *nor) +{ +	return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0, 0); +} + +static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd) +{ +	return mtd->priv; +} + +/* Enable/disable 4-byte addressing mode. */ +static inline int set_4byte(struct spi_nor *nor, u32 jedec_id, int enable) +{ +	int status; +	bool need_wren = false; +	u8 cmd; + +	switch (JEDEC_MFR(jedec_id)) { +	case CFI_MFR_ST: /* Micron, actually */ +		/* Some Micron need WREN command; all will accept it */ +		need_wren = true; +	case CFI_MFR_MACRONIX: +	case 0xEF /* winbond */: +		if (need_wren) +			write_enable(nor); + +		cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B; +		status = nor->write_reg(nor, cmd, NULL, 0, 0); +		if (need_wren) +			write_disable(nor); + +		return status; +	default: +		/* Spansion style */ +		nor->cmd_buf[0] = enable << 7; +		return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1, 0); +	} +} + +static int spi_nor_wait_till_ready(struct spi_nor *nor) +{ +	unsigned long deadline; +	int sr; + +	deadline = jiffies + MAX_READY_WAIT_JIFFIES; + +	do { +		cond_resched(); + +		sr = read_sr(nor); +		if (sr < 0) +			break; +		else if (!(sr & SR_WIP)) +			return 0; +	} while (!time_after_eq(jiffies, deadline)); + +	return -ETIMEDOUT; +} + +/* + * Service routine to read status register until ready, or timeout occurs. + * Returns non-zero if error. + */ +static int wait_till_ready(struct spi_nor *nor) +{ +	return nor->wait_till_ready(nor); +} + +/* + * Erase the whole flash memory + * + * Returns 0 if successful, non-zero otherwise. + */ +static int erase_chip(struct spi_nor *nor) +{ +	int ret; + +	dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd->size >> 10)); + +	/* Wait until finished previous write command. 
*/ +	ret = wait_till_ready(nor); +	if (ret) +		return ret; + +	/* Send write enable, then erase commands. */ +	write_enable(nor); + +	return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0, 0); +} + +static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops) +{ +	int ret = 0; + +	mutex_lock(&nor->lock); + +	if (nor->prepare) { +		ret = nor->prepare(nor, ops); +		if (ret) { +			dev_err(nor->dev, "failed in the preparation.\n"); +			mutex_unlock(&nor->lock); +			return ret; +		} +	} +	return ret; +} + +static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops) +{ +	if (nor->unprepare) +		nor->unprepare(nor, ops); +	mutex_unlock(&nor->lock); +} + +/* + * Erase an address range on the nor chip.  The address range may extend + * one or more erase sectors.  Return an error is there is a problem erasing. + */ +static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) +{ +	struct spi_nor *nor = mtd_to_spi_nor(mtd); +	u32 addr, len; +	uint32_t rem; +	int ret; + +	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr, +			(long long)instr->len); + +	div_u64_rem(instr->len, mtd->erasesize, &rem); +	if (rem) +		return -EINVAL; + +	addr = instr->addr; +	len = instr->len; + +	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE); +	if (ret) +		return ret; + +	/* whole-chip erase? */ +	if (len == mtd->size) { +		if (erase_chip(nor)) { +			ret = -EIO; +			goto erase_err; +		} + +	/* REVISIT in some cases we could speed up erasing large regions +	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K.  We may have set up +	 * to use "small sector erase", but that's not always optimal. 
+	 */ + +	/* "sector"-at-a-time erase */ +	} else { +		while (len) { +			if (nor->erase(nor, addr)) { +				ret = -EIO; +				goto erase_err; +			} + +			addr += mtd->erasesize; +			len -= mtd->erasesize; +		} +	} + +	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE); + +	instr->state = MTD_ERASE_DONE; +	mtd_erase_callback(instr); + +	return ret; + +erase_err: +	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE); +	instr->state = MTD_ERASE_FAILED; +	return ret; +} + +static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ +	struct spi_nor *nor = mtd_to_spi_nor(mtd); +	uint32_t offset = ofs; +	uint8_t status_old, status_new; +	int ret = 0; + +	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK); +	if (ret) +		return ret; + +	/* Wait until finished previous command */ +	ret = wait_till_ready(nor); +	if (ret) +		goto err; + +	status_old = read_sr(nor); + +	if (offset < mtd->size - (mtd->size / 2)) +		status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0; +	else if (offset < mtd->size - (mtd->size / 4)) +		status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1; +	else if (offset < mtd->size - (mtd->size / 8)) +		status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0; +	else if (offset < mtd->size - (mtd->size / 16)) +		status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2; +	else if (offset < mtd->size - (mtd->size / 32)) +		status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0; +	else if (offset < mtd->size - (mtd->size / 64)) +		status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1; +	else +		status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0; + +	/* Only modify protection if it will not unlock other areas */ +	if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) > +				(status_old & (SR_BP2 | SR_BP1 | SR_BP0))) { +		write_enable(nor); +		ret = write_sr(nor, status_new); +		if (ret) +			goto err; +	} + +err: +	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK); +	return ret; +} + +static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ 
+	struct spi_nor *nor = mtd_to_spi_nor(mtd); +	uint32_t offset = ofs; +	uint8_t status_old, status_new; +	int ret = 0; + +	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK); +	if (ret) +		return ret; + +	/* Wait until finished previous command */ +	ret = wait_till_ready(nor); +	if (ret) +		goto err; + +	status_old = read_sr(nor); + +	if (offset+len > mtd->size - (mtd->size / 64)) +		status_new = status_old & ~(SR_BP2 | SR_BP1 | SR_BP0); +	else if (offset+len > mtd->size - (mtd->size / 32)) +		status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0; +	else if (offset+len > mtd->size - (mtd->size / 16)) +		status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1; +	else if (offset+len > mtd->size - (mtd->size / 8)) +		status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0; +	else if (offset+len > mtd->size - (mtd->size / 4)) +		status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2; +	else if (offset+len > mtd->size - (mtd->size / 2)) +		status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0; +	else +		status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1; + +	/* Only modify protection if it will not lock other areas */ +	if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) < +				(status_old & (SR_BP2 | SR_BP1 | SR_BP0))) { +		write_enable(nor); +		ret = write_sr(nor, status_new); +		if (ret) +			goto err; +	} + +err: +	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK); +	return ret; +} + +struct flash_info { +	/* JEDEC id zero means "no ID" (most older chips); otherwise it has +	 * a high byte of zero plus three data bytes: the manufacturer id, +	 * then a two byte device id. +	 */ +	u32		jedec_id; +	u16             ext_id; + +	/* The size listed here is what works with SPINOR_OP_SE, which isn't +	 * necessarily called a "sector" by the vendor. 
+	 */ +	unsigned	sector_size; +	u16		n_sectors; + +	u16		page_size; +	u16		addr_width; + +	u16		flags; +#define	SECT_4K			0x01	/* SPINOR_OP_BE_4K works uniformly */ +#define	SPI_NOR_NO_ERASE	0x02	/* No erase command needed */ +#define	SST_WRITE		0x04	/* use SST byte programming */ +#define	SPI_NOR_NO_FR		0x08	/* Can't do fastread */ +#define	SECT_4K_PMC		0x10	/* SPINOR_OP_BE_4K_PMC works uniformly */ +#define	SPI_NOR_DUAL_READ	0x20    /* Flash supports Dual Read */ +#define	SPI_NOR_QUAD_READ	0x40    /* Flash supports Quad Read */ +}; + +#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\ +	((kernel_ulong_t)&(struct flash_info) {				\ +		.jedec_id = (_jedec_id),				\ +		.ext_id = (_ext_id),					\ +		.sector_size = (_sector_size),				\ +		.n_sectors = (_n_sectors),				\ +		.page_size = 256,					\ +		.flags = (_flags),					\ +	}) + +#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags)	\ +	((kernel_ulong_t)&(struct flash_info) {				\ +		.sector_size = (_sector_size),				\ +		.n_sectors = (_n_sectors),				\ +		.page_size = (_page_size),				\ +		.addr_width = (_addr_width),				\ +		.flags = (_flags),					\ +	}) + +/* NOTE: double check command sets and memory organization when you add + * more nor chips.  This current list focusses on newer chips, which + * have been converging on command sets which including JEDEC ID. 
+ */ +const struct spi_device_id spi_nor_ids[] = { +	/* Atmel -- some are (confusingly) marketed as "DataFlash" */ +	{ "at25fs010",  INFO(0x1f6601, 0, 32 * 1024,   4, SECT_4K) }, +	{ "at25fs040",  INFO(0x1f6604, 0, 64 * 1024,   8, SECT_4K) }, + +	{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024,   8, SECT_4K) }, +	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024,  64, SECT_4K) }, +	{ "at25df641",  INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) }, + +	{ "at26f004",   INFO(0x1f0400, 0, 64 * 1024,  8, SECT_4K) }, +	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) }, +	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, +	{ "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, + +	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) }, + +	/* EON -- en25xxx */ +	{ "en25f32",    INFO(0x1c3116, 0, 64 * 1024,   64, SECT_4K) }, +	{ "en25p32",    INFO(0x1c2016, 0, 64 * 1024,   64, 0) }, +	{ "en25q32b",   INFO(0x1c3016, 0, 64 * 1024,   64, 0) }, +	{ "en25p64",    INFO(0x1c2017, 0, 64 * 1024,  128, 0) }, +	{ "en25q64",    INFO(0x1c3017, 0, 64 * 1024,  128, SECT_4K) }, +	{ "en25qh256",  INFO(0x1c7019, 0, 64 * 1024,  512, 0) }, + +	/* ESMT */ +	{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) }, + +	/* Everspin */ +	{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, +	{ "mr25h10",  CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, + +	/* GigaDevice */ +	{ "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64, SECT_4K) }, +	{ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) }, + +	/* Intel/Numonyx -- xxxs33b */ +	{ "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) }, +	{ "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) }, +	{ "640s33b",  INFO(0x898913, 0, 64 * 1024, 128, 0) }, + +	/* Macronix */ +	{ "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) }, +	{ "mx25l4005a",  INFO(0xc22013, 0, 64 * 1024,   8, SECT_4K) }, +	{ "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16, 0) }, +	{ "mx25l1606e",  
INFO(0xc22015, 0, 64 * 1024,  32, SECT_4K) }, +	{ "mx25l3205d",  INFO(0xc22016, 0, 64 * 1024,  64, 0) }, +	{ "mx25l3255e",  INFO(0xc29e16, 0, 64 * 1024,  64, SECT_4K) }, +	{ "mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128, 0) }, +	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, +	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, +	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, +	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, +	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) }, +	{ "mx66l1g55g",  INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, + +	/* Micron */ +	{ "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, 0) }, +	{ "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, 0) }, +	{ "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, 0) }, +	{ "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K) }, +	{ "n25q512a",    INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) }, + +	/* PMC */ +	{ "pm25lv512",   INFO(0,        0, 32 * 1024,    2, SECT_4K_PMC) }, +	{ "pm25lv010",   INFO(0,        0, 32 * 1024,    4, SECT_4K_PMC) }, +	{ "pm25lq032",   INFO(0x7f9d46, 0, 64 * 1024,   64, SECT_4K) }, + +	/* Spansion -- single (large) sector size only, at least +	 * for the chips listed here (without boot sectors). 
+	 */ +	{ "s25sl032p",  INFO(0x010215, 0x4d00,  64 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, +	{ "s25sl064p",  INFO(0x010216, 0x4d00,  64 * 1024, 128, 0) }, +	{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) }, +	{ "s25fl256s1", INFO(0x010219, 0x4d01,  64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, +	{ "s25fl512s",  INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, +	{ "s70fl01gs",  INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) }, +	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) }, +	{ "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) }, +	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64, 0) }, +	{ "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, 0) }, +	{ "s25sl004a",  INFO(0x010212,      0,  64 * 1024,   8, 0) }, +	{ "s25sl008a",  INFO(0x010213,      0,  64 * 1024,  16, 0) }, +	{ "s25sl016a",  INFO(0x010214,      0,  64 * 1024,  32, 0) }, +	{ "s25sl032a",  INFO(0x010215,      0,  64 * 1024,  64, 0) }, +	{ "s25sl064a",  INFO(0x010216,      0,  64 * 1024, 128, 0) }, +	{ "s25fl008k",  INFO(0xef4014,      0,  64 * 1024,  16, SECT_4K) }, +	{ "s25fl016k",  INFO(0xef4015,      0,  64 * 1024,  32, SECT_4K) }, +	{ "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128, SECT_4K) }, + +	/* SST -- large erase sizes are "overlays", "sectors" are 4K */ +	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) }, +	{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) }, +	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) }, +	{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) }, +	{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) }, +	{ "sst25wf512",  INFO(0xbf2501, 0, 64 * 1024,  1, SECT_4K | SST_WRITE) }, +	{ "sst25wf010",  INFO(0xbf2502, 0, 64 * 1024,  2, SECT_4K | SST_WRITE) }, +	{ "sst25wf020",  INFO(0xbf2503, 0, 64 * 1024,  4, SECT_4K | SST_WRITE) }, +	{ "sst25wf040",  INFO(0xbf2504, 0, 64 * 1024,  
8, SECT_4K | SST_WRITE) }, + +	/* ST Microelectronics -- newer production may have feature updates */ +	{ "m25p05",  INFO(0x202010,  0,  32 * 1024,   2, 0) }, +	{ "m25p10",  INFO(0x202011,  0,  32 * 1024,   4, 0) }, +	{ "m25p20",  INFO(0x202012,  0,  64 * 1024,   4, 0) }, +	{ "m25p40",  INFO(0x202013,  0,  64 * 1024,   8, 0) }, +	{ "m25p80",  INFO(0x202014,  0,  64 * 1024,  16, 0) }, +	{ "m25p16",  INFO(0x202015,  0,  64 * 1024,  32, 0) }, +	{ "m25p32",  INFO(0x202016,  0,  64 * 1024,  64, 0) }, +	{ "m25p64",  INFO(0x202017,  0,  64 * 1024, 128, 0) }, +	{ "m25p128", INFO(0x202018,  0, 256 * 1024,  64, 0) }, +	{ "n25q032", INFO(0x20ba16,  0,  64 * 1024,  64, 0) }, + +	{ "m25p05-nonjedec",  INFO(0, 0,  32 * 1024,   2, 0) }, +	{ "m25p10-nonjedec",  INFO(0, 0,  32 * 1024,   4, 0) }, +	{ "m25p20-nonjedec",  INFO(0, 0,  64 * 1024,   4, 0) }, +	{ "m25p40-nonjedec",  INFO(0, 0,  64 * 1024,   8, 0) }, +	{ "m25p80-nonjedec",  INFO(0, 0,  64 * 1024,  16, 0) }, +	{ "m25p16-nonjedec",  INFO(0, 0,  64 * 1024,  32, 0) }, +	{ "m25p32-nonjedec",  INFO(0, 0,  64 * 1024,  64, 0) }, +	{ "m25p64-nonjedec",  INFO(0, 0,  64 * 1024, 128, 0) }, +	{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024,  64, 0) }, + +	{ "m45pe10", INFO(0x204011,  0, 64 * 1024,    2, 0) }, +	{ "m45pe80", INFO(0x204014,  0, 64 * 1024,   16, 0) }, +	{ "m45pe16", INFO(0x204015,  0, 64 * 1024,   32, 0) }, + +	{ "m25pe20", INFO(0x208012,  0, 64 * 1024,  4,       0) }, +	{ "m25pe80", INFO(0x208014,  0, 64 * 1024, 16,       0) }, +	{ "m25pe16", INFO(0x208015,  0, 64 * 1024, 32, SECT_4K) }, + +	{ "m25px16",    INFO(0x207115,  0, 64 * 1024, 32, SECT_4K) }, +	{ "m25px32",    INFO(0x207116,  0, 64 * 1024, 64, SECT_4K) }, +	{ "m25px32-s0", INFO(0x207316,  0, 64 * 1024, 64, SECT_4K) }, +	{ "m25px32-s1", INFO(0x206316,  0, 64 * 1024, 64, SECT_4K) }, +	{ "m25px64",    INFO(0x207117,  0, 64 * 1024, 128, 0) }, + +	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ +	{ "w25x10", INFO(0xef3011, 0, 64 * 1024,  2,  SECT_4K) }, +	{ 
"w25x20", INFO(0xef3012, 0, 64 * 1024,  4,  SECT_4K) }, +	{ "w25x40", INFO(0xef3013, 0, 64 * 1024,  8,  SECT_4K) }, +	{ "w25x80", INFO(0xef3014, 0, 64 * 1024,  16, SECT_4K) }, +	{ "w25x16", INFO(0xef3015, 0, 64 * 1024,  32, SECT_4K) }, +	{ "w25x32", INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) }, +	{ "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) }, +	{ "w25q32dw", INFO(0xef6016, 0, 64 * 1024,  64, SECT_4K) }, +	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, +	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, +	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, +	{ "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) }, +	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) }, +	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, +	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) }, + +	/* Catalyst / On Semiconductor -- non-JEDEC */ +	{ "cat25c11", CAT25_INFO(  16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, +	{ "cat25c03", CAT25_INFO(  32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, +	{ "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, +	{ "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, +	{ "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, +	{ }, +}; +EXPORT_SYMBOL_GPL(spi_nor_ids); + +static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor) +{ +	int			tmp; +	u8			id[5]; +	u32			jedec; +	u16                     ext_jedec; +	struct flash_info	*info; + +	tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, 5); +	if (tmp < 0) { +		dev_dbg(nor->dev, " error %d reading JEDEC ID\n", tmp); +		return ERR_PTR(tmp); +	} +	jedec = id[0]; +	jedec = jedec << 8; +	jedec |= id[1]; +	jedec = jedec << 8; +	jedec |= id[2]; + +	ext_jedec = id[3] << 8 | id[4]; + +	for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) { +		info = (void *)spi_nor_ids[tmp].driver_data; +		if (info->jedec_id == jedec) { +			if (info->ext_id == 0 || info->ext_id 
== ext_jedec) +				return &spi_nor_ids[tmp]; +		} +	} +	dev_err(nor->dev, "unrecognized JEDEC id %06x\n", jedec); +	return ERR_PTR(-ENODEV); +} + +static const struct spi_device_id *jedec_probe(struct spi_nor *nor) +{ +	return nor->read_id(nor); +} + +static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, +			size_t *retlen, u_char *buf) +{ +	struct spi_nor *nor = mtd_to_spi_nor(mtd); +	int ret; + +	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len); + +	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ); +	if (ret) +		return ret; + +	ret = nor->read(nor, from, len, retlen, buf); + +	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ); +	return ret; +} + +static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, +		size_t *retlen, const u_char *buf) +{ +	struct spi_nor *nor = mtd_to_spi_nor(mtd); +	size_t actual; +	int ret; + +	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); + +	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE); +	if (ret) +		return ret; + +	/* Wait until finished previous write command. */ +	ret = wait_till_ready(nor); +	if (ret) +		goto time_out; + +	write_enable(nor); + +	nor->sst_write_second = false; + +	actual = to % 2; +	/* Start write from odd address. */ +	if (actual) { +		nor->program_opcode = SPINOR_OP_BP; + +		/* write one byte. */ +		nor->write(nor, to, 1, retlen, buf); +		ret = wait_till_ready(nor); +		if (ret) +			goto time_out; +	} +	to += actual; + +	/* Write out most of the data here. */ +	for (; actual < len - 1; actual += 2) { +		nor->program_opcode = SPINOR_OP_AAI_WP; + +		/* write two bytes. */ +		nor->write(nor, to, 2, retlen, buf + actual); +		ret = wait_till_ready(nor); +		if (ret) +			goto time_out; +		to += 2; +		nor->sst_write_second = true; +	} +	nor->sst_write_second = false; + +	write_disable(nor); +	ret = wait_till_ready(nor); +	if (ret) +		goto time_out; + +	/* Write out trailing byte if it exists. 
*/ +	if (actual != len) { +		write_enable(nor); + +		nor->program_opcode = SPINOR_OP_BP; +		nor->write(nor, to, 1, retlen, buf + actual); + +		ret = wait_till_ready(nor); +		if (ret) +			goto time_out; +		write_disable(nor); +	} +time_out: +	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); +	return ret; +} + +/* + * Write an address range to the nor chip.  Data must be written in + * FLASH_PAGESIZE chunks.  The address range may be any size provided + * it is within the physical boundaries. + */ +static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, +	size_t *retlen, const u_char *buf) +{ +	struct spi_nor *nor = mtd_to_spi_nor(mtd); +	u32 page_offset, page_size, i; +	int ret; + +	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); + +	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE); +	if (ret) +		return ret; + +	/* Wait until finished previous write command. */ +	ret = wait_till_ready(nor); +	if (ret) +		goto write_err; + +	write_enable(nor); + +	page_offset = to & (nor->page_size - 1); + +	/* do all the bytes fit onto one page? 
*/ +	if (page_offset + len <= nor->page_size) { +		nor->write(nor, to, len, retlen, buf); +	} else { +		/* the size of data remaining on the first page */ +		page_size = nor->page_size - page_offset; +		nor->write(nor, to, page_size, retlen, buf); + +		/* write everything in nor->page_size chunks */ +		for (i = page_size; i < len; i += page_size) { +			page_size = len - i; +			if (page_size > nor->page_size) +				page_size = nor->page_size; + +			wait_till_ready(nor); +			write_enable(nor); + +			nor->write(nor, to + i, page_size, retlen, buf + i); +		} +	} + +write_err: +	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); +	return 0; +} + +static int macronix_quad_enable(struct spi_nor *nor) +{ +	int ret, val; + +	val = read_sr(nor); +	write_enable(nor); + +	nor->cmd_buf[0] = val | SR_QUAD_EN_MX; +	nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0); + +	if (wait_till_ready(nor)) +		return 1; + +	ret = read_sr(nor); +	if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) { +		dev_err(nor->dev, "Macronix Quad bit not set\n"); +		return -EINVAL; +	} + +	return 0; +} + +/* + * Write status Register and configuration register with 2 bytes + * The first byte will be written to the status register, while the + * second byte will be written to the configuration register. + * Return negative if error occured. 
+ */ +static int write_sr_cr(struct spi_nor *nor, u16 val) +{ +	nor->cmd_buf[0] = val & 0xff; +	nor->cmd_buf[1] = (val >> 8); + +	return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2, 0); +} + +static int spansion_quad_enable(struct spi_nor *nor) +{ +	int ret; +	int quad_en = CR_QUAD_EN_SPAN << 8; + +	write_enable(nor); + +	ret = write_sr_cr(nor, quad_en); +	if (ret < 0) { +		dev_err(nor->dev, +			"error while writing configuration register\n"); +		return -EINVAL; +	} + +	/* read back and check it */ +	ret = read_cr(nor); +	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) { +		dev_err(nor->dev, "Spansion Quad bit not set\n"); +		return -EINVAL; +	} + +	return 0; +} + +static int set_quad_mode(struct spi_nor *nor, u32 jedec_id) +{ +	int status; + +	switch (JEDEC_MFR(jedec_id)) { +	case CFI_MFR_MACRONIX: +		status = macronix_quad_enable(nor); +		if (status) { +			dev_err(nor->dev, "Macronix quad-read not enabled\n"); +			return -EINVAL; +		} +		return status; +	default: +		status = spansion_quad_enable(nor); +		if (status) { +			dev_err(nor->dev, "Spansion quad-read not enabled\n"); +			return -EINVAL; +		} +		return status; +	} +} + +static int spi_nor_check(struct spi_nor *nor) +{ +	if (!nor->dev || !nor->read || !nor->write || +		!nor->read_reg || !nor->write_reg || !nor->erase) { +		pr_err("spi-nor: please fill all the necessary fields!\n"); +		return -EINVAL; +	} + +	if (!nor->read_id) +		nor->read_id = spi_nor_read_id; +	if (!nor->wait_till_ready) +		nor->wait_till_ready = spi_nor_wait_till_ready; + +	return 0; +} + +int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id, +			enum read_mode mode) +{ +	struct flash_info		*info; +	struct flash_platform_data	*data; +	struct device *dev = nor->dev; +	struct mtd_info *mtd = nor->mtd; +	struct device_node *np = dev->of_node; +	int ret; +	int i; + +	ret = spi_nor_check(nor); +	if (ret) +		return ret; + +	/* Platform data helps sort out which chip type we have, as +	 * well as how this board partitions 
it.  If we don't have +	 * a chip ID, try the JEDEC id commands; they'll work for most +	 * newer chips, even if we don't recognize the particular chip. +	 */ +	data = dev_get_platdata(dev); +	if (data && data->type) { +		const struct spi_device_id *plat_id; + +		for (i = 0; i < ARRAY_SIZE(spi_nor_ids) - 1; i++) { +			plat_id = &spi_nor_ids[i]; +			if (strcmp(data->type, plat_id->name)) +				continue; +			break; +		} + +		if (i < ARRAY_SIZE(spi_nor_ids) - 1) +			id = plat_id; +		else +			dev_warn(dev, "unrecognized id %s\n", data->type); +	} + +	info = (void *)id->driver_data; + +	if (info->jedec_id) { +		const struct spi_device_id *jid; + +		jid = jedec_probe(nor); +		if (IS_ERR(jid)) { +			return PTR_ERR(jid); +		} else if (jid != id) { +			/* +			 * JEDEC knows better, so overwrite platform ID. We +			 * can't trust partitions any longer, but we'll let +			 * mtd apply them anyway, since some partitions may be +			 * marked read-only, and we don't want to lose that +			 * information, even if it's not 100% accurate. 
+			 */ +			dev_warn(dev, "found %s, expected %s\n", +				 jid->name, id->name); +			id = jid; +			info = (void *)jid->driver_data; +		} +	} + +	mutex_init(&nor->lock); + +	/* +	 * Atmel, SST and Intel/Numonyx serial nor tend to power +	 * up with the software protection bits set +	 */ + +	if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL || +	    JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL || +	    JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) { +		write_enable(nor); +		write_sr(nor, 0); +	} + +	if (data && data->name) +		mtd->name = data->name; +	else +		mtd->name = dev_name(dev); + +	mtd->type = MTD_NORFLASH; +	mtd->writesize = 1; +	mtd->flags = MTD_CAP_NORFLASH; +	mtd->size = info->sector_size * info->n_sectors; +	mtd->_erase = spi_nor_erase; +	mtd->_read = spi_nor_read; + +	/* nor protection support for STmicro chips */ +	if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) { +		mtd->_lock = spi_nor_lock; +		mtd->_unlock = spi_nor_unlock; +	} + +	/* sst nor chips use AAI word program */ +	if (info->flags & SST_WRITE) +		mtd->_write = sst_write; +	else +		mtd->_write = spi_nor_write; + +	/* prefer "small sector" erase if possible */ +	if (info->flags & SECT_4K) { +		nor->erase_opcode = SPINOR_OP_BE_4K; +		mtd->erasesize = 4096; +	} else if (info->flags & SECT_4K_PMC) { +		nor->erase_opcode = SPINOR_OP_BE_4K_PMC; +		mtd->erasesize = 4096; +	} else { +		nor->erase_opcode = SPINOR_OP_SE; +		mtd->erasesize = info->sector_size; +	} + +	if (info->flags & SPI_NOR_NO_ERASE) +		mtd->flags |= MTD_NO_ERASE; + +	mtd->dev.parent = dev; +	nor->page_size = info->page_size; +	mtd->writebufsize = nor->page_size; + +	if (np) { +		/* If we were instantiated by DT, use it */ +		if (of_property_read_bool(np, "m25p,fast-read")) +			nor->flash_read = SPI_NOR_FAST; +		else +			nor->flash_read = SPI_NOR_NORMAL; +	} else { +		/* If we weren't instantiated by DT, default to fast-read */ +		nor->flash_read = SPI_NOR_FAST; +	} + +	/* Some devices cannot do fast-read, no matter what DT tells us */ +	if 
(info->flags & SPI_NOR_NO_FR) +		nor->flash_read = SPI_NOR_NORMAL; + +	/* Quad/Dual-read mode takes precedence over fast/normal */ +	if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) { +		ret = set_quad_mode(nor, info->jedec_id); +		if (ret) { +			dev_err(dev, "quad mode not supported\n"); +			return ret; +		} +		nor->flash_read = SPI_NOR_QUAD; +	} else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) { +		nor->flash_read = SPI_NOR_DUAL; +	} + +	/* Default commands */ +	switch (nor->flash_read) { +	case SPI_NOR_QUAD: +		nor->read_opcode = SPINOR_OP_READ_1_1_4; +		break; +	case SPI_NOR_DUAL: +		nor->read_opcode = SPINOR_OP_READ_1_1_2; +		break; +	case SPI_NOR_FAST: +		nor->read_opcode = SPINOR_OP_READ_FAST; +		break; +	case SPI_NOR_NORMAL: +		nor->read_opcode = SPINOR_OP_READ; +		break; +	default: +		dev_err(dev, "No Read opcode defined\n"); +		return -EINVAL; +	} + +	nor->program_opcode = SPINOR_OP_PP; + +	if (info->addr_width) +		nor->addr_width = info->addr_width; +	else if (mtd->size > 0x1000000) { +		/* enable 4-byte addressing if the device exceeds 16MiB */ +		nor->addr_width = 4; +		if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) { +			/* Dedicated 4-byte command set */ +			switch (nor->flash_read) { +			case SPI_NOR_QUAD: +				nor->read_opcode = SPINOR_OP_READ4_1_1_4; +				break; +			case SPI_NOR_DUAL: +				nor->read_opcode = SPINOR_OP_READ4_1_1_2; +				break; +			case SPI_NOR_FAST: +				nor->read_opcode = SPINOR_OP_READ4_FAST; +				break; +			case SPI_NOR_NORMAL: +				nor->read_opcode = SPINOR_OP_READ4; +				break; +			} +			nor->program_opcode = SPINOR_OP_PP_4B; +			/* No small sector erase for 4-byte command set */ +			nor->erase_opcode = SPINOR_OP_SE_4B; +			mtd->erasesize = info->sector_size; +		} else +			set_4byte(nor, info->jedec_id, 1); +	} else { +		nor->addr_width = 3; +	} + +	nor->read_dummy = spi_nor_read_dummy_cycles(nor); + +	dev_info(dev, "%s (%lld Kbytes)\n", id->name, +			(long long)mtd->size >> 10); + +	dev_dbg(dev, +		
"mtd .name = %s, .size = 0x%llx (%lldMiB), " +		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", +		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20), +		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions); + +	if (mtd->numeraseregions) +		for (i = 0; i < mtd->numeraseregions; i++) +			dev_dbg(dev, +				"mtd.eraseregions[%d] = { .offset = 0x%llx, " +				".erasesize = 0x%.8x (%uKiB), " +				".numblocks = %d }\n", +				i, (long long)mtd->eraseregions[i].offset, +				mtd->eraseregions[i].erasesize, +				mtd->eraseregions[i].erasesize / 1024, +				mtd->eraseregions[i].numblocks); +	return 0; +} +EXPORT_SYMBOL_GPL(spi_nor_scan); + +const struct spi_device_id *spi_nor_match_id(char *name) +{ +	const struct spi_device_id *id = spi_nor_ids; + +	while (id->name[0]) { +		if (!strcmp(name, id->name)) +			return id; +		id++; +	} +	return NULL; +} +EXPORT_SYMBOL_GPL(spi_nor_match_id); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>"); +MODULE_AUTHOR("Mike Lavender"); +MODULE_DESCRIPTION("framework for SPI NOR"); diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c index ab2a52a039c..daf82ba7aba 100644 --- a/drivers/mtd/ssfdc.c +++ b/drivers/mtd/ssfdc.c @@ -290,7 +290,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)  	int cis_sector;  	/* Check for small page NAND flash */ -	if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE || +	if (!mtd_type_is_nand(mtd) || mtd->oobsize != OOB_SIZE ||  	    mtd->size > UINT_MAX)  		return; diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c index 70106607c24..e579f9027c4 100644 --- a/drivers/mtd/tests/mtd_nandecctest.c +++ b/drivers/mtd/tests/mtd_nandecctest.c @@ -19,7 +19,7 @@   * or detected.   
*/ -#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE) +#if IS_ENABLED(CONFIG_MTD_NAND)  struct nand_ecc_test {  	const char *name; diff --git a/drivers/mtd/tests/mtd_test.c b/drivers/mtd/tests/mtd_test.c index c818a63532e..111ee46a742 100644 --- a/drivers/mtd/tests/mtd_test.c +++ b/drivers/mtd/tests/mtd_test.c @@ -1,6 +1,5 @@  #define pr_fmt(fmt) "mtd_test: " fmt -#include <linux/init.h>  #include <linux/module.h>  #include <linux/sched.h>  #include <linux/printk.h> diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c index 3cd3aabbe1c..6f976159611 100644 --- a/drivers/mtd/tests/nandbiterrs.c +++ b/drivers/mtd/tests/nandbiterrs.c @@ -349,7 +349,7 @@ static int __init mtd_nandbiterrs_init(void)  		goto exit_mtddev;  	} -	if (mtd->type != MTD_NANDFLASH) { +	if (!mtd_type_is_nand(mtd)) {  		pr_info("this test requires NAND flash\n");  		err = -ENODEV;  		goto exit_nand; diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c index ff35c465bfe..f19ab1acde1 100644 --- a/drivers/mtd/tests/oobtest.c +++ b/drivers/mtd/tests/oobtest.c @@ -69,8 +69,8 @@ static int write_eraseblock(int ebnum)  	int err = 0;  	loff_t addr = ebnum * mtd->erasesize; +	prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);  	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { -		prandom_bytes_state(&rnd_state, writebuf, use_len);  		ops.mode      = MTD_OPS_AUTO_OOB;  		ops.len       = 0;  		ops.retlen    = 0; @@ -78,7 +78,7 @@ static int write_eraseblock(int ebnum)  		ops.oobretlen = 0;  		ops.ooboffs   = use_offset;  		ops.datbuf    = NULL; -		ops.oobbuf    = writebuf; +		ops.oobbuf    = writebuf + (use_len_max * i) + use_offset;  		err = mtd_write_oob(mtd, addr, &ops);  		if (err || ops.oobretlen != use_len) {  			pr_err("error: writeoob failed at %#llx\n", @@ -122,8 +122,8 @@ static int verify_eraseblock(int ebnum)  	int err = 0;  	loff_t addr = ebnum * mtd->erasesize; +	prandom_bytes_state(&rnd_state, writebuf, use_len_max * 
pgcnt);  	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { -		prandom_bytes_state(&rnd_state, writebuf, use_len);  		ops.mode      = MTD_OPS_AUTO_OOB;  		ops.len       = 0;  		ops.retlen    = 0; @@ -139,7 +139,8 @@ static int verify_eraseblock(int ebnum)  			errcnt += 1;  			return err ? err : -1;  		} -		if (memcmp(readbuf, writebuf, use_len)) { +		if (memcmp(readbuf, writebuf + (use_len_max * i) + use_offset, +			   use_len)) {  			pr_err("error: verify failed at %#llx\n",  			       (long long)addr);  			errcnt += 1; @@ -166,7 +167,9 @@ static int verify_eraseblock(int ebnum)  				errcnt += 1;  				return err ? err : -1;  			} -			if (memcmp(readbuf + use_offset, writebuf, use_len)) { +			if (memcmp(readbuf + use_offset, +				   writebuf + (use_len_max * i) + use_offset, +				   use_len)) {  				pr_err("error: verify failed at %#llx\n",  						(long long)addr);  				errcnt += 1; @@ -289,7 +292,7 @@ static int __init mtd_oobtest_init(void)  		return err;  	} -	if (mtd->type != MTD_NANDFLASH) { +	if (!mtd_type_is_nand(mtd)) {  		pr_info("this test requires NAND flash\n");  		goto out;  	} @@ -566,8 +569,8 @@ static int __init mtd_oobtest_init(void)  		if (bbt[i] || bbt[i + 1])  			continue;  		addr = (i + 1) * mtd->erasesize - mtd->writesize; +		prandom_bytes_state(&rnd_state, writebuf, sz * cnt);  		for (pg = 0; pg < cnt; ++pg) { -			prandom_bytes_state(&rnd_state, writebuf, sz);  			ops.mode      = MTD_OPS_AUTO_OOB;  			ops.len       = 0;  			ops.retlen    = 0; @@ -575,7 +578,7 @@ static int __init mtd_oobtest_init(void)  			ops.oobretlen = 0;  			ops.ooboffs   = 0;  			ops.datbuf    = NULL; -			ops.oobbuf    = writebuf; +			ops.oobbuf    = writebuf + pg * sz;  			err = mtd_write_oob(mtd, addr, &ops);  			if (err)  				goto out; diff --git a/drivers/mtd/tests/pagetest.c b/drivers/mtd/tests/pagetest.c index 44b96e999ad..ed2d3f656fd 100644 --- a/drivers/mtd/tests/pagetest.c +++ b/drivers/mtd/tests/pagetest.c @@ -353,7 +353,7 @@ static int __init 
mtd_pagetest_init(void)  		return err;  	} -	if (mtd->type != MTD_NANDFLASH) { +	if (!mtd_type_is_nand(mtd)) {  		pr_info("this test requires NAND flash\n");  		goto out;  	} diff --git a/drivers/mtd/tests/subpagetest.c b/drivers/mtd/tests/subpagetest.c index e2c0adf24cf..a876371ad41 100644 --- a/drivers/mtd/tests/subpagetest.c +++ b/drivers/mtd/tests/subpagetest.c @@ -299,7 +299,7 @@ static int __init mtd_subpagetest_init(void)  		return err;  	} -	if (mtd->type != MTD_NANDFLASH) { +	if (!mtd_type_is_nand(mtd)) {  		pr_info("this test requires NAND flash\n");  		goto out;  	} diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig index 36663af56d8..f0855ce08ed 100644 --- a/drivers/mtd/ubi/Kconfig +++ b/drivers/mtd/ubi/Kconfig @@ -87,4 +87,20 @@ config MTD_UBI_GLUEBI  	   work on top of UBI. Do not enable this unless you use legacy  	   software. +config MTD_UBI_BLOCK +	bool "Read-only block devices on top of UBI volumes" +	default n +	depends on BLOCK +	help +	   This option enables read-only UBI block devices support. UBI block +	   devices will be layered on top of UBI volumes, which means that the +	   UBI driver will transparently handle things like bad eraseblocks and +	   bit-flips. You can put any block-oriented file system on top of UBI +	   volumes in read-only mode (e.g., ext4), but it is probably most +	   practical for read-only file systems, like squashfs. + +	   When selected, this feature will be built in the UBI driver. + +	   If in doubt, say "N". 
+  endif # MTD_UBI diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile index b46b0c97858..4e3c3d70d8c 100644 --- a/drivers/mtd/ubi/Makefile +++ b/drivers/mtd/ubi/Makefile @@ -3,5 +3,6 @@ obj-$(CONFIG_MTD_UBI) += ubi.o  ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o  ubi-y += misc.o debug.o  ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o +ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o  obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c index c071d410488..6f27d9a1be3 100644 --- a/drivers/mtd/ubi/attach.c +++ b/drivers/mtd/ubi/attach.c @@ -900,10 +900,9 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,  		 * number.  		 */  		image_seq = be32_to_cpu(ech->image_seq); -		if (!ubi->image_seq && image_seq) +		if (!ubi->image_seq)  			ubi->image_seq = image_seq; -		if (ubi->image_seq && image_seq && -		    ubi->image_seq != image_seq) { +		if (image_seq && ubi->image_seq != image_seq) {  			ubi_err("bad image sequence number %d in PEB %d, expected %d",  				image_seq, pnum, ubi->image_seq);  			ubi_dump_ec_hdr(ech); @@ -1417,9 +1416,11 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)  				ai = alloc_ai("ubi_aeb_slab_cache2");  				if (!ai)  					return -ENOMEM; -			} -			err = scan_all(ubi, ai, UBI_FM_MAX_START); +				err = scan_all(ubi, ai, 0); +			} else { +				err = scan_all(ubi, ai, UBI_FM_MAX_START); +			}  		}  	}  #else @@ -1452,8 +1453,10 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)  		struct ubi_attach_info *scan_ai;  		scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache"); -		if (!scan_ai) +		if (!scan_ai) { +			err = -ENOMEM;  			goto out_wl; +		}  		err = scan_all(ubi, scan_ai, 0);  		if (err) { diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c new file mode 100644 index 00000000000..8457df7ec5a --- /dev/null +++ b/drivers/mtd/ubi/block.c @@ -0,0 +1,649 @@ +/* + * Copyright (c) 2014 Ezequiel Garcia + * Copyright (c) 2011 Free 
Electrons + * + * Driver parameter handling strongly based on drivers/mtd/ubi/build.c + *   Copyright (c) International Business Machines Corp., 2006 + *   Copyright (c) Nokia Corporation, 2007 + *   Authors: Artem Bityutskiy, Frank Haverkamp + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + */ + +/* + * Read-only block devices on top of UBI volumes + * + * A simple implementation to allow a block device to be layered on top of a + * UBI volume. The implementation is provided by creating a static 1-to-1 + * mapping between the block device and the UBI volume. + * + * The addressed byte is obtained from the addressed block sector, which is + * mapped linearly into the corresponding LEB: + * + *   LEB number = addressed byte / LEB size + * + * This feature is compiled in the UBI core, and adds a 'block' parameter + * to allow early creation of block devices on top of UBI volumes. Runtime + * block creation/removal for UBI volumes is provided through two UBI ioctls: + * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK. 
+ */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/mtd/ubi.h> +#include <linux/workqueue.h> +#include <linux/blkdev.h> +#include <linux/hdreg.h> +#include <asm/div64.h> + +#include "ubi-media.h" +#include "ubi.h" + +/* Maximum number of supported devices */ +#define UBIBLOCK_MAX_DEVICES 32 + +/* Maximum length of the 'block=' parameter */ +#define UBIBLOCK_PARAM_LEN 63 + +/* Maximum number of comma-separated items in the 'block=' parameter */ +#define UBIBLOCK_PARAM_COUNT 2 + +struct ubiblock_param { +	int ubi_num; +	int vol_id; +	char name[UBIBLOCK_PARAM_LEN+1]; +}; + +/* Numbers of elements set in the @ubiblock_param array */ +static int ubiblock_devs __initdata; + +/* MTD devices specification parameters */ +static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata; + +struct ubiblock { +	struct ubi_volume_desc *desc; +	int ubi_num; +	int vol_id; +	int refcnt; +	int leb_size; + +	struct gendisk *gd; +	struct request_queue *rq; + +	struct workqueue_struct *wq; +	struct work_struct work; + +	struct mutex dev_mutex; +	spinlock_t queue_lock; +	struct list_head list; +}; + +/* Linked list of all ubiblock instances */ +static LIST_HEAD(ubiblock_devices); +static DEFINE_MUTEX(devices_mutex); +static int ubiblock_major; + +static int __init ubiblock_set_param(const char *val, +				     const struct kernel_param *kp) +{ +	int i, ret; +	size_t len; +	struct ubiblock_param *param; +	char buf[UBIBLOCK_PARAM_LEN]; +	char *pbuf = &buf[0]; +	char *tokens[UBIBLOCK_PARAM_COUNT]; + +	if (!val) +		return -EINVAL; + +	len = strnlen(val, UBIBLOCK_PARAM_LEN); +	if (len == 0) { +		ubi_warn("block: empty 'block=' parameter - ignored\n"); +		return 0; +	} + +	if (len == UBIBLOCK_PARAM_LEN) { +		ubi_err("block: parameter \"%s\" is too long, max. 
is %d\n", +			val, UBIBLOCK_PARAM_LEN); +		return -EINVAL; +	} + +	strcpy(buf, val); + +	/* Get rid of the final newline */ +	if (buf[len - 1] == '\n') +		buf[len - 1] = '\0'; + +	for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++) +		tokens[i] = strsep(&pbuf, ","); + +	param = &ubiblock_param[ubiblock_devs]; +	if (tokens[1]) { +		/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */ +		ret = kstrtoint(tokens[0], 10, ¶m->ubi_num); +		if (ret < 0) +			return -EINVAL; + +		/* Second param can be a number or a name */ +		ret = kstrtoint(tokens[1], 10, ¶m->vol_id); +		if (ret < 0) { +			param->vol_id = -1; +			strcpy(param->name, tokens[1]); +		} + +	} else { +		/* One parameter: must be device path */ +		strcpy(param->name, tokens[0]); +		param->ubi_num = -1; +		param->vol_id = -1; +	} + +	ubiblock_devs++; + +	return 0; +} + +static struct kernel_param_ops ubiblock_param_ops = { +	.set    = ubiblock_set_param, +}; +module_param_cb(block, &ubiblock_param_ops, NULL, 0); +MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. 
Parameter format: block=<path|dev,num|dev,name>.\n" +			"Multiple \"block\" parameters may be specified.\n" +			"UBI volumes may be specified by their number, name, or path to the device node.\n" +			"Examples\n" +			"Using the UBI volume path:\n" +			"ubi.block=/dev/ubi0_0\n" +			"Using the UBI device, and the volume name:\n" +			"ubi.block=0,rootfs\n" +			"Using both UBI device number and UBI volume number:\n" +			"ubi.block=0,0\n"); + +static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id) +{ +	struct ubiblock *dev; + +	list_for_each_entry(dev, &ubiblock_devices, list) +		if (dev->ubi_num == ubi_num && dev->vol_id == vol_id) +			return dev; +	return NULL; +} + +static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer, +				int leb, int offset, int len) +{ +	int ret; + +	ret = ubi_read(dev->desc, leb, buffer, offset, len); +	if (ret) { +		ubi_err("%s ubi_read error %d", +			dev->gd->disk_name, ret); +		return ret; +	} +	return 0; +} + +static int ubiblock_read(struct ubiblock *dev, char *buffer, +			 sector_t sec, int len) +{ +	int ret, leb, offset; +	int bytes_left = len; +	int to_read = len; +	u64 pos = sec << 9; + +	/* Get LEB:offset address to read from */ +	offset = do_div(pos, dev->leb_size); +	leb = pos; + +	while (bytes_left) { +		/* +		 * We can only read one LEB at a time. Therefore if the read +		 * length is larger than one LEB size, we split the operation. 
+		 */ +		if (offset + to_read > dev->leb_size) +			to_read = dev->leb_size - offset; + +		ret = ubiblock_read_to_buf(dev, buffer, leb, offset, to_read); +		if (ret) +			return ret; + +		buffer += to_read; +		bytes_left -= to_read; +		to_read = bytes_left; +		leb += 1; +		offset = 0; +	} +	return 0; +} + +static int do_ubiblock_request(struct ubiblock *dev, struct request *req) +{ +	int len, ret; +	sector_t sec; + +	if (req->cmd_type != REQ_TYPE_FS) +		return -EIO; + +	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > +	    get_capacity(req->rq_disk)) +		return -EIO; + +	if (rq_data_dir(req) != READ) +		return -ENOSYS; /* Write not implemented */ + +	sec = blk_rq_pos(req); +	len = blk_rq_cur_bytes(req); + +	/* +	 * Let's prevent the device from being removed while we're doing I/O +	 * work. Notice that this means we serialize all the I/O operations, +	 * but it's probably of no impact given the NAND core serializes +	 * flash access anyway. +	 */ +	mutex_lock(&dev->dev_mutex); +	ret = ubiblock_read(dev, bio_data(req->bio), sec, len); +	mutex_unlock(&dev->dev_mutex); + +	return ret; +} + +static void ubiblock_do_work(struct work_struct *work) +{ +	struct ubiblock *dev = +		container_of(work, struct ubiblock, work); +	struct request_queue *rq = dev->rq; +	struct request *req; +	int res; + +	spin_lock_irq(rq->queue_lock); + +	req = blk_fetch_request(rq); +	while (req) { + +		spin_unlock_irq(rq->queue_lock); +		res = do_ubiblock_request(dev, req); +		spin_lock_irq(rq->queue_lock); + +		/* +		 * If we're done with this request, +		 * we need to fetch a new one +		 */ +		if (!__blk_end_request_cur(req, res)) +			req = blk_fetch_request(rq); +	} + +	spin_unlock_irq(rq->queue_lock); +} + +static void ubiblock_request(struct request_queue *rq) +{ +	struct ubiblock *dev; +	struct request *req; + +	dev = rq->queuedata; + +	if (!dev) +		while ((req = blk_fetch_request(rq)) != NULL) +			__blk_end_request_all(req, -ENODEV); +	else +		queue_work(dev->wq, &dev->work); +} + +static 
int ubiblock_open(struct block_device *bdev, fmode_t mode) +{ +	struct ubiblock *dev = bdev->bd_disk->private_data; +	int ret; + +	mutex_lock(&dev->dev_mutex); +	if (dev->refcnt > 0) { +		/* +		 * The volume is already open, just increase the reference +		 * counter. +		 */ +		goto out_done; +	} + +	/* +	 * We want users to be aware they should only mount us as read-only. +	 * It's just a paranoid check, as write requests will get rejected +	 * in any case. +	 */ +	if (mode & FMODE_WRITE) { +		ret = -EPERM; +		goto out_unlock; +	} + +	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY); +	if (IS_ERR(dev->desc)) { +		ubi_err("%s failed to open ubi volume %d_%d", +			dev->gd->disk_name, dev->ubi_num, dev->vol_id); +		ret = PTR_ERR(dev->desc); +		dev->desc = NULL; +		goto out_unlock; +	} + +out_done: +	dev->refcnt++; +	mutex_unlock(&dev->dev_mutex); +	return 0; + +out_unlock: +	mutex_unlock(&dev->dev_mutex); +	return ret; +} + +static void ubiblock_release(struct gendisk *gd, fmode_t mode) +{ +	struct ubiblock *dev = gd->private_data; + +	mutex_lock(&dev->dev_mutex); +	dev->refcnt--; +	if (dev->refcnt == 0) { +		ubi_close_volume(dev->desc); +		dev->desc = NULL; +	} +	mutex_unlock(&dev->dev_mutex); +} + +static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ +	/* Some tools might require this information */ +	geo->heads = 1; +	geo->cylinders = 1; +	geo->sectors = get_capacity(bdev->bd_disk); +	geo->start = 0; +	return 0; +} + +static const struct block_device_operations ubiblock_ops = { +	.owner = THIS_MODULE, +	.open = ubiblock_open, +	.release = ubiblock_release, +	.getgeo	= ubiblock_getgeo, +}; + +int ubiblock_create(struct ubi_volume_info *vi) +{ +	struct ubiblock *dev; +	struct gendisk *gd; +	int disk_capacity; +	int ret; + +	/* Check that the volume isn't already handled */ +	mutex_lock(&devices_mutex); +	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) { +		mutex_unlock(&devices_mutex); +		return -EEXIST; +	} +	
mutex_unlock(&devices_mutex); + +	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL); +	if (!dev) +		return -ENOMEM; + +	mutex_init(&dev->dev_mutex); + +	dev->ubi_num = vi->ubi_num; +	dev->vol_id = vi->vol_id; +	dev->leb_size = vi->usable_leb_size; + +	/* Initialize the gendisk of this ubiblock device */ +	gd = alloc_disk(1); +	if (!gd) { +		ubi_err("block: alloc_disk failed"); +		ret = -ENODEV; +		goto out_free_dev; +	} + +	gd->fops = &ubiblock_ops; +	gd->major = ubiblock_major; +	gd->first_minor = dev->ubi_num * UBI_MAX_VOLUMES + dev->vol_id; +	gd->private_data = dev; +	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id); +	disk_capacity = (vi->size * vi->usable_leb_size) >> 9; +	set_capacity(gd, disk_capacity); +	dev->gd = gd; + +	spin_lock_init(&dev->queue_lock); +	dev->rq = blk_init_queue(ubiblock_request, &dev->queue_lock); +	if (!dev->rq) { +		ubi_err("block: blk_init_queue failed"); +		ret = -ENODEV; +		goto out_put_disk; +	} + +	dev->rq->queuedata = dev; +	dev->gd->queue = dev->rq; + +	/* +	 * Create one workqueue per volume (per registered block device). +	 * Rembember workqueues are cheap, they're not threads. 
+	 */ +	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name); +	if (!dev->wq) { +		ret = -ENOMEM; +		goto out_free_queue; +	} +	INIT_WORK(&dev->work, ubiblock_do_work); + +	mutex_lock(&devices_mutex); +	list_add_tail(&dev->list, &ubiblock_devices); +	mutex_unlock(&devices_mutex); + +	/* Must be the last step: anyone can call file ops from now on */ +	add_disk(dev->gd); +	ubi_msg("%s created from ubi%d:%d(%s)", +		dev->gd->disk_name, dev->ubi_num, dev->vol_id, vi->name); +	return 0; + +out_free_queue: +	blk_cleanup_queue(dev->rq); +out_put_disk: +	put_disk(dev->gd); +out_free_dev: +	kfree(dev); + +	return ret; +} + +static void ubiblock_cleanup(struct ubiblock *dev) +{ +	del_gendisk(dev->gd); +	blk_cleanup_queue(dev->rq); +	ubi_msg("%s released", dev->gd->disk_name); +	put_disk(dev->gd); +} + +int ubiblock_remove(struct ubi_volume_info *vi) +{ +	struct ubiblock *dev; + +	mutex_lock(&devices_mutex); +	dev = find_dev_nolock(vi->ubi_num, vi->vol_id); +	if (!dev) { +		mutex_unlock(&devices_mutex); +		return -ENODEV; +	} + +	/* Found a device, let's lock it so we can check if it's busy */ +	mutex_lock(&dev->dev_mutex); +	if (dev->refcnt > 0) { +		mutex_unlock(&dev->dev_mutex); +		mutex_unlock(&devices_mutex); +		return -EBUSY; +	} + +	/* Remove from device list */ +	list_del(&dev->list); +	mutex_unlock(&devices_mutex); + +	/* Flush pending work and stop this workqueue */ +	destroy_workqueue(dev->wq); + +	ubiblock_cleanup(dev); +	mutex_unlock(&dev->dev_mutex); +	kfree(dev); +	return 0; +} + +static void ubiblock_resize(struct ubi_volume_info *vi) +{ +	struct ubiblock *dev; +	int disk_capacity; + +	/* +	 * Need to lock the device list until we stop using the device, +	 * otherwise the device struct might get released in +	 * 'ubiblock_remove()'. 
+	 */ +	mutex_lock(&devices_mutex); +	dev = find_dev_nolock(vi->ubi_num, vi->vol_id); +	if (!dev) { +		mutex_unlock(&devices_mutex); +		return; +	} + +	mutex_lock(&dev->dev_mutex); +	disk_capacity = (vi->size * vi->usable_leb_size) >> 9; +	set_capacity(dev->gd, disk_capacity); +	ubi_msg("%s resized to %d LEBs", dev->gd->disk_name, vi->size); +	mutex_unlock(&dev->dev_mutex); +	mutex_unlock(&devices_mutex); +} + +static int ubiblock_notify(struct notifier_block *nb, +			 unsigned long notification_type, void *ns_ptr) +{ +	struct ubi_notification *nt = ns_ptr; + +	switch (notification_type) { +	case UBI_VOLUME_ADDED: +		/* +		 * We want to enforce explicit block device creation for +		 * volumes, so when a volume is added we do nothing. +		 */ +		break; +	case UBI_VOLUME_REMOVED: +		ubiblock_remove(&nt->vi); +		break; +	case UBI_VOLUME_RESIZED: +		ubiblock_resize(&nt->vi); +		break; +	default: +		break; +	} +	return NOTIFY_OK; +} + +static struct notifier_block ubiblock_notifier = { +	.notifier_call = ubiblock_notify, +}; + +static struct ubi_volume_desc * __init +open_volume_desc(const char *name, int ubi_num, int vol_id) +{ +	if (ubi_num == -1) +		/* No ubi num, name must be a vol device path */ +		return ubi_open_volume_path(name, UBI_READONLY); +	else if (vol_id == -1) +		/* No vol_id, must be vol_name */ +		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY); +	else +		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY); +} + +static int __init ubiblock_create_from_param(void) +{ +	int i, ret; +	struct ubiblock_param *p; +	struct ubi_volume_desc *desc; +	struct ubi_volume_info vi; + +	for (i = 0; i < ubiblock_devs; i++) { +		p = &ubiblock_param[i]; + +		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id); +		if (IS_ERR(desc)) { +			ubi_err("block: can't open volume, err=%ld\n", +				PTR_ERR(desc)); +			ret = PTR_ERR(desc); +			break; +		} + +		ubi_get_volume_info(desc, &vi); +		ubi_close_volume(desc); + +		ret = ubiblock_create(&vi); +		if (ret) { +			
ubi_err("block: can't add '%s' volume, err=%d\n", +				vi.name, ret); +			break; +		} +	} +	return ret; +} + +static void ubiblock_remove_all(void) +{ +	struct ubiblock *next; +	struct ubiblock *dev; + +	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) { +		/* Flush pending work and stop workqueue */ +		destroy_workqueue(dev->wq); +		/* The module is being forcefully removed */ +		WARN_ON(dev->desc); +		/* Remove from device list */ +		list_del(&dev->list); +		ubiblock_cleanup(dev); +		kfree(dev); +	} +} + +int __init ubiblock_init(void) +{ +	int ret; + +	ubiblock_major = register_blkdev(0, "ubiblock"); +	if (ubiblock_major < 0) +		return ubiblock_major; + +	/* Attach block devices from 'block=' module param */ +	ret = ubiblock_create_from_param(); +	if (ret) +		goto err_remove; + +	/* +	 * Block devices are only created upon user requests, so we ignore +	 * existing volumes. +	 */ +	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1); +	if (ret) +		goto err_unreg; +	return 0; + +err_unreg: +	unregister_blkdev(ubiblock_major, "ubiblock"); +err_remove: +	ubiblock_remove_all(); +	return ret; +} + +void __exit ubiblock_exit(void) +{ +	ubi_unregister_volume_notifier(&ubiblock_notifier); +	ubiblock_remove_all(); +	unregister_blkdev(ubiblock_major, "ubiblock"); +} diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 315dcc6ec1f..6e30a3c280d 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -41,6 +41,7 @@  #include <linux/kthread.h>  #include <linux/kernel.h>  #include <linux/slab.h> +#include <linux/major.h>  #include "ubi.h"  /* Maximum length of the 'mtd=' parameter */ @@ -1244,8 +1245,10 @@ static int __init ubi_init(void)  	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",  					      sizeof(struct ubi_wl_entry),  					      0, 0, NULL); -	if (!ubi_wl_entry_slab) +	if (!ubi_wl_entry_slab) { +		err = -ENOMEM;  		goto out_dev_unreg; +	}  	err = ubi_debugfs_init();  	if (err) @@ -1295,6 +1298,15 @@ static 
int __init ubi_init(void)  		}  	} +	err = ubiblock_init(); +	if (err) { +		ubi_err("block: cannot initialize, error %d", err); + +		/* See comment above re-ubi_is_module(). */ +		if (ubi_is_module()) +			goto out_detach; +	} +  	return 0;  out_detach: @@ -1323,6 +1335,8 @@ static void __exit ubi_exit(void)  {  	int i; +	ubiblock_exit(); +  	for (i = 0; i < UBI_MAX_DEVICES; i++)  		if (ubi_devices[i]) {  			mutex_lock(&ubi_devices_mutex); diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 8ca49f2043e..7646220ca6e 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -561,6 +561,26 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,  		break;  	} +	/* Create a R/O block device on top of the UBI volume */ +	case UBI_IOCVOLCRBLK: +	{ +		struct ubi_volume_info vi; + +		ubi_get_volume_info(desc, &vi); +		err = ubiblock_create(&vi); +		break; +	} + +	/* Remove the R/O block device */ +	case UBI_IOCVOLRMBLK: +	{ +		struct ubi_volume_info vi; + +		ubi_get_volume_info(desc, &vi); +		err = ubiblock_remove(&vi); +		break; +	} +  	default:  		err = -ENOTTY;  		break; @@ -711,7 +731,7 @@ static int rename_volumes(struct ubi_device *ubi,  			goto out_free;  		} -		re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE); +		re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_READWRITE);  		if (IS_ERR(re->desc)) {  			err = PTR_ERR(re->desc);  			ubi_err("cannot open volume %d, error %d", vol_id, err); diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index f5aa4b02cfa..0431b46d9fd 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -127,7 +127,7 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,  		if (vol_id > av->vol_id)  			p = &(*p)->rb_left; -		else if (vol_id > av->vol_id) +		else  			p = &(*p)->rb_right;  	} @@ -407,6 +407,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,  	 */  	for (i = 0; i < pool_size; i++) {  		int 
scrub = 0; +		int image_seq;  		pnum = be32_to_cpu(pebs[i]); @@ -422,13 +423,19 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,  				pnum, err);  			ret = err > 0 ? UBI_BAD_FASTMAP : err;  			goto out; -		} else if (ret == UBI_IO_BITFLIPS) +		} else if (err == UBI_IO_BITFLIPS)  			scrub = 1; -		if (be32_to_cpu(ech->image_seq) != ubi->image_seq) { +		/* +		 * Older UBI implementations have image_seq set to zero, so +		 * we shouldn't fail if image_seq == 0. +		 */ +		image_seq = be32_to_cpu(ech->image_seq); + +		if (image_seq && (image_seq != ubi->image_seq)) {  			ubi_err("bad image seq: 0x%x, expected: 0x%x",  				be32_to_cpu(ech->image_seq), ubi->image_seq); -			err = UBI_BAD_FASTMAP; +			ret = UBI_BAD_FASTMAP;  			goto out;  		} @@ -456,8 +463,8 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,  				}  			}  			if (found_orphan) { -				kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);  				list_del(&tmp_aeb->u.list); +				kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);  			}  			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache, @@ -819,6 +826,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,  	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)  		list_move_tail(&tmp_aeb->u.list, &ai->free); +	ubi_assert(list_empty(&used)); +	ubi_assert(list_empty(&eba_orphans)); +	ubi_assert(list_empty(&free)); +  	/*  	 * If fastmap is leaking PEBs (must not happen), raise a  	 * fat warning and fall back to scanning mode. 
@@ -834,6 +845,19 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,  fail_bad:  	ret = UBI_BAD_FASTMAP;  fail: +	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) { +		list_del(&tmp_aeb->u.list); +		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); +	} +	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) { +		list_del(&tmp_aeb->u.list); +		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); +	} +	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) { +		list_del(&tmp_aeb->u.list); +		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); +	} +  	return ret;  } @@ -923,6 +947,8 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,  	}  	for (i = 0; i < used_blocks; i++) { +		int image_seq; +  		pnum = be32_to_cpu(fmsb->block_loc[i]);  		if (ubi_io_is_bad(ubi, pnum)) { @@ -940,10 +966,17 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,  		} else if (ret == UBI_IO_BITFLIPS)  			fm->to_be_tortured[i] = 1; +		image_seq = be32_to_cpu(ech->image_seq);  		if (!ubi->image_seq) -			ubi->image_seq = be32_to_cpu(ech->image_seq); +			ubi->image_seq = image_seq; -		if (be32_to_cpu(ech->image_seq) != ubi->image_seq) { +		/* +		 * Older UBI implementations have image_seq set to zero, so +		 * we shouldn't fail if image_seq == 0. 
+		 */ +		if (image_seq && (image_seq != ubi->image_seq)) { +			ubi_err("wrong image seq:%d instead of %d", +				be32_to_cpu(ech->image_seq), ubi->image_seq);  			ret = UBI_BAD_FASTMAP;  			goto free_hdr;  		} diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index bf79def4012..d36134925d3 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -495,10 +495,12 @@ out:   */  static int nor_erase_prepare(struct ubi_device *ubi, int pnum)  { -	int err, err1; +	int err;  	size_t written;  	loff_t addr;  	uint32_t data = 0; +	struct ubi_ec_hdr ec_hdr; +  	/*  	 * Note, we cannot generally define VID header buffers on stack,  	 * because of the way we deal with these buffers (see the header @@ -509,50 +511,38 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)  	struct ubi_vid_hdr vid_hdr;  	/* +	 * If VID or EC is valid, we have to corrupt them before erasing.  	 * It is important to first invalidate the EC header, and then the VID  	 * header. Otherwise a power cut may lead to valid EC header and  	 * invalid VID header, in which case UBI will treat this PEB as  	 * corrupted and will try to preserve it, and print scary warnings.  	 */  	addr = (loff_t)pnum * ubi->peb_size; -	err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data); -	if (!err) { -		addr += ubi->vid_hdr_aloffset; +	err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0); +	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR && +	    err != UBI_IO_FF){  		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data); -		if (!err) -			return 0; +		if(err) +			goto error;  	} -	/* -	 * We failed to write to the media. This was observed with Spansion -	 * S29GL512N NOR flash. Most probably the previously eraseblock erasure -	 * was interrupted at a very inappropriate moment, so it became -	 * unwritable. In this case we probably anyway have garbage in this -	 * PEB. 
-	 */ -	err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0); -	if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR || -	    err1 == UBI_IO_FF) { -		struct ubi_ec_hdr ec_hdr; - -		err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0); -		if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR || -		    err1 == UBI_IO_FF) -			/* -			 * Both VID and EC headers are corrupted, so we can -			 * safely erase this PEB and not afraid that it will be -			 * treated as a valid PEB in case of an unclean reboot. -			 */ -			return 0; +	err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0); +	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR && +	    err != UBI_IO_FF){ +		addr += ubi->vid_hdr_aloffset; +		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data); +		if (err) +			goto error;  	} +	return 0; +error:  	/* -	 * The PEB contains a valid VID header, but we cannot invalidate it. -	 * Supposedly the flash media or the driver is screwed up, so return an -	 * error. +	 * The PEB contains a valid VID or EC header, but we cannot invalidate +	 * it. Supposedly the flash media or the driver is screwed up, so +	 * return an error.  	 
*/ -	ubi_err("cannot invalidate PEB %d, write returned %d read returned %d", -		pnum, err, err1); +	ubi_err("cannot invalidate PEB %d, write returned %d", pnum, err);  	ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);  	return -EIO;  } diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 8ea6297a208..7bf416329c1 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -22,7 +22,6 @@  #ifndef __UBI_UBI_H__  #define __UBI_UBI_H__ -#include <linux/init.h>  #include <linux/types.h>  #include <linux/list.h>  #include <linux/rbtree.h> @@ -864,6 +863,26 @@ int ubi_update_fastmap(struct ubi_device *ubi);  int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,  		     int fm_anchor); +/* block.c */ +#ifdef CONFIG_MTD_UBI_BLOCK +int ubiblock_init(void); +void ubiblock_exit(void); +int ubiblock_create(struct ubi_volume_info *vi); +int ubiblock_remove(struct ubi_volume_info *vi); +#else +static inline int ubiblock_init(void) { return 0; } +static inline void ubiblock_exit(void) {} +static inline int ubiblock_create(struct ubi_volume_info *vi) +{ +	return -ENOSYS; +} +static inline int ubiblock_remove(struct ubi_volume_info *vi) +{ +	return -ENOSYS; +} +#endif + +  /*   * ubi_rb_for_each_entry - walk an RB-tree.   
* @rb: a pointer to type 'struct rb_node' to use as a loop counter diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index c95bfb183c6..0f3425dac91 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -599,10 +599,6 @@ static void refill_wl_user_pool(struct ubi_device *ubi)  	return_unused_pool_pebs(ubi, pool);  	for (pool->size = 0; pool->size < pool->max_size; pool->size++) { -		if (!ubi->free.rb_node || -		   (ubi->free_count - ubi->beb_rsvd_pebs < 1)) -			break; -  		pool->pebs[pool->size] = __wl_get_peb(ubi);  		if (pool->pebs[pool->size] < 0)  			break; @@ -675,6 +671,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)  	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);  	self_check_in_wl_tree(ubi, e, &ubi->free); +	ubi->free_count--; +	ubi_assert(ubi->free_count >= 0);  	rb_erase(&e->u.rb, &ubi->free);  	return e; @@ -688,6 +686,9 @@ int ubi_wl_get_peb(struct ubi_device *ubi)  	peb = __wl_get_peb(ubi);  	spin_unlock(&ubi->wl_lock); +	if (peb < 0) +		return peb; +  	err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,  				    ubi->peb_size - ubi->vid_hdr_aloffset);  	if (err) { @@ -1072,6 +1073,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,  			/* Give the unused PEB back */  			wl_tree_add(e2, &ubi->free); +			ubi->free_count++;  			goto out_cancel;  		}  		self_check_in_wl_tree(ubi, e1, &ubi->used);  | 
