Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/Kconfig | 67
-rw-r--r--  drivers/mtd/Makefile | 8
-rw-r--r--  drivers/mtd/afs.c | 11
-rw-r--r--  drivers/mtd/ar7part.c | 34
-rw-r--r--  drivers/mtd/bcm47xxpart.c | 282
-rw-r--r--  drivers/mtd/bcm63xxpart.c | 241
-rw-r--r--  drivers/mtd/chips/Kconfig | 30
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c | 186
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c | 645
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c | 53
-rw-r--r--  drivers/mtd/chips/cfi_probe.c | 4
-rw-r--r--  drivers/mtd/chips/cfi_util.c | 12
-rw-r--r--  drivers/mtd/chips/chipreg.c | 5
-rw-r--r--  drivers/mtd/chips/fwh_lock.h | 7
-rw-r--r--  drivers/mtd/chips/gen_probe.c | 8
-rw-r--r--  drivers/mtd/chips/jedec_probe.c | 47
-rw-r--r--  drivers/mtd/chips/map_absent.c | 10
-rw-r--r--  drivers/mtd/chips/map_ram.c | 14
-rw-r--r--  drivers/mtd/chips/map_rom.c | 13
-rw-r--r--  drivers/mtd/cmdlinepart.c | 270
-rw-r--r--  drivers/mtd/devices/Kconfig | 160
-rw-r--r--  drivers/mtd/devices/Makefile | 13
-rw-r--r--  drivers/mtd/devices/bcm47xxsflash.c | 340
-rw-r--r--  drivers/mtd/devices/bcm47xxsflash.h | 76
-rw-r--r--  drivers/mtd/devices/block2mtd.c | 116
-rw-r--r--  drivers/mtd/devices/doc2000.c | 1201
-rw-r--r--  drivers/mtd/devices/doc2001.c | 841
-rw-r--r--  drivers/mtd/devices/doc2001plus.c | 1106
-rw-r--r--  drivers/mtd/devices/docecc.c | 521
-rw-r--r--  drivers/mtd/devices/docg3.c | 2143
-rw-r--r--  drivers/mtd/devices/docg3.h | 370
-rw-r--r--  drivers/mtd/devices/docprobe.c | 337
-rw-r--r--  drivers/mtd/devices/elm.c | 579
-rw-r--r--  drivers/mtd/devices/lart.c | 35
-rw-r--r--  drivers/mtd/devices/m25p80.c | 1043
-rw-r--r--  drivers/mtd/devices/ms02-nv.c | 14
-rw-r--r--  drivers/mtd/devices/mtd_dataflash.c | 197
-rw-r--r--  drivers/mtd/devices/mtdram.c | 37
-rw-r--r--  drivers/mtd/devices/phram.c | 167
-rw-r--r--  drivers/mtd/devices/pmc551.c | 107
-rw-r--r--  drivers/mtd/devices/serial_flash_cmds.h | 61
-rw-r--r--  drivers/mtd/devices/slram.c | 48
-rw-r--r--  drivers/mtd/devices/spear_smi.c | 1093
-rw-r--r--  drivers/mtd/devices/sst25l.c | 117
-rw-r--r--  drivers/mtd/devices/st_spi_fsm.c | 2080
-rw-r--r--  drivers/mtd/ftl.c | 121
-rw-r--r--  drivers/mtd/inftlcore.c | 98
-rw-r--r--  drivers/mtd/inftlmount.c | 136
-rw-r--r--  drivers/mtd/lpddr/Kconfig | 13
-rw-r--r--  drivers/mtd/lpddr/Makefile | 1
-rw-r--r--  drivers/mtd/lpddr/lpddr2_nvm.c | 507
-rw-r--r--  drivers/mtd/lpddr/lpddr_cmds.c | 83
-rw-r--r--  drivers/mtd/lpddr/qinfo_probe.c | 7
-rw-r--r--  drivers/mtd/maps/Kconfig | 184
-rw-r--r--  drivers/mtd/maps/Makefile | 17
-rw-r--r--  drivers/mtd/maps/amd76xrom.c | 7
-rw-r--r--  drivers/mtd/maps/autcpu12-nvram.c | 125
-rw-r--r--  drivers/mtd/maps/bcm963xx-flash.c | 276
-rw-r--r--  drivers/mtd/maps/bfin-async-flash.c | 45
-rw-r--r--  drivers/mtd/maps/cdb89712.c | 278
-rw-r--r--  drivers/mtd/maps/ceiva.c | 341
-rw-r--r--  drivers/mtd/maps/cfi_flagadm.c | 10
-rw-r--r--  drivers/mtd/maps/ck804xrom.c | 9
-rw-r--r--  drivers/mtd/maps/dbox2-flash.c | 123
-rw-r--r--  drivers/mtd/maps/dc21285.c | 12
-rw-r--r--  drivers/mtd/maps/dilnetpc.c | 496
-rw-r--r--  drivers/mtd/maps/dmv182.c | 146
-rw-r--r--  drivers/mtd/maps/edb7312.c | 134
-rw-r--r--  drivers/mtd/maps/esb2rom.c | 8
-rw-r--r--  drivers/mtd/maps/fortunet.c | 277
-rw-r--r--  drivers/mtd/maps/gpio-addr-flash.c | 46
-rw-r--r--  drivers/mtd/maps/h720x-flash.c | 139
-rw-r--r--  drivers/mtd/maps/ichxrom.c | 8
-rw-r--r--  drivers/mtd/maps/impa7.c | 45
-rw-r--r--  drivers/mtd/maps/intel_vr_nor.c | 45
-rw-r--r--  drivers/mtd/maps/ixp2000.c | 271
-rw-r--r--  drivers/mtd/maps/ixp4xx.c | 83
-rw-r--r--  drivers/mtd/maps/l440gx.c | 14
-rw-r--r--  drivers/mtd/maps/lantiq-flash.c | 126
-rw-r--r--  drivers/mtd/maps/latch-addr-flash.c | 47
-rw-r--r--  drivers/mtd/maps/mbx860.c | 98
-rw-r--r--  drivers/mtd/maps/octagon-5066.c | 246
-rw-r--r--  drivers/mtd/maps/pci.c | 49
-rw-r--r--  drivers/mtd/maps/pcmciamtd.c | 138
-rw-r--r--  drivers/mtd/maps/physmap.c | 86
-rw-r--r--  drivers/mtd/maps/physmap_of.c | 146
-rw-r--r--  drivers/mtd/maps/pismo.c | 31
-rw-r--r--  drivers/mtd/maps/plat-ram.c | 66
-rw-r--r--  drivers/mtd/maps/pxa2xx-flash.c | 59
-rw-r--r--  drivers/mtd/maps/rbtx4939-flash.c | 53
-rw-r--r--  drivers/mtd/maps/rpxlite.c | 64
-rw-r--r--  drivers/mtd/maps/sa1100-flash.c | 182
-rw-r--r--  drivers/mtd/maps/sc520cdp.c | 6
-rw-r--r--  drivers/mtd/maps/scb2_flash.c | 32
-rw-r--r--  drivers/mtd/maps/solutionengine.c | 45
-rw-r--r--  drivers/mtd/maps/sun_uflash.c | 22
-rw-r--r--  drivers/mtd/maps/tqm8xxl.c | 249
-rw-r--r--  drivers/mtd/maps/tsunami_flash.c | 5
-rw-r--r--  drivers/mtd/maps/uclinux.c | 48
-rw-r--r--  drivers/mtd/maps/vmax301.c | 196
-rw-r--r--  drivers/mtd/maps/vmu-flash.c | 24
-rw-r--r--  drivers/mtd/maps/wr_sbc82xx_flash.c | 181
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 76
-rw-r--r--  drivers/mtd/mtdblock.c | 52
-rw-r--r--  drivers/mtd/mtdblock_ro.c | 8
-rw-r--r--  drivers/mtd/mtdchar.c | 533
-rw-r--r--  drivers/mtd/mtdconcat.c | 159
-rw-r--r--  drivers/mtd/mtdcore.c | 605
-rw-r--r--  drivers/mtd/mtdcore.h | 27
-rw-r--r--  drivers/mtd/mtdoops.c | 99
-rw-r--r--  drivers/mtd/mtdpart.c | 320
-rw-r--r--  drivers/mtd/mtdsuper.c | 26
-rw-r--r--  drivers/mtd/mtdswap.c | 62
-rw-r--r--  drivers/mtd/nand/Kconfig | 271
-rw-r--r--  drivers/mtd/nand/Makefile | 18
-rw-r--r--  drivers/mtd/nand/alauda.c | 733
-rw-r--r--  drivers/mtd/nand/ams-delta.c | 132
-rw-r--r--  drivers/mtd/nand/atmel_nand.c | 1871
-rw-r--r--  drivers/mtd/nand/atmel_nand_ecc.h | 114
-rw-r--r--  drivers/mtd/nand/atmel_nand_nfc.h | 98
-rw-r--r--  drivers/mtd/nand/au1550nd.c | 369
-rw-r--r--  drivers/mtd/nand/autcpu12.c | 239
-rw-r--r--  drivers/mtd/nand/bcm47xxnflash/Makefile | 4
-rw-r--r--  drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h | 26
-rw-r--r--  drivers/mtd/nand/bcm47xxnflash/main.c | 80
-rw-r--r--  drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c | 413
-rw-r--r--  drivers/mtd/nand/bcm_umi_bch.c | 213
-rw-r--r--  drivers/mtd/nand/bcm_umi_nand.c | 579
-rw-r--r--  drivers/mtd/nand/bf5xx_nand.c | 54
-rw-r--r--  drivers/mtd/nand/cafe_nand.c | 164
-rw-r--r--  drivers/mtd/nand/cmx270_nand.c | 38
-rw-r--r--  drivers/mtd/nand/cs553x_nand.c | 22
-rw-r--r--  drivers/mtd/nand/davinci_nand.c | 262
-rw-r--r--  drivers/mtd/nand/denali.c | 265
-rw-r--r--  drivers/mtd/nand/denali.h | 9
-rw-r--r--  drivers/mtd/nand/denali_dt.c | 132
-rw-r--r--  drivers/mtd/nand/denali_pci.c | 142
-rw-r--r--  drivers/mtd/nand/diskonchip.c | 101
-rw-r--r--  drivers/mtd/nand/docg4.c | 1394
-rw-r--r--  drivers/mtd/nand/edb7312.c | 203
-rw-r--r--  drivers/mtd/nand/fsl_elbc_nand.c | 260
-rw-r--r--  drivers/mtd/nand/fsl_ifc_nand.c | 1181
-rw-r--r--  drivers/mtd/nand/fsl_upm.c | 41
-rw-r--r--  drivers/mtd/nand/fsmc_nand.c | 1046
-rw-r--r--  drivers/mtd/nand/gpio.c | 349
-rw-r--r--  drivers/mtd/nand/gpmi-nand/Makefile | 3
-rw-r--r--  drivers/mtd/nand/gpmi-nand/bch-regs.h | 128
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-lib.c | 1355
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 1844
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-nand.h | 305
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-regs.h | 187
-rw-r--r--  drivers/mtd/nand/h1910.c | 183
-rw-r--r--  drivers/mtd/nand/jz4740_nand.c | 278
-rw-r--r--  drivers/mtd/nand/lpc32xx_mlc.c | 896
-rw-r--r--  drivers/mtd/nand/lpc32xx_slc.c | 1018
-rw-r--r--  drivers/mtd/nand/mpc5121_nfc.c | 101
-rw-r--r--  drivers/mtd/nand/mxc_nand.c | 882
-rw-r--r--  drivers/mtd/nand/nand_base.c | 3008
-rw-r--r--  drivers/mtd/nand/nand_bbt.c | 1026
-rw-r--r--  drivers/mtd/nand/nand_bch.c | 4
-rw-r--r--  drivers/mtd/nand/nand_bcm_umi.c | 149
-rw-r--r--  drivers/mtd/nand/nand_bcm_umi.h | 337
-rw-r--r--  drivers/mtd/nand/nand_ecc.c | 15
-rw-r--r--  drivers/mtd/nand/nand_ids.c | 254
-rw-r--r--  drivers/mtd/nand/nandsim.c | 355
-rw-r--r--  drivers/mtd/nand/ndfc.c | 56
-rw-r--r--  drivers/mtd/nand/nomadik_nand.c | 246
-rw-r--r--  drivers/mtd/nand/nuc900_nand.c | 108
-rw-r--r--  drivers/mtd/nand/omap2.c | 1486
-rw-r--r--  drivers/mtd/nand/orion_nand.c | 105
-rw-r--r--  drivers/mtd/nand/pasemi_nand.c | 23
-rw-r--r--  drivers/mtd/nand/plat_nand.c | 117
-rw-r--r--  drivers/mtd/nand/ppchameleonevb.c | 432
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 1478
-rw-r--r--  drivers/mtd/nand/r852.c | 110
-rw-r--r--  drivers/mtd/nand/rtc_from4.c | 624
-rw-r--r--  drivers/mtd/nand/s3c2410.c | 264
-rw-r--r--  drivers/mtd/nand/sh_flctl.c | 736
-rw-r--r--  drivers/mtd/nand/sharpsl.c | 43
-rw-r--r--  drivers/mtd/nand/sm_common.c | 77
-rw-r--r--  drivers/mtd/nand/socrates_nand.c | 81
-rw-r--r--  drivers/mtd/nand/spia.c | 176
-rw-r--r--  drivers/mtd/nand/tmio_nand.c | 93
-rw-r--r--  drivers/mtd/nand/txx9ndfmc.c | 64
-rw-r--r--  drivers/mtd/nand/xway_nand.c | 201
-rw-r--r--  drivers/mtd/nftlcore.c | 69
-rw-r--r--  drivers/mtd/nftlmount.c | 39
-rw-r--r--  drivers/mtd/ofpart.c | 142
-rw-r--r--  drivers/mtd/onenand/Kconfig | 8
-rw-r--r--  drivers/mtd/onenand/Makefile | 3
-rw-r--r--  drivers/mtd/onenand/generic.c | 43
-rw-r--r--  drivers/mtd/onenand/omap2.c | 155
-rw-r--r--  drivers/mtd/onenand/onenand_base.c | 248
-rw-r--r--  drivers/mtd/onenand/onenand_bbt.c | 10
-rw-r--r--  drivers/mtd/onenand/onenand_sim.c | 564
-rw-r--r--  drivers/mtd/onenand/samsung.c | 53
-rw-r--r--  drivers/mtd/onenand/samsung.h | 59
-rw-r--r--  drivers/mtd/redboot.c | 30
-rw-r--r--  drivers/mtd/rfd_ftl.c | 56
-rw-r--r--  drivers/mtd/sm_ftl.c | 81
-rw-r--r--  drivers/mtd/spi-nor/Kconfig | 17
-rw-r--r--  drivers/mtd/spi-nor/Makefile | 2
-rw-r--r--  drivers/mtd/spi-nor/fsl-quadspi.c | 1009
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 1107
-rw-r--r--  drivers/mtd/ssfdc.c | 60
-rw-r--r--  drivers/mtd/tests/Makefile | 10
-rw-r--r--  drivers/mtd/tests/mtd_nandecctest.c | 298
-rw-r--r--  drivers/mtd/tests/mtd_pagetest.c | 632
-rw-r--r--  drivers/mtd/tests/mtd_test.c | 113
-rw-r--r--  drivers/mtd/tests/mtd_test.h | 11
-rw-r--r--  drivers/mtd/tests/nandbiterrs.c | 428
-rw-r--r--  drivers/mtd/tests/oobtest.c (renamed from drivers/mtd/tests/mtd_oobtest.c) | 366
-rw-r--r--  drivers/mtd/tests/pagetest.c | 464
-rw-r--r--  drivers/mtd/tests/readtest.c (renamed from drivers/mtd/tests/mtd_readtest.c) | 103
-rw-r--r--  drivers/mtd/tests/speedtest.c (renamed from drivers/mtd/tests/mtd_speedtest.c) | 297
-rw-r--r--  drivers/mtd/tests/stresstest.c (renamed from drivers/mtd/tests/mtd_stresstest.c) | 176
-rw-r--r--  drivers/mtd/tests/subpagetest.c (renamed from drivers/mtd/tests/mtd_subpagetest.c) | 284
-rw-r--r--  drivers/mtd/tests/torturetest.c (renamed from drivers/mtd/tests/mtd_torturetest.c) | 160
-rw-r--r--  drivers/mtd/ubi/Kconfig | 81
-rw-r--r--  drivers/mtd/ubi/Makefile | 7
-rw-r--r--  drivers/mtd/ubi/attach.c (renamed from drivers/mtd/ubi/scan.c) | 1260
-rw-r--r--  drivers/mtd/ubi/block.c | 649
-rw-r--r--  drivers/mtd/ubi/build.c | 428
-rw-r--r--  drivers/mtd/ubi/cdev.c | 113
-rw-r--r--  drivers/mtd/ubi/debug.c | 469
-rw-r--r--  drivers/mtd/ubi/debug.h | 171
-rw-r--r--  drivers/mtd/ubi/eba.c | 265
-rw-r--r--  drivers/mtd/ubi/fastmap.c | 1567
-rw-r--r--  drivers/mtd/ubi/gluebi.c | 89
-rw-r--r--  drivers/mtd/ubi/io.c | 414
-rw-r--r--  drivers/mtd/ubi/kapi.c | 67
-rw-r--r--  drivers/mtd/ubi/misc.c | 41
-rw-r--r--  drivers/mtd/ubi/scan.h | 174
-rw-r--r--  drivers/mtd/ubi/ubi-media.h | 145
-rw-r--r--  drivers/mtd/ubi/ubi.h | 389
-rw-r--r--  drivers/mtd/ubi/upd.c | 22
-rw-r--r--  drivers/mtd/ubi/vmt.c | 87
-rw-r--r--  drivers/mtd/ubi/vtbl.c | 258
-rw-r--r--  drivers/mtd/ubi/wl.c | 909
239 files changed, 40479 insertions, 26914 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 4be8373d43e..94b821042d9 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -1,6 +1,6 @@
menuconfig MTD
tristate "Memory Technology Device (MTD) support"
- depends on HAS_IOMEM
+ depends on GENERIC_IO
help
Memory Technology Devices are flash, RAM and similar chips, often
used for solid state file systems on embedded devices. This option
@@ -12,27 +12,17 @@ menuconfig MTD
if MTD
-config MTD_DEBUG
- bool "Debugging"
- help
- This turns on low-level debugging for the entire MTD sub-system.
- Normally, you should say 'N'.
-
-config MTD_DEBUG_VERBOSE
- int "Debugging verbosity (0 = quiet, 3 = noisy)"
- depends on MTD_DEBUG
- default "0"
- help
- Determines the verbosity level of the MTD debugging messages.
-
config MTD_TESTS
- tristate "MTD tests support"
+ tristate "MTD tests support (DANGEROUS)"
depends on m
help
This option includes various MTD tests into compilation. The tests
should normally be compiled as kernel modules. The modules perform
various checks and verifications when loaded.
+ WARNING: some of the tests will ERASE the entire MTD device they
+ test. Do not use these tests unless you really know what you are doing.
+
config MTD_REDBOOT_PARTS
tristate "RedBoot partition table parsing"
---help---
@@ -84,8 +74,8 @@ config MTD_REDBOOT_PARTS_READONLY
endif # MTD_REDBOOT_PARTS
config MTD_CMDLINE_PARTS
- bool "Command line partition table parsing"
- depends on MTD = "y"
+ tristate "Command line partition table parsing"
+ depends on MTD
---help---
Allow generic configuration of the MTD partition tables via the kernel
command line. Multiple flash resources are supported for hardware where
@@ -137,37 +127,41 @@ config MTD_AFS_PARTS
'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example.
config MTD_OF_PARTS
- def_bool y
+ tristate "OpenFirmware partitioning information support"
+ default y
depends on OF
help
This provides a partition parsing function which derives
the partition map from the children of the flash node,
- as described in Documentation/powerpc/booting-without-of.txt.
+ as described in Documentation/devicetree/booting-without-of.txt.
config MTD_AR7_PARTS
tristate "TI AR7 partitioning support"
---help---
TI AR7 partitioning support
-comment "User Modules And Translation Layers"
-
-config MTD_CHAR
- tristate "Direct char device access to MTD devices"
+config MTD_BCM63XX_PARTS
+ tristate "BCM63XX CFE partitioning support"
+ depends on BCM63XX
+ select CRC32
help
- This provides a character device for each MTD device present in
- the system, allowing the user to read and write directly to the
- memory chips, and also use ioctl() to obtain information about
- the device, or to erase parts of it.
+ This provides partition parsing for BCM63xx devices with CFE
+ bootloaders.
-config HAVE_MTD_OTP
- bool
+config MTD_BCM47XX_PARTS
+ tristate "BCM47XX partitioning support"
+ depends on BCM47XX || ARCH_BCM_5301X
help
- Enable access to OTP regions using MTD_CHAR.
+ This provides a partition parser for devices based on BCM47xx
+ boards.
+comment "User Modules And Translation Layers"
+
+#
+# MTD block device support is select'ed if needed
+#
config MTD_BLKDEVS
- tristate "Common interface to block layer for MTD 'translation layers'"
- depends on BLOCK
- default n
+ tristate
config MTD_BLOCK
tristate "Caching block device access to MTD devices"
@@ -285,7 +279,7 @@ config SSFDC
config SM_FTL
tristate "SmartMedia/xD new translation layer"
- depends on EXPERIMENTAL && BLOCK
+ depends on BLOCK
select MTD_BLKDEVS
select MTD_NAND_ECC
help
@@ -305,9 +299,6 @@ config MTD_OOPS
buffer in a flash partition where it can be read back at some
later point.
- To use, add console=ttyMTDx to the kernel command line,
- where x is the MTD device number to use.
-
config MTD_SWAP
tristate "Swap on MTD device support"
depends on MTD && SWAP
@@ -330,6 +321,8 @@ source "drivers/mtd/onenand/Kconfig"
source "drivers/mtd/lpddr/Kconfig"
+source "drivers/mtd/spi-nor/Kconfig"
+
source "drivers/mtd/ubi/Kconfig"
endif # MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 39664c4229f..99bb9a1f6e1 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -4,16 +4,17 @@
# Core functionality.
obj-$(CONFIG_MTD) += mtd.o
-mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o
-mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o
+mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
+obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
+obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
+obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
# 'Users' - code which presents functionality to userspace.
-obj-$(CONFIG_MTD_CHAR) += mtdchar.o
obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o
obj-$(CONFIG_MTD_BLOCK) += mtdblock.o
obj-$(CONFIG_MTD_BLOCK_RO) += mtdblock_ro.o
@@ -31,4 +32,5 @@ inftl-objs := inftlcore.o inftlmount.o
obj-y += chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/
+obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/
obj-$(CONFIG_MTD_UBI) += ubi/
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
index 302372c08b5..96a33e3f7b0 100644
--- a/drivers/mtd/afs.c
+++ b/drivers/mtd/afs.c
@@ -75,7 +75,7 @@ afs_read_footer(struct mtd_info *mtd, u_int *img_start, u_int *iis_start,
size_t sz;
int ret;
- ret = mtd->read(mtd, ptr, sizeof(fs), &sz, (u_char *) &fs);
+ ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs);
if (ret >= 0 && sz != sizeof(fs))
ret = -EINVAL;
@@ -132,7 +132,7 @@ afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr)
int ret, i;
memset(iis, 0, sizeof(*iis));
- ret = mtd->read(mtd, ptr, sizeof(*iis), &sz, (u_char *) iis);
+ ret = mtd_read(mtd, ptr, sizeof(*iis), &sz, (u_char *)iis);
if (ret < 0)
goto failed;
@@ -162,8 +162,8 @@ afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr)
}
static int parse_afs_partitions(struct mtd_info *mtd,
- struct mtd_partition **pparts,
- unsigned long origin)
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
{
struct mtd_partition *parts;
u_int mask, off, idx, sz;
@@ -264,7 +264,8 @@ static struct mtd_part_parser afs_parser = {
static int __init afs_parser_init(void)
{
- return register_mtd_parser(&afs_parser);
+ register_mtd_parser(&afs_parser);
+ return 0;
}
static void __exit afs_parser_exit(void)
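
Every parser touched in this series picks up the same two interface changes seen here: the parse_fn callback now takes a struct mtd_part_parser_data * instead of an origin offset, and the callers no longer propagate a return value from register_mtd_parser(). A minimal sketch of a parser written against the updated interface follows; the "example" name and the single whole-flash partition are illustrative only and not part of this series:

    #include <linux/module.h>
    #include <linux/slab.h>
    #include <linux/mtd/mtd.h>
    #include <linux/mtd/partitions.h>

    static int example_parse(struct mtd_info *master,
                             struct mtd_partition **pparts,
                             struct mtd_part_parser_data *data)
    {
            struct mtd_partition *parts;

            /* One partition covering the whole device, purely for illustration */
            parts = kzalloc(sizeof(*parts), GFP_KERNEL);
            if (!parts)
                    return -ENOMEM;

            parts[0].name = "whole-flash";
            parts[0].offset = 0;
            parts[0].size = master->size;

            *pparts = parts;
            return 1;               /* number of partitions found */
    }

    static struct mtd_part_parser example_parser = {
            .owner = THIS_MODULE,
            .parse_fn = example_parse,
            .name = "example",
    };

    static int __init example_init(void)
    {
            register_mtd_parser(&example_parser); /* return value no longer used */
            return 0;
    }
    module_init(example_init);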
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index 6697a1ec72d..7c9172ad262 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -26,7 +26,9 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/bootmem.h>
-#include <linux/magic.h>
+#include <linux/module.h>
+
+#include <uapi/linux/magic.h>
#define AR7_PARTS 4
#define ROOT_OFFSET 0xe0000
@@ -34,10 +36,6 @@
#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42)
#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281)
-#ifndef SQUASHFS_MAGIC
-#define SQUASHFS_MAGIC 0x73717368
-#endif
-
struct ar7_bin_rec {
unsigned int checksum;
unsigned int length;
@@ -46,7 +44,7 @@ struct ar7_bin_rec {
static int create_mtd_partitions(struct mtd_info *master,
struct mtd_partition **pparts,
- unsigned long origin)
+ struct mtd_part_parser_data *data)
{
struct ar7_bin_rec header;
unsigned int offset;
@@ -72,8 +70,8 @@ static int create_mtd_partitions(struct mtd_info *master,
do { /* Try 10 blocks starting from master->erasesize */
offset = pre_size;
- master->read(master, offset,
- sizeof(header), &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
if (!strncmp((char *)&header, "TIENV0.8", 8))
ar7_parts[1].offset = pre_size;
if (header.checksum == LOADER_MAGIC1)
@@ -94,16 +92,16 @@ static int create_mtd_partitions(struct mtd_info *master,
case LOADER_MAGIC1:
while (header.length) {
offset += sizeof(header) + header.length;
- master->read(master, offset, sizeof(header),
- &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4;
break;
case LOADER_MAGIC2:
while (header.length) {
offset += sizeof(header) + header.length;
- master->read(master, offset, sizeof(header),
- &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4 + 0xff;
root_offset &= ~(uint32_t)0xff;
@@ -113,8 +111,7 @@ static int create_mtd_partitions(struct mtd_info *master,
break;
}
- master->read(master, root_offset,
- sizeof(header), &len, (u8 *)&header);
+ mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header);
if (header.checksum != SQUASHFS_MAGIC) {
root_offset += master->erasesize - 1;
root_offset &= ~(master->erasesize - 1);
@@ -142,10 +139,17 @@ static struct mtd_part_parser ar7_parser = {
static int __init ar7_parser_init(void)
{
- return register_mtd_parser(&ar7_parser);
+ register_mtd_parser(&ar7_parser);
+ return 0;
+}
+
+static void __exit ar7_parser_exit(void)
+{
+ deregister_mtd_parser(&ar7_parser);
}
module_init(ar7_parser_init);
+module_exit(ar7_parser_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
new file mode 100644
index 00000000000..adfa74c1bc4
--- /dev/null
+++ b/drivers/mtd/bcm47xxpart.c
@@ -0,0 +1,282 @@
+/*
+ * BCM47XX MTD partitioning
+ *
+ * Copyright © 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+/* 10 parts were found on sflash on Netgear WNDR4500 */
+#define BCM47XXPART_MAX_PARTS 12
+
+/*
+ * Amount of bytes we read when analyzing each block of flash memory.
+ * Set it big enough to allow detecting partition and reading important data.
+ */
+#define BCM47XXPART_BYTES_TO_READ 0x4e8
+
+/* Magics */
+#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
+#define BOARD_DATA_MAGIC2 0xBD0D0BBD
+#define CFE_MAGIC 0x43464531 /* 1EFC */
+#define FACTORY_MAGIC 0x59544346 /* FCTY */
+#define NVRAM_HEADER 0x48534C46 /* FLSH */
+#define POT_MAGIC1 0x54544f50 /* POTT */
+#define POT_MAGIC2 0x504f /* OP */
+#define ML_MAGIC1 0x39685a42
+#define ML_MAGIC2 0x26594131
+#define TRX_MAGIC 0x30524448
+#define SQSH_MAGIC 0x71736873 /* shsq */
+
+struct trx_header {
+ uint32_t magic;
+ uint32_t length;
+ uint32_t crc32;
+ uint16_t flags;
+ uint16_t version;
+ uint32_t offset[3];
+} __packed;
+
+static void bcm47xxpart_add_part(struct mtd_partition *part, char *name,
+ u64 offset, uint32_t mask_flags)
+{
+ part->name = name;
+ part->offset = offset;
+ part->mask_flags = mask_flags;
+}
+
+static int bcm47xxpart_parse(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_partition *parts;
+ uint8_t i, curr_part = 0;
+ uint32_t *buf;
+ size_t bytes_read;
+ uint32_t offset;
+ uint32_t blocksize = master->erasesize;
+ struct trx_header *trx;
+ int trx_part = -1;
+ int last_trx_part = -1;
+ int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
+
+ if (blocksize <= 0x10000)
+ blocksize = 0x10000;
+
+ /* Alloc */
+ parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
+ GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
+ if (!buf) {
+ kfree(parts);
+ return -ENOMEM;
+ }
+
+ /* Parse block by block looking for magics */
+ for (offset = 0; offset <= master->size - blocksize;
+ offset += blocksize) {
+ /* Nothing more in higher memory */
+ if (offset >= 0x2000000)
+ break;
+
+ if (curr_part >= BCM47XXPART_MAX_PARTS) {
+ pr_warn("Reached maximum number of partitions, scanning stopped!\n");
+ break;
+ }
+
+ /* Read beginning of the block */
+ if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+ &bytes_read, (uint8_t *)buf) < 0) {
+ pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
+ offset);
+ continue;
+ }
+
+ /* Magic or small NVRAM at 0x400 */
+ if ((buf[0x4e0 / 4] == CFE_MAGIC && buf[0x4e4 / 4] == CFE_MAGIC) ||
+ (buf[0x400 / 4] == NVRAM_HEADER)) {
+ bcm47xxpart_add_part(&parts[curr_part++], "boot",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+
+ /*
+ * board_data starts with board_id which differs across boards,
+ * but we can use 'MPFR' (hopefully) magic at 0x100
+ */
+ if (buf[0x100 / 4] == BOARD_DATA_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "board_data",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+
+ /* Found on Huawei E970 */
+ if (buf[0x000 / 4] == FACTORY_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "factory",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+
+ /* POT(TOP) */
+ if (buf[0x000 / 4] == POT_MAGIC1 &&
+ (buf[0x004 / 4] & 0xFFFF) == POT_MAGIC2) {
+ bcm47xxpart_add_part(&parts[curr_part++], "POT", offset,
+ MTD_WRITEABLE);
+ continue;
+ }
+
+ /* ML */
+ if (buf[0x010 / 4] == ML_MAGIC1 &&
+ buf[0x014 / 4] == ML_MAGIC2) {
+ bcm47xxpart_add_part(&parts[curr_part++], "ML", offset,
+ MTD_WRITEABLE);
+ continue;
+ }
+
+ /* TRX */
+ if (buf[0x000 / 4] == TRX_MAGIC) {
+ if (BCM47XXPART_MAX_PARTS - curr_part < 4) {
+ pr_warn("Not enough partitions left to register trx, scanning stopped!\n");
+ break;
+ }
+
+ trx = (struct trx_header *)buf;
+
+ trx_part = curr_part;
+ bcm47xxpart_add_part(&parts[curr_part++], "firmware",
+ offset, 0);
+
+ i = 0;
+ /* We have an LZMA loader if offset[2] points to something */
+ if (trx->offset[2]) {
+ bcm47xxpart_add_part(&parts[curr_part++],
+ "loader",
+ offset + trx->offset[i],
+ 0);
+ i++;
+ }
+
+ bcm47xxpart_add_part(&parts[curr_part++], "linux",
+ offset + trx->offset[i], 0);
+ i++;
+
+ /*
+ * Pure rootfs size is known and can be calculated as:
+ * trx->length - trx->offset[i]. We don't fill it as
+ * we want to have jffs2 (overlay) in the same mtd.
+ */
+ bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
+ offset + trx->offset[i], 0);
+ i++;
+
+ last_trx_part = curr_part - 1;
+
+ /*
+ * We have the whole TRX scanned; skip to the next part. Use
+ * rounddown (not roundup), as the loop will increase the
+ * offset in the next step.
+ */
+ offset = rounddown(offset + trx->length, blocksize);
+ continue;
+ }
+
+ /* Squashfs on devices not using TRX */
+ if (buf[0x000 / 4] == SQSH_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
+ offset, 0);
+ continue;
+ }
+
+ /* Read middle of the block */
+ if (mtd_read(master, offset + 0x8000, 0x4,
+ &bytes_read, (uint8_t *)buf) < 0) {
+ pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
+ offset);
+ continue;
+ }
+
+ /* Some devices (ex. WNDR3700v3) don't have a standard 'MPFR' */
+ if (buf[0x000 / 4] == BOARD_DATA_MAGIC2) {
+ bcm47xxpart_add_part(&parts[curr_part++], "board_data",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+ }
+
+ /* Look for NVRAM at the end of the last block. */
+ for (i = 0; i < ARRAY_SIZE(possible_nvram_sizes); i++) {
+ if (curr_part >= BCM47XXPART_MAX_PARTS) {
+ pr_warn("Reached maximum number of partitions, scanning stopped!\n");
+ break;
+ }
+
+ offset = master->size - possible_nvram_sizes[i];
+ if (mtd_read(master, offset, 0x4, &bytes_read,
+ (uint8_t *)buf) < 0) {
+ pr_err("mtd_read error while reading at offset 0x%X!\n",
+ offset);
+ continue;
+ }
+
+ /* Standard NVRAM */
+ if (buf[0] == NVRAM_HEADER) {
+ bcm47xxpart_add_part(&parts[curr_part++], "nvram",
+ master->size - blocksize, 0);
+ break;
+ }
+ }
+
+ kfree(buf);
+
+ /*
+ * Assume that partitions end at the beginning of the one they are
+ * followed by.
+ */
+ for (i = 0; i < curr_part; i++) {
+ u64 next_part_offset = (i < curr_part - 1) ?
+ parts[i + 1].offset : master->size;
+
+ parts[i].size = next_part_offset - parts[i].offset;
+ if (i == last_trx_part && trx_part >= 0)
+ parts[trx_part].size = next_part_offset -
+ parts[trx_part].offset;
+ }
+
+ *pparts = parts;
+ return curr_part;
+};
+
+static struct mtd_part_parser bcm47xxpart_mtd_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = bcm47xxpart_parse,
+ .name = "bcm47xxpart",
+};
+
+static int __init bcm47xxpart_init(void)
+{
+ register_mtd_parser(&bcm47xxpart_mtd_parser);
+ return 0;
+}
+
+static void __exit bcm47xxpart_exit(void)
+{
+ deregister_mtd_parser(&bcm47xxpart_mtd_parser);
+}
+
+module_init(bcm47xxpart_init);
+module_exit(bcm47xxpart_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MTD partitioning for BCM47XX flash memories");
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
new file mode 100644
index 00000000000..b2443f7031c
--- /dev/null
+++ b/drivers/mtd/bcm63xxpart.c
@@ -0,0 +1,241 @@
+/*
+ * BCM63XX CFE image tag parser
+ *
+ * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
+ * Mike Albon <malbon@openwrt.org>
+ * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
+ * Copyright © 2011-2013 Jonas Gorski <jonas.gorski@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/mach-bcm63xx/bcm63xx_nvram.h>
+#include <asm/mach-bcm63xx/bcm963xx_tag.h>
+#include <asm/mach-bcm63xx/board_bcm963xx.h>
+
+#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
+
+#define BCM63XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */
+
+#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
+
+static int bcm63xx_detect_cfe(struct mtd_info *master)
+{
+ char buf[9];
+ int ret;
+ size_t retlen;
+
+ ret = mtd_read(master, BCM963XX_CFE_VERSION_OFFSET, 5, &retlen,
+ (void *)buf);
+ buf[retlen] = 0;
+
+ if (ret)
+ return ret;
+
+ if (strncmp("cfe-v", buf, 5) == 0)
+ return 0;
+
+ /* very old CFE's do not have the cfe-v string, so check for magic */
+ ret = mtd_read(master, BCM63XX_CFE_MAGIC_OFFSET, 8, &retlen,
+ (void *)buf);
+ buf[retlen] = 0;
+
+ return strncmp("CFE1CFE1", buf, 8);
+}
+
+static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ /* CFE, NVRAM and global Linux are always present */
+ int nrparts = 3, curpart = 0;
+ struct bcm_tag *buf;
+ struct mtd_partition *parts;
+ int ret;
+ size_t retlen;
+ unsigned int rootfsaddr, kerneladdr, spareaddr;
+ unsigned int rootfslen, kernellen, sparelen, totallen;
+ unsigned int cfelen, nvramlen;
+ unsigned int cfe_erasesize;
+ int i;
+ u32 computed_crc;
+ bool rootfs_first = false;
+
+ if (bcm63xx_detect_cfe(master))
+ return -EINVAL;
+
+ cfe_erasesize = max_t(uint32_t, master->erasesize,
+ BCM63XX_CFE_BLOCK_SIZE);
+
+ cfelen = cfe_erasesize;
+ nvramlen = bcm63xx_nvram_get_psi_size() * SZ_1K;
+ nvramlen = roundup(nvramlen, cfe_erasesize);
+
+ /* Allocate memory for buffer */
+ buf = vmalloc(sizeof(struct bcm_tag));
+ if (!buf)
+ return -ENOMEM;
+
+ /* Get the tag */
+ ret = mtd_read(master, cfelen, sizeof(struct bcm_tag), &retlen,
+ (void *)buf);
+
+ if (retlen != sizeof(struct bcm_tag)) {
+ vfree(buf);
+ return -EIO;
+ }
+
+ computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
+ offsetof(struct bcm_tag, header_crc));
+ if (computed_crc == buf->header_crc) {
+ char *boardid = &(buf->board_id[0]);
+ char *tagversion = &(buf->tag_version[0]);
+
+ sscanf(buf->flash_image_start, "%u", &rootfsaddr);
+ sscanf(buf->kernel_address, "%u", &kerneladdr);
+ sscanf(buf->kernel_length, "%u", &kernellen);
+ sscanf(buf->total_length, "%u", &totallen);
+
+ pr_info("CFE boot tag found with version %s and board type %s\n",
+ tagversion, boardid);
+
+ kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
+ rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
+ spareaddr = roundup(totallen, master->erasesize) + cfelen;
+
+ if (rootfsaddr < kerneladdr) {
+ /* default Broadcom layout */
+ rootfslen = kerneladdr - rootfsaddr;
+ rootfs_first = true;
+ } else {
+ /* OpenWrt layout */
+ rootfsaddr = kerneladdr + kernellen;
+ rootfslen = spareaddr - rootfsaddr;
+ }
+ } else {
+ pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
+ buf->header_crc, computed_crc);
+ kernellen = 0;
+ rootfslen = 0;
+ rootfsaddr = 0;
+ spareaddr = cfelen;
+ }
+ sparelen = master->size - spareaddr - nvramlen;
+
+ /* Determine number of partitions */
+ if (rootfslen > 0)
+ nrparts++;
+
+ if (kernellen > 0)
+ nrparts++;
+
+ /* Ask kernel for more memory */
+ parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
+ if (!parts) {
+ vfree(buf);
+ return -ENOMEM;
+ }
+
+ /* Start building partition list */
+ parts[curpart].name = "CFE";
+ parts[curpart].offset = 0;
+ parts[curpart].size = cfelen;
+ curpart++;
+
+ if (kernellen > 0) {
+ int kernelpart = curpart;
+
+ if (rootfslen > 0 && rootfs_first)
+ kernelpart++;
+ parts[kernelpart].name = "kernel";
+ parts[kernelpart].offset = kerneladdr;
+ parts[kernelpart].size = kernellen;
+ curpart++;
+ }
+
+ if (rootfslen > 0) {
+ int rootfspart = curpart;
+
+ if (kernellen > 0 && rootfs_first)
+ rootfspart--;
+ parts[rootfspart].name = "rootfs";
+ parts[rootfspart].offset = rootfsaddr;
+ parts[rootfspart].size = rootfslen;
+ if (sparelen > 0 && !rootfs_first)
+ parts[rootfspart].size += sparelen;
+ curpart++;
+ }
+
+ parts[curpart].name = "nvram";
+ parts[curpart].offset = master->size - nvramlen;
+ parts[curpart].size = nvramlen;
+ curpart++;
+
+ /* Global partition "linux" to make firmware upgrades easy */
+ parts[curpart].name = "linux";
+ parts[curpart].offset = cfelen;
+ parts[curpart].size = master->size - cfelen - nvramlen;
+
+ for (i = 0; i < nrparts; i++)
+ pr_info("Partition %d is %s offset %llx and length %llx\n", i,
+ parts[i].name, parts[i].offset, parts[i].size);
+
+ pr_info("Spare partition is offset %x and length %x\n", spareaddr,
+ sparelen);
+
+ *pparts = parts;
+ vfree(buf);
+
+ return nrparts;
+};
+
+static struct mtd_part_parser bcm63xx_cfe_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = bcm63xx_parse_cfe_partitions,
+ .name = "bcm63xxpart",
+};
+
+static int __init bcm63xx_cfe_parser_init(void)
+{
+ register_mtd_parser(&bcm63xx_cfe_parser);
+ return 0;
+}
+
+static void __exit bcm63xx_cfe_parser_exit(void)
+{
+ deregister_mtd_parser(&bcm63xx_cfe_parser);
+}
+
+module_init(bcm63xx_cfe_parser_init);
+module_exit(bcm63xx_cfe_parser_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
+MODULE_AUTHOR("Jonas Gorski <jonas.gorski@gmail.com");
+MODULE_DESCRIPTION("MTD partitioning for BCM63XX CFE bootloaders");
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index b1e3c26edd6..9f02c28c020 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -43,9 +43,6 @@ choice
prompt "Flash cmd/query data swapping"
depends on MTD_CFI_ADV_OPTIONS
default MTD_CFI_NOSWAP
-
-config MTD_CFI_NOSWAP
- bool "NO"
---help---
This option defines the way in which the CPU attempts to arrange
data bits when writing the 'magic' commands to the chips. Saying
@@ -55,12 +52,8 @@ config MTD_CFI_NOSWAP
Specific arrangements are possible with the BIG_ENDIAN_BYTE and
LITTLE_ENDIAN_BYTE, if the bytes are reversed.
- If you have a LART, on which the data (and address) lines were
- connected in a fashion which ensured that the nets were as short
- as possible, resulting in a bit-shuffling which seems utterly
- random to the untrained eye, you need the LART_ENDIAN_BYTE option.
-
- Yes, there really exists something sicker than PDP-endian :)
+config MTD_CFI_NOSWAP
+ bool "NO"
config MTD_CFI_BE_BYTE_SWAP
bool "BIG_ENDIAN_BYTE"
@@ -153,7 +146,6 @@ config MTD_CFI_I8
config MTD_OTP
bool "Protection Registers aka one-time programmable (OTP) bits"
depends on MTD_CFI_ADV_OPTIONS
- select HAVE_MTD_OTP
default n
help
This enables support for reading, writing and locking so called
@@ -177,33 +169,33 @@ config MTD_OTP
in the programming of OTP bits will waste them.
config MTD_CFI_INTELEXT
- tristate "Support for Intel/Sharp flash chips"
+ tristate "Support for CFI command set 0001 (Intel/Sharp chips)"
depends on MTD_GEN_PROBE
select MTD_CFI_UTIL
help
The Common Flash Interface defines a number of different command
sets which a CFI-compliant chip may claim to implement. This code
- provides support for one of those command sets, used on Intel
- StrataFlash and other parts.
+ provides support for command set 0001, used on Intel StrataFlash
+ and other parts.
config MTD_CFI_AMDSTD
- tristate "Support for AMD/Fujitsu/Spansion flash chips"
+ tristate "Support for CFI command set 0002 (AMD/Fujitsu/Spansion chips)"
depends on MTD_GEN_PROBE
select MTD_CFI_UTIL
help
The Common Flash Interface defines a number of different command
sets which a CFI-compliant chip may claim to implement. This code
- provides support for one of those command sets, used on chips
- including the AMD Am29LV320.
+ provides support for command set 0002, used on chips including
+ the AMD Am29LV320.
config MTD_CFI_STAA
- tristate "Support for ST (Advanced Architecture) flash chips"
+ tristate "Support for CFI command set 0020 (ST (Advanced Architecture) chips)"
depends on MTD_GEN_PROBE
select MTD_CFI_UTIL
help
The Common Flash Interface defines a number of different command
sets which a CFI-compliant chip may claim to implement. This code
- provides support for one of those command sets.
+ provides support for command set 0020.
config MTD_CFI_UTIL
tristate
@@ -232,7 +224,7 @@ config MTD_ABSENT
config MTD_XIP
bool "XIP aware MTD support"
- depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && EXPERIMENTAL && ARCH_MTD_XIP
+ depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && ARCH_MTD_XIP
default y if XIP_KERNEL
help
This allows MTD support to work with flash memory which is also
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index e1e122f2f92..a7543ba3e19 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -21,7 +21,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>
@@ -53,6 +52,11 @@
/* Atmel chips */
#define AT49BV640D 0x02de
#define AT49BV640DT 0x02db
+/* Sharp chips */
+#define LH28F640BFHE_PTTL90 0x00b0
+#define LH28F640BFHE_PBTL90 0x00b1
+#define LH28F640BFHE_PTTL70A 0x00b2
+#define LH28F640BFHE_PBTL70A 0x00b3
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -69,10 +73,10 @@ static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, s
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
-static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
- struct otp_info *, size_t);
-static int cfi_intelext_get_user_prot_info (struct mtd_info *,
- struct otp_info *, size_t);
+static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
+ size_t *, struct otp_info *);
+static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
+ size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
@@ -87,7 +91,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
-static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
@@ -259,12 +263,42 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd)
(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
};
+static int is_LH28F640BF(struct cfi_private *cfi)
+{
+ /* Sharp LH28F640BF Family */
+ if (cfi->mfr == CFI_MFR_SHARP && (
+ cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
+ cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
+ return 1;
+ return 0;
+}
+
+static void fixup_LH28F640BF(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+
+ /* Reset the Partition Configuration Register on LH28F640BF
+ * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
+ if (is_LH28F640BF(cfi)) {
+ printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
+ map_write(map, CMD(0x60), 0);
+ map_write(map, CMD(0x04), 0);
+
+ /* We have set one single partition thus
+ * Simultaneous Operations are not allowed */
+ printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
+ extp->FeatureSupport &= ~512;
+ }
+}
+
static void fixup_use_point(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
- if (!mtd->point && map_is_linear(map)) {
- mtd->point = cfi_intelext_point;
- mtd->unpoint = cfi_intelext_unpoint;
+ if (!mtd->_point && map_is_linear(map)) {
+ mtd->_point = cfi_intelext_point;
+ mtd->_unpoint = cfi_intelext_unpoint;
}
}
@@ -274,8 +308,8 @@ static void fixup_use_write_buffers(struct mtd_info *mtd)
struct cfi_private *cfi = map->fldrv_priv;
if (cfi->cfiq->BufWriteTimeoutTyp) {
printk(KERN_INFO "Using buffer write method\n" );
- mtd->write = cfi_intelext_write_buffers;
- mtd->writev = cfi_intelext_writev;
+ mtd->_write = cfi_intelext_write_buffers;
+ mtd->_writev = cfi_intelext_writev;
}
}
@@ -310,6 +344,8 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
+ { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
+ { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
{ 0, 0, NULL }
};
@@ -435,23 +471,21 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
int i;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
- if (!mtd) {
- printk(KERN_ERR "Failed to allocate memory for MTD device\n");
+ if (!mtd)
return NULL;
- }
mtd->priv = map;
mtd->type = MTD_NORFLASH;
/* Fill in the default mtd operations */
- mtd->erase = cfi_intelext_erase_varsize;
- mtd->read = cfi_intelext_read;
- mtd->write = cfi_intelext_write_words;
- mtd->sync = cfi_intelext_sync;
- mtd->lock = cfi_intelext_lock;
- mtd->unlock = cfi_intelext_unlock;
- mtd->is_locked = cfi_intelext_is_locked;
- mtd->suspend = cfi_intelext_suspend;
- mtd->resume = cfi_intelext_resume;
+ mtd->_erase = cfi_intelext_erase_varsize;
+ mtd->_read = cfi_intelext_read;
+ mtd->_write = cfi_intelext_write_words;
+ mtd->_sync = cfi_intelext_sync;
+ mtd->_lock = cfi_intelext_lock;
+ mtd->_unlock = cfi_intelext_unlock;
+ mtd->_is_locked = cfi_intelext_is_locked;
+ mtd->_suspend = cfi_intelext_suspend;
+ mtd->_resume = cfi_intelext_resume;
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
@@ -564,10 +598,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
* mtd->numeraseregions, GFP_KERNEL);
- if (!mtd->eraseregions) {
- printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
+ if (!mtd->eraseregions)
goto setup_err;
- }
for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
unsigned long ernum, ersize;
@@ -600,12 +632,12 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
}
#ifdef CONFIG_MTD_OTP
- mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
- mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
- mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
- mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
- mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
- mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
+ mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
+ mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
+ mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
+ mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
+ mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
+ mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif
/* This function has the potential to distort the reality
@@ -1017,8 +1049,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
case FL_READY:
case FL_STATUS:
case FL_JEDEC_QUERY:
- /* We should really make set_vpp() count, rather than doing this */
- DISABLE_VPP(map);
break;
default:
printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
@@ -1324,7 +1354,7 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
int chipnum;
int ret = 0;
- if (!map->virt || (from + len > mtd->size))
+ if (!map->virt)
return -EINVAL;
/* Now lock the chip(s) to POINT state */
@@ -1334,7 +1364,6 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
ofs = from - (chipnum << cfi->chipshift);
*virt = map->virt + cfi->chips[chipnum].start + ofs;
- *retlen = 0;
if (phys)
*phys = map->phys + cfi->chips[chipnum].start + ofs;
@@ -1369,12 +1398,12 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
return 0;
}
-static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs;
- int chipnum;
+ int chipnum, err = 0;
/* Now unlock the chip(s) POINT state */
@@ -1382,7 +1411,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
- while (len) {
+ while (len && !err) {
unsigned long thislen;
struct flchip *chip;
@@ -1400,8 +1429,10 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
chip->ref_point_counter--;
if(chip->ref_point_counter == 0)
chip->state = FL_READY;
- } else
- printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
+ } else {
+ printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
+ err = -EINVAL;
+ }
put_chip(map, chip, chip->start);
mutex_unlock(&chip->mutex);
@@ -1410,6 +1441,8 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
ofs = 0;
chipnum++;
}
+
+ return err;
}
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
@@ -1456,8 +1489,6 @@ static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, siz
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
- *retlen = 0;
-
while (len) {
unsigned long thislen;
@@ -1551,7 +1582,8 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
}
xip_enable(map, chip, adr);
- out: put_chip(map, chip, adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
@@ -1565,10 +1597,6 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
int chipnum;
unsigned long ofs;
- *retlen = 0;
- if (!len)
- return 0;
-
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
@@ -1658,6 +1686,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
initial_adr = adr;
cmd_adr = adr & ~(wbufsize-1);
+ /* Sharp LH28F640BF chips need the first address for the
+ * Page Buffer Program command. See Table 5 of
+ * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
+ if (is_LH28F640BF(cfi))
+ cmd_adr = adr;
+
/* Let's determine this according to the interleave only once */
write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
@@ -1794,7 +1828,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
}
xip_enable(map, chip, cmd_adr);
- out: put_chip(map, chip, cmd_adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, cmd_adr);
mutex_unlock(&chip->mutex);
return ret;
}
@@ -1813,7 +1848,6 @@ static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
for (i = 0; i < count; i++)
len += vecs[i].iov_len;
- *retlen = 0;
if (!len)
return 0;
@@ -1932,6 +1966,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
ret = -EIO;
} else if (chipstatus & 0x20 && retries--) {
printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
+ DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
goto retry;
@@ -1944,7 +1979,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
}
xip_enable(map, chip, adr);
- out: put_chip(map, chip, adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
@@ -2045,7 +2081,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
{
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
- int udelay;
+ int mdelay;
int ret;
adr += chip->start;
@@ -2074,9 +2110,17 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
* If Instant Individual Block Locking supported then no need
* to delay.
*/
- udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
+ /*
+ * Unlocking may take up to 1.4 seconds on some Intel flashes. So
+ * let's use a max of 1.5 seconds (1500 ms) as the timeout.
+ *
+ * See "Clear Block Lock-Bits Time" on page 40 in
+ * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
+ * from February 2003
+ */
+ mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
- ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
+ ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
if (ret) {
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
@@ -2086,7 +2130,8 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
}
xip_enable(map, chip, adr);
-out: put_chip(map, chip, adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
@@ -2392,24 +2437,19 @@ static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
NULL, do_otp_lock, 1);
}
-static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
-{
- size_t retlen;
- int ret;
+static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
- ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
- return ret ? : retlen;
+{
+ return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
+ NULL, 0);
}
-static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
+static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
{
- size_t retlen;
- int ret;
-
- ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
- return ret ? : retlen;
+ return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
+ NULL, 1);
}
#endif
@@ -2483,7 +2523,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
allowed to. Or should we return -EAGAIN, because the upper layers
ought to have already shut down anything which was using the device
anyway? The latter for now. */
- printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
+ printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
ret = -EAGAIN;
case FL_PM_SUSPENDED:
break;
@@ -2526,12 +2566,10 @@ static void cfi_intelext_restore_locks(struct mtd_info *mtd)
if (!region->lockmap)
continue;
- for (block = 0; block < region->numblocks; block++) {
+ for_each_clear_bit(block, region->lockmap, region->numblocks) {
len = region->erasesize;
adr = region->offset + block * len;
-
- if (!test_bit(block, region->lockmap))
- cfi_intelext_unlock(mtd, adr, len);
+ cfi_intelext_unlock(mtd, adr, len);
}
}
}
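
The restore-locks hunk just above is the standard conversion from an open-coded test_bit() loop to the bitops iterator; side by side, assuming 'region' is the erase region being walked:

    unsigned long block;

    /* open-coded form: visit every block and test its lockmap bit */
    for (block = 0; block < region->numblocks; block++)
            if (!test_bit(block, region->lockmap))
                    cfi_intelext_unlock(mtd,
                                        region->offset + block * region->erasesize,
                                        region->erasesize);

    /* iterator form: for_each_clear_bit() yields only the blocks whose
     * lockmap bit is clear, i.e. those recorded as unlocked before suspend */
    for_each_clear_bit(block, region->lockmap, region->numblocks)
            cfi_intelext_unlock(mtd,
                                region->offset + block * region->erasesize,
                                region->erasesize);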
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 23175edd563..e21fde9d4d7 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -24,7 +24,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>
@@ -33,6 +32,8 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
@@ -59,6 +60,9 @@ static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf);
+
static void cfi_amdstd_destroy(struct mtd_info *);
struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
@@ -71,6 +75,10 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
.probe = NULL, /* Not usable directly */
.destroy = cfi_amdstd_destroy,
@@ -145,8 +153,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd)
if (((major << 8) | minor) < 0x3131) {
/* CFI version 1.0 => don't trust bootloc */
- DEBUG(MTD_DEBUG_LEVEL1,
- "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
+ pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
map->name, cfi->mfr, cfi->id);
/* AFAICS all 29LV400 with a bottom boot block have a device ID
@@ -166,8 +173,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd)
* the 8-bit device ID.
*/
(cfi->mfr == CFI_MFR_MACRONIX)) {
- DEBUG(MTD_DEBUG_LEVEL1,
- "%s: Macronix MX29LV400C with bottom boot block"
+ pr_debug("%s: Macronix MX29LV400C with bottom boot block"
" detected\n", map->name);
extp->TopBottom = 2; /* bottom boot */
} else
@@ -178,8 +184,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd)
extp->TopBottom = 2; /* bottom boot */
}
- DEBUG(MTD_DEBUG_LEVEL1,
- "%s: AMD CFI PRI V%c.%c has no boot block field;"
+ pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
" deduced %s from Device ID\n", map->name, major, minor,
extp->TopBottom == 2 ? "bottom" : "top");
}
@@ -191,8 +196,8 @@ static void fixup_use_write_buffers(struct mtd_info *mtd)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
if (cfi->cfiq->BufWriteTimeoutTyp) {
- DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
- mtd->write = cfi_amdstd_write_buffers;
+ pr_debug("Using buffer write method\n" );
+ mtd->_write = cfi_amdstd_write_buffers;
}
}
@@ -231,8 +236,8 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd)
static void fixup_use_secsi(struct mtd_info *mtd)
{
/* Setup for chips with a secsi area */
- mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
- mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
+ mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
+ mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}
static void fixup_use_erase_chip(struct mtd_info *mtd)
@@ -241,7 +246,7 @@ static void fixup_use_erase_chip(struct mtd_info *mtd)
struct cfi_private *cfi = map->fldrv_priv;
if ((cfi->cfiq->NumEraseRegions == 1) &&
((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
- mtd->erase = cfi_amdstd_erase_chip;
+ mtd->_erase = cfi_amdstd_erase_chip;
}
}
@@ -252,8 +257,8 @@ static void fixup_use_erase_chip(struct mtd_info *mtd)
*/
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
- mtd->lock = cfi_atmel_lock;
- mtd->unlock = cfi_atmel_unlock;
+ mtd->_lock = cfi_atmel_lock;
+ mtd->_unlock = cfi_atmel_unlock;
mtd->flags |= MTD_POWERUP_LOCK;
}
@@ -317,7 +322,7 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
- pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
+ pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
}
}
@@ -328,10 +333,23 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
- pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
+ pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
}
}
+static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /*
+	 * S29NS512P flash uses more than 8 bits to report the number of sectors,
+ * which is not permitted by CFI.
+ */
+ cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
+ pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
+}
+
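For reference, the CFI erase-region word packs the geometry as (block size / 256) in the upper 16 bits and (number of blocks - 1) in the lower 16 bits, so the forced value 0x020001ff decodes to 0x0200 * 256 = 128 KiB blocks and 0x01ff + 1 = 512 sectors, i.e. 64 MiB in total, which matches the 512 Mbit S29NS512P.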
/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
@@ -362,6 +380,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
+ { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
@@ -417,35 +436,97 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
}
}
+static int is_m29ew(struct cfi_private *cfi)
+{
+ if (cfi->mfr == CFI_MFR_INTEL &&
+ ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
+ (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
+ return 1;
+ return 0;
+}
+
+/*
+ * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
+ * Some revisions of the M29EW suffer from erase suspend hang ups. In
+ * particular, it can occur when the sequence
+ * Erase Confirm -> Suspend -> Program -> Resume
+ * causes a lockup due to internal timing issues. The consequence is that the
+ * erase cannot be resumed without inserting a dummy command after programming
+ * and prior to resuming. [...] The work-around is to issue a dummy write cycle
+ * that writes an F0 command code before the RESUME command.
+ */
+static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
+ if (is_m29ew(cfi))
+ map_write(map, CMD(0xF0), adr);
+}
+
+/*
+ * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
+ *
+ * Some revisions of the M29EW (for example, A1 and A2 step revisions)
+ * are affected by a problem that could cause a hang up when an ERASE SUSPEND
+ * command is issued after an ERASE RESUME operation without waiting for a
+ * minimum delay. The result is that once the ERASE seems to be completed
+ * (no bits are toggling), the contents of the Flash memory block on which
+ * the erase was ongoing could be inconsistent with the expected values
+ * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
+ * values), causing a consequent failure of the ERASE operation.
+ * The occurrence of this issue could be high, especially when file system
+ * operations on the Flash are intensive. As a result, it is recommended
+ * that a patch be applied. Intensive file system operations can cause many
+ * calls to the garbage routine to free Flash space (also by erasing physical
+ * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
+ * commands can occur. The problem disappears when a delay is inserted after
+ * the RESUME command by using the udelay() function available in Linux.
+ * The DELAY value must be tuned based on the customer's platform.
+ * The maximum value that fixes the problem in all cases is 500 µs.
+ * But, in our experience, a delay of 30 µs to 50 µs is sufficient
+ * in most cases.
+ * We have chosen 500 µs because this latency is acceptable.
+ */
+static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
+{
+ /*
+	 * Resolving the Delay After Resume issue; see Micron TN-13-07.
+	 * Worst-case delay must be 500 µs, but 30-50 µs should be OK as well.
+ */
+ if (is_m29ew(cfi))
+ cfi_udelay(500);
+}
+
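A minimal sketch of how the two fixups are intended to bracket an erase resume; it mirrors the put_chip() and xip_udelay() call sites patched further below and is illustrative only, not an extra helper added by this patch:

static void m29ew_resume_erase_sketch(struct map_info *map, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	/* dummy 0xF0 cycle before RESUME, issued only on affected M29EW parts */
	cfi_fixup_m29ew_erase_suspend(map, adr);

	/* the actual erase-resume command */
	map_write(map, cfi->sector_erase_cmd, adr);

	/* wait out the post-resume window, again only on affected M29EW parts */
	cfi_fixup_m29ew_delay_after_resume(cfi);
}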
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
+ struct device_node __maybe_unused *np = map->device_node;
struct mtd_info *mtd;
int i;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
- if (!mtd) {
- printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
+ if (!mtd)
return NULL;
- }
mtd->priv = map;
mtd->type = MTD_NORFLASH;
/* Fill in the default mtd operations */
- mtd->erase = cfi_amdstd_erase_varsize;
- mtd->write = cfi_amdstd_write_words;
- mtd->read = cfi_amdstd_read;
- mtd->sync = cfi_amdstd_sync;
- mtd->suspend = cfi_amdstd_suspend;
- mtd->resume = cfi_amdstd_resume;
+ mtd->_erase = cfi_amdstd_erase_varsize;
+ mtd->_write = cfi_amdstd_write_words;
+ mtd->_read = cfi_amdstd_read;
+ mtd->_sync = cfi_amdstd_sync;
+ mtd->_suspend = cfi_amdstd_suspend;
+ mtd->_resume = cfi_amdstd_resume;
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
- DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n",
- __func__, mtd->writebufsize);
+ pr_debug("MTD %s(): write buffer size %d\n", __func__,
+ mtd->writebufsize);
+ mtd->_panic_write = cfi_amdstd_panic_write;
mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
if (cfi->cfi_mode==CFI_MODE_CFI){
@@ -493,6 +574,17 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
cfi_tell_features(extp);
#endif
+#ifdef CONFIG_OF
+ if (np && of_property_read_bool(
+ np, "use-advanced-sector-protection")
+ && extp->BlkProtUnprot == 8) {
+ printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
+ mtd->_lock = cfi_ppb_lock;
+ mtd->_unlock = cfi_ppb_unlock;
+ mtd->_is_locked = cfi_ppb_is_locked;
+ }
+#endif
+
bootloc = extp->TopBottom;
if ((bootloc < 2) || (bootloc > 5)) {
printk(KERN_WARNING "%s: CFI contains unrecognised boot "
@@ -566,10 +658,8 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
* mtd->numeraseregions, GFP_KERNEL);
- if (!mtd->eraseregions) {
- printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
+ if (!mtd->eraseregions)
goto setup_err;
- }
for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
unsigned long ernum, ersize;
@@ -761,7 +851,10 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
switch(chip->oldstate) {
case FL_ERASING:
+ cfi_fixup_m29ew_erase_suspend(map,
+ chip->in_progress_block_addr);
map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
+ cfi_fixup_m29ew_delay_after_resume(cfi);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
break;
@@ -773,8 +866,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
case FL_READY:
case FL_STATUS:
- /* We should really make set_vpp() count, rather than doing this */
- DISABLE_VPP(map);
break;
default:
printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
@@ -903,6 +994,8 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
/* Disallow XIP again */
local_irq_disable();
+ /* Correct Erase Suspend Hangups for M29EW */
+ cfi_fixup_m29ew_erase_suspend(map, adr);
/* Resume the write or erase operation */
map_write(map, cfi->sector_erase_cmd, adr);
chip->state = oldstate;
@@ -1016,13 +1109,9 @@ static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_
int ret = 0;
/* ofs: offset within the first chip that the first read should start */
-
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
-
- *retlen = 0;
-
while (len) {
unsigned long thislen;
@@ -1100,16 +1189,11 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len,
int chipnum;
int ret = 0;
-
/* ofs: offset within the first chip that the first read should start */
-
/* 8 secsi bytes per chip */
chipnum=from>>3;
ofs=from & 7;
-
- *retlen = 0;
-
while (len) {
unsigned long thislen;
@@ -1163,7 +1247,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
return ret;
}
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
+ pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
__func__, adr, datum.x[0] );
/*
@@ -1174,7 +1258,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
*/
oldd = map_read(map, adr);
if (map_word_equal(map, oldd, datum)) {
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
+ pr_debug("MTD %s(): NOP\n",
__func__);
goto op_done;
}
@@ -1237,6 +1321,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
op_done:
chip->state = FL_READY;
+ DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
@@ -1254,10 +1339,6 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
unsigned long ofs, chipstart;
DECLARE_WAITQUEUE(wait, current);
- *retlen = 0;
- if (!len)
- return 0;
-
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
chipstart = cfi->chips[chipnum].start;
@@ -1400,7 +1481,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
datum = map_word_load(map, buf);
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
+ pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
__func__, adr, datum.x[0] );
XIP_INVAL_CACHED_RANGE(map, adr, len);
@@ -1468,17 +1549,30 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
UDELAY(map, chip, adr, 1);
}
- /* reset on all failures. */
- map_write( map, CMD(0xF0), chip->start );
+ /*
+ * Recovery from write-buffer programming failures requires
+ * the write-to-buffer-reset sequence. Since the last part
+ * of the sequence also works as a normal reset, we can run
+ * the same commands regardless of why we are here.
+ * See e.g.
+ * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
+ */
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
xip_enable(map, chip, adr);
/* FIXME - should have reset delay before continuing */
- printk(KERN_WARNING "MTD %s(): software timeout\n",
- __func__ );
+	printk(KERN_WARNING "MTD %s(): software timeout, address: 0x%.8lx\n",
+ __func__, adr);
ret = -EIO;
op_done:
chip->state = FL_READY;
+ DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
@@ -1496,10 +1590,6 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
int chipnum;
unsigned long ofs;
- *retlen = 0;
- if (!len)
- return 0;
-
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
@@ -1565,6 +1655,238 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
return 0;
}
+/*
+ * Wait for the flash chip to become ready to write data
+ *
+ * This is only called during the panic_write() path. When panic_write()
+ * is called, the kernel is in the process of a panic, and will soon be
+ * dead. Therefore we don't take any locks, and attempt to get access
+ * to the chip as soon as possible.
+ */
+static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int retries = 10;
+ int i;
+
+ /*
+ * If the driver thinks the chip is idle, and no toggle bits
+ * are changing, then the chip is actually idle for sure.
+ */
+ if (chip->state == FL_READY && chip_ready(map, adr))
+ return 0;
+
+ /*
+ * Try several times to reset the chip and then wait for it
+ * to become idle. The upper limit of a few milliseconds of
+ * delay isn't a big problem: the kernel is dying anyway. It
+ * is more important to save the messages.
+ */
+ while (retries > 0) {
+ const unsigned long timeo = (HZ / 1000) + 1;
+
+ /* send the reset command */
+ map_write(map, CMD(0xF0), chip->start);
+
+ /* wait for the chip to become ready */
+ for (i = 0; i < jiffies_to_usecs(timeo); i++) {
+ if (chip_ready(map, adr))
+ return 0;
+
+ udelay(1);
+ }
+ }
+
+ /* the chip never became ready */
+ return -EBUSY;
+}
+
+/*
+ * Write out one word of data to a single flash chip during a kernel panic
+ *
+ * This is only called during the panic_write() path. When panic_write()
+ * is called, the kernel is in the process of a panic, and will soon be
+ * dead. Therefore we don't take any locks, and attempt to get access
+ * to the chip as soon as possible.
+ *
+ * The implementation of this routine is intentionally similar to
+ * do_write_oneword(), in order to ease code maintenance.
+ */
+static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
+ unsigned long adr, map_word datum)
+{
+ const unsigned long uWriteTimeout = (HZ / 1000) + 1;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int retry_cnt = 0;
+ map_word oldd;
+ int ret = 0;
+ int i;
+
+ adr += chip->start;
+
+ ret = cfi_amdstd_panic_wait(map, chip, adr);
+ if (ret)
+ return ret;
+
+ pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
+ __func__, adr, datum.x[0]);
+
+ /*
+ * Check for a NOP for the case when the datum to write is already
+ * present - it saves time and works around buggy chips that corrupt
+ * data at other locations when 0xff is written to a location that
+ * already contains 0xff.
+ */
+ oldd = map_read(map, adr);
+ if (map_word_equal(map, oldd, datum)) {
+ pr_debug("MTD %s(): NOP\n", __func__);
+ goto op_done;
+ }
+
+ ENABLE_VPP(map);
+
+retry:
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ map_write(map, datum, adr);
+
+ for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
+ if (chip_ready(map, adr))
+ break;
+
+ udelay(1);
+ }
+
+ if (!chip_good(map, adr, datum)) {
+ /* reset on all failures. */
+ map_write(map, CMD(0xF0), chip->start);
+ /* FIXME - should have reset delay before continuing */
+
+ if (++retry_cnt <= MAX_WORD_RETRIES)
+ goto retry;
+
+ ret = -EIO;
+ }
+
+op_done:
+ DISABLE_VPP(map);
+ return ret;
+}
+
+/*
+ * Write out some data during a kernel panic
+ *
+ * This is used by the mtdoops driver to save the dying messages from a
+ * kernel which has panic'd.
+ *
+ * This routine ignores all of the locking used throughout the rest of the
+ * driver, in order to ensure that the data gets written out no matter what
+ * state this driver (and the flash chip itself) was in when the kernel crashed.
+ *
+ * The implementation of this routine is intentionally similar to
+ * cfi_amdstd_write_words(), in order to ease code maintenance.
+ */
+static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs, chipstart;
+ int ret = 0;
+ int chipnum;
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+ chipstart = cfi->chips[chipnum].start;
+
+ /* If it's not bus aligned, do the first byte write */
+ if (ofs & (map_bankwidth(map) - 1)) {
+ unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
+ int i = ofs - bus_ofs;
+ int n = 0;
+ map_word tmp_buf;
+
+ ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
+ if (ret)
+ return ret;
+
+ /* Load 'tmp_buf' with old contents of flash */
+ tmp_buf = map_read(map, bus_ofs + chipstart);
+
+ /* Number of bytes to copy from buffer */
+ n = min_t(int, len, map_bankwidth(map) - i);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
+
+ ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+ bus_ofs, tmp_buf);
+ if (ret)
+ return ret;
+
+ ofs += n;
+ buf += n;
+ (*retlen) += n;
+ len -= n;
+
+ if (ofs >> cfi->chipshift) {
+ chipnum++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ /* We are now aligned, write as much as possible */
+ while (len >= map_bankwidth(map)) {
+ map_word datum;
+
+ datum = map_word_load(map, buf);
+
+ ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+ ofs, datum);
+ if (ret)
+ return ret;
+
+ ofs += map_bankwidth(map);
+ buf += map_bankwidth(map);
+ (*retlen) += map_bankwidth(map);
+ len -= map_bankwidth(map);
+
+ if (ofs >> cfi->chipshift) {
+ chipnum++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+
+ chipstart = cfi->chips[chipnum].start;
+ }
+ }
+
+ /* Write the trailing bytes if any */
+ if (len & (map_bankwidth(map) - 1)) {
+ map_word tmp_buf;
+
+ ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
+ if (ret)
+ return ret;
+
+ tmp_buf = map_read(map, ofs + chipstart);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
+
+ ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+ ofs, tmp_buf);
+ if (ret)
+ return ret;
+
+ (*retlen) += len;
+ }
+
+ return 0;
+}
+
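As a usage illustration (a sketch, not part of this patch): mtdoops-style callers reach this path through the generic mtd_panic_write() wrapper, which dispatches to cfi_amdstd_panic_write() once ->_panic_write is installed above. The helper name below is hypothetical:

static void panic_log_flush_sketch(struct mtd_info *mtd, loff_t ofs,
				   const u_char *msg, size_t len)
{
	size_t retlen = 0;
	int ret;

	/* lock-free path, intended for use while the kernel is dying */
	ret = mtd_panic_write(mtd, ofs, len, &retlen, msg);
	if (ret || retlen != len)
		pr_err("panic write at 0x%llx failed (%d)\n",
		       (unsigned long long)ofs, ret);
}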
/*
* Handle devices with one erase region, that only implement
@@ -1587,7 +1909,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
return ret;
}
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
+ pr_debug("MTD %s(): ERASE 0x%.8lx\n",
__func__, chip->start );
XIP_INVAL_CACHED_RANGE(map, adr, map->size);
@@ -1652,6 +1974,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->state = FL_READY;
xip_enable(map, chip, adr);
+ DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
@@ -1675,7 +1998,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
return ret;
}
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
+ pr_debug("MTD %s(): ERASE 0x%.8lx\n",
__func__, adr );
XIP_INVAL_CACHED_RANGE(map, adr, len);
@@ -1742,6 +2065,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
}
chip->state = FL_READY;
+ DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
@@ -1801,8 +2125,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
goto out_unlock;
chip->state = FL_LOCKING;
- DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
- __func__, adr, len);
+ pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
@@ -1837,8 +2160,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
goto out_unlock;
chip->state = FL_UNLOCKING;
- DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
- __func__, adr, len);
+ pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
@@ -1863,6 +2185,205 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
+/*
+ * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
+ */
+
+struct ppb_lock {
+ struct flchip *chip;
+ loff_t offset;
+ int locked;
+};
+
+#define MAX_SECTORS 512
+
+#define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1)
+#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2)
+#define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3)
+
+static int __maybe_unused do_ppb_xxlock(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ /* PPB entry command */
+ cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
+ chip->state = FL_LOCKING;
+ map_write(map, CMD(0xA0), chip->start + adr);
+ map_write(map, CMD(0x00), chip->start + adr);
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
+ /*
+ * Unlocking of one specific sector is not supported, so we
+ * have to unlock all sectors of this device instead
+ */
+ chip->state = FL_UNLOCKING;
+ map_write(map, CMD(0x80), chip->start);
+ map_write(map, CMD(0x30), chip->start);
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
+ chip->state = FL_JEDEC_QUERY;
+ /* Return locked status: 0->locked, 1->unlocked */
+ ret = !cfi_read_query(map, adr);
+ } else
+ BUG();
+
+ /*
+	 * Wait for some time, as unlocking all sectors can take quite a while
+ */
+ timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */
+ for (;;) {
+ if (chip_ready(map, adr))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
+ ret = -EIO;
+ break;
+ }
+
+ UDELAY(map, chip, adr, 1);
+ }
+
+ /* Exit BC commands */
+ map_write(map, CMD(0x90), chip->start);
+ map_write(map, CMD(0x00), chip->start);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, adr + chip->start);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_LOCK);
+}
+
+static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ struct mtd_erase_region_info *regions = mtd->eraseregions;
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct ppb_lock *sect;
+ unsigned long adr;
+ loff_t offset;
+ uint64_t length;
+ int chipnum;
+ int i;
+ int sectors;
+ int ret;
+
+ /*
+ * PPB unlocking always unlocks all sectors of the flash chip.
+	 * We need to re-lock all previously locked sectors, so let's
+ * first check the locking status of all sectors and save
+ * it for future use.
+ */
+ sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL);
+ if (!sect)
+ return -ENOMEM;
+
+ /*
+ * This code to walk all sectors is a slightly modified version
+ * of the cfi_varsize_frob() code.
+ */
+ i = 0;
+ chipnum = 0;
+ adr = 0;
+ sectors = 0;
+ offset = 0;
+ length = mtd->size;
+
+ while (length) {
+ int size = regions[i].erasesize;
+
+ /*
+ * Only test sectors that shall not be unlocked. The other
+		 * sectors shall be unlocked, so let's keep their locking
+ * status at "unlocked" (locked=0) for the final re-locking.
+ */
+ if ((adr < ofs) || (adr >= (ofs + len))) {
+ sect[sectors].chip = &cfi->chips[chipnum];
+ sect[sectors].offset = offset;
+ sect[sectors].locked = do_ppb_xxlock(
+ map, &cfi->chips[chipnum], adr, 0,
+ DO_XXLOCK_ONEBLOCK_GETLOCK);
+ }
+
+ adr += size;
+ offset += size;
+ length -= size;
+
+ if (offset == regions[i].offset + size * regions[i].numblocks)
+ i++;
+
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+
+ sectors++;
+ if (sectors >= MAX_SECTORS) {
+			printk(KERN_ERR "Only %d sectors are supported for PPB locking!\n",
+ MAX_SECTORS);
+ kfree(sect);
+ return -EINVAL;
+ }
+ }
+
+ /* Now unlock the whole chip */
+ ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_UNLOCK);
+ if (ret) {
+ kfree(sect);
+ return ret;
+ }
+
+ /*
+ * PPB unlocking always unlocks all sectors of the flash chip.
+ * We need to re-lock all previously locked sectors.
+ */
+ for (i = 0; i < sectors; i++) {
+ if (sect[i].locked)
+ do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+ DO_XXLOCK_ONEBLOCK_LOCK);
+ }
+
+ kfree(sect);
+ return ret;
+}
+
+static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
+}
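A hedged usage sketch (not part of this patch): with the device-tree property enabling these handlers, callers go through the generic MTD lock API; mtd_lock(), mtd_unlock() and mtd_is_locked() are the standard wrappers, while the helper itself is hypothetical:

static int ppb_modify_range_sketch(struct mtd_info *mtd, loff_t ofs,
				   uint64_t len)
{
	int ret;

	/* dispatches to cfi_ppb_is_locked(); non-zero means the range is locked */
	if (mtd_is_locked(mtd, ofs, len) > 0)
		pr_info("range is PPB-locked, unlocking\n");

	/* unlocks the whole chip, then re-locks sectors outside [ofs, ofs + len) */
	ret = mtd_unlock(mtd, ofs, len);
	if (ret)
		return ret;

	/* ... erase/program the now-unlocked sectors ... */

	/* set the persistent protection bits on the range again */
	return mtd_lock(mtd, ofs, len);
}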
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 179814a95f3..423666b51ef 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -22,7 +22,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>
@@ -139,8 +138,9 @@ struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
}
/* Do some byteswapping if necessary */
- extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
- extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
+ extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
+ extp->BlkStatusRegMask = cfi32_to_cpu(map,
+ extp->BlkStatusRegMask);
#ifdef DEBUG_CFI_FEATURES
/* Tell the user about it in lots of lovely detail */
@@ -175,7 +175,6 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
if (!mtd) {
- printk(KERN_ERR "Failed to allocate memory for MTD device\n");
kfree(cfi->cmdset_priv);
return NULL;
}
@@ -188,7 +187,6 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
* mtd->numeraseregions, GFP_KERNEL);
if (!mtd->eraseregions) {
- printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
kfree(cfi->cmdset_priv);
kfree(mtd);
return NULL;
@@ -227,15 +225,15 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
}
/* Also select the correct geometry setup too */
- mtd->erase = cfi_staa_erase_varsize;
- mtd->read = cfi_staa_read;
- mtd->write = cfi_staa_write_buffers;
- mtd->writev = cfi_staa_writev;
- mtd->sync = cfi_staa_sync;
- mtd->lock = cfi_staa_lock;
- mtd->unlock = cfi_staa_unlock;
- mtd->suspend = cfi_staa_suspend;
- mtd->resume = cfi_staa_resume;
+ mtd->_erase = cfi_staa_erase_varsize;
+ mtd->_read = cfi_staa_read;
+ mtd->_write = cfi_staa_write_buffers;
+ mtd->_writev = cfi_staa_writev;
+ mtd->_sync = cfi_staa_sync;
+ mtd->_lock = cfi_staa_lock;
+ mtd->_unlock = cfi_staa_unlock;
+ mtd->_suspend = cfi_staa_suspend;
+ mtd->_resume = cfi_staa_resume;
mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
@@ -393,8 +391,6 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
- *retlen = 0;
-
while (len) {
unsigned long thislen;
@@ -616,10 +612,6 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
int chipnum;
unsigned long ofs;
- *retlen = 0;
- if (!len)
- return 0;
-
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
@@ -698,7 +690,8 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
continue;
}
memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
- ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
+ ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
+ buffer);
totlen += thislen;
if (ret || thislen != ECCBUF_SIZE)
goto write_error;
@@ -707,7 +700,8 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
to += ECCBUF_SIZE;
}
if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
- ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
+ ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
+ &thislen, elem_base);
totlen += thislen;
if (ret || thislen != ECCBUF_DIV(elem_len))
goto write_error;
@@ -721,7 +715,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
}
if (buflen) { /* flush last page, even if not full */
/* This is sometimes intended behaviour, really */
- ret = mtd->write(mtd, to, buflen, &thislen, buffer);
+ ret = mtd_write(mtd, to, buflen, &thislen, buffer);
totlen += thislen;
if (ret || thislen != ECCBUF_SIZE)
goto write_error;
@@ -901,12 +895,6 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,
int i, first;
struct mtd_erase_region_info *regions = mtd->eraseregions;
- if (instr->addr > mtd->size)
- return -EINVAL;
-
- if ((instr->len + instr->addr) > mtd->size)
- return -EINVAL;
-
/* Check that both start and end of the requested erase are
* aligned with the erasesize at the appropriate addresses.
*/
@@ -973,7 +961,7 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,
chipnum++;
if (chipnum >= cfi->numchips)
- break;
+ break;
}
}
@@ -1152,9 +1140,6 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
if (len & (mtd->erasesize -1))
return -EINVAL;
- if ((len + ofs) > mtd->size)
- return -EINVAL;
-
chipnum = ofs >> cfi->chipshift;
adr = ofs - (chipnum << cfi->chipshift);
@@ -1185,7 +1170,7 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
chipnum++;
if (chipnum >= cfi->numchips)
- break;
+ break;
}
}
return 0;
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index d2553527940..e8d0164498b 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -168,10 +168,8 @@ static int __xipram cfi_chip_setup(struct map_info *map,
return 0;
cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
- if (!cfi->cfiq) {
- printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
+ if (!cfi->cfiq)
return 0;
- }
memset(cfi->cfiq,0,sizeof(struct cfi_ident));
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 8e464054a63..09c79bd0b4f 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -116,10 +116,8 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n
printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);
extp = kmalloc(size, GFP_KERNEL);
- if (!extp) {
- printk(KERN_ERR "Failed to allocate memory\n");
+ if (!extp)
goto out;
- }
#ifdef CONFIG_MTD_XIP
local_irq_disable();
@@ -173,12 +171,6 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
int i, first;
struct mtd_erase_region_info *regions = mtd->eraseregions;
- if (ofs > mtd->size)
- return -EINVAL;
-
- if ((len + ofs) > mtd->size)
- return -EINVAL;
-
/* Check that both start and end of the requested erase are
* aligned with the erasesize at the appropriate addresses.
*/
@@ -247,7 +239,7 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
chipnum++;
if (chipnum >= cfi->numchips)
- break;
+ break;
}
}
diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c
index da1f96f385c..0bbc61ba952 100644
--- a/drivers/mtd/chips/chipreg.c
+++ b/drivers/mtd/chips/chipreg.c
@@ -76,10 +76,7 @@ struct mtd_info *do_map_probe(const char *name, struct map_info *map)
*/
module_put(drv->module);
- if (ret)
- return ret;
-
- return NULL;
+ return ret;
}
/*
* Destroy an MTD device which was created for a map device.
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 5e3cc80128a..800b0e853e8 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -34,8 +34,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
/* Refuse the operation if the we cannot look behind the chip */
if (chip->start < 0x400000) {
- DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): chip->start: %lx wanted >= 0x400000\n",
+ pr_debug( "MTD %s(): chip->start: %lx wanted >= 0x400000\n",
__func__, chip->start );
return -EIO;
}
@@ -102,7 +101,7 @@ static void fixup_use_fwh_lock(struct mtd_info *mtd)
{
printk(KERN_NOTICE "using fwh lock/unlock method\n");
/* Setup for the chips with the fwh lock method */
- mtd->lock = fwh_lock_varsize;
- mtd->unlock = fwh_unlock_varsize;
+ mtd->_lock = fwh_lock_varsize;
+ mtd->_unlock = fwh_unlock_varsize;
}
#endif /* FWH_LOCK_H */
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index 3b9a2843c5f..b57ceea2151 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -114,7 +114,6 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
mapsize = sizeof(long) * DIV_ROUND_UP(max_chips, BITS_PER_LONG);
chip_map = kzalloc(mapsize, GFP_KERNEL);
if (!chip_map) {
- printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
kfree(cfi.cfiq);
return NULL;
}
@@ -139,7 +138,6 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
retcfi = kmalloc(sizeof(struct cfi_private) + cfi.numchips * sizeof(struct flchip), GFP_KERNEL);
if (!retcfi) {
- printk(KERN_WARNING "%s: kmalloc failed for CFI private structure\n", map->name);
kfree(cfi.cfiq);
kfree(chip_map);
return NULL;
@@ -204,14 +202,14 @@ static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
struct cfi_private *cfi = map->fldrv_priv;
__u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
#ifdef CONFIG_MODULES
- char probename[16+sizeof(MODULE_SYMBOL_PREFIX)];
+ char probename[sizeof(VMLINUX_SYMBOL_STR(cfi_cmdset_%4.4X))];
cfi_cmdset_fn_t *probe_function;
- sprintf(probename, MODULE_SYMBOL_PREFIX "cfi_cmdset_%4.4X", type);
+ sprintf(probename, VMLINUX_SYMBOL_STR(cfi_cmdset_%4.4X), type);
probe_function = __symbol_get(probename);
if (!probe_function) {
- request_module(probename + sizeof(MODULE_SYMBOL_PREFIX) - 1);
+ request_module("cfi_cmdset_%4.4X", type);
probe_function = __symbol_get(probename);
}
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index ea832ea0e4a..7c0b27d132b 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -120,7 +120,7 @@
#define PM49FL008 0x006A
/* Sharp */
-#define LH28F640BF 0x00b0
+#define LH28F640BF 0x00B0
/* ST - www.st.com */
#define M29F800AB 0x0058
@@ -1299,13 +1299,14 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = CFI_MFR_SHARP,
.dev_id = LH28F640BF,
.name = "LH28F640BF",
- .devtypes = CFI_DEVICETYPE_X8,
+ .devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
- .dev_size = SIZE_4MiB,
- .cmd_set = P_ID_INTEL_STD,
- .nr_regions = 1,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 2,
.regions = {
- ERASEINFO(0x40000,16),
+ ERASEINFO(0x10000, 127),
+ ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_SST,
@@ -1914,11 +1915,10 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
* (oh and incidentaly the jedec spec - 3.5.3.3) the reset
* sequence is *supposed* to be 0xaa at 0x5555, 0x55 at
* 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips
- * as they will ignore the writes and dont care what address
+ * as they will ignore the writes and don't care what address
* the F0 is written to */
if (cfi->addr_unlock1) {
- DEBUG( MTD_DEBUG_LEVEL3,
- "reset unlock called %x %x \n",
+ pr_debug( "reset unlock called %x %x \n",
cfi->addr_unlock1,cfi->addr_unlock2);
cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
@@ -1941,7 +1941,7 @@ static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int in
uint8_t uaddr;
if (!(jedec_table[index].devtypes & cfi->device_type)) {
- DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n",
+ pr_debug("Rejecting potential %s with incompatible %d-bit device type\n",
jedec_table[index].name, 4 * (1<<cfi->device_type));
return 0;
}
@@ -2021,7 +2021,7 @@ static inline int jedec_match( uint32_t base,
* there aren't.
*/
if (finfo->dev_id > 0xff) {
- DEBUG( MTD_DEBUG_LEVEL3, "%s(): ID is not 8bit\n",
+ pr_debug("%s(): ID is not 8bit\n",
__func__);
goto match_done;
}
@@ -2045,12 +2045,10 @@ static inline int jedec_match( uint32_t base,
}
/* the part size must fit in the memory window */
- DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
+ pr_debug("MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
__func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) );
if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
- DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
+ pr_debug("MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
__func__, finfo->mfr_id, finfo->dev_id,
1 << finfo->dev_size );
goto match_done;
@@ -2061,13 +2059,12 @@ static inline int jedec_match( uint32_t base,
uaddr = finfo->uaddr;
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
+ pr_debug("MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
__func__, cfi->addr_unlock1, cfi->addr_unlock2 );
if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
&& ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 ||
unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) {
- DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): 0x%.4x 0x%.4x did not match\n",
+ pr_debug("MTD %s(): 0x%.4x 0x%.4x did not match\n",
__func__,
unlock_addrs[uaddr].addr1,
unlock_addrs[uaddr].addr2);
@@ -2083,15 +2080,13 @@ static inline int jedec_match( uint32_t base,
* FIXME - write a driver that takes all of the chip info as
* module parameters, doesn't probe but forces a load.
*/
- DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): check ID's disappear when not in ID mode\n",
+ pr_debug("MTD %s(): check ID's disappear when not in ID mode\n",
__func__ );
jedec_reset( base, map, cfi );
mfr = jedec_read_mfr( map, base, cfi );
id = jedec_read_id( map, base, cfi );
if ( mfr == cfi->mfr && id == cfi->id ) {
- DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
+ pr_debug("MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
"You might need to manually specify JEDEC parameters.\n",
__func__, cfi->mfr, cfi->id );
goto match_done;
@@ -2104,7 +2099,7 @@ static inline int jedec_match( uint32_t base,
* Put the device back in ID mode - only need to do this if we
* were truly frobbing a real device.
*/
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ );
+ pr_debug("MTD %s(): return to ID mode\n", __func__ );
if (cfi->addr_unlock1) {
cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
@@ -2167,13 +2162,11 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
cfi->mfr = jedec_read_mfr(map, base, cfi);
cfi->id = jedec_read_id(map, base, cfi);
- DEBUG(MTD_DEBUG_LEVEL3,
- "Search for id:(%02x %02x) interleave(%d) type(%d)\n",
+ pr_debug("Search for id:(%02x %02x) interleave(%d) type(%d)\n",
cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
for (i = 0; i < ARRAY_SIZE(jedec_table); i++) {
if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
- DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
+ pr_debug("MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
__func__, cfi->mfr, cfi->id,
cfi->addr_unlock1, cfi->addr_unlock2 );
if (!cfi_jedec_setup(map, cfi, i))
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
index f2b87294687..f7a5bca92ae 100644
--- a/drivers/mtd/chips/map_absent.c
+++ b/drivers/mtd/chips/map_absent.c
@@ -55,10 +55,10 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
mtd->name = map->name;
mtd->type = MTD_ABSENT;
mtd->size = map->size;
- mtd->erase = map_absent_erase;
- mtd->read = map_absent_read;
- mtd->write = map_absent_write;
- mtd->sync = map_absent_sync;
+ mtd->_erase = map_absent_erase;
+ mtd->_read = map_absent_read;
+ mtd->_write = map_absent_write;
+ mtd->_sync = map_absent_sync;
mtd->flags = 0;
mtd->erasesize = PAGE_SIZE;
mtd->writesize = 1;
@@ -70,13 +70,11 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
- *retlen = 0;
return -ENODEV;
}
static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
- *retlen = 0;
return -ENODEV;
}
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index 67640ccb2d4..991c2a1c05d 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -64,11 +64,11 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
mtd->name = map->name;
mtd->type = MTD_RAM;
mtd->size = map->size;
- mtd->erase = mapram_erase;
- mtd->get_unmapped_area = mapram_unmapped_area;
- mtd->read = mapram_read;
- mtd->write = mapram_write;
- mtd->sync = mapram_nop;
+ mtd->_erase = mapram_erase;
+ mtd->_get_unmapped_area = mapram_unmapped_area;
+ mtd->_read = mapram_read;
+ mtd->_write = mapram_write;
+ mtd->_sync = mapram_nop;
mtd->flags = MTD_CAP_RAM;
mtd->writesize = 1;
@@ -122,14 +122,10 @@ static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr)
unsigned long i;
allff = map_word_ff(map);
-
for (i=0; i<instr->len; i += map_bankwidth(map))
map_write(map, allff, instr->addr + i);
-
instr->state = MTD_ERASE_DONE;
-
mtd_erase_callback(instr);
-
return 0;
}
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index 593f73d480d..47a43cf7e5c 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -41,11 +41,11 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
mtd->name = map->name;
mtd->type = MTD_ROM;
mtd->size = map->size;
- mtd->get_unmapped_area = maprom_unmapped_area;
- mtd->read = maprom_read;
- mtd->write = maprom_write;
- mtd->sync = maprom_nop;
- mtd->erase = maprom_erase;
+ mtd->_get_unmapped_area = maprom_unmapped_area;
+ mtd->_read = maprom_read;
+ mtd->_write = maprom_write;
+ mtd->_sync = maprom_nop;
+ mtd->_erase = maprom_erase;
mtd->flags = MTD_CAP_ROM;
mtd->erasesize = map->size;
mtd->writesize = 1;
@@ -85,8 +85,7 @@ static void maprom_nop(struct mtd_info *mtd)
static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
- printk(KERN_NOTICE "maprom_write called\n");
- return -EIO;
+ return -EROFS;
}
static int maprom_erase (struct mtd_info *mtd, struct erase_info *info)
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index e790f38893b..3e829b37af8 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -22,11 +22,22 @@
*
 * mtdparts=<mtddef>[;<mtddef>]
* <mtddef> := <mtd-id>:<partdef>[,<partdef>]
- * where <mtd-id> is the name from the "cat /proc/mtd" command
- * <partdef> := <size>[@offset][<name>][ro][lk]
+ * <partdef> := <size>[@<offset>][<name>][ro][lk]
* <mtd-id> := unique name used in mapping driver/device (mtd->name)
* <size> := standard linux memsize OR "-" to denote all remaining space
+ * size is automatically truncated at end of device
+ *              if the specified or truncated size is 0 the part is skipped
+ * <offset> := standard linux memsize
+ * if omitted the part will immediately follow the previous part
+ * or 0 if the first part
* <name> := '(' NAME ')'
+ * NAME will appear in /proc/mtd
+ *
+ * <size> and <offset> can be specified such that the parts are out of order
+ * in physical memory and may even overlap.
+ *
+ * The parts are assigned MTD numbers in the order they are specified in the
+ * command line regardless of their order in physical memory.
*
* Examples:
*
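One hypothetical command line in the syntax described above (illustrative only; the mtd-id "physmap-flash.0" is made up and not taken from this file):

    mtdparts=physmap-flash.0:512k(bootloader)ro,64k(env),-(rootfs)

This defines a read-only 512 KiB bootloader partition, a 64 KiB environment partition immediately following it, and a final partition taking all remaining space on the device.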
@@ -39,10 +50,10 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/err.h>
/* error message prefix */
#define ERRP "mtd: "
@@ -56,8 +67,8 @@
/* special size referring to all the remaining space in a partition */
-#define SIZE_REMAINING UINT_MAX
-#define OFFSET_CONTINUOUS UINT_MAX
+#define SIZE_REMAINING ULLONG_MAX
+#define OFFSET_CONTINUOUS ULLONG_MAX
struct cmdline_mtd_partition {
struct cmdline_mtd_partition *next;
@@ -69,9 +80,10 @@ struct cmdline_mtd_partition {
/* mtdpart_setup() parses into here */
static struct cmdline_mtd_partition *partitions;
-/* the command line passed to mtdpart_setupd() */
+/* the command line passed to mtdpart_setup() */
+static char *mtdparts;
static char *cmdline;
-static int cmdline_parsed = 0;
+static int cmdline_parsed;
/*
* Parse one partition definition for an MTD. Since there can be many
@@ -82,15 +94,14 @@ static int cmdline_parsed = 0;
* syntax has been verified ok.
*/
static struct mtd_partition * newpart(char *s,
- char **retptr,
- int *num_parts,
- int this_part,
- unsigned char **extra_mem_ptr,
- int extra_mem_size)
+ char **retptr,
+ int *num_parts,
+ int this_part,
+ unsigned char **extra_mem_ptr,
+ int extra_mem_size)
{
struct mtd_partition *parts;
- unsigned long size;
- unsigned long offset = OFFSET_CONTINUOUS;
+ unsigned long long size, offset = OFFSET_CONTINUOUS;
char *name;
int name_len;
unsigned char *extra_mem;
@@ -98,127 +109,107 @@ static struct mtd_partition * newpart(char *s,
unsigned int mask_flags;
/* fetch the partition size */
- if (*s == '-')
- { /* assign all remaining space to this partition */
+ if (*s == '-') {
+ /* assign all remaining space to this partition */
size = SIZE_REMAINING;
s++;
- }
- else
- {
+ } else {
size = memparse(s, &s);
- if (size < PAGE_SIZE)
- {
- printk(KERN_ERR ERRP "partition size too small (%lx)\n", size);
- return NULL;
+ if (size < PAGE_SIZE) {
+ printk(KERN_ERR ERRP "partition size too small (%llx)\n",
+ size);
+ return ERR_PTR(-EINVAL);
}
}
/* fetch partition name and flags */
mask_flags = 0; /* this is going to be a regular partition */
delim = 0;
- /* check for offset */
- if (*s == '@')
- {
- s++;
- offset = memparse(s, &s);
- }
- /* now look for name */
+
+ /* check for offset */
+ if (*s == '@') {
+ s++;
+ offset = memparse(s, &s);
+ }
+
+ /* now look for name */
if (*s == '(')
- {
delim = ')';
- }
- if (delim)
- {
+ if (delim) {
char *p;
- name = ++s;
+ name = ++s;
p = strchr(name, delim);
- if (!p)
- {
+ if (!p) {
printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
name_len = p - name;
s = p + 1;
- }
- else
- {
- name = NULL;
+ } else {
+ name = NULL;
name_len = 13; /* Partition_000 */
}
/* record name length for memory allocation later */
extra_mem_size += name_len + 1;
- /* test for options */
- if (strncmp(s, "ro", 2) == 0)
- {
+ /* test for options */
+ if (strncmp(s, "ro", 2) == 0) {
mask_flags |= MTD_WRITEABLE;
s += 2;
- }
+ }
- /* if lk is found do NOT unlock the MTD partition*/
- if (strncmp(s, "lk", 2) == 0)
- {
+ /* if lk is found do NOT unlock the MTD partition*/
+ if (strncmp(s, "lk", 2) == 0) {
mask_flags |= MTD_POWERUP_LOCK;
s += 2;
- }
+ }
/* test if more partitions are following */
- if (*s == ',')
- {
- if (size == SIZE_REMAINING)
- {
+ if (*s == ',') {
+ if (size == SIZE_REMAINING) {
printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
/* more partitions follow, parse them */
parts = newpart(s + 1, &s, num_parts, this_part + 1,
&extra_mem, extra_mem_size);
- if (!parts)
- return NULL;
- }
- else
- { /* this is the last partition: allocate space for all */
+ if (IS_ERR(parts))
+ return parts;
+ } else {
+ /* this is the last partition: allocate space for all */
int alloc_size;
*num_parts = this_part + 1;
alloc_size = *num_parts * sizeof(struct mtd_partition) +
extra_mem_size;
+
parts = kzalloc(alloc_size, GFP_KERNEL);
if (!parts)
- {
- printk(KERN_ERR ERRP "out of memory\n");
- return NULL;
- }
+ return ERR_PTR(-ENOMEM);
extra_mem = (unsigned char *)(parts + *num_parts);
}
+
/* enter this partition (offset will be calculated later if it is zero at this point) */
parts[this_part].size = size;
parts[this_part].offset = offset;
parts[this_part].mask_flags = mask_flags;
if (name)
- {
strlcpy(extra_mem, name, name_len + 1);
- }
else
- {
sprintf(extra_mem, "Partition_%03d", this_part);
- }
parts[this_part].name = extra_mem;
extra_mem += name_len + 1;
dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
- this_part,
- parts[this_part].name,
- parts[this_part].offset,
- parts[this_part].size,
- parts[this_part].mask_flags));
+ this_part, parts[this_part].name, parts[this_part].offset,
+ parts[this_part].size, parts[this_part].mask_flags));
/* return (updated) pointer to extra_mem memory */
if (extra_mem_ptr)
- *extra_mem_ptr = extra_mem;
+ *extra_mem_ptr = extra_mem;
/* return (updated) pointer command line string */
*retptr = s;
@@ -238,16 +229,16 @@ static int mtdpart_setup_real(char *s)
{
struct cmdline_mtd_partition *this_mtd;
struct mtd_partition *parts;
- int mtd_id_len;
- int num_parts;
+ int mtd_id_len, num_parts;
char *p, *mtd_id;
- mtd_id = s;
+ mtd_id = s;
+
/* fetch <mtd-id> */
- if (!(p = strchr(s, ':')))
- {
+ p = strchr(s, ':');
+ if (!p) {
printk(KERN_ERR ERRP "no mtd-id\n");
- return 0;
+ return -EINVAL;
}
mtd_id_len = p - mtd_id;
@@ -264,8 +255,7 @@ static int mtdpart_setup_real(char *s)
(unsigned char**)&this_mtd, /* out: extra mem */
mtd_id_len + 1 + sizeof(*this_mtd) +
sizeof(void*)-1 /*alignment*/);
- if(!parts)
- {
+ if (IS_ERR(parts)) {
/*
* An error occurred. We're either:
* a) out of memory, or
@@ -273,12 +263,12 @@ static int mtdpart_setup_real(char *s)
* Either way, this mtd is hosed and we're
* unlikely to succeed in parsing any more
*/
- return 0;
+ return PTR_ERR(parts);
}
/* align this_mtd */
this_mtd = (struct cmdline_mtd_partition *)
- ALIGN((unsigned long)this_mtd, sizeof(void*));
+ ALIGN((unsigned long)this_mtd, sizeof(void *));
/* enter results */
this_mtd->parts = parts;
this_mtd->num_parts = num_parts;
@@ -298,14 +288,14 @@ static int mtdpart_setup_real(char *s)
break;
/* does another spec follow? */
- if (*s != ';')
- {
+ if (*s != ';') {
printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s);
- return 0;
+ return -EINVAL;
}
s++;
}
- return 1;
+
+ return 0;
}
/*
@@ -316,49 +306,67 @@ static int mtdpart_setup_real(char *s)
* the first one in the chain if a NULL mtd_id is passed in.
*/
static int parse_cmdline_partitions(struct mtd_info *master,
- struct mtd_partition **pparts,
- unsigned long origin)
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
{
- unsigned long offset;
- int i;
+ unsigned long long offset;
+ int i, err;
struct cmdline_mtd_partition *part;
const char *mtd_id = master->name;
/* parse command line */
- if (!cmdline_parsed)
- mtdpart_setup_real(cmdline);
+ if (!cmdline_parsed) {
+ err = mtdpart_setup_real(cmdline);
+ if (err)
+ return err;
+ }
- for(part = partitions; part; part = part->next)
- {
+ /*
+ * Search for the partition definition matching master->name.
+ * If master->name is not set, stop at first partition definition.
+ */
+ for (part = partitions; part; part = part->next) {
if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id)))
- {
- for(i = 0, offset = 0; i < part->num_parts; i++)
- {
- if (part->parts[i].offset == OFFSET_CONTINUOUS)
- part->parts[i].offset = offset;
- else
- offset = part->parts[i].offset;
- if (part->parts[i].size == SIZE_REMAINING)
- part->parts[i].size = master->size - offset;
- if (offset + part->parts[i].size > master->size)
- {
- printk(KERN_WARNING ERRP
- "%s: partitioning exceeds flash size, truncating\n",
- part->mtd_id);
- part->parts[i].size = master->size - offset;
- part->num_parts = i;
- }
- offset += part->parts[i].size;
- }
- *pparts = kmemdup(part->parts,
- sizeof(*part->parts) * part->num_parts,
- GFP_KERNEL);
- if (!*pparts)
- return -ENOMEM;
- return part->num_parts;
+ break;
+ }
+
+ if (!part)
+ return 0;
+
+ for (i = 0, offset = 0; i < part->num_parts; i++) {
+ if (part->parts[i].offset == OFFSET_CONTINUOUS)
+ part->parts[i].offset = offset;
+ else
+ offset = part->parts[i].offset;
+
+ if (part->parts[i].size == SIZE_REMAINING)
+ part->parts[i].size = master->size - offset;
+
+ if (offset + part->parts[i].size > master->size) {
+ printk(KERN_WARNING ERRP
+ "%s: partitioning exceeds flash size, truncating\n",
+ part->mtd_id);
+ part->parts[i].size = master->size - offset;
+ }
+ offset += part->parts[i].size;
+
+ if (part->parts[i].size == 0) {
+ printk(KERN_WARNING ERRP
+ "%s: skipping zero sized partition\n",
+ part->mtd_id);
+ part->num_parts--;
+ memmove(&part->parts[i], &part->parts[i + 1],
+ sizeof(*part->parts) * (part->num_parts - i));
+ i--;
}
}
- return 0;
+
+ *pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
+ GFP_KERNEL);
+ if (!*pparts)
+ return -ENOMEM;
+
+ return part->num_parts;
}
@@ -369,7 +377,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
*
* This function needs to be visible for bootloaders.
*/
-static int mtdpart_setup(char *s)
+static int __init mtdpart_setup(char *s)
{
cmdline = s;
return 1;
@@ -385,10 +393,22 @@ static struct mtd_part_parser cmdline_parser = {
static int __init cmdline_parser_init(void)
{
- return register_mtd_parser(&cmdline_parser);
+ if (mtdparts)
+ mtdpart_setup(mtdparts);
+ register_mtd_parser(&cmdline_parser);
+ return 0;
+}
+
+static void __exit cmdline_parser_exit(void)
+{
+ deregister_mtd_parser(&cmdline_parser);
}
module_init(cmdline_parser_init);
+module_exit(cmdline_parser_exit);
+
+MODULE_PARM_DESC(mtdparts, "Partitioning specification");
+module_param(mtdparts, charp, 0);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 35081ce77fb..c49d0b127fe 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -1,5 +1,6 @@
menu "Self-contained MTD device drivers"
depends on MTD!=n
+ depends on HAS_IOMEM
config MTD_PMC551
tristate "Ramix PMC551 PCI Mezzanine RAM card support"
@@ -51,7 +52,7 @@ config MTD_MS02NV
config MTD_DATAFLASH
tristate "Support for AT45xxx DataFlash"
- depends on SPI_MASTER && EXPERIMENTAL
+ depends on SPI_MASTER
help
This enables access to AT45xxx DataFlash chips, using SPI.
Sometimes DataFlash chips are packaged inside MMC-format
@@ -70,7 +71,6 @@ config MTD_DATAFLASH_WRITE_VERIFY
config MTD_DATAFLASH_OTP
bool "DataFlash OTP support (Security Register)"
depends on MTD_DATAFLASH
- select HAVE_MTD_OTP
help
Newer DataFlash chips (revisions C and D) support 128 bytes of
one-time-programmable (OTP) data. The first half may be written
@@ -80,7 +80,7 @@ config MTD_DATAFLASH_OTP
config MTD_M25P80
tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
- depends on SPI_MASTER && EXPERIMENTAL
+ depends on SPI_MASTER && MTD_SPI_NOR
help
This enables access to most modern SPI flash chips, used for
program and data storage. Series supported include Atmel AT26DF,
@@ -95,12 +95,12 @@ config MTD_M25P80
if you want to specify device partitioning or to use a device which
doesn't support the JEDEC ID instruction.
-config M25PXX_USE_FAST_READ
- bool "Use FAST_READ OPCode allowing SPI CLK <= 50MHz"
- depends on MTD_M25P80
+config MTD_SPEAR_SMI
+ tristate "SPEAR MTD NOR Support through SMI controller"
+ depends on PLAT_SPEAR
default y
help
- This option enables FAST_READ access supported by ST M25Pxx.
+	  This enables SNOR support on SPEAR platforms using the SMI controller.
config MTD_SST25L
tristate "Support SST25L (non JEDEC) SPI Flash chips"
@@ -112,6 +112,14 @@ config MTD_SST25L
Set up your spi devices with the right board-specific platform data,
if you want to specify device partitioning.
+config MTD_BCM47XXSFLASH
+ tristate "R/O support for serial flash on BCMA bus"
+ depends on BCMA_SFLASH
+ help
+	  The BCMA bus can have various flash memories attached; they are
+	  registered by bcma as platform devices. This enables the driver for
+	  serial flash memories (only read-only mode is implemented).
+
config MTD_SLRAM
tristate "Uncached system RAM"
help
@@ -189,120 +197,32 @@ config MTD_BLOCK2MTD
comment "Disk-On-Chip Device Drivers"
-config MTD_DOC2000
- tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)"
- select MTD_DOCPROBE
- select MTD_NAND_IDS
+config MTD_DOCG3
+ tristate "M-Systems Disk-On-Chip G3"
+ select BCH
+ select BCH_CONST_PARAMS
+ select BITREVERSE
---help---
This provides an MTD device driver for the M-Systems DiskOnChip
- 2000 and Millennium devices. Originally designed for the DiskOnChip
- 2000, it also now includes support for the DiskOnChip Millennium.
- If you have problems with this driver and the DiskOnChip Millennium,
- you may wish to try the alternative Millennium driver below. To use
- the alternative driver, you will need to undefine DOC_SINGLE_DRIVER
- in the <file:drivers/mtd/devices/docprobe.c> source code.
-
- If you use this device, you probably also want to enable the NFTL
- 'NAND Flash Translation Layer' option below, which is used to
- emulate a block device by using a kind of file system on the flash
- chips.
-
- NOTE: This driver is deprecated and will probably be removed soon.
- Please try the new DiskOnChip driver under "NAND Flash Device
- Drivers".
-
-config MTD_DOC2001
- tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)"
- select MTD_DOCPROBE
- select MTD_NAND_IDS
- ---help---
- This provides an alternative MTD device driver for the M-Systems
- DiskOnChip Millennium devices. Use this if you have problems with
- the combined DiskOnChip 2000 and Millennium driver above. To get
- the DiskOnChip probe code to load and use this driver instead of
- the other one, you will need to undefine DOC_SINGLE_DRIVER near
- the beginning of <file:drivers/mtd/devices/docprobe.c>.
-
- If you use this device, you probably also want to enable the NFTL
- 'NAND Flash Translation Layer' option below, which is used to
- emulate a block device by using a kind of file system on the flash
- chips.
-
- NOTE: This driver is deprecated and will probably be removed soon.
- Please try the new DiskOnChip driver under "NAND Flash Device
- Drivers".
-
-config MTD_DOC2001PLUS
- tristate "M-Systems Disk-On-Chip Millennium Plus"
- select MTD_DOCPROBE
- select MTD_NAND_IDS
- ---help---
- This provides an MTD device driver for the M-Systems DiskOnChip
- Millennium Plus devices.
-
- If you use this device, you probably also want to enable the INFTL
- 'Inverse NAND Flash Translation Layer' option below, which is used
- to emulate a block device by using a kind of file system on the
- flash chips.
-
- NOTE: This driver will soon be replaced by the new DiskOnChip driver
- under "NAND Flash Device Drivers" (currently that driver does not
- support all Millennium Plus devices).
-
-config MTD_DOCPROBE
- tristate
- select MTD_DOCECC
-
-config MTD_DOCECC
- tristate
-
-config MTD_DOCPROBE_ADVANCED
- bool "Advanced detection options for DiskOnChip"
- depends on MTD_DOCPROBE
- help
- This option allows you to specify nonstandard address at which to
- probe for a DiskOnChip, or to change the detection options. You
- are unlikely to need any of this unless you are using LinuxBIOS.
- Say 'N'.
-
-config MTD_DOCPROBE_ADDRESS
- hex "Physical address of DiskOnChip" if MTD_DOCPROBE_ADVANCED
- depends on MTD_DOCPROBE
- default "0x0000" if MTD_DOCPROBE_ADVANCED
- default "0" if !MTD_DOCPROBE_ADVANCED
- ---help---
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option allows you to specify a single address at which to probe
- for the device, which is useful if you have other devices in that
- range which get upset when they are probed.
-
- (Note that on PowerPC, the normal probe will only check at
- 0xE4000000.)
-
- Normally, you should leave this set to zero, to allow the probe at
- the normal addresses.
-
-config MTD_DOCPROBE_HIGH
- bool "Probe high addresses"
- depends on MTD_DOCPROBE_ADVANCED
- help
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option changes to make it probe between 0xFFFC8000 and
- 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
- useful to you. Say 'N'.
-
-config MTD_DOCPROBE_55AA
- bool "Probe for 0x55 0xAA BIOS Extension Signature"
- depends on MTD_DOCPROBE_ADVANCED
- help
- Check for the 0x55 0xAA signature of a DiskOnChip, and do not
- continue with probing if it is absent. The signature will always be
- present for a DiskOnChip 2000 or a normal DiskOnChip Millennium.
- Only if you have overwritten the first block of a DiskOnChip
- Millennium will it be absent. Enable this option if you are using
- LinuxBIOS or if you need to recover a DiskOnChip Millennium on which
- you have managed to wipe the first block.
+ G3 devices.
+
+ The driver provides access to G3 DiskOnChip devices, distributed by
+ M-Systems and now SanDisk. The support is still experimental and
+ does not implement any write operations.
+
+config MTD_ST_SPI_FSM
+ tristate "ST Microelectronics SPI FSM Serial Flash Controller"
+ depends on ARCH_STI
+ help
+ This provides an MTD device driver for the ST Microelectronics
+ SPI Fast Sequence Mode (FSM) Serial Flash Controller and support
+ for a subset of connected Serial Flash devices.
+
+if MTD_DOCG3
+config BCH_CONST_M
+ default 14
+config BCH_CONST_T
+ default 4
+endif
endmenu
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index f3226b1d38f..c68868f6058 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -2,11 +2,7 @@
# linux/drivers/mtd/devices/Makefile
#
-obj-$(CONFIG_MTD_DOC2000) += doc2000.o
-obj-$(CONFIG_MTD_DOC2001) += doc2001.o
-obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o
-obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o
-obj-$(CONFIG_MTD_DOCECC) += docecc.o
+obj-$(CONFIG_MTD_DOCG3) += docg3.o
obj-$(CONFIG_MTD_SLRAM) += slram.o
obj-$(CONFIG_MTD_PHRAM) += phram.o
obj-$(CONFIG_MTD_PMC551) += pmc551.o
@@ -16,4 +12,11 @@ obj-$(CONFIG_MTD_LART) += lart.o
obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
+obj-$(CONFIG_MTD_NAND_OMAP_BCH) += elm.o
+obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
+obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o
+obj-$(CONFIG_MTD_ST_SPI_FSM) += st_spi_fsm.o
+
+
+CFLAGS_docg3.o += -I$(src)
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
new file mode 100644
index 00000000000..77de29bc02b
--- /dev/null
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -0,0 +1,340 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+#include "bcm47xxsflash.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
+
+static const char * const probes[] = { "bcm47xxpart", NULL };
+
+/**************************************************
+ * Various helpers
+ **************************************************/
+
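+/* Issue an opcode through the ChipCommon flash control register and
+ * busy-wait (up to 1000 iterations) for the controller to finish.
+ */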
+static void bcm47xxsflash_cmd(struct bcm47xxsflash *b47s, u32 opcode)
+{
+ int i;
+
+ b47s->cc_write(b47s, BCMA_CC_FLASHCTL, BCMA_CC_FLASHCTL_START | opcode);
+ for (i = 0; i < 1000; i++) {
+ if (!(b47s->cc_read(b47s, BCMA_CC_FLASHCTL) &
+ BCMA_CC_FLASHCTL_BUSY))
+ return;
+ cpu_relax();
+ }
+ pr_err("Control command failed (timeout)!\n");
+}
+
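+/* Poll the flash status (ST RDSR or Atmel STATUS) until the chip reports
+ * ready or the timeout (in jiffies) expires; returns 0 on success or
+ * -EBUSY on timeout.
+ */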
+static int bcm47xxsflash_poll(struct bcm47xxsflash *b47s, int timeout)
+{
+ unsigned long deadline = jiffies + timeout;
+
+ do {
+ switch (b47s->type) {
+ case BCM47XXSFLASH_TYPE_ST:
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_RDSR);
+ if (!(b47s->cc_read(b47s, BCMA_CC_FLASHDATA) &
+ SR_ST_WIP))
+ return 0;
+ break;
+ case BCM47XXSFLASH_TYPE_ATMEL:
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_STATUS);
+ if (b47s->cc_read(b47s, BCMA_CC_FLASHDATA) &
+ SR_AT_READY)
+ return 0;
+ break;
+ }
+
+ cpu_relax();
+ udelay(1);
+ } while (!time_after_eq(jiffies, deadline));
+
+ pr_err("Timeout waiting for flash to be ready!\n");
+
+ return -EBUSY;
+}
+
+/**************************************************
+ * MTD ops
+ **************************************************/
+
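+/* Erase one block at erase->addr: ST parts use SE (64 KiB sectors) or SSE
+ * for smaller sub-sectors, Atmel parts use a page erase. The result is
+ * reported through erase->state and the optional callback.
+ */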
+static int bcm47xxsflash_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ int err;
+
+ switch (b47s->type) {
+ case BCM47XXSFLASH_TYPE_ST:
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_WREN);
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, erase->addr);
+ /* Newer flashes have "sub-sectors" which can be erased
+ * independently with a new command: ST_SSE. The ST_SE command
+ * erases 64KB just as before.
+ */
+ if (b47s->blocksize < (64 * 1024))
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_SSE);
+ else
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_SE);
+ break;
+ case BCM47XXSFLASH_TYPE_ATMEL:
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, erase->addr << 1);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_PAGE_ERASE);
+ break;
+ }
+
+ err = bcm47xxsflash_poll(b47s, HZ);
+ if (err)
+ erase->state = MTD_ERASE_FAILED;
+ else
+ erase->state = MTD_ERASE_DONE;
+
+ if (erase->callback)
+ erase->callback(erase);
+
+ return err;
+}
+
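+/* Reads go straight through the memory-mapped window; no flash commands
+ * are needed.
+ */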
+static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+
+ /* Check address range */
+ if ((from + len) > mtd->size)
+ return -EINVAL;
+
+ memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(b47s->window + from),
+ len);
+ *retlen = len;
+
+ return len;
+}
+
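+/* Program bytes on ST flash. ChipCommon revisions older than 20 can only
+ * write one byte per call; newer ones keep chip select asserted (CSA) and
+ * stream bytes until a page boundary is reached.
+ */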
+static int bcm47xxsflash_write_st(struct mtd_info *mtd, u32 offset, size_t len,
+ const u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ int written = 0;
+
+ /* Enable writes */
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_WREN);
+
+ /* Write first byte */
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, offset);
+ b47s->cc_write(b47s, BCMA_CC_FLASHDATA, *buf++);
+
+ /* Program page */
+ if (b47s->bcma_cc->core->id.rev < 20) {
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_PP);
+ return 1; /* 1B written */
+ }
+
+ /* Program page and set CSA (on newer chips we can continue writing) */
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_CSA | OPCODE_ST_PP);
+ offset++;
+ len--;
+ written++;
+
+ while (len > 0) {
+ /* Page boundary, another function call is needed */
+ if ((offset & 0xFF) == 0)
+ break;
+
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_CSA | *buf++);
+ offset++;
+ len--;
+ written++;
+ }
+
+ /* All done, drop CSA & poll */
+ b47s->cc_write(b47s, BCMA_CC_FLASHCTL, 0);
+ udelay(1);
+ if (bcm47xxsflash_poll(b47s, HZ / 10))
+ pr_err("Flash rejected dropping CSA\n");
+
+ return written;
+}
+
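+/* Program bytes on Atmel DataFlash: load the page into buffer 1 (unless
+ * the whole page is overwritten), patch the buffer byte by byte, then
+ * program the page from the buffer.
+ */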
+static int bcm47xxsflash_write_at(struct mtd_info *mtd, u32 offset, size_t len,
+ const u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ u32 mask = b47s->blocksize - 1;
+ u32 page = (offset & ~mask) << 1;
+ u32 byte = offset & mask;
+ int written = 0;
+
+ /* If we don't overwrite the whole page, read it into the buffer first */
+ if (byte || (len < b47s->blocksize)) {
+ int err;
+
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, page);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_LOAD);
+ /* 250 us for AT45DB321B */
+ err = bcm47xxsflash_poll(b47s, HZ / 1000);
+ if (err) {
+ pr_err("Timeout reading page 0x%X into buffer\n", page);
+ return err;
+ }
+ }
+
+ /* Change buffer content with our data */
+ while (len > 0) {
+ /* Page boundary, another function call is needed */
+ if (byte == b47s->blocksize)
+ break;
+
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, byte++);
+ b47s->cc_write(b47s, BCMA_CC_FLASHDATA, *buf++);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_WRITE);
+ len--;
+ written++;
+ }
+
+ /* Program page with the buffer content */
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, page);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_PROGRAM);
+
+ return written;
+}
+
+static int bcm47xxsflash_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ int written;
+
+ /* Writing functions can return without writing all passed data, for
+ * example when the hardware is too old or when we hit a page boundary.
+ */
+ while (len > 0) {
+ switch (b47s->type) {
+ case BCM47XXSFLASH_TYPE_ST:
+ written = bcm47xxsflash_write_st(mtd, to, len, buf);
+ break;
+ case BCM47XXSFLASH_TYPE_ATMEL:
+ written = bcm47xxsflash_write_at(mtd, to, len, buf);
+ break;
+ default:
+ BUG_ON(1);
+ }
+ if (written < 0) {
+ pr_err("Error writing at offset 0x%llX\n", to);
+ return written;
+ }
+ to += (loff_t)written;
+ len -= written;
+ *retlen += written;
+ buf += written;
+ }
+
+ return 0;
+}
+
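+/* Fill the mtd_info structure with the flash geometry and the
+ * erase/read/write callbacks.
+ */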
+static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s)
+{
+ struct mtd_info *mtd = &b47s->mtd;
+
+ mtd->priv = b47s;
+ mtd->name = "bcm47xxsflash";
+ mtd->owner = THIS_MODULE;
+
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = b47s->size;
+ mtd->erasesize = b47s->blocksize;
+ mtd->writesize = 1;
+ mtd->writebufsize = 1;
+
+ mtd->_erase = bcm47xxsflash_erase;
+ mtd->_read = bcm47xxsflash_read;
+ mtd->_write = bcm47xxsflash_write;
+}
+
+/**************************************************
+ * BCMA
+ **************************************************/
+
+static int bcm47xxsflash_bcma_cc_read(struct bcm47xxsflash *b47s, u16 offset)
+{
+ return bcma_cc_read32(b47s->bcma_cc, offset);
+}
+
+static void bcm47xxsflash_bcma_cc_write(struct bcm47xxsflash *b47s, u16 offset,
+ u32 value)
+{
+ bcma_cc_write32(b47s->bcma_cc, offset, value);
+}
+
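+/* Bind to the "bcma_sflash" platform device registered by the bcma bus,
+ * pick the chip type from the ChipCommon capabilities and register an MTD
+ * device, letting the "bcm47xxpart" parser create partitions.
+ */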
+static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
+{
+ struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxsflash *b47s;
+ int err;
+
+ b47s = devm_kzalloc(&pdev->dev, sizeof(*b47s), GFP_KERNEL);
+ if (!b47s)
+ return -ENOMEM;
+ sflash->priv = b47s;
+
+ b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash);
+ b47s->cc_read = bcm47xxsflash_bcma_cc_read;
+ b47s->cc_write = bcm47xxsflash_bcma_cc_write;
+
+ switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) {
+ case BCMA_CC_FLASHT_STSER:
+ b47s->type = BCM47XXSFLASH_TYPE_ST;
+ break;
+ case BCMA_CC_FLASHT_ATSER:
+ b47s->type = BCM47XXSFLASH_TYPE_ATMEL;
+ break;
+ }
+
+ b47s->window = sflash->window;
+ b47s->blocksize = sflash->blocksize;
+ b47s->numblocks = sflash->numblocks;
+ b47s->size = sflash->size;
+ bcm47xxsflash_fill_mtd(b47s);
+
+ err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
+ if (err) {
+ pr_err("Failed to register MTD device: %d\n", err);
+ return err;
+ }
+
+ if (bcm47xxsflash_poll(b47s, HZ / 10))
+ pr_warn("Serial flash busy\n");
+
+ return 0;
+}
+
+static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
+{
+ struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxsflash *b47s = sflash->priv;
+
+ mtd_device_unregister(&b47s->mtd);
+
+ return 0;
+}
+
+static struct platform_driver bcma_sflash_driver = {
+ .probe = bcm47xxsflash_bcma_probe,
+ .remove = bcm47xxsflash_bcma_remove,
+ .driver = {
+ .name = "bcma_sflash",
+ .owner = THIS_MODULE,
+ },
+};
+
+/**************************************************
+ * Init
+ **************************************************/
+
+module_platform_driver(bcma_sflash_driver);
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
new file mode 100644
index 00000000000..fe93daf4f48
--- /dev/null
+++ b/drivers/mtd/devices/bcm47xxsflash.h
@@ -0,0 +1,76 @@
+#ifndef __BCM47XXSFLASH_H
+#define __BCM47XXSFLASH_H
+
+#include <linux/mtd/mtd.h>
+
+/* Used for ST flashes only. */
+#define OPCODE_ST_WREN 0x0006 /* Write Enable */
+#define OPCODE_ST_WRDIS 0x0004 /* Write Disable */
+#define OPCODE_ST_RDSR 0x0105 /* Read Status Register */
+#define OPCODE_ST_WRSR 0x0101 /* Write Status Register */
+#define OPCODE_ST_READ 0x0303 /* Read Data Bytes */
+#define OPCODE_ST_PP 0x0302 /* Page Program */
+#define OPCODE_ST_SE 0x02d8 /* Sector Erase */
+#define OPCODE_ST_BE 0x00c7 /* Bulk Erase */
+#define OPCODE_ST_DP 0x00b9 /* Deep Power-down */
+#define OPCODE_ST_RES 0x03ab /* Read Electronic Signature */
+#define OPCODE_ST_CSA 0x1000 /* Keep chip select asserted */
+#define OPCODE_ST_SSE 0x0220 /* Sub-sector Erase */
+
+/* Used for Atmel flashes only. */
+#define OPCODE_AT_READ 0x07e8
+#define OPCODE_AT_PAGE_READ 0x07d2
+#define OPCODE_AT_STATUS 0x01d7
+#define OPCODE_AT_BUF1_WRITE 0x0384
+#define OPCODE_AT_BUF2_WRITE 0x0387
+#define OPCODE_AT_BUF1_ERASE_PROGRAM 0x0283
+#define OPCODE_AT_BUF2_ERASE_PROGRAM 0x0286
+#define OPCODE_AT_BUF1_PROGRAM 0x0288
+#define OPCODE_AT_BUF2_PROGRAM 0x0289
+#define OPCODE_AT_PAGE_ERASE 0x0281
+#define OPCODE_AT_BLOCK_ERASE 0x0250
+#define OPCODE_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382
+#define OPCODE_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385
+#define OPCODE_AT_BUF1_LOAD 0x0253
+#define OPCODE_AT_BUF2_LOAD 0x0255
+#define OPCODE_AT_BUF1_COMPARE 0x0260
+#define OPCODE_AT_BUF2_COMPARE 0x0261
+#define OPCODE_AT_BUF1_REPROGRAM 0x0258
+#define OPCODE_AT_BUF2_REPROGRAM 0x0259
+
+/* Status register bits for ST flashes */
+#define SR_ST_WIP 0x01 /* Write In Progress */
+#define SR_ST_WEL 0x02 /* Write Enable Latch */
+#define SR_ST_BP_MASK 0x1c /* Block Protect */
+#define SR_ST_BP_SHIFT 2
+#define SR_ST_SRWD 0x80 /* Status Register Write Disable */
+
+/* Status register bits for Atmel flashes */
+#define SR_AT_READY 0x80
+#define SR_AT_MISMATCH 0x40
+#define SR_AT_ID_MASK 0x38
+#define SR_AT_ID_SHIFT 3
+
+struct bcma_drv_cc;
+
+enum bcm47xxsflash_type {
+ BCM47XXSFLASH_TYPE_ATMEL,
+ BCM47XXSFLASH_TYPE_ST,
+};
+
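+/* Runtime state for one serial flash: bus accessors, detected chip type,
+ * mapping window and geometry.
+ */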
+struct bcm47xxsflash {
+ struct bcma_drv_cc *bcma_cc;
+ int (*cc_read)(struct bcm47xxsflash *b47s, u16 offset);
+ void (*cc_write)(struct bcm47xxsflash *b47s, u16 offset, u32 value);
+
+ enum bcm47xxsflash_type type;
+
+ u32 window;
+ u32 blocksize;
+ u16 numblocks;
+ u32 size;
+
+ struct mtd_info mtd;
+};
+
+#endif /* __BCM47XXSFLASH_H */
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index b78f23169d4..66f0405f7e5 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -6,6 +6,9 @@
*
* Licence: GPL
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
@@ -14,14 +17,10 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
-#include <linux/buffer_head.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
-
-#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args)
-#define INFO(fmt, args...) printk(KERN_INFO "block2mtd: " fmt "\n" , ## args)
-
+#include <linux/major.h>
/* Info for the block device */
struct block2mtd_dev {
@@ -53,8 +52,6 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
while (pages) {
page = page_read(mapping, index);
- if (!page)
- return -ENOMEM;
if (IS_ERR(page))
return PTR_ERR(page);
@@ -65,6 +62,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
memset(page_address(page), 0xff, PAGE_SIZE);
set_page_dirty(page);
unlock_page(page);
+ balance_dirty_pages_ratelimited(mapping);
break;
}
@@ -86,7 +84,7 @@ static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
err = _block2mtd_erase(dev, from, len);
mutex_unlock(&dev->write_mutex);
if (err) {
- ERROR("erase failed err = %d", err);
+ pr_err("erase failed err = %d\n", err);
instr->state = MTD_ERASE_FAILED;
} else
instr->state = MTD_ERASE_DONE;
@@ -105,14 +103,6 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
int offset = from & (PAGE_SIZE-1);
int cpylen;
- if (from > mtd->size)
- return -EINVAL;
- if (from + len > mtd->size)
- len = mtd->size - from;
-
- if (retlen)
- *retlen = 0;
-
while (len) {
if ((offset + len) > PAGE_SIZE)
cpylen = PAGE_SIZE - offset; // multiple pages
@@ -121,8 +111,6 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
len = len - cpylen;
page = page_read(dev->blkdev->bd_inode->i_mapping, index);
- if (!page)
- return -ENOMEM;
if (IS_ERR(page))
return PTR_ERR(page);
@@ -149,8 +137,6 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
int offset = to & ~PAGE_MASK; // page offset
int cpylen;
- if (retlen)
- *retlen = 0;
while (len) {
if ((offset+len) > PAGE_SIZE)
cpylen = PAGE_SIZE - offset; // multiple pages
@@ -159,8 +145,6 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
len = len - cpylen;
page = page_read(mapping, index);
- if (!page)
- return -ENOMEM;
if (IS_ERR(page))
return PTR_ERR(page);
@@ -169,6 +153,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
memcpy(page_address(page) + offset, buf, cpylen);
set_page_dirty(page);
unlock_page(page);
+ balance_dirty_pages_ratelimited(mapping);
}
page_cache_release(page);
@@ -189,13 +174,6 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
struct block2mtd_dev *dev = mtd->priv;
int err;
- if (!len)
- return 0;
- if (to >= mtd->size)
- return -ENOSPC;
- if (to + len > mtd->size)
- len = mtd->size - to;
-
mutex_lock(&dev->write_mutex);
err = _block2mtd_write(dev, buf, to, len, retlen);
mutex_unlock(&dev->write_mutex);
@@ -231,7 +209,6 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
}
-/* FIXME: ensure that mtd->size % erase_size == 0 */
static struct block2mtd_dev *add_device(char *devname, int erase_size)
{
const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
@@ -261,14 +238,19 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
#endif
if (IS_ERR(bdev)) {
- ERROR("error: cannot open device %s", devname);
- goto devinit_err;
+ pr_err("error: cannot open device %s\n", devname);
+ goto err_free_block2mtd;
}
dev->blkdev = bdev;
if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
- ERROR("attempting to use an MTD device as a block device");
- goto devinit_err;
+ pr_err("attempting to use an MTD device as a block device\n");
+ goto err_free_block2mtd;
+ }
+
+ if ((long)dev->blkdev->bd_inode->i_size % erase_size) {
+ pr_err("erasesize must be a divisor of device size\n");
+ goto err_free_block2mtd;
}
mutex_init(&dev->write_mutex);
@@ -277,34 +259,37 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
/* make the name contain the block device in */
name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
if (!name)
- goto devinit_err;
+ goto err_destroy_mutex;
dev->mtd.name = name;
dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
+ dev->mtd.writebufsize = PAGE_SIZE;
dev->mtd.type = MTD_RAM;
dev->mtd.flags = MTD_CAP_RAM;
- dev->mtd.erase = block2mtd_erase;
- dev->mtd.write = block2mtd_write;
- dev->mtd.writev = default_mtd_writev;
- dev->mtd.sync = block2mtd_sync;
- dev->mtd.read = block2mtd_read;
+ dev->mtd._erase = block2mtd_erase;
+ dev->mtd._write = block2mtd_write;
+ dev->mtd._sync = block2mtd_sync;
+ dev->mtd._read = block2mtd_read;
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
if (mtd_device_register(&dev->mtd, NULL, 0)) {
/* Device didn't get added, so free the entry */
- goto devinit_err;
+ goto err_destroy_mutex;
}
list_add(&dev->list, &blkmtd_device_list);
- INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
- dev->mtd.name + strlen("block2mtd: "),
- dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
+ dev->mtd.index,
+ dev->mtd.name + strlen("block2mtd: "),
+ dev->mtd.erasesize >> 10, dev->mtd.erasesize);
return dev;
-devinit_err:
+err_destroy_mutex:
+ mutex_destroy(&dev->write_mutex);
+err_free_block2mtd:
block2mtd_free_device(dev);
return NULL;
}
@@ -361,17 +346,11 @@ static inline void kill_final_newline(char *str)
}
-#define parse_err(fmt, args...) do { \
- ERROR(fmt, ## args); \
- return 0; \
-} while (0)
-
#ifndef MODULE
static int block2mtd_init_called = 0;
static char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
#endif
-
static int block2mtd_setup2(const char *val)
{
char buf[80 + 12]; /* 80 for device, 12 for erase size */
@@ -381,8 +360,10 @@ static int block2mtd_setup2(const char *val)
size_t erase_size = PAGE_SIZE;
int i, ret;
- if (strnlen(val, sizeof(buf)) >= sizeof(buf))
- parse_err("parameter too long");
+ if (strnlen(val, sizeof(buf)) >= sizeof(buf)) {
+ pr_err("parameter too long\n");
+ return 0;
+ }
strcpy(str, val);
kill_final_newline(str);
@@ -390,20 +371,27 @@ static int block2mtd_setup2(const char *val)
for (i = 0; i < 2; i++)
token[i] = strsep(&str, ",");
- if (str)
- parse_err("too many arguments");
+ if (str) {
+ pr_err("too many arguments\n");
+ return 0;
+ }
- if (!token[0])
- parse_err("no argument");
+ if (!token[0]) {
+ pr_err("no argument\n");
+ return 0;
+ }
name = token[0];
- if (strlen(name) + 1 > 80)
- parse_err("device name too long");
+ if (strlen(name) + 1 > 80) {
+ pr_err("device name too long\n");
+ return 0;
+ }
if (token[1]) {
ret = parse_num(&erase_size, token[1]);
if (ret) {
- parse_err("illegal erase size");
+ pr_err("illegal erase size\n");
+ return 0;
}
}
@@ -457,7 +445,7 @@ static int __init block2mtd_init(void)
}
-static void __devexit block2mtd_exit(void)
+static void block2mtd_exit(void)
{
struct list_head *pos, *next;
@@ -466,8 +454,10 @@ static void __devexit block2mtd_exit(void)
struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
block2mtd_sync(&dev->mtd);
mtd_device_unregister(&dev->mtd);
- INFO("mtd%d: [%s] removed", dev->mtd.index,
- dev->mtd.name + strlen("block2mtd: "));
+ mutex_destroy(&dev->write_mutex);
+ pr_info("mtd%d: [%s] removed\n",
+ dev->mtd.index,
+ dev->mtd.name + strlen("block2mtd: "));
list_del(&dev->list);
block2mtd_free_device(dev);
}
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
deleted file mode 100644
index f7fbf6025ef..00000000000
--- a/drivers/mtd/devices/doc2000.c
+++ /dev/null
@@ -1,1201 +0,0 @@
-
-/*
- * Linux driver for Disk-On-Chip 2000 and Millennium
- * (c) 1999 Machine Vision Holdings, Inc.
- * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/mutex.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-#define DOC_SUPPORT_2000
-#define DOC_SUPPORT_2000TSOP
-#define DOC_SUPPORT_MILLENNIUM
-
-#ifdef DOC_SUPPORT_2000
-#define DoC_is_2000(doc) (doc->ChipID == DOC_ChipID_Doc2k)
-#else
-#define DoC_is_2000(doc) (0)
-#endif
-
-#if defined(DOC_SUPPORT_2000TSOP) || defined(DOC_SUPPORT_MILLENNIUM)
-#define DoC_is_Millennium(doc) (doc->ChipID == DOC_ChipID_DocMil)
-#else
-#define DoC_is_Millennium(doc) (0)
-#endif
-
-/* #define ECC_DEBUG */
-
-/* I have no idea why some DoC chips can not use memcpy_from|to_io().
- * This may be due to the different revisions of the ASIC controller built-in or
- * simplily a QA/Bug issue. Who knows ?? If you have trouble, please uncomment
- * this:
- #undef USE_MEMCPY
-*/
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf);
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf);
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
- size_t *retlen, const u_char *buf);
-static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
-
-static struct mtd_info *doc2klist = NULL;
-
-/* Perform the required delay cycles by reading from the appropriate register */
-static void DoC_Delay(struct DiskOnChip *doc, unsigned short cycles)
-{
- volatile char dummy;
- int i;
-
- for (i = 0; i < cycles; i++) {
- if (DoC_is_Millennium(doc))
- dummy = ReadDOC(doc->virtadr, NOP);
- else
- dummy = ReadDOC(doc->virtadr, DOCStatus);
- }
-
-}
-
-/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(struct DiskOnChip *doc)
-{
- void __iomem *docptr = doc->virtadr;
- unsigned long timeo = jiffies + (HZ * 10);
-
- DEBUG(MTD_DEBUG_LEVEL3,
- "_DoC_WaitReady called for out-of-line wait\n");
-
- /* Out-of-line routine to wait for chip response */
- while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
- /* issue 2 read from NOP register after reading from CDSNControl register
- see Software Requirement 11.4 item 2. */
- DoC_Delay(doc, 2);
-
- if (time_after(jiffies, timeo)) {
- DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
- return -EIO;
- }
- udelay(1);
- cond_resched();
- }
-
- return 0;
-}
-
-static inline int DoC_WaitReady(struct DiskOnChip *doc)
-{
- void __iomem *docptr = doc->virtadr;
-
- /* This is inline, to optimise the common case, where it's ready instantly */
- int ret = 0;
-
- /* 4 read form NOP register should be issued in prior to the read from CDSNControl
- see Software Requirement 11.4 item 2. */
- DoC_Delay(doc, 4);
-
- if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
- /* Call the out-of-line routine to wait */
- ret = _DoC_WaitReady(doc);
-
- /* issue 2 read from NOP register after reading from CDSNControl register
- see Software Requirement 11.4 item 2. */
- DoC_Delay(doc, 2);
-
- return ret;
-}
-
-/* DoC_Command: Send a flash command to the flash chip through the CDSN Slow IO register to
- bypass the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
- required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static int DoC_Command(struct DiskOnChip *doc, unsigned char command,
- unsigned char xtraflags)
-{
- void __iomem *docptr = doc->virtadr;
-
- if (DoC_is_2000(doc))
- xtraflags |= CDSN_CTRL_FLASH_IO;
-
- /* Assert the CLE (Command Latch Enable) line to the flash chip */
- WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- if (DoC_is_Millennium(doc))
- WriteDOC(command, docptr, CDSNSlowIO);
-
- /* Send the command */
- WriteDOC_(command, docptr, doc->ioreg);
- if (DoC_is_Millennium(doc))
- WriteDOC(command, docptr, WritePipeTerm);
-
- /* Lower the CLE line */
- WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Wait for the chip to respond - Software requirement 11.4.1 (extended for any command) */
- return DoC_WaitReady(doc);
-}
-
-/* DoC_Address: Set the current address for the flash chip through the CDSN Slow IO register to
- bypass the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
- required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static int DoC_Address(struct DiskOnChip *doc, int numbytes, unsigned long ofs,
- unsigned char xtraflags1, unsigned char xtraflags2)
-{
- int i;
- void __iomem *docptr = doc->virtadr;
-
- if (DoC_is_2000(doc))
- xtraflags1 |= CDSN_CTRL_FLASH_IO;
-
- /* Assert the ALE (Address Latch Enable) line to the flash chip */
- WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl);
-
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Send the address */
- /* Devices with 256-byte page are addressed as:
- Column (bits 0-7), Page (bits 8-15, 16-23, 24-31)
- * there is no device on the market with page256
- and more than 24 bits.
- Devices with 512-byte page are addressed as:
- Column (bits 0-7), Page (bits 9-16, 17-24, 25-31)
- * 25-31 is sent only if the chip support it.
- * bit 8 changes the read command to be sent
- (NAND_CMD_READ0 or NAND_CMD_READ1).
- */
-
- if (numbytes == ADDR_COLUMN || numbytes == ADDR_COLUMN_PAGE) {
- if (DoC_is_Millennium(doc))
- WriteDOC(ofs & 0xff, docptr, CDSNSlowIO);
- WriteDOC_(ofs & 0xff, docptr, doc->ioreg);
- }
-
- if (doc->page256) {
- ofs = ofs >> 8;
- } else {
- ofs = ofs >> 9;
- }
-
- if (numbytes == ADDR_PAGE || numbytes == ADDR_COLUMN_PAGE) {
- for (i = 0; i < doc->pageadrlen; i++, ofs = ofs >> 8) {
- if (DoC_is_Millennium(doc))
- WriteDOC(ofs & 0xff, docptr, CDSNSlowIO);
- WriteDOC_(ofs & 0xff, docptr, doc->ioreg);
- }
- }
-
- if (DoC_is_Millennium(doc))
- WriteDOC(ofs & 0xff, docptr, WritePipeTerm);
-
- DoC_Delay(doc, 2); /* Needed for some slow flash chips. mf. */
-
- /* FIXME: The SlowIO's for millennium could be replaced by
- a single WritePipeTerm here. mf. */
-
- /* Lower the ALE line */
- WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr,
- CDSNControl);
-
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Wait for the chip to respond - Software requirement 11.4.1 */
- return DoC_WaitReady(doc);
-}
-
-/* Read a buffer from DoC, taking care of Millennium odditys */
-static void DoC_ReadBuf(struct DiskOnChip *doc, u_char * buf, int len)
-{
- volatile int dummy;
- int modulus = 0xffff;
- void __iomem *docptr = doc->virtadr;
- int i;
-
- if (len <= 0)
- return;
-
- if (DoC_is_Millennium(doc)) {
- /* Read the data via the internal pipeline through CDSN IO register,
- see Pipelined Read Operations 11.3 */
- dummy = ReadDOC(docptr, ReadPipeInit);
-
- /* Millennium should use the LastDataRead register - Pipeline Reads */
- len--;
-
- /* This is needed for correctly ECC calculation */
- modulus = 0xff;
- }
-
- for (i = 0; i < len; i++)
- buf[i] = ReadDOC_(docptr, doc->ioreg + (i & modulus));
-
- if (DoC_is_Millennium(doc)) {
- buf[i] = ReadDOC(docptr, LastDataRead);
- }
-}
-
-/* Write a buffer to DoC, taking care of Millennium odditys */
-static void DoC_WriteBuf(struct DiskOnChip *doc, const u_char * buf, int len)
-{
- void __iomem *docptr = doc->virtadr;
- int i;
-
- if (len <= 0)
- return;
-
- for (i = 0; i < len; i++)
- WriteDOC_(buf[i], docptr, doc->ioreg + i);
-
- if (DoC_is_Millennium(doc)) {
- WriteDOC(0x00, docptr, WritePipeTerm);
- }
-}
-
-
-/* DoC_SelectChip: Select a given flash chip within the current floor */
-
-static inline int DoC_SelectChip(struct DiskOnChip *doc, int chip)
-{
- void __iomem *docptr = doc->virtadr;
-
- /* Software requirement 11.4.4 before writing DeviceSelect */
- /* Deassert the CE line to eliminate glitches on the FCE# outputs */
- WriteDOC(CDSN_CTRL_WP, docptr, CDSNControl);
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Select the individual flash chip requested */
- WriteDOC(chip, docptr, CDSNDeviceSelect);
- DoC_Delay(doc, 4);
-
- /* Reassert the CE line */
- WriteDOC(CDSN_CTRL_CE | CDSN_CTRL_FLASH_IO | CDSN_CTRL_WP, docptr,
- CDSNControl);
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Wait for it to be ready */
- return DoC_WaitReady(doc);
-}
-
-/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
-
-static inline int DoC_SelectFloor(struct DiskOnChip *doc, int floor)
-{
- void __iomem *docptr = doc->virtadr;
-
- /* Select the floor (bank) of chips required */
- WriteDOC(floor, docptr, FloorSelect);
-
- /* Wait for the chip to be ready */
- return DoC_WaitReady(doc);
-}
-
-/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
-
-static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
-{
- int mfr, id, i, j;
- volatile char dummy;
-
- /* Page in the required floor/chip */
- DoC_SelectFloor(doc, floor);
- DoC_SelectChip(doc, chip);
-
- /* Reset the chip */
- if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) {
- DEBUG(MTD_DEBUG_LEVEL2,
- "DoC_Command (reset) for %d,%d returned true\n",
- floor, chip);
- return 0;
- }
-
-
- /* Read the NAND chip ID: 1. Send ReadID command */
- if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) {
- DEBUG(MTD_DEBUG_LEVEL2,
- "DoC_Command (ReadID) for %d,%d returned true\n",
- floor, chip);
- return 0;
- }
-
- /* Read the NAND chip ID: 2. Send address byte zero */
- DoC_Address(doc, ADDR_COLUMN, 0, CDSN_CTRL_WP, 0);
-
- /* Read the manufacturer and device id codes from the device */
-
- if (DoC_is_Millennium(doc)) {
- DoC_Delay(doc, 2);
- dummy = ReadDOC(doc->virtadr, ReadPipeInit);
- mfr = ReadDOC(doc->virtadr, LastDataRead);
-
- DoC_Delay(doc, 2);
- dummy = ReadDOC(doc->virtadr, ReadPipeInit);
- id = ReadDOC(doc->virtadr, LastDataRead);
- } else {
- /* CDSN Slow IO register see Software Req 11.4 item 5. */
- dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
- DoC_Delay(doc, 2);
- mfr = ReadDOC_(doc->virtadr, doc->ioreg);
-
- /* CDSN Slow IO register see Software Req 11.4 item 5. */
- dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
- DoC_Delay(doc, 2);
- id = ReadDOC_(doc->virtadr, doc->ioreg);
- }
-
- /* No response - return failure */
- if (mfr == 0xff || mfr == 0)
- return 0;
-
- /* Check it's the same as the first chip we identified.
- * M-Systems say that any given DiskOnChip device should only
- * contain _one_ type of flash part, although that's not a
- * hardware restriction. */
- if (doc->mfr) {
- if (doc->mfr == mfr && doc->id == id)
- return 1; /* This is the same as the first */
- else
- printk(KERN_WARNING
- "Flash chip at floor %d, chip %d is different:\n",
- floor, chip);
- }
-
- /* Print and store the manufacturer and ID codes. */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (id == nand_flash_ids[i].id) {
- /* Try to identify manufacturer */
- for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
- if (nand_manuf_ids[j].id == mfr)
- break;
- }
- printk(KERN_INFO
- "Flash chip found: Manufacturer ID: %2.2X, "
- "Chip ID: %2.2X (%s:%s)\n", mfr, id,
- nand_manuf_ids[j].name, nand_flash_ids[i].name);
- if (!doc->mfr) {
- doc->mfr = mfr;
- doc->id = id;
- doc->chipshift =
- ffs((nand_flash_ids[i].chipsize << 20)) - 1;
- doc->page256 = (nand_flash_ids[i].pagesize == 256) ? 1 : 0;
- doc->pageadrlen = doc->chipshift > 25 ? 3 : 2;
- doc->erasesize =
- nand_flash_ids[i].erasesize;
- return 1;
- }
- return 0;
- }
- }
-
-
- /* We haven't fully identified the chip. Print as much as we know. */
- printk(KERN_WARNING "Unknown flash chip found: %2.2X %2.2X\n",
- id, mfr);
-
- printk(KERN_WARNING "Please report to dwmw2@infradead.org\n");
- return 0;
-}
-
-/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
-
-static void DoC_ScanChips(struct DiskOnChip *this, int maxchips)
-{
- int floor, chip;
- int numchips[MAX_FLOORS];
- int ret = 1;
-
- this->numchips = 0;
- this->mfr = 0;
- this->id = 0;
-
- /* For each floor, find the number of valid chips it contains */
- for (floor = 0; floor < MAX_FLOORS; floor++) {
- ret = 1;
- numchips[floor] = 0;
- for (chip = 0; chip < maxchips && ret != 0; chip++) {
-
- ret = DoC_IdentChip(this, floor, chip);
- if (ret) {
- numchips[floor]++;
- this->numchips++;
- }
- }
- }
-
- /* If there are none at all that we recognise, bail */
- if (!this->numchips) {
- printk(KERN_NOTICE "No flash chips recognised.\n");
- return;
- }
-
- /* Allocate an array to hold the information for each chip */
- this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
- if (!this->chips) {
- printk(KERN_NOTICE "No memory for allocating chip info structures\n");
- return;
- }
-
- ret = 0;
-
- /* Fill out the chip array with {floor, chipno} for each
- * detected chip in the device. */
- for (floor = 0; floor < MAX_FLOORS; floor++) {
- for (chip = 0; chip < numchips[floor]; chip++) {
- this->chips[ret].floor = floor;
- this->chips[ret].chip = chip;
- this->chips[ret].curadr = 0;
- this->chips[ret].curmode = 0x50;
- ret++;
- }
- }
-
- /* Calculate and print the total size of the device */
- this->totlen = this->numchips * (1 << this->chipshift);
-
- printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
- this->numchips, this->totlen >> 20);
-}
-
-static int DoC2k_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
-{
- int tmp1, tmp2, retval;
- if (doc1->physadr == doc2->physadr)
- return 1;
-
- /* Use the alias resolution register which was set aside for this
- * purpose. If it's value is the same on both chips, they might
- * be the same chip, and we write to one and check for a change in
- * the other. It's unclear if this register is usuable in the
- * DoC 2000 (it's in the Millennium docs), but it seems to work. */
- tmp1 = ReadDOC(doc1->virtadr, AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
- if (tmp1 != tmp2)
- return 0;
-
- WriteDOC((tmp1 + 1) % 0xff, doc1->virtadr, AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
- if (tmp2 == (tmp1 + 1) % 0xff)
- retval = 1;
- else
- retval = 0;
-
- /* Restore register contents. May not be necessary, but do it just to
- * be safe. */
- WriteDOC(tmp1, doc1->virtadr, AliasResolution);
-
- return retval;
-}
-
-/* This routine is found from the docprobe code by symbol_get(),
- * which will bump the use count of this module. */
-void DoC2k_init(struct mtd_info *mtd)
-{
- struct DiskOnChip *this = mtd->priv;
- struct DiskOnChip *old = NULL;
- int maxchips;
-
- /* We must avoid being called twice for the same device. */
-
- if (doc2klist)
- old = doc2klist->priv;
-
- while (old) {
- if (DoC2k_is_alias(old, this)) {
- printk(KERN_NOTICE
- "Ignoring DiskOnChip 2000 at 0x%lX - already configured\n",
- this->physadr);
- iounmap(this->virtadr);
- kfree(mtd);
- return;
- }
- if (old->nextdoc)
- old = old->nextdoc->priv;
- else
- old = NULL;
- }
-
-
- switch (this->ChipID) {
- case DOC_ChipID_Doc2kTSOP:
- mtd->name = "DiskOnChip 2000 TSOP";
- this->ioreg = DoC_Mil_CDSN_IO;
- /* Pretend it's a Millennium */
- this->ChipID = DOC_ChipID_DocMil;
- maxchips = MAX_CHIPS;
- break;
- case DOC_ChipID_Doc2k:
- mtd->name = "DiskOnChip 2000";
- this->ioreg = DoC_2k_CDSN_IO;
- maxchips = MAX_CHIPS;
- break;
- case DOC_ChipID_DocMil:
- mtd->name = "DiskOnChip Millennium";
- this->ioreg = DoC_Mil_CDSN_IO;
- maxchips = MAX_CHIPS_MIL;
- break;
- default:
- printk("Unknown ChipID 0x%02x\n", this->ChipID);
- kfree(mtd);
- iounmap(this->virtadr);
- return;
- }
-
- printk(KERN_NOTICE "%s found at address 0x%lX\n", mtd->name,
- this->physadr);
-
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
- mtd->erasesize = 0;
- mtd->writesize = 512;
- mtd->oobsize = 16;
- mtd->owner = THIS_MODULE;
- mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
- mtd->read = doc_read;
- mtd->write = doc_write;
- mtd->read_oob = doc_read_oob;
- mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
-
- this->curfloor = -1;
- this->curchip = -1;
- mutex_init(&this->lock);
-
- /* Ident all the chips present. */
- DoC_ScanChips(this, maxchips);
-
- if (!this->totlen) {
- kfree(mtd);
- iounmap(this->virtadr);
- } else {
- this->nextdoc = doc2klist;
- doc2klist = mtd;
- mtd->size = this->totlen;
- mtd->erasesize = this->erasesize;
- mtd_device_register(mtd, NULL, 0);
- return;
- }
-}
-EXPORT_SYMBOL_GPL(DoC2k_init);
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t * retlen, u_char * buf)
-{
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip;
- unsigned char syndrome[6], eccbuf[6];
- volatile char dummy;
- int i, len256 = 0, ret=0;
- size_t left = len;
-
- /* Don't allow read past end of device */
- if (from >= this->totlen)
- return -EINVAL;
-
- mutex_lock(&this->lock);
-
- *retlen = 0;
- while (left) {
- len = left;
-
- /* Don't allow a single read to cross a 512-byte block boundary */
- if (from + len > ((from | 0x1ff) + 1))
- len = ((from | 0x1ff) + 1) - from;
-
- /* The ECC will not be calculated correctly if less than 512 is read */
- if (len != 0x200)
- printk(KERN_WARNING
- "ECC needs a full sector read (adr: %lx size %lx)\n",
- (long) from, (long) len);
-
- /* printk("DoC_Read (adr: %lx size %lx)\n", (long) from, (long) len); */
-
-
- /* Find the chip which is to be used and select it */
- mychip = &this->chips[from >> (this->chipshift)];
-
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
-
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- DoC_Command(this,
- (!this->page256
- && (from & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
- CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, from, CDSN_CTRL_WP,
- CDSN_CTRL_ECC_IO);
-
- /* Prime the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_EN, docptr, ECCConf);
-
- /* treat crossing 256-byte sector for 2M x 8bits devices */
- if (this->page256 && from + len > (from | 0xff) + 1) {
- len256 = (from | 0xff) + 1 - from;
- DoC_ReadBuf(this, buf, len256);
-
- DoC_Command(this, NAND_CMD_READ0, CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, from + len256,
- CDSN_CTRL_WP, CDSN_CTRL_ECC_IO);
- }
-
- DoC_ReadBuf(this, &buf[len256], len - len256);
-
- /* Let the caller know we completed it */
- *retlen += len;
-
- /* Read the ECC data through the DiskOnChip ECC logic */
- /* Note: this will work even with 2M x 8bit devices as */
- /* they have 8 bytes of OOB per 256 page. mf. */
- DoC_ReadBuf(this, eccbuf, 6);
-
- /* Flush the pipeline */
- if (DoC_is_Millennium(this)) {
- dummy = ReadDOC(docptr, ECCConf);
- dummy = ReadDOC(docptr, ECCConf);
- i = ReadDOC(docptr, ECCConf);
- } else {
- dummy = ReadDOC(docptr, 2k_ECCStatus);
- dummy = ReadDOC(docptr, 2k_ECCStatus);
- i = ReadDOC(docptr, 2k_ECCStatus);
- }
-
- /* Check the ECC Status */
- if (i & 0x80) {
- int nb_errors;
- /* There was an ECC error */
-#ifdef ECC_DEBUG
- printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
-#endif
- /* Read the ECC syndrom through the DiskOnChip ECC
- logic. These syndrome will be all ZERO when there
- is no error */
- for (i = 0; i < 6; i++) {
- syndrome[i] =
- ReadDOC(docptr, ECCSyndrome0 + i);
- }
- nb_errors = doc_decode_ecc(buf, syndrome);
-
-#ifdef ECC_DEBUG
- printk(KERN_ERR "Errors corrected: %x\n", nb_errors);
-#endif
- if (nb_errors < 0) {
- /* We return error, but have actually done the
- read. Not that this can be told to
- user-space, via sys_read(), but at least
- MTD-aware stuff can know about it by
- checking *retlen */
- ret = -EIO;
- }
- }
-
-#ifdef PSYCHO_DEBUG
- printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long)from, eccbuf[0], eccbuf[1], eccbuf[2],
- eccbuf[3], eccbuf[4], eccbuf[5]);
-#endif
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
-
- /* according to 11.4.1, we need to wait for the busy line
- * drop if we read to the end of the page. */
- if(0 == ((from + len) & 0x1ff))
- {
- DoC_WaitReady(this);
- }
-
- from += len;
- left -= len;
- buf += len;
- }
-
- mutex_unlock(&this->lock);
-
- return ret;
-}
-
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t * retlen, const u_char * buf)
-{
- struct DiskOnChip *this = mtd->priv;
- int di; /* Yes, DI is a hangover from when I was disassembling the binary driver */
- void __iomem *docptr = this->virtadr;
- unsigned char eccbuf[6];
- volatile char dummy;
- int len256 = 0;
- struct Nand *mychip;
- size_t left = len;
- int status;
-
- /* Don't allow write past end of device */
- if (to >= this->totlen)
- return -EINVAL;
-
- mutex_lock(&this->lock);
-
- *retlen = 0;
- while (left) {
- len = left;
-
- /* Don't allow a single write to cross a 512-byte block boundary */
- if (to + len > ((to | 0x1ff) + 1))
- len = ((to | 0x1ff) + 1) - to;
-
- /* The ECC will not be calculated correctly if less than 512 is written */
-/* DBB-
- if (len != 0x200 && eccbuf)
- printk(KERN_WARNING
- "ECC needs a full sector write (adr: %lx size %lx)\n",
- (long) to, (long) len);
- -DBB */
-
- /* printk("DoC_Write (adr: %lx size %lx)\n", (long) to, (long) len); */
-
- /* Find the chip which is to be used and select it */
- mychip = &this->chips[to >> (this->chipshift)];
-
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
-
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Set device to main plane of flash */
- DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP);
- DoC_Command(this,
- (!this->page256
- && (to & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
- CDSN_CTRL_WP);
-
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, to, 0, CDSN_CTRL_ECC_IO);
-
- /* Prime the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
-
- /* treat crossing 256-byte sector for 2M x 8bits devices */
- if (this->page256 && to + len > (to | 0xff) + 1) {
- len256 = (to | 0xff) + 1 - to;
- DoC_WriteBuf(this, buf, len256);
-
- DoC_Command(this, NAND_CMD_PAGEPROG, 0);
-
- DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
- /* There's an implicit DoC_WaitReady() in DoC_Command */
-
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
-
- if (ReadDOC_(docptr, this->ioreg) & 1) {
- printk(KERN_ERR "Error programming flash\n");
- /* Error in programming */
- *retlen = 0;
- mutex_unlock(&this->lock);
- return -EIO;
- }
-
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, to + len256, 0,
- CDSN_CTRL_ECC_IO);
- }
-
- DoC_WriteBuf(this, &buf[len256], len - len256);
-
- WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_CE, docptr, CDSNControl);
-
- if (DoC_is_Millennium(this)) {
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- } else {
- WriteDOC_(0, docptr, this->ioreg);
- WriteDOC_(0, docptr, this->ioreg);
- WriteDOC_(0, docptr, this->ioreg);
- }
-
- WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_FLASH_IO | CDSN_CTRL_CE, docptr,
- CDSNControl);
-
- /* Read the ECC data through the DiskOnChip ECC logic */
- for (di = 0; di < 6; di++) {
- eccbuf[di] = ReadDOC(docptr, ECCSyndrome0 + di);
- }
-
- /* Reset the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
-
-#ifdef PSYCHO_DEBUG
- printk
- ("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
- DoC_Command(this, NAND_CMD_PAGEPROG, 0);
-
- DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
- /* There's an implicit DoC_WaitReady() in DoC_Command */
-
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
-
- if (status & 1) {
- printk(KERN_ERR "Error programming flash\n");
- /* Error in programming */
- *retlen = 0;
- mutex_unlock(&this->lock);
- return -EIO;
- }
-
- /* Let the caller know we completed it */
- *retlen += len;
-
- {
- unsigned char x[8];
- size_t dummy;
- int ret;
-
- /* Write the ECC data to flash */
- for (di=0; di<6; di++)
- x[di] = eccbuf[di];
-
- x[6]=0x55;
- x[7]=0x55;
-
- ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x);
- if (ret) {
- mutex_unlock(&this->lock);
- return ret;
- }
- }
-
- to += len;
- left -= len;
- buf += len;
- }
-
- mutex_unlock(&this->lock);
- return 0;
-}
-
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- struct DiskOnChip *this = mtd->priv;
- int len256 = 0, ret;
- struct Nand *mychip;
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OOB_PLACE);
-
- ofs += ops->ooboffs;
-
- mutex_lock(&this->lock);
-
- mychip = &this->chips[ofs >> this->chipshift];
-
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* update address for 2M x 8bit devices. OOB starts on the second */
- /* page to maintain compatibility with doc_read_ecc. */
- if (this->page256) {
- if (!(ofs & 0x8))
- ofs += 0x100;
- else
- ofs -= 0x8;
- }
-
- DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, ofs, CDSN_CTRL_WP, 0);
-
- /* treat crossing 8-byte OOB data for 2M x 8bit devices */
- /* Note: datasheet says it should automaticaly wrap to the */
- /* next OOB block, but it didn't work here. mf. */
- if (this->page256 && ofs + len > (ofs | 0x7) + 1) {
- len256 = (ofs | 0x7) + 1 - ofs;
- DoC_ReadBuf(this, buf, len256);
-
- DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff),
- CDSN_CTRL_WP, 0);
- }
-
- DoC_ReadBuf(this, &buf[len256], len - len256);
-
- ops->retlen = len;
- /* Reading the full OOB data drops us off of the end of the page,
- * causing the flash device to go into busy mode, so we need
- * to wait until ready 11.4.1 and Toshiba TC58256FT docs */
-
- ret = DoC_WaitReady(this);
-
- mutex_unlock(&this->lock);
- return ret;
-
-}
-
-static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
- size_t * retlen, const u_char * buf)
-{
- struct DiskOnChip *this = mtd->priv;
- int len256 = 0;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- volatile int dummy;
- int status;
-
- // printk("doc_write_oob(%lx, %d): %2.2X %2.2X %2.2X %2.2X ... %2.2X %2.2X .. %2.2X %2.2X\n",(long)ofs, len,
- // buf[0], buf[1], buf[2], buf[3], buf[8], buf[9], buf[14],buf[15]);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* disable the ECC engine */
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP);
-
- /* issue the Read2 command to set the pointer to the Spare Data Area. */
- DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
-
- /* update address for 2M x 8bit devices. OOB starts on the second */
- /* page to maintain compatibility with doc_read_ecc. */
- if (this->page256) {
- if (!(ofs & 0x8))
- ofs += 0x100;
- else
- ofs -= 0x8;
- }
-
- /* issue the Serial Data In command to initial the Page Program process */
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, ofs, 0, 0);
-
- /* treat crossing 8-byte OOB data for 2M x 8bit devices */
- /* Note: datasheet says it should automaticaly wrap to the */
- /* next OOB block, but it didn't work here. mf. */
- if (this->page256 && ofs + len > (ofs | 0x7) + 1) {
- len256 = (ofs | 0x7) + 1 - ofs;
- DoC_WriteBuf(this, buf, len256);
-
- DoC_Command(this, NAND_CMD_PAGEPROG, 0);
- DoC_Command(this, NAND_CMD_STATUS, 0);
- /* DoC_WaitReady() is implicit in DoC_Command */
-
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
-
- if (status & 1) {
- printk(KERN_ERR "Error programming oob data\n");
- /* There was an error */
- *retlen = 0;
- return -EIO;
- }
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff), 0, 0);
- }
-
- DoC_WriteBuf(this, &buf[len256], len - len256);
-
- DoC_Command(this, NAND_CMD_PAGEPROG, 0);
- DoC_Command(this, NAND_CMD_STATUS, 0);
- /* DoC_WaitReady() is implicit in DoC_Command */
-
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
-
- if (status & 1) {
- printk(KERN_ERR "Error programming oob data\n");
- /* There was an error */
- *retlen = 0;
- return -EIO;
- }
-
- *retlen = len;
- return 0;
-
-}
-
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- struct DiskOnChip *this = mtd->priv;
- int ret;
-
- BUG_ON(ops->mode != MTD_OOB_PLACE);
-
- mutex_lock(&this->lock);
- ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len,
- &ops->retlen, ops->oobbuf);
-
- mutex_unlock(&this->lock);
- return ret;
-}
-
-static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- struct DiskOnChip *this = mtd->priv;
- __u32 ofs = instr->addr;
- __u32 len = instr->len;
- volatile int dummy;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip;
- int status;
-
- mutex_lock(&this->lock);
-
- if (ofs & (mtd->erasesize-1) || len & (mtd->erasesize-1)) {
- mutex_unlock(&this->lock);
- return -EINVAL;
- }
-
- instr->state = MTD_ERASING;
-
- /* FIXME: Do this in the background. Use timers or schedule_task() */
- while(len) {
- mychip = &this->chips[ofs >> this->chipshift];
-
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- DoC_Command(this, NAND_CMD_ERASE1, 0);
- DoC_Address(this, ADDR_PAGE, ofs, 0, 0);
- DoC_Command(this, NAND_CMD_ERASE2, 0);
-
- DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
-
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
-
- if (status & 1) {
- printk(KERN_ERR "Error erasing at 0x%x\n", ofs);
- /* There was an error */
- instr->state = MTD_ERASE_FAILED;
- goto callback;
- }
- ofs += mtd->erasesize;
- len -= mtd->erasesize;
- }
- instr->state = MTD_ERASE_DONE;
-
- callback:
- mtd_erase_callback(instr);
-
- mutex_unlock(&this->lock);
- return 0;
-}
-
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static void __exit cleanup_doc2000(void)
-{
- struct mtd_info *mtd;
- struct DiskOnChip *this;
-
- while ((mtd = doc2klist)) {
- this = mtd->priv;
- doc2klist = this->nextdoc;
-
- mtd_device_unregister(mtd);
-
- iounmap(this->virtadr);
- kfree(this->chips);
- kfree(mtd);
- }
-}
-
-module_exit(cleanup_doc2000);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
-MODULE_DESCRIPTION("MTD driver for DiskOnChip 2000 and Millennium");
-
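The write-oob and erase paths in the deleted doc2000.c above share one convention: after committing a page program or erase, the driver issues NAND_CMD_STATUS, reads the status byte back through the CDSN I/O window, and treats bit 0 as the pass/fail flag. A minimal standalone sketch of that check follows; the read_status_register() stub is a hypothetical stand-in for the driver's real ReadDOC() access, not part of the original code.

#include <stdio.h>

/* Hypothetical stand-in for the memory-mapped register read that the
 * real driver performs with ReadDOC() after a NAND_CMD_STATUS command. */
static unsigned char read_status_register(void)
{
	return 0x40;		/* bit 0 clear: operation succeeded */
}

/* Bit 0 of the NAND status byte is the fail flag for program/erase. */
static int nand_op_failed(unsigned char status)
{
	return status & 1;
}

int main(void)
{
	unsigned char status = read_status_register();

	if (nand_op_failed(status)) {
		fprintf(stderr, "program/erase failed (status 0x%02x)\n", status);
		return 1;
	}
	printf("program/erase succeeded (status 0x%02x)\n", status);
	return 0;
}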
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
deleted file mode 100644
index 241192f05bc..00000000000
--- a/drivers/mtd/devices/doc2001.c
+++ /dev/null
@@ -1,841 +0,0 @@
-
-/*
- * Linux driver for Disk-On-Chip Millennium
- * (c) 1999 Machine Vision Holdings, Inc.
- * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-/* #define ECC_DEBUG */
-
-/* I have no idea why some DoC chips cannot use memcpy_from|to_io().
- * This may be due to the different revisions of the built-in ASIC controller or
- * simply a QA/bug issue. Who knows? If you have trouble, please uncomment
- * this: */
-#undef USE_MEMCPY
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf);
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf);
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
-
-static struct mtd_info *docmillist = NULL;
-
-/* Perform the required delay cycles by reading from the NOP register */
-static void DoC_Delay(void __iomem * docptr, unsigned short cycles)
-{
- volatile char dummy;
- int i;
-
- for (i = 0; i < cycles; i++)
- dummy = ReadDOC(docptr, NOP);
-}
-
-/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(void __iomem * docptr)
-{
- unsigned short c = 0xffff;
-
- DEBUG(MTD_DEBUG_LEVEL3,
- "_DoC_WaitReady called for out-of-line wait\n");
-
- /* Out-of-line routine to wait for chip response */
- while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c)
- ;
-
- if (c == 0)
- DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
-
- return (c == 0);
-}
-
-static inline int DoC_WaitReady(void __iomem * docptr)
-{
- /* This is inline, to optimise the common case, where it's ready instantly */
- int ret = 0;
-
-	/* 4 reads from the NOP register should be issued prior to the read from CDSNControl,
-	   see Software Requirement 11.4 item 2. */
- DoC_Delay(docptr, 4);
-
- if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
- /* Call the out-of-line routine to wait */
- ret = _DoC_WaitReady(docptr);
-
-	/* issue 2 reads from the NOP register after reading from the CDSNControl register,
-	   see Software Requirement 11.4 item 2. */
- DoC_Delay(docptr, 2);
-
- return ret;
-}
-
-/* DoC_Command: Send a flash command to the flash chip through the CDSN IO register
-   with the internal pipeline. Four delay cycles (reads from the NOP register) are
-   required after writing to the CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static void DoC_Command(void __iomem * docptr, unsigned char command,
- unsigned char xtraflags)
-{
- /* Assert the CLE (Command Latch Enable) line to the flash chip */
- WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(docptr, 4);
-
- /* Send the command */
- WriteDOC(command, docptr, Mil_CDSN_IO);
- WriteDOC(0x00, docptr, WritePipeTerm);
-
- /* Lower the CLE line */
- WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(docptr, 4);
-}
-
-/* DoC_Address: Set the current address for the flash chip through the CDSN IO register
-   with the internal pipeline. Four delay cycles (reads from the NOP register) are
-   required after writing to the CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static inline void DoC_Address(void __iomem * docptr, int numbytes, unsigned long ofs,
- unsigned char xtraflags1, unsigned char xtraflags2)
-{
- /* Assert the ALE (Address Latch Enable) line to the flash chip */
- WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(docptr, 4);
-
- /* Send the address */
- switch (numbytes)
- {
- case 1:
- /* Send single byte, bits 0-7. */
- WriteDOC(ofs & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC(0x00, docptr, WritePipeTerm);
- break;
- case 2:
- /* Send bits 9-16 followed by 17-23 */
- WriteDOC((ofs >> 9) & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC((ofs >> 17) & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC(0x00, docptr, WritePipeTerm);
- break;
- case 3:
- /* Send 0-7, 9-16, then 17-23 */
- WriteDOC(ofs & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC((ofs >> 9) & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC((ofs >> 17) & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC(0x00, docptr, WritePipeTerm);
- break;
- default:
- return;
- }
-
- /* Lower the ALE line */
- WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(docptr, 4);
-}
-
-/* DoC_SelectChip: Select a given flash chip within the current floor */
-static int DoC_SelectChip(void __iomem * docptr, int chip)
-{
- /* Select the individual flash chip requested */
- WriteDOC(chip, docptr, CDSNDeviceSelect);
- DoC_Delay(docptr, 4);
-
- /* Wait for it to be ready */
- return DoC_WaitReady(docptr);
-}
-
-/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
-static int DoC_SelectFloor(void __iomem * docptr, int floor)
-{
- /* Select the floor (bank) of chips required */
- WriteDOC(floor, docptr, FloorSelect);
-
- /* Wait for the chip to be ready */
- return DoC_WaitReady(docptr);
-}
-
-/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
-static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
-{
- int mfr, id, i, j;
- volatile char dummy;
-
- /* Page in the required floor/chip
- FIXME: is this supported by Millennium ?? */
- DoC_SelectFloor(doc->virtadr, floor);
- DoC_SelectChip(doc->virtadr, chip);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(doc->virtadr, NAND_CMD_RESET, CDSN_CTRL_WP);
- DoC_WaitReady(doc->virtadr);
-
- /* Read the NAND chip ID: 1. Send ReadID command */
- DoC_Command(doc->virtadr, NAND_CMD_READID, CDSN_CTRL_WP);
-
- /* Read the NAND chip ID: 2. Send address byte zero */
- DoC_Address(doc->virtadr, 1, 0x00, CDSN_CTRL_WP, 0x00);
-
-	/* Read the manufacturer and device ID codes of the flash device through
-	   the CDSN IO register, see Software Requirement 11.4 item 5. */
- dummy = ReadDOC(doc->virtadr, ReadPipeInit);
- DoC_Delay(doc->virtadr, 2);
- mfr = ReadDOC(doc->virtadr, Mil_CDSN_IO);
-
- DoC_Delay(doc->virtadr, 2);
- id = ReadDOC(doc->virtadr, Mil_CDSN_IO);
- dummy = ReadDOC(doc->virtadr, LastDataRead);
-
- /* No response - return failure */
- if (mfr == 0xff || mfr == 0)
- return 0;
-
-	/* FIXME: deal with the multi-flash on multi-Millennium case more carefully */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if ( id == nand_flash_ids[i].id) {
- /* Try to identify manufacturer */
- for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
- if (nand_manuf_ids[j].id == mfr)
- break;
- }
- printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, "
- "Chip ID: %2.2X (%s:%s)\n",
- mfr, id, nand_manuf_ids[j].name, nand_flash_ids[i].name);
- doc->mfr = mfr;
- doc->id = id;
- doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1;
- break;
- }
- }
-
- if (nand_flash_ids[i].name == NULL)
- return 0;
- else
- return 1;
-}
-
-/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
-static void DoC_ScanChips(struct DiskOnChip *this)
-{
- int floor, chip;
- int numchips[MAX_FLOORS_MIL];
- int ret;
-
- this->numchips = 0;
- this->mfr = 0;
- this->id = 0;
-
- /* For each floor, find the number of valid chips it contains */
- for (floor = 0,ret = 1; floor < MAX_FLOORS_MIL; floor++) {
- numchips[floor] = 0;
- for (chip = 0; chip < MAX_CHIPS_MIL && ret != 0; chip++) {
- ret = DoC_IdentChip(this, floor, chip);
- if (ret) {
- numchips[floor]++;
- this->numchips++;
- }
- }
- }
- /* If there are none at all that we recognise, bail */
- if (!this->numchips) {
- printk("No flash chips recognised.\n");
- return;
- }
-
- /* Allocate an array to hold the information for each chip */
- this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
- if (!this->chips){
- printk("No memory for allocating chip info structures\n");
- return;
- }
-
- /* Fill out the chip array with {floor, chipno} for each
- * detected chip in the device. */
- for (floor = 0, ret = 0; floor < MAX_FLOORS_MIL; floor++) {
- for (chip = 0 ; chip < numchips[floor] ; chip++) {
- this->chips[ret].floor = floor;
- this->chips[ret].chip = chip;
- this->chips[ret].curadr = 0;
- this->chips[ret].curmode = 0x50;
- ret++;
- }
- }
-
- /* Calculate and print the total size of the device */
- this->totlen = this->numchips * (1 << this->chipshift);
- printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
-	       this->numchips, this->totlen >> 20);
-}
-
-static int DoCMil_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
-{
- int tmp1, tmp2, retval;
-
- if (doc1->physadr == doc2->physadr)
- return 1;
-
- /* Use the alias resolution register which was set aside for this
-	 * purpose. If its value is the same on both chips, they might
-	 * be the same chip, and we write to one and check for a change in
-	 * the other. It's unclear if this register is usable in the
-	 * DoC 2000 (it's in the Millennium docs), but it seems to work. */
- tmp1 = ReadDOC(doc1->virtadr, AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
- if (tmp1 != tmp2)
- return 0;
-
- WriteDOC((tmp1+1) % 0xff, doc1->virtadr, AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
- if (tmp2 == (tmp1+1) % 0xff)
- retval = 1;
- else
- retval = 0;
-
- /* Restore register contents. May not be necessary, but do it just to
- * be safe. */
- WriteDOC(tmp1, doc1->virtadr, AliasResolution);
-
- return retval;
-}
-
-/* This routine is found from the docprobe code by symbol_get(),
- * which will bump the use count of this module. */
-void DoCMil_init(struct mtd_info *mtd)
-{
- struct DiskOnChip *this = mtd->priv;
- struct DiskOnChip *old = NULL;
-
- /* We must avoid being called twice for the same device. */
- if (docmillist)
- old = docmillist->priv;
-
- while (old) {
- if (DoCMil_is_alias(this, old)) {
- printk(KERN_NOTICE "Ignoring DiskOnChip Millennium at "
- "0x%lX - already configured\n", this->physadr);
- iounmap(this->virtadr);
- kfree(mtd);
- return;
- }
- if (old->nextdoc)
- old = old->nextdoc->priv;
- else
- old = NULL;
- }
-
- mtd->name = "DiskOnChip Millennium";
- printk(KERN_NOTICE "DiskOnChip Millennium found at address 0x%lX\n",
- this->physadr);
-
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
-
- /* FIXME: erase size is not always 8KiB */
- mtd->erasesize = 0x2000;
-
- mtd->writesize = 512;
- mtd->oobsize = 16;
- mtd->owner = THIS_MODULE;
- mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
- mtd->read = doc_read;
- mtd->write = doc_write;
- mtd->read_oob = doc_read_oob;
- mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
- this->curfloor = -1;
- this->curchip = -1;
-
- /* Ident all the chips present. */
- DoC_ScanChips(this);
-
- if (!this->totlen) {
- kfree(mtd);
- iounmap(this->virtadr);
- } else {
- this->nextdoc = docmillist;
- docmillist = mtd;
- mtd->size = this->totlen;
- mtd_device_register(mtd, NULL, 0);
- return;
- }
-}
-EXPORT_SYMBOL_GPL(DoCMil_init);
-
-static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- int i, ret;
- volatile char dummy;
- unsigned char syndrome[6], eccbuf[6];
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[from >> (this->chipshift)];
-
- /* Don't allow read past end of device */
- if (from >= this->totlen)
- return -EINVAL;
-
- /* Don't allow a single read to cross a 512-byte block boundary */
- if (from + len > ((from | 0x1ff) + 1))
- len = ((from | 0x1ff) + 1) - from;
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
-	/* issue the Read0 or Read1 command depending on which half of the page
-	   we are accessing. Poll the Flash Ready bit after issuing the 3-byte
-	   address in Sequence Read Mode, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, (from >> 8) & 1, CDSN_CTRL_WP);
- DoC_Address(docptr, 3, from, CDSN_CTRL_WP, 0x00);
- DoC_WaitReady(docptr);
-
- /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_EN, docptr, ECCConf);
-
- /* Read the data via the internal pipeline through CDSN IO register,
- see Pipelined Read Operations 11.3 */
- dummy = ReadDOC(docptr, ReadPipeInit);
-#ifndef USE_MEMCPY
- for (i = 0; i < len-1; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
- }
-#else
- memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len - 1);
-#endif
- buf[len - 1] = ReadDOC(docptr, LastDataRead);
-
- /* Let the caller know we completed it */
- *retlen = len;
- ret = 0;
-
- /* Read the ECC data from Spare Data Area,
- see Reed-Solomon EDC/ECC 11.1 */
- dummy = ReadDOC(docptr, ReadPipeInit);
-#ifndef USE_MEMCPY
- for (i = 0; i < 5; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- eccbuf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_fromio(eccbuf, docptr + DoC_Mil_CDSN_IO, 5);
-#endif
- eccbuf[5] = ReadDOC(docptr, LastDataRead);
-
- /* Flush the pipeline */
- dummy = ReadDOC(docptr, ECCConf);
- dummy = ReadDOC(docptr, ECCConf);
-
- /* Check the ECC Status */
- if (ReadDOC(docptr, ECCConf) & 0x80) {
- int nb_errors;
- /* There was an ECC error */
-#ifdef ECC_DEBUG
- printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
-#endif
-		/* Read the ECC syndrome through the DiskOnChip ECC logic.
-		   The syndrome bytes will be all ZERO when there is no error */
- for (i = 0; i < 6; i++) {
- syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i);
- }
- nb_errors = doc_decode_ecc(buf, syndrome);
-#ifdef ECC_DEBUG
- printk("ECC Errors corrected: %x\n", nb_errors);
-#endif
- if (nb_errors < 0) {
- /* We return error, but have actually done the read. Not that
- this can be told to user-space, via sys_read(), but at least
- MTD-aware stuff can know about it by checking *retlen */
- ret = -EIO;
- }
- }
-
-#ifdef PSYCHO_DEBUG
- printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
-
- return ret;
-}
-
-static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- int i,ret = 0;
- char eccbuf[6];
- volatile char dummy;
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[to >> (this->chipshift)];
-
- /* Don't allow write past end of device */
- if (to >= this->totlen)
- return -EINVAL;
-
-#if 0
- /* Don't allow a single write to cross a 512-byte block boundary */
- if (to + len > ( (to | 0x1ff) + 1))
- len = ((to | 0x1ff) + 1) - to;
-#else
- /* Don't allow writes which aren't exactly one block */
- if (to & 0x1ff || len != 0x200)
- return -EINVAL;
-#endif
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0x00);
- DoC_WaitReady(docptr);
- /* Set device to main plane of flash */
- DoC_Command(docptr, NAND_CMD_READ0, 0x00);
-
-	/* issue the Serial Data In command to initiate the Page Program process */
- DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
- DoC_Address(docptr, 3, to, 0x00, 0x00);
- DoC_WaitReady(docptr);
-
- /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
-
- /* Write the data via the internal pipeline through CDSN IO register,
- see Pipelined Write Operations 11.2 */
-#ifndef USE_MEMCPY
- for (i = 0; i < len; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
-#endif
- WriteDOC(0x00, docptr, WritePipeTerm);
-
- /* Write ECC data to flash, the ECC info is generated by the DiskOnChip ECC logic
- see Reed-Solomon EDC/ECC 11.1 */
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
-
- /* Read the ECC data through the DiskOnChip ECC logic */
- for (i = 0; i < 6; i++) {
- eccbuf[i] = ReadDOC(docptr, ECCSyndrome0 + i);
- }
-
- /* ignore the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
-
-#ifndef USE_MEMCPY
- /* Write the ECC data to flash */
- for (i = 0; i < 6; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- WriteDOC(eccbuf[i], docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_toio(docptr + DoC_Mil_CDSN_IO, eccbuf, 6);
-#endif
-
-	/* write the block status BLOCK_USED (0x5555) at the end of the ECC data
-	   FIXME: this is only a hack for programming the IPL area for LinuxBIOS
-	   and should be replaced with proper code in user-space utilities */
- WriteDOC(0x55, docptr, Mil_CDSN_IO);
- WriteDOC(0x55, docptr, Mil_CDSN_IO + 1);
-
- WriteDOC(0x00, docptr, WritePipeTerm);
-
-#ifdef PSYCHO_DEBUG
- printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
-
- /* Commit the Page Program command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5.*/
- DoC_Command(docptr, NAND_CMD_STATUS, CDSN_CTRL_WP);
- dummy = ReadDOC(docptr, ReadPipeInit);
- DoC_Delay(docptr, 2);
- if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
- printk("Error programming flash\n");
- /* Error in programming
- FIXME: implement Bad Block Replacement (in nftl.c ??) */
- *retlen = 0;
- ret = -EIO;
- }
- dummy = ReadDOC(docptr, LastDataRead);
-
- /* Let the caller know we completed it */
- *retlen = len;
-
- return ret;
-}
-
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
-#ifndef USE_MEMCPY
- int i;
-#endif
- volatile char dummy;
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OOB_PLACE);
-
- ofs += ops->ooboffs;
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* disable the ECC engine */
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
-
-	/* issue the Read2 command to set the pointer to the Spare Data Area.
-	   Poll the Flash Ready bit after issuing the 3-byte address in
-	   Sequence Read Mode, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_READOOB, CDSN_CTRL_WP);
- DoC_Address(docptr, 3, ofs, CDSN_CTRL_WP, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the data out via the internal pipeline through CDSN IO register,
- see Pipelined Read Operations 11.3 */
- dummy = ReadDOC(docptr, ReadPipeInit);
-#ifndef USE_MEMCPY
- for (i = 0; i < len-1; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len - 1);
-#endif
- buf[len - 1] = ReadDOC(docptr, LastDataRead);
-
- ops->retlen = len;
-
- return 0;
-}
-
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
-#ifndef USE_MEMCPY
- int i;
-#endif
- volatile char dummy;
- int ret = 0;
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OOB_PLACE);
-
- ofs += ops->ooboffs;
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* disable the ECC engine */
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, CDSN_CTRL_WP);
- DoC_WaitReady(docptr);
- /* issue the Read2 command to set the pointer to the Spare Data Area. */
- DoC_Command(docptr, NAND_CMD_READOOB, CDSN_CTRL_WP);
-
-	/* issue the Serial Data In command to initiate the Page Program process */
- DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
- DoC_Address(docptr, 3, ofs, 0x00, 0x00);
-
- /* Write the data via the internal pipeline through CDSN IO register,
- see Pipelined Write Operations 11.2 */
-#ifndef USE_MEMCPY
- for (i = 0; i < len; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
-#endif
- WriteDOC(0x00, docptr, WritePipeTerm);
-
- /* Commit the Page Program command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5.*/
- DoC_Command(docptr, NAND_CMD_STATUS, 0x00);
- dummy = ReadDOC(docptr, ReadPipeInit);
- DoC_Delay(docptr, 2);
- if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
- printk("Error programming oob data\n");
- /* FIXME: implement Bad Block Replacement (in nftl.c ??) */
- ops->retlen = 0;
- ret = -EIO;
- }
- dummy = ReadDOC(docptr, LastDataRead);
-
- ops->retlen = len;
-
- return ret;
-}
-
-int doc_erase (struct mtd_info *mtd, struct erase_info *instr)
-{
- volatile char dummy;
- struct DiskOnChip *this = mtd->priv;
- __u32 ofs = instr->addr;
- __u32 len = instr->len;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
-
- if (len != mtd->erasesize)
-		printk(KERN_WARNING "Erase not right size (%x != %x)\n",
- len, mtd->erasesize);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- instr->state = MTD_ERASE_PENDING;
-
- /* issue the Erase Setup command */
- DoC_Command(docptr, NAND_CMD_ERASE1, 0x00);
- DoC_Address(docptr, 2, ofs, 0x00, 0x00);
-
- /* Commit the Erase Start command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_ERASE2, 0x00);
- DoC_WaitReady(docptr);
-
- instr->state = MTD_ERASING;
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5.
-	   FIXME: it seems that we do not wait long enough; some blocks are not
-	   fully erased */
- DoC_Command(docptr, NAND_CMD_STATUS, CDSN_CTRL_WP);
- dummy = ReadDOC(docptr, ReadPipeInit);
- DoC_Delay(docptr, 2);
- if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
- printk("Error Erasing at 0x%x\n", ofs);
- /* There was an error
- FIXME: implement Bad Block Replacement (in nftl.c ??) */
- instr->state = MTD_ERASE_FAILED;
- } else
- instr->state = MTD_ERASE_DONE;
- dummy = ReadDOC(docptr, LastDataRead);
-
- mtd_erase_callback(instr);
-
- return 0;
-}
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static void __exit cleanup_doc2001(void)
-{
- struct mtd_info *mtd;
- struct DiskOnChip *this;
-
- while ((mtd=docmillist)) {
- this = mtd->priv;
- docmillist = this->nextdoc;
-
- mtd_device_unregister(mtd);
-
- iounmap(this->virtadr);
- kfree(this->chips);
- kfree(mtd);
- }
-}
-
-module_exit(cleanup_doc2001);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
-MODULE_DESCRIPTION("Alternative driver for DiskOnChip Millennium");
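The DoCMil_is_alias() routine in the file above detects two probe windows that map the same physical device: if the alias-resolution register reads the same value through both windows, the driver bumps the value through one window, checks whether the other window follows, and then restores the original contents. The sketch below models that idea in isolation; the two "windows" are plain pointers standing in for the ioremapped register spaces, which is an assumption for illustration only.

#include <stdio.h>

/* Each "window" is modelled as a pointer to a byte that plays the role
 * of the alias-resolution register. Two windows alias the same device
 * when they point at the same storage. */
static int windows_are_alias(volatile unsigned char *reg1,
			     volatile unsigned char *reg2)
{
	unsigned char tmp1 = *reg1;
	unsigned char tmp2 = *reg2;
	int alias;

	if (tmp1 != tmp2)
		return 0;	/* values differ: definitely distinct devices */

	/* Bump the register through window 1 and see if window 2 follows. */
	*reg1 = (tmp1 + 1) % 0xff;
	alias = (*reg2 == (tmp1 + 1) % 0xff);

	/* Restore the original contents, as the driver does. */
	*reg1 = tmp1;
	return alias;
}

int main(void)
{
	unsigned char chip_a = 0x12, chip_b = 0x12;

	printf("same device: %d\n", windows_are_alias(&chip_a, &chip_a));
	printf("two devices: %d\n", windows_are_alias(&chip_a, &chip_b));
	return 0;
}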
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
deleted file mode 100644
index 09ae0adc3ad..00000000000
--- a/drivers/mtd/devices/doc2001plus.c
+++ /dev/null
@@ -1,1106 +0,0 @@
-/*
- * Linux driver for Disk-On-Chip Millennium Plus
- *
- * (c) 2002-2003 Greg Ungerer <gerg@snapgear.com>
- * (c) 2002-2003 SnapGear Inc
- * (c) 1999 Machine Vision Holdings, Inc.
- * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
- *
- * Released under GPL
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-/* #define ECC_DEBUG */
-
-/* I have no idea why some DoC chips cannot use memcpy_from|to_io().
- * This may be due to the different revisions of the built-in ASIC controller or
- * simply a QA/bug issue. Who knows? If you have trouble, please uncomment
- * this: */
-#undef USE_MEMCPY
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf);
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf);
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
-
-static struct mtd_info *docmilpluslist = NULL;
-
-
-/* Perform the required delay cycles by writing to the NOP register */
-static void DoC_Delay(void __iomem * docptr, int cycles)
-{
- int i;
-
- for (i = 0; (i < cycles); i++)
- WriteDOC(0, docptr, Mplus_NOP);
-}
-
-#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
-
-/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(void __iomem * docptr)
-{
- unsigned int c = 0xffff;
-
- DEBUG(MTD_DEBUG_LEVEL3,
- "_DoC_WaitReady called for out-of-line wait\n");
-
- /* Out-of-line routine to wait for chip response */
- while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c)
- ;
-
- if (c == 0)
- DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
-
- return (c == 0);
-}
-
-static inline int DoC_WaitReady(void __iomem * docptr)
-{
- /* This is inline, to optimise the common case, where it's ready instantly */
- int ret = 0;
-
-	/* reads from the NOP register should be issued prior to the read from CDSNControl,
-	   see Software Requirement 11.4 item 2. */
- DoC_Delay(docptr, 4);
-
- if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
- /* Call the out-of-line routine to wait */
- ret = _DoC_WaitReady(docptr);
-
- return ret;
-}
-
-/* For some reason the Millennium Plus seems to occasionally put itself
- * into reset mode. For me this happens randomly, with no pattern that I
- * can detect. M-Systems suggest always checking this on any block-level
- * operation and setting it back to normal mode if it is in reset mode.
- */
-static inline void DoC_CheckASIC(void __iomem * docptr)
-{
- /* Make sure the DoC is in normal mode */
- if ((ReadDOC(docptr, Mplus_DOCControl) & DOC_MODE_NORMAL) == 0) {
- WriteDOC((DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_DOCControl);
- WriteDOC(~(DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_CtrlConfirm);
- }
-}
-
-/* DoC_Command: Send a flash command to the flash chip through the Flash
- * command register. Need 2 Write Pipeline Terminates to complete send.
- */
-static void DoC_Command(void __iomem * docptr, unsigned char command,
- unsigned char xtraflags)
-{
- WriteDOC(command, docptr, Mplus_FlashCmd);
- WriteDOC(command, docptr, Mplus_WritePipeTerm);
- WriteDOC(command, docptr, Mplus_WritePipeTerm);
-}
-
-/* DoC_Address: Set the current address for the flash chip through the Flash
- * Address register. Need 2 Write Pipeline Terminates to complete send.
- */
-static inline void DoC_Address(struct DiskOnChip *doc, int numbytes,
- unsigned long ofs, unsigned char xtraflags1,
- unsigned char xtraflags2)
-{
- void __iomem * docptr = doc->virtadr;
-
- /* Allow for possible Mill Plus internal flash interleaving */
- ofs >>= doc->interleave;
-
- switch (numbytes) {
- case 1:
- /* Send single byte, bits 0-7. */
- WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress);
- break;
- case 2:
- /* Send bits 9-16 followed by 17-23 */
- WriteDOC((ofs >> 9) & 0xff, docptr, Mplus_FlashAddress);
- WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress);
- break;
- case 3:
- /* Send 0-7, 9-16, then 17-23 */
- WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress);
- WriteDOC((ofs >> 9) & 0xff, docptr, Mplus_FlashAddress);
- WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress);
- break;
- default:
- return;
- }
-
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-}
-
-/* DoC_SelectChip: Select a given flash chip within the current floor */
-static int DoC_SelectChip(void __iomem * docptr, int chip)
-{
- /* No choice for flash chip on Millennium Plus */
- return 0;
-}
-
-/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
-static int DoC_SelectFloor(void __iomem * docptr, int floor)
-{
- WriteDOC((floor & 0x3), docptr, Mplus_DeviceSelect);
- return 0;
-}
-
-/*
- * Translate the given offset into the appropriate command and offset.
- * This does the mapping using the 16bit interleave layout defined by
- * M-Systems, and looks like this for a sector pair:
- * +-----------+-------+-------+-------+--------------+---------+-----------+
- * | 0 --- 511 |512-517|518-519|520-521| 522 --- 1033 |1034-1039|1040 - 1055|
- * +-----------+-------+-------+-------+--------------+---------+-----------+
- * | Data 0 | ECC 0 |Flags0 |Flags1 | Data 1 |ECC 1 | OOB 1 + 2 |
- * +-----------+-------+-------+-------+--------------+---------+-----------+
- */
-/* FIXME: This lives in INFTL not here. Other users of flash devices
- may not want it */
-static unsigned int DoC_GetDataOffset(struct mtd_info *mtd, loff_t *from)
-{
- struct DiskOnChip *this = mtd->priv;
-
- if (this->interleave) {
- unsigned int ofs = *from & 0x3ff;
- unsigned int cmd;
-
- if (ofs < 512) {
- cmd = NAND_CMD_READ0;
- ofs &= 0x1ff;
- } else if (ofs < 1014) {
- cmd = NAND_CMD_READ1;
- ofs = (ofs & 0x1ff) + 10;
- } else {
- cmd = NAND_CMD_READOOB;
- ofs = ofs - 1014;
- }
-
- *from = (*from & ~0x3ff) | ofs;
- return cmd;
- } else {
- /* No interleave */
- if ((*from) & 0x100)
- return NAND_CMD_READ1;
- return NAND_CMD_READ0;
- }
-}
-
-static unsigned int DoC_GetECCOffset(struct mtd_info *mtd, loff_t *from)
-{
- unsigned int ofs, cmd;
-
- if (*from & 0x200) {
- cmd = NAND_CMD_READOOB;
- ofs = 10 + (*from & 0xf);
- } else {
- cmd = NAND_CMD_READ1;
- ofs = (*from & 0xf);
- }
-
- *from = (*from & ~0x3ff) | ofs;
- return cmd;
-}
-
-static unsigned int DoC_GetFlagsOffset(struct mtd_info *mtd, loff_t *from)
-{
- unsigned int ofs, cmd;
-
- cmd = NAND_CMD_READ1;
- ofs = (*from & 0x200) ? 8 : 6;
- *from = (*from & ~0x3ff) | ofs;
- return cmd;
-}
-
-static unsigned int DoC_GetHdrOffset(struct mtd_info *mtd, loff_t *from)
-{
- unsigned int ofs, cmd;
-
- cmd = NAND_CMD_READOOB;
- ofs = (*from & 0x200) ? 24 : 16;
- *from = (*from & ~0x3ff) | ofs;
- return cmd;
-}
-
-static inline void MemReadDOC(void __iomem * docptr, unsigned char *buf, int len)
-{
-#ifndef USE_MEMCPY
- int i;
- for (i = 0; i < len; i++)
- buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
-#else
- memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len);
-#endif
-}
-
-static inline void MemWriteDOC(void __iomem * docptr, unsigned char *buf, int len)
-{
-#ifndef USE_MEMCPY
- int i;
- for (i = 0; i < len; i++)
- WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
-#else
- memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
-#endif
-}
-
-/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
-static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
-{
- int mfr, id, i, j;
- volatile char dummy;
- void __iomem * docptr = doc->virtadr;
-
- /* Page in the required floor/chip */
- DoC_SelectFloor(docptr, floor);
- DoC_SelectChip(docptr, chip);
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- /* Read the NAND chip ID: 1. Send ReadID command */
- DoC_Command(docptr, NAND_CMD_READID, 0);
-
- /* Read the NAND chip ID: 2. Send address byte zero */
- DoC_Address(doc, 1, 0x00, 0, 0x00);
-
- WriteDOC(0, docptr, Mplus_FlashControl);
- DoC_WaitReady(docptr);
-
-	/* Read the manufacturer and device ID codes of the flash device through
-	   the CDSN IO register, see Software Requirement 11.4 item 5. */
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
-
- mfr = ReadDOC(docptr, Mil_CDSN_IO);
- if (doc->interleave)
- dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
-
- id = ReadDOC(docptr, Mil_CDSN_IO);
- if (doc->interleave)
- dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
-
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- /* No response - return failure */
- if (mfr == 0xff || mfr == 0)
- return 0;
-
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (id == nand_flash_ids[i].id) {
- /* Try to identify manufacturer */
- for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
- if (nand_manuf_ids[j].id == mfr)
- break;
- }
- printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, "
- "Chip ID: %2.2X (%s:%s)\n", mfr, id,
- nand_manuf_ids[j].name, nand_flash_ids[i].name);
- doc->mfr = mfr;
- doc->id = id;
- doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1;
- doc->erasesize = nand_flash_ids[i].erasesize << doc->interleave;
- break;
- }
- }
-
- if (nand_flash_ids[i].name == NULL)
- return 0;
- return 1;
-}
-
-/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
-static void DoC_ScanChips(struct DiskOnChip *this)
-{
- int floor, chip;
- int numchips[MAX_FLOORS_MPLUS];
- int ret;
-
- this->numchips = 0;
- this->mfr = 0;
- this->id = 0;
-
- /* Work out the intended interleave setting */
- this->interleave = 0;
- if (this->ChipID == DOC_ChipID_DocMilPlus32)
- this->interleave = 1;
-
- /* Check the ASIC agrees */
- if ( (this->interleave << 2) !=
- (ReadDOC(this->virtadr, Mplus_Configuration) & 4)) {
- u_char conf = ReadDOC(this->virtadr, Mplus_Configuration);
- printk(KERN_NOTICE "Setting DiskOnChip Millennium Plus interleave to %s\n",
- this->interleave?"on (16-bit)":"off (8-bit)");
- conf ^= 4;
- WriteDOC(conf, this->virtadr, Mplus_Configuration);
- }
-
- /* For each floor, find the number of valid chips it contains */
- for (floor = 0,ret = 1; floor < MAX_FLOORS_MPLUS; floor++) {
- numchips[floor] = 0;
- for (chip = 0; chip < MAX_CHIPS_MPLUS && ret != 0; chip++) {
- ret = DoC_IdentChip(this, floor, chip);
- if (ret) {
- numchips[floor]++;
- this->numchips++;
- }
- }
- }
- /* If there are none at all that we recognise, bail */
- if (!this->numchips) {
- printk("No flash chips recognised.\n");
- return;
- }
-
- /* Allocate an array to hold the information for each chip */
- this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
- if (!this->chips){
- printk("MTD: No memory for allocating chip info structures\n");
- return;
- }
-
- /* Fill out the chip array with {floor, chipno} for each
- * detected chip in the device. */
- for (floor = 0, ret = 0; floor < MAX_FLOORS_MPLUS; floor++) {
- for (chip = 0 ; chip < numchips[floor] ; chip++) {
- this->chips[ret].floor = floor;
- this->chips[ret].chip = chip;
- this->chips[ret].curadr = 0;
- this->chips[ret].curmode = 0x50;
- ret++;
- }
- }
-
- /* Calculate and print the total size of the device */
- this->totlen = this->numchips * (1 << this->chipshift);
- printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
-	       this->numchips, this->totlen >> 20);
-}
-
-static int DoCMilPlus_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
-{
- int tmp1, tmp2, retval;
-
- if (doc1->physadr == doc2->physadr)
- return 1;
-
- /* Use the alias resolution register which was set aside for this
-	 * purpose. If its value is the same on both chips, they might
-	 * be the same chip, and we write to one and check for a change in
-	 * the other. It's unclear if this register is usable in the
-	 * DoC 2000 (it's in the Millennium docs), but it seems to work. */
- tmp1 = ReadDOC(doc1->virtadr, Mplus_AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution);
- if (tmp1 != tmp2)
- return 0;
-
- WriteDOC((tmp1+1) % 0xff, doc1->virtadr, Mplus_AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution);
- if (tmp2 == (tmp1+1) % 0xff)
- retval = 1;
- else
- retval = 0;
-
- /* Restore register contents. May not be necessary, but do it just to
- * be safe. */
- WriteDOC(tmp1, doc1->virtadr, Mplus_AliasResolution);
-
- return retval;
-}
-
-/* This routine is found from the docprobe code by symbol_get(),
- * which will bump the use count of this module. */
-void DoCMilPlus_init(struct mtd_info *mtd)
-{
- struct DiskOnChip *this = mtd->priv;
- struct DiskOnChip *old = NULL;
-
- /* We must avoid being called twice for the same device. */
- if (docmilpluslist)
- old = docmilpluslist->priv;
-
- while (old) {
- if (DoCMilPlus_is_alias(this, old)) {
- printk(KERN_NOTICE "Ignoring DiskOnChip Millennium "
- "Plus at 0x%lX - already configured\n",
- this->physadr);
- iounmap(this->virtadr);
- kfree(mtd);
- return;
- }
- if (old->nextdoc)
- old = old->nextdoc->priv;
- else
- old = NULL;
- }
-
- mtd->name = "DiskOnChip Millennium Plus";
- printk(KERN_NOTICE "DiskOnChip Millennium Plus found at "
- "address 0x%lX\n", this->physadr);
-
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
-
- mtd->erasesize = 0;
- mtd->writesize = 512;
- mtd->oobsize = 16;
- mtd->owner = THIS_MODULE;
- mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
- mtd->read = doc_read;
- mtd->write = doc_write;
- mtd->read_oob = doc_read_oob;
- mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
- this->curfloor = -1;
- this->curchip = -1;
-
- /* Ident all the chips present. */
- DoC_ScanChips(this);
-
- if (!this->totlen) {
- kfree(mtd);
- iounmap(this->virtadr);
- } else {
- this->nextdoc = docmilpluslist;
- docmilpluslist = mtd;
- mtd->size = this->totlen;
- mtd->erasesize = this->erasesize;
- mtd_device_register(mtd, NULL, 0);
- return;
- }
-}
-EXPORT_SYMBOL_GPL(DoCMilPlus_init);
-
-#if 0
-static int doc_dumpblk(struct mtd_info *mtd, loff_t from)
-{
- int i;
- loff_t fofs;
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[from >> (this->chipshift)];
- unsigned char *bp, buf[1056];
- char c[32];
-
- from &= ~0x3ff;
-
- /* Don't allow read past end of device */
- if (from >= this->totlen)
- return -EINVAL;
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- fofs = from;
- DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0);
- DoC_Address(this, 3, fofs, 0, 0x00);
- WriteDOC(0, docptr, Mplus_FlashControl);
- DoC_WaitReady(docptr);
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
-
- /* Read the data via the internal pipeline through CDSN IO
- register, see Pipelined Read Operations 11.3 */
- MemReadDOC(docptr, buf, 1054);
- buf[1054] = ReadDOC(docptr, Mplus_LastDataRead);
- buf[1055] = ReadDOC(docptr, Mplus_LastDataRead);
-
- memset(&c[0], 0, sizeof(c));
- printk("DUMP OFFSET=%x:\n", (int)from);
-
- for (i = 0, bp = &buf[0]; (i < 1056); i++) {
- if ((i % 16) == 0)
- printk("%08x: ", i);
- printk(" %02x", *bp);
- c[(i & 0xf)] = ((*bp >= 0x20) && (*bp <= 0x7f)) ? *bp : '.';
- bp++;
- if (((i + 1) % 16) == 0)
- printk(" %s\n", c);
- }
- printk("\n");
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- return 0;
-}
-#endif
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- int ret, i;
- volatile char dummy;
- loff_t fofs;
- unsigned char syndrome[6], eccbuf[6];
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[from >> (this->chipshift)];
-
- /* Don't allow read past end of device */
- if (from >= this->totlen)
- return -EINVAL;
-
- /* Don't allow a single read to cross a 512-byte block boundary */
- if (from + len > ((from | 0x1ff) + 1))
- len = ((from | 0x1ff) + 1) - from;
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- fofs = from;
- DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0);
- DoC_Address(this, 3, fofs, 0, 0x00);
- WriteDOC(0, docptr, Mplus_FlashControl);
- DoC_WaitReady(docptr);
-
- /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
- WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
-
- /* Let the caller know we completed it */
- *retlen = len;
- ret = 0;
-
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
-
- /* Read the data via the internal pipeline through CDSN IO
- register, see Pipelined Read Operations 11.3 */
- MemReadDOC(docptr, buf, len);
-
- /* Read the ECC data following raw data */
- MemReadDOC(docptr, eccbuf, 4);
- eccbuf[4] = ReadDOC(docptr, Mplus_LastDataRead);
- eccbuf[5] = ReadDOC(docptr, Mplus_LastDataRead);
-
- /* Flush the pipeline */
- dummy = ReadDOC(docptr, Mplus_ECCConf);
- dummy = ReadDOC(docptr, Mplus_ECCConf);
-
- /* Check the ECC Status */
- if (ReadDOC(docptr, Mplus_ECCConf) & 0x80) {
- int nb_errors;
- /* There was an ECC error */
-#ifdef ECC_DEBUG
- printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
-#endif
-		/* Read the ECC syndrome through the DiskOnChip ECC logic.
-		   The syndrome bytes will be all ZERO when there is no error */
- for (i = 0; i < 6; i++)
- syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
-
- nb_errors = doc_decode_ecc(buf, syndrome);
-#ifdef ECC_DEBUG
- printk("ECC Errors corrected: %x\n", nb_errors);
-#endif
- if (nb_errors < 0) {
- /* We return error, but have actually done the
- read. Not that this can be told to user-space, via
- sys_read(), but at least MTD-aware stuff can know
- about it by checking *retlen */
-#ifdef ECC_DEBUG
- printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n",
- __FILE__, __LINE__, (int)from);
- printk(" syndrome= %02x:%02x:%02x:%02x:%02x:"
- "%02x\n",
- syndrome[0], syndrome[1], syndrome[2],
- syndrome[3], syndrome[4], syndrome[5]);
- printk(" eccbuf= %02x:%02x:%02x:%02x:%02x:"
- "%02x\n",
- eccbuf[0], eccbuf[1], eccbuf[2],
- eccbuf[3], eccbuf[4], eccbuf[5]);
-#endif
- ret = -EIO;
- }
- }
-
-#ifdef PSYCHO_DEBUG
- printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf);
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- return ret;
-}
-
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- int i, before, ret = 0;
- loff_t fto;
- volatile char dummy;
- char eccbuf[6];
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[to >> (this->chipshift)];
-
- /* Don't allow write past end of device */
- if (to >= this->totlen)
- return -EINVAL;
-
- /* Don't allow writes which aren't exactly one block (512 bytes) */
- if ((to & 0x1ff) || (len != 0x200))
- return -EINVAL;
-
- /* Determine position of OOB flags, before or after data */
- before = (this->interleave && (to & 0x200));
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- /* Set device to appropriate plane of flash */
- fto = to;
- WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd);
-
-	/* On interleaved devices the flags for the 2nd 512-byte half come before the data */
- if (before)
- fto -= 2;
-
-	/* issue the Serial Data In command to initiate the Page Program process */
- DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
- DoC_Address(this, 3, fto, 0x00, 0x00);
-
- /* Disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-
- if (before) {
- /* Write the block status BLOCK_USED (0x5555) */
- WriteDOC(0x55, docptr, Mil_CDSN_IO);
- WriteDOC(0x55, docptr, Mil_CDSN_IO);
- }
-
- /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
- WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
-
- MemWriteDOC(docptr, (unsigned char *) buf, len);
-
- /* Write ECC data to flash, the ECC info is generated by
- the DiskOnChip ECC logic see Reed-Solomon EDC/ECC 11.1 */
- DoC_Delay(docptr, 3);
-
- /* Read the ECC data through the DiskOnChip ECC logic */
- for (i = 0; i < 6; i++)
- eccbuf[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
-
- /* Write the ECC data to flash */
- MemWriteDOC(docptr, eccbuf, 6);
-
- if (!before) {
- /* Write the block status BLOCK_USED (0x5555) */
- WriteDOC(0x55, docptr, Mil_CDSN_IO+6);
- WriteDOC(0x55, docptr, Mil_CDSN_IO+7);
- }
-
-#ifdef PSYCHO_DEBUG
- printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
-
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-
- /* Commit the Page Program command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5.*/
- DoC_Command(docptr, NAND_CMD_STATUS, 0);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- DoC_Delay(docptr, 2);
- if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
- printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to);
- /* Error in programming
- FIXME: implement Bad Block Replacement (in nftl.c ??) */
- *retlen = 0;
- ret = -EIO;
- }
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- /* Let the caller know we completed it */
- *retlen = len;
-
- return ret;
-}
-
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- loff_t fofs, base;
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- size_t i, size, got, want;
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OOB_PLACE);
-
- ofs += ops->ooboffs;
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
- DoC_WaitReady(docptr);
-
- /* Maximum of 16 bytes in the OOB region, so limit read to that */
- if (len > 16)
- len = 16;
- got = 0;
- want = len;
-
- for (i = 0; ((i < 3) && (want > 0)); i++) {
- /* Figure out which region we are accessing... */
- fofs = ofs;
- base = ofs & 0xf;
- if (!this->interleave) {
- DoC_Command(docptr, NAND_CMD_READOOB, 0);
- size = 16 - base;
- } else if (base < 6) {
- DoC_Command(docptr, DoC_GetECCOffset(mtd, &fofs), 0);
- size = 6 - base;
- } else if (base < 8) {
- DoC_Command(docptr, DoC_GetFlagsOffset(mtd, &fofs), 0);
- size = 8 - base;
- } else {
- DoC_Command(docptr, DoC_GetHdrOffset(mtd, &fofs), 0);
- size = 16 - base;
- }
- if (size > want)
- size = want;
-
- /* Issue read command */
- DoC_Address(this, 3, fofs, 0, 0x00);
- WriteDOC(0, docptr, Mplus_FlashControl);
- DoC_WaitReady(docptr);
-
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
- MemReadDOC(docptr, &buf[got], size - 2);
- buf[got + size - 2] = ReadDOC(docptr, Mplus_LastDataRead);
- buf[got + size - 1] = ReadDOC(docptr, Mplus_LastDataRead);
-
- ofs += size;
- got += size;
- want -= size;
- }
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- ops->retlen = len;
- return 0;
-}
-
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- volatile char dummy;
- loff_t fofs, base;
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- size_t i, size, got, want;
- int ret = 0;
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OOB_PLACE);
-
- ofs += ops->ooboffs;
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
-
-
- /* Maximum of 16 bytes in the OOB region, so limit write to that */
- if (len > 16)
- len = 16;
- got = 0;
- want = len;
-
- for (i = 0; ((i < 3) && (want > 0)); i++) {
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- /* Figure out which region we are accessing... */
- fofs = ofs;
- base = ofs & 0x0f;
- if (!this->interleave) {
- WriteDOC(NAND_CMD_READOOB, docptr, Mplus_FlashCmd);
- size = 16 - base;
- } else if (base < 6) {
- WriteDOC(DoC_GetECCOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
- size = 6 - base;
- } else if (base < 8) {
- WriteDOC(DoC_GetFlagsOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
- size = 8 - base;
- } else {
- WriteDOC(DoC_GetHdrOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
- size = 16 - base;
- }
- if (size > want)
- size = want;
-
-		/* Issue the Serial Data In command to initiate the Page Program process */
- DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
- DoC_Address(this, 3, fofs, 0, 0x00);
-
- /* Disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-
- /* Write the data via the internal pipeline through CDSN IO
- register, see Pipelined Write Operations 11.2 */
- MemWriteDOC(docptr, (unsigned char *) &buf[got], size);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-
- /* Commit the Page Program command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5.*/
- DoC_Command(docptr, NAND_CMD_STATUS, 0x00);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- DoC_Delay(docptr, 2);
- if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
- printk("MTD: Error 0x%x programming oob at 0x%x\n",
- dummy, (int)ofs);
- /* FIXME: implement Bad Block Replacement */
- ops->retlen = 0;
- ret = -EIO;
- }
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
- ofs += size;
- got += size;
- want -= size;
- }
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- ops->retlen = len;
- return ret;
-}
-
-int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- volatile char dummy;
- struct DiskOnChip *this = mtd->priv;
- __u32 ofs = instr->addr;
- __u32 len = instr->len;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
-
- DoC_CheckASIC(docptr);
-
- if (len != mtd->erasesize)
-		printk(KERN_WARNING "MTD: Erase not right size (%x != %x)\n",
- len, mtd->erasesize);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- instr->state = MTD_ERASE_PENDING;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
-
- DoC_Command(docptr, NAND_CMD_RESET, 0x00);
- DoC_WaitReady(docptr);
-
- DoC_Command(docptr, NAND_CMD_ERASE1, 0);
- DoC_Address(this, 2, ofs, 0, 0x00);
- DoC_Command(docptr, NAND_CMD_ERASE2, 0);
- DoC_WaitReady(docptr);
- instr->state = MTD_ERASING;
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5. */
- DoC_Command(docptr, NAND_CMD_STATUS, 0);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
- printk("MTD: Error 0x%x erasing at 0x%x\n", dummy, ofs);
- /* FIXME: implement Bad Block Replacement (in nftl.c ??) */
- instr->state = MTD_ERASE_FAILED;
- } else {
- instr->state = MTD_ERASE_DONE;
- }
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- mtd_erase_callback(instr);
-
- return 0;
-}
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static void __exit cleanup_doc2001plus(void)
-{
- struct mtd_info *mtd;
- struct DiskOnChip *this;
-
- while ((mtd=docmilpluslist)) {
- this = mtd->priv;
- docmilpluslist = this->nextdoc;
-
- mtd_device_unregister(mtd);
-
- iounmap(this->virtadr);
- kfree(this->chips);
- kfree(mtd);
- }
-}
-
-module_exit(cleanup_doc2001plus);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com> et al.");
-MODULE_DESCRIPTION("Driver for DiskOnChip Millennium Plus");
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
deleted file mode 100644
index 37ef29a73ee..00000000000
--- a/drivers/mtd/devices/docecc.c
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * ECC algorithm for M-systems disk on chip. We use the excellent Reed
- * Solomon code of Phil Karn (karn@ka9q.ampr.org) available under the
- * GNU GPL License. The rest is simply to convert the disk on chip
- * syndrome into a standard syndrome.
- *
- * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
- * Copyright (C) 2000 Netgem S.A.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/doc2000.h>
-
-#define DEBUG_ECC 0
-/* need to undef it (from asm/termbits.h) */
-#undef B0
-
-#define MM 10 /* Symbol size in bits */
-#define KK (1023-4) /* Number of data symbols per block */
-#define B0 510 /* First root of generator polynomial, alpha form */
-#define PRIM 1 /* power of alpha used to generate roots of generator poly */
-#define NN ((1 << MM) - 1)
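-
-/*
- * A quick note on these parameters: with MM = 10 the code works over
- * GF(2^10), so a full codeword has NN = 1023 symbols of which KK = 1019
- * carry data, leaving NN - KK = 4 check symbols.  With no declared
- * erasures, at most floor((NN - KK) / 2) = 2 symbol errors per codeword
- * can be corrected (see eras_dec_rs() below).
- */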
-
-typedef unsigned short dtype;
-
-/* 1+x^3+x^10 */
-static const int Pp[MM+1] = { 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1 };
-
-/* This defines the type used to store an element of the Galois Field
- * used by the code. Make sure this is something larger than a char
- * if anything larger than GF(256) is used.
- *
- * Note: unsigned char will work up to GF(256) but int seems to run
- * faster on the Pentium.
- */
-typedef int gf;
-
-/* No legal value in index form represents zero, so
- * we need a special value for this purpose
- */
-#define A0 (NN)
-
-/* Compute x % NN, where NN is 2**MM - 1,
- * without a slow divide
- */
-static inline gf
-modnn(int x)
-{
- while (x >= NN) {
- x -= NN;
- x = (x >> MM) + (x & NN);
- }
- return x;
-}
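-
-/*
- * For example, with NN = 1023: modnn(1100) first subtracts NN, giving 77,
- * then folds (77 >> MM) + (77 & NN) = 0 + 77 = 77, which is already below
- * NN and equals 1100 % 1023.
- */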
-
-#define CLEAR(a,n) {\
-int ci;\
-for(ci=(n)-1;ci >=0;ci--)\
-(a)[ci] = 0;\
-}
-
-#define COPY(a,b,n) {\
-int ci;\
-for(ci=(n)-1;ci >=0;ci--)\
-(a)[ci] = (b)[ci];\
-}
-
-#define COPYDOWN(a,b,n) {\
-int ci;\
-for(ci=(n)-1;ci >=0;ci--)\
-(a)[ci] = (b)[ci];\
-}
-
-#define Ldec 1
-
-/* generate GF(2**m) from the irreducible polynomial p(X) in Pp[0]..Pp[m]
- lookup tables: index->polynomial form alpha_to[] contains j=alpha**i;
- polynomial form -> index form index_of[j=alpha**i] = i
- alpha=2 is the primitive element of GF(2**m)
- HARI's COMMENT: (4/13/94) alpha_to[] can be used as follows:
- Let @ represent the primitive element commonly called "alpha" that
- is the root of the primitive polynomial p(x). Then in GF(2^m), for any
- 0 <= i <= 2^m-2,
- @^i = a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1)
- where the binary vector (a(0),a(1),a(2),...,a(m-1)) is the representation
- of the integer "alpha_to[i]" with a(0) being the LSB and a(m-1) the MSB. Thus for
- example the polynomial representation of @^5 would be given by the binary
- representation of the integer "alpha_to[5]".
- Similarly, index_of[] can be used as follows:
- As above, let @ represent the primitive element of GF(2^m) that is
- the root of the primitive polynomial p(x). In order to find the power
- of @ (alpha) that has the polynomial representation
- a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1)
- we consider the integer "i" whose binary representation with a(0) being LSB
- and a(m-1) MSB is (a(0),a(1),...,a(m-1)) and locate the entry
- "index_of[i]". Now, @^index_of[i] is that element whose polynomial
- representation is (a(0),a(1),a(2),...,a(m-1)).
- NOTE:
- The element alpha_to[2^m-1] = 0 always signifying that the
- representation of "@^infinity" = 0 is (0,0,0,...,0).
- Similarly, the element index_of[0] = A0 always signifying
- that the power of alpha which has the polynomial representation
- (0,0,...,0) is "infinity".
-
-*/
-
-static void
-generate_gf(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1])
-{
- register int i, mask;
-
- mask = 1;
- Alpha_to[MM] = 0;
- for (i = 0; i < MM; i++) {
- Alpha_to[i] = mask;
- Index_of[Alpha_to[i]] = i;
- /* If Pp[i] == 1 then, term @^i occurs in poly-repr of @^MM */
- if (Pp[i] != 0)
- Alpha_to[MM] ^= mask; /* Bit-wise EXOR operation */
- mask <<= 1; /* single left-shift */
- }
- Index_of[Alpha_to[MM]] = MM;
- /*
- * Have obtained poly-repr of @^MM. Poly-repr of @^(i+1) is given by
- * poly-repr of @^i shifted left one-bit and accounting for any @^MM
- * term that may occur when poly-repr of @^i is shifted.
- */
- mask >>= 1;
- for (i = MM + 1; i < NN; i++) {
- if (Alpha_to[i - 1] >= mask)
- Alpha_to[i] = Alpha_to[MM] ^ ((Alpha_to[i - 1] ^ mask) << 1);
- else
- Alpha_to[i] = Alpha_to[i - 1] << 1;
- Index_of[Alpha_to[i]] = i;
- }
- Index_of[0] = A0;
- Alpha_to[NN] = 0;
-}
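-
-/*
- * The two tables built above make field arithmetic cheap: for nonzero a and
- * b in polynomial form, their product is Alpha_to[modnn(Index_of[a] +
- * Index_of[b])].  This is exactly the pattern the decoder below uses every
- * time it evaluates a polynomial over GF(2^10).
- */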
-
-/*
- * Performs ERRORS+ERASURES decoding of RS codes. bb[] is the content
- * of the feedback shift register after having processed the data and
- * the ECC.
- *
- * Return number of symbols corrected, or -1 if codeword is illegal
- * or uncorrectable. If eras_pos is non-null, the detected error locations
- * are written back. NOTE! This array must be at least NN-KK elements long.
- * The corrected data are written in eras_val[]. They must be xor with the data
- * to retrieve the correct data : data[erase_pos[i]] ^= erase_val[i] .
- *
- * First "no_eras" erasures are declared by the calling program. Then, the
- * maximum # of errors correctable is t_after_eras = floor((NN-KK-no_eras)/2).
- * If the number of channel errors is not greater than "t_after_eras" the
- * transmitted codeword will be recovered. Details of algorithm can be found
- * in R. Blahut's "Theory ... of Error-Correcting Codes".
-
- * Warning: the eras_pos[] array must not contain duplicate entries; decoder failure
- * will result. The decoder *could* check for this condition, but it would involve
- * extra time on every decoding operation.
- * */
-static int
-eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
- gf bb[NN - KK + 1], gf eras_val[NN-KK], int eras_pos[NN-KK],
- int no_eras)
-{
- int deg_lambda, el, deg_omega;
- int i, j, r,k;
- gf u,q,tmp,num1,num2,den,discr_r;
- gf lambda[NN-KK + 1], s[NN-KK + 1]; /* Err+Eras Locator poly
- * and syndrome poly */
- gf b[NN-KK + 1], t[NN-KK + 1], omega[NN-KK + 1];
- gf root[NN-KK], reg[NN-KK + 1], loc[NN-KK];
- int syn_error, count;
-
- syn_error = 0;
- for(i=0;i<NN-KK;i++)
- syn_error |= bb[i];
-
- if (!syn_error) {
- /* if remainder is zero, data[] is a codeword and there are no
- * errors to correct. So return data[] unmodified
- */
- count = 0;
- goto finish;
- }
-
- for(i=1;i<=NN-KK;i++){
- s[i] = bb[0];
- }
- for(j=1;j<NN-KK;j++){
- if(bb[j] == 0)
- continue;
- tmp = Index_of[bb[j]];
-
- for(i=1;i<=NN-KK;i++)
- s[i] ^= Alpha_to[modnn(tmp + (B0+i-1)*PRIM*j)];
- }
-
- /* undo the feedback register implicit multiplication and convert
- syndromes to index form */
-
- for(i=1;i<=NN-KK;i++) {
- tmp = Index_of[s[i]];
- if (tmp != A0)
- tmp = modnn(tmp + 2 * KK * (B0+i-1)*PRIM);
- s[i] = tmp;
- }
-
- CLEAR(&lambda[1],NN-KK);
- lambda[0] = 1;
-
- if (no_eras > 0) {
- /* Init lambda to be the erasure locator polynomial */
- lambda[1] = Alpha_to[modnn(PRIM * eras_pos[0])];
- for (i = 1; i < no_eras; i++) {
- u = modnn(PRIM*eras_pos[i]);
- for (j = i+1; j > 0; j--) {
- tmp = Index_of[lambda[j - 1]];
- if(tmp != A0)
- lambda[j] ^= Alpha_to[modnn(u + tmp)];
- }
- }
-#if DEBUG_ECC >= 1
- /* Test code that verifies the erasure locator polynomial just constructed
- Needed only for decoder debugging. */
-
- /* find roots of the erasure location polynomial */
- for(i=1;i<=no_eras;i++)
- reg[i] = Index_of[lambda[i]];
- count = 0;
- for (i = 1,k=NN-Ldec; i <= NN; i++,k = modnn(NN+k-Ldec)) {
- q = 1;
- for (j = 1; j <= no_eras; j++)
- if (reg[j] != A0) {
- reg[j] = modnn(reg[j] + j);
- q ^= Alpha_to[reg[j]];
- }
- if (q != 0)
- continue;
- /* store root and error location number indices */
- root[count] = i;
- loc[count] = k;
- count++;
- }
- if (count != no_eras) {
- printf("\n lambda(x) is WRONG\n");
- count = -1;
- goto finish;
- }
-#if DEBUG_ECC >= 2
- printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n");
- for (i = 0; i < count; i++)
- printf("%d ", loc[i]);
- printf("\n");
-#endif
-#endif
- }
- for(i=0;i<NN-KK+1;i++)
- b[i] = Index_of[lambda[i]];
-
- /*
- * Begin Berlekamp-Massey algorithm to determine error+erasure
- * locator polynomial
- */
- r = no_eras;
- el = no_eras;
- while (++r <= NN-KK) { /* r is the step number */
- /* Compute discrepancy at the r-th step in poly-form */
- discr_r = 0;
- for (i = 0; i < r; i++){
- if ((lambda[i] != 0) && (s[r - i] != A0)) {
- discr_r ^= Alpha_to[modnn(Index_of[lambda[i]] + s[r - i])];
- }
- }
- discr_r = Index_of[discr_r]; /* Index form */
- if (discr_r == A0) {
- /* 2 lines below: B(x) <-- x*B(x) */
- COPYDOWN(&b[1],b,NN-KK);
- b[0] = A0;
- } else {
- /* 7 lines below: T(x) <-- lambda(x) - discr_r*x*b(x) */
- t[0] = lambda[0];
- for (i = 0 ; i < NN-KK; i++) {
- if(b[i] != A0)
- t[i+1] = lambda[i+1] ^ Alpha_to[modnn(discr_r + b[i])];
- else
- t[i+1] = lambda[i+1];
- }
- if (2 * el <= r + no_eras - 1) {
- el = r + no_eras - el;
- /*
- * 2 lines below: B(x) <-- inv(discr_r) *
- * lambda(x)
- */
- for (i = 0; i <= NN-KK; i++)
- b[i] = (lambda[i] == 0) ? A0 : modnn(Index_of[lambda[i]] - discr_r + NN);
- } else {
- /* 2 lines below: B(x) <-- x*B(x) */
- COPYDOWN(&b[1],b,NN-KK);
- b[0] = A0;
- }
- COPY(lambda,t,NN-KK+1);
- }
- }
-
- /* Convert lambda to index form and compute deg(lambda(x)) */
- deg_lambda = 0;
- for(i=0;i<NN-KK+1;i++){
- lambda[i] = Index_of[lambda[i]];
- if(lambda[i] != A0)
- deg_lambda = i;
- }
- /*
- * Find roots of the error+erasure locator polynomial by Chien
- * Search
- */
- COPY(&reg[1],&lambda[1],NN-KK);
- count = 0; /* Number of roots of lambda(x) */
- for (i = 1,k=NN-Ldec; i <= NN; i++,k = modnn(NN+k-Ldec)) {
- q = 1;
- for (j = deg_lambda; j > 0; j--){
- if (reg[j] != A0) {
- reg[j] = modnn(reg[j] + j);
- q ^= Alpha_to[reg[j]];
- }
- }
- if (q != 0)
- continue;
- /* store root (index-form) and error location number */
- root[count] = i;
- loc[count] = k;
- /* If we've already found max possible roots,
- * abort the search to save time
- */
- if(++count == deg_lambda)
- break;
- }
- if (deg_lambda != count) {
- /*
- * deg(lambda) unequal to number of roots => uncorrectable
- * error detected
- */
- count = -1;
- goto finish;
- }
- /*
- * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
- * x**(NN-KK)). in index form. Also find deg(omega).
- */
- deg_omega = 0;
- for (i = 0; i < NN-KK;i++){
- tmp = 0;
- j = (deg_lambda < i) ? deg_lambda : i;
- for(;j >= 0; j--){
- if ((s[i + 1 - j] != A0) && (lambda[j] != A0))
- tmp ^= Alpha_to[modnn(s[i + 1 - j] + lambda[j])];
- }
- if(tmp != 0)
- deg_omega = i;
- omega[i] = Index_of[tmp];
- }
- omega[NN-KK] = A0;
-
- /*
- * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
- * inv(X(l))**(B0-1) and den = lambda_pr(inv(X(l))) all in poly-form
- */
- for (j = count-1; j >=0; j--) {
- num1 = 0;
- for (i = deg_omega; i >= 0; i--) {
- if (omega[i] != A0)
- num1 ^= Alpha_to[modnn(omega[i] + i * root[j])];
- }
- num2 = Alpha_to[modnn(root[j] * (B0 - 1) + NN)];
- den = 0;
-
- /* lambda[i+1] for i even is the formal derivative lambda_pr of lambda[i] */
- for (i = min(deg_lambda,NN-KK-1) & ~1; i >= 0; i -=2) {
- if(lambda[i+1] != A0)
- den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])];
- }
- if (den == 0) {
-#if DEBUG_ECC >= 1
- printf("\n ERROR: denominator = 0\n");
-#endif
- /* Convert to dual- basis */
- count = -1;
- goto finish;
- }
- /* Apply error to data */
- if (num1 != 0) {
- eras_val[j] = Alpha_to[modnn(Index_of[num1] + Index_of[num2] + NN - Index_of[den])];
- } else {
- eras_val[j] = 0;
- }
- }
- finish:
- for(i=0;i<count;i++)
- eras_pos[i] = loc[i];
- return count;
-}
-
-/***************************************************************************/
-/* The DOC specific code begins here */
-
-#define SECTOR_SIZE 512
-/* The sector bytes are packed into NB_DATA MM bits words */
-#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / MM)
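-
-/*
- * Packing check: a 512 byte sector plus the parity byte is (512 + 1) * 8 =
- * 4104 bits; rounded up to a multiple of MM = 10 this gives NB_DATA = 411
- * ten-bit symbols, well below the KK = 1019 data symbols of a full
- * codeword, so the code is used in shortened form.
- */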
-
-/*
- * Correct the errors in 'sector[]' by using 'ecc1[]' which is the
- * content of the feedback shift register applied to the sector and
- * the ECC. Return the number of errors corrected (and correct them in
- * sector), or -1 on error
- */
-int doc_decode_ecc(unsigned char sector[SECTOR_SIZE], unsigned char ecc1[6])
-{
- int parity, i, nb_errors;
- gf bb[NN - KK + 1];
- gf error_val[NN-KK];
- int error_pos[NN-KK], pos, bitpos, index, val;
- dtype *Alpha_to, *Index_of;
-
- /* init log and exp tables here to save memory. However, it is slower */
- Alpha_to = kmalloc((NN + 1) * sizeof(dtype), GFP_KERNEL);
- if (!Alpha_to)
- return -1;
-
- Index_of = kmalloc((NN + 1) * sizeof(dtype), GFP_KERNEL);
- if (!Index_of) {
- kfree(Alpha_to);
- return -1;
- }
-
- generate_gf(Alpha_to, Index_of);
-
- parity = ecc1[1];
-
- bb[0] = (ecc1[4] & 0xff) | ((ecc1[5] & 0x03) << 8);
- bb[1] = ((ecc1[5] & 0xfc) >> 2) | ((ecc1[2] & 0x0f) << 6);
- bb[2] = ((ecc1[2] & 0xf0) >> 4) | ((ecc1[3] & 0x3f) << 4);
- bb[3] = ((ecc1[3] & 0xc0) >> 6) | ((ecc1[0] & 0xff) << 2);
-
- nb_errors = eras_dec_rs(Alpha_to, Index_of, bb,
- error_val, error_pos, 0);
- if (nb_errors <= 0)
- goto the_end;
-
- /* correct the errors */
- for(i=0;i<nb_errors;i++) {
- pos = error_pos[i];
- if (pos >= NB_DATA && pos < KK) {
- nb_errors = -1;
- goto the_end;
- }
- if (pos < NB_DATA) {
- /* extract bit position (MSB first) */
- pos = 10 * (NB_DATA - 1 - pos) - 6;
- /* now correct the following 10 bits. At most two bytes
- can be modified since pos is even */
- index = (pos >> 3) ^ 1;
- bitpos = pos & 7;
- if ((index >= 0 && index < SECTOR_SIZE) ||
- index == (SECTOR_SIZE + 1)) {
- val = error_val[i] >> (2 + bitpos);
- parity ^= val;
- if (index < SECTOR_SIZE)
- sector[index] ^= val;
- }
- index = ((pos >> 3) + 1) ^ 1;
- bitpos = (bitpos + 10) & 7;
- if (bitpos == 0)
- bitpos = 8;
- if ((index >= 0 && index < SECTOR_SIZE) ||
- index == (SECTOR_SIZE + 1)) {
- val = error_val[i] << (8 - bitpos);
- parity ^= val;
- if (index < SECTOR_SIZE)
- sector[index] ^= val;
- }
- }
- }
-
- /* use parity to test extra errors */
- if ((parity & 0xff) != 0)
- nb_errors = -1;
-
- the_end:
- kfree(Alpha_to);
- kfree(Index_of);
- return nb_errors;
-}
-
-EXPORT_SYMBOL_GPL(doc_decode_ecc);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Fabrice Bellard <fabrice.bellard@netgem.com>");
-MODULE_DESCRIPTION("ECC code for correcting errors detected by DiskOnChip 2000 and Millennium ECC hardware");
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
new file mode 100644
index 00000000000..91a169c44b3
--- /dev/null
+++ b/drivers/mtd/devices/docg3.c
@@ -0,0 +1,2143 @@
+/*
+ * Handles the M-Systems DiskOnChip G3 chip
+ *
+ * Copyright (C) 2011 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/bitmap.h>
+#include <linux/bitrev.h>
+#include <linux/bch.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define CREATE_TRACE_POINTS
+#include "docg3.h"
+
+/*
+ * This driver handles the DiskOnChip G3 flash memory.
+ *
+ * As no specification is available from M-Systems/Sandisk, this driver lacks
+ * several functions available on the chip, such as:
+ * - IPL write
+ *
+ * The bus data width (8 bits versus 16 bits) is not handled (if_cfg flag), and
+ * the driver assumes a 16-bit data bus.
+ *
+ * DocG3 relies on 2 ECC algorithms, which are handled in hardware:
+ * - a 1-byte Hamming code stored in the OOB for each page
+ * - a 7-byte BCH code stored in the OOB for each page
+ * The BCH ECC is:
+ * - BCH is in GF(2^14)
+ * - BCH is over data of 520 bytes (512 page + 7 page_info bytes
+ *       + 1 hamming byte)
+ * - BCH can correct up to 4 bits (t = 4)
+ * - BCH syndromes are calculated in hardware, and checked in hardware as well
+ *
+ */
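+
+/*
+ * A quick sanity check on the figures above: a BCH code over GF(2^14) with
+ * t = 4 needs at most 14 * 4 = 56 parity bits, which matches the 7 BCH bytes
+ * stored in the OOB area of each page.
+ */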
+
+static unsigned int reliable_mode;
+module_param(reliable_mode, uint, 0);
+MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
+ "2=reliable) : MLC normal operations are in normal mode");
+
+/**
+ * struct docg3_oobinfo - DiskOnChip G3 OOB layout
+ * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC)
+ * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC)
+ * @oobfree: free pageinfo bytes (bytes 0 to 6, and byte 15)
+ * @oobavail: 8 available bytes remaining once the ECC bytes are reserved
+ */
+static struct nand_ecclayout docg3_oobinfo = {
+ .eccbytes = 8,
+ .eccpos = {7, 8, 9, 10, 11, 12, 13, 14},
+ .oobfree = {{0, 7}, {15, 1} },
+ .oobavail = 8,
+};
+
+static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
+{
+ u8 val = readb(docg3->cascade->base + reg);
+
+ trace_docg3_io(0, 8, reg, (int)val);
+ return val;
+}
+
+static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
+{
+ u16 val = readw(docg3->cascade->base + reg);
+
+ trace_docg3_io(0, 16, reg, (int)val);
+ return val;
+}
+
+static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
+{
+ writeb(val, docg3->cascade->base + reg);
+ trace_docg3_io(1, 8, reg, val);
+}
+
+static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
+{
+ writew(val, docg3->cascade->base + reg);
+ trace_docg3_io(1, 16, reg, val);
+}
+
+static inline void doc_flash_command(struct docg3 *docg3, u8 cmd)
+{
+ doc_writeb(docg3, cmd, DOC_FLASHCOMMAND);
+}
+
+static inline void doc_flash_sequence(struct docg3 *docg3, u8 seq)
+{
+ doc_writeb(docg3, seq, DOC_FLASHSEQUENCE);
+}
+
+static inline void doc_flash_address(struct docg3 *docg3, u8 addr)
+{
+ doc_writeb(docg3, addr, DOC_FLASHADDRESS);
+}
+
+static char const * const part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+
+static int doc_register_readb(struct docg3 *docg3, int reg)
+{
+ u8 val;
+
+ doc_writew(docg3, reg, DOC_READADDRESS);
+ val = doc_readb(docg3, reg);
+ doc_vdbg("Read register %04x : %02x\n", reg, val);
+ return val;
+}
+
+static int doc_register_readw(struct docg3 *docg3, int reg)
+{
+ u16 val;
+
+ doc_writew(docg3, reg, DOC_READADDRESS);
+ val = doc_readw(docg3, reg);
+ doc_vdbg("Read register %04x : %04x\n", reg, val);
+ return val;
+}
+
+/**
+ * doc_delay - delay docg3 operations
+ * @docg3: the device
+ * @nbNOPs: the number of NOPs to issue
+ *
+ * As no specification is available, the right timings between chip commands are
+ * unknown. The only available piece of information is the number of NOPs
+ * observed on a working docg3 chip.
+ * Therefore, doc_delay relies on a busy loop of NOPs, instead of the
+ * scheduler-friendlier msleep() or the blocking mdelay().
+ */
+static void doc_delay(struct docg3 *docg3, int nbNOPs)
+{
+ int i;
+
+ doc_vdbg("NOP x %d\n", nbNOPs);
+ for (i = 0; i < nbNOPs; i++)
+ doc_writeb(docg3, 0, DOC_NOP);
+}
+
+static int is_prot_seq_error(struct docg3 *docg3)
+{
+ int ctrl;
+
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return ctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR);
+}
+
+static int doc_is_ready(struct docg3 *docg3)
+{
+ int ctrl;
+
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return ctrl & DOC_CTRL_FLASHREADY;
+}
+
+static int doc_wait_ready(struct docg3 *docg3)
+{
+ int maxWaitCycles = 100;
+
+ do {
+ doc_delay(docg3, 4);
+ cpu_relax();
+ } while (!doc_is_ready(docg3) && maxWaitCycles--);
+ doc_delay(docg3, 2);
+ if (maxWaitCycles > 0)
+ return 0;
+ else
+ return -EIO;
+}
+
+static int doc_reset_seq(struct docg3 *docg3)
+{
+ int ret;
+
+ doc_writeb(docg3, 0x10, DOC_FLASHCONTROL);
+ doc_flash_sequence(docg3, DOC_SEQ_RESET);
+ doc_flash_command(docg3, DOC_CMD_RESET);
+ doc_delay(docg3, 2);
+ ret = doc_wait_ready(docg3);
+
+ doc_dbg("doc_reset_seq() -> isReady=%s\n", ret ? "false" : "true");
+ return ret;
+}
+
+/**
+ * doc_read_data_area - Read data from data area
+ * @docg3: the device
+ * @buf: the buffer to fill in (might be NULL for dummy reads)
+ * @len: the length to read
+ * @first: first time read, DOC_READADDRESS should be set
+ *
+ * Reads bytes from flash data. Handles the single byte / even bytes reads.
+ */
+static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
+ int first)
+{
+ int i, cdr, len4;
+ u16 data16, *dst16;
+ u8 data8, *dst8;
+
+ doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
+ cdr = len & 0x1;
+ len4 = len - cdr;
+
+ if (first)
+ doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
+ dst16 = buf;
+ for (i = 0; i < len4; i += 2) {
+ data16 = doc_readw(docg3, DOC_IOSPACE_DATA);
+ if (dst16) {
+ *dst16 = data16;
+ dst16++;
+ }
+ }
+
+ if (cdr) {
+ doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
+ DOC_READADDRESS);
+ doc_delay(docg3, 1);
+ dst8 = (u8 *)dst16;
+ for (i = 0; i < cdr; i++) {
+ data8 = doc_readb(docg3, DOC_IOSPACE_DATA);
+ if (dst8) {
+ *dst8 = data8;
+ dst8++;
+ }
+ }
+ }
+}
+
+/**
+ * doc_write_data_area - Write data into data area
+ * @docg3: the device
+ * @buf: the buffer to get input bytes from
+ * @len: the length to write
+ *
+ * Writes bytes into flash data. Handles the single byte / even bytes writes.
+ */
+static void doc_write_data_area(struct docg3 *docg3, const void *buf, int len)
+{
+ int i, cdr, len4;
+ u16 *src16;
+ u8 *src8;
+
+ doc_dbg("doc_write_data_area(buf=%p, len=%d)\n", buf, len);
+ cdr = len & 0x3;
+ len4 = len - cdr;
+
+ doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
+ src16 = (u16 *)buf;
+ for (i = 0; i < len4; i += 2) {
+ doc_writew(docg3, *src16, DOC_IOSPACE_DATA);
+ src16++;
+ }
+
+ src8 = (u8 *)src16;
+ for (i = 0; i < cdr; i++) {
+ doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
+ DOC_READADDRESS);
+ doc_writeb(docg3, *src8, DOC_IOSPACE_DATA);
+ src8++;
+ }
+}
+
+/**
+ * doc_set_reliable_mode - Sets the flash to normal or reliable data mode
+ * @docg3: the device
+ *
+ * The reliable data mode is a bit slower than the fast mode, but less errors
+ * occur. Entering the reliable mode cannot be done without entering the fast
+ * mode first.
+ *
+ * In reliable mode, pages 2*n and 2*n+1 are clones. Writing to page 0 of blocks
+ * (4,5) makes the hardware also write to page 1 of blocks (4,5). Reading
+ * from page 0 of blocks (4,5) or from page 1 of blocks (4,5) gives the same
+ * result, which is a logical AND between bytes from page 0 and page 1 (which is
+ * consistent with the fact that writing to a page is _clearing_ bits of that
+ * page).
+ */
+static void doc_set_reliable_mode(struct docg3 *docg3)
+{
+ static char *strmode[] = { "normal", "fast", "reliable", "invalid" };
+
+ doc_dbg("doc_set_reliable_mode(%s)\n", strmode[docg3->reliable]);
+ switch (docg3->reliable) {
+ case 0:
+ break;
+ case 1:
+ doc_flash_sequence(docg3, DOC_SEQ_SET_FASTMODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ break;
+ case 2:
+ doc_flash_sequence(docg3, DOC_SEQ_SET_RELIABLEMODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+ break;
+ default:
+ doc_err("doc_set_reliable_mode(): invalid mode\n");
+ break;
+ }
+ doc_delay(docg3, 2);
+}
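+
+/*
+ * Note that the cloning halves the usable capacity: a block exposes 32
+ * logical pages instead of 64 (see calc_block_sector() below), and as a read
+ * returns the bitwise AND of the two copies, a bit reads back as 1 only if it
+ * is still 1 in both clone pages.
+ */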
+
+/**
+ * doc_set_asic_mode - Set the ASIC mode
+ * @docg3: the device
+ * @mode: the mode
+ *
+ * The ASIC can work in 3 modes :
+ * - RESET: all registers are zeroed
+ * - NORMAL: receives and handles commands
+ * - POWERDOWN: minimal poweruse, flash parts shut off
+ */
+static void doc_set_asic_mode(struct docg3 *docg3, u8 mode)
+{
+ int i;
+
+ for (i = 0; i < 12; i++)
+ doc_readb(docg3, DOC_IOSPACE_IPL);
+
+ mode |= DOC_ASICMODE_MDWREN;
+ doc_dbg("doc_set_asic_mode(%02x)\n", mode);
+ doc_writeb(docg3, mode, DOC_ASICMODE);
+ doc_writeb(docg3, ~mode, DOC_ASICMODECONFIRM);
+ doc_delay(docg3, 1);
+}
+
+/**
+ * doc_set_device_id - Sets the devices id for cascaded G3 chips
+ * @docg3: the device
+ * @id: the chip to select (amongst 0, 1, 2, 3)
+ *
+ * There can be 4 cascaded G3 chips. This function selects the one which will
+ * be the active one.
+ */
+static void doc_set_device_id(struct docg3 *docg3, int id)
+{
+ u8 ctrl;
+
+ doc_dbg("doc_set_device_id(%d)\n", id);
+ doc_writeb(docg3, id, DOC_DEVICESELECT);
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+
+ ctrl &= ~DOC_CTRL_VIOLATION;
+ ctrl |= DOC_CTRL_CE;
+ doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
+}
+
+/**
+ * doc_set_extra_page_mode - Change flash page layout
+ * @docg3: the device
+ *
+ * Normally, the flash page is split into the data (512 bytes) and the out of
+ * band data (16 bytes). For each, 4 more bytes can be accessed, where the wear
+ * leveling counters are stored. To access this last area of 4 bytes, a special
+ * mode must be input to the flash ASIC.
+ *
+ * Returns 0 if no error occurred, -EIO else.
+ */
+static int doc_set_extra_page_mode(struct docg3 *docg3)
+{
+ int fctrl;
+
+ doc_dbg("doc_set_extra_page_mode()\n");
+ doc_flash_sequence(docg3, DOC_SEQ_PAGE_SIZE_532);
+ doc_flash_command(docg3, DOC_CMD_PAGE_SIZE_532);
+ doc_delay(docg3, 2);
+
+ fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ if (fctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR))
+ return -EIO;
+ else
+ return 0;
+}
+
+/**
+ * doc_setup_addr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ */
+static void doc_setup_addr_sector(struct docg3 *docg3, int sector)
+{
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+}
+
+/**
+ * doc_setup_writeaddr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ * @ofs: the offset in the page, between 0 and (512 + 16 + 512)
+ */
+static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)
+{
+ ofs = ofs >> 2;
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, ofs & 0xff);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+}
+
+/**
+ * doc_read_seek - Set both flash planes to the specified block, page for reading
+ * @docg3: the device
+ * @block0: the first plane block index
+ * @block1: the second plane block index
+ * @page: the page index within the block
+ * @wear: if true, read will occur on the 4 extra bytes of the wear area
+ * @ofs: offset in page to read
+ *
+ * Programs the flash even and odd planes to the specific block and page.
+ * Alternatively, programs the flash to the wear area of the specified page.
+ */
+static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page,
+ int wear, int ofs)
+{
+ int sector, ret = 0;
+
+ doc_dbg("doc_seek(blocks=(%d,%d), page=%d, ofs=%d, wear=%d)\n",
+ block0, block1, page, ofs, wear);
+
+ if (!wear && (ofs < 2 * DOC_LAYOUT_PAGE_SIZE)) {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
+ doc_delay(docg3, 2);
+ } else {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
+ doc_delay(docg3, 2);
+ }
+
+ doc_set_reliable_mode(docg3);
+ if (wear)
+ ret = doc_set_extra_page_mode(docg3);
+ if (ret)
+ goto out;
+
+ doc_flash_sequence(docg3, DOC_SEQ_READ);
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+
+ sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ doc_delay(docg3, 1);
+
+out:
+ return ret;
+}
+
+/**
+ * doc_write_seek - Set both flash planes to the specified block, page for writing
+ * @docg3: the device
+ * @block0: the first plane block index
+ * @block1: the second plane block index
+ * @page: the page index within the block
+ * @ofs: offset in page to write
+ *
+ * Programs the flash even and odd planes to the specific block and page.
+ * Alternatively, programs the flash to the wear area of the specified page.
+ */
+static int doc_write_seek(struct docg3 *docg3, int block0, int block1, int page,
+ int ofs)
+{
+ int ret = 0, sector;
+
+ doc_dbg("doc_write_seek(blocks=(%d,%d), page=%d, ofs=%d)\n",
+ block0, block1, page, ofs);
+
+ doc_set_reliable_mode(docg3);
+
+ if (ofs < 2 * DOC_LAYOUT_PAGE_SIZE) {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
+ doc_delay(docg3, 2);
+ } else {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
+ doc_delay(docg3, 2);
+ }
+
+ doc_flash_sequence(docg3, DOC_SEQ_PAGE_SETUP);
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_setup_writeaddr_sector(docg3, sector, ofs);
+
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE3);
+ doc_delay(docg3, 2);
+ ret = doc_wait_ready(docg3);
+ if (ret)
+ goto out;
+
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+ sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_setup_writeaddr_sector(docg3, sector, ofs);
+ doc_delay(docg3, 1);
+
+out:
+ return ret;
+}
+
+
+/**
+ * doc_read_page_ecc_init - Initialize hardware ECC engine
+ * @docg3: the device
+ * @len: the number of bytes covered by the ECC (BCH covered)
+ *
+ * The function initializes the hardware ECC engine to compute both the Hamming
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
+ *
+ * Return 0 if succeeded, -EIO on error
+ */
+static int doc_read_page_ecc_init(struct docg3 *docg3, int len)
+{
+ doc_writew(docg3, DOC_ECCCONF0_READ_MODE
+ | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
+ | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
+ DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+ doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return doc_wait_ready(docg3);
+}
+
+/**
+ * doc_write_page_ecc_init - Initialize hardware BCH ECC engine
+ * @docg3: the device
+ * @len: the number of bytes covered by the ECC (BCH covered)
+ *
+ * The function initializes the hardware ECC engine to compute both the Hamming
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
+ *
+ * Return 0 if succeeded, -EIO on error
+ */
+static int doc_write_page_ecc_init(struct docg3 *docg3, int len)
+{
+ doc_writew(docg3, DOC_ECCCONF0_WRITE_MODE
+ | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
+ | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
+ DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+ doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return doc_wait_ready(docg3);
+}
+
+/**
+ * doc_ecc_disable - Disable Hamming and BCH ECC hardware calculator
+ * @docg3: the device
+ *
+ * Disables the hardware ECC generator and checker, for unchecked reads (such as
+ * when reading only the OOB or the write status byte).
+ */
+static void doc_ecc_disable(struct docg3 *docg3)
+{
+ doc_writew(docg3, DOC_ECCCONF0_READ_MODE, DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+}
+
+/**
+ * doc_hamming_ecc_init - Initialize hardware Hamming ECC engine
+ * @docg3: the device
+ * @nb_bytes: the number of bytes covered by the ECC (Hamming covered)
+ *
+ * This function programs the ECC hardware to compute the hamming code on the
+ * last provided N bytes to the hardware generator.
+ */
+static void doc_hamming_ecc_init(struct docg3 *docg3, int nb_bytes)
+{
+ u8 ecc_conf1;
+
+ ecc_conf1 = doc_register_readb(docg3, DOC_ECCCONF1);
+ ecc_conf1 &= ~DOC_ECCCONF1_HAMMING_BITS_MASK;
+ ecc_conf1 |= (nb_bytes & DOC_ECCCONF1_HAMMING_BITS_MASK);
+ doc_writeb(docg3, ecc_conf1, DOC_ECCCONF1);
+}
+
+/**
+ * doc_ecc_bch_fix_data - Fix the data read from flash, if need be
+ * @docg3: the device
+ * @buf: the buffer of read data (512 + 7 + 1 bytes)
+ * @hwecc: the hardware calculated ECC.
+ * It's in fact recv_ecc ^ calc_ecc, where recv_ecc was read from OOB
+ * area data, and calc_ecc the ECC calculated by the hardware generator.
+ *
+ * Checks if the received data matches the ECC, and if an error is detected,
+ * tries to fix the bit flips (at most 4) in the buffer buf. As the docg3
+ * understands the (data, ecc, syndromes) in an inverted order in comparison to
+ * the BCH library, the function reverses the order of bits (ie. bit7 and bit0,
+ * bit6 and bit 1, ...) for all ECC data.
+ *
+ * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
+ * algorithm is used to decode this. However the hw operates on page
+ * data in a bit order that is the reverse of that of the bch alg,
+ * requiring that the bits be reversed on the result. Thanks to Ivan
+ * Djelic for his analysis.
+ *
+ * Returns number of fixed bits (0, 1, 2, 3, 4) or -EBADMSG if too many bit
+ * errors were detected and cannot be fixed.
+ */
+static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
+{
+ u8 ecc[DOC_ECC_BCH_SIZE];
+ int errorpos[DOC_ECC_BCH_T], i, numerrs;
+
+ for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
+ ecc[i] = bitrev8(hwecc[i]);
+ numerrs = decode_bch(docg3->cascade->bch, NULL,
+ DOC_ECC_BCH_COVERED_BYTES,
+ NULL, ecc, NULL, errorpos);
+ BUG_ON(numerrs == -EINVAL);
+ if (numerrs < 0)
+ goto out;
+
+ for (i = 0; i < numerrs; i++)
+ errorpos[i] = (errorpos[i] & ~7) | (7 - (errorpos[i] & 7));
+ for (i = 0; i < numerrs; i++)
+ if (errorpos[i] < DOC_ECC_BCH_COVERED_BYTES*8)
+ /* error is located in data, correct it */
+ change_bit(errorpos[i], buf);
+out:
+ doc_dbg("doc_ecc_bch_fix_data: flipped %d bits\n", numerrs);
+ return numerrs;
+}
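+
+/*
+ * Example of the index fix-up above: an error reported at bit position 13 by
+ * the BCH library becomes (13 & ~7) | (7 - (13 & 7)) = 10, i.e. the same byte
+ * but with the bit index mirrored within that byte (bit 5 becomes bit 2).
+ */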
+
+
+/**
+ * doc_read_page_prepare - Prepares reading data from a flash page
+ * @docg3: the device
+ * @block0: the first plane block index on flash memory
+ * @block1: the second plane block index on flash memory
+ * @page: the page index in the block
+ * @offset: the offset in the page (must be a multiple of 4)
+ *
+ * Prepares the page to be read in the flash memory :
+ * - tell ASIC to map the flash pages
+ * - tell ASIC to be in read mode
+ *
+ * After a call to this method, a call to doc_read_page_finish is mandatory,
+ * to end the read cycle of the flash.
+ *
+ * Read data from a flash page. The length to be read must be between 0 and
+ * (page_size + oob_size + wear_size), ie. 532, and a multiple of 4 (because
+ * reading the extra bytes is not implemented).
+ *
+ * As pages are grouped by 2 (in 2 planes), reading from a page must be done
+ * in two steps:
+ * - one read of 512 bytes at offset 0
+ * - one read of 512 bytes at offset 512 + 16
+ *
+ * Returns 0 if successful, -EIO if a read error occurred.
+ */
+static int doc_read_page_prepare(struct docg3 *docg3, int block0, int block1,
+ int page, int offset)
+{
+ int wear_area = 0, ret = 0;
+
+ doc_dbg("doc_read_page_prepare(blocks=(%d,%d), page=%d, ofsInPage=%d)\n",
+ block0, block1, page, offset);
+ if (offset >= DOC_LAYOUT_WEAR_OFFSET)
+ wear_area = 1;
+ if (!wear_area && offset > (DOC_LAYOUT_PAGE_OOB_SIZE * 2))
+ return -EINVAL;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ goto err;
+
+ /* Program the flash address block and page */
+ ret = doc_read_seek(docg3, block0, block1, page, wear_area, offset);
+ if (ret)
+ goto err;
+
+ doc_flash_command(docg3, DOC_CMD_READ_ALL_PLANES);
+ doc_delay(docg3, 2);
+ doc_wait_ready(docg3);
+
+ doc_flash_command(docg3, DOC_CMD_SET_ADDR_READ);
+ doc_delay(docg3, 1);
+ if (offset >= DOC_LAYOUT_PAGE_SIZE * 2)
+ offset -= 2 * DOC_LAYOUT_PAGE_SIZE;
+ doc_flash_address(docg3, offset >> 2);
+ doc_delay(docg3, 1);
+ doc_wait_ready(docg3);
+
+ doc_flash_command(docg3, DOC_CMD_READ_FLASH);
+
+ return 0;
+err:
+ doc_writeb(docg3, 0, DOC_DATAEND);
+ doc_delay(docg3, 2);
+ return -EIO;
+}
+
+/**
+ * doc_read_page_getbytes - Reads bytes from a prepared page
+ * @docg3: the device
+ * @len: the number of bytes to be read (must be a multiple of 4)
+ * @buf: the buffer to be filled in (or NULL to discard the read bytes)
+ * @first: 1 if first time read, DOC_READADDRESS should be set
+ * @last_odd: 1 if last read ended up on an odd byte
+ *
+ * Reads bytes from a prepared page. There is a subtlety here: if the last read
+ * ended up on an odd offset in the 1024-byte double page, ie. between the 2
+ * planes, the first byte must be read separately. If a word (16 bit) read were
+ * used, the read would return the byte of plane 2 as both the low and the high
+ * byte, which would corrupt the read.
+ *
+ */
+static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
+ int first, int last_odd)
+{
+ if (last_odd && len > 0) {
+ doc_read_data_area(docg3, buf, 1, first);
+ doc_read_data_area(docg3, buf ? buf + 1 : buf, len - 1, 0);
+ } else {
+ doc_read_data_area(docg3, buf, len, first);
+ }
+ doc_delay(docg3, 2);
+ return len;
+}
+
+/**
+ * doc_write_page_putbytes - Writes bytes into a prepared page
+ * @docg3: the device
+ * @len: the number of bytes to be written
+ * @buf: the buffer of input bytes
+ *
+ */
+static void doc_write_page_putbytes(struct docg3 *docg3, int len,
+ const u_char *buf)
+{
+ doc_write_data_area(docg3, buf, len);
+ doc_delay(docg3, 2);
+}
+
+/**
+ * doc_get_bch_hw_ecc - Get hardware calculated BCH ECC
+ * @docg3: the device
+ * @hwecc: the array of 7 integers where the hardware ecc will be stored
+ */
+static void doc_get_bch_hw_ecc(struct docg3 *docg3, u8 *hwecc)
+{
+ int i;
+
+ for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
+ hwecc[i] = doc_register_readb(docg3, DOC_BCH_HW_ECC(i));
+}
+
+/**
+ * doc_page_finish - Ends reading/writing of a flash page
+ * @docg3: the device
+ */
+static void doc_page_finish(struct docg3 *docg3)
+{
+ doc_writeb(docg3, 0, DOC_DATAEND);
+ doc_delay(docg3, 2);
+}
+
+/**
+ * doc_read_page_finish - Ends reading of a flash page
+ * @docg3: the device
+ *
+ * As a side effect, resets the chip selector to 0. This ensures that after each
+ * read operation, the floor 0 is selected. Therefore, if the system halts, the
+ * reboot will boot on floor 0, where the IPL is.
+ */
+static void doc_read_page_finish(struct docg3 *docg3)
+{
+ doc_page_finish(docg3);
+ doc_set_device_id(docg3, 0);
+}
+
+/**
+ * calc_block_sector - Calculate blocks, pages and ofs.
+ *
+ * @from: offset in flash
+ * @block0: first plane block index calculated
+ * @block1: second plane block index calculated
+ * @page: page calculated
+ * @ofs: offset in page
+ * @reliable: 0 if docg3 in normal mode, 1 if docg3 in fast mode, 2 if docg3 in
+ * reliable mode.
+ *
+ * The calculation is based on the reliable/normal mode. In normal mode, the 64
+ * pages of a block are available. In reliable mode, as pages 2*n and 2*n+1 are
+ * clones, only 32 pages per block are available.
+ */
+static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
+ int *ofs, int reliable)
+{
+ uint sector, pages_biblock;
+
+ pages_biblock = DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES;
+ if (reliable == 1 || reliable == 2)
+ pages_biblock /= 2;
+
+ sector = from / DOC_LAYOUT_PAGE_SIZE;
+ *block0 = sector / pages_biblock * DOC_LAYOUT_NBPLANES;
+ *block1 = *block0 + 1;
+ *page = sector % pages_biblock;
+ *page /= DOC_LAYOUT_NBPLANES;
+ if (reliable == 1 || reliable == 2)
+ *page *= 2;
+ if (sector % 2)
+ *ofs = DOC_LAYOUT_PAGE_OOB_SIZE;
+ else
+ *ofs = 0;
+}
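+
+/*
+ * Worked example, normal mode: with 512 byte pages, 64 pages per block and 2
+ * planes (the layout described at the top of this file), pages_biblock is 128.
+ * An offset of 66560 bytes is sector 130, hence block0 = (130 / 128) * 2 = 2,
+ * block1 = 3, page = (130 % 128) / 2 = 1, and as the sector index is even,
+ * ofs = 0.
+ */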
+
+/**
+ * doc_read_oob - Read out of band bytes from flash
+ * @mtd: the device
+ * @from: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @ops: the mtd oob structure
+ *
+ * Reads flash memory OOB area of pages.
+ *
+ * Returns 0 if the read is successful, or -EIO / -EINVAL if an error occurred
+ */
+static int doc_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int block0, block1, page, ret, skip, ofs = 0;
+ u8 *oobbuf = ops->oobbuf;
+ u8 *buf = ops->datbuf;
+ size_t len, ooblen, nbdata, nboob;
+ u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+ int max_bitflips = 0;
+
+ if (buf)
+ len = ops->len;
+ else
+ len = 0;
+ if (oobbuf)
+ ooblen = ops->ooblen;
+ else
+ ooblen = 0;
+
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+ oobbuf += ops->ooboffs;
+
+ doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+ from, ops->mode, buf, len, oobbuf, ooblen);
+ if (ooblen % DOC_LAYOUT_OOB_SIZE)
+ return -EINVAL;
+
+ if (from + len > mtd->size)
+ return -EINVAL;
+
+ ops->oobretlen = 0;
+ ops->retlen = 0;
+ ret = 0;
+ skip = from % DOC_LAYOUT_PAGE_SIZE;
+ mutex_lock(&docg3->cascade->lock);
+ while (ret >= 0 && (len > 0 || ooblen > 0)) {
+ calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
+ nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
+ ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
+ if (ret < 0)
+ goto out;
+ ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
+ if (ret < 0)
+ goto err_in_read;
+ ret = doc_read_page_getbytes(docg3, skip, NULL, 1, 0);
+ if (ret < skip)
+ goto err_in_read;
+ ret = doc_read_page_getbytes(docg3, nbdata, buf, 0, skip % 2);
+ if (ret < nbdata)
+ goto err_in_read;
+ doc_read_page_getbytes(docg3,
+ DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
+ NULL, 0, (skip + nbdata) % 2);
+ ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0, 0);
+ if (ret < nboob)
+ goto err_in_read;
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
+ NULL, 0, nboob % 2);
+
+ doc_get_bch_hw_ecc(docg3, hwecc);
+ eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
+
+ if (nboob >= DOC_LAYOUT_OOB_SIZE) {
+ doc_dbg("OOB - INFO: %*phC\n", 7, oobbuf);
+ doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]);
+ doc_dbg("OOB - BCH_ECC: %*phC\n", 7, oobbuf + 8);
+ doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]);
+ }
+ doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
+ doc_dbg("ECC HW_ECC: %*phC\n", 7, hwecc);
+
+ ret = -EIO;
+ if (is_prot_seq_error(docg3))
+ goto err_in_read;
+ ret = 0;
+ if ((block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) &&
+ (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) &&
+ (eccconf1 & DOC_ECCCONF1_PAGE_IS_WRITTEN) &&
+ (ops->mode != MTD_OPS_RAW) &&
+ (nbdata == DOC_LAYOUT_PAGE_SIZE)) {
+ ret = doc_ecc_bch_fix_data(docg3, buf, hwecc);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ ret = -EBADMSG;
+ }
+ if (ret > 0) {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max(max_bitflips, ret);
+ ret = max_bitflips;
+ }
+ }
+
+ doc_read_page_finish(docg3);
+ ops->retlen += nbdata;
+ ops->oobretlen += nboob;
+ buf += nbdata;
+ oobbuf += nboob;
+ len -= nbdata;
+ ooblen -= nboob;
+ from += DOC_LAYOUT_PAGE_SIZE;
+ skip = 0;
+ }
+
+out:
+ mutex_unlock(&docg3->cascade->lock);
+ return ret;
+err_in_read:
+ doc_read_page_finish(docg3);
+ goto out;
+}
+
+/**
+ * doc_read - Read bytes from flash
+ * @mtd: the device
+ * @from: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @len: the number of bytes to read (must be a multiple of 4)
+ * @retlen: the number of bytes actually read
+ * @buf: the filled in buffer
+ *
+ * Reads flash memory pages. This function does not read the OOB chunk, but only
+ * the page data.
+ *
+ * Returns 0 if the read is successful, or -EIO / -EINVAL if an error occurred
+ */
+static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct mtd_oob_ops ops;
+ size_t ret;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.datbuf = buf;
+ ops.len = len;
+ ops.mode = MTD_OPS_AUTO_OOB;
+
+ ret = doc_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+static int doc_reload_bbt(struct docg3 *docg3)
+{
+ int block = DOC_LAYOUT_BLOCK_BBT;
+ int ret = 0, nbpages, page;
+ u_char *buf = docg3->bbt;
+
+ nbpages = DIV_ROUND_UP(docg3->max_block + 1, 8 * DOC_LAYOUT_PAGE_SIZE);
+ for (page = 0; !ret && (page < nbpages); page++) {
+ ret = doc_read_page_prepare(docg3, block, block + 1,
+ page + DOC_LAYOUT_PAGE_BBT, 0);
+ if (!ret)
+ ret = doc_read_page_ecc_init(docg3,
+ DOC_LAYOUT_PAGE_SIZE);
+ if (!ret)
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
+ buf, 1, 0);
+ buf += DOC_LAYOUT_PAGE_SIZE;
+ }
+ doc_read_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_block_isbad - Checks whether a block is good or not
+ * @mtd: the device
+ * @from: the offset to find the correct block
+ *
+ * Returns 1 if block is bad, 0 if block is good
+ */
+static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int block0, block1, page, ofs, is_good;
+
+ calc_block_sector(from, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n",
+ from, block0, block1, page, ofs);
+
+ if (block0 < DOC_LAYOUT_BLOCK_FIRST_DATA)
+ return 0;
+ if (block1 > docg3->max_block)
+ return -EINVAL;
+
+ is_good = docg3->bbt[block0 >> 3] & (1 << (block0 & 0x7));
+ return !is_good;
+}
+
+#if 0
+/**
+ * doc_get_erase_count - Get block erase count
+ * @docg3: the device
+ * @from: the offset in which the block is.
+ *
+ * Get the number of times a block was erased. The number is the maximum of
+ * erase times between first and second plane (which should be equal normally).
+ *
+ * Returns The number of erases, or -EINVAL or -EIO on error.
+ */
+static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
+{
+ u8 buf[DOC_LAYOUT_WEAR_SIZE];
+ int ret, plane1_erase_count, plane2_erase_count;
+ int block0, block1, page, ofs;
+
+ doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf);
+ if (from % DOC_LAYOUT_PAGE_SIZE)
+ return -EINVAL;
+ calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable);
+ if (block1 > docg3->max_block)
+ return -EINVAL;
+
+ ret = doc_reset_seq(docg3);
+ if (!ret)
+ ret = doc_read_page_prepare(docg3, block0, block1, page,
+ ofs + DOC_LAYOUT_WEAR_OFFSET, 0);
+ if (!ret)
+ ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
+ buf, 1, 0);
+ doc_read_page_finish(docg3);
+
+ if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
+ return -EIO;
+ plane1_erase_count = (u8)(~buf[1]) | ((u8)(~buf[4]) << 8)
+ | ((u8)(~buf[5]) << 16);
+ plane2_erase_count = (u8)(~buf[3]) | ((u8)(~buf[6]) << 8)
+ | ((u8)(~buf[7]) << 16);
+
+ return max(plane1_erase_count, plane2_erase_count);
+}
+#endif
+
+/**
+ * doc_get_op_status - get erase/write operation status
+ * @docg3: the device
+ *
+ * Queries the status from the chip, and returns it
+ *
+ * Returns the status (bits DOC_PLANES_STATUS_*)
+ */
+static int doc_get_op_status(struct docg3 *docg3)
+{
+ u8 status;
+
+ doc_flash_sequence(docg3, DOC_SEQ_PLANES_STATUS);
+ doc_flash_command(docg3, DOC_CMD_PLANES_STATUS);
+ doc_delay(docg3, 5);
+
+ doc_ecc_disable(docg3);
+ doc_read_data_area(docg3, &status, 1, 1);
+ return status;
+}
+
+/**
+ * doc_write_erase_wait_status - wait for write or erase completion
+ * @docg3: the device
+ *
+ * Wait for the chip to be ready again after erase or write operation, and check
+ * erase/write status.
+ *
+ * Returns 0 if the erase was successful, -EIO on an erase/write issue, and
+ * -EAGAIN if the chip never became ready (timeout)
+ */
+static int doc_write_erase_wait_status(struct docg3 *docg3)
+{
+ int i, status, ret = 0;
+
+ for (i = 0; !doc_is_ready(docg3) && i < 5; i++)
+ msleep(20);
+ if (!doc_is_ready(docg3)) {
+ doc_dbg("Timeout reached and the chip is still not ready\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ status = doc_get_op_status(docg3);
+ if (status & DOC_PLANES_STATUS_FAIL) {
+ doc_dbg("Erase/Write failed on (a) plane(s), status = %x\n",
+ status);
+ ret = -EIO;
+ }
+
+out:
+ doc_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_erase_block - Erase a couple of blocks
+ * @docg3: the device
+ * @block0: the first block to erase (leftmost plane)
+ * @block1: the second block to erase (rightmost plane)
+ *
+ * Erase both blocks, and return operation status
+ *
+ * Returns 0 if the erase was successful, -EIO on an erase issue, and -EAGAIN
+ * if the chip stayed not ready for too long
+ */
+static int doc_erase_block(struct docg3 *docg3, int block0, int block1)
+{
+ int ret, sector;
+
+ doc_dbg("doc_erase_block(blocks=(%d,%d))\n", block0, block1);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ return -EIO;
+
+ doc_set_reliable_mode(docg3);
+ doc_flash_sequence(docg3, DOC_SEQ_ERASE);
+
+ sector = block0 << DOC_ADDR_BLOCK_SHIFT;
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ sector = block1 << DOC_ADDR_BLOCK_SHIFT;
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ doc_delay(docg3, 1);
+
+ doc_flash_command(docg3, DOC_CMD_ERASECYCLE2);
+ doc_delay(docg3, 2);
+
+ if (is_prot_seq_error(docg3)) {
+ doc_err("Erase blocks %d,%d error\n", block0, block1);
+ return -EIO;
+ }
+
+ return doc_write_erase_wait_status(docg3);
+}
+
+/**
+ * doc_erase - Erase a portion of the chip
+ * @mtd: the device
+ * @info: the erase info
+ *
+ * Erase a bunch of contiguous blocks, by pairs, as a "mtd" page of 1024 is
+ * split into 2 pages of 512 bytes on 2 contiguous blocks.
+ *
+ * Returns 0 if erase successful, -EINVAL if addressing error, -EIO if erase
+ * issue
+ */
+static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
+{
+ struct docg3 *docg3 = mtd->priv;
+ uint64_t len;
+ int block0, block1, page, ret, ofs = 0;
+
+	doc_dbg("doc_erase(from=%lld, len=%lld)\n", info->addr, info->len);
+
+ info->state = MTD_ERASE_PENDING;
+ calc_block_sector(info->addr + info->len, &block0, &block1, &page,
+ &ofs, docg3->reliable);
+ ret = -EINVAL;
+ if (info->addr + info->len > mtd->size || page || ofs)
+ goto reset_err;
+
+ ret = 0;
+ calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ doc_set_reliable_mode(docg3);
+ for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
+ info->state = MTD_ERASING;
+ ret = doc_erase_block(docg3, block0, block1);
+ block0 += 2;
+ block1 += 2;
+ }
+ mutex_unlock(&docg3->cascade->lock);
+
+ if (ret)
+ goto reset_err;
+
+ info->state = MTD_ERASE_DONE;
+ return 0;
+
+reset_err:
+ info->state = MTD_ERASE_FAILED;
+ return ret;
+}
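+
+/*
+ * Each iteration of the loop above erases one pair of blocks, which in normal
+ * mode amounts to 2 planes * 64 pages * 512 bytes = 64 KiB of flash; for the
+ * loop to walk the requested range correctly, mtd->erasesize is expected to
+ * describe exactly such a block pair.
+ */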
+
+/**
+ * doc_write_page - Write a single page to the chip
+ * @docg3: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @buf: buffer to get bytes from
+ * @oob: buffer to get out of band bytes from (can be NULL if no OOB should be
+ * written)
+ * @autoecc: if 0, all 16 bytes from OOB are taken, regardless of HW Hamming or
+ * BCH computations. If 1, only bytes 0-7 and byte 15 are taken,
+ * remaining ones are filled with hardware Hamming and BCH
+ * computations. Its value is not meaningful if oob == NULL.
+ *
+ * Write one full page (ie. 1 page split on two planes), of 512 bytes, with the
+ * OOB data. The OOB ECC is automatically computed by the hardware Hamming and
+ * BCH generator if autoecc is not null.
+ *
+ * Returns 0 if write successful, -EIO if write error, -EAGAIN if timeout
+ */
+static int doc_write_page(struct docg3 *docg3, loff_t to, const u_char *buf,
+ const u_char *oob, int autoecc)
+{
+ int block0, block1, page, ret, ofs = 0;
+ u8 hwecc[DOC_ECC_BCH_SIZE], hamming;
+
+ doc_dbg("doc_write_page(to=%lld)\n", to);
+ calc_block_sector(to, &block0, &block1, &page, &ofs, docg3->reliable);
+
+ doc_set_device_id(docg3, docg3->device_id);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ goto err;
+
+ /* Program the flash address block and page */
+ ret = doc_write_seek(docg3, block0, block1, page, ofs);
+ if (ret)
+ goto err;
+
+ doc_write_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
+ doc_delay(docg3, 2);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_PAGE_SIZE, buf);
+
+ if (oob && autoecc) {
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ, oob);
+ doc_delay(docg3, 2);
+ oob += DOC_LAYOUT_OOB_UNUSED_OFS;
+
+ hamming = doc_register_readb(docg3, DOC_HAMMINGPARITY);
+ doc_delay(docg3, 2);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_HAMMING_SZ,
+ &hamming);
+ doc_delay(docg3, 2);
+
+ doc_get_bch_hw_ecc(docg3, hwecc);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_BCH_SZ, hwecc);
+ doc_delay(docg3, 2);
+
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_UNUSED_SZ, oob);
+ }
+ if (oob && !autoecc)
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_SIZE, oob);
+
+ doc_delay(docg3, 2);
+ doc_page_finish(docg3);
+ doc_delay(docg3, 2);
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE2);
+ doc_delay(docg3, 2);
+
+ /*
+ * The wait status will perform another doc_page_finish() call, but that
+ * seems to please the docg3, so leave it.
+ */
+ ret = doc_write_erase_wait_status(docg3);
+ return ret;
+err:
+ doc_read_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_guess_autoecc - Guess autoecc mode from mtd_oob_ops
+ * @ops: the oob operations
+ *
+ * Returns 0 or 1 if success, -EINVAL if invalid oob mode
+ */
+static int doc_guess_autoecc(struct mtd_oob_ops *ops)
+{
+ int autoecc;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ autoecc = 1;
+ break;
+ case MTD_OPS_RAW:
+ autoecc = 0;
+ break;
+ default:
+ autoecc = -EINVAL;
+ }
+ return autoecc;
+}
+
+/**
+ * doc_fill_autooob - Fill a 16 bytes OOB from 8 non-ECC bytes
+ * @dst: the target 16 bytes OOB buffer
+ * @oobsrc: the source 8 bytes non-ECC OOB buffer
+ *
+ */
+static void doc_fill_autooob(u8 *dst, u8 *oobsrc)
+{
+ memcpy(dst, oobsrc, DOC_LAYOUT_OOB_PAGEINFO_SZ);
+ dst[DOC_LAYOUT_OOB_UNUSED_OFS] = oobsrc[DOC_LAYOUT_OOB_PAGEINFO_SZ];
+}
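+
+/*
+ * With the docg3_oobinfo layout defined above, the 8 user bytes of an
+ * MTD_OPS_AUTO_OOB request therefore land in OOB bytes 0-6 plus byte 15,
+ * while bytes 7-14 are left to the hardware Hamming and BCH ECC.
+ */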
+
+/**
+ * doc_backup_oob - Backup OOB into docg3 structure
+ * @docg3: the device
+ * @to: the page offset in the chip
+ * @ops: the OOB size and buffer
+ *
+ * As the docg3 should write a page with its OOB in one pass, and some userland
+ * applications do write_oob() to set up the OOB and then write(), store the OOB
+ * into a temporary storage. This is very dangerous, as 2 concurrent
+ * applications could store an OOB, and then write their pages (which will
+ * result in one of them having its OOB corrupted).
+ *
+ * The only reliable way would be for userland to call doc_write_oob() with both
+ * the page data _and_ the OOB area.
+ *
+ * Returns 0 if success, -EINVAL if ops content invalid
+ */
+static int doc_backup_oob(struct docg3 *docg3, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ooblen = ops->ooblen, autoecc;
+
+ if (ooblen != DOC_LAYOUT_OOB_SIZE)
+ return -EINVAL;
+ autoecc = doc_guess_autoecc(ops);
+ if (autoecc < 0)
+ return autoecc;
+
+ docg3->oob_write_ofs = to;
+ docg3->oob_autoecc = autoecc;
+ if (ops->mode == MTD_OPS_AUTO_OOB) {
+ doc_fill_autooob(docg3->oob_write_buf, ops->oobbuf);
+ ops->oobretlen = 8;
+ } else {
+ memcpy(docg3->oob_write_buf, ops->oobbuf, DOC_LAYOUT_OOB_SIZE);
+ ops->oobretlen = DOC_LAYOUT_OOB_SIZE;
+ }
+ return 0;
+}
+
+/**
+ * doc_write_oob - Write out of band bytes to flash
+ * @mtd: the device
+ * @ofs: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @ops: the mtd oob structure
+ *
+ * Either write OOB data into a temporary buffer, to be used by the subsequent
+ * page write, or, if a data buffer is provided as well, issue the page write
+ * directly. The provided OOB should be 16 bytes long. If data is provided
+ * without OOB, an all-zeroed OOB is used (ECC is still filled in if asked
+ * for).
+ *
+ * Returns 0 if successful, -EINVAL if the data length, OOB length or offset is
+ * not properly aligned
+ */
+static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
+ struct mtd_oob_ops *ops)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int ret, autoecc, oobdelta;
+ u8 *oobbuf = ops->oobbuf;
+ u8 *buf = ops->datbuf;
+ size_t len, ooblen;
+ u8 oob[DOC_LAYOUT_OOB_SIZE];
+
+ if (buf)
+ len = ops->len;
+ else
+ len = 0;
+ if (oobbuf)
+ ooblen = ops->ooblen;
+ else
+ ooblen = 0;
+
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+ oobbuf += ops->ooboffs;
+
+ doc_dbg("doc_write_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+ ofs, ops->mode, buf, len, oobbuf, ooblen);
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ oobdelta = mtd->oobsize;
+ break;
+ case MTD_OPS_AUTO_OOB:
+ oobdelta = mtd->ecclayout->oobavail;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) ||
+ (ofs % DOC_LAYOUT_PAGE_SIZE))
+ return -EINVAL;
+ if (len && ooblen &&
+ (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
+ return -EINVAL;
+ if (ofs + len > mtd->size)
+ return -EINVAL;
+
+ ops->oobretlen = 0;
+ ops->retlen = 0;
+ ret = 0;
+ if (len == 0 && ooblen == 0)
+ return -EINVAL;
+ if (len == 0 && ooblen > 0)
+ return doc_backup_oob(docg3, ofs, ops);
+
+ autoecc = doc_guess_autoecc(ops);
+ if (autoecc < 0)
+ return autoecc;
+
+ mutex_lock(&docg3->cascade->lock);
+ while (!ret && len > 0) {
+ memset(oob, 0, sizeof(oob));
+ if (ofs == docg3->oob_write_ofs)
+ memcpy(oob, docg3->oob_write_buf, DOC_LAYOUT_OOB_SIZE);
+ else if (ooblen > 0 && ops->mode == MTD_OPS_AUTO_OOB)
+ doc_fill_autooob(oob, oobbuf);
+ else if (ooblen > 0)
+ memcpy(oob, oobbuf, DOC_LAYOUT_OOB_SIZE);
+ ret = doc_write_page(docg3, ofs, buf, oob, autoecc);
+
+ ofs += DOC_LAYOUT_PAGE_SIZE;
+ len -= DOC_LAYOUT_PAGE_SIZE;
+ buf += DOC_LAYOUT_PAGE_SIZE;
+ if (ooblen) {
+ oobbuf += oobdelta;
+ ooblen -= oobdelta;
+ ops->oobretlen += oobdelta;
+ }
+ ops->retlen += DOC_LAYOUT_PAGE_SIZE;
+ }
+
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+ return ret;
+}
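+
+/*
+ * Illustrative sketch (not part of the driver): the safe usage pattern that
+ * doc_backup_oob's comment recommends is to hand both the page data and its
+ * OOB to a single OOB write (reaching doc_write_oob() through mtd_write_oob()),
+ * so no temporary OOB storage is involved. The buffer names and the
+ * page-aligned offset below are assumptions for the example only.
+ *
+ *	u8 data[DOC_LAYOUT_PAGE_SIZE], oob[DOC_LAYOUT_OOB_SIZE];
+ *	struct mtd_oob_ops ops = {
+ *		.mode    = MTD_OPS_PLACE_OOB,
+ *		.datbuf  = data,
+ *		.len     = sizeof(data),
+ *		.oobbuf  = oob,
+ *		.ooblen  = sizeof(oob),
+ *		.ooboffs = 0,
+ *	};
+ *	err = mtd_write_oob(mtd, page_aligned_ofs, &ops);
+ */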
+
+/**
+ * doc_write - Write a buffer to the chip
+ * @mtd: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @len: the number of bytes to write (must be a full page size, ie. 512)
+ * @retlen: the number of bytes actually written (0 or 512)
+ * @buf: the buffer to get bytes from
+ *
+ * Writes data to the chip.
+ *
+ * Returns 0 if write successful, -EIO if write error
+ */
+static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int ret;
+ struct mtd_oob_ops ops;
+
+ doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len);
+ ops.datbuf = (char *)buf;
+ ops.len = len;
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.oobbuf = NULL;
+ ops.ooblen = 0;
+ ops.ooboffs = 0;
+
+ ret = doc_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+static struct docg3 *sysfs_dev2docg3(struct device *dev,
+ struct device_attribute *attr)
+{
+ int floor;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
+
+ floor = attr->attr.name[1] - '0';
+ if (floor < 0 || floor >= DOC_MAX_NBFLOORS)
+ return NULL;
+ else
+ return docg3_floors[floor]->priv;
+}
+
+static ssize_t dps0_is_key_locked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int dps0;
+
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+
+ return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps1_is_key_locked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int dps1;
+
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+
+ return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps0_insert_key(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int i;
+
+ if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+ return -EINVAL;
+
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+ doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+ return count;
+}
+
+static ssize_t dps1_insert_key(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int i;
+
+ if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+ return -EINVAL;
+
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+ doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+ return count;
+}
+
+#define FLOOR_SYSFS(id) { \
+ __ATTR(f##id##_dps0_is_keylocked, S_IRUGO, dps0_is_key_locked, NULL), \
+ __ATTR(f##id##_dps1_is_keylocked, S_IRUGO, dps1_is_key_locked, NULL), \
+ __ATTR(f##id##_dps0_protection_key, S_IWUSR|S_IWGRP, NULL, dps0_insert_key), \
+ __ATTR(f##id##_dps1_protection_key, S_IWUSR|S_IWGRP, NULL, dps1_insert_key), \
+}
+
+static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
+ FLOOR_SYSFS(0), FLOOR_SYSFS(1), FLOOR_SYSFS(2), FLOOR_SYSFS(3)
+};
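+
+/*
+ * Illustrative note (not part of the driver): FLOOR_SYSFS(id) expands to four
+ * attributes per floor, so for floor 0 the platform device's sysfs directory
+ * would expose:
+ *   f0_dps0_is_keylocked, f0_dps1_is_keylocked   (read-only)
+ *   f0_dps0_protection_key, f0_dps1_protection_key (write-only)
+ * and likewise f1_*, f2_*, f3_* for the other floors of the cascade.
+ */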
+
+static int doc_register_sysfs(struct platform_device *pdev,
+ struct docg3_cascade *cascade)
+{
+ int ret = 0, floor, i = 0;
+ struct device *dev = &pdev->dev;
+
+ for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS &&
+ cascade->floors[floor]; floor++)
+ for (i = 0; !ret && i < 4; i++)
+ ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
+ if (!ret)
+ return 0;
+ do {
+ while (--i >= 0)
+ device_remove_file(dev, &doc_sys_attrs[floor][i]);
+ i = 4;
+ } while (--floor >= 0);
+ return ret;
+}
+
+static void doc_unregister_sysfs(struct platform_device *pdev,
+ struct docg3_cascade *cascade)
+{
+ struct device *dev = &pdev->dev;
+ int floor, i;
+
+ for (floor = 0; floor < DOC_MAX_NBFLOORS && cascade->floors[floor];
+ floor++)
+ for (i = 0; i < 4; i++)
+ device_remove_file(dev, &doc_sys_attrs[floor][i]);
+}
+
+/*
+ * Debug sysfs entries
+ */
+static int dbg_flashctrl_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = (struct docg3 *)s->private;
+
+ int pos = 0;
+ u8 fctrl;
+
+ mutex_lock(&docg3->cascade->lock);
+ fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ mutex_unlock(&docg3->cascade->lock);
+
+ pos += seq_printf(s,
+ "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
+ fctrl,
+ fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
+ fctrl & DOC_CTRL_CE ? "active" : "inactive",
+ fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
+ fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
+ fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
+ return pos;
+}
+DEBUGFS_RO_ATTR(flashcontrol, dbg_flashctrl_show);
+
+static int dbg_asicmode_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = (struct docg3 *)s->private;
+
+ int pos = 0, pctrl, mode;
+
+ mutex_lock(&docg3->cascade->lock);
+ pctrl = doc_register_readb(docg3, DOC_ASICMODE);
+ mode = pctrl & 0x03;
+ mutex_unlock(&docg3->cascade->lock);
+
+ pos += seq_printf(s,
+ "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
+ pctrl,
+ pctrl & DOC_ASICMODE_RAM_WE ? 1 : 0,
+ pctrl & DOC_ASICMODE_RSTIN_RESET ? 1 : 0,
+ pctrl & DOC_ASICMODE_BDETCT_RESET ? 1 : 0,
+ pctrl & DOC_ASICMODE_MDWREN ? 1 : 0,
+ pctrl & DOC_ASICMODE_POWERDOWN ? 1 : 0,
+ mode >> 1, mode & 0x1);
+
+ switch (mode) {
+ case DOC_ASICMODE_RESET:
+ pos += seq_printf(s, "reset");
+ break;
+ case DOC_ASICMODE_NORMAL:
+ pos += seq_printf(s, "normal");
+ break;
+ case DOC_ASICMODE_POWERDOWN:
+ pos += seq_printf(s, "powerdown");
+ break;
+ }
+ pos += seq_printf(s, ")\n");
+ return pos;
+}
+DEBUGFS_RO_ATTR(asic_mode, dbg_asicmode_show);
+
+static int dbg_device_id_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = (struct docg3 *)s->private;
+ int pos = 0;
+ int id;
+
+ mutex_lock(&docg3->cascade->lock);
+ id = doc_register_readb(docg3, DOC_DEVICESELECT);
+ mutex_unlock(&docg3->cascade->lock);
+
+ pos += seq_printf(s, "DeviceId = %d\n", id);
+ return pos;
+}
+DEBUGFS_RO_ATTR(device_id, dbg_device_id_show);
+
+static int dbg_protection_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = (struct docg3 *)s->private;
+ int pos = 0;
+ int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
+
+ mutex_lock(&docg3->cascade->lock);
+ protect = doc_register_readb(docg3, DOC_PROTECTION);
+ dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
+ dps0_high = doc_register_readw(docg3, DOC_DPS0_ADDRHIGH);
+ dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
+ dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
+ mutex_unlock(&docg3->cascade->lock);
+
+ pos += seq_printf(s, "Protection = 0x%02x (",
+ protect);
+ if (protect & DOC_PROTECT_FOUNDRY_OTP_LOCK)
+ pos += seq_printf(s, "FOUNDRY_OTP_LOCK,");
+ if (protect & DOC_PROTECT_CUSTOMER_OTP_LOCK)
+ pos += seq_printf(s, "CUSTOMER_OTP_LOCK,");
+ if (protect & DOC_PROTECT_LOCK_INPUT)
+ pos += seq_printf(s, "LOCK_INPUT,");
+ if (protect & DOC_PROTECT_STICKY_LOCK)
+ pos += seq_printf(s, "STICKY_LOCK,");
+ if (protect & DOC_PROTECT_PROTECTION_ENABLED)
+ pos += seq_printf(s, "PROTECTION ON,");
+ if (protect & DOC_PROTECT_IPL_DOWNLOAD_LOCK)
+ pos += seq_printf(s, "IPL_DOWNLOAD_LOCK,");
+ if (protect & DOC_PROTECT_PROTECTION_ERROR)
+ pos += seq_printf(s, "PROTECT_ERR,");
+ else
+ pos += seq_printf(s, "NO_PROTECT_ERR");
+ pos += seq_printf(s, ")\n");
+
+ pos += seq_printf(s, "DPS0 = 0x%02x : "
+ "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
+ "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
+ dps0, dps0_low, dps0_high,
+ !!(dps0 & DOC_DPS_OTP_PROTECTED),
+ !!(dps0 & DOC_DPS_READ_PROTECTED),
+ !!(dps0 & DOC_DPS_WRITE_PROTECTED),
+ !!(dps0 & DOC_DPS_HW_LOCK_ENABLED),
+ !!(dps0 & DOC_DPS_KEY_OK));
+ pos += seq_printf(s, "DPS1 = 0x%02x : "
+ "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
+ "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
+ dps1, dps1_low, dps1_high,
+ !!(dps1 & DOC_DPS_OTP_PROTECTED),
+ !!(dps1 & DOC_DPS_READ_PROTECTED),
+ !!(dps1 & DOC_DPS_WRITE_PROTECTED),
+ !!(dps1 & DOC_DPS_HW_LOCK_ENABLED),
+ !!(dps1 & DOC_DPS_KEY_OK));
+ return pos;
+}
+DEBUGFS_RO_ATTR(protection, dbg_protection_show);
+
+static int __init doc_dbg_register(struct docg3 *docg3)
+{
+ struct dentry *root, *entry;
+
+ root = debugfs_create_dir("docg3", NULL);
+ if (!root)
+ return -ENOMEM;
+
+ entry = debugfs_create_file("flashcontrol", S_IRUSR, root, docg3,
+ &flashcontrol_fops);
+ if (entry)
+ entry = debugfs_create_file("asic_mode", S_IRUSR, root,
+ docg3, &asic_mode_fops);
+ if (entry)
+ entry = debugfs_create_file("device_id", S_IRUSR, root,
+ docg3, &device_id_fops);
+ if (entry)
+ entry = debugfs_create_file("protection", S_IRUSR, root,
+ docg3, &protection_fops);
+ if (entry) {
+ docg3->debugfs_root = root;
+ return 0;
+ } else {
+ debugfs_remove_recursive(root);
+ return -ENOMEM;
+ }
+}
+
+static void __exit doc_dbg_unregister(struct docg3 *docg3)
+{
+ debugfs_remove_recursive(docg3->debugfs_root);
+}
+
+/**
+ * doc_set_driver_info - Fill the mtd_info structure and docg3 structure
+ * @chip_id: The chip ID of the supported chip
+ * @mtd: The structure to fill
+ */
+static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int cfg;
+
+ cfg = doc_register_readb(docg3, DOC_CONFIGURATION);
+ docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0);
+ docg3->reliable = reliable_mode;
+
+ switch (chip_id) {
+ case DOC_CHIPID_G3:
+ mtd->name = kasprintf(GFP_KERNEL, "docg3.%d",
+ docg3->device_id);
+ docg3->max_block = 2047;
+ break;
+ }
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE;
+ if (docg3->reliable == 2)
+ mtd->size /= 2;
+ mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
+ if (docg3->reliable == 2)
+ mtd->erasesize /= 2;
+ mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
+ mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
+ mtd->owner = THIS_MODULE;
+ mtd->_erase = doc_erase;
+ mtd->_read = doc_read;
+ mtd->_write = doc_write;
+ mtd->_read_oob = doc_read_oob;
+ mtd->_write_oob = doc_write_oob;
+ mtd->_block_isbad = doc_block_isbad;
+ mtd->ecclayout = &docg3_oobinfo;
+ mtd->ecc_strength = DOC_ECC_BCH_T;
+}
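+
+/*
+ * Worked example (illustrative): for a DOC_CHIPID_G3 chip, max_block is 2047,
+ * so mtd->size = 2048 * DOC_LAYOUT_BLOCK_SIZE = 2048 * 64 * 512 bytes = 64 MiB
+ * in normal/fast mode, halved to 32 MiB in reliable mode 2 where pages are
+ * cloned.
+ */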
+
+/**
+ * doc_probe_device - Check if a device is available
+ * @base: the io space where the device is probed
+ * @floor: the floor of the probed device
+ * @dev: the device
+ * @cascade: the cascade of chips this devices will belong to
+ *
+ * Checks whether a device is available at the specified IO range and floor.
+ *
+ * Returns an mtd_info struct if there is a device, -ENODEV if none is found,
+ * -ENOMEM if a memory allocation failed. If floor 0 is checked, a reset of the
+ * ASIC is launched.
+ */
+static struct mtd_info * __init
+doc_probe_device(struct docg3_cascade *cascade, int floor, struct device *dev)
+{
+ int ret, bbt_nbpages;
+ u16 chip_id, chip_id_inv;
+ struct docg3 *docg3;
+ struct mtd_info *mtd;
+
+ ret = -ENOMEM;
+ docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL);
+ if (!docg3)
+ goto nomem1;
+ mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ if (!mtd)
+ goto nomem2;
+ mtd->priv = docg3;
+ bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
+ 8 * DOC_LAYOUT_PAGE_SIZE);
+ docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
+ if (!docg3->bbt)
+ goto nomem3;
+
+ docg3->dev = dev;
+ docg3->device_id = floor;
+ docg3->cascade = cascade;
+ doc_set_device_id(docg3, docg3->device_id);
+ if (!floor)
+ doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
+ doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL);
+
+ chip_id = doc_register_readw(docg3, DOC_CHIPID);
+ chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV);
+
+ ret = 0;
+ if (chip_id != (u16)(~chip_id_inv)) {
+ goto nomem3;
+ }
+
+ switch (chip_id) {
+ case DOC_CHIPID_G3:
+ doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
+ docg3->cascade->base, floor);
+ break;
+ default:
+ doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
+ goto nomem3;
+ }
+
+ doc_set_driver_info(chip_id, mtd);
+
+ doc_hamming_ecc_init(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ);
+ doc_reload_bbt(docg3);
+ return mtd;
+
+nomem3:
+ kfree(mtd);
+nomem2:
+ kfree(docg3);
+nomem1:
+ return ERR_PTR(ret);
+}
+
+/**
+ * doc_release_device - Release a docg3 floor
+ * @mtd: the device
+ */
+static void doc_release_device(struct mtd_info *mtd)
+{
+ struct docg3 *docg3 = mtd->priv;
+
+ mtd_device_unregister(mtd);
+ kfree(docg3->bbt);
+ kfree(docg3);
+ kfree(mtd->name);
+ kfree(mtd);
+}
+
+/**
+ * docg3_resume - Awakens docg3 floor
+ * @pdev: platform device
+ *
+ * Returns 0 (always successful)
+ */
+static int docg3_resume(struct platform_device *pdev)
+{
+ int i;
+ struct docg3_cascade *cascade;
+ struct mtd_info **docg3_floors, *mtd;
+ struct docg3 *docg3;
+
+ cascade = platform_get_drvdata(pdev);
+ docg3_floors = cascade->floors;
+ mtd = docg3_floors[0];
+ docg3 = mtd->priv;
+
+ doc_dbg("docg3_resume()\n");
+ for (i = 0; i < 12; i++)
+ doc_readb(docg3, DOC_IOSPACE_IPL);
+ return 0;
+}
+
+/**
+ * docg3_suspend - Put in low power mode the docg3 floor
+ * @pdev: platform device
+ * @state: power state
+ *
+ * Shuts off most of the docg3 circuitry to lower power consumption.
+ *
+ * Returns 0 if suspend succeeded, -EIO if chip refused suspend
+ */
+static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int floor, i;
+ struct docg3_cascade *cascade;
+ struct mtd_info **docg3_floors, *mtd;
+ struct docg3 *docg3;
+ u8 ctrl, pwr_down;
+
+ cascade = platform_get_drvdata(pdev);
+ docg3_floors = cascade->floors;
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+ mtd = docg3_floors[floor];
+ if (!mtd)
+ continue;
+ docg3 = mtd->priv;
+
+ doc_writeb(docg3, floor, DOC_DEVICESELECT);
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ ctrl &= ~DOC_CTRL_VIOLATION & ~DOC_CTRL_CE;
+ doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
+
+ for (i = 0; i < 10; i++) {
+ usleep_range(3000, 4000);
+ pwr_down = doc_register_readb(docg3, DOC_POWERMODE);
+ if (pwr_down & DOC_POWERDOWN_READY)
+ break;
+ }
+ if (pwr_down & DOC_POWERDOWN_READY) {
+ doc_dbg("docg3_suspend(): floor %d powerdown ok\n",
+ floor);
+ } else {
+ doc_err("docg3_suspend(): floor %d powerdown failed\n",
+ floor);
+ return -EIO;
+ }
+ }
+
+ mtd = docg3_floors[0];
+ docg3 = mtd->priv;
+ doc_set_asic_mode(docg3, DOC_ASICMODE_POWERDOWN);
+ return 0;
+}
+
+/**
+ * doc_probe - Probe the IO space for a DiskOnChip G3 chip
+ * @pdev: platform device
+ *
+ * Probes for a G3 chip at the specified IO space in the platform data
+ * resources. Floor 0 must be available.
+ *
+ * Returns 0 on success, -ENOMEM, -ENXIO on error
+ */
+static int __init docg3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtd_info *mtd;
+ struct resource *ress;
+ void __iomem *base;
+ int ret, floor, found = 0;
+ struct docg3_cascade *cascade;
+
+ ret = -ENXIO;
+ ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ress) {
+ dev_err(dev, "No I/O memory resource defined\n");
+ return ret;
+ }
+ base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
+
+ ret = -ENOMEM;
+ cascade = devm_kzalloc(dev, sizeof(*cascade) * DOC_MAX_NBFLOORS,
+ GFP_KERNEL);
+ if (!cascade)
+ return ret;
+ cascade->base = base;
+ mutex_init(&cascade->lock);
+ cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
+ DOC_ECC_BCH_PRIMPOLY);
+ if (!cascade->bch)
+ return ret;
+
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+ mtd = doc_probe_device(cascade, floor, dev);
+ if (IS_ERR(mtd)) {
+ ret = PTR_ERR(mtd);
+ goto err_probe;
+ }
+ if (!mtd) {
+ if (floor == 0)
+ goto notfound;
+ else
+ continue;
+ }
+ cascade->floors[floor] = mtd;
+ ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
+ 0);
+ if (ret)
+ goto err_probe;
+ found++;
+ }
+
+ ret = doc_register_sysfs(pdev, cascade);
+ if (ret)
+ goto err_probe;
+ if (!found)
+ goto notfound;
+
+ platform_set_drvdata(pdev, cascade);
+ doc_dbg_register(cascade->floors[0]->priv);
+ return 0;
+
+notfound:
+ ret = -ENODEV;
+ dev_info(dev, "No supported DiskOnChip found\n");
+err_probe:
+ free_bch(cascade->bch);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+ if (cascade->floors[floor])
+ doc_release_device(cascade->floors[floor]);
+ return ret;
+}
+
+/**
+ * docg3_release - Release the driver
+ * @pdev: the platform device
+ *
+ * Returns 0
+ */
+static int __exit docg3_release(struct platform_device *pdev)
+{
+ struct docg3_cascade *cascade = platform_get_drvdata(pdev);
+ struct docg3 *docg3 = cascade->floors[0]->priv;
+ int floor;
+
+ doc_unregister_sysfs(pdev, cascade);
+ doc_dbg_unregister(docg3);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+ if (cascade->floors[floor])
+ doc_release_device(cascade->floors[floor]);
+
+ free_bch(docg3->cascade->bch);
+ return 0;
+}
+
+static struct platform_driver g3_driver = {
+ .driver = {
+ .name = "docg3",
+ .owner = THIS_MODULE,
+ },
+ .suspend = docg3_suspend,
+ .resume = docg3_resume,
+ .remove = __exit_p(docg3_release),
+};
+
+module_platform_driver_probe(g3_driver, docg3_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
+MODULE_DESCRIPTION("MTD driver for DiskOnChip G3");
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
new file mode 100644
index 00000000000..19fb93f96a3
--- /dev/null
+++ b/drivers/mtd/devices/docg3.h
@@ -0,0 +1,370 @@
+/*
+ * Handles the M-Systems DiskOnChip G3 chip
+ *
+ * Copyright (C) 2011 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _MTD_DOCG3_H
+#define _MTD_DOCG3_H
+
+#include <linux/mtd/mtd.h>
+
+/*
+ * Flash memory areas :
+ * - 0x0000 .. 0x07ff : IPL
+ * - 0x0800 .. 0x0fff : Data area
+ * - 0x1000 .. 0x17ff : Registers
+ * - 0x1800 .. 0x1fff : Unknown
+ */
+#define DOC_IOSPACE_IPL 0x0000
+#define DOC_IOSPACE_DATA 0x0800
+#define DOC_IOSPACE_SIZE 0x2000
+
+/*
+ * DOC G3 layout and addressing scheme
+ * A page address for the block "b", plane "P" and page "p":
+ * address = [bbbb bPpp pppp]
+ */
+
+#define DOC_ADDR_PAGE_MASK 0x3f
+#define DOC_ADDR_BLOCK_SHIFT 6
+#define DOC_LAYOUT_NBPLANES 2
+#define DOC_LAYOUT_PAGES_PER_BLOCK 64
+#define DOC_LAYOUT_PAGE_SIZE 512
+#define DOC_LAYOUT_OOB_SIZE 16
+#define DOC_LAYOUT_WEAR_SIZE 8
+#define DOC_LAYOUT_PAGE_OOB_SIZE \
+ (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_SIZE)
+#define DOC_LAYOUT_WEAR_OFFSET (DOC_LAYOUT_PAGE_OOB_SIZE * 2)
+#define DOC_LAYOUT_BLOCK_SIZE \
+ (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE)
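+
+/*
+ * Worked numbers (illustrative): DOC_LAYOUT_BLOCK_SIZE is 64 * 512 = 32 KiB
+ * per plane; with DOC_LAYOUT_NBPLANES = 2 the erase unit exported as
+ * mtd->erasesize is 64 KiB (halved again in reliable mode 2).
+ */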
+
+/*
+ * ECC related constants
+ */
+#define DOC_ECC_BCH_M 14
+#define DOC_ECC_BCH_T 4
+#define DOC_ECC_BCH_PRIMPOLY 0x4443
+#define DOC_ECC_BCH_SIZE 7
+#define DOC_ECC_BCH_COVERED_BYTES \
+ (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ + \
+ DOC_LAYOUT_OOB_HAMMING_SZ)
+#define DOC_ECC_BCH_TOTAL_BYTES \
+ (DOC_ECC_BCH_COVERED_BYTES + DOC_LAYOUT_OOB_BCH_SZ)
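+
+/*
+ * Worked numbers (illustrative): DOC_ECC_BCH_COVERED_BYTES = 512 + 7 + 1 = 520
+ * bytes protected by the BCH code, and DOC_ECC_BCH_TOTAL_BYTES = 520 + 7 = 527
+ * bytes once the 7 BCH ECC bytes themselves are included.
+ */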
+
+/*
+ * Blocks distribution
+ */
+#define DOC_LAYOUT_BLOCK_BBT 0
+#define DOC_LAYOUT_BLOCK_OTP 0
+#define DOC_LAYOUT_BLOCK_FIRST_DATA 6
+
+#define DOC_LAYOUT_PAGE_BBT 4
+
+/*
+ * Extra page OOB (16 bytes wide) layout
+ */
+#define DOC_LAYOUT_OOB_PAGEINFO_OFS 0
+#define DOC_LAYOUT_OOB_HAMMING_OFS 7
+#define DOC_LAYOUT_OOB_BCH_OFS 8
+#define DOC_LAYOUT_OOB_UNUSED_OFS 15
+#define DOC_LAYOUT_OOB_PAGEINFO_SZ 7
+#define DOC_LAYOUT_OOB_HAMMING_SZ 1
+#define DOC_LAYOUT_OOB_BCH_SZ 7
+#define DOC_LAYOUT_OOB_UNUSED_SZ 1
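+
+/*
+ * Summary of the defines above (illustrative): the 16 OOB bytes lay out as
+ *   bytes  0..6  : page info
+ *   byte   7     : Hamming parity
+ *   bytes  8..14 : BCH ECC
+ *   byte   15    : unused
+ */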
+
+
+#define DOC_CHIPID_G3 0x200
+#define DOC_ERASE_MARK 0xaa
+#define DOC_MAX_NBFLOORS 4
+/*
+ * Flash registers
+ */
+#define DOC_CHIPID 0x1000
+#define DOC_TEST 0x1004
+#define DOC_BUSLOCK 0x1006
+#define DOC_ENDIANCONTROL 0x1008
+#define DOC_DEVICESELECT 0x100a
+#define DOC_ASICMODE 0x100c
+#define DOC_CONFIGURATION 0x100e
+#define DOC_INTERRUPTCONTROL 0x1010
+#define DOC_READADDRESS 0x101a
+#define DOC_DATAEND 0x101e
+#define DOC_INTERRUPTSTATUS 0x1020
+
+#define DOC_FLASHSEQUENCE 0x1032
+#define DOC_FLASHCOMMAND 0x1034
+#define DOC_FLASHADDRESS 0x1036
+#define DOC_FLASHCONTROL 0x1038
+#define DOC_NOP 0x103e
+
+#define DOC_ECCCONF0 0x1040
+#define DOC_ECCCONF1 0x1042
+#define DOC_ECCPRESET 0x1044
+#define DOC_HAMMINGPARITY 0x1046
+#define DOC_BCH_HW_ECC(idx) (0x1048 + idx)
+
+#define DOC_PROTECTION 0x1056
+#define DOC_DPS0_KEY 0x105c
+#define DOC_DPS1_KEY 0x105e
+#define DOC_DPS0_ADDRLOW 0x1060
+#define DOC_DPS0_ADDRHIGH 0x1062
+#define DOC_DPS1_ADDRLOW 0x1064
+#define DOC_DPS1_ADDRHIGH 0x1066
+#define DOC_DPS0_STATUS 0x106c
+#define DOC_DPS1_STATUS 0x106e
+
+#define DOC_ASICMODECONFIRM 0x1072
+#define DOC_CHIPID_INV 0x1074
+#define DOC_POWERMODE 0x107c
+
+/*
+ * Flash sequences
+ * A sequence is preset before one or more commands are input to the chip.
+ */
+#define DOC_SEQ_RESET 0x00
+#define DOC_SEQ_PAGE_SIZE_532 0x03
+#define DOC_SEQ_SET_FASTMODE 0x05
+#define DOC_SEQ_SET_RELIABLEMODE 0x09
+#define DOC_SEQ_READ 0x12
+#define DOC_SEQ_SET_PLANE1 0x0e
+#define DOC_SEQ_SET_PLANE2 0x10
+#define DOC_SEQ_PAGE_SETUP 0x1d
+#define DOC_SEQ_ERASE 0x27
+#define DOC_SEQ_PLANES_STATUS 0x31
+
+/*
+ * Flash commands
+ */
+#define DOC_CMD_READ_PLANE1 0x00
+#define DOC_CMD_SET_ADDR_READ 0x05
+#define DOC_CMD_READ_ALL_PLANES 0x30
+#define DOC_CMD_READ_PLANE2 0x50
+#define DOC_CMD_READ_FLASH 0xe0
+#define DOC_CMD_PAGE_SIZE_532 0x3c
+
+#define DOC_CMD_PROG_BLOCK_ADDR 0x60
+#define DOC_CMD_PROG_CYCLE1 0x80
+#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOC_CMD_PROG_CYCLE3 0x11
+#define DOC_CMD_ERASECYCLE2 0xd0
+#define DOC_CMD_READ_STATUS 0x70
+#define DOC_CMD_PLANES_STATUS 0x71
+
+#define DOC_CMD_RELIABLE_MODE 0x22
+#define DOC_CMD_FAST_MODE 0xa2
+
+#define DOC_CMD_RESET 0xff
+
+/*
+ * Flash register : DOC_FLASHCONTROL
+ */
+#define DOC_CTRL_VIOLATION 0x20
+#define DOC_CTRL_CE 0x10
+#define DOC_CTRL_UNKNOWN_BITS 0x08
+#define DOC_CTRL_PROTECTION_ERROR 0x04
+#define DOC_CTRL_SEQUENCE_ERROR 0x02
+#define DOC_CTRL_FLASHREADY 0x01
+
+/*
+ * Flash register : DOC_ASICMODE
+ */
+#define DOC_ASICMODE_RESET 0x00
+#define DOC_ASICMODE_NORMAL 0x01
+#define DOC_ASICMODE_POWERDOWN 0x02
+#define DOC_ASICMODE_MDWREN 0x04
+#define DOC_ASICMODE_BDETCT_RESET 0x08
+#define DOC_ASICMODE_RSTIN_RESET 0x10
+#define DOC_ASICMODE_RAM_WE 0x20
+
+/*
+ * Flash register : DOC_ECCCONF0
+ */
+#define DOC_ECCCONF0_WRITE_MODE 0x0000
+#define DOC_ECCCONF0_READ_MODE 0x8000
+#define DOC_ECCCONF0_AUTO_ECC_ENABLE 0x4000
+#define DOC_ECCCONF0_HAMMING_ENABLE 0x1000
+#define DOC_ECCCONF0_BCH_ENABLE 0x0800
+#define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff
+
+/*
+ * Flash register : DOC_ECCCONF1
+ */
+#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
+#define DOC_ECCCONF1_UNKOWN1 0x40
+#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
+#define DOC_ECCCONF1_UNKOWN3 0x10
+#define DOC_ECCCONF1_HAMMING_BITS_MASK 0x0f
+
+/*
+ * Flash register : DOC_PROTECTION
+ */
+#define DOC_PROTECT_FOUNDRY_OTP_LOCK 0x01
+#define DOC_PROTECT_CUSTOMER_OTP_LOCK 0x02
+#define DOC_PROTECT_LOCK_INPUT 0x04
+#define DOC_PROTECT_STICKY_LOCK 0x08
+#define DOC_PROTECT_PROTECTION_ENABLED 0x10
+#define DOC_PROTECT_IPL_DOWNLOAD_LOCK 0x20
+#define DOC_PROTECT_PROTECTION_ERROR 0x80
+
+/*
+ * Flash register : DOC_DPS0_STATUS and DOC_DPS1_STATUS
+ */
+#define DOC_DPS_OTP_PROTECTED 0x01
+#define DOC_DPS_READ_PROTECTED 0x02
+#define DOC_DPS_WRITE_PROTECTED 0x04
+#define DOC_DPS_HW_LOCK_ENABLED 0x08
+#define DOC_DPS_KEY_OK 0x80
+
+/*
+ * Flash register : DOC_CONFIGURATION
+ */
+#define DOC_CONF_IF_CFG 0x80
+#define DOC_CONF_MAX_ID_MASK 0x30
+#define DOC_CONF_VCCQ_3V 0x01
+
+/*
+ * Flash register : DOC_READADDRESS
+ */
+#define DOC_READADDR_INC 0x8000
+#define DOC_READADDR_ONE_BYTE 0x4000
+#define DOC_READADDR_ADDR_MASK 0x1fff
+
+/*
+ * Flash register : DOC_POWERMODE
+ */
+#define DOC_POWERDOWN_READY 0x80
+
+/*
+ * Status of erase and write operation
+ */
+#define DOC_PLANES_STATUS_FAIL 0x01
+#define DOC_PLANES_STATUS_PLANE0_KO 0x02
+#define DOC_PLANES_STATUS_PLANE1_KO 0x04
+
+/*
+ * DPS key management
+ *
+ * Each floor of docg3 has 2 protection areas: DPS0 and DPS1. These areas span
+ * across block boundaries, and define whether these blocks can be read or
+ * written.
+ * The definition is dynamically stored in page 0 of blocks (2,3) for DPS0, and
+ * page 0 of blocks (4,5) for DPS1.
+ */
+#define DOC_LAYOUT_DPS_KEY_LENGTH 8
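+
+/*
+ * Illustrative usage (assumption, not part of the driver): a key is inserted
+ * by writing exactly DOC_LAYOUT_DPS_KEY_LENGTH (8) bytes to the matching
+ * sysfs attribute, e.g. from a shell:
+ *   printf '%s' "8bytekey" > fX_dpsY_protection_key
+ * A trailing newline (as a plain echo would add) makes the count 9 and the
+ * store callback returns -EINVAL.
+ */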
+
+/**
+ * struct docg3_cascade - Cascade of 1 to 4 docg3 chips
+ * @floors: floors (ie. one physical docg3 chip is one floor)
+ * @base: IO space to access all chips in the cascade
+ * @bch: the BCH correcting control structure
+ * @lock: lock to protect docg3 IO space from concurrent accesses
+ */
+struct docg3_cascade {
+ struct mtd_info *floors[DOC_MAX_NBFLOORS];
+ void __iomem *base;
+ struct bch_control *bch;
+ struct mutex lock;
+};
+
+/**
+ * struct docg3 - DiskOnChip driver private data
+ * @dev: the device currently under control
+ * @cascade: the cascade this device belongs to
+ * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
+ * @if_cfg: if true, reads are on 16 bits, else reads are on 8 bits
+ *
+ * @reliable: if 0, docg3 in normal mode, if 1 docg3 in fast mode, if 2 in
+ * reliable mode
+ * Fast mode implies more errors than normal mode.
+ * Reliable mode implies that page 2*n and 2*n+1 are clones.
+ * @bbt: bad block table cache
+ * @oob_write_ofs: offset of the MTD where this OOB should belong (ie. in next
+ * page_write)
+ * @oob_autoecc: if 1, use only bytes 0-7, 15, and fill the others with HW ECC
+ * if 0, use all the 16 bytes.
+ * @oob_write_buf: prepared OOB for next page_write
+ * @debugfs_root: debugfs root node
+ */
+struct docg3 {
+ struct device *dev;
+ struct docg3_cascade *cascade;
+ unsigned int device_id:4;
+ unsigned int if_cfg:1;
+ unsigned int reliable:2;
+ int max_block;
+ u8 *bbt;
+ loff_t oob_write_ofs;
+ int oob_autoecc;
+ u8 oob_write_buf[DOC_LAYOUT_OOB_SIZE];
+ struct dentry *debugfs_root;
+};
+
+#define doc_err(fmt, arg...) dev_err(docg3->dev, (fmt), ## arg)
+#define doc_info(fmt, arg...) dev_info(docg3->dev, (fmt), ## arg)
+#define doc_dbg(fmt, arg...) dev_dbg(docg3->dev, (fmt), ## arg)
+#define doc_vdbg(fmt, arg...) dev_vdbg(docg3->dev, (fmt), ## arg)
+
+#define DEBUGFS_RO_ATTR(name, show_fct) \
+ static int name##_open(struct inode *inode, struct file *file) \
+ { return single_open(file, show_fct, inode->i_private); } \
+ static const struct file_operations name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = name##_open, \
+ .llseek = seq_lseek, \
+ .read = seq_read, \
+ .release = single_release \
+ };
+#endif
+
+/*
+ * Trace events part
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM docg3
+
+#if !defined(_MTD_DOCG3_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define _MTD_DOCG3_TRACE
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(docg3_io,
+ TP_PROTO(int op, int width, u16 reg, int val),
+ TP_ARGS(op, width, reg, val),
+ TP_STRUCT__entry(
+ __field(int, op)
+ __field(unsigned char, width)
+ __field(u16, reg)
+ __field(int, val)),
+ TP_fast_assign(
+ __entry->op = op;
+ __entry->width = width;
+ __entry->reg = reg;
+ __entry->val = val;),
+ TP_printk("docg3: %s%02d reg=%04x, val=%04x",
+ __entry->op ? "write" : "read", __entry->width,
+ __entry->reg, __entry->val)
+ );
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE docg3
+#include <trace/define_trace.h>
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
deleted file mode 100644
index d374603493a..00000000000
--- a/drivers/mtd/devices/docprobe.c
+++ /dev/null
@@ -1,337 +0,0 @@
-
-/* Linux driver for Disk-On-Chip devices */
-/* Probe routines common to all DoC devices */
-/* (C) 1999 Machine Vision Holdings, Inc. */
-/* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> */
-
-
-/* DOC_PASSIVE_PROBE:
- In order to ensure that the BIOS checksum is correct at boot time, and
- hence that the onboard BIOS extension gets executed, the DiskOnChip
- goes into reset mode when it is read sequentially: all registers
- return 0xff until the chip is woken up again by writing to the
- DOCControl register.
-
- Unfortunately, this means that the probe for the DiskOnChip is unsafe,
- because one of the first things it does is write to where it thinks
- the DOCControl register should be - which may well be shared memory
- for another device. I've had machines which lock up when this is
- attempted. Hence the possibility to do a passive probe, which will fail
- to detect a chip in reset mode, but is at least guaranteed not to lock
- the machine.
-
- If you have this problem, uncomment the following line:
-#define DOC_PASSIVE_PROBE
-*/
-
-
-/* DOC_SINGLE_DRIVER:
- Millennium driver has been merged into DOC2000 driver.
-
- The old Millennium-only driver has been retained just in case there
- are problems with the new code. If the combined driver doesn't work
- for you, you can try the old one by undefining DOC_SINGLE_DRIVER
- below and also enabling it in your configuration. If this fixes the
- problems, please send a report to the MTD mailing list at
- <linux-mtd@lists.infradead.org>.
-*/
-#define DOC_SINGLE_DRIVER
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-/* Where to look for the devices? */
-#ifndef CONFIG_MTD_DOCPROBE_ADDRESS
-#define CONFIG_MTD_DOCPROBE_ADDRESS 0
-#endif
-
-
-static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS;
-module_param(doc_config_location, ulong, 0);
-MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
-
-static unsigned long __initdata doc_locations[] = {
-#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
-#ifdef CONFIG_MTD_DOCPROBE_HIGH
- 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
- 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
- 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
- 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
- 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
-#else /* CONFIG_MTD_DOCPROBE_HIGH */
- 0xc8000, 0xca000, 0xcc000, 0xce000,
- 0xd0000, 0xd2000, 0xd4000, 0xd6000,
- 0xd8000, 0xda000, 0xdc000, 0xde000,
- 0xe0000, 0xe2000, 0xe4000, 0xe6000,
- 0xe8000, 0xea000, 0xec000, 0xee000,
-#endif /* CONFIG_MTD_DOCPROBE_HIGH */
-#else
-#warning Unknown architecture for DiskOnChip. No default probe locations defined
-#endif
- 0xffffffff };
-
-/* doccheck: Probe a given memory window to see if there's a DiskOnChip present */
-
-static inline int __init doccheck(void __iomem *potential, unsigned long physadr)
-{
- void __iomem *window=potential;
- unsigned char tmp, tmpb, tmpc, ChipID;
-#ifndef DOC_PASSIVE_PROBE
- unsigned char tmp2;
-#endif
-
- /* Routine copied from the Linux DOC driver */
-
-#ifdef CONFIG_MTD_DOCPROBE_55AA
- /* Check for 0x55 0xAA signature at beginning of window,
- this is no longer true once we remove the IPL (for Millennium */
- if (ReadDOC(window, Sig1) != 0x55 || ReadDOC(window, Sig2) != 0xaa)
- return 0;
-#endif /* CONFIG_MTD_DOCPROBE_55AA */
-
-#ifndef DOC_PASSIVE_PROBE
- /* It's not possible to cleanly detect the DiskOnChip - the
- * bootup procedure will put the device into reset mode, and
- * it's not possible to talk to it without actually writing
- * to the DOCControl register. So we store the current contents
- * of the DOCControl register's location, in case we later decide
- * that it's not a DiskOnChip, and want to put it back how we
- * found it.
- */
- tmp2 = ReadDOC(window, DOCControl);
-
- /* Reset the DiskOnChip ASIC */
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
- window, DOCControl);
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
- window, DOCControl);
-
- /* Enable the DiskOnChip ASIC */
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
- window, DOCControl);
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
- window, DOCControl);
-#endif /* !DOC_PASSIVE_PROBE */
-
- /* We need to read the ChipID register four times. For some
- newer DiskOnChip 2000 units, the first three reads will
- return the DiskOnChip Millennium ident. Don't ask. */
- ChipID = ReadDOC(window, ChipID);
-
- switch (ChipID) {
- case DOC_ChipID_Doc2k:
- /* Check the TOGGLE bit in the ECC register */
- tmp = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
- tmpb = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
- tmpc = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
- if (tmp != tmpb && tmp == tmpc)
- return ChipID;
- break;
-
- case DOC_ChipID_DocMil:
- /* Check for the new 2000 with Millennium ASIC */
- ReadDOC(window, ChipID);
- ReadDOC(window, ChipID);
- if (ReadDOC(window, ChipID) != DOC_ChipID_DocMil)
- ChipID = DOC_ChipID_Doc2kTSOP;
-
- /* Check the TOGGLE bit in the ECC register */
- tmp = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
- tmpb = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
- tmpc = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
- if (tmp != tmpb && tmp == tmpc)
- return ChipID;
- break;
-
- case DOC_ChipID_DocMilPlus16:
- case DOC_ChipID_DocMilPlus32:
- case 0:
- /* Possible Millennium+, need to do more checks */
-#ifndef DOC_PASSIVE_PROBE
- /* Possibly release from power down mode */
- for (tmp = 0; (tmp < 4); tmp++)
- ReadDOC(window, Mplus_Power);
-
- /* Reset the DiskOnChip ASIC */
- tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
- DOC_MODE_BDECT;
- WriteDOC(tmp, window, Mplus_DOCControl);
- WriteDOC(~tmp, window, Mplus_CtrlConfirm);
-
- mdelay(1);
- /* Enable the DiskOnChip ASIC */
- tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
- DOC_MODE_BDECT;
- WriteDOC(tmp, window, Mplus_DOCControl);
- WriteDOC(~tmp, window, Mplus_CtrlConfirm);
- mdelay(1);
-#endif /* !DOC_PASSIVE_PROBE */
-
- ChipID = ReadDOC(window, ChipID);
-
- switch (ChipID) {
- case DOC_ChipID_DocMilPlus16:
- case DOC_ChipID_DocMilPlus32:
- /* Check the TOGGLE bit in the toggle register */
- tmp = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
- tmpb = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
- tmpc = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
- if (tmp != tmpb && tmp == tmpc)
- return ChipID;
- default:
- break;
- }
- /* FALL TRHU */
-
- default:
-
-#ifdef CONFIG_MTD_DOCPROBE_55AA
- printk(KERN_DEBUG "Possible DiskOnChip with unknown ChipID %2.2X found at 0x%lx\n",
- ChipID, physadr);
-#endif
-#ifndef DOC_PASSIVE_PROBE
- /* Put back the contents of the DOCControl register, in case it's not
- * actually a DiskOnChip.
- */
- WriteDOC(tmp2, window, DOCControl);
-#endif
- return 0;
- }
-
- printk(KERN_WARNING "DiskOnChip failed TOGGLE test, dropping.\n");
-
-#ifndef DOC_PASSIVE_PROBE
- /* Put back the contents of the DOCControl register: it's not a DiskOnChip */
- WriteDOC(tmp2, window, DOCControl);
-#endif
- return 0;
-}
-
-static int docfound;
-
-extern void DoC2k_init(struct mtd_info *);
-extern void DoCMil_init(struct mtd_info *);
-extern void DoCMilPlus_init(struct mtd_info *);
-
-static void __init DoC_Probe(unsigned long physadr)
-{
- void __iomem *docptr;
- struct DiskOnChip *this;
- struct mtd_info *mtd;
- int ChipID;
- char namebuf[15];
- char *name = namebuf;
- void (*initroutine)(struct mtd_info *) = NULL;
-
- docptr = ioremap(physadr, DOC_IOREMAP_LEN);
-
- if (!docptr)
- return;
-
- if ((ChipID = doccheck(docptr, physadr))) {
- if (ChipID == DOC_ChipID_Doc2kTSOP) {
- /* Remove this at your own peril. The hardware driver works but nothing prevents you from erasing bad blocks */
- printk(KERN_NOTICE "Refusing to drive DiskOnChip 2000 TSOP until Bad Block Table is correctly supported by INFTL\n");
- iounmap(docptr);
- return;
- }
- docfound = 1;
- mtd = kmalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
-
- if (!mtd) {
- printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n");
- iounmap(docptr);
- return;
- }
-
- this = (struct DiskOnChip *)(&mtd[1]);
-
- memset((char *)mtd,0, sizeof(struct mtd_info));
- memset((char *)this, 0, sizeof(struct DiskOnChip));
-
- mtd->priv = this;
- this->virtadr = docptr;
- this->physadr = physadr;
- this->ChipID = ChipID;
- sprintf(namebuf, "with ChipID %2.2X", ChipID);
-
- switch(ChipID) {
- case DOC_ChipID_Doc2kTSOP:
- name="2000 TSOP";
- initroutine = symbol_request(DoC2k_init);
- break;
-
- case DOC_ChipID_Doc2k:
- name="2000";
- initroutine = symbol_request(DoC2k_init);
- break;
-
- case DOC_ChipID_DocMil:
- name="Millennium";
-#ifdef DOC_SINGLE_DRIVER
- initroutine = symbol_request(DoC2k_init);
-#else
- initroutine = symbol_request(DoCMil_init);
-#endif /* DOC_SINGLE_DRIVER */
- break;
-
- case DOC_ChipID_DocMilPlus16:
- case DOC_ChipID_DocMilPlus32:
- name="MillenniumPlus";
- initroutine = symbol_request(DoCMilPlus_init);
- break;
- }
-
- if (initroutine) {
- (*initroutine)(mtd);
- symbol_put_addr(initroutine);
- return;
- }
- printk(KERN_NOTICE "Cannot find driver for DiskOnChip %s at 0x%lX\n", name, physadr);
- kfree(mtd);
- }
- iounmap(docptr);
-}
-
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static int __init init_doc(void)
-{
- int i;
-
- if (doc_config_location) {
- printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
- DoC_Probe(doc_config_location);
- } else {
- for (i=0; (doc_locations[i] != 0xffffffff); i++) {
- DoC_Probe(doc_locations[i]);
- }
- }
- /* No banner message any more. Print a message if no DiskOnChip
- found, so the user knows we at least tried. */
- if (!docfound)
- printk(KERN_INFO "No recognised DiskOnChip devices found\n");
- return -EAGAIN;
-}
-
-module_init(init_doc);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("Probe code for DiskOnChip 2000 and Millennium devices");
-
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
new file mode 100644
index 00000000000..b4f61c7fc16
--- /dev/null
+++ b/drivers/mtd/devices/elm.c
@@ -0,0 +1,579 @@
+/*
+ * Error Location Module
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define DRIVER_NAME "omap-elm"
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/elm.h>
+
+#define ELM_SYSCONFIG 0x010
+#define ELM_IRQSTATUS 0x018
+#define ELM_IRQENABLE 0x01c
+#define ELM_LOCATION_CONFIG 0x020
+#define ELM_PAGE_CTRL 0x080
+#define ELM_SYNDROME_FRAGMENT_0 0x400
+#define ELM_SYNDROME_FRAGMENT_1 0x404
+#define ELM_SYNDROME_FRAGMENT_2 0x408
+#define ELM_SYNDROME_FRAGMENT_3 0x40c
+#define ELM_SYNDROME_FRAGMENT_4 0x410
+#define ELM_SYNDROME_FRAGMENT_5 0x414
+#define ELM_SYNDROME_FRAGMENT_6 0x418
+#define ELM_LOCATION_STATUS 0x800
+#define ELM_ERROR_LOCATION_0 0x880
+
+/* ELM Interrupt Status Register */
+#define INTR_STATUS_PAGE_VALID BIT(8)
+
+/* ELM Interrupt Enable Register */
+#define INTR_EN_PAGE_MASK BIT(8)
+
+/* ELM Location Configuration Register */
+#define ECC_BCH_LEVEL_MASK 0x3
+
+/* ELM syndrome */
+#define ELM_SYNDROME_VALID BIT(16)
+
+/* ELM_LOCATION_STATUS Register */
+#define ECC_CORRECTABLE_MASK BIT(8)
+#define ECC_NB_ERRORS_MASK 0x1f
+
+/* ELM_ERROR_LOCATION_0-15 Registers */
+#define ECC_ERROR_LOCATION_MASK 0x1fff
+
+#define ELM_ECC_SIZE 0x7ff
+
+#define SYNDROME_FRAGMENT_REG_SIZE 0x40
+#define ERROR_LOCATION_SIZE 0x100
+
+struct elm_registers {
+ u32 elm_irqenable;
+ u32 elm_sysconfig;
+ u32 elm_location_config;
+ u32 elm_page_ctrl;
+ u32 elm_syndrome_fragment_6[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_5[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_4[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_3[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_2[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_1[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_0[ERROR_VECTOR_MAX];
+};
+
+struct elm_info {
+ struct device *dev;
+ void __iomem *elm_base;
+ struct completion elm_completion;
+ struct list_head list;
+ enum bch_ecc bch_type;
+ struct elm_registers elm_regs;
+ int ecc_steps;
+ int ecc_syndrome_size;
+};
+
+static LIST_HEAD(elm_devices);
+
+static void elm_write_reg(struct elm_info *info, int offset, u32 val)
+{
+ writel(val, info->elm_base + offset);
+}
+
+static u32 elm_read_reg(struct elm_info *info, int offset)
+{
+ return readl(info->elm_base + offset);
+}
+
+/**
+ * elm_config - Configure ELM module
+ * @dev: ELM device
+ * @bch_type: Type of BCH ecc
+ */
+int elm_config(struct device *dev, enum bch_ecc bch_type,
+ int ecc_steps, int ecc_step_size, int ecc_syndrome_size)
+{
+ u32 reg_val;
+ struct elm_info *info = dev_get_drvdata(dev);
+
+ if (!info) {
+ dev_err(dev, "Unable to configure elm - device not probed?\n");
+ return -ENODEV;
+ }
+ /* ELM cannot detect ECC errors for chunks > 1KB */
+ if (ecc_step_size > ((ELM_ECC_SIZE + 1) / 2)) {
+ dev_err(dev, "unsupported config ecc-size=%d\n", ecc_step_size);
+ return -EINVAL;
+ }
+ /* ELM supports processing up to 8 error syndromes */
+ if (ecc_steps > ERROR_VECTOR_MAX) {
+ dev_err(dev, "unsupported config ecc-step=%d\n", ecc_steps);
+ return -EINVAL;
+ }
+
+ reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
+ elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
+ info->bch_type = bch_type;
+ info->ecc_steps = ecc_steps;
+ info->ecc_syndrome_size = ecc_syndrome_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(elm_config);
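+
+/*
+ * Illustrative sketch: a NAND controller driver would typically call
+ * elm_config() once it knows its ECC geometry, elm_dev being the ELM's
+ * struct device obtained by the caller. The numbers below (4 steps of
+ * 512 bytes with BCH8 and 13 syndrome bytes per step) are assumptions for
+ * the example, not values mandated by this driver:
+ *
+ *	err = elm_config(elm_dev, BCH8_ECC, 4, 512, 13);
+ *	if (err)
+ *		return err;
+ */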
+
+/**
+ * elm_configure_page_mode - Enable/Disable page mode
+ * @info: elm info
+ * @index: index number of syndrome fragment vector
+ * @enable: enable/disable flag for page mode
+ *
+ * Enable page mode for syndrome fragment index
+ */
+static void elm_configure_page_mode(struct elm_info *info, int index,
+ bool enable)
+{
+ u32 reg_val;
+
+ reg_val = elm_read_reg(info, ELM_PAGE_CTRL);
+ if (enable)
+ reg_val |= BIT(index); /* enable page mode */
+ else
+ reg_val &= ~BIT(index); /* disable page mode */
+
+ elm_write_reg(info, ELM_PAGE_CTRL, reg_val);
+}
+
+/**
+ * elm_load_syndrome - Load ELM syndrome reg
+ * @info: elm info
+ * @err_vec: elm error vectors
+ * @ecc: buffer with calculated ecc
+ *
+ * Load syndrome fragment registers with calculated ecc in reverse order.
+ */
+static void elm_load_syndrome(struct elm_info *info,
+ struct elm_errorvec *err_vec, u8 *ecc)
+{
+ int i, offset;
+ u32 val;
+
+ for (i = 0; i < info->ecc_steps; i++) {
+
+ /* Check error reported */
+ if (err_vec[i].error_reported) {
+ elm_configure_page_mode(info, i, true);
+ offset = ELM_SYNDROME_FRAGMENT_0 +
+ SYNDROME_FRAGMENT_REG_SIZE * i;
+ switch (info->bch_type) {
+ case BCH8_ECC:
+ /* syndrome fragment 0 = ecc[9-12B] */
+ val = cpu_to_be32(*(u32 *) &ecc[9]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[5-8B] */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[5]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 2 = ecc[1-4B] */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[1]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 3 = ecc[0B] */
+ offset += 4;
+ val = ecc[0];
+ elm_write_reg(info, offset, val);
+ break;
+ case BCH4_ECC:
+ /* syndrome fragment 0 = ecc[20-52b] bits */
+ val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
+ ((ecc[2] & 0xf) << 28);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[0-20b] bits */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
+ elm_write_reg(info, offset, val);
+ break;
+ case BCH16_ECC:
+ val = cpu_to_be32(*(u32 *) &ecc[22]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[18]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[14]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[10]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[6]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[2]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16;
+ elm_write_reg(info, offset, val);
+ break;
+ default:
+ pr_err("invalid config bch_type\n");
+ }
+ }
+
+ /* Update ecc pointer with ecc byte size */
+ ecc += info->ecc_syndrome_size;
+ }
+}
+
+/**
+ * elm_start_processing - start elm syndrome processing
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * Set the syndrome valid bit for the syndrome fragment registers that have
+ * been loaded. This enables the ELM module to start processing the syndrome
+ * vectors.
+ */
+static void elm_start_processing(struct elm_info *info,
+ struct elm_errorvec *err_vec)
+{
+ int i, offset;
+ u32 reg_val;
+
+ /*
+ * Set the syndrome vector valid bit, so that the ELM module
+ * processes the vectors for which an error is reported
+ */
+ for (i = 0; i < info->ecc_steps; i++) {
+ if (err_vec[i].error_reported) {
+ offset = ELM_SYNDROME_FRAGMENT_6 +
+ SYNDROME_FRAGMENT_REG_SIZE * i;
+ reg_val = elm_read_reg(info, offset);
+ reg_val |= ELM_SYNDROME_VALID;
+ elm_write_reg(info, offset, reg_val);
+ }
+ }
+}
+
+/**
+ * elm_error_correction - locate correctable error position
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * On completion of processing by the ELM module, the error location status
+ * register is updated with correctable/uncorrectable error information. In
+ * case of correctable errors, the number of errors is read from the ELM
+ * location status register and the error positions from the ELM error
+ * location registers.
+ */
+static void elm_error_correction(struct elm_info *info,
+ struct elm_errorvec *err_vec)
+{
+ int i, j, errors = 0;
+ int offset;
+ u32 reg_val;
+
+ for (i = 0; i < info->ecc_steps; i++) {
+
+ /* Check error reported */
+ if (err_vec[i].error_reported) {
+ offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i;
+ reg_val = elm_read_reg(info, offset);
+
+ /* Check correctable error or not */
+ if (reg_val & ECC_CORRECTABLE_MASK) {
+ offset = ELM_ERROR_LOCATION_0 +
+ ERROR_LOCATION_SIZE * i;
+
+ /* Read count of correctable errors */
+ err_vec[i].error_count = reg_val &
+ ECC_NB_ERRORS_MASK;
+
+ /* Update the error locations in error vector */
+ for (j = 0; j < err_vec[i].error_count; j++) {
+
+ reg_val = elm_read_reg(info, offset);
+ err_vec[i].error_loc[j] = reg_val &
+ ECC_ERROR_LOCATION_MASK;
+
+ /* Update error location register */
+ offset += 4;
+ }
+
+ errors += err_vec[i].error_count;
+ } else {
+ err_vec[i].error_uncorrectable = true;
+ }
+
+ /* Clearing interrupts for processed error vectors */
+ elm_write_reg(info, ELM_IRQSTATUS, BIT(i));
+
+ /* Disable page mode */
+ elm_configure_page_mode(info, i, false);
+ }
+ }
+}
+
+/**
+ * elm_decode_bch_error_page - Locate error position
+ * @dev: device pointer
+ * @ecc_calc: calculated ECC bytes from GPMC
+ * @err_vec: elm error vectors
+ *
+ * Called with one or more error-reported vectors; the vectors that actually
+ * contain errors must be flagged in err_vec[].error_reported.
+ */
+void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+ struct elm_errorvec *err_vec)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ u32 reg_val;
+
+ /* Enable page mode interrupt */
+ reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+ elm_write_reg(info, ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID);
+ elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK);
+
+ /* Load valid ecc byte to syndrome fragment register */
+ elm_load_syndrome(info, err_vec, ecc_calc);
+
+ /* Enable syndrome processing for which syndrome fragment is updated */
+ elm_start_processing(info, err_vec);
+
+ /* Wait for ELM module to finish locating error correction */
+ wait_for_completion(&info->elm_completion);
+
+ /* Disable page mode interrupt */
+ reg_val = elm_read_reg(info, ELM_IRQENABLE);
+ elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK);
+ elm_error_correction(info, err_vec);
+}
+EXPORT_SYMBOL(elm_decode_bch_error_page);
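+
+/*
+ * Illustrative sketch of the caller side (assumptions only: the error-vector
+ * array size, ecc_calc, elm_dev and the step_has_errors()/flip_bit() helpers
+ * are placeholders, not defined by this driver). The caller marks which steps
+ * need decoding, runs the decode, then reads back the located bit positions:
+ *
+ *	struct elm_errorvec err_vec[8] = { };
+ *	int i, j;
+ *
+ *	for (i = 0; i < nsteps; i++)
+ *		err_vec[i].error_reported = step_has_errors(i);
+ *	elm_decode_bch_error_page(elm_dev, ecc_calc, err_vec);
+ *	for (i = 0; i < nsteps; i++)
+ *		if (!err_vec[i].error_uncorrectable)
+ *			for (j = 0; j < err_vec[i].error_count; j++)
+ *				flip_bit(err_vec[i].error_loc[j]);
+ */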
+
+static irqreturn_t elm_isr(int this_irq, void *dev_id)
+{
+ u32 reg_val;
+ struct elm_info *info = dev_id;
+
+ reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+
+ /* All error vectors processed */
+ if (reg_val & INTR_STATUS_PAGE_VALID) {
+ elm_write_reg(info, ELM_IRQSTATUS,
+ reg_val & INTR_STATUS_PAGE_VALID);
+ complete(&info->elm_completion);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int elm_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res, *irq;
+ struct elm_info *info;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = &pdev->dev;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no irq resource defined\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ info->elm_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->elm_base))
+ return PTR_ERR(info->elm_base);
+
+ ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
+ pdev->name, info);
+ if (ret) {
+ dev_err(&pdev->dev, "failure requesting irq %i\n", irq->start);
+ return ret;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ if (pm_runtime_get_sync(&pdev->dev) < 0) {
+ ret = -EINVAL;
+ pm_runtime_disable(&pdev->dev);
+ dev_err(&pdev->dev, "can't enable clock\n");
+ return ret;
+ }
+
+ init_completion(&info->elm_completion);
+ INIT_LIST_HEAD(&info->list);
+ list_add(&info->list, &elm_devices);
+ platform_set_drvdata(pdev, info);
+ return ret;
+}
+
+static int elm_remove(struct platform_device *pdev)
+{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * elm_context_save
+ * saves the ELM configuration to preserve it across a hardware power-down
+ */
+static int elm_context_save(struct elm_info *info)
+{
+ struct elm_registers *regs = &info->elm_regs;
+ enum bch_ecc bch_type = info->bch_type;
+ u32 offset = 0, i;
+
+ regs->elm_irqenable = elm_read_reg(info, ELM_IRQENABLE);
+ regs->elm_sysconfig = elm_read_reg(info, ELM_SYSCONFIG);
+ regs->elm_location_config = elm_read_reg(info, ELM_LOCATION_CONFIG);
+ regs->elm_page_ctrl = elm_read_reg(info, ELM_PAGE_CTRL);
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ offset = i * SYNDROME_FRAGMENT_REG_SIZE;
+ switch (bch_type) {
+ case BCH16_ECC:
+ regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_6 + offset);
+ regs->elm_syndrome_fragment_5[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_5 + offset);
+ regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_4 + offset);
+ case BCH8_ECC:
+ regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_3 + offset);
+ regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_2 + offset);
+ case BCH4_ECC:
+ regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_1 + offset);
+ regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_0 + offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* ELM SYNDROME_VALID bit in SYNDROME_FRAGMENT_6[] needs
+ * to be saved for all BCH schemes */
+ regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_6 + offset);
+ }
+ return 0;
+}
+
+/**
+ * elm_context_restore
+ * writes the configuration saved during power-down back into the ELM registers
+ */
+static int elm_context_restore(struct elm_info *info)
+{
+ struct elm_registers *regs = &info->elm_regs;
+ enum bch_ecc bch_type = info->bch_type;
+ u32 offset = 0, i;
+
+ elm_write_reg(info, ELM_IRQENABLE, regs->elm_irqenable);
+ elm_write_reg(info, ELM_SYSCONFIG, regs->elm_sysconfig);
+ elm_write_reg(info, ELM_LOCATION_CONFIG, regs->elm_location_config);
+ elm_write_reg(info, ELM_PAGE_CTRL, regs->elm_page_ctrl);
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ offset = i * SYNDROME_FRAGMENT_REG_SIZE;
+ switch (bch_type) {
+ case BCH16_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
+ regs->elm_syndrome_fragment_6[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_5 + offset,
+ regs->elm_syndrome_fragment_5[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
+ regs->elm_syndrome_fragment_4[i]);
+ case BCH8_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
+ regs->elm_syndrome_fragment_3[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
+ regs->elm_syndrome_fragment_2[i]);
+ case BCH4_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
+ regs->elm_syndrome_fragment_1[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
+ regs->elm_syndrome_fragment_0[i]);
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* The ELM_SYNDROME_VALID bit must be written last to trigger the FSM */
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
+ regs->elm_syndrome_fragment_6[i] &
+ ELM_SYNDROME_VALID);
+ }
+ return 0;
+}
+
+static int elm_suspend(struct device *dev)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ elm_context_save(info);
+ pm_runtime_put_sync(dev);
+ return 0;
+}
+
+static int elm_resume(struct device *dev)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ pm_runtime_get_sync(dev);
+ elm_context_restore(info);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(elm_pm_ops, elm_suspend, elm_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id elm_of_match[] = {
+ { .compatible = "ti,am3352-elm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, elm_of_match);
+#endif
+
+static struct platform_driver elm_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(elm_of_match),
+ .pm = &elm_pm_ops,
+ },
+ .probe = elm_probe,
+ .remove = elm_remove,
+};
+
+module_platform_driver(elm_driver);
+
+MODULE_DESCRIPTION("ELM driver for BCH error correction");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_ALIAS("platform: elm");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 772a0ff89e0..82bd00af5cc 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -34,9 +34,6 @@
/* debugging */
//#define LART_DEBUG
-/* partition support */
-#define HAVE_PARTITIONS
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -44,9 +41,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
-#ifdef HAVE_PARTITIONS
#include <linux/mtd/partitions.h>
-#endif
#ifndef CONFIG_SA1100_LART
#error This is for LART architecture only
@@ -372,9 +367,6 @@ static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n", __func__, instr->addr, instr->len);
#endif
- /* sanity checks */
- if (instr->addr + instr->len > mtd->size) return (-EINVAL);
-
/*
* check that both start and end of the requested erase are
* aligned with the erasesize at the appropriate addresses.
@@ -445,10 +437,6 @@ static int flash_read (struct mtd_info *mtd,loff_t from,size_t len,size_t *retle
printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len);
#endif
- /* sanity checks */
- if (!len) return (0);
- if (from + len > mtd->size) return (-EINVAL);
-
/* we always read len bytes */
*retlen = len;
@@ -527,11 +515,8 @@ static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen
printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len);
#endif
- *retlen = 0;
-
/* sanity checks */
if (!len) return (0);
- if (to + len > mtd->size) return (-EINVAL);
/* first, we write a 0xFF.... padded byte until we reach a dword boundary */
if (to & (BUSWIDTH - 1))
@@ -598,7 +583,6 @@ static struct mtd_erase_region_info erase_regions[] = {
}
};
-#ifdef HAVE_PARTITIONS
static struct mtd_partition lart_partitions[] = {
/* blob */
{
@@ -619,7 +603,7 @@ static struct mtd_partition lart_partitions[] = {
.size = INITRD_LEN, /* MTDPART_SIZ_FULL */
}
};
-#endif
+#define NUM_PARTITIONS ARRAY_SIZE(lart_partitions)
static int __init lart_flash_init (void)
{
@@ -636,14 +620,15 @@ static int __init lart_flash_init (void)
mtd.name = module_name;
mtd.type = MTD_NORFLASH;
mtd.writesize = 1;
+ mtd.writebufsize = 4;
mtd.flags = MTD_CAP_NORFLASH;
mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
mtd.numeraseregions = ARRAY_SIZE(erase_regions);
mtd.eraseregions = erase_regions;
- mtd.erase = flash_erase;
- mtd.read = flash_read;
- mtd.write = flash_write;
+ mtd._erase = flash_erase;
+ mtd._read = flash_read;
+ mtd._write = flash_write;
mtd.owner = THIS_MODULE;
#ifdef LART_DEBUG
@@ -668,7 +653,6 @@ static int __init lart_flash_init (void)
result,mtd.eraseregions[result].erasesize,mtd.eraseregions[result].erasesize / 1024,
result,mtd.eraseregions[result].numblocks);
-#ifdef HAVE_PARTITIONS
printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions));
for (result = 0; result < ARRAY_SIZE(lart_partitions); result++)
@@ -681,25 +665,16 @@ static int __init lart_flash_init (void)
result,lart_partitions[result].offset,
result,lart_partitions[result].size,lart_partitions[result].size / 1024);
#endif
-#endif
-#ifndef HAVE_PARTITIONS
- result = mtd_device_register(&mtd, NULL, 0);
-#else
result = mtd_device_register(&mtd, lart_partitions,
ARRAY_SIZE(lart_partitions));
-#endif
return (result);
}
static void __exit lart_flash_exit (void)
{
-#ifndef HAVE_PARTITIONS
- mtd_device_unregister(&mtd);
-#else
mtd_device_unregister(&mtd);
-#endif
}
module_init (lart_flash_init);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 35180e475c4..ed7e0a1bed3 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -15,1021 +15,244 @@
*
*/
-#include <linux/init.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/math64.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/mod_devicetable.h>
-#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
+#include <linux/mtd/spi-nor.h>
-/* Flash opcodes. */
-#define OPCODE_WREN 0x06 /* Write enable */
-#define OPCODE_RDSR 0x05 /* Read status register */
-#define OPCODE_WRSR 0x01 /* Write status register 1 byte */
-#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */
-#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */
-#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
-#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
-#define OPCODE_BE_32K 0x52 /* Erase 32KiB block */
-#define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */
-#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
-#define OPCODE_RDID 0x9f /* Read JEDEC ID */
-
-/* Used for SST flashes only. */
-#define OPCODE_BP 0x02 /* Byte program */
-#define OPCODE_WRDI 0x04 /* Write disable */
-#define OPCODE_AAI_WP 0xad /* Auto address increment word program */
-
-/* Used for Macronix flashes only. */
-#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */
-#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */
-
-/* Used for Spansion flashes only. */
-#define OPCODE_BRWR 0x17 /* Bank register write */
-
-/* Status Register bits. */
-#define SR_WIP 1 /* Write in progress */
-#define SR_WEL 2 /* Write enable latch */
-/* meaning of other SR_* bits may differ between vendors */
-#define SR_BP0 4 /* Block protect 0 */
-#define SR_BP1 8 /* Block protect 1 */
-#define SR_BP2 0x10 /* Block protect 2 */
-#define SR_SRWD 0x80 /* SR write protect */
-
-/* Define max times to check status register before we give up. */
-#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
-#define MAX_CMD_SIZE 5
-
-#ifdef CONFIG_M25PXX_USE_FAST_READ
-#define OPCODE_READ OPCODE_FAST_READ
-#define FAST_READ_DUMMY_BYTE 1
-#else
-#define OPCODE_READ OPCODE_NORM_READ
-#define FAST_READ_DUMMY_BYTE 0
-#endif
-
-#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
-
-/****************************************************************************/
-
+#define MAX_CMD_SIZE 6
struct m25p {
struct spi_device *spi;
- struct mutex lock;
+ struct spi_nor spi_nor;
struct mtd_info mtd;
- unsigned partitioned:1;
- u16 page_size;
- u16 addr_width;
- u8 erase_opcode;
- u8 *command;
+ u8 command[MAX_CMD_SIZE];
};
-static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
-{
- return container_of(mtd, struct m25p, mtd);
-}
-
-/****************************************************************************/
-
-/*
- * Internal helper functions
- */
-
-/*
- * Read the status register, returning its value in the location
- * Return the status register value.
- * Returns negative if error occurred.
- */
-static int read_sr(struct m25p *flash)
-{
- ssize_t retval;
- u8 code = OPCODE_RDSR;
- u8 val;
-
- retval = spi_write_then_read(flash->spi, &code, 1, &val, 1);
-
- if (retval < 0) {
- dev_err(&flash->spi->dev, "error %d reading SR\n",
- (int) retval);
- return retval;
- }
-
- return val;
-}
-
-/*
- * Write status register 1 byte
- * Returns negative if error occurred.
- */
-static int write_sr(struct m25p *flash, u8 val)
-{
- flash->command[0] = OPCODE_WRSR;
- flash->command[1] = val;
-
- return spi_write(flash->spi, flash->command, 2);
-}
-
-/*
- * Set write enable latch with Write Enable command.
- * Returns negative if error occurred.
- */
-static inline int write_enable(struct m25p *flash)
-{
- u8 code = OPCODE_WREN;
-
- return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
-}
-
-/*
- * Send write disble instruction to the chip.
- */
-static inline int write_disable(struct m25p *flash)
-{
- u8 code = OPCODE_WRDI;
-
- return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
-}
-
-/*
- * Enable/disable 4-byte addressing mode.
- */
-static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
+static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
{
- switch (JEDEC_MFR(jedec_id)) {
- case CFI_MFR_MACRONIX:
- flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
- return spi_write(flash->spi, flash->command, 1);
- default:
- /* Spansion style */
- flash->command[0] = OPCODE_BRWR;
- flash->command[1] = enable << 7;
- return spi_write(flash->spi, flash->command, 2);
- }
-}
-
-/*
- * Service routine to read status register until ready, or timeout occurs.
- * Returns non-zero if error.
- */
-static int wait_till_ready(struct m25p *flash)
-{
- unsigned long deadline;
- int sr;
-
- deadline = jiffies + MAX_READY_WAIT_JIFFIES;
+ struct m25p *flash = nor->priv;
+ struct spi_device *spi = flash->spi;
+ int ret;
- do {
- if ((sr = read_sr(flash)) < 0)
- break;
- else if (!(sr & SR_WIP))
- return 0;
+ ret = spi_write_then_read(spi, &code, 1, val, len);
+ if (ret < 0)
+ dev_err(&spi->dev, "error %d reading %x\n", ret, code);
- cond_resched();
-
- } while (!time_after_eq(jiffies, deadline));
-
- return 1;
-}
-
-/*
- * Erase the whole flash memory
- *
- * Returns 0 if successful, non-zero otherwise.
- */
-static int erase_chip(struct m25p *flash)
-{
- DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %lldKiB\n",
- dev_name(&flash->spi->dev), __func__,
- (long long)(flash->mtd.size >> 10));
-
- /* Wait until finished previous write command. */
- if (wait_till_ready(flash))
- return 1;
-
- /* Send write enable, then erase commands. */
- write_enable(flash);
-
- /* Set up command buffer. */
- flash->command[0] = OPCODE_CHIP_ERASE;
-
- spi_write(flash->spi, flash->command, 1);
-
- return 0;
+ return ret;
}
-static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd)
+static void m25p_addr2cmd(struct spi_nor *nor, unsigned int addr, u8 *cmd)
{
/* opcode is in cmd[0] */
- cmd[1] = addr >> (flash->addr_width * 8 - 8);
- cmd[2] = addr >> (flash->addr_width * 8 - 16);
- cmd[3] = addr >> (flash->addr_width * 8 - 24);
- cmd[4] = addr >> (flash->addr_width * 8 - 32);
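+ /* pack the address big-endian after the opcode; only 1 + addr_width bytes are sent */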
+ cmd[1] = addr >> (nor->addr_width * 8 - 8);
+ cmd[2] = addr >> (nor->addr_width * 8 - 16);
+ cmd[3] = addr >> (nor->addr_width * 8 - 24);
+ cmd[4] = addr >> (nor->addr_width * 8 - 32);
}
-static int m25p_cmdsz(struct m25p *flash)
+static int m25p_cmdsz(struct spi_nor *nor)
{
- return 1 + flash->addr_width;
+ return 1 + nor->addr_width;
}
-/*
- * Erase one sector of flash memory at offset ``offset'' which is any
- * address within the sector which should be erased.
- *
- * Returns 0 if successful, non-zero otherwise.
- */
-static int erase_sector(struct m25p *flash, u32 offset)
+static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
+ int wr_en)
{
- DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n",
- dev_name(&flash->spi->dev), __func__,
- flash->mtd.erasesize / 1024, offset);
-
- /* Wait until finished previous write command. */
- if (wait_till_ready(flash))
- return 1;
-
- /* Send write enable, then erase commands. */
- write_enable(flash);
-
- /* Set up command buffer. */
- flash->command[0] = flash->erase_opcode;
- m25p_addr2cmd(flash, offset, flash->command);
+ struct m25p *flash = nor->priv;
+ struct spi_device *spi = flash->spi;
- spi_write(flash->spi, flash->command, m25p_cmdsz(flash));
+ flash->command[0] = opcode;
+ if (buf)
+ memcpy(&flash->command[1], buf, len);
- return 0;
+ return spi_write(spi, flash->command, len + 1);
}
-/****************************************************************************/
-
-/*
- * MTD implementation
- */
-
-/*
- * Erase an address range on the flash chip. The address range may extend
- * one or more erase sectors. Return an error is there is a problem erasing.
- */
-static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
+static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
{
- struct m25p *flash = mtd_to_m25p(mtd);
- u32 addr,len;
- uint32_t rem;
-
- DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%llx, len %lld\n",
- dev_name(&flash->spi->dev), __func__, "at",
- (long long)instr->addr, (long long)instr->len);
-
- /* sanity checks */
- if (instr->addr + instr->len > flash->mtd.size)
- return -EINVAL;
- div_u64_rem(instr->len, mtd->erasesize, &rem);
- if (rem)
- return -EINVAL;
-
- addr = instr->addr;
- len = instr->len;
-
- mutex_lock(&flash->lock);
-
- /* whole-chip erase? */
- if (len == flash->mtd.size) {
- if (erase_chip(flash)) {
- instr->state = MTD_ERASE_FAILED;
- mutex_unlock(&flash->lock);
- return -EIO;
- }
-
- /* REVISIT in some cases we could speed up erasing large regions
- * by using OPCODE_SE instead of OPCODE_BE_4K. We may have set up
- * to use "small sector erase", but that's not always optimal.
- */
-
- /* "sector"-at-a-time erase */
- } else {
- while (len) {
- if (erase_sector(flash, addr)) {
- instr->state = MTD_ERASE_FAILED;
- mutex_unlock(&flash->lock);
- return -EIO;
- }
-
- addr += mtd->erasesize;
- len -= mtd->erasesize;
- }
- }
-
- mutex_unlock(&flash->lock);
-
- instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
-
- return 0;
-}
-
-/*
- * Read an address range from the flash chip. The address range
- * may be any size provided it is within the physical boundaries.
- */
-static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct m25p *flash = mtd_to_m25p(mtd);
- struct spi_transfer t[2];
+ struct m25p *flash = nor->priv;
+ struct spi_device *spi = flash->spi;
+ struct spi_transfer t[2] = {};
struct spi_message m;
+ int cmd_sz = m25p_cmdsz(nor);
- DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
- dev_name(&flash->spi->dev), __func__, "from",
- (u32)from, len);
-
- /* sanity checks */
- if (!len)
- return 0;
+ spi_message_init(&m);
- if (from + len > flash->mtd.size)
- return -EINVAL;
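+ /* SST AAI continuation writes send only the opcode; the address auto-increments */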
+ if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+ cmd_sz = 1;
- spi_message_init(&m);
- memset(t, 0, (sizeof t));
+ flash->command[0] = nor->program_opcode;
+ m25p_addr2cmd(nor, to, flash->command);
- /* NOTE:
- * OPCODE_FAST_READ (if available) is faster.
- * Should add 1 byte DUMMY_BYTE.
- */
t[0].tx_buf = flash->command;
- t[0].len = m25p_cmdsz(flash) + FAST_READ_DUMMY_BYTE;
+ t[0].len = cmd_sz;
spi_message_add_tail(&t[0], &m);
- t[1].rx_buf = buf;
+ t[1].tx_buf = buf;
t[1].len = len;
spi_message_add_tail(&t[1], &m);
- /* Byte count starts at zero. */
- *retlen = 0;
+ spi_sync(spi, &m);
- mutex_lock(&flash->lock);
+ *retlen += m.actual_length - cmd_sz;
+}
- /* Wait till previous write/erase is done. */
- if (wait_till_ready(flash)) {
- /* REVISIT status return?? */
- mutex_unlock(&flash->lock);
- return 1;
+static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor)
+{
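+ /* number of data lines for the read payload; 0 lets the SPI core default to a single line */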
+ switch (nor->flash_read) {
+ case SPI_NOR_DUAL:
+ return 2;
+ case SPI_NOR_QUAD:
+ return 4;
+ default:
+ return 0;
}
-
- /* FIXME switch to OPCODE_FAST_READ. It's required for higher
- * clocks; and at this writing, every chip this driver handles
- * supports that opcode.
- */
-
- /* Set up the write data buffer. */
- flash->command[0] = OPCODE_READ;
- m25p_addr2cmd(flash, from, flash->command);
-
- spi_sync(flash->spi, &m);
-
- *retlen = m.actual_length - m25p_cmdsz(flash) - FAST_READ_DUMMY_BYTE;
-
- mutex_unlock(&flash->lock);
-
- return 0;
}
/*
- * Write an address range to the flash chip. Data must be written in
- * FLASH_PAGESIZE chunks. The address range may be any size provided
- * it is within the physical boundaries.
+ * Read an address range from the nor chip. The address range
+ * may be any size provided it is within the physical boundaries.
*/
-static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
{
- struct m25p *flash = mtd_to_m25p(mtd);
- u32 page_offset, page_size;
+ struct m25p *flash = nor->priv;
+ struct spi_device *spi = flash->spi;
struct spi_transfer t[2];
struct spi_message m;
+ int dummy = nor->read_dummy;
+ int ret;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
- dev_name(&flash->spi->dev), __func__, "to",
- (u32)to, len);
-
- *retlen = 0;
-
- /* sanity checks */
- if (!len)
- return(0);
-
- if (to + len > flash->mtd.size)
- return -EINVAL;
+ /* Wait till previous write/erase is done. */
+ ret = nor->wait_till_ready(nor);
+ if (ret)
+ return ret;
spi_message_init(&m);
memset(t, 0, (sizeof t));
+ flash->command[0] = nor->read_opcode;
+ m25p_addr2cmd(nor, from, flash->command);
+
t[0].tx_buf = flash->command;
- t[0].len = m25p_cmdsz(flash);
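+ /* the dummy bytes required by fast-read opcodes are clocked out with the command */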
+ t[0].len = m25p_cmdsz(nor) + dummy;
spi_message_add_tail(&t[0], &m);
- t[1].tx_buf = buf;
+ t[1].rx_buf = buf;
+ t[1].rx_nbits = m25p80_rx_nbits(nor);
+ t[1].len = len;
spi_message_add_tail(&t[1], &m);
- mutex_lock(&flash->lock);
-
- /* Wait until finished previous write command. */
- if (wait_till_ready(flash)) {
- mutex_unlock(&flash->lock);
- return 1;
- }
-
- write_enable(flash);
-
- /* Set up the opcode in the write buffer. */
- flash->command[0] = OPCODE_PP;
- m25p_addr2cmd(flash, to, flash->command);
-
- page_offset = to & (flash->page_size - 1);
-
- /* do all the bytes fit onto one page? */
- if (page_offset + len <= flash->page_size) {
- t[1].len = len;
-
- spi_sync(flash->spi, &m);
-
- *retlen = m.actual_length - m25p_cmdsz(flash);
- } else {
- u32 i;
-
- /* the size of data remaining on the first page */
- page_size = flash->page_size - page_offset;
-
- t[1].len = page_size;
- spi_sync(flash->spi, &m);
-
- *retlen = m.actual_length - m25p_cmdsz(flash);
-
- /* write everything in flash->page_size chunks */
- for (i = page_size; i < len; i += page_size) {
- page_size = len - i;
- if (page_size > flash->page_size)
- page_size = flash->page_size;
-
- /* write the next page to flash */
- m25p_addr2cmd(flash, to + i, flash->command);
-
- t[1].tx_buf = buf + i;
- t[1].len = page_size;
-
- wait_till_ready(flash);
-
- write_enable(flash);
-
- spi_sync(flash->spi, &m);
-
- *retlen += m.actual_length - m25p_cmdsz(flash);
- }
- }
-
- mutex_unlock(&flash->lock);
+ spi_sync(spi, &m);
+ *retlen = m.actual_length - m25p_cmdsz(nor) - dummy;
return 0;
}
-static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static int m25p80_erase(struct spi_nor *nor, loff_t offset)
{
- struct m25p *flash = mtd_to_m25p(mtd);
- struct spi_transfer t[2];
- struct spi_message m;
- size_t actual;
- int cmd_sz, ret;
-
- DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
- dev_name(&flash->spi->dev), __func__, "to",
- (u32)to, len);
-
- *retlen = 0;
-
- /* sanity checks */
- if (!len)
- return 0;
-
- if (to + len > flash->mtd.size)
- return -EINVAL;
-
- spi_message_init(&m);
- memset(t, 0, (sizeof t));
-
- t[0].tx_buf = flash->command;
- t[0].len = m25p_cmdsz(flash);
- spi_message_add_tail(&t[0], &m);
+ struct m25p *flash = nor->priv;
+ int ret;
- t[1].tx_buf = buf;
- spi_message_add_tail(&t[1], &m);
-
- mutex_lock(&flash->lock);
+ dev_dbg(nor->dev, "%dKiB at 0x%08x\n",
+ flash->mtd.erasesize / 1024, (u32)offset);
/* Wait until finished previous write command. */
- ret = wait_till_ready(flash);
+ ret = nor->wait_till_ready(nor);
if (ret)
- goto time_out;
-
- write_enable(flash);
-
- actual = to % 2;
- /* Start write from odd address. */
- if (actual) {
- flash->command[0] = OPCODE_BP;
- m25p_addr2cmd(flash, to, flash->command);
-
- /* write one byte. */
- t[1].len = 1;
- spi_sync(flash->spi, &m);
- ret = wait_till_ready(flash);
- if (ret)
- goto time_out;
- *retlen += m.actual_length - m25p_cmdsz(flash);
- }
- to += actual;
-
- flash->command[0] = OPCODE_AAI_WP;
- m25p_addr2cmd(flash, to, flash->command);
-
- /* Write out most of the data here. */
- cmd_sz = m25p_cmdsz(flash);
- for (; actual < len - 1; actual += 2) {
- t[0].len = cmd_sz;
- /* write two bytes. */
- t[1].len = 2;
- t[1].tx_buf = buf + actual;
+ return ret;
- spi_sync(flash->spi, &m);
- ret = wait_till_ready(flash);
- if (ret)
- goto time_out;
- *retlen += m.actual_length - cmd_sz;
- cmd_sz = 1;
- to += 2;
- }
- write_disable(flash);
- ret = wait_till_ready(flash);
+ /* Send write enable, then erase commands. */
+ ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
if (ret)
- goto time_out;
-
- /* Write out trailing byte if it exists. */
- if (actual != len) {
- write_enable(flash);
- flash->command[0] = OPCODE_BP;
- m25p_addr2cmd(flash, to, flash->command);
- t[0].len = m25p_cmdsz(flash);
- t[1].len = 1;
- t[1].tx_buf = buf + actual;
-
- spi_sync(flash->spi, &m);
- ret = wait_till_ready(flash);
- if (ret)
- goto time_out;
- *retlen += m.actual_length - m25p_cmdsz(flash);
- write_disable(flash);
- }
-
-time_out:
- mutex_unlock(&flash->lock);
- return ret;
-}
-
-/****************************************************************************/
-
-/*
- * SPI device driver setup and teardown
- */
-
-struct flash_info {
- /* JEDEC id zero means "no ID" (most older chips); otherwise it has
- * a high byte of zero plus three data bytes: the manufacturer id,
- * then a two byte device id.
- */
- u32 jedec_id;
- u16 ext_id;
-
- /* The size listed here is what works with OPCODE_SE, which isn't
- * necessarily called a "sector" by the vendor.
- */
- unsigned sector_size;
- u16 n_sectors;
-
- u16 page_size;
- u16 addr_width;
-
- u16 flags;
-#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
-#define M25P_NO_ERASE 0x02 /* No erase command needed */
-};
-
-#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
- ((kernel_ulong_t)&(struct flash_info) { \
- .jedec_id = (_jedec_id), \
- .ext_id = (_ext_id), \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = 256, \
- .flags = (_flags), \
- })
-
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \
- ((kernel_ulong_t)&(struct flash_info) { \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = (_page_size), \
- .addr_width = (_addr_width), \
- .flags = M25P_NO_ERASE, \
- })
-
-/* NOTE: double check command sets and memory organization when you add
- * more flash chips. This current list focusses on newer chips, which
- * have been converging on command sets which including JEDEC ID.
- */
-static const struct spi_device_id m25p_ids[] = {
- /* Atmel -- some are (confusingly) marketed as "DataFlash" */
- { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
- { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
-
- { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
- { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
-
- { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
- { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
- { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
- { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+ return ret;
- /* EON -- en25xxx */
- { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
- { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
- { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
-
- /* Intel/Numonyx -- xxxs33b */
- { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
- { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
- { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
-
- /* Macronix */
- { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
- { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
- { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
- { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
- { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
- { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
- { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
- { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
- { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
-
- /* Spansion -- single (large) sector size only, at least
- * for the chips listed here (without boot sectors).
- */
- { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
- { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
- { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
- { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
- { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) },
- { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
- { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
- { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) },
- { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) },
- { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
- { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
- { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
- { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
- { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
-
- /* SST -- large erase sizes are "overlays", "sectors" are 4K */
- { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) },
- { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K) },
- { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K) },
- { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K) },
- { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K) },
- { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K) },
- { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K) },
- { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K) },
-
- /* ST Microelectronics -- newer production may have feature updates */
- { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
- { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
- { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
- { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
- { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
- { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
- { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
- { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
- { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
-
- { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
- { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
- { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
- { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
- { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
- { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
- { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
- { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
- { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
-
- { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
- { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
- { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
-
- { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
- { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
-
- { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
-
- /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
- { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
- { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
- { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
- { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
- { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
-
- /* Catalyst / On Semiconductor -- non-JEDEC */
- { "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
- { "cat25c03", CAT25_INFO( 32, 8, 16, 2) },
- { "cat25c09", CAT25_INFO( 128, 8, 32, 2) },
- { "cat25c17", CAT25_INFO( 256, 8, 32, 2) },
- { "cat25128", CAT25_INFO(2048, 8, 64, 2) },
- { },
-};
-MODULE_DEVICE_TABLE(spi, m25p_ids);
-
-static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
-{
- int tmp;
- u8 code = OPCODE_RDID;
- u8 id[5];
- u32 jedec;
- u16 ext_jedec;
- struct flash_info *info;
-
- /* JEDEC also defines an optional "extended device information"
- * string for after vendor-specific data, after the three bytes
- * we use here. Supporting some chips might require using it.
- */
- tmp = spi_write_then_read(spi, &code, 1, id, 5);
- if (tmp < 0) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
- dev_name(&spi->dev), tmp);
- return ERR_PTR(tmp);
- }
- jedec = id[0];
- jedec = jedec << 8;
- jedec |= id[1];
- jedec = jedec << 8;
- jedec |= id[2];
+ /* Set up command buffer. */
+ flash->command[0] = nor->erase_opcode;
+ m25p_addr2cmd(nor, offset, flash->command);
- ext_jedec = id[3] << 8 | id[4];
+ spi_write(flash->spi, flash->command, m25p_cmdsz(nor));
- for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) {
- info = (void *)m25p_ids[tmp].driver_data;
- if (info->jedec_id == jedec) {
- if (info->ext_id != 0 && info->ext_id != ext_jedec)
- continue;
- return &m25p_ids[tmp];
- }
- }
- dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
- return ERR_PTR(-ENODEV);
+ return 0;
}
-
/*
* board specific setup should have ensured the SPI clock used here
* matches what the READ command supports, at least until this driver
* understands FAST_READ (for clocks over 25 MHz).
*/
-static int __devinit m25p_probe(struct spi_device *spi)
+static int m25p_probe(struct spi_device *spi)
{
- const struct spi_device_id *id = spi_get_device_id(spi);
+ struct mtd_part_parser_data ppdata;
struct flash_platform_data *data;
- struct m25p *flash;
- struct flash_info *info;
- unsigned i;
- struct mtd_partition *parts = NULL;
- int nr_parts = 0;
-
- /* Platform data helps sort out which chip type we have, as
- * well as how this board partitions it. If we don't have
- * a chip ID, try the JEDEC id commands; they'll work for most
- * newer chips, even if we don't recognize the particular chip.
- */
- data = spi->dev.platform_data;
- if (data && data->type) {
- const struct spi_device_id *plat_id;
-
- for (i = 0; i < ARRAY_SIZE(m25p_ids) - 1; i++) {
- plat_id = &m25p_ids[i];
- if (strcmp(data->type, plat_id->name))
- continue;
- break;
- }
-
- if (i < ARRAY_SIZE(m25p_ids) - 1)
- id = plat_id;
- else
- dev_warn(&spi->dev, "unrecognized id %s\n", data->type);
- }
+ struct m25p *flash;
+ struct spi_nor *nor;
+ enum read_mode mode = SPI_NOR_NORMAL;
+ int ret;
- info = (void *)id->driver_data;
-
- if (info->jedec_id) {
- const struct spi_device_id *jid;
-
- jid = jedec_probe(spi);
- if (IS_ERR(jid)) {
- return PTR_ERR(jid);
- } else if (jid != id) {
- /*
- * JEDEC knows better, so overwrite platform ID. We
- * can't trust partitions any longer, but we'll let
- * mtd apply them anyway, since some partitions may be
- * marked read-only, and we don't want to lose that
- * information, even if it's not 100% accurate.
- */
- dev_warn(&spi->dev, "found %s, expected %s\n",
- jid->name, id->name);
- id = jid;
- info = (void *)jid->driver_data;
- }
- }
-
- flash = kzalloc(sizeof *flash, GFP_KERNEL);
+ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
if (!flash)
return -ENOMEM;
- flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL);
- if (!flash->command) {
- kfree(flash);
- return -ENOMEM;
- }
- flash->spi = spi;
- mutex_init(&flash->lock);
- dev_set_drvdata(&spi->dev, flash);
+ nor = &flash->spi_nor;
- /*
- * Atmel, SST and Intel/Numonyx serial flash tend to power
- * up with the software protection bits set
- */
+ /* install the hooks */
+ nor->read = m25p80_read;
+ nor->write = m25p80_write;
+ nor->erase = m25p80_erase;
+ nor->write_reg = m25p80_write_reg;
+ nor->read_reg = m25p80_read_reg;
- if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL ||
- JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL ||
- JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) {
- write_enable(flash);
- write_sr(flash, 0);
- }
-
- if (data && data->name)
- flash->mtd.name = data->name;
- else
- flash->mtd.name = dev_name(&spi->dev);
-
- flash->mtd.type = MTD_NORFLASH;
- flash->mtd.writesize = 1;
- flash->mtd.flags = MTD_CAP_NORFLASH;
- flash->mtd.size = info->sector_size * info->n_sectors;
- flash->mtd.erase = m25p80_erase;
- flash->mtd.read = m25p80_read;
-
- /* sst flash chips use AAI word program */
- if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
- flash->mtd.write = sst_write;
- else
- flash->mtd.write = m25p80_write;
-
- /* prefer "small sector" erase if possible */
- if (info->flags & SECT_4K) {
- flash->erase_opcode = OPCODE_BE_4K;
- flash->mtd.erasesize = 4096;
- } else {
- flash->erase_opcode = OPCODE_SE;
- flash->mtd.erasesize = info->sector_size;
- }
-
- if (info->flags & M25P_NO_ERASE)
- flash->mtd.flags |= MTD_NO_ERASE;
-
- flash->mtd.dev.parent = &spi->dev;
- flash->page_size = info->page_size;
-
- if (info->addr_width)
- flash->addr_width = info->addr_width;
- else {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- if (flash->mtd.size > 0x1000000) {
- flash->addr_width = 4;
- set_4byte(flash, info->jedec_id, 1);
- } else
- flash->addr_width = 3;
- }
-
- dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
- (long long)flash->mtd.size >> 10);
-
- DEBUG(MTD_DEBUG_LEVEL2,
- "mtd .name = %s, .size = 0x%llx (%lldMiB) "
- ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
- flash->mtd.name,
- (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
- flash->mtd.erasesize, flash->mtd.erasesize / 1024,
- flash->mtd.numeraseregions);
-
- if (flash->mtd.numeraseregions)
- for (i = 0; i < flash->mtd.numeraseregions; i++)
- DEBUG(MTD_DEBUG_LEVEL2,
- "mtd.eraseregions[%d] = { .offset = 0x%llx, "
- ".erasesize = 0x%.8x (%uKiB), "
- ".numblocks = %d }\n",
- i, (long long)flash->mtd.eraseregions[i].offset,
- flash->mtd.eraseregions[i].erasesize,
- flash->mtd.eraseregions[i].erasesize / 1024,
- flash->mtd.eraseregions[i].numblocks);
-
-
- /* partitions should match sector boundaries; and it may be good to
- * use readonly partitions for writeprotected sectors (BP2..BP0).
- */
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[]
- = { "cmdlinepart", NULL, };
-
- nr_parts = parse_mtd_partitions(&flash->mtd,
- part_probes, &parts, 0);
- }
+ nor->dev = &spi->dev;
+ nor->mtd = &flash->mtd;
+ nor->priv = flash;
- if (nr_parts <= 0 && data && data->parts) {
- parts = data->parts;
- nr_parts = data->nr_parts;
- }
+ spi_set_drvdata(spi, flash);
+ flash->mtd.priv = nor;
+ flash->spi = spi;
-#ifdef CONFIG_MTD_OF_PARTS
- if (nr_parts <= 0 && spi->dev.of_node) {
- nr_parts = of_mtd_parse_partitions(&spi->dev,
- spi->dev.of_node, &parts);
- }
-#endif
+ if (spi->mode & SPI_RX_QUAD)
+ mode = SPI_NOR_QUAD;
+ else if (spi->mode & SPI_RX_DUAL)
+ mode = SPI_NOR_DUAL;
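+ /* detect the flash chip and fill in the spi_nor/mtd parameters */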
+ ret = spi_nor_scan(nor, spi_get_device_id(spi), mode);
+ if (ret)
+ return ret;
- if (nr_parts > 0) {
- for (i = 0; i < nr_parts; i++) {
- DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
- "{.name = %s, .offset = 0x%llx, "
- ".size = 0x%llx (%lldKiB) }\n",
- i, parts[i].name,
- (long long)parts[i].offset,
- (long long)parts[i].size,
- (long long)(parts[i].size >> 10));
- }
- flash->partitioned = 1;
- }
+ data = dev_get_platdata(&spi->dev);
+ ppdata.of_node = spi->dev.of_node;
- return mtd_device_register(&flash->mtd, parts, nr_parts) == 1 ?
- -ENODEV : 0;
+ return mtd_device_parse_register(&flash->mtd, NULL, &ppdata,
+ data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
}
-static int __devexit m25p_remove(struct spi_device *spi)
+static int m25p_remove(struct spi_device *spi)
{
- struct m25p *flash = dev_get_drvdata(&spi->dev);
- int status;
+ struct m25p *flash = spi_get_drvdata(spi);
/* Clean up MTD stuff. */
- status = mtd_device_unregister(&flash->mtd);
- if (status == 0) {
- kfree(flash->command);
- kfree(flash);
- }
- return 0;
+ return mtd_device_unregister(&flash->mtd);
}
static struct spi_driver m25p80_driver = {
.driver = {
.name = "m25p80",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
- .id_table = m25p_ids,
+ .id_table = spi_nor_ids,
.probe = m25p_probe,
- .remove = __devexit_p(m25p_remove),
+ .remove = m25p_remove,
/* REVISIT: many of these chips have deep power-down modes, which
* should clearly be entered on suspend() to minimize power use.
@@ -1037,21 +260,7 @@ static struct spi_driver m25p80_driver = {
*/
};
-
-static int __init m25p80_init(void)
-{
- return spi_register_driver(&m25p80_driver);
-}
-
-
-static void __exit m25p80_exit(void)
-{
- spi_unregister_driver(&m25p80_driver);
-}
-
-
-module_init(m25p80_init);
-module_exit(m25p80_exit);
+module_spi_driver(m25p80_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mike Lavender");
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 8423fb6d4f2..5c8b322ba90 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -59,12 +59,8 @@ static int ms02nv_read(struct mtd_info *mtd, loff_t from,
{
struct ms02nv_private *mp = mtd->priv;
- if (from + len > mtd->size)
- return -EINVAL;
-
memcpy(buf, mp->uaddr + from, len);
*retlen = len;
-
return 0;
}
@@ -73,12 +69,8 @@ static int ms02nv_write(struct mtd_info *mtd, loff_t to,
{
struct ms02nv_private *mp = mtd->priv;
- if (to + len > mtd->size)
- return -EINVAL;
-
memcpy(mp->uaddr + to, buf, len);
*retlen = len;
-
return 0;
}
@@ -213,10 +205,10 @@ static int __init ms02nv_init_one(ulong addr)
mtd->type = MTD_RAM;
mtd->flags = MTD_CAP_RAM;
mtd->size = fixsize;
- mtd->name = (char *)ms02nv_name;
+ mtd->name = ms02nv_name;
mtd->owner = THIS_MODULE;
- mtd->read = ms02nv_read;
- mtd->write = ms02nv_write;
+ mtd->_read = ms02nv_read;
+ mtd->_write = ms02nv_write;
mtd->writesize = 1;
ret = -EIO;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 13749d458a3..dd22ce2cc9a 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -10,13 +10,14 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/math64.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
@@ -24,7 +25,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-
/*
* DataFlash is a kind of SPI flash. Most AT45 chips have two buffers in
* each chip, which may be used for double buffered I/O; but this driver
@@ -87,8 +87,6 @@ struct dataflash {
uint8_t command[4];
char name[24];
- unsigned partitioned:1;
-
unsigned short page_offset; /* offset in flash address */
unsigned int page_size; /* of bytes per page */
@@ -98,6 +96,14 @@ struct dataflash {
struct mtd_info mtd;
};
+#ifdef CONFIG_OF
+static const struct of_device_id dataflash_dt_ids[] = {
+ { .compatible = "atmel,at45", },
+ { .compatible = "atmel,dataflash", },
+ { /* sentinel */ }
+};
+#endif
+
/* ......................................................................... */
/*
@@ -122,7 +128,7 @@ static int dataflash_waitready(struct spi_device *spi)
for (;;) {
status = dataflash_status(spi);
if (status < 0) {
- DEBUG(MTD_DEBUG_LEVEL1, "%s: status %d?\n",
+ pr_debug("%s: status %d?\n",
dev_name(&spi->dev), status);
status = 0;
}
@@ -149,13 +155,10 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
uint8_t *command;
uint32_t rem;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%llx len 0x%llx\n",
+ pr_debug("%s: erase addr=0x%llx len 0x%llx\n",
dev_name(&spi->dev), (long long)instr->addr,
(long long)instr->len);
- /* Sanity checks */
- if (instr->addr + instr->len > mtd->size)
- return -EINVAL;
div_u64_rem(instr->len, priv->page_size, &rem);
if (rem)
return -EINVAL;
@@ -187,7 +190,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
command[2] = (uint8_t)(pageaddr >> 8);
command[3] = 0;
- DEBUG(MTD_DEBUG_LEVEL3, "ERASE %s: (%x) %x %x %x [%i]\n",
+ pr_debug("ERASE %s: (%x) %x %x %x [%i]\n",
do_block ? "block" : "page",
command[0], command[1], command[2], command[3],
pageaddr);
@@ -238,16 +241,8 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
uint8_t *command;
int status;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n",
- dev_name(&priv->spi->dev), (unsigned)from, (unsigned)(from + len));
-
- *retlen = 0;
-
- /* Sanity checks */
- if (!len)
- return 0;
- if (from + len > mtd->size)
- return -EINVAL;
+ pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev),
+ (unsigned)from, (unsigned)(from + len));
/* Calculate flash page/byte address */
addr = (((unsigned)from / priv->page_size) << priv->page_offset)
@@ -255,7 +250,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
command = priv->command;
- DEBUG(MTD_DEBUG_LEVEL3, "READ: (%x) %x %x %x\n",
+ pr_debug("READ: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
spi_message_init(&msg);
@@ -287,7 +282,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
*retlen = msg.actual_length - 8;
status = 0;
} else
- DEBUG(MTD_DEBUG_LEVEL1, "%s: read %x..%x --> %d\n",
+ pr_debug("%s: read %x..%x --> %d\n",
dev_name(&priv->spi->dev),
(unsigned)from, (unsigned)(from + len),
status);
@@ -314,17 +309,9 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
int status = -EINVAL;
uint8_t *command;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n",
+ pr_debug("%s: write 0x%x..0x%x\n",
dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
- *retlen = 0;
-
- /* Sanity checks */
- if (!len)
- return 0;
- if ((to + len) > mtd->size)
- return -EINVAL;
-
spi_message_init(&msg);
x[0].tx_buf = command = priv->command;
@@ -340,7 +327,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
mutex_lock(&priv->lock);
while (remaining > 0) {
- DEBUG(MTD_DEBUG_LEVEL3, "write @ %i:%i len=%i\n",
+ pr_debug("write @ %i:%i len=%i\n",
pageaddr, offset, writelen);
/* REVISIT:
@@ -368,12 +355,12 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
command[2] = (addr & 0x0000FF00) >> 8;
command[3] = 0;
- DEBUG(MTD_DEBUG_LEVEL3, "TRANSFER: (%x) %x %x %x\n",
+ pr_debug("TRANSFER: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
status = spi_sync(spi, &msg);
if (status < 0)
- DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n",
+ pr_debug("%s: xfer %u -> %d\n",
dev_name(&spi->dev), addr, status);
(void) dataflash_waitready(priv->spi);
@@ -386,7 +373,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
command[2] = (addr & 0x0000FF00) >> 8;
command[3] = (addr & 0x000000FF);
- DEBUG(MTD_DEBUG_LEVEL3, "PROGRAM: (%x) %x %x %x\n",
+ pr_debug("PROGRAM: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
x[1].tx_buf = writebuf;
@@ -395,7 +382,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
status = spi_sync(spi, &msg);
spi_transfer_del(x + 1);
if (status < 0)
- DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n",
+ pr_debug("%s: pgm %u/%u -> %d\n",
dev_name(&spi->dev), addr, writelen, status);
(void) dataflash_waitready(priv->spi);
@@ -410,12 +397,12 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
command[2] = (addr & 0x0000FF00) >> 8;
command[3] = 0;
- DEBUG(MTD_DEBUG_LEVEL3, "COMPARE: (%x) %x %x %x\n",
+ pr_debug("COMPARE: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
status = spi_sync(spi, &msg);
if (status < 0)
- DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n",
+ pr_debug("%s: compare %u -> %d\n",
dev_name(&spi->dev), addr, status);
status = dataflash_waitready(priv->spi);
@@ -452,8 +439,8 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
#ifdef CONFIG_MTD_DATAFLASH_OTP
-static int dataflash_get_otp_info(struct mtd_info *mtd,
- struct otp_info *info, size_t len)
+static int dataflash_get_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *info)
{
/* Report both blocks as identical: bytes 0..64, locked.
* Unless the user block changed from all-ones, we can't
@@ -462,7 +449,8 @@ static int dataflash_get_otp_info(struct mtd_info *mtd,
info->start = 0;
info->length = 64;
info->locked = 1;
- return sizeof(*info);
+ *retlen = sizeof(*info);
+ return 0;
}
static ssize_t otp_read(struct spi_device *spi, unsigned base,
@@ -479,8 +467,6 @@ static ssize_t otp_read(struct spi_device *spi, unsigned base,
if ((off + len) > 64)
len = 64 - off;
- if (len == 0)
- return len;
spi_message_init(&m);
@@ -556,14 +542,18 @@ static int dataflash_write_user_otp(struct mtd_info *mtd,
struct dataflash *priv = mtd->priv;
int status;
- if (len > 64)
- return -EINVAL;
+ if (from >= 64) {
+ /*
+ * The write starts beyond the end of OTP memory;
+ * no data can be written.
+ */
+ *retlen = 0;
+ return 0;
+ }
- /* Strictly speaking, we *could* truncate the write ... but
- * let's not do that for the only write that's ever possible.
- */
+ /* Truncate the write to fit into OTP memory. */
if ((from + len) > 64)
- return -EINVAL;
+ len = 64 - from;
/* OUT: OP_WRITE_SECURITY, 3 zeroes, 64 data-or-zero bytes
* IN: ignore all
@@ -600,16 +590,16 @@ static int dataflash_write_user_otp(struct mtd_info *mtd,
static char *otp_setup(struct mtd_info *device, char revision)
{
- device->get_fact_prot_info = dataflash_get_otp_info;
- device->read_fact_prot_reg = dataflash_read_fact_otp;
- device->get_user_prot_info = dataflash_get_otp_info;
- device->read_user_prot_reg = dataflash_read_user_otp;
+ device->_get_fact_prot_info = dataflash_get_otp_info;
+ device->_read_fact_prot_reg = dataflash_read_fact_otp;
+ device->_get_user_prot_info = dataflash_get_otp_info;
+ device->_read_user_prot_reg = dataflash_read_user_otp;
/* rev c parts (at45db321c and at45db1281 only!) use a
* different write procedure; not (yet?) implemented.
*/
if (revision > 'c')
- device->write_user_prot_reg = dataflash_write_user_otp;
+ device->_write_user_prot_reg = dataflash_write_user_otp;
return ", OTP";
}
@@ -628,17 +618,15 @@ static char *otp_setup(struct mtd_info *device, char revision)
/*
* Register DataFlash device with MTD subsystem.
*/
-static int __devinit
-add_dataflash_otp(struct spi_device *spi, char *name,
- int nr_pages, int pagesize, int pageoffset, char revision)
+static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
+ int pagesize, int pageoffset, char revision)
{
struct dataflash *priv;
struct mtd_info *device;
- struct flash_platform_data *pdata = spi->dev.platform_data;
+ struct mtd_part_parser_data ppdata;
+ struct flash_platform_data *pdata = dev_get_platdata(&spi->dev);
char *otp_tag = "";
int err = 0;
- struct mtd_partition *parts;
- int nr_parts = 0;
priv = kzalloc(sizeof *priv, GFP_KERNEL);
if (!priv)
@@ -662,9 +650,9 @@ add_dataflash_otp(struct spi_device *spi, char *name,
device->owner = THIS_MODULE;
device->type = MTD_DATAFLASH;
device->flags = MTD_WRITEABLE;
- device->erase = dataflash_erase;
- device->read = dataflash_read;
- device->write = dataflash_write;
+ device->_erase = dataflash_erase;
+ device->_read = dataflash_read;
+ device->_write = dataflash_write;
device->priv = priv;
device->dev.parent = &spi->dev;
@@ -675,41 +663,22 @@ add_dataflash_otp(struct spi_device *spi, char *name,
dev_info(&spi->dev, "%s (%lld KBytes) pagesize %d bytes%s\n",
name, (long long)((device->size + 1023) >> 10),
pagesize, otp_tag);
- dev_set_drvdata(&spi->dev, priv);
-
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[] = { "cmdlinepart", NULL, };
-
- nr_parts = parse_mtd_partitions(device, part_probes, &parts,
- 0);
- }
-
- if (nr_parts <= 0 && pdata && pdata->parts) {
- parts = pdata->parts;
- nr_parts = pdata->nr_parts;
- }
-
- if (nr_parts > 0) {
- priv->partitioned = 1;
- err = mtd_device_register(device, parts, nr_parts);
- goto out;
- }
+ spi_set_drvdata(spi, priv);
- if (mtd_device_register(device, NULL, 0) == 1)
- err = -ENODEV;
+ ppdata.of_node = spi->dev.of_node;
+ err = mtd_device_parse_register(device, NULL, &ppdata,
+ pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
-out:
if (!err)
return 0;
- dev_set_drvdata(&spi->dev, NULL);
kfree(priv);
return err;
}
-static inline int __devinit
-add_dataflash(struct spi_device *spi, char *name,
- int nr_pages, int pagesize, int pageoffset)
+static inline int add_dataflash(struct spi_device *spi, char *name,
+ int nr_pages, int pagesize, int pageoffset)
{
return add_dataflash_otp(spi, name, nr_pages, pagesize,
pageoffset, 0);
@@ -733,7 +702,7 @@ struct flash_info {
#define IS_POW2PS 0x0001 /* uses 2^N byte pages */
};
-static struct flash_info __devinitdata dataflash_data [] = {
+static struct flash_info dataflash_data[] = {
/*
* NOTE: chips with SUP_POW2PS (rev D and up) need two entries,
@@ -768,7 +737,7 @@ static struct flash_info __devinitdata dataflash_data [] = {
{ "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
};
-static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
+static struct flash_info *jedec_probe(struct spi_device *spi)
{
int tmp;
uint8_t code = OP_READ_ID;
@@ -787,7 +756,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
*/
tmp = spi_write_then_read(spi, &code, 1, id, 3);
if (tmp < 0) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
+ pr_debug("%s: error %d reading JEDEC ID\n",
dev_name(&spi->dev), tmp);
return ERR_PTR(tmp);
}
@@ -804,7 +773,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
tmp < ARRAY_SIZE(dataflash_data);
tmp++, info++) {
if (info->jedec_id == jedec) {
- DEBUG(MTD_DEBUG_LEVEL1, "%s: OTP, sector protect%s\n",
+ pr_debug("%s: OTP, sector protect%s\n",
dev_name(&spi->dev),
(info->flags & SUP_POW2PS)
? ", binary pagesize" : ""
@@ -812,8 +781,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
if (info->flags & SUP_POW2PS) {
status = dataflash_status(spi);
if (status < 0) {
- DEBUG(MTD_DEBUG_LEVEL1,
- "%s: status error %d\n",
+ pr_debug("%s: status error %d\n",
dev_name(&spi->dev), status);
return ERR_PTR(status);
}
@@ -852,7 +820,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
* AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11
* AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11
*/
-static int __devinit dataflash_probe(struct spi_device *spi)
+static int dataflash_probe(struct spi_device *spi)
{
int status;
struct flash_info *info;
@@ -878,7 +846,7 @@ static int __devinit dataflash_probe(struct spi_device *spi)
*/
status = dataflash_status(spi);
if (status <= 0 || status == 0xff) {
- DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n",
+ pr_debug("%s: status error %d\n",
dev_name(&spi->dev), status);
if (status == 0 || status == 0xff)
status = -ENODEV;
@@ -914,58 +882,45 @@ static int __devinit dataflash_probe(struct spi_device *spi)
break;
/* obsolete AT45DB1282 not (yet?) supported */
default:
- DEBUG(MTD_DEBUG_LEVEL1, "%s: unsupported device (%x)\n",
- dev_name(&spi->dev), status & 0x3c);
+ dev_info(&spi->dev, "unsupported device (%x)\n",
+ status & 0x3c);
status = -ENODEV;
}
if (status < 0)
- DEBUG(MTD_DEBUG_LEVEL1, "%s: add_dataflash --> %d\n",
- dev_name(&spi->dev), status);
+ pr_debug("%s: add_dataflash --> %d\n", dev_name(&spi->dev),
+ status);
return status;
}
-static int __devexit dataflash_remove(struct spi_device *spi)
+static int dataflash_remove(struct spi_device *spi)
{
- struct dataflash *flash = dev_get_drvdata(&spi->dev);
+ struct dataflash *flash = spi_get_drvdata(spi);
int status;
- DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev));
+ pr_debug("%s: remove\n", dev_name(&spi->dev));
status = mtd_device_unregister(&flash->mtd);
- if (status == 0) {
- dev_set_drvdata(&spi->dev, NULL);
+ if (status == 0)
kfree(flash);
- }
return status;
}
static struct spi_driver dataflash_driver = {
.driver = {
.name = "mtd_dataflash",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(dataflash_dt_ids),
},
.probe = dataflash_probe,
- .remove = __devexit_p(dataflash_remove),
+ .remove = dataflash_remove,
/* FIXME: investigate suspend and resume... */
};
-static int __init dataflash_init(void)
-{
- return spi_register_driver(&dataflash_driver);
-}
-module_init(dataflash_init);
-
-static void __exit dataflash_exit(void)
-{
- spi_unregister_driver(&dataflash_driver);
-}
-module_exit(dataflash_exit);
-
+module_spi_driver(dataflash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrew Victor, David Brownell");
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 2562689ba6b..8e285089229 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -34,34 +34,23 @@ static struct mtd_info *mtd_info;
static int ram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
- if (instr->addr + instr->len > mtd->size)
- return -EINVAL;
-
memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
-
instr->state = MTD_ERASE_DONE;
mtd_erase_callback(instr);
-
return 0;
}
static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
- if (from + len > mtd->size)
- return -EINVAL;
-
- /* can we return a physical address with this driver? */
- if (phys)
- return -EINVAL;
-
*virt = mtd->priv + from;
*retlen = len;
return 0;
}
-static void ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
+ return 0;
}
/*
@@ -80,11 +69,7 @@ static unsigned long ram_get_unmapped_area(struct mtd_info *mtd,
static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
- if (from + len > mtd->size)
- return -EINVAL;
-
memcpy(buf, mtd->priv + from, len);
-
*retlen = len;
return 0;
}
@@ -92,11 +77,7 @@ static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
- if (to + len > mtd->size)
- return -EINVAL;
-
memcpy((char *)mtd->priv + to, buf, len);
-
*retlen = len;
return 0;
}
@@ -111,7 +92,7 @@ static void __exit cleanup_mtdram(void)
}
int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
- unsigned long size, char *name)
+ unsigned long size, const char *name)
{
memset(mtd, 0, sizeof(*mtd));
@@ -126,12 +107,12 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
mtd->priv = mapped_address;
mtd->owner = THIS_MODULE;
- mtd->erase = ram_erase;
- mtd->point = ram_point;
- mtd->unpoint = ram_unpoint;
- mtd->get_unmapped_area = ram_get_unmapped_area;
- mtd->read = ram_read;
- mtd->write = ram_write;
+ mtd->_erase = ram_erase;
+ mtd->_point = ram_point;
+ mtd->_unpoint = ram_unpoint;
+ mtd->_get_unmapped_area = ram_get_unmapped_area;
+ mtd->_read = ram_read;
+ mtd->_write = ram_write;
if (mtd_device_register(mtd, NULL, 0))
return -EIO;
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 23423bd00b0..2cceebfb251 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -33,45 +33,33 @@ struct phram_mtd_list {
static LIST_HEAD(phram_list);
-
static int phram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
u_char *start = mtd->priv;
- if (instr->addr + instr->len > mtd->size)
- return -EINVAL;
-
memset(start + instr->addr, 0xff, instr->len);
- /* This'll catch a few races. Free the thing before returning :)
+ /*
+ * This'll catch a few races. Free the thing before returning :)
* I don't feel at all ashamed. This kind of thing is possible anyway
* with flash, but unlikely.
*/
-
instr->state = MTD_ERASE_DONE;
-
mtd_erase_callback(instr);
-
return 0;
}
static int phram_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
- if (from + len > mtd->size)
- return -EINVAL;
-
- /* can we return a physical address with this driver? */
- if (phys)
- return -EINVAL;
-
*virt = mtd->priv + from;
*retlen = len;
return 0;
}
-static void phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
+ return 0;
}
static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -79,14 +67,7 @@ static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
{
u_char *start = mtd->priv;
- if (from >= mtd->size)
- return -EINVAL;
-
- if (len > mtd->size - from)
- len = mtd->size - from;
-
memcpy(buf, start + from, len);
-
*retlen = len;
return 0;
}
@@ -96,20 +77,11 @@ static int phram_write(struct mtd_info *mtd, loff_t to, size_t len,
{
u_char *start = mtd->priv;
- if (to >= mtd->size)
- return -EINVAL;
-
- if (len > mtd->size - to)
- len = mtd->size - to;
-
memcpy(start + to, buf, len);
-
*retlen = len;
return 0;
}
-
-
static void unregister_devices(void)
{
struct phram_mtd_list *this, *safe;
@@ -122,7 +94,7 @@ static void unregister_devices(void)
}
}
-static int register_device(char *name, unsigned long start, unsigned long len)
+static int register_device(char *name, phys_addr_t start, size_t len)
{
struct phram_mtd_list *new;
int ret = -ENOMEM;
@@ -142,11 +114,11 @@ static int register_device(char *name, unsigned long start, unsigned long len)
new->mtd.name = name;
new->mtd.size = len;
new->mtd.flags = MTD_CAP_RAM;
- new->mtd.erase = phram_erase;
- new->mtd.point = phram_point;
- new->mtd.unpoint = phram_unpoint;
- new->mtd.read = phram_read;
- new->mtd.write = phram_write;
+ new->mtd._erase = phram_erase;
+ new->mtd._point = phram_point;
+ new->mtd._unpoint = phram_unpoint;
+ new->mtd._read = phram_read;
+ new->mtd._write = phram_write;
new->mtd.owner = THIS_MODULE;
new->mtd.type = MTD_RAM;
new->mtd.erasesize = PAGE_SIZE;
@@ -169,35 +141,35 @@ out0:
return ret;
}
-static int ustrtoul(const char *cp, char **endp, unsigned int base)
+static int parse_num64(uint64_t *num64, char *token)
{
- unsigned long result = simple_strtoul(cp, endp, base);
-
- switch (**endp) {
- case 'G':
- result *= 1024;
- case 'M':
- result *= 1024;
- case 'k':
- result *= 1024;
+ size_t len;
+ int shift = 0;
+ int ret;
+
+ len = strlen(token);
/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
- if ((*endp)[1] == 'i')
- (*endp) += 2;
+ if (len > 2) {
+ if (token[len - 1] == 'i') {
+ switch (token[len - 2]) {
+ case 'G':
+ shift += 10;
+ case 'M':
+ shift += 10;
+ case 'k':
+ shift += 10;
+ token[len - 2] = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
}
- return result;
-}
-
-static int parse_num32(uint32_t *num32, const char *token)
-{
- char *endp;
- unsigned long n;
- n = ustrtoul(token, &endp, 0);
- if (*endp)
- return -EINVAL;
+ ret = kstrtou64(token, 0, num64);
+ *num64 <<= shift;
- *num32 = n;
- return 0;
+ return ret;
}
static int parse_name(char **pname, const char *token)
@@ -233,13 +205,26 @@ static inline void kill_final_newline(char *str)
return 1; \
} while (0)
-static int phram_setup(const char *val, struct kernel_param *kp)
+#ifndef MODULE
+static int phram_init_called;
+/*
+ * This shall contain the module parameter if any. It is of the form:
+ * - phram=<device>,<address>,<size> for module case
+ * - phram.phram=<device>,<address>,<size> for built-in case
+ * We leave 64 bytes for the device name, 20 for the address and 20 for the
+ * size.
+ * Example: phram.phram=rootfs,0xa0000000,512Mi
+ */
+static char phram_paramline[64 + 20 + 20];
+#endif
+
+static int phram_setup(const char *val)
{
- char buf[64+12+12], *str = buf;
+ char buf[64 + 20 + 20], *str = buf;
char *token[3];
char *name;
- uint32_t start;
- uint32_t len;
+ uint64_t start;
+ uint64_t len;
int i, ret;
if (strnlen(val, sizeof(buf)) >= sizeof(buf))
@@ -261,13 +246,13 @@ static int phram_setup(const char *val, struct kernel_param *kp)
if (ret)
return ret;
- ret = parse_num32(&start, token[1]);
+ ret = parse_num64(&start, token[1]);
if (ret) {
kfree(name);
parse_err("illegal start address\n");
}
- ret = parse_num32(&len, token[2]);
+ ret = parse_num64(&len, token[2]);
if (ret) {
kfree(name);
parse_err("illegal device length\n");
@@ -275,20 +260,60 @@ static int phram_setup(const char *val, struct kernel_param *kp)
ret = register_device(name, start, len);
if (!ret)
- pr_info("%s device: %#x at %#x\n", name, len, start);
+ pr_info("%s device: %#llx at %#llx\n", name, len, start);
else
kfree(name);
return ret;
}
-module_param_call(phram, phram_setup, NULL, NULL, 000);
+static int phram_param_call(const char *val, struct kernel_param *kp)
+{
+#ifdef MODULE
+ return phram_setup(val);
+#else
+ /*
+ * If more parameters are later passed in via
+ * /sys/module/phram/parameters/phram
+ * and init_phram() has already been called,
+ * we can parse the argument now.
+ */
+
+ if (phram_init_called)
+ return phram_setup(val);
+
+ /*
+ * During early boot stage, we only save the parameters
+ * here. We must parse them later: if the param is passed
+ * from the kernel boot command line, phram_param_call() is
+ * called so early that it is not possible to resolve
+ * the device (even kmalloc() fails). Defer that work to
+ * phram_setup().
+ */
+
+ if (strlen(val) >= sizeof(phram_paramline))
+ return -ENOSPC;
+ strcpy(phram_paramline, val);
+
+ return 0;
+#endif
+}
+
+module_param_call(phram, phram_param_call, NULL, NULL, 000);
MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\"");
static int __init init_phram(void)
{
- return 0;
+ int ret = 0;
+
+#ifndef MODULE
+ if (phram_paramline[0])
+ ret = phram_setup(phram_paramline);
+ phram_init_called = 1;
+#endif
+
+ return ret;
}
static void __exit cleanup_phram(void)
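/*
 * The suffix handling in parse_num64() above relies on deliberate switch
 * fall-through: each matched binary suffix adds 10 to the shift, so "Gi"
 * accumulates 30, "Mi" 20 and "ki" 10. A minimal user-space sketch of the
 * same idea (hypothetical helper, not part of this patch):
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int parse_size(const char *token, uint64_t *out)
{
	char buf[32];
	size_t len = strlen(token);
	int shift = 0;

	if (!len || len >= sizeof(buf))
		return -1;
	memcpy(buf, token, len + 1);

	if (len > 2 && buf[len - 1] == 'i') {
		switch (buf[len - 2]) {
		case 'G': shift += 10;	/* fall through */
		case 'M': shift += 10;	/* fall through */
		case 'k': shift += 10;
			buf[len - 2] = '\0';
			break;
		default:
			return -1;
		}
	}

	*out = strtoull(buf, NULL, 0) << shift;
	return 0;
}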
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index ecff765579d..f02603e1bfe 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -93,14 +93,49 @@
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <asm/io.h>
-#include <asm/system.h>
#include <linux/pci.h>
-
#include <linux/mtd/mtd.h>
-#include <linux/mtd/pmc551.h>
+
+#define PMC551_VERSION \
+ "Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n"
+
+#define PCI_VENDOR_ID_V3_SEMI 0x11b0
+#define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200
+
+#define PMC551_PCI_MEM_MAP0 0x50
+#define PMC551_PCI_MEM_MAP1 0x54
+#define PMC551_PCI_MEM_MAP_MAP_ADDR_MASK 0x3ff00000
+#define PMC551_PCI_MEM_MAP_APERTURE_MASK 0x000000f0
+#define PMC551_PCI_MEM_MAP_REG_EN 0x00000002
+#define PMC551_PCI_MEM_MAP_ENABLE 0x00000001
+
+#define PMC551_SDRAM_MA 0x60
+#define PMC551_SDRAM_CMD 0x62
+#define PMC551_DRAM_CFG 0x64
+#define PMC551_SYS_CTRL_REG 0x78
+
+#define PMC551_DRAM_BLK0 0x68
+#define PMC551_DRAM_BLK1 0x6c
+#define PMC551_DRAM_BLK2 0x70
+#define PMC551_DRAM_BLK3 0x74
+#define PMC551_DRAM_BLK_GET_SIZE(x) (524288 << ((x >> 4) & 0x0f))
+#define PMC551_DRAM_BLK_SET_COL_MUX(x, v) (((x) & ~0x00007000) | (((v) & 0x7) << 12))
+#define PMC551_DRAM_BLK_SET_ROW_MUX(x, v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8))
+
+struct mypriv {
+ struct pci_dev *dev;
+ u_char *start;
+ u32 base_map0;
+ u32 curr_map0;
+ u32 asize;
+ struct mtd_info *nextpmc551;
+};
static struct mtd_info *pmc551list;
+static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+
static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct mypriv *priv = mtd->priv;
@@ -116,16 +151,6 @@ static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
#endif
end = instr->addr + instr->len - 1;
-
- /* Is it past the end? */
- if (end > mtd->size) {
-#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n",
- (long)end, (long)mtd->size);
-#endif
- return -EINVAL;
- }
-
eoff_hi = end & ~(priv->asize - 1);
soff_hi = instr->addr & ~(priv->asize - 1);
eoff_lo = end & (priv->asize - 1);
@@ -179,18 +204,6 @@ static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len);
#endif
- if (from + len > mtd->size) {
-#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n",
- (long)from + len, (long)mtd->size);
-#endif
- return -EINVAL;
- }
-
- /* can we return a physical address with this driver? */
- if (phys)
- return -EINVAL;
-
soff_hi = from & ~(priv->asize - 1);
soff_lo = from & (priv->asize - 1);
@@ -206,11 +219,12 @@ static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
return 0;
}
-static void pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_unpoint()\n");
#endif
+ return 0;
}
static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -229,16 +243,6 @@ static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
#endif
end = from + len - 1;
-
- /* Is it past the end? */
- if (end > mtd->size) {
-#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n",
- (long)end, (long)mtd->size);
-#endif
- return -EINVAL;
- }
-
soff_hi = from & ~(priv->asize - 1);
eoff_hi = end & ~(priv->asize - 1);
soff_lo = from & (priv->asize - 1);
@@ -296,16 +300,6 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
#endif
end = to + len - 1;
- /* Is it past the end? or did the u32 wrap? */
- if (end > mtd->size) {
-#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, "
- "size: %ld, to: %ld)\n", (long)end, (long)mtd->size,
- (long)to);
-#endif
- return -EINVAL;
- }
-
soff_hi = to & ~(priv->asize - 1);
eoff_hi = end & ~(priv->asize - 1);
soff_lo = to & (priv->asize - 1);
@@ -359,7 +353,7 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
* mechanism
* returns the size of the memory region found.
*/
-static u32 fixup_pmc551(struct pci_dev *dev)
+static int fixup_pmc551(struct pci_dev *dev)
{
#ifdef CONFIG_MTD_PMC551_BUGFIX
u32 dram_data;
@@ -669,7 +663,7 @@ static int __init init_pmc551(void)
struct mypriv *priv;
int found = 0;
struct mtd_info *mtd;
- u32 length = 0;
+ int length = 0;
if (msize) {
msize = (1 << (ffs(msize) - 1)) << 20;
@@ -731,16 +725,11 @@ static int __init init_pmc551(void)
}
mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
- if (!mtd) {
- printk(KERN_NOTICE "pmc551: Cannot allocate new MTD "
- "device.\n");
+ if (!mtd)
break;
- }
priv = kzalloc(sizeof(struct mypriv), GFP_KERNEL);
if (!priv) {
- printk(KERN_NOTICE "pmc551: Cannot allocate new MTD "
- "device.\n");
kfree(mtd);
break;
}
@@ -787,11 +776,11 @@ static int __init init_pmc551(void)
mtd->size = msize;
mtd->flags = MTD_CAP_RAM;
- mtd->erase = pmc551_erase;
- mtd->read = pmc551_read;
- mtd->write = pmc551_write;
- mtd->point = pmc551_point;
- mtd->unpoint = pmc551_unpoint;
+ mtd->_erase = pmc551_erase;
+ mtd->_read = pmc551_read;
+ mtd->_write = pmc551_write;
+ mtd->_point = pmc551_point;
+ mtd->_unpoint = pmc551_unpoint;
mtd->type = MTD_RAM;
mtd->name = "PMC551 RAM board";
mtd->erasesize = 0x10000;
diff --git a/drivers/mtd/devices/serial_flash_cmds.h b/drivers/mtd/devices/serial_flash_cmds.h
new file mode 100644
index 00000000000..f59a125295d
--- /dev/null
+++ b/drivers/mtd/devices/serial_flash_cmds.h
@@ -0,0 +1,61 @@
+/*
+ * Generic/SFDP Flash Commands and Device Capabilities
+ *
+ * Copyright (C) 2013 Lee Jones <lee.jones@linaro.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _MTD_SERIAL_FLASH_CMDS_H
+#define _MTD_SERIAL_FLASH_CMDS_H
+
+/* Generic Flash Commands/OPCODEs */
+#define SPINOR_OP_RDSR2 0x35
+#define SPINOR_OP_WRVCR 0x81
+#define SPINOR_OP_RDVCR 0x85
+
+/* JEDEC Standard - Serial Flash Discoverable Parameters (SFDP) Commands */
+#define SPINOR_OP_READ_1_2_2 0xbb /* DUAL I/O READ */
+#define SPINOR_OP_READ_1_4_4 0xeb /* QUAD I/O READ */
+
+#define SPINOR_OP_WRITE 0x02 /* PAGE PROGRAM */
+#define SPINOR_OP_WRITE_1_1_2 0xa2 /* DUAL INPUT PROGRAM */
+#define SPINOR_OP_WRITE_1_2_2 0xd2 /* DUAL INPUT EXT PROGRAM */
+#define SPINOR_OP_WRITE_1_1_4 0x32 /* QUAD INPUT PROGRAM */
+#define SPINOR_OP_WRITE_1_4_4 0x12 /* QUAD INPUT EXT PROGRAM */
+
+/* READ commands with 32-bit addressing */
+#define SPINOR_OP_READ4_1_2_2 0xbc
+#define SPINOR_OP_READ4_1_4_4 0xec
+
+/* Configuration flags */
+#define FLASH_FLAG_SINGLE 0x000000ff
+#define FLASH_FLAG_READ_WRITE 0x00000001
+#define FLASH_FLAG_READ_FAST 0x00000002
+#define FLASH_FLAG_SE_4K 0x00000004
+#define FLASH_FLAG_SE_32K 0x00000008
+#define FLASH_FLAG_CE 0x00000010
+#define FLASH_FLAG_32BIT_ADDR 0x00000020
+#define FLASH_FLAG_RESET 0x00000040
+#define FLASH_FLAG_DYB_LOCKING 0x00000080
+
+#define FLASH_FLAG_DUAL 0x0000ff00
+#define FLASH_FLAG_READ_1_1_2 0x00000100
+#define FLASH_FLAG_READ_1_2_2 0x00000200
+#define FLASH_FLAG_READ_2_2_2 0x00000400
+#define FLASH_FLAG_WRITE_1_1_2 0x00001000
+#define FLASH_FLAG_WRITE_1_2_2 0x00002000
+#define FLASH_FLAG_WRITE_2_2_2 0x00004000
+
+#define FLASH_FLAG_QUAD 0x00ff0000
+#define FLASH_FLAG_READ_1_1_4 0x00010000
+#define FLASH_FLAG_READ_1_4_4 0x00020000
+#define FLASH_FLAG_READ_4_4_4 0x00040000
+#define FLASH_FLAG_WRITE_1_1_4 0x00100000
+#define FLASH_FLAG_WRITE_1_4_4 0x00200000
+#define FLASH_FLAG_WRITE_4_4_4 0x00400000
+
+#endif /* _MTD_SERIAL_FLASH_CMDS_H */
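/*
 * The capability flags above are meant to be OR'ed into a per-device mask
 * that a controller driver can test against its own capabilities. A
 * hypothetical mask for a quad-capable part might be composed like this
 * (illustration only, not taken from this patch):
 */
#define EXAMPLE_FLASH_CAPS	(FLASH_FLAG_READ_WRITE	| \
				 FLASH_FLAG_READ_FAST	| \
				 FLASH_FLAG_SE_4K	| \
				 FLASH_FLAG_SE_32K	| \
				 FLASH_FLAG_READ_1_1_2	| \
				 FLASH_FLAG_READ_1_1_4	| \
				 FLASH_FLAG_WRITE_1_1_4)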
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index e585263161b..2fc4957cbe7 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -42,7 +42,6 @@
#include <linux/ioctl.h>
#include <linux/init.h>
#include <asm/io.h>
-#include <asm/system.h>
#include <linux/mtd/mtd.h>
@@ -76,7 +75,7 @@ static slram_mtd_list_t *slram_mtdlist = NULL;
static int slram_erase(struct mtd_info *, struct erase_info *);
static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **,
resource_size_t *);
-static void slram_unpoint(struct mtd_info *, loff_t, size_t);
+static int slram_unpoint(struct mtd_info *, loff_t, size_t);
static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -84,21 +83,13 @@ static int slram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
slram_priv_t *priv = mtd->priv;
- if (instr->addr + instr->len > mtd->size) {
- return(-EINVAL);
- }
-
memset(priv->start + instr->addr, 0xff, instr->len);
-
/* This'll catch a few races. Free the thing before returning :)
* I don't feel at all ashamed. This kind of thing is possible anyway
* with flash, but unlikely.
*/
-
instr->state = MTD_ERASE_DONE;
-
mtd_erase_callback(instr);
-
return(0);
}
@@ -107,20 +98,14 @@ static int slram_point(struct mtd_info *mtd, loff_t from, size_t len,
{
slram_priv_t *priv = mtd->priv;
- /* can we return a physical address with this driver? */
- if (phys)
- return -EINVAL;
-
- if (from + len > mtd->size)
- return -EINVAL;
-
*virt = priv->start + from;
*retlen = len;
return(0);
}
-static void slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
+ return 0;
}
static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -128,14 +113,7 @@ static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
{
slram_priv_t *priv = mtd->priv;
- if (from > mtd->size)
- return -EINVAL;
-
- if (from + len > mtd->size)
- len = mtd->size - from;
-
memcpy(buf, priv->start + from, len);
-
*retlen = len;
return(0);
}
@@ -145,11 +123,7 @@ static int slram_write(struct mtd_info *mtd, loff_t to, size_t len,
{
slram_priv_t *priv = mtd->priv;
- if (to + len > mtd->size)
- return -EINVAL;
-
memcpy(priv->start + to, buf, len);
-
*retlen = len;
return(0);
}
@@ -200,11 +174,11 @@ static int register_device(char *name, unsigned long start, unsigned long length
(*curmtd)->mtdinfo->name = name;
(*curmtd)->mtdinfo->size = length;
(*curmtd)->mtdinfo->flags = MTD_CAP_RAM;
- (*curmtd)->mtdinfo->erase = slram_erase;
- (*curmtd)->mtdinfo->point = slram_point;
- (*curmtd)->mtdinfo->unpoint = slram_unpoint;
- (*curmtd)->mtdinfo->read = slram_read;
- (*curmtd)->mtdinfo->write = slram_write;
+ (*curmtd)->mtdinfo->_erase = slram_erase;
+ (*curmtd)->mtdinfo->_point = slram_point;
+ (*curmtd)->mtdinfo->_unpoint = slram_unpoint;
+ (*curmtd)->mtdinfo->_read = slram_read;
+ (*curmtd)->mtdinfo->_write = slram_write;
(*curmtd)->mtdinfo->owner = THIS_MODULE;
(*curmtd)->mtdinfo->type = MTD_RAM;
(*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
@@ -266,7 +240,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength)
if (*(szlength) != '+') {
devlength = simple_strtoul(szlength, &buffer, 0);
- devlength = handle_unit(devlength, buffer) - devstart;
+ devlength = handle_unit(devlength, buffer);
if (devlength < devstart)
goto err_out;
@@ -306,14 +280,11 @@ __setup("slram=", mtd_slram_setup);
static int __init init_slram(void)
{
char *devname;
- int i;
#ifndef MODULE
char *devstart;
char *devlength;
- i = 0;
-
if (!map) {
E("slram: not enough parameters.\n");
return(-EINVAL);
@@ -340,6 +311,7 @@ static int __init init_slram(void)
}
#else
int count;
+ int i;
for (count = 0; count < SLRAM_MAX_DEVICES_PARAMS && map[count];
count++) {
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
new file mode 100644
index 00000000000..c4176b0f382
--- /dev/null
+++ b/drivers/mtd/devices/spear_smi.c
@@ -0,0 +1,1093 @@
+/*
+ * SMI (Serial Memory Controller) device driver for Serial NOR Flash on
+ * SPEAr platform
+ * The serial NOR interface is largely based on drivers/mtd/m25p80.c;
+ * however, the SPI interface has been replaced by SMI.
+ *
+ * Copyright © 2010 STMicroelectronics.
+ * Ashish Priyadarshi
+ * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spear_smi.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+/* SMI clock rate */
+#define SMI_MAX_CLOCK_FREQ 50000000 /* 50 MHz */
+
+/* Max timeout to safely come out of erase or write busy conditions */
+#define SMI_PROBE_TIMEOUT (HZ / 10)
+#define SMI_MAX_TIME_OUT (3 * HZ)
+
+/* timeout for command completion */
+#define SMI_CMD_TIMEOUT (HZ / 10)
+
+/* registers of smi */
+#define SMI_CR1 0x0 /* SMI control register 1 */
+#define SMI_CR2 0x4 /* SMI control register 2 */
+#define SMI_SR 0x8 /* SMI status register */
+#define SMI_TR 0xC /* SMI transmit register */
+#define SMI_RR 0x10 /* SMI receive register */
+
+/* defines for control_reg 1 */
+#define BANK_EN (0xF << 0) /* enables all banks */
+#define DSEL_TIME (0x6 << 4) /* Deselect time 6 + 1 SMI_CK periods */
+#define SW_MODE (0x1 << 28) /* enables SW Mode */
+#define WB_MODE (0x1 << 29) /* Write Burst Mode */
+#define FAST_MODE (0x1 << 15) /* Fast Mode */
+#define HOLD1 (0x1 << 16) /* Clock Hold period selection */
+
+/* defines for control_reg 2 */
+#define SEND (0x1 << 7) /* Send data */
+#define TFIE (0x1 << 8) /* Transmission Flag Interrupt Enable */
+#define WCIE (0x1 << 9) /* Write Complete Interrupt Enable */
+#define RD_STATUS_REG (0x1 << 10) /* reads status reg */
+#define WE (0x1 << 11) /* Write Enable */
+
+#define TX_LEN_SHIFT 0
+#define RX_LEN_SHIFT 4
+#define BANK_SHIFT 12
+
+/* defines for status register */
+#define SR_WIP 0x1 /* Write in progress */
+#define SR_WEL 0x2 /* Write enable latch */
+#define SR_BP0 0x4 /* Block protect 0 */
+#define SR_BP1 0x8 /* Block protect 1 */
+#define SR_BP2 0x10 /* Block protect 2 */
+#define SR_SRWD 0x80 /* SR write protect */
+#define TFF 0x100 /* Transfer Finished Flag */
+#define WCF 0x200 /* Write Complete Flag */
+#define ERF1 0x400 /* Forbidden Write Request */
+#define ERF2 0x800 /* Forbidden Access */
+
+#define WM_SHIFT 12
+
+/* flash opcodes */
+#define OPCODE_RDID 0x9f /* Read JEDEC ID */
+
+/* Flash Device Ids maintenance section */
+
+/* data structure to maintain flash ids from different vendors */
+struct flash_device {
+ char *name;
+ u8 erase_cmd;
+ u32 device_id;
+ u32 pagesize;
+ unsigned long sectorsize;
+ unsigned long size_in_bytes;
+};
+
+#define FLASH_ID(n, es, id, psize, ssize, size) \
+{ \
+ .name = n, \
+ .erase_cmd = es, \
+ .device_id = id, \
+ .pagesize = psize, \
+ .sectorsize = ssize, \
+ .size_in_bytes = size \
+}
+
+static struct flash_device flash_devices[] = {
+ FLASH_ID("st m25p16" , 0xd8, 0x00152020, 0x100, 0x10000, 0x200000),
+ FLASH_ID("st m25p32" , 0xd8, 0x00162020, 0x100, 0x10000, 0x400000),
+ FLASH_ID("st m25p64" , 0xd8, 0x00172020, 0x100, 0x10000, 0x800000),
+ FLASH_ID("st m25p128" , 0xd8, 0x00182020, 0x100, 0x40000, 0x1000000),
+ FLASH_ID("st m25p05" , 0xd8, 0x00102020, 0x80 , 0x8000 , 0x10000),
+ FLASH_ID("st m25p10" , 0xd8, 0x00112020, 0x80 , 0x8000 , 0x20000),
+ FLASH_ID("st m25p20" , 0xd8, 0x00122020, 0x100, 0x10000, 0x40000),
+ FLASH_ID("st m25p40" , 0xd8, 0x00132020, 0x100, 0x10000, 0x80000),
+ FLASH_ID("st m25p80" , 0xd8, 0x00142020, 0x100, 0x10000, 0x100000),
+ FLASH_ID("st m45pe10" , 0xd8, 0x00114020, 0x100, 0x10000, 0x20000),
+ FLASH_ID("st m45pe20" , 0xd8, 0x00124020, 0x100, 0x10000, 0x40000),
+ FLASH_ID("st m45pe40" , 0xd8, 0x00134020, 0x100, 0x10000, 0x80000),
+ FLASH_ID("st m45pe80" , 0xd8, 0x00144020, 0x100, 0x10000, 0x100000),
+ FLASH_ID("sp s25fl004" , 0xd8, 0x00120201, 0x100, 0x10000, 0x80000),
+ FLASH_ID("sp s25fl008" , 0xd8, 0x00130201, 0x100, 0x10000, 0x100000),
+ FLASH_ID("sp s25fl016" , 0xd8, 0x00140201, 0x100, 0x10000, 0x200000),
+ FLASH_ID("sp s25fl032" , 0xd8, 0x00150201, 0x100, 0x10000, 0x400000),
+ FLASH_ID("sp s25fl064" , 0xd8, 0x00160201, 0x100, 0x10000, 0x800000),
+ FLASH_ID("atmel 25f512" , 0x52, 0x0065001F, 0x80 , 0x8000 , 0x10000),
+ FLASH_ID("atmel 25f1024" , 0x52, 0x0060001F, 0x100, 0x8000 , 0x20000),
+ FLASH_ID("atmel 25f2048" , 0x52, 0x0063001F, 0x100, 0x10000, 0x40000),
+ FLASH_ID("atmel 25f4096" , 0x52, 0x0064001F, 0x100, 0x10000, 0x80000),
+ FLASH_ID("atmel 25fs040" , 0xd7, 0x0004661F, 0x100, 0x10000, 0x80000),
+ FLASH_ID("mac 25l512" , 0xd8, 0x001020C2, 0x010, 0x10000, 0x10000),
+ FLASH_ID("mac 25l1005" , 0xd8, 0x001120C2, 0x010, 0x10000, 0x20000),
+ FLASH_ID("mac 25l2005" , 0xd8, 0x001220C2, 0x010, 0x10000, 0x40000),
+ FLASH_ID("mac 25l4005" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
+ FLASH_ID("mac 25l4005a" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
+ FLASH_ID("mac 25l8005" , 0xd8, 0x001420C2, 0x010, 0x10000, 0x100000),
+ FLASH_ID("mac 25l1605" , 0xd8, 0x001520C2, 0x100, 0x10000, 0x200000),
+ FLASH_ID("mac 25l1605a" , 0xd8, 0x001520C2, 0x010, 0x10000, 0x200000),
+ FLASH_ID("mac 25l3205" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
+ FLASH_ID("mac 25l3205a" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
+ FLASH_ID("mac 25l6405" , 0xd8, 0x001720C2, 0x100, 0x10000, 0x800000),
+};
+
+/* Define spear specific structures */
+
+struct spear_snor_flash;
+
+/**
+ * struct spear_smi - Structure for SMI Device
+ *
+ * @clk: functional clock
+ * @status: current status register of SMI.
+ * @clk_rate: functional clock rate of SMI (default: SMI_MAX_CLOCK_FREQ)
+ * @lock: lock to prevent parallel access of SMI.
+ * @io_base: base address for registers of SMI.
+ * @pdev: platform device
+ * @cmd_complete: queue to wait for command completion of NOR-flash.
+ * @num_flashes: number of flashes actually present on board.
+ * @flash: separate structure for each Serial NOR-flash attached to SMI.
+ */
+struct spear_smi {
+ struct clk *clk;
+ u32 status;
+ unsigned long clk_rate;
+ struct mutex lock;
+ void __iomem *io_base;
+ struct platform_device *pdev;
+ wait_queue_head_t cmd_complete;
+ u32 num_flashes;
+ struct spear_snor_flash *flash[MAX_NUM_FLASH_CHIP];
+};
+
+/**
+ * struct spear_snor_flash - Structure for Serial NOR Flash
+ *
+ * @bank: Bank number(0, 1, 2, 3) for each NOR-flash.
+ * @dev_id: Device ID of NOR-flash.
+ * @lock: lock to manage flash read, write and erase operations
+ * @mtd: MTD info for each NOR-flash.
+ * @num_parts: Total number of partitions in each bank of NOR-flash.
+ * @parts: Partition info for each bank of NOR-flash.
+ * @page_size: Page size of NOR-flash.
+ * @base_addr: Base address of NOR-flash.
+ * @erase_cmd: erase command may vary on different flash types
+ * @fast_mode: flash supports read in fast mode
+ */
+struct spear_snor_flash {
+ u32 bank;
+ u32 dev_id;
+ struct mutex lock;
+ struct mtd_info mtd;
+ u32 num_parts;
+ struct mtd_partition *parts;
+ u32 page_size;
+ void __iomem *base_addr;
+ u8 erase_cmd;
+ u8 fast_mode;
+};
+
+static inline struct spear_snor_flash *get_flash_data(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct spear_snor_flash, mtd);
+}
+
+/**
+ * spear_smi_read_sr - Read status register of flash through SMI
+ * @dev: structure of SMI information.
+ * @bank: bank to which flash is connected
+ *
+ * This routine will return the status register of the flash chip present at the
+ * given bank.
+ */
+static int spear_smi_read_sr(struct spear_smi *dev, u32 bank)
+{
+ int ret;
+ u32 ctrlreg1;
+
+ mutex_lock(&dev->lock);
+ dev->status = 0; /* Will be set in interrupt handler */
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ /* program smi in hw mode */
+ writel(ctrlreg1 & ~(SW_MODE | WB_MODE), dev->io_base + SMI_CR1);
+
+ /* performing a rsr instruction in hw mode */
+ writel((bank << BANK_SHIFT) | RD_STATUS_REG | TFIE,
+ dev->io_base + SMI_CR2);
+
+ /* wait for tff */
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+
+ /* copy dev->status (lower 16 bits) in order to release lock */
+ if (ret > 0)
+ ret = dev->status & 0xffff;
+ else if (ret == 0)
+ ret = -ETIMEDOUT;
+
+ /* restore the ctrl regs state */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ writel(0, dev->io_base + SMI_CR2);
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+/**
+ * spear_smi_wait_till_ready - wait till flash is ready
+ * @dev: structure of SMI information.
+ * @bank: flash corresponding to this bank
+ * @timeout: timeout for busy wait condition
+ *
+ * This routine checks for the WIP (write in progress) bit in the status
+ * register. If successful the routine returns 0, else -EBUSY.
+ */
+static int spear_smi_wait_till_ready(struct spear_smi *dev, u32 bank,
+ unsigned long timeout)
+{
+ unsigned long finish;
+ int status;
+
+ finish = jiffies + timeout;
+ do {
+ status = spear_smi_read_sr(dev, bank);
+ if (status < 0) {
+ if (status == -ETIMEDOUT)
+ continue; /* try till finish */
+ return status;
+ } else if (!(status & SR_WIP)) {
+ return 0;
+ }
+
+ cond_resched();
+ } while (!time_after_eq(jiffies, finish));
+
+ dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n");
+ return -EBUSY;
+}
+
+/**
+ * spear_smi_int_handler - SMI Interrupt Handler.
+ * @irq: irq number
+ * @dev_id: structure of SMI device, embedded in dev_id.
+ *
+ * The handler clears all interrupt conditions and records the status in
+ * dev->status which is used by the driver later.
+ */
+static irqreturn_t spear_smi_int_handler(int irq, void *dev_id)
+{
+ u32 status = 0;
+ struct spear_smi *dev = dev_id;
+
+ status = readl(dev->io_base + SMI_SR);
+
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+ /* clear all interrupt conditions */
+ writel(0, dev->io_base + SMI_SR);
+
+ /* copy the status register in dev->status */
+ dev->status |= status;
+
+ /* send the completion */
+ wake_up_interruptible(&dev->cmd_complete);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * spear_smi_hw_init - initializes the smi controller.
+ * @dev: structure of smi device
+ *
+ * this routine initializes the smi controller with the default values
+ */
+static void spear_smi_hw_init(struct spear_smi *dev)
+{
+ unsigned long rate = 0;
+ u32 prescale = 0;
+ u32 val;
+
+ rate = clk_get_rate(dev->clk);
+
+ /* functional clock of smi */
+ prescale = DIV_ROUND_UP(rate, dev->clk_rate);
+
+ /*
+ * setting the standard values, fast mode, prescaler for
+ * SMI_MAX_CLOCK_FREQ (50MHz) operation and bank enable
+ */
+ val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8);
+
+ mutex_lock(&dev->lock);
+ /* clear all interrupt conditions */
+ writel(0, dev->io_base + SMI_SR);
+
+ writel(val, dev->io_base + SMI_CR1);
+ mutex_unlock(&dev->lock);
+}
+
+/**
+ * get_flash_index - match chip id from a flash list.
+ * @flash_id: a valid nor flash chip id obtained from board.
+ *
+ * Try to validate the chip id by matching it against the list; if not found,
+ * return a negative value. On success, return the index into the flash devices
+ * array.
+ */
+static int get_flash_index(u32 flash_id)
+{
+ int index;
+
+ /* Matches chip-id to entire list of 'serial-nor flash' ids */
+ for (index = 0; index < ARRAY_SIZE(flash_devices); index++) {
+ if (flash_devices[index].device_id == flash_id)
+ return index;
+ }
+
+ /* Memory chip is not listed and not supported */
+ return -ENODEV;
+}
+
+/**
+ * spear_smi_write_enable - Enable the flash to do write operation
+ * @dev: structure of SMI device
+ * @bank: enable write for flash connected to this bank
+ *
+ * Set write enable latch with Write Enable command.
+ * Returns 0 on success.
+ */
+static int spear_smi_write_enable(struct spear_smi *dev, u32 bank)
+{
+ int ret;
+ u32 ctrlreg1;
+
+ mutex_lock(&dev->lock);
+ dev->status = 0; /* Will be set in interrupt handler */
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ /* program smi in h/w mode */
+ writel(ctrlreg1 & ~SW_MODE, dev->io_base + SMI_CR1);
+
+ /* give the flash, write enable command */
+ writel((bank << BANK_SHIFT) | WE | TFIE, dev->io_base + SMI_CR2);
+
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+
+ /* restore the ctrl regs state */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ writel(0, dev->io_base + SMI_CR2);
+
+ if (ret == 0) {
+ ret = -EIO;
+ dev_err(&dev->pdev->dev,
+ "smi controller failed on write enable\n");
+ } else if (ret > 0) {
+ /* check whether write mode status is set for required bank */
+ if (dev->status & (1 << (bank + WM_SHIFT)))
+ ret = 0;
+ else {
+ dev_err(&dev->pdev->dev, "couldn't enable write\n");
+ ret = -EIO;
+ }
+ }
+
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static inline u32
+get_sector_erase_cmd(struct spear_snor_flash *flash, u32 offset)
+{
+ u32 cmd;
+ u8 *x = (u8 *)&cmd;
+
+ x[0] = flash->erase_cmd;
+ x[1] = offset >> 16;
+ x[2] = offset >> 8;
+ x[3] = offset;
+
+ return cmd;
+}
+
+/**
+ * spear_smi_erase_sector - erase one sector of flash
+ * @dev: structure of SMI information
+ * @command: erase command to be sent
+ * @bank: bank to which this command needs to be sent
+ * @bytes: size of command
+ *
+ * Erase one sector of flash memory; the command encodes an offset, which may
+ * be any address within the sector to be erased.
+ * Returns 0 if successful, non-zero otherwise.
+ */
+static int spear_smi_erase_sector(struct spear_smi *dev,
+ u32 bank, u32 command, u32 bytes)
+{
+ u32 ctrlreg1 = 0;
+ int ret;
+
+ ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
+ if (ret)
+ return ret;
+
+ ret = spear_smi_write_enable(dev, bank);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ writel((ctrlreg1 | SW_MODE) & ~WB_MODE, dev->io_base + SMI_CR1);
+
+ /* send command in sw mode */
+ writel(command, dev->io_base + SMI_TR);
+
+ writel((bank << BANK_SHIFT) | SEND | TFIE | (bytes << TX_LEN_SHIFT),
+ dev->io_base + SMI_CR2);
+
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+
+ if (ret == 0) {
+ ret = -EIO;
+ dev_err(&dev->pdev->dev, "sector erase failed\n");
+ } else if (ret > 0)
+ ret = 0; /* success */
+
+ /* restore ctrl regs */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ writel(0, dev->io_base + SMI_CR2);
+
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+/**
+ * spear_mtd_erase - perform flash erase operation as requested by user
+ * @mtd: Provides the memory characteristics
+ * @e_info: Provides the erase information
+ *
+ * Erase an address range on the flash chip. The address range may span
+ * one or more erase sectors. Return an error if there is a problem erasing.
+ */
+static int spear_mtd_erase(struct mtd_info *mtd, struct erase_info *e_info)
+{
+ struct spear_snor_flash *flash = get_flash_data(mtd);
+ struct spear_smi *dev = mtd->priv;
+ u32 addr, command, bank;
+ int len, ret;
+
+ if (!flash || !dev)
+ return -ENODEV;
+
+ bank = flash->bank;
+ if (bank > dev->num_flashes - 1) {
+ dev_err(&dev->pdev->dev, "Invalid Bank Num");
+ return -EINVAL;
+ }
+
+ addr = e_info->addr;
+ len = e_info->len;
+
+ mutex_lock(&flash->lock);
+
+ /* now erase sectors in loop */
+ while (len) {
+ command = get_sector_erase_cmd(flash, addr);
+ /* preparing the command for flash */
+ ret = spear_smi_erase_sector(dev, bank, command, 4);
+ if (ret) {
+ e_info->state = MTD_ERASE_FAILED;
+ mutex_unlock(&flash->lock);
+ return ret;
+ }
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+
+ mutex_unlock(&flash->lock);
+ e_info->state = MTD_ERASE_DONE;
+ mtd_erase_callback(e_info);
+
+ return 0;
+}
+
+/**
+ * spear_mtd_read - performs flash read operation as requested by the user
+ * @mtd: MTD information of the memory bank
+ * @from: Address from which to start read
+ * @len: Number of bytes to be read
+ * @retlen: Fills the number of bytes actually read
+ * @buf: Fills this after reading
+ *
+ * Read an address range from the flash chip. The address range
+ * may be any size provided it is within the physical boundaries.
+ * Returns 0 on success, non zero otherwise
+ */
+static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u8 *buf)
+{
+ struct spear_snor_flash *flash = get_flash_data(mtd);
+ struct spear_smi *dev = mtd->priv;
+ void __iomem *src;
+ u32 ctrlreg1, val;
+ int ret;
+
+ if (!flash || !dev)
+ return -ENODEV;
+
+ if (flash->bank > dev->num_flashes - 1) {
+ dev_err(&dev->pdev->dev, "Invalid Bank Num");
+ return -EINVAL;
+ }
+
+ /* select address as per bank number */
+ src = flash->base_addr + from;
+
+ mutex_lock(&flash->lock);
+
+ /* wait till previous write/erase is done. */
+ ret = spear_smi_wait_till_ready(dev, flash->bank, SMI_MAX_TIME_OUT);
+ if (ret) {
+ mutex_unlock(&flash->lock);
+ return ret;
+ }
+
+ mutex_lock(&dev->lock);
+ /* put smi in hw mode not wbt mode */
+ ctrlreg1 = val = readl(dev->io_base + SMI_CR1);
+ val &= ~(SW_MODE | WB_MODE);
+ if (flash->fast_mode)
+ val |= FAST_MODE;
+
+ writel(val, dev->io_base + SMI_CR1);
+
+ memcpy_fromio(buf, src, len);
+
+ /* restore ctrl reg1 */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ mutex_unlock(&dev->lock);
+
+ *retlen = len;
+ mutex_unlock(&flash->lock);
+
+ return 0;
+}
+
+static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
+ void __iomem *dest, const void *src, size_t len)
+{
+ int ret;
+ u32 ctrlreg1;
+
+ /* wait until finished previous write command. */
+ ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
+ if (ret)
+ return ret;
+
+ /* put smi in write enable */
+ ret = spear_smi_write_enable(dev, bank);
+ if (ret)
+ return ret;
+
+ /* put smi in hw, write burst mode */
+ mutex_lock(&dev->lock);
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1);
+
+ memcpy_toio(dest, src, len);
+
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+
+ mutex_unlock(&dev->lock);
+ return 0;
+}
+
+/**
+ * spear_mtd_write - performs write operation as requested by the user.
+ * @mtd: MTD information of the memory bank.
+ * @to: Address to write.
+ * @len: Number of bytes to be written.
+ * @retlen: Number of bytes actually written.
+ * @buf: Buffer from which the data to be taken.
+ *
+ * Write an address range to the flash chip. Data must be written in
+ * flash_page_size chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ * Returns 0 on success, non zero otherwise
+ */
+static int spear_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u8 *buf)
+{
+ struct spear_snor_flash *flash = get_flash_data(mtd);
+ struct spear_smi *dev = mtd->priv;
+ void __iomem *dest;
+ u32 page_offset, page_size;
+ int ret;
+
+ if (!flash || !dev)
+ return -ENODEV;
+
+ if (flash->bank > dev->num_flashes - 1) {
+ dev_err(&dev->pdev->dev, "Invalid Bank Num");
+ return -EINVAL;
+ }
+
+ /* select address as per bank number */
+ dest = flash->base_addr + to;
+ mutex_lock(&flash->lock);
+
+ page_offset = (u32)to % flash->page_size;
+
+ /* do if all the bytes fit onto one page */
+ if (page_offset + len <= flash->page_size) {
+ ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf, len);
+ if (!ret)
+ *retlen += len;
+ } else {
+ u32 i;
+
+ /* the size of data remaining on the first page */
+ page_size = flash->page_size - page_offset;
+
+ ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf,
+ page_size);
+ if (ret)
+ goto err_write;
+ else
+ *retlen += page_size;
+
+ /* write everything in pagesize chunks */
+ for (i = page_size; i < len; i += page_size) {
+ page_size = len - i;
+ if (page_size > flash->page_size)
+ page_size = flash->page_size;
+
+ ret = spear_smi_cpy_toio(dev, flash->bank, dest + i,
+ buf + i, page_size);
+ if (ret)
+ break;
+ else
+ *retlen += page_size;
+ }
+ }
+
+err_write:
+ mutex_unlock(&flash->lock);
+
+ return ret;
+}
+
+/**
+ * spear_smi_probe_flash - Detects the NOR Flash chip.
+ * @dev: structure of SMI information.
+ * @bank: bank on which flash must be probed
+ *
+ * This routine will check whether there exists a flash chip on a given memory
+ * bank ID.
+ * Returns the index of the probed flash in the flash devices structure.
+ */
+static int spear_smi_probe_flash(struct spear_smi *dev, u32 bank)
+{
+ int ret;
+ u32 val = 0;
+
+ ret = spear_smi_wait_till_ready(dev, bank, SMI_PROBE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ dev->status = 0; /* Will be set in interrupt handler */
+ /* put smi in sw mode */
+ val = readl(dev->io_base + SMI_CR1);
+ writel(val | SW_MODE, dev->io_base + SMI_CR1);
+
+ /* send readid command in sw mode */
+ writel(OPCODE_RDID, dev->io_base + SMI_TR);
+
+ val = (bank << BANK_SHIFT) | SEND | (1 << TX_LEN_SHIFT) |
+ (3 << RX_LEN_SHIFT) | TFIE;
+ writel(val, dev->io_base + SMI_CR2);
+
+ /* wait for TFF */
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+ if (ret <= 0) {
+ ret = -ENODEV;
+ goto err_probe;
+ }
+
+ /* get memory chip id */
+ val = readl(dev->io_base + SMI_RR);
+ val &= 0x00ffffff;
+ ret = get_flash_index(val);
+
+err_probe:
+ /* clear sw mode */
+ val = readl(dev->io_base + SMI_CR1);
+ writel(val & ~SW_MODE, dev->io_base + SMI_CR1);
+
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+
+#ifdef CONFIG_OF
+static int spear_smi_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node *pp = NULL;
+ const __be32 *addr;
+ u32 val;
+ int len;
+ int i = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ of_property_read_u32(np, "clock-rate", &val);
+ pdata->clk_rate = val;
+
+ pdata->board_flash_info = devm_kzalloc(&pdev->dev,
+ sizeof(*pdata->board_flash_info),
+ GFP_KERNEL);
+
+ /* Fill structs for each subnode (flash device) */
+ while ((pp = of_get_next_child(np, pp))) {
+ struct spear_smi_flash_info *flash_info;
+
+ flash_info = &pdata->board_flash_info[i];
+ pdata->np[i] = pp;
+
+ /* Read base-addr and size from DT */
+ addr = of_get_property(pp, "reg", &len);
+ pdata->board_flash_info->mem_base = be32_to_cpup(&addr[0]);
+ pdata->board_flash_info->size = be32_to_cpup(&addr[1]);
+
+ if (of_get_property(pp, "st,smi-fast-mode", NULL))
+ pdata->board_flash_info->fast_mode = 1;
+
+ i++;
+ }
+
+ pdata->num_flashes = i;
+
+ return 0;
+}
+#else
+static int spear_smi_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ return -ENOSYS;
+}
+#endif
+
+static int spear_smi_setup_banks(struct platform_device *pdev,
+ u32 bank, struct device_node *np)
+{
+ struct spear_smi *dev = platform_get_drvdata(pdev);
+ struct mtd_part_parser_data ppdata = {};
+ struct spear_smi_flash_info *flash_info;
+ struct spear_smi_plat_data *pdata;
+ struct spear_snor_flash *flash;
+ struct mtd_partition *parts = NULL;
+ int count = 0;
+ int flash_index;
+ int ret = 0;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (bank > pdata->num_flashes - 1)
+ return -EINVAL;
+
+ flash_info = &pdata->board_flash_info[bank];
+ if (!flash_info)
+ return -ENODEV;
+
+ flash = devm_kzalloc(&pdev->dev, sizeof(*flash), GFP_ATOMIC);
+ if (!flash)
+ return -ENOMEM;
+ flash->bank = bank;
+ flash->fast_mode = flash_info->fast_mode ? 1 : 0;
+ mutex_init(&flash->lock);
+
+ /* verify whether nor flash is really present on board */
+ flash_index = spear_smi_probe_flash(dev, bank);
+ if (flash_index < 0) {
+ dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank);
+ return flash_index;
+ }
+ /* map the memory for nor flash chip */
+ flash->base_addr = devm_ioremap(&pdev->dev, flash_info->mem_base,
+ flash_info->size);
+ if (!flash->base_addr)
+ return -EIO;
+
+ dev->flash[bank] = flash;
+ flash->mtd.priv = dev;
+
+ if (flash_info->name)
+ flash->mtd.name = flash_info->name;
+ else
+ flash->mtd.name = flash_devices[flash_index].name;
+
+ flash->mtd.type = MTD_NORFLASH;
+ flash->mtd.writesize = 1;
+ flash->mtd.flags = MTD_CAP_NORFLASH;
+ flash->mtd.size = flash_info->size;
+ flash->mtd.erasesize = flash_devices[flash_index].sectorsize;
+ flash->page_size = flash_devices[flash_index].pagesize;
+ flash->mtd.writebufsize = flash->page_size;
+ flash->erase_cmd = flash_devices[flash_index].erase_cmd;
+ flash->mtd._erase = spear_mtd_erase;
+ flash->mtd._read = spear_mtd_read;
+ flash->mtd._write = spear_mtd_write;
+ flash->dev_id = flash_devices[flash_index].device_id;
+
+ dev_info(&dev->pdev->dev, "mtd .name=%s .size=%llx(%lluM)\n",
+ flash->mtd.name, flash->mtd.size,
+ flash->mtd.size / (1024 * 1024));
+
+ dev_info(&dev->pdev->dev, ".erasesize = 0x%x(%uK)\n",
+ flash->mtd.erasesize, flash->mtd.erasesize / 1024);
+
+#ifndef CONFIG_OF
+ if (flash_info->partitions) {
+ parts = flash_info->partitions;
+ count = flash_info->nr_partitions;
+ }
+#endif
+ ppdata.of_node = np;
+
+ ret = mtd_device_parse_register(&flash->mtd, NULL, &ppdata, parts,
+ count);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * spear_smi_probe - Entry routine
+ * @pdev: platform device structure
+ *
+ * This is the first routine which gets invoked during booting and does all
+ * initialization/allocation work. The routine looks for available memory banks,
+ * and does the proper init for each one found.
+ * Returns 0 on success, non zero otherwise
+ */
+static int spear_smi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spear_smi_plat_data *pdata = NULL;
+ struct spear_smi *dev;
+ struct resource *smi_base;
+ int irq, ret = 0;
+ int i;
+
+ if (np) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ pdev->dev.platform_data = pdata;
+ ret = spear_smi_probe_config_dt(pdev, np);
+ if (ret) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "no platform data\n");
+ goto err;
+ }
+ } else {
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "no platform data\n");
+ goto err;
+ }
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "invalid smi irq\n");
+ goto err;
+ }
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_ATOMIC);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ dev->io_base = devm_ioremap_resource(&pdev->dev, smi_base);
+ if (IS_ERR(dev->io_base)) {
+ ret = PTR_ERR(dev->io_base);
+ goto err;
+ }
+
+ dev->pdev = pdev;
+ dev->clk_rate = pdata->clk_rate;
+
+ if (dev->clk_rate > SMI_MAX_CLOCK_FREQ)
+ dev->clk_rate = SMI_MAX_CLOCK_FREQ;
+
+ dev->num_flashes = pdata->num_flashes;
+
+ if (dev->num_flashes > MAX_NUM_FLASH_CHIP) {
+ dev_err(&pdev->dev, "exceeding max number of flashes\n");
+ dev->num_flashes = MAX_NUM_FLASH_CHIP;
+ }
+
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ ret = PTR_ERR(dev->clk);
+ goto err;
+ }
+
+ ret = clk_prepare_enable(dev->clk);
+ if (ret)
+ goto err;
+
+ ret = devm_request_irq(&pdev->dev, irq, spear_smi_int_handler, 0,
+ pdev->name, dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n");
+ goto err_irq;
+ }
+
+ mutex_init(&dev->lock);
+ init_waitqueue_head(&dev->cmd_complete);
+ spear_smi_hw_init(dev);
+ platform_set_drvdata(pdev, dev);
+
+ /* loop for each serial nor-flash which is connected to smi */
+ for (i = 0; i < dev->num_flashes; i++) {
+ ret = spear_smi_setup_banks(pdev, i, pdata->np[i]);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "bank setup failed\n");
+ goto err_irq;
+ }
+ }
+
+ return 0;
+
+err_irq:
+ clk_disable_unprepare(dev->clk);
+err:
+ return ret;
+}
+
+/**
+ * spear_smi_remove - Exit routine
+ * @pdev: platform device structure
+ *
+ * free all allocations and delete the partitions.
+ */
+static int spear_smi_remove(struct platform_device *pdev)
+{
+ struct spear_smi *dev;
+ struct spear_snor_flash *flash;
+ int ret, i;
+
+ dev = platform_get_drvdata(pdev);
+ if (!dev) {
+ dev_err(&pdev->dev, "dev is null\n");
+ return -ENODEV;
+ }
+
+ /* clean up for all nor flash */
+ for (i = 0; i < dev->num_flashes; i++) {
+ flash = dev->flash[i];
+ if (!flash)
+ continue;
+
+ /* clean up mtd stuff */
+ ret = mtd_device_unregister(&flash->mtd);
+ if (ret)
+ dev_err(&pdev->dev, "error removing mtd\n");
+ }
+
+ clk_disable_unprepare(dev->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int spear_smi_suspend(struct device *dev)
+{
+ struct spear_smi *sdev = dev_get_drvdata(dev);
+
+ if (sdev && sdev->clk)
+ clk_disable_unprepare(sdev->clk);
+
+ return 0;
+}
+
+static int spear_smi_resume(struct device *dev)
+{
+ struct spear_smi *sdev = dev_get_drvdata(dev);
+ int ret = -EPERM;
+
+ if (sdev && sdev->clk)
+ ret = clk_prepare_enable(sdev->clk);
+
+ if (!ret)
+ spear_smi_hw_init(sdev);
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(spear_smi_pm_ops, spear_smi_suspend, spear_smi_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id spear_smi_id_table[] = {
+ { .compatible = "st,spear600-smi" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spear_smi_id_table);
+#endif
+
+static struct platform_driver spear_smi_driver = {
+ .driver = {
+ .name = "smi",
+ .bus = &platform_bus_type,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(spear_smi_id_table),
+ .pm = &spear_smi_pm_ops,
+ },
+ .probe = spear_smi_probe,
+ .remove = spear_smi_remove,
+};
+module_platform_driver(spear_smi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.linux.kernel@gmail.com>");
+MODULE_DESCRIPTION("MTD SMI driver for serial nor flash chips");
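/*
 * spear_mtd_write() above follows the usual serial NOR pattern: the first
 * chunk only fills up to the end of the current page, and full pages follow.
 * A condensed sketch of that first-chunk computation (hypothetical helper
 * mirroring the loop above, assuming the kernel's u32/loff_t/size_t types):
 */
static size_t spear_first_chunk(loff_t to, size_t len, u32 page_size)
{
	u32 page_offset = (u32)to % page_size;	/* offset within current page */
	size_t room = page_size - page_offset;	/* bytes left in this page */

	return len < room ? len : room;
}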
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 1e2c430aaad..c63ecbcad0b 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -5,7 +5,7 @@
*
* Copyright © 2009 Bluewater Systems Ltd
* Author: Andre Renaud <andre@bluewatersys.com>
- * Author: Ryan Mallon <ryan@bluewatersys.com>
+ * Author: Ryan Mallon
*
* Based on m25p80.c
*
@@ -15,7 +15,6 @@
*
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
@@ -52,8 +51,6 @@ struct sst25l_flash {
struct spi_device *spi;
struct mutex lock;
struct mtd_info mtd;
-
- int partitioned;
};
struct flash_info {
@@ -66,7 +63,7 @@ struct flash_info {
#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
-static struct flash_info __devinitdata sst25l_flash_info[] = {
+static struct flash_info sst25l_flash_info[] = {
{"sst25lf020a", 0xbf43, 256, 1024, 4096},
{"sst25lf040a", 0xbf44, 256, 2048, 4096},
};
@@ -177,9 +174,6 @@ static int sst25l_erase(struct mtd_info *mtd, struct erase_info *instr)
int err;
/* Sanity checks */
- if (instr->addr + instr->len > flash->mtd.size)
- return -EINVAL;
-
if ((uint32_t)instr->len % mtd->erasesize)
return -EINVAL;
@@ -225,16 +219,6 @@ static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len,
unsigned char command[4];
int ret;
- /* Sanity checking */
- if (len == 0)
- return 0;
-
- if (from + len > flash->mtd.size)
- return -EINVAL;
-
- if (retlen)
- *retlen = 0;
-
spi_message_init(&message);
memset(&transfer, 0, sizeof(transfer));
@@ -276,13 +260,6 @@ static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len,
int i, j, ret, bytes, copied = 0;
unsigned char command[5];
- /* Sanity checks */
- if (!len)
- return 0;
-
- if (to + len > flash->mtd.size)
- return -EINVAL;
-
if ((uint32_t)to % mtd->writesize)
return -EINVAL;
@@ -335,7 +312,7 @@ out:
return ret;
}
-static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
+static struct flash_info *sst25l_match_device(struct spi_device *spi)
{
struct flash_info *flash_info = NULL;
struct spi_message m;
@@ -375,28 +352,26 @@ static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
return flash_info;
}
-static int __devinit sst25l_probe(struct spi_device *spi)
+static int sst25l_probe(struct spi_device *spi)
{
struct flash_info *flash_info;
struct sst25l_flash *flash;
struct flash_platform_data *data;
- int ret, i;
- struct mtd_partition *parts = NULL;
- int nr_parts = 0;
+ int ret;
flash_info = sst25l_match_device(spi);
if (!flash_info)
return -ENODEV;
- flash = kzalloc(sizeof(struct sst25l_flash), GFP_KERNEL);
+ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
if (!flash)
return -ENOMEM;
flash->spi = spi;
mutex_init(&flash->lock);
- dev_set_drvdata(&spi->dev, flash);
+ spi_set_drvdata(spi, flash);
- data = spi->dev.platform_data;
+ data = dev_get_platdata(&spi->dev);
if (data && data->name)
flash->mtd.name = data->name;
else
@@ -406,16 +381,16 @@ static int __devinit sst25l_probe(struct spi_device *spi)
flash->mtd.flags = MTD_CAP_NORFLASH;
flash->mtd.erasesize = flash_info->erase_size;
flash->mtd.writesize = flash_info->page_size;
+ flash->mtd.writebufsize = flash_info->page_size;
flash->mtd.size = flash_info->page_size * flash_info->nr_pages;
- flash->mtd.erase = sst25l_erase;
- flash->mtd.read = sst25l_read;
- flash->mtd.write = sst25l_write;
+ flash->mtd._erase = sst25l_erase;
+ flash->mtd._read = sst25l_read;
+ flash->mtd._write = sst25l_write;
dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
(long long)flash->mtd.size >> 10);
- DEBUG(MTD_DEBUG_LEVEL2,
- "mtd .name = %s, .size = 0x%llx (%lldMiB) "
+ pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) "
".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
flash->mtd.name,
(long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
@@ -423,80 +398,34 @@ static int __devinit sst25l_probe(struct spi_device *spi)
flash->mtd.numeraseregions);
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[] = {"cmdlinepart", NULL};
-
- nr_parts = parse_mtd_partitions(&flash->mtd,
- part_probes,
- &parts, 0);
- }
-
- if (nr_parts <= 0 && data && data->parts) {
- parts = data->parts;
- nr_parts = data->nr_parts;
- }
-
- if (nr_parts > 0) {
- for (i = 0; i < nr_parts; i++) {
- DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
- "{.name = %s, .offset = 0x%llx, "
- ".size = 0x%llx (%lldKiB) }\n",
- i, parts[i].name,
- (long long)parts[i].offset,
- (long long)parts[i].size,
- (long long)(parts[i].size >> 10));
- }
-
- flash->partitioned = 1;
- return mtd_device_register(&flash->mtd, parts,
- nr_parts);
- }
-
- ret = mtd_device_register(&flash->mtd, NULL, 0);
- if (ret == 1) {
- kfree(flash);
- dev_set_drvdata(&spi->dev, NULL);
+ ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,
+ data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
+ if (ret)
return -ENODEV;
- }
return 0;
}
-static int __devexit sst25l_remove(struct spi_device *spi)
+static int sst25l_remove(struct spi_device *spi)
{
- struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
- int ret;
+ struct sst25l_flash *flash = spi_get_drvdata(spi);
- ret = mtd_device_unregister(&flash->mtd);
- if (ret == 0)
- kfree(flash);
- return ret;
+ return mtd_device_unregister(&flash->mtd);
}
static struct spi_driver sst25l_driver = {
.driver = {
.name = "sst25l",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = sst25l_probe,
- .remove = __devexit_p(sst25l_remove),
+ .remove = sst25l_remove,
};
-static int __init sst25l_init(void)
-{
- return spi_register_driver(&sst25l_driver);
-}
-
-static void __exit sst25l_exit(void)
-{
- spi_unregister_driver(&sst25l_driver);
-}
-
-module_init(sst25l_init);
-module_exit(sst25l_exit);
+module_spi_driver(sst25l_driver);
MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips");
MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, "
- "Ryan Mallon <ryan@bluewatersys.com>");
+ "Ryan Mallon");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
new file mode 100644
index 00000000000..d252514d3e9
--- /dev/null
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -0,0 +1,2080 @@
+/*
+ * st_spi_fsm.c - ST Fast Sequence Mode (FSM) Serial Flash Controller
+ *
+ * Author: Angus Clark <angus.clark@st.com>
+ *
+ * Copyright (C) 2010-2014 STMicroelectronics Limited
+ *
+ * JEDEC probe based on drivers/mtd/devices/m25p80.c
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include "serial_flash_cmds.h"
+
+/*
+ * FSM SPI Controller Registers
+ */
+#define SPI_CLOCKDIV 0x0010
+#define SPI_MODESELECT 0x0018
+#define SPI_CONFIGDATA 0x0020
+#define SPI_STA_MODE_CHANGE 0x0028
+#define SPI_FAST_SEQ_TRANSFER_SIZE 0x0100
+#define SPI_FAST_SEQ_ADD1 0x0104
+#define SPI_FAST_SEQ_ADD2 0x0108
+#define SPI_FAST_SEQ_ADD_CFG 0x010c
+#define SPI_FAST_SEQ_OPC1 0x0110
+#define SPI_FAST_SEQ_OPC2 0x0114
+#define SPI_FAST_SEQ_OPC3 0x0118
+#define SPI_FAST_SEQ_OPC4 0x011c
+#define SPI_FAST_SEQ_OPC5 0x0120
+#define SPI_MODE_BITS 0x0124
+#define SPI_DUMMY_BITS 0x0128
+#define SPI_FAST_SEQ_FLASH_STA_DATA 0x012c
+#define SPI_FAST_SEQ_1 0x0130
+#define SPI_FAST_SEQ_2 0x0134
+#define SPI_FAST_SEQ_3 0x0138
+#define SPI_FAST_SEQ_4 0x013c
+#define SPI_FAST_SEQ_CFG 0x0140
+#define SPI_FAST_SEQ_STA 0x0144
+#define SPI_QUAD_BOOT_SEQ_INIT_1 0x0148
+#define SPI_QUAD_BOOT_SEQ_INIT_2 0x014c
+#define SPI_QUAD_BOOT_READ_SEQ_1 0x0150
+#define SPI_QUAD_BOOT_READ_SEQ_2 0x0154
+#define SPI_PROGRAM_ERASE_TIME 0x0158
+#define SPI_MULT_PAGE_REPEAT_SEQ_1 0x015c
+#define SPI_MULT_PAGE_REPEAT_SEQ_2 0x0160
+#define SPI_STATUS_WR_TIME_REG 0x0164
+#define SPI_FAST_SEQ_DATA_REG 0x0300
+
+/*
+ * Register: SPI_MODESELECT
+ */
+#define SPI_MODESELECT_CONTIG 0x01
+#define SPI_MODESELECT_FASTREAD 0x02
+#define SPI_MODESELECT_DUALIO 0x04
+#define SPI_MODESELECT_FSM 0x08
+#define SPI_MODESELECT_QUADBOOT 0x10
+
+/*
+ * Register: SPI_CONFIGDATA
+ */
+#define SPI_CFG_DEVICE_ST 0x1
+#define SPI_CFG_DEVICE_ATMEL 0x4
+#define SPI_CFG_MIN_CS_HIGH(x) (((x) & 0xfff) << 4)
+#define SPI_CFG_CS_SETUPHOLD(x) (((x) & 0xff) << 16)
+#define SPI_CFG_DATA_HOLD(x) (((x) & 0xff) << 24)
+
+#define SPI_CFG_DEFAULT_MIN_CS_HIGH SPI_CFG_MIN_CS_HIGH(0x0AA)
+#define SPI_CFG_DEFAULT_CS_SETUPHOLD SPI_CFG_CS_SETUPHOLD(0xA0)
+#define SPI_CFG_DEFAULT_DATA_HOLD SPI_CFG_DATA_HOLD(0x00)
+
+/*
+ * Register: SPI_FAST_SEQ_TRANSFER_SIZE
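+ * (the transfer size is programmed as bytes * 8, i.e. as a bit count)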
+ */
+#define TRANSFER_SIZE(x) ((x) * 8)
+
+/*
+ * Register: SPI_FAST_SEQ_ADD_CFG
+ */
+#define ADR_CFG_CYCLES_ADD1(x) ((x) << 0)
+#define ADR_CFG_PADS_1_ADD1 (0x0 << 6)
+#define ADR_CFG_PADS_2_ADD1 (0x1 << 6)
+#define ADR_CFG_PADS_4_ADD1 (0x3 << 6)
+#define ADR_CFG_CSDEASSERT_ADD1 (1 << 8)
+#define ADR_CFG_CYCLES_ADD2(x) ((x) << (0+16))
+#define ADR_CFG_PADS_1_ADD2 (0x0 << (6+16))
+#define ADR_CFG_PADS_2_ADD2 (0x1 << (6+16))
+#define ADR_CFG_PADS_4_ADD2 (0x3 << (6+16))
+#define ADR_CFG_CSDEASSERT_ADD2 (1 << (8+16))
+
+/*
+ * Register: SPI_FAST_SEQ_n
+ */
+#define SEQ_OPC_OPCODE(x) ((x) << 0)
+#define SEQ_OPC_CYCLES(x) ((x) << 8)
+#define SEQ_OPC_PADS_1 (0x0 << 14)
+#define SEQ_OPC_PADS_2 (0x1 << 14)
+#define SEQ_OPC_PADS_4 (0x3 << 14)
+#define SEQ_OPC_CSDEASSERT (1 << 16)
+
+/*
+ * Register: SPI_FAST_SEQ_CFG
+ */
+#define SEQ_CFG_STARTSEQ (1 << 0)
+#define SEQ_CFG_SWRESET (1 << 5)
+#define SEQ_CFG_CSDEASSERT (1 << 6)
+#define SEQ_CFG_READNOTWRITE (1 << 7)
+#define SEQ_CFG_ERASE (1 << 8)
+#define SEQ_CFG_PADS_1 (0x0 << 16)
+#define SEQ_CFG_PADS_2 (0x1 << 16)
+#define SEQ_CFG_PADS_4 (0x3 << 16)
+
+/*
+ * Register: SPI_MODE_BITS
+ */
+#define MODE_DATA(x) (x & 0xff)
+#define MODE_CYCLES(x) ((x & 0x3f) << 16)
+#define MODE_PADS_1 (0x0 << 22)
+#define MODE_PADS_2 (0x1 << 22)
+#define MODE_PADS_4 (0x3 << 22)
+#define DUMMY_CSDEASSERT (1 << 24)
+
+/*
+ * Register: SPI_DUMMY_BITS
+ */
+#define DUMMY_CYCLES(x) ((x & 0x3f) << 16)
+#define DUMMY_PADS_1 (0x0 << 22)
+#define DUMMY_PADS_2 (0x1 << 22)
+#define DUMMY_PADS_4 (0x3 << 22)
+#define DUMMY_CSDEASSERT (1 << 24)
+
+/*
+ * Register: SPI_FAST_SEQ_FLASH_STA_DATA
+ */
+#define STA_DATA_BYTE1(x) ((x & 0xff) << 0)
+#define STA_DATA_BYTE2(x) ((x & 0xff) << 8)
+#define STA_PADS_1 (0x0 << 16)
+#define STA_PADS_2 (0x1 << 16)
+#define STA_PADS_4 (0x3 << 16)
+#define STA_CSDEASSERT (0x1 << 20)
+#define STA_RDNOTWR (0x1 << 21)
+
+/*
+ * FSM SPI Instruction Opcodes
+ */
+#define STFSM_OPC_CMD 0x1
+#define STFSM_OPC_ADD 0x2
+#define STFSM_OPC_STA 0x3
+#define STFSM_OPC_MODE 0x4
+#define STFSM_OPC_DUMMY 0x5
+#define STFSM_OPC_DATA 0x6
+#define STFSM_OPC_WAIT 0x7
+#define STFSM_OPC_JUMP 0x8
+#define STFSM_OPC_GOTO 0x9
+#define STFSM_OPC_STOP 0xF
+
+/*
+ * FSM SPI Instructions (== opcode + operand).
+ */
+#define STFSM_INSTR(cmd, op) ((cmd) | ((op) << 4))
+
+#define STFSM_INST_CMD1 STFSM_INSTR(STFSM_OPC_CMD, 1)
+#define STFSM_INST_CMD2 STFSM_INSTR(STFSM_OPC_CMD, 2)
+#define STFSM_INST_CMD3 STFSM_INSTR(STFSM_OPC_CMD, 3)
+#define STFSM_INST_CMD4 STFSM_INSTR(STFSM_OPC_CMD, 4)
+#define STFSM_INST_CMD5 STFSM_INSTR(STFSM_OPC_CMD, 5)
+#define STFSM_INST_ADD1 STFSM_INSTR(STFSM_OPC_ADD, 1)
+#define STFSM_INST_ADD2 STFSM_INSTR(STFSM_OPC_ADD, 2)
+
+#define STFSM_INST_DATA_WRITE STFSM_INSTR(STFSM_OPC_DATA, 1)
+#define STFSM_INST_DATA_READ STFSM_INSTR(STFSM_OPC_DATA, 2)
+
+#define STFSM_INST_STA_RD1 STFSM_INSTR(STFSM_OPC_STA, 0x1)
+#define STFSM_INST_STA_WR1 STFSM_INSTR(STFSM_OPC_STA, 0x1)
+#define STFSM_INST_STA_RD2 STFSM_INSTR(STFSM_OPC_STA, 0x2)
+#define STFSM_INST_STA_WR1_2 STFSM_INSTR(STFSM_OPC_STA, 0x3)
+
+#define STFSM_INST_MODE STFSM_INSTR(STFSM_OPC_MODE, 0)
+#define STFSM_INST_DUMMY STFSM_INSTR(STFSM_OPC_DUMMY, 0)
+#define STFSM_INST_WAIT STFSM_INSTR(STFSM_OPC_WAIT, 0)
+#define STFSM_INST_STOP STFSM_INSTR(STFSM_OPC_STOP, 0)
+
+#define STFSM_DEFAULT_EMI_FREQ 100000000UL /* 100 MHz */
+#define STFSM_DEFAULT_WR_TIME ((STFSM_DEFAULT_EMI_FREQ / 1000) * 15) /* 15ms */
+
+#define STFSM_FLASH_SAFE_FREQ 10000000UL /* 10 MHz */
+
+#define STFSM_MAX_WAIT_SEQ_MS 1000 /* FSM execution time */
+
+/* S25FLxxxS commands */
+#define S25FL_CMD_WRITE4_1_1_4 0x34
+#define S25FL_CMD_SE4 0xdc
+#define S25FL_CMD_CLSR 0x30
+#define S25FL_CMD_DYBWR 0xe1
+#define S25FL_CMD_DYBRD 0xe0
+#define S25FL_CMD_WRITE4 0x12 /* Note, opcode clashes with
+ * 'SPINOR_OP_WRITE_1_4_4'
+ * as found on N25Qxxx devices! */
+
+/* Status register */
+#define FLASH_STATUS_BUSY 0x01
+#define FLASH_STATUS_WEL 0x02
+#define FLASH_STATUS_BP0 0x04
+#define FLASH_STATUS_BP1 0x08
+#define FLASH_STATUS_BP2 0x10
+#define FLASH_STATUS_SRWP0 0x80
+#define FLASH_STATUS_TIMEOUT 0xff
+/* S25FL Error Flags */
+#define S25FL_STATUS_E_ERR 0x20
+#define S25FL_STATUS_P_ERR 0x40
+
+#define N25Q_CMD_WRVCR 0x81
+#define N25Q_CMD_RDVCR 0x85
+#define N25Q_CMD_RDVECR 0x65
+#define N25Q_CMD_RDNVCR 0xb5
+#define N25Q_CMD_WRNVCR 0xb1
+
+#define FLASH_PAGESIZE 256 /* In Bytes */
+#define FLASH_PAGESIZE_32 (FLASH_PAGESIZE / 4) /* In uint32_t */
+#define FLASH_MAX_BUSY_WAIT (300 * HZ) /* Maximum 'CHIPERASE' time */
+
+/*
+ * Flags to tweak operation of default read/write/erase routines
+ */
+#define CFG_READ_TOGGLE_32BIT_ADDR 0x00000001
+#define CFG_WRITE_TOGGLE_32BIT_ADDR 0x00000002
+#define CFG_ERASESEC_TOGGLE_32BIT_ADDR 0x00000008
+#define CFG_S25FL_CHECK_ERROR_FLAGS 0x00000010
+
+struct stfsm_seq {
+ uint32_t data_size;
+ uint32_t addr1;
+ uint32_t addr2;
+ uint32_t addr_cfg;
+ uint32_t seq_opc[5];
+ uint32_t mode;
+ uint32_t dummy;
+ uint32_t status;
+ uint8_t seq[16];
+ uint32_t seq_cfg;
+} __packed __aligned(4);
+
+struct stfsm {
+ struct device *dev;
+ void __iomem *base;
+ struct resource *region;
+ struct mtd_info mtd;
+ struct mutex lock;
+ struct flash_info *info;
+
+ uint32_t configuration;
+ uint32_t fifo_dir_delay;
+ bool booted_from_spi;
+ bool reset_signal;
+ bool reset_por;
+
+ struct stfsm_seq stfsm_seq_read;
+ struct stfsm_seq stfsm_seq_write;
+ struct stfsm_seq stfsm_seq_en_32bit_addr;
+};
+
+/* Parameters to configure a READ or WRITE FSM sequence */
+struct seq_rw_config {
+ uint32_t flags; /* flags to support config */
+ uint8_t cmd; /* FLASH command */
+ int write; /* Write Sequence */
+ uint8_t addr_pads; /* No. of addr pads (MODE & DUMMY) */
+ uint8_t data_pads; /* No. of data pads */
+ uint8_t mode_data; /* MODE data */
+ uint8_t mode_cycles; /* No. of MODE cycles */
+ uint8_t dummy_cycles; /* No. of DUMMY cycles */
+};
+
+/* SPI Flash Device Table */
+struct flash_info {
+ char *name;
+ /*
+ * JEDEC id zero means "no ID" (most older chips); otherwise it has
+ * a high byte of zero plus three data bytes: the manufacturer id,
+ * then a two byte device id.
+ */
+ u32 jedec_id;
+ u16 ext_id;
+ /*
+ * The size listed here is what works with SPINOR_OP_SE, which isn't
+ * necessarily called a "sector" by the vendor.
+ */
+ unsigned sector_size;
+ u16 n_sectors;
+ u32 flags;
+ /*
+ * Note, where FAST_READ is supported, freq_max specifies the
+ * FAST_READ frequency, not the READ frequency.
+ */
+ u32 max_freq;
+ int (*config)(struct stfsm *);
+};
+
+static int stfsm_n25q_config(struct stfsm *fsm);
+static int stfsm_mx25_config(struct stfsm *fsm);
+static int stfsm_s25fl_config(struct stfsm *fsm);
+static int stfsm_w25q_config(struct stfsm *fsm);
+
+static struct flash_info flash_types[] = {
+ /*
+	 * STMicroelectronics/Numonyx --
+	 * (newer production versions may have feature updates,
+	 * e.g. a faster operating frequency)
+ */
+#define M25P_FLAG (FLASH_FLAG_READ_WRITE | FLASH_FLAG_READ_FAST)
+ { "m25p40", 0x202013, 0, 64 * 1024, 8, M25P_FLAG, 25, NULL },
+ { "m25p80", 0x202014, 0, 64 * 1024, 16, M25P_FLAG, 25, NULL },
+ { "m25p16", 0x202015, 0, 64 * 1024, 32, M25P_FLAG, 25, NULL },
+ { "m25p32", 0x202016, 0, 64 * 1024, 64, M25P_FLAG, 50, NULL },
+ { "m25p64", 0x202017, 0, 64 * 1024, 128, M25P_FLAG, 50, NULL },
+ { "m25p128", 0x202018, 0, 256 * 1024, 64, M25P_FLAG, 50, NULL },
+
+#define M25PX_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_WRITE_1_1_2)
+ { "m25px32", 0x207116, 0, 64 * 1024, 64, M25PX_FLAG, 75, NULL },
+ { "m25px64", 0x207117, 0, 64 * 1024, 128, M25PX_FLAG, 75, NULL },
+
+ /* Macronix MX25xxx
+ * - Support for 'FLASH_FLAG_WRITE_1_4_4' is omitted for devices
+ * where operating frequency must be reduced.
+ */
+#define MX25_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_SE_4K | \
+ FLASH_FLAG_SE_32K)
+ { "mx25l3255e", 0xc29e16, 0, 64 * 1024, 64,
+ (MX25_FLAG | FLASH_FLAG_WRITE_1_4_4), 86,
+ stfsm_mx25_config},
+ { "mx25l25635e", 0xc22019, 0, 64*1024, 512,
+ (MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70,
+ stfsm_mx25_config },
+ { "mx25l25655e", 0xc22619, 0, 64*1024, 512,
+ (MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70,
+ stfsm_mx25_config},
+
+#define N25Q_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_READ_1_4_4 | \
+ FLASH_FLAG_WRITE_1_1_2 | \
+ FLASH_FLAG_WRITE_1_2_2 | \
+ FLASH_FLAG_WRITE_1_1_4 | \
+ FLASH_FLAG_WRITE_1_4_4)
+ { "n25q128", 0x20ba18, 0, 64 * 1024, 256, N25Q_FLAG, 108,
+ stfsm_n25q_config },
+ { "n25q256", 0x20ba19, 0, 64 * 1024, 512,
+ N25Q_FLAG | FLASH_FLAG_32BIT_ADDR, 108, stfsm_n25q_config },
+
+ /*
+ * Spansion S25FLxxxP
+ * - 256KiB and 64KiB sector variants (identified by ext. JEDEC)
+ */
+#define S25FLXXXP_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_READ_1_4_4 | \
+ FLASH_FLAG_WRITE_1_1_4 | \
+ FLASH_FLAG_READ_FAST)
+ { "s25fl032p", 0x010215, 0x4d00, 64 * 1024, 64, S25FLXXXP_FLAG, 80,
+ stfsm_s25fl_config},
+ { "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, S25FLXXXP_FLAG, 80,
+ stfsm_s25fl_config },
+ { "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, S25FLXXXP_FLAG, 80,
+ stfsm_s25fl_config },
+
+ /*
+ * Spansion S25FLxxxS
+ * - 256KiB and 64KiB sector variants (identified by ext. JEDEC)
+	 *   - RESET# signal supported by the die but not brought out on all
+ * package types. The package type is a function of board design,
+ * so this information is captured in the board's flags.
+ * - Supports 'DYB' sector protection. Depending on variant, sectors
+ * may default to locked state on power-on.
+ */
+#define S25FLXXXS_FLAG (S25FLXXXP_FLAG | \
+ FLASH_FLAG_RESET | \
+ FLASH_FLAG_DYB_LOCKING)
+ { "s25fl128s0", 0x012018, 0x0300, 256 * 1024, 64, S25FLXXXS_FLAG, 80,
+ stfsm_s25fl_config },
+ { "s25fl128s1", 0x012018, 0x0301, 64 * 1024, 256, S25FLXXXS_FLAG, 80,
+ stfsm_s25fl_config },
+ { "s25fl256s0", 0x010219, 0x4d00, 256 * 1024, 128,
+ S25FLXXXS_FLAG | FLASH_FLAG_32BIT_ADDR, 80, stfsm_s25fl_config },
+ { "s25fl256s1", 0x010219, 0x4d01, 64 * 1024, 512,
+ S25FLXXXS_FLAG | FLASH_FLAG_32BIT_ADDR, 80, stfsm_s25fl_config },
+
+	/* Winbond -- w25x "blocks" are 64KiB, "sectors" are 4KiB */
+#define W25X_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_WRITE_1_1_2)
+ { "w25x40", 0xef3013, 0, 64 * 1024, 8, W25X_FLAG, 75, NULL },
+ { "w25x80", 0xef3014, 0, 64 * 1024, 16, W25X_FLAG, 75, NULL },
+ { "w25x16", 0xef3015, 0, 64 * 1024, 32, W25X_FLAG, 75, NULL },
+ { "w25x32", 0xef3016, 0, 64 * 1024, 64, W25X_FLAG, 75, NULL },
+ { "w25x64", 0xef3017, 0, 64 * 1024, 128, W25X_FLAG, 75, NULL },
+
+	/* Winbond -- w25q "blocks" are 64KiB, "sectors" are 4KiB */
+#define W25Q_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_READ_1_4_4 | \
+ FLASH_FLAG_WRITE_1_1_4)
+ { "w25q80", 0xef4014, 0, 64 * 1024, 16, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+ { "w25q16", 0xef4015, 0, 64 * 1024, 32, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+ { "w25q32", 0xef4016, 0, 64 * 1024, 64, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+ { "w25q64", 0xef4017, 0, 64 * 1024, 128, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+
+ /* Sentinel */
+ { NULL, 0x000000, 0, 0, 0, 0, 0, NULL },
+};
+
+/*
+ * FSM message sequence configurations:
+ *
+ * All configs are presented in order of preference
+ */
+
+/* Default READ configurations, in order of preference */
+static struct seq_rw_config default_read_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4, 0, 4, 4, 0x00, 2, 4},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4, 0, 1, 4, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2, 0, 2, 2, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/* Default WRITE configurations */
+static struct seq_rw_config default_write_configs[] = {
+ {FLASH_FLAG_WRITE_1_4_4, SPINOR_OP_WRITE_1_4_4, 1, 4, 4, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_1_4, SPINOR_OP_WRITE_1_1_4, 1, 1, 4, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_2_2, SPINOR_OP_WRITE_1_2_2, 1, 2, 2, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_1_2, SPINOR_OP_WRITE_1_1_2, 1, 1, 2, 0x00, 0, 0},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_WRITE, 1, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/*
+ * [N25Qxxx] Configuration
+ */
+#define N25Q_VCR_DUMMY_CYCLES(x) (((x) & 0xf) << 4)
+#define N25Q_VCR_XIP_DISABLED ((uint8_t)0x1 << 3)
+#define N25Q_VCR_WRAP_CONT 0x3
+
+/* N25Q 3-byte Address READ configurations
+ * - 'FAST' variants configured for 8 dummy cycles.
+ *
+ * Note, the number of dummy cycles used for 'FAST' READ operations is
+ * configurable and would normally be tuned according to the READ command and
+ * operating frequency. However, this applies universally to all 'FAST' READ
+ * commands, including those used by the SPIBoot controller, and remains in
+ * force until the device is power-cycled. Since the SPIBoot controller is
+ * hard-wired to use 8 dummy cycles, we must configure the device to also use 8
+ * cycles.
+ */
+static struct seq_rw_config n25q_read3_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4, 0, 4, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2, 0, 2, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/* N25Q 4-byte Address READ configurations
+ * - use special 4-byte address READ commands (reduces overheads, and
+ * reduces risk of hitting watchdog reset issues).
+ * - 'FAST' variants configured for 8 dummy cycles (see note above.)
+ */
+static struct seq_rw_config n25q_read4_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ4_1_4_4, 0, 4, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ4_1_2_2, 0, 2, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ4_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ4, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/*
+ * [MX25xxx] Configuration
+ */
+#define MX25_STATUS_QE (0x1 << 6)
+
+static int stfsm_mx25_en_32bit_addr_seq(struct stfsm_seq *seq)
+{
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_EN4B) |
+ SEQ_OPC_CSDEASSERT);
+
+ seq->seq[0] = STFSM_INST_CMD1;
+ seq->seq[1] = STFSM_INST_WAIT;
+ seq->seq[2] = STFSM_INST_STOP;
+
+ seq->seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ);
+
+ return 0;
+}
+
+/*
+ * [S25FLxxx] Configuration
+ */
+#define STFSM_S25FL_CONFIG_QE (0x1 << 1)
+
+/*
+ * S25FLxxxS devices provide three ways of supporting 32-bit addressing: Bank
+ * Register, Extended Address Modes, and a 32-bit address command set. The
+ * 32-bit address command set is used here, since it avoids any problems with
+ * entering a state that is incompatible with the SPIBoot Controller.
+ */
+static struct seq_rw_config stfsm_s25fl_read4_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ4_1_4_4, 0, 4, 4, 0x00, 2, 4},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ4_1_2_2, 0, 2, 2, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ4_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ4, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+static struct seq_rw_config stfsm_s25fl_write4_configs[] = {
+ {FLASH_FLAG_WRITE_1_1_4, S25FL_CMD_WRITE4_1_1_4, 1, 1, 4, 0x00, 0, 0},
+ {FLASH_FLAG_READ_WRITE, S25FL_CMD_WRITE4, 1, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/*
+ * [W25Qxxx] Configuration
+ */
+#define W25Q_STATUS_QE (0x1 << 1)
+
+static struct stfsm_seq stfsm_seq_read_jedec = {
+ .data_size = TRANSFER_SIZE(8),
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_RDID)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_DATA_READ,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_read_status_fifo = {
+ .data_size = TRANSFER_SIZE(4),
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_RDSR)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_DATA_READ,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_erase_sector = {
+ /* 'addr_cfg' configured during initialisation */
+ .seq_opc = {
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
+
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_SE)),
+ },
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_ADD1,
+ STFSM_INST_ADD2,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_erase_chip = {
+ .seq_opc = {
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
+
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_CHIP_ERASE) | SEQ_OPC_CSDEASSERT),
+ },
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_WAIT,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_write_status = {
+ .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
+ .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WRSR)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_STA_WR1,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq)
+{
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_EN4B));
+ seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
+ SEQ_OPC_CSDEASSERT);
+
+ seq->seq[0] = STFSM_INST_CMD2;
+ seq->seq[1] = STFSM_INST_CMD1;
+ seq->seq[2] = STFSM_INST_WAIT;
+ seq->seq[3] = STFSM_INST_STOP;
+
+ seq->seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ);
+
+ return 0;
+}
+
+static inline int stfsm_is_idle(struct stfsm *fsm)
+{
+ return readl(fsm->base + SPI_FAST_SEQ_STA) & 0x10;
+}
+
+static inline uint32_t stfsm_fifo_available(struct stfsm *fsm)
+{
+ return (readl(fsm->base + SPI_FAST_SEQ_STA) >> 5) & 0x7f;
+}
+
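+/* Drain any stale data left in the FSM read FIFO */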
+static void stfsm_clear_fifo(struct stfsm *fsm)
+{
+ uint32_t avail;
+
+ for (;;) {
+ avail = stfsm_fifo_available(fsm);
+ if (!avail)
+ break;
+
+ while (avail) {
+ readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
+ avail--;
+ }
+ }
+}
+
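+/*
+ * Load a 'struct stfsm_seq' into the FSM sequence registers. The struct
+ * layout mirrors the register bank starting at SPI_FAST_SEQ_TRANSFER_SIZE,
+ * so it can be copied out word by word.
+ */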
+static inline void stfsm_load_seq(struct stfsm *fsm,
+ const struct stfsm_seq *seq)
+{
+ void __iomem *dst = fsm->base + SPI_FAST_SEQ_TRANSFER_SIZE;
+ const uint32_t *src = (const uint32_t *)seq;
+ int words = sizeof(*seq) / sizeof(*src);
+
+ BUG_ON(!stfsm_is_idle(fsm));
+
+ while (words--) {
+ writel(*src, dst);
+ src++;
+ dst += 4;
+ }
+}
+
+static void stfsm_wait_seq(struct stfsm *fsm)
+{
+ unsigned long deadline;
+ int timeout = 0;
+
+ deadline = jiffies + msecs_to_jiffies(STFSM_MAX_WAIT_SEQ_MS);
+
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ if (stfsm_is_idle(fsm))
+ return;
+
+ cond_resched();
+ }
+
+ dev_err(fsm->dev, "timeout on sequence completion\n");
+}
+
+static void stfsm_read_fifo(struct stfsm *fsm, uint32_t *buf, uint32_t size)
+{
+ uint32_t remaining = size >> 2;
+ uint32_t avail;
+ uint32_t words;
+
+ dev_dbg(fsm->dev, "Reading %d bytes from FIFO\n", size);
+
+ BUG_ON((((uintptr_t)buf) & 0x3) || (size & 0x3));
+
+ while (remaining) {
+ for (;;) {
+ avail = stfsm_fifo_available(fsm);
+ if (avail)
+ break;
+ udelay(1);
+ }
+ words = min(avail, remaining);
+ remaining -= words;
+
+ readsl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words);
+ buf += words;
+ }
+}
+
+static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf,
+ uint32_t size)
+{
+ uint32_t words = size >> 2;
+
+ dev_dbg(fsm->dev, "writing %d bytes to FIFO\n", size);
+
+ BUG_ON((((uintptr_t)buf) & 0x3) || (size & 0x3));
+
+ writesl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words);
+
+ return size;
+}
+
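+/*
+ * Enter or exit 32-bit address mode, using the device-specific sequence
+ * prepared by the relevant *_config() callback.
+ */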
+static int stfsm_enter_32bit_addr(struct stfsm *fsm, int enter)
+{
+ struct stfsm_seq *seq = &fsm->stfsm_seq_en_32bit_addr;
+ uint32_t cmd = enter ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
+
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cmd) |
+ SEQ_OPC_CSDEASSERT);
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ return 0;
+}
+
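+/*
+ * Poll the flash status register until the BUSY bit clears, an error flag
+ * is raised (S25FLxxxS only), or the wait times out.
+ */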
+static uint8_t stfsm_wait_busy(struct stfsm *fsm)
+{
+ struct stfsm_seq *seq = &stfsm_seq_read_status_fifo;
+ unsigned long deadline;
+ uint32_t status;
+ int timeout = 0;
+
+	/* Use RDSR1 */
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_RDSR));
+
+ /* Load read_status sequence */
+ stfsm_load_seq(fsm, seq);
+
+ /*
+ * Repeat until busy bit is deasserted, or timeout, or error (S25FLxxxS)
+ */
+ deadline = jiffies + FLASH_MAX_BUSY_WAIT;
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ stfsm_wait_seq(fsm);
+
+ stfsm_read_fifo(fsm, &status, 4);
+
+ if ((status & FLASH_STATUS_BUSY) == 0)
+ return 0;
+
+ if ((fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS) &&
+ ((status & S25FL_STATUS_P_ERR) ||
+ (status & S25FL_STATUS_E_ERR)))
+ return (uint8_t)(status & 0xff);
+
+ if (!timeout)
+ /* Restart */
+ writel(seq->seq_cfg, fsm->base + SPI_FAST_SEQ_CFG);
+
+ cond_resched();
+ }
+
+ dev_err(fsm->dev, "timeout on wait_busy\n");
+
+ return FLASH_STATUS_TIMEOUT;
+}
+
+static int stfsm_read_status(struct stfsm *fsm, uint8_t cmd,
+ uint8_t *data, int bytes)
+{
+ struct stfsm_seq *seq = &stfsm_seq_read_status_fifo;
+ uint32_t tmp;
+ uint8_t *t = (uint8_t *)&tmp;
+ int i;
+
+ dev_dbg(fsm->dev, "read 'status' register [0x%02x], %d byte(s)\n",
+ cmd, bytes);
+
+ BUG_ON(bytes != 1 && bytes != 2);
+
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+			   SEQ_OPC_OPCODE(cmd));
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_read_fifo(fsm, &tmp, 4);
+
+ for (i = 0; i < bytes; i++)
+ data[i] = t[i];
+
+ stfsm_wait_seq(fsm);
+
+ return 0;
+}
+
+static int stfsm_write_status(struct stfsm *fsm, uint8_t cmd,
+ uint16_t data, int bytes, int wait_busy)
+{
+ struct stfsm_seq *seq = &stfsm_seq_write_status;
+
+ dev_dbg(fsm->dev,
+ "write 'status' register [0x%02x], %d byte(s), 0x%04x\n"
+ " %s wait-busy\n", cmd, bytes, data, wait_busy ? "with" : "no");
+
+ BUG_ON(bytes != 1 && bytes != 2);
+
+ seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cmd));
+
+ seq->status = (uint32_t)data | STA_PADS_1 | STA_CSDEASSERT;
+ seq->seq[2] = (bytes == 1) ? STFSM_INST_STA_WR1 : STFSM_INST_STA_WR1_2;
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ if (wait_busy)
+ stfsm_wait_busy(fsm);
+
+ return 0;
+}
+
+/*
+ * SoC reset on 'boot-from-spi' systems
+ *
+ * Certain modes of operation cause the Flash device to enter a particular state
+ * for a period of time (e.g. 'Erase Sector', 'Quad Enable', and 'Enter 32-bit
+ * Addr' commands). On boot-from-spi systems, it is important to consider what
+ * happens if a warm reset occurs during this period. The SPIBoot controller
+ * assumes that the Flash device is in its default reset state, 24-bit address mode,
+ * and ready to accept commands. This can be achieved using some form of
+ * on-board logic/controller to force a device POR in response to a SoC-level
+ * reset or by making use of the device reset signal if available (limited
+ * number of devices only).
+ *
+ * Failure to take such precautions can cause problems following a warm reset.
+ * For some operations (e.g. ERASE), there is little that can be done. For
+ * other modes of operation (e.g. 32-bit addressing), options are often
+ * available that can help minimise the window in which a reset could cause a
+ * problem.
+ *
+ */
+static bool stfsm_can_handle_soc_reset(struct stfsm *fsm)
+{
+ /* Reset signal is available on the board and supported by the device */
+ if (fsm->reset_signal && fsm->info->flags & FLASH_FLAG_RESET)
+ return true;
+
+ /* Board-level logic forces a power-on-reset */
+ if (fsm->reset_por)
+ return true;
+
+ /* Reset is not properly handled and may result in failure to reboot */
+ return false;
+}
+
+/* Configure 'addr_cfg' according to addressing mode */
+static void stfsm_prepare_erasesec_seq(struct stfsm *fsm,
+ struct stfsm_seq *seq)
+{
+ int addr1_cycles = fsm->info->flags & FLASH_FLAG_32BIT_ADDR ? 16 : 8;
+
+ seq->addr_cfg = (ADR_CFG_CYCLES_ADD1(addr1_cycles) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2 |
+ ADR_CFG_CSDEASSERT_ADD2);
+}
+
+/* Search for preferred configuration based on available flags */
+static struct seq_rw_config *
+stfsm_search_seq_rw_configs(struct stfsm *fsm,
+ struct seq_rw_config cfgs[])
+{
+ struct seq_rw_config *config;
+ int flags = fsm->info->flags;
+
+ for (config = cfgs; config->cmd != 0; config++)
+ if ((config->flags & flags) == config->flags)
+ return config;
+
+ return NULL;
+}
+
+/* Prepare a READ/WRITE sequence according to configuration parameters */
+static void stfsm_prepare_rw_seq(struct stfsm *fsm,
+ struct stfsm_seq *seq,
+ struct seq_rw_config *cfg)
+{
+ int addr1_cycles, addr2_cycles;
+ int i = 0;
+
+ memset(seq, 0, sizeof(*seq));
+
+ /* Add READ/WRITE OPC */
+ seq->seq_opc[i++] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cfg->cmd));
+
+ /* Add WREN OPC for a WRITE sequence */
+ if (cfg->write)
+ seq->seq_opc[i++] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
+ SEQ_OPC_CSDEASSERT);
+
+ /* Address configuration (24 or 32-bit addresses) */
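+	/* (ADD1 carries the upper 8 or 16 address bits, ADD2 the lower 16 bits) */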
+ addr1_cycles = (fsm->info->flags & FLASH_FLAG_32BIT_ADDR) ? 16 : 8;
+ addr1_cycles /= cfg->addr_pads;
+ addr2_cycles = 16 / cfg->addr_pads;
+ seq->addr_cfg = ((addr1_cycles & 0x3f) << 0 | /* ADD1 cycles */
+ (cfg->addr_pads - 1) << 6 | /* ADD1 pads */
+ (addr2_cycles & 0x3f) << 16 | /* ADD2 cycles */
+ ((cfg->addr_pads - 1) << 22)); /* ADD2 pads */
+
+ /* Data/Sequence configuration */
+ seq->seq_cfg = ((cfg->data_pads - 1) << 16 |
+ SEQ_CFG_STARTSEQ |
+ SEQ_CFG_CSDEASSERT);
+ if (!cfg->write)
+ seq->seq_cfg |= SEQ_CFG_READNOTWRITE;
+
+ /* Mode configuration (no. of pads taken from addr cfg) */
+ seq->mode = ((cfg->mode_data & 0xff) << 0 | /* data */
+ (cfg->mode_cycles & 0x3f) << 16 | /* cycles */
+ (cfg->addr_pads - 1) << 22); /* pads */
+
+ /* Dummy configuration (no. of pads taken from addr cfg) */
+ seq->dummy = ((cfg->dummy_cycles & 0x3f) << 16 | /* cycles */
+ (cfg->addr_pads - 1) << 22); /* pads */
+
+ /* Instruction sequence */
+ i = 0;
+ if (cfg->write)
+ seq->seq[i++] = STFSM_INST_CMD2;
+
+ seq->seq[i++] = STFSM_INST_CMD1;
+
+ seq->seq[i++] = STFSM_INST_ADD1;
+ seq->seq[i++] = STFSM_INST_ADD2;
+
+ if (cfg->mode_cycles)
+ seq->seq[i++] = STFSM_INST_MODE;
+
+ if (cfg->dummy_cycles)
+ seq->seq[i++] = STFSM_INST_DUMMY;
+
+ seq->seq[i++] =
+ cfg->write ? STFSM_INST_DATA_WRITE : STFSM_INST_DATA_READ;
+ seq->seq[i++] = STFSM_INST_STOP;
+}
+
+static int stfsm_search_prepare_rw_seq(struct stfsm *fsm,
+ struct stfsm_seq *seq,
+ struct seq_rw_config *cfgs)
+{
+ struct seq_rw_config *config;
+
+ config = stfsm_search_seq_rw_configs(fsm, cfgs);
+ if (!config) {
+ dev_err(fsm->dev, "failed to find suitable config\n");
+ return -EINVAL;
+ }
+
+ stfsm_prepare_rw_seq(fsm, seq, config);
+
+ return 0;
+}
+
+/* Prepare the 'default' READ/WRITE/ERASE sequences */
+static int stfsm_prepare_rwe_seqs_default(struct stfsm *fsm)
+{
+ uint32_t flags = fsm->info->flags;
+ int ret;
+
+ /* Configure 'READ' sequence */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ default_read_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "failed to prep READ sequence with flags [0x%08x]\n",
+ flags);
+ return ret;
+ }
+
+ /* Configure 'WRITE' sequence */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
+ default_write_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "failed to prep WRITE sequence with flags [0x%08x]\n",
+ flags);
+ return ret;
+ }
+
+ /* Configure 'ERASE_SECTOR' sequence */
+ stfsm_prepare_erasesec_seq(fsm, &stfsm_seq_erase_sector);
+
+ return 0;
+}
+
+static int stfsm_mx25_config(struct stfsm *fsm)
+{
+ uint32_t flags = fsm->info->flags;
+ uint32_t data_pads;
+ uint8_t sta;
+ int ret;
+ bool soc_reset;
+
+ /*
+ * Use default READ/WRITE sequences
+ */
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure 32-bit Address Support
+ */
+ if (flags & FLASH_FLAG_32BIT_ADDR) {
+ /* Configure 'enter_32bitaddr' FSM sequence */
+ stfsm_mx25_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr);
+
+ soc_reset = stfsm_can_handle_soc_reset(fsm);
+ if (soc_reset || !fsm->booted_from_spi)
+ /* If we can handle SoC resets, we enable 32-bit address
+ * mode pervasively */
+ stfsm_enter_32bit_addr(fsm, 1);
+		else
+ /* Else, enable/disable 32-bit addressing before/after
+ * each operation */
+ fsm->configuration = (CFG_READ_TOGGLE_32BIT_ADDR |
+ CFG_WRITE_TOGGLE_32BIT_ADDR |
+ CFG_ERASESEC_TOGGLE_32BIT_ADDR);
+ }
+
+ /* Check status of 'QE' bit, update if required. */
+ stfsm_read_status(fsm, SPINOR_OP_RDSR, &sta, 1);
+ data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
+ if (data_pads == 4) {
+ if (!(sta & MX25_STATUS_QE)) {
+ /* Set 'QE' */
+ sta |= MX25_STATUS_QE;
+
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sta, 1, 1);
+ }
+ } else {
+ if (sta & MX25_STATUS_QE) {
+ /* Clear 'QE' */
+ sta &= ~MX25_STATUS_QE;
+
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sta, 1, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int stfsm_n25q_config(struct stfsm *fsm)
+{
+ uint32_t flags = fsm->info->flags;
+ uint8_t vcr;
+ int ret = 0;
+ bool soc_reset;
+
+ /* Configure 'READ' sequence */
+ if (flags & FLASH_FLAG_32BIT_ADDR)
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ n25q_read4_configs);
+ else
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ n25q_read3_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "failed to prepare READ sequence with flags [0x%08x]\n",
+ flags);
+ return ret;
+ }
+
+ /* Configure 'WRITE' sequence (default configs) */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
+ default_write_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+			"failed to prepare WRITE sequence with flags [0x%08x]\n",
+ flags);
+ return ret;
+ }
+
+	/* Configure 'ERASE_SECTOR' sequence */
+ stfsm_prepare_erasesec_seq(fsm, &stfsm_seq_erase_sector);
+
+ /* Configure 32-bit address support */
+ if (flags & FLASH_FLAG_32BIT_ADDR) {
+ stfsm_n25q_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr);
+
+ soc_reset = stfsm_can_handle_soc_reset(fsm);
+ if (soc_reset || !fsm->booted_from_spi) {
+ /*
+ * If we can handle SoC resets, we enable 32-bit
+ * address mode pervasively
+ */
+ stfsm_enter_32bit_addr(fsm, 1);
+ } else {
+ /*
+ * If not, enable/disable for WRITE and ERASE
+ * operations (READ uses special commands)
+ */
+ fsm->configuration = (CFG_WRITE_TOGGLE_32BIT_ADDR |
+ CFG_ERASESEC_TOGGLE_32BIT_ADDR);
+ }
+ }
+
+ /*
+ * Configure device to use 8 dummy cycles
+ */
+ vcr = (N25Q_VCR_DUMMY_CYCLES(8) | N25Q_VCR_XIP_DISABLED |
+ N25Q_VCR_WRAP_CONT);
+ stfsm_write_status(fsm, N25Q_CMD_WRVCR, vcr, 1, 0);
+
+ return 0;
+}
+
+static void stfsm_s25fl_prepare_erasesec_seq_32(struct stfsm_seq *seq)
+{
+ seq->seq_opc[1] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_SE4));
+
+ seq->addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2 |
+ ADR_CFG_CSDEASSERT_ADD2);
+}
+
+static void stfsm_s25fl_read_dyb(struct stfsm *fsm, uint32_t offs, uint8_t *dby)
+{
+ uint32_t tmp;
+ struct stfsm_seq seq = {
+ .data_size = TRANSFER_SIZE(4),
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_DYBRD)),
+ .addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2),
+ .addr1 = (offs >> 16) & 0xffff,
+ .addr2 = offs & 0xffff,
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_ADD1,
+ STFSM_INST_ADD2,
+ STFSM_INST_DATA_READ,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+ };
+
+ stfsm_load_seq(fsm, &seq);
+
+ stfsm_read_fifo(fsm, &tmp, 4);
+
+ *dby = (uint8_t)(tmp >> 24);
+
+ stfsm_wait_seq(fsm);
+}
+
+static void stfsm_s25fl_write_dyb(struct stfsm *fsm, uint32_t offs, uint8_t dby)
+{
+ struct stfsm_seq seq = {
+ .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
+ SEQ_OPC_CSDEASSERT),
+ .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_DYBWR)),
+ .addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2),
+ .status = (uint32_t)dby | STA_PADS_1 | STA_CSDEASSERT,
+ .addr1 = (offs >> 16) & 0xffff,
+ .addr2 = offs & 0xffff,
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_ADD1,
+ STFSM_INST_ADD2,
+ STFSM_INST_STA_WR1,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+ };
+
+ stfsm_load_seq(fsm, &seq);
+ stfsm_wait_seq(fsm);
+
+ stfsm_wait_busy(fsm);
+}
+
+static int stfsm_s25fl_clear_status_reg(struct stfsm *fsm)
+{
+ struct stfsm_seq seq = {
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_CLSR) |
+ SEQ_OPC_CSDEASSERT),
+ .seq_opc[1] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WRDI) |
+ SEQ_OPC_CSDEASSERT),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_WAIT,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+ };
+
+ stfsm_load_seq(fsm, &seq);
+
+ stfsm_wait_seq(fsm);
+
+ return 0;
+}
+
+static int stfsm_s25fl_config(struct stfsm *fsm)
+{
+ struct flash_info *info = fsm->info;
+ uint32_t flags = info->flags;
+ uint32_t data_pads;
+ uint32_t offs;
+ uint16_t sta_wr;
+ uint8_t sr1, cr1, dyb;
+ int update_sr = 0;
+ int ret;
+
+ if (flags & FLASH_FLAG_32BIT_ADDR) {
+ /*
+ * Prepare Read/Write/Erase sequences according to S25FLxxx
+ * 32-bit address command set
+ */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ stfsm_s25fl_read4_configs);
+ if (ret)
+ return ret;
+
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
+ stfsm_s25fl_write4_configs);
+ if (ret)
+ return ret;
+
+ stfsm_s25fl_prepare_erasesec_seq_32(&stfsm_seq_erase_sector);
+
+ } else {
+ /* Use default configurations for 24-bit addressing */
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * For devices that support 'DYB' sector locking, check lock status and
+ * unlock sectors if necessary (some variants power-on with sectors
+ * locked by default)
+ */
+ if (flags & FLASH_FLAG_DYB_LOCKING) {
+		for (offs = 0; offs < info->sector_size * info->n_sectors;) {
+ stfsm_s25fl_read_dyb(fsm, offs, &dyb);
+ if (dyb == 0x00)
+ stfsm_s25fl_write_dyb(fsm, offs, 0xff);
+
+ /* Handle bottom/top 4KiB parameter sectors */
+ if ((offs < info->sector_size * 2) ||
+ (offs >= (info->sector_size - info->n_sectors * 4)))
+ offs += 0x1000;
+ else
+ offs += 0x10000;
+ }
+ }
+
+ /* Check status of 'QE' bit, update if required. */
+ stfsm_read_status(fsm, SPINOR_OP_RDSR2, &cr1, 1);
+ data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
+ if (data_pads == 4) {
+ if (!(cr1 & STFSM_S25FL_CONFIG_QE)) {
+ /* Set 'QE' */
+ cr1 |= STFSM_S25FL_CONFIG_QE;
+
+ update_sr = 1;
+ }
+ } else {
+ if (cr1 & STFSM_S25FL_CONFIG_QE) {
+ /* Clear 'QE' */
+ cr1 &= ~STFSM_S25FL_CONFIG_QE;
+
+ update_sr = 1;
+ }
+ }
+ if (update_sr) {
+ stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1);
+ sta_wr = ((uint16_t)cr1 << 8) | sr1;
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sta_wr, 2, 1);
+ }
+
+ /*
+	 * S25FLxxx devices support Program and Erase error flags.
+ * Configure driver to check flags and clear if necessary.
+ */
+ fsm->configuration |= CFG_S25FL_CHECK_ERROR_FLAGS;
+
+ return 0;
+}
+
+static int stfsm_w25q_config(struct stfsm *fsm)
+{
+ uint32_t data_pads;
+ uint8_t sr1, sr2;
+ uint16_t sr_wr;
+ int update_sr = 0;
+ int ret;
+
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+
+ /* Check status of 'QE' bit, update if required. */
+ stfsm_read_status(fsm, SPINOR_OP_RDSR2, &sr2, 1);
+ data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
+ if (data_pads == 4) {
+ if (!(sr2 & W25Q_STATUS_QE)) {
+ /* Set 'QE' */
+ sr2 |= W25Q_STATUS_QE;
+ update_sr = 1;
+ }
+ } else {
+ if (sr2 & W25Q_STATUS_QE) {
+ /* Clear 'QE' */
+ sr2 &= ~W25Q_STATUS_QE;
+ update_sr = 1;
+ }
+ }
+ if (update_sr) {
+ /* Write status register */
+ stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1);
+ sr_wr = ((uint16_t)sr2 << 8) | sr1;
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sr_wr, 2, 1);
+ }
+
+ return 0;
+}
+
+static int stfsm_read(struct stfsm *fsm, uint8_t *buf, uint32_t size,
+ uint32_t offset)
+{
+ struct stfsm_seq *seq = &fsm->stfsm_seq_read;
+ uint32_t data_pads;
+ uint32_t read_mask;
+ uint32_t size_ub;
+ uint32_t size_lb;
+ uint32_t size_mop;
+ uint32_t tmp[4];
+ uint32_t page_buf[FLASH_PAGESIZE_32];
+ uint8_t *p;
+
+ dev_dbg(fsm->dev, "reading %d bytes from 0x%08x\n", size, offset);
+
+ /* Enter 32-bit address mode, if required */
+ if (fsm->configuration & CFG_READ_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 1);
+
+ /* Must read in multiples of 32 cycles (or 32*pads/8 Bytes) */
+ data_pads = ((seq->seq_cfg >> 16) & 0x3) + 1;
+ read_mask = (data_pads << 2) - 1;
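+	/* i.e. 4-byte granularity on 1 data pad, 16-byte granularity on 4 pads */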
+
+ /* Handle non-aligned buf */
+ p = ((uintptr_t)buf & 0x3) ? (uint8_t *)page_buf : buf;
+
+ /* Handle non-aligned size */
+ size_ub = (size + read_mask) & ~read_mask;
+ size_lb = size & ~read_mask;
+ size_mop = size & read_mask;
+
+ seq->data_size = TRANSFER_SIZE(size_ub);
+ seq->addr1 = (offset >> 16) & 0xffff;
+ seq->addr2 = offset & 0xffff;
+
+ stfsm_load_seq(fsm, seq);
+
+ if (size_lb)
+ stfsm_read_fifo(fsm, (uint32_t *)p, size_lb);
+
+ if (size_mop) {
+ stfsm_read_fifo(fsm, tmp, read_mask + 1);
+ memcpy(p + size_lb, &tmp, size_mop);
+ }
+
+ /* Handle non-aligned buf */
+ if ((uintptr_t)buf & 0x3)
+ memcpy(buf, page_buf, size);
+
+ /* Wait for sequence to finish */
+ stfsm_wait_seq(fsm);
+
+ stfsm_clear_fifo(fsm);
+
+ /* Exit 32-bit address mode, if required */
+ if (fsm->configuration & CFG_READ_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 0);
+
+ return 0;
+}
+
+static int stfsm_write(struct stfsm *fsm, const uint8_t *buf,
+ uint32_t size, uint32_t offset)
+{
+ struct stfsm_seq *seq = &fsm->stfsm_seq_write;
+ uint32_t data_pads;
+ uint32_t write_mask;
+ uint32_t size_ub;
+ uint32_t size_lb;
+ uint32_t size_mop;
+ uint32_t tmp[4];
+ uint32_t page_buf[FLASH_PAGESIZE_32];
+ uint8_t *t = (uint8_t *)&tmp;
+ const uint8_t *p;
+ int ret;
+ int i;
+
+ dev_dbg(fsm->dev, "writing %d bytes to 0x%08x\n", size, offset);
+
+ /* Enter 32-bit address mode, if required */
+ if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 1);
+
+ /* Must write in multiples of 32 cycles (or 32*pads/8 bytes) */
+ data_pads = ((seq->seq_cfg >> 16) & 0x3) + 1;
+ write_mask = (data_pads << 2) - 1;
+
+ /* Handle non-aligned buf */
+ if ((uintptr_t)buf & 0x3) {
+ memcpy(page_buf, buf, size);
+ p = (uint8_t *)page_buf;
+ } else {
+ p = buf;
+ }
+
+ /* Handle non-aligned size */
+ size_ub = (size + write_mask) & ~write_mask;
+ size_lb = size & ~write_mask;
+ size_mop = size & write_mask;
+
+ seq->data_size = TRANSFER_SIZE(size_ub);
+ seq->addr1 = (offset >> 16) & 0xffff;
+ seq->addr2 = offset & 0xffff;
+
+ /* Need to set FIFO to write mode, before writing data to FIFO (see
+ * GNBvb79594)
+ */
+ writel(0x00040000, fsm->base + SPI_FAST_SEQ_CFG);
+
+ /*
+ * Before writing data to the FIFO, apply a small delay to allow a
+ * potential change of FIFO direction to complete.
+ */
+ if (fsm->fifo_dir_delay == 0)
+ readl(fsm->base + SPI_FAST_SEQ_CFG);
+ else
+ udelay(fsm->fifo_dir_delay);
+
+ /* Write data to FIFO, before starting sequence (see GNBvd79593) */
+ if (size_lb) {
+ stfsm_write_fifo(fsm, (uint32_t *)p, size_lb);
+ p += size_lb;
+ }
+
+ /* Handle non-aligned size */
+ if (size_mop) {
+ memset(t, 0xff, write_mask + 1); /* fill with 0xff's */
+ for (i = 0; i < size_mop; i++)
+ t[i] = *p++;
+
+ stfsm_write_fifo(fsm, tmp, write_mask + 1);
+ }
+
+ /* Start sequence */
+ stfsm_load_seq(fsm, seq);
+
+ /* Wait for sequence to finish */
+ stfsm_wait_seq(fsm);
+
+ /* Wait for completion */
+ ret = stfsm_wait_busy(fsm);
+ if (ret && fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS)
+ stfsm_s25fl_clear_status_reg(fsm);
+
+ /* Exit 32-bit address mode, if required */
+ if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 0);
+
+ return 0;
+}
+
+/*
+ * Read an address range from the flash chip. The address range
+ * may be any size provided it is within the physical boundaries.
+ */
+static int stfsm_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
+ uint32_t bytes;
+
+ dev_dbg(fsm->dev, "%s from 0x%08x, len %zd\n",
+ __func__, (u32)from, len);
+
+ mutex_lock(&fsm->lock);
+
+ while (len > 0) {
+ bytes = min_t(size_t, len, FLASH_PAGESIZE);
+
+ stfsm_read(fsm, buf, bytes, from);
+
+ buf += bytes;
+ from += bytes;
+ len -= bytes;
+
+ *retlen += bytes;
+ }
+
+ mutex_unlock(&fsm->lock);
+
+ return 0;
+}
+
+static int stfsm_erase_sector(struct stfsm *fsm, uint32_t offset)
+{
+ struct stfsm_seq *seq = &stfsm_seq_erase_sector;
+ int ret;
+
+ dev_dbg(fsm->dev, "erasing sector at 0x%08x\n", offset);
+
+ /* Enter 32-bit address mode, if required */
+ if (fsm->configuration & CFG_ERASESEC_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 1);
+
+ seq->addr1 = (offset >> 16) & 0xffff;
+ seq->addr2 = offset & 0xffff;
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ /* Wait for completion */
+ ret = stfsm_wait_busy(fsm);
+ if (ret && fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS)
+ stfsm_s25fl_clear_status_reg(fsm);
+
+ /* Exit 32-bit address mode, if required */
+ if (fsm->configuration & CFG_ERASESEC_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 0);
+
+ return ret;
+}
+
+static int stfsm_erase_chip(struct stfsm *fsm)
+{
+ const struct stfsm_seq *seq = &stfsm_seq_erase_chip;
+
+ dev_dbg(fsm->dev, "erasing chip\n");
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ return stfsm_wait_busy(fsm);
+}
+
+/*
+ * Write an address range to the flash chip. Data must be written in
+ * FLASH_PAGESIZE chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ */
+static int stfsm_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
+
+ u32 page_offs;
+ u32 bytes;
+ uint8_t *b = (uint8_t *)buf;
+ int ret = 0;
+
+ dev_dbg(fsm->dev, "%s to 0x%08x, len %zd\n", __func__, (u32)to, len);
+
+ /* Offset within page */
+ page_offs = to % FLASH_PAGESIZE;
+
+ mutex_lock(&fsm->lock);
+
+ while (len) {
+ /* Write up to page boundary */
+ bytes = min_t(size_t, FLASH_PAGESIZE - page_offs, len);
+
+ ret = stfsm_write(fsm, b, bytes, to);
+ if (ret)
+ goto out1;
+
+ b += bytes;
+ len -= bytes;
+ to += bytes;
+
+ /* We are now page-aligned */
+ page_offs = 0;
+
+ *retlen += bytes;
+	}
+
+out1:
+ mutex_unlock(&fsm->lock);
+
+ return ret;
+}
+
+/*
+ * Erase an address range on the flash chip. The address range may span
+ * one or more erase sectors. Return an error if there is a problem erasing.
+ */
+static int stfsm_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
+ u32 addr, len;
+ int ret;
+
+ dev_dbg(fsm->dev, "%s at 0x%llx, len %lld\n", __func__,
+ (long long)instr->addr, (long long)instr->len);
+
+ addr = instr->addr;
+ len = instr->len;
+
+ mutex_lock(&fsm->lock);
+
+ /* Whole-chip erase? */
+ if (len == mtd->size) {
+ ret = stfsm_erase_chip(fsm);
+ if (ret)
+ goto out1;
+ } else {
+ while (len) {
+ ret = stfsm_erase_sector(fsm, addr);
+ if (ret)
+ goto out1;
+
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+ }
+
+ mutex_unlock(&fsm->lock);
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+
+out1:
+ instr->state = MTD_ERASE_FAILED;
+ mutex_unlock(&fsm->lock);
+
+ return ret;
+}
+
+static void stfsm_read_jedec(struct stfsm *fsm, uint8_t *jedec)
+{
+ const struct stfsm_seq *seq = &stfsm_seq_read_jedec;
+ uint32_t tmp[2];
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_read_fifo(fsm, tmp, 8);
+
+ memcpy(jedec, tmp, 5);
+
+ stfsm_wait_seq(fsm);
+}
+
+static struct flash_info *stfsm_jedec_probe(struct stfsm *fsm)
+{
+ struct flash_info *info;
+ u16 ext_jedec;
+ u32 jedec;
+ u8 id[5];
+
+ stfsm_read_jedec(fsm, id);
+
+ jedec = id[0] << 16 | id[1] << 8 | id[2];
+ /*
+ * JEDEC also defines an optional "extended device information"
+	 * string that follows the vendor-specific data, after the three bytes
+ * we use here. Supporting some chips might require using it.
+ */
+ ext_jedec = id[3] << 8 | id[4];
+
+ dev_dbg(fsm->dev, "JEDEC = 0x%08x [%02x %02x %02x %02x %02x]\n",
+ jedec, id[0], id[1], id[2], id[3], id[4]);
+
+ for (info = flash_types; info->name; info++) {
+ if (info->jedec_id == jedec) {
+ if (info->ext_id && info->ext_id != ext_jedec)
+ continue;
+ return info;
+ }
+ }
+ dev_err(fsm->dev, "Unrecognized JEDEC id %06x\n", jedec);
+
+ return NULL;
+}
+
+static int stfsm_set_mode(struct stfsm *fsm, uint32_t mode)
+{
+ int ret, timeout = 10;
+
+ /* Wait for controller to accept mode change */
+ while (--timeout) {
+ ret = readl(fsm->base + SPI_STA_MODE_CHANGE);
+ if (ret & 0x1)
+ break;
+ udelay(1);
+ }
+
+ if (!timeout)
+ return -EBUSY;
+
+ writel(mode, fsm->base + SPI_MODESELECT);
+
+ return 0;
+}
+
+static void stfsm_set_freq(struct stfsm *fsm, uint32_t spi_freq)
+{
+ uint32_t emi_freq;
+ uint32_t clk_div;
+
+ /* TODO: Make this dynamic */
+ emi_freq = STFSM_DEFAULT_EMI_FREQ;
+
+ /*
+ * Calculate clk_div - values between 2 and 128
+ * Multiple of 2, rounded up
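+	 * e.g. a 100MHz EMI clock and a 10MHz SPI clock give clk_div = 10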
+ */
+ clk_div = 2 * DIV_ROUND_UP(emi_freq, 2 * spi_freq);
+ if (clk_div < 2)
+ clk_div = 2;
+ else if (clk_div > 128)
+ clk_div = 128;
+
+ /*
+ * Determine a suitable delay for the IP to complete a change of
+ * direction of the FIFO. The required delay is related to the clock
+ * divider used. The following heuristics are based on empirical tests,
+ * using a 100MHz EMI clock.
+ */
+ if (clk_div <= 4)
+ fsm->fifo_dir_delay = 0;
+ else if (clk_div <= 10)
+ fsm->fifo_dir_delay = 1;
+ else
+ fsm->fifo_dir_delay = DIV_ROUND_UP(clk_div, 10);
+
+ dev_dbg(fsm->dev, "emi_clk = %uHZ, spi_freq = %uHZ, clk_div = %u\n",
+ emi_freq, spi_freq, clk_div);
+
+ writel(clk_div, fsm->base + SPI_CLOCKDIV);
+}
+
+static int stfsm_init(struct stfsm *fsm)
+{
+ int ret;
+
+ /* Perform a soft reset of the FSM controller */
+ writel(SEQ_CFG_SWRESET, fsm->base + SPI_FAST_SEQ_CFG);
+ udelay(1);
+ writel(0, fsm->base + SPI_FAST_SEQ_CFG);
+
+ /* Set clock to 'safe' frequency initially */
+ stfsm_set_freq(fsm, STFSM_FLASH_SAFE_FREQ);
+
+ /* Switch to FSM */
+ ret = stfsm_set_mode(fsm, SPI_MODESELECT_FSM);
+ if (ret)
+ return ret;
+
+ /* Set timing parameters */
+ writel(SPI_CFG_DEVICE_ST |
+ SPI_CFG_DEFAULT_MIN_CS_HIGH |
+ SPI_CFG_DEFAULT_CS_SETUPHOLD |
+ SPI_CFG_DEFAULT_DATA_HOLD,
+ fsm->base + SPI_CONFIGDATA);
+ writel(STFSM_DEFAULT_WR_TIME, fsm->base + SPI_STATUS_WR_TIME_REG);
+
+ /*
+ * Set the FSM 'WAIT' delay to the minimum workable value. Note, for
+ * our purposes, the WAIT instruction is used purely to achieve
+ * "sequence validity" rather than actually implement a delay.
+ */
+ writel(0x00000001, fsm->base + SPI_PROGRAM_ERASE_TIME);
+
+ /* Clear FIFO, just in case */
+ stfsm_clear_fifo(fsm);
+
+ return 0;
+}
+
+static void stfsm_fetch_platform_configs(struct platform_device *pdev)
+{
+ struct stfsm *fsm = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ struct regmap *regmap;
+ uint32_t boot_device_reg;
+ uint32_t boot_device_spi;
+ uint32_t boot_device; /* Value we read from *boot_device_reg */
+ int ret;
+
+ /* Booting from SPI NOR Flash is the default */
+ fsm->booted_from_spi = true;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(regmap))
+ goto boot_device_fail;
+
+ fsm->reset_signal = of_property_read_bool(np, "st,reset-signal");
+
+ fsm->reset_por = of_property_read_bool(np, "st,reset-por");
+
+ /* Where in the syscon the boot device information lives */
+ ret = of_property_read_u32(np, "st,boot-device-reg", &boot_device_reg);
+ if (ret)
+ goto boot_device_fail;
+
+ /* Boot device value when booted from SPI NOR */
+ ret = of_property_read_u32(np, "st,boot-device-spi", &boot_device_spi);
+ if (ret)
+ goto boot_device_fail;
+
+ ret = regmap_read(regmap, boot_device_reg, &boot_device);
+ if (ret)
+ goto boot_device_fail;
+
+ if (boot_device != boot_device_spi)
+ fsm->booted_from_spi = false;
+
+ return;
+
+boot_device_fail:
+ dev_warn(&pdev->dev,
+ "failed to fetch boot device, assuming boot from SPI\n");
+}
+
+static int stfsm_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mtd_part_parser_data ppdata;
+ struct flash_info *info;
+ struct resource *res;
+ struct stfsm *fsm;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "No DT found\n");
+ return -EINVAL;
+ }
+ ppdata.of_node = np;
+
+ fsm = devm_kzalloc(&pdev->dev, sizeof(*fsm), GFP_KERNEL);
+ if (!fsm)
+ return -ENOMEM;
+
+ fsm->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, fsm);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Resource not found\n");
+ return -ENODEV;
+ }
+
+ fsm->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsm->base)) {
+ dev_err(&pdev->dev,
+ "Failed to reserve memory region %pR\n", res);
+ return PTR_ERR(fsm->base);
+ }
+
+ mutex_init(&fsm->lock);
+
+ ret = stfsm_init(fsm);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialise FSM Controller\n");
+ return ret;
+ }
+
+ stfsm_fetch_platform_configs(pdev);
+
+ /* Detect SPI FLASH device */
+ info = stfsm_jedec_probe(fsm);
+ if (!info)
+ return -ENODEV;
+ fsm->info = info;
+
+ /* Use device size to determine address width */
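+	/* (devices larger than 16MiB cannot be addressed with 24 bits) */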
+ if (info->sector_size * info->n_sectors > 0x1000000)
+ info->flags |= FLASH_FLAG_32BIT_ADDR;
+
+ /*
+ * Configure READ/WRITE/ERASE sequences according to platform and
+ * device flags.
+ */
+ if (info->config) {
+ ret = info->config(fsm);
+ if (ret)
+ return ret;
+ } else {
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+ }
+
+ fsm->mtd.name = info->name;
+ fsm->mtd.dev.parent = &pdev->dev;
+ fsm->mtd.type = MTD_NORFLASH;
+ fsm->mtd.writesize = 4;
+ fsm->mtd.writebufsize = fsm->mtd.writesize;
+ fsm->mtd.flags = MTD_CAP_NORFLASH;
+ fsm->mtd.size = info->sector_size * info->n_sectors;
+ fsm->mtd.erasesize = info->sector_size;
+
+ fsm->mtd._read = stfsm_mtd_read;
+ fsm->mtd._write = stfsm_mtd_write;
+ fsm->mtd._erase = stfsm_mtd_erase;
+
+ dev_info(&pdev->dev,
+ "Found serial flash device: %s\n"
+ " size = %llx (%lldMiB) erasesize = 0x%08x (%uKiB)\n",
+ info->name,
+ (long long)fsm->mtd.size, (long long)(fsm->mtd.size >> 20),
+ fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10));
+
+ return mtd_device_parse_register(&fsm->mtd, NULL, &ppdata, NULL, 0);
+}
+
+static int stfsm_remove(struct platform_device *pdev)
+{
+ struct stfsm *fsm = platform_get_drvdata(pdev);
+
+ return mtd_device_unregister(&fsm->mtd);
+}
+
+static const struct of_device_id stfsm_match[] = {
+ { .compatible = "st,spi-fsm", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stfsm_match);
+
+static struct platform_driver stfsm_driver = {
+ .probe = stfsm_probe,
+ .remove = stfsm_remove,
+ .driver = {
+ .name = "st-spi-fsm",
+ .owner = THIS_MODULE,
+ .of_match_table = stfsm_match,
+ },
+};
+module_platform_driver(stfsm_driver);
+
+MODULE_AUTHOR("Angus Clark <angus.clark@st.com>");
+MODULE_DESCRIPTION("ST SPI FSM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 037b399df3f..19d637266fc 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -168,8 +168,8 @@ static int scan_header(partition_t *part)
(offset + sizeof(header)) < max_offset;
offset += part->mbd.mtd->erasesize ? : 0x2000) {
- err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
- (unsigned char *)&header);
+ err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret,
+ (unsigned char *)&header);
if (err)
return err;
@@ -224,8 +224,8 @@ static int build_maps(partition_t *part)
for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
<< part->header.EraseUnitSize);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &retval,
- (unsigned char *)&header);
+ ret = mtd_read(part->mbd.mtd, offset, sizeof(header), &retval,
+ (unsigned char *)&header);
if (ret)
goto out_XferInfo;
@@ -289,9 +289,9 @@ static int build_maps(partition_t *part)
part->EUNInfo[i].Deleted = 0;
offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset,
- part->BlocksPerUnit * sizeof(uint32_t), &retval,
- (unsigned char *)part->bam_cache);
+ ret = mtd_read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(uint32_t), &retval,
+ (unsigned char *)part->bam_cache);
if (ret)
goto out_bam_cache;
@@ -339,7 +339,7 @@ static int erase_xfer(partition_t *part,
struct erase_info *erase;
xfer = &part->XferInfo[xfernum];
- DEBUG(1, "ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
+ pr_debug("ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
xfer->state = XFER_ERASING;
/* Is there a free erase slot? Always in MTD. */
@@ -355,7 +355,7 @@ static int erase_xfer(partition_t *part,
erase->len = 1 << part->header.EraseUnitSize;
erase->priv = (u_long)part;
- ret = part->mbd.mtd->erase(part->mbd.mtd, erase);
+ ret = mtd_erase(part->mbd.mtd, erase);
if (!ret)
xfer->EraseCount++;
@@ -415,15 +415,15 @@ static int prepare_xfer(partition_t *part, int i)
xfer = &part->XferInfo[i];
xfer->state = XFER_FAILED;
- DEBUG(1, "ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);
+ pr_debug("ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);
/* Write the transfer unit header */
header = part->header;
header.LogicalEUN = cpu_to_le16(0xffff);
header.EraseCount = cpu_to_le32(xfer->EraseCount);
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset, sizeof(header),
- &retlen, (u_char *)&header);
+ ret = mtd_write(part->mbd.mtd, xfer->Offset, sizeof(header), &retlen,
+ (u_char *)&header);
if (ret) {
return ret;
@@ -438,8 +438,8 @@ static int prepare_xfer(partition_t *part, int i)
for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&ctl);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&ctl);
if (ret)
return ret;
@@ -476,7 +476,7 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
eun = &part->EUNInfo[srcunit];
xfer = &part->XferInfo[xferunit];
- DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
+ pr_debug("ftl_cs: copying block 0x%x to 0x%x\n",
eun->Offset, xfer->Offset);
@@ -485,9 +485,9 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset,
- part->BlocksPerUnit * sizeof(uint32_t),
- &retlen, (u_char *) (part->bam_cache));
+ ret = mtd_read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(uint32_t), &retlen,
+ (u_char *)(part->bam_cache));
/* mark the cache bad, in case we get an error later */
part->bam_index = 0xffff;
@@ -503,8 +503,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
offset = xfer->Offset + 20; /* Bad! */
unit = cpu_to_le16(0x7fff);
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint16_t),
- &retlen, (u_char *) &unit);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint16_t), &retlen,
+ (u_char *)&unit);
if (ret) {
printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
@@ -523,16 +523,16 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
break;
case BLOCK_DATA:
case BLOCK_REPLACEMENT:
- ret = part->mbd.mtd->read(part->mbd.mtd, src, SECTOR_SIZE,
- &retlen, (u_char *) buf);
+ ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (ret) {
printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
return ret;
}
- ret = part->mbd.mtd->write(part->mbd.mtd, dest, SECTOR_SIZE,
- &retlen, (u_char *) buf);
+ ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (ret) {
printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
return ret;
@@ -550,9 +550,11 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
}
/* Write the BAM to the transfer unit */
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + le32_to_cpu(part->header.BAMOffset),
- part->BlocksPerUnit * sizeof(int32_t), &retlen,
- (u_char *)part->bam_cache);
+ ret = mtd_write(part->mbd.mtd,
+ xfer->Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(int32_t),
+ &retlen,
+ (u_char *)part->bam_cache);
if (ret) {
printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
return ret;
@@ -560,8 +562,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
/* All clear? Then update the LogicalEUN again */
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
- &retlen, (u_char *)&srcunitswap);
+ ret = mtd_write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
+ &retlen, (u_char *)&srcunitswap);
if (ret) {
printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
@@ -598,7 +600,7 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
unit with the fewest erases, and usually pick the data unit with
the most deleted blocks. But with a small probability, pick the
oldest data unit instead. This means that we generally postpone
- the next reclaimation as long as possible, but shuffle static
+ the next reclamation as long as possible, but shuffle static
stuff around a bit for wear leveling.
======================================================================*/
@@ -609,8 +611,8 @@ static int reclaim_block(partition_t *part)
uint32_t best;
int queued, ret;
- DEBUG(0, "ftl_cs: reclaiming space...\n");
- DEBUG(3, "NumTransferUnits == %x\n", part->header.NumTransferUnits);
+ pr_debug("ftl_cs: reclaiming space...\n");
+ pr_debug("NumTransferUnits == %x\n", part->header.NumTransferUnits);
/* Pick the least erased transfer unit */
best = 0xffffffff; xfer = 0xffff;
do {
@@ -618,22 +620,22 @@ static int reclaim_block(partition_t *part)
for (i = 0; i < part->header.NumTransferUnits; i++) {
int n=0;
if (part->XferInfo[i].state == XFER_UNKNOWN) {
- DEBUG(3,"XferInfo[%d].state == XFER_UNKNOWN\n",i);
+ pr_debug("XferInfo[%d].state == XFER_UNKNOWN\n",i);
n=1;
erase_xfer(part, i);
}
if (part->XferInfo[i].state == XFER_ERASING) {
- DEBUG(3,"XferInfo[%d].state == XFER_ERASING\n",i);
+ pr_debug("XferInfo[%d].state == XFER_ERASING\n",i);
n=1;
queued = 1;
}
else if (part->XferInfo[i].state == XFER_ERASED) {
- DEBUG(3,"XferInfo[%d].state == XFER_ERASED\n",i);
+ pr_debug("XferInfo[%d].state == XFER_ERASED\n",i);
n=1;
prepare_xfer(part, i);
}
if (part->XferInfo[i].state == XFER_PREPARED) {
- DEBUG(3,"XferInfo[%d].state == XFER_PREPARED\n",i);
+ pr_debug("XferInfo[%d].state == XFER_PREPARED\n",i);
n=1;
if (part->XferInfo[i].EraseCount <= best) {
best = part->XferInfo[i].EraseCount;
@@ -641,22 +643,21 @@ static int reclaim_block(partition_t *part)
}
}
if (!n)
- DEBUG(3,"XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);
+ pr_debug("XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);
}
if (xfer == 0xffff) {
if (queued) {
- DEBUG(1, "ftl_cs: waiting for transfer "
+ pr_debug("ftl_cs: waiting for transfer "
"unit to be prepared...\n");
- if (part->mbd.mtd->sync)
- part->mbd.mtd->sync(part->mbd.mtd);
+ mtd_sync(part->mbd.mtd);
} else {
static int ne = 0;
if (++ne < 5)
printk(KERN_NOTICE "ftl_cs: reclaim failed: no "
"suitable transfer units!\n");
else
- DEBUG(1, "ftl_cs: reclaim failed: no "
+ pr_debug("ftl_cs: reclaim failed: no "
"suitable transfer units!\n");
return -EIO;
@@ -666,7 +667,7 @@ static int reclaim_block(partition_t *part)
eun = 0;
if ((jiffies % shuffle_freq) == 0) {
- DEBUG(1, "ftl_cs: recycling freshest block...\n");
+ pr_debug("ftl_cs: recycling freshest block...\n");
best = 0xffffffff;
for (i = 0; i < part->DataUnits; i++)
if (part->EUNInfo[i].EraseCount <= best) {
@@ -686,7 +687,7 @@ static int reclaim_block(partition_t *part)
printk(KERN_NOTICE "ftl_cs: reclaim failed: "
"no free blocks!\n");
else
- DEBUG(1,"ftl_cs: reclaim failed: "
+ pr_debug("ftl_cs: reclaim failed: "
"no free blocks!\n");
return -EIO;
@@ -747,10 +748,11 @@ static uint32_t find_free(partition_t *part)
/* Invalidate cache */
part->bam_index = 0xffff;
- ret = part->mbd.mtd->read(part->mbd.mtd,
- part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
- part->BlocksPerUnit * sizeof(uint32_t),
- &retlen, (u_char *) (part->bam_cache));
+ ret = mtd_read(part->mbd.mtd,
+ part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(uint32_t),
+ &retlen,
+ (u_char *)(part->bam_cache));
if (ret) {
printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
@@ -771,7 +773,7 @@ static uint32_t find_free(partition_t *part)
printk(KERN_NOTICE "ftl_cs: bad free list!\n");
return 0;
}
- DEBUG(2, "ftl_cs: found free block at %d in %d\n", blk, eun);
+ pr_debug("ftl_cs: found free block at %d in %d\n", blk, eun);
return blk;
} /* find_free */
@@ -791,7 +793,7 @@ static int ftl_read(partition_t *part, caddr_t buffer,
int ret;
size_t offset, retlen;
- DEBUG(2, "ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
+ pr_debug("ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
part, sector, nblocks);
if (!(part->state & FTL_FORMATTED)) {
printk(KERN_NOTICE "ftl_cs: bad partition\n");
@@ -810,8 +812,8 @@ static int ftl_read(partition_t *part, caddr_t buffer,
else {
offset = (part->EUNInfo[log_addr / bsize].Offset
+ (log_addr % bsize));
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, SECTOR_SIZE,
- &retlen, (u_char *) buffer);
+ ret = mtd_read(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
+ (u_char *)buffer);
if (ret) {
printk(KERN_WARNING "Error reading MTD device in ftl_read()\n");
@@ -840,7 +842,7 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
int ret;
size_t retlen, offset;
- DEBUG(2, "ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n",
+ pr_debug("ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n",
part, log_addr, virt_addr);
bsize = 1 << part->header.EraseUnitSize;
eun = log_addr / bsize;
@@ -849,8 +851,8 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
le32_to_cpu(part->header.BAMOffset));
#ifdef PSYCHO_DEBUG
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&old_addr);
+ ret = mtd_read(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&old_addr);
if (ret) {
printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
return ret;
@@ -886,8 +888,8 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
#endif
part->bam_cache[blk] = le_virt_addr;
}
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&le_virt_addr);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&le_virt_addr);
if (ret) {
printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n");
@@ -905,7 +907,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
int ret;
size_t retlen, offset;
- DEBUG(2, "ftl_cs: ftl_write(0x%p, %ld, %ld)\n",
+ pr_debug("ftl_cs: ftl_write(0x%p, %ld, %ld)\n",
part, sector, nblocks);
if (!(part->state & FTL_FORMATTED)) {
printk(KERN_NOTICE "ftl_cs: bad partition\n");
@@ -946,8 +948,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
part->EUNInfo[part->bam_index].Deleted++;
offset = (part->EUNInfo[part->bam_index].Offset +
blk * SECTOR_SIZE);
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
- buffer);
+ ret = mtd_write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, buffer);
if (ret) {
printk(KERN_NOTICE "ftl_cs: block write failed!\n");
@@ -1011,7 +1012,7 @@ static int ftl_discardsect(struct mtd_blktrans_dev *dev,
partition_t *part = (void *)dev;
uint32_t bsize = 1 << part->header.EraseUnitSize;
- DEBUG(1, "FTL erase sector %ld for %d sectors\n",
+ pr_debug("FTL erase sector %ld for %d sectors\n",
sector, nr_sects);
while (nr_sects) {
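Every ftl.c hunk above applies the same two conversions: direct method calls through part->mbd.mtd (read, write, erase, sync) become the mtd_read()/mtd_write()/mtd_erase()/mtd_sync() helpers, and the old DEBUG(level, ...) macro becomes pr_debug(). The helpers centralise argument checking and the optional-method test, which is why the explicit "if (part->mbd.mtd->sync)" guard in reclaim_block() can simply be dropped. The sketch below only illustrates that idea and is not the upstream definition from include/linux/mtd/mtd.h; the _sketch suffix is there to make that explicit.

#include <linux/errno.h>
#include <linux/mtd/mtd.h>

/* Reduced model of the wrapper: validate the range, then call the
 * driver method through the (now underscored) function pointer. */
static int mtd_read_sketch(struct mtd_info *mtd, loff_t from, size_t len,
                           size_t *retlen, u_char *buf)
{
        *retlen = 0;
        if (from < 0 || from + len > mtd->size)
                return -EINVAL;
        return mtd->_read(mtd, from, len, retlen, buf);
}

/* The optional-method check lives in the wrapper, so callers such as
 * reclaim_block() no longer test the pointer themselves. */
static void mtd_sync_sketch(struct mtd_info *mtd)
{
        if (mtd->_sync)
                mtd->_sync(mtd);
}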
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index d7592e67d04..b66b541877f 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -50,27 +50,25 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
struct INFTLrecord *inftl;
unsigned long temp;
- if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
+ if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
return;
/* OK, this is moderately ugly. But probably safe. Alternatives? */
if (memcmp(mtd->name, "DiskOnChip", 10))
return;
- if (!mtd->block_isbad) {
+ if (!mtd->_block_isbad) {
printk(KERN_ERR
"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
"Please use the new diskonchip driver under the NAND subsystem.\n");
return;
}
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: add_mtd for %s\n", mtd->name);
+ pr_debug("INFTL: add_mtd for %s\n", mtd->name);
inftl = kzalloc(sizeof(*inftl), GFP_KERNEL);
- if (!inftl) {
- printk(KERN_WARNING "INFTL: Out of memory for data structures\n");
+ if (!inftl)
return;
- }
inftl->mbd.mtd = mtd;
inftl->mbd.devnum = -1;
@@ -133,7 +131,7 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
{
struct INFTLrecord *inftl = (void *)dev;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: remove_dev (i=%d)\n", dev->devnum);
+ pr_debug("INFTL: remove_dev (i=%d)\n", dev->devnum);
del_mtd_blktrans_dev(dev);
@@ -154,13 +152,13 @@ int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
struct mtd_oob_ops ops;
int res;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & (mtd->writesize - 1);
ops.ooblen = len;
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -174,13 +172,13 @@ int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
struct mtd_oob_ops ops;
int res;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & (mtd->writesize - 1);
ops.ooblen = len;
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -194,14 +192,14 @@ static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
struct mtd_oob_ops ops;
int res;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs;
ops.ooblen = mtd->oobsize;
ops.oobbuf = oob;
ops.datbuf = buf;
ops.len = len;
- res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.retlen;
return res;
}
@@ -215,16 +213,16 @@ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
u16 pot = inftl->LastFreeEUN;
int silly = inftl->nb_blocks;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=%p,"
- "desperate=%d)\n", inftl, desperate);
+ pr_debug("INFTL: INFTL_findfreeblock(inftl=%p,desperate=%d)\n",
+ inftl, desperate);
/*
* Normally, we force a fold to happen before we run out of free
* blocks completely.
*/
if (!desperate && inftl->numfreeEUNs < 2) {
- DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free "
- "EUNs (%d)\n", inftl->numfreeEUNs);
+ pr_debug("INFTL: there are too few free EUNs (%d)\n",
+ inftl->numfreeEUNs);
return BLOCK_NIL;
}
@@ -259,8 +257,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
struct inftl_oob oob;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
- "pending=%d)\n", inftl, thisVUC, pendingblock);
+ pr_debug("INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,pending=%d)\n",
+ inftl, thisVUC, pendingblock);
memset(BlockMap, 0xff, sizeof(BlockMap));
memset(BlockDeleted, 0, sizeof(BlockDeleted));
@@ -323,8 +321,7 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
* Chain, and the Erase Unit into which we are supposed to be copying.
* Go for it.
*/
- DEBUG(MTD_DEBUG_LEVEL1, "INFTL: folding chain %d into unit %d\n",
- thisVUC, targetEUN);
+ pr_debug("INFTL: folding chain %d into unit %d\n", thisVUC, targetEUN);
for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) {
unsigned char movebuf[SECTORSIZE];
@@ -346,17 +343,19 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
if (BlockMap[block] == BLOCK_NIL)
continue;
- ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) +
- (block * SECTORSIZE), SECTORSIZE, &retlen,
- movebuf);
- if (ret < 0 && ret != -EUCLEAN) {
- ret = mtd->read(mtd,
- (inftl->EraseSize * BlockMap[block]) +
- (block * SECTORSIZE), SECTORSIZE,
- &retlen, movebuf);
+ ret = mtd_read(mtd,
+ (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE,
+ &retlen,
+ movebuf);
+ if (ret < 0 && !mtd_is_bitflip(ret)) {
+ ret = mtd_read(mtd,
+ (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE,
+ &retlen,
+ movebuf);
if (ret != -EIO)
- DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went "
- "away on retry?\n");
+ pr_debug("INFTL: error went away on retry?\n");
}
memset(&oob, 0xff, sizeof(struct inftl_oob));
oob.b.Status = oob.b.Status1 = SECTOR_USED;
@@ -372,8 +371,7 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
* is important, by doing oldest first if we crash/reboot then it
* it is relatively simple to clean up the mess).
*/
- DEBUG(MTD_DEBUG_LEVEL1, "INFTL: want to erase virtual chain %d\n",
- thisVUC);
+ pr_debug("INFTL: want to erase virtual chain %d\n", thisVUC);
for (;;) {
/* Find oldest unit in chain. */
@@ -421,7 +419,7 @@ static u16 INFTL_makefreeblock(struct INFTLrecord *inftl, unsigned pendingblock)
u16 ChainLength = 0, thislen;
u16 chain, EUN;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=%p,"
+ pr_debug("INFTL: INFTL_makefreeblock(inftl=%p,"
"pending=%d)\n", inftl, pendingblock);
for (chain = 0; chain < inftl->nb_blocks; chain++) {
@@ -484,8 +482,8 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
size_t retlen;
int silly, silly2 = 3;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=%p,"
- "block=%d)\n", inftl, block);
+ pr_debug("INFTL: INFTL_findwriteunit(inftl=%p,block=%d)\n",
+ inftl, block);
do {
/*
@@ -501,8 +499,8 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
blockofs, 8, &retlen, (char *)&bci);
status = bci.Status | bci.Status1;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: status of block %d in "
- "EUN %d is %x\n", block , writeEUN, status);
+ pr_debug("INFTL: status of block %d in EUN %d is %x\n",
+ block , writeEUN, status);
switch(status) {
case SECTOR_FREE:
@@ -555,9 +553,9 @@ hitused:
* Hopefully we free something, lets try again.
* This time we are desperate...
*/
- DEBUG(MTD_DEBUG_LEVEL1, "INFTL: using desperate==1 "
- "to find free EUN to accommodate write to "
- "VUC %d\n", thisVUC);
+ pr_debug("INFTL: using desperate==1 to find free EUN "
+ "to accommodate write to VUC %d\n",
+ thisVUC);
writeEUN = INFTL_findfreeblock(inftl, 1);
if (writeEUN == BLOCK_NIL) {
/*
@@ -647,7 +645,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
struct inftl_bci bci;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=%p,"
+ pr_debug("INFTL: INFTL_trydeletechain(inftl=%p,"
"thisVUC=%d)\n", inftl, thisVUC);
memset(BlockUsed, 0, sizeof(BlockUsed));
@@ -711,7 +709,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
* For each block in the chain free it and make it available
* for future use. Erase from the oldest unit first.
*/
- DEBUG(MTD_DEBUG_LEVEL1, "INFTL: deleting empty VUC %d\n", thisVUC);
+ pr_debug("INFTL: deleting empty VUC %d\n", thisVUC);
for (;;) {
u16 *prevEUN = &inftl->VUtable[thisVUC];
@@ -719,7 +717,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
/* If the chain is all gone already, we're done */
if (thisEUN == BLOCK_NIL) {
- DEBUG(MTD_DEBUG_LEVEL2, "INFTL: Empty VUC %d for deletion was already absent\n", thisEUN);
+ pr_debug("INFTL: Empty VUC %d for deletion was already absent\n", thisEUN);
return;
}
@@ -731,7 +729,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
thisEUN = *prevEUN;
}
- DEBUG(MTD_DEBUG_LEVEL3, "Deleting EUN %d from VUC %d\n",
+ pr_debug("Deleting EUN %d from VUC %d\n",
thisEUN, thisVUC);
if (INFTL_formatblock(inftl, thisEUN) < 0) {
@@ -767,7 +765,7 @@ static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block)
size_t retlen;
struct inftl_bci bci;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=%p,"
+ pr_debug("INFTL: INFTL_deleteblock(inftl=%p,"
"block=%d)\n", inftl, block);
while (thisEUN < inftl->nb_blocks) {
@@ -826,7 +824,7 @@ static int inftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
struct inftl_oob oob;
char *p, *pend;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=%p,block=%ld,"
+ pr_debug("INFTL: inftl_writeblock(inftl=%p,block=%ld,"
"buffer=%p)\n", inftl, block, buffer);
/* Is block all zero? */
@@ -876,7 +874,7 @@ static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
struct inftl_bci bci;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld,"
+ pr_debug("INFTL: inftl_readblock(inftl=%p,block=%ld,"
"buffer=%p)\n", inftl, block, buffer);
while (thisEUN < inftl->nb_blocks) {
@@ -919,10 +917,10 @@ foundit:
} else {
size_t retlen;
loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
- int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer);
+ int ret = mtd_read(mtd, ptr, SECTORSIZE, &retlen, buffer);
/* Handle corrected bit flips gracefully */
- if (ret < 0 && ret != -EUCLEAN)
+ if (ret < 0 && !mtd_is_bitflip(ret))
return -EIO;
}
return 0;
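The INFTL read paths above stop comparing the return code against -EUCLEAN by hand and use mtd_is_bitflip() instead: a corrected bitflip means the data is still valid, and only a genuine failure is turned into -EIO. A minimal sketch of the idiom, with local example_* names standing in for the one-line helper in include/linux/mtd/mtd.h:

#include <linux/errno.h>

static inline int example_is_bitflip(int err)
{
        return err == -EUCLEAN;         /* ECC corrected the data */
}

/* How a caller such as inftl_readblock() treats the result of a read. */
static int example_filter_read_error(int err)
{
        if (err < 0 && !example_is_bitflip(err))
                return -EIO;            /* hard failure, propagate */
        return 0;                       /* clean read or corrected bitflip */
}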
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 104052e774b..487e64f411a 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -30,7 +30,6 @@
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nftl.h>
#include <linux/mtd/inftl.h>
@@ -53,7 +52,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
struct INFTLPartition *ip;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
+ pr_debug("INFTL: find_boot_record(inftl=%p)\n", inftl);
/*
* Assume logical EraseSize == physical erasesize for starting the
@@ -73,8 +72,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
* Check for BNAND header first. Then whinge if it's found
* but later checks fail.
*/
- ret = mtd->read(mtd, block * inftl->EraseSize,
- SECTORSIZE, &retlen, buf);
+ ret = mtd_read(mtd, block * inftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
/* We ignore ret in case the ECC of the MediaHeader is invalid
(which is apparently acceptable) */
if (retlen != SECTORSIZE) {
@@ -118,8 +117,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
memcpy(mh, buf, sizeof(struct INFTLMediaHeader));
/* Read the spare media header at offset 4096 */
- mtd->read(mtd, block * inftl->EraseSize + 4096,
- SECTORSIZE, &retlen, buf);
+ mtd_read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE,
+ &retlen, buf);
if (retlen != SECTORSIZE) {
printk(KERN_WARNING "INFTL: Unable to read spare "
"Media Header\n");
@@ -139,24 +138,20 @@ static int find_boot_record(struct INFTLrecord *inftl)
mh->FormatFlags = le32_to_cpu(mh->FormatFlags);
mh->PercentUsed = le32_to_cpu(mh->PercentUsed);
-#ifdef CONFIG_MTD_DEBUG_VERBOSE
- if (CONFIG_MTD_DEBUG_VERBOSE >= 2) {
- printk("INFTL: Media Header ->\n"
- " bootRecordID = %s\n"
- " NoOfBootImageBlocks = %d\n"
- " NoOfBinaryPartitions = %d\n"
- " NoOfBDTLPartitions = %d\n"
- " BlockMultiplerBits = %d\n"
- " FormatFlgs = %d\n"
- " OsakVersion = 0x%x\n"
- " PercentUsed = %d\n",
- mh->bootRecordID, mh->NoOfBootImageBlocks,
- mh->NoOfBinaryPartitions,
- mh->NoOfBDTLPartitions,
- mh->BlockMultiplierBits, mh->FormatFlags,
- mh->OsakVersion, mh->PercentUsed);
- }
-#endif
+ pr_debug("INFTL: Media Header ->\n"
+ " bootRecordID = %s\n"
+ " NoOfBootImageBlocks = %d\n"
+ " NoOfBinaryPartitions = %d\n"
+ " NoOfBDTLPartitions = %d\n"
+ " BlockMultiplerBits = %d\n"
+ " FormatFlgs = %d\n"
+ " OsakVersion = 0x%x\n"
+ " PercentUsed = %d\n",
+ mh->bootRecordID, mh->NoOfBootImageBlocks,
+ mh->NoOfBinaryPartitions,
+ mh->NoOfBDTLPartitions,
+ mh->BlockMultiplierBits, mh->FormatFlags,
+ mh->OsakVersion, mh->PercentUsed);
if (mh->NoOfBDTLPartitions == 0) {
printk(KERN_WARNING "INFTL: Media Header sanity check "
@@ -200,19 +195,15 @@ static int find_boot_record(struct INFTLrecord *inftl)
ip->spareUnits = le32_to_cpu(ip->spareUnits);
ip->Reserved0 = le32_to_cpu(ip->Reserved0);
-#ifdef CONFIG_MTD_DEBUG_VERBOSE
- if (CONFIG_MTD_DEBUG_VERBOSE >= 2) {
- printk(" PARTITION[%d] ->\n"
- " virtualUnits = %d\n"
- " firstUnit = %d\n"
- " lastUnit = %d\n"
- " flags = 0x%x\n"
- " spareUnits = %d\n",
- i, ip->virtualUnits, ip->firstUnit,
- ip->lastUnit, ip->flags,
- ip->spareUnits);
- }
-#endif
+ pr_debug(" PARTITION[%d] ->\n"
+ " virtualUnits = %d\n"
+ " firstUnit = %d\n"
+ " lastUnit = %d\n"
+ " flags = 0x%x\n"
+ " spareUnits = %d\n",
+ i, ip->virtualUnits, ip->firstUnit,
+ ip->lastUnit, ip->flags,
+ ip->spareUnits);
if (ip->Reserved0 != ip->firstUnit) {
struct erase_info *instr = &inftl->instr;
@@ -228,7 +219,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
*/
instr->addr = ip->Reserved0 * inftl->EraseSize;
instr->len = inftl->EraseSize;
- mtd->erase(mtd, instr);
+ mtd_erase(mtd, instr);
}
if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) {
printk(KERN_WARNING "INFTL: Media Header "
@@ -314,7 +305,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
/* If any of the physical eraseblocks are bad, don't
use the unit. */
for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) {
- if (inftl->mbd.mtd->block_isbad(inftl->mbd.mtd, i * inftl->EraseSize + physblock))
+ if (mtd_block_isbad(inftl->mbd.mtd,
+ i * inftl->EraseSize + physblock))
inftl->PUtable[i] = BLOCK_RESERVED;
}
}
@@ -350,7 +342,7 @@ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
int i;
for (i = 0; i < len; i += SECTORSIZE) {
- if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
+ if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
return -1;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
return -1;
@@ -375,7 +367,7 @@ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
*
* Return: 0 when succeed, -1 on error.
*
- * ToDo: 1. Is it neceressary to check_free_sector after erasing ??
+ * ToDo: 1. Is it necessary to check_free_sector after erasing ??
*/
int INFTL_formatblock(struct INFTLrecord *inftl, int block)
{
@@ -385,8 +377,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
struct mtd_info *mtd = inftl->mbd.mtd;
int physblock;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p,"
- "block=%d)\n", inftl, block);
+ pr_debug("INFTL: INFTL_formatblock(inftl=%p,block=%d)\n", inftl, block);
memset(instr, 0, sizeof(struct erase_info));
@@ -402,7 +393,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
mark only the failed block in the bbt. */
for (physblock = 0; physblock < inftl->EraseSize;
physblock += instr->len, instr->addr += instr->len) {
- mtd->erase(inftl->mbd.mtd, instr);
+ mtd_erase(inftl->mbd.mtd, instr);
if (instr->state == MTD_ERASE_FAILED) {
printk(KERN_WARNING "INFTL: error while formatting block %d\n",
@@ -432,7 +423,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
fail:
/* could not format, update the bad block table (caller is responsible
for setting the PUtable to BLOCK_RESERVED on failure) */
- inftl->mbd.mtd->block_markbad(inftl->mbd.mtd, instr->addr);
+ mtd_block_markbad(inftl->mbd.mtd, instr->addr);
return -1;
}
@@ -476,30 +467,30 @@ void INFTL_dumptables(struct INFTLrecord *s)
{
int i;
- printk("-------------------------------------------"
+ pr_debug("-------------------------------------------"
"----------------------------------\n");
- printk("VUtable[%d] ->", s->nb_blocks);
+ pr_debug("VUtable[%d] ->", s->nb_blocks);
for (i = 0; i < s->nb_blocks; i++) {
if ((i % 8) == 0)
- printk("\n%04x: ", i);
- printk("%04x ", s->VUtable[i]);
+ pr_debug("\n%04x: ", i);
+ pr_debug("%04x ", s->VUtable[i]);
}
- printk("\n-------------------------------------------"
+ pr_debug("\n-------------------------------------------"
"----------------------------------\n");
- printk("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks);
+ pr_debug("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks);
for (i = 0; i <= s->lastEUN; i++) {
if ((i % 8) == 0)
- printk("\n%04x: ", i);
- printk("%04x ", s->PUtable[i]);
+ pr_debug("\n%04x: ", i);
+ pr_debug("%04x ", s->PUtable[i]);
}
- printk("\n-------------------------------------------"
+ pr_debug("\n-------------------------------------------"
"----------------------------------\n");
- printk("INFTL ->\n"
+ pr_debug("INFTL ->\n"
" EraseSize = %d\n"
" h/s/c = %d/%d/%d\n"
" numvunits = %d\n"
@@ -513,7 +504,7 @@ void INFTL_dumptables(struct INFTLrecord *s)
s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs,
s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks);
- printk("\n-------------------------------------------"
+ pr_debug("\n-------------------------------------------"
"----------------------------------\n");
}
@@ -521,25 +512,25 @@ void INFTL_dumpVUchains(struct INFTLrecord *s)
{
int logical, block, i;
- printk("-------------------------------------------"
+ pr_debug("-------------------------------------------"
"----------------------------------\n");
- printk("INFTL Virtual Unit Chains:\n");
+ pr_debug("INFTL Virtual Unit Chains:\n");
for (logical = 0; logical < s->nb_blocks; logical++) {
block = s->VUtable[logical];
if (block > s->nb_blocks)
continue;
- printk(" LOGICAL %d --> %d ", logical, block);
+ pr_debug(" LOGICAL %d --> %d ", logical, block);
for (i = 0; i < s->nb_blocks; i++) {
if (s->PUtable[block] == BLOCK_NIL)
break;
block = s->PUtable[block];
- printk("%d ", block);
+ pr_debug("%d ", block);
}
- printk("\n");
+ pr_debug("\n");
}
- printk("-------------------------------------------"
+ pr_debug("-------------------------------------------"
"----------------------------------\n");
}
@@ -555,7 +546,7 @@ int INFTL_mount(struct INFTLrecord *s)
int i;
u8 *ANACtable, ANAC;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=%p)\n", s);
+ pr_debug("INFTL: INFTL_mount(inftl=%p)\n", s);
/* Search for INFTL MediaHeader and Spare INFTL Media Header */
if (find_boot_record(s) < 0) {
@@ -585,7 +576,7 @@ int INFTL_mount(struct INFTLrecord *s)
* NOTEXPLORED state. Then at the end we will try to format it and
* mark it as free.
*/
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 1, explore each unit\n");
+ pr_debug("INFTL: pass 1, explore each unit\n");
for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) {
if (s->PUtable[first_block] != BLOCK_NOTEXPLORED)
continue;
@@ -717,17 +708,14 @@ int INFTL_mount(struct INFTLrecord *s)
logical_block = BLOCK_NIL;
}
-#ifdef CONFIG_MTD_DEBUG_VERBOSE
- if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
- INFTL_dumptables(s);
-#endif
+ INFTL_dumptables(s);
/*
* Second pass, check for infinite loops in chains. These are
* possible because we don't update the previous pointers when
* we fold chains. No big deal, just fix them up in PUtable.
*/
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 2, validate virtual chains\n");
+ pr_debug("INFTL: pass 2, validate virtual chains\n");
for (logical_block = 0; logical_block < s->numvunits; logical_block++) {
block = s->VUtable[logical_block];
last_block = BLOCK_NIL;
@@ -772,12 +760,8 @@ int INFTL_mount(struct INFTLrecord *s)
}
}
-#ifdef CONFIG_MTD_DEBUG_VERBOSE
- if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
- INFTL_dumptables(s);
- if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
- INFTL_dumpVUchains(s);
-#endif
+ INFTL_dumptables(s);
+ INFTL_dumpVUchains(s);
/*
* Third pass, format unreferenced blocks and init free block count.
@@ -785,7 +769,7 @@ int INFTL_mount(struct INFTLrecord *s)
s->numfreeEUNs = 0;
s->LastFreeEUN = BLOCK_NIL;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 3, format unused blocks\n");
+ pr_debug("INFTL: pass 3, format unused blocks\n");
for (block = s->firstEUN; block <= s->lastEUN; block++) {
if (s->PUtable[block] == BLOCK_NOTEXPLORED) {
printk("INFTL: unreferenced block %d, formatting it\n",
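The CONFIG_MTD_DEBUG_VERBOSE blocks and DEBUG(MTD_DEBUG_LEVEL*) calls deleted above all become pr_debug(), which expands to nothing unless the file defines DEBUG or the kernel is built with CONFIG_DYNAMIC_DEBUG, in which case individual call sites can be enabled at run time (for example with "echo 'file inftlmount.c +p' > /sys/kernel/debug/dynamic_debug/control"). A small sketch of the compile-time opt-in; the function name is illustrative only:

#define DEBUG                           /* opt in for this file only */
#include <linux/printk.h>

static void example_trace_pass1(void)
{
        pr_debug("INFTL: pass 1, explore each unit\n");
}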
diff --git a/drivers/mtd/lpddr/Kconfig b/drivers/mtd/lpddr/Kconfig
index 265f969817e..3a19cbee24d 100644
--- a/drivers/mtd/lpddr/Kconfig
+++ b/drivers/mtd/lpddr/Kconfig
@@ -1,5 +1,5 @@
-menu "LPDDR flash memory drivers"
- depends on MTD!=n
+menu "LPDDR & LPDDR2 PCM memory drivers"
+ depends on MTD
config MTD_LPDDR
tristate "Support for LPDDR flash chips"
@@ -17,4 +17,13 @@ config MTD_QINFO_PROBE
Window QINFO interface, permits software to be used for entire
families of devices. This serves similar purpose of CFI on legacy
Flash products
+
+config MTD_LPDDR2_NVM
+ # ARM dependency is only for writel_relaxed()
+ depends on MTD && ARM
+ tristate "Support for LPDDR2-NVM flash chips"
+ help
+ This option enables support of PCM memories with a LPDDR2-NVM
+ (Low power double data rate 2) interface.
+
endmenu
diff --git a/drivers/mtd/lpddr/Makefile b/drivers/mtd/lpddr/Makefile
index da48e46b581..881d440d483 100644
--- a/drivers/mtd/lpddr/Makefile
+++ b/drivers/mtd/lpddr/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_MTD_QINFO_PROBE) += qinfo_probe.o
obj-$(CONFIG_MTD_LPDDR) += lpddr_cmds.o
+obj-$(CONFIG_MTD_LPDDR2_NVM) += lpddr2_nvm.o
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
new file mode 100644
index 00000000000..063cec40d0a
--- /dev/null
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -0,0 +1,507 @@
+/*
+ * LPDDR2-NVM MTD driver. This module provides read, write, erase, lock/unlock
+ * support for LPDDR2-NVM PCM memories
+ *
+ * Copyright © 2012 Micron Technology, Inc.
+ *
+ * Vincenzo Aliberti <vincenzo.aliberti@gmail.com>
+ * Domenico Manna <domenico.manna@gmail.com>
+ * Many thanks to Andrea Vigilante for initial enabling
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/err.h>
+
+/* Parameters */
+#define ERASE_BLOCKSIZE (0x00020000/2) /* in Word */
+#define WRITE_BUFFSIZE (0x00000400/2) /* in Word */
+#define OW_BASE_ADDRESS 0x00000000 /* OW offset */
+#define BUS_WIDTH 0x00000020 /* x32 devices */
+
+/* PFOW symbols address offset */
+#define PFOW_QUERY_STRING_P (0x0000/2) /* in Word */
+#define PFOW_QUERY_STRING_F (0x0002/2) /* in Word */
+#define PFOW_QUERY_STRING_O (0x0004/2) /* in Word */
+#define PFOW_QUERY_STRING_W (0x0006/2) /* in Word */
+
+/* OW registers address */
+#define CMD_CODE_OFS (0x0080/2) /* in Word */
+#define CMD_DATA_OFS (0x0084/2) /* in Word */
+#define CMD_ADD_L_OFS (0x0088/2) /* in Word */
+#define CMD_ADD_H_OFS (0x008A/2) /* in Word */
+#define MPR_L_OFS (0x0090/2) /* in Word */
+#define MPR_H_OFS (0x0092/2) /* in Word */
+#define CMD_EXEC_OFS (0x00C0/2) /* in Word */
+#define STATUS_REG_OFS (0x00CC/2) /* in Word */
+#define PRG_BUFFER_OFS (0x0010/2) /* in Word */
+
+/* Datamask */
+#define MR_CFGMASK 0x8000
+#define SR_OK_DATAMASK 0x0080
+
+/* LPDDR2-NVM Commands */
+#define LPDDR2_NVM_LOCK 0x0061
+#define LPDDR2_NVM_UNLOCK 0x0062
+#define LPDDR2_NVM_SW_PROGRAM 0x0041
+#define LPDDR2_NVM_SW_OVERWRITE 0x0042
+#define LPDDR2_NVM_BUF_PROGRAM 0x00E9
+#define LPDDR2_NVM_BUF_OVERWRITE 0x00EA
+#define LPDDR2_NVM_ERASE 0x0020
+
+/* LPDDR2-NVM Registers offset */
+#define LPDDR2_MODE_REG_DATA 0x0040
+#define LPDDR2_MODE_REG_CFG 0x0050
+
+/*
+ * Internal Type Definitions
+ * pcm_int_data contains memory controller details:
+ * @ctl_regs : remapped base of the memory controller mode registers
+ * (LPDDR2_MODE_REG_DATA and LPDDR2_MODE_REG_CFG are offsets from it)
+ * @bus_width: memory bus-width (eg: x16 2 Bytes, x32 4 Bytes)
+ */
+struct pcm_int_data {
+ void __iomem *ctl_regs;
+ int bus_width;
+};
+
+static DEFINE_MUTEX(lpdd2_nvm_mutex);
+
+/*
+ * Build a map_word starting from an u_long
+ */
+static inline map_word build_map_word(u_long myword)
+{
+ map_word val = { {0} };
+ val.x[0] = myword;
+ return val;
+}
+
+/*
+ * Build Mode Register Configuration DataMask based on device bus-width
+ */
+static inline u_int build_mr_cfgmask(u_int bus_width)
+{
+ u_int val = MR_CFGMASK;
+
+ if (bus_width == 0x0004) /* x32 device */
+ val = val << 16;
+
+ return val;
+}
+
+/*
+ * Build Status Register OK DataMask based on device bus-width
+ */
+static inline u_int build_sr_ok_datamask(u_int bus_width)
+{
+ u_int val = SR_OK_DATAMASK;
+
+ if (bus_width == 0x0004) /* x32 device */
+ val = (val << 16)+val;
+
+ return val;
+}
+
+/*
+ * Evaluates Overlay Window Control Registers address
+ */
+static inline u_long ow_reg_add(struct map_info *map, u_long offset)
+{
+ u_long val = 0;
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+
+ val = map->pfow_base + offset*pcm_data->bus_width;
+
+ return val;
+}
+
+/*
+ * Enable lpddr2-nvm Overlay Window
+ * Overlay Window is a memory mapped area containing all LPDDR2-NVM registers
+ * used by device commands as well as user-visible resources like Device Status
+ * Register, Device ID, etc
+ */
+static inline void ow_enable(struct map_info *map)
+{
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+
+ writel_relaxed(build_mr_cfgmask(pcm_data->bus_width) | 0x18,
+ pcm_data->ctl_regs + LPDDR2_MODE_REG_CFG);
+ writel_relaxed(0x01, pcm_data->ctl_regs + LPDDR2_MODE_REG_DATA);
+}
+
+/*
+ * Disable lpddr2-nvm Overlay Window
+ * Overlay Window is a memory mapped area containing all LPDDR2-NVM registers
+ * used by device commands as well as user-visible resources like Device Status
+ * Register, Device ID, etc
+ */
+static inline void ow_disable(struct map_info *map)
+{
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+
+ writel_relaxed(build_mr_cfgmask(pcm_data->bus_width) | 0x18,
+ pcm_data->ctl_regs + LPDDR2_MODE_REG_CFG);
+ writel_relaxed(0x02, pcm_data->ctl_regs + LPDDR2_MODE_REG_DATA);
+}
+
+/*
+ * Execute lpddr2-nvm operations
+ */
+static int lpddr2_nvm_do_op(struct map_info *map, u_long cmd_code,
+ u_long cmd_data, u_long cmd_add, u_long cmd_mpr, u_char *buf)
+{
+ map_word add_l = { {0} }, add_h = { {0} }, mpr_l = { {0} },
+ mpr_h = { {0} }, data_l = { {0} }, cmd = { {0} },
+ exec_cmd = { {0} }, sr;
+ map_word data_h = { {0} }; /* only for 2x x16 devices stacked */
+ u_long i, status_reg, prg_buff_ofs;
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+ u_int sr_ok_datamask = build_sr_ok_datamask(pcm_data->bus_width);
+
+ /* Builds low and high words for OW Control Registers */
+ add_l.x[0] = cmd_add & 0x0000FFFF;
+ add_h.x[0] = (cmd_add >> 16) & 0x0000FFFF;
+ mpr_l.x[0] = cmd_mpr & 0x0000FFFF;
+ mpr_h.x[0] = (cmd_mpr >> 16) & 0x0000FFFF;
+ cmd.x[0] = cmd_code & 0x0000FFFF;
+ exec_cmd.x[0] = 0x0001;
+ data_l.x[0] = cmd_data & 0x0000FFFF;
+ data_h.x[0] = (cmd_data >> 16) & 0x0000FFFF; /* only for 2x x16 */
+
+ /* Set Overlay Window Control Registers */
+ map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS));
+ map_write(map, data_l, ow_reg_add(map, CMD_DATA_OFS));
+ map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS));
+ map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS));
+ map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS));
+ map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS));
+ if (pcm_data->bus_width == 0x0004) { /* 2x16 devices stacked */
+ map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS) + 2);
+ map_write(map, data_h, ow_reg_add(map, CMD_DATA_OFS) + 2);
+ map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS) + 2);
+ map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS) + 2);
+ map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS) + 2);
+ map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS) + 2);
+ }
+
+ /* Fill Program Buffer */
+ if ((cmd_code == LPDDR2_NVM_BUF_PROGRAM) ||
+ (cmd_code == LPDDR2_NVM_BUF_OVERWRITE)) {
+ prg_buff_ofs = (map_read(map,
+ ow_reg_add(map, PRG_BUFFER_OFS))).x[0];
+ for (i = 0; i < cmd_mpr; i++) {
+ map_write(map, build_map_word(buf[i]), map->pfow_base +
+ prg_buff_ofs + i);
+ }
+ }
+
+ /* Command Execute */
+ map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS));
+ if (pcm_data->bus_width == 0x0004) /* 2x16 devices stacked */
+ map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS) + 2);
+
+ /* Status Register Check */
+ do {
+ sr = map_read(map, ow_reg_add(map, STATUS_REG_OFS));
+ status_reg = sr.x[0];
+ if (pcm_data->bus_width == 0x0004) {/* 2x16 devices stacked */
+ sr = map_read(map, ow_reg_add(map,
+ STATUS_REG_OFS) + 2);
+ status_reg += sr.x[0] << 16;
+ }
+ } while ((status_reg & sr_ok_datamask) != sr_ok_datamask);
+
+ return (((status_reg & sr_ok_datamask) == sr_ok_datamask) ? 0 : -EIO);
+}
+
+/*
+ * Execute lpddr2-nvm operations @ block level
+ */
+static int lpddr2_nvm_do_block_op(struct mtd_info *mtd, loff_t start_add,
+ uint64_t len, u_char block_op)
+{
+ struct map_info *map = mtd->priv;
+ u_long add, end_add;
+ int ret = 0;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ ow_enable(map);
+
+ add = start_add;
+ end_add = add + len;
+
+ do {
+ ret = lpddr2_nvm_do_op(map, block_op, 0x00, add, add, NULL);
+ if (ret)
+ goto out;
+ add += mtd->erasesize;
+ } while (add < end_add);
+
+out:
+ ow_disable(map);
+ mutex_unlock(&lpdd2_nvm_mutex);
+ return ret;
+}
+
+/*
+ * verify presence of PFOW string
+ */
+static int lpddr2_nvm_pfow_present(struct map_info *map)
+{
+ map_word pfow_val[4];
+ unsigned int found = 1;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ ow_enable(map);
+
+ /* Load string from array */
+ pfow_val[0] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_P));
+ pfow_val[1] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_F));
+ pfow_val[2] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_O));
+ pfow_val[3] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_W));
+
+ /* Verify the string loaded vs expected */
+ if (!map_word_equal(map, build_map_word('P'), pfow_val[0]))
+ found = 0;
+ if (!map_word_equal(map, build_map_word('F'), pfow_val[1]))
+ found = 0;
+ if (!map_word_equal(map, build_map_word('O'), pfow_val[2]))
+ found = 0;
+ if (!map_word_equal(map, build_map_word('W'), pfow_val[3]))
+ found = 0;
+
+ ow_disable(map);
+
+ mutex_unlock(&lpdd2_nvm_mutex);
+
+ return found;
+}
+
+/*
+ * lpddr2_nvm driver read method
+ */
+static int lpddr2_nvm_read(struct mtd_info *mtd, loff_t start_add,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ *retlen = len;
+
+ map_copy_from(map, buf, start_add, *retlen);
+
+ mutex_unlock(&lpdd2_nvm_mutex);
+ return 0;
+}
+
+/*
+ * lpddr2_nvm driver write method
+ */
+static int lpddr2_nvm_write(struct mtd_info *mtd, loff_t start_add,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+ u_long add, current_len, tot_len, target_len, my_data;
+ u_char *write_buf = (u_char *)buf;
+ int ret = 0;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ ow_enable(map);
+
+ /* Set start value for the variables */
+ add = start_add;
+ target_len = len;
+ tot_len = 0;
+
+ while (tot_len < target_len) {
+ if (!(IS_ALIGNED(add, mtd->writesize))) { /* do sw program */
+ my_data = write_buf[tot_len];
+ my_data += (write_buf[tot_len+1]) << 8;
+ if (pcm_data->bus_width == 0x0004) {/* 2x16 devices */
+ my_data += (write_buf[tot_len+2]) << 16;
+ my_data += (write_buf[tot_len+3]) << 24;
+ }
+ ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_SW_OVERWRITE,
+ my_data, add, 0x00, NULL);
+ if (ret)
+ goto out;
+
+ add += pcm_data->bus_width;
+ tot_len += pcm_data->bus_width;
+ } else { /* do buffer program */
+ current_len = min(target_len - tot_len,
+ (u_long) mtd->writesize);
+ ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_BUF_OVERWRITE,
+ 0x00, add, current_len, write_buf + tot_len);
+ if (ret)
+ goto out;
+
+ add += current_len;
+ tot_len += current_len;
+ }
+ }
+
+out:
+ *retlen = tot_len;
+ ow_disable(map);
+ mutex_unlock(&lpdd2_nvm_mutex);
+ return ret;
+}
+
+/*
+ * lpddr2_nvm driver erase method
+ */
+static int lpddr2_nvm_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ int ret = lpddr2_nvm_do_block_op(mtd, instr->addr, instr->len,
+ LPDDR2_NVM_ERASE);
+ if (!ret) {
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ }
+
+ return ret;
+}
+
+/*
+ * lpddr2_nvm driver unlock method
+ */
+static int lpddr2_nvm_unlock(struct mtd_info *mtd, loff_t start_add,
+ uint64_t len)
+{
+ return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_UNLOCK);
+}
+
+/*
+ * lpddr2_nvm driver lock method
+ */
+static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
+ uint64_t len)
+{
+ return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
+}
+
+/*
+ * lpddr2_nvm driver probe method
+ */
+static int lpddr2_nvm_probe(struct platform_device *pdev)
+{
+ struct map_info *map;
+ struct mtd_info *mtd;
+ struct resource *add_range;
+ struct resource *control_regs;
+ struct pcm_int_data *pcm_data;
+
+ /* Allocate memory control_regs data structures */
+ pcm_data = devm_kzalloc(&pdev->dev, sizeof(*pcm_data), GFP_KERNEL);
+ if (!pcm_data)
+ return -ENOMEM;
+
+ pcm_data->bus_width = BUS_WIDTH;
+
+ /* Allocate memory for map_info & mtd_info data structures */
+ map = devm_kzalloc(&pdev->dev, sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ mtd = devm_kzalloc(&pdev->dev, sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return -ENOMEM;
+
+ /* lpddr2_nvm address range */
+ add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* Populate map_info data structure */
+ *map = (struct map_info) {
+ .virt = devm_ioremap_resource(&pdev->dev, add_range),
+ .name = pdev->dev.init_name,
+ .phys = add_range->start,
+ .size = resource_size(add_range),
+ .bankwidth = pcm_data->bus_width / 2,
+ .pfow_base = OW_BASE_ADDRESS,
+ .fldrv_priv = pcm_data,
+ };
+ if (IS_ERR(map->virt))
+ return PTR_ERR(map->virt);
+
+ simple_map_init(map); /* fill with default methods */
+
+ control_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pcm_data->ctl_regs = devm_ioremap_resource(&pdev->dev, control_regs);
+ if (IS_ERR(pcm_data->ctl_regs))
+ return PTR_ERR(pcm_data->ctl_regs);
+
+ /* Populate mtd_info data structure */
+ *mtd = (struct mtd_info) {
+ .name = pdev->dev.init_name,
+ .type = MTD_RAM,
+ .priv = map,
+ .size = resource_size(add_range),
+ .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width,
+ .writesize = 1,
+ .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width,
+ .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
+ ._read = lpddr2_nvm_read,
+ ._write = lpddr2_nvm_write,
+ ._erase = lpddr2_nvm_erase,
+ ._unlock = lpddr2_nvm_unlock,
+ ._lock = lpddr2_nvm_lock,
+ };
+
+ /* Verify the presence of the device looking for PFOW string */
+ if (!lpddr2_nvm_pfow_present(map)) {
+ pr_err("device not recognized\n");
+ return -EINVAL;
+ }
+ /* Parse partitions and register the MTD device */
+ return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+}
+
+/*
+ * lpddr2_nvm driver remove method
+ */
+static int lpddr2_nvm_remove(struct platform_device *pdev)
+{
+ return mtd_device_unregister(dev_get_drvdata(&pdev->dev));
+}
+
+/* Initialize platform_driver data structure for lpddr2_nvm */
+static struct platform_driver lpddr2_nvm_drv = {
+ .driver = {
+ .name = "lpddr2_nvm",
+ },
+ .probe = lpddr2_nvm_probe,
+ .remove = lpddr2_nvm_remove,
+};
+
+module_platform_driver(lpddr2_nvm_drv);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vincenzo Aliberti <vincenzo.aliberti@gmail.com>");
+MODULE_DESCRIPTION("MTD driver for LPDDR2-NVM PCM memories");
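lpddr2_nvm_probe() above pulls two IORESOURCE_MEM resources from the platform device: index 0 is the memory-mapped LPDDR2-NVM array and index 1 is the memory controller register block that ow_enable()/ow_disable() write through. A hypothetical board-file hookup is sketched below; the base addresses and sizes are placeholders rather than values from any real platform, and the only hard requirement is that .name matches the "lpddr2_nvm" driver name.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource lpddr2_nvm_resources[] = {
        {       /* index 0: the LPDDR2-NVM array (placeholder address) */
                .start = 0x80000000,
                .end   = 0x80000000 + 0x04000000 - 1,
                .flags = IORESOURCE_MEM,
        },
        {       /* index 1: controller mode registers (placeholder address) */
                .start = 0xfff00000,
                .end   = 0xfff00000 + 0x1000 - 1,
                .flags = IORESOURCE_MEM,
        },
};

static struct platform_device lpddr2_nvm_device = {
        .name           = "lpddr2_nvm", /* must match the driver name */
        .id             = -1,
        .resource       = lpddr2_nvm_resources,
        .num_resources  = ARRAY_SIZE(lpddr2_nvm_resources),
};

Registering this device (platform_device_register(&lpddr2_nvm_device) from board setup code) is what would trigger the probe and its PFOW string check.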
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 65655dd59e1..018c75faadb 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -27,6 +27,7 @@
#include <linux/mtd/pfow.h>
#include <linux/mtd/qinfo.h>
#include <linux/slab.h>
+#include <linux/module.h>
static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
size_t *retlen, u_char *buf);
@@ -39,7 +40,7 @@ static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
size_t *retlen, void **mtdbuf, resource_size_t *phys);
-static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
+static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
static int get_chip(struct map_info *map, struct flchip *chip, int mode);
static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
static void put_chip(struct map_info *map, struct flchip *chip);
@@ -54,34 +55,25 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
int i, j;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
- if (!mtd) {
- printk(KERN_ERR "Failed to allocate memory for MTD device\n");
+ if (!mtd)
return NULL;
- }
mtd->priv = map;
mtd->type = MTD_NORFLASH;
/* Fill in the default mtd operations */
- mtd->read = lpddr_read;
+ mtd->_read = lpddr_read;
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_CAP_NORFLASH;
mtd->flags &= ~MTD_BIT_WRITEABLE;
- mtd->erase = lpddr_erase;
- mtd->write = lpddr_write_buffers;
- mtd->writev = lpddr_writev;
- mtd->read_oob = NULL;
- mtd->write_oob = NULL;
- mtd->sync = NULL;
- mtd->lock = lpddr_lock;
- mtd->unlock = lpddr_unlock;
- mtd->suspend = NULL;
- mtd->resume = NULL;
+ mtd->_erase = lpddr_erase;
+ mtd->_write = lpddr_write_buffers;
+ mtd->_writev = lpddr_writev;
+ mtd->_lock = lpddr_lock;
+ mtd->_unlock = lpddr_unlock;
if (map_is_linear(map)) {
- mtd->point = lpddr_point;
- mtd->unpoint = lpddr_unpoint;
+ mtd->_point = lpddr_point;
+ mtd->_unpoint = lpddr_unpoint;
}
- mtd->block_isbad = NULL;
- mtd->block_markbad = NULL;
mtd->size = 1 << lpddr->qinfo->DevSizeShift;
mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
@@ -394,7 +386,7 @@ static void put_chip(struct map_info *map, struct flchip *chip)
wake_up(&chip->wq);
}
-int do_write_buffer(struct map_info *map, struct flchip *chip,
+static int do_write_buffer(struct map_info *map, struct flchip *chip,
unsigned long adr, const struct kvec **pvec,
unsigned long *pvec_seek, int len)
{
@@ -475,7 +467,7 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
return ret;
}
-int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
+static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
struct map_info *map = mtd->priv;
struct lpddr_private *lpddr = map->fldrv_priv;
@@ -536,14 +528,12 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
struct flchip *chip = &lpddr->chips[chipnum];
int ret = 0;
- if (!map->virt || (adr + len > mtd->size))
+ if (!map->virt)
return -EINVAL;
/* ofs: offset within the first chip that the first read should start */
ofs = adr - (chipnum << lpddr->chipshift);
-
*mtdbuf = (void *)map->virt + chip->start + ofs;
- *retlen = 0;
while (len) {
unsigned long thislen;
@@ -581,11 +571,11 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
return 0;
}
-static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
+static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
{
struct map_info *map = mtd->priv;
struct lpddr_private *lpddr = map->fldrv_priv;
- int chipnum = adr >> lpddr->chipshift;
+ int chipnum = adr >> lpddr->chipshift, err = 0;
unsigned long ofs;
/* ofs: offset within the first chip that the first read should start */
@@ -609,9 +599,11 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
chip->ref_point_counter--;
if (chip->ref_point_counter == 0)
chip->state = FL_READY;
- } else
+ } else {
printk(KERN_WARNING "%s: Warning: unpoint called on non"
"pointed region\n", map->name);
+ err = -EINVAL;
+ }
put_chip(map, chip);
mutex_unlock(&chip->mutex);
@@ -620,6 +612,8 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
ofs = 0;
chipnum++;
}
+
+ return err;
}
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
@@ -643,13 +637,11 @@ static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
int chipnum;
unsigned long ofs, vec_seek, i;
int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
-
size_t len = 0;
for (i = 0; i < count; i++)
len += vecs[i].iov_len;
- *retlen = 0;
if (!len)
return 0;
@@ -694,9 +686,6 @@ static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
ofs = instr->addr;
len = instr->len;
- if (ofs > mtd->size || (len + ofs) > mtd->size)
- return -EINVAL;
-
while (len > 0) {
ret = do_erase_oneblock(mtd, ofs);
if (ret)
@@ -712,7 +701,7 @@ static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
#define DO_XXLOCK_LOCK 1
#define DO_XXLOCK_UNLOCK 2
-int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
+static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
{
int ret = 0;
struct map_info *map = mtd->priv;
@@ -757,34 +746,6 @@ static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}
-int word_program(struct map_info *map, loff_t adr, uint32_t curval)
-{
- int ret;
- struct lpddr_private *lpddr = map->fldrv_priv;
- int chipnum = adr >> lpddr->chipshift;
- struct flchip *chip = &lpddr->chips[chipnum];
-
- mutex_lock(&chip->mutex);
- ret = get_chip(map, chip, FL_WRITING);
- if (ret) {
- mutex_unlock(&chip->mutex);
- return ret;
- }
-
- send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval);
-
- ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime));
- if (ret) {
- printk(KERN_WARNING"%s word_program error at: %llx; val: %x\n",
- map->name, adr, curval);
- goto out;
- }
-
-out: put_chip(map, chip);
- mutex_unlock(&chip->mutex);
- return ret;
-}
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
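Besides switching to the underscored hook names, the lpddr_cmds.c hunks make lpddr_unpoint() return an error code and drop the local range checks, leaving validation to the common mtd_*() wrappers. From a caller's perspective the point/unpoint pair is used roughly as below; example_peek() is illustrative, and the mtd_point()/mtd_unpoint() prototypes are assumed to be the six- and three-argument forms in use at the time of this change.

#include <linux/mtd/mtd.h>

static int example_peek(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        resource_size_t phys;
        size_t retlen;
        void *virt;
        int ret;

        ret = mtd_point(mtd, ofs, len, &retlen, &virt, &phys);
        if (ret)
                return ret;     /* e.g. the device cannot be mapped directly */

        /* ... zero-copy access through 'virt' ... */

        return mtd_unpoint(mtd, ofs, retlen);   /* failure now propagates */
}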
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index dbfe17baf04..69f2112340b 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -57,7 +57,7 @@ static struct qinfo_query_info qinfo_array[] = {
static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
{
- int qinfo_lines = sizeof(qinfo_array)/sizeof(struct qinfo_query_info);
+ int qinfo_lines = ARRAY_SIZE(qinfo_array);
int i;
int bankwidth = map_bankwidth(map) * 8;
int major, minor;
@@ -135,11 +135,8 @@ static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
{
lpddr->qinfo = kzalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
- if (!lpddr->qinfo) {
- printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n",
- map->name);
+ if (!lpddr->qinfo)
return 0;
- }
/* Get the ManuID */
lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
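The qinfo_probe.c change replaces the open-coded sizeof division with ARRAY_SIZE() from <linux/kernel.h>, which computes the same element count but also fails the build if handed a pointer instead of a true array. A trivial illustration with a made-up table:

#include <linux/kernel.h>

static const struct { int major, minor; } example_versions[] = {
        { 1, 0 }, { 1, 1 }, { 2, 0 },
};

static int example_nr_versions(void)
{
        return ARRAY_SIZE(example_versions);    /* 3 */
}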
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index c0c328c5b13..21b2874a303 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -1,5 +1,6 @@
menu "Mapping drivers for chip access"
depends on MTD!=n
+ depends on HAS_IOMEM
config MTD_COMPLEX_MAPPINGS
bool "Support non-linear mappings of flash chips"
@@ -41,8 +42,6 @@ config MTD_PHYSMAP_START
are mapped on your particular target board. Refer to the
memory map which should hopefully be in the documentation for
your board.
- Ignore this option if you use run-time physmap configuration
- (i.e., run-time calling physmap_configure()).
config MTD_PHYSMAP_LEN
hex "Physical length of flash mapping"
@@ -55,8 +54,6 @@ config MTD_PHYSMAP_LEN
than the total amount of flash present. Refer to the memory
map which should hopefully be in the documentation for your
board.
- Ignore this option if you use run-time physmap configuration
- (i.e., run-time calling physmap_configure()).
config MTD_PHYSMAP_BANKWIDTH
int "Bank width in octets"
@@ -67,15 +64,13 @@ config MTD_PHYSMAP_BANKWIDTH
in octets. For example, if you have a data bus width of 32
bits, you would set the bus width octet value to 4. This is
used internally by the CFI drivers.
- Ignore this option if you use run-time physmap configuration
- (i.e., run-time calling physmap_configure()).
config MTD_PHYSMAP_OF
- tristate "Flash device in physical memory map based on OF description"
- depends on OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM)
+ tristate "Memory device in physical memory map based on OF description"
+ depends on OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_RAM)
help
- This provides a 'mapping' driver which allows the NOR Flash and
- ROM driver code to communicate with chips which are mapped
+ This provides a 'mapping' driver which allows the NOR Flash, ROM
+ and RAM driver code to communicate with chips which are mapped
physically into the CPU's memory. The mapping description here is
taken from OF device tree.
@@ -113,7 +108,7 @@ config MTD_SUN_UFLASH
config MTD_SC520CDP
tristate "CFI Flash device mapped on AMD SC520 CDP"
- depends on X86 && MTD_CFI
+ depends on (MELAN || COMPILE_TEST) && MTD_CFI
help
The SC520 CDP board has two banks of CFI-compliant chips and one
Dual-in-line JEDEC chip. This 'mapping' driver supports that
@@ -121,7 +116,7 @@ config MTD_SC520CDP
config MTD_NETSC520
tristate "CFI Flash device mapped on AMD NetSc520"
- depends on X86 && MTD_CFI
+ depends on (MELAN || COMPILE_TEST) && MTD_CFI
help
This enables access routines for the flash chips on the AMD NetSc520
demonstration board. If you have one of these boards and would like
@@ -129,7 +124,7 @@ config MTD_NETSC520
config MTD_TS5500
tristate "JEDEC Flash device mapped on Technologic Systems TS-5500"
- depends on X86
+ depends on TS5500 || COMPILE_TEST
select MTD_JEDECPROBE
select MTD_CFI_AMDSTD
help
@@ -162,24 +157,6 @@ config MTD_PXA2XX
help
This provides a driver for the NOR flash attached to a PXA2xx chip.
-config MTD_OCTAGON
- tristate "JEDEC Flash device mapped on Octagon 5066 SBC"
- depends on X86 && MTD_JEDEC && MTD_COMPLEX_MAPPINGS
- help
- This provides a 'mapping' driver which supports the way in which
- the flash chips are connected in the Octagon-5066 Single Board
- Computer. More information on the board is available at
- <http://www.octagonsystems.com/products/5066.aspx>.
-
-config MTD_VMAX
- tristate "JEDEC Flash device mapped on Tempustech VMAX SBC301"
- depends on X86 && MTD_JEDEC && MTD_COMPLEX_MAPPINGS
- help
- This provides a 'mapping' driver which supports the way in which
- the flash chips are connected in the Tempustech VMAX SBC301 Single
- Board Computer. More information on the board is available at
- <http://www.tempustech.com/>.
-
config MTD_SCx200_DOCFLASH
tristate "Flash device mapped with DOCCS on NatSemi SCx200"
depends on SCx200 && MTD_CFI
@@ -229,7 +206,7 @@ config MTD_CK804XROM
config MTD_SCB2_FLASH
tristate "BIOS flash chip on Intel SCB2 boards"
- depends on X86 && MTD_JEDECPROBE
+ depends on X86 && MTD_JEDECPROBE && PCI
help
Support for treating the BIOS flash chip on Intel SCB2 boards
as an MTD device - with this you can reprogram your BIOS.
@@ -248,38 +225,12 @@ config MTD_NETtel
help
Support for flash chips on NETtel/SecureEdge/SnapGear boards.
-config MTD_BCM963XX
- tristate "Map driver for Broadcom BCM963xx boards"
- depends on BCM63XX
- select MTD_MAP_BANK_WIDTH_2
- select MTD_CFI_I1
- help
- Support for parsing CFE image tag and creating MTD partitions on
- Broadcom BCM63xx boards.
-
config MTD_LANTIQ
tristate "Lantiq SoC NOR support"
depends on LANTIQ
- select MTD_PARTITIONS
help
Support for NOR flash attached to the Lantiq SoC's External Bus Unit.
-config MTD_DILNETPC
- tristate "CFI Flash device mapped on DIL/Net PC"
- depends on X86 && MTD_CFI_INTELEXT && BROKEN
- help
- MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
- For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
- and <http://www.ssv-embedded.de/ssv/pc104/p170.htm>
-
-config MTD_DILNETPC_BOOTSIZE
- hex "Size of DIL/Net PC flash boot partition"
- depends on MTD_DILNETPC
- default "0x80000"
- help
- The amount of space taken up by the kernel or Etherboot
- on the DIL/Net PC flash chips.
-
config MTD_L440GX
tristate "BIOS flash chip on Intel L440GX boards"
depends on X86 && MTD_JEDECPROBE
@@ -289,42 +240,6 @@ config MTD_L440GX
BE VERY CAREFUL.
-config MTD_TQM8XXL
- tristate "CFI Flash device mapped on TQM8XXL"
- depends on MTD_CFI && TQM8xxL
- help
- The TQM8xxL PowerPC board has up to two banks of CFI-compliant
- chips, currently an AMD one. This 'mapping' driver supports
- that arrangement, allowing the CFI probe and command set driver
- code to communicate with the chips on the TQM8xxL board. More at
- <http://www.denx.de/wiki/PPCEmbedded/>.
-
-config MTD_RPXLITE
- tristate "CFI Flash device mapped on RPX Lite or CLLF"
- depends on MTD_CFI && (RPXCLASSIC || RPXLITE)
- help
- The RPXLite PowerPC board has CFI-compliant chips mapped in
- a strange sparse mapping. This 'mapping' driver supports that
- arrangement, allowing the CFI probe and command set driver code
- to communicate with the chips on the RPXLite board. More at
- <http://www.embeddedplanet.com/>.
-
-config MTD_MBX860
- tristate "System flash on MBX860 board"
- depends on MTD_CFI && MBX
- help
- This enables access routines for the flash chips on the Motorola
- MBX860 board. If you have one of these boards and would like
- to use the flash chips on it, say 'Y'.
-
-config MTD_DBOX2
- tristate "CFI Flash device mapped on D-Box2"
- depends on DBOX2 && MTD_CFI_INTELSTD && MTD_CFI_INTELEXT && MTD_CFI_AMDSTD
- help
- This enables access routines for the flash chips on the Nokia/Sagem
- D-Box 2 board. If you have one of these boards and would like to use
- the flash chips on it, say 'Y'.
-
config MTD_CFI_FLAGADM
tristate "CFI Flash device mapping on FlagaDM"
depends on 8xx && MTD_CFI
@@ -339,17 +254,6 @@ config MTD_SOLUTIONENGINE
This enables access to the flash chips on the Hitachi SolutionEngine and
similar boards. Say 'Y' if you are building a kernel for such a board.
-config MTD_ARM_INTEGRATOR
- tristate "CFI Flash device mapped on ARM Integrator/P720T"
- depends on ARM && MTD_CFI
-
-config MTD_CDB89712
- tristate "Cirrus CDB89712 evaluation board mappings"
- depends on MTD_CFI && ARCH_CDB89712
- help
- This enables access to the flash or ROM chips on the CDB89712 board.
- If you have such a board, say 'Y'.
-
config MTD_SA1100
tristate "CFI Flash device mapped on StrongARM SA11x0"
depends on MTD_CFI && ARCH_SA1100
@@ -375,36 +279,6 @@ config MTD_IXP4XX
IXDP425 and Coyote. If you have an IXP4xx based board and
would like to use the flash chips on it, say 'Y'.
-config MTD_IXP2000
- tristate "CFI Flash device mapped on Intel IXP2000 based systems"
- depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP2000
- help
- This enables MTD access to flash devices on platforms based
- on Intel's IXP2000 family of network processors. If you have an
- IXP2000 based board and would like to use the flash chips on it,
- say 'Y'.
-
-config MTD_FORTUNET
- tristate "CFI Flash device mapped on the FortuNet board"
- depends on MTD_CFI && SA1100_FORTUNET
- help
- This enables access to the Flash on the FortuNet board. If you
- have such a board, say 'Y'.
-
-config MTD_AUTCPU12
- tristate "NV-RAM mapping AUTCPU12 board"
- depends on ARCH_AUTCPU12
- help
- This enables access to the NV-RAM on autronix autcpu12 board.
- If you have such a board, say 'Y'.
-
-config MTD_EDB7312
- tristate "CFI Flash device mapped on EDB7312"
- depends on ARCH_EDB7312 && MTD_CFI
- help
- This enables access to the CFI Flash on the Cogent EDB7312 board.
- If you have such a board, say 'Y' here.
-
config MTD_IMPA7
tristate "JEDEC Flash device mapped on impA7"
depends on ARM && MTD_JEDECPROBE
@@ -412,21 +286,6 @@ config MTD_IMPA7
This enables access to the NOR Flash on the impA7 board of
implementa GmbH. If you have such a board, say 'Y' here.
-config MTD_CEIVA
- tristate "JEDEC Flash device mapped on Ceiva/Polaroid PhotoMax Digital Picture Frame"
- depends on MTD_JEDECPROBE && ARCH_CEIVA
- help
- This enables access to the flash chips on the Ceiva/Polaroid
- PhotoMax Digital Picture Frame.
- If you have such a device, say 'Y'.
-
-config MTD_H720X
- tristate "Hynix evaluation board mappings"
- depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 )
- help
- This enables access to the flash chips on the Hynix evaluation boards.
- If you have such a board, say 'Y'.
-
# This needs CFI or JEDEC, depending on the cards found.
config MTD_PCI
tristate "PCI MTD driver"
@@ -467,7 +326,7 @@ config MTD_BFIN_ASYNC
config MTD_GPIO_ADDR
tristate "GPIO-assisted Flash Chip Support"
- depends on GENERIC_GPIO || GPIOLIB
+ depends on GPIOLIB
depends on MTD_COMPLEX_MAPPINGS
help
Map driver which allows flashes to be partially physically addressed
@@ -477,31 +336,10 @@ config MTD_GPIO_ADDR
config MTD_UCLINUX
bool "Generic uClinux RAM/ROM filesystem support"
- depends on MTD_RAM=y && !MMU
+ depends on (MTD_RAM=y || MTD_ROM=y) && (!MMU || COLDFIRE)
help
Map driver to support image based filesystems for uClinux.
-config MTD_WRSBC8260
- tristate "Map driver for WindRiver PowerQUICC II MPC82xx board"
- depends on (SBC82xx || SBC8560)
- select MTD_MAP_BANK_WIDTH_4
- select MTD_MAP_BANK_WIDTH_1
- select MTD_CFI_I1
- select MTD_CFI_I4
- help
- Map driver for WindRiver PowerQUICC II MPC82xx board. Drives
- all three flash regions on CS0, CS1 and CS6 if they are configured
- correctly by the boot loader.
-
-config MTD_DMV182
- tristate "Map driver for Dy-4 SVME/DMV-182 board."
- depends on DMV182
- select MTD_MAP_BANK_WIDTH_32
- select MTD_CFI_I8
- select MTD_CFI_AMDSTD
- help
- Map driver for Dy-4 SVME/DMV-182 board.
-
config MTD_INTEL_VR_NOR
tristate "NOR flash on Intel Vermilion Range Expansion Bus CS0"
depends on PCI
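
As a side note on the MTD_PHYSMAP_BANKWIDTH help text above: the bank width is simply the flash data-bus width expressed in octets, and the same value can also be supplied at run time through physmap platform data instead of Kconfig. A minimal, hypothetical board-file sketch ("physmap-flash" is the physmap platform driver name; the base address, size and struct names are made up for illustration):

/* Hypothetical board code: one 16 MiB NOR bank on a 32-bit data bus. */
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static struct physmap_flash_data example_flash_data = {
        .width = 4,     /* 32-bit bus => bank width of 4 octets, as the help text describes */
};

static struct resource example_flash_resource = {
        .start = 0x08000000,                    /* assumed base address */
        .end   = 0x08000000 + SZ_16M - 1,
        .flags = IORESOURCE_MEM,
};

static struct platform_device example_flash_device = {
        .name           = "physmap-flash",
        .id             = 0,
        .dev            = { .platform_data = &example_flash_data },
        .resource       = &example_flash_resource,
        .num_resources  = 1,
};
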
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index cb48b11afff..141c91a5b24 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -7,10 +7,8 @@ obj-$(CONFIG_MTD) += map_funcs.o
endif
# Chip mappings
-obj-$(CONFIG_MTD_CDB89712) += cdb89712.o
obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
obj-$(CONFIG_MTD_DC21285) += dc21285.o
-obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o
obj-$(CONFIG_MTD_L440GX) += l440gx.o
obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o
obj-$(CONFIG_MTD_ESB2ROM) += esb2rom.o
@@ -18,45 +16,30 @@ obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
-obj-$(CONFIG_MTD_MBX860) += mbx860.o
-obj-$(CONFIG_MTD_CEIVA) += ceiva.o
-obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PISMO) += pismo.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
-obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
-obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
obj-$(CONFIG_MTD_NETSC520) += netsc520.o
obj-$(CONFIG_MTD_TS5500) += ts5500_flash.o
obj-$(CONFIG_MTD_SUN_UFLASH) += sun_uflash.o
-obj-$(CONFIG_MTD_VMAX) += vmax301.o
obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
-obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o
obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
obj-$(CONFIG_MTD_PCI) += pci.o
-obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
-obj-$(CONFIG_MTD_EDB7312) += edb7312.o
obj-$(CONFIG_MTD_IMPA7) += impa7.o
-obj-$(CONFIG_MTD_FORTUNET) += fortunet.o
obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
obj-$(CONFIG_MTD_NETtel) += nettel.o
obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
-obj-$(CONFIG_MTD_H720X) += h720x-flash.o
obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
-obj-$(CONFIG_MTD_IXP2000) += ixp2000.o
-obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o
-obj-$(CONFIG_MTD_DMV182) += dmv182.o
obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
obj-$(CONFIG_MTD_VMU) += vmu-flash.o
obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
-obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o
obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o
obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index e2875d6fe12..f7207b0a76d 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -100,8 +100,8 @@ static void amd76xrom_cleanup(struct amd76xrom_window *window)
}
-static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int amd76xrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
u8 byte;
@@ -289,7 +289,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
}
-static void __devexit amd76xrom_remove_one (struct pci_dev *pdev)
+static void amd76xrom_remove_one(struct pci_dev *pdev)
{
struct amd76xrom_window *window = &amd76xrom_window;
@@ -347,4 +347,3 @@ module_exit(cleanup_amd76xrom);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
MODULE_DESCRIPTION("MTD map driver for BIOS chips on the AMD76X southbridge");
-
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
deleted file mode 100644
index e5bfd0e093b..00000000000
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * NV-RAM memory access on autcpu12
- * (C) 2002 Thomas Gleixner (gleixner@autronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <asm/sizes.h>
-#include <mach/hardware.h>
-#include <mach/autcpu12.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-
-static struct mtd_info *sram_mtd;
-
-struct map_info autcpu12_sram_map = {
- .name = "SRAM",
- .size = 32768,
- .bankwidth = 4,
- .phys = 0x12000000,
-};
-
-static int __init init_autcpu12_sram (void)
-{
- int err, save0, save1;
-
- autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K);
- if (!autcpu12_sram_map.virt) {
- printk("Failed to ioremap autcpu12 NV-RAM space\n");
- err = -EIO;
- goto out;
- }
- simple_map_init(&autcpu12_sram_map);
-
- /*
- * Check for 32K/128K
- * read ofs 0
- * read ofs 0x10000
- * Write complement to ofs 0x10000
- * Read and check result at ofs 0x0
- * Restore contents
- */
- save0 = map_read32(&autcpu12_sram_map,0);
- save1 = map_read32(&autcpu12_sram_map,0x10000);
- map_write32(&autcpu12_sram_map,~save0,0x10000);
- /* If we read the complement back at ofs 0x0, the device is 32K:
- * restore contents and exit.
- */
- if ( map_read32(&autcpu12_sram_map,0) != save0) {
- map_write32(&autcpu12_sram_map,save0,0x0);
- goto map;
- }
- /* We have found 128K: restore ofs 0x10000 and set the size
- * to 128K.
- */
- map_write32(&autcpu12_sram_map,save1,0x10000);
- autcpu12_sram_map.size = SZ_128K;
-
-map:
- sram_mtd = do_map_probe("map_ram", &autcpu12_sram_map);
- if (!sram_mtd) {
- printk("NV-RAM probe failed\n");
- err = -ENXIO;
- goto out_ioremap;
- }
-
- sram_mtd->owner = THIS_MODULE;
- sram_mtd->erasesize = 16;
-
- if (mtd_device_register(sram_mtd, NULL, 0)) {
- printk("NV-RAM device addition failed\n");
- err = -ENOMEM;
- goto out_probe;
- }
-
- printk("NV-RAM device size %ldKiB registered on AUTCPU12\n",autcpu12_sram_map.size/SZ_1K);
-
- return 0;
-
-out_probe:
- map_destroy(sram_mtd);
- sram_mtd = 0;
-
-out_ioremap:
- iounmap((void *)autcpu12_sram_map.virt);
-out:
- return err;
-}
-
-static void __exit cleanup_autcpu12_maps(void)
-{
- if (sram_mtd) {
- mtd_device_unregister(sram_mtd);
- map_destroy(sram_mtd);
- iounmap((void *)autcpu12_sram_map.virt);
- }
-}
-
-module_init(init_autcpu12_sram);
-module_exit(cleanup_autcpu12_maps);
-
-MODULE_AUTHOR("Thomas Gleixner");
-MODULE_DESCRIPTION("autcpu12 NV-RAM map driver");
-MODULE_LICENSE("GPL");
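
The size probe in the deleted autcpu12 driver above relies on address aliasing: on a 32 KiB part the window repeats, so writing the complement of a word at offset 0x10000 also changes the word at offset 0, while on a 128 KiB part the two offsets are distinct cells. A stand-alone sketch of the same idea, assuming 'base' points at an already-mapped window of at least 128 KiB (plain pointers are used here instead of the MTD map accessors):

#include <stdint.h>
#include <stdbool.h>

/* Return true if the RAM window aliases, i.e. the part is only 32 KiB. */
bool sram_is_32k(volatile uint32_t *base)
{
        uint32_t save0 = base[0];
        uint32_t save1 = base[0x10000 / 4];
        bool aliased;

        base[0x10000 / 4] = ~save0;             /* write complement at +64 KiB */
        aliased = (base[0] != save0);           /* did it show through at offset 0? */

        if (aliased)
                base[0] = save0;                /* 32 KiB: offset 0 was disturbed */
        else
                base[0x10000 / 4] = save1;      /* 128 KiB: restore the distinct cell */

        return aliased;
}
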
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
deleted file mode 100644
index 608967fe74c..00000000000
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
- * Mike Albon <malbon@openwrt.org>
- * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/vmalloc.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <asm/mach-bcm63xx/bcm963xx_tag.h>
-
-#define BCM63XX_BUSWIDTH 2 /* Buswidth */
-#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
-
-#define PFX KBUILD_MODNAME ": "
-
-static struct mtd_partition *parsed_parts;
-
-static struct mtd_info *bcm963xx_mtd_info;
-
-static struct map_info bcm963xx_map = {
- .name = "bcm963xx",
- .bankwidth = BCM63XX_BUSWIDTH,
-};
-
-static int parse_cfe_partitions(struct mtd_info *master,
- struct mtd_partition **pparts)
-{
- /* CFE, NVRAM and global Linux are always present */
- int nrparts = 3, curpart = 0;
- struct bcm_tag *buf;
- struct mtd_partition *parts;
- int ret;
- size_t retlen;
- unsigned int rootfsaddr, kerneladdr, spareaddr;
- unsigned int rootfslen, kernellen, sparelen, totallen;
- int namelen = 0;
- int i;
- char *boardid;
- char *tagversion;
-
- /* Allocate memory for buffer */
- buf = vmalloc(sizeof(struct bcm_tag));
- if (!buf)
- return -ENOMEM;
-
- /* Get the tag */
- ret = master->read(master, master->erasesize, sizeof(struct bcm_tag),
- &retlen, (void *)buf);
- if (retlen != sizeof(struct bcm_tag)) {
- vfree(buf);
- return -EIO;
- }
-
- sscanf(buf->kernel_address, "%u", &kerneladdr);
- sscanf(buf->kernel_length, "%u", &kernellen);
- sscanf(buf->total_length, "%u", &totallen);
- tagversion = &(buf->tag_version[0]);
- boardid = &(buf->board_id[0]);
-
- printk(KERN_INFO PFX "CFE boot tag found with version %s "
- "and board type %s\n", tagversion, boardid);
-
- kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
- rootfsaddr = kerneladdr + kernellen;
- spareaddr = roundup(totallen, master->erasesize) + master->erasesize;
- sparelen = master->size - spareaddr - master->erasesize;
- rootfslen = spareaddr - rootfsaddr;
-
- /* Determine number of partitions */
- namelen = 8;
- if (rootfslen > 0) {
- nrparts++;
- namelen += 6;
- };
- if (kernellen > 0) {
- nrparts++;
- namelen += 6;
- };
-
- /* Ask kernel for more memory */
- parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
- if (!parts) {
- vfree(buf);
- return -ENOMEM;
- };
-
- /* Start building partition list */
- parts[curpart].name = "CFE";
- parts[curpart].offset = 0;
- parts[curpart].size = master->erasesize;
- curpart++;
-
- if (kernellen > 0) {
- parts[curpart].name = "kernel";
- parts[curpart].offset = kerneladdr;
- parts[curpart].size = kernellen;
- curpart++;
- };
-
- if (rootfslen > 0) {
- parts[curpart].name = "rootfs";
- parts[curpart].offset = rootfsaddr;
- parts[curpart].size = rootfslen;
- if (sparelen > 0)
- parts[curpart].size += sparelen;
- curpart++;
- };
-
- parts[curpart].name = "nvram";
- parts[curpart].offset = master->size - master->erasesize;
- parts[curpart].size = master->erasesize;
-
- /* Global partition "linux" to make easy firmware upgrade */
- curpart++;
- parts[curpart].name = "linux";
- parts[curpart].offset = parts[0].size;
- parts[curpart].size = master->size - parts[0].size - parts[3].size;
-
- for (i = 0; i < nrparts; i++)
- printk(KERN_INFO PFX "Partition %d is %s offset %lx and "
- "length %lx\n", i, parts[i].name,
- (long unsigned int)(parts[i].offset),
- (long unsigned int)(parts[i].size));
-
- printk(KERN_INFO PFX "Spare partition is %x offset and length %x\n",
- spareaddr, sparelen);
- *pparts = parts;
- vfree(buf);
-
- return nrparts;
-};
-
-static int bcm963xx_detect_cfe(struct mtd_info *master)
-{
- int idoffset = 0x4e0;
- static char idstring[8] = "CFE1CFE1";
- char buf[9];
- int ret;
- size_t retlen;
-
- ret = master->read(master, idoffset, 8, &retlen, (void *)buf);
- buf[retlen] = 0;
- printk(KERN_INFO PFX "Read Signature value of %s\n", buf);
-
- return strncmp(idstring, buf, 8);
-}
-
-static int bcm963xx_probe(struct platform_device *pdev)
-{
- int err = 0;
- int parsed_nr_parts = 0;
- char *part_type;
- struct resource *r;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no resource supplied\n");
- return -ENODEV;
- }
-
- bcm963xx_map.phys = r->start;
- bcm963xx_map.size = resource_size(r);
- bcm963xx_map.virt = ioremap(r->start, resource_size(r));
- if (!bcm963xx_map.virt) {
- dev_err(&pdev->dev, "failed to ioremap\n");
- return -EIO;
- }
-
- dev_info(&pdev->dev, "0x%08lx at 0x%08x\n",
- bcm963xx_map.size, bcm963xx_map.phys);
-
- simple_map_init(&bcm963xx_map);
-
- bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
- if (!bcm963xx_mtd_info) {
- dev_err(&pdev->dev, "failed to probe using CFI\n");
- bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map);
- if (bcm963xx_mtd_info)
- goto probe_ok;
- dev_err(&pdev->dev, "failed to probe using JEDEC\n");
- err = -EIO;
- goto err_probe;
- }
-
-probe_ok:
- bcm963xx_mtd_info->owner = THIS_MODULE;
-
- /* This is mutually exclusive */
- if (bcm963xx_detect_cfe(bcm963xx_mtd_info) == 0) {
- dev_info(&pdev->dev, "CFE bootloader detected\n");
- if (parsed_nr_parts == 0) {
- int ret = parse_cfe_partitions(bcm963xx_mtd_info,
- &parsed_parts);
- if (ret > 0) {
- part_type = "CFE";
- parsed_nr_parts = ret;
- }
- }
- } else {
- dev_info(&pdev->dev, "unsupported bootloader\n");
- err = -ENODEV;
- goto err_probe;
- }
-
- return mtd_device_register(bcm963xx_mtd_info, parsed_parts,
- parsed_nr_parts);
-
-err_probe:
- iounmap(bcm963xx_map.virt);
- return err;
-}
-
-static int bcm963xx_remove(struct platform_device *pdev)
-{
- if (bcm963xx_mtd_info) {
- mtd_device_unregister(bcm963xx_mtd_info);
- map_destroy(bcm963xx_mtd_info);
- }
-
- if (bcm963xx_map.virt) {
- iounmap(bcm963xx_map.virt);
- bcm963xx_map.virt = 0;
- }
-
- return 0;
-}
-
-static struct platform_driver bcm63xx_mtd_dev = {
- .probe = bcm963xx_probe,
- .remove = bcm963xx_remove,
- .driver = {
- .name = "bcm963xx-flash",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init bcm963xx_mtd_init(void)
-{
- return platform_driver_register(&bcm63xx_mtd_dev);
-}
-
-static void __exit bcm963xx_mtd_exit(void)
-{
- platform_driver_unregister(&bcm63xx_mtd_dev);
-}
-
-module_init(bcm963xx_mtd_init);
-module_exit(bcm963xx_mtd_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Broadcom BCM63xx MTD driver for CFE and RedBoot");
-MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
-MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
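
The partition arithmetic in the deleted bcm963xx driver's parse_cfe_partitions() above is easier to follow with concrete numbers. The following sketch uses purely hypothetical values (a 4 MiB flash with 64 KiB erase blocks, and a CFE tag reporting a kernel at 0xBFC10000 of length 0xE0000 with a total image length of 0x3E0000) to show how the offsets fall out:

#include <linux/kernel.h>       /* roundup() */

void cfe_layout_example(void)
{
        unsigned int erasesize  = 0x10000;                      /* 64 KiB blocks */
        unsigned int flashsize  = 0x400000;                     /* 4 MiB part */
        unsigned int kerneladdr = 0xBFC10000 - 0xBFC00000;      /* = 0x010000 */
        unsigned int kernellen  = 0x0E0000;
        unsigned int rootfsaddr = kerneladdr + kernellen;       /* = 0x0F0000 */
        unsigned int spareaddr  = roundup(0x3E0000, erasesize) + erasesize; /* = 0x3F0000 */
        unsigned int rootfslen  = spareaddr - rootfsaddr;       /* = 0x300000 */
        unsigned int sparelen   = flashsize - spareaddr - erasesize; /* = 0: nvram fills the last block */

        /* Resulting layout: CFE 0x0-0x10000, kernel 0x10000-0xF0000,
         * rootfs 0xF0000-0x3F0000, nvram 0x3F0000-0x400000, plus the
         * overlapping "linux" partition from 0x10000 to 0x3F0000. */
        (void)rootfslen;
        (void)sparelen;
}
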
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index d4297a97e10..6ea51e54904 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -14,7 +14,6 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
@@ -30,7 +29,8 @@
#include <linux/io.h>
#include <asm/unaligned.h>
-#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
+#define pr_devinit(fmt, args...) \
+ ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
#define DRIVER_NAME "bfin-async-flash"
@@ -41,7 +41,6 @@ struct async_state {
uint32_t flash_ambctl0, flash_ambctl1;
uint32_t save_ambctl0, save_ambctl1;
unsigned long irq_flags;
- struct mtd_partition *parts;
};
static void switch_to_flash(struct async_state *state)
@@ -122,12 +121,13 @@ static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const voi
switch_back(state);
}
-static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
+static const char * const part_probe_types[] = {
+ "cmdlinepart", "RedBoot", NULL };
-static int __devinit bfin_flash_probe(struct platform_device *pdev)
+static int bfin_flash_probe(struct platform_device *pdev)
{
int ret;
- struct physmap_flash_data *pdata = pdev->dev.platform_data;
+ struct physmap_flash_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct resource *flash_ambctl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
struct async_state *state;
@@ -142,7 +142,7 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
state->map.write = bfin_flash_write;
state->map.copy_to = bfin_flash_copy_to;
state->map.bankwidth = pdata->width;
- state->map.size = memory->end - memory->start + 1;
+ state->map.size = resource_size(memory);
state->map.virt = (void __iomem *)memory->start;
state->map.phys = memory->start;
state->map.map_priv_1 = (unsigned long)state;
@@ -165,30 +165,19 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
return -ENXIO;
}
- ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
- if (ret > 0) {
- pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
- mtd_device_register(state->mtd, pdata->parts, ret);
- state->parts = pdata->parts;
- } else if (pdata->nr_parts) {
- pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
- mtd_device_register(state->mtd, pdata->parts, pdata->nr_parts);
- } else {
- pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n");
- mtd_device_register(state->mtd, NULL, 0);
- }
+ mtd_device_parse_register(state->mtd, part_probe_types, NULL,
+ pdata->parts, pdata->nr_parts);
platform_set_drvdata(pdev, state);
return 0;
}
-static int __devexit bfin_flash_remove(struct platform_device *pdev)
+static int bfin_flash_remove(struct platform_device *pdev)
{
struct async_state *state = platform_get_drvdata(pdev);
gpio_free(state->enet_flash_pin);
mtd_device_unregister(state->mtd);
- kfree(state->parts);
map_destroy(state->mtd);
kfree(state);
return 0;
@@ -196,23 +185,13 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
static struct platform_driver bfin_flash_driver = {
.probe = bfin_flash_probe,
- .remove = __devexit_p(bfin_flash_remove),
+ .remove = bfin_flash_remove,
.driver = {
.name = DRIVER_NAME,
},
};
-static int __init bfin_flash_init(void)
-{
- return platform_driver_register(&bfin_flash_driver);
-}
-module_init(bfin_flash_init);
-
-static void __exit bfin_flash_exit(void)
-{
- platform_driver_unregister(&bfin_flash_driver);
-}
-module_exit(bfin_flash_exit);
+module_platform_driver(bfin_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
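
The bfin-async-flash hunk above is typical of a wider cleanup in this series: the open-coded parse_mtd_partitions() call, the driver-private copy of the parsed partition array and the explicit mtd_device_register() all collapse into a single mtd_device_parse_register() call. A generic sketch of what a converted map driver ends up doing (the function and variable names here are hypothetical):

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static const char * const probe_types[] = { "cmdlinepart", "RedBoot", NULL };

/* 'mtd' comes from do_map_probe(); 'parts'/'nr_parts' are optional board
 * defaults, used only when none of the parsers finds a partition table. */
int example_register(struct mtd_info *mtd,
                     const struct mtd_partition *parts, int nr_parts)
{
        return mtd_device_parse_register(mtd, probe_types, NULL,
                                         parts, nr_parts);
}
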
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
deleted file mode 100644
index c29cbf87ea0..00000000000
--- a/drivers/mtd/maps/cdb89712.c
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Flash on Cirrus CDB89712
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-/* dynamic ioremap() areas */
-#define FLASH_START 0x00000000
-#define FLASH_SIZE 0x800000
-#define FLASH_WIDTH 4
-
-#define SRAM_START 0x60000000
-#define SRAM_SIZE 0xc000
-#define SRAM_WIDTH 4
-
-#define BOOTROM_START 0x70000000
-#define BOOTROM_SIZE 0x80
-#define BOOTROM_WIDTH 4
-
-
-static struct mtd_info *flash_mtd;
-
-struct map_info cdb89712_flash_map = {
- .name = "flash",
- .size = FLASH_SIZE,
- .bankwidth = FLASH_WIDTH,
- .phys = FLASH_START,
-};
-
-struct resource cdb89712_flash_resource = {
- .name = "Flash",
- .start = FLASH_START,
- .end = FLASH_START + FLASH_SIZE - 1,
- .flags = IORESOURCE_IO | IORESOURCE_BUSY,
-};
-
-static int __init init_cdb89712_flash (void)
-{
- int err;
-
- if (request_resource (&ioport_resource, &cdb89712_flash_resource)) {
- printk(KERN_NOTICE "Failed to reserve Cdb89712 FLASH space\n");
- err = -EBUSY;
- goto out;
- }
-
- cdb89712_flash_map.virt = ioremap(FLASH_START, FLASH_SIZE);
- if (!cdb89712_flash_map.virt) {
- printk(KERN_NOTICE "Failed to ioremap Cdb89712 FLASH space\n");
- err = -EIO;
- goto out_resource;
- }
- simple_map_init(&cdb89712_flash_map);
- flash_mtd = do_map_probe("cfi_probe", &cdb89712_flash_map);
- if (!flash_mtd) {
- flash_mtd = do_map_probe("map_rom", &cdb89712_flash_map);
- if (flash_mtd)
- flash_mtd->erasesize = 0x10000;
- }
- if (!flash_mtd) {
- printk("FLASH probe failed\n");
- err = -ENXIO;
- goto out_ioremap;
- }
-
- flash_mtd->owner = THIS_MODULE;
-
- if (mtd_device_register(flash_mtd, NULL, 0)) {
- printk("FLASH device addition failed\n");
- err = -ENOMEM;
- goto out_probe;
- }
-
- return 0;
-
-out_probe:
- map_destroy(flash_mtd);
- flash_mtd = 0;
-out_ioremap:
- iounmap((void *)cdb89712_flash_map.virt);
-out_resource:
- release_resource (&cdb89712_flash_resource);
-out:
- return err;
-}
-
-
-
-
-
-static struct mtd_info *sram_mtd;
-
-struct map_info cdb89712_sram_map = {
- .name = "SRAM",
- .size = SRAM_SIZE,
- .bankwidth = SRAM_WIDTH,
- .phys = SRAM_START,
-};
-
-struct resource cdb89712_sram_resource = {
- .name = "SRAM",
- .start = SRAM_START,
- .end = SRAM_START + SRAM_SIZE - 1,
- .flags = IORESOURCE_IO | IORESOURCE_BUSY,
-};
-
-static int __init init_cdb89712_sram (void)
-{
- int err;
-
- if (request_resource (&ioport_resource, &cdb89712_sram_resource)) {
- printk(KERN_NOTICE "Failed to reserve Cdb89712 SRAM space\n");
- err = -EBUSY;
- goto out;
- }
-
- cdb89712_sram_map.virt = ioremap(SRAM_START, SRAM_SIZE);
- if (!cdb89712_sram_map.virt) {
- printk(KERN_NOTICE "Failed to ioremap Cdb89712 SRAM space\n");
- err = -EIO;
- goto out_resource;
- }
- simple_map_init(&cdb89712_sram_map);
- sram_mtd = do_map_probe("map_ram", &cdb89712_sram_map);
- if (!sram_mtd) {
- printk("SRAM probe failed\n");
- err = -ENXIO;
- goto out_ioremap;
- }
-
- sram_mtd->owner = THIS_MODULE;
- sram_mtd->erasesize = 16;
-
- if (mtd_device_register(sram_mtd, NULL, 0)) {
- printk("SRAM device addition failed\n");
- err = -ENOMEM;
- goto out_probe;
- }
-
- return 0;
-
-out_probe:
- map_destroy(sram_mtd);
- sram_mtd = 0;
-out_ioremap:
- iounmap((void *)cdb89712_sram_map.virt);
-out_resource:
- release_resource (&cdb89712_sram_resource);
-out:
- return err;
-}
-
-
-
-
-
-
-
-static struct mtd_info *bootrom_mtd;
-
-struct map_info cdb89712_bootrom_map = {
- .name = "BootROM",
- .size = BOOTROM_SIZE,
- .bankwidth = BOOTROM_WIDTH,
- .phys = BOOTROM_START,
-};
-
-struct resource cdb89712_bootrom_resource = {
- .name = "BootROM",
- .start = BOOTROM_START,
- .end = BOOTROM_START + BOOTROM_SIZE - 1,
- .flags = IORESOURCE_IO | IORESOURCE_BUSY,
-};
-
-static int __init init_cdb89712_bootrom (void)
-{
- int err;
-
- if (request_resource (&ioport_resource, &cdb89712_bootrom_resource)) {
- printk(KERN_NOTICE "Failed to reserve Cdb89712 BOOTROM space\n");
- err = -EBUSY;
- goto out;
- }
-
- cdb89712_bootrom_map.virt = ioremap(BOOTROM_START, BOOTROM_SIZE);
- if (!cdb89712_bootrom_map.virt) {
- printk(KERN_NOTICE "Failed to ioremap Cdb89712 BootROM space\n");
- err = -EIO;
- goto out_resource;
- }
- simple_map_init(&cdb89712_bootrom_map);
- bootrom_mtd = do_map_probe("map_rom", &cdb89712_bootrom_map);
- if (!bootrom_mtd) {
- printk("BootROM probe failed\n");
- err = -ENXIO;
- goto out_ioremap;
- }
-
- bootrom_mtd->owner = THIS_MODULE;
- bootrom_mtd->erasesize = 0x10000;
-
- if (mtd_device_register(bootrom_mtd, NULL, 0)) {
- printk("BootROM device addition failed\n");
- err = -ENOMEM;
- goto out_probe;
- }
-
- return 0;
-
-out_probe:
- map_destroy(bootrom_mtd);
- bootrom_mtd = 0;
-out_ioremap:
- iounmap((void *)cdb89712_bootrom_map.virt);
-out_resource:
- release_resource (&cdb89712_bootrom_resource);
-out:
- return err;
-}
-
-
-
-
-
-static int __init init_cdb89712_maps(void)
-{
-
- printk(KERN_INFO "Cirrus CDB89712 MTD mappings:\n Flash 0x%x at 0x%x\n SRAM 0x%x at 0x%x\n BootROM 0x%x at 0x%x\n",
- FLASH_SIZE, FLASH_START, SRAM_SIZE, SRAM_START, BOOTROM_SIZE, BOOTROM_START);
-
- init_cdb89712_flash();
- init_cdb89712_sram();
- init_cdb89712_bootrom();
-
- return 0;
-}
-
-
-static void __exit cleanup_cdb89712_maps(void)
-{
- if (sram_mtd) {
- mtd_device_unregister(sram_mtd);
- map_destroy(sram_mtd);
- iounmap((void *)cdb89712_sram_map.virt);
- release_resource (&cdb89712_sram_resource);
- }
-
- if (flash_mtd) {
- mtd_device_unregister(flash_mtd);
- map_destroy(flash_mtd);
- iounmap((void *)cdb89712_flash_map.virt);
- release_resource (&cdb89712_flash_resource);
- }
-
- if (bootrom_mtd) {
- mtd_device_unregister(bootrom_mtd);
- map_destroy(bootrom_mtd);
- iounmap((void *)cdb89712_bootrom_map.virt);
- release_resource (&cdb89712_bootrom_resource);
- }
-}
-
-module_init(init_cdb89712_maps);
-module_exit(cleanup_cdb89712_maps);
-
-MODULE_AUTHOR("Ray L");
-MODULE_DESCRIPTION("ARM CDB89712 map driver");
-MODULE_LICENSE("GPL");
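
The deleted cdb89712 driver above shows a common map-driver idiom: probe for CFI first and, if nothing answers, fall back to a dumb ROM mapping with a hand-picked erase size so that partitioning still has a block size to work with. A condensed sketch of that fallback, assuming 'map' has already been ioremapped and passed through simple_map_init():

#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>

struct mtd_info *probe_flash_or_rom(struct map_info *map)
{
        struct mtd_info *mtd = do_map_probe("cfi_probe", map);

        if (!mtd) {
                /* No CFI chip found: expose the window as ROM instead. */
                mtd = do_map_probe("map_rom", map);
                if (mtd)
                        mtd->erasesize = 0x10000;       /* arbitrary 64 KiB blocks */
        }
        return mtd;
}
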
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
deleted file mode 100644
index 06f9c981572..00000000000
--- a/drivers/mtd/maps/ceiva.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Ceiva flash memory driver.
- * Copyright (C) 2002 Rob Scott <rscott@mtrob.fdns.net>
- *
- * Note: this driver supports jedec compatible devices. Modification
- * for CFI compatible devices should be straight forward: change
- * jedec_probe to cfi_probe.
- *
- * Based on: sa1100-flash.c, which has the following copyright:
- * Flash memory access on SA11x0 based devices
- *
- * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/concat.h>
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-#include <asm/io.h>
-#include <asm/sizes.h>
-
-/*
- * This isn't complete yet, so...
- */
-#define CONFIG_MTD_CEIVA_STATICMAP
-
-#ifdef CONFIG_MTD_CEIVA_STATICMAP
-/*
- * See include/linux/mtd/partitions.h for definition of the mtd_partition
- * structure.
- *
- * Please note:
- * 1. The flash size given should be the largest flash size that can
- * be accommodated.
- *
- * 2. The bus width must be defined in clps_setup_flash.
- *
- * The MTD layer will detect flash chip aliasing and reduce the size of
- * the map accordingly.
- *
- */
-
-#ifdef CONFIG_ARCH_CEIVA
-/* Flash / Partition sizing */
-/* For the 28F8003, we use the block mapping to calculate the sizes */
-#define MAX_SIZE_KiB (16 + 8 + 8 + 96 + (7*128))
-#define BOOT_PARTITION_SIZE_KiB (16)
-#define PARAMS_PARTITION_SIZE_KiB (8)
-#define KERNEL_PARTITION_SIZE_KiB (4*128)
-/* Use both remaining portion of first flash, and all of second flash */
-#define ROOT_PARTITION_SIZE_KiB (3*128) + (8*128)
-
-static struct mtd_partition ceiva_partitions[] = {
- {
- .name = "Ceiva BOOT partition",
- .size = BOOT_PARTITION_SIZE_KiB*1024,
- .offset = 0,
-
- },{
- .name = "Ceiva parameters partition",
- .size = PARAMS_PARTITION_SIZE_KiB*1024,
- .offset = (16 + 8) * 1024,
- },{
- .name = "Ceiva kernel partition",
- .size = (KERNEL_PARTITION_SIZE_KiB)*1024,
- .offset = 0x20000,
-
- },{
- .name = "Ceiva root filesystem partition",
- .offset = MTDPART_OFS_APPEND,
- .size = (ROOT_PARTITION_SIZE_KiB)*1024,
- }
-};
-#endif
-
-static int __init clps_static_partitions(struct mtd_partition **parts)
-{
- int nb_parts = 0;
-
-#ifdef CONFIG_ARCH_CEIVA
- if (machine_is_ceiva()) {
- *parts = ceiva_partitions;
- nb_parts = ARRAY_SIZE(ceiva_partitions);
- }
-#endif
- return nb_parts;
-}
-#endif
-
-struct clps_info {
- unsigned long base;
- unsigned long size;
- int width;
- void *vbase;
- struct map_info *map;
- struct mtd_info *mtd;
- struct resource *res;
-};
-
-#define NR_SUBMTD 4
-
-static struct clps_info info[NR_SUBMTD];
-
-static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info **rmtd)
-{
- struct mtd_info *subdev[nr];
- struct map_info *maps;
- int i, found = 0, ret = 0;
-
- /*
- * Allocate the map_info structs in one go.
- */
- maps = kzalloc(sizeof(struct map_info) * nr, GFP_KERNEL);
- if (!maps)
- return -ENOMEM;
- /*
- * Claim and then map the memory regions.
- */
- for (i = 0; i < nr; i++) {
- if (clps[i].base == (unsigned long)-1)
- break;
-
- clps[i].res = request_mem_region(clps[i].base, clps[i].size, "clps flash");
- if (!clps[i].res) {
- ret = -EBUSY;
- break;
- }
-
- clps[i].map = maps + i;
-
- clps[i].map->name = "clps flash";
- clps[i].map->phys = clps[i].base;
-
- clps[i].vbase = ioremap(clps[i].base, clps[i].size);
- if (!clps[i].vbase) {
- ret = -ENOMEM;
- break;
- }
-
- clps[i].map->virt = (void __iomem *)clps[i].vbase;
- clps[i].map->bankwidth = clps[i].width;
- clps[i].map->size = clps[i].size;
-
- simple_map_init(clps[i].map);
-
- clps[i].mtd = do_map_probe("jedec_probe", clps[i].map);
- if (clps[i].mtd == NULL) {
- ret = -ENXIO;
- break;
- }
- clps[i].mtd->owner = THIS_MODULE;
- subdev[i] = clps[i].mtd;
-
- printk(KERN_INFO "clps flash: JEDEC device at 0x%08lx, %dMiB, "
- "%d-bit\n", clps[i].base, clps[i].mtd->size >> 20,
- clps[i].width * 8);
- found += 1;
- }
-
- /*
- * ENXIO is special. It means we didn't find a chip when
- * we probed. We need to tear down the mapping, free the
- * resource and mark it as such.
- */
- if (ret == -ENXIO) {
- iounmap(clps[i].vbase);
- clps[i].vbase = NULL;
- release_resource(clps[i].res);
- clps[i].res = NULL;
- }
-
- /*
- * If we found one device, don't bother with concat support.
- * If we found multiple devices, use concat if we have it
- * available, otherwise fail.
- */
- if (ret == 0 || ret == -ENXIO) {
- if (found == 1) {
- *rmtd = subdev[0];
- ret = 0;
- } else if (found > 1) {
- /*
- * We detected multiple devices. Concatenate
- * them together.
- */
- *rmtd = mtd_concat_create(subdev, found,
- "clps flash");
- if (*rmtd == NULL)
- ret = -ENXIO;
- }
- }
-
- /*
- * If we failed, clean up.
- */
- if (ret) {
- do {
- if (clps[i].mtd)
- map_destroy(clps[i].mtd);
- if (clps[i].vbase)
- iounmap(clps[i].vbase);
- if (clps[i].res)
- release_resource(clps[i].res);
- } while (i--);
-
- kfree(maps);
- }
-
- return ret;
-}
-
-static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd)
-{
- int i;
-
- mtd_device_unregister(mtd);
-
- if (mtd != clps[0].mtd)
- mtd_concat_destroy(mtd);
-
- for (i = NR_SUBMTD - 1; i >= 0; i--) {
- if (clps[i].mtd)
- map_destroy(clps[i].mtd);
- if (clps[i].vbase)
- iounmap(clps[i].vbase);
- if (clps[i].res)
- release_resource(clps[i].res);
- }
- kfree(clps[0].map);
-}
-
-/*
- * We define the memory space, size, and width for the flash memory
- * space here.
- */
-
-static int __init clps_setup_flash(void)
-{
- int nr = 0;
-
-#ifdef CONFIG_ARCH_CEIVA
- if (machine_is_ceiva()) {
- info[0].base = CS0_PHYS_BASE;
- info[0].size = SZ_32M;
- info[0].width = CEIVA_FLASH_WIDTH;
- info[1].base = CS1_PHYS_BASE;
- info[1].size = SZ_32M;
- info[1].width = CEIVA_FLASH_WIDTH;
- nr = 2;
- }
-#endif
- return nr;
-}
-
-static struct mtd_partition *parsed_parts;
-static const char *probes[] = { "cmdlinepart", "RedBoot", NULL };
-
-static void __init clps_locate_partitions(struct mtd_info *mtd)
-{
- const char *part_type = NULL;
- int nr_parts = 0;
- do {
- /*
- * Partition selection stuff.
- */
- nr_parts = parse_mtd_partitions(mtd, probes, &parsed_parts, 0);
- if (nr_parts > 0) {
- part_type = "command line";
- break;
- }
-#ifdef CONFIG_MTD_CEIVA_STATICMAP
- nr_parts = clps_static_partitions(&parsed_parts);
- if (nr_parts > 0) {
- part_type = "static";
- break;
- }
- printk("found: %d partitions\n", nr_parts);
-#endif
- } while (0);
-
- if (nr_parts == 0) {
- printk(KERN_NOTICE "clps flash: no partition info "
- "available, registering whole flash\n");
- mtd_device_register(mtd, NULL, 0);
- } else {
- printk(KERN_NOTICE "clps flash: using %s partition "
- "definition\n", part_type);
- mtd_device_register(mtd, parsed_parts, nr_parts);
- }
-
- /* Always succeeds. */
-}
-
-static void __exit clps_destroy_partitions(void)
-{
- kfree(parsed_parts);
-}
-
-static struct mtd_info *mymtd;
-
-static int __init clps_mtd_init(void)
-{
- int ret;
- int nr;
-
- nr = clps_setup_flash();
- if (nr < 0)
- return nr;
-
- ret = clps_setup_mtd(info, nr, &mymtd);
- if (ret)
- return ret;
-
- clps_locate_partitions(mymtd);
-
- return 0;
-}
-
-static void __exit clps_mtd_cleanup(void)
-{
- clps_destroy_mtd(info, mymtd);
- clps_destroy_partitions();
-}
-
-module_init(clps_mtd_init);
-module_exit(clps_mtd_cleanup);
-
-MODULE_AUTHOR("Rob Scott");
-MODULE_DESCRIPTION("Cirrus Logic JEDEC map driver");
-MODULE_LICENSE("GPL");
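
The deleted ceiva driver above also demonstrates mtd_concat_create(), which glues several probed chips into one logical MTD device. A minimal sketch, assuming two chips have already been probed into 'subdev' (the wrapper function name is made up):

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

/* Combine two probed chips into one logical MTD; returns NULL on failure.
 * The result is torn down later with mtd_concat_destroy(). */
struct mtd_info *combine_banks(struct mtd_info *subdev[2])
{
        return mtd_concat_create(subdev, 2, "example concatenated flash");
}
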
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index d16fc9d3b8c..d504b3d1791 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -55,13 +55,13 @@
#define FLASH_PARTITION3_SIZE 0x001C0000
-struct map_info flagadm_map = {
+static struct map_info flagadm_map = {
.name = "FlagaDM flash device",
.size = FLASH_SIZE,
.bankwidth = 2,
};
-struct mtd_partition flagadm_parts[] = {
+static struct mtd_partition flagadm_parts[] = {
{
.name = "Bootloader",
.offset = FLASH_PARTITION0_ADDR,
@@ -112,7 +112,7 @@ static int __init init_flagadm(void)
return 0;
}
- iounmap((void *)flagadm_map.virt);
+ iounmap((void __iomem *)flagadm_map.virt);
return -ENXIO;
}
@@ -123,8 +123,8 @@ static void __exit cleanup_flagadm(void)
map_destroy(mymtd);
}
if (flagadm_map.virt) {
- iounmap((void *)flagadm_map.virt);
- flagadm_map.virt = 0;
+ iounmap((void __iomem *)flagadm_map.virt);
+ flagadm_map.virt = NULL;
}
}
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 3d0e762fa5f..0455166f05f 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -112,8 +112,8 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
}
-static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ck804xrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
u8 byte;
@@ -308,8 +308,7 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
out:
/* Free any left over map structures */
- if (map)
- kfree(map);
+ kfree(map);
/* See if I have any map structures */
if (list_empty(&window->maps)) {
@@ -320,7 +319,7 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
}
-static void __devexit ck804xrom_remove_one (struct pci_dev *pdev)
+static void ck804xrom_remove_one(struct pci_dev *pdev)
{
struct ck804xrom_window *window = &ck804xrom_window;
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
deleted file mode 100644
index 85bdece6ab3..00000000000
--- a/drivers/mtd/maps/dbox2-flash.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * D-Box 2 flash driver
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/errno.h>
-
-/* partition_info gives details on the logical partitions that the single
- * flash device is split into. If the size is zero we use up to the end of
- * the device. */
-static struct mtd_partition partition_info[]= {
- {
- .name = "BR bootloader",
- .size = 128 * 1024,
- .offset = 0,
- .mask_flags = MTD_WRITEABLE
- },
- {
- .name = "FLFS (U-Boot)",
- .size = 128 * 1024,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = 0
- },
- {
- .name = "Root (SquashFS)",
- .size = 7040 * 1024,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = 0
- },
- {
- .name = "var (JFFS2)",
- .size = 896 * 1024,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = 0
- },
- {
- .name = "Flash without bootloader",
- .size = MTDPART_SIZ_FULL,
- .offset = 128 * 1024,
- .mask_flags = 0
- },
- {
- .name = "Complete Flash",
- .size = MTDPART_SIZ_FULL,
- .offset = 0,
- .mask_flags = MTD_WRITEABLE
- }
-};
-
-#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
-
-#define WINDOW_ADDR 0x10000000
-#define WINDOW_SIZE 0x800000
-
-static struct mtd_info *mymtd;
-
-
-struct map_info dbox2_flash_map = {
- .name = "D-Box 2 flash memory",
- .size = WINDOW_SIZE,
- .bankwidth = 4,
- .phys = WINDOW_ADDR,
-};
-
-static int __init init_dbox2_flash(void)
-{
- printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR);
- dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
-
- if (!dbox2_flash_map.virt) {
- printk("Failed to ioremap\n");
- return -EIO;
- }
- simple_map_init(&dbox2_flash_map);
-
- // Probe for dual Intel 28F320 or dual AMD
- mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
- if (!mymtd) {
- // Probe for single Intel 28F640
- dbox2_flash_map.bankwidth = 2;
-
- mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
- }
-
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
-
- /* Create MTD devices for each partition. */
- mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
-
- return 0;
- }
-
- iounmap((void *)dbox2_flash_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_dbox2_flash(void)
-{
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
- if (dbox2_flash_map.virt) {
- iounmap((void *)dbox2_flash_map.virt);
- dbox2_flash_map.virt = 0;
- }
-}
-
-module_init(init_dbox2_flash);
-module_exit(cleanup_dbox2_flash);
-
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>, Bastian Blank <waldi@tuxbox.org>, Alexander Wild <wild@te-elektronik.com>");
-MODULE_DESCRIPTION("MTD map driver for D-Box 2 board");
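
The probe sequence in the deleted dbox2 driver above is another fallback worth noting: it first tries the interleaved two-chip geometry (bank width 4) and, if the CFI probe fails, retries as a single 16-bit chip. A condensed, hypothetical sketch of that retry:

#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>

struct mtd_info *probe_with_bankwidth_fallback(struct map_info *map)
{
        struct mtd_info *mtd;

        map->bankwidth = 4;                     /* two 16-bit chips interleaved */
        mtd = do_map_probe("cfi_probe", map);
        if (!mtd) {
                map->bankwidth = 2;             /* retry as a single 16-bit chip */
                mtd = do_map_probe("cfi_probe", map);
        }
        return mtd;
}
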
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index 7a9e1989c97..f8a7dd14cee 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -143,16 +143,11 @@ static struct map_info dc21285_map = {
.copy_from = dc21285_copy_from,
};
-
/* Partition stuff */
-static struct mtd_partition *dc21285_parts;
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
static int __init init_dc21285(void)
{
-
- int nrparts;
-
/* Determine bankwidth */
switch (*CSR_SA110_CNTL & (3<<14)) {
case SA110_CNTL_ROMWIDTH_8:
@@ -200,8 +195,7 @@ static int __init init_dc21285(void)
dc21285_mtd->owner = THIS_MODULE;
- nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0);
- mtd_device_register(dc21285_mtd, dc21285_parts, nrparts);
+ mtd_device_parse_register(dc21285_mtd, probes, NULL, NULL, 0);
if(machine_is_ebsa285()) {
/*
@@ -224,8 +218,6 @@ static int __init init_dc21285(void)
static void __exit cleanup_dc21285(void)
{
mtd_device_unregister(dc21285_mtd);
- if (dc21285_parts)
- kfree(dc21285_parts);
map_destroy(dc21285_mtd);
iounmap(dc21285_map.virt);
}
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
deleted file mode 100644
index 3e393f0da82..00000000000
--- a/drivers/mtd/maps/dilnetpc.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/* dilnetpc.c -- MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP"
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- *
- * The DIL/Net PC is a tiny embedded PC board made by SSV Embedded Systems
- * featuring the AMD Elan SC410 processor. There are two variants of this
- * board: DNP/1486 and ADNP/1486. The DNP version has 2 megs of flash
- * ROM (Intel 28F016S3) and 8 megs of DRAM, the ADNP version has 4 megs
- * flash and 16 megs of RAM.
- * For details, see http://www.ssv-embedded.de/ssv/pc104/p169.htm
- * and http://www.ssv-embedded.de/ssv/pc104/p170.htm
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/concat.h>
-
-#include <asm/io.h>
-
-/*
-** The DIL/NetPC keeps its BIOS in two distinct flash blocks.
-** Destroying any of these blocks transforms the DNPC into
-** a paperweight (albeit not a very useful one, considering
-** it only weighs a few grams).
-**
-** Therefore, the BIOS blocks must never be erased or written to
-** except by people who know exactly what they are doing (e.g.
-** to install a BIOS update). These partitions are marked read-only
-** by default, but can be made read/write by undefining
-** DNPC_BIOS_BLOCKS_WRITEPROTECTED:
-*/
-#define DNPC_BIOS_BLOCKS_WRITEPROTECTED
-
-/*
-** The ID string (in ROM) is checked to determine whether we
-** are running on a DNP/1486 or ADNP/1486
-*/
-#define BIOSID_BASE 0x000fe100
-
-#define ID_DNPC "DNP1486"
-#define ID_ADNP "ADNP1486"
-
-/*
-** Address where the flash should appear in CPU space
-*/
-#define FLASH_BASE 0x2000000
-
-/*
-** Chip Setup and Control (CSC) indexed register space
-*/
-#define CSC_INDEX 0x22
-#define CSC_DATA 0x23
-
-#define CSC_MMSWAR 0x30 /* MMS window C-F attributes register */
-#define CSC_MMSWDSR 0x31 /* MMS window C-F device select register */
-
-#define CSC_RBWR 0xa7 /* GPIO Read-Back/Write Register B */
-
-#define CSC_CR 0xd0 /* internal I/O device disable/Echo */
- /* Z-bus/configuration register */
-
-#define CSC_PCCMDCR 0xf1 /* PC card mode and DMA control register */
-
-
-/*
-** PC Card indexed register space:
-*/
-
-#define PCC_INDEX 0x3e0
-#define PCC_DATA 0x3e1
-
-#define PCC_AWER_B 0x46 /* Socket B Address Window enable register */
-#define PCC_MWSAR_1_Lo 0x58 /* memory window 1 start address low register */
-#define PCC_MWSAR_1_Hi 0x59 /* memory window 1 start address high register */
-#define PCC_MWEAR_1_Lo 0x5A /* memory window 1 stop address low register */
-#define PCC_MWEAR_1_Hi 0x5B /* memory window 1 stop address high register */
-#define PCC_MWAOR_1_Lo 0x5C /* memory window 1 address offset low register */
-#define PCC_MWAOR_1_Hi 0x5D /* memory window 1 address offset high register */
-
-
-/*
-** Access to SC4x0's Chip Setup and Control (CSC)
-** and PC Card (PCC) indexed registers:
-*/
-static inline void setcsc(int reg, unsigned char data)
-{
- outb(reg, CSC_INDEX);
- outb(data, CSC_DATA);
-}
-
-static inline unsigned char getcsc(int reg)
-{
- outb(reg, CSC_INDEX);
- return(inb(CSC_DATA));
-}
-
-static inline void setpcc(int reg, unsigned char data)
-{
- outb(reg, PCC_INDEX);
- outb(data, PCC_DATA);
-}
-
-static inline unsigned char getpcc(int reg)
-{
- outb(reg, PCC_INDEX);
- return(inb(PCC_DATA));
-}
-
-
-/*
-************************************************************
-** Enable access to DIL/NetPC's flash by mapping it into
-** the SC4x0's MMS Window C.
-************************************************************
-*/
-static void dnpc_map_flash(unsigned long flash_base, unsigned long flash_size)
-{
- unsigned long flash_end = flash_base + flash_size - 1;
-
- /*
- ** enable setup of MMS windows C-F:
- */
- /* - enable PC Card indexed register space */
- setcsc(CSC_CR, getcsc(CSC_CR) | 0x2);
- /* - set PC Card controller to operate in standard mode */
- setcsc(CSC_PCCMDCR, getcsc(CSC_PCCMDCR) & ~1);
-
- /*
- ** Program base address and end address of window
- ** where the flash ROM should appear in CPU address space
- */
- setpcc(PCC_MWSAR_1_Lo, (flash_base >> 12) & 0xff);
- setpcc(PCC_MWSAR_1_Hi, (flash_base >> 20) & 0x3f);
- setpcc(PCC_MWEAR_1_Lo, (flash_end >> 12) & 0xff);
- setpcc(PCC_MWEAR_1_Hi, (flash_end >> 20) & 0x3f);
-
- /* program offset of first flash location to appear in this window (0) */
- setpcc(PCC_MWAOR_1_Lo, ((0 - flash_base) >> 12) & 0xff);
- setpcc(PCC_MWAOR_1_Hi, ((0 - flash_base)>> 20) & 0x3f);
-
- /* set attributes for MMS window C: non-cacheable, write-enabled */
- setcsc(CSC_MMSWAR, getcsc(CSC_MMSWAR) & ~0x11);
-
- /* select physical device ROMCS0 (i.e. flash) for MMS Window C */
- setcsc(CSC_MMSWDSR, getcsc(CSC_MMSWDSR) & ~0x03);
-
- /* enable memory window 1 */
- setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) | 0x02);
-
- /* now disable PC Card indexed register space again */
- setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2);
-}
-
-
-/*
-************************************************************
-** Disable access to DIL/NetPC's flash by mapping it into
-** the SC4x0's MMS Window C.
-************************************************************
-*/
-static void dnpc_unmap_flash(void)
-{
- /* - enable PC Card indexed register space */
- setcsc(CSC_CR, getcsc(CSC_CR) | 0x2);
-
- /* disable memory window 1 */
- setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) & ~0x02);
-
- /* now disable PC Card indexed register space again */
- setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2);
-}
-
-
-
-/*
-************************************************************
-** Enable/Disable VPP to write to flash
-************************************************************
-*/
-
-static DEFINE_SPINLOCK(dnpc_spin);
-static int vpp_counter = 0;
-/*
-** This is what has to be done for the DNP board ..
-*/
-static void dnp_set_vpp(struct map_info *not_used, int on)
-{
- spin_lock_irq(&dnpc_spin);
-
- if (on)
- {
- if(++vpp_counter == 1)
- setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x4);
- }
- else
- {
- if(--vpp_counter == 0)
- setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x4);
- else
- BUG_ON(vpp_counter < 0);
- }
- spin_unlock_irq(&dnpc_spin);
-}
-
-/*
-** .. and this the ADNP version:
-*/
-static void adnp_set_vpp(struct map_info *not_used, int on)
-{
- spin_lock_irq(&dnpc_spin);
-
- if (on)
- {
- if(++vpp_counter == 1)
- setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x8);
- }
- else
- {
- if(--vpp_counter == 0)
- setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x8);
- else
- BUG_ON(vpp_counter < 0);
- }
- spin_unlock_irq(&dnpc_spin);
-}
-
-
-
-#define DNP_WINDOW_SIZE 0x00200000 /* DNP flash size is 2MiB */
-#define ADNP_WINDOW_SIZE 0x00400000 /* ADNP flash size is 4MiB */
-#define WINDOW_ADDR FLASH_BASE
-
-static struct map_info dnpc_map = {
- .name = "ADNP Flash Bank",
- .size = ADNP_WINDOW_SIZE,
- .bankwidth = 1,
- .set_vpp = adnp_set_vpp,
- .phys = WINDOW_ADDR
-};
-
-/*
-** The layout of the flash is somewhat "strange":
-**
-** 1. 960 KiB (15 blocks) : Space for ROM Bootloader and user data
-** 2. 64 KiB (1 block) : System BIOS
-** 3. 960 KiB (15 blocks) : User Data (DNP model) or
-** 3. 3008 KiB (47 blocks) : User Data (ADNP model)
-** 4. 64 KiB (1 block) : System BIOS Entry
-*/
-
-static struct mtd_partition partition_info[]=
-{
- {
- .name = "ADNP boot",
- .offset = 0,
- .size = 0xf0000,
- },
- {
- .name = "ADNP system BIOS",
- .offset = MTDPART_OFS_NXTBLK,
- .size = 0x10000,
-#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
- .mask_flags = MTD_WRITEABLE,
-#endif
- },
- {
- .name = "ADNP file system",
- .offset = MTDPART_OFS_NXTBLK,
- .size = 0x2f0000,
- },
- {
- .name = "ADNP system BIOS entry",
- .offset = MTDPART_OFS_NXTBLK,
- .size = MTDPART_SIZ_FULL,
-#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
- .mask_flags = MTD_WRITEABLE,
-#endif
- },
-};
-
-#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
-
-static struct mtd_info *mymtd;
-static struct mtd_info *lowlvl_parts[NUM_PARTITIONS];
-static struct mtd_info *merged_mtd;
-
-/*
-** "Highlevel" partition info:
-**
-** Using the MTD concat layer, we can re-arrange partitions to our
-** liking: we construct a virtual MTD device by concatenating the
-** partitions, specifying the sequence such that the boot block
-** is immediately followed by the filesystem block (i.e. the stupid
-** system BIOS block is mapped to a different place). When re-partitioning
-** this concatenated MTD device, we can set the boot block size to
-** an arbitrary (though erase block aligned) value i.e. not one that
-** is dictated by the flash's physical layout. We can thus set the
-** boot block to be e.g. 64 KB (which is fully sufficient if we want
-** to boot an etherboot image) or to -say- 1.5 MB if we want to boot
-** a large kernel image. In all cases, the remainder of the flash
-** is available as file system space.
-*/
-
-static struct mtd_partition higlvl_partition_info[]=
-{
- {
- .name = "ADNP boot block",
- .offset = 0,
- .size = CONFIG_MTD_DILNETPC_BOOTSIZE,
- },
- {
- .name = "ADNP file system space",
- .offset = MTDPART_OFS_NXTBLK,
- .size = ADNP_WINDOW_SIZE-CONFIG_MTD_DILNETPC_BOOTSIZE-0x20000,
- },
- {
- .name = "ADNP system BIOS + BIOS Entry",
- .offset = MTDPART_OFS_NXTBLK,
- .size = MTDPART_SIZ_FULL,
-#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
- .mask_flags = MTD_WRITEABLE,
-#endif
- },
-};
-
-#define NUM_HIGHLVL_PARTITIONS ARRAY_SIZE(higlvl_partition_info)
-
-
-static int dnp_adnp_probe(void)
-{
- char *biosid, rc = -1;
-
- biosid = (char*)ioremap(BIOSID_BASE, 16);
- if(biosid)
- {
- if(!strcmp(biosid, ID_DNPC))
- rc = 1; /* this is a DNPC */
- else if(!strcmp(biosid, ID_ADNP))
- rc = 0; /* this is an ADNP */
- }
- iounmap((void *)biosid);
- return(rc);
-}
-
-
-static int __init init_dnpc(void)
-{
- int is_dnp;
-
- /*
- ** determine hardware (DNP/ADNP/invalid)
- */
- if((is_dnp = dnp_adnp_probe()) < 0)
- return -ENXIO;
-
- /*
- ** Things are set up for ADNP by default
- ** -> modify all that needs to be different for DNP
- */
- if(is_dnp)
- { /*
- ** Adjust window size, select correct set_vpp function.
- ** The partitioning scheme is identical on both DNP
- ** and ADNP except for the size of the third partition.
- */
- int i;
- dnpc_map.size = DNP_WINDOW_SIZE;
- dnpc_map.set_vpp = dnp_set_vpp;
- partition_info[2].size = 0xf0000;
-
- /*
- ** increment all string pointers so the leading 'A' gets skipped,
- ** thus turning all occurrences of "ADNP ..." into "DNP ..."
- */
- ++dnpc_map.name;
- for(i = 0; i < NUM_PARTITIONS; i++)
- ++partition_info[i].name;
- higlvl_partition_info[1].size = DNP_WINDOW_SIZE -
- CONFIG_MTD_DILNETPC_BOOTSIZE - 0x20000;
- for(i = 0; i < NUM_HIGHLVL_PARTITIONS; i++)
- ++higlvl_partition_info[i].name;
- }
-
- printk(KERN_NOTICE "DIL/Net %s flash: 0x%lx at 0x%llx\n",
- is_dnp ? "DNPC" : "ADNP", dnpc_map.size, (unsigned long long)dnpc_map.phys);
-
- dnpc_map.virt = ioremap_nocache(dnpc_map.phys, dnpc_map.size);
-
- dnpc_map_flash(dnpc_map.phys, dnpc_map.size);
-
- if (!dnpc_map.virt) {
- printk("Failed to ioremap_nocache\n");
- return -EIO;
- }
- simple_map_init(&dnpc_map);
-
- printk("FLASH virtual address: 0x%p\n", dnpc_map.virt);
-
- mymtd = do_map_probe("jedec_probe", &dnpc_map);
-
- if (!mymtd)
- mymtd = do_map_probe("cfi_probe", &dnpc_map);
-
- /*
- ** If flash probes fail, try to make flashes accessible
- ** at least as ROM. Adjust erasesize in this case since
- ** the default one (128M) will break our partitioning
- */
- if (!mymtd)
- if((mymtd = do_map_probe("map_rom", &dnpc_map)))
- mymtd->erasesize = 0x10000;
-
- if (!mymtd) {
- iounmap(dnpc_map.virt);
- return -ENXIO;
- }
-
- mymtd->owner = THIS_MODULE;
-
- /*
- ** Supply pointers to lowlvl_parts[] array to add_mtd_partitions()
- ** -> add_mtd_partitions() will _not_ register MTD devices for
- ** the partitions, but will instead store pointers to the MTD
- ** objects it creates into our lowlvl_parts[] array.
- ** NOTE: we arrange the pointers such that the sequence of the
- ** partitions gets re-arranged: partition #2 follows
- ** partition #0.
- */
- partition_info[0].mtdp = &lowlvl_parts[0];
- partition_info[1].mtdp = &lowlvl_parts[2];
- partition_info[2].mtdp = &lowlvl_parts[1];
- partition_info[3].mtdp = &lowlvl_parts[3];
-
- mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
-
- /*
- ** now create a virtual MTD device by concatenating the four partitions
- ** (in the sequence given by the lowlvl_parts[] array).
- */
- merged_mtd = mtd_concat_create(lowlvl_parts, NUM_PARTITIONS, "(A)DNP Flash Concatenated");
- if(merged_mtd)
- { /*
- ** now partition the new device the way we want it. This time,
- ** we do not supply mtd pointers in higlvl_partition_info, so
- ** add_mtd_partitions() will register the devices.
- */
- mtd_device_register(merged_mtd, higlvl_partition_info,
- NUM_HIGHLVL_PARTITIONS);
- }
-
- return 0;
-}
-
-static void __exit cleanup_dnpc(void)
-{
- if(merged_mtd) {
- mtd_device_unregister(merged_mtd);
- mtd_concat_destroy(merged_mtd);
- }
-
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
- if (dnpc_map.virt) {
- iounmap(dnpc_map.virt);
- dnpc_unmap_flash();
- dnpc_map.virt = NULL;
- }
-}
-
-module_init(init_dnpc);
-module_exit(cleanup_dnpc);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH");
-MODULE_DESCRIPTION("MTD map driver for SSV DIL/NetPC DNP & ADNP");
diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c
deleted file mode 100644
index 6538ac675e0..00000000000
--- a/drivers/mtd/maps/dmv182.c
+++ /dev/null
@@ -1,146 +0,0 @@
-
-/*
- * drivers/mtd/maps/dmv182.c
- *
- * Flash map driver for the Dy4 SVME182 board
- *
- * Copyright 2003-2004, TimeSys Corporation
- *
- * Based on the SVME181 flash map, by Tom Nelson, Dot4, Inc. for TimeSys Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/errno.h>
-
-/*
- * This driver currently handles only the 16MiB user flash bank 1 on the
- * board. It does not provide access to bank 0 (contains the Dy4 FFW), bank 2
- * (VxWorks boot), or the optional 48MiB expansion flash.
- *
- * scott.wood@timesys.com: On the newer boards with 128MiB flash, it
- * now supports the first 96MiB (the boot flash bank containing FFW
- * is excluded). The VxWorks loader is in partition 1.
- */
-
-#define FLASH_BASE_ADDR 0xf0000000
-#define FLASH_BANK_SIZE (128*1024*1024)
-
-MODULE_AUTHOR("Scott Wood, TimeSys Corporation <scott.wood@timesys.com>");
-MODULE_DESCRIPTION("User-programmable flash device on the Dy4 SVME182 board");
-MODULE_LICENSE("GPL");
-
-static struct map_info svme182_map = {
- .name = "Dy4 SVME182",
- .bankwidth = 32,
- .size = 128 * 1024 * 1024
-};
-
-#define BOOTIMAGE_PART_SIZE ((6*1024*1024)-RESERVED_PART_SIZE)
-
-// Allow 6MiB for the kernel
-#define NEW_BOOTIMAGE_PART_SIZE (6 * 1024 * 1024)
-// Allow 1MiB for the bootloader
-#define NEW_BOOTLOADER_PART_SIZE (1024 * 1024)
-// Use the remaining 9MiB at the end of flash for the RFS
-#define NEW_RFS_PART_SIZE (0x01000000 - NEW_BOOTLOADER_PART_SIZE - \
- NEW_BOOTIMAGE_PART_SIZE)
-
-static struct mtd_partition svme182_partitions[] = {
- // The Lower PABS is only 128KiB, but the partition code doesn't
- // like partitions that don't end on the largest erase block
- // size of the device, even if all of the erase blocks in the
- // partition are small ones. The hardware should prevent
- // writes to the actual PABS areas.
- {
- .name = "Lower PABS and CPU 0 bootloader or kernel",
- .size = 6*1024*1024,
- .offset = 0,
- },
- {
- .name = "Root Filesystem",
- .size = 10*1024*1024,
- .offset = MTDPART_OFS_NXTBLK
- },
- {
- .name = "CPU1 Bootloader",
- .size = 1024*1024,
- .offset = MTDPART_OFS_NXTBLK,
- },
- {
- .name = "Extra",
- .size = 110*1024*1024,
- .offset = MTDPART_OFS_NXTBLK
- },
- {
- .name = "Foundation Firmware and Upper PABS",
- .size = 1024*1024,
- .offset = MTDPART_OFS_NXTBLK,
- .mask_flags = MTD_WRITEABLE // read-only
- }
-};
-
-static struct mtd_info *this_mtd;
-
-static int __init init_svme182(void)
-{
- struct mtd_partition *partitions;
- int num_parts = ARRAY_SIZE(svme182_partitions);
-
- partitions = svme182_partitions;
-
- svme182_map.virt = ioremap(FLASH_BASE_ADDR, svme182_map.size);
-
- if (svme182_map.virt == 0) {
- printk("Failed to ioremap FLASH memory area.\n");
- return -EIO;
- }
-
- simple_map_init(&svme182_map);
-
- this_mtd = do_map_probe("cfi_probe", &svme182_map);
- if (!this_mtd)
- {
- iounmap((void *)svme182_map.virt);
- return -ENXIO;
- }
-
- printk(KERN_NOTICE "SVME182 flash device: %dMiB at 0x%08x\n",
- this_mtd->size >> 20, FLASH_BASE_ADDR);
-
- this_mtd->owner = THIS_MODULE;
- mtd_device_register(this_mtd, partitions, num_parts);
-
- return 0;
-}
-
-static void __exit cleanup_svme182(void)
-{
- if (this_mtd)
- {
- mtd_device_unregister(this_mtd);
- map_destroy(this_mtd);
- }
-
- if (svme182_map.virt)
- {
- iounmap((void *)svme182_map.virt);
- svme182_map.virt = 0;
- }
-
- return;
-}
-
-module_init(init_svme182);
-module_exit(cleanup_svme182);
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
deleted file mode 100644
index fe42a212bb3..00000000000
--- a/drivers/mtd/maps/edb7312.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Handle mapping of the NOR flash on Cogent EDB7312 boards
- *
- * Copyright 2002 SYSGO Real-Time Solutions GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#define WINDOW_ADDR 0x00000000 /* physical properties of flash */
-#define WINDOW_SIZE 0x01000000
-#define BUSWIDTH 2
-#define FLASH_BLOCKSIZE_MAIN 0x20000
-#define FLASH_NUMBLOCKS_MAIN 128
-/* can be "cfi_probe", "jedec_probe", "map_rom", NULL }; */
-#define PROBETYPES { "cfi_probe", NULL }
-
-#define MSG_PREFIX "EDB7312-NOR:" /* prefix for our printk()'s */
-#define MTDID "edb7312-nor" /* for mtdparts= partitioning */
-
-static struct mtd_info *mymtd;
-
-struct map_info edb7312nor_map = {
- .name = "NOR flash on EDB7312",
- .size = WINDOW_SIZE,
- .bankwidth = BUSWIDTH,
- .phys = WINDOW_ADDR,
-};
-
-/*
- * MTD partitioning stuff
- */
-static struct mtd_partition static_partitions[3] =
-{
- {
- .name = "ARMboot",
- .size = 0x40000,
- .offset = 0
- },
- {
- .name = "Kernel",
- .size = 0x200000,
- .offset = 0x40000
- },
- {
- .name = "RootFS",
- .size = 0xDC0000,
- .offset = 0x240000
- },
-};
-
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-
-static int mtd_parts_nb = 0;
-static struct mtd_partition *mtd_parts = 0;
-
-static int __init init_edb7312nor(void)
-{
- static const char *rom_probe_types[] = PROBETYPES;
- const char **type;
- const char *part_type = 0;
-
- printk(KERN_NOTICE MSG_PREFIX "0x%08x at 0x%08x\n",
- WINDOW_SIZE, WINDOW_ADDR);
- edb7312nor_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
-
- if (!edb7312nor_map.virt) {
- printk(MSG_PREFIX "failed to ioremap\n");
- return -EIO;
- }
-
- simple_map_init(&edb7312nor_map);
-
- mymtd = 0;
- type = rom_probe_types;
- for(; !mymtd && *type; type++) {
- mymtd = do_map_probe(*type, &edb7312nor_map);
- }
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
-
- mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID);
- if (mtd_parts_nb > 0)
- part_type = "detected";
-
- if (mtd_parts_nb == 0) {
- mtd_parts = static_partitions;
- mtd_parts_nb = ARRAY_SIZE(static_partitions);
- part_type = "static";
- }
-
- if (mtd_parts_nb == 0)
- printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
- else
- printk(KERN_NOTICE MSG_PREFIX
- "using %s partition definition\n", part_type);
- /* Register the whole device first. */
- mtd_device_register(mymtd, NULL, 0);
- mtd_device_register(mymtd, mtd_parts, mtd_parts_nb);
- return 0;
- }
-
- iounmap((void *)edb7312nor_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_edb7312nor(void)
-{
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
- if (edb7312nor_map.virt) {
- iounmap((void *)edb7312nor_map.virt);
- edb7312nor_map.virt = 0;
- }
-}
-
-module_init(init_edb7312nor);
-module_exit(cleanup_edb7312nor);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
-MODULE_DESCRIPTION("Generic configurable MTD map driver");
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 08322b1c3e8..f784cf0caa1 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -144,8 +144,8 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
pci_dev_put(window->pdev);
}
-static int __devinit esb2rom_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int esb2rom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
struct esb2rom_window *window = &esb2rom_window;
@@ -378,13 +378,13 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
return 0;
}
-static void __devexit esb2rom_remove_one (struct pci_dev *pdev)
+static void esb2rom_remove_one(struct pci_dev *pdev)
{
struct esb2rom_window *window = &esb2rom_window;
esb2rom_cleanup(window);
}
-static struct pci_device_id esb2rom_pci_tbl[] __devinitdata = {
+static struct pci_device_id esb2rom_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
deleted file mode 100644
index 956e2e4f30e..00000000000
--- a/drivers/mtd/maps/fortunet.c
+++ /dev/null
@@ -1,277 +0,0 @@
-/* fortunet.c memory map
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-
-#define MAX_NUM_REGIONS 4
-#define MAX_NUM_PARTITIONS 8
-
-#define DEF_WINDOW_ADDR_PHY 0x00000000
-#define DEF_WINDOW_SIZE 0x00800000 // 8 Mega Bytes
-
-#define MTD_FORTUNET_PK "MTD FortuNet: "
-
-#define MAX_NAME_SIZE 128
-
-struct map_region
-{
- int window_addr_physical;
- int altbankwidth;
- struct map_info map_info;
- struct mtd_info *mymtd;
- struct mtd_partition parts[MAX_NUM_PARTITIONS];
- char map_name[MAX_NAME_SIZE];
- char parts_name[MAX_NUM_PARTITIONS][MAX_NAME_SIZE];
-};
-
-static struct map_region map_regions[MAX_NUM_REGIONS];
-static int map_regions_set[MAX_NUM_REGIONS] = {0,0,0,0};
-static int map_regions_parts[MAX_NUM_REGIONS] = {0,0,0,0};
-
-
-
-struct map_info default_map = {
- .size = DEF_WINDOW_SIZE,
- .bankwidth = 4,
-};
-
-static char * __init get_string_option(char *dest,int dest_size,char *sor)
-{
- if(!dest_size)
- return sor;
- dest_size--;
- while(*sor)
- {
- if(*sor==',')
- {
- sor++;
- break;
- }
- else if(*sor=='\"')
- {
- sor++;
- while(*sor)
- {
- if(*sor=='\"')
- {
- sor++;
- break;
- }
- *dest = *sor;
- dest++;
- sor++;
- dest_size--;
- if(!dest_size)
- {
- *dest = 0;
- return sor;
- }
- }
- }
- else
- {
- *dest = *sor;
- dest++;
- sor++;
- dest_size--;
- if(!dest_size)
- {
- *dest = 0;
- return sor;
- }
- }
- }
- *dest = 0;
- return sor;
-}
-
-static int __init MTD_New_Region(char *line)
-{
- char string[MAX_NAME_SIZE];
- int params[6];
- get_options (get_string_option(string,sizeof(string),line),6,params);
- if(params[0]<1)
- {
- printk(MTD_FORTUNET_PK "Bad parameters for MTD Region "
- " name,region-number[,base,size,bankwidth,altbankwidth]\n");
- return 1;
- }
- if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
- {
- printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
- params[1],MAX_NUM_REGIONS-1);
- return 1;
- }
- memset(&map_regions[params[1]],0,sizeof(map_regions[params[1]]));
- memcpy(&map_regions[params[1]].map_info,
- &default_map,sizeof(map_regions[params[1]].map_info));
- map_regions_set[params[1]] = 1;
- map_regions[params[1]].window_addr_physical = DEF_WINDOW_ADDR_PHY;
- map_regions[params[1]].altbankwidth = 2;
- map_regions[params[1]].mymtd = NULL;
- map_regions[params[1]].map_info.name = map_regions[params[1]].map_name;
- strcpy(map_regions[params[1]].map_info.name,string);
- if(params[0]>1)
- {
- map_regions[params[1]].window_addr_physical = params[2];
- }
- if(params[0]>2)
- {
- map_regions[params[1]].map_info.size = params[3];
- }
- if(params[0]>3)
- {
- map_regions[params[1]].map_info.bankwidth = params[4];
- }
- if(params[0]>4)
- {
- map_regions[params[1]].altbankwidth = params[5];
- }
- return 1;
-}
-
-static int __init MTD_New_Partition(char *line)
-{
- char string[MAX_NAME_SIZE];
- int params[4];
- get_options (get_string_option(string,sizeof(string),line),4,params);
- if(params[0]<3)
- {
- printk(MTD_FORTUNET_PK "Bad parameters for MTD Partition "
- " name,region-number,size,offset\n");
- return 1;
- }
- if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
- {
- printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
- params[1],MAX_NUM_REGIONS-1);
- return 1;
- }
- if(map_regions_parts[params[1]]>=MAX_NUM_PARTITIONS)
- {
- printk(MTD_FORTUNET_PK "Out of space for partition in this region\n");
- return 1;
- }
- map_regions[params[1]].parts[map_regions_parts[params[1]]].name =
- map_regions[params[1]]. parts_name[map_regions_parts[params[1]]];
- strcpy(map_regions[params[1]].parts[map_regions_parts[params[1]]].name,string);
- map_regions[params[1]].parts[map_regions_parts[params[1]]].size =
- params[2];
- map_regions[params[1]].parts[map_regions_parts[params[1]]].offset =
- params[3];
- map_regions[params[1]].parts[map_regions_parts[params[1]]].mask_flags = 0;
- map_regions_parts[params[1]]++;
- return 1;
-}
-
-__setup("MTD_Region=", MTD_New_Region);
-__setup("MTD_Partition=", MTD_New_Partition);
-
-/* Backwards-spelling-compatibility */
-__setup("MTD_Partion=", MTD_New_Partition);
-
-static int __init init_fortunet(void)
-{
- int ix,iy;
- for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++)
- {
- if(map_regions_parts[ix]&&(!map_regions_set[ix]))
- {
- printk(MTD_FORTUNET_PK "Region %d is not setup (Setting to default)\n",
- ix);
- memset(&map_regions[ix],0,sizeof(map_regions[ix]));
- memcpy(&map_regions[ix].map_info,&default_map,
- sizeof(map_regions[ix].map_info));
- map_regions_set[ix] = 1;
- map_regions[ix].window_addr_physical = DEF_WINDOW_ADDR_PHY;
- map_regions[ix].altbankwidth = 2;
- map_regions[ix].mymtd = NULL;
- map_regions[ix].map_info.name = map_regions[ix].map_name;
- strcpy(map_regions[ix].map_info.name,"FORTUNET");
- }
- if(map_regions_set[ix])
- {
- iy++;
- printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash device at physically "
- " address %x size %x\n",
- map_regions[ix].map_info.name,
- map_regions[ix].window_addr_physical,
- map_regions[ix].map_info.size);
-
- map_regions[ix].map_info.phys = map_regions[ix].window_addr_physical;
-
- map_regions[ix].map_info.virt =
- ioremap_nocache(
- map_regions[ix].window_addr_physical,
- map_regions[ix].map_info.size);
- if(!map_regions[ix].map_info.virt)
- {
- int j = 0;
- printk(MTD_FORTUNET_PK "%s flash failed to ioremap!\n",
- map_regions[ix].map_info.name);
- for (j = 0 ; j < ix; j++)
- iounmap(map_regions[j].map_info.virt);
- return -ENXIO;
- }
- simple_map_init(&map_regions[ix].map_info);
-
- printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash is virtually at: %x\n",
- map_regions[ix].map_info.name,
- map_regions[ix].map_info.virt);
- map_regions[ix].mymtd = do_map_probe("cfi_probe",
- &map_regions[ix].map_info);
- if((!map_regions[ix].mymtd)&&(
- map_regions[ix].altbankwidth!=map_regions[ix].map_info.bankwidth))
- {
- printk(KERN_NOTICE MTD_FORTUNET_PK "Trying alternate bankwidth "
- "for %s flash.\n",
- map_regions[ix].map_info.name);
- map_regions[ix].map_info.bankwidth =
- map_regions[ix].altbankwidth;
- map_regions[ix].mymtd = do_map_probe("cfi_probe",
- &map_regions[ix].map_info);
- }
- map_regions[ix].mymtd->owner = THIS_MODULE;
- mtd_device_register(map_regions[ix].mymtd,
- map_regions[ix].parts,
- map_regions_parts[ix]);
- }
- }
- if(iy)
- return 0;
- return -ENXIO;
-}
-
-static void __exit cleanup_fortunet(void)
-{
- int ix;
- for(ix=0;ix<MAX_NUM_REGIONS;ix++)
- {
- if(map_regions_set[ix])
- {
- if( map_regions[ix].mymtd )
- {
- mtd_device_unregister(map_regions[ix].mymtd);
- map_destroy( map_regions[ix].mymtd );
- }
- iounmap((void *)map_regions[ix].map_info.virt);
- }
- }
-}
-
-module_init(init_fortunet);
-module_exit(cleanup_fortunet);
-
-MODULE_AUTHOR("FortuNet, Inc.");
-MODULE_DESCRIPTION("MTD map driver for FortuNet boards");
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 7568c5f8b8a..a4c477b9fdd 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -14,7 +14,6 @@
*/
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -26,7 +25,8 @@
#include <linux/slab.h>
#include <linux/types.h>
-#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
+#define pr_devinit(fmt, args...) \
+ ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
#define DRIVER_NAME "gpio-addr-flash"
#define PFX DRIVER_NAME ": "
@@ -142,7 +142,8 @@ static void gf_write(struct map_info *map, map_word d1, unsigned long ofs)
*
* See gf_copy_from() caveat.
*/
-static void gf_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+static void gf_copy_to(struct map_info *map, unsigned long to,
+ const void *from, ssize_t len)
{
struct async_state *state = gf_map_info_to_state(map);
@@ -155,7 +156,8 @@ static void gf_copy_to(struct map_info *map, unsigned long to, const void *from,
memcpy_toio(map->virt + (to % state->win_size), from, len);
}
-static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
+static const char * const part_probe_types[] = {
+ "cmdlinepart", "RedBoot", NULL };
/**
* gpio_flash_probe() - setup a mapping for a GPIO assisted flash
@@ -185,16 +187,15 @@ static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
* ...
* };
*/
-static int __devinit gpio_flash_probe(struct platform_device *pdev)
+static int gpio_flash_probe(struct platform_device *pdev)
{
- int nr_parts;
size_t i, arr_size;
struct physmap_flash_data *pdata;
struct resource *memory;
struct resource *gpios;
struct async_state *state;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
gpios = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -252,25 +253,14 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
return -ENXIO;
}
- nr_parts = parse_mtd_partitions(state->mtd, part_probe_types,
- &pdata->parts, 0);
- if (nr_parts > 0) {
- pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n");
- kfree(pdata->parts);
- } else if (pdata->nr_parts) {
- pr_devinit(KERN_NOTICE PFX "Using board partition definition\n");
- nr_parts = pdata->nr_parts;
- } else {
- pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n");
- nr_parts = 0;
- }
- mtd_device_register(state->mtd, pdata->parts, nr_parts);
+ mtd_device_parse_register(state->mtd, part_probe_types, NULL,
+ pdata->parts, pdata->nr_parts);
return 0;
}
-static int __devexit gpio_flash_remove(struct platform_device *pdev)
+static int gpio_flash_remove(struct platform_device *pdev)
{
struct async_state *state = platform_get_drvdata(pdev);
size_t i = 0;
@@ -285,23 +275,13 @@ static int __devexit gpio_flash_remove(struct platform_device *pdev)
static struct platform_driver gpio_flash_driver = {
.probe = gpio_flash_probe,
- .remove = __devexit_p(gpio_flash_remove),
+ .remove = gpio_flash_remove,
.driver = {
.name = DRIVER_NAME,
},
};
-static int __init gpio_flash_init(void)
-{
- return platform_driver_register(&gpio_flash_driver);
-}
-module_init(gpio_flash_init);
-
-static void __exit gpio_flash_exit(void)
-{
- platform_driver_unregister(&gpio_flash_driver);
-}
-module_exit(gpio_flash_exit);
+module_platform_driver(gpio_flash_driver);
MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
MODULE_DESCRIPTION("MTD map driver for flashes addressed physically and with gpios");
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
deleted file mode 100644
index 7f035860a36..00000000000
--- a/drivers/mtd/maps/h720x-flash.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Flash memory access on Hynix GMS30C7201/HMS30C7202 based
- * evaluation boards
- *
- * (C) 2002 Jungjun Kim <jungjun.kim@hynix.com>
- * 2003 Thomas Gleixner <tglx@linutronix.de>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <mach/hardware.h>
-#include <asm/io.h>
-
-static struct mtd_info *mymtd;
-
-static struct map_info h720x_map = {
- .name = "H720X",
- .bankwidth = 4,
- .size = H720X_FLASH_SIZE,
- .phys = H720X_FLASH_PHYS,
-};
-
-static struct mtd_partition h720x_partitions[] = {
- {
- .name = "ArMon",
- .size = 0x00080000,
- .offset = 0,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "Env",
- .size = 0x00040000,
- .offset = 0x00080000,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "Kernel",
- .size = 0x00180000,
- .offset = 0x000c0000,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "Ramdisk",
- .size = 0x00400000,
- .offset = 0x00240000,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "jffs2",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND
- }
-};
-
-#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions)
-
-static int nr_mtd_parts;
-static struct mtd_partition *mtd_parts;
-static const char *probes[] = { "cmdlinepart", NULL };
-
-/*
- * Initialize FLASH support
- */
-static int __init h720x_mtd_init(void)
-{
-
- char *part_type = NULL;
-
- h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size);
-
- if (!h720x_map.virt) {
- printk(KERN_ERR "H720x-MTD: ioremap failed\n");
- return -EIO;
- }
-
- simple_map_init(&h720x_map);
-
- // Probe for flash bankwidth 4
- printk (KERN_INFO "H720x-MTD probing 32bit FLASH\n");
- mymtd = do_map_probe("cfi_probe", &h720x_map);
- if (!mymtd) {
- printk (KERN_INFO "H720x-MTD probing 16bit FLASH\n");
- // Probe for bankwidth 2
- h720x_map.bankwidth = 2;
- mymtd = do_map_probe("cfi_probe", &h720x_map);
- }
-
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
-
- nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0);
- if (nr_mtd_parts > 0)
- part_type = "command line";
- if (nr_mtd_parts <= 0) {
- mtd_parts = h720x_partitions;
- nr_mtd_parts = NUM_PARTITIONS;
- part_type = "builtin";
- }
- printk(KERN_INFO "Using %s partition table\n", part_type);
- mtd_device_register(mymtd, mtd_parts, nr_mtd_parts);
- return 0;
- }
-
- iounmap((void *)h720x_map.virt);
- return -ENXIO;
-}
-
-/*
- * Cleanup
- */
-static void __exit h720x_mtd_cleanup(void)
-{
-
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
-
- /* Free partition info, if commandline partition was used */
- if (mtd_parts && (mtd_parts != h720x_partitions))
- kfree (mtd_parts);
-
- if (h720x_map.virt) {
- iounmap((void *)h720x_map.virt);
- h720x_map.virt = 0;
- }
-}
-
-
-module_init(h720x_mtd_init);
-module_exit(h720x_mtd_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
-MODULE_DESCRIPTION("MTD map driver for Hynix evaluation boards");
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 6689dcb3124..c7478e18f48 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -84,8 +84,8 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
}
-static int __devinit ichxrom_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ichxrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
struct ichxrom_window *window = &ichxrom_window;
@@ -315,13 +315,13 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
}
-static void __devexit ichxrom_remove_one (struct pci_dev *pdev)
+static void ichxrom_remove_one(struct pci_dev *pdev)
{
struct ichxrom_window *window = &ichxrom_window;
ichxrom_cleanup(window);
}
-static struct pci_device_id ichxrom_pci_tbl[] __devinitdata = {
+static struct pci_device_id ichxrom_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 404a50cbafa..15bbda03be6 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -24,14 +24,12 @@
#define NUM_FLASHBANKS 2
#define BUSWIDTH 4
-/* can be { "cfi_probe", "jedec_probe", "map_rom", NULL } */
-#define PROBETYPES { "jedec_probe", NULL }
-
#define MSG_PREFIX "impA7:" /* prefix for our printk()'s */
#define MTDID "impa7-%d" /* for mtdparts= partitioning */
static struct mtd_info *impa7_mtd[NUM_FLASHBANKS];
+static const char * const rom_probe_types[] = { "jedec_probe", NULL };
static struct map_info impa7_map[NUM_FLASHBANKS] = {
{
@@ -49,7 +47,7 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
/*
* MTD partitioning stuff
*/
-static struct mtd_partition static_partitions[] =
+static struct mtd_partition partitions[] =
{
{
.name = "FileSystem",
@@ -58,16 +56,9 @@ static struct mtd_partition static_partitions[] =
},
};
-static int mtd_parts_nb[NUM_FLASHBANKS];
-static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
-
-static const char *probes[] = { "cmdlinepart", NULL };
-
static int __init init_impa7(void)
{
- static const char *rom_probe_types[] = PROBETYPES;
- const char **type;
- const char *part_type = 0;
+ const char * const *type;
int i;
static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
{ WINDOW_ADDR0, WINDOW_SIZE0 },
@@ -88,7 +79,7 @@ static int __init init_impa7(void)
}
simple_map_init(&impa7_map[i]);
- impa7_mtd[i] = 0;
+ impa7_mtd[i] = NULL;
type = rom_probe_types;
for(; !impa7_mtd[i] && *type; type++) {
impa7_mtd[i] = do_map_probe(*type, &impa7_map[i]);
@@ -97,26 +88,12 @@ static int __init init_impa7(void)
if (impa7_mtd[i]) {
impa7_mtd[i]->owner = THIS_MODULE;
devicesfound++;
- mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i],
- probes,
- &mtd_parts[i],
- 0);
- if (mtd_parts_nb[i] > 0) {
- part_type = "command line";
- } else {
- mtd_parts[i] = static_partitions;
- mtd_parts_nb[i] = ARRAY_SIZE(static_partitions);
- part_type = "static";
- }
-
- printk(KERN_NOTICE MSG_PREFIX
- "using %s partition definition\n",
- part_type);
- mtd_device_register(impa7_mtd[i],
- mtd_parts[i], mtd_parts_nb[i]);
+ mtd_device_parse_register(impa7_mtd[i], NULL, NULL,
+ partitions,
+ ARRAY_SIZE(partitions));
+ } else {
+ iounmap((void __iomem *)impa7_map[i].virt);
}
- else
- iounmap((void *)impa7_map[i].virt);
}
return devicesfound == 0 ? -ENXIO : 0;
}
@@ -128,8 +105,8 @@ static void __exit cleanup_impa7(void)
if (impa7_mtd[i]) {
mtd_device_unregister(impa7_mtd[i]);
map_destroy(impa7_mtd[i]);
- iounmap((void *)impa7_map[i].virt);
- impa7_map[i].virt = 0;
+ iounmap((void __iomem *)impa7_map[i].virt);
+ impa7_map[i].virt = NULL;
}
}
}
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index d2f47be8754..5ab71f0e1bc 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -31,7 +31,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
@@ -44,7 +43,6 @@ struct vr_nor_mtd {
void __iomem *csr_base;
struct map_info map;
struct mtd_info *info;
- int nr_parts;
struct pci_dev *dev;
};
@@ -64,32 +62,28 @@ struct vr_nor_mtd {
#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
#define TIMING_MASK 0x3FFF0000
-static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
+static void vr_nor_destroy_partitions(struct vr_nor_mtd *p)
{
mtd_device_unregister(p->info);
}
-static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
+static int vr_nor_init_partitions(struct vr_nor_mtd *p)
{
- struct mtd_partition *parts;
- static const char *part_probes[] = { "cmdlinepart", NULL };
-
/* register the flash bank */
/* partition the flash bank */
- p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0);
- return mtd_device_register(p->info, parts, p->nr_parts);
+ return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
}
-static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
+static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
{
map_destroy(p->info);
}
-static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p)
+static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
{
- static const char *probe_types[] =
+ static const char * const probe_types[] =
{ "cfi_probe", "jedec_probe", NULL };
- const char **type;
+ const char * const *type;
for (type = probe_types; !p->info && *type; type++)
p->info = do_map_probe(*type, &p->map);
@@ -101,7 +95,7 @@ static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p)
return 0;
}
-static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p)
+static void vr_nor_destroy_maps(struct vr_nor_mtd *p)
{
unsigned int exp_timing_cs0;
@@ -121,7 +115,7 @@ static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p)
* Initialize the map_info structure and map the flash.
* Returns 0 on success, nonzero otherwise.
*/
-static int __devinit vr_nor_init_maps(struct vr_nor_mtd *p)
+static int vr_nor_init_maps(struct vr_nor_mtd *p)
{
unsigned long csr_phys, csr_len;
unsigned long win_phys, win_len;
@@ -181,11 +175,10 @@ static struct pci_device_id vr_nor_pci_ids[] = {
{0,}
};
-static void __devexit vr_nor_pci_remove(struct pci_dev *dev)
+static void vr_nor_pci_remove(struct pci_dev *dev)
{
struct vr_nor_mtd *p = pci_get_drvdata(dev);
- pci_set_drvdata(dev, NULL);
vr_nor_destroy_partitions(p);
vr_nor_destroy_mtd_setup(p);
vr_nor_destroy_maps(p);
@@ -194,8 +187,7 @@ static void __devexit vr_nor_pci_remove(struct pci_dev *dev)
pci_disable_device(dev);
}
-static int __devinit
-vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct vr_nor_mtd *p = NULL;
unsigned int exp_timing_cs0;
@@ -261,22 +253,11 @@ vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
static struct pci_driver vr_nor_pci_driver = {
.name = DRV_NAME,
.probe = vr_nor_pci_probe,
- .remove = __devexit_p(vr_nor_pci_remove),
+ .remove = vr_nor_pci_remove,
.id_table = vr_nor_pci_ids,
};
-static int __init vr_nor_mtd_init(void)
-{
- return pci_register_driver(&vr_nor_pci_driver);
-}
-
-static void __exit vr_nor_mtd_exit(void)
-{
- pci_unregister_driver(&vr_nor_pci_driver);
-}
-
-module_init(vr_nor_mtd_init);
-module_exit(vr_nor_mtd_exit);
+module_pci_driver(vr_nor_pci_driver);
MODULE_AUTHOR("Andy Lowe");
MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
deleted file mode 100644
index c00b9175ba9..00000000000
--- a/drivers/mtd/maps/ixp2000.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * drivers/mtd/maps/ixp2000.c
- *
- * Mapping for the Intel XScale IXP2000 based systems
- *
- * Copyright (C) 2002 Intel Corp.
- * Copyright (C) 2003-2004 MontaVista Software, Inc.
- *
- * Original Author: Naeem M Afzal <naeem.m.afzal@intel.com>
- * Maintainer: Deepak Saxena <dsaxena@plexity.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <asm/mach/flash.h>
-
-#include <linux/reboot.h>
-
-struct ixp2000_flash_info {
- struct mtd_info *mtd;
- struct map_info map;
- struct mtd_partition *partitions;
- struct resource *res;
-};
-
-static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs)
-{
- unsigned long (*set_bank)(unsigned long) =
- (unsigned long(*)(unsigned long))map->map_priv_2;
-
- return (set_bank ? set_bank(ofs) : ofs);
-}
-
-#ifdef __ARMEB__
-/*
- * Rev A0 and A1 of IXP2400 silicon have a broken addressing unit which
- * causes the lower address bits to be XORed with 0x11 on 8 bit accesses
- * and XORed with 0x10 on 16 bit accesses. See the spec update, erratum 44.
- */
-static int erratum44_workaround = 0;
-
-static inline unsigned long address_fix8_write(unsigned long addr)
-{
- if (erratum44_workaround) {
- return (addr ^ 3);
- }
- return addr;
-}
-#else
-
-#define address_fix8_write(x) (x)
-#endif
-
-static map_word ixp2000_flash_read8(struct map_info *map, unsigned long ofs)
-{
- map_word val;
-
- val.x[0] = *((u8 *)(map->map_priv_1 + flash_bank_setup(map, ofs)));
- return val;
-}
-
-/*
- * We can't use the standard memcpy due to the broken SlowPort
- * address translation on rev A0 and A1 silicon and the fact that
- * we have banked flash.
- */
-static void ixp2000_flash_copy_from(struct map_info *map, void *to,
- unsigned long from, ssize_t len)
-{
- from = flash_bank_setup(map, from);
- while(len--)
- *(__u8 *) to++ = *(__u8 *)(map->map_priv_1 + from++);
-}
-
-static void ixp2000_flash_write8(struct map_info *map, map_word d, unsigned long ofs)
-{
- *(__u8 *) (address_fix8_write(map->map_priv_1 +
- flash_bank_setup(map, ofs))) = d.x[0];
-}
-
-static void ixp2000_flash_copy_to(struct map_info *map, unsigned long to,
- const void *from, ssize_t len)
-{
- to = flash_bank_setup(map, to);
- while(len--) {
- unsigned long tmp = address_fix8_write(map->map_priv_1 + to++);
- *(__u8 *)(tmp) = *(__u8 *)(from++);
- }
-}
-
-
-static int ixp2000_flash_remove(struct platform_device *dev)
-{
- struct flash_platform_data *plat = dev->dev.platform_data;
- struct ixp2000_flash_info *info = platform_get_drvdata(dev);
-
- platform_set_drvdata(dev, NULL);
-
- if(!info)
- return 0;
-
- if (info->mtd) {
- mtd_device_unregister(info->mtd);
- map_destroy(info->mtd);
- }
- if (info->map.map_priv_1)
- iounmap((void *) info->map.map_priv_1);
-
- kfree(info->partitions);
-
- if (info->res) {
- release_resource(info->res);
- kfree(info->res);
- }
-
- if (plat->exit)
- plat->exit();
-
- return 0;
-}
-
-
-static int ixp2000_flash_probe(struct platform_device *dev)
-{
- static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
- struct ixp2000_flash_data *ixp_data = dev->dev.platform_data;
- struct flash_platform_data *plat;
- struct ixp2000_flash_info *info;
- unsigned long window_size;
- int err = -1;
-
- if (!ixp_data)
- return -ENODEV;
-
- plat = ixp_data->platform_data;
- if (!plat)
- return -ENODEV;
-
- window_size = dev->resource->end - dev->resource->start + 1;
- dev_info(&dev->dev, "Probe of IXP2000 flash(%d banks x %dMiB)\n",
- ixp_data->nr_banks, ((u32)window_size >> 20));
-
- if (plat->width != 1) {
- dev_err(&dev->dev, "IXP2000 MTD map only supports 8-bit mode, asking for %d\n",
- plat->width * 8);
- return -EIO;
- }
-
- info = kzalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
- if(!info) {
- err = -ENOMEM;
- goto Error;
- }
-
- platform_set_drvdata(dev, info);
-
- /*
- * Tell the MTD layer we're not 1:1 mapped so that it does
- * not attempt to do a direct access on us.
- */
- info->map.phys = NO_XIP;
-
- info->map.size = ixp_data->nr_banks * window_size;
- info->map.bankwidth = 1;
-
- /*
- * map_priv_2 is used to store a ptr to the bank_setup routine
- */
- info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup;
-
- info->map.name = dev_name(&dev->dev);
- info->map.read = ixp2000_flash_read8;
- info->map.write = ixp2000_flash_write8;
- info->map.copy_from = ixp2000_flash_copy_from;
- info->map.copy_to = ixp2000_flash_copy_to;
-
- info->res = request_mem_region(dev->resource->start,
- dev->resource->end - dev->resource->start + 1,
- dev_name(&dev->dev));
- if (!info->res) {
- dev_err(&dev->dev, "Could not reserve memory region\n");
- err = -ENOMEM;
- goto Error;
- }
-
- info->map.map_priv_1 = (unsigned long) ioremap(dev->resource->start,
- dev->resource->end - dev->resource->start + 1);
- if (!info->map.map_priv_1) {
- dev_err(&dev->dev, "Failed to ioremap flash region\n");
- err = -EIO;
- goto Error;
- }
-
-#if defined(__ARMEB__)
- /*
- * Enable erratum 44 workaround for NPUs with broken slowport
- */
-
- erratum44_workaround = ixp2000_has_broken_slowport();
- dev_info(&dev->dev, "Erratum 44 workaround %s\n",
- erratum44_workaround ? "enabled" : "disabled");
-#endif
-
- info->mtd = do_map_probe(plat->map_name, &info->map);
- if (!info->mtd) {
- dev_err(&dev->dev, "map_probe failed\n");
- err = -ENXIO;
- goto Error;
- }
- info->mtd->owner = THIS_MODULE;
-
- err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
- if (err > 0) {
- err = mtd_device_register(info->mtd, info->partitions, err);
- if(err)
- dev_err(&dev->dev, "Could not parse partitions\n");
- }
-
- if (err)
- goto Error;
-
- return 0;
-
-Error:
- ixp2000_flash_remove(dev);
- return err;
-}
-
-static struct platform_driver ixp2000_flash_driver = {
- .probe = ixp2000_flash_probe,
- .remove = ixp2000_flash_remove,
- .driver = {
- .name = "IXP2000-Flash",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init ixp2000_flash_init(void)
-{
- return platform_driver_register(&ixp2000_flash_driver);
-}
-
-static void __exit ixp2000_flash_exit(void)
-{
- platform_driver_unregister(&ixp2000_flash_driver);
-}
-
-module_init(ixp2000_flash_init);
-module_exit(ixp2000_flash_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_ALIAS("platform:IXP2000-Flash");
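The bank_setup hook stored in map_priv_2 above is supplied by the board file via struct ixp2000_flash_data; it selects the active bank and returns the offset within the single mapped window. A hypothetical callback of that shape, with the register pointer and the 16 MiB bank size purely illustrative:

#include <linux/io.h>

static void __iomem *example_bank_csr;	/* hypothetical bank-select register */

static unsigned long example_bank_setup(unsigned long ofs)
{
	/* select the bank, then return the in-window offset */
	writel(ofs >> 24, example_bank_csr);
	return ofs & 0x00ffffff;
}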
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 155b21942f4..6a589f1e288 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -13,9 +13,9 @@
*
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -145,19 +145,16 @@ static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
struct ixp4xx_flash_info {
struct mtd_info *mtd;
struct map_info map;
- struct mtd_partition *partitions;
struct resource *res;
};
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
static int ixp4xx_flash_remove(struct platform_device *dev)
{
- struct flash_platform_data *plat = dev->dev.platform_data;
+ struct flash_platform_data *plat = dev_get_platdata(&dev->dev);
struct ixp4xx_flash_info *info = platform_get_drvdata(dev);
- platform_set_drvdata(dev, NULL);
-
if(!info)
return 0;
@@ -165,15 +162,6 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
- if (info->map.virt)
- iounmap(info->map.virt);
-
- kfree(info->partitions);
-
- if (info->res) {
- release_resource(info->res);
- kfree(info->res);
- }
if (plat->exit)
plat->exit();
@@ -183,10 +171,11 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
static int ixp4xx_flash_probe(struct platform_device *dev)
{
- struct flash_platform_data *plat = dev->dev.platform_data;
+ struct flash_platform_data *plat = dev_get_platdata(&dev->dev);
struct ixp4xx_flash_info *info;
- const char *part_type = NULL;
- int nr_parts = 0;
+ struct mtd_part_parser_data ppdata = {
+ .origin = dev->resource->start,
+ };
int err = -1;
if (!plat)
@@ -198,7 +187,8 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
return err;
}
- info = kzalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
+ info = devm_kzalloc(&dev->dev, sizeof(struct ixp4xx_flash_info),
+ GFP_KERNEL);
if(!info) {
err = -ENOMEM;
goto Error;
@@ -224,20 +214,9 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
info->map.write = ixp4xx_probe_write16;
info->map.copy_from = ixp4xx_copy_from;
- info->res = request_mem_region(dev->resource->start,
- resource_size(dev->resource),
- "IXP4XXFlash");
- if (!info->res) {
- printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n");
- err = -ENOMEM;
- goto Error;
- }
-
- info->map.virt = ioremap(dev->resource->start,
- resource_size(dev->resource));
- if (!info->map.virt) {
- printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n");
- err = -EIO;
+ info->map.virt = devm_ioremap_resource(&dev->dev, dev->resource);
+ if (IS_ERR(info->map.virt)) {
+ err = PTR_ERR(info->map.virt);
goto Error;
}
@@ -252,28 +231,12 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
/* Use the fast version */
info->map.write = ixp4xx_write16;
- nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions,
- dev->resource->start);
- if (nr_parts > 0) {
- part_type = "dynamic";
- } else {
- info->partitions = plat->parts;
- nr_parts = plat->nr_parts;
- part_type = "static";
- }
- if (nr_parts == 0)
- printk(KERN_NOTICE "IXP4xx flash: no partition info "
- "available, registering whole flash\n");
- else
- printk(KERN_NOTICE "IXP4xx flash: using %s partition "
- "definition\n", part_type);
-
- err = mtd_device_register(info->mtd, info->partitions, nr_parts);
- if (err)
+ err = mtd_device_parse_register(info->mtd, probes, &ppdata,
+ plat->parts, plat->nr_parts);
+ if (err) {
printk(KERN_ERR "Could not parse partitions\n");
-
- if (err)
goto Error;
+ }
return 0;
@@ -291,19 +254,7 @@ static struct platform_driver ixp4xx_flash_driver = {
},
};
-static int __init ixp4xx_flash_init(void)
-{
- return platform_driver_register(&ixp4xx_flash_driver);
-}
-
-static void __exit ixp4xx_flash_exit(void)
-{
- platform_driver_unregister(&ixp4xx_flash_driver);
-}
-
-
-module_init(ixp4xx_flash_init);
-module_exit(ixp4xx_flash_exit);
+module_platform_driver(ixp4xx_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index dd0360ba241..74bd98ee635 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -27,17 +27,21 @@ static struct mtd_info *mymtd;
/* Is this really the vpp port? */
+static DEFINE_SPINLOCK(l440gx_vpp_lock);
+static int l440gx_vpp_refcnt;
static void l440gx_set_vpp(struct map_info *map, int vpp)
{
- unsigned long l;
+ unsigned long flags;
- l = inl(VPP_PORT);
+ spin_lock_irqsave(&l440gx_vpp_lock, flags);
if (vpp) {
- l |= 1;
+ if (++l440gx_vpp_refcnt == 1) /* first nested 'on' */
+ outl(inl(VPP_PORT) | 1, VPP_PORT);
} else {
- l &= ~1;
+ if (--l440gx_vpp_refcnt == 0) /* last nested 'off' */
+ outl(inl(VPP_PORT) & ~1, VPP_PORT);
}
- outl(l, VPP_PORT);
+ spin_unlock_irqrestore(&l440gx_vpp_lock, flags);
}
static struct map_info l440gx_map = {
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index a90cabd7b84..7aa682cd4d7 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -7,21 +7,21 @@
* Copyright (C) 2010 John Crispin <blogic@openwrt.org>
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/cfi.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
+#include <linux/of.h>
#include <lantiq_soc.h>
-#include <lantiq_platform.h>
/*
* The NOR flash is connected to the same external bus unit (EBU) as PCI.
@@ -44,7 +44,8 @@ struct ltq_mtd {
struct map_info *map;
};
-static char ltq_map_name[] = "ltq_nor";
+static const char ltq_map_name[] = "ltq_nor";
+static const char * const ltq_probe_types[] = { "cmdlinepart", "ofpart", NULL };
static map_word
ltq_read16(struct map_info *map, unsigned long adr)
@@ -107,47 +108,42 @@ ltq_copy_to(struct map_info *map, unsigned long to,
spin_unlock_irqrestore(&ebu_lock, flags);
}
-static const char const *part_probe_types[] = { "cmdlinepart", NULL };
-
-static int __init
+static int
ltq_mtd_probe(struct platform_device *pdev)
{
- struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev);
+ struct mtd_part_parser_data ppdata;
struct ltq_mtd *ltq_mtd;
- struct mtd_partition *parts;
- struct resource *res;
- int nr_parts = 0;
struct cfi_private *cfi;
int err;
- ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
+ if (of_machine_is_compatible("lantiq,falcon") &&
+ (ltq_boot_select() != BS_FLASH)) {
+ dev_err(&pdev->dev, "invalid bootstrap options\n");
+ return -ENODEV;
+ }
+
+ ltq_mtd = devm_kzalloc(&pdev->dev, sizeof(struct ltq_mtd), GFP_KERNEL);
+ if (!ltq_mtd)
+ return -ENOMEM;
+
platform_set_drvdata(pdev, ltq_mtd);
ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!ltq_mtd->res) {
- dev_err(&pdev->dev, "failed to get memory resource");
- err = -ENOENT;
- goto err_out;
+ dev_err(&pdev->dev, "failed to get memory resource\n");
+ return -ENOENT;
}
- res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start,
- resource_size(ltq_mtd->res), dev_name(&pdev->dev));
- if (!ltq_mtd->res) {
- dev_err(&pdev->dev, "failed to request mem resource");
- err = -EBUSY;
- goto err_out;
- }
+ ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info),
+ GFP_KERNEL);
+ if (!ltq_mtd->map)
+ return -ENOMEM;
- ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
- ltq_mtd->map->phys = res->start;
- ltq_mtd->map->size = resource_size(res);
- ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev,
- ltq_mtd->map->phys, ltq_mtd->map->size);
- if (!ltq_mtd->map->virt) {
- dev_err(&pdev->dev, "failed to ioremap!\n");
- err = -ENOMEM;
- goto err_free;
- }
+ ltq_mtd->map->phys = ltq_mtd->res->start;
+ ltq_mtd->map->size = resource_size(ltq_mtd->res);
+ ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res);
+ if (IS_ERR(ltq_mtd->map->virt))
+ return PTR_ERR(ltq_mtd->map->virt);
ltq_mtd->map->name = ltq_map_name;
ltq_mtd->map->bankwidth = 2;
@@ -162,8 +158,7 @@ ltq_mtd_probe(struct platform_device *pdev)
if (!ltq_mtd->mtd) {
dev_err(&pdev->dev, "probing failed\n");
- err = -ENXIO;
- goto err_unmap;
+ return -ENXIO;
}
ltq_mtd->mtd->owner = THIS_MODULE;
@@ -172,17 +167,9 @@ ltq_mtd_probe(struct platform_device *pdev)
cfi->addr_unlock1 ^= 1;
cfi->addr_unlock2 ^= 1;
- nr_parts = parse_mtd_partitions(ltq_mtd->mtd,
- part_probe_types, &parts, 0);
- if (nr_parts > 0) {
- dev_info(&pdev->dev,
- "using %d partitions from cmdline", nr_parts);
- } else {
- nr_parts = ltq_mtd_data->nr_parts;
- parts = ltq_mtd_data->parts;
- }
-
- err = add_mtd_partitions(ltq_mtd->mtd, parts, nr_parts);
+ ppdata.of_node = pdev->dev.of_node;
+ err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types,
+ &ppdata, NULL, 0);
if (err) {
dev_err(&pdev->dev, "failed to add partitions\n");
goto err_destroy;
@@ -192,59 +179,38 @@ ltq_mtd_probe(struct platform_device *pdev)
err_destroy:
map_destroy(ltq_mtd->mtd);
-err_unmap:
- iounmap(ltq_mtd->map->virt);
-err_free:
- kfree(ltq_mtd->map);
-err_out:
- kfree(ltq_mtd);
return err;
}
-static int __devexit
+static int
ltq_mtd_remove(struct platform_device *pdev)
{
struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
- if (ltq_mtd) {
- if (ltq_mtd->mtd) {
- del_mtd_partitions(ltq_mtd->mtd);
- map_destroy(ltq_mtd->mtd);
- }
- if (ltq_mtd->map->virt)
- iounmap(ltq_mtd->map->virt);
- kfree(ltq_mtd->map);
- kfree(ltq_mtd);
+ if (ltq_mtd && ltq_mtd->mtd) {
+ mtd_device_unregister(ltq_mtd->mtd);
+ map_destroy(ltq_mtd->mtd);
}
return 0;
}
+static const struct of_device_id ltq_mtd_match[] = {
+ { .compatible = "lantiq,nor" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_mtd_match);
+
static struct platform_driver ltq_mtd_driver = {
- .remove = __devexit_p(ltq_mtd_remove),
+ .probe = ltq_mtd_probe,
+ .remove = ltq_mtd_remove,
.driver = {
- .name = "ltq_nor",
+ .name = "ltq-nor",
.owner = THIS_MODULE,
+ .of_match_table = ltq_mtd_match,
},
};
-static int __init
-init_ltq_mtd(void)
-{
- int ret = platform_driver_probe(&ltq_mtd_driver, ltq_mtd_probe);
-
- if (ret)
- pr_err("ltq_nor: error registering platform driver");
- return ret;
-}
-
-static void __exit
-exit_ltq_mtd(void)
-{
- platform_driver_unregister(&ltq_mtd_driver);
-}
-
-module_init(init_ltq_mtd);
-module_exit(exit_ltq_mtd);
+module_platform_driver(ltq_mtd_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 5936c466e90..cadfbe05187 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -10,7 +10,6 @@
* kind, whether express or implied.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
@@ -33,9 +32,6 @@ struct latch_addr_flash_info {
/* cache; could be found out of res */
unsigned long win_mask;
- int nr_parts;
- struct mtd_partition *parts;
-
spinlock_t lock;
};
@@ -97,8 +93,6 @@ static void lf_copy_from(struct map_info *map, void *to,
static char *rom_probe_types[] = { "cfi_probe", NULL };
-static char *part_probe_types[] = { "cmdlinepart", NULL };
-
static int latch_addr_flash_remove(struct platform_device *dev)
{
struct latch_addr_flash_info *info;
@@ -107,13 +101,10 @@ static int latch_addr_flash_remove(struct platform_device *dev)
info = platform_get_drvdata(dev);
if (info == NULL)
return 0;
- platform_set_drvdata(dev, NULL);
- latch_addr_data = dev->dev.platform_data;
+ latch_addr_data = dev_get_platdata(&dev->dev);
if (info->mtd != NULL) {
- if (info->nr_parts)
- kfree(info->parts);
mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
@@ -132,7 +123,7 @@ static int latch_addr_flash_remove(struct platform_device *dev)
return 0;
}
-static int __devinit latch_addr_flash_probe(struct platform_device *dev)
+static int latch_addr_flash_probe(struct platform_device *dev)
{
struct latch_addr_flash_data *latch_addr_data;
struct latch_addr_flash_info *info;
@@ -142,7 +133,7 @@ static int __devinit latch_addr_flash_probe(struct platform_device *dev)
int chipsel;
int err;
- latch_addr_data = dev->dev.platform_data;
+ latch_addr_data = dev_get_platdata(&dev->dev);
if (latch_addr_data == NULL)
return -ENODEV;
@@ -206,21 +197,9 @@ static int __devinit latch_addr_flash_probe(struct platform_device *dev)
}
info->mtd->owner = THIS_MODULE;
- err = parse_mtd_partitions(info->mtd, (const char **)part_probe_types,
- &info->parts, 0);
- if (err > 0) {
- mtd_device_register(info->mtd, info->parts, err);
- return 0;
- }
- if (latch_addr_data->nr_parts) {
- pr_notice("Using latch-addr-flash partition information\n");
- mtd_device_register(info->mtd,
- latch_addr_data->parts,
- latch_addr_data->nr_parts);
- return 0;
- }
-
- mtd_device_register(info->mtd, NULL, 0);
+ mtd_device_parse_register(info->mtd, NULL, NULL,
+ latch_addr_data->parts,
+ latch_addr_data->nr_parts);
return 0;
iounmap:
@@ -237,23 +216,13 @@ done:
static struct platform_driver latch_addr_flash_driver = {
.probe = latch_addr_flash_probe,
- .remove = __devexit_p(latch_addr_flash_remove),
+ .remove = latch_addr_flash_remove,
.driver = {
.name = DRIVER_NAME,
},
};
-static int __init latch_addr_flash_init(void)
-{
- return platform_driver_register(&latch_addr_flash_driver);
-}
-module_init(latch_addr_flash_init);
-
-static void __exit latch_addr_flash_exit(void)
-{
- platform_driver_unregister(&latch_addr_flash_driver);
-}
-module_exit(latch_addr_flash_exit);
+module_platform_driver(latch_addr_flash_driver);
MODULE_AUTHOR("David Griego <dgriego@mvista.com>");
MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper "
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
deleted file mode 100644
index 93fa56c3300..00000000000
--- a/drivers/mtd/maps/mbx860.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Handle mapping of the flash on MBX860 boards
- *
- * Author: Anton Todorov
- * Copyright: (C) 2001 Emness Technology
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-
-#define WINDOW_ADDR 0xfe000000
-#define WINDOW_SIZE 0x00200000
-
-/* Flash / Partition sizing */
-#define MAX_SIZE_KiB 8192
-#define BOOT_PARTITION_SIZE_KiB 512
-#define KERNEL_PARTITION_SIZE_KiB 5632
-#define APP_PARTITION_SIZE_KiB 2048
-
-#define NUM_PARTITIONS 3
-
-/* partition_info gives details on the logical partitions that the single
- * flash device is split into. If the size is zero, we use up to the end
- * of the device. */
-static struct mtd_partition partition_info[]={
- { .name = "MBX flash BOOT partition",
- .offset = 0,
- .size = BOOT_PARTITION_SIZE_KiB*1024 },
- { .name = "MBX flash DATA partition",
- .offset = BOOT_PARTITION_SIZE_KiB*1024,
- .size = (KERNEL_PARTITION_SIZE_KiB)*1024 },
- { .name = "MBX flash APPLICATION partition",
- .offset = (BOOT_PARTITION_SIZE_KiB+KERNEL_PARTITION_SIZE_KiB)*1024 }
-};
-
-
-static struct mtd_info *mymtd;
-
-struct map_info mbx_map = {
- .name = "MBX flash",
- .size = WINDOW_SIZE,
- .phys = WINDOW_ADDR,
- .bankwidth = 4,
-};
-
-static int __init init_mbx(void)
-{
- printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR);
- mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
-
- if (!mbx_map.virt) {
- printk("Failed to ioremap\n");
- return -EIO;
- }
- simple_map_init(&mbx_map);
-
- mymtd = do_map_probe("jedec_probe", &mbx_map);
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
- mtd_device_register(mymtd, NULL, 0);
- mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
- return 0;
- }
-
- iounmap((void *)mbx_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_mbx(void)
-{
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
- if (mbx_map.virt) {
- iounmap((void *)mbx_map.virt);
- mbx_map.virt = 0;
- }
-}
-
-module_init(init_mbx);
-module_exit(cleanup_mbx);
-
-MODULE_AUTHOR("Anton Todorov <a.todorov@emness.com>");
-MODULE_DESCRIPTION("MTD map driver for Motorola MBX860 board");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
deleted file mode 100644
index 807ac2a2e68..00000000000
--- a/drivers/mtd/maps/octagon-5066.c
+++ /dev/null
@@ -1,246 +0,0 @@
-/* ######################################################################
-
- Octagon 5066 MTD Driver.
-
- The Octagon 5066 is an SBC based on AMD's 586-WB running at 133 MHz. It
- comes with a built-in AMD 29F016 flash chip and a socketed EEPROM that
- is replaceable by flash. Both units are mapped through a multiplexer
- into a 32k memory window at 0xe8000. The control register for the
- multiplexing unit is located at IO 0x208 with a bit map of
- 0-5 Page Selection in 32k increments
- 6-7 Device selection:
- 00 SSD off
- 01 SSD 0 (Socket)
- 10 SSD 1 (Flash chip)
- 11 undefined
-
- On each SSD, the first 128k is reserved for use by the BIOS
- (actually it IS the BIOS). This only matters if you are booting off the
- flash: you must not put a file system starting there.
-
- The driver tries to do a detection algorithm to guess what sort of devices
- are plugged into the sockets.
-
- ##################################################################### */
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <asm/io.h>
-
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-
-#define WINDOW_START 0xe8000
-#define WINDOW_LENGTH 0x8000
-#define WINDOW_SHIFT 27
-#define WINDOW_MASK 0x7FFF
-#define PAGE_IO 0x208
-
-static volatile char page_n_dev = 0;
-static unsigned long iomapadr;
-static DEFINE_SPINLOCK(oct5066_spin);
-
-/*
- * We use map_priv_1 to identify which device we are.
- */
-
-static void __oct5066_page(struct map_info *map, __u8 byte)
-{
- outb(byte,PAGE_IO);
- page_n_dev = byte;
-}
-
-static inline void oct5066_page(struct map_info *map, unsigned long ofs)
-{
- __u8 byte = map->map_priv_1 | (ofs >> WINDOW_SHIFT);
-
- if (page_n_dev != byte)
- __oct5066_page(map, byte);
-}
-
-
-static map_word oct5066_read8(struct map_info *map, unsigned long ofs)
-{
- map_word ret;
- spin_lock(&oct5066_spin);
- oct5066_page(map, ofs);
- ret.x[0] = readb(iomapadr + (ofs & WINDOW_MASK));
- spin_unlock(&oct5066_spin);
- return ret;
-}
-
-static void oct5066_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- while(len) {
- unsigned long thislen = len;
- if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
- thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
-
- spin_lock(&oct5066_spin);
- oct5066_page(map, from);
- memcpy_fromio(to, iomapadr + from, thislen);
- spin_unlock(&oct5066_spin);
- to += thislen;
- from += thislen;
- len -= thislen;
- }
-}
-
-static void oct5066_write8(struct map_info *map, map_word d, unsigned long adr)
-{
- spin_lock(&oct5066_spin);
- oct5066_page(map, adr);
- writeb(d.x[0], iomapadr + (adr & WINDOW_MASK));
- spin_unlock(&oct5066_spin);
-}
-
-static void oct5066_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- while(len) {
- unsigned long thislen = len;
- if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
- thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
-
- spin_lock(&oct5066_spin);
- oct5066_page(map, to);
- memcpy_toio(iomapadr + to, from, thislen);
- spin_unlock(&oct5066_spin);
- to += thislen;
- from += thislen;
- len -= thislen;
- }
-}
-
-static struct map_info oct5066_map[2] = {
- {
- .name = "Octagon 5066 Socket",
- .phys = NO_XIP,
- .size = 512 * 1024,
- .bankwidth = 1,
- .read = oct5066_read8,
- .copy_from = oct5066_copy_from,
- .write = oct5066_write8,
- .copy_to = oct5066_copy_to,
- .map_priv_1 = 1<<6
- },
- {
- .name = "Octagon 5066 Internal Flash",
- .phys = NO_XIP,
- .size = 2 * 1024 * 1024,
- .bankwidth = 1,
- .read = oct5066_read8,
- .copy_from = oct5066_copy_from,
- .write = oct5066_write8,
- .copy_to = oct5066_copy_to,
- .map_priv_1 = 2<<6
- }
-};
-
-static struct mtd_info *oct5066_mtd[2] = {NULL, NULL};
-
-// OctProbe - Sense if this is an octagon card
-// ---------------------------------------------------------------------
-/* Perform a simple validity test: we map the window, select SSD0 and
- change pages while monitoring the window. If the window contents change
- as the PAGE_IO port is written, this is a functioning 5066 board. This
- will fail if the device in the socket is set to a uniform value. */
-static int __init OctProbe(void)
-{
- unsigned int Base = (1 << 6);
- unsigned long I;
- unsigned long Values[10];
- for (I = 0; I != 20; I++)
- {
- outb(Base + (I%10),PAGE_IO);
- if (I < 10)
- {
- // Record the value and check for uniqueness
- Values[I%10] = readl(iomapadr);
- if (I > 0 && Values[I%10] == Values[0])
- return -EAGAIN;
- }
- else
- {
- // Make sure we get the same values on the second pass
- if (Values[I%10] != readl(iomapadr))
- return -EAGAIN;
- }
- }
- return 0;
-}
-
-void cleanup_oct5066(void)
-{
- int i;
- for (i=0; i<2; i++) {
- if (oct5066_mtd[i]) {
- mtd_device_unregister(oct5066_mtd[i]);
- map_destroy(oct5066_mtd[i]);
- }
- }
- iounmap((void *)iomapadr);
- release_region(PAGE_IO, 1);
-}
-
-static int __init init_oct5066(void)
-{
- int i;
- int ret = 0;
-
- // Do an autoprobe sequence
- if (!request_region(PAGE_IO,1,"Octagon SSD")) {
- printk(KERN_NOTICE "5066: Page Register in Use\n");
- return -EAGAIN;
- }
- iomapadr = (unsigned long)ioremap(WINDOW_START, WINDOW_LENGTH);
- if (!iomapadr) {
- printk(KERN_NOTICE "Failed to ioremap memory region\n");
- ret = -EIO;
- goto out_rel;
- }
- if (OctProbe() != 0) {
- printk(KERN_NOTICE "5066: Octagon Probe Failed, is this an Octagon 5066 SBC?\n");
- iounmap((void *)iomapadr);
- ret = -EAGAIN;
- goto out_unmap;
- }
-
- // Print out our little header..
- printk("Octagon 5066 SSD IO:0x%x MEM:0x%x-0x%x\n",PAGE_IO,WINDOW_START,
- WINDOW_START+WINDOW_LENGTH);
-
- for (i=0; i<2; i++) {
- oct5066_mtd[i] = do_map_probe("cfi_probe", &oct5066_map[i]);
- if (!oct5066_mtd[i])
- oct5066_mtd[i] = do_map_probe("jedec", &oct5066_map[i]);
- if (!oct5066_mtd[i])
- oct5066_mtd[i] = do_map_probe("map_ram", &oct5066_map[i]);
- if (!oct5066_mtd[i])
- oct5066_mtd[i] = do_map_probe("map_rom", &oct5066_map[i]);
- if (oct5066_mtd[i]) {
- oct5066_mtd[i]->owner = THIS_MODULE;
- mtd_device_register(oct5066_mtd[i], NULL, 0);
- }
- }
-
- if (!oct5066_mtd[0] && !oct5066_mtd[1]) {
- cleanup_oct5066();
- return -ENXIO;
- }
-
- return 0;
-
- out_unmap:
- iounmap((void *)iomapadr);
- out_rel:
- release_region(PAGE_IO, 1);
- return ret;
-}
-
-module_init(init_oct5066);
-module_exit(cleanup_oct5066);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jason Gunthorpe <jgg@deltatee.com>, David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("MTD map driver for Octagon 5066 Single Board Computer");
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 1d005a3e9b4..eb0242e0b2d 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
@@ -43,26 +42,14 @@ static map_word mtd_pci_read8(struct map_info *_map, unsigned long ofs)
struct map_pci_info *map = (struct map_pci_info *)_map;
map_word val;
val.x[0]= readb(map->base + map->translate(map, ofs));
-// printk("read8 : %08lx => %02x\n", ofs, val.x[0]);
return val;
}
-#if 0
-static map_word mtd_pci_read16(struct map_info *_map, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
- map_word val;
- val.x[0] = readw(map->base + map->translate(map, ofs));
-// printk("read16: %08lx => %04x\n", ofs, val.x[0]);
- return val;
-}
-#endif
static map_word mtd_pci_read32(struct map_info *_map, unsigned long ofs)
{
struct map_pci_info *map = (struct map_pci_info *)_map;
map_word val;
val.x[0] = readl(map->base + map->translate(map, ofs));
-// printk("read32: %08lx => %08x\n", ofs, val.x[0]);
return val;
}
@@ -75,22 +62,12 @@ static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from
static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs)
{
struct map_pci_info *map = (struct map_pci_info *)_map;
-// printk("write8 : %08lx <= %02x\n", ofs, val.x[0]);
writeb(val.x[0], map->base + map->translate(map, ofs));
}
-#if 0
-static void mtd_pci_write16(struct map_info *_map, map_word val, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
-// printk("write16: %08lx <= %04x\n", ofs, val.x[0]);
- writew(val.x[0], map->base + map->translate(map, ofs));
-}
-#endif
static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs)
{
struct map_pci_info *map = (struct map_pci_info *)_map;
-// printk("write32: %08lx <= %08x\n", ofs, val.x[0]);
writel(val.x[0], map->base + map->translate(map, ofs));
}
@@ -275,8 +252,7 @@ static struct pci_device_id mtd_pci_ids[] = {
* Generic code follows.
*/
-static int __devinit
-mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct mtd_pci_info *info = (struct mtd_pci_info *)id->driver_data;
struct map_pci_info *map = NULL;
@@ -306,8 +282,7 @@ mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err)
goto release;
- /* tsk - do_map_probe should take const char * */
- mtd = do_map_probe((char *)info->map_name, &map->map);
+ mtd = do_map_probe(info->map_name, &map->map);
err = -ENODEV;
if (!mtd)
goto release;
@@ -330,8 +305,7 @@ out:
return err;
}
-static void __devexit
-mtd_pci_remove(struct pci_dev *dev)
+static void mtd_pci_remove(struct pci_dev *dev)
{
struct mtd_info *mtd = pci_get_drvdata(dev);
struct map_pci_info *map = mtd->priv;
@@ -341,32 +315,19 @@ mtd_pci_remove(struct pci_dev *dev)
map->exit(dev, map);
kfree(map);
- pci_set_drvdata(dev, NULL);
pci_release_regions(dev);
}
static struct pci_driver mtd_pci_driver = {
.name = "MTD PCI",
.probe = mtd_pci_probe,
- .remove = __devexit_p(mtd_pci_remove),
+ .remove = mtd_pci_remove,
.id_table = mtd_pci_ids,
};
-static int __init mtd_pci_maps_init(void)
-{
- return pci_register_driver(&mtd_pci_driver);
-}
-
-static void __exit mtd_pci_maps_exit(void)
-{
- pci_unregister_driver(&mtd_pci_driver);
-}
-
-module_init(mtd_pci_maps_init);
-module_exit(mtd_pci_maps_exit);
+module_pci_driver(mtd_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("Generic PCI map driver");
MODULE_DEVICE_TABLE(pci, mtd_pci_ids);
-
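As with the platform drivers above, module_pci_driver() replaces the init/exit boilerplate for a pci_driver, and the __devinit/__devexit/__devexit_p() section annotations are simply dropped (they were removed kernel-wide once CONFIG_HOTPLUG went away). A minimal sketch with a hypothetical device ID, not tied to any real hardware:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id ex_pci_ids[] = {
        { PCI_DEVICE(0x1234, 0x5678) },         /* hypothetical vendor/device */
        { }
};
MODULE_DEVICE_TABLE(pci, ex_pci_ids);

static int ex_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        return pci_enable_device(dev);
}

static void ex_pci_remove(struct pci_dev *dev)
{
        pci_disable_device(dev);
}

static struct pci_driver ex_pci_driver = {
        .name           = "ex-mtd-pci",
        .id_table       = ex_pci_ids,
        .probe          = ex_pci_probe,
        .remove         = ex_pci_remove,
};

/* generates the module_init()/module_exit() pair that calls
 * pci_register_driver()/pci_unregister_driver() */
module_pci_driver(ex_pci_driver);

MODULE_LICENSE("GPL");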
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index bbe168b65c2..a3cfad392ed 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -14,7 +14,6 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/io.h>
-#include <asm/system.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -22,22 +21,6 @@
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
-#ifdef CONFIG_MTD_DEBUG
-static int debug = CONFIG_MTD_DEBUG_VERBOSE;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy");
-#undef DEBUG
-#define DEBUG(n, format, arg...) \
- if (n <= debug) { \
- printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __func__ , ## arg); \
- }
-
-#else
-#undef DEBUG
-#define DEBUG(n, arg...)
-static const int debug = 0;
-#endif
-
#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg)
#define DRIVER_DESC "PCMCIA Flash memory card driver"
@@ -105,13 +88,13 @@ static caddr_t remap_window(struct map_info *map, unsigned long to)
int ret;
if (!pcmcia_dev_present(dev->p_dev)) {
- DEBUG(1, "device removed");
+ pr_debug("device removed\n");
return 0;
}
offset = to & ~(dev->win_size-1);
if (offset != dev->offset) {
- DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x",
+ pr_debug("Remapping window from 0x%8.8x to 0x%8.8x\n",
dev->offset, offset);
ret = pcmcia_map_mem_page(dev->p_dev, win, offset);
if (ret != 0)
@@ -132,7 +115,7 @@ static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readb(addr);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", ofs, addr, d.x[0]);
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n", ofs, addr, d.x[0]);
return d;
}
@@ -147,7 +130,7 @@ static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readw(addr);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", ofs, addr, d.x[0]);
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n", ofs, addr, d.x[0]);
return d;
}
@@ -157,7 +140,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
unsigned long win_size = dev->win_size;
- DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
+ pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
while(len) {
int toread = win_size - (from & (win_size-1));
caddr_t addr;
@@ -169,7 +152,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long
if(!addr)
return;
- DEBUG(4, "memcpy from %p to %p len = %d", addr, to, toread);
+ pr_debug("memcpy from %p to %p len = %d\n", addr, to, toread);
memcpy_fromio(to, addr, toread);
len -= toread;
to += toread;
@@ -185,7 +168,7 @@ static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long
if(!addr)
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", adr, addr, d.x[0]);
+ pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n", adr, addr, d.x[0]);
writeb(d.x[0], addr);
}
@@ -196,7 +179,7 @@ static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long
if(!addr)
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", adr, addr, d.x[0]);
+ pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n", adr, addr, d.x[0]);
writew(d.x[0], addr);
}
@@ -206,7 +189,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
unsigned long win_size = dev->win_size;
- DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
+ pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
while(len) {
int towrite = win_size - (to & (win_size-1));
caddr_t addr;
@@ -218,7 +201,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v
if(!addr)
return;
- DEBUG(4, "memcpy from %p to %p len = %d", from, addr, towrite);
+ pr_debug("memcpy from %p to %p len = %d\n", from, addr, towrite);
memcpy_toio(addr, from, towrite);
len -= towrite;
to += towrite;
@@ -240,7 +223,7 @@ static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readb(win_base + ofs);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx",
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n",
ofs, win_base + ofs, d.x[0]);
return d;
}
@@ -255,7 +238,7 @@ static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readw(win_base + ofs);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx",
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n",
ofs, win_base + ofs, d.x[0]);
return d;
}
@@ -268,7 +251,7 @@ static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from,
if(DEV_REMOVED(map))
return;
- DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
+ pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
memcpy_fromio(to, win_base + from, len);
}
@@ -280,7 +263,7 @@ static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
if(DEV_REMOVED(map))
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx",
+ pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n",
adr, win_base + adr, d.x[0]);
writeb(d.x[0], win_base + adr);
}
@@ -293,7 +276,7 @@ static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr)
if(DEV_REMOVED(map))
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx",
+ pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n",
adr, win_base + adr, d.x[0]);
writew(d.x[0], win_base + adr);
}
@@ -306,18 +289,29 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
if(DEV_REMOVED(map))
return;
- DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
+ pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
memcpy_toio(win_base + to, from, len);
}
+static DEFINE_SPINLOCK(pcmcia_vpp_lock);
+static int pcmcia_vpp_refcnt;
static void pcmciamtd_set_vpp(struct map_info *map, int on)
{
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
struct pcmcia_device *link = dev->p_dev;
+ unsigned long flags;
- DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp);
- pcmcia_fixup_vpp(link, on ? dev->vpp : 0);
+ pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
+ spin_lock_irqsave(&pcmcia_vpp_lock, flags);
+ if (on) {
+ if (++pcmcia_vpp_refcnt == 1) /* first nested 'on' */
+ pcmcia_fixup_vpp(link, dev->vpp);
+ } else {
+ if (--pcmcia_vpp_refcnt == 0) /* last nested 'off' */
+ pcmcia_fixup_vpp(link, 0);
+ }
+ spin_unlock_irqrestore(&pcmcia_vpp_lock, flags);
}
@@ -325,7 +319,7 @@ static void pcmciamtd_release(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev = link->priv;
- DEBUG(3, "link = 0x%p", link);
+ pr_debug("link = 0x%p\n", link);
if (link->resource[2]->end) {
if(dev->win_base) {
@@ -337,7 +331,6 @@ static void pcmciamtd_release(struct pcmcia_device *link)
}
-#ifdef CONFIG_MTD_DEBUG
static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev,
tuple_t *tuple,
void *priv_data)
@@ -347,7 +340,7 @@ static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev,
if (!pcmcia_parse_tuple(tuple, &parse)) {
cistpl_format_t *t = &parse.format;
(void)t; /* Shut up, gcc */
- DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u",
+ pr_debug("Format type: %u, Error Detection: %u, offset = %u, length =%u\n",
t->type, t->edc, t->offset, t->length);
}
return -ENOSPC;
@@ -363,12 +356,11 @@ static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev,
if (!pcmcia_parse_tuple(tuple, &parse)) {
cistpl_jedec_t *t = &parse.jedec;
for (i = 0; i < t->nid; i++)
- DEBUG(2, "JEDEC: 0x%02x 0x%02x",
+ pr_debug("JEDEC: 0x%02x 0x%02x\n",
t->id[i].mfr, t->id[i].info);
}
return -ENOSPC;
}
-#endif
static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev,
tuple_t *tuple,
@@ -382,14 +374,14 @@ static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev,
if (pcmcia_parse_tuple(tuple, &parse))
return -EINVAL;
- DEBUG(2, "Common memory:");
+ pr_debug("Common memory:\n");
dev->pcmcia_map.size = t->dev[0].size;
/* from here on: DEBUG only */
for (i = 0; i < t->ndev; i++) {
- DEBUG(2, "Region %d, type = %u", i, t->dev[i].type);
- DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp);
- DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed);
- DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size);
+ pr_debug("Region %d, type = %u\n", i, t->dev[i].type);
+ pr_debug("Region %d, wp = %u\n", i, t->dev[i].wp);
+ pr_debug("Region %d, speed = %u ns\n", i, t->dev[i].speed);
+ pr_debug("Region %d, size = %u bytes\n", i, t->dev[i].size);
}
return 0;
}
@@ -409,12 +401,12 @@ static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev,
dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
/* from here on: DEBUG only */
for (i = 0; i < t->ngeo; i++) {
- DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth);
- DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block);
- DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block);
- DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block);
- DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition);
- DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave);
+ pr_debug("region: %d bankwidth = %u\n", i, t->geo[i].buswidth);
+ pr_debug("region: %d erase_block = %u\n", i, t->geo[i].erase_block);
+ pr_debug("region: %d read_block = %u\n", i, t->geo[i].read_block);
+ pr_debug("region: %d write_block = %u\n", i, t->geo[i].write_block);
+ pr_debug("region: %d partition = %u\n", i, t->geo[i].partition);
+ pr_debug("region: %d interleave = %u\n", i, t->geo[i].interleave);
}
return 0;
}
@@ -432,13 +424,11 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev
if (p_dev->prod_id[i])
strcat(dev->mtd_name, p_dev->prod_id[i]);
}
- DEBUG(2, "Found name: %s", dev->mtd_name);
+ pr_debug("Found name: %s\n", dev->mtd_name);
}
-#ifdef CONFIG_MTD_DEBUG
pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL);
pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL);
-#endif
pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev);
pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev);
@@ -450,12 +440,12 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev
if(force_size) {
dev->pcmcia_map.size = force_size << 20;
- DEBUG(2, "size forced to %dM", force_size);
+ pr_debug("size forced to %dM\n", force_size);
}
if(bankwidth) {
dev->pcmcia_map.bankwidth = bankwidth;
- DEBUG(2, "bankwidth forced to %d", bankwidth);
+ pr_debug("bankwidth forced to %d\n", bankwidth);
}
dev->pcmcia_map.name = dev->mtd_name;
@@ -464,7 +454,7 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev
*new_name = 1;
}
- DEBUG(1, "Device: Size: %lu Width:%d Name: %s",
+ pr_debug("Device: Size: %lu Width:%d Name: %s\n",
dev->pcmcia_map.size,
dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
}
@@ -479,7 +469,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
static char *probes[] = { "jedec_probe", "cfi_probe" };
int new_name = 0;
- DEBUG(3, "link=0x%p", link);
+ pr_debug("link=0x%p\n", link);
card_settings(dev, link, &new_name);
@@ -512,11 +502,11 @@ static int pcmciamtd_config(struct pcmcia_device *link)
do {
int ret;
- DEBUG(2, "requesting window with size = %luKiB memspeed = %d",
+ pr_debug("requesting window with size = %luKiB memspeed = %d\n",
(unsigned long) resource_size(link->resource[2]) >> 10,
mem_speed);
ret = pcmcia_request_window(link, link->resource[2], mem_speed);
- DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size);
+ pr_debug("ret = %d dev->win_size = %d\n", ret, dev->win_size);
if(ret) {
j++;
link->resource[2]->start = 0;
@@ -524,21 +514,21 @@ static int pcmciamtd_config(struct pcmcia_device *link)
force_size << 20 : MAX_PCMCIA_ADDR;
link->resource[2]->end >>= j;
} else {
- DEBUG(2, "Got window of size %luKiB", (unsigned long)
+ pr_debug("Got window of size %luKiB\n", (unsigned long)
resource_size(link->resource[2]) >> 10);
dev->win_size = resource_size(link->resource[2]);
break;
}
} while (link->resource[2]->end >= 0x1000);
- DEBUG(2, "dev->win_size = %d", dev->win_size);
+ pr_debug("dev->win_size = %d\n", dev->win_size);
if(!dev->win_size) {
dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n");
pcmciamtd_release(link);
return -ENODEV;
}
- DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10);
+ pr_debug("Allocated a window of %dKiB\n", dev->win_size >> 10);
/* Get write protect status */
dev->win_base = ioremap(link->resource[2]->start,
@@ -549,7 +539,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
pcmciamtd_release(link);
return -ENODEV;
}
- DEBUG(1, "mapped window dev = %p @ %pR, base = %p",
+ pr_debug("mapped window dev = %p @ %pR, base = %p\n",
dev, link->resource[2], dev->win_base);
dev->offset = 0;
@@ -564,7 +554,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
}
link->config_index = 0;
- DEBUG(2, "Setting Configuration");
+ pr_debug("Setting Configuration\n");
ret = pcmcia_enable_device(link);
if (ret != 0) {
if (dev->win_base) {
@@ -580,17 +570,17 @@ static int pcmciamtd_config(struct pcmcia_device *link)
mtd = do_map_probe("map_rom", &dev->pcmcia_map);
} else {
for(i = 0; i < ARRAY_SIZE(probes); i++) {
- DEBUG(1, "Trying %s", probes[i]);
+ pr_debug("Trying %s\n", probes[i]);
mtd = do_map_probe(probes[i], &dev->pcmcia_map);
if(mtd)
break;
- DEBUG(1, "FAILED: %s", probes[i]);
+ pr_debug("FAILED: %s\n", probes[i]);
}
}
if(!mtd) {
- DEBUG(1, "Can not find an MTD");
+ pr_debug("Can not find an MTD\n");
pcmciamtd_release(link);
return -ENODEV;
}
@@ -617,7 +607,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
/* If the memory found fits completely into the mapped PCMCIA window,
use the faster non-remapping read/write functions */
if(mtd->size <= dev->win_size) {
- DEBUG(1, "Using non remapping memory functions");
+ pr_debug("Using non remapping memory functions\n");
dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base;
if (dev->pcmcia_map.bankwidth == 1) {
dev->pcmcia_map.read = pcmcia_read8;
@@ -645,7 +635,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
static int pcmciamtd_suspend(struct pcmcia_device *dev)
{
- DEBUG(2, "EVENT_PM_RESUME");
+ pr_debug("EVENT_PM_RESUME\n");
/* get_lock(link); */
@@ -654,7 +644,7 @@ static int pcmciamtd_suspend(struct pcmcia_device *dev)
static int pcmciamtd_resume(struct pcmcia_device *dev)
{
- DEBUG(2, "EVENT_PM_SUSPEND");
+ pr_debug("EVENT_PM_SUSPEND\n");
/* free_lock(link); */
@@ -666,7 +656,7 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev = link->priv;
- DEBUG(3, "link=0x%p", link);
+ pr_debug("link=0x%p\n", link);
if(dev->mtd_info) {
mtd_device_unregister(dev->mtd_info);
@@ -686,7 +676,7 @@ static int pcmciamtd_probe(struct pcmcia_device *link)
/* Create new memory card device */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) return -ENOMEM;
- DEBUG(1, "dev=0x%p", dev);
+ pr_debug("dev=0x%p\n", dev);
dev->p_dev = link;
link->priv = dev;
@@ -755,7 +745,7 @@ static int __init init_pcmciamtd(void)
static void __exit exit_pcmciamtd(void)
{
- DEBUG(1, DRIVER_DESC " unloading");
+ pr_debug(DRIVER_DESC " unloading");
pcmcia_unregister_driver(&pcmciamtd_driver);
}
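The pcmciamtd changes drop the driver-private DEBUG() macro and its 'debug' level parameter in favour of plain pr_debug(), which compiles away unless DEBUG is defined for the file or CONFIG_DYNAMIC_DEBUG is enabled. A sketch of the usual opt-in, with a hypothetical module name; the control path in the comment is the standard dynamic-debug debugfs interface:

/* Prefix every pr_*() message with the module name; must come before
 * the includes so the printk helpers see it. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init ex_init(void)
{
        /* silent by default; enable at run time with dynamic debug, e.g.
         *   echo 'module example_mod +p' > /sys/kernel/debug/dynamic_debug/control
         */
        pr_debug("probing window at 0x%08lx\n", 0xfe000000UL);
        return 0;
}
module_init(ex_init);

static void __exit ex_exit(void)
{
        pr_debug("unloading\n");
}
module_exit(ex_exit);

MODULE_LICENSE("GPL");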
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index f64cee4a3bf..f73cd461257 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -27,8 +27,8 @@ struct physmap_flash_info {
struct mtd_info *mtd[MAX_RESOURCES];
struct mtd_info *cmtd;
struct map_info map[MAX_RESOURCES];
- int nr_parts;
- struct mtd_partition *parts;
+ spinlock_t vpp_lock;
+ int vpp_refcnt;
};
static int physmap_flash_remove(struct platform_device *dev)
@@ -40,14 +40,11 @@ static int physmap_flash_remove(struct platform_device *dev)
info = platform_get_drvdata(dev);
if (info == NULL)
return 0;
- platform_set_drvdata(dev, NULL);
- physmap_data = dev->dev.platform_data;
+ physmap_data = dev_get_platdata(&dev->dev);
if (info->cmtd) {
mtd_device_unregister(info->cmtd);
- if (info->nr_parts)
- kfree(info->parts);
if (info->cmtd != info->mtd[0])
mtd_concat_destroy(info->cmtd);
}
@@ -67,33 +64,45 @@ static void physmap_set_vpp(struct map_info *map, int state)
{
struct platform_device *pdev;
struct physmap_flash_data *physmap_data;
+ struct physmap_flash_info *info;
+ unsigned long flags;
pdev = (struct platform_device *)map->map_priv_1;
- physmap_data = pdev->dev.platform_data;
+ physmap_data = dev_get_platdata(&pdev->dev);
+
+ if (!physmap_data->set_vpp)
+ return;
- if (physmap_data->set_vpp)
- physmap_data->set_vpp(pdev, state);
+ info = platform_get_drvdata(pdev);
+
+ spin_lock_irqsave(&info->vpp_lock, flags);
+ if (state) {
+ if (++info->vpp_refcnt == 1) /* first nested 'on' */
+ physmap_data->set_vpp(pdev, 1);
+ } else {
+ if (--info->vpp_refcnt == 0) /* last nested 'off' */
+ physmap_data->set_vpp(pdev, 0);
+ }
+ spin_unlock_irqrestore(&info->vpp_lock, flags);
}
-static const char *rom_probe_types[] = {
- "cfi_probe",
- "jedec_probe",
- "qinfo_probe",
- "map_rom",
- NULL };
-static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs",
- NULL };
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "qinfo_probe", "map_rom", NULL };
+
+static const char * const part_probe_types[] = {
+ "cmdlinepart", "RedBoot", "afs", NULL };
static int physmap_flash_probe(struct platform_device *dev)
{
struct physmap_flash_data *physmap_data;
struct physmap_flash_info *info;
- const char **probe_type;
+ const char * const *probe_type;
+ const char * const *part_types;
int err = 0;
int i;
int devices_found = 0;
- physmap_data = dev->dev.platform_data;
+ physmap_data = dev_get_platdata(&dev->dev);
if (physmap_data == NULL)
return -ENODEV;
@@ -175,23 +184,12 @@ static int physmap_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
- err = parse_mtd_partitions(info->cmtd, part_probe_types,
- &info->parts, 0);
- if (err > 0) {
- mtd_device_register(info->cmtd, info->parts, err);
- info->nr_parts = err;
- return 0;
- }
-
- if (physmap_data->nr_parts) {
- printk(KERN_NOTICE "Using physmap partition information\n");
- mtd_device_register(info->cmtd, physmap_data->parts,
- physmap_data->nr_parts);
- return 0;
- }
+ spin_lock_init(&info->vpp_lock);
- mtd_device_register(info->cmtd, NULL, 0);
+ part_types = physmap_data->part_probe_types ? : part_probe_types;
+ mtd_device_parse_register(info->cmtd, part_types, NULL,
+ physmap_data->parts, physmap_data->nr_parts);
return 0;
err_out:
@@ -206,9 +204,8 @@ static void physmap_flash_shutdown(struct platform_device *dev)
int i;
for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->suspend && info->mtd[i]->resume)
- if (info->mtd[i]->suspend(info->mtd[i]) == 0)
- info->mtd[i]->resume(info->mtd[i]);
+ if (mtd_suspend(info->mtd[i]) == 0)
+ mtd_resume(info->mtd[i]);
}
#else
#define physmap_flash_shutdown NULL
@@ -245,21 +242,6 @@ static struct platform_device physmap_flash = {
.num_resources = 1,
.resource = &physmap_flash_resource,
};
-
-void physmap_configure(unsigned long addr, unsigned long size,
- int bankwidth, void (*set_vpp)(struct map_info *, int))
-{
- physmap_flash_resource.start = addr;
- physmap_flash_resource.end = addr + size - 1;
- physmap_flash_data.width = bankwidth;
- physmap_flash_data.set_vpp = set_vpp;
-}
-
-void physmap_set_partitions(struct mtd_partition *parts, int num_parts)
-{
- physmap_flash_data.nr_parts = num_parts;
- physmap_flash_data.parts = parts;
-}
#endif
static int __init physmap_init(void)
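physmap_set_vpp() now keeps a per-device refcount under a spinlock so that only the first nested 'on' and the last nested 'off' reach the board's set_vpp hook; the usual motivation is that concurrent writes to different chips behind the same supply could otherwise drop Vpp while another write is still in flight. The same pattern appears in the pcmciamtd hunks above and the sa1100 hunks below. Stripped to its core, with board_set_vpp() as a hypothetical stand-in for the platform callback:

#include <linux/spinlock.h>

static void board_set_vpp(int on)
{
        /* hypothetical: drive the programming-voltage supply here */
}

static DEFINE_SPINLOCK(ex_vpp_lock);
static int ex_vpp_refcnt;

static void ex_set_vpp(int on)
{
        unsigned long flags;

        spin_lock_irqsave(&ex_vpp_lock, flags);
        if (on) {
                if (++ex_vpp_refcnt == 1)       /* first nested 'on' */
                        board_set_vpp(1);
        } else {
                if (--ex_vpp_refcnt == 0)       /* last nested 'off' */
                        board_set_vpp(0);
        }
        spin_unlock_irqrestore(&ex_vpp_lock, flags);
}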
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index d251d1db129..217c25d7381 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -34,58 +33,10 @@ struct of_flash_list {
struct of_flash {
struct mtd_info *cmtd;
- struct mtd_partition *parts;
int list_size; /* number of elements in of_flash_list */
struct of_flash_list list[0];
};
-#define OF_FLASH_PARTS(info) ((info)->parts)
-static int parse_obsolete_partitions(struct platform_device *dev,
- struct of_flash *info,
- struct device_node *dp)
-{
- int i, plen, nr_parts;
- const struct {
- __be32 offset, len;
- } *part;
- const char *names;
-
- part = of_get_property(dp, "partitions", &plen);
- if (!part)
- return 0; /* No partitions found */
-
- dev_warn(&dev->dev, "Device tree uses obsolete partition map binding\n");
-
- nr_parts = plen / sizeof(part[0]);
-
- info->parts = kzalloc(nr_parts * sizeof(*info->parts), GFP_KERNEL);
- if (!info->parts)
- return -ENOMEM;
-
- names = of_get_property(dp, "partition-names", &plen);
-
- for (i = 0; i < nr_parts; i++) {
- info->parts[i].offset = be32_to_cpu(part->offset);
- info->parts[i].size = be32_to_cpu(part->len) & ~1;
- if (be32_to_cpu(part->len) & 1) /* bit 0 set signifies read only partition */
- info->parts[i].mask_flags = MTD_WRITEABLE;
-
- if (names && (plen > 0)) {
- int len = strlen(names) + 1;
-
- info->parts[i].name = (char *)names;
- plen -= len;
- names += len;
- } else {
- info->parts[i].name = "unnamed";
- }
-
- part++;
- }
-
- return nr_parts;
-}
-
static int of_flash_remove(struct platform_device *dev)
{
struct of_flash *info;
@@ -101,11 +52,8 @@ static int of_flash_remove(struct platform_device *dev)
mtd_concat_destroy(info->cmtd);
}
- if (info->cmtd) {
- if (OF_FLASH_PARTS(info))
- kfree(OF_FLASH_PARTS(info));
+ if (info->cmtd)
mtd_device_unregister(info->cmtd);
- }
for (i = 0; i < info->list_size; i++) {
if (info->list[i].mtd)
@@ -119,23 +67,21 @@ static int of_flash_remove(struct platform_device *dev)
kfree(info->list[i].res);
}
}
-
- kfree(info);
-
return 0;
}
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "map_rom" };
+
/* Helper function to handle probing of the obsolete "direct-mapped"
* compatible binding, which has an extra "probe-type" property
* describing the type of flash probe necessary. */
-static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
- struct map_info *map)
+static struct mtd_info *obsolete_probe(struct platform_device *dev,
+ struct map_info *map)
{
struct device_node *dp = dev->dev.of_node;
const char *of_probe;
struct mtd_info *mtd;
- static const char *rom_probe_types[]
- = { "cfi_probe", "jedec_probe", "map_rom"};
int i;
dev_warn(&dev->dev, "Device tree uses obsolete \"direct-mapped\" "
@@ -165,8 +111,10 @@ static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
specifies the list of partition probers to use. If none is given then the
default is used. These take precedence over other device tree
information. */
-static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", NULL };
-static const char ** __devinit of_get_probes(struct device_node *dp)
+static const char * const part_probe_types_def[] = {
+ "cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL };
+
+static const char * const *of_get_probes(struct device_node *dp)
{
const char *cp;
int cplen;
@@ -195,16 +143,16 @@ static const char ** __devinit of_get_probes(struct device_node *dp)
return res;
}
-static void __devinit of_free_probes(const char **probes)
+static void of_free_probes(const char * const *probes)
{
if (probes != part_probe_types_def)
kfree(probes);
}
static struct of_device_id of_flash_match[];
-static int __devinit of_flash_probe(struct platform_device *dev)
+static int of_flash_probe(struct platform_device *dev)
{
- const char **part_probe_types;
+ const char * const *part_probe_types;
const struct of_device_id *match;
struct device_node *dp = dev->dev.of_node;
struct resource res;
@@ -218,6 +166,9 @@ static int __devinit of_flash_probe(struct platform_device *dev)
int reg_tuple_size;
struct mtd_info **mtd_list = NULL;
resource_size_t res_size;
+ struct mtd_part_parser_data ppdata;
+ bool map_indirect;
+ const char *mtd_name = NULL;
match = of_match_device(of_flash_match, &dev->dev);
if (!match)
@@ -226,6 +177,8 @@ static int __devinit of_flash_probe(struct platform_device *dev)
reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
+ of_property_read_string(dp, "linux,mtd-name", &mtd_name);
+
/*
* Get number of "reg" tuples. Scan for MTD devices on area's
* described by each "reg" region. This makes it possible (including
@@ -241,9 +194,12 @@ static int __devinit of_flash_probe(struct platform_device *dev)
}
count /= reg_tuple_size;
+ map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");
+
err = -ENOMEM;
- info = kzalloc(sizeof(struct of_flash) +
- sizeof(struct of_flash_list) * count, GFP_KERNEL);
+ info = devm_kzalloc(&dev->dev,
+ sizeof(struct of_flash) +
+ sizeof(struct of_flash_list) * count, GFP_KERNEL);
if (!info)
goto err_flash_remove;
@@ -280,10 +236,11 @@ static int __devinit of_flash_probe(struct platform_device *dev)
goto err_out;
}
- info->list[i].map.name = dev_name(&dev->dev);
+ info->list[i].map.name = mtd_name ?: dev_name(&dev->dev);
info->list[i].map.phys = res.start;
info->list[i].map.size = res_size;
info->list[i].map.bankwidth = be32_to_cpup(width);
+ info->list[i].map.device_node = dp;
err = -ENOMEM;
info->list[i].map.virt = ioremap(info->list[i].map.phys,
@@ -296,6 +253,17 @@ static int __devinit of_flash_probe(struct platform_device *dev)
simple_map_init(&info->list[i].map);
+ /*
+ * On some platforms (e.g. MPC5200) a direct 1:1 mapping
+ * may cause problems with JFFS2 usage, as the local bus (LPB)
+ * doesn't support unaligned accesses as implemented in the
+ * JFFS2 code via memcpy(). By setting NO_XIP, the
+ * flash will not be exposed directly to the MTD users
+ * (e.g. JFFS2) any more.
+ */
+ if (map_indirect)
+ info->list[i].map.phys = NO_XIP;
+
if (probe_type) {
info->list[i].mtd = do_map_probe(probe_type,
&info->list[i].map);
@@ -317,6 +285,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
}
err = 0;
+ info->cmtd = NULL;
if (info->list_size == 1) {
info->cmtd = info->list[0].mtd;
} else if (info->list_size > 1) {
@@ -325,35 +294,19 @@ static int __devinit of_flash_probe(struct platform_device *dev)
*/
info->cmtd = mtd_concat_create(mtd_list, info->list_size,
dev_name(&dev->dev));
- if (info->cmtd == NULL)
- err = -ENXIO;
}
+ if (info->cmtd == NULL)
+ err = -ENXIO;
+
if (err)
goto err_out;
+ ppdata.of_node = dp;
part_probe_types = of_get_probes(dp);
- err = parse_mtd_partitions(info->cmtd, part_probe_types,
- &info->parts, 0);
- if (err < 0) {
- of_free_probes(part_probe_types);
- goto err_out;
- }
+ mtd_device_parse_register(info->cmtd, part_probe_types, &ppdata,
+ NULL, 0);
of_free_probes(part_probe_types);
- if (err == 0) {
- err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts);
- if (err < 0)
- goto err_out;
- }
-
- if (err == 0) {
- err = parse_obsolete_partitions(dev, info, dp);
- if (err < 0)
- goto err_out;
- }
-
- mtd_device_register(info->cmtd, info->parts, err);
-
kfree(mtd_list);
return 0;
@@ -404,18 +357,7 @@ static struct platform_driver of_flash_driver = {
.remove = of_flash_remove,
};
-static int __init of_flash_init(void)
-{
- return platform_driver_register(&of_flash_driver);
-}
-
-static void __exit of_flash_exit(void)
-{
- platform_driver_unregister(&of_flash_driver);
-}
-
-module_init(of_flash_init);
-module_exit(of_flash_exit);
+module_platform_driver(of_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vwool@ru.mvista.com>");
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index 65bd1cd4d62..dc6df9abea0 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -58,7 +58,7 @@ static void pismo_set_vpp(struct platform_device *pdev, int on)
pismo->vpp(pismo->vpp_data, on);
}
-static unsigned int __devinit pismo_width_to_bytes(unsigned int width)
+static unsigned int pismo_width_to_bytes(unsigned int width)
{
width &= 15;
if (width > 2)
@@ -66,8 +66,8 @@ static unsigned int __devinit pismo_width_to_bytes(unsigned int width)
return 1 << width;
}
-static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf,
- u8 addr, size_t size)
+static int pismo_eeprom_read(struct i2c_client *client, void *buf, u8 addr,
+ size_t size)
{
int ret;
struct i2c_msg msg[] = {
@@ -88,8 +88,9 @@ static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf,
return ret == ARRAY_SIZE(msg) ? size : -EIO;
}
-static int __devinit pismo_add_device(struct pismo_data *pismo, int i,
- struct pismo_mem *region, const char *name, void *pdata, size_t psize)
+static int pismo_add_device(struct pismo_data *pismo, int i,
+ struct pismo_mem *region, const char *name,
+ void *pdata, size_t psize)
{
struct platform_device *dev;
struct resource res = { };
@@ -129,8 +130,8 @@ static int __devinit pismo_add_device(struct pismo_data *pismo, int i,
return ret;
}
-static int __devinit pismo_add_nor(struct pismo_data *pismo, int i,
- struct pismo_mem *region)
+static int pismo_add_nor(struct pismo_data *pismo, int i,
+ struct pismo_mem *region)
{
struct physmap_flash_data data = {
.width = region->width,
@@ -143,8 +144,8 @@ static int __devinit pismo_add_nor(struct pismo_data *pismo, int i,
&data, sizeof(data));
}
-static int __devinit pismo_add_sram(struct pismo_data *pismo, int i,
- struct pismo_mem *region)
+static int pismo_add_sram(struct pismo_data *pismo, int i,
+ struct pismo_mem *region)
{
struct platdata_mtd_ram data = {
.bankwidth = region->width,
@@ -154,8 +155,8 @@ static int __devinit pismo_add_sram(struct pismo_data *pismo, int i,
&data, sizeof(data));
}
-static void __devinit pismo_add_one(struct pismo_data *pismo, int i,
- const struct pismo_cs_block *cs, phys_addr_t base)
+static void pismo_add_one(struct pismo_data *pismo, int i,
+ const struct pismo_cs_block *cs, phys_addr_t base)
{
struct device *dev = &pismo->client->dev;
struct pismo_mem region;
@@ -197,7 +198,7 @@ static void __devinit pismo_add_one(struct pismo_data *pismo, int i,
}
}
-static int __devexit pismo_remove(struct i2c_client *client)
+static int pismo_remove(struct i2c_client *client)
{
struct pismo_data *pismo = i2c_get_clientdata(client);
int i;
@@ -210,8 +211,8 @@ static int __devexit pismo_remove(struct i2c_client *client)
return 0;
}
-static int __devinit pismo_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pismo_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct pismo_pdata *pdata = client->dev.platform_data;
@@ -267,7 +268,7 @@ static struct i2c_driver pismo_driver = {
.owner = THIS_MODULE,
},
.probe = pismo_probe,
- .remove = __devexit_p(pismo_remove),
+ .remove = pismo_remove,
.id_table = pismo_id,
};
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 9ca1eccba4b..d597e89f269 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
@@ -44,8 +43,6 @@ struct platram_info {
struct device *dev;
struct mtd_info *mtd;
struct map_info map;
- struct mtd_partition *partitions;
- bool free_partitions;
struct resource *area;
struct platdata_mtd_ram *pdata;
};
@@ -57,7 +54,7 @@ struct platram_info {
static inline struct platram_info *to_platram_info(struct platform_device *dev)
{
- return (struct platram_info *)platform_get_drvdata(dev);
+ return platform_get_drvdata(dev);
}
/* platram_setrw
@@ -86,8 +83,6 @@ static int platram_remove(struct platform_device *pdev)
{
struct platram_info *info = to_platram_info(pdev);
- platform_set_drvdata(pdev, NULL);
-
dev_dbg(&pdev->dev, "removing device\n");
if (info == NULL)
@@ -95,10 +90,6 @@ static int platram_remove(struct platform_device *pdev)
if (info->mtd) {
mtd_device_unregister(info->mtd);
- if (info->partitions) {
- if (info->free_partitions)
- kfree(info->partitions);
- }
map_destroy(info->mtd);
}
@@ -136,17 +127,16 @@ static int platram_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "probe entered\n");
- if (pdev->dev.platform_data == NULL) {
+ if (dev_get_platdata(&pdev->dev) == NULL) {
dev_err(&pdev->dev, "no platform data supplied\n");
err = -ENOENT;
goto exit_error;
}
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL) {
- dev_err(&pdev->dev, "no memory for flash info\n");
err = -ENOMEM;
goto exit_error;
}
@@ -205,7 +195,7 @@ static int platram_probe(struct platform_device *pdev)
* supplied by the platform_data struct */
if (pdata->map_probes) {
- const char **map_probes = pdata->map_probes;
+ const char * const *map_probes = pdata->map_probes;
for ( ; !info->mtd && *map_probes; map_probes++)
info->mtd = do_map_probe(*map_probes , &info->map);
@@ -225,31 +215,23 @@ static int platram_probe(struct platform_device *pdev)
platram_setrw(info, PLATRAM_RW);
- /* check to see if there are any available partitions, or wether
+ /* check to see if there are any available partitions, or whether
* to add this device whole */
- if (!pdata->nr_partitions) {
- /* try to probe using the supplied probe type */
- if (pdata->probes) {
- err = parse_mtd_partitions(info->mtd, pdata->probes,
- &info->partitions, 0);
- info->free_partitions = 1;
- if (err > 0)
- err = mtd_device_register(info->mtd,
- info->partitions, err);
- }
- }
- /* use the static mapping */
- else
- err = mtd_device_register(info->mtd, pdata->partitions,
- pdata->nr_partitions);
+ err = mtd_device_parse_register(info->mtd, pdata->probes, NULL,
+ pdata->partitions,
+ pdata->nr_partitions);
if (!err)
dev_info(&pdev->dev, "registered mtd device\n");
- /* add the whole device. */
- err = mtd_device_register(info->mtd, NULL, 0);
- if (err)
- dev_err(&pdev->dev, "failed to register the entire device\n");
+ if (pdata->nr_partitions) {
+ /* add the whole device. */
+ err = mtd_device_register(info->mtd, NULL, 0);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to register the entire device\n");
+ }
+ }
return err;
@@ -273,21 +255,7 @@ static struct platform_driver platram_driver = {
},
};
-/* module init/exit */
-
-static int __init platram_init(void)
-{
- printk("Generic platform RAM MTD, (c) 2004 Simtec Electronics\n");
- return platform_driver_register(&platram_driver);
-}
-
-static void __exit platram_exit(void)
-{
- platform_driver_unregister(&platram_driver);
-}
-
-module_init(platram_init);
-module_exit(platram_exit);
+module_platform_driver(platram_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index f59d62f74d4..cb4d92eea9f 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -13,7 +13,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -41,23 +40,17 @@ static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
}
struct pxa2xx_flash_info {
- struct mtd_partition *parts;
- int nr_parts;
struct mtd_info *mtd;
struct map_info map;
};
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-
-
-static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
+static int pxa2xx_flash_probe(struct platform_device *pdev)
{
- struct flash_platform_data *flash = pdev->dev.platform_data;
+ struct flash_platform_data *flash = dev_get_platdata(&pdev->dev);
struct pxa2xx_flash_info *info;
- struct mtd_partition *parts;
struct resource *res;
- int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -67,12 +60,10 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- info->map.name = (char *) flash->name;
+ info->map.name = flash->name;
info->map.bankwidth = flash->width;
info->map.phys = res->start;
- info->map.size = res->end - res->start + 1;
- info->parts = flash->parts;
- info->nr_parts = flash->nr_parts;
+ info->map.size = resource_size(res);
info->map.virt = ioremap(info->map.phys, info->map.size);
if (!info->map.virt) {
@@ -81,7 +72,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
return -ENOMEM;
}
info->map.cached =
- ioremap_cached(info->map.phys, info->map.size);
+ ioremap_cache(info->map.phys, info->map.size);
if (!info->map.cached)
printk(KERN_WARNING "Failed to ioremap cached %s\n",
info->map.name);
@@ -104,36 +95,23 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
}
info->mtd->owner = THIS_MODULE;
- ret = parse_mtd_partitions(info->mtd, probes, &parts, 0);
-
- if (ret > 0) {
- info->nr_parts = ret;
- info->parts = parts;
- }
-
- if (!info->nr_parts)
- printk("Registering %s as whole device\n",
- info->map.name);
-
- mtd_device_register(info->mtd, info->parts, info->nr_parts);
+ mtd_device_parse_register(info->mtd, probes, NULL, flash->parts,
+ flash->nr_parts);
platform_set_drvdata(pdev, info);
return 0;
}
-static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
+static int pxa2xx_flash_remove(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
- platform_set_drvdata(dev, NULL);
-
mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
iounmap(info->map.virt);
if (info->map.cached)
iounmap(info->map.cached);
- kfree(info->parts);
kfree(info);
return 0;
}
@@ -143,8 +121,8 @@ static void pxa2xx_flash_shutdown(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
- if (info && info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ if (info && mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
}
#else
#define pxa2xx_flash_shutdown NULL
@@ -156,22 +134,11 @@ static struct platform_driver pxa2xx_flash_driver = {
.owner = THIS_MODULE,
},
.probe = pxa2xx_flash_probe,
- .remove = __devexit_p(pxa2xx_flash_remove),
+ .remove = pxa2xx_flash_remove,
.shutdown = pxa2xx_flash_shutdown,
};
-static int __init init_pxa2xx_flash(void)
-{
- return platform_driver_register(&pxa2xx_flash_driver);
-}
-
-static void __exit cleanup_pxa2xx_flash(void)
-{
- platform_driver_unregister(&pxa2xx_flash_driver);
-}
-
-module_init(init_pxa2xx_flash);
-module_exit(cleanup_pxa2xx_flash);
+module_platform_driver(pxa2xx_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 761fb459d2c..146b6047ed2 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
@@ -25,8 +24,6 @@
struct rbtx4939_flash_info {
struct mtd_info *mtd;
struct map_info map;
- int nr_parts;
- struct mtd_partition *parts;
};
static int rbtx4939_flash_remove(struct platform_device *dev)
@@ -36,32 +33,29 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
info = platform_get_drvdata(dev);
if (!info)
return 0;
- platform_set_drvdata(dev, NULL);
if (info->mtd) {
- struct rbtx4939_flash_data *pdata = dev->dev.platform_data;
+ struct rbtx4939_flash_data *pdata = dev_get_platdata(&dev->dev);
- if (info->nr_parts)
- kfree(info->parts);
mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
return 0;
}
-static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
-static const char *part_probe_types[] = { "cmdlinepart", NULL };
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", NULL };
static int rbtx4939_flash_probe(struct platform_device *dev)
{
struct rbtx4939_flash_data *pdata;
struct rbtx4939_flash_info *info;
struct resource *res;
- const char **probe_type;
+ const char * const *probe_type;
int err = 0;
unsigned long size;
- pdata = dev->dev.platform_data;
+ pdata = dev_get_platdata(&dev->dev);
if (!pdata)
return -ENODEV;
@@ -105,24 +99,11 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
goto err_out;
}
info->mtd->owner = THIS_MODULE;
+ err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts,
+ pdata->nr_parts);
+
if (err)
goto err_out;
-
- err = parse_mtd_partitions(info->mtd, part_probe_types,
- &info->parts, 0);
- if (err > 0) {
- mtd_device_register(info->mtd, info->parts, err);
- info->nr_parts = err;
- return 0;
- }
-
- if (pdata->nr_parts) {
- pr_notice("Using rbtx4939 partition information\n");
- mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
- return 0;
- }
-
- mtd_device_register(info->mtd, NULL, 0);
return 0;
err_out:
@@ -135,9 +116,8 @@ static void rbtx4939_flash_shutdown(struct platform_device *dev)
{
struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
- if (info->mtd->suspend && info->mtd->resume)
- if (info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ if (mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
}
#else
#define rbtx4939_flash_shutdown NULL
@@ -153,18 +133,7 @@ static struct platform_driver rbtx4939_flash_driver = {
},
};
-static int __init rbtx4939_flash_init(void)
-{
- return platform_driver_register(&rbtx4939_flash_driver);
-}
-
-static void __exit rbtx4939_flash_exit(void)
-{
- platform_driver_unregister(&rbtx4939_flash_driver);
-}
-
-module_init(rbtx4939_flash_init);
-module_exit(rbtx4939_flash_exit);
+module_platform_driver(rbtx4939_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RBTX4939 MTD map driver");
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
deleted file mode 100644
index ed88225bf66..00000000000
--- a/drivers/mtd/maps/rpxlite.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Handle mapping of the flash on the RPX Lite and CLLF boards
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-
-
-#define WINDOW_ADDR 0xfe000000
-#define WINDOW_SIZE 0x800000
-
-static struct mtd_info *mymtd;
-
-static struct map_info rpxlite_map = {
- .name = "RPX",
- .size = WINDOW_SIZE,
- .bankwidth = 4,
- .phys = WINDOW_ADDR,
-};
-
-static int __init init_rpxlite(void)
-{
- printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR);
- rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
-
- if (!rpxlite_map.virt) {
- printk("Failed to ioremap\n");
- return -EIO;
- }
- simple_map_init(&rpxlite_map);
- mymtd = do_map_probe("cfi_probe", &rpxlite_map);
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
- mtd_device_register(mymtd, NULL, 0);
- return 0;
- }
-
- iounmap((void *)rpxlite_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_rpxlite(void)
-{
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
- if (rpxlite_map.virt) {
- iounmap((void *)rpxlite_map.virt);
- rpxlite_map.virt = 0;
- }
-}
-
-module_init(init_rpxlite);
-module_exit(cleanup_rpxlite);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arnold Christensen <AKC@pel.dk>");
-MODULE_DESCRIPTION("MTD map driver for RPX Lite and CLLF boards");
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index a9b5e0e5c4c..8fc06bf111c 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -23,106 +23,6 @@
#include <asm/sizes.h>
#include <asm/mach/flash.h>
-#if 0
-/*
- * This is here for documentation purposes only - until these people
- * submit their machine types. It will be gone January 2005.
- */
-static struct mtd_partition consus_partitions[] = {
- {
- .name = "Consus boot firmware",
- .offset = 0,
- .size = 0x00040000,
- .mask_flags = MTD_WRITABLE, /* force read-only */
- }, {
- .name = "Consus kernel",
- .offset = 0x00040000,
- .size = 0x00100000,
- .mask_flags = 0,
- }, {
- .name = "Consus disk",
- .offset = 0x00140000,
- /* The rest (up to 16M) for jffs. We could put 0 and
- make it find the size automatically, but right now
- i have 32 megs. jffs will use all 32 megs if given
- the chance, and this leads to horrible problems
- when you try to re-flash the image because blob
- won't erase the whole partition. */
- .size = 0x01000000 - 0x00140000,
- .mask_flags = 0,
- }, {
- /* this disk is a secondary disk, which can be used as
- needed, for simplicity, make it the size of the other
- consus partition, although realistically it could be
- the remainder of the disk (depending on the file
- system used) */
- .name = "Consus disk2",
- .offset = 0x01000000,
- .size = 0x01000000 - 0x00140000,
- .mask_flags = 0,
- }
-};
-
-/* Frodo has 2 x 16M 28F128J3A flash chips in bank 0: */
-static struct mtd_partition frodo_partitions[] =
-{
- {
- .name = "bootloader",
- .size = 0x00040000,
- .offset = 0x00000000,
- .mask_flags = MTD_WRITEABLE
- }, {
- .name = "bootloader params",
- .size = 0x00040000,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = MTD_WRITEABLE
- }, {
- .name = "kernel",
- .size = 0x00100000,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = MTD_WRITEABLE
- }, {
- .name = "ramdisk",
- .size = 0x00400000,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = MTD_WRITEABLE
- }, {
- .name = "file system",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND
- }
-};
-
-static struct mtd_partition jornada56x_partitions[] = {
- {
- .name = "bootldr",
- .size = 0x00040000,
- .offset = 0,
- .mask_flags = MTD_WRITEABLE,
- }, {
- .name = "rootfs",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND,
- }
-};
-
-static void jornada56x_set_vpp(int vpp)
-{
- if (vpp)
- GPSR = GPIO_GPIO26;
- else
- GPCR = GPIO_GPIO26;
- GPDR |= GPIO_GPIO26;
-}
-
-/*
- * Machine Phys Size set_vpp
- * Consus : SA1100_CS0_PHYS SZ_32M
- * Frodo : SA1100_CS0_PHYS SZ_32M
- * Jornada56x: SA1100_CS0_PHYS SZ_32M jornada56x_set_vpp
- */
-#endif
-
struct sa_subdev_info {
char name[16];
struct map_info map;
@@ -131,17 +31,27 @@ struct sa_subdev_info {
};
struct sa_info {
- struct mtd_partition *parts;
struct mtd_info *mtd;
int num_subdev;
- unsigned int nr_parts;
struct sa_subdev_info subdev[0];
};
+static DEFINE_SPINLOCK(sa1100_vpp_lock);
+static int sa1100_vpp_refcnt;
static void sa1100_set_vpp(struct map_info *map, int on)
{
struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map);
- subdev->plat->set_vpp(on);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sa1100_vpp_lock, flags);
+ if (on) {
+ if (++sa1100_vpp_refcnt == 1) /* first nested 'on' */
+ subdev->plat->set_vpp(1);
+ } else {
+ if (--sa1100_vpp_refcnt == 0) /* last nested 'off' */
+ subdev->plat->set_vpp(0);
+ }
+ spin_unlock_irqrestore(&sa1100_vpp_lock, flags);
}
static void sa1100_destroy_subdev(struct sa_subdev_info *subdev)
@@ -231,8 +141,6 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
mtd_concat_destroy(info->mtd);
}
- kfree(info->parts);
-
for (i = info->num_subdev - 1; i >= 0; i--)
sa1100_destroy_subdev(&info->subdev[i]);
kfree(info);
@@ -241,8 +149,8 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
plat->exit();
}
-static struct sa_info *__devinit
-sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
+static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
+ struct flash_platform_data *plat)
{
struct sa_info *info;
int nr, size, i, ret = 0;
@@ -336,15 +244,13 @@ sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
return ERR_PTR(ret);
}
-static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+static const char * const part_probes[] = { "cmdlinepart", "RedBoot", NULL };
-static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
+static int sa1100_mtd_probe(struct platform_device *pdev)
{
- struct flash_platform_data *plat = pdev->dev.platform_data;
- struct mtd_partition *parts;
- const char *part_type = NULL;
+ struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
struct sa_info *info;
- int err, nr_parts = 0;
+ int err;
if (!plat)
return -ENODEV;
@@ -358,26 +264,8 @@ static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
/*
* Partition selection stuff.
*/
- nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0);
- if (nr_parts > 0) {
- info->parts = parts;
- part_type = "dynamic";
- } else {
- parts = plat->parts;
- nr_parts = plat->nr_parts;
- part_type = "static";
- }
-
- if (nr_parts == 0)
- printk(KERN_NOTICE "SA1100 flash: no partition info "
- "available, registering whole flash\n");
- else
- printk(KERN_NOTICE "SA1100 flash: using %s partition "
- "definition\n", part_type);
-
- mtd_device_register(info->mtd, parts, nr_parts);
-
- info->nr_parts = nr_parts;
+ mtd_device_parse_register(info->mtd, part_probes, NULL, plat->parts,
+ plat->nr_parts);
platform_set_drvdata(pdev, info);
err = 0;
@@ -389,47 +277,23 @@ static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
static int __exit sa1100_mtd_remove(struct platform_device *pdev)
{
struct sa_info *info = platform_get_drvdata(pdev);
- struct flash_platform_data *plat = pdev->dev.platform_data;
+ struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
sa1100_destroy(info, plat);
return 0;
}
-#ifdef CONFIG_PM
-static void sa1100_mtd_shutdown(struct platform_device *dev)
-{
- struct sa_info *info = platform_get_drvdata(dev);
- if (info && info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
-}
-#else
-#define sa1100_mtd_shutdown NULL
-#endif
-
static struct platform_driver sa1100_mtd_driver = {
.probe = sa1100_mtd_probe,
.remove = __exit_p(sa1100_mtd_remove),
- .shutdown = sa1100_mtd_shutdown,
.driver = {
.name = "sa1100-mtd",
.owner = THIS_MODULE,
},
};
-static int __init sa1100_mtd_init(void)
-{
- return platform_driver_register(&sa1100_mtd_driver);
-}
-
-static void __exit sa1100_mtd_exit(void)
-{
- platform_driver_unregister(&sa1100_mtd_driver);
-}
-
-module_init(sa1100_mtd_init);
-module_exit(sa1100_mtd_exit);
+module_platform_driver(sa1100_mtd_driver);
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("SA1100 CFI map driver");
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 8fead8e46bc..093edd51bdc 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -183,7 +183,7 @@ static const struct sc520_par_table par_table[NUM_FLASH_BANKS] =
static void sc520cdp_setup_par(void)
{
- volatile unsigned long __iomem *mmcr;
+ unsigned long __iomem *mmcr;
unsigned long mmcr_val;
int i, j;
@@ -203,11 +203,11 @@ static void sc520cdp_setup_par(void)
*/
for(i = 0; i < NUM_FLASH_BANKS; i++) { /* for each par_table entry */
for(j = 0; j < NUM_SC520_PAR; j++) { /* for each PAR register */
- mmcr_val = mmcr[SC520_PAR(j)];
+ mmcr_val = readl(&mmcr[SC520_PAR(j)]);
/* if target device field matches, reprogram the PAR */
if((mmcr_val & SC520_PAR_TRGDEV) == par_table[i].trgdev)
{
- mmcr[SC520_PAR(j)] = par_table[i].new_par;
+ writel(par_table[i].new_par, &mmcr[SC520_PAR(j)]);
break;
}
}
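A brief aside on the sc520cdp.c change above: replacing the volatile pointer dereference with readl()/writel() uses the portable MMIO accessors and keeps the __iomem annotation meaningful to sparse. A minimal sketch under made-up names (DEMO_PAR, demo_update_par); the register index is purely illustrative.

#include <linux/io.h>

#define DEMO_PAR(i)     (0x88 + (i))    /* hypothetical 32-bit register index */

static void demo_update_par(unsigned long __iomem *mmcr, int i,
                            unsigned long new_par)
{
        unsigned long val;

        val = readl(&mmcr[DEMO_PAR(i)]);                /* MMIO read accessor */
        if (val != new_par)
                writel(new_par, &mmcr[DEMO_PAR(i)]);    /* MMIO write accessor */
}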
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index d88c8426bb0..b7a22a612a4 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -47,7 +47,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -69,8 +68,7 @@ static struct map_info scb2_map = {
};
static int region_fail;
-static int __devinit
-scb2_fixup_mtd(struct mtd_info *mtd)
+static int scb2_fixup_mtd(struct mtd_info *mtd)
{
int i;
int done = 0;
@@ -133,8 +131,8 @@ scb2_fixup_mtd(struct mtd_info *mtd)
/* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */
#define CSB5_FCR 0x41
#define CSB5_FCR_DECODE_ALL 0x0e
-static int __devinit
-scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+static int scb2_flash_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
{
u8 reg;
@@ -197,15 +195,13 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
return 0;
}
-static void __devexit
-scb2_flash_remove(struct pci_dev *dev)
+static void scb2_flash_remove(struct pci_dev *dev)
{
if (!scb2_mtd)
return;
/* disable flash writes */
- if (scb2_mtd->lock)
- scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size);
+ mtd_lock(scb2_mtd, 0, scb2_mtd->size);
mtd_device_unregister(scb2_mtd);
map_destroy(scb2_mtd);
@@ -215,7 +211,6 @@ scb2_flash_remove(struct pci_dev *dev)
if (!region_fail)
release_mem_region(SCB2_ADDR, SCB2_WINDOW);
- pci_set_drvdata(dev, NULL);
}
static struct pci_device_id scb2_flash_pci_ids[] = {
@@ -232,23 +227,10 @@ static struct pci_driver scb2_flash_driver = {
.name = "Intel SCB2 BIOS Flash",
.id_table = scb2_flash_pci_ids,
.probe = scb2_flash_probe,
- .remove = __devexit_p(scb2_flash_remove),
+ .remove = scb2_flash_remove,
};
-static int __init
-scb2_flash_init(void)
-{
- return pci_register_driver(&scb2_flash_driver);
-}
-
-static void __exit
-scb2_flash_exit(void)
-{
- pci_unregister_driver(&scb2_flash_driver);
-}
-
-module_init(scb2_flash_init);
-module_exit(scb2_flash_exit);
+module_pci_driver(scb2_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Hockin <thockin@sun.com>");
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index cbf6bade935..bb580bc1644 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -19,8 +19,6 @@
static struct mtd_info *flash_mtd;
static struct mtd_info *eprom_mtd;
-static struct mtd_partition *parsed_parts;
-
struct map_info soleng_eprom_map = {
.name = "Solution Engine EPROM",
.size = 0x400000,
@@ -33,30 +31,10 @@ struct map_info soleng_flash_map = {
.bankwidth = 4,
};
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-
-#ifdef CONFIG_MTD_SUPERH_RESERVE
-static struct mtd_partition superh_se_partitions[] = {
- /* Reserved for boot code, read-only */
- {
- .name = "flash_boot",
- .offset = 0x00000000,
- .size = CONFIG_MTD_SUPERH_RESERVE,
- .mask_flags = MTD_WRITEABLE,
- },
- /* All else is writable (e.g. JFFS) */
- {
- .name = "Flash FS",
- .offset = MTDPART_OFS_NXTBLK,
- .size = MTDPART_SIZ_FULL,
- }
-};
-#endif /* CONFIG_MTD_SUPERH_RESERVE */
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
static int __init init_soleng_maps(void)
{
- int nr_parts = 0;
-
/* First probe at offset 0 */
soleng_flash_map.phys = 0;
soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0);
@@ -92,21 +70,7 @@ static int __init init_soleng_maps(void)
mtd_device_register(eprom_mtd, NULL, 0);
}
- nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0);
-
-#ifdef CONFIG_MTD_SUPERH_RESERVE
- if (nr_parts <= 0) {
- printk(KERN_NOTICE "Using configured partition at 0x%08x.\n",
- CONFIG_MTD_SUPERH_RESERVE);
- parsed_parts = superh_se_partitions;
- nr_parts = sizeof(superh_se_partitions)/sizeof(*parsed_parts);
- }
-#endif /* CONFIG_MTD_SUPERH_RESERVE */
-
- if (nr_parts > 0)
- mtd_device_register(flash_mtd, parsed_parts, nr_parts);
- else
- mtd_device_register(flash_mtd, NULL, 0);
+ mtd_device_parse_register(flash_mtd, probes, NULL, NULL, 0);
return 0;
}
@@ -118,10 +82,7 @@ static void __exit cleanup_soleng_maps(void)
map_destroy(eprom_mtd);
}
- if (parsed_parts)
- mtd_device_unregister(flash_mtd);
- else
- mtd_device_unregister(flash_mtd);
+ mtd_device_unregister(flash_mtd);
map_destroy(flash_mtd);
}
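For illustration, a self-contained sketch of the mtd_device_parse_register() idiom the solutionengine.c hunk above switches to: the parsers in the probe list run first, and the static table is used only as a fallback. demo_probes, demo_parts and demo_register are hypothetical names, not from this patch.

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static const char * const demo_probes[] = { "RedBoot", "cmdlinepart", NULL };

/* Fallback partitions, used only if none of the parsers find anything. */
static const struct mtd_partition demo_parts[] = {
        {
                .name   = "boot",
                .offset = 0,
                .size   = 0x40000,
        }, {
                .name   = "fs",
                .offset = MTDPART_OFS_APPEND,
                .size   = MTDPART_SIZ_FULL,
        },
};

static int demo_register(struct mtd_info *mtd)
{
        /* One call replaces parse_mtd_partitions() + mtd_device_register(). */
        return mtd_device_parse_register(mtd, demo_probes, NULL,
                                         demo_parts, ARRAY_SIZE(demo_parts));
}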
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 2d66234f57c..b6f1aac3510 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -75,7 +74,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
up->name = of_get_property(dp, "model", NULL);
if (up->name && 0 < strlen(up->name))
- up->map.name = (char *)up->name;
+ up->map.name = up->name;
up->map.phys = op->resource[0].start;
@@ -108,7 +107,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
return 0;
}
-static int __devinit uflash_probe(struct platform_device *op)
+static int uflash_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
@@ -121,7 +120,7 @@ static int __devinit uflash_probe(struct platform_device *op)
return uflash_devinit(op, dp);
}
-static int __devexit uflash_remove(struct platform_device *op)
+static int uflash_remove(struct platform_device *op)
{
struct uflash_dev *up = dev_get_drvdata(&op->dev);
@@ -155,18 +154,7 @@ static struct platform_driver uflash_driver = {
.of_match_table = uflash_match,
},
.probe = uflash_probe,
- .remove = __devexit_p(uflash_remove),
+ .remove = uflash_remove,
};
-static int __init uflash_init(void)
-{
- return platform_driver_register(&uflash_driver);
-}
-
-static void __exit uflash_exit(void)
-{
- platform_driver_unregister(&uflash_driver);
-}
-
-module_init(uflash_init);
-module_exit(uflash_exit);
+module_platform_driver(uflash_driver);
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
deleted file mode 100644
index d78587990e7..00000000000
--- a/drivers/mtd/maps/tqm8xxl.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Handle mapping of the flash memory access routines
- * on TQM8xxL based devices.
- *
- * based on rpxlite.c
- *
- * Copyright(C) 2001 Kirk Lee <kirk@hpc.ee.ntu.edu.tw>
- *
- * This code is GPLed
- *
- */
-
-/*
- * According to TQM8xxL hardware manual, TQM8xxL series have
- * following flash memory organisations:
- * | capacity | | chip type | | bank0 | | bank1 |
- * 2MiB 512Kx16 2MiB 0
- * 4MiB 1Mx16 4MiB 0
- * 8MiB 1Mx16 4MiB 4MiB
- * Thus, we choose CONFIG_MTD_CFI_I2 & CONFIG_MTD_CFI_B4 at
- * kernel configuration.
- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-
-#define FLASH_ADDR 0x40000000
-#define FLASH_SIZE 0x00800000
-#define FLASH_BANK_MAX 4
-
-// trivial struct to describe partition information
-struct mtd_part_def
-{
- int nums;
- unsigned char *type;
- struct mtd_partition* mtd_part;
-};
-
-//static struct mtd_info *mymtd;
-static struct mtd_info* mtd_banks[FLASH_BANK_MAX];
-static struct map_info* map_banks[FLASH_BANK_MAX];
-static struct mtd_part_def part_banks[FLASH_BANK_MAX];
-static unsigned long num_banks;
-static void __iomem *start_scan_addr;
-
-/*
- * Here are partition information for all known TQM8xxL series devices.
- * See include/linux/mtd/partitions.h for definition of the mtd_partition
- * structure.
- *
- * The *_max_flash_size is the maximum possible mapped flash size which
- * is not necessarily the actual flash size. It must correspond to the
- * value specified in the mapping definition defined by the
- * "struct map_desc *_io_desc" for the corresponding machine.
- */
-
-/* Currently, TQM8xxL has up to 8MiB flash */
-static unsigned long tqm8xxl_max_flash_size = 0x00800000;
-
-/* partition definition for first flash bank
- * (cf. "drivers/char/flash_config.c")
- */
-static struct mtd_partition tqm8xxl_partitions[] = {
- {
- .name = "ppcboot",
- .offset = 0x00000000,
- .size = 0x00020000, /* 128KB */
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "kernel", /* default kernel image */
- .offset = 0x00020000,
- .size = 0x000e0000,
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "user",
- .offset = 0x00100000,
- .size = 0x00100000,
- },
- {
- .name = "initrd",
- .offset = 0x00200000,
- .size = 0x00200000,
- }
-};
-/* partition definition for second flash bank */
-static struct mtd_partition tqm8xxl_fs_partitions[] = {
- {
- .name = "cramfs",
- .offset = 0x00000000,
- .size = 0x00200000,
- },
- {
- .name = "jffs",
- .offset = 0x00200000,
- .size = 0x00200000,
- //.size = MTDPART_SIZ_FULL,
- }
-};
-
-static int __init init_tqm_mtd(void)
-{
- int idx = 0, ret = 0;
- unsigned long flash_addr, flash_size, mtd_size = 0;
- /* pointer to TQM8xxL board info data */
- bd_t *bd = (bd_t *)__res;
-
- flash_addr = bd->bi_flashstart;
- flash_size = bd->bi_flashsize;
-
- //request maximum flash size address space
- start_scan_addr = ioremap(flash_addr, flash_size);
- if (!start_scan_addr) {
- printk(KERN_WARNING "%s:Failed to ioremap address:0x%x\n", __func__, flash_addr);
- return -EIO;
- }
-
- for (idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
- if(mtd_size >= flash_size)
- break;
-
- printk(KERN_INFO "%s: chip probing count %d\n", __func__, idx);
-
- map_banks[idx] = kzalloc(sizeof(struct map_info), GFP_KERNEL);
- if(map_banks[idx] == NULL) {
- ret = -ENOMEM;
- /* FIXME: What if some MTD devices were probed already? */
- goto error_mem;
- }
-
- map_banks[idx]->name = kmalloc(16, GFP_KERNEL);
-
- if (!map_banks[idx]->name) {
- ret = -ENOMEM;
- /* FIXME: What if some MTD devices were probed already? */
- goto error_mem;
- }
- sprintf(map_banks[idx]->name, "TQM8xxL%d", idx);
-
- map_banks[idx]->size = flash_size;
- map_banks[idx]->bankwidth = 4;
-
- simple_map_init(map_banks[idx]);
-
- map_banks[idx]->virt = start_scan_addr;
- map_banks[idx]->phys = flash_addr;
- /* FIXME: This looks utterly bogus, but I'm trying to
- preserve the behaviour of the original (shown here)...
-
- map_banks[idx]->map_priv_1 =
- start_scan_addr + ((idx > 0) ?
- (mtd_banks[idx-1] ? mtd_banks[idx-1]->size : 0) : 0);
- */
-
- if (idx && mtd_banks[idx-1]) {
- map_banks[idx]->virt += mtd_banks[idx-1]->size;
- map_banks[idx]->phys += mtd_banks[idx-1]->size;
- }
-
- //start to probe flash chips
- mtd_banks[idx] = do_map_probe("cfi_probe", map_banks[idx]);
-
- if (mtd_banks[idx]) {
- mtd_banks[idx]->owner = THIS_MODULE;
- mtd_size += mtd_banks[idx]->size;
- num_banks++;
-
- printk(KERN_INFO "%s: bank%d, name:%s, size:%dbytes \n", __func__, num_banks,
- mtd_banks[idx]->name, mtd_banks[idx]->size);
- }
- }
-
- /* no supported flash chips found */
- if (!num_banks) {
- printk(KERN_NOTICE "TQM8xxL: No support flash chips found!\n");
- ret = -ENXIO;
- goto error_mem;
- }
-
- /*
- * Select Static partition definitions
- */
- part_banks[0].mtd_part = tqm8xxl_partitions;
- part_banks[0].type = "Static image";
- part_banks[0].nums = ARRAY_SIZE(tqm8xxl_partitions);
-
- part_banks[1].mtd_part = tqm8xxl_fs_partitions;
- part_banks[1].type = "Static file system";
- part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions);
-
- for(idx = 0; idx < num_banks ; idx++) {
- if (part_banks[idx].nums == 0)
- printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx);
- else
- printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n",
- idx, part_banks[idx].type);
- mtd_device_register(mtd_banks[idx], part_banks[idx].mtd_part,
- part_banks[idx].nums);
- }
- return 0;
-error_mem:
- for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
- if(map_banks[idx] != NULL) {
- kfree(map_banks[idx]->name);
- map_banks[idx]->name = NULL;
- kfree(map_banks[idx]);
- map_banks[idx] = NULL;
- }
- }
-error:
- iounmap(start_scan_addr);
- return ret;
-}
-
-static void __exit cleanup_tqm_mtd(void)
-{
- unsigned int idx = 0;
- for(idx = 0 ; idx < num_banks ; idx++) {
- /* destroy mtd_info previously allocated */
- if (mtd_banks[idx]) {
- mtd_device_unregister(mtd_banks[idx]);
- map_destroy(mtd_banks[idx]);
- }
- /* release map_info not used anymore */
- kfree(map_banks[idx]->name);
- kfree(map_banks[idx]);
- }
-
- if (start_scan_addr) {
- iounmap(start_scan_addr);
- start_scan_addr = 0;
- }
-}
-
-module_init(init_tqm_mtd);
-module_exit(cleanup_tqm_mtd);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kirk Lee <kirk@hpc.ee.ntu.edu.tw>");
-MODULE_DESCRIPTION("MTD map driver for TQM8xxL boards");
diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c
index 1de390e1c2f..da2cdb5fd6d 100644
--- a/drivers/mtd/maps/tsunami_flash.c
+++ b/drivers/mtd/maps/tsunami_flash.c
@@ -82,11 +82,12 @@ static void __exit cleanup_tsunami_flash(void)
tsunami_flash_mtd = 0;
}
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "map_rom", NULL };
static int __init init_tsunami_flash(void)
{
- static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
- char **type;
+ const char * const *type;
tsunami_tig_writeb(FLASH_ENABLE_BYTE, FLASH_ENABLE_PORT);
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 6793074f3f4..c1af83db520 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -19,17 +19,30 @@
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
+#include <asm/sections.h>
/****************************************************************************/
-extern char _ebss;
+#ifdef CONFIG_MTD_ROM
+#define MAP_NAME "rom"
+#else
+#define MAP_NAME "ram"
+#endif
+
+/*
+ * Blackfin uses uclinux_ram_map during startup, so it must not be static.
+ * Provide a dummy declaration to make sparse happy.
+ */
+extern struct map_info uclinux_ram_map;
struct map_info uclinux_ram_map = {
- .name = "RAM",
- .phys = (unsigned long)&_ebss,
+ .name = MAP_NAME,
.size = 0,
};
+static unsigned long physaddr = -1;
+module_param(physaddr, ulong, S_IRUGO);
+
static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
@@ -61,31 +74,42 @@ static int __init uclinux_mtd_init(void)
struct map_info *mapp;
mapp = &uclinux_ram_map;
+
+ if (physaddr == -1)
+ mapp->phys = (resource_size_t)__bss_stop;
+ else
+ mapp->phys = physaddr;
+
if (!mapp->size)
mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
mapp->bankwidth = 4;
- printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
+ printk("uclinux[mtd]: probe address=0x%x size=0x%x\n",
(int) mapp->phys, (int) mapp->size);
- mapp->virt = ioremap_nocache(mapp->phys, mapp->size);
+ /*
+ * The filesystem is guaranteed to be in direct mapped memory. It
+ * directly follows the kernel's own bss region. Following the same
+ * mechanism used by architectures setting up traditional initrds we
+ * use phys_to_virt to get the virtual address of its start.
+ */
+ mapp->virt = phys_to_virt(mapp->phys);
if (mapp->virt == 0) {
- printk("uclinux[mtd]: ioremap_nocache() failed\n");
+ printk("uclinux[mtd]: no virtual mapping?\n");
return(-EIO);
}
simple_map_init(mapp);
- mtd = do_map_probe("map_ram", mapp);
+ mtd = do_map_probe("map_" MAP_NAME, mapp);
if (!mtd) {
printk("uclinux[mtd]: failed to find a mapping?\n");
- iounmap(mapp->virt);
return(-ENXIO);
}
mtd->owner = THIS_MODULE;
- mtd->point = uclinux_point;
+ mtd->_point = uclinux_point;
mtd->priv = mapp;
uclinux_ram_mtdinfo = mtd;
@@ -103,10 +127,8 @@ static void __exit uclinux_mtd_cleanup(void)
map_destroy(uclinux_ram_mtdinfo);
uclinux_ram_mtdinfo = NULL;
}
- if (uclinux_ram_map.virt) {
- iounmap((void *) uclinux_ram_map.virt);
+ if (uclinux_ram_map.virt)
uclinux_ram_map.virt = 0;
- }
}
/****************************************************************************/
@@ -116,6 +138,6 @@ module_exit(uclinux_mtd_cleanup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
-MODULE_DESCRIPTION("Generic RAM based MTD for uClinux");
+MODULE_DESCRIPTION("Generic MTD for uClinux");
/****************************************************************************/
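A minimal sketch of the uclinux.c parameter handling above, assuming the same physaddr/__bss_stop semantics as the patch; demo_map_filesystem is a hypothetical helper. When the module parameter is left at -1 the image is taken to sit directly after the kernel's BSS, and because that memory is direct-mapped it is reached with phys_to_virt() rather than ioremap().

#include <linux/module.h>
#include <linux/io.h>
#include <asm/sections.h>

static unsigned long physaddr = -1;             /* -1 means "use the default" */
module_param(physaddr, ulong, S_IRUGO);

static void *demo_map_filesystem(void)
{
        unsigned long phys;

        /* Default: the filesystem image follows the kernel's own BSS. */
        phys = (physaddr == -1) ? (unsigned long)__bss_stop : physaddr;

        /* Direct-mapped memory: no ioremap() needed. */
        return phys_to_virt(phys);
}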
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
deleted file mode 100644
index 5e68de73eab..00000000000
--- a/drivers/mtd/maps/vmax301.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/* ######################################################################
-
- Tempustech VMAX SBC301 MTD Driver.
-
- The VMAx 301 is a SBC based on . It
- comes with three builtin AMD 29F016B flash chips and a socket for SRAM or
- more flash. Each unit has it's own 8k mapping into a settable region
- (0xD8000). There are two 8k mappings for each MTD, the first is always set
- to the lower 8k of the device the second is paged. Writing a 16 bit page
- value to anywhere in the first 8k will cause the second 8k to page around.
-
- To boot the device a bios extension must be installed into the first 8k
- of flash that is smart enough to copy itself down, page in the rest of
- itself and begin executing.
-
- ##################################################################### */
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <asm/io.h>
-
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-
-
-#define WINDOW_START 0xd8000
-#define WINDOW_LENGTH 0x2000
-#define WINDOW_SHIFT 25
-#define WINDOW_MASK 0x1FFF
-
-/* Actually we could use two spinlocks, but we'd have to have
- more private space in the struct map_info. We lose a little
- performance like this, but we'd probably lose more by having
- the extra indirection from having one of the map->map_priv
- fields pointing to yet another private struct.
-*/
-static DEFINE_SPINLOCK(vmax301_spin);
-
-static void __vmax301_page(struct map_info *map, unsigned long page)
-{
- writew(page, map->map_priv_2 - WINDOW_LENGTH);
- map->map_priv_1 = page;
-}
-
-static inline void vmax301_page(struct map_info *map,
- unsigned long ofs)
-{
- unsigned long page = (ofs >> WINDOW_SHIFT);
- if (map->map_priv_1 != page)
- __vmax301_page(map, page);
-}
-
-static map_word vmax301_read8(struct map_info *map, unsigned long ofs)
-{
- map_word ret;
- spin_lock(&vmax301_spin);
- vmax301_page(map, ofs);
- ret.x[0] = readb(map->map_priv_2 + (ofs & WINDOW_MASK));
- spin_unlock(&vmax301_spin);
- return ret;
-}
-
-static void vmax301_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- while(len) {
- unsigned long thislen = len;
- if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
- thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
- spin_lock(&vmax301_spin);
- vmax301_page(map, from);
- memcpy_fromio(to, map->map_priv_2 + from, thislen);
- spin_unlock(&vmax301_spin);
- to += thislen;
- from += thislen;
- len -= thislen;
- }
-}
-
-static void vmax301_write8(struct map_info *map, map_word d, unsigned long adr)
-{
- spin_lock(&vmax301_spin);
- vmax301_page(map, adr);
- writeb(d.x[0], map->map_priv_2 + (adr & WINDOW_MASK));
- spin_unlock(&vmax301_spin);
-}
-
-static void vmax301_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- while(len) {
- unsigned long thislen = len;
- if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
- thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
-
- spin_lock(&vmax301_spin);
- vmax301_page(map, to);
- memcpy_toio(map->map_priv_2 + to, from, thislen);
- spin_unlock(&vmax301_spin);
- to += thislen;
- from += thislen;
- len -= thislen;
- }
-}
-
-static struct map_info vmax_map[2] = {
- {
- .name = "VMAX301 Internal Flash",
- .phys = NO_XIP,
- .size = 3*2*1024*1024,
- .bankwidth = 1,
- .read = vmax301_read8,
- .copy_from = vmax301_copy_from,
- .write = vmax301_write8,
- .copy_to = vmax301_copy_to,
- .map_priv_1 = WINDOW_START + WINDOW_LENGTH,
- .map_priv_2 = 0xFFFFFFFF
- },
- {
- .name = "VMAX301 Socket",
- .phys = NO_XIP,
- .size = 0,
- .bankwidth = 1,
- .read = vmax301_read8,
- .copy_from = vmax301_copy_from,
- .write = vmax301_write8,
- .copy_to = vmax301_copy_to,
- .map_priv_1 = WINDOW_START + (3*WINDOW_LENGTH),
- .map_priv_2 = 0xFFFFFFFF
- }
-};
-
-static struct mtd_info *vmax_mtd[2] = {NULL, NULL};
-
-static void __exit cleanup_vmax301(void)
-{
- int i;
-
- for (i=0; i<2; i++) {
- if (vmax_mtd[i]) {
- mtd_device_unregister(vmax_mtd[i]);
- map_destroy(vmax_mtd[i]);
- }
- }
- iounmap((void *)vmax_map[0].map_priv_1 - WINDOW_START);
-}
-
-static int __init init_vmax301(void)
-{
- int i;
- unsigned long iomapadr;
- // Print out our little header..
- printk("Tempustech VMAX 301 MEM:0x%x-0x%x\n",WINDOW_START,
- WINDOW_START+4*WINDOW_LENGTH);
-
- iomapadr = (unsigned long)ioremap(WINDOW_START, WINDOW_LENGTH*4);
- if (!iomapadr) {
- printk("Failed to ioremap memory region\n");
- return -EIO;
- }
- /* Put the address in the map's private data area.
- We store the actual MTD IO address rather than the
- address of the first half, because it's used more
- often.
- */
- vmax_map[0].map_priv_2 = iomapadr + WINDOW_START;
- vmax_map[1].map_priv_2 = iomapadr + (3*WINDOW_START);
-
- for (i=0; i<2; i++) {
- vmax_mtd[i] = do_map_probe("cfi_probe", &vmax_map[i]);
- if (!vmax_mtd[i])
- vmax_mtd[i] = do_map_probe("jedec", &vmax_map[i]);
- if (!vmax_mtd[i])
- vmax_mtd[i] = do_map_probe("map_ram", &vmax_map[i]);
- if (!vmax_mtd[i])
- vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]);
- if (vmax_mtd[i]) {
- vmax_mtd[i]->owner = THIS_MODULE;
- mtd_device_register(vmax_mtd[i], NULL, 0);
- }
- }
-
- if (!vmax_mtd[0] && !vmax_mtd[1]) {
- iounmap((void *)iomapadr);
- return -ENXIO;
- }
-
- return 0;
-}
-
-module_init(init_vmax301);
-module_exit(cleanup_vmax301);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("MTD map driver for Tempustech VMAX SBC301 board");
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 3a04b078576..6b223cfe92b 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -360,9 +360,6 @@ static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
int index = 0, retval, partition, leftover, numblocks;
unsigned char cx;
- if (len < 1)
- return -EIO;
-
mpart = mtd->priv;
mdev = mpart->mdev;
partition = mpart->partition;
@@ -434,11 +431,6 @@ static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
partition = mpart->partition;
card = maple_get_drvdata(mdev);
- /* simple sanity checks */
- if (len < 1) {
- error = -EIO;
- goto failed;
- }
numblocks = card->parts[partition].numblocks;
if (to + len > numblocks * card->blocklen)
len = numblocks * card->blocklen - to;
@@ -544,9 +536,9 @@ static void vmu_queryblocks(struct mapleq *mq)
mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
mtd_cur->size = part_cur->numblocks * card->blocklen;
mtd_cur->erasesize = card->blocklen;
- mtd_cur->write = vmu_flash_write;
- mtd_cur->read = vmu_flash_read;
- mtd_cur->sync = vmu_flash_sync;
+ mtd_cur->_write = vmu_flash_write;
+ mtd_cur->_read = vmu_flash_read;
+ mtd_cur->_sync = vmu_flash_sync;
mtd_cur->writesize = card->blocklen;
mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
@@ -604,7 +596,7 @@ fail_name:
}
/* Handles very basic info about the flash, queries for details */
-static int __devinit vmu_connect(struct maple_device *mdev)
+static int vmu_connect(struct maple_device *mdev)
{
unsigned long test_flash_data, basic_flash_data;
int c, error;
@@ -698,7 +690,7 @@ fail_nomem:
return error;
}
-static void __devexit vmu_disconnect(struct maple_device *mdev)
+static void vmu_disconnect(struct maple_device *mdev)
{
struct memcard *card;
struct mdev_part *mpart;
@@ -780,7 +772,7 @@ static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
}
-static int __devinit probe_maple_vmu(struct device *dev)
+static int probe_maple_vmu(struct device *dev)
{
int error;
struct maple_device *mdev = to_maple_dev(dev);
@@ -797,7 +789,7 @@ static int __devinit probe_maple_vmu(struct device *dev)
return 0;
}
-static int __devexit remove_maple_vmu(struct device *dev)
+static int remove_maple_vmu(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
@@ -810,7 +802,7 @@ static struct maple_driver vmu_flash_driver = {
.drv = {
.name = "Dreamcast_visual_memory",
.probe = probe_maple_vmu,
- .remove = __devexit_p(remove_maple_vmu),
+ .remove = remove_maple_vmu,
},
};
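Purely as an illustration of the vmu-flash.c hunk above: with the mtd_info method pointers moved to underscore-prefixed names, drivers fill in _read/_write (and friends) while everyone else goes through the mtd_read()/mtd_write()/mtd_sync() wrappers. demo_read, demo_write and demo_setup below are hypothetical stubs.

#include <linux/mtd/mtd.h>

static int demo_read(struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char *buf)
{
        *retlen = 0;    /* a real driver would transfer data and set *retlen */
        return 0;
}

static int demo_write(struct mtd_info *mtd, loff_t to, size_t len,
                      size_t *retlen, const u_char *buf)
{
        *retlen = 0;
        return 0;
}

static void demo_setup(struct mtd_info *mtd)
{
        mtd->_read  = demo_read;        /* was mtd->read  */
        mtd->_write = demo_write;       /* was mtd->write */
}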
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
deleted file mode 100644
index 901ce968efa..00000000000
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Map for flash chips on Wind River PowerQUICC II SBC82xx board.
- *
- * Copyright (C) 2004 Red Hat, Inc.
- *
- * Author: David Woodhouse <dwmw2@infradead.org>
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/immap_cpm2.h>
-
-static struct mtd_info *sbcmtd[3];
-static struct mtd_partition *sbcmtd_parts[3];
-
-struct map_info sbc82xx_flash_map[3] = {
- {.name = "Boot flash"},
- {.name = "Alternate boot flash"},
- {.name = "User flash"}
-};
-
-static struct mtd_partition smallflash_parts[] = {
- {
- .name = "space",
- .size = 0x100000,
- .offset = 0,
- }, {
- .name = "bootloader",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND,
- }
-};
-
-static struct mtd_partition bigflash_parts[] = {
- {
- .name = "bootloader",
- .size = 0x00100000,
- .offset = 0,
- }, {
- .name = "file system",
- .size = 0x01f00000,
- .offset = MTDPART_OFS_APPEND,
- }, {
- .name = "boot config",
- .size = 0x00100000,
- .offset = MTDPART_OFS_APPEND,
- }, {
- .name = "space",
- .size = 0x01f00000,
- .offset = MTDPART_OFS_APPEND,
- }
-};
-
-static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL};
-
-#define init_sbc82xx_one_flash(map, br, or) \
-do { \
- (map).phys = (br & 1) ? (br & 0xffff8000) : 0; \
- (map).size = (br & 1) ? (~(or & 0xffff8000) + 1) : 0; \
- switch (br & 0x00001800) { \
- case 0x00000000: \
- case 0x00000800: (map).bankwidth = 1; break; \
- case 0x00001000: (map).bankwidth = 2; break; \
- case 0x00001800: (map).bankwidth = 4; break; \
- } \
-} while (0);
-
-static int __init init_sbc82xx_flash(void)
-{
- volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl;
- int bigflash;
- int i;
-
-#ifdef CONFIG_SBC8560
- mc = ioremap(0xff700000 + 0x5000, sizeof(memctl_cpm2_t));
-#else
- mc = &cpm2_immr->im_memctl;
-#endif
-
- bigflash = 1;
- if ((mc->memc_br0 & 0x00001800) == 0x00001800)
- bigflash = 0;
-
- init_sbc82xx_one_flash(sbc82xx_flash_map[0], mc->memc_br0, mc->memc_or0);
- init_sbc82xx_one_flash(sbc82xx_flash_map[1], mc->memc_br6, mc->memc_or6);
- init_sbc82xx_one_flash(sbc82xx_flash_map[2], mc->memc_br1, mc->memc_or1);
-
-#ifdef CONFIG_SBC8560
- iounmap((void *) mc);
-#endif
-
- for (i=0; i<3; i++) {
- int8_t flashcs[3] = { 0, 6, 1 };
- int nr_parts;
-
- printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d",
- sbc82xx_flash_map[i].name,
- (sbc82xx_flash_map[i].size >> 20),
- flashcs[i]);
- if (!sbc82xx_flash_map[i].phys) {
- /* We know it can't be at zero. */
- printk("): disabled by bootloader.\n");
- continue;
- }
- printk(" at %08lx)\n", sbc82xx_flash_map[i].phys);
-
- sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys, sbc82xx_flash_map[i].size);
-
- if (!sbc82xx_flash_map[i].virt) {
- printk("Failed to ioremap\n");
- continue;
- }
-
- simple_map_init(&sbc82xx_flash_map[i]);
-
- sbcmtd[i] = do_map_probe("cfi_probe", &sbc82xx_flash_map[i]);
-
- if (!sbcmtd[i])
- continue;
-
- sbcmtd[i]->owner = THIS_MODULE;
-
- nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes,
- &sbcmtd_parts[i], 0);
- if (nr_parts > 0) {
- mtd_device_register(sbcmtd[i], sbcmtd_parts[i],
- nr_parts);
- continue;
- }
-
- /* No partitioning detected. Use default */
- if (i == 2) {
- mtd_device_register(sbcmtd[i], NULL, 0);
- } else if (i == bigflash) {
- mtd_device_register(sbcmtd[i], bigflash_parts,
- ARRAY_SIZE(bigflash_parts));
- } else {
- mtd_device_register(sbcmtd[i], smallflash_parts,
- ARRAY_SIZE(smallflash_parts));
- }
- }
- return 0;
-}
-
-static void __exit cleanup_sbc82xx_flash(void)
-{
- int i;
-
- for (i=0; i<3; i++) {
- if (!sbcmtd[i])
- continue;
-
- if (i<2 || sbcmtd_parts[i])
- mtd_device_unregister(sbcmtd[i]);
- else
- mtd_device_unregister(sbcmtd[i]);
-
- kfree(sbcmtd_parts[i]);
- map_destroy(sbcmtd[i]);
-
- iounmap((void *)sbc82xx_flash_map[i].virt);
- sbc82xx_flash_map[i].virt = 0;
- }
-}
-
-module_init(init_sbc82xx_flash);
-module_exit(cleanup_sbc82xx_flash);
-
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("Flash map driver for WindRiver PowerQUICC II");
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ca385697446..43e30992a36 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -30,9 +30,7 @@
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
-#include <linux/init.h>
#include <linux/mutex.h>
-#include <linux/kthread.h>
#include <asm/uaccess.h>
#include "mtdcore.h"
@@ -84,12 +82,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
block = blk_rq_pos(req) << 9 >> tr->blkshift;
nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
-
- buf = req->buffer;
+ buf = bio_data(req->bio);
if (req->cmd_type != REQ_TYPE_FS)
return -EIO;
+ if (req->cmd_flags & REQ_FLUSH)
+ return tr->flush(dev);
+
if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
get_capacity(req->rq_disk))
return -EIO;
@@ -121,16 +121,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
- if (kthread_should_stop())
- return 1;
-
return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
-static int mtd_blktrans_thread(void *arg)
+static void mtd_blktrans_work(struct work_struct *work)
{
- struct mtd_blktrans_dev *dev = arg;
+ struct mtd_blktrans_dev *dev =
+ container_of(work, struct mtd_blktrans_dev, work);
struct mtd_blktrans_ops *tr = dev->tr;
struct request_queue *rq = dev->rq;
struct request *req = NULL;
@@ -138,7 +136,7 @@ static int mtd_blktrans_thread(void *arg)
spin_lock_irq(rq->queue_lock);
- while (!kthread_should_stop()) {
+ while (1) {
int res;
dev->bg_stop = false;
@@ -156,15 +154,7 @@ static int mtd_blktrans_thread(void *arg)
background_done = !dev->bg_stop;
continue;
}
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (kthread_should_stop())
- set_current_state(TASK_RUNNING);
-
- spin_unlock_irq(rq->queue_lock);
- schedule();
- spin_lock_irq(rq->queue_lock);
- continue;
+ break;
}
spin_unlock_irq(rq->queue_lock);
@@ -185,8 +175,6 @@ static int mtd_blktrans_thread(void *arg)
__blk_end_request_all(req, -EIO);
spin_unlock_irq(rq->queue_lock);
-
- return 0;
}
static void mtd_blktrans_request(struct request_queue *rq)
@@ -199,10 +187,8 @@ static void mtd_blktrans_request(struct request_queue *rq)
if (!dev)
while ((req = blk_fetch_request(rq)) != NULL)
__blk_end_request_all(req, -ENODEV);
- else {
- dev->bg_stop = true;
- wake_up_process(dev->thread);
- }
+ else
+ queue_work(dev->wq, &dev->work);
}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
@@ -215,7 +201,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
mutex_lock(&dev->lock);
- if (dev->open++)
+ if (dev->open)
goto unlock;
kref_get(&dev->ref);
@@ -233,8 +219,10 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
ret = __get_mtd_device(dev->mtd);
if (ret)
goto error_release;
+ dev->file_mode = mode;
unlock:
+ dev->open++;
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
@@ -250,13 +238,12 @@ error_put:
return ret;
}
-static int blktrans_release(struct gendisk *disk, fmode_t mode)
+static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
- int ret = 0;
if (!dev)
- return ret;
+ return;
mutex_lock(&dev->lock);
@@ -267,13 +254,13 @@ static int blktrans_release(struct gendisk *disk, fmode_t mode)
module_put(dev->tr->owner);
if (dev->mtd) {
- ret = dev->tr->release ? dev->tr->release(dev) : 0;
+ if (dev->tr->release)
+ dev->tr->release(dev);
__put_mtd_device(dev->mtd);
}
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
- return ret;
}
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -323,7 +310,7 @@ unlock:
return ret;
}
-static const struct block_device_operations mtd_blktrans_ops = {
+static const struct block_device_operations mtd_block_ops = {
.owner = THIS_MODULE,
.open = blktrans_open,
.release = blktrans_release,
@@ -399,7 +386,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->private_data = new;
gd->major = tr->major;
gd->first_minor = (new->devnum) << tr->part_bits;
- gd->fops = &mtd_blktrans_ops;
+ gd->fops = &mtd_block_ops;
if (tr->part_bits)
if (new->devnum < 26)
@@ -423,9 +410,14 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (!new->rq)
goto error3;
+ if (tr->flush)
+ blk_queue_flush(new->rq, REQ_FLUSH);
+
new->rq->queuedata = new;
blk_queue_logical_block_size(new->rq, tr->blksize);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
+
if (tr->discard) {
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
new->rq->limits.max_discard_sectors = UINT_MAX;
@@ -433,14 +425,13 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->queue = new->rq;
- /* Create processing thread */
- /* TODO: workqueue ? */
- new->thread = kthread_run(mtd_blktrans_thread, new,
- "%s%d", tr->name, new->mtd->index);
- if (IS_ERR(new->thread)) {
- ret = PTR_ERR(new->thread);
+ /* Create processing workqueue */
+ new->wq = alloc_workqueue("%s%d", 0, 0,
+ tr->name, new->mtd->index);
+ if (!new->wq)
goto error4;
- }
+ INIT_WORK(&new->work, mtd_blktrans_work);
+
gd->driverfs_dev = &new->mtd->dev;
if (new->readonly)
@@ -480,9 +471,8 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
/* Stop new requests to arrive */
del_gendisk(old->disk);
-
- /* Stop the thread */
- kthread_stop(old->thread);
+ /* Stop workqueue. This will perform any pending request. */
+ destroy_workqueue(old->wq);
/* Kill current requests */
spin_lock_irqsave(&old->queue_lock, flags);
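To illustrate the kthread-to-workqueue conversion in mtd_blkdevs.c above in isolation: request processing moves into a work item that is queued whenever the block layer delivers requests, and destroy_workqueue() drains pending work on removal. The demo_* names and structure below are hypothetical, not the driver's real types. Compared with the old kthread there is no idle loop to park and wake; work only runs when something has been queued.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_dev {
        struct workqueue_struct *wq;
        struct work_struct work;
        /* ... request queue, locks, gendisk, etc. ... */
};

static void demo_work(struct work_struct *work)
{
        struct demo_dev *dev = container_of(work, struct demo_dev, work);

        /* fetch and service queued requests here */
        (void)dev;
}

static int demo_start(struct demo_dev *dev, const char *name, int index)
{
        dev->wq = alloc_workqueue("%s%d", 0, 0, name, index);
        if (!dev->wq)
                return -ENOMEM;
        INIT_WORK(&dev->work, demo_work);
        return 0;
}

/* Called from the request function: just kick the work item. */
static void demo_kick(struct demo_dev *dev)
{
        queue_work(dev->wq, &dev->work);
}

static void demo_stop(struct demo_dev *dev)
{
        destroy_workqueue(dev->wq);     /* flushes any pending work first */
}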
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 3326615ad66..485ea751c7f 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -32,6 +32,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/mutex.h>
+#include <linux/major.h>
struct mtdblk_dev {
@@ -44,7 +45,7 @@ struct mtdblk_dev {
enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
};
-static struct mutex mtdblks_lock;
+static DEFINE_MUTEX(mtdblks_lock);
/*
* Cache stuff...
@@ -85,7 +86,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&wait_q, &wait);
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
@@ -102,7 +103,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
* Next, write the data to flash.
*/
- ret = mtd->write(mtd, pos, len, &retlen, buf);
+ ret = mtd_write(mtd, pos, len, &retlen, buf);
if (ret)
return ret;
if (retlen != len)
@@ -119,7 +120,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
if (mtdblk->cache_state != STATE_DIRTY)
return 0;
- DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
+ pr_debug("mtdblock: writing cached data for \"%s\" "
"at 0x%lx, size 0x%x\n", mtd->name,
mtdblk->cache_offset, mtdblk->cache_size);
@@ -148,11 +149,11 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
size_t retlen;
int ret;
- DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
+ pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
if (!sect_size)
- return mtd->write(mtd, pos, len, &retlen, buf);
+ return mtd_write(mtd, pos, len, &retlen, buf);
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -184,8 +185,8 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_offset != sect_start) {
/* fill the cache with the current sector */
mtdblk->cache_state = STATE_EMPTY;
- ret = mtd->read(mtd, sect_start, sect_size,
- &retlen, mtdblk->cache_data);
+ ret = mtd_read(mtd, sect_start, sect_size,
+ &retlen, mtdblk->cache_data);
if (ret)
return ret;
if (retlen != sect_size)
@@ -218,11 +219,11 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
size_t retlen;
int ret;
- DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
+ pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
if (!sect_size)
- return mtd->read(mtd, pos, len, &retlen, buf);
+ return mtd_read(mtd, pos, len, &retlen, buf);
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -241,7 +242,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_offset == sect_start) {
memcpy (buf, mtdblk->cache_data + offset, size);
} else {
- ret = mtd->read(mtd, pos, size, &retlen, buf);
+ ret = mtd_read(mtd, pos, size, &retlen, buf);
if (ret)
return ret;
if (retlen != size)
@@ -283,7 +284,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
- DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
+ pr_debug("mtdblock_open\n");
mutex_lock(&mtdblks_lock);
if (mtdblk->count) {
@@ -303,16 +304,16 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
mutex_unlock(&mtdblks_lock);
- DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
+ pr_debug("ok\n");
return 0;
}
-static int mtdblock_release(struct mtd_blktrans_dev *mbd)
+static void mtdblock_release(struct mtd_blktrans_dev *mbd)
{
struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
- DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
+ pr_debug("mtdblock_release\n");
mutex_lock(&mtdblks_lock);
@@ -321,17 +322,18 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
mutex_unlock(&mtdblk->cache_mutex);
if (!--mtdblk->count) {
- /* It was the last usage. Free the cache */
- if (mbd->mtd->sync)
- mbd->mtd->sync(mbd->mtd);
+ /*
+ * It was the last usage. Free the cache, but only sync if
+ * opened for writing.
+ */
+ if (mbd->file_mode & FMODE_WRITE)
+ mtd_sync(mbd->mtd);
vfree(mtdblk->cache_data);
}
mutex_unlock(&mtdblks_lock);
- DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
-
- return 0;
+ pr_debug("ok\n");
}
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
@@ -341,9 +343,7 @@ static int mtdblock_flush(struct mtd_blktrans_dev *dev)
mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
-
- if (dev->mtd->sync)
- dev->mtd->sync(dev->mtd);
+ mtd_sync(dev->mtd);
return 0;
}
@@ -374,7 +374,7 @@ static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
static struct mtd_blktrans_ops mtdblock_tr = {
.name = "mtdblock",
- .major = 31,
+ .major = MTD_BLOCK_MAJOR,
.part_bits = 0,
.blksize = 512,
.open = mtdblock_open,
@@ -389,8 +389,6 @@ static struct mtd_blktrans_ops mtdblock_tr = {
static int __init init_mtdblock(void)
{
- mutex_init(&mtdblks_lock);
-
return register_mtd_blktrans(&mtdblock_tr);
}
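A short, hypothetical companion sketch for the mtdblock.c conversion above, showing the caller side of the same API move: sector I/O goes through the mtd_read()/mtd_write() wrappers rather than the raw method pointers. demo_read_sector is an illustrative helper, not taken from the patch.

#include <linux/errno.h>
#include <linux/mtd/mtd.h>

/* Read one 512-byte sector through the wrapper API. */
static int demo_read_sector(struct mtd_info *mtd, unsigned long sector,
                            unsigned char *buf)
{
        size_t retlen;
        int ret;

        ret = mtd_read(mtd, (loff_t)sector * 512, 512, &retlen, buf);
        if (ret)
                return ret;
        return retlen == 512 ? 0 : -EIO;
}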
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 795a8c0a05b..fb5dc89369d 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -23,13 +23,15 @@
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
+#include <linux/module.h>
+#include <linux/major.h>
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
size_t retlen;
- if (dev->mtd->read(dev->mtd, (block * 512), 512, &retlen, buf))
+ if (mtd_read(dev->mtd, (block * 512), 512, &retlen, buf))
return 1;
return 0;
}
@@ -39,7 +41,7 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
{
size_t retlen;
- if (dev->mtd->write(dev->mtd, (block * 512), 512, &retlen, buf))
+ if (mtd_write(dev->mtd, (block * 512), 512, &retlen, buf))
return 1;
return 0;
}
@@ -69,7 +71,7 @@ static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
static struct mtd_blktrans_ops mtdblock_tr = {
.name = "mtdblock",
- .major = 31,
+ .major = MTD_BLOCK_MAJOR,
.part_bits = 0,
.blksize = 512,
.readsect = mtdblock_readsect,
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 3f92731a5b9..a0f54e80670 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -31,19 +31,21 @@
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
+#include <linux/magic.h>
+#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>
#include <asm/uaccess.h>
-#define MTD_INODE_FS_MAGIC 0x11307854
+#include "mtdcore.h"
+
static DEFINE_MUTEX(mtd_mutex);
-static struct vfsmount *mtd_inode_mnt __read_mostly;
/*
* Data structure to hold the pointer to the mtd device as well
- * as mode information ofr various use cases.
+ * as mode information of various use cases.
*/
struct mtd_file_info {
struct mtd_info *mtd;
@@ -51,33 +53,17 @@ struct mtd_file_info {
enum mtd_file_modes mode;
};
-static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
+static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
struct mtd_file_info *mfi = file->private_data;
- struct mtd_info *mtd = mfi->mtd;
-
- switch (orig) {
- case SEEK_SET:
- break;
- case SEEK_CUR:
- offset += file->f_pos;
- break;
- case SEEK_END:
- offset += mtd->size;
- break;
- default:
- return -EINVAL;
- }
-
- if (offset >= 0 && offset <= mtd->size)
- return file->f_pos = offset;
-
- return -EINVAL;
+ return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
}
+static int count;
+static struct vfsmount *mnt;
+static struct file_system_type mtd_inodefs_type;
-
-static int mtd_open(struct inode *inode, struct file *file)
+static int mtdchar_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
int devnum = minor >> 1;
@@ -86,12 +72,16 @@ static int mtd_open(struct inode *inode, struct file *file)
struct mtd_file_info *mfi;
struct inode *mtd_ino;
- DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
+ pr_debug("MTD_open\n");
/* You can't open the RO devices RW */
if ((file->f_mode & FMODE_WRITE) && (minor & 1))
return -EACCES;
+ ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count);
+ if (ret)
+ return ret;
+
mutex_lock(&mtd_mutex);
mtd = get_mtd_device(NULL, devnum);
@@ -101,16 +91,14 @@ static int mtd_open(struct inode *inode, struct file *file)
}
if (mtd->type == MTD_ABSENT) {
- put_mtd_device(mtd);
ret = -ENODEV;
- goto out;
+ goto out1;
}
- mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
+ mtd_ino = iget_locked(mnt->mnt_sb, devnum);
if (!mtd_ino) {
- put_mtd_device(mtd);
ret = -ENOMEM;
- goto out;
+ goto out1;
}
if (mtd_ino->i_state & I_NEW) {
mtd_ino->i_private = mtd;
@@ -122,49 +110,53 @@ static int mtd_open(struct inode *inode, struct file *file)
/* You can't open it RW if it's not a writeable device */
if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
- iput(mtd_ino);
- put_mtd_device(mtd);
ret = -EACCES;
- goto out;
+ goto out2;
}
mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
if (!mfi) {
- iput(mtd_ino);
- put_mtd_device(mtd);
ret = -ENOMEM;
- goto out;
+ goto out2;
}
mfi->ino = mtd_ino;
mfi->mtd = mtd;
file->private_data = mfi;
+ mutex_unlock(&mtd_mutex);
+ return 0;
+out2:
+ iput(mtd_ino);
+out1:
+ put_mtd_device(mtd);
out:
mutex_unlock(&mtd_mutex);
+ simple_release_fs(&mnt, &count);
return ret;
-} /* mtd_open */
+} /* mtdchar_open */
/*====================================================================*/
-static int mtd_close(struct inode *inode, struct file *file)
+static int mtdchar_close(struct inode *inode, struct file *file)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
- DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
+ pr_debug("MTD_close\n");
/* Only sync if opened RW */
- if ((file->f_mode & FMODE_WRITE) && mtd->sync)
- mtd->sync(mtd);
+ if ((file->f_mode & FMODE_WRITE))
+ mtd_sync(mtd);
iput(mfi->ino);
put_mtd_device(mtd);
file->private_data = NULL;
kfree(mfi);
+ simple_release_fs(&mnt, &count);
return 0;
-} /* mtd_close */
+} /* mtdchar_close */
/* Back in June 2001, dwmw2 wrote:
*
@@ -184,18 +176,19 @@ static int mtd_close(struct inode *inode, struct file *file)
* alignment requirements are not met in the NAND subdriver.
*/
-static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
+static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
- size_t retlen=0;
+ size_t retlen;
size_t total_retlen=0;
int ret=0;
int len;
size_t size = count;
char *kbuf;
- DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
+ pr_debug("MTD_read\n");
if (*ppos + count > mtd->size)
count = mtd->size - *ppos;
@@ -211,38 +204,40 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
len = min_t(size_t, count, size);
switch (mfi->mode) {
- case MTD_MODE_OTP_FACTORY:
- ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ case MTD_FILE_MODE_OTP_FACTORY:
+ ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
- case MTD_MODE_OTP_USER:
- ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ case MTD_FILE_MODE_OTP_USER:
+ ret = mtd_read_user_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
- case MTD_MODE_RAW:
+ case MTD_FILE_MODE_RAW:
{
struct mtd_oob_ops ops;
- ops.mode = MTD_OOB_RAW;
+ ops.mode = MTD_OPS_RAW;
ops.datbuf = kbuf;
ops.oobbuf = NULL;
ops.len = len;
- ret = mtd->read_oob(mtd, *ppos, &ops);
+ ret = mtd_read_oob(mtd, *ppos, &ops);
retlen = ops.retlen;
break;
}
default:
- ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
}
- /* Nand returns -EBADMSG on ecc errors, but it returns
+ /* Nand returns -EBADMSG on ECC errors, but it returns
* the data. For our userspace tools it is important
- * to dump areas with ecc errors !
+ * to dump areas with ECC errors!
* For kernel internal usage it also might return -EUCLEAN
* to signal the caller that a bitflip has occurred and has
* been corrected by the ECC algorithm.
* Userspace software which accesses NAND this way
* must be aware of the fact that it deals with NAND
*/
- if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
+ if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
*ppos += retlen;
if (copy_to_user(buf, kbuf, retlen)) {
kfree(kbuf);
@@ -265,9 +260,10 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
kfree(kbuf);
return total_retlen;
-} /* mtd_read */
+} /* mtdchar_read */
-static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
+static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -278,7 +274,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
int ret=0;
int len;
- DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
+ pr_debug("MTD_write\n");
if (*ppos == mtd->size)
return -ENOSPC;
@@ -302,34 +298,41 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
}
switch (mfi->mode) {
- case MTD_MODE_OTP_FACTORY:
+ case MTD_FILE_MODE_OTP_FACTORY:
ret = -EROFS;
break;
- case MTD_MODE_OTP_USER:
- if (!mtd->write_user_prot_reg) {
- ret = -EOPNOTSUPP;
- break;
- }
- ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ case MTD_FILE_MODE_OTP_USER:
+ ret = mtd_write_user_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
- case MTD_MODE_RAW:
+ case MTD_FILE_MODE_RAW:
{
struct mtd_oob_ops ops;
- ops.mode = MTD_OOB_RAW;
+ ops.mode = MTD_OPS_RAW;
ops.datbuf = kbuf;
ops.oobbuf = NULL;
+ ops.ooboffs = 0;
ops.len = len;
- ret = mtd->write_oob(mtd, *ppos, &ops);
+ ret = mtd_write_oob(mtd, *ppos, &ops);
retlen = ops.retlen;
break;
}
default:
- ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
}
+
+ /*
+ * Return -ENOSPC only if no data could be written at all.
+ * Otherwise just return the number of bytes that actually
+ * have been written.
+ */
+ if ((ret == -ENOSPC) && (total_retlen))
+ break;
+
if (!ret) {
*ppos += retlen;
total_retlen += retlen;
@@ -344,7 +347,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
kfree(kbuf);
return total_retlen;
-} /* mtd_write */
+} /* mtdchar_write */
/*======================================================================
@@ -356,40 +359,41 @@ static void mtdchar_erase_callback (struct erase_info *instr)
wake_up((wait_queue_head_t *)instr->priv);
}
-#ifdef CONFIG_HAVE_MTD_OTP
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
struct mtd_info *mtd = mfi->mtd;
- int ret = 0;
+ size_t retlen;
switch (mode) {
case MTD_OTP_FACTORY:
- if (!mtd->read_fact_prot_reg)
- ret = -EOPNOTSUPP;
- else
- mfi->mode = MTD_MODE_OTP_FACTORY;
+ if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
+ -EOPNOTSUPP)
+ return -EOPNOTSUPP;
+
+ mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
break;
case MTD_OTP_USER:
- if (!mtd->read_fact_prot_reg)
- ret = -EOPNOTSUPP;
- else
- mfi->mode = MTD_MODE_OTP_USER;
+ if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
+ -EOPNOTSUPP)
+ return -EOPNOTSUPP;
+
+ mfi->mode = MTD_FILE_MODE_OTP_USER;
break;
- default:
- ret = -EINVAL;
case MTD_OTP_OFF:
+ mfi->mode = MTD_FILE_MODE_NORMAL;
break;
+ default:
+ return -EINVAL;
}
- return ret;
+
+ return 0;
}
-#else
-# define otp_select_filemode(f,m) -EOPNOTSUPP
-#endif
-static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
+static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
+ struct mtd_file_info *mfi = file->private_data;
struct mtd_oob_ops ops;
uint32_t retlen;
int ret = 0;
@@ -400,7 +404,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
if (length > 4096)
return -EINVAL;
- if (!mtd->write_oob)
+ if (!mtd->_write_oob)
ret = -EOPNOTSUPP;
else
ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
@@ -409,9 +413,10 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
return ret;
ops.ooblen = length;
- ops.ooboffs = start & (mtd->oobsize - 1);
+ ops.ooboffs = start & (mtd->writesize - 1);
ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
+ MTD_OPS_PLACE_OOB;
if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
return -EINVAL;
@@ -420,8 +425,8 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
if (IS_ERR(ops.oobbuf))
return PTR_ERR(ops.oobbuf);
- start &= ~((uint64_t)mtd->oobsize - 1);
- ret = mtd->write_oob(mtd, start, &ops);
+ start &= ~((uint64_t)mtd->writesize - 1);
+ ret = mtd_write_oob(mtd, start, &ops);
if (ops.oobretlen > 0xFFFFFFFFU)
ret = -EOVERFLOW;
@@ -433,27 +438,25 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
return ret;
}
-static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
- uint32_t length, void __user *ptr, uint32_t __user *retp)
+static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
+ uint64_t start, uint32_t length, void __user *ptr,
+ uint32_t __user *retp)
{
+ struct mtd_file_info *mfi = file->private_data;
struct mtd_oob_ops ops;
int ret = 0;
if (length > 4096)
return -EINVAL;
- if (!mtd->read_oob)
- ret = -EOPNOTSUPP;
- else
- ret = access_ok(VERIFY_WRITE, ptr,
- length) ? 0 : -EFAULT;
- if (ret)
- return ret;
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
ops.ooblen = length;
- ops.ooboffs = start & (mtd->oobsize - 1);
+ ops.ooboffs = start & (mtd->writesize - 1);
ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
+ MTD_OPS_PLACE_OOB;
if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
return -EINVAL;
@@ -462,8 +465,8 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
if (!ops.oobbuf)
return -ENOMEM;
- start &= ~((uint64_t)mtd->oobsize - 1);
- ret = mtd->read_oob(mtd, start, &ops);
+ start &= ~((uint64_t)mtd->writesize - 1);
+ ret = mtd_read_oob(mtd, start, &ops);
if (put_user(ops.oobretlen, retp))
ret = -EFAULT;
@@ -472,13 +475,29 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
ret = -EFAULT;
kfree(ops.oobbuf);
+
+ /*
+ * NAND returns -EBADMSG on ECC errors, but it returns the OOB
+ * data. For our userspace tools it is important to dump areas
+ * with ECC errors!
+ * For kernel internal usage it also might return -EUCLEAN
+ * to signal the caller that a bitflip has occurred and has
+ * been corrected by the ECC algorithm.
+ *
+ * Note: currently the standard NAND function, nand_read_oob_std,
+ * does not calculate ECC for the OOB area, so do not rely on
+ * this behavior unless you have replaced it with your own.
+ */
+ if (mtd_is_bitflip_or_eccerr(ret))
+ return 0;
+
return ret;
}
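
The read side is reached from userspace through MEMREADOOB (or its 64-bit variant); a short sketch of the classic struct mtd_oob_buf call, which, per the comment above, still hands back OOB data even when ECC reports an error:

	#include <mtd/mtd-user.h>
	#include <sys/ioctl.h>
	#include <stdint.h>
	#include <stdio.h>

	static int dump_oob(int fd, uint32_t page_offs, unsigned char *oobbuf,
			    uint32_t ooblen)
	{
		struct mtd_oob_buf oob = {
			.start  = page_offs,	/* byte offset of the page */
			.length = ooblen,	/* OOB bytes to read */
			.ptr    = oobbuf,	/* user buffer */
		};

		if (ioctl(fd, MEMREADOOB, &oob) < 0) {
			perror("MEMREADOOB");
			return -1;
		}
		/* the kernel writes the OOB byte count back into oob.start */
		return 0;
	}
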
/*
* Copies (and truncates, if necessary) data from the larger struct,
* nand_ecclayout, to the smaller, deprecated layout struct,
- * nand_ecclayout_user. This is necessary only to suppport the deprecated
+ * nand_ecclayout_user. This is necessary only to support the deprecated
* API ioctl ECCGETLAYOUT while allowing all new functionality to use
* nand_ecclayout flexibly (i.e. the struct may change size in new
* releases without requiring major rewrites).
@@ -508,7 +527,7 @@ static int shrink_ecclayout(const struct nand_ecclayout *from,
return 0;
}
-static int mtd_blkpg_ioctl(struct mtd_info *mtd,
+static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
struct blkpg_ioctl_arg __user *arg)
{
struct blkpg_ioctl_arg a;
@@ -544,7 +563,58 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
}
}
-static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
+static int mtdchar_write_ioctl(struct mtd_info *mtd,
+ struct mtd_write_req __user *argp)
+{
+ struct mtd_write_req req;
+ struct mtd_oob_ops ops;
+ const void __user *usr_data, *usr_oob;
+ int ret;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ usr_data = (const void __user *)(uintptr_t)req.usr_data;
+ usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
+ if (!access_ok(VERIFY_READ, usr_data, req.len) ||
+ !access_ok(VERIFY_READ, usr_oob, req.ooblen))
+ return -EFAULT;
+
+ if (!mtd->_write_oob)
+ return -EOPNOTSUPP;
+
+ ops.mode = req.mode;
+ ops.len = (size_t)req.len;
+ ops.ooblen = (size_t)req.ooblen;
+ ops.ooboffs = 0;
+
+ if (usr_data) {
+ ops.datbuf = memdup_user(usr_data, ops.len);
+ if (IS_ERR(ops.datbuf))
+ return PTR_ERR(ops.datbuf);
+ } else {
+ ops.datbuf = NULL;
+ }
+
+ if (usr_oob) {
+ ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
+ if (IS_ERR(ops.oobbuf)) {
+ kfree(ops.datbuf);
+ return PTR_ERR(ops.oobbuf);
+ }
+ } else {
+ ops.oobbuf = NULL;
+ }
+
+ ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
+
+ kfree(ops.datbuf);
+ kfree(ops.oobbuf);
+
+ return ret;
+}
+
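
From userspace the new request looks roughly as follows; a hedged sketch assuming only the struct mtd_write_req fields this handler decodes (start, len, ooblen, usr_data, usr_oob, mode). It writes one page of data plus its OOB in a single, optionally raw, operation:

	#include <mtd/mtd-user.h>
	#include <sys/ioctl.h>
	#include <stdint.h>
	#include <string.h>

	static int write_page_with_oob(int fd, uint64_t offs,
				       const void *data, uint32_t len,
				       const void *oob, uint32_t ooblen)
	{
		struct mtd_write_req req;

		memset(&req, 0, sizeof(req));
		req.start    = offs;
		req.len      = len;
		req.ooblen   = ooblen;
		req.usr_data = (uintptr_t)data;		/* may be 0 for an OOB-only write */
		req.usr_oob  = (uintptr_t)oob;		/* may be 0 for a data-only write */
		req.mode     = MTD_OPS_PLACE_OOB;	/* or MTD_OPS_RAW to bypass ECC */

		return ioctl(fd, MEMWRITE, &req);
	}
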
+static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -553,7 +623,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
u_long size;
struct mtd_info_user info;
- DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
+ pr_debug("MTD_ioctl\n");
size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
if (cmd & IOC_IN) {
@@ -601,8 +671,8 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
info.erasesize = mtd->erasesize;
info.writesize = mtd->writesize;
info.oobsize = mtd->oobsize;
- /* The below fields are obsolete */
- info.ecctype = -1;
+ /* The below field is obsolete */
+ info.padding = 0;
if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
return -EFAULT;
break;
@@ -658,7 +728,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
wq_head is no longer there when the
callback routine tries to wake us up.
*/
- ret = mtd->erase(mtd, erase);
+ ret = mtd_erase(mtd, erase);
if (!ret) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&waitq, &wait);
@@ -684,7 +754,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
buf.ptr, &buf_user->length);
break;
}
@@ -698,7 +768,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(mtd, buf.start, buf.length,
+ ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
buf.ptr, &buf_user->start);
break;
}
@@ -711,7 +781,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
(void __user *)(uintptr_t)buf.usr_ptr,
&buf_user->length);
break;
@@ -725,12 +795,19 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(mtd, buf.start, buf.length,
+ ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
(void __user *)(uintptr_t)buf.usr_ptr,
&buf_user->length);
break;
}
+ case MEMWRITE:
+ {
+ ret = mtdchar_write_ioctl(mtd,
+ (struct mtd_write_req __user *)arg);
+ break;
+ }
+
case MEMLOCK:
{
struct erase_info_user einfo;
@@ -738,10 +815,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->lock)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->lock(mtd, einfo.start, einfo.length);
+ ret = mtd_lock(mtd, einfo.start, einfo.length);
break;
}
@@ -752,10 +826,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->unlock)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->unlock(mtd, einfo.start, einfo.length);
+ ret = mtd_unlock(mtd, einfo.start, einfo.length);
break;
}
@@ -766,10 +837,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->is_locked)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->is_locked(mtd, einfo.start, einfo.length);
+ ret = mtd_is_locked(mtd, einfo.start, einfo.length);
break;
}
@@ -800,10 +868,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
- if (!mtd->block_isbad)
- ret = -EOPNOTSUPP;
- else
- return mtd->block_isbad(mtd, offs);
+ return mtd_block_isbad(mtd, offs);
break;
}
@@ -813,21 +878,17 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
- if (!mtd->block_markbad)
- ret = -EOPNOTSUPP;
- else
- return mtd->block_markbad(mtd, offs);
+ return mtd_block_markbad(mtd, offs);
break;
}
-#ifdef CONFIG_HAVE_MTD_OTP
case OTPSELECT:
{
int mode;
if (copy_from_user(&mode, argp, sizeof(int)))
return -EFAULT;
- mfi->mode = MTD_MODE_NORMAL;
+ mfi->mode = MTD_FILE_MODE_NORMAL;
ret = otp_select_filemode(mfi, mode);
@@ -839,27 +900,26 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
case OTPGETREGIONINFO:
{
struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
+ size_t retlen;
if (!buf)
return -ENOMEM;
- ret = -EOPNOTSUPP;
switch (mfi->mode) {
- case MTD_MODE_OTP_FACTORY:
- if (mtd->get_fact_prot_info)
- ret = mtd->get_fact_prot_info(mtd, buf, 4096);
+ case MTD_FILE_MODE_OTP_FACTORY:
+ ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
break;
- case MTD_MODE_OTP_USER:
- if (mtd->get_user_prot_info)
- ret = mtd->get_user_prot_info(mtd, buf, 4096);
+ case MTD_FILE_MODE_OTP_USER:
+ ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
break;
default:
+ ret = -EINVAL;
break;
}
- if (ret >= 0) {
+ if (!ret) {
if (cmd == OTPGETREGIONCOUNT) {
- int nbr = ret / sizeof(struct otp_info);
+ int nbr = retlen / sizeof(struct otp_info);
ret = copy_to_user(argp, &nbr, sizeof(int));
} else
- ret = copy_to_user(argp, buf, ret);
+ ret = copy_to_user(argp, buf, retlen);
if (ret)
ret = -EFAULT;
}
@@ -871,18 +931,15 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct otp_info oinfo;
- if (mfi->mode != MTD_MODE_OTP_USER)
+ if (mfi->mode != MTD_FILE_MODE_OTP_USER)
return -EINVAL;
if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
return -EFAULT;
- if (!mtd->lock_user_prot_reg)
- return -EOPNOTSUPP;
- ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
+ ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
break;
}
-#endif
- /* This ioctl is being deprecated - it truncates the ecc layout */
+ /* This ioctl is being deprecated - it truncates the ECC layout */
case ECCGETLAYOUT:
{
struct nand_ecclayout_user *usrlay;
@@ -915,17 +972,17 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
mfi->mode = 0;
switch(arg) {
- case MTD_MODE_OTP_FACTORY:
- case MTD_MODE_OTP_USER:
+ case MTD_FILE_MODE_OTP_FACTORY:
+ case MTD_FILE_MODE_OTP_USER:
ret = otp_select_filemode(mfi, arg);
break;
- case MTD_MODE_RAW:
- if (!mtd->read_oob || !mtd->write_oob)
+ case MTD_FILE_MODE_RAW:
+ if (!mtd_has_oob(mtd))
return -EOPNOTSUPP;
mfi->mode = arg;
- case MTD_MODE_NORMAL:
+ case MTD_FILE_MODE_NORMAL:
break;
default:
ret = -EINVAL;
@@ -936,7 +993,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
case BLKPG:
{
- ret = mtd_blkpg_ioctl(mtd,
+ ret = mtdchar_blkpg_ioctl(mtd,
(struct blkpg_ioctl_arg __user *)arg);
break;
}
@@ -955,12 +1012,12 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
return ret;
} /* memory_ioctl */
-static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
+static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
int ret;
mutex_lock(&mtd_mutex);
- ret = mtd_ioctl(file, cmd, arg);
+ ret = mtdchar_ioctl(file, cmd, arg);
mutex_unlock(&mtd_mutex);
return ret;
@@ -977,7 +1034,7 @@ struct mtd_oob_buf32 {
#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
-static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
+static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct mtd_file_info *mfi = file->private_data;
@@ -996,7 +1053,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start,
+ ret = mtdchar_writeoob(file, mtd, buf.start,
buf.length, compat_ptr(buf.ptr),
&buf_user->length);
break;
@@ -1011,13 +1068,13 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(mtd, buf.start,
+ ret = mtdchar_readoob(file, mtd, buf.start,
buf.length, compat_ptr(buf.ptr),
&buf_user->start);
break;
}
default:
- ret = mtd_ioctl(file, cmd, (unsigned long)argp);
+ ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
}
mutex_unlock(&mtd_mutex);
@@ -1033,7 +1090,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
* mappings)
*/
#ifndef CONFIG_MMU
-static unsigned long mtd_get_unmapped_area(struct file *file,
+static unsigned long mtdchar_get_unmapped_area(struct file *file,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
@@ -1041,91 +1098,77 @@ static unsigned long mtd_get_unmapped_area(struct file *file,
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
+ unsigned long offset;
+ int ret;
- if (mtd->get_unmapped_area) {
- unsigned long offset;
-
- if (addr != 0)
- return (unsigned long) -EINVAL;
-
- if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
- return (unsigned long) -EINVAL;
+ if (addr != 0)
+ return (unsigned long) -EINVAL;
- offset = pgoff << PAGE_SHIFT;
- if (offset > mtd->size - len)
- return (unsigned long) -EINVAL;
+ if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
+ return (unsigned long) -EINVAL;
- return mtd->get_unmapped_area(mtd, len, offset, flags);
- }
+ offset = pgoff << PAGE_SHIFT;
+ if (offset > mtd->size - len)
+ return (unsigned long) -EINVAL;
- /* can't map directly */
- return (unsigned long) -ENOSYS;
+ ret = mtd_get_unmapped_area(mtd, len, offset, flags);
+ return ret == -EOPNOTSUPP ? -ENODEV : ret;
}
#endif
/*
* set up a mapping for shared memory segments
*/
-static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
+static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
struct map_info *map = mtd->priv;
- unsigned long start;
- unsigned long off;
- u32 len;
-
- if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
- off = vma->vm_pgoff << PAGE_SHIFT;
- start = map->phys;
- len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
- start &= PAGE_MASK;
- if ((vma->vm_end - vma->vm_start + off) > len)
- return -EINVAL;
-
- off += start;
- vma->vm_pgoff = off >> PAGE_SHIFT;
- vma->vm_flags |= VM_IO | VM_RESERVED;
+ /* This is broken because it assumes the MTD device is map-based
+ and that mtd->priv is a valid struct map_info. It should be
+ replaced with something that uses the mtd_get_unmapped_area()
+ operation properly. */
+ if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
- if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
+ if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
- if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
+ return vm_iomap_memory(vma, map->phys, map->size);
}
- return -ENOSYS;
+ return -ENODEV;
#else
- return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
+ return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}
static const struct file_operations mtd_fops = {
.owner = THIS_MODULE,
- .llseek = mtd_lseek,
- .read = mtd_read,
- .write = mtd_write,
- .unlocked_ioctl = mtd_unlocked_ioctl,
+ .llseek = mtdchar_lseek,
+ .read = mtdchar_read,
+ .write = mtdchar_write,
+ .unlocked_ioctl = mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = mtd_compat_ioctl,
+ .compat_ioctl = mtdchar_compat_ioctl,
#endif
- .open = mtd_open,
- .release = mtd_close,
- .mmap = mtd_mmap,
+ .open = mtdchar_open,
+ .release = mtdchar_close,
+ .mmap = mtdchar_mmap,
#ifndef CONFIG_MMU
- .get_unmapped_area = mtd_get_unmapped_area,
+ .get_unmapped_area = mtdchar_get_unmapped_area,
#endif
};
+static const struct super_operations mtd_ops = {
+ .drop_inode = generic_delete_inode,
+ .statfs = simple_statfs,
+};
+
static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL, MTD_INODE_FS_MAGIC);
+ return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL, MTD_INODE_FS_MAGIC);
}
static struct file_system_type mtd_inodefs_type = {
@@ -1133,76 +1176,38 @@ static struct file_system_type mtd_inodefs_type = {
.mount = mtd_inodefs_mount,
.kill_sb = kill_anon_super,
};
+MODULE_ALIAS_FS("mtd_inodefs");
-static void mtdchar_notify_add(struct mtd_info *mtd)
-{
-}
-
-static void mtdchar_notify_remove(struct mtd_info *mtd)
-{
- struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);
-
- if (mtd_ino) {
- /* Destroy the inode if it exists */
- mtd_ino->i_nlink = 0;
- iput(mtd_ino);
- }
-}
-
-static struct mtd_notifier mtdchar_notifier = {
- .add = mtdchar_notify_add,
- .remove = mtdchar_notify_remove,
-};
-
-static int __init init_mtdchar(void)
+int __init init_mtdchar(void)
{
int ret;
ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
"mtd", &mtd_fops);
if (ret < 0) {
- pr_notice("Can't allocate major number %d for "
- "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
+ pr_err("Can't allocate major number %d for MTD\n",
+ MTD_CHAR_MAJOR);
return ret;
}
ret = register_filesystem(&mtd_inodefs_type);
if (ret) {
- pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
+ pr_err("Can't register mtd_inodefs filesystem, error %d\n",
+ ret);
goto err_unregister_chdev;
}
- mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
- if (IS_ERR(mtd_inode_mnt)) {
- ret = PTR_ERR(mtd_inode_mnt);
- pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
- goto err_unregister_filesystem;
- }
- register_mtd_user(&mtdchar_notifier);
-
return ret;
-err_unregister_filesystem:
- unregister_filesystem(&mtd_inodefs_type);
err_unregister_chdev:
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
return ret;
}
-static void __exit cleanup_mtdchar(void)
+void __exit cleanup_mtdchar(void)
{
- unregister_mtd_user(&mtdchar_notifier);
- mntput(mtd_inode_mnt);
unregister_filesystem(&mtd_inodefs_type);
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
-module_init(init_mtdchar);
-module_exit(cleanup_mtdchar);
-
-MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("Direct character-device access to MTD devices");
MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index e601672a530..b9000563b9f 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -72,8 +72,6 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
int ret = 0, err;
int i;
- *retlen = 0;
-
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
size_t size, retsize;
@@ -91,14 +89,14 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
/* Entire transaction goes into this subdev */
size = len;
- err = subdev->read(subdev, from, size, &retsize, buf);
+ err = mtd_read(subdev, from, size, &retsize, buf);
/* Save information about bitflips! */
if (unlikely(err)) {
- if (err == -EBADMSG) {
+ if (mtd_is_eccerr(err)) {
mtd->ecc_stats.failed++;
ret = err;
- } else if (err == -EUCLEAN) {
+ } else if (mtd_is_bitflip(err)) {
mtd->ecc_stats.corrected++;
/* Do not overwrite -EBADMSG !! */
if (!ret)
@@ -126,11 +124,6 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
int err = -EINVAL;
int i;
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
-
- *retlen = 0;
-
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
size_t size, retsize;
@@ -145,11 +138,7 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
else
size = len;
- if (!(subdev->flags & MTD_WRITEABLE))
- err = -EROFS;
- else
- err = subdev->write(subdev, to, size, &retsize, buf);
-
+ err = mtd_write(subdev, to, size, &retsize, buf);
if (err)
break;
@@ -176,19 +165,10 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
int i;
int err = -EINVAL;
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
-
- *retlen = 0;
-
/* Calculate total length of data */
for (i = 0; i < count; i++)
total_len += vecs[i].iov_len;
- /* Do not allow write past end of device */
- if ((to + total_len) > mtd->size)
- return -EINVAL;
-
/* Check alignment */
if (mtd->writesize > 1) {
uint64_t __to = to;
@@ -224,11 +204,8 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
old_iov_len = vecs_copy[entry_high].iov_len;
vecs_copy[entry_high].iov_len = size;
- if (!(subdev->flags & MTD_WRITEABLE))
- err = -EROFS;
- else
- err = subdev->writev(subdev, &vecs_copy[entry_low],
- entry_high - entry_low + 1, to, &retsize);
+ err = mtd_writev(subdev, &vecs_copy[entry_low],
+ entry_high - entry_low + 1, to, &retsize);
vecs_copy[entry_high].iov_len = old_iov_len - size;
vecs_copy[entry_high].iov_base += size;
@@ -273,16 +250,16 @@ concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
if (from + devops.len > subdev->size)
devops.len = subdev->size - from;
- err = subdev->read_oob(subdev, from, &devops);
+ err = mtd_read_oob(subdev, from, &devops);
ops->retlen += devops.retlen;
ops->oobretlen += devops.oobretlen;
/* Save information about bitflips! */
if (unlikely(err)) {
- if (err == -EBADMSG) {
+ if (mtd_is_eccerr(err)) {
mtd->ecc_stats.failed++;
ret = err;
- } else if (err == -EUCLEAN) {
+ } else if (mtd_is_bitflip(err)) {
mtd->ecc_stats.corrected++;
/* Do not overwrite -EBADMSG !! */
if (!ret)
@@ -333,7 +310,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
if (to + devops.len > subdev->size)
devops.len = subdev->size - to;
- err = subdev->write_oob(subdev, to, &devops);
+ err = mtd_write_oob(subdev, to, &devops);
ops->retlen += devops.oobretlen;
if (err)
return err;
@@ -379,7 +356,7 @@ static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
* FIXME: Allow INTERRUPTIBLE. Which means
* not having the wait_queue head on the stack.
*/
- err = mtd->erase(mtd, erase);
+ err = mtd_erase(mtd, erase);
if (!err) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&waitq, &wait);
@@ -402,15 +379,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
uint64_t length, offset = 0;
struct erase_info *erase;
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
-
- if (instr->addr > concat->mtd.size)
- return -EINVAL;
-
- if (instr->len + instr->addr > concat->mtd.size)
- return -EINVAL;
-
/*
* Check for proper erase block alignment of the to-be-erased area.
* It is easier to do this based on the super device's erase
@@ -458,8 +426,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
return -EINVAL;
}
- instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
-
/* make a local copy of instr to avoid modifying the caller's struct */
erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
@@ -498,10 +464,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
else
erase->len = length;
- if (!(subdev->flags & MTD_WRITEABLE)) {
- err = -EROFS;
- break;
- }
length -= erase->len;
if ((err = concat_dev_erase(subdev, erase))) {
/* sanity check: should never happen since
@@ -537,9 +499,6 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_concat *concat = CONCAT(mtd);
int i, err = -EINVAL;
- if ((len + ofs) > mtd->size)
- return -EINVAL;
-
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
uint64_t size;
@@ -554,12 +513,9 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
else
size = len;
- if (subdev->lock) {
- err = subdev->lock(subdev, ofs, size);
- if (err)
- break;
- } else
- err = -EOPNOTSUPP;
+ err = mtd_lock(subdev, ofs, size);
+ if (err)
+ break;
len -= size;
if (len == 0)
@@ -577,9 +533,6 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_concat *concat = CONCAT(mtd);
int i, err = 0;
- if ((len + ofs) > mtd->size)
- return -EINVAL;
-
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
uint64_t size;
@@ -594,12 +547,9 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
else
size = len;
- if (subdev->unlock) {
- err = subdev->unlock(subdev, ofs, size);
- if (err)
- break;
- } else
- err = -EOPNOTSUPP;
+ err = mtd_unlock(subdev, ofs, size);
+ if (err)
+ break;
len -= size;
if (len == 0)
@@ -619,7 +569,7 @@ static void concat_sync(struct mtd_info *mtd)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- subdev->sync(subdev);
+ mtd_sync(subdev);
}
}
@@ -630,7 +580,7 @@ static int concat_suspend(struct mtd_info *mtd)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- if ((rc = subdev->suspend(subdev)) < 0)
+ if ((rc = mtd_suspend(subdev)) < 0)
return rc;
}
return rc;
@@ -643,7 +593,7 @@ static void concat_resume(struct mtd_info *mtd)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- subdev->resume(subdev);
+ mtd_resume(subdev);
}
}
@@ -652,12 +602,9 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
struct mtd_concat *concat = CONCAT(mtd);
int i, res = 0;
- if (!concat->subdev[0]->block_isbad)
+ if (!mtd_can_have_bb(concat->subdev[0]))
return res;
- if (ofs > mtd->size)
- return -EINVAL;
-
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
@@ -666,7 +613,7 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
continue;
}
- res = subdev->block_isbad(subdev, ofs);
+ res = mtd_block_isbad(subdev, ofs);
break;
}
@@ -678,12 +625,6 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct mtd_concat *concat = CONCAT(mtd);
int i, err = -EINVAL;
- if (!concat->subdev[0]->block_markbad)
- return 0;
-
- if (ofs > mtd->size)
- return -EINVAL;
-
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
@@ -692,7 +633,7 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
continue;
}
- err = subdev->block_markbad(subdev, ofs);
+ err = mtd_block_markbad(subdev, ofs);
if (!err)
mtd->ecc_stats.badblocks++;
break;
@@ -721,15 +662,7 @@ static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
continue;
}
- /* we've found the subdev over which the mapping will reside */
- if (offset + len > subdev->size)
- return (unsigned long) -EINVAL;
-
- if (subdev->get_unmapped_area)
- return subdev->get_unmapped_area(subdev, len, offset,
- flags);
-
- break;
+ return mtd_get_unmapped_area(subdev, len, offset, flags);
}
return (unsigned long) -ENOSYS;
@@ -770,7 +703,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
/*
* Set up the new "super" device's MTD object structure, check for
- * incompatibilites between the subdevices.
+ * incompatibilities between the subdevices.
*/
concat->mtd.type = subdev[0]->type;
concat->mtd.flags = subdev[0]->flags;
@@ -786,16 +719,16 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd.subpage_sft = subdev[0]->subpage_sft;
concat->mtd.oobsize = subdev[0]->oobsize;
concat->mtd.oobavail = subdev[0]->oobavail;
- if (subdev[0]->writev)
- concat->mtd.writev = concat_writev;
- if (subdev[0]->read_oob)
- concat->mtd.read_oob = concat_read_oob;
- if (subdev[0]->write_oob)
- concat->mtd.write_oob = concat_write_oob;
- if (subdev[0]->block_isbad)
- concat->mtd.block_isbad = concat_block_isbad;
- if (subdev[0]->block_markbad)
- concat->mtd.block_markbad = concat_block_markbad;
+ if (subdev[0]->_writev)
+ concat->mtd._writev = concat_writev;
+ if (subdev[0]->_read_oob)
+ concat->mtd._read_oob = concat_read_oob;
+ if (subdev[0]->_write_oob)
+ concat->mtd._write_oob = concat_write_oob;
+ if (subdev[0]->_block_isbad)
+ concat->mtd._block_isbad = concat_block_isbad;
+ if (subdev[0]->_block_markbad)
+ concat->mtd._block_markbad = concat_block_markbad;
concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
@@ -842,8 +775,8 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
if (concat->mtd.writesize != subdev[i]->writesize ||
concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
concat->mtd.oobsize != subdev[i]->oobsize ||
- !concat->mtd.read_oob != !subdev[i]->read_oob ||
- !concat->mtd.write_oob != !subdev[i]->write_oob) {
+ !concat->mtd._read_oob != !subdev[i]->_read_oob ||
+ !concat->mtd._write_oob != !subdev[i]->_write_oob) {
kfree(concat);
printk("Incompatible OOB or ECC data on \"%s\"\n",
subdev[i]->name);
@@ -858,15 +791,15 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->num_subdev = num_devs;
concat->mtd.name = name;
- concat->mtd.erase = concat_erase;
- concat->mtd.read = concat_read;
- concat->mtd.write = concat_write;
- concat->mtd.sync = concat_sync;
- concat->mtd.lock = concat_lock;
- concat->mtd.unlock = concat_unlock;
- concat->mtd.suspend = concat_suspend;
- concat->mtd.resume = concat_resume;
- concat->mtd.get_unmapped_area = concat_get_unmapped_area;
+ concat->mtd._erase = concat_erase;
+ concat->mtd._read = concat_read;
+ concat->mtd._write = concat_write;
+ concat->mtd._sync = concat_sync;
+ concat->mtd._lock = concat_lock;
+ concat->mtd._unlock = concat_unlock;
+ concat->mtd._suspend = concat_suspend;
+ concat->mtd._resume = concat_resume;
+ concat->mtd._get_unmapped_area = concat_get_unmapped_area;
/*
* Combine the erase block size info of the subdevices:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index c510aff289a..d201feeb3ca 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -36,11 +36,13 @@
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
+#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include "mtdcore.h"
+
/*
* backing device capabilities for non-mappable devices (such as NAND flash)
* - permits private mappings, copies are taken of the data
@@ -96,18 +98,15 @@ EXPORT_SYMBOL_GPL(__mtd_next_device);
static LIST_HEAD(mtd_notifiers);
-#if defined(CONFIG_MTD_CHAR) || defined(CONFIG_MTD_CHAR_MODULE)
#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
-#else
-#define MTD_DEVT(index) 0
-#endif
/* REVISIT once MTD uses the driver model better, whoever allocates
* the mtd_info will probably want to use the release() hook...
*/
static void mtd_release(struct device *dev)
{
- dev_t index = MTD_DEVT(dev_to_mtd(dev)->index);
+ struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev);
+ dev_t index = MTD_DEVT(mtd->index);
/* remove /dev/mtdXro node if needed */
if (index)
@@ -116,27 +115,24 @@ static void mtd_release(struct device *dev)
static int mtd_cls_suspend(struct device *dev, pm_message_t state)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
- if (mtd && mtd->suspend)
- return mtd->suspend(mtd);
- else
- return 0;
+ return mtd ? mtd_suspend(mtd) : 0;
}
static int mtd_cls_resume(struct device *dev)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
-
- if (mtd && mtd->resume)
- mtd->resume(mtd);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ if (mtd)
+ mtd_resume(mtd);
return 0;
}
static ssize_t mtd_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
char *type;
switch (mtd->type) {
@@ -161,6 +157,9 @@ static ssize_t mtd_type_show(struct device *dev,
case MTD_UBIVOLUME:
type = "ubi";
break;
+ case MTD_MLCNANDFLASH:
+ type = "mlc-nand";
+ break;
default:
type = "unknown";
}
@@ -172,7 +171,7 @@ static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
static ssize_t mtd_flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
@@ -182,7 +181,7 @@ static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
static ssize_t mtd_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)mtd->size);
@@ -193,7 +192,7 @@ static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
static ssize_t mtd_erasesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
@@ -203,7 +202,7 @@ static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
static ssize_t mtd_writesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
@@ -213,7 +212,7 @@ static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
static ssize_t mtd_subpagesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
@@ -224,7 +223,7 @@ static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
static ssize_t mtd_oobsize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
@@ -234,7 +233,7 @@ static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
static ssize_t mtd_numeraseregions_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
@@ -245,13 +244,60 @@ static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
static ssize_t mtd_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
+static ssize_t mtd_ecc_strength_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
+}
+static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
+
+static ssize_t mtd_bitflip_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
+}
+
+static ssize_t mtd_bitflip_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ unsigned int bitflip_threshold;
+ int retval;
+
+ retval = kstrtouint(buf, 0, &bitflip_threshold);
+ if (retval)
+ return retval;
+
+ mtd->bitflip_threshold = bitflip_threshold;
+ return count;
+}
+static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
+ mtd_bitflip_threshold_show,
+ mtd_bitflip_threshold_store);
+
+static ssize_t mtd_ecc_step_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
+
+}
+static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
+
static struct attribute *mtd_attrs[] = {
&dev_attr_type.attr,
&dev_attr_flags.attr,
@@ -262,17 +308,12 @@ static struct attribute *mtd_attrs[] = {
&dev_attr_oobsize.attr,
&dev_attr_numeraseregions.attr,
&dev_attr_name.attr,
+ &dev_attr_ecc_strength.attr,
+ &dev_attr_ecc_step_size.attr,
+ &dev_attr_bitflip_threshold.attr,
NULL,
};
-
-static struct attribute_group mtd_group = {
- .attrs = mtd_attrs,
-};
-
-static const struct attribute_group *mtd_groups[] = {
- &mtd_group,
- NULL,
-};
+ATTRIBUTE_GROUPS(mtd);
static struct device_type mtd_devtype = {
.name = "mtd",
@@ -312,18 +353,17 @@ int add_mtd_device(struct mtd_info *mtd)
BUG_ON(mtd->writesize == 0);
mutex_lock(&mtd_table_mutex);
- do {
- if (!idr_pre_get(&mtd_idr, GFP_KERNEL))
- goto fail_locked;
- error = idr_get_new(&mtd_idr, mtd, &i);
- } while (error == -EAGAIN);
-
- if (error)
+ i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
+ if (i < 0)
goto fail_locked;
mtd->index = i;
mtd->usecount = 0;
+ /* default value if not set by driver */
+ if (mtd->bitflip_threshold == 0)
+ mtd->bitflip_threshold = mtd->ecc_strength;
+
if (is_power_of_2(mtd->erasesize))
mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
else
@@ -338,9 +378,9 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
/* Some chips always power up locked. Unlock them now */
- if ((mtd->flags & MTD_WRITEABLE)
- && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
- if (mtd->unlock(mtd, 0, mtd->size))
+ if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
+ error = mtd_unlock(mtd, 0, mtd->size);
+ if (error && error != -EOPNOTSUPP)
printk(KERN_WARNING
"%s: unlock failed, writes may not work\n",
mtd->name);
@@ -362,7 +402,7 @@ int add_mtd_device(struct mtd_info *mtd)
MTD_DEVT(i) + 1,
NULL, "mtd%dro", i);
- DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name);
+ pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
list_for_each_entry(not, &mtd_notifiers, list)
@@ -429,27 +469,63 @@ out_error:
}
/**
- * mtd_device_register - register an MTD device.
+ * mtd_device_parse_register - parse partitions and register an MTD device.
+ *
+ * @mtd: the MTD device to register
+ * @types: the list of MTD partition probes to try, see
+ * 'parse_mtd_partitions()' for more information
+ * @parser_data: MTD partition parser-specific data
+ * @parts: fallback partition information to register, if parsing fails;
+ * only valid if %nr_parts > %0
+ * @nr_parts: the number of partitions in parts, if zero then the full
+ * MTD device is registered if no partition info is found
+ *
+ * This function aggregates MTD partition parsing (done by
+ * 'parse_mtd_partitions()') and MTD device and partition registration. It
+ * basically follows the most common pattern found in many MTD drivers:
*
- * @master: the MTD device to register
- * @parts: the partitions to register - only valid if nr_parts > 0
- * @nr_parts: the number of partitions in parts. If zero then the full MTD
- * device is registered
+ * * It first tries to probe partitions on MTD device @mtd using parsers
+ * specified in @types (if @types is %NULL, then the default list of parsers
+ * is used, see 'parse_mtd_partitions()' for more information). If none are
+ * found this functions tries to fallback to information specified in
+ * @parts/@nr_parts.
+ * * If any partitioning info was found, this function registers the found
+ * partitions.
+ * * If no partitions were found this function just registers the MTD device
+ * @mtd and exits.
*
- * Register an MTD device with the system and optionally, a number of
- * partitions. If nr_parts is 0 then the whole device is registered, otherwise
- * only the partitions are registered. To register both the full device *and*
- * the partitions, call mtd_device_register() twice, once with nr_parts == 0
- * and once equal to the number of partitions.
+ * Returns zero in case of success and a negative error code in case of failure.
*/
-int mtd_device_register(struct mtd_info *master,
- const struct mtd_partition *parts,
- int nr_parts)
+int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
+ struct mtd_part_parser_data *parser_data,
+ const struct mtd_partition *parts,
+ int nr_parts)
{
- return parts ? add_mtd_partitions(master, parts, nr_parts) :
- add_mtd_device(master);
+ int err;
+ struct mtd_partition *real_parts;
+
+ err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
+ if (err <= 0 && nr_parts && parts) {
+ real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
+ GFP_KERNEL);
+ if (!real_parts)
+ err = -ENOMEM;
+ else
+ err = nr_parts;
+ }
+
+ if (err > 0) {
+ err = add_mtd_partitions(mtd, real_parts, err);
+ kfree(real_parts);
+ } else if (err == 0) {
+ err = add_mtd_device(mtd);
+ if (err == 1)
+ err = -ENODEV;
+ }
+
+ return err;
}
-EXPORT_SYMBOL_GPL(mtd_device_register);
+EXPORT_SYMBOL_GPL(mtd_device_parse_register);
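
A sketch of the calling pattern this helper is meant to replace in drivers, assuming the cmdlinepart/ofpart parsers and a hypothetical static fallback partition table:

	#include <linux/mtd/mtd.h>
	#include <linux/mtd/partitions.h>
	#include <linux/sizes.h>

	static int example_register(struct mtd_info *mtd)
	{
		static const char * const part_probes[] = {
			"cmdlinepart", "ofpart", NULL
		};
		static struct mtd_partition fallback_parts[] = {
			{ .name = "boot", .offset = 0,       .size = SZ_512K },
			{ .name = "fs",   .offset = SZ_512K, .size = MTDPART_SIZ_FULL },
		};

		/* try the parsers first, else register the static fallback table */
		return mtd_device_parse_register(mtd, part_probes, NULL,
						 fallback_parts,
						 ARRAY_SIZE(fallback_parts));
	}
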
/**
* mtd_device_unregister - unregister an existing MTD device.
@@ -480,7 +556,6 @@ EXPORT_SYMBOL_GPL(mtd_device_unregister);
* or removal of MTD devices. Causes the 'add' callback to be immediately
* invoked for each MTD device currently present in the system.
*/
-
void register_mtd_user (struct mtd_notifier *new)
{
struct mtd_info *mtd;
@@ -496,6 +571,7 @@ void register_mtd_user (struct mtd_notifier *new)
mutex_unlock(&mtd_table_mutex);
}
+EXPORT_SYMBOL_GPL(register_mtd_user);
/**
* unregister_mtd_user - unregister a 'user' of MTD devices.
@@ -506,7 +582,6 @@ void register_mtd_user (struct mtd_notifier *new)
* 'remove' callback to be immediately invoked for each MTD device
* currently present in the system.
*/
-
int unregister_mtd_user (struct mtd_notifier *old)
{
struct mtd_info *mtd;
@@ -522,7 +597,7 @@ int unregister_mtd_user (struct mtd_notifier *old)
mutex_unlock(&mtd_table_mutex);
return 0;
}
-
+EXPORT_SYMBOL_GPL(unregister_mtd_user);
/**
* get_mtd_device - obtain a validated handle for an MTD device
@@ -535,7 +610,6 @@ int unregister_mtd_user (struct mtd_notifier *old)
* both, return the num'th driver only if its address matches. Return
* error code if not.
*/
-
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
struct mtd_info *ret = NULL, *other;
@@ -568,6 +642,7 @@ out:
mutex_unlock(&mtd_table_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(get_mtd_device);
int __get_mtd_device(struct mtd_info *mtd)
@@ -577,8 +652,8 @@ int __get_mtd_device(struct mtd_info *mtd)
if (!try_module_get(mtd->owner))
return -ENODEV;
- if (mtd->get_device) {
- err = mtd->get_device(mtd);
+ if (mtd->_get_device) {
+ err = mtd->_get_device(mtd);
if (err) {
module_put(mtd->owner);
@@ -588,6 +663,7 @@ int __get_mtd_device(struct mtd_info *mtd)
mtd->usecount++;
return 0;
}
+EXPORT_SYMBOL_GPL(__get_mtd_device);
/**
* get_mtd_device_nm - obtain a validated handle for an MTD device by
@@ -597,7 +673,6 @@ int __get_mtd_device(struct mtd_info *mtd)
* This function returns MTD device description structure in case of
* success and an error code in case of failure.
*/
-
struct mtd_info *get_mtd_device_nm(const char *name)
{
int err = -ENODEV;
@@ -626,6 +701,7 @@ out_unlock:
mutex_unlock(&mtd_table_mutex);
return ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(get_mtd_device_nm);
void put_mtd_device(struct mtd_info *mtd)
{
@@ -634,50 +710,375 @@ void put_mtd_device(struct mtd_info *mtd)
mutex_unlock(&mtd_table_mutex);
}
+EXPORT_SYMBOL_GPL(put_mtd_device);
void __put_mtd_device(struct mtd_info *mtd)
{
--mtd->usecount;
BUG_ON(mtd->usecount < 0);
- if (mtd->put_device)
- mtd->put_device(mtd);
+ if (mtd->_put_device)
+ mtd->_put_device(mtd);
module_put(mtd->owner);
}
+EXPORT_SYMBOL_GPL(__put_mtd_device);
+
+/*
+ * Erase is an asynchronous operation. Device drivers are supposed
+ * to call instr->callback() whenever the operation completes, even
+ * if it completes with a failure.
+ * Callers are supposed to pass a callback function and wait for it
+ * to be called before writing to the block.
+ */
+int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ if (!instr->len) {
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ return 0;
+ }
+ return mtd->_erase(mtd, instr);
+}
+EXPORT_SYMBOL_GPL(mtd_erase);
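
Since the operation is asynchronous, in-kernel callers pair mtd_erase() with a wait queue and a completion callback, the same pattern the MEMERASE ioctl and concat_dev_erase() use; a condensed sketch:

	#include <linux/mtd/mtd.h>
	#include <linux/sched.h>
	#include <linux/wait.h>

	static void erase_done(struct erase_info *instr)
	{
		wake_up((wait_queue_head_t *)instr->priv);
	}

	static int erase_sync(struct mtd_info *mtd, uint64_t addr, uint64_t len)
	{
		DECLARE_WAITQUEUE(wait, current);
		wait_queue_head_t waitq;
		struct erase_info instr = {
			.mtd      = mtd,
			.addr     = addr,
			.len      = len,
			.callback = erase_done,
			.priv     = (u_long)&waitq,
		};
		int ret;

		init_waitqueue_head(&waitq);
		ret = mtd_erase(mtd, &instr);
		if (!ret) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&waitq, &wait);
			if (instr.state != MTD_ERASE_DONE &&
			    instr.state != MTD_ERASE_FAILED)
				schedule();
			remove_wait_queue(&waitq, &wait);
			set_current_state(TASK_RUNNING);
			ret = (instr.state == MTD_ERASE_FAILED) ? -EIO : 0;
		}
		return ret;
	}
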
+
+/*
+ * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
+ */
+int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+ void **virt, resource_size_t *phys)
+{
+ *retlen = 0;
+ *virt = NULL;
+ if (phys)
+ *phys = 0;
+ if (!mtd->_point)
+ return -EOPNOTSUPP;
+ if (from < 0 || from > mtd->size || len > mtd->size - from)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_point(mtd, from, len, retlen, virt, phys);
+}
+EXPORT_SYMBOL_GPL(mtd_point);
+
+/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
+int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ if (!mtd->_point)
+ return -EOPNOTSUPP;
+ if (from < 0 || from > mtd->size || len > mtd->size - from)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_unpoint(mtd, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unpoint);
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
+ unsigned long offset, unsigned long flags)
+{
+ if (!mtd->_get_unmapped_area)
+ return -EOPNOTSUPP;
+ if (offset > mtd->size || len > mtd->size - offset)
+ return -EINVAL;
+ return mtd->_get_unmapped_area(mtd, len, offset, flags);
+}
+EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
+
+int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+ u_char *buf)
+{
+ int ret_code;
+ *retlen = 0;
+ if (from < 0 || from > mtd->size || len > mtd->size - from)
+ return -EINVAL;
+ if (!len)
+ return 0;
+
+ /*
+ * In the absence of an error, drivers return a non-negative integer
+ * representing the maximum number of bitflips that were corrected on
+ * any one ecc region (if applicable; zero otherwise).
+ */
+ ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ if (unlikely(ret_code < 0))
+ return ret_code;
+ if (mtd->ecc_strength == 0)
+ return 0; /* device lacks ecc */
+ return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+}
+EXPORT_SYMBOL_GPL(mtd_read);
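
Callers are expected to treat -EUCLEAN as "the data is valid, but the number of corrected bitflips reached mtd->bitflip_threshold, so scrub the block soon"; a sketch of the usual check using the mtd_is_bitflip() helper:

	#include <linux/mtd/mtd.h>

	static int read_checked(struct mtd_info *mtd, loff_t from, size_t len,
				u_char *buf, bool *needs_scrub)
	{
		size_t retlen;
		int err = mtd_read(mtd, from, len, &retlen, buf);

		*needs_scrub = false;
		if (mtd_is_bitflip(err)) {
			/* data is valid; corrected bitflips crossed the threshold */
			*needs_scrub = true;
			return 0;
		}
		return err;	/* 0 on clean reads, e.g. -EBADMSG if uncorrectable */
	}
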
+
+int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+ const u_char *buf)
+{
+ *retlen = 0;
+ if (to < 0 || to > mtd->size || len > mtd->size - to)
+ return -EINVAL;
+ if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (!len)
+ return 0;
+ return mtd->_write(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_write);
+
+/*
+ * In blackbox flight recorder like scenarios we want to make successful writes
+ * in interrupt context. panic_write() is only intended to be called when its
+ * known the kernel is about to panic and we need the write to succeed. Since
+ * the kernel is not going to be running for much longer, this function can
+ * break locks and delay to ensure the write succeeds (but not sleep).
+ */
+int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+ const u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->_panic_write)
+ return -EOPNOTSUPP;
+ if (to < 0 || to > mtd->size || len > mtd->size - to)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (!len)
+ return 0;
+ return mtd->_panic_write(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_panic_write);
+
+int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ int ret_code;
+ ops->retlen = ops->oobretlen = 0;
+ if (!mtd->_read_oob)
+ return -EOPNOTSUPP;
+ /*
+ * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
+ * similar to mtd->_read(), returning a non-negative integer
+ * representing max bitflips. In other cases, mtd->_read_oob() may
+ * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
+ */
+ ret_code = mtd->_read_oob(mtd, from, ops);
+ if (unlikely(ret_code < 0))
+ return ret_code;
+ if (mtd->ecc_strength == 0)
+ return 0; /* device lacks ecc */
+ return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+}
+EXPORT_SYMBOL_GPL(mtd_read_oob);
-/* default_mtd_writev - default mtd writev method for MTD devices that
- * don't implement their own
+/*
+ * Method to access the protection register area, present in some flash
+ * devices. The user data is one time programmable but the factory data is read
+ * only.
*/
+int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+ struct otp_info *buf)
+{
+ if (!mtd->_get_fact_prot_info)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
+
+int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->_read_fact_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
+
+int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+ struct otp_info *buf)
+{
+ if (!mtd->_get_user_prot_info)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_get_user_prot_info(mtd, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
+
+int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->_read_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
+
+int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ int ret;
+
+ *retlen = 0;
+ if (!mtd->_write_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
+ if (ret)
+ return ret;
+
+ /*
+ * If no data could be written at all, we are out of memory and
+ * must return -ENOSPC.
+ */
+ return (*retlen) ? 0 : -ENOSPC;
+}
+EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
+
+int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ if (!mtd->_lock_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_lock_user_prot_reg(mtd, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
+
+/* Chip-supported device locking */
+int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->_lock)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_lock(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock);
+
+int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->_unlock)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_unlock(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unlock);
-int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen)
+int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->_is_locked)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_is_locked(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_is_locked);
+
+int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ if (!mtd->_block_isbad)
+ return 0;
+ if (ofs < 0 || ofs > mtd->size)
+ return -EINVAL;
+ return mtd->_block_isbad(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_isbad);
+
+int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ if (!mtd->_block_markbad)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs > mtd->size)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ return mtd->_block_markbad(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_markbad);
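
In-kernel users normally skip bad erase blocks before touching them, much as the mtdoops change below does; a minimal sketch of the walk:

	#include <linux/mtd/mtd.h>

	/* walk forward to the next erase block that is not marked bad */
	static loff_t next_good_block(struct mtd_info *mtd, loff_t ofs)
	{
		/* mtd_block_isbad() returns > 0 for a bad block, 0 for a good
		   one, and a negative error code, which also stops the walk */
		while (ofs < mtd->size && mtd_block_isbad(mtd, ofs) > 0)
			ofs += mtd->erasesize;
		return ofs;
	}
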
+
+/*
+ * default_mtd_writev - the default writev method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
{
unsigned long i;
size_t totlen = 0, thislen;
int ret = 0;
- if(!mtd->write) {
- ret = -EROFS;
- } else {
- for (i=0; i<count; i++) {
- if (!vecs[i].iov_len)
- continue;
- ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
- totlen += thislen;
- if (ret || thislen != vecs[i].iov_len)
- break;
- to += vecs[i].iov_len;
- }
+ for (i = 0; i < count; i++) {
+ if (!vecs[i].iov_len)
+ continue;
+ ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
+ vecs[i].iov_base);
+ totlen += thislen;
+ if (ret || thislen != vecs[i].iov_len)
+ break;
+ to += vecs[i].iov_len;
}
- if (retlen)
- *retlen = totlen;
+ *retlen = totlen;
return ret;
}
+/*
+ * mtd_writev - the vector-based MTD write method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ *retlen = 0;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (!mtd->_writev)
+ return default_mtd_writev(mtd, vecs, count, to, retlen);
+ return mtd->_writev(mtd, vecs, count, to, retlen);
+}
+EXPORT_SYMBOL_GPL(mtd_writev);
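
A sketch of a typical caller gathering a header and a payload into one vectored write; when the driver has no _writev method the call falls through to default_mtd_writev() above:

	#include <linux/mtd/mtd.h>
	#include <linux/uio.h>

	static int write_record(struct mtd_info *mtd, loff_t to,
				void *hdr, size_t hdrlen,
				void *data, size_t datalen)
	{
		struct kvec vecs[2] = {
			{ .iov_base = hdr,  .iov_len = hdrlen  },
			{ .iov_base = data, .iov_len = datalen },
		};
		size_t written;
		int err = mtd_writev(mtd, vecs, 2, to, &written);

		if (!err && written != hdrlen + datalen)
			err = -EIO;	/* short write */
		return err;
	}
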
+
/**
* mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
- * @size: A pointer to the ideal or maximum size of the allocation. Points
+ * @mtd: mtd device description object pointer
+ * @size: a pointer to the ideal or maximum size of the allocation, points
* to the actual allocation size on success.
*
* This routine attempts to allocate a contiguous kernel buffer up to
@@ -722,15 +1123,6 @@ void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
*/
return kmalloc(*size, GFP_KERNEL);
}
-
-EXPORT_SYMBOL_GPL(get_mtd_device);
-EXPORT_SYMBOL_GPL(get_mtd_device_nm);
-EXPORT_SYMBOL_GPL(__get_mtd_device);
-EXPORT_SYMBOL_GPL(put_mtd_device);
-EXPORT_SYMBOL_GPL(__put_mtd_device);
-EXPORT_SYMBOL_GPL(register_mtd_user);
-EXPORT_SYMBOL_GPL(unregister_mtd_user);
-EXPORT_SYMBOL_GPL(default_mtd_writev);
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
#ifdef CONFIG_PROC_FS
@@ -738,8 +1130,6 @@ EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
/*====================================================================*/
/* Support for /proc/mtd */
-static struct proc_dir_entry *proc_mtd;
-
static int mtd_proc_show(struct seq_file *m, void *v)
{
struct mtd_info *mtd;
@@ -777,7 +1167,7 @@ static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
ret = bdi_init(bdi);
if (!ret)
- ret = bdi_register(bdi, NULL, name);
+ ret = bdi_register(bdi, NULL, "%s", name);
if (ret)
bdi_destroy(bdi);
@@ -785,6 +1175,8 @@ static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
return ret;
}
+static struct proc_dir_entry *proc_mtd;
+
static int __init init_mtd(void)
{
int ret;
@@ -805,11 +1197,17 @@ static int __init init_mtd(void)
if (ret)
goto err_bdi3;
-#ifdef CONFIG_PROC_FS
proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
-#endif /* CONFIG_PROC_FS */
+
+ ret = init_mtdchar();
+ if (ret)
+ goto out_procfs;
+
return 0;
+out_procfs:
+ if (proc_mtd)
+ remove_proc_entry("mtd", NULL);
err_bdi3:
bdi_destroy(&mtd_bdi_ro_mappable);
err_bdi2:
@@ -823,10 +1221,9 @@ err_reg:
static void __exit cleanup_mtd(void)
{
-#ifdef CONFIG_PROC_FS
+ cleanup_mtdchar();
if (proc_mtd)
- remove_proc_entry( "mtd", NULL);
-#endif /* CONFIG_PROC_FS */
+ remove_proc_entry("mtd", NULL);
class_unregister(&mtd_class);
bdi_destroy(&mtd_bdi_unmappable);
bdi_destroy(&mtd_bdi_ro_mappable);
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 0ed6126b4c1..7b0353399a1 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -1,20 +1,21 @@
-/* linux/drivers/mtd/mtdcore.h
- *
- * Header file for driver private mtdcore exports
- *
+/*
+ * These are exported solely for the purpose of mtd_blkdevs.c and mtdchar.c.
+ * You should not use them for _anything_ else.
*/
-/* These are exported solely for the purpose of mtd_blkdevs.c. You
- should not use them for _anything_ else */
-
extern struct mutex mtd_table_mutex;
-extern struct mtd_info *__mtd_next_device(int i);
-extern int add_mtd_device(struct mtd_info *mtd);
-extern int del_mtd_device(struct mtd_info *mtd);
-extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *,
- int);
-extern int del_mtd_partitions(struct mtd_info *);
+struct mtd_info *__mtd_next_device(int i);
+int add_mtd_device(struct mtd_info *mtd);
+int del_mtd_device(struct mtd_info *mtd);
+int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
+int del_mtd_partitions(struct mtd_info *);
+int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data);
+
+int __init init_mtdchar(void);
+void __exit cleanup_mtdchar(void);
#define mtd_for_each_device(mtd) \
for ((mtd) = __mtd_next_device(0); \
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index e3e40f44032..97bb8f6304d 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -112,7 +112,7 @@ static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&wait_q, &wait);
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
@@ -169,14 +169,7 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
cxt->nextpage = 0;
}
- while (mtd->block_isbad) {
- ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
- if (!ret)
- break;
- if (ret < 0) {
- printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
- return;
- }
+ while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
badblock:
printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
cxt->nextpage * record_size);
@@ -190,6 +183,11 @@ badblock:
}
}
+ if (ret < 0) {
+ printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n");
+ return;
+ }
+
for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
@@ -199,9 +197,9 @@ badblock:
return;
}
- if (mtd->block_markbad && ret == -EIO) {
- ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
- if (ret < 0) {
+ if (ret == -EIO) {
+ ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
+ if (ret < 0 && ret != -EOPNOTSUPP) {
printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
return;
}
@@ -221,12 +219,16 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
hdr[0] = cxt->nextcount;
hdr[1] = MTDOOPS_KERNMSG_MAGIC;
- if (panic)
- ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
- record_size, &retlen, cxt->oops_buf);
- else
- ret = mtd->write(mtd, cxt->nextpage * record_size,
- record_size, &retlen, cxt->oops_buf);
+ if (panic) {
+ ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
+ if (ret == -EOPNOTSUPP) {
+ printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
+ return;
+ }
+ } else
+ ret = mtd_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
if (retlen != record_size || ret < 0)
printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
@@ -253,12 +255,14 @@ static void find_next_position(struct mtdoops_context *cxt)
size_t retlen;
for (page = 0; page < cxt->oops_pages; page++) {
+ if (mtd_block_isbad(mtd, page * record_size))
+ continue;
/* Assume the page is used */
mark_page_used(cxt, page);
- ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
- &retlen, (u_char *) &count[0]);
+ ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
+ &retlen, (u_char *)&count[0]);
if (retlen != MTDOOPS_HEADER_SIZE ||
- (ret < 0 && ret != -EUCLEAN)) {
+ (ret < 0 && !mtd_is_bitflip(ret))) {
printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
page * record_size, retlen,
MTDOOPS_HEADER_SIZE, ret);
@@ -267,7 +271,7 @@ static void find_next_position(struct mtdoops_context *cxt)
if (count[0] == 0xffffffff && count[1] == 0xffffffff)
mark_page_unused(cxt, page);
- if (count[0] == 0xffffffff)
+ if (count[0] == 0xffffffff || count[1] != MTDOOPS_KERNMSG_MAGIC)
continue;
if (maxcount == 0xffffffff) {
maxcount = count[0];
@@ -285,55 +289,33 @@ static void find_next_position(struct mtdoops_context *cxt)
}
}
if (maxcount == 0xffffffff) {
- cxt->nextpage = 0;
- cxt->nextcount = 1;
- schedule_work(&cxt->work_erase);
- return;
+ cxt->nextpage = cxt->oops_pages - 1;
+ cxt->nextcount = 0;
+ }
+ else {
+ cxt->nextpage = maxpos;
+ cxt->nextcount = maxcount;
}
-
- cxt->nextpage = maxpos;
- cxt->nextcount = maxcount;
mtdoops_inc_counter(cxt);
}
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
- const char *s2, unsigned long l2)
+ enum kmsg_dump_reason reason)
{
struct mtdoops_context *cxt = container_of(dumper,
struct mtdoops_context, dump);
- unsigned long s1_start, s2_start;
- unsigned long l1_cpy, l2_cpy;
- char *dst;
-
- if (reason != KMSG_DUMP_OOPS &&
- reason != KMSG_DUMP_PANIC &&
- reason != KMSG_DUMP_KEXEC)
- return;
/* Only dump oopses if dump_oops is set */
if (reason == KMSG_DUMP_OOPS && !dump_oops)
return;
- dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
- l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
- l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
-
- s2_start = l2 - l2_cpy;
- s1_start = l1 - l1_cpy;
-
- memcpy(dst, s1 + s1_start, l1_cpy);
- memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
+ kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
+ record_size - MTDOOPS_HEADER_SIZE, NULL);
/* Panics must be written immediately */
- if (reason != KMSG_DUMP_OOPS) {
- if (!cxt->mtd->panic_write)
- printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
- else
- mtdoops_write(cxt, 1);
- return;
- }
+ if (reason != KMSG_DUMP_OOPS)
+ mtdoops_write(cxt, 1);
/* For other cases, schedule work to write it "nicely" */
schedule_work(&cxt->work_write);
@@ -369,12 +351,13 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
/* oops_page_used is a bit field */
cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
- BITS_PER_LONG));
+ BITS_PER_LONG) * sizeof(unsigned long));
if (!cxt->oops_page_used) {
printk(KERN_ERR "mtdoops: could not allocate page array\n");
return;
}
+ cxt->dump.max_reason = KMSG_DUMP_OOPS;
cxt->dump.dump = mtdoops_do_dump;
err = kmsg_dump_register(&cxt->dump);
if (err) {
@@ -401,8 +384,8 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
cxt->mtd = NULL;
- flush_work_sync(&cxt->work_erase);
- flush_work_sync(&cxt->work_write);
+ flush_work(&cxt->work_erase);
+ flush_work(&cxt->work_write);
}
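
The mtdoops changes above illustrate the calling convention of the new mtd_read()/mtd_write() wrappers: correctable bitflips are recognised with mtd_is_bitflip() and uncorrectable ECC failures with mtd_is_eccerr(), instead of comparing against -EUCLEAN/-EBADMSG directly. A minimal sketch of that pattern follows; the read_record() helper and its buffer/offset names are hypothetical and only for illustration.

#include <linux/mtd/mtd.h>
#include <linux/printk.h>

/* Hypothetical helper, not part of the patch: read one record and
 * classify the result the way mtdoops now does. */
static int read_record(struct mtd_info *mtd, loff_t off, u_char *buf,
		       size_t len)
{
	size_t retlen;
	int ret;

	ret = mtd_read(mtd, off, len, &retlen, buf);
	if (mtd_is_bitflip(ret))
		ret = 0;	/* data was corrected and is still usable */
	if (ret < 0 || retlen != len) {
		if (mtd_is_eccerr(ret))
			pr_err("uncorrectable ECC error at %lld\n",
			       (long long)off);
		return ret ? ret : -EIO;
	}
	return 0;
}

mtd_is_bitflip() and mtd_is_eccerr() simply test for -EUCLEAN and -EBADMSG, so the comparisons in the removed lines map directly onto them.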
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 630be3e7da0..1ca9aec141f 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -65,19 +65,14 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
int res;
stats = part->master->ecc_stats;
-
- if (from >= mtd->size)
- len = 0;
- else if (from + len > mtd->size)
- len = mtd->size - from;
- res = part->master->read(part->master, from + part->offset,
- len, retlen, buf);
- if (unlikely(res)) {
- if (res == -EUCLEAN)
- mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
- if (res == -EBADMSG)
- mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
- }
+ res = part->master->_read(part->master, from + part->offset, len,
+ retlen, buf);
+ if (unlikely(mtd_is_eccerr(res)))
+ mtd->ecc_stats.failed +=
+ part->master->ecc_stats.failed - stats.failed;
+ else
+ mtd->ecc_stats.corrected +=
+ part->master->ecc_stats.corrected - stats.corrected;
return res;
}
@@ -85,19 +80,16 @@ static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct mtd_part *part = PART(mtd);
- if (from >= mtd->size)
- len = 0;
- else if (from + len > mtd->size)
- len = mtd->size - from;
- return part->master->point (part->master, from + part->offset,
- len, retlen, virt, phys);
+
+ return part->master->_point(part->master, from + part->offset, len,
+ retlen, virt, phys);
}
-static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_part *part = PART(mtd);
- part->master->unpoint(part->master, from + part->offset, len);
+ return part->master->_unpoint(part->master, from + part->offset, len);
}
static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
@@ -108,8 +100,8 @@ static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
struct mtd_part *part = PART(mtd);
offset += part->offset;
- return part->master->get_unmapped_area(part->master, len, offset,
- flags);
+ return part->master->_get_unmapped_area(part->master, len, offset,
+ flags);
}
static int part_read_oob(struct mtd_info *mtd, loff_t from,
@@ -130,7 +122,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
if (ops->oobbuf) {
size_t len, pages;
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
len = mtd->oobavail;
else
len = mtd->oobsize;
@@ -140,11 +132,11 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- res = part->master->read_oob(part->master, from + part->offset, ops);
+ res = part->master->_read_oob(part->master, from + part->offset, ops);
if (unlikely(res)) {
- if (res == -EUCLEAN)
+ if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected++;
- if (res == -EBADMSG)
+ if (mtd_is_eccerr(res))
mtd->ecc_stats.failed++;
}
return res;
@@ -154,58 +146,48 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->read_user_prot_reg(part->master, from,
- len, retlen, buf);
+ return part->master->_read_user_prot_reg(part->master, from, len,
+ retlen, buf);
}
-static int part_get_user_prot_info(struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
+static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->get_user_prot_info(part->master, buf, len);
+ return part->master->_get_user_prot_info(part->master, len, retlen,
+ buf);
}
static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->read_fact_prot_reg(part->master, from,
- len, retlen, buf);
+ return part->master->_read_fact_prot_reg(part->master, from, len,
+ retlen, buf);
}
-static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
- size_t len)
+static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->get_fact_prot_info(part->master, buf, len);
+ return part->master->_get_fact_prot_info(part->master, len, retlen,
+ buf);
}
static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct mtd_part *part = PART(mtd);
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- if (to >= mtd->size)
- len = 0;
- else if (to + len > mtd->size)
- len = mtd->size - to;
- return part->master->write(part->master, to + part->offset,
- len, retlen, buf);
+ return part->master->_write(part->master, to + part->offset, len,
+ retlen, buf);
}
static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct mtd_part *part = PART(mtd);
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- if (to >= mtd->size)
- len = 0;
- else if (to + len > mtd->size)
- len = mtd->size - to;
- return part->master->panic_write(part->master, to + part->offset,
- len, retlen, buf);
+ return part->master->_panic_write(part->master, to + part->offset, len,
+ retlen, buf);
}
static int part_write_oob(struct mtd_info *mtd, loff_t to,
@@ -213,51 +195,43 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
{
struct mtd_part *part = PART(mtd);
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
-
if (to >= mtd->size)
return -EINVAL;
if (ops->datbuf && to + ops->len > mtd->size)
return -EINVAL;
- return part->master->write_oob(part->master, to + part->offset, ops);
+ return part->master->_write_oob(part->master, to + part->offset, ops);
}
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->write_user_prot_reg(part->master, from,
- len, retlen, buf);
+ return part->master->_write_user_prot_reg(part->master, from, len,
+ retlen, buf);
}
static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->lock_user_prot_reg(part->master, from, len);
+ return part->master->_lock_user_prot_reg(part->master, from, len);
}
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
struct mtd_part *part = PART(mtd);
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- return part->master->writev(part->master, vecs, count,
- to + part->offset, retlen);
+ return part->master->_writev(part->master, vecs, count,
+ to + part->offset, retlen);
}
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct mtd_part *part = PART(mtd);
int ret;
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- if (instr->addr >= mtd->size)
- return -EINVAL;
+
instr->addr += part->offset;
- ret = part->master->erase(part->master, instr);
+ ret = part->master->_erase(part->master, instr);
if (ret) {
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
@@ -268,7 +242,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
void mtd_erase_callback(struct erase_info *instr)
{
- if (instr->mtd->erase == part_erase) {
+ if (instr->mtd->_erase == part_erase) {
struct mtd_part *part = PART(instr->mtd);
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
@@ -283,52 +257,44 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback);
static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
- if ((len + ofs) > mtd->size)
- return -EINVAL;
- return part->master->lock(part->master, ofs + part->offset, len);
+ return part->master->_lock(part->master, ofs + part->offset, len);
}
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
- if ((len + ofs) > mtd->size)
- return -EINVAL;
- return part->master->unlock(part->master, ofs + part->offset, len);
+ return part->master->_unlock(part->master, ofs + part->offset, len);
}
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
- if ((len + ofs) > mtd->size)
- return -EINVAL;
- return part->master->is_locked(part->master, ofs + part->offset, len);
+ return part->master->_is_locked(part->master, ofs + part->offset, len);
}
static void part_sync(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- part->master->sync(part->master);
+ part->master->_sync(part->master);
}
static int part_suspend(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- return part->master->suspend(part->master);
+ return part->master->_suspend(part->master);
}
static void part_resume(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- part->master->resume(part->master);
+ part->master->_resume(part->master);
}
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = PART(mtd);
- if (ofs >= mtd->size)
- return -EINVAL;
ofs += part->offset;
- return part->master->block_isbad(part->master, ofs);
+ return part->master->_block_isbad(part->master, ofs);
}
static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -336,12 +302,8 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct mtd_part *part = PART(mtd);
int res;
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- if (ofs >= mtd->size)
- return -EINVAL;
ofs += part->offset;
- res = part->master->block_markbad(part->master, ofs);
+ res = part->master->_block_markbad(part->master, ofs);
if (!res)
mtd->ecc_stats.badblocks++;
return res;
@@ -416,54 +378,55 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
*/
slave->mtd.dev.parent = master->dev.parent;
- slave->mtd.read = part_read;
- slave->mtd.write = part_write;
+ slave->mtd._read = part_read;
+ slave->mtd._write = part_write;
- if (master->panic_write)
- slave->mtd.panic_write = part_panic_write;
+ if (master->_panic_write)
+ slave->mtd._panic_write = part_panic_write;
- if (master->point && master->unpoint) {
- slave->mtd.point = part_point;
- slave->mtd.unpoint = part_unpoint;
+ if (master->_point && master->_unpoint) {
+ slave->mtd._point = part_point;
+ slave->mtd._unpoint = part_unpoint;
}
- if (master->get_unmapped_area)
- slave->mtd.get_unmapped_area = part_get_unmapped_area;
- if (master->read_oob)
- slave->mtd.read_oob = part_read_oob;
- if (master->write_oob)
- slave->mtd.write_oob = part_write_oob;
- if (master->read_user_prot_reg)
- slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
- if (master->read_fact_prot_reg)
- slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
- if (master->write_user_prot_reg)
- slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
- if (master->lock_user_prot_reg)
- slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
- if (master->get_user_prot_info)
- slave->mtd.get_user_prot_info = part_get_user_prot_info;
- if (master->get_fact_prot_info)
- slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
- if (master->sync)
- slave->mtd.sync = part_sync;
- if (!partno && !master->dev.class && master->suspend && master->resume) {
- slave->mtd.suspend = part_suspend;
- slave->mtd.resume = part_resume;
+ if (master->_get_unmapped_area)
+ slave->mtd._get_unmapped_area = part_get_unmapped_area;
+ if (master->_read_oob)
+ slave->mtd._read_oob = part_read_oob;
+ if (master->_write_oob)
+ slave->mtd._write_oob = part_write_oob;
+ if (master->_read_user_prot_reg)
+ slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
+ if (master->_read_fact_prot_reg)
+ slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
+ if (master->_write_user_prot_reg)
+ slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
+ if (master->_lock_user_prot_reg)
+ slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
+ if (master->_get_user_prot_info)
+ slave->mtd._get_user_prot_info = part_get_user_prot_info;
+ if (master->_get_fact_prot_info)
+ slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
+ if (master->_sync)
+ slave->mtd._sync = part_sync;
+ if (!partno && !master->dev.class && master->_suspend &&
+ master->_resume) {
+ slave->mtd._suspend = part_suspend;
+ slave->mtd._resume = part_resume;
}
- if (master->writev)
- slave->mtd.writev = part_writev;
- if (master->lock)
- slave->mtd.lock = part_lock;
- if (master->unlock)
- slave->mtd.unlock = part_unlock;
- if (master->is_locked)
- slave->mtd.is_locked = part_is_locked;
- if (master->block_isbad)
- slave->mtd.block_isbad = part_block_isbad;
- if (master->block_markbad)
- slave->mtd.block_markbad = part_block_markbad;
- slave->mtd.erase = part_erase;
+ if (master->_writev)
+ slave->mtd._writev = part_writev;
+ if (master->_lock)
+ slave->mtd._lock = part_lock;
+ if (master->_unlock)
+ slave->mtd._unlock = part_unlock;
+ if (master->_is_locked)
+ slave->mtd._is_locked = part_is_locked;
+ if (master->_block_isbad)
+ slave->mtd._block_isbad = part_block_isbad;
+ if (master->_block_markbad)
+ slave->mtd._block_markbad = part_block_markbad;
+ slave->mtd._erase = part_erase;
slave->master = master;
slave->offset = part->offset;
@@ -479,6 +442,19 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
(unsigned long long)cur_offset, (unsigned long long)slave->offset);
}
}
+ if (slave->offset == MTDPART_OFS_RETAIN) {
+ slave->offset = cur_offset;
+ if (master->size - slave->offset >= slave->mtd.size) {
+ slave->mtd.size = master->size - slave->offset
+ - slave->mtd.size;
+ } else {
+ printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
+ part->name, master->size - slave->offset,
+ slave->mtd.size);
+ /* register to preserve ordering */
+ goto out_register;
+ }
+ }
if (slave->mtd.size == MTDPART_SIZ_FULL)
slave->mtd.size = master->size - slave->offset;
@@ -542,12 +518,15 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
}
slave->mtd.ecclayout = master->ecclayout;
- if (master->block_isbad) {
+ slave->mtd.ecc_step_size = master->ecc_step_size;
+ slave->mtd.ecc_strength = master->ecc_strength;
+ slave->mtd.bitflip_threshold = master->bitflip_threshold;
+
+ if (master->_block_isbad) {
uint64_t offs = 0;
while (offs < slave->mtd.size) {
- if (master->block_isbad(master,
- offs + slave->offset))
+ if (mtd_block_isbad(master, offs + slave->offset))
slave->mtd.ecc_stats.badblocks++;
offs += slave->mtd.erasesize;
}
@@ -557,7 +536,7 @@ out_register:
return slave;
}
-int mtd_add_partition(struct mtd_info *master, char *name,
+int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length)
{
struct mtd_partition part;
@@ -693,49 +672,82 @@ static struct mtd_part_parser *get_partition_parser(const char *name)
return ret;
}
-int register_mtd_parser(struct mtd_part_parser *p)
+#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
+
+void register_mtd_parser(struct mtd_part_parser *p)
{
spin_lock(&part_parser_lock);
list_add(&p->list, &part_parsers);
spin_unlock(&part_parser_lock);
-
- return 0;
}
EXPORT_SYMBOL_GPL(register_mtd_parser);
-int deregister_mtd_parser(struct mtd_part_parser *p)
+void deregister_mtd_parser(struct mtd_part_parser *p)
{
spin_lock(&part_parser_lock);
list_del(&p->list);
spin_unlock(&part_parser_lock);
- return 0;
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
-int parse_mtd_partitions(struct mtd_info *master, const char **types,
- struct mtd_partition **pparts, unsigned long origin)
+/*
+ * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
+ * are changing this array!
+ */
+static const char * const default_mtd_part_types[] = {
+ "cmdlinepart",
+ "ofpart",
+ NULL
+};
+
+/**
+ * parse_mtd_partitions - parse MTD partitions
+ * @master: the master partition (describes whole MTD device)
+ * @types: names of partition parsers to try or %NULL
+ * @pparts: array of partitions found is returned here
+ * @data: MTD partition parser-specific data
+ *
+ * This function tries to find partitions on MTD device @master. It uses MTD
+ * partition parsers, specified in @types. However, if @types is %NULL, then
+ * the default list of parsers is used. The default list contains only the
+ * "cmdlinepart" and "ofpart" parsers ATM.
+ * Note: If there is more than one parser in @types, the kernel only takes the
+ * partitions parsed out by the first parser.
+ *
+ * This function may return:
+ * o a negative error code in case of failure
+ * o zero if no partitions were found
+ * o a positive number of found partitions, in which case on exit @pparts will
+ * point to an array containing this number of &struct mtd_partition objects.
+ */
+int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
{
struct mtd_part_parser *parser;
int ret = 0;
+ if (!types)
+ types = default_mtd_part_types;
+
for ( ; ret <= 0 && *types; types++) {
parser = get_partition_parser(*types);
if (!parser && !request_module("%s", *types))
- parser = get_partition_parser(*types);
+ parser = get_partition_parser(*types);
if (!parser)
continue;
- ret = (*parser->parse_fn)(master, pparts, origin);
+ ret = (*parser->parse_fn)(master, pparts, data);
+ put_partition_parser(parser);
if (ret > 0) {
printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
ret, parser->name, master->name);
+ break;
}
- put_partition_parser(parser);
}
return ret;
}
-EXPORT_SYMBOL_GPL(parse_mtd_partitions);
-int mtd_is_partition(struct mtd_info *mtd)
+int mtd_is_partition(const struct mtd_info *mtd)
{
struct mtd_part *part;
int ispart = 0;
@@ -751,3 +763,13 @@ int mtd_is_partition(struct mtd_info *mtd)
return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);
+
+/* Returns the size of the entire flash chip */
+uint64_t mtd_get_device_size(const struct mtd_info *mtd)
+{
+ if (!mtd_is_partition(mtd))
+ return mtd->size;
+
+ return PART(mtd)->master->size;
+}
+EXPORT_SYMBOL_GPL(mtd_get_device_size);
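
With register_mtd_parser()/deregister_mtd_parser() now returning void and parse_mtd_partitions() taking a const char * const *types list plus a struct mtd_part_parser_data argument, a caller inside mtdcore would use it roughly as sketched below. This is only an approximation of what mtd_device_parse_register() does; the register_with_parsers() name is purely illustrative.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include "mtdcore.h"

/* Illustrative only: parse with the default parser list ("cmdlinepart",
 * "ofpart") and register whatever was found. */
static int register_with_parsers(struct mtd_info *master,
				 struct mtd_part_parser_data *ppdata)
{
	struct mtd_partition *parts;
	int nr, err;

	nr = parse_mtd_partitions(master, NULL /* default types */, &parts,
				  ppdata);
	if (nr < 0)
		return nr;		/* a parser reported an error */

	if (nr == 0)			/* nothing found: register whole device */
		return add_mtd_device(master) ? -ENODEV : 0;

	err = add_mtd_partitions(master, parts, nr);
	kfree(parts);			/* add_mtd_partitions() keeps its own copy */
	return err;
}

Per the kerneldoc above, only the first parser in @types that returns partitions wins.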
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 16b02a1fc10..20c02a3b741 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -14,8 +14,10 @@
#include <linux/mtd/super.h>
#include <linux/namei.h>
+#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/slab.h>
+#include <linux/major.h>
/*
* compare superblocks to see if they're equivalent
@@ -26,12 +28,12 @@ static int get_sb_mtd_compare(struct super_block *sb, void *_mtd)
struct mtd_info *mtd = _mtd;
if (sb->s_mtd == mtd) {
- DEBUG(2, "MTDSB: Match on device %d (\"%s\")\n",
+ pr_debug("MTDSB: Match on device %d (\"%s\")\n",
mtd->index, mtd->name);
return 1;
}
- DEBUG(2, "MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n",
+ pr_debug("MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n",
sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name);
return 0;
}
@@ -62,7 +64,7 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
struct super_block *sb;
int ret;
- sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, mtd);
+ sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, flags, mtd);
if (IS_ERR(sb))
goto out_error;
@@ -70,11 +72,9 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
goto already_mounted;
/* fresh new superblock */
- DEBUG(1, "MTDSB: New superblock for device %d (\"%s\")\n",
+ pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
mtd->index, mtd->name);
- sb->s_flags = flags;
-
ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
if (ret < 0) {
deactivate_locked_super(sb);
@@ -87,7 +87,7 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
/* new mountpoint for an already mounted superblock */
already_mounted:
- DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n",
+ pr_debug("MTDSB: Device %d (\"%s\") is already mounted\n",
mtd->index, mtd->name);
put_mtd_device(mtd);
return dget(sb->s_root);
@@ -108,7 +108,7 @@ static struct dentry *mount_mtd_nr(struct file_system_type *fs_type, int flags,
mtd = get_mtd_device(NULL, mtdnr);
if (IS_ERR(mtd)) {
- DEBUG(0, "MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
+ pr_debug("MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
return ERR_CAST(mtd);
}
@@ -131,7 +131,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
if (!dev_name)
return ERR_PTR(-EINVAL);
- DEBUG(2, "MTDSB: dev_name \"%s\"\n", dev_name);
+ pr_debug("MTDSB: dev_name \"%s\"\n", dev_name);
/* the preferred way of mounting in future; especially when
* CONFIG_BLOCK=n - we specify the underlying MTD device by number or
@@ -142,7 +142,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
struct mtd_info *mtd;
/* mount by MTD device name */
- DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n",
+ pr_debug("MTDSB: mtd:%%s, name \"%s\"\n",
dev_name + 4);
mtd = get_mtd_device_nm(dev_name + 4);
@@ -163,7 +163,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
mtdnr = simple_strtoul(dev_name + 3, &endptr, 0);
if (!*endptr) {
/* It was a valid number */
- DEBUG(1, "MTDSB: mtd%%d, mtdnr %d\n",
+ pr_debug("MTDSB: mtd%%d, mtdnr %d\n",
mtdnr);
return mount_mtd_nr(fs_type, flags,
dev_name, data,
@@ -179,10 +179,10 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
bdev = lookup_bdev(dev_name);
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
- DEBUG(1, "MTDSB: lookup_bdev() returned %d\n", ret);
+ pr_debug("MTDSB: lookup_bdev() returned %d\n", ret);
return ERR_PTR(ret);
}
- DEBUG(1, "MTDSB: lookup_bdev() returned 0\n");
+ pr_debug("MTDSB: lookup_bdev() returned 0\n");
ret = -EINVAL;
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index fd788532761..8b33b26eb12 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -86,7 +86,7 @@ struct swap_eb {
unsigned int flags;
unsigned int active_count;
unsigned int erase_count;
- unsigned int pad; /* speeds up pointer decremtnt */
+ unsigned int pad; /* speeds up pointer decrement */
};
#define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
@@ -274,12 +274,12 @@ static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
eb->root = NULL;
/* badblocks not supported */
- if (!d->mtd->block_markbad)
+ if (!mtd_can_have_bb(d->mtd))
return 1;
offset = mtdswap_eb_offset(d, eb);
dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
- ret = d->mtd->block_markbad(d->mtd, offset);
+ ret = mtd_block_markbad(d->mtd, offset);
if (ret) {
dev_warn(d->dev, "Mark block bad failed for block at %08llx "
@@ -312,9 +312,9 @@ static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
struct mtd_oob_ops *ops)
{
- int ret = d->mtd->read_oob(d->mtd, from, ops);
+ int ret = mtd_read_oob(d->mtd, from, ops);
- if (ret == -EUCLEAN)
+ if (mtd_is_bitflip(ret))
return ret;
if (ret) {
@@ -343,18 +343,18 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
offset = mtdswap_eb_offset(d, eb);
/* Check first if the block is bad. */
- if (d->mtd->block_isbad && d->mtd->block_isbad(d->mtd, offset))
+ if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
return MTDSWAP_SCANNED_BAD;
ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
ops.oobbuf = d->oob_buf;
ops.ooboffs = 0;
ops.datbuf = NULL;
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ret = mtdswap_read_oob(d, offset, &ops);
- if (ret && ret != -EUCLEAN)
+ if (ret && !mtd_is_bitflip(ret))
return ret;
data = (struct mtdswap_oobdata *)d->oob_buf;
@@ -363,7 +363,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
eb->erase_count = le32_to_cpu(data->count);
- if (ret == -EUCLEAN)
+ if (mtd_is_bitflip(ret))
ret = MTDSWAP_SCANNED_BITFLIP;
else {
if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)
@@ -389,7 +389,7 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
ops.ooboffs = 0;
ops.oobbuf = (uint8_t *)&n;
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.datbuf = NULL;
if (marker == MTDSWAP_TYPE_CLEAN) {
@@ -403,12 +403,12 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
}
- ret = d->mtd->write_oob(d->mtd, offset , &ops);
+ ret = mtd_write_oob(d->mtd, offset, &ops);
if (ret) {
dev_warn(d->dev, "Write OOB failed for block at %08llx "
"error %d\n", offset, ret);
- if (ret == -EIO || ret == -EBADMSG)
+ if (ret == -EIO || mtd_is_eccerr(ret))
mtdswap_handle_write_error(d, eb);
return ret;
}
@@ -567,7 +567,7 @@ retry:
erase.len = mtd->erasesize;
erase.priv = (u_long)&wq;
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
if (retries++ < MTDSWAP_ERASE_RETRIES) {
dev_warn(d->dev,
@@ -628,7 +628,7 @@ static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
TREE_COUNT(d, CLEAN)--;
ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
- } while (ret == -EIO || ret == -EBADMSG);
+ } while (ret == -EIO || mtd_is_eccerr(ret));
if (ret)
return ret;
@@ -678,7 +678,7 @@ retry:
ret = mtdswap_map_free_block(d, page, bp);
eb = d->eb_data + (*bp / d->pages_per_eblk);
- if (ret == -EIO || ret == -EBADMSG) {
+ if (ret == -EIO || mtd_is_eccerr(ret)) {
d->curr_write = NULL;
eb->active_count--;
d->revmap[*bp] = PAGE_UNDEF;
@@ -689,8 +689,8 @@ retry:
return ret;
writepos = (loff_t)*bp << PAGE_SHIFT;
- ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf);
- if (ret == -EIO || ret == -EBADMSG) {
+ ret = mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf);
+ if (ret == -EIO || mtd_is_eccerr(ret)) {
d->curr_write_pos--;
eb->active_count--;
d->revmap[*bp] = PAGE_UNDEF;
@@ -736,9 +736,9 @@ static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
retries = 0;
retry:
- ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
+ ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
- if (ret < 0 && ret != -EUCLEAN) {
+ if (ret < 0 && !mtd_is_bitflip(ret)) {
oldeb = d->eb_data + oldblock / d->pages_per_eblk;
oldeb->flags |= EBLOCK_READERR;
@@ -931,7 +931,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
struct mtd_oob_ops ops;
int ret;
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = mtd->writesize;
ops.ooblen = mtd->ecclayout->oobavail;
ops.ooboffs = 0;
@@ -946,7 +946,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
patt = mtdswap_test_patt(test + i);
memset(d->page_buf, patt, mtd->writesize);
memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
- ret = mtd->write_oob(mtd, pos, &ops);
+ ret = mtd_write_oob(mtd, pos, &ops);
if (ret)
goto error;
@@ -955,7 +955,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
pos = base;
for (i = 0; i < mtd_pages; i++) {
- ret = mtd->read_oob(mtd, pos, &ops);
+ ret = mtd_read_oob(mtd, pos, &ops);
if (ret)
goto error;
@@ -1016,7 +1016,7 @@ static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
if (ret == 0)
mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
- else if (ret != -EIO && ret != -EBADMSG)
+ else if (ret != -EIO && !mtd_is_eccerr(ret))
mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
return 0;
@@ -1047,8 +1047,7 @@ static int mtdswap_flush(struct mtd_blktrans_dev *dev)
{
struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
- if (d->mtd->sync)
- d->mtd->sync(d->mtd);
+ mtd_sync(d->mtd);
return 0;
}
@@ -1059,9 +1058,9 @@ static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size)
badcnt = 0;
- if (mtd->block_isbad)
+ if (mtd_can_have_bb(mtd))
for (offset = 0; offset < size; offset += mtd->erasesize)
- if (mtd->block_isbad(mtd, offset))
+ if (mtd_block_isbad(mtd, offset))
badcnt++;
return badcnt;
@@ -1161,10 +1160,10 @@ static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
retries = 0;
retry:
- ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf);
+ ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf);
d->mtd_read_count++;
- if (ret == -EUCLEAN) {
+ if (mtd_is_bitflip(ret)) {
eb->flags |= EBLOCK_BITFLIP;
mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
ret = 0;
@@ -1374,11 +1373,10 @@ static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
goto revmap_fail;
eblk_bytes = sizeof(struct swap_eb)*d->eblks;
- d->eb_data = vmalloc(eblk_bytes);
+ d->eb_data = vzalloc(eblk_bytes);
if (!d->eb_data)
goto eb_data_fail;
- memset(d->eb_data, 0, eblk_bytes);
for (i = 0; i < pages; i++)
d->page_data[i] = BLOCK_UNDEF;
@@ -1427,7 +1425,7 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
return;
while ((this_opt = strsep(&parts, ",")) != NULL) {
- if (strict_strtoul(this_opt, 0, &part) < 0)
+ if (kstrtoul(this_opt, 0, &part) < 0)
return;
if (mtd->index == part)
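
The last two mtdswap hunks are straightforward API modernisations: vzalloc() replaces the vmalloc()+memset() pair, and kstrtoul() replaces the deprecated strict_strtoul(). A small sketch of both idioms, with hypothetical helper names used only for illustration:

#include <linux/kernel.h>
#include <linux/vmalloc.h>

/* Illustrative only: kstrtoul() returns 0 on success or a negative errno. */
static int parse_part_number(const char *opt, unsigned long *part)
{
	return kstrtoul(opt, 0, part);
}

/* Illustrative only: vzalloc() hands back zeroed memory, so no memset(). */
static void *alloc_eb_table(size_t nr_blocks, size_t eb_size)
{
	return vzalloc(nr_blocks * eb_size);
}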
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 4c3425235ad..f1cf503517f 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -22,15 +22,6 @@ menuconfig MTD_NAND
if MTD_NAND
-config MTD_NAND_VERIFY_WRITE
- bool "Verify NAND page writes"
- help
- This adds an extra check when data is written to the flash. The
- NAND flash device internally checks only bits transitioning
- from 1 to 0. There is a rare possibility that even though the
- device thinks the write was successful, a bit could have been
- flipped accidentally due to device wear or something else.
-
config MTD_NAND_BCH
tristate
select BCH
@@ -50,32 +41,32 @@ config MTD_SM_COMMON
tristate
default n
-config MTD_NAND_MUSEUM_IDS
- bool "Enable chip ids for obsolete ancient NAND devices"
- default n
- help
- Enable this option only when your board has first generation
- NAND chips (page size 256 byte, erase size 4-8KiB). The IDs
- of these chips were reused by later, larger chips.
-
-config MTD_NAND_AUTCPU12
- tristate "SmartMediaCard on autronix autcpu12 board"
- depends on ARCH_AUTCPU12
- help
- This enables the driver for the autronix autcpu12 board to
- access the SmartMediaCard.
-
config MTD_NAND_DENALI
- depends on PCI
+ tristate "Support Denali NAND controller"
+ depends on HAS_DMA
+ help
+ Enable support for the Denali NAND controller. This should be
+ combined with either the PCI or platform drivers to provide device
+ registration.
+
+config MTD_NAND_DENALI_PCI
tristate "Support Denali NAND controller on Intel Moorestown"
+ depends on PCI && MTD_NAND_DENALI
help
Enable the driver for NAND flash on Intel Moorestown, using the
Denali NAND controller core.
-
+
+config MTD_NAND_DENALI_DT
+ tristate "Support Denali NAND controller as a DT device"
+ depends on HAVE_CLK && MTD_NAND_DENALI
+ help
+ Enable the driver for NAND flash on platforms using a Denali NAND
+ controller as a DT device.
+
config MTD_NAND_DENALI_SCRATCH_REG_ADDR
hex "Denali NAND size scratch register address"
default "0xFF108018"
- depends on MTD_NAND_DENALI
+ depends on MTD_NAND_DENALI_PCI
help
Some platforms place the NAND chip size in a scratch register
because (some versions of) the driver aren't able to automatically
@@ -83,31 +74,12 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
scratch register here to enable this feature. On Intel Moorestown
boards, the scratch register is at 0xFF108018.
-config MTD_NAND_EDB7312
- tristate "Support for Cirrus Logic EBD7312 evaluation board"
- depends on ARCH_EDB7312
- help
- This enables the driver for the Cirrus Logic EBD7312 evaluation
- board to access the onboard NAND Flash.
-
-config MTD_NAND_H1900
- tristate "iPAQ H1900 flash"
- depends on ARCH_PXA
- help
- This enables the driver for the iPAQ h1900 flash.
-
config MTD_NAND_GPIO
tristate "GPIO NAND Flash driver"
- depends on GENERIC_GPIO && ARM
+ depends on GPIOLIB
help
This enables a GPIO based NAND flash driver.
-config MTD_NAND_SPIA
- tristate "NAND Flash device on SPIA board"
- depends on ARCH_P720T
- help
- If you had to ask, you don't have one. Say 'N'.
-
config MTD_NAND_AMS_DELTA
tristate "NAND Flash device on Amstrad E3"
depends on MACH_AMS_DELTA
@@ -116,10 +88,23 @@ config MTD_NAND_AMS_DELTA
Support for NAND flash on Amstrad E3 (Delta).
config MTD_NAND_OMAP2
- tristate "NAND Flash device on OMAP2 and OMAP3"
- depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3)
+ tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4"
+ depends on ARCH_OMAP2PLUS
+ help
+ Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
+ platforms.
+
+config MTD_NAND_OMAP_BCH
+ depends on MTD_NAND_OMAP2
+ tristate "Support hardware based BCH error correction"
+ default n
+ select BCH
help
- Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
+ This config enables the ELM hardware engine, which can be used to
+ locate and correct errors when using the BCH ECC scheme. This offloads
+ the CPU from doing ECC error searching and correction. However, some
+ legacy OMAP families like OMAP2xxx and OMAP3xxx do not have the ELM
+ engine, so they should not enable this config symbol.
config MTD_NAND_IDS
tristate
@@ -138,7 +123,7 @@ config MTD_NAND_RICOH
config MTD_NAND_AU1550
tristate "Au1550/1200 NAND support"
- depends on SOC_AU1200 || SOC_AU1550
+ depends on MIPS_ALCHEMY
help
This enables the driver for the NAND flash controller on the
AMD/Alchemy 1550 SOC.
@@ -175,25 +160,9 @@ config MTD_NAND_BF5XX_BOOTROM_ECC
If unsure, say N.
-config MTD_NAND_RTC_FROM4
- tristate "Renesas Flash ROM 4-slot interface board (FROM_BOARD4)"
- depends on SH_SOLUTION_ENGINE
- select REED_SOLOMON
- select REED_SOLOMON_DEC8
- select BITREVERSE
- help
- This enables the driver for the Renesas Technology AG-AND
- flash interface board (FROM_BOARD4)
-
-config MTD_NAND_PPCHAMELEONEVB
- tristate "NAND Flash device on PPChameleonEVB board"
- depends on PPCHAMELEONEVB && BROKEN
- help
- This enables the NAND flash driver on the PPChameleon EVB Board.
-
config MTD_NAND_S3C2410
tristate "NAND Flash support for Samsung S3C SoCs"
- depends on ARCH_S3C2410 || ARCH_S3C64XX
+ depends on ARCH_S3C24XX || ARCH_S3C64XX
help
This enables the NAND flash controller on the S3C24xx and S3C64xx
SoCs
@@ -233,25 +202,9 @@ config MTD_NAND_S3C2410_CLKSTOP
when the NAND chip is selected or released, but will save
approximately 5mA of power when there is nothing happening.
-config MTD_NAND_BCM_UMI
- tristate "NAND Flash support for BCM Reference Boards"
- depends on ARCH_BCMRING
- help
- This enables the NAND flash controller on the BCM UMI block.
-
- No board specific support is done by this driver, each board
- must advertise a platform_device for the driver to attach.
-
-config MTD_NAND_BCM_UMI_HWCS
- bool "BCM UMI NAND Hardware CS"
- depends on MTD_NAND_BCM_UMI
- help
- Enable the use of the BCM UMI block's internal CS using NAND.
- This should only be used if you know the external NAND CS can toggle.
-
config MTD_NAND_DISKONCHIP
- tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
+ depends on HAS_IOMEM
select REED_SOLOMON
select REED_SOLOMON_DEC16
help
@@ -319,6 +272,26 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
load time (assuming you build diskonchip as a module) with the module
parameter "inftl_bbt_write=1".
+config MTD_NAND_DOCG4
+ tristate "Support for DiskOnChip G4"
+ depends on HAS_IOMEM
+ select BCH
+ select BITREVERSE
+ help
+ Support for DiskOnChip G4 NAND flash, found in various smartphones and
+ PDAs, among them the Palm Treo680, HTC Prophet and Wizard, Toshiba
+ Portege G900, Asus P526, and O2 XDA Zinc.
+
+ With this driver you will be able to use UBI and create a ubifs on the
+ device, so you may wish to consider enabling UBI and UBIFS as well.
+
+ These devices ship with the Mys/Sandisk SAFTL formatting, for which
+ there is currently no mtd parser, so you may want to use command line
+ partitioning to segregate write-protected blocks. On the Treo680, the
+ first five erase blocks (256KiB each) are write-protected, followed
+ by the block containing the saftl partition table. This is probably
+ typical.
+
config MTD_NAND_SHARPSL
tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
depends on ARCH_PXA
@@ -351,53 +324,35 @@ config MTD_NAND_ATMEL
help
Enables support for NAND Flash / Smart Media Card interface
on Atmel AT91 and AVR32 processors.
-choice
- prompt "ECC management for NAND Flash / SmartMedia on AT91 / AVR32"
- depends on MTD_NAND_ATMEL
-config MTD_NAND_ATMEL_ECC_HW
- bool "Hardware ECC"
- depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9260 || AVR32
+config MTD_NAND_PXA3xx
+ tristate "NAND support on PXA3xx and Armada 370/XP"
+ depends on PXA3xx || ARCH_MMP || PLAT_ORION
help
- Use hardware ECC instead of software ECC when the chip
- supports it.
-
- The hardware ECC controller is capable of single bit error
- correction and 2-bit random detection per page.
-
- NB : hardware and software ECC schemes are incompatible.
- If you switch from one to another, you'll have to erase your
- mtd partition.
-
- If unsure, say Y
+ This enables the driver for the NAND flash device found on
+ PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
-config MTD_NAND_ATMEL_ECC_SOFT
- bool "Software ECC"
+config MTD_NAND_SLC_LPC32XX
+ tristate "NXP LPC32xx SLC Controller"
+ depends on ARCH_LPC32XX
help
- Use software ECC.
+ Enables support for NXP's LPC32XX SLC (i.e. for Single Level Cell
+ chips) NAND controller. This is the default for the PHYTEC 3250
+ reference board which contains a NAND256R3A2CZA6 chip.
- NB : hardware and software ECC schemes are incompatible.
- If you switch from one to another, you'll have to erase your
- mtd partition.
+ Please check the actual NAND chip connected and its support
+ by the SLC NAND controller.
-config MTD_NAND_ATMEL_ECC_NONE
- bool "No ECC (testing only, DANGEROUS)"
- depends on DEBUG_KERNEL
+config MTD_NAND_MLC_LPC32XX
+ tristate "NXP LPC32xx MLC Controller"
+ depends on ARCH_LPC32XX
help
- No ECC will be used.
- It's not a good idea and it should be reserved for testing
- purpose only.
-
- If unsure, say N
+ Uses the LPC32XX MLC (i.e. for Multi Level Cell chips) NAND
+ controller. This is the default for the WORK92105 controller
+ board.
-endchoice
-
-config MTD_NAND_PXA3xx
- tristate "Support for NAND flash devices on PXA3xx"
- depends on PXA3xx || ARCH_MMP
- help
- This enables the driver for the NAND flash device found on
- PXA3xx processors
+ Please check the actual NAND chip connected and its support
+ by the MLC NAND controller.
config MTD_NAND_CM_X270
tristate "Support for NAND Flash on CM-X270 modules"
@@ -423,20 +378,33 @@ config MTD_NAND_NANDSIM
The simulator may simulate various NAND flash chips for the
MTD nand layer.
+config MTD_NAND_GPMI_NAND
+ tristate "GPMI NAND Flash Controller driver"
+ depends on MTD_NAND && MXS_DMA
+ help
+ Enables NAND Flash support for IMX23, IMX28 or IMX6.
+ The GPMI controller is very powerful; with the help of the BCH
+ module, it can do hardware ECC. The GPMI supports several NAND
+ flashes at the same time. The GPMI may conflict with other blocks,
+ such as the SD card, so pay attention to it when you enable the
+ GPMI.
+
+config MTD_NAND_BCM47XXNFLASH
+ tristate "Support for NAND flash on BCM4706 BCMA bus"
+ depends on BCMA_NFLASH
+ help
+ The BCMA bus can have various flash memories attached; they are
+ registered by bcma as platform devices. This enables the driver for
+ NAND flash memories. For now, only the BCM4706 is supported.
+
config MTD_NAND_PLATFORM
tristate "Support for generic platform NAND driver"
+ depends on HAS_IOMEM
help
This implements a generic NAND driver for on-SOC platform
devices. You will need to provide platform-specific functions
via platform_data.
-config MTD_ALAUDA
- tristate "MTD driver for Olympus MAUSB-10 and Fujifilm DPC-R1"
- depends on USB
- help
- These two (and possibly other) Alauda-based cardreaders for
- SmartMedia and xD allow raw flash access.
-
config MTD_NAND_ORION
tristate "NAND Flash support for Marvell Orion SoC"
depends on PLAT_ORION
@@ -456,6 +424,17 @@ config MTD_NAND_FSL_ELBC
Enabling this option will enable you to use this to control
external NAND devices.
+config MTD_NAND_FSL_IFC
+ tristate "NAND support for Freescale IFC controller"
+ depends on MTD_NAND && FSL_SOC
+ select FSL_IFC
+ select MEMORY
+ help
+ Various Freescale chips e.g P1010, include a NAND Flash machine
+ with built-in hardware ECC capabilities.
+ Enabling this option will enable you to use this to control
+ external NAND devices.
+
config MTD_NAND_FSL_UPM
tristate "Support for NAND on Freescale UPM"
depends on PPC_83xx || PPC_85xx
@@ -473,30 +452,26 @@ config MTD_NAND_MPC5121_NFC
config MTD_NAND_MXC
tristate "MXC NAND support"
- depends on IMX_HAVE_PLATFORM_MXC_NAND
+ depends on ARCH_MXC
help
This enables the driver for the NAND flash controller on the
MXC processors.
-config MTD_NAND_NOMADIK
- tristate "ST Nomadik 8815 NAND support"
- depends on ARCH_NOMADIK
- help
- Driver for the NAND flash controller on the Nomadik, with ECC.
-
config MTD_NAND_SH_FLCTL
tristate "Support for NAND on Renesas SuperH FLCTL"
- depends on SUPERH || ARCH_SHMOBILE
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on HAS_DMA
help
Several Renesas SuperH CPU has FLCTL. This option enables support
for NAND Flash using FLCTL.
config MTD_NAND_DAVINCI
- tristate "Support NAND on DaVinci SoC"
- depends on ARCH_DAVINCI
+ tristate "Support NAND on DaVinci/Keystone SoC"
+ depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF)
help
Enable the driver for NAND flash chips on Texas Instruments
- DaVinci processors.
+ DaVinci/Keystone processors.
config MTD_NAND_TXX9NDFMC
tristate "NAND Flash support for TXx9 SoC"
@@ -525,9 +500,17 @@ config MTD_NAND_JZ4740
config MTD_NAND_FSMC
tristate "Support for NAND on ST Micros FSMC"
- depends on PLAT_SPEAR || PLAT_NOMADIK || MACH_U300
+ depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300
help
Enables support for NAND Flash chips on the ST Microelectronics
Flexible Static Memory Controller (FSMC)
+config MTD_NAND_XWAY
+ tristate "Support for NAND on Lantiq XWAY SoC"
+ depends on LANTIQ && SOC_TYPE_XWAY
+ select MTD_NAND_PLATFORM
+ help
+ Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
+ to the External Bus Unit (EBU).
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 5745d831168..542b5689eb6 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -9,20 +9,17 @@ obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
-obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
-obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
-obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
+obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o
+obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
-obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
+obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o
obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
-obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
-obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
@@ -34,20 +31,23 @@ obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
-obj-$(CONFIG_MTD_ALAUDA) += alauda.o
obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
+obj-$(CONFIG_MTD_NAND_SLC_LPC32XX) += lpc32xx_slc.o
+obj-$(CONFIG_MTD_NAND_MLC_LPC32XX) += lpc32xx_mlc.o
obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
-obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
-obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
+obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
deleted file mode 100644
index eb40ea829ab..00000000000
--- a/drivers/mtd/nand/alauda.c
+++ /dev/null
@@ -1,733 +0,0 @@
-/*
- * MTD driver for Alauda chips
- *
- * Copyright (C) 2007 Joern Engel <joern@logfs.org>
- *
- * Based on drivers/usb/usb-skeleton.c which is:
- * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
- * and on drivers/usb/storage/alauda.c, which is:
- * (c) 2005 Daniel Drake <dsd@gentoo.org>
- *
- * Idea and initial work by Arnd Bergmann <arnd@arndb.de>
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kref.h>
-#include <linux/usb.h>
-#include <linux/mutex.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand_ecc.h>
-
-/* Control commands */
-#define ALAUDA_GET_XD_MEDIA_STATUS 0x08
-#define ALAUDA_ACK_XD_MEDIA_CHANGE 0x0a
-#define ALAUDA_GET_XD_MEDIA_SIG 0x86
-
-/* Common prefix */
-#define ALAUDA_BULK_CMD 0x40
-
-/* The two ports */
-#define ALAUDA_PORT_XD 0x00
-#define ALAUDA_PORT_SM 0x01
-
-/* Bulk commands */
-#define ALAUDA_BULK_READ_PAGE 0x84
-#define ALAUDA_BULK_READ_OOB 0x85 /* don't use, there's a chip bug */
-#define ALAUDA_BULK_READ_BLOCK 0x94
-#define ALAUDA_BULK_ERASE_BLOCK 0xa3
-#define ALAUDA_BULK_WRITE_PAGE 0xa4
-#define ALAUDA_BULK_WRITE_BLOCK 0xb4
-#define ALAUDA_BULK_RESET_MEDIA 0xe0
-
-/* Address shifting */
-#define PBA_LO(pba) ((pba & 0xF) << 5)
-#define PBA_HI(pba) (pba >> 3)
-#define PBA_ZONE(pba) (pba >> 11)
-
-#define TIMEOUT HZ
-
-static const struct usb_device_id alauda_table[] = {
- { USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */
- { USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */
- { }
-};
-MODULE_DEVICE_TABLE(usb, alauda_table);
-
-struct alauda_card {
- u8 id; /* id byte */
- u8 chipshift; /* 1<<chipshift total size */
- u8 pageshift; /* 1<<pageshift page size */
- u8 blockshift; /* 1<<blockshift block size */
-};
-
-struct alauda {
- struct usb_device *dev;
- struct usb_interface *interface;
- struct mtd_info *mtd;
- struct alauda_card *card;
- struct mutex card_mutex;
- u32 pagemask;
- u32 bytemask;
- u32 blockmask;
- unsigned int write_out;
- unsigned int bulk_in;
- unsigned int bulk_out;
- u8 port;
- struct kref kref;
-};
-
-static struct alauda_card alauda_card_ids[] = {
- /* NAND flash */
- { 0x6e, 20, 8, 12}, /* 1 MB */
- { 0xe8, 20, 8, 12}, /* 1 MB */
- { 0xec, 20, 8, 12}, /* 1 MB */
- { 0x64, 21, 8, 12}, /* 2 MB */
- { 0xea, 21, 8, 12}, /* 2 MB */
- { 0x6b, 22, 9, 13}, /* 4 MB */
- { 0xe3, 22, 9, 13}, /* 4 MB */
- { 0xe5, 22, 9, 13}, /* 4 MB */
- { 0xe6, 23, 9, 13}, /* 8 MB */
- { 0x73, 24, 9, 14}, /* 16 MB */
- { 0x75, 25, 9, 14}, /* 32 MB */
- { 0x76, 26, 9, 14}, /* 64 MB */
- { 0x79, 27, 9, 14}, /* 128 MB */
- { 0x71, 28, 9, 14}, /* 256 MB */
-
- /* MASK ROM */
- { 0x5d, 21, 9, 13}, /* 2 MB */
- { 0xd5, 22, 9, 13}, /* 4 MB */
- { 0xd6, 23, 9, 13}, /* 8 MB */
- { 0x57, 24, 9, 13}, /* 16 MB */
- { 0x58, 25, 9, 13}, /* 32 MB */
- { }
-};
-
-static struct alauda_card *get_card(u8 id)
-{
- struct alauda_card *card;
-
- for (card = alauda_card_ids; card->id; card++)
- if (card->id == id)
- return card;
- return NULL;
-}
-
-static void alauda_delete(struct kref *kref)
-{
- struct alauda *al = container_of(kref, struct alauda, kref);
-
- if (al->mtd) {
- mtd_device_unregister(al->mtd);
- kfree(al->mtd);
- }
- usb_put_dev(al->dev);
- kfree(al);
-}
-
-static int alauda_get_media_status(struct alauda *al, void *buf)
-{
- int ret;
-
- mutex_lock(&al->card_mutex);
- ret = usb_control_msg(al->dev, usb_rcvctrlpipe(al->dev, 0),
- ALAUDA_GET_XD_MEDIA_STATUS, 0xc0, 0, 1, buf, 2, HZ);
- mutex_unlock(&al->card_mutex);
- return ret;
-}
-
-static int alauda_ack_media(struct alauda *al)
-{
- int ret;
-
- mutex_lock(&al->card_mutex);
- ret = usb_control_msg(al->dev, usb_sndctrlpipe(al->dev, 0),
- ALAUDA_ACK_XD_MEDIA_CHANGE, 0x40, 0, 1, NULL, 0, HZ);
- mutex_unlock(&al->card_mutex);
- return ret;
-}
-
-static int alauda_get_media_signatures(struct alauda *al, void *buf)
-{
- int ret;
-
- mutex_lock(&al->card_mutex);
- ret = usb_control_msg(al->dev, usb_rcvctrlpipe(al->dev, 0),
- ALAUDA_GET_XD_MEDIA_SIG, 0xc0, 0, 0, buf, 4, HZ);
- mutex_unlock(&al->card_mutex);
- return ret;
-}
-
-static void alauda_reset(struct alauda *al)
-{
- u8 command[] = {
- ALAUDA_BULK_CMD, ALAUDA_BULK_RESET_MEDIA, 0, 0,
- 0, 0, 0, 0, al->port
- };
- mutex_lock(&al->card_mutex);
- usb_bulk_msg(al->dev, al->bulk_out, command, 9, NULL, HZ);
- mutex_unlock(&al->card_mutex);
-}
-
-static void correct_data(void *buf, void *read_ecc,
- int *corrected, int *uncorrected)
-{
- u8 calc_ecc[3];
- int err;
-
- nand_calculate_ecc(NULL, buf, calc_ecc);
- err = nand_correct_data(NULL, buf, read_ecc, calc_ecc);
- if (err) {
- if (err > 0)
- (*corrected)++;
- else
- (*uncorrected)++;
- }
-}
-
-struct alauda_sg_request {
- struct urb *urb[3];
- struct completion comp;
-};
-
-static void alauda_complete(struct urb *urb)
-{
- struct completion *comp = urb->context;
-
- if (comp)
- complete(comp);
-}
-
-static int __alauda_read_page(struct mtd_info *mtd, loff_t from, void *buf,
- void *oob)
-{
- struct alauda_sg_request sg;
- struct alauda *al = mtd->priv;
- u32 pba = from >> al->card->blockshift;
- u32 page = (from >> al->card->pageshift) & al->pagemask;
- u8 command[] = {
- ALAUDA_BULK_CMD, ALAUDA_BULK_READ_PAGE, PBA_HI(pba),
- PBA_ZONE(pba), 0, PBA_LO(pba) + page, 1, 0, al->port
- };
- int i, err;
-
- for (i=0; i<3; i++)
- sg.urb[i] = NULL;
-
- err = -ENOMEM;
- for (i=0; i<3; i++) {
- sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
- if (!sg.urb[i])
- goto out;
- }
- init_completion(&sg.comp);
- usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
- alauda_complete, NULL);
- usb_fill_bulk_urb(sg.urb[1], al->dev, al->bulk_in, buf, mtd->writesize,
- alauda_complete, NULL);
- usb_fill_bulk_urb(sg.urb[2], al->dev, al->bulk_in, oob, 16,
- alauda_complete, &sg.comp);
-
- mutex_lock(&al->card_mutex);
- for (i=0; i<3; i++) {
- err = usb_submit_urb(sg.urb[i], GFP_NOIO);
- if (err)
- goto cancel;
- }
- if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
- err = -ETIMEDOUT;
-cancel:
- for (i=0; i<3; i++) {
- usb_kill_urb(sg.urb[i]);
- }
- }
- mutex_unlock(&al->card_mutex);
-
-out:
- usb_free_urb(sg.urb[0]);
- usb_free_urb(sg.urb[1]);
- usb_free_urb(sg.urb[2]);
- return err;
-}
-
-static int alauda_read_page(struct mtd_info *mtd, loff_t from,
- void *buf, u8 *oob, int *corrected, int *uncorrected)
-{
- int err;
-
- err = __alauda_read_page(mtd, from, buf, oob);
- if (err)
- return err;
- correct_data(buf, oob+13, corrected, uncorrected);
- correct_data(buf+256, oob+8, corrected, uncorrected);
- return 0;
-}
-
-static int alauda_write_page(struct mtd_info *mtd, loff_t to, void *buf,
- void *oob)
-{
- struct alauda_sg_request sg;
- struct alauda *al = mtd->priv;
- u32 pba = to >> al->card->blockshift;
- u32 page = (to >> al->card->pageshift) & al->pagemask;
- u8 command[] = {
- ALAUDA_BULK_CMD, ALAUDA_BULK_WRITE_PAGE, PBA_HI(pba),
- PBA_ZONE(pba), 0, PBA_LO(pba) + page, 32, 0, al->port
- };
- int i, err;
-
- for (i=0; i<3; i++)
- sg.urb[i] = NULL;
-
- err = -ENOMEM;
- for (i=0; i<3; i++) {
- sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
- if (!sg.urb[i])
- goto out;
- }
- init_completion(&sg.comp);
- usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
- alauda_complete, NULL);
- usb_fill_bulk_urb(sg.urb[1], al->dev, al->write_out, buf,mtd->writesize,
- alauda_complete, NULL);
- usb_fill_bulk_urb(sg.urb[2], al->dev, al->write_out, oob, 16,
- alauda_complete, &sg.comp);
-
- mutex_lock(&al->card_mutex);
- for (i=0; i<3; i++) {
- err = usb_submit_urb(sg.urb[i], GFP_NOIO);
- if (err)
- goto cancel;
- }
- if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
- err = -ETIMEDOUT;
-cancel:
- for (i=0; i<3; i++) {
- usb_kill_urb(sg.urb[i]);
- }
- }
- mutex_unlock(&al->card_mutex);
-
-out:
- usb_free_urb(sg.urb[0]);
- usb_free_urb(sg.urb[1]);
- usb_free_urb(sg.urb[2]);
- return err;
-}
-
-static int alauda_erase_block(struct mtd_info *mtd, loff_t ofs)
-{
- struct alauda_sg_request sg;
- struct alauda *al = mtd->priv;
- u32 pba = ofs >> al->card->blockshift;
- u8 command[] = {
- ALAUDA_BULK_CMD, ALAUDA_BULK_ERASE_BLOCK, PBA_HI(pba),
- PBA_ZONE(pba), 0, PBA_LO(pba), 0x02, 0, al->port
- };
- u8 buf[2];
- int i, err;
-
- for (i=0; i<2; i++)
- sg.urb[i] = NULL;
-
- err = -ENOMEM;
- for (i=0; i<2; i++) {
- sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
- if (!sg.urb[i])
- goto out;
- }
- init_completion(&sg.comp);
- usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
- alauda_complete, NULL);
- usb_fill_bulk_urb(sg.urb[1], al->dev, al->bulk_in, buf, 2,
- alauda_complete, &sg.comp);
-
- mutex_lock(&al->card_mutex);
- for (i=0; i<2; i++) {
- err = usb_submit_urb(sg.urb[i], GFP_NOIO);
- if (err)
- goto cancel;
- }
- if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
- err = -ETIMEDOUT;
-cancel:
- for (i=0; i<2; i++) {
- usb_kill_urb(sg.urb[i]);
- }
- }
- mutex_unlock(&al->card_mutex);
-
-out:
- usb_free_urb(sg.urb[0]);
- usb_free_urb(sg.urb[1]);
- return err;
-}
-
-static int alauda_read_oob(struct mtd_info *mtd, loff_t from, void *oob)
-{
- static u8 ignore_buf[512]; /* write only */
-
- return __alauda_read_page(mtd, from, ignore_buf, oob);
-}
-
-static int alauda_isbad(struct mtd_info *mtd, loff_t ofs)
-{
- u8 oob[16];
- int err;
-
- err = alauda_read_oob(mtd, ofs, oob);
- if (err)
- return err;
-
- /* A block is marked bad if two or more bits are zero */
- return hweight8(oob[5]) >= 7 ? 0 : 1;
-}
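The hweight8() test above treats the marker byte as good only when at most one of its bits is zero, the usual NAND bad-block convention. A small standalone check of that rule (sample values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Count set bits, like the kernel's hweight8(). */
static int popcount8(uint8_t v)
{
	int n = 0;
	for (; v; v >>= 1)
		n += v & 1;
	return n;
}

int main(void)
{
	/* 0xff and 0xf7 (one zero bit) pass; 0xe7 (two zero bits) is bad. */
	uint8_t samples[] = { 0xff, 0xf7, 0xe7 };
	for (int i = 0; i < 3; i++)
		printf("0x%02x -> %s\n", samples[i],
		       popcount8(samples[i]) >= 7 ? "good" : "bad");
	return 0;
}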
-
-static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct alauda *al = mtd->priv;
- void *bounce_buf;
- int err, corrected=0, uncorrected=0;
-
- bounce_buf = kmalloc(mtd->writesize, GFP_KERNEL);
- if (!bounce_buf)
- return -ENOMEM;
-
- *retlen = len;
- while (len) {
- u8 oob[16];
- size_t byte = from & al->bytemask;
- size_t cplen = min(len, mtd->writesize - byte);
-
- err = alauda_read_page(mtd, from, bounce_buf, oob,
- &corrected, &uncorrected);
- if (err)
- goto out;
-
- memcpy(buf, bounce_buf + byte, cplen);
- buf += cplen;
- from += cplen;
- len -= cplen;
- }
- err = 0;
- if (corrected)
- err = -EUCLEAN;
- if (uncorrected)
- err = -EBADMSG;
-out:
- kfree(bounce_buf);
- return err;
-}
-
-static int alauda_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct alauda *al = mtd->priv;
- int err, corrected=0, uncorrected=0;
-
- if ((from & al->bytemask) || (len & al->bytemask))
- return alauda_bounce_read(mtd, from, len, retlen, buf);
-
- *retlen = len;
- while (len) {
- u8 oob[16];
-
- err = alauda_read_page(mtd, from, buf, oob,
- &corrected, &uncorrected);
- if (err)
- return err;
-
- buf += mtd->writesize;
- from += mtd->writesize;
- len -= mtd->writesize;
- }
- err = 0;
- if (corrected)
- err = -EUCLEAN;
- if (uncorrected)
- err = -EBADMSG;
- return err;
-}
-
-static int alauda_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct alauda *al = mtd->priv;
- int err;
-
- if ((to & al->bytemask) || (len & al->bytemask))
- return -EINVAL;
-
- *retlen = len;
- while (len) {
- u32 page = (to >> al->card->pageshift) & al->pagemask;
- u8 oob[16] = { 'h', 'e', 'l', 'l', 'o', 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- /* don't write to bad blocks */
- if (page == 0) {
- err = alauda_isbad(mtd, to);
- if (err) {
- return -EIO;
- }
- }
- nand_calculate_ecc(mtd, buf, &oob[13]);
- nand_calculate_ecc(mtd, buf+256, &oob[8]);
-
- err = alauda_write_page(mtd, to, (void*)buf, oob);
- if (err)
- return err;
-
- buf += mtd->writesize;
- to += mtd->writesize;
- len -= mtd->writesize;
- }
- return 0;
-}
-
-static int __alauda_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- struct alauda *al = mtd->priv;
- u32 ofs = instr->addr;
- u32 len = instr->len;
- int err;
-
- if ((ofs & al->blockmask) || (len & al->blockmask))
- return -EINVAL;
-
- while (len) {
- /* don't erase bad blocks */
- err = alauda_isbad(mtd, ofs);
- if (err > 0)
- err = -EIO;
- if (err < 0)
- return err;
-
- err = alauda_erase_block(mtd, ofs);
- if (err < 0)
- return err;
-
- ofs += mtd->erasesize;
- len -= mtd->erasesize;
- }
- return 0;
-}
-
-static int alauda_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- int err;
-
- err = __alauda_erase(mtd, instr);
- instr->state = err ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
- mtd_erase_callback(instr);
- return err;
-}
-
-static int alauda_init_media(struct alauda *al)
-{
- u8 buf[4], *b0=buf, *b1=buf+1;
- struct alauda_card *card;
- struct mtd_info *mtd;
- int err;
-
- mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
- if (!mtd)
- return -ENOMEM;
-
- for (;;) {
- err = alauda_get_media_status(al, buf);
- if (err < 0)
- goto error;
- if (*b0 & 0x10)
- break;
- msleep(20);
- }
-
- err = alauda_ack_media(al);
- if (err)
- goto error;
-
- msleep(10);
-
- err = alauda_get_media_status(al, buf);
- if (err < 0)
- goto error;
-
- if (*b0 != 0x14) {
- /* media not ready */
- err = -EIO;
- goto error;
- }
- err = alauda_get_media_signatures(al, buf);
- if (err < 0)
- goto error;
-
- card = get_card(*b1);
- if (!card) {
- printk(KERN_ERR"Alauda: unknown card id %02x\n", *b1);
- err = -EIO;
- goto error;
- }
- printk(KERN_INFO"pagesize=%x\nerasesize=%x\nsize=%xMiB\n",
- 1<<card->pageshift, 1<<card->blockshift,
- 1<<(card->chipshift-20));
- al->card = card;
- al->pagemask = (1 << (card->blockshift - card->pageshift)) - 1;
- al->bytemask = (1 << card->pageshift) - 1;
- al->blockmask = (1 << card->blockshift) - 1;
-
- mtd->name = "alauda";
- mtd->size = 1<<card->chipshift;
- mtd->erasesize = 1<<card->blockshift;
- mtd->writesize = 1<<card->pageshift;
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
- mtd->read = alauda_read;
- mtd->write = alauda_write;
- mtd->erase = alauda_erase;
- mtd->block_isbad = alauda_isbad;
- mtd->priv = al;
- mtd->owner = THIS_MODULE;
-
- err = mtd_device_register(mtd, NULL, 0);
- if (err) {
- err = -ENFILE;
- goto error;
- }
-
- al->mtd = mtd;
- alauda_reset(al); /* no clue whether this is necessary */
- return 0;
-error:
- kfree(mtd);
- return err;
-}
-
-static int alauda_check_media(struct alauda *al)
-{
- u8 buf[2], *b0 = buf, *b1 = buf+1;
- int err;
-
- err = alauda_get_media_status(al, buf);
- if (err < 0)
- return err;
-
- if ((*b1 & 0x01) == 0) {
- /* door open */
- return -EIO;
- }
- if ((*b0 & 0x80) || ((*b0 & 0x1F) == 0x10)) {
- /* no media ? */
- return -EIO;
- }
- if (*b0 & 0x08) {
- /* media change ? */
- return alauda_init_media(al);
- }
- return 0;
-}
-
-static int alauda_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct alauda *al;
- struct usb_host_interface *iface;
- struct usb_endpoint_descriptor *ep,
- *ep_in=NULL, *ep_out=NULL, *ep_wr=NULL;
- int i, err = -ENOMEM;
-
- al = kzalloc(2*sizeof(*al), GFP_KERNEL);
- if (!al)
- goto error;
-
- kref_init(&al->kref);
- usb_set_intfdata(interface, al);
-
- al->dev = usb_get_dev(interface_to_usbdev(interface));
- al->interface = interface;
-
- iface = interface->cur_altsetting;
- for (i = 0; i < iface->desc.bNumEndpoints; ++i) {
- ep = &iface->endpoint[i].desc;
-
- if (usb_endpoint_is_bulk_in(ep)) {
- ep_in = ep;
- } else if (usb_endpoint_is_bulk_out(ep)) {
- if (i==0)
- ep_wr = ep;
- else
- ep_out = ep;
- }
- }
- err = -EIO;
- if (!ep_wr || !ep_in || !ep_out)
- goto error;
-
- al->write_out = usb_sndbulkpipe(al->dev,
- usb_endpoint_num(ep_wr));
- al->bulk_in = usb_rcvbulkpipe(al->dev,
- usb_endpoint_num(ep_in));
- al->bulk_out = usb_sndbulkpipe(al->dev,
- usb_endpoint_num(ep_out));
-
- /* second device is identical up to now */
- memcpy(al+1, al, sizeof(*al));
-
- mutex_init(&al[0].card_mutex);
- mutex_init(&al[1].card_mutex);
-
- al[0].port = ALAUDA_PORT_XD;
- al[1].port = ALAUDA_PORT_SM;
-
- dev_info(&interface->dev, "alauda probed\n");
- alauda_check_media(al);
- alauda_check_media(al+1);
-
- return 0;
-
-error:
- if (al)
- kref_put(&al->kref, alauda_delete);
- return err;
-}
-
-static void alauda_disconnect(struct usb_interface *interface)
-{
- struct alauda *al;
-
- al = usb_get_intfdata(interface);
- usb_set_intfdata(interface, NULL);
-
- /* FIXME: prevent more I/O from starting */
-
- /* decrement our usage count */
- if (al)
- kref_put(&al->kref, alauda_delete);
-
- dev_info(&interface->dev, "alauda gone");
-}
-
-static struct usb_driver alauda_driver = {
- .name = "alauda",
- .probe = alauda_probe,
- .disconnect = alauda_disconnect,
- .id_table = alauda_table,
-};
-
-static int __init alauda_init(void)
-{
- return usb_register(&alauda_driver);
-}
-
-static void __exit alauda_exit(void)
-{
- usb_deregister(&alauda_driver);
-}
-
-module_init(alauda_init);
-module_exit(alauda_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 78017eb9318..4936e9e0002 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -17,25 +17,26 @@
*/
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/gpio-omap.h>
+
#include <asm/io.h>
-#include <mach/hardware.h>
#include <asm/sizes.h>
-#include <mach/gpio.h>
-#include <plat/board-ams-delta.h>
+
+#include <mach/board-ams-delta.h>
+
+#include <mach/hardware.h>
/*
* MTD structure for E3 (Delta)
*/
static struct mtd_info *ams_delta_mtd = NULL;
-#define NAND_MASK (AMS_DELTA_LATCH2_NAND_NRE | AMS_DELTA_LATCH2_NAND_NWE | AMS_DELTA_LATCH2_NAND_CLE | AMS_DELTA_LATCH2_NAND_ALE | AMS_DELTA_LATCH2_NAND_NCE | AMS_DELTA_LATCH2_NAND_NWP)
-
/*
* Define partitions for flash devices
*/
@@ -68,10 +69,9 @@ static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
writew(0, io_base + OMAP_MPUIO_IO_CNTL);
writew(byte, this->IO_ADDR_W);
- ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE, 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 0);
ndelay(40);
- ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE,
- AMS_DELTA_LATCH2_NAND_NWE);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 1);
}
static u_char ams_delta_read_byte(struct mtd_info *mtd)
@@ -80,12 +80,11 @@ static u_char ams_delta_read_byte(struct mtd_info *mtd)
struct nand_chip *this = mtd->priv;
void __iomem *io_base = this->priv;
- ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE, 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 0);
ndelay(40);
writew(~0, io_base + OMAP_MPUIO_IO_CNTL);
res = readw(this->IO_ADDR_R);
- ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE,
- AMS_DELTA_LATCH2_NAND_NRE);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 1);
return res;
}
@@ -107,18 +106,6 @@ static void ams_delta_read_buf(struct mtd_info *mtd, u_char *buf, int len)
buf[i] = ams_delta_read_byte(mtd);
}
-static int ams_delta_verify_buf(struct mtd_info *mtd, const u_char *buf,
- int len)
-{
- int i;
-
- for (i=0; i<len; i++)
- if (buf[i] != ams_delta_read_byte(mtd))
- return -EFAULT;
-
- return 0;
-}
-
/*
* Command control function
*
@@ -132,15 +119,12 @@ static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd,
{
if (ctrl & NAND_CTRL_CHANGE) {
- unsigned long bits;
-
- bits = (~ctrl & NAND_NCE) ? AMS_DELTA_LATCH2_NAND_NCE : 0;
- bits |= (ctrl & NAND_CLE) ? AMS_DELTA_LATCH2_NAND_CLE : 0;
- bits |= (ctrl & NAND_ALE) ? AMS_DELTA_LATCH2_NAND_ALE : 0;
-
- ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_CLE |
- AMS_DELTA_LATCH2_NAND_ALE |
- AMS_DELTA_LATCH2_NAND_NCE, bits);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NCE,
+ (ctrl & NAND_NCE) == 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_CLE,
+ (ctrl & NAND_CLE) != 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_ALE,
+ (ctrl & NAND_ALE) != 0);
}
if (cmd != NAND_CMD_NONE)
@@ -152,10 +136,43 @@ static int ams_delta_nand_ready(struct mtd_info *mtd)
return gpio_get_value(AMS_DELTA_GPIO_PIN_NAND_RB);
}
+static const struct gpio _mandatory_gpio[] = {
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NCE,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nce",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NRE,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nre",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NWP,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nwp",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NWE,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nwe",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_ALE,
+ .flags = GPIOF_OUT_INIT_LOW,
+ .label = "nand_ale",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_CLE,
+ .flags = GPIOF_OUT_INIT_LOW,
+ .label = "nand_cle",
+ },
+};
+
/*
* Main initialization routine
*/
-static int __devinit ams_delta_init(struct platform_device *pdev)
+static int ams_delta_init(struct platform_device *pdev)
{
struct nand_chip *this;
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -186,18 +203,17 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
/* Link the private data with the MTD structure */
ams_delta_mtd->priv = this;
- if (!request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev))) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- err = -EBUSY;
- goto out_free;
- }
+ /*
+ * Don't try to request the memory region from here,
+ * it should have been already requested from the
+ * gpio-omap driver and requesting it again would fail.
+ */
io_base = ioremap(res->start, resource_size(res));
if (io_base == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
err = -EIO;
- goto out_release_io;
+ goto out_free;
}
this->priv = io_base;
@@ -208,7 +224,6 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
this->read_byte = ams_delta_read_byte;
this->write_buf = ams_delta_write_buf;
this->read_buf = ams_delta_read_buf;
- this->verify_buf = ams_delta_verify_buf;
this->cmd_ctrl = ams_delta_hwcontrol;
if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) {
this->dev_ready = ams_delta_nand_ready;
@@ -223,10 +238,9 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
platform_set_drvdata(pdev, io_base);
/* Set chip enabled, but */
- ams_delta_latch2_write(NAND_MASK, AMS_DELTA_LATCH2_NAND_NRE |
- AMS_DELTA_LATCH2_NAND_NWE |
- AMS_DELTA_LATCH2_NAND_NCE |
- AMS_DELTA_LATCH2_NAND_NWP);
+ err = gpio_request_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+ if (err)
+ goto out_gpio;
/* Scan to find existence of the device */
if (nand_scan(ams_delta_mtd, 1)) {
@@ -241,10 +255,10 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
goto out;
out_mtd:
- platform_set_drvdata(pdev, NULL);
+ gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+out_gpio:
+ gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
iounmap(io_base);
-out_release_io:
- release_mem_region(res->start, resource_size(res));
out_free:
kfree(ams_delta_mtd);
out:
@@ -254,16 +268,16 @@ out_free:
/*
* Clean up routine
*/
-static int __devexit ams_delta_cleanup(struct platform_device *pdev)
+static int ams_delta_cleanup(struct platform_device *pdev)
{
void __iomem *io_base = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* Release resources, unregister device */
nand_release(ams_delta_mtd);
+ gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+ gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
iounmap(io_base);
- release_mem_region(res->start, resource_size(res));
/* Free the MTD device structure */
kfree(ams_delta_mtd);
@@ -273,24 +287,14 @@ static int __devexit ams_delta_cleanup(struct platform_device *pdev)
static struct platform_driver ams_delta_nand_driver = {
.probe = ams_delta_init,
- .remove = __devexit_p(ams_delta_cleanup),
+ .remove = ams_delta_cleanup,
.driver = {
.name = "ams-delta-nand",
.owner = THIS_MODULE,
},
};
-static int __init ams_delta_nand_init(void)
-{
- return platform_driver_register(&ams_delta_nand_driver);
-}
-module_init(ams_delta_nand_init);
-
-static void __exit ams_delta_nand_exit(void)
-{
- platform_driver_unregister(&ams_delta_nand_driver);
-}
-module_exit(ams_delta_nand_exit);
+module_platform_driver(ams_delta_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index b300705d41c..4ce181a35bc 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -1,20 +1,25 @@
/*
- * Copyright (C) 2003 Rick Bronson
+ * Copyright © 2003 Rick Bronson
*
* Derived from drivers/mtd/nand/autcpu12.c
- * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
+ * Copyright © 2001 Thomas Gleixner (gleixner@autronix.de)
*
* Derived from drivers/mtd/spia.c
- * Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
+ * Copyright © 2000 Steven J. Hill (sjhill@cotw.com)
*
*
* Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
- * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright (C) 2007
+ * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007
*
* Derived from Das U-Boot source code
* (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
- * (C) Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ * © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
*
+ * Add Programmable Multibit ECC support for various AT91 SoC
+ * © Copyright 2012 ATMEL, Hong Xu
+ *
+ * Add Nand Flash Controller support for SAMA5 SoC
+ * © Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -22,32 +27,25 @@
*
*/
+#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_mtd.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-
-#include <mach/board.h>
-#include <mach/cpu.h>
-
-#ifdef CONFIG_MTD_NAND_ATMEL_ECC_HW
-#define hard_ecc 1
-#else
-#define hard_ecc 0
-#endif
-
-#ifdef CONFIG_MTD_NAND_ATMEL_ECC_NONE
-#define no_ecc 1
-#else
-#define no_ecc 0
-#endif
+#include <linux/platform_data/atmel.h>
static int use_dma = 1;
module_param(use_dma, int, 0);
@@ -62,6 +60,7 @@ module_param(on_flash_bbt, int, 0);
__raw_writel((value), add + ATMEL_ECC_##reg)
#include "atmel_nand_ecc.h" /* Hardware ECC registers */
+#include "atmel_nand_nfc.h" /* Nand Flash Controller definition */
/* oob layout for large page size
* bad block info is on bytes 0 and 1
@@ -89,31 +88,75 @@ static struct nand_ecclayout atmel_oobinfo_small = {
},
};
+struct atmel_nfc {
+ void __iomem *base_cmd_regs;
+ void __iomem *hsmc_regs;
+ void __iomem *sram_bank0;
+ dma_addr_t sram_bank0_phys;
+ bool use_nfc_sram;
+ bool write_by_sram;
+
+ bool is_initialized;
+ struct completion comp_nfc;
+
+ /* Points to the SRAM bank holding the data read in via the NFC */
+ void __iomem *data_in_sram;
+ bool will_write_sram;
+};
+static struct atmel_nfc nand_nfc;
+
struct atmel_nand_host {
struct nand_chip nand_chip;
struct mtd_info mtd;
void __iomem *io_base;
dma_addr_t io_phys;
- struct atmel_nand_data *board;
+ struct atmel_nand_data board;
struct device *dev;
void __iomem *ecc;
struct completion comp;
struct dma_chan *dma_chan;
+
+ struct atmel_nfc *nfc;
+
+ bool has_pmecc;
+ u8 pmecc_corr_cap;
+ u16 pmecc_sector_size;
+ u32 pmecc_lookup_table_offset;
+ u32 pmecc_lookup_table_offset_512;
+ u32 pmecc_lookup_table_offset_1024;
+
+ int pmecc_bytes_per_sector;
+ int pmecc_sector_number;
+ int pmecc_degree; /* Degree of remainders */
+ int pmecc_cw_len; /* Length of codeword */
+
+ void __iomem *pmerrloc_base;
+ void __iomem *pmecc_rom_base;
+
+ /* lookup table for alpha_to and index_of */
+ void __iomem *pmecc_alpha_to;
+ void __iomem *pmecc_index_of;
+
+ /* data for pmecc computation */
+ int16_t *pmecc_partial_syn;
+ int16_t *pmecc_si;
+ int16_t *pmecc_smu; /* Sigma table */
+ int16_t *pmecc_lmu; /* polynomial order */
+ int *pmecc_mu;
+ int *pmecc_dmu;
+ int *pmecc_delta;
};
-static int cpu_has_dma(void)
-{
- return cpu_is_at91sam9rl() || cpu_is_at91sam9g45();
-}
+static struct nand_ecclayout atmel_pmecc_oobinfo;
/*
* Enable NAND.
*/
static void atmel_nand_enable(struct atmel_nand_host *host)
{
- if (host->board->enable_pin)
- gpio_set_value(host->board->enable_pin, 0);
+ if (gpio_is_valid(host->board.enable_pin))
+ gpio_set_value(host->board.enable_pin, 0);
}
/*
@@ -121,8 +164,8 @@ static void atmel_nand_enable(struct atmel_nand_host *host)
*/
static void atmel_nand_disable(struct atmel_nand_host *host)
{
- if (host->board->enable_pin)
- gpio_set_value(host->board->enable_pin, 1);
+ if (gpio_is_valid(host->board.enable_pin))
+ gpio_set_value(host->board.enable_pin, 1);
}
/*
@@ -143,9 +186,9 @@ static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl
return;
if (ctrl & NAND_CLE)
- writeb(cmd, host->io_base + (1 << host->board->cle));
+ writeb(cmd, host->io_base + (1 << host->board.cle));
else
- writeb(cmd, host->io_base + (1 << host->board->ale));
+ writeb(cmd, host->io_base + (1 << host->board.ale));
}
/*
@@ -156,8 +199,78 @@ static int atmel_nand_device_ready(struct mtd_info *mtd)
struct nand_chip *nand_chip = mtd->priv;
struct atmel_nand_host *host = nand_chip->priv;
- return gpio_get_value(host->board->rdy_pin) ^
- !!host->board->rdy_pin_active_low;
+ return gpio_get_value(host->board.rdy_pin) ^
+ !!host->board.rdy_pin_active_low;
+}
+
+/* Set up for hardware ready pin and enable pin. */
+static int atmel_nand_set_enable_ready_pins(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+ int res = 0;
+
+ if (gpio_is_valid(host->board.rdy_pin)) {
+ res = devm_gpio_request(host->dev,
+ host->board.rdy_pin, "nand_rdy");
+ if (res < 0) {
+ dev_err(host->dev,
+ "can't request rdy gpio %d\n",
+ host->board.rdy_pin);
+ return res;
+ }
+
+ res = gpio_direction_input(host->board.rdy_pin);
+ if (res < 0) {
+ dev_err(host->dev,
+ "can't request input direction rdy gpio %d\n",
+ host->board.rdy_pin);
+ return res;
+ }
+
+ chip->dev_ready = atmel_nand_device_ready;
+ }
+
+ if (gpio_is_valid(host->board.enable_pin)) {
+ res = devm_gpio_request(host->dev,
+ host->board.enable_pin, "nand_enable");
+ if (res < 0) {
+ dev_err(host->dev,
+ "can't request enable gpio %d\n",
+ host->board.enable_pin);
+ return res;
+ }
+
+ res = gpio_direction_output(host->board.enable_pin, 1);
+ if (res < 0) {
+ dev_err(host->dev,
+ "can't request output direction enable gpio %d\n",
+ host->board.enable_pin);
+ return res;
+ }
+ }
+
+ return res;
+}
+
+static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
+{
+ int i;
+ u32 *t = trg;
+ const __iomem u32 *s = src;
+
+ for (i = 0; i < (size >> 2); i++)
+ *t++ = readl_relaxed(s++);
+}
+
+static void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+ int i;
+ u32 __iomem *t = trg;
+ const u32 *s = src;
+
+ for (i = 0; i < (size >> 2); i++)
+ writel_relaxed(*s++, t++);
}
/*
@@ -166,15 +279,27 @@ static int atmel_nand_device_ready(struct mtd_info *mtd)
static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
{
struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
- __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
+ if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
+ memcpy32_fromio(buf, host->nfc->data_in_sram, len);
+ host->nfc->data_in_sram += len;
+ } else {
+ __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
+ }
}
static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
{
struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
- __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
+ if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
+ memcpy32_fromio(buf, host->nfc->data_in_sram, len);
+ host->nfc->data_in_sram += len;
+ } else {
+ __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
+ }
}
static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
@@ -196,6 +321,40 @@ static void dma_complete_func(void *completion)
complete(completion);
}
+static int nfc_set_sram_bank(struct atmel_nand_host *host, unsigned int bank)
+{
+ /* NFC only has two banks. Must be 0 or 1 */
+ if (bank > 1)
+ return -EINVAL;
+
+ if (bank) {
+ /* The NFC can only handle 2 banks for flashes with 2k pages or smaller */
+ if (host->mtd.writesize > 2048)
+ return -EINVAL;
+ nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK1);
+ } else {
+ nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK0);
+ }
+
+ return 0;
+}
+
+static uint nfc_get_sram_off(struct atmel_nand_host *host)
+{
+ if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1)
+ return NFC_SRAM_BANK1_OFFSET;
+ else
+ return 0;
+}
+
+static dma_addr_t nfc_sram_phys(struct atmel_nand_host *host)
+{
+ if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1)
+ return host->nfc->sram_bank0_phys + NFC_SRAM_BANK1_OFFSET;
+ else
+ return host->nfc->sram_bank0_phys;
+}
+
static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
int is_read)
{
@@ -209,14 +368,14 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
void *p = buf;
int err = -EIO;
enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ struct atmel_nfc *nfc = host->nfc;
if (buf >= high_memory)
goto err_buf;
dma_dev = host->dma_chan->device;
- flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
- DMA_COMPL_SKIP_DEST_UNMAP;
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
if (dma_mapping_error(dma_dev->dev, phys_addr)) {
@@ -225,11 +384,20 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
}
if (is_read) {
- dma_src_addr = host->io_phys;
+ if (nfc && nfc->data_in_sram)
+ dma_src_addr = nfc_sram_phys(host) + (nfc->data_in_sram
+ - (nfc->sram_bank0 + nfc_get_sram_off(host)));
+ else
+ dma_src_addr = host->io_phys;
+
dma_dst_addr = phys_addr;
} else {
dma_src_addr = phys_addr;
- dma_dst_addr = host->io_phys;
+
+ if (nfc && nfc->write_by_sram)
+ dma_dst_addr = nfc_sram_phys(host);
+ else
+ dma_dst_addr = host->io_phys;
}
tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr,
@@ -252,13 +420,17 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
dma_async_issue_pending(host->dma_chan);
wait_for_completion(&host->comp);
+ if (is_read && nfc && nfc->data_in_sram)
+ /* After reading data from SRAM, advance the position */
+ nfc->data_in_sram += len;
+
err = 0;
err_dma:
dma_unmap_single(dma_dev->dev, phys_addr, len, dir);
err_buf:
if (err != 0)
- dev_warn(host->dev, "Fall back to CPU I/O\n");
+ dev_dbg(host->dev, "Fall back to CPU I/O\n");
return err;
}
@@ -272,7 +444,7 @@ static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
return;
- if (host->board->bus_width_16)
+ if (host->board.bus_width_16)
atmel_read_buf16(mtd, buf, len);
else
atmel_read_buf8(mtd, buf, len);
@@ -288,13 +460,779 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
return;
- if (host->board->bus_width_16)
+ if (host->board.bus_width_16)
atmel_write_buf16(mtd, buf, len);
else
atmel_write_buf8(mtd, buf, len);
}
/*
+ * Return number of ecc bytes per sector according to sector size and
+ * correction capability
+ *
+ * Following table shows what at91 PMECC supported:
+ * Correction Capability Sector_512_bytes Sector_1024_bytes
+ * ===================== ================ =================
+ * 2-bits 4-bytes 4-bytes
+ * 4-bits 7-bytes 7-bytes
+ * 8-bits 13-bytes 14-bytes
+ * 12-bits 20-bytes 21-bytes
+ * 24-bits 39-bytes 42-bytes
+ */
+static int pmecc_get_ecc_bytes(int cap, int sector_size)
+{
+ int m = 12 + sector_size / 512;
+ return (m * cap + 7) / 8;
+}
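The arithmetic above reproduces the table in the comment: m is the Galois-field dimension (13 for 512-byte sectors, 14 for 1024-byte sectors) and the m*cap parity bits are rounded up to whole bytes. A standalone sketch that prints the same rows:

#include <stdio.h>

/* Same arithmetic as pmecc_get_ecc_bytes() above. */
static int ecc_bytes(int cap, int sector_size)
{
	int m = 12 + sector_size / 512;
	return (m * cap + 7) / 8;
}

int main(void)
{
	int caps[] = { 2, 4, 8, 12, 24 };
	for (int i = 0; i < 5; i++)
		printf("%2d-bit: %2d / %2d bytes\n", caps[i],
		       ecc_bytes(caps[i], 512), ecc_bytes(caps[i], 1024));
	/* Prints 4/4, 7/7, 13/14, 20/21, 39/42 -- matching the table. */
	return 0;
}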
+
+static void pmecc_config_ecc_layout(struct nand_ecclayout *layout,
+ int oobsize, int ecc_len)
+{
+ int i;
+
+ layout->eccbytes = ecc_len;
+
+ /* ECC will occupy the last ecc_len bytes continuously */
+ for (i = 0; i < ecc_len; i++)
+ layout->eccpos[i] = oobsize - ecc_len + i;
+
+ layout->oobfree[0].offset = 2;
+ layout->oobfree[0].length =
+ oobsize - ecc_len - layout->oobfree[0].offset;
+}
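As a concrete example of the layout built above: with a hypothetical 64-byte OOB and 16 ECC bytes, eccpos covers bytes 48..63 and the free area is offset 2, length 46. A tiny standalone check:

#include <stdio.h>

int main(void)
{
	int oobsize = 64, ecc_len = 16;	/* hypothetical values */

	/* ECC occupies the last ecc_len bytes; free area starts at byte 2. */
	printf("eccpos: %d..%d\n", oobsize - ecc_len, oobsize - 1);
	printf("oobfree: offset 2, length %d\n", oobsize - ecc_len - 2);
	return 0;
}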
+
+static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
+{
+ int table_size;
+
+ table_size = host->pmecc_sector_size == 512 ?
+ PMECC_LOOKUP_TABLE_SIZE_512 : PMECC_LOOKUP_TABLE_SIZE_1024;
+
+ return host->pmecc_rom_base + host->pmecc_lookup_table_offset +
+ table_size * sizeof(int16_t);
+}
+
+static int pmecc_data_alloc(struct atmel_nand_host *host)
+{
+ const int cap = host->pmecc_corr_cap;
+ int size;
+
+ size = (2 * cap + 1) * sizeof(int16_t);
+ host->pmecc_partial_syn = devm_kzalloc(host->dev, size, GFP_KERNEL);
+ host->pmecc_si = devm_kzalloc(host->dev, size, GFP_KERNEL);
+ host->pmecc_lmu = devm_kzalloc(host->dev,
+ (cap + 1) * sizeof(int16_t), GFP_KERNEL);
+ host->pmecc_smu = devm_kzalloc(host->dev,
+ (cap + 2) * size, GFP_KERNEL);
+
+ size = (cap + 1) * sizeof(int);
+ host->pmecc_mu = devm_kzalloc(host->dev, size, GFP_KERNEL);
+ host->pmecc_dmu = devm_kzalloc(host->dev, size, GFP_KERNEL);
+ host->pmecc_delta = devm_kzalloc(host->dev, size, GFP_KERNEL);
+
+ if (!host->pmecc_partial_syn ||
+ !host->pmecc_si ||
+ !host->pmecc_lmu ||
+ !host->pmecc_smu ||
+ !host->pmecc_mu ||
+ !host->pmecc_dmu ||
+ !host->pmecc_delta)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ int i;
+ uint32_t value;
+
+ /* Fill odd syndromes */
+ for (i = 0; i < host->pmecc_corr_cap; i++) {
+ value = pmecc_readl_rem_relaxed(host->ecc, sector, i / 2);
+ if (i & 1)
+ value >>= 16;
+ value &= 0xffff;
+ host->pmecc_partial_syn[(2 * i) + 1] = (int16_t)value;
+ }
+}
+
+static void pmecc_substitute(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ int16_t __iomem *alpha_to = host->pmecc_alpha_to;
+ int16_t __iomem *index_of = host->pmecc_index_of;
+ int16_t *partial_syn = host->pmecc_partial_syn;
+ const int cap = host->pmecc_corr_cap;
+ int16_t *si;
+ int i, j;
+
+ /* si[] is a table that holds the current syndrome value,
+ * an element of that table belongs to the field
+ */
+ si = host->pmecc_si;
+
+ memset(&si[1], 0, sizeof(int16_t) * (2 * cap - 1));
+
+ /* Compute the 2t syndromes based on S(x) */
+ /* Odd syndromes */
+ for (i = 1; i < 2 * cap; i += 2) {
+ for (j = 0; j < host->pmecc_degree; j++) {
+ if (partial_syn[i] & ((unsigned short)0x1 << j))
+ si[i] = readw_relaxed(alpha_to + i * j) ^ si[i];
+ }
+ }
+ /* Even syndrome = (Odd syndrome) ** 2 */
+ for (i = 2, j = 1; j <= cap; i = ++j << 1) {
+ if (si[j] == 0) {
+ si[i] = 0;
+ } else {
+ int16_t tmp;
+
+ tmp = readw_relaxed(index_of + si[j]);
+ tmp = (tmp * 2) % host->pmecc_cw_len;
+ si[i] = readw_relaxed(alpha_to + tmp);
+ }
+ }
+
+ return;
+}
+
+static void pmecc_get_sigma(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ int16_t *lmu = host->pmecc_lmu;
+ int16_t *si = host->pmecc_si;
+ int *mu = host->pmecc_mu;
+ int *dmu = host->pmecc_dmu; /* Discrepancy */
+ int *delta = host->pmecc_delta; /* Delta order */
+ int cw_len = host->pmecc_cw_len;
+ const int16_t cap = host->pmecc_corr_cap;
+ const int num = 2 * cap + 1;
+ int16_t __iomem *index_of = host->pmecc_index_of;
+ int16_t __iomem *alpha_to = host->pmecc_alpha_to;
+ int i, j, k;
+ uint32_t dmu_0_count, tmp;
+ int16_t *smu = host->pmecc_smu;
+
+ /* index of largest delta */
+ int ro;
+ int largest;
+ int diff;
+
+ dmu_0_count = 0;
+
+ /* First Row */
+
+ /* Mu */
+ mu[0] = -1;
+
+ memset(smu, 0, sizeof(int16_t) * num);
+ smu[0] = 1;
+
+ /* discrepancy set to 1 */
+ dmu[0] = 1;
+ /* polynomial order set to 0 */
+ lmu[0] = 0;
+ delta[0] = (mu[0] * 2 - lmu[0]) >> 1;
+
+ /* Second Row */
+
+ /* Mu */
+ mu[1] = 0;
+ /* Sigma(x) set to 1 */
+ memset(&smu[num], 0, sizeof(int16_t) * num);
+ smu[num] = 1;
+
+ /* discrepancy set to S1 */
+ dmu[1] = si[1];
+
+ /* polynomial order set to 0 */
+ lmu[1] = 0;
+
+ delta[1] = (mu[1] * 2 - lmu[1]) >> 1;
+
+ /* Init the Sigma(x) last row */
+ memset(&smu[(cap + 1) * num], 0, sizeof(int16_t) * num);
+
+ for (i = 1; i <= cap; i++) {
+ mu[i + 1] = i << 1;
+ /* Begin Computing Sigma (Mu+1) and L(mu) */
+ /* check if discrepancy is set to 0 */
+ if (dmu[i] == 0) {
+ dmu_0_count++;
+
+ tmp = ((cap - (lmu[i] >> 1) - 1) / 2);
+ if ((cap - (lmu[i] >> 1) - 1) & 0x1)
+ tmp += 2;
+ else
+ tmp += 1;
+
+ if (dmu_0_count == tmp) {
+ for (j = 0; j <= (lmu[i] >> 1) + 1; j++)
+ smu[(cap + 1) * num + j] =
+ smu[i * num + j];
+
+ lmu[cap + 1] = lmu[i];
+ return;
+ }
+
+ /* copy polynomial */
+ for (j = 0; j <= lmu[i] >> 1; j++)
+ smu[(i + 1) * num + j] = smu[i * num + j];
+
+ /* copy previous polynomial order to the next */
+ lmu[i + 1] = lmu[i];
+ } else {
+ ro = 0;
+ largest = -1;
+ /* find largest delta with dmu != 0 */
+ for (j = 0; j < i; j++) {
+ if ((dmu[j]) && (delta[j] > largest)) {
+ largest = delta[j];
+ ro = j;
+ }
+ }
+
+ /* compute difference */
+ diff = (mu[i] - mu[ro]);
+
+ /* Compute degree of the new smu polynomial */
+ if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff))
+ lmu[i + 1] = lmu[i];
+ else
+ lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2;
+
+ /* Init smu[i+1] with 0 */
+ for (k = 0; k < num; k++)
+ smu[(i + 1) * num + k] = 0;
+
+ /* Compute smu[i+1] */
+ for (k = 0; k <= lmu[ro] >> 1; k++) {
+ int16_t a, b, c;
+
+ if (!(smu[ro * num + k] && dmu[i]))
+ continue;
+ a = readw_relaxed(index_of + dmu[i]);
+ b = readw_relaxed(index_of + dmu[ro]);
+ c = readw_relaxed(index_of + smu[ro * num + k]);
+ tmp = a + (cw_len - b) + c;
+ a = readw_relaxed(alpha_to + tmp % cw_len);
+ smu[(i + 1) * num + (k + diff)] = a;
+ }
+
+ for (k = 0; k <= lmu[i] >> 1; k++)
+ smu[(i + 1) * num + k] ^= smu[i * num + k];
+ }
+
+ /* End Computing Sigma (Mu+1) and L(mu) */
+ /* In either case compute delta */
+ delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1;
+
+ /* Do not compute discrepancy for the last iteration */
+ if (i >= cap)
+ continue;
+
+ for (k = 0; k <= (lmu[i + 1] >> 1); k++) {
+ tmp = 2 * (i - 1);
+ if (k == 0) {
+ dmu[i + 1] = si[tmp + 3];
+ } else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) {
+ int16_t a, b, c;
+ a = readw_relaxed(index_of +
+ smu[(i + 1) * num + k]);
+ b = si[2 * (i - 1) + 3 - k];
+ c = readw_relaxed(index_of + b);
+ tmp = a + c;
+ tmp %= cw_len;
+ dmu[i + 1] = readw_relaxed(alpha_to + tmp) ^
+ dmu[i + 1];
+ }
+ }
+ }
+
+ return;
+}
+
+static int pmecc_err_location(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ unsigned long end_time;
+ const int cap = host->pmecc_corr_cap;
+ const int num = 2 * cap + 1;
+ int sector_size = host->pmecc_sector_size;
+ int err_nbr = 0; /* number of errors */
+ int roots_nbr; /* number of roots */
+ int i;
+ uint32_t val;
+ int16_t *smu = host->pmecc_smu;
+
+ pmerrloc_writel(host->pmerrloc_base, ELDIS, PMERRLOC_DISABLE);
+
+ for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) {
+ pmerrloc_writel_sigma_relaxed(host->pmerrloc_base, i,
+ smu[(cap + 1) * num + i]);
+ err_nbr++;
+ }
+
+ val = (err_nbr - 1) << 16;
+ if (sector_size == 1024)
+ val |= 1;
+
+ pmerrloc_writel(host->pmerrloc_base, ELCFG, val);
+ pmerrloc_writel(host->pmerrloc_base, ELEN,
+ sector_size * 8 + host->pmecc_degree * cap);
+
+ end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+ while (!(pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
+ & PMERRLOC_CALC_DONE)) {
+ if (unlikely(time_after(jiffies, end_time))) {
+ dev_err(host->dev, "PMECC: Timeout to calculate error location.\n");
+ return -1;
+ }
+ cpu_relax();
+ }
+
+ roots_nbr = (pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
+ & PMERRLOC_ERR_NUM_MASK) >> 8;
+ /* Number of roots == degree of smu hence <= cap */
+ if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1)
+ return err_nbr - 1;
+
+ /* Number of roots does not match the degree of smu;
+ * unable to correct the errors */
+ return -1;
+}
+
+static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
+ int sector_num, int extra_bytes, int err_nbr)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ int i = 0;
+ int byte_pos, bit_pos, sector_size, pos;
+ uint32_t tmp;
+ uint8_t err_byte;
+
+ sector_size = host->pmecc_sector_size;
+
+ while (err_nbr) {
+ tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_base, i) - 1;
+ byte_pos = tmp / 8;
+ bit_pos = tmp % 8;
+
+ if (byte_pos >= (sector_size + extra_bytes))
+ BUG(); /* should never happen */
+
+ if (byte_pos < sector_size) {
+ err_byte = *(buf + byte_pos);
+ *(buf + byte_pos) ^= (1 << bit_pos);
+
+ pos = sector_num * host->pmecc_sector_size + byte_pos;
+ dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+ pos, bit_pos, err_byte, *(buf + byte_pos));
+ } else {
+ /* Bit flip in OOB area */
+ tmp = sector_num * host->pmecc_bytes_per_sector
+ + (byte_pos - sector_size);
+ err_byte = ecc[tmp];
+ ecc[tmp] ^= (1 << bit_pos);
+
+ pos = tmp + nand_chip->ecc.layout->eccpos[0];
+ dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+ pos, bit_pos, err_byte, ecc[tmp]);
+ }
+
+ i++;
+ err_nbr--;
+ }
+
+ return;
+}
+
+static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
+ u8 *ecc)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ int i, err_nbr, eccbytes;
+ uint8_t *buf_pos;
+ int total_err = 0;
+
+ eccbytes = nand_chip->ecc.bytes;
+ for (i = 0; i < eccbytes; i++)
+ if (ecc[i] != 0xff)
+ goto normal_check;
+ /* Erased page, return OK */
+ return 0;
+
+normal_check:
+ for (i = 0; i < host->pmecc_sector_number; i++) {
+ err_nbr = 0;
+ if (pmecc_stat & 0x1) {
+ buf_pos = buf + i * host->pmecc_sector_size;
+
+ pmecc_gen_syndrome(mtd, i);
+ pmecc_substitute(mtd);
+ pmecc_get_sigma(mtd);
+
+ err_nbr = pmecc_err_location(mtd);
+ if (err_nbr == -1) {
+ dev_err(host->dev, "PMECC: Too many errors\n");
+ mtd->ecc_stats.failed++;
+ return -EIO;
+ } else {
+ pmecc_correct_data(mtd, buf_pos, ecc, i,
+ host->pmecc_bytes_per_sector, err_nbr);
+ mtd->ecc_stats.corrected += err_nbr;
+ total_err += err_nbr;
+ }
+ }
+ pmecc_stat >>= 1;
+ }
+
+ return total_err;
+}
+
+static void pmecc_enable(struct atmel_nand_host *host, int ecc_op)
+{
+ u32 val;
+
+ if (ecc_op != NAND_ECC_READ && ecc_op != NAND_ECC_WRITE) {
+ dev_err(host->dev, "atmel_nand: wrong pmecc operation type!");
+ return;
+ }
+
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+ val = pmecc_readl_relaxed(host->ecc, CFG);
+
+ if (ecc_op == NAND_ECC_READ)
+ pmecc_writel(host->ecc, CFG, (val & ~PMECC_CFG_WRITE_OP)
+ | PMECC_CFG_AUTO_ENABLE);
+ else
+ pmecc_writel(host->ecc, CFG, (val | PMECC_CFG_WRITE_OP)
+ & ~PMECC_CFG_AUTO_ENABLE);
+
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
+}
+
+static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+ struct atmel_nand_host *host = chip->priv;
+ int eccsize = chip->ecc.size;
+ uint8_t *oob = chip->oob_poi;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint32_t stat;
+ unsigned long end_time;
+ int bitflips = 0;
+
+ if (!host->nfc || !host->nfc->use_nfc_sram)
+ pmecc_enable(host, NAND_ECC_READ);
+
+ chip->read_buf(mtd, buf, eccsize);
+ chip->read_buf(mtd, oob, mtd->oobsize);
+
+ end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+ while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
+ if (unlikely(time_after(jiffies, end_time))) {
+ dev_err(host->dev, "PMECC: Timeout to get error status.\n");
+ return -EIO;
+ }
+ cpu_relax();
+ }
+
+ stat = pmecc_readl_relaxed(host->ecc, ISR);
+ if (stat != 0) {
+ bitflips = pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]);
+ if (bitflips < 0)
+ /* uncorrectable errors */
+ return 0;
+ }
+
+ return bitflips;
+}
+
+static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
+{
+ struct atmel_nand_host *host = chip->priv;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ int i, j;
+ unsigned long end_time;
+
+ if (!host->nfc || !host->nfc->write_by_sram) {
+ pmecc_enable(host, NAND_ECC_WRITE);
+ chip->write_buf(mtd, (u8 *)buf, mtd->writesize);
+ }
+
+ end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+ while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
+ if (unlikely(time_after(jiffies, end_time))) {
+ dev_err(host->dev, "PMECC: Timeout to get ECC value.\n");
+ return -EIO;
+ }
+ cpu_relax();
+ }
+
+ for (i = 0; i < host->pmecc_sector_number; i++) {
+ for (j = 0; j < host->pmecc_bytes_per_sector; j++) {
+ int pos;
+
+ pos = i * host->pmecc_bytes_per_sector + j;
+ chip->oob_poi[eccpos[pos]] =
+ pmecc_readb_ecc_relaxed(host->ecc, i, j);
+ }
+ }
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static void atmel_pmecc_core_init(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ uint32_t val = 0;
+ struct nand_ecclayout *ecc_layout;
+
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+
+ switch (host->pmecc_corr_cap) {
+ case 2:
+ val = PMECC_CFG_BCH_ERR2;
+ break;
+ case 4:
+ val = PMECC_CFG_BCH_ERR4;
+ break;
+ case 8:
+ val = PMECC_CFG_BCH_ERR8;
+ break;
+ case 12:
+ val = PMECC_CFG_BCH_ERR12;
+ break;
+ case 24:
+ val = PMECC_CFG_BCH_ERR24;
+ break;
+ }
+
+ if (host->pmecc_sector_size == 512)
+ val |= PMECC_CFG_SECTOR512;
+ else if (host->pmecc_sector_size == 1024)
+ val |= PMECC_CFG_SECTOR1024;
+
+ switch (host->pmecc_sector_number) {
+ case 1:
+ val |= PMECC_CFG_PAGE_1SECTOR;
+ break;
+ case 2:
+ val |= PMECC_CFG_PAGE_2SECTORS;
+ break;
+ case 4:
+ val |= PMECC_CFG_PAGE_4SECTORS;
+ break;
+ case 8:
+ val |= PMECC_CFG_PAGE_8SECTORS;
+ break;
+ }
+
+ val |= (PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE
+ | PMECC_CFG_AUTO_DISABLE);
+ pmecc_writel(host->ecc, CFG, val);
+
+ ecc_layout = nand_chip->ecc.layout;
+ pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1);
+ pmecc_writel(host->ecc, SADDR, ecc_layout->eccpos[0]);
+ pmecc_writel(host->ecc, EADDR,
+ ecc_layout->eccpos[ecc_layout->eccbytes - 1]);
+ /* See datasheet about PMECC Clock Control Register */
+ pmecc_writel(host->ecc, CLK, 2);
+ pmecc_writel(host->ecc, IDR, 0xff);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+}
+
+/*
+ * Get the minimum ECC requirements from the NAND chip.
+ * If pmecc-cap and pmecc-sector-size are not specified in the DTS, this
+ * function sets them according to the chip's minimum ECC requirement;
+ * otherwise the DTS values are used.
+ * Returns 0 on success, otherwise an error code.
+ */
+static int pmecc_choose_ecc(struct atmel_nand_host *host,
+ int *cap, int *sector_size)
+{
+ /* Get minimum ECC requirements */
+ if (host->nand_chip.ecc_strength_ds) {
+ *cap = host->nand_chip.ecc_strength_ds;
+ *sector_size = host->nand_chip.ecc_step_ds;
+ dev_info(host->dev, "minimum ECC: %d bits in %d bytes\n",
+ *cap, *sector_size);
+ } else {
+ *cap = 2;
+ *sector_size = 512;
+ dev_info(host->dev, "can't detect min. ECC, assume 2 bits in 512 bytes\n");
+ }
+
+ /* If device tree doesn't specify, use NAND's minimum ECC parameters */
+ if (host->pmecc_corr_cap == 0) {
+ /* use the closest supported ECC strength (round up) */
+ if (*cap <= 2)
+ host->pmecc_corr_cap = 2;
+ else if (*cap <= 4)
+ host->pmecc_corr_cap = 4;
+ else if (*cap <= 8)
+ host->pmecc_corr_cap = 8;
+ else if (*cap <= 12)
+ host->pmecc_corr_cap = 12;
+ else if (*cap <= 24)
+ host->pmecc_corr_cap = 24;
+ else
+ return -EINVAL;
+ }
+ if (host->pmecc_sector_size == 0) {
+ /* use the closest supported sector size (round down) */
+ if (*sector_size >= 1024)
+ host->pmecc_sector_size = 1024;
+ else if (*sector_size >= 512)
+ host->pmecc_sector_size = 512;
+ else
+ return -EINVAL;
+ }
+ return 0;
+}
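So, for a chip whose ONFI data reports, say, 6 bits per 512 bytes and with nothing forced in the DTS, the selection above settles on cap = 8 and a 512-byte sector. A minimal standalone sketch of the round-up step (the helper name is ours, not the driver's):

#include <stdio.h>

/* Mirrors the DTS-unspecified branch above: pick the next supported
 * correction capability at or above the requirement. */
static int round_up_cap(int cap)
{
	static const int caps[] = { 2, 4, 8, 12, 24 };
	for (int i = 0; i < 5; i++)
		if (cap <= caps[i])
			return caps[i];
	return -1;	/* unsupported */
}

int main(void)
{
	printf("6 bits required -> cap %d\n", round_up_cap(6));   /* 8 */
	printf("16 bits required -> cap %d\n", round_up_cap(16)); /* 24 */
	return 0;
}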
+
+static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
+ struct atmel_nand_host *host)
+{
+ struct mtd_info *mtd = &host->mtd;
+ struct nand_chip *nand_chip = &host->nand_chip;
+ struct resource *regs, *regs_pmerr, *regs_rom;
+ int cap, sector_size, err_no;
+
+ err_no = pmecc_choose_ecc(host, &cap, &sector_size);
+ if (err_no) {
+ dev_err(host->dev, "The NAND flash's ECC requirement are not support!");
+ return err_no;
+ }
+
+ if (cap > host->pmecc_corr_cap ||
+ sector_size != host->pmecc_sector_size)
+ dev_info(host->dev, "WARNING: Be Caution! Using different PMECC parameters from Nand ONFI ECC reqirement.\n");
+
+ cap = host->pmecc_corr_cap;
+ sector_size = host->pmecc_sector_size;
+ host->pmecc_lookup_table_offset = (sector_size == 512) ?
+ host->pmecc_lookup_table_offset_512 :
+ host->pmecc_lookup_table_offset_1024;
+
+ dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n",
+ cap, sector_size);
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!regs) {
+ dev_warn(host->dev,
+ "Can't get I/O resource regs for PMECC controller, rolling back on software ECC\n");
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ return 0;
+ }
+
+ host->ecc = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(host->ecc)) {
+ dev_err(host->dev, "ioremap failed\n");
+ err_no = PTR_ERR(host->ecc);
+ goto err;
+ }
+
+ regs_pmerr = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ host->pmerrloc_base = devm_ioremap_resource(&pdev->dev, regs_pmerr);
+ if (IS_ERR(host->pmerrloc_base)) {
+ dev_err(host->dev,
+ "Can not get I/O resource for PMECC ERRLOC controller!\n");
+ err_no = PTR_ERR(host->pmerrloc_base);
+ goto err;
+ }
+
+ regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev, regs_rom);
+ if (IS_ERR(host->pmecc_rom_base)) {
+ dev_err(host->dev, "Can not get I/O resource for ROM!\n");
+ err_no = PTR_ERR(host->pmecc_rom_base);
+ goto err;
+ }
+
+ /* ECC is calculated for the whole page (1 step) */
+ nand_chip->ecc.size = mtd->writesize;
+
+ /* set ECC page size and oob layout */
+ switch (mtd->writesize) {
+ case 2048:
+ host->pmecc_degree = (sector_size == 512) ?
+ PMECC_GF_DIMENSION_13 : PMECC_GF_DIMENSION_14;
+ host->pmecc_cw_len = (1 << host->pmecc_degree) - 1;
+ host->pmecc_sector_number = mtd->writesize / sector_size;
+ host->pmecc_bytes_per_sector = pmecc_get_ecc_bytes(
+ cap, sector_size);
+ host->pmecc_alpha_to = pmecc_get_alpha_to(host);
+ host->pmecc_index_of = host->pmecc_rom_base +
+ host->pmecc_lookup_table_offset;
+
+ nand_chip->ecc.steps = 1;
+ nand_chip->ecc.strength = cap;
+ nand_chip->ecc.bytes = host->pmecc_bytes_per_sector *
+ host->pmecc_sector_number;
+ if (nand_chip->ecc.bytes > mtd->oobsize - 2) {
+ dev_err(host->dev, "No room for ECC bytes\n");
+ err_no = -EINVAL;
+ goto err;
+ }
+ pmecc_config_ecc_layout(&atmel_pmecc_oobinfo,
+ mtd->oobsize,
+ nand_chip->ecc.bytes);
+ nand_chip->ecc.layout = &atmel_pmecc_oobinfo;
+ break;
+ case 512:
+ case 1024:
+ case 4096:
+ /* TODO */
+ dev_warn(host->dev,
+ "Unsupported page size for PMECC, use Software ECC\n");
+ default:
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ return 0;
+ }
+
+ /* Allocate data for PMECC computation */
+ err_no = pmecc_data_alloc(host);
+ if (err_no) {
+ dev_err(host->dev,
+ "Cannot allocate memory for PMECC computation!\n");
+ goto err;
+ }
+
+ nand_chip->options |= NAND_NO_SUBPAGE_WRITE;
+ nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
+ nand_chip->ecc.write_page = atmel_nand_pmecc_write_page;
+
+ atmel_pmecc_core_init(mtd);
+
+ return 0;
+
+err:
+ return err_no;
+}
+
+/*
* Calculate HW ECC
*
* function called after a write
@@ -331,9 +1269,10 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
* mtd: mtd info structure
* chip: nand chip info structure
* buf: buffer to store read data
+ * oob_required: caller expects OOB data read to chip->oob_poi
*/
-static int atmel_nand_read_page(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int page)
+static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -342,6 +1281,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
uint8_t *oob = chip->oob_poi;
uint8_t *ecc_pos;
int stat;
+ unsigned int max_bitflips = 0;
/*
* Errata: ALE is incorrectly wired up to the ECC controller
@@ -351,10 +1291,9 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
* Workaround: Reset the parity registers before reading the
* actual data.
*/
- if (cpu_is_at32ap7000()) {
- struct atmel_nand_host *host = chip->priv;
+ struct atmel_nand_host *host = chip->priv;
+ if (host->board.need_reset_workaround)
ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
- }
/* read the page */
chip->read_buf(mtd, p, eccsize);
@@ -378,10 +1317,12 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
/* check if there's an error */
stat = chip->ecc.correct(mtd, p, oob, NULL);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
/* get back to oob start (end of page) */
chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
@@ -389,7 +1330,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
/* read the oob */
chip->read_buf(mtd, oob, mtd->oobsize);
- return 0;
+ return max_bitflips;
}
/*
@@ -473,57 +1414,583 @@ static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat,
*/
static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
{
- if (cpu_is_at32ap7000()) {
- struct nand_chip *nand_chip = mtd->priv;
- struct atmel_nand_host *host = nand_chip->priv;
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ if (host->board.need_reset_workaround)
ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
+}
+
+static int atmel_of_init_port(struct atmel_nand_host *host,
+ struct device_node *np)
+{
+ u32 val;
+ u32 offset[2];
+ int ecc_mode;
+ struct atmel_nand_data *board = &host->board;
+ enum of_gpio_flags flags = 0;
+
+ if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
+ if (val >= 32) {
+ dev_err(host->dev, "invalid addr-offset %u\n", val);
+ return -EINVAL;
+ }
+ board->ale = val;
}
+
+ if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) {
+ if (val >= 32) {
+ dev_err(host->dev, "invalid cmd-offset %u\n", val);
+ return -EINVAL;
+ }
+ board->cle = val;
+ }
+
+ ecc_mode = of_get_nand_ecc_mode(np);
+
+ board->ecc_mode = ecc_mode < 0 ? NAND_ECC_SOFT : ecc_mode;
+
+ board->on_flash_bbt = of_get_nand_on_flash_bbt(np);
+
+ board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma");
+
+ if (of_get_nand_bus_width(np) == 16)
+ board->bus_width_16 = 1;
+
+ board->rdy_pin = of_get_gpio_flags(np, 0, &flags);
+ board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW);
+
+ board->enable_pin = of_get_gpio(np, 1);
+ board->det_pin = of_get_gpio(np, 2);
+
+ host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc");
+
+ /* load the NFC driver if there is one */
+ of_platform_populate(np, NULL, NULL, host->dev);
+
+ if (!(board->ecc_mode == NAND_ECC_HW) || !host->has_pmecc)
+ return 0; /* Not using PMECC */
+
+ /* Using PMECC: get the correction capability, sector size and lookup
+ * table offset.
+ * If the correction capability and sector size are not specified,
+ * find them from the NAND ONFI parameters.
+ */
+ if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) {
+ if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
+ (val != 24)) {
+ dev_err(host->dev,
+ "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
+ val);
+ return -EINVAL;
+ }
+ host->pmecc_corr_cap = (u8)val;
+ }
+
+ if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
+ if ((val != 512) && (val != 1024)) {
+ dev_err(host->dev,
+ "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
+ val);
+ return -EINVAL;
+ }
+ host->pmecc_sector_size = (u16)val;
+ }
+
+ if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
+ offset, 2) != 0) {
+ dev_err(host->dev, "Cannot get PMECC lookup table offset\n");
+ return -EINVAL;
+ }
+ if (!offset[0] && !offset[1]) {
+ dev_err(host->dev, "Invalid PMECC lookup table offset\n");
+ return -EINVAL;
+ }
+ host->pmecc_lookup_table_offset_512 = offset[0];
+ host->pmecc_lookup_table_offset_1024 = offset[1];
+
+ return 0;
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
+static int atmel_hw_nand_init_params(struct platform_device *pdev,
+ struct atmel_nand_host *host)
+{
+ struct mtd_info *mtd = &host->mtd;
+ struct nand_chip *nand_chip = &host->nand_chip;
+ struct resource *regs;
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!regs) {
+ dev_err(host->dev,
+ "Can't get I/O resource regs, use software ECC\n");
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ return 0;
+ }
+
+ host->ecc = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(host->ecc)) {
+ dev_err(host->dev, "ioremap failed\n");
+ return PTR_ERR(host->ecc);
+ }
+
+ /* ECC is calculated for the whole page (1 step) */
+ nand_chip->ecc.size = mtd->writesize;
+
+ /* set ECC page size and oob layout */
+ switch (mtd->writesize) {
+ case 512:
+ nand_chip->ecc.layout = &atmel_oobinfo_small;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
+ break;
+ case 1024:
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
+ break;
+ case 2048:
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
+ break;
+ case 4096:
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
+ break;
+ default:
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ return 0;
+ }
+
+ /* set up for HW ECC */
+ nand_chip->ecc.calculate = atmel_nand_calculate;
+ nand_chip->ecc.correct = atmel_nand_correct;
+ nand_chip->ecc.hwctl = atmel_nand_hwctl;
+ nand_chip->ecc.read_page = atmel_nand_read_page;
+ nand_chip->ecc.bytes = 4;
+ nand_chip->ecc.strength = 1;
+
+ return 0;
+}
+
+/* SMC interrupt service routine */
+static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
+{
+ struct atmel_nand_host *host = dev_id;
+ u32 status, mask, pending;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ status = nfc_readl(host->nfc->hsmc_regs, SR);
+ mask = nfc_readl(host->nfc->hsmc_regs, IMR);
+ pending = status & mask;
+
+ if (pending & NFC_SR_XFR_DONE) {
+ complete(&host->nfc->comp_nfc);
+ nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
+ } else if (pending & NFC_SR_RB_EDGE) {
+ complete(&host->nfc->comp_nfc);
+ nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
+ } else if (pending & NFC_SR_CMD_DONE) {
+ complete(&host->nfc->comp_nfc);
+ nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_CMD_DONE);
+ } else {
+ ret = IRQ_NONE;
+ }
+
+ return ret;
+}
+
+/* NFC(Nand Flash Controller) related functions */
+static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
+{
+ unsigned long timeout;
+ init_completion(&host->nfc->comp_nfc);
+
+ /* Enable the interrupt we need to wait for */
+ nfc_writel(host->nfc->hsmc_regs, IER, flag);
+
+ timeout = wait_for_completion_timeout(&host->nfc->comp_nfc,
+ msecs_to_jiffies(NFC_TIME_OUT_MS));
+ if (timeout)
+ return 0;
+
+ /* Timed out waiting for the interrupt */
+ dev_err(host->dev, "Timed out waiting for interrupt: 0x%08x\n", flag);
+ return -ETIMEDOUT;
+}
+
+static int nfc_send_command(struct atmel_nand_host *host,
+ unsigned int cmd, unsigned int addr, unsigned char cycle0)
+{
+ unsigned long timeout;
+ dev_dbg(host->dev,
+ "nfc_cmd: 0x%08x, addr1234: 0x%08x, cycle0: 0x%02x\n",
+ cmd, addr, cycle0);
+
+ timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
+ while (nfc_cmd_readl(NFCADDR_CMD_NFCBUSY, host->nfc->base_cmd_regs)
+ & NFCADDR_CMD_NFCBUSY) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(host->dev,
+ "Time out to wait CMD_NFCBUSY ready!\n");
+ return -ETIMEDOUT;
+ }
+ }
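+ /* The command word doubles as the register offset: writing ADDR1234
+ * to (base_cmd_regs + cmd) latches the address cycles and starts the
+ * command (see nfc_cmd_addr1234_writel). */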
+ nfc_writel(host->nfc->hsmc_regs, CYCLE0, cycle0);
+ nfc_cmd_addr1234_writel(cmd, addr, host->nfc->base_cmd_regs);
+ return nfc_wait_interrupt(host, NFC_SR_CMD_DONE);
+}
+
+static int nfc_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ if (!nfc_wait_interrupt(host, NFC_SR_RB_EDGE))
+ return 1;
+ return 0;
+}
+
+static void nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
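+ /* The NFC drives chip enable itself: deselecting (-1) disables the
+ * controller, any other chip number enables it. */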
+ if (chip == -1)
+ nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_DISABLE);
+ else
+ nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_ENABLE);
+}
+
+static int nfc_make_addr(struct mtd_info *mtd, int command, int column,
+ int page_addr, unsigned int *addr1234, unsigned int *cycle0)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ int acycle = 0;
+ unsigned char addr_bytes[8];
+ int index = 0, bit_shift;
+
+ BUG_ON(addr1234 == NULL || cycle0 == NULL);
+
+ *cycle0 = 0;
+ *addr1234 = 0;
+
+ if (column != -1) {
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ addr_bytes[acycle++] = column & 0xff;
+ if (mtd->writesize > 512)
+ addr_bytes[acycle++] = (column >> 8) & 0xff;
+ }
+
+ if (page_addr != -1) {
+ addr_bytes[acycle++] = page_addr & 0xff;
+ addr_bytes[acycle++] = (page_addr >> 8) & 0xff;
+ if (chip->chipsize > (128 << 20))
+ addr_bytes[acycle++] = (page_addr >> 16) & 0xff;
+ }
+
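+ /* ADDR1234 holds at most four address cycles; a fifth cycle, if
+ * needed, goes into the CYCLE0 register. */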
+ if (acycle > 4)
+ *cycle0 = addr_bytes[index++];
+
+ for (bit_shift = 0; index < acycle; bit_shift += 8)
+ *addr1234 += addr_bytes[index++] << bit_shift;
+
+ /* return acycle in cmd register */
+ return acycle << NFCADDR_CMD_ACYCLE_BIT_POS;
+}
+
+static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+ unsigned long timeout;
+ unsigned int nfc_addr_cmd = 0;
+
+ unsigned int cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS;
+
+ /* Set default settings: no cmd2, no addr cycle. read from nand */
+ unsigned int cmd2 = 0;
+ unsigned int vcmd2 = 0;
+ int acycle = NFCADDR_CMD_ACYCLE_NONE;
+ int csid = NFCADDR_CMD_CSID_3;
+ int dataen = NFCADDR_CMD_DATADIS;
+ int nfcwr = NFCADDR_CMD_NFCRD;
+ unsigned int addr1234 = 0;
+ unsigned int cycle0 = 0;
+ bool do_addr = true;
+ host->nfc->data_in_sram = NULL;
+
+ dev_dbg(host->dev, "%s: cmd = 0x%02x, col = 0x%08x, page = 0x%08x\n",
+ __func__, command, column, page_addr);
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ nfc_addr_cmd = cmd1 | acycle | csid | dataen | nfcwr;
+ nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
+ udelay(chip->chip_delay);
+
+ nfc_nand_command(mtd, NAND_CMD_STATUS, -1, -1);
+ timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
+ while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(host->dev,
+ "Time out to wait status ready!\n");
+ break;
+ }
+ }
+ return;
+ case NAND_CMD_STATUS:
+ do_addr = false;
+ break;
+ case NAND_CMD_PARAM:
+ case NAND_CMD_READID:
+ do_addr = false;
+ acycle = NFCADDR_CMD_ACYCLE_1;
+ if (column != -1)
+ addr1234 = column;
+ break;
+ case NAND_CMD_RNDOUT:
+ cmd2 = NAND_CMD_RNDOUTSTART << NFCADDR_CMD_CMD2_BIT_POS;
+ vcmd2 = NFCADDR_CMD_VCMD2;
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0; /* only READ0 is valid */
+ cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS;
+ }
+ if (host->nfc->use_nfc_sram) {
+ /* Enable Data transfer to sram */
+ dataen = NFCADDR_CMD_DATAEN;
+
+ /* Need to enable PMECC now, since the NFC will transfer
+ * data on the bus after sending the NFC read command.
+ */
+ if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc)
+ pmecc_enable(host, NAND_ECC_READ);
+ }
+
+ cmd2 = NAND_CMD_READSTART << NFCADDR_CMD_CMD2_BIT_POS;
+ vcmd2 = NFCADDR_CMD_VCMD2;
+ break;
+ /* For programming commands, the command needs write enable set */
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ nfcwr = NFCADDR_CMD_NFCWR;
+ if (host->nfc->will_write_sram && command == NAND_CMD_SEQIN)
+ dataen = NFCADDR_CMD_DATAEN;
+ break;
+ default:
+ break;
+ }
+
+ if (do_addr)
+ acycle = nfc_make_addr(mtd, command, column, page_addr,
+ &addr1234, &cycle0);
+
+ nfc_addr_cmd = cmd1 | cmd2 | vcmd2 | acycle | csid | dataen | nfcwr;
+ nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
+
+ if (dataen == NFCADDR_CMD_DATAEN)
+ if (nfc_wait_interrupt(host, NFC_SR_XFR_DONE))
+ dev_err(host->dev, "something wrong, No XFR_DONE interrupt comes.\n");
+
+ /*
+ * Program and erase have their own busy handlers; status, sequential
+ * in, and deplete1 need no delay.
+ */
+ switch (command) {
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_RNDOUT:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_READID:
+ return;
+
+ case NAND_CMD_READ0:
+ if (dataen == NFCADDR_CMD_DATAEN) {
+ host->nfc->data_in_sram = host->nfc->sram_bank0 +
+ nfc_get_sram_off(host);
+ return;
+ }
+ /* fall through */
+ default:
+ nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
+ }
+}
+
+static int nfc_sram_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw)
+{
+ int cfg, len;
+ int status = 0;
+ struct atmel_nand_host *host = chip->priv;
+ void __iomem *sram = host->nfc->sram_bank0 + nfc_get_sram_off(host);
+
+ /* Subpage write is not supported */
+ if (offset || (data_len < mtd->writesize))
+ return -EINVAL;
+
+ cfg = nfc_readl(host->nfc->hsmc_regs, CFG);
+ len = mtd->writesize;
+
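+ /* Raw writes transfer the OOB area from the SRAM as well, so enable
+ * spare-area (WSPARE) writing; otherwise ECC is written to the OOB by
+ * chip->ecc.write_page() below. */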
+ if (unlikely(raw)) {
+ len += mtd->oobsize;
+ nfc_writel(host->nfc->hsmc_regs, CFG, cfg | NFC_CFG_WSPARE);
+ } else
+ nfc_writel(host->nfc->hsmc_regs, CFG, cfg & ~NFC_CFG_WSPARE);
+
+ /* Copy page data to the SRAM; the NFC will write it to the NAND */
+ if (use_dma) {
+ if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) != 0)
+ /* Fall back to use cpu copy */
+ memcpy32_toio(sram, buf, len);
+ } else {
+ memcpy32_toio(sram, buf, len);
+ }
+
+ if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc)
+ /*
+ * When using the NFC SRAM, PMECC must be set up before sending
+ * the NAND_CMD_SEQIN command, since the NFC transfers data
+ * between the SRAM and the NAND as soon as the command is sent.
+ */
+ pmecc_enable(host, NAND_ECC_WRITE);
+
+ host->nfc->will_write_sram = true;
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+ host->nfc->will_write_sram = false;
+
+ if (likely(!raw))
+ /* Need to write ecc into oob */
+ status = chip->ecc.write_page(mtd, chip, buf, oob_required);
+
+ if (status < 0)
+ return status;
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+ status = chip->errstat(mtd, chip, FL_WRITING, status, page);
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+static int nfc_sram_init(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+ int res = 0;
+
+ /* Initialize the NFC CFG register */
+ unsigned int cfg_nfc = 0;
+
+ /* set page size and oob layout */
+ switch (mtd->writesize) {
+ case 512:
+ cfg_nfc = NFC_CFG_PAGESIZE_512;
+ break;
+ case 1024:
+ cfg_nfc = NFC_CFG_PAGESIZE_1024;
+ break;
+ case 2048:
+ cfg_nfc = NFC_CFG_PAGESIZE_2048;
+ break;
+ case 4096:
+ cfg_nfc = NFC_CFG_PAGESIZE_4096;
+ break;
+ case 8192:
+ cfg_nfc = NFC_CFG_PAGESIZE_8192;
+ break;
+ default:
+ dev_err(host->dev, "Unsupported page size for NFC.\n");
+ res = -ENXIO;
+ return res;
+ }
+
+ /* oob bytes size = (NFCSPARESIZE + 1) * 4
+ * Maximum supported spare size is 512 bytes. */
+ cfg_nfc |= (((mtd->oobsize / 4) - 1) << NFC_CFG_NFC_SPARESIZE_BIT_POS
+ & NFC_CFG_NFC_SPARESIZE);
+ /* set the maximum data timeout by default */
+ cfg_nfc |= NFC_CFG_RSPARE |
+ NFC_CFG_NFC_DTOCYC | NFC_CFG_NFC_DTOMUL;
+
+ nfc_writel(host->nfc->hsmc_regs, CFG, cfg_nfc);
+
+ host->nfc->will_write_sram = false;
+ nfc_set_sram_bank(host, 0);
+
+ /* Use Write page with NFC SRAM only for PMECC or ECC NONE. */
+ if (host->nfc->write_by_sram) {
+ if ((chip->ecc.mode == NAND_ECC_HW && host->has_pmecc) ||
+ chip->ecc.mode == NAND_ECC_NONE)
+ chip->write_page = nfc_sram_write_page;
+ else
+ host->nfc->write_by_sram = false;
+ }
+
+ dev_info(host->dev, "Using NFC Sram read %s\n",
+ host->nfc->write_by_sram ? "and write" : "");
+ return 0;
+}
+
+static struct platform_driver atmel_nand_nfc_driver;
/*
* Probe for the NAND device.
*/
-static int __init atmel_nand_probe(struct platform_device *pdev)
+static int atmel_nand_probe(struct platform_device *pdev)
{
struct atmel_nand_host *host;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
- struct resource *regs;
struct resource *mem;
- int res;
- struct mtd_partition *partitions = NULL;
- int num_partitions = 0;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- printk(KERN_ERR "atmel_nand: can't get I/O resource mem\n");
- return -ENXIO;
- }
+ struct mtd_part_parser_data ppdata = {};
+ int res, irq;
/* Allocate memory for the device structure (and zero it) */
- host = kzalloc(sizeof(struct atmel_nand_host), GFP_KERNEL);
- if (!host) {
- printk(KERN_ERR "atmel_nand: failed to allocate device structure.\n");
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
return -ENOMEM;
- }
- host->io_phys = (dma_addr_t)mem->start;
+ res = platform_driver_register(&atmel_nand_nfc_driver);
+ if (res)
+ dev_err(&pdev->dev, "atmel_nand: can't register NFC driver\n");
- host->io_base = ioremap(mem->start, mem->end - mem->start + 1);
- if (host->io_base == NULL) {
- printk(KERN_ERR "atmel_nand: ioremap failed\n");
- res = -EIO;
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->io_base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(host->io_base)) {
+ dev_err(&pdev->dev, "atmel_nand: ioremap resource failed\n");
+ res = PTR_ERR(host->io_base);
goto err_nand_ioremap;
}
+ host->io_phys = (dma_addr_t)mem->start;
mtd = &host->mtd;
nand_chip = &host->nand_chip;
- host->board = pdev->dev.platform_data;
host->dev = &pdev->dev;
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+ /* Only when CONFIG_OF is enabled of_node can be parsed */
+ res = atmel_of_init_port(host, pdev->dev.of_node);
+ if (res)
+ goto err_nand_ioremap;
+ } else {
+ memcpy(&host->board, dev_get_platdata(&pdev->dev),
+ sizeof(struct atmel_nand_data));
+ }
nand_chip->priv = host; /* link the private data structures */
mtd->priv = nand_chip;
@@ -532,38 +1999,42 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
/* Set address of NAND IO lines */
nand_chip->IO_ADDR_R = host->io_base;
nand_chip->IO_ADDR_W = host->io_base;
- nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
- if (host->board->rdy_pin)
- nand_chip->dev_ready = atmel_nand_device_ready;
+ if (nand_nfc.is_initialized) {
+ /* NFC driver is probed and initialized */
+ host->nfc = &nand_nfc;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!regs && hard_ecc) {
- printk(KERN_ERR "atmel_nand: can't get I/O resource "
- "regs\nFalling back on software ECC\n");
- }
+ nand_chip->select_chip = nfc_select_chip;
+ nand_chip->dev_ready = nfc_device_ready;
+ nand_chip->cmdfunc = nfc_nand_command;
+
+ /* Initialize the interrupt for NFC */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(host->dev, "Cannot get HSMC irq!\n");
+ res = irq;
+ goto err_nand_ioremap;
+ }
- nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */
- if (no_ecc)
- nand_chip->ecc.mode = NAND_ECC_NONE;
- if (hard_ecc && regs) {
- host->ecc = ioremap(regs->start, regs->end - regs->start + 1);
- if (host->ecc == NULL) {
- printk(KERN_ERR "atmel_nand: ioremap failed\n");
- res = -EIO;
- goto err_ecc_ioremap;
+ res = devm_request_irq(&pdev->dev, irq, hsmc_interrupt,
+ 0, "hsmc", host);
+ if (res) {
+ dev_err(&pdev->dev, "Unable to request HSMC irq %d\n",
+ irq);
+ goto err_nand_ioremap;
}
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.calculate = atmel_nand_calculate;
- nand_chip->ecc.correct = atmel_nand_correct;
- nand_chip->ecc.hwctl = atmel_nand_hwctl;
- nand_chip->ecc.read_page = atmel_nand_read_page;
- nand_chip->ecc.bytes = 4;
+ } else {
+ res = atmel_nand_set_enable_ready_pins(mtd);
+ if (res)
+ goto err_nand_ioremap;
+
+ nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
}
+ nand_chip->ecc.mode = host->board.ecc_mode;
nand_chip->chip_delay = 20; /* 20us command delay time */
- if (host->board->bus_width_16) /* 16-bit bus width */
+ if (host->board.bus_width_16) /* 16-bit bus width */
nand_chip->options |= NAND_BUSWIDTH_16;
nand_chip->read_buf = atmel_read_buf;
@@ -572,20 +2043,37 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
atmel_nand_enable(host);
- if (host->board->det_pin) {
- if (gpio_get_value(host->board->det_pin)) {
- printk(KERN_INFO "No SmartMedia card inserted.\n");
+ if (gpio_is_valid(host->board.det_pin)) {
+ res = devm_gpio_request(&pdev->dev,
+ host->board.det_pin, "nand_det");
+ if (res < 0) {
+ dev_err(&pdev->dev,
+ "can't request det gpio %d\n",
+ host->board.det_pin);
+ goto err_no_card;
+ }
+
+ res = gpio_direction_input(host->board.det_pin);
+ if (res < 0) {
+ dev_err(&pdev->dev,
+ "can't request input direction det gpio %d\n",
+ host->board.det_pin);
+ goto err_no_card;
+ }
+
+ if (gpio_get_value(host->board.det_pin)) {
+ dev_info(&pdev->dev, "No SmartMedia card inserted.\n");
res = -ENXIO;
goto err_no_card;
}
}
- if (on_flash_bbt) {
- printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
- nand_chip->options |= NAND_USE_FLASH_BBT;
+ if (host->board.on_flash_bbt || on_flash_bbt) {
+ dev_info(&pdev->dev, "Use On Flash BBT\n");
+ nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
}
- if (!cpu_has_dma())
+ if (!host->board.has_dma)
use_dma = 0;
if (use_dma) {
@@ -593,7 +2081,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
- host->dma_chan = dma_request_channel(mask, 0, NULL);
+ host->dma_chan = dma_request_channel(mask, NULL, NULL);
if (!host->dma_chan) {
dev_err(host->dev, "Failed to request DMA channel\n");
use_dma = 0;
@@ -612,39 +2100,21 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
}
if (nand_chip->ecc.mode == NAND_ECC_HW) {
- /* ECC is calculated for the whole page (1 step) */
- nand_chip->ecc.size = mtd->writesize;
-
- /* set ECC page size and oob layout */
- switch (mtd->writesize) {
- case 512:
- nand_chip->ecc.layout = &atmel_oobinfo_small;
- ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
- break;
- case 1024:
- nand_chip->ecc.layout = &atmel_oobinfo_large;
- ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
- break;
- case 2048:
- nand_chip->ecc.layout = &atmel_oobinfo_large;
- ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
- break;
- case 4096:
- nand_chip->ecc.layout = &atmel_oobinfo_large;
- ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
- break;
- default:
- /* page size not handled by HW ECC */
- /* switching back to soft ECC */
- nand_chip->ecc.mode = NAND_ECC_SOFT;
- nand_chip->ecc.calculate = NULL;
- nand_chip->ecc.correct = NULL;
- nand_chip->ecc.hwctl = NULL;
- nand_chip->ecc.read_page = NULL;
- nand_chip->ecc.postpad = 0;
- nand_chip->ecc.prepad = 0;
- nand_chip->ecc.bytes = 0;
- break;
+ if (host->has_pmecc)
+ res = atmel_pmecc_nand_init_params(pdev, host);
+ else
+ res = atmel_hw_nand_init_params(pdev, host);
+
+ if (res != 0)
+ goto err_hw_ecc;
+ }
+
+ /* initialize the nfc configuration register */
+ if (host->nfc && host->nfc->use_nfc_sram) {
+ res = nfc_sram_init(mtd);
+ if (res) {
+ host->nfc->use_nfc_sram = false;
+ dev_err(host->dev, "Disable use nfc sram for data transfer.\n");
}
}
@@ -654,47 +2124,30 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
goto err_scan_tail;
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "atmel_nand";
- num_partitions = parse_mtd_partitions(mtd, part_probes,
- &partitions, 0);
-#endif
- if (num_partitions <= 0 && host->board->partition_info)
- partitions = host->board->partition_info(mtd->size,
- &num_partitions);
-
- if ((!partitions) || (num_partitions == 0)) {
- printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
- res = -ENXIO;
- goto err_no_partitions;
- }
-
- res = mtd_device_register(mtd, partitions, num_partitions);
+ ppdata.of_node = pdev->dev.of_node;
+ res = mtd_device_parse_register(mtd, NULL, &ppdata,
+ host->board.parts, host->board.num_parts);
if (!res)
return res;
-err_no_partitions:
- nand_release(mtd);
err_scan_tail:
+ if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW)
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+err_hw_ecc:
err_scan_ident:
err_no_card:
atmel_nand_disable(host);
- platform_set_drvdata(pdev, NULL);
if (host->dma_chan)
dma_release_channel(host->dma_chan);
- if (host->ecc)
- iounmap(host->ecc);
-err_ecc_ioremap:
- iounmap(host->io_base);
err_nand_ioremap:
- kfree(host);
return res;
}
/*
* Remove a NAND device.
*/
-static int __exit atmel_nand_remove(struct platform_device *pdev)
+static int atmel_nand_remove(struct platform_device *pdev)
{
struct atmel_nand_host *host = platform_get_drvdata(pdev);
struct mtd_info *mtd = &host->mtd;
@@ -703,40 +2156,90 @@ static int __exit atmel_nand_remove(struct platform_device *pdev)
atmel_nand_disable(host);
- if (host->ecc)
- iounmap(host->ecc);
+ if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+ pmerrloc_writel(host->pmerrloc_base, ELDIS,
+ PMERRLOC_DISABLE);
+ }
if (host->dma_chan)
dma_release_channel(host->dma_chan);
- iounmap(host->io_base);
- kfree(host);
+ platform_driver_unregister(&atmel_nand_nfc_driver);
return 0;
}
-static struct platform_driver atmel_nand_driver = {
- .remove = __exit_p(atmel_nand_remove),
- .driver = {
- .name = "atmel_nand",
- .owner = THIS_MODULE,
- },
+static const struct of_device_id atmel_nand_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-nand" },
+ { /* sentinel */ }
};
-static int __init atmel_nand_init(void)
+MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
+
+static int atmel_nand_nfc_probe(struct platform_device *pdev)
{
- return platform_driver_probe(&atmel_nand_driver, atmel_nand_probe);
+ struct atmel_nfc *nfc = &nand_nfc;
+ struct resource *nfc_cmd_regs, *nfc_hsmc_regs, *nfc_sram;
+
+ nfc_cmd_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->base_cmd_regs = devm_ioremap_resource(&pdev->dev, nfc_cmd_regs);
+ if (IS_ERR(nfc->base_cmd_regs))
+ return PTR_ERR(nfc->base_cmd_regs);
+
+ nfc_hsmc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ nfc->hsmc_regs = devm_ioremap_resource(&pdev->dev, nfc_hsmc_regs);
+ if (IS_ERR(nfc->hsmc_regs))
+ return PTR_ERR(nfc->hsmc_regs);
+
+ nfc_sram = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (nfc_sram) {
+ nfc->sram_bank0 = devm_ioremap_resource(&pdev->dev, nfc_sram);
+ if (IS_ERR(nfc->sram_bank0)) {
+ dev_warn(&pdev->dev, "Fail to ioremap the NFC sram with error: %ld. So disable NFC sram.\n",
+ PTR_ERR(nfc->sram_bank0));
+ } else {
+ nfc->use_nfc_sram = true;
+ nfc->sram_bank0_phys = (dma_addr_t)nfc_sram->start;
+
+ if (pdev->dev.of_node)
+ nfc->write_by_sram = of_property_read_bool(
+ pdev->dev.of_node,
+ "atmel,write-by-sram");
+ }
+ }
+
+ nfc->is_initialized = true;
+ dev_info(&pdev->dev, "NFC is probed.\n");
+ return 0;
}
+static const struct of_device_id atmel_nand_nfc_match[] = {
+ { .compatible = "atmel,sama5d3-nfc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
-static void __exit atmel_nand_exit(void)
-{
- platform_driver_unregister(&atmel_nand_driver);
-}
+static struct platform_driver atmel_nand_nfc_driver = {
+ .driver = {
+ .name = "atmel_nand_nfc",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_nand_nfc_match),
+ },
+ .probe = atmel_nand_nfc_probe,
+};
+static struct platform_driver atmel_nand_driver = {
+ .probe = atmel_nand_probe,
+ .remove = atmel_nand_remove,
+ .driver = {
+ .name = "atmel_nand",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_nand_dt_ids),
+ },
+};
-module_init(atmel_nand_init);
-module_exit(atmel_nand_exit);
+module_platform_driver(atmel_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rick Bronson");
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
index 578c776e135..8a1e9a68675 100644
--- a/drivers/mtd/nand/atmel_nand_ecc.h
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -3,7 +3,7 @@
* Based on AT91SAM9260 datasheet revision B.
*
* Copyright (C) 2007 Andrew Victor
- * Copyright (C) 2007 Atmel Corporation.
+ * Copyright (C) 2007 - 2012 Atmel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -36,4 +36,116 @@
#define ATMEL_ECC_NPR 0x10 /* NParity register */
#define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */
+/* PMECC Register Definitions */
+#define ATMEL_PMECC_CFG 0x000 /* Configuration Register */
+#define PMECC_CFG_BCH_ERR2 (0 << 0)
+#define PMECC_CFG_BCH_ERR4 (1 << 0)
+#define PMECC_CFG_BCH_ERR8 (2 << 0)
+#define PMECC_CFG_BCH_ERR12 (3 << 0)
+#define PMECC_CFG_BCH_ERR24 (4 << 0)
+
+#define PMECC_CFG_SECTOR512 (0 << 4)
+#define PMECC_CFG_SECTOR1024 (1 << 4)
+
+#define PMECC_CFG_PAGE_1SECTOR (0 << 8)
+#define PMECC_CFG_PAGE_2SECTORS (1 << 8)
+#define PMECC_CFG_PAGE_4SECTORS (2 << 8)
+#define PMECC_CFG_PAGE_8SECTORS (3 << 8)
+
+#define PMECC_CFG_READ_OP (0 << 12)
+#define PMECC_CFG_WRITE_OP (1 << 12)
+
+#define PMECC_CFG_SPARE_ENABLE (1 << 16)
+#define PMECC_CFG_SPARE_DISABLE (0 << 16)
+
+#define PMECC_CFG_AUTO_ENABLE (1 << 20)
+#define PMECC_CFG_AUTO_DISABLE (0 << 20)
+
+#define ATMEL_PMECC_SAREA 0x004 /* Spare area size */
+#define ATMEL_PMECC_SADDR 0x008 /* PMECC starting address */
+#define ATMEL_PMECC_EADDR 0x00c /* PMECC ending address */
+#define ATMEL_PMECC_CLK 0x010 /* PMECC clock control */
+#define PMECC_CLK_133MHZ (2 << 0)
+
+#define ATMEL_PMECC_CTRL 0x014 /* PMECC control register */
+#define PMECC_CTRL_RST (1 << 0)
+#define PMECC_CTRL_DATA (1 << 1)
+#define PMECC_CTRL_USER (1 << 2)
+#define PMECC_CTRL_ENABLE (1 << 4)
+#define PMECC_CTRL_DISABLE (1 << 5)
+
+#define ATMEL_PMECC_SR 0x018 /* PMECC status register */
+#define PMECC_SR_BUSY (1 << 0)
+#define PMECC_SR_ENABLE (1 << 4)
+
+#define ATMEL_PMECC_IER 0x01c /* PMECC interrupt enable */
+#define PMECC_IER_ENABLE (1 << 0)
+#define ATMEL_PMECC_IDR 0x020 /* PMECC interrupt disable */
+#define PMECC_IER_DISABLE (1 << 0)
+#define ATMEL_PMECC_IMR 0x024 /* PMECC interrupt mask */
+#define PMECC_IER_MASK (1 << 0)
+#define ATMEL_PMECC_ISR 0x028 /* PMECC interrupt status */
+#define ATMEL_PMECC_ECCx 0x040 /* PMECC ECC x */
+#define ATMEL_PMECC_REMx 0x240 /* PMECC REM x */
+
+/* PMERRLOC Register Definitions */
+#define ATMEL_PMERRLOC_ELCFG 0x000 /* Error location config */
+#define PMERRLOC_ELCFG_SECTOR_512 (0 << 0)
+#define PMERRLOC_ELCFG_SECTOR_1024 (1 << 0)
+#define PMERRLOC_ELCFG_NUM_ERRORS(n) ((n) << 16)
+
+#define ATMEL_PMERRLOC_ELPRIM 0x004 /* Error location primitive */
+#define ATMEL_PMERRLOC_ELEN 0x008 /* Error location enable */
+#define ATMEL_PMERRLOC_ELDIS 0x00c /* Error location disable */
+#define PMERRLOC_DISABLE (1 << 0)
+
+#define ATMEL_PMERRLOC_ELSR 0x010 /* Error location status */
+#define PMERRLOC_ELSR_BUSY (1 << 0)
+#define ATMEL_PMERRLOC_ELIER 0x014 /* Error location int enable */
+#define ATMEL_PMERRLOC_ELIDR 0x018 /* Error location int disable */
+#define ATMEL_PMERRLOC_ELIMR 0x01c /* Error location int mask */
+#define ATMEL_PMERRLOC_ELISR 0x020 /* Error location int status */
+#define PMERRLOC_ERR_NUM_MASK (0x1f << 8)
+#define PMERRLOC_CALC_DONE (1 << 0)
+#define ATMEL_PMERRLOC_SIGMAx 0x028 /* Error location SIGMA x */
+#define ATMEL_PMERRLOC_ELx 0x08c /* Error location x */
+
+/* Register access macros for PMECC */
+#define pmecc_readl_relaxed(addr, reg) \
+ readl_relaxed((addr) + ATMEL_PMECC_##reg)
+
+#define pmecc_writel(addr, reg, value) \
+ writel((value), (addr) + ATMEL_PMECC_##reg)
+
+#define pmecc_readb_ecc_relaxed(addr, sector, n) \
+ readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n))
+
+#define pmecc_readl_rem_relaxed(addr, sector, n) \
+ readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4))
+
+#define pmerrloc_readl_relaxed(addr, reg) \
+ readl_relaxed((addr) + ATMEL_PMERRLOC_##reg)
+
+#define pmerrloc_writel(addr, reg, value) \
+ writel((value), (addr) + ATMEL_PMERRLOC_##reg)
+
+#define pmerrloc_writel_sigma_relaxed(addr, n, value) \
+ writel_relaxed((value), (addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
+
+#define pmerrloc_readl_sigma_relaxed(addr, n) \
+ readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
+
+#define pmerrloc_readl_el_relaxed(addr, n) \
+ readl_relaxed((addr) + ATMEL_PMERRLOC_ELx + ((n) * 4))
+
+/* Galois field dimension */
+#define PMECC_GF_DIMENSION_13 13
+#define PMECC_GF_DIMENSION_14 14
+
+#define PMECC_LOOKUP_TABLE_SIZE_512 0x2000
+#define PMECC_LOOKUP_TABLE_SIZE_1024 0x4000
+
+/* Time out value for reading PMECC status register */
+#define PMECC_MAX_TIMEOUT_MS 100
+
#endif
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
new file mode 100644
index 00000000000..4efd117cd3a
--- /dev/null
+++ b/drivers/mtd/nand/atmel_nand_nfc.h
@@ -0,0 +1,98 @@
+/*
+ * Atmel Nand Flash Controller (NFC) - System peripheral registers.
+ * Based on SAMA5D3 datasheet.
+ *
+ * © Copyright 2013 Atmel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ATMEL_NAND_NFC_H
+#define ATMEL_NAND_NFC_H
+
+/*
+ * HSMC NFC registers
+ */
+#define ATMEL_HSMC_NFC_CFG 0x00 /* NFC Configuration Register */
+#define NFC_CFG_PAGESIZE (7 << 0)
+#define NFC_CFG_PAGESIZE_512 (0 << 0)
+#define NFC_CFG_PAGESIZE_1024 (1 << 0)
+#define NFC_CFG_PAGESIZE_2048 (2 << 0)
+#define NFC_CFG_PAGESIZE_4096 (3 << 0)
+#define NFC_CFG_PAGESIZE_8192 (4 << 0)
+#define NFC_CFG_WSPARE (1 << 8)
+#define NFC_CFG_RSPARE (1 << 9)
+#define NFC_CFG_NFC_DTOCYC (0xf << 16)
+#define NFC_CFG_NFC_DTOMUL (0x7 << 20)
+#define NFC_CFG_NFC_SPARESIZE (0x7f << 24)
+#define NFC_CFG_NFC_SPARESIZE_BIT_POS 24
+
+#define ATMEL_HSMC_NFC_CTRL 0x04 /* NFC Control Register */
+#define NFC_CTRL_ENABLE (1 << 0)
+#define NFC_CTRL_DISABLE (1 << 1)
+
+#define ATMEL_HSMC_NFC_SR 0x08 /* NFC Status Register */
+#define NFC_SR_XFR_DONE (1 << 16)
+#define NFC_SR_CMD_DONE (1 << 17)
+#define NFC_SR_RB_EDGE (1 << 24)
+
+#define ATMEL_HSMC_NFC_IER 0x0c
+#define ATMEL_HSMC_NFC_IDR 0x10
+#define ATMEL_HSMC_NFC_IMR 0x14
+#define ATMEL_HSMC_NFC_CYCLE0 0x18 /* NFC Address Cycle Zero */
+#define ATMEL_HSMC_NFC_ADDR_CYCLE0 (0xff)
+
+#define ATMEL_HSMC_NFC_BANK 0x1c /* NFC Bank Register */
+#define ATMEL_HSMC_NFC_BANK0 (0 << 0)
+#define ATMEL_HSMC_NFC_BANK1 (1 << 0)
+
+#define nfc_writel(addr, reg, value) \
+ writel((value), (addr) + ATMEL_HSMC_NFC_##reg)
+
+#define nfc_readl(addr, reg) \
+ readl_relaxed((addr) + ATMEL_HSMC_NFC_##reg)
+
+/*
+ * NFC Address Command definitions
+ */
+#define NFCADDR_CMD_CMD1 (0xff << 2) /* Command for Cycle 1 */
+#define NFCADDR_CMD_CMD1_BIT_POS 2
+#define NFCADDR_CMD_CMD2 (0xff << 10) /* Command for Cycle 2 */
+#define NFCADDR_CMD_CMD2_BIT_POS 10
+#define NFCADDR_CMD_VCMD2 (0x1 << 18) /* Valid Cycle 2 Command */
+#define NFCADDR_CMD_ACYCLE (0x7 << 19) /* Number of Address required */
+#define NFCADDR_CMD_ACYCLE_NONE (0x0 << 19)
+#define NFCADDR_CMD_ACYCLE_1 (0x1 << 19)
+#define NFCADDR_CMD_ACYCLE_2 (0x2 << 19)
+#define NFCADDR_CMD_ACYCLE_3 (0x3 << 19)
+#define NFCADDR_CMD_ACYCLE_4 (0x4 << 19)
+#define NFCADDR_CMD_ACYCLE_5 (0x5 << 19)
+#define NFCADDR_CMD_ACYCLE_BIT_POS 19
+#define NFCADDR_CMD_CSID (0x7 << 22) /* Chip Select Identifier */
+#define NFCADDR_CMD_CSID_0 (0x0 << 22)
+#define NFCADDR_CMD_CSID_1 (0x1 << 22)
+#define NFCADDR_CMD_CSID_2 (0x2 << 22)
+#define NFCADDR_CMD_CSID_3 (0x3 << 22)
+#define NFCADDR_CMD_CSID_4 (0x4 << 22)
+#define NFCADDR_CMD_CSID_5 (0x5 << 22)
+#define NFCADDR_CMD_CSID_6 (0x6 << 22)
+#define NFCADDR_CMD_CSID_7 (0x7 << 22)
+#define NFCADDR_CMD_DATAEN (0x1 << 25) /* Data Transfer Enable */
+#define NFCADDR_CMD_DATADIS (0x0 << 25) /* Data Transfer Disable */
+#define NFCADDR_CMD_NFCRD (0x0 << 26) /* NFC Read Enable */
+#define NFCADDR_CMD_NFCWR (0x1 << 26) /* NFC Write Enable */
+#define NFCADDR_CMD_NFCBUSY (0x1 << 27) /* NFC Busy */
+
+#define nfc_cmd_addr1234_writel(cmd, addr1234, nfc_base) \
+ writel((addr1234), (cmd) + nfc_base)
+
+#define nfc_cmd_readl(bitstatus, nfc_base) \
+ readl_relaxed((bitstatus) + nfc_base)
+
+#define NFC_TIME_OUT_MS 100
+#define NFC_SRAM_BANK1_OFFSET 0x1200
+
+#endif
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index e7767eef450..bc5c518828d 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -11,44 +11,31 @@
#include <linux/slab.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1550nd.h>
-#include <asm/mach-au1x00/au1xxx.h>
-#include <asm/mach-db1x00/bcsr.h>
-/*
- * MTD structure for NAND controller
- */
-static struct mtd_info *au1550_mtd = NULL;
-static void __iomem *p_nand;
-static int nand_width = 1; /* default x8 */
-static void (*au1550_write_byte)(struct mtd_info *, u_char);
+struct au1550nd_ctx {
+ struct mtd_info info;
+ struct nand_chip chip;
-/*
- * Define partitions for flash device
- */
-static const struct mtd_partition partition_info[] = {
- {
- .name = "NAND FS 0",
- .offset = 0,
- .size = 8 * 1024 * 1024},
- {
- .name = "NAND FS 1",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL}
+ int cs;
+ void __iomem *base;
+ void (*write_byte)(struct mtd_info *, u_char);
};
/**
* au_read_byte - read one byte from the chip
* @mtd: MTD device structure
*
- * read function for 8bit buswith
+ * read function for 8bit buswidth
*/
static u_char au_read_byte(struct mtd_info *mtd)
{
@@ -63,7 +50,7 @@ static u_char au_read_byte(struct mtd_info *mtd)
* @mtd: MTD device structure
* @byte: pointer to data byte to write
*
- * write function for 8it buswith
+ * write function for 8bit buswidth
*/
static void au_write_byte(struct mtd_info *mtd, u_char byte)
{
@@ -73,11 +60,10 @@ static void au_write_byte(struct mtd_info *mtd, u_char byte)
}
/**
- * au_read_byte16 - read one byte endianess aware from the chip
+ * au_read_byte16 - read one byte endianness aware from the chip
* @mtd: MTD device structure
*
- * read function for 16bit buswith with
- * endianess conversion
+ * read function for 16bit buswidth with endianness conversion
*/
static u_char au_read_byte16(struct mtd_info *mtd)
{
@@ -88,12 +74,11 @@ static u_char au_read_byte16(struct mtd_info *mtd)
}
/**
- * au_write_byte16 - write one byte endianess aware to the chip
+ * au_write_byte16 - write one byte endianness aware to the chip
* @mtd: MTD device structure
* @byte: pointer to data byte to write
*
- * write function for 16bit buswith with
- * endianess conversion
+ * write function for 16bit buswidth with endianness conversion
*/
static void au_write_byte16(struct mtd_info *mtd, u_char byte)
{
@@ -106,8 +91,7 @@ static void au_write_byte16(struct mtd_info *mtd, u_char byte)
* au_read_word - read one word from the chip
* @mtd: MTD device structure
*
- * read function for 16bit buswith without
- * endianess conversion
+ * read function for 16bit buswidth without endianness conversion
*/
static u16 au_read_word(struct mtd_info *mtd)
{
@@ -123,7 +107,7 @@ static u16 au_read_word(struct mtd_info *mtd)
* @buf: data buffer
* @len: number of bytes to write
*
- * write function for 8bit buswith
+ * write function for 8bit buswidth
*/
static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
@@ -142,7 +126,7 @@ static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
* @buf: buffer to store date
* @len: number of bytes to read
*
- * read function for 8bit buswith
+ * read function for 8bit buswidth
*/
static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
@@ -156,34 +140,12 @@ static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
}
/**
- * au_verify_buf - Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
- *
- * verify function for 8bit buswith
- */
-static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i = 0; i < len; i++) {
- if (buf[i] != readb(this->IO_ADDR_R))
- return -EFAULT;
- au_sync();
- }
-
- return 0;
-}
-
-/**
* au_write_buf16 - write buffer to chip
* @mtd: MTD device structure
* @buf: data buffer
* @len: number of bytes to write
*
- * write function for 16bit buswith
+ * write function for 16bit buswidth
*/
static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
{
@@ -205,7 +167,7 @@ static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
* @buf: buffer to store date
* @len: number of bytes to read
*
- * read function for 16bit buswith
+ * read function for 16bit buswidth
*/
static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
@@ -220,29 +182,6 @@ static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
}
}
-/**
- * au_verify_buf16 - Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
- *
- * verify function for 16bit buswith
- */
-static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
- u16 *p = (u16 *) buf;
- len >>= 1;
-
- for (i = 0; i < len; i++) {
- if (p[i] != readw(this->IO_ADDR_R))
- return -EFAULT;
- au_sync();
- }
- return 0;
-}
-
/* Select the chip by setting nCE to low */
#define NAND_CTL_SETNCE 1
/* Deselect the chip by setting nCE to high */
@@ -258,24 +197,25 @@ static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
{
- register struct nand_chip *this = mtd->priv;
+ struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info);
+ struct nand_chip *this = mtd->priv;
switch (cmd) {
case NAND_CTL_SETCLE:
- this->IO_ADDR_W = p_nand + MEM_STNAND_CMD;
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_CMD;
break;
case NAND_CTL_CLRCLE:
- this->IO_ADDR_W = p_nand + MEM_STNAND_DATA;
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
break;
case NAND_CTL_SETALE:
- this->IO_ADDR_W = p_nand + MEM_STNAND_ADDR;
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_ADDR;
break;
case NAND_CTL_CLRALE:
- this->IO_ADDR_W = p_nand + MEM_STNAND_DATA;
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
/* FIXME: Nobody knows why this is necessary,
* but it works only that way */
udelay(1);
@@ -283,7 +223,7 @@ static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
case NAND_CTL_SETNCE:
/* assert (force assert) chip enable */
- au_writel((1 << (4 + NAND_CS)), MEM_STNDCTL);
+ au_writel((1 << (4 + ctx->cs)), MEM_STNDCTL);
break;
case NAND_CTL_CLRNCE:
@@ -330,9 +270,10 @@ static void au1550_select_chip(struct mtd_info *mtd, int chip)
*/
static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
{
- register struct nand_chip *this = mtd->priv;
+ struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info);
+ struct nand_chip *this = mtd->priv;
int ce_override = 0, i;
- ulong flags;
+ unsigned long flags = 0;
/* Begin command latch cycle */
au1550_hwcontrol(mtd, NAND_CTL_SETCLE);
@@ -353,9 +294,9 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
column -= 256;
readcmd = NAND_CMD_READ1;
}
- au1550_write_byte(mtd, readcmd);
+ ctx->write_byte(mtd, readcmd);
}
- au1550_write_byte(mtd, command);
+ ctx->write_byte(mtd, command);
/* Set ALE and clear CLE to start address cycle */
au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
@@ -366,12 +307,13 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
/* Serially input address */
if (column != -1) {
/* Adjust columns for 16 bit buswidth */
- if (this->options & NAND_BUSWIDTH_16)
+ if (this->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
column >>= 1;
- au1550_write_byte(mtd, column);
+ ctx->write_byte(mtd, column);
}
if (page_addr != -1) {
- au1550_write_byte(mtd, (u8)(page_addr & 0xff));
+ ctx->write_byte(mtd, (u8)(page_addr & 0xff));
if (command == NAND_CMD_READ0 ||
command == NAND_CMD_READ1 ||
@@ -389,11 +331,12 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
}
- au1550_write_byte(mtd, (u8)(page_addr >> 8));
+ ctx->write_byte(mtd, (u8)(page_addr >> 8));
/* One more address cycle for devices > 32MiB */
if (this->chipsize > (32 << 20))
- au1550_write_byte(mtd, (u8)((page_addr >> 16) & 0x0f));
+ ctx->write_byte(mtd,
+ ((page_addr >> 16) & 0x0f));
}
/* Latch in address */
au1550_hwcontrol(mtd, NAND_CTL_CLRALE);
@@ -439,121 +382,77 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
while(!this->dev_ready(mtd));
}
-
-/*
- * Main initialization routine
- */
-static int __init au1xxx_nand_init(void)
+static int find_nand_cs(unsigned long nand_base)
{
- struct nand_chip *this;
- u16 boot_swapboot = 0; /* default value */
- int retval;
- u32 mem_staddr;
- u32 nand_phys;
-
- /* Allocate memory for MTD device structure and private data */
- au1550_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!au1550_mtd) {
- printk("Unable to allocate NAND MTD dev structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&au1550_mtd[1]);
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
+ unsigned long addr, staddr, start, mask, end;
+ int i;
- /* Link the private data with the MTD structure */
- au1550_mtd->priv = this;
- au1550_mtd->owner = THIS_MODULE;
+ for (i = 0; i < 4; i++) {
+ addr = 0x1000 + (i * 0x10); /* CSx */
+ staddr = __raw_readl(base + addr + 0x08); /* STADDRx */
+ /* figure out the decoded range of this CS */
+ start = (staddr << 4) & 0xfffc0000;
+ mask = (staddr << 18) & 0xfffc0000;
+ end = (start | (start - 1)) & ~(start ^ mask);
+ if ((nand_base >= start) && (nand_base < end))
+ return i;
+ }
+ return -ENODEV;
+}
- /* MEM_STNDCTL: disable ints, disable nand boot */
- au_writel(0, MEM_STNDCTL);
+static int au1550nd_probe(struct platform_device *pdev)
+{
+ struct au1550nd_platdata *pd;
+ struct au1550nd_ctx *ctx;
+ struct nand_chip *this;
+ struct resource *r;
+ int ret, cs;
-#ifdef CONFIG_MIPS_PB1550
- /* set gpio206 high */
- gpio_direction_input(206);
+ pd = dev_get_platdata(&pdev->dev);
+ if (!pd) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
- boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
- switch (boot_swapboot) {
- case 0:
- case 2:
- case 8:
- case 0xC:
- case 0xD:
- /* x16 NAND Flash */
- nand_width = 0;
- break;
- case 1:
- case 9:
- case 3:
- case 0xE:
- case 0xF:
- /* x8 NAND Flash */
- nand_width = 1;
- break;
- default:
- printk("Pb1550 NAND: bad boot:swap\n");
- retval = -EINVAL;
- goto outmem;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no NAND memory resource\n");
+ ret = -ENODEV;
+ goto out1;
}
-#endif
-
- /* Configure chip-select; normally done by boot code, e.g. YAMON */
-#ifdef NAND_STCFG
- if (NAND_CS == 0) {
- au_writel(NAND_STCFG, MEM_STCFG0);
- au_writel(NAND_STTIME, MEM_STTIME0);
- au_writel(NAND_STADDR, MEM_STADDR0);
+ if (request_mem_region(r->start, resource_size(r), "au1550-nand")) {
+ dev_err(&pdev->dev, "cannot claim NAND memory area\n");
+ ret = -ENOMEM;
+ goto out1;
}
- if (NAND_CS == 1) {
- au_writel(NAND_STCFG, MEM_STCFG1);
- au_writel(NAND_STTIME, MEM_STTIME1);
- au_writel(NAND_STADDR, MEM_STADDR1);
- }
- if (NAND_CS == 2) {
- au_writel(NAND_STCFG, MEM_STCFG2);
- au_writel(NAND_STTIME, MEM_STTIME2);
- au_writel(NAND_STADDR, MEM_STADDR2);
- }
- if (NAND_CS == 3) {
- au_writel(NAND_STCFG, MEM_STCFG3);
- au_writel(NAND_STTIME, MEM_STTIME3);
- au_writel(NAND_STADDR, MEM_STADDR3);
- }
-#endif
-
- /* Locate NAND chip-select in order to determine NAND phys address */
- mem_staddr = 0x00000000;
- if (((au_readl(MEM_STCFG0) & 0x7) == 0x5) && (NAND_CS == 0))
- mem_staddr = au_readl(MEM_STADDR0);
- else if (((au_readl(MEM_STCFG1) & 0x7) == 0x5) && (NAND_CS == 1))
- mem_staddr = au_readl(MEM_STADDR1);
- else if (((au_readl(MEM_STCFG2) & 0x7) == 0x5) && (NAND_CS == 2))
- mem_staddr = au_readl(MEM_STADDR2);
- else if (((au_readl(MEM_STCFG3) & 0x7) == 0x5) && (NAND_CS == 3))
- mem_staddr = au_readl(MEM_STADDR3);
-
- if (mem_staddr == 0x00000000) {
- printk("Au1xxx NAND: ERROR WITH NAND CHIP-SELECT\n");
- kfree(au1550_mtd);
- return 1;
+
+ ctx->base = ioremap_nocache(r->start, 0x1000);
+ if (!ctx->base) {
+ dev_err(&pdev->dev, "cannot remap NAND memory area\n");
+ ret = -ENODEV;
+ goto out2;
}
- nand_phys = (mem_staddr << 4) & 0xFFFC0000;
- p_nand = ioremap(nand_phys, 0x1000);
+ this = &ctx->chip;
+ ctx->info.priv = this;
+ ctx->info.owner = THIS_MODULE;
- /* make controller and MTD agree */
- if (NAND_CS == 0)
- nand_width = au_readl(MEM_STCFG0) & (1 << 22);
- if (NAND_CS == 1)
- nand_width = au_readl(MEM_STCFG1) & (1 << 22);
- if (NAND_CS == 2)
- nand_width = au_readl(MEM_STCFG2) & (1 << 22);
- if (NAND_CS == 3)
- nand_width = au_readl(MEM_STCFG3) & (1 << 22);
+ /* figure out which CS# r->start belongs to */
+ cs = find_nand_cs(r->start);
+ if (cs < 0) {
+ dev_err(&pdev->dev, "cannot detect NAND chipselect\n");
+ ret = -ENODEV;
+ goto out3;
+ }
+ ctx->cs = cs;
- /* Set address of hardware control function */
this->dev_ready = au1550_device_ready;
this->select_chip = au1550_select_chip;
this->cmdfunc = au1550_command;
@@ -562,56 +461,58 @@ static int __init au1xxx_nand_init(void)
this->chip_delay = 30;
this->ecc.mode = NAND_ECC_SOFT;
- this->options = NAND_NO_AUTOINCR;
-
- if (!nand_width)
+ if (pd->devwidth)
this->options |= NAND_BUSWIDTH_16;
- this->read_byte = (!nand_width) ? au_read_byte16 : au_read_byte;
- au1550_write_byte = (!nand_width) ? au_write_byte16 : au_write_byte;
+ this->read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte;
+ ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte;
this->read_word = au_read_word;
- this->write_buf = (!nand_width) ? au_write_buf16 : au_write_buf;
- this->read_buf = (!nand_width) ? au_read_buf16 : au_read_buf;
- this->verify_buf = (!nand_width) ? au_verify_buf16 : au_verify_buf;
-
- /* Scan to find existence of the device */
- if (nand_scan(au1550_mtd, 1)) {
- retval = -ENXIO;
- goto outio;
+ this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
+ this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
+
+ ret = nand_scan(&ctx->info, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
+ goto out3;
}
- /* Register the partitions */
- mtd_device_register(au1550_mtd, partition_info,
- ARRAY_SIZE(partition_info));
+ mtd_device_register(&ctx->info, pd->parts, pd->num_parts);
- return 0;
+ platform_set_drvdata(pdev, ctx);
- outio:
- iounmap(p_nand);
+ return 0;
- outmem:
- kfree(au1550_mtd);
- return retval;
+out3:
+ iounmap(ctx->base);
+out2:
+ release_mem_region(r->start, resource_size(r));
+out1:
+ kfree(ctx);
+ return ret;
}
-module_init(au1xxx_nand_init);
-
-/*
- * Clean up routine
- */
-static void __exit au1550_cleanup(void)
+static int au1550nd_remove(struct platform_device *pdev)
{
- /* Release resources, unregister device */
- nand_release(au1550_mtd);
+ struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
+ struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- /* Free the MTD device structure */
- kfree(au1550_mtd);
-
- /* Unmap */
- iounmap(p_nand);
+ nand_release(&ctx->info);
+ iounmap(ctx->base);
+ release_mem_region(r->start, 0x1000);
+ kfree(ctx);
+ return 0;
}
-module_exit(au1550_cleanup);
+static struct platform_driver au1550nd_driver = {
+ .driver = {
+ .name = "au1550-nand",
+ .owner = THIS_MODULE,
+ },
+ .probe = au1550nd_probe,
+ .remove = au1550nd_remove,
+};
+
+module_platform_driver(au1550nd_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Embedded Edge, LLC");
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
deleted file mode 100644
index eddc9a22498..00000000000
--- a/drivers/mtd/nand/autcpu12.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * drivers/mtd/autcpu12.c
- *
- * Copyright (c) 2002 Thomas Gleixner <tgxl@linutronix.de>
- *
- * Derived from drivers/mtd/spia.c
- * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * autronix autcpu12 board, which is a SmartMediaCard. It supports
- * 16MiB, 32MiB and 64MiB cards.
- *
- *
- * 02-12-2002 TG Cleanup of module params
- *
- * 02-20-2002 TG adjusted for different rd/wr address support
- * added support for read device ready/busy line
- * added page_cache
- *
- * 10-06-2002 TG 128K card support added
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <asm/sizes.h>
-#include <mach/autcpu12.h>
-
-/*
- * MTD structure for AUTCPU12 board
- */
-static struct mtd_info *autcpu12_mtd = NULL;
-static void __iomem *autcpu12_fio_base;
-
-/*
- * Define partitions for flash devices
- */
-static struct mtd_partition partition_info16k[] = {
- { .name = "AUTCPU12 flash partition 1",
- .offset = 0,
- .size = 8 * SZ_1M },
- { .name = "AUTCPU12 flash partition 2",
- .offset = 8 * SZ_1M,
- .size = 8 * SZ_1M },
-};
-
-static struct mtd_partition partition_info32k[] = {
- { .name = "AUTCPU12 flash partition 1",
- .offset = 0,
- .size = 8 * SZ_1M },
- { .name = "AUTCPU12 flash partition 2",
- .offset = 8 * SZ_1M,
- .size = 24 * SZ_1M },
-};
-
-static struct mtd_partition partition_info64k[] = {
- { .name = "AUTCPU12 flash partition 1",
- .offset = 0,
- .size = 16 * SZ_1M },
- { .name = "AUTCPU12 flash partition 2",
- .offset = 16 * SZ_1M,
- .size = 48 * SZ_1M },
-};
-
-static struct mtd_partition partition_info128k[] = {
- { .name = "AUTCPU12 flash partition 1",
- .offset = 0,
- .size = 16 * SZ_1M },
- { .name = "AUTCPU12 flash partition 2",
- .offset = 16 * SZ_1M,
- .size = 112 * SZ_1M },
-};
-
-#define NUM_PARTITIONS16K 2
-#define NUM_PARTITIONS32K 2
-#define NUM_PARTITIONS64K 2
-#define NUM_PARTITIONS128K 2
-/*
- * hardware specific access to control-lines
- *
- * ALE bit 4 autcpu12_pedr
- * CLE bit 5 autcpu12_pedr
- * NCE bit 0 fio_ctrl
- *
- */
-static void autcpu12_hwcontrol(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
- void __iomem *addr;
- unsigned char bits;
-
- addr = CS89712_VIRT_BASE + AUTCPU12_SMC_PORT_OFFSET;
- bits = (ctrl & NAND_CLE) << 4;
- bits |= (ctrl & NAND_ALE) << 2;
- writeb((readb(addr) & ~0x30) | bits, addr);
-
- addr = autcpu12_fio_base + AUTCPU12_SMC_SELECT_OFFSET;
- writeb((readb(addr) & ~0x1) | (ctrl & NAND_NCE), addr);
- }
-
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-/*
- * read device ready pin
- */
-int autcpu12_device_ready(struct mtd_info *mtd)
-{
- void __iomem *addr = CS89712_VIRT_BASE + AUTCPU12_SMC_PORT_OFFSET;
-
- return readb(addr) & AUTCPU12_SMC_RDY;
-}
-
-/*
- * Main initialization routine
- */
-static int __init autcpu12_init(void)
-{
- struct nand_chip *this;
- int err = 0;
-
- /* Allocate memory for MTD device structure and private data */
- autcpu12_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
- GFP_KERNEL);
- if (!autcpu12_mtd) {
- printk("Unable to allocate AUTCPU12 NAND MTD device structure.\n");
- err = -ENOMEM;
- goto out;
- }
-
- /* map physical address */
- autcpu12_fio_base = ioremap(AUTCPU12_PHYS_SMC, SZ_1K);
- if (!autcpu12_fio_base) {
- printk("Ioremap autcpu12 SmartMedia Card failed\n");
- err = -EIO;
- goto out_mtd;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&autcpu12_mtd[1]);
-
- /* Initialize structures */
- memset(autcpu12_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- autcpu12_mtd->priv = this;
- autcpu12_mtd->owner = THIS_MODULE;
-
- /* Set address of NAND IO lines */
- this->IO_ADDR_R = autcpu12_fio_base;
- this->IO_ADDR_W = autcpu12_fio_base;
- this->cmd_ctrl = autcpu12_hwcontrol;
- this->dev_ready = autcpu12_device_ready;
- /* 20 us command delay time */
- this->chip_delay = 20;
- this->ecc.mode = NAND_ECC_SOFT;
-
- /* Enable the following for a flash based bad block table */
- /*
- this->options = NAND_USE_FLASH_BBT;
- */
- this->options = NAND_USE_FLASH_BBT;
-
- /* Scan to find existence of the device */
- if (nand_scan(autcpu12_mtd, 1)) {
- err = -ENXIO;
- goto out_ior;
- }
-
- /* Register the partitions */
- switch (autcpu12_mtd->size) {
- case SZ_16M:
- mtd_device_register(autcpu12_mtd, partition_info16k,
- NUM_PARTITIONS16K);
- break;
- case SZ_32M:
- mtd_device_register(autcpu12_mtd, partition_info32k,
- NUM_PARTITIONS32K);
- break;
- case SZ_64M:
- mtd_device_register(autcpu12_mtd, partition_info64k,
- NUM_PARTITIONS64K);
- break;
- case SZ_128M:
- mtd_device_register(autcpu12_mtd, partition_info128k,
- NUM_PARTITIONS128K);
- break;
- default:
- printk("Unsupported SmartMedia device\n");
- err = -ENXIO;
- goto out_ior;
- }
- goto out;
-
- out_ior:
- iounmap(autcpu12_fio_base);
- out_mtd:
- kfree(autcpu12_mtd);
- out:
- return err;
-}
-
-module_init(autcpu12_init);
-
-/*
- * Clean up routine
- */
-static void __exit autcpu12_cleanup(void)
-{
- /* Release resources, unregister device */
- nand_release(autcpu12_mtd);
-
- /* unmap physical address */
- iounmap(autcpu12_fio_base);
-
- /* Free the MTD device structure */
- kfree(autcpu12_mtd);
-}
-
-module_exit(autcpu12_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
-MODULE_DESCRIPTION("Glue layer for SmartMediaCard on autronix autcpu12");
diff --git a/drivers/mtd/nand/bcm47xxnflash/Makefile b/drivers/mtd/nand/bcm47xxnflash/Makefile
new file mode 100644
index 00000000000..f05b119e134
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/Makefile
@@ -0,0 +1,4 @@
+bcm47xxnflash-y += main.o
+bcm47xxnflash-y += ops_bcm4706.o
+
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash.o
diff --git a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
new file mode 100644
index 00000000000..c005a62330b
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
@@ -0,0 +1,26 @@
+#ifndef __BCM47XXNFLASH_H
+#define __BCM47XXNFLASH_H
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+
+struct bcm47xxnflash {
+ struct bcma_drv_cc *cc;
+
+ struct nand_chip nand_chip;
+ struct mtd_info mtd;
+
+ unsigned curr_command;
+ int curr_page_addr;
+ int curr_column;
+
+ u8 id_data[8];
+};
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n);
+
+#endif /* BCM47XXNFLASH */
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
new file mode 100644
index 00000000000..10744591131
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -0,0 +1,80 @@
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "bcm47xxnflash.h"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rafał Miłecki");
+
+static const char *probes[] = { "bcm47xxpart", NULL };
+
+static int bcm47xxnflash_probe(struct platform_device *pdev)
+{
+ struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxnflash *b47n;
+ int err = 0;
+
+ b47n = devm_kzalloc(&pdev->dev, sizeof(*b47n), GFP_KERNEL);
+ if (!b47n)
+ return -ENOMEM;
+
+ b47n->nand_chip.priv = b47n;
+ b47n->mtd.owner = THIS_MODULE;
+ b47n->mtd.priv = &b47n->nand_chip; /* Required */
+ b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);
+
+ if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+ err = bcm47xxnflash_ops_bcm4706_init(b47n);
+ } else {
+ pr_err("Device not supported\n");
+ err = -ENOTSUPP;
+ }
+ if (err) {
+ pr_err("Initialization failed: %d\n", err);
+ return err;
+ }
+
+ err = mtd_device_parse_register(&b47n->mtd, probes, NULL, NULL, 0);
+ if (err) {
+ pr_err("Failed to register MTD device: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int bcm47xxnflash_remove(struct platform_device *pdev)
+{
+ struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+
+ if (nflash->mtd)
+ mtd_device_unregister(nflash->mtd);
+
+ return 0;
+}
+
+static struct platform_driver bcm47xxnflash_driver = {
+ .probe = bcm47xxnflash_probe,
+ .remove = bcm47xxnflash_remove,
+ .driver = {
+ .name = "bcma_nflash",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(bcm47xxnflash_driver);
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
new file mode 100644
index 00000000000..b2ab373c9ee
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
@@ -0,0 +1,413 @@
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "bcm47xxnflash.h"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/bcma/bcma.h>
+
+/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has
+ * shown ~1000 retries as maxiumum. */
+#define NFLASH_READY_RETRIES 10000
+
+#define NFLASH_SECTOR_SIZE 512
+
+#define NCTL_CMD0 0x00010000
+#define NCTL_CMD1W 0x00080000
+#define NCTL_READ 0x00100000
+#define NCTL_WRITE 0x00200000
+#define NCTL_SPECADDR 0x01000000
+#define NCTL_READY 0x04000000
+#define NCTL_ERR 0x08000000
+#define NCTL_CSA 0x40000000
+#define NCTL_START 0x80000000
+
+/**************************************************
+ * Various helpers
+ **************************************************/
+
+static inline u8 bcm47xxnflash_ops_bcm4706_ns_to_cycle(u16 ns, u16 clock)
+{
+ return ((ns * 1000 * clock) / 1000000) + 1;
+}
+
+static int bcm47xxnflash_ops_bcm4706_ctl_cmd(struct bcma_drv_cc *cc, u32 code)
+{
+ int i = 0;
+
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_CTL, NCTL_START | code);
+ for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+ if (!(bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_START)) {
+ i = 0;
+ break;
+ }
+ }
+ if (i) {
+ pr_err("NFLASH control command not ready!\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int bcm47xxnflash_ops_bcm4706_poll(struct bcma_drv_cc *cc)
+{
+ int i;
+
+ for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+ if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_READY) {
+ if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) &
+ BCMA_CC_NFLASH_CTL_ERR) {
+ pr_err("Error on polling\n");
+ return -EBUSY;
+ } else {
+ return 0;
+ }
+ }
+ }
+
+ pr_err("Polling timeout!\n");
+ return -EBUSY;
+}
+
+/**************************************************
+ * R/W
+ **************************************************/
+
+static void bcm47xxnflash_ops_bcm4706_read(struct mtd_info *mtd, uint8_t *buf,
+ int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ u32 ctlcode;
+ u32 *dest = (u32 *)buf;
+ int i;
+ int toread;
+
+ BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+	/* Don't validate the column using nand_chip->page_shift; it may be
+	 * bigger than the page size when accessing the OOB area */
+
+ while (len) {
+		/* We can read at most 0x200 (NFLASH_SECTOR_SIZE) bytes at once */
+ toread = min(len, 0x200);
+
+ /* Set page and column */
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_COL_ADDR,
+ b47n->curr_column);
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+
+ /* Prepare to read */
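+		/* Assumption: the unnamed 0x00040000 and 0x00020000 bits make
+		 * the controller use the row and column addresses programmed
+		 * above. */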
+ ctlcode = NCTL_CSA | NCTL_CMD1W | 0x00040000 | 0x00020000 |
+ NCTL_CMD0;
+ ctlcode |= NAND_CMD_READSTART << 8;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode))
+ return;
+ if (bcm47xxnflash_ops_bcm4706_poll(b47n->cc))
+ return;
+
+ /* Eventually read some data :) */
+ for (i = 0; i < toread; i += 4, dest++) {
+ ctlcode = NCTL_CSA | 0x30000000 | NCTL_READ;
+			if (i == toread - 4) /* Last read goes without NCTL_CSA */
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+ ctlcode))
+ return;
+ *dest = bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA);
+ }
+
+ b47n->curr_column += toread;
+ len -= toread;
+ }
+}
+
+static void bcm47xxnflash_ops_bcm4706_write(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+
+ u32 ctlcode;
+ const u32 *data = (u32 *)buf;
+ int i;
+
+ BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+	/* Don't validate the column using nand_chip->page_shift; it may be
+	 * bigger than the page size when accessing the OOB area */
+
+ for (i = 0; i < len; i += 4, data++) {
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_DATA, *data);
+
+ ctlcode = NCTL_CSA | 0x30000000 | NCTL_WRITE;
+		if (i == len - 4) /* Last write goes without NCTL_CSA */
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) {
+ pr_err("%s ctl_cmd didn't work!\n", __func__);
+ return;
+ }
+ }
+
+ b47n->curr_column += len;
+}
+
+/**************************************************
+ * NAND chip ops
+ **************************************************/
+
+/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
+static void bcm47xxnflash_ops_bcm4706_select_chip(struct mtd_info *mtd,
+ int chip)
+{
+ return;
+}
+
+/*
+ * Default nand_command and nand_command_lp don't match the BCM4706 hardware
+ * layout. For example, reading the chip id is performed in a non-standard way.
+ * Setting column and page is also handled differently; we use special
+ * registers of the ChipCommon core. Hacking cmd_ctrl to understand and convert
+ * standard commands would be much more complicated.
+ */
+static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct mtd_info *mtd,
+ unsigned command, int column,
+ int page_addr)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+ u32 ctlcode;
+ int i;
+
+ if (column != -1)
+ b47n->curr_column = column;
+ if (page_addr != -1)
+ b47n->curr_page_addr = page_addr;
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ pr_warn("Chip reset not implemented yet\n");
+ break;
+ case NAND_CMD_READID:
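+		/* 0x01000000 matches the NCTL_SPECADDR bit defined above */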
+ ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0;
+ ctlcode |= NAND_CMD_READID;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) {
+ pr_err("READID error\n");
+ break;
+ }
+
+ /*
+		 * Reading is specific: the last read has to go without the
+		 * NCTL_CSA bit. We don't know how many reads the NAND
+		 * subsystem is going to perform, so cache everything.
+ */
+ for (i = 0; i < ARRAY_SIZE(b47n->id_data); i++) {
+ ctlcode = NCTL_CSA | NCTL_READ;
+ if (i == ARRAY_SIZE(b47n->id_data) - 1)
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+ ctlcode)) {
+ pr_err("READID error\n");
+ break;
+ }
+ b47n->id_data[i] =
+ bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA)
+ & 0xFF;
+ }
+
+ break;
+ case NAND_CMD_STATUS:
+ ctlcode = NCTL_CSA | NCTL_CMD0 | NAND_CMD_STATUS;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("STATUS command error\n");
+ break;
+ case NAND_CMD_READ0:
+ break;
+ case NAND_CMD_READOOB:
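+		/* The OOB area follows the data area, so just advance the
+		 * column */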
+ if (page_addr != -1)
+ b47n->curr_column += mtd->writesize;
+ break;
+ case NAND_CMD_ERASE1:
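+		/* ERASE1 and ERASE2 are issued here as a single two-command
+		 * sequence, so the NAND_CMD_ERASE2 case below is a no-op. */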
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+ ctlcode = 0x00040000 | NCTL_CMD1W | NCTL_CMD0 |
+ NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8);
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("ERASE1 failed\n");
+ break;
+ case NAND_CMD_ERASE2:
+ break;
+ case NAND_CMD_SEQIN:
+ /* Set page and column */
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_COL_ADDR,
+ b47n->curr_column);
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+
+ /* Prepare to write */
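+		/* 0x40000000 and 0x00010000 match NCTL_CSA and NCTL_CMD0
+		 * defined above */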
+ ctlcode = 0x40000000 | 0x00040000 | 0x00020000 | 0x00010000;
+ ctlcode |= NAND_CMD_SEQIN;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("SEQIN failed\n");
+ break;
+ case NAND_CMD_PAGEPROG:
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, 0x00010000 |
+ NAND_CMD_PAGEPROG))
+ pr_err("PAGEPROG failed\n");
+ if (bcm47xxnflash_ops_bcm4706_poll(cc))
+ pr_err("PAGEPROG not ready\n");
+ break;
+ default:
+ pr_err("Command 0x%X unsupported\n", command);
+ break;
+ }
+ b47n->curr_command = command;
+}
+
+static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+ u32 tmp = 0;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_READID:
+ if (b47n->curr_column >= ARRAY_SIZE(b47n->id_data)) {
+ pr_err("Requested invalid id_data: %d\n",
+ b47n->curr_column);
+ return 0;
+ }
+ return b47n->id_data[b47n->curr_column++];
+ case NAND_CMD_STATUS:
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_READ))
+ return 0;
+ return bcma_cc_read32(cc, BCMA_CC_NFLASH_DATA) & 0xff;
+ case NAND_CMD_READOOB:
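+		/* The controller transfers 32-bit words, so read four bytes
+		 * and return only the first one. */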
+ bcm47xxnflash_ops_bcm4706_read(mtd, (u8 *)&tmp, 4);
+ return tmp & 0xFF;
+ }
+
+ pr_err("Invalid command for byte read: 0x%X\n", b47n->curr_command);
+ return 0;
+}
+
+static void bcm47xxnflash_ops_bcm4706_read_buf(struct mtd_info *mtd,
+ uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ bcm47xxnflash_ops_bcm4706_read(mtd, buf, len);
+ return;
+ }
+
+ pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command);
+}
+
+static void bcm47xxnflash_ops_bcm4706_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_SEQIN:
+ bcm47xxnflash_ops_bcm4706_write(mtd, buf, len);
+ return;
+ }
+
+ pr_err("Invalid command for buf write: 0x%X\n", b47n->curr_command);
+}
+
+/**************************************************
+ * Init
+ **************************************************/
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
+{
+ int err;
+ u32 freq;
+ u16 clock;
+ u8 w0, w1, w2, w3, w4;
+
+ unsigned long chipsize; /* MiB */
+ u8 tbits, col_bits, col_size, row_bits, row_bsize;
+ u32 val;
+
+ b47n->nand_chip.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
+ b47n->nand_chip.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
+ b47n->nand_chip.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
+ b47n->nand_chip.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
+ b47n->nand_chip.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
+ b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
+ b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */
+
+ /* Enable NAND flash access */
+ bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+ BCMA_CC_4706_FLASHSCFG_NF1);
+
+ /* Configure wait counters */
+ if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) {
+ freq = 100000000;
+ } else {
+ freq = bcma_chipco_pll_read(b47n->cc, 4);
+ freq = (freq * 0xFFF) >> 3;
+ freq = (freq * 25000000) >> 3;
+ }
+ clock = freq / 1000000;
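+	/* clock is now in MHz; the wait counters below are in clock cycles */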
+ w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock);
+ w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock);
+ w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+ w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+ w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock);
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0,
+ (w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));
+
+ /* Scan NAND */
+ err = nand_scan(&b47n->mtd, 1);
+ if (err) {
+ pr_err("Could not scan NAND flash: %d\n", err);
+ goto exit;
+ }
+
+ /* Configure FLASH */
+ chipsize = b47n->nand_chip.chipsize >> 20;
+ tbits = ffs(chipsize); /* find first bit set */
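+	/* ffs() equals fls() only when chipsize is a power of two */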
+ if (!tbits || tbits != fls(chipsize)) {
+ pr_err("Invalid flash size: 0x%lX\n", chipsize);
+ err = -ENOTSUPP;
+ goto exit;
+ }
+ tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */
+
+ col_bits = b47n->nand_chip.page_shift + 1;
+ col_size = (col_bits + 7) / 8;
+
+ row_bits = tbits - col_bits + 1;
+ row_bsize = (row_bits + 7) / 8;
+
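+	/* Encode the row/column address sizes (in bytes, minus one) for the
+	 * NAND configuration register */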
+ val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val);
+
+exit:
+ if (err)
+ bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+ ~BCMA_CC_4706_FLASHSCFG_NF1);
+ return err;
+}
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c
deleted file mode 100644
index a930666d068..00000000000
--- a/drivers/mtd/nand/bcm_umi_bch.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*****************************************************************************
-* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
-*
-* Unless you and Broadcom execute a separate written software license
-* agreement governing use of this software, this software is licensed to you
-* under the terms of the GNU General Public License version 2, available at
-* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
-*
-* Notwithstanding the above, under no circumstances may you combine this
-* software in any way with any other Broadcom software provided under a
-* license other than the GPL, without Broadcom's express prior written
-* consent.
-*****************************************************************************/
-
-/* ---- Include Files ---------------------------------------------------- */
-#include "nand_bcm_umi.h"
-
-/* ---- External Variable Declarations ----------------------------------- */
-/* ---- External Function Prototypes ------------------------------------- */
-/* ---- Public Variables ------------------------------------------------- */
-/* ---- Private Constants and Types -------------------------------------- */
-
-/* ---- Private Function Prototypes -------------------------------------- */
-static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int page);
-static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf);
-
-/* ---- Private Variables ------------------------------------------------ */
-
-/*
-** nand_hw_eccoob
-** New oob placement block for use with hardware ecc generation.
-*/
-static struct nand_ecclayout nand_hw_eccoob_512 = {
- /* Reserve 5 for BI indicator */
- .oobfree = {
-#if (NAND_ECC_NUM_BYTES > 3)
- {.offset = 0, .length = 2}
-#else
- {.offset = 0, .length = 5},
- {.offset = 6, .length = 7}
-#endif
- }
-};
-
-/*
-** We treat the OOB for a 2K page as if it were 4 512 byte oobs,
-** except the BI is at byte 0.
-*/
-static struct nand_ecclayout nand_hw_eccoob_2048 = {
- /* Reserve 0 as BI indicator */
- .oobfree = {
-#if (NAND_ECC_NUM_BYTES > 10)
- {.offset = 1, .length = 2},
-#elif (NAND_ECC_NUM_BYTES > 7)
- {.offset = 1, .length = 5},
- {.offset = 16, .length = 6},
- {.offset = 32, .length = 6},
- {.offset = 48, .length = 6}
-#else
- {.offset = 1, .length = 8},
- {.offset = 16, .length = 9},
- {.offset = 32, .length = 9},
- {.offset = 48, .length = 9}
-#endif
- }
-};
-
-/* We treat the OOB for a 4K page as if it were 8 512 byte oobs,
- * except the BI is at byte 0. */
-static struct nand_ecclayout nand_hw_eccoob_4096 = {
- /* Reserve 0 as BI indicator */
- .oobfree = {
-#if (NAND_ECC_NUM_BYTES > 10)
- {.offset = 1, .length = 2},
- {.offset = 16, .length = 3},
- {.offset = 32, .length = 3},
- {.offset = 48, .length = 3},
- {.offset = 64, .length = 3},
- {.offset = 80, .length = 3},
- {.offset = 96, .length = 3},
- {.offset = 112, .length = 3}
-#else
- {.offset = 1, .length = 5},
- {.offset = 16, .length = 6},
- {.offset = 32, .length = 6},
- {.offset = 48, .length = 6},
- {.offset = 64, .length = 6},
- {.offset = 80, .length = 6},
- {.offset = 96, .length = 6},
- {.offset = 112, .length = 6}
-#endif
- }
-};
-
-/* ---- Private Functions ------------------------------------------------ */
-/* ==== Public Functions ================================================= */
-
-/****************************************************************************
-*
-* bcm_umi_bch_read_page_hwecc - hardware ecc based page read function
-* @mtd: mtd info structure
-* @chip: nand chip info structure
-* @buf: buffer to store read data
-*
-***************************************************************************/
-static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t * buf,
- int page)
-{
- int sectorIdx = 0;
- int eccsize = chip->ecc.size;
- int eccsteps = chip->ecc.steps;
- uint8_t *datap = buf;
- uint8_t eccCalc[NAND_ECC_NUM_BYTES];
- int sectorOobSize = mtd->oobsize / eccsteps;
- int stat;
-
- for (sectorIdx = 0; sectorIdx < eccsteps;
- sectorIdx++, datap += eccsize) {
- if (sectorIdx > 0) {
- /* Seek to page location within sector */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, sectorIdx * eccsize,
- -1);
- }
-
- /* Enable hardware ECC before reading the buf */
- nand_bcm_umi_bch_enable_read_hwecc();
-
- /* Read in data */
- bcm_umi_nand_read_buf(mtd, datap, eccsize);
-
- /* Pause hardware ECC after reading the buf */
- nand_bcm_umi_bch_pause_read_ecc_calc();
-
- /* Read the OOB ECC */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + sectorIdx * sectorOobSize, -1);
- nand_bcm_umi_bch_read_oobEcc(mtd->writesize, eccCalc,
- NAND_ECC_NUM_BYTES,
- chip->oob_poi +
- sectorIdx * sectorOobSize);
-
- /* Correct any ECC detected errors */
- stat =
- nand_bcm_umi_bch_correct_page(datap, eccCalc,
- NAND_ECC_NUM_BYTES);
-
- /* Update Stats */
- if (stat < 0) {
-#if defined(NAND_BCM_UMI_DEBUG)
- printk(KERN_WARNING "%s uncorr_err sectorIdx=%d\n",
- __func__, sectorIdx);
- printk(KERN_WARNING
- "%s data %02x %02x %02x %02x "
- "%02x %02x %02x %02x\n",
- __func__, datap[0], datap[1], datap[2], datap[3],
- datap[4], datap[5], datap[6], datap[7]);
- printk(KERN_WARNING
- "%s ecc %02x %02x %02x %02x "
- "%02x %02x %02x %02x %02x %02x "
- "%02x %02x %02x\n",
- __func__, eccCalc[0], eccCalc[1], eccCalc[2],
- eccCalc[3], eccCalc[4], eccCalc[5], eccCalc[6],
- eccCalc[7], eccCalc[8], eccCalc[9], eccCalc[10],
- eccCalc[11], eccCalc[12]);
- BUG();
-#endif
- mtd->ecc_stats.failed++;
- } else {
-#if defined(NAND_BCM_UMI_DEBUG)
- if (stat > 0) {
- printk(KERN_INFO
- "%s %d correctable_errors detected\n",
- __func__, stat);
- }
-#endif
- mtd->ecc_stats.corrected += stat;
- }
- }
- return 0;
-}
-
-/****************************************************************************
-*
-* bcm_umi_bch_write_page_hwecc - hardware ecc based page write function
-* @mtd: mtd info structure
-* @chip: nand chip info structure
-* @buf: data buffer
-*
-***************************************************************************/
-static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
-{
- int sectorIdx = 0;
- int eccsize = chip->ecc.size;
- int eccsteps = chip->ecc.steps;
- const uint8_t *datap = buf;
- uint8_t *oobp = chip->oob_poi;
- int sectorOobSize = mtd->oobsize / eccsteps;
-
- for (sectorIdx = 0; sectorIdx < eccsteps;
- sectorIdx++, datap += eccsize, oobp += sectorOobSize) {
- /* Enable hardware ECC before writing the buf */
- nand_bcm_umi_bch_enable_write_hwecc();
- bcm_umi_nand_write_buf(mtd, datap, eccsize);
- nand_bcm_umi_bch_write_oobEcc(mtd->writesize, oobp,
- NAND_ECC_NUM_BYTES);
- }
-
- bcm_umi_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
-}
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
deleted file mode 100644
index 9ec280738a9..00000000000
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ /dev/null
@@ -1,579 +0,0 @@
-/*****************************************************************************
-* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
-*
-* Unless you and Broadcom execute a separate written software license
-* agreement governing use of this software, this software is licensed to you
-* under the terms of the GNU General Public License version 2, available at
-* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
-*
-* Notwithstanding the above, under no circumstances may you combine this
-* software in any way with any other Broadcom software provided under a
-* license other than the GPL, without Broadcom's express prior written
-* consent.
-*****************************************************************************/
-
-/* ---- Include Files ---------------------------------------------------- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/mach-types.h>
-#include <asm/system.h>
-
-#include <mach/reg_nand.h>
-#include <mach/reg_umi.h>
-
-#include "nand_bcm_umi.h"
-
-#include <mach/memory_settings.h>
-
-#define USE_DMA 1
-#include <mach/dma.h>
-#include <linux/dma-mapping.h>
-#include <linux/completion.h>
-
-/* ---- External Variable Declarations ----------------------------------- */
-/* ---- External Function Prototypes ------------------------------------- */
-/* ---- Public Variables ------------------------------------------------- */
-/* ---- Private Constants and Types -------------------------------------- */
-static const __devinitconst char gBanner[] = KERN_INFO \
- "BCM UMI MTD NAND Driver: 1.00\n";
-
-const char *part_probes[] = { "cmdlinepart", NULL };
-
-#if NAND_ECC_BCH
-static uint8_t scan_ff_pattern[] = { 0xff };
-
-static struct nand_bbt_descr largepage_bbt = {
- .options = 0,
- .offs = 0,
- .len = 1,
- .pattern = scan_ff_pattern
-};
-#endif
-
-/*
-** Preallocate a buffer to avoid having to do this every dma operation.
-** This is the size of the preallocated coherent DMA buffer.
-*/
-#if USE_DMA
-#define DMA_MIN_BUFLEN 512
-#define DMA_MAX_BUFLEN PAGE_SIZE
-#define USE_DIRECT_IO(len) (((len) < DMA_MIN_BUFLEN) || \
- ((len) > DMA_MAX_BUFLEN))
-
-/*
- * The current NAND data space goes from 0x80001900 to 0x80001FFF,
- * which is only 0x700 = 1792 bytes long. This is too small for 2K, 4K page
- * size NAND flash. Need to break the DMA down to multiple 1Ks.
- *
- * Need to make sure REG_NAND_DATA_PADDR + DMA_MAX_LEN < 0x80002000
- */
-#define DMA_MAX_LEN 1024
-
-#else /* !USE_DMA */
-#define DMA_MIN_BUFLEN 0
-#define DMA_MAX_BUFLEN 0
-#define USE_DIRECT_IO(len) 1
-#endif
-/* ---- Private Function Prototypes -------------------------------------- */
-static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len);
-static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
- int len);
-
-/* ---- Private Variables ------------------------------------------------ */
-static struct mtd_info *board_mtd;
-static void __iomem *bcm_umi_io_base;
-static void *virtPtr;
-static dma_addr_t physPtr;
-static struct completion nand_comp;
-
-/* ---- Private Functions ------------------------------------------------ */
-#if NAND_ECC_BCH
-#include "bcm_umi_bch.c"
-#else
-#include "bcm_umi_hamming.c"
-#endif
-
-#if USE_DMA
-
-/* Handler called when the DMA finishes. */
-static void nand_dma_handler(DMA_Device_t dev, int reason, void *userData)
-{
- complete(&nand_comp);
-}
-
-static int nand_dma_init(void)
-{
- int rc;
-
- rc = dma_set_device_handler(DMA_DEVICE_NAND_MEM_TO_MEM,
- nand_dma_handler, NULL);
- if (rc != 0) {
- printk(KERN_ERR "dma_set_device_handler failed: %d\n", rc);
- return rc;
- }
-
- virtPtr =
- dma_alloc_coherent(NULL, DMA_MAX_BUFLEN, &physPtr, GFP_KERNEL);
- if (virtPtr == NULL) {
- printk(KERN_ERR "NAND - Failed to allocate memory for DMA buffer\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void nand_dma_term(void)
-{
- if (virtPtr != NULL)
- dma_free_coherent(NULL, DMA_MAX_BUFLEN, virtPtr, physPtr);
-}
-
-static void nand_dma_read(void *buf, int len)
-{
- int offset = 0;
- int tmp_len = 0;
- int len_left = len;
- DMA_Handle_t hndl;
-
- if (virtPtr == NULL)
- panic("nand_dma_read: virtPtr == NULL\n");
-
- if ((void *)physPtr == NULL)
- panic("nand_dma_read: physPtr == NULL\n");
-
- hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
- if (hndl < 0) {
- printk(KERN_ERR
- "nand_dma_read: unable to allocate dma channel: %d\n",
- (int)hndl);
- panic("\n");
- }
-
- while (len_left > 0) {
- if (len_left > DMA_MAX_LEN) {
- tmp_len = DMA_MAX_LEN;
- len_left -= DMA_MAX_LEN;
- } else {
- tmp_len = len_left;
- len_left = 0;
- }
-
- init_completion(&nand_comp);
- dma_transfer_mem_to_mem(hndl, REG_NAND_DATA_PADDR,
- physPtr + offset, tmp_len);
- wait_for_completion(&nand_comp);
-
- offset += tmp_len;
- }
-
- dma_free_channel(hndl);
-
- if (buf != NULL)
- memcpy(buf, virtPtr, len);
-}
-
-static void nand_dma_write(const void *buf, int len)
-{
- int offset = 0;
- int tmp_len = 0;
- int len_left = len;
- DMA_Handle_t hndl;
-
- if (buf == NULL)
- panic("nand_dma_write: buf == NULL\n");
-
- if (virtPtr == NULL)
- panic("nand_dma_write: virtPtr == NULL\n");
-
- if ((void *)physPtr == NULL)
- panic("nand_dma_write: physPtr == NULL\n");
-
- memcpy(virtPtr, buf, len);
-
-
- hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
- if (hndl < 0) {
- printk(KERN_ERR
- "nand_dma_write: unable to allocate dma channel: %d\n",
- (int)hndl);
- panic("\n");
- }
-
- while (len_left > 0) {
- if (len_left > DMA_MAX_LEN) {
- tmp_len = DMA_MAX_LEN;
- len_left -= DMA_MAX_LEN;
- } else {
- tmp_len = len_left;
- len_left = 0;
- }
-
- init_completion(&nand_comp);
- dma_transfer_mem_to_mem(hndl, physPtr + offset,
- REG_NAND_DATA_PADDR, tmp_len);
- wait_for_completion(&nand_comp);
-
- offset += tmp_len;
- }
-
- dma_free_channel(hndl);
-}
-
-#endif
-
-static int nand_dev_ready(struct mtd_info *mtd)
-{
- return nand_bcm_umi_dev_ready();
-}
-
-/****************************************************************************
-*
-* bcm_umi_nand_inithw
-*
-* This routine does the necessary hardware (board-specific)
-* initializations. This includes setting up the timings, etc.
-*
-***************************************************************************/
-int bcm_umi_nand_inithw(void)
-{
- /* Configure nand timing parameters */
- REG_UMI_NAND_TCR &= ~0x7ffff;
- REG_UMI_NAND_TCR |= HW_CFG_NAND_TCR;
-
-#if !defined(CONFIG_MTD_NAND_BCM_UMI_HWCS)
- /* enable software control of CS */
- REG_UMI_NAND_TCR |= REG_UMI_NAND_TCR_CS_SWCTRL;
-#endif
-
- /* keep NAND chip select asserted */
- REG_UMI_NAND_RCSR |= REG_UMI_NAND_RCSR_CS_ASSERTED;
-
- REG_UMI_NAND_TCR &= ~REG_UMI_NAND_TCR_WORD16;
- /* enable writes to flash */
- REG_UMI_MMD_ICR |= REG_UMI_MMD_ICR_FLASH_WP;
-
- writel(NAND_CMD_RESET, bcm_umi_io_base + REG_NAND_CMD_OFFSET);
- nand_bcm_umi_wait_till_ready();
-
-#if NAND_ECC_BCH
- nand_bcm_umi_bch_config_ecc(NAND_ECC_NUM_BYTES);
-#endif
-
- return 0;
-}
-
-/* Used to turn latch the proper register for access. */
-static void bcm_umi_nand_hwcontrol(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
-{
- /* send command to hardware */
- struct nand_chip *chip = mtd->priv;
- if (ctrl & NAND_CTRL_CHANGE) {
- if (ctrl & NAND_CLE) {
- chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_CMD_OFFSET;
- goto CMD;
- }
- if (ctrl & NAND_ALE) {
- chip->IO_ADDR_W =
- bcm_umi_io_base + REG_NAND_ADDR_OFFSET;
- goto CMD;
- }
- chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
- }
-
-CMD:
- /* Send command to chip directly */
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
- int len)
-{
- if (USE_DIRECT_IO(len)) {
- /* Do it the old way if the buffer is small or too large.
- * Probably quicker than starting and checking dma. */
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i = 0; i < len; i++)
- writeb(buf[i], this->IO_ADDR_W);
- }
-#if USE_DMA
- else
- nand_dma_write(buf, len);
-#endif
-}
-
-static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len)
-{
- if (USE_DIRECT_IO(len)) {
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i = 0; i < len; i++)
- buf[i] = readb(this->IO_ADDR_R);
- }
-#if USE_DMA
- else
- nand_dma_read(buf, len);
-#endif
-}
-
-static uint8_t readbackbuf[NAND_MAX_PAGESIZE];
-static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
- int len)
-{
- /*
- * Try to readback page with ECC correction. This is necessary
- * for MLC parts which may have permanently stuck bits.
- */
- struct nand_chip *chip = mtd->priv;
- int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0);
- if (ret < 0)
- return -EFAULT;
- else {
- if (memcmp(readbackbuf, buf, len) == 0)
- return 0;
-
- return -EFAULT;
- }
- return 0;
-}
-
-static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
-{
- struct nand_chip *this;
- struct resource *r;
- int err = 0;
-
- printk(gBanner);
-
- /* Allocate memory for MTD device structure and private data */
- board_mtd =
- kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
- GFP_KERNEL);
- if (!board_mtd) {
- printk(KERN_WARNING
- "Unable to allocate NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- if (!r)
- return -ENXIO;
-
- /* map physical address */
- bcm_umi_io_base = ioremap(r->start, r->end - r->start + 1);
-
- if (!bcm_umi_io_base) {
- printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n");
- kfree(board_mtd);
- return -EIO;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&board_mtd[1]);
-
- /* Initialize structures */
- memset((char *)board_mtd, 0, sizeof(struct mtd_info));
- memset((char *)this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- board_mtd->priv = this;
-
- /* Initialize the NAND hardware. */
- if (bcm_umi_nand_inithw() < 0) {
- printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n");
- iounmap(bcm_umi_io_base);
- kfree(board_mtd);
- return -EIO;
- }
-
- /* Set address of NAND IO lines */
- this->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
- this->IO_ADDR_R = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
-
- /* Set command delay time, see datasheet for correct value */
- this->chip_delay = 0;
- /* Assign the device ready function, if available */
- this->dev_ready = nand_dev_ready;
- this->options = 0;
-
- this->write_buf = bcm_umi_nand_write_buf;
- this->read_buf = bcm_umi_nand_read_buf;
- this->verify_buf = bcm_umi_nand_verify_buf;
-
- this->cmd_ctrl = bcm_umi_nand_hwcontrol;
- this->ecc.mode = NAND_ECC_HW;
- this->ecc.size = 512;
- this->ecc.bytes = NAND_ECC_NUM_BYTES;
-#if NAND_ECC_BCH
- this->ecc.read_page = bcm_umi_bch_read_page_hwecc;
- this->ecc.write_page = bcm_umi_bch_write_page_hwecc;
-#else
- this->ecc.correct = nand_correct_data512;
- this->ecc.calculate = bcm_umi_hamming_get_hw_ecc;
- this->ecc.hwctl = bcm_umi_hamming_enable_hwecc;
-#endif
-
-#if USE_DMA
- err = nand_dma_init();
- if (err != 0)
- return err;
-#endif
-
- /* Figure out the size of the device that we have.
- * We need to do this to figure out which ECC
- * layout we'll be using.
- */
-
- err = nand_scan_ident(board_mtd, 1, NULL);
- if (err) {
- printk(KERN_ERR "nand_scan failed: %d\n", err);
- iounmap(bcm_umi_io_base);
- kfree(board_mtd);
- return err;
- }
-
- /* Now that we know the nand size, we can setup the ECC layout */
-
- switch (board_mtd->writesize) { /* writesize is the pagesize */
- case 4096:
- this->ecc.layout = &nand_hw_eccoob_4096;
- break;
- case 2048:
- this->ecc.layout = &nand_hw_eccoob_2048;
- break;
- case 512:
- this->ecc.layout = &nand_hw_eccoob_512;
- break;
- default:
- {
- printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n",
- board_mtd->writesize);
- return -EINVAL;
- }
- }
-
-#if NAND_ECC_BCH
- if (board_mtd->writesize > 512) {
- if (this->options & NAND_USE_FLASH_BBT)
- largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
- this->badblock_pattern = &largepage_bbt;
- }
-#endif
-
- /* Now finish off the scan, now that ecc.layout has been initialized. */
-
- err = nand_scan_tail(board_mtd);
- if (err) {
- printk(KERN_ERR "nand_scan failed: %d\n", err);
- iounmap(bcm_umi_io_base);
- kfree(board_mtd);
- return err;
- }
-
- /* Register the partitions */
- {
- int nr_partitions;
- struct mtd_partition *partition_info;
-
- board_mtd->name = "bcm_umi-nand";
- nr_partitions =
- parse_mtd_partitions(board_mtd, part_probes,
- &partition_info, 0);
-
- if (nr_partitions <= 0) {
- printk(KERN_ERR "BCM UMI NAND: Too few partitions - %d\n",
- nr_partitions);
- iounmap(bcm_umi_io_base);
- kfree(board_mtd);
- return -EIO;
- }
- mtd_device_register(board_mtd, partition_info, nr_partitions);
- }
-
- /* Return happy */
- return 0;
-}
-
-static int bcm_umi_nand_remove(struct platform_device *pdev)
-{
-#if USE_DMA
- nand_dma_term();
-#endif
-
- /* Release resources, unregister device */
- nand_release(board_mtd);
-
- /* unmap physical address */
- iounmap(bcm_umi_io_base);
-
- /* Free the MTD device structure */
- kfree(board_mtd);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int bcm_umi_nand_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- printk(KERN_ERR "MTD NAND suspend is being called\n");
- return 0;
-}
-
-static int bcm_umi_nand_resume(struct platform_device *pdev)
-{
- printk(KERN_ERR "MTD NAND resume is being called\n");
- return 0;
-}
-#else
-#define bcm_umi_nand_suspend NULL
-#define bcm_umi_nand_resume NULL
-#endif
-
-static struct platform_driver nand_driver = {
- .driver = {
- .name = "bcm-nand",
- .owner = THIS_MODULE,
- },
- .probe = bcm_umi_nand_probe,
- .remove = bcm_umi_nand_remove,
- .suspend = bcm_umi_nand_suspend,
- .resume = bcm_umi_nand_resume,
-};
-
-static int __init nand_init(void)
-{
- return platform_driver_register(&nand_driver);
-}
-
-static void __exit nand_exit(void)
-{
- platform_driver_unregister(&nand_driver);
-}
-
-module_init(nand_init);
-module_exit(nand_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Broadcom");
-MODULE_DESCRIPTION("BCM UMI MTD NAND driver");
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index dd899cb5d36..722898aea7a 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -37,7 +37,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
@@ -171,7 +170,7 @@ static struct bf5xx_nand_info *to_nand_info(struct platform_device *pdev)
static struct bf5xx_nand_platform *to_nand_plat(struct platform_device *pdev)
{
- return pdev->dev.platform_data;
+ return dev_get_platdata(&pdev->dev);
}
/*
@@ -558,7 +557,7 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
}
static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -566,11 +565,13 @@ static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip
return 0;
}
-static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+static int bf5xx_nand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
}
/*
@@ -656,7 +657,7 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
/*
* Device management interface
*/
-static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
+static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
{
struct mtd_info *mtd = &info->mtd;
struct mtd_partition *parts = info->platform->partitions;
@@ -665,12 +666,10 @@ static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
return mtd_device_register(mtd, parts, nr);
}
-static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
+static int bf5xx_nand_remove(struct platform_device *pdev)
{
struct bf5xx_nand_info *info = to_nand_info(pdev);
- platform_set_drvdata(pdev, NULL);
-
/* first thing we need to do is release all our mtds
* and their partitions, then go through freeing the
* resources used
@@ -680,9 +679,6 @@ static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
peripheral_free_list(bfin_nfc_pin_req);
bf5xx_nand_dma_remove(info);
- /* free the common resources */
- kfree(info);
-
return 0;
}
@@ -702,9 +698,11 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
if (likely(mtd->writesize >= 512)) {
chip->ecc.size = 512;
chip->ecc.bytes = 6;
+ chip->ecc.strength = 2;
} else {
chip->ecc.size = 256;
chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET));
SSYNC();
}
@@ -721,7 +719,7 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
* it can allocate all necessary resources then calls the
* nand layer to look for devices
*/
-static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
+static int bf5xx_nand_probe(struct platform_device *pdev)
{
struct bf5xx_nand_platform *plat = to_nand_plat(pdev);
struct bf5xx_nand_info *info = NULL;
@@ -741,11 +739,10 @@ static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
return -EFAULT;
}
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (info == NULL) {
- dev_err(&pdev->dev, "no memory for flash info\n");
err = -ENOMEM;
- goto out_err_kzalloc;
+ goto out_err;
}
platform_set_drvdata(pdev, info);
@@ -790,7 +787,7 @@ static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
/* initialise the hardware */
err = bf5xx_nand_hw_init(info);
if (err)
- goto out_err_hw_init;
+ goto out_err;
/* setup hardware ECC data struct */
if (hardware_ecc) {
@@ -827,10 +824,7 @@ static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
out_err_nand_scan:
bf5xx_nand_dma_remove(info);
-out_err_hw_init:
- platform_set_drvdata(pdev, NULL);
- kfree(info);
-out_err_kzalloc:
+out_err:
peripheral_free_list(bfin_nfc_pin_req);
return err;
@@ -861,7 +855,7 @@ static int bf5xx_nand_resume(struct platform_device *dev)
/* driver device registration */
static struct platform_driver bf5xx_nand_driver = {
.probe = bf5xx_nand_probe,
- .remove = __devexit_p(bf5xx_nand_remove),
+ .remove = bf5xx_nand_remove,
.suspend = bf5xx_nand_suspend,
.resume = bf5xx_nand_resume,
.driver = {
@@ -870,21 +864,7 @@ static struct platform_driver bf5xx_nand_driver = {
},
};
-static int __init bf5xx_nand_init(void)
-{
- printk(KERN_INFO "%s, Version %s (c) 2007 Analog Devices, Inc.\n",
- DRV_DESC, DRV_VERSION);
-
- return platform_driver_register(&bf5xx_nand_driver);
-}
-
-static void __exit bf5xx_nand_exit(void)
-{
- platform_driver_unregister(&bf5xx_nand_driver);
-}
-
-module_init(bf5xx_nand_init);
-module_exit(bf5xx_nand_exit);
+module_platform_driver(bf5xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRV_AUTHOR);
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 87ebb4e5b0c..4e66726da9a 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/io.h>
#define CAFE_NAND_CTRL1 0x00
@@ -57,7 +58,6 @@
struct cafe_priv {
struct nand_chip nand;
- struct mtd_partition *parts;
struct pci_dev *pdev;
void __iomem *mmio;
struct rs_control *rs;
@@ -102,7 +102,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static int cafe_device_ready(struct mtd_info *mtd)
{
struct cafe_priv *cafe = mtd->priv;
- int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000);
+ int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
cafe_writel(cafe, irqs, NAND_IRQ);
@@ -303,13 +303,7 @@ static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
case NAND_CMD_SEQIN:
case NAND_CMD_RNDIN:
case NAND_CMD_STATUS:
- case NAND_CMD_DEPLETE1:
case NAND_CMD_RNDOUT:
- case NAND_CMD_STATUS_ERROR:
- case NAND_CMD_STATUS_ERROR0:
- case NAND_CMD_STATUS_ERROR1:
- case NAND_CMD_STATUS_ERROR2:
- case NAND_CMD_STATUS_ERROR3:
cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
return;
}
@@ -364,25 +358,27 @@ static int cafe_nand_write_oob(struct mtd_info *mtd,
/* Don't use -- use nand_read_oob_std for now */
static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 1;
+ return 0;
}
/**
- * cafe_nand_read_page_syndrome - {REPLACABLE] hardware ecc syndrom based page read
+ * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller expects OOB data read to chip->oob_poi
*
- * The hw generator calculates the error syndrome automatically. Therefor
+ * The hw generator calculates the error syndrome automatically. Therefore
* we need a special oob layout and handling.
*/
static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
struct cafe_priv *cafe = mtd->priv;
+ unsigned int max_bitflips = 0;
cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
cafe_readl(cafe, NAND_ECC_RESULT),
@@ -449,10 +445,11 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
} else {
dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
mtd->ecc_stats.corrected += n;
+ max_bitflips = max_t(unsigned int, max_bitflips, n);
}
}
- return 0;
+ return max_bitflips;
}
static struct nand_ecclayout cafe_oobinfo_2048 = {
@@ -517,8 +514,9 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
};
-static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
+static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
struct cafe_priv *cafe = mtd->priv;
@@ -527,19 +525,25 @@ static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
/* Set up ECC autogeneration */
cafe->ctl2 |= (1<<30);
+
+ return 0;
}
static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int page, int cached, int raw)
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw)
{
int status;
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
if (unlikely(raw))
- chip->ecc.write_page_raw(mtd, chip, buf);
+ status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
else
- chip->ecc.write_page(mtd, chip, buf);
+ status = chip->ecc.write_page(mtd, chip, buf, oob_required);
+
+ if (status < 0)
+ return status;
/*
* Cached progamming disabled for now, Not sure if its worth the
@@ -566,13 +570,6 @@ static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
status = chip->waitfunc(mtd, chip);
}
-#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
- /* Send command to read back the data */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
- if (chip->verify_buf(mtd, buf, mtd->writesize))
- return -EIO;
-#endif
return 0;
}
@@ -582,7 +579,7 @@ static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
}
/* F_2[X]/(X**6+X+1) */
-static unsigned short __devinit gf64_mul(u8 a, u8 b)
+static unsigned short gf64_mul(u8 a, u8 b)
{
u8 c;
unsigned int i;
@@ -601,7 +598,7 @@ static unsigned short __devinit gf64_mul(u8 a, u8 b)
}
/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */
-static u16 __devinit gf4096_mul(u16 a, u16 b)
+static u16 gf4096_mul(u16 a, u16 b)
{
u8 ah, al, bh, bl, ch, cl;
@@ -616,22 +613,22 @@ static u16 __devinit gf4096_mul(u16 a, u16 b)
return (ch << 6) ^ cl;
}
-static int __devinit cafe_mul(int x)
+static int cafe_mul(int x)
{
if (x == 0)
return 1;
return gf4096_mul(x, 0xe01);
}
-static int __devinit cafe_nand_probe(struct pci_dev *pdev,
+static int cafe_nand_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct mtd_info *mtd;
struct cafe_priv *cafe;
uint32_t ctrl;
int err = 0;
- struct mtd_partition *parts;
- int nr_parts;
+ int old_dma;
+ struct nand_buffers *nbuf;
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
@@ -645,10 +642,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
pci_set_master(pdev);
mtd = kzalloc(sizeof(*mtd) + sizeof(struct cafe_priv), GFP_KERNEL);
- if (!mtd) {
- dev_warn(&pdev->dev, "failed to alloc mtd_info\n");
+ if (!mtd)
return -ENOMEM;
- }
cafe = (void *)(&mtd[1]);
mtd->dev.parent = &pdev->dev;
@@ -662,13 +657,6 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
err = -ENOMEM;
goto out_free_mtd;
}
- cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112 + sizeof(struct nand_buffers),
- &cafe->dmaaddr, GFP_KERNEL);
- if (!cafe->dmabuf) {
- err = -ENOMEM;
- goto out_ior;
- }
- cafe->nand.buffers = (void *)cafe->dmabuf + 2112;
cafe->rs = init_rs_non_canonical(12, &cafe_mul, 0, 1, 8);
if (!cafe->rs) {
@@ -686,7 +674,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
cafe->nand.chip_delay = 0;
/* Enable the following for a flash based bad block table */
- cafe->nand.options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
+ cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
+ cafe->nand.options = NAND_OWN_BUFFERS;
if (skipbbt) {
cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -727,7 +716,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
"CAFE NAND", mtd);
if (err) {
dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
- goto out_free_dma;
+ goto out_ior;
}
/* Disable master reset, enable NAND clock */
@@ -741,6 +730,32 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
+ /* Enable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
+ cafe_dev_dbg(&cafe->pdev->dev, "Control %x, IRQ mask %x\n",
+ cafe_readl(cafe, GLOBAL_CTRL),
+ cafe_readl(cafe, GLOBAL_IRQ_MASK));
+
+ /* Do not use the DMA for the nand_scan_ident() */
+ old_dma = usedma;
+ usedma = 0;
+
+ /* Scan to find existence of the device */
+ if (nand_scan_ident(mtd, 2, NULL)) {
+ err = -ENXIO;
+ goto out_irq;
+ }
+
+ cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev,
+ 2112 + sizeof(struct nand_buffers) +
+ mtd->writesize + mtd->oobsize,
+ &cafe->dmaaddr, GFP_KERNEL);
+ if (!cafe->dmabuf) {
+ err = -ENOMEM;
+ goto out_irq;
+ }
+ cafe->nand.buffers = nbuf = (void *)cafe->dmabuf + 2112;
+
/* Set up DMA address */
cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
if (sizeof(cafe->dmaaddr) > 4)
@@ -752,16 +767,13 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
- /* Enable NAND IRQ in global IRQ mask register */
- cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
- cafe_dev_dbg(&cafe->pdev->dev, "Control %x, IRQ mask %x\n",
- cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK));
+ /* this driver does not need the @ecccalc and @ecccode */
+ nbuf->ecccalc = NULL;
+ nbuf->ecccode = NULL;
+ nbuf->databuf = (uint8_t *)(nbuf + 1);
- /* Scan to find existence of the device */
- if (nand_scan_ident(mtd, 2, NULL)) {
- err = -ENXIO;
- goto out_irq;
- }
+ /* Restore the DMA flag */
+ usedma = old_dma;
cafe->ctl2 = 1<<27; /* Reed-Solomon ECC */
if (mtd->writesize == 2048)
@@ -779,11 +791,12 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
} else {
printk(KERN_WARNING "Unexpected NAND flash writesize %d. Aborting\n",
mtd->writesize);
- goto out_irq;
+ goto out_free_dma;
}
cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
cafe->nand.ecc.size = mtd->writesize;
cafe->nand.ecc.bytes = 14;
+ cafe->nand.ecc.strength = 4;
cafe->nand.ecc.hwctl = (void *)cafe_nand_bug;
cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
cafe->nand.ecc.correct = (void *)cafe_nand_bug;
@@ -795,30 +808,24 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
err = nand_scan_tail(mtd);
if (err)
- goto out_irq;
+ goto out_free_dma;
pci_set_drvdata(pdev, mtd);
- /* We register the whole device first, separate from the partitions */
- mtd_device_register(mtd, NULL, 0);
-
-#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "cafe_nand";
-#endif
- nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
- if (nr_parts > 0) {
- cafe->parts = parts;
- dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts);
- mtd_device_register(mtd, parts, nr_parts);
- }
+ mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
+
goto out;
+ out_free_dma:
+ dma_free_coherent(&cafe->pdev->dev,
+ 2112 + sizeof(struct nand_buffers) +
+ mtd->writesize + mtd->oobsize,
+ cafe->dmabuf, cafe->dmaaddr);
out_irq:
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
free_irq(pdev->irq, mtd);
- out_free_dma:
- dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
out_ior:
pci_iounmap(pdev, cafe->mmio);
out_free_mtd:
@@ -827,7 +834,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
return err;
}
-static void __devexit cafe_nand_remove(struct pci_dev *pdev)
+static void cafe_nand_remove(struct pci_dev *pdev)
{
struct mtd_info *mtd = pci_get_drvdata(pdev);
struct cafe_priv *cafe = mtd->priv;
@@ -838,7 +845,10 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
nand_release(mtd);
free_rs(cafe->rs);
pci_iounmap(pdev, cafe->mmio);
- dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
+ dma_free_coherent(&cafe->pdev->dev,
+ 2112 + sizeof(struct nand_buffers) +
+ mtd->writesize + mtd->oobsize,
+ cafe->dmabuf, cafe->dmaaddr);
kfree(mtd);
}
@@ -893,21 +903,11 @@ static struct pci_driver cafe_nand_pci_driver = {
.name = "CAFÉ NAND",
.id_table = cafe_nand_tbl,
.probe = cafe_nand_probe,
- .remove = __devexit_p(cafe_nand_remove),
+ .remove = cafe_nand_remove,
.resume = cafe_nand_resume,
};
-static int __init cafe_nand_init(void)
-{
- return pci_register_driver(&cafe_nand_pci_driver);
-}
-
-static void __exit cafe_nand_exit(void)
-{
- pci_unregister_driver(&cafe_nand_pci_driver);
-}
-module_init(cafe_nand_init);
-module_exit(cafe_nand_exit);
+module_pci_driver(cafe_nand_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 6fc043a30d1..66ec95e6ca6 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -22,6 +22,7 @@
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -50,8 +51,6 @@ static struct mtd_partition partition_info[] = {
};
#define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
-const char *part_probes[] = { "cmdlinepart", NULL };
-
static u_char cmx270_read_byte(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
@@ -77,18 +76,6 @@ static void cmx270_read_buf(struct mtd_info *mtd, u_char *buf, int len)
*buf++ = readl(this->IO_ADDR_R) >> 16;
}
-static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i=0; i<len; i++)
- if (buf[i] != (u_char)(readl(this->IO_ADDR_R) >> 16))
- return -EFAULT;
-
- return 0;
-}
-
static inline void nand_cs_on(void)
{
gpio_set_value(GPIO_NAND_CS, 0);
@@ -151,9 +138,6 @@ static int cmx270_device_ready(struct mtd_info *mtd)
static int __init cmx270_init(void)
{
struct nand_chip *this;
- const char *part_type;
- struct mtd_partition *mtd_parts;
- int mtd_parts_nb = 0;
int ret;
if (!(machine_is_armcore() && cpu_is_pxa27x()))
@@ -180,7 +164,6 @@ static int __init cmx270_init(void)
sizeof(struct nand_chip),
GFP_KERNEL);
if (!cmx270_nand_mtd) {
- pr_debug("Unable to allocate CM-X270 NAND MTD device structure.\n");
ret = -ENOMEM;
goto err_kzalloc;
}
@@ -213,7 +196,6 @@ static int __init cmx270_init(void)
this->read_byte = cmx270_read_byte;
this->read_buf = cmx270_read_buf;
this->write_buf = cmx270_write_buf;
- this->verify_buf = cmx270_verify_buf;
/* Scan to find existence of the device */
if (nand_scan (cmx270_nand_mtd, 1)) {
@@ -222,23 +204,9 @@ static int __init cmx270_init(void)
goto err_scan;
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- mtd_parts_nb = parse_mtd_partitions(cmx270_nand_mtd, part_probes,
- &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-#endif
- if (!mtd_parts_nb) {
- mtd_parts = partition_info;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
-
/* Register the partitions */
- pr_notice("Using %s partition definition\n", part_type);
- ret = mtd_device_register(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
+ ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL,
+ partition_info, NUM_PARTITIONS);
if (ret)
goto err_scan;
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index f59ad1f2d5d..88109d375ae 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -197,9 +197,8 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
}
/* Allocate memory for MTD device structure and private data */
- new_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+ new_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
if (!new_mtd) {
- printk(KERN_WARNING "Unable to allocate CS553X NAND MTD device structure.\n");
err = -ENOMEM;
goto out;
}
@@ -207,10 +206,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
/* Get pointer to private data */
this = (struct nand_chip *)(&new_mtd[1]);
- /* Initialize structures */
- memset(new_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
/* Link the private data with the MTD structure */
new_mtd->priv = this;
new_mtd->owner = THIS_MODULE;
@@ -237,9 +232,10 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
this->ecc.hwctl = cs_enable_hwecc;
this->ecc.calculate = cs_calculate_ecc;
this->ecc.correct = nand_correct_data;
+ this->ecc.strength = 1;
/* Enable the following for a flash based bad block table */
- this->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
+ this->bbt_options = NAND_BBT_USE_FLASH;
/* Scan to find existence of the device */
if (nand_scan(new_mtd, 1)) {
@@ -277,15 +273,11 @@ static int is_geode(void)
return 0;
}
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
static int __init cs553x_init(void)
{
int err = -ENXIO;
int i;
uint64_t val;
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = NULL;
/* If the CPU isn't a Geode GX or LX, abort */
if (!is_geode())
@@ -315,13 +307,9 @@ static int __init cs553x_init(void)
do mtdconcat etc. if we want to. */
for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
if (cs553x_mtd[i]) {
-
/* If any devices registered, return success. Else the last error. */
- mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- printk(KERN_NOTICE "Using command line partition definition\n");
- mtd_device_register(cs553x_mtd[i], mtd_parts,
- mtd_parts_nb);
+ mtd_device_parse_register(cs553x_mtd[i], NULL, NULL,
+ NULL, 0);
err = 0;
}
}
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 1f34951ae1a..b922c8efcf4 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -24,7 +24,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
@@ -33,9 +32,12 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_mtd.h>
-#include <mach/nand.h>
-#include <mach/aemif.h>
+#include <linux/platform_data/mtd-davinci.h>
+#include <linux/platform_data/mtd-davinci-aemif.h>
/*
* This is a device driver for the NAND flash controller found on the
@@ -57,7 +59,6 @@ struct davinci_nand_info {
struct device *dev;
struct clk *clk;
- bool partitioned;
bool is_readmode;
@@ -486,7 +487,7 @@ static int nand_davinci_dev_ready(struct mtd_info *mtd)
* ten ECC bytes plus the manufacturer's bad block marker byte, and
* and not overlapping the default BBT markers.
*/
-static struct nand_ecclayout hwecc4_small __initconst = {
+static struct nand_ecclayout hwecc4_small = {
.eccbytes = 10,
.eccpos = { 0, 1, 2, 3, 4,
/* offset 5 holds the badblock marker */
@@ -502,7 +503,7 @@ static struct nand_ecclayout hwecc4_small __initconst = {
* storing ten ECC bytes plus the manufacturer's bad block marker byte,
* and not overlapping the default BBT markers.
*/
-static struct nand_ecclayout hwecc4_2048 __initconst = {
+static struct nand_ecclayout hwecc4_2048 = {
.eccbytes = 40,
.eccpos = {
/* at the end of spare sector */
@@ -519,9 +520,88 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {
},
};
-static int __init nand_davinci_probe(struct platform_device *pdev)
+#if defined(CONFIG_OF)
+static const struct of_device_id davinci_nand_of_match[] = {
+ {.compatible = "ti,davinci-nand", },
+ {.compatible = "ti,keystone-nand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
+
+static struct davinci_nand_pdata
+ *nand_davinci_get_pdata(struct platform_device *pdev)
+{
+ if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
+ struct davinci_nand_pdata *pdata;
+ const char *mode;
+ u32 prop;
+
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct davinci_nand_pdata),
+ GFP_KERNEL);
+ pdev->dev.platform_data = pdata;
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-chipselect", &prop))
+ pdev->id = prop;
+ else
+ return ERR_PTR(-EINVAL);
+
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-ale", &prop))
+ pdata->mask_ale = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-cle", &prop))
+ pdata->mask_cle = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-chipsel", &prop))
+ pdata->mask_chipsel = prop;
+ if (!of_property_read_string(pdev->dev.of_node,
+ "nand-ecc-mode", &mode) ||
+ !of_property_read_string(pdev->dev.of_node,
+ "ti,davinci-ecc-mode", &mode)) {
+ if (!strncmp("none", mode, 4))
+ pdata->ecc_mode = NAND_ECC_NONE;
+ if (!strncmp("soft", mode, 4))
+ pdata->ecc_mode = NAND_ECC_SOFT;
+ if (!strncmp("hw", mode, 2))
+ pdata->ecc_mode = NAND_ECC_HW;
+ }
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-ecc-bits", &prop))
+ pdata->ecc_bits = prop;
+
+ prop = of_get_nand_bus_width(pdev->dev.of_node);
+ if (0 < prop || !of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-nand-buswidth", &prop))
+ if (prop == 16)
+ pdata->options |= NAND_BUSWIDTH_16;
+ if (of_property_read_bool(pdev->dev.of_node,
+ "nand-on-flash-bbt") ||
+ of_property_read_bool(pdev->dev.of_node,
+ "ti,davinci-nand-use-bbt"))
+ pdata->bbt_options = NAND_BBT_USE_FLASH;
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "ti,keystone-nand")) {
+ pdata->options |= NAND_NO_SUBPAGE_WRITE;
+ }
+ }
+
+ return dev_get_platdata(&pdev->dev);
+}
+#else
+static struct davinci_nand_pdata
+ *nand_davinci_get_pdata(struct platform_device *pdev)
+{
+ return dev_get_platdata(&pdev->dev);
+}
+#endif
+
+static int nand_davinci_probe(struct platform_device *pdev)
{
- struct davinci_nand_pdata *pdata = pdev->dev.platform_data;
+ struct davinci_nand_pdata *pdata;
struct davinci_nand_info *info;
struct resource *res1;
struct resource *res2;
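
Aside (illustrative sketch, not part of this patch): nand_davinci_get_pdata() above treats most DT properties as optional by assigning a field only when of_property_read_u32() returns 0. The same pattern in isolation, with a hypothetical helper name and assuming <linux/of.h>:

static u32 dt_u32_or_default(struct device_node *np, const char *name, u32 def)
{
	u32 val;

	/* of_property_read_u32() returns 0 on success, so fall back to the
	 * caller's default when the property is absent or malformed. */
	if (of_property_read_u32(np, name, &val))
		return def;
	return val;
}
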
@@ -530,8 +610,10 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
int ret;
uint32_t val;
nand_ecc_modes_t ecc_mode;
- struct mtd_partition *mtd_parts = NULL;
- int mtd_parts_nb = 0;
+
+ pdata = nand_davinci_get_pdata(pdev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
/* insist on board-specific configuration */
if (!pdata)
@@ -541,12 +623,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
if (pdev->id < 0 || pdev->id > 3)
return -ENODEV;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- dev_err(&pdev->dev, "unable to allocate memory\n");
- ret = -ENOMEM;
- goto err_nomem;
- }
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
platform_set_drvdata(pdev, info);
@@ -554,16 +633,23 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res1 || !res2) {
dev_err(&pdev->dev, "resource missing\n");
- ret = -EINVAL;
- goto err_nomem;
+ return -EINVAL;
}
- vaddr = ioremap(res1->start, resource_size(res1));
- base = ioremap(res2->start, resource_size(res2));
- if (!vaddr || !base) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -EINVAL;
- goto err_ioremap;
+ vaddr = devm_ioremap_resource(&pdev->dev, res1);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ /*
+ * This register range is used to set up NAND settings. When the TI AEMIF
+ * driver is in use, the same memory address range has already been requested
+ * by AEMIF, so we cannot request it twice; just ioremap it. The AEMIF and
+ * NAND drivers do not use the same registers within this range.
+ */
+ base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
+ return -EADDRNOTAVAIL;
}
info->dev = &pdev->dev;
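
Aside (illustrative, not part of this patch): the two mappings above differ because devm_ioremap_resource() both requests and maps the region, while devm_ioremap() only maps it. A minimal sketch of that distinction, reusing the pdev/res1/res2 names from the probe above:

	/* Exclusive claim + map; fails (e.g. -EBUSY) if someone already owns it. */
	void __iomem *regs = devm_ioremap_resource(&pdev->dev, res1);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* Map only; appropriate when another driver (AEMIF here) has already
	 * requested the region and we just need our own view of it. */
	void __iomem *shared = devm_ioremap(&pdev->dev, res2->start,
					    resource_size(res2));
	if (!shared)
		return -ENOMEM;
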
@@ -581,7 +667,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.chip_delay = 0;
info->chip.select_chip = nand_davinci_select_chip;
- /* options such as NAND_USE_FLASH_BBT or 16-bit widths */
+ /* options such as NAND_BBT_USE_FLASH */
+ info->chip.bbt_options = pdata->bbt_options;
+ /* options such as 16-bit widths */
info->chip.options = pdata->options;
info->chip.bbt_td = pdata->bbt_td;
info->chip.bbt_md = pdata->bbt_md;
@@ -629,7 +717,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
if (ret == -EBUSY)
- goto err_ecc;
+ return ret;
info->chip.ecc.calculate = nand_davinci_calculate_4bit;
info->chip.ecc.correct = nand_davinci_correct_4bit;
@@ -642,47 +730,27 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.ecc.bytes = 3;
}
info->chip.ecc.size = 512;
+ info->chip.ecc.strength = pdata->ecc_bits;
break;
default:
- ret = -EINVAL;
- goto err_ecc;
+ return -EINVAL;
}
info->chip.ecc.mode = ecc_mode;
- info->clk = clk_get(&pdev->dev, "aemif");
+ info->clk = devm_clk_get(&pdev->dev, "aemif");
if (IS_ERR(info->clk)) {
ret = PTR_ERR(info->clk);
dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
- goto err_clk;
+ return ret;
}
- ret = clk_enable(info->clk);
+ ret = clk_prepare_enable(info->clk);
if (ret < 0) {
dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
ret);
goto err_clk_enable;
}
- /*
- * Setup Async configuration register in case we did not boot from
- * NAND and so bootloader did not bother to set it up.
- */
- val = davinci_nand_readl(info, A1CR_OFFSET + info->core_chipsel * 4);
-
- /* Extended Wait is not valid and Select Strobe mode is not used */
- val &= ~(ACR_ASIZE_MASK | ACR_EW_MASK | ACR_SS_MASK);
- if (info->chip.options & NAND_BUSWIDTH_16)
- val |= 0x1;
-
- davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val);
-
- ret = davinci_aemif_setup_timing(info->timing, info->base,
- info->core_chipsel);
- if (ret < 0) {
- dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
- goto err_timing;
- }
-
spin_lock_irq(&davinci_nand_lock);
/* put CSxNAND into NAND mode */
@@ -696,7 +764,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
- goto err_scan;
+ goto err;
}
/* Update ECC layout if needed ... for 1-bit HW ECC, the default
@@ -710,7 +778,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
if (!chunks || info->mtd.oobsize < 16) {
dev_dbg(&pdev->dev, "too small\n");
ret = -EINVAL;
- goto err_scan;
+ goto err;
}
/* For small page chips, preserve the manufacturer's
@@ -741,7 +809,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "no 4-bit ECC support yet "
"for 4KiB-page NAND\n");
ret = -EIO;
- goto err_scan;
+ goto err;
syndrome_done:
info->chip.ecc.layout = &info->ecclayout;
@@ -749,38 +817,20 @@ syndrome_done:
ret = nand_scan_tail(&info->mtd);
if (ret < 0)
- goto err_scan;
-
- if (mtd_has_cmdlinepart()) {
- static const char *probes[] __initconst = {
- "cmdlinepart", NULL
- };
+ goto err;
- mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
- &mtd_parts, 0);
- }
+ if (pdata->parts)
+ ret = mtd_device_parse_register(&info->mtd, NULL, NULL,
+ pdata->parts, pdata->nr_parts);
+ else {
+ struct mtd_part_parser_data ppdata;
- if (mtd_parts_nb <= 0) {
- mtd_parts = pdata->parts;
- mtd_parts_nb = pdata->nr_parts;
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(&info->mtd, NULL, &ppdata,
+ NULL, 0);
}
-
- /* Register any partitions */
- if (mtd_parts_nb > 0) {
- ret = mtd_device_register(&info->mtd, mtd_parts,
- mtd_parts_nb);
- if (ret == 0)
- info->partitioned = true;
- }
-
- /* If there's no partition info, just package the whole chip
- * as a single MTD device.
- */
- if (!info->partitioned)
- ret = mtd_device_register(&info->mtd, NULL, 0) ? -ENODEV : 0;
-
if (ret < 0)
- goto err_scan;
+ goto err;
val = davinci_nand_readl(info, NRCSR_OFFSET);
dev_info(&pdev->dev, "controller rev. %d.%d\n",
@@ -788,75 +838,45 @@ syndrome_done:
return 0;
-err_scan:
-err_timing:
- clk_disable(info->clk);
+err:
+ clk_disable_unprepare(info->clk);
err_clk_enable:
- clk_put(info->clk);
-
spin_lock_irq(&davinci_nand_lock);
if (ecc_mode == NAND_ECC_HW_SYNDROME)
ecc4_busy = false;
spin_unlock_irq(&davinci_nand_lock);
-
-err_ecc:
-err_clk:
-err_ioremap:
- if (base)
- iounmap(base);
- if (vaddr)
- iounmap(vaddr);
-
-err_nomem:
- kfree(info);
return ret;
}
-static int __exit nand_davinci_remove(struct platform_device *pdev)
+static int nand_davinci_remove(struct platform_device *pdev)
{
struct davinci_nand_info *info = platform_get_drvdata(pdev);
- int status;
-
- status = mtd_device_unregister(&info->mtd);
spin_lock_irq(&davinci_nand_lock);
if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
ecc4_busy = false;
spin_unlock_irq(&davinci_nand_lock);
- iounmap(info->base);
- iounmap(info->vaddr);
-
nand_release(&info->mtd);
- clk_disable(info->clk);
- clk_put(info->clk);
-
- kfree(info);
+ clk_disable_unprepare(info->clk);
return 0;
}
static struct platform_driver nand_davinci_driver = {
- .remove = __exit_p(nand_davinci_remove),
+ .probe = nand_davinci_probe,
+ .remove = nand_davinci_remove,
.driver = {
.name = "davinci_nand",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(davinci_nand_of_match),
},
};
MODULE_ALIAS("platform:davinci_nand");
-static int __init nand_davinci_init(void)
-{
- return platform_driver_probe(&nand_davinci_driver, nand_davinci_probe);
-}
-module_init(nand_davinci_init);
-
-static void __exit nand_davinci_exit(void)
-{
- platform_driver_unregister(&nand_davinci_driver);
-}
-module_exit(nand_davinci_exit);
+module_platform_driver(nand_davinci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index d5276218945..9f2012a3e76 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -16,14 +16,12 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
-
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/pci.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
@@ -89,13 +87,6 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
* format the bank into the proper bits for the controller */
#define BANK(x) ((x) << 24)
-/* List of platforms this NAND controller has be integrated into */
-static const struct pci_device_id denali_pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
- { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
- { /* end: all zeroes */ }
-};
-
/* forward declarations */
static void clear_interrupts(struct denali_nand_info *denali);
static uint32_t wait_for_irq(struct denali_nand_info *denali,
@@ -134,7 +125,6 @@ static void reset_buf(struct denali_nand_info *denali)
static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
{
- BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));
denali->buf.buf[denali->buf.tail++] = byte;
}
@@ -699,7 +689,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
if (comp_res == 0) {
/* timeout */
- printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
+ pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
intr_status, irq_mask);
intr_status = 0;
@@ -906,7 +896,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
/* this function examines buffers to see if they contain data that
* indicate that the buffer is part of an erased region of flash.
*/
-bool is_erased(uint8_t *buf, int len)
+static bool is_erased(uint8_t *buf, int len)
{
int i = 0;
for (i = 0; i < len; i++)
@@ -924,9 +914,10 @@ bool is_erased(uint8_t *buf, int len)
#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
- uint32_t irq_status)
+ uint32_t irq_status, unsigned int *max_bitflips)
{
bool check_erased_page = false;
+ unsigned int bitflips = 0;
if (irq_status & INTR_STATUS__ECC_ERR) {
/* read the ECC errors. we'll ignore them for now */
@@ -965,6 +956,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
/* correct the ECC error */
buf[offset] ^= err_correction_value;
denali->mtd.ecc_stats.corrected++;
+ bitflips++;
}
} else {
/* if the error is not correctable, need to
@@ -984,6 +976,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
clear_interrupts(denali);
denali_set_intr_modes(denali, true);
}
+ *max_bitflips = bitflips;
return check_erased_page;
}
@@ -1025,7 +1018,7 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op)
/* writes a page. user specifies type, and this function handles the
* configuration details. */
-static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
+static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, bool raw_xfer)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
@@ -1075,6 +1068,8 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
denali_enable_dma(denali, false);
dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
+
+ return 0;
}
/* NAND core entry points */
@@ -1083,24 +1078,24 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
* writing a page with ECC or without is similar, all the work is done
* by write_page above.
* */
-static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
/* for regular page writes, we let HW handle all the ECC
* data written to the device. */
- write_page(mtd, chip, buf, false);
+ return write_page(mtd, chip, buf, false);
}
/* This is the callback that the NAND core calls to write a page without ECC.
* raw access is similar to ECC page writes, so all the work is done in the
* write_page() function above.
*/
-static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
/* for raw page writes, we want to disable ECC and simply write
whatever data is in the buffer. */
- write_page(mtd, chip, buf, true);
+ return write_page(mtd, chip, buf, true);
}
static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
@@ -1110,17 +1105,17 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
}
static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
read_oob_data(mtd, chip->oob_poi, page);
- return 0; /* notify NAND core to send command to
- NAND device. */
+ return 0;
}
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
+ unsigned int max_bitflips;
struct denali_nand_info *denali = mtd_to_denali(mtd);
dma_addr_t addr = denali->buf.dma_buf;
@@ -1153,7 +1148,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
memcpy(buf, denali->buf.buf, mtd->writesize);
- check_erased_page = handle_ecc(denali, buf, irq_status);
+ check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
denali_enable_dma(denali, false);
if (check_erased_page) {
@@ -1167,11 +1162,11 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
denali->mtd.ecc_stats.failed++;
}
}
- return 0;
+ return max_bitflips;
}
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
@@ -1238,7 +1233,7 @@ static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
return status;
}
-static void denali_erase(struct mtd_info *mtd, int page)
+static int denali_erase(struct mtd_info *mtd, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
@@ -1255,8 +1250,7 @@ static void denali_erase(struct mtd_info *mtd, int page)
irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
INTR_STATUS__ERASE_FAIL);
- denali->status = (irq_status & INTR_STATUS__ERASE_FAIL) ?
- NAND_STATUS_FAIL : PASS;
+ return (irq_status & INTR_STATUS__ERASE_FAIL) ? NAND_STATUS_FAIL : PASS;
}
static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
@@ -1300,8 +1294,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
/* TODO: Read OOB data */
break;
default:
- printk(KERN_ERR ": unsupported command"
- " received 0x%x\n", cmd);
+ pr_err(": unsupported command received 0x%x\n", cmd);
break;
}
}
@@ -1346,6 +1339,7 @@ static void denali_hw_init(struct denali_nand_info *denali)
* */
denali->bbtskipbytes = ioread32(denali->flash_reg +
SPARE_AREA_SKIP_BYTES);
+ detect_max_banks(denali);
denali_nand_reset(denali);
iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
iowrite32(CHIP_EN_DONT_CARE__FLAG,
@@ -1356,7 +1350,6 @@ static void denali_hw_init(struct denali_nand_info *denali)
/* Should set value for these registers when init */
iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
iowrite32(1, denali->flash_reg + ECC_ENABLE);
- detect_max_banks(denali);
denali_nand_timing_set(denali);
denali_irq_init(denali);
}
@@ -1399,7 +1392,7 @@ static struct nand_bbt_descr bbt_mirror_descr = {
};
/* initialize driver data structures */
-void denali_drv_init(struct denali_nand_info *denali)
+static void denali_drv_init(struct denali_nand_info *denali)
{
denali->idx = 0;
@@ -1420,107 +1413,40 @@ void denali_drv_init(struct denali_nand_info *denali)
denali->irq_status = 0;
}
-/* driver entry point */
-static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+int denali_init(struct denali_nand_info *denali)
{
- int ret = -ENODEV;
- resource_size_t csr_base, mem_base;
- unsigned long csr_len, mem_len;
- struct denali_nand_info *denali;
-
- denali = kzalloc(sizeof(*denali), GFP_KERNEL);
- if (!denali)
- return -ENOMEM;
-
- ret = pci_enable_device(dev);
- if (ret) {
- printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
- goto failed_alloc_memery;
- }
+ int ret;
- if (id->driver_data == INTEL_CE4100) {
+ if (denali->platform == INTEL_CE4100) {
/* Due to a silicon limitation, we can only support
* ONFI timing mode 1 and below.
*/
if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
- printk(KERN_ERR "Intel CE4100 only supports"
- " ONFI timing mode 1 or below\n");
- ret = -EINVAL;
- goto failed_enable_dev;
- }
- denali->platform = INTEL_CE4100;
- mem_base = pci_resource_start(dev, 0);
- mem_len = pci_resource_len(dev, 1);
- csr_base = pci_resource_start(dev, 1);
- csr_len = pci_resource_len(dev, 1);
- } else {
- denali->platform = INTEL_MRST;
- csr_base = pci_resource_start(dev, 0);
- csr_len = pci_resource_len(dev, 0);
- mem_base = pci_resource_start(dev, 1);
- mem_len = pci_resource_len(dev, 1);
- if (!mem_len) {
- mem_base = csr_base + csr_len;
- mem_len = csr_len;
+ pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n");
+ return -EINVAL;
}
}
- /* Is 32-bit DMA supported? */
- ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
- if (ret) {
- printk(KERN_ERR "Spectra: no usable DMA configuration\n");
- goto failed_enable_dev;
- }
- denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
- DENALI_BUF_SIZE,
- DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
- dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
- goto failed_enable_dev;
- }
-
- pci_set_master(dev);
- denali->dev = &dev->dev;
- denali->mtd.dev.parent = &dev->dev;
-
- ret = pci_request_regions(dev, DENALI_NAND_NAME);
- if (ret) {
- printk(KERN_ERR "Spectra: Unable to request memory regions\n");
- goto failed_dma_map;
- }
-
- denali->flash_reg = ioremap_nocache(csr_base, csr_len);
- if (!denali->flash_reg) {
- printk(KERN_ERR "Spectra: Unable to remap memory region\n");
- ret = -ENOMEM;
- goto failed_req_regions;
- }
-
- denali->flash_mem = ioremap_nocache(mem_base, mem_len);
- if (!denali->flash_mem) {
- printk(KERN_ERR "Spectra: ioremap_nocache failed!");
- ret = -ENOMEM;
- goto failed_remap_reg;
- }
+ /* allocate a temporary buffer for nand_scan_ident() */
+ denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (!denali->buf.buf)
+ return -ENOMEM;
+ denali->mtd.dev.parent = denali->dev;
denali_hw_init(denali);
denali_drv_init(denali);
/* denali_isr registration is done after all the hardware
* initialization is finished */
- if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
+ if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
DENALI_NAND_NAME, denali)) {
- printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
- ret = -ENODEV;
- goto failed_remap_mem;
+ pr_err("Spectra: Unable to allocate IRQ\n");
+ return -ENODEV;
}
/* now that our ISR is registered, we can enable interrupts */
denali_set_intr_modes(denali, true);
-
- pci_set_drvdata(dev, denali);
-
denali->mtd.name = "denali-nand";
denali->mtd.owner = THIS_MODULE;
denali->mtd.priv = &denali->nand;
@@ -1539,13 +1465,29 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto failed_req_irq;
}
- /* MTD supported page sizes vary by kernel. We validate our
- * kernel supports the device here.
- */
- if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
- ret = -ENODEV;
- printk(KERN_ERR "Spectra: device size not supported by this "
- "version of MTD.");
+ /* allocate the right size buffer now */
+ devm_kfree(denali->dev, denali->buf.buf);
+ denali->buf.buf = devm_kzalloc(denali->dev,
+ denali->mtd.writesize + denali->mtd.oobsize,
+ GFP_KERNEL);
+ if (!denali->buf.buf) {
+ ret = -ENOMEM;
+ goto failed_req_irq;
+ }
+
+ /* Is 32-bit DMA supported? */
+ ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ pr_err("Spectra: no usable DMA configuration\n");
+ goto failed_req_irq;
+ }
+
+ denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
+ denali->mtd.writesize + denali->mtd.oobsize,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
+ dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
+ ret = -EIO;
goto failed_req_irq;
}
@@ -1577,34 +1519,38 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->nand.bbt_md = &bbt_mirror_descr;
/* skip the scan for now until we have OOB read and write support */
- denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
+ denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
+ denali->nand.options |= NAND_SKIP_BBTSCAN;
denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
/* Denali Controller only support 15bit and 8bit ECC in MRST,
* so just let controller do 15bit ECC for MLC and 8bit ECC for
* SLC if possible.
* */
- if (denali->nand.cellinfo & 0xc &&
+ if (!nand_is_slc(&denali->nand) &&
(denali->mtd.oobsize > (denali->bbtskipbytes +
ECC_15BITS * (denali->mtd.writesize /
ECC_SECTOR_SIZE)))) {
/* if MLC OOB size is large enough, use 15bit ECC*/
+ denali->nand.ecc.strength = 15;
denali->nand.ecc.layout = &nand_15bit_oob;
denali->nand.ecc.bytes = ECC_15BITS;
iowrite32(15, denali->flash_reg + ECC_CORRECTION);
} else if (denali->mtd.oobsize < (denali->bbtskipbytes +
ECC_8BITS * (denali->mtd.writesize /
ECC_SECTOR_SIZE))) {
- printk(KERN_ERR "Your NAND chip OOB is not large enough to"
- " contain 8bit ECC correction codes");
+ pr_err("Your NAND chip OOB is not large enough to \
+ contain 8bit ECC correction codes");
goto failed_req_irq;
} else {
+ denali->nand.ecc.strength = 8;
denali->nand.ecc.layout = &nand_8bit_oob;
denali->nand.ecc.bytes = ECC_8BITS;
iowrite32(8, denali->flash_reg + ECC_CORRECTION);
}
denali->nand.ecc.bytes *= denali->devnum;
+ denali->nand.ecc.strength *= denali->devnum;
denali->nand.ecc.layout->eccbytes *=
denali->mtd.writesize / ECC_SECTOR_SIZE;
denali->nand.ecc.layout->oobfree[0].offset =
@@ -1637,7 +1583,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->nand.ecc.write_page_raw = denali_write_page_raw;
denali->nand.ecc.read_oob = denali_read_oob;
denali->nand.ecc.write_oob = denali_write_oob;
- denali->nand.erase_cmd = denali_erase;
+ denali->nand.erase = denali_erase;
if (nand_scan_tail(&denali->mtd)) {
ret = -ENXIO;
@@ -1646,70 +1592,25 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
ret = mtd_device_register(&denali->mtd, NULL, 0);
if (ret) {
- dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
+ dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n",
ret);
goto failed_req_irq;
}
return 0;
failed_req_irq:
- denali_irq_cleanup(dev->irq, denali);
-failed_remap_mem:
- iounmap(denali->flash_mem);
-failed_remap_reg:
- iounmap(denali->flash_reg);
-failed_req_regions:
- pci_release_regions(dev);
-failed_dma_map:
- dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
- DMA_BIDIRECTIONAL);
-failed_enable_dev:
- pci_disable_device(dev);
-failed_alloc_memery:
- kfree(denali);
+ denali_irq_cleanup(denali->irq, denali);
+
return ret;
}
+EXPORT_SYMBOL(denali_init);
/* driver exit point */
-static void denali_pci_remove(struct pci_dev *dev)
+void denali_remove(struct denali_nand_info *denali)
{
- struct denali_nand_info *denali = pci_get_drvdata(dev);
-
- nand_release(&denali->mtd);
- mtd_device_unregister(&denali->mtd);
-
- denali_irq_cleanup(dev->irq, denali);
-
- iounmap(denali->flash_reg);
- iounmap(denali->flash_mem);
- pci_release_regions(dev);
- pci_disable_device(dev);
- dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
- DMA_BIDIRECTIONAL);
- pci_set_drvdata(dev, NULL);
- kfree(denali);
+ denali_irq_cleanup(denali->irq, denali);
+ dma_unmap_single(denali->dev, denali->buf.dma_buf,
+ denali->mtd.writesize + denali->mtd.oobsize,
+ DMA_BIDIRECTIONAL);
}
-
-MODULE_DEVICE_TABLE(pci, denali_pci_ids);
-
-static struct pci_driver denali_pci_driver = {
- .name = DENALI_NAND_NAME,
- .id_table = denali_pci_ids,
- .probe = denali_pci_probe,
- .remove = denali_pci_remove,
-};
-
-static int __devinit denali_init(void)
-{
- printk(KERN_INFO "Spectra MTD driver\n");
- return pci_register_driver(&denali_pci_driver);
-}
-
-/* Free memory */
-static void __devexit denali_exit(void)
-{
- pci_unregister_driver(&denali_pci_driver);
-}
-
-module_init(denali_init);
-module_exit(denali_exit);
+EXPORT_SYMBOL(denali_remove);
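
Aside (an assumption about the surrounding MTD/NAND core contract, not something shown in this diff): ecc.read_page implementations such as denali_read_page() now return the number of corrected bitflips for the page, and the upper layer turns that into a return code for mtd_read() roughly along these lines (assuming <linux/mtd/mtd.h>):

/* Minimal sketch; bitflip_threshold and ecc_stats are core MTD fields. */
static int sketch_read_rc(struct mtd_info *mtd, unsigned int max_bitflips)
{
	if (mtd->ecc_stats.failed)
		return -EBADMSG;	/* at least one uncorrectable sector */
	return max_bitflips >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
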
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index fabb9d56b39..96681746242 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -455,17 +455,16 @@
#define ECC_SECTOR_SIZE 512
-#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
-
struct nand_buf {
int head;
int tail;
- uint8_t buf[DENALI_BUF_SIZE];
+ uint8_t *buf;
dma_addr_t dma_buf;
};
#define INTEL_CE4100 1
#define INTEL_MRST 2
+#define DT 3
struct denali_nand_info {
struct mtd_info mtd;
@@ -487,6 +486,7 @@ struct denali_nand_info {
uint32_t irq_status;
int irq_debug_array[32];
int idx;
+ int irq;
uint32_t devnum; /* represent how many nands connected */
uint32_t fwblks; /* represent how many blocks FW used */
@@ -496,4 +496,7 @@ struct denali_nand_info {
uint32_t max_banks;
};
+extern int denali_init(struct denali_nand_info *denali);
+extern void denali_remove(struct denali_nand_info *denali);
+
#endif /*_LLD_NAND_*/
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
new file mode 100644
index 00000000000..35cb17f5780
--- /dev/null
+++ b/drivers/mtd/nand/denali_dt.c
@@ -0,0 +1,132 @@
+/*
+ * NAND Flash Controller Device Driver for DT
+ *
+ * Copyright © 2011, Picochip.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+#include "denali.h"
+
+struct denali_dt {
+ struct denali_nand_info denali;
+ struct clk *clk;
+};
+
+static const struct of_device_id denali_nand_dt_ids[] = {
+ { .compatible = "denali,denali-nand-dt" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
+
+static u64 denali_dma_mask;
+
+static int denali_dt_probe(struct platform_device *ofdev)
+{
+ struct resource *denali_reg, *nand_data;
+ struct denali_dt *dt;
+ struct denali_nand_info *denali;
+ int ret;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(denali_nand_dt_ids, &ofdev->dev);
+ if (of_id) {
+ ofdev->id_entry = of_id->data;
+ } else {
+ pr_err("Failed to find the right device id.\n");
+ return -ENODEV;
+ }
+
+ dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
+ if (!dt)
+ return -ENOMEM;
+ denali = &dt->denali;
+
+ denali->platform = DT;
+ denali->dev = &ofdev->dev;
+ denali->irq = platform_get_irq(ofdev, 0);
+ if (denali->irq < 0) {
+ dev_err(&ofdev->dev, "no irq defined\n");
+ return denali->irq;
+ }
+
+ denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg");
+ denali->flash_reg = devm_ioremap_resource(&ofdev->dev, denali_reg);
+ if (IS_ERR(denali->flash_reg))
+ return PTR_ERR(denali->flash_reg);
+
+ nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data");
+ denali->flash_mem = devm_ioremap_resource(&ofdev->dev, nand_data);
+ if (IS_ERR(denali->flash_mem))
+ return PTR_ERR(denali->flash_mem);
+
+ if (!of_property_read_u32(ofdev->dev.of_node,
+ "dma-mask", (u32 *)&denali_dma_mask)) {
+ denali->dev->dma_mask = &denali_dma_mask;
+ } else {
+ denali->dev->dma_mask = NULL;
+ }
+
+ dt->clk = devm_clk_get(&ofdev->dev, NULL);
+ if (IS_ERR(dt->clk)) {
+ dev_err(&ofdev->dev, "no clk available\n");
+ return PTR_ERR(dt->clk);
+ }
+ clk_prepare_enable(dt->clk);
+
+ ret = denali_init(denali);
+ if (ret)
+ goto out_disable_clk;
+
+ platform_set_drvdata(ofdev, dt);
+ return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(dt->clk);
+
+ return ret;
+}
+
+static int denali_dt_remove(struct platform_device *ofdev)
+{
+ struct denali_dt *dt = platform_get_drvdata(ofdev);
+
+ denali_remove(&dt->denali);
+ clk_disable_unprepare(dt->clk);
+
+ return 0;
+}
+
+static struct platform_driver denali_dt_driver = {
+ .probe = denali_dt_probe,
+ .remove = denali_dt_remove,
+ .driver = {
+ .name = "denali-nand-dt",
+ .owner = THIS_MODULE,
+ .of_match_table = denali_nand_dt_ids,
+ },
+};
+
+module_platform_driver(denali_dt_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("DT driver for Denali NAND controller");
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
new file mode 100644
index 00000000000..6e2f387b823
--- /dev/null
+++ b/drivers/mtd/nand/denali_pci.c
@@ -0,0 +1,142 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "denali.h"
+
+#define DENALI_NAND_NAME "denali-nand-pci"
+
+/* List of platforms this NAND controller has been integrated into */
+static const struct pci_device_id denali_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
+ { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, denali_pci_ids);
+
+static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int ret = -ENODEV;
+ resource_size_t csr_base, mem_base;
+ unsigned long csr_len, mem_len;
+ struct denali_nand_info *denali;
+
+ denali = kzalloc(sizeof(*denali), GFP_KERNEL);
+ if (!denali)
+ return -ENOMEM;
+
+ ret = pci_enable_device(dev);
+ if (ret) {
+ pr_err("Spectra: pci_enable_device failed.\n");
+ goto failed_alloc_memery;
+ }
+
+ if (id->driver_data == INTEL_CE4100) {
+ denali->platform = INTEL_CE4100;
+ mem_base = pci_resource_start(dev, 0);
+ mem_len = pci_resource_len(dev, 1);
+ csr_base = pci_resource_start(dev, 1);
+ csr_len = pci_resource_len(dev, 1);
+ } else {
+ denali->platform = INTEL_MRST;
+ csr_base = pci_resource_start(dev, 0);
+ csr_len = pci_resource_len(dev, 0);
+ mem_base = pci_resource_start(dev, 1);
+ mem_len = pci_resource_len(dev, 1);
+ if (!mem_len) {
+ mem_base = csr_base + csr_len;
+ mem_len = csr_len;
+ }
+ }
+
+ pci_set_master(dev);
+ denali->dev = &dev->dev;
+ denali->irq = dev->irq;
+
+ ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ if (ret) {
+ pr_err("Spectra: Unable to request memory regions\n");
+ goto failed_enable_dev;
+ }
+
+ denali->flash_reg = ioremap_nocache(csr_base, csr_len);
+ if (!denali->flash_reg) {
+ pr_err("Spectra: Unable to remap memory region\n");
+ ret = -ENOMEM;
+ goto failed_req_regions;
+ }
+
+ denali->flash_mem = ioremap_nocache(mem_base, mem_len);
+ if (!denali->flash_mem) {
+ pr_err("Spectra: ioremap_nocache failed!");
+ ret = -ENOMEM;
+ goto failed_remap_reg;
+ }
+
+ ret = denali_init(denali);
+ if (ret)
+ goto failed_remap_mem;
+
+ pci_set_drvdata(dev, denali);
+
+ return 0;
+
+failed_remap_mem:
+ iounmap(denali->flash_mem);
+failed_remap_reg:
+ iounmap(denali->flash_reg);
+failed_req_regions:
+ pci_release_regions(dev);
+failed_enable_dev:
+ pci_disable_device(dev);
+failed_alloc_memery:
+ kfree(denali);
+
+ return ret;
+}
+
+/* driver exit point */
+static void denali_pci_remove(struct pci_dev *dev)
+{
+ struct denali_nand_info *denali = pci_get_drvdata(dev);
+
+ denali_remove(denali);
+ iounmap(denali->flash_reg);
+ iounmap(denali->flash_mem);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ kfree(denali);
+}
+
+static struct pci_driver denali_pci_driver = {
+ .name = DENALI_NAND_NAME,
+ .id_table = denali_pci_ids,
+ .probe = denali_pci_probe,
+ .remove = denali_pci_remove,
+};
+
+static int denali_init_pci(void)
+{
+ return pci_register_driver(&denali_pci_driver);
+}
+module_init(denali_init_pci);
+
+static void denali_exit_pci(void)
+{
+ pci_unregister_driver(&denali_pci_driver);
+}
+module_exit(denali_exit_pci);
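
Aside (illustrative, not part of this patch): assuming this tree already provides the module_pci_driver() helper, the init/exit pair above could be collapsed to a single line:

module_pci_driver(denali_pci_driver);
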
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 7837728d02f..f68a7bccecd 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -31,13 +31,14 @@
#include <linux/mtd/doc2000.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/inftl.h>
+#include <linux/module.h>
/* Where to look for the devices? */
#ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS
#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
#endif
-static unsigned long __initdata doc_locations[] = {
+static unsigned long doc_locations[] __initdata = {
#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
#ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
@@ -45,15 +46,13 @@ static unsigned long __initdata doc_locations[] = {
0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
-#else /* CONFIG_MTD_DOCPROBE_HIGH */
+#else
0xc8000, 0xca000, 0xcc000, 0xce000,
0xd0000, 0xd2000, 0xd4000, 0xd6000,
0xd8000, 0xda000, 0xdc000, 0xde000,
0xe0000, 0xe2000, 0xe4000, 0xe6000,
0xe8000, 0xea000, 0xec000, 0xee000,
-#endif /* CONFIG_MTD_DOCPROBE_HIGH */
-#else
-#warning Unknown architecture for DiskOnChip. No default probe locations defined
+#endif
#endif
0xffffffff };
@@ -132,7 +131,7 @@ static struct rs_control *rs_decoder;
/*
* The HW decoder in the DoC ASIC's provides us a error syndrome,
- * which we must convert to a standard syndrom usable by the generic
+ * which we must convert to a standard syndrome usable by the generic
* Reed-Solomon library code.
*
* Fabrice Bellard figured this out in the old docecc code. I added
@@ -153,7 +152,7 @@ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
parity = ecc[1];
- /* Initialize the syndrom buffer */
+ /* Initialize the syndrome buffer */
for (i = 0; i < NROOTS; i++)
s[i] = ds[0];
/*
@@ -375,19 +374,6 @@ static void doc2000_readbuf_dword(struct mtd_info *mtd, u_char *buf, int len)
}
}
-static int doc2000_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = this->priv;
- void __iomem *docptr = doc->virtadr;
- int i;
-
- for (i = 0; i < len; i++)
- if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO))
- return -EFAULT;
- return 0;
-}
-
static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
{
struct nand_chip *this = mtd->priv;
@@ -525,26 +511,6 @@ static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len)
buf[i] = ReadDOC(docptr, LastDataRead);
}
-static int doc2001_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = this->priv;
- void __iomem *docptr = doc->virtadr;
- int i;
-
- /* Start read pipeline */
- ReadDOC(docptr, ReadPipeInit);
-
- for (i = 0; i < len - 1; i++)
- if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
- ReadDOC(docptr, LastDataRead);
- return i;
- }
- if (buf[i] != ReadDOC(docptr, LastDataRead))
- return i;
- return 0;
-}
-
static u_char doc2001plus_read_byte(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
@@ -609,33 +575,6 @@ static void doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len)
printk("\n");
}
-static int doc2001plus_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = this->priv;
- void __iomem *docptr = doc->virtadr;
- int i;
-
- if (debug)
- printk("verifybuf of %d bytes: ", len);
-
- /* Start read pipeline */
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
-
- for (i = 0; i < len - 2; i++)
- if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
- ReadDOC(docptr, Mplus_LastDataRead);
- ReadDOC(docptr, Mplus_LastDataRead);
- return i;
- }
- if (buf[len - 2] != ReadDOC(docptr, Mplus_LastDataRead))
- return len - 2;
- if (buf[len - 1] != ReadDOC(docptr, Mplus_LastDataRead))
- return len - 1;
- return 0;
-}
-
static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
{
struct nand_chip *this = mtd->priv;
@@ -759,7 +698,8 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu
/* Serially input address */
if (column != -1) {
/* Adjust columns for 16 bit buswidth */
- if (this->options & NAND_BUSWIDTH_16)
+ if (this->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
column >>= 1;
WriteDOC(column, docptr, Mplus_FlashAddress);
}
@@ -1031,7 +971,7 @@ static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
else
WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
- if (no_ecc_failures && (ret == -EBADMSG)) {
+ if (no_ecc_failures && mtd_is_eccerr(ret)) {
printk(KERN_ERR "suppressing ECC failure\n");
ret = 0;
}
@@ -1071,7 +1011,7 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const ch
size_t retlen;
for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
- ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
if (retlen != mtd->writesize)
continue;
if (ret) {
@@ -1096,7 +1036,7 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const ch
/* Only one mediaheader was found. We want buf to contain a
mediaheader on return, so we'll have to re-read the one we found. */
offs = doc->mh0_page << this->page_shift;
- ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
if (retlen != mtd->writesize) {
/* Insanity. Give up. */
printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
@@ -1119,7 +1059,6 @@ static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partitio
buf = kmalloc(mtd->writesize, GFP_KERNEL);
if (!buf) {
- printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
return 0;
}
if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
@@ -1227,7 +1166,6 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti
buf = kmalloc(mtd->writesize, GFP_KERNEL);
if (!buf) {
- printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
return 0;
}
@@ -1431,7 +1369,6 @@ static inline int __init doc2000_init(struct mtd_info *mtd)
this->read_byte = doc2000_read_byte;
this->write_buf = doc2000_writebuf;
this->read_buf = doc2000_readbuf;
- this->verify_buf = doc2000_verifybuf;
this->scan_bbt = nftl_scan_bbt;
doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
@@ -1448,7 +1385,6 @@ static inline int __init doc2001_init(struct mtd_info *mtd)
this->read_byte = doc2001_read_byte;
this->write_buf = doc2001_writebuf;
this->read_buf = doc2001_readbuf;
- this->verify_buf = doc2001_verifybuf;
ReadDOC(doc->virtadr, ChipID);
ReadDOC(doc->virtadr, ChipID);
@@ -1479,7 +1415,6 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd)
this->read_byte = doc2001plus_read_byte;
this->write_buf = doc2001plus_writebuf;
this->read_buf = doc2001plus_readbuf;
- this->verify_buf = doc2001plus_verifybuf;
this->scan_bbt = inftl_scan_bbt;
this->cmd_ctrl = NULL;
this->select_chip = doc2001plus_select_chip;
@@ -1504,10 +1439,13 @@ static int __init doc_probe(unsigned long physadr)
int reg, len, numchips;
int ret = 0;
+ if (!request_mem_region(physadr, DOC_IOREMAP_LEN, "DiskOnChip"))
+ return -EBUSY;
virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
if (!virtadr) {
printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
- return -EIO;
+ ret = -EIO;
+ goto error_ioremap;
}
/* It's not possible to cleanly detect the DiskOnChip - the
@@ -1625,7 +1563,6 @@ static int __init doc_probe(unsigned long physadr)
sizeof(struct nand_chip) + sizeof(struct doc_priv) + (2 * sizeof(struct nand_bbt_descr));
mtd = kzalloc(len, GFP_KERNEL);
if (!mtd) {
- printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);
ret = -ENOMEM;
goto fail;
}
@@ -1652,7 +1589,8 @@ static int __init doc_probe(unsigned long physadr)
nand->ecc.mode = NAND_ECC_HW_SYNDROME;
nand->ecc.size = 512;
nand->ecc.bytes = 6;
- nand->options = NAND_USE_FLASH_BBT;
+ nand->ecc.strength = 2;
+ nand->bbt_options = NAND_BBT_USE_FLASH;
doc->physadr = physadr;
doc->virtadr = virtadr;
@@ -1692,6 +1630,10 @@ static int __init doc_probe(unsigned long physadr)
WriteDOC(save_control, virtadr, DOCControl);
fail:
iounmap(virtadr);
+
+error_ioremap:
+ release_mem_region(physadr, DOC_IOREMAP_LEN);
+
return ret;
}
@@ -1708,6 +1650,7 @@ static void release_nanddoc(void)
nextmtd = doc->nextdoc;
nand_release(mtd);
iounmap(doc->virtadr);
+ release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
kfree(mtd);
}
}
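
Aside (a minimal sketch of the request/ioremap pairing the diskonchip change above introduces; the helper name is hypothetical):

static void __iomem *sketch_claim_window(unsigned long physadr, size_t len)
{
	void __iomem *virtadr;

	if (!request_mem_region(physadr, len, "DiskOnChip"))
		return NULL;				/* region already claimed */

	virtadr = ioremap(physadr, len);
	if (!virtadr)
		release_mem_region(physadr, len);	/* undo the claim on failure */

	return virtadr;
}
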
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
new file mode 100644
index 00000000000..ce24637e14f
--- /dev/null
+++ b/drivers/mtd/nand/docg4.c
@@ -0,0 +1,1394 @@
+/*
+ * Copyright © 2012 Mike Dunn <mikedunn@newsguy.com>
+ *
+ * mtd nand driver for M-Systems DiskOnChip G4
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Tested on the Palm Treo 680. The G4 is also present on Toshiba Portege, Asus
+ * P526, some HTC smartphones (Wizard, Prophet, ...), O2 XDA Zinc, maybe others.
+ * Should work on these as well. Let me know!
+ *
+ * TODO:
+ *
+ * Mechanism for management of password-protected areas
+ *
+ * Hamming ecc when reading oob only
+ *
+ * According to the M-Sys documentation, this device is also available in a
+ * "dual-die" configuration having a 256MB capacity, but no mechanism for
+ * detecting this variant is documented. Currently this driver assumes 128MB
+ * capacity.
+ *
+ * Support for multiple cascaded devices ("floors"). Not sure which gadgets
+ * contain multiple G4s in a cascaded configuration, if any.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/bch.h>
+#include <linux/bitrev.h>
+#include <linux/jiffies.h>
+
+/*
+ * In "reliable mode" consecutive 2k pages are used in parallel (in some
+ * fashion) to store the same data. The data can be read back from the
+ * even-numbered pages in the normal manner; odd-numbered pages will appear to
+ * contain junk. Systems that boot from the docg4 typically write the secondary
+ * program loader (SPL) code in this mode. The SPL is loaded by the initial
+ * program loader (IPL, stored in the docg4's 2k NOR-like region that is mapped
+ * to the reset vector address). This module parameter enables you to use this
+ * driver to write the SPL. When in this mode, no more than 2k of data can be
+ * written at a time, because the addresses do not increment in the normal
+ * manner, and the starting offset must be within an even-numbered 2k region;
+ * i.e., invalid starting offsets are 0x800, 0xa00, 0xc00, 0xe00, 0x1800,
+ * 0x1a00, ... Reliable mode is a special case and should not be used unless
+ * you know what you're doing.
+ */
+static bool reliable_mode;
+module_param(reliable_mode, bool, 0);
+MODULE_PARM_DESC(reliable_mode, "pages are programmed in reliable mode");
+
+/*
+ * You'll want to ignore badblocks if you're reading a partition that contains
+ * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
+ * it does not use mtd nand's method for marking bad blocks (using oob area).
+ * This will also skip the check of the "page written" flag.
+ */
+static bool ignore_badblocks;
+module_param(ignore_badblocks, bool, 0);
+MODULE_PARM_DESC(ignore_badblocks, "no badblock checking performed");
+
+struct docg4_priv {
+ struct mtd_info *mtd;
+ struct device *dev;
+ void __iomem *virtadr;
+ int status;
+ struct {
+ unsigned int command;
+ int column;
+ int page;
+ } last_command;
+ uint8_t oob_buf[16];
+ uint8_t ecc_buf[7];
+ int oob_page;
+ struct bch_control *bch;
+};
+
+/*
+ * Defines prefixed with DOCG4 are unique to the diskonchip G4. All others are
+ * shared with other diskonchip devices (P3, G3 at least).
+ *
+ * Functions with names prefixed with docg4_ are mtd / nand interface functions
+ * (though they may also be called internally). All others are internal.
+ */
+
+#define DOC_IOSPACE_DATA 0x0800
+
+/* register offsets */
+#define DOC_CHIPID 0x1000
+#define DOC_DEVICESELECT 0x100a
+#define DOC_ASICMODE 0x100c
+#define DOC_DATAEND 0x101e
+#define DOC_NOP 0x103e
+
+#define DOC_FLASHSEQUENCE 0x1032
+#define DOC_FLASHCOMMAND 0x1034
+#define DOC_FLASHADDRESS 0x1036
+#define DOC_FLASHCONTROL 0x1038
+#define DOC_ECCCONF0 0x1040
+#define DOC_ECCCONF1 0x1042
+#define DOC_HAMMINGPARITY 0x1046
+#define DOC_BCH_SYNDROM(idx) (0x1048 + idx)
+
+#define DOC_ASICMODECONFIRM 0x1072
+#define DOC_CHIPID_INV 0x1074
+#define DOC_POWERMODE 0x107c
+
+#define DOCG4_MYSTERY_REG 0x1050
+
+/* apparently used only to write oob bytes 6 and 7 */
+#define DOCG4_OOB_6_7 0x1052
+
+/* DOC_FLASHSEQUENCE register commands */
+#define DOC_SEQ_RESET 0x00
+#define DOCG4_SEQ_PAGE_READ 0x03
+#define DOCG4_SEQ_FLUSH 0x29
+#define DOCG4_SEQ_PAGEWRITE 0x16
+#define DOCG4_SEQ_PAGEPROG 0x1e
+#define DOCG4_SEQ_BLOCKERASE 0x24
+#define DOCG4_SEQ_SETMODE 0x45
+
+/* DOC_FLASHCOMMAND register commands */
+#define DOCG4_CMD_PAGE_READ 0x00
+#define DOC_CMD_ERASECYCLE2 0xd0
+#define DOCG4_CMD_FLUSH 0x70
+#define DOCG4_CMD_READ2 0x30
+#define DOC_CMD_PROG_BLOCK_ADDR 0x60
+#define DOCG4_CMD_PAGEWRITE 0x80
+#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOCG4_CMD_FAST_MODE 0xa3 /* functionality guessed */
+#define DOC_CMD_RELIABLE_MODE 0x22
+#define DOC_CMD_RESET 0xff
+
+/* DOC_POWERMODE register bits */
+#define DOC_POWERDOWN_READY 0x80
+
+/* DOC_FLASHCONTROL register bits */
+#define DOC_CTRL_CE 0x10
+#define DOC_CTRL_UNKNOWN 0x40
+#define DOC_CTRL_FLASHREADY 0x01
+
+/* DOC_ECCCONF0 register bits */
+#define DOC_ECCCONF0_READ_MODE 0x8000
+#define DOC_ECCCONF0_UNKNOWN 0x2000
+#define DOC_ECCCONF0_ECC_ENABLE 0x1000
+#define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff
+
+/* DOC_ECCCONF1 register bits */
+#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
+#define DOC_ECCCONF1_ECC_ENABLE 0x07
+#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
+
+/* DOC_ASICMODE register bits */
+#define DOC_ASICMODE_RESET 0x00
+#define DOC_ASICMODE_NORMAL 0x01
+#define DOC_ASICMODE_POWERDOWN 0x02
+#define DOC_ASICMODE_MDWREN 0x04
+#define DOC_ASICMODE_BDETCT_RESET 0x08
+#define DOC_ASICMODE_RSTIN_RESET 0x10
+#define DOC_ASICMODE_RAM_WE 0x20
+
+/* good status values read after read/write/erase operations */
+#define DOCG4_PROGSTATUS_GOOD 0x51
+#define DOCG4_PROGSTATUS_GOOD_2 0xe0
+
+/*
+ * On read operations (page and oob-only), the first byte read from I/O reg is a
+ * status. On error, it reads 0x73; otherwise, it reads either 0x71 (first read
+ * after reset only) or 0x51, so bit 1 is presumed to be an error indicator.
+ */
+#define DOCG4_READ_ERROR 0x02 /* bit 1 indicates read error */
+
+/* anatomy of the device */
+#define DOCG4_CHIP_SIZE 0x8000000
+#define DOCG4_PAGE_SIZE 0x200
+#define DOCG4_PAGES_PER_BLOCK 0x200
+#define DOCG4_BLOCK_SIZE (DOCG4_PAGES_PER_BLOCK * DOCG4_PAGE_SIZE)
+#define DOCG4_NUMBLOCKS (DOCG4_CHIP_SIZE / DOCG4_BLOCK_SIZE)
+#define DOCG4_OOB_SIZE 0x10
+#define DOCG4_CHIP_SHIFT 27 /* log_2(DOCG4_CHIP_SIZE) */
+#define DOCG4_PAGE_SHIFT 9 /* log_2(DOCG4_PAGE_SIZE) */
+#define DOCG4_ERASE_SHIFT 18 /* log_2(DOCG4_BLOCK_SIZE) */
+
+/* all but the last byte is included in ecc calculation */
+#define DOCG4_BCH_SIZE (DOCG4_PAGE_SIZE + DOCG4_OOB_SIZE - 1)
+
+#define DOCG4_USERDATA_LEN 520 /* 512 byte page plus 8 oob avail to user */
+
+/* expected values from the ID registers */
+#define DOCG4_IDREG1_VALUE 0x0400
+#define DOCG4_IDREG2_VALUE 0xfbff
+
+/* primitive polynomial used to build the Galois field used by hw ecc gen */
+#define DOCG4_PRIMITIVE_POLY 0x4443
+
+#define DOCG4_M 14 /* Galois field is of order 2^14 */
+#define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */
+
+#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
+#define DOCG4_REDUNDANT_BBT_PAGE 24 /* page where redundant factory bbt lives */
+
+/*
+ * Bytes 0, 1 are used as badblock marker.
+ * Bytes 2 - 6 are available to the user.
+ * Byte 7 is hamming ecc for first 7 oob bytes only.
+ * Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14.
+ * Byte 15 (the last) is used by the driver as a "page written" flag.
+ */
+static struct nand_ecclayout docg4_oobinfo = {
+ .eccbytes = 9,
+ .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
+ .oobavail = 5,
+ .oobfree = { {.offset = 2, .length = 5} }
+};
+
+/*
+ * The device has a nop register which M-Sys claims is for the purpose of
+ * inserting precise delays. But beware; at least some operations fail if the
+ * nop writes are replaced with a generic delay!
+ */
+static inline void write_nop(void __iomem *docptr)
+{
+ writew(0, docptr + DOC_NOP);
+}
+
+static void docg4_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *nand = mtd->priv;
+ uint16_t *p = (uint16_t *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ p[i] = readw(nand->IO_ADDR_R);
+}
+
+static void docg4_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *nand = mtd->priv;
+ uint16_t *p = (uint16_t *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ writew(p[i], nand->IO_ADDR_W);
+}
+
+static int poll_status(struct docg4_priv *doc)
+{
+ /*
+ * Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL
+ * register. Operations known to take a long time (e.g., block erase)
+ * should sleep for a while before calling this.
+ */
+
+ uint16_t flash_status;
+ unsigned long timeo;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ /* hardware quirk requires reading twice initially */
+ flash_status = readw(docptr + DOC_FLASHCONTROL);
+
+ timeo = jiffies + msecs_to_jiffies(200); /* generous timeout */
+ do {
+ cpu_relax();
+ flash_status = readb(docptr + DOC_FLASHCONTROL);
+ } while (!(flash_status & DOC_CTRL_FLASHREADY) &&
+ time_before(jiffies, timeo));
+
+ if (unlikely(!(flash_status & DOC_CTRL_FLASHREADY))) {
+ dev_err(doc->dev, "%s: timed out!\n", __func__);
+ return NAND_STATUS_FAIL;
+ }
+
+ return 0;
+}
+
+
+static int docg4_wait(struct mtd_info *mtd, struct nand_chip *nand)
+{
+
+ struct docg4_priv *doc = nand->priv;
+ int status = NAND_STATUS_WP; /* inverse logic?? */
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ /* report any previously unreported error */
+ if (doc->status) {
+ status |= doc->status;
+ doc->status = 0;
+ return status;
+ }
+
+ status |= poll_status(doc);
+ return status;
+}
+
+static void docg4_select_chip(struct mtd_info *mtd, int chip)
+{
+ /*
+ * Select among multiple cascaded chips ("floors"). Multiple floors are
+ * not yet supported, so the only valid non-negative value is 0.
+ */
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev, "%s: chip %d\n", __func__, chip);
+
+ if (chip < 0)
+ return; /* deselected */
+
+ if (chip > 0)
+ dev_warn(doc->dev, "multiple floors currently unsupported\n");
+
+ writew(0, docptr + DOC_DEVICESELECT);
+}
+
+static void reset(struct mtd_info *mtd)
+{
+ /* full device reset */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ writew(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN,
+ docptr + DOC_ASICMODE);
+ writew(~(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN),
+ docptr + DOC_ASICMODECONFIRM);
+ write_nop(docptr);
+
+ writew(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN,
+ docptr + DOC_ASICMODE);
+ writew(~(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN),
+ docptr + DOC_ASICMODECONFIRM);
+
+ writew(DOC_ECCCONF1_ECC_ENABLE, docptr + DOC_ECCCONF1);
+
+ poll_status(doc);
+}
+
+static void read_hw_ecc(void __iomem *docptr, uint8_t *ecc_buf)
+{
+ /* read the 7 hw-generated ecc bytes */
+
+ int i;
+ for (i = 0; i < 7; i++) { /* hw quirk; read twice */
+ ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
+ ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
+ }
+}
+
+static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+ /*
+ * Called after a page read when hardware reports bitflips.
+ * Up to four bitflips can be corrected.
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i, numerrs, errpos[4];
+ const uint8_t blank_read_hwecc[8] = {
+ 0xcf, 0x72, 0xfc, 0x1b, 0xa9, 0xc7, 0xb9, 0 };
+
+ read_hw_ecc(docptr, doc->ecc_buf); /* read 7 hw-generated ecc bytes */
+
+ /* check if read error is due to a blank page */
+ if (!memcmp(doc->ecc_buf, blank_read_hwecc, 7))
+ return 0; /* yes */
+
+ /* skip additional check of "written flag" if ignore_badblocks */
+ if (ignore_badblocks == false) {
+
+ /*
+ * If the hw ecc bytes are not those of a blank page, there's
+ * still a chance that the page is blank, but was read with
+ * errors. Check the "written flag" in last oob byte, which
+ * is set to zero when a page is written. If more than half
+ * the bits are set, assume a blank page. Unfortunately, the
+ * bit flip(s) are not reported in stats.
+ */
+
+ if (nand->oob_poi[15]) {
+ int bit, numsetbits = 0;
+ unsigned long written_flag = nand->oob_poi[15];
+ for_each_set_bit(bit, &written_flag, 8)
+ numsetbits++;
+ if (numsetbits > 4) { /* assume blank */
+ dev_warn(doc->dev,
+ "error(s) in blank page "
+ "at offset %08x\n",
+ page * DOCG4_PAGE_SIZE);
+ return 0;
+ }
+ }
+ }
+
+ /*
+ * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
+ * algorithm is used to decode this. However the hw operates on page
+ * data in a bit order that is the reverse of that of the bch alg,
+ * requiring that the bits be reversed on the result. Thanks to Ivan
+ * Djelic for his analysis!
+ */
+ for (i = 0; i < 7; i++)
+ doc->ecc_buf[i] = bitrev8(doc->ecc_buf[i]);
+
+ numerrs = decode_bch(doc->bch, NULL, DOCG4_USERDATA_LEN, NULL,
+ doc->ecc_buf, NULL, errpos);
+
+ if (numerrs == -EBADMSG) {
+ dev_warn(doc->dev, "uncorrectable errors at offset %08x\n",
+ page * DOCG4_PAGE_SIZE);
+ return -EBADMSG;
+ }
+
+ BUG_ON(numerrs < 0); /* -EINVAL, or anything other than -EBADMSG */
+
+ /* undo last step in BCH alg (modulo mirroring not needed) */
+ for (i = 0; i < numerrs; i++)
+ errpos[i] = (errpos[i] & ~7)|(7-(errpos[i] & 7));
+
+ /* fix the errors */
+ for (i = 0; i < numerrs; i++) {
+
+ /* ignore if error within oob ecc bytes */
+ if (errpos[i] > DOCG4_USERDATA_LEN * 8)
+ continue;
+
+ /* if error within oob area preceding ecc bytes... */
+ if (errpos[i] > DOCG4_PAGE_SIZE * 8)
+ change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8,
+ (unsigned long *)nand->oob_poi);
+
+ else /* error in page data */
+ change_bit(errpos[i], (unsigned long *)buf);
+ }
+
+ dev_notice(doc->dev, "%d error(s) corrected at offset %08x\n",
+ numerrs, page * DOCG4_PAGE_SIZE);
+
+ return numerrs;
+}
+
+static uint8_t docg4_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+
+ dev_dbg(doc->dev, "%s\n", __func__);
+
+ if (doc->last_command.command == NAND_CMD_STATUS) {
+ int status;
+
+ /*
+ * Previous nand command was status request, so nand
+ * infrastructure code expects to read the status here. If an
+ * error occurred in a previous operation, report it.
+ */
+ doc->last_command.command = 0;
+
+ if (doc->status) {
+ status = doc->status;
+ doc->status = 0;
+ }
+
+ /* why is NAND_STATUS_WP inverse logic?? */
+ else
+ status = NAND_STATUS_WP | NAND_STATUS_READY;
+
+ return status;
+ }
+
+ dev_warn(doc->dev, "unexpected call to read_byte()\n");
+
+ return 0;
+}
+
+static void write_addr(struct docg4_priv *doc, uint32_t docg4_addr)
+{
+ /* write the four address bytes packed in docg4_addr to the device */
+
+ void __iomem *docptr = doc->virtadr;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+ docg4_addr >>= 8;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+ docg4_addr >>= 8;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+ docg4_addr >>= 8;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+}
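
For illustration, a hypothetical standalone sketch of how a packed 32-bit device address decomposes into the four bytes written above, least-significant byte first (the example value anticipates mtd_to_docg4_address() below):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t addr = 0x00800108;	/* example packed address */
	uint8_t bytes[4];
	int i;

	/* same order as the four writeb() calls above */
	for (i = 0; i < 4; i++) {
		bytes[i] = addr & 0xff;
		addr >>= 8;
	}
	assert(bytes[0] == 0x08 && bytes[1] == 0x01 &&
	       bytes[2] == 0x80 && bytes[3] == 0x00);
	return 0;
}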
+
+static int read_progstatus(struct docg4_priv *doc)
+{
+ /*
+ * This apparently checks the status of programming. Done after an
+ * erasure, and after page data is written. On error, the status is
+ * saved, to be later retrieved by the nand infrastructure code.
+ */
+ void __iomem *docptr = doc->virtadr;
+
+ /* status is read from the I/O reg */
+ uint16_t status1 = readw(docptr + DOC_IOSPACE_DATA);
+ uint16_t status2 = readw(docptr + DOC_IOSPACE_DATA);
+ uint16_t status3 = readw(docptr + DOCG4_MYSTERY_REG);
+
+ dev_dbg(doc->dev, "docg4: %s: %02x %02x %02x\n",
+ __func__, status1, status2, status3);
+
+ if (status1 != DOCG4_PROGSTATUS_GOOD
+ || status2 != DOCG4_PROGSTATUS_GOOD_2
+ || status3 != DOCG4_PROGSTATUS_GOOD_2) {
+ doc->status = NAND_STATUS_FAIL;
+ dev_warn(doc->dev, "read_progstatus failed: "
+ "%02x, %02x, %02x\n", status1, status2, status3);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int pageprog(struct mtd_info *mtd)
+{
+ /*
+ * Final step in writing a page. Apparently flushes the contents of the
+ * device's internal buffer out to the flash array.
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ int retval = 0;
+
+ dev_dbg(doc->dev, "docg4: %s\n", __func__);
+
+ writew(DOCG4_SEQ_PAGEPROG, docptr + DOC_FLASHSEQUENCE);
+ writew(DOC_CMD_PROG_CYCLE2, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* Just busy-wait; usleep_range() slows things down noticeably. */
+ poll_status(doc);
+
+ writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
+ writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ retval = read_progstatus(doc);
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+ poll_status(doc);
+ write_nop(docptr);
+
+ return retval;
+}
+
+static void sequence_reset(struct mtd_info *mtd)
+{
+ /* common starting sequence for all operations */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ writew(DOC_CTRL_UNKNOWN | DOC_CTRL_CE, docptr + DOC_FLASHCONTROL);
+ writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE);
+ writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+ poll_status(doc);
+ write_nop(docptr);
+}
+
+static void read_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
+{
+ /* first step in reading a page */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev,
+ "docg4: %s: g4 page %08x\n", __func__, docg4_addr);
+
+ sequence_reset(mtd);
+
+ writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+
+ write_addr(doc, docg4_addr);
+
+ write_nop(docptr);
+ writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ poll_status(doc);
+}
+
+static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
+{
+ /* first step in writing a page */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev,
+ "docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
+ sequence_reset(mtd);
+
+ if (unlikely(reliable_mode)) {
+ writew(DOCG4_SEQ_SETMODE, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_FAST_MODE, docptr + DOC_FLASHCOMMAND);
+ writew(DOC_CMD_RELIABLE_MODE, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ }
+
+ writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_addr(doc, docg4_addr);
+ write_nop(docptr);
+ write_nop(docptr);
+ poll_status(doc);
+}
+
+static uint32_t mtd_to_docg4_address(int page, int column)
+{
+ /*
+ * Convert mtd address to format used by the device, 32 bit packed.
+ *
+ * Some notes on G4 addressing... The M-Sys documentation on this device
+ * claims that pages are 2K in length, and indeed, the format of the
+ * address used by the device reflects that. But within each page are
+ * four 512 byte "sub-pages", each with its own oob data that is
+ * read/written immediately after the 512 bytes of page data. This oob
+ * data contains the ecc bytes for the preceding 512 bytes.
+ *
+ * Rather than tell the mtd nand infrastructure that page size is 2k,
+ * with four sub-pages each, we engage in a little subterfuge and tell
+ * the infrastructure code that pages are 512 bytes in size. This is
+ * done because during the course of reverse-engineering the device, I
+ * never observed an instance where an entire 2K "page" was read or
+ * written as a unit. Each "sub-page" is always addressed individually,
+ * its data read/written, and ecc handled before the next "sub-page" is
+ * addressed.
+ *
+ * This requires us to convert addresses passed by the mtd nand
+ * infrastructure code to those used by the device.
+ *
+ * The address that is written to the device consists of four bytes: the
+ * first two are the 2k page number, and the second two are the index into
+ * the page. The index is in terms of 16-bit half-words and includes
+ * the preceding oob data, so e.g., the index into the second
+ * "sub-page" is 0x108, and the full device address of the start of mtd
+ * page 0x201 is 0x00800108.
+ */
+ int g4_page = page / 4; /* device's 2K page */
+ int g4_index = (page % 4) * 0x108 + column/2; /* offset into page */
+ return (g4_page << 16) | g4_index; /* pack */
+}
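
As a sanity check of the arithmetic against the example in the comment, here is a hypothetical standalone restatement of the conversion; note that the 0x108 half-word stride covers 512 bytes of sub-page data plus 16 bytes of oob:

#include <assert.h>
#include <stdint.h>

/* restatement of mtd_to_docg4_address(), illustrative name only */
static uint32_t to_g4_addr(int page, int column)
{
	int g4_page  = page / 4;			/* device's 2K page */
	int g4_index = (page % 4) * 0x108 + column / 2;	/* half-word offset */

	return ((uint32_t)g4_page << 16) | g4_index;
}

int main(void)
{
	/* the example from the comment: start of mtd page 0x201 is 0x00800108 */
	assert(to_g4_addr(0x201, 0) == 0x00800108);
	return 0;
}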
+
+static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
+ int page_addr)
+{
+ /* handle standard nand commands */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ uint32_t g4_addr = mtd_to_docg4_address(page_addr, column);
+
+ dev_dbg(doc->dev, "%s %x, page_addr=%x, column=%x\n",
+ __func__, command, page_addr, column);
+
+ /*
+ * Save the command and its arguments. This enables emulation of
+ * standard flash devices, and also some optimizations.
+ */
+ doc->last_command.command = command;
+ doc->last_command.column = column;
+ doc->last_command.page = page_addr;
+
+ switch (command) {
+
+ case NAND_CMD_RESET:
+ reset(mtd);
+ break;
+
+ case NAND_CMD_READ0:
+ read_page_prologue(mtd, g4_addr);
+ break;
+
+ case NAND_CMD_STATUS:
+ /* next call to read_byte() will expect a status */
+ break;
+
+ case NAND_CMD_SEQIN:
+ if (unlikely(reliable_mode)) {
+ uint16_t g4_page = g4_addr >> 16;
+
+ /* writes to odd-numbered 2k pages are invalid */
+ if (g4_page & 0x01)
+ dev_warn(doc->dev,
+ "invalid reliable mode address\n");
+ }
+
+ write_page_prologue(mtd, g4_addr);
+
+ /* hack for deferred write of oob bytes */
+ if (doc->oob_page == page_addr)
+ memcpy(nand->oob_poi, doc->oob_buf, 16);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ pageprog(mtd);
+ break;
+
+ /* we don't expect these, based on review of nand_base.c */
+ case NAND_CMD_READOOB:
+ case NAND_CMD_READID:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ dev_warn(doc->dev, "docg4_command: "
+ "unexpected nand command 0x%x\n", command);
+ break;
+
+ }
+}
+
+static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
+ uint8_t *buf, int page, bool use_ecc)
+{
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t status, edc_err, *buf16;
+ int bits_corrected = 0;
+
+ dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
+
+ writew(DOC_ECCCONF0_READ_MODE |
+ DOC_ECCCONF0_ECC_ENABLE |
+ DOC_ECCCONF0_UNKNOWN |
+ DOCG4_BCH_SIZE,
+ docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* the 1st byte from the I/O reg is a status; the rest is page data */
+ status = readw(docptr + DOC_IOSPACE_DATA);
+ if (status & DOCG4_READ_ERROR) {
+ dev_err(doc->dev,
+ "docg4_read_page: bad status: 0x%02x\n", status);
+ writew(0, docptr + DOC_DATAEND);
+ return -EIO;
+ }
+
+ dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
+
+ docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */
+
+ /* this device always reads oob after page data */
+ /* first 14 oob bytes read from I/O reg */
+ docg4_read_buf(mtd, nand->oob_poi, 14);
+
+ /* last 2 read from another reg */
+ buf16 = (uint16_t *)(nand->oob_poi + 14);
+ *buf16 = readw(docptr + DOCG4_MYSTERY_REG);
+
+ write_nop(docptr);
+
+ if (likely(use_ecc == true)) {
+
+ /* read the register that tells us if bitflip(s) detected */
+ edc_err = readw(docptr + DOC_ECCCONF1);
+ edc_err = readw(docptr + DOC_ECCCONF1);
+ dev_dbg(doc->dev, "%s: edc_err = 0x%02x\n", __func__, edc_err);
+
+ /* If bitflips are reported, attempt to correct with ecc */
+ if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
+ bits_corrected = correct_data(mtd, buf, page);
+ if (bits_corrected == -EBADMSG)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += bits_corrected;
+ }
+ }
+
+ writew(0, docptr + DOC_DATAEND);
+ if (bits_corrected == -EBADMSG) /* uncorrectable errors */
+ return 0;
+ return bits_corrected;
+}
+
+
+static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
+ uint8_t *buf, int oob_required, int page)
+{
+ return read_page(mtd, nand, buf, page, false);
+}
+
+static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
+ uint8_t *buf, int oob_required, int page)
+{
+ return read_page(mtd, nand, buf, page, true);
+}
+
+static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
+ int page)
+{
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t status;
+
+ dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
+
+ docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
+
+ writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* the 1st byte from the I/O reg is a status; the rest is oob data */
+ status = readw(docptr + DOC_IOSPACE_DATA);
+ if (status & DOCG4_READ_ERROR) {
+ dev_warn(doc->dev,
+ "docg4_read_oob failed: status = 0x%02x\n", status);
+ return -EIO;
+ }
+
+ dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
+
+ docg4_read_buf(mtd, nand->oob_poi, 16);
+
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+
+ return 0;
+}
+
+static int docg4_erase_block(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t g4_page;
+
+ dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
+
+ sequence_reset(mtd);
+
+ writew(DOCG4_SEQ_BLOCKERASE, docptr + DOC_FLASHSEQUENCE);
+ writew(DOC_CMD_PROG_BLOCK_ADDR, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+
+ /* only 2 bytes of address are written to specify erase block */
+ g4_page = (uint16_t)(page / 4); /* to g4's 2k page addressing */
+ writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
+ g4_page >>= 8;
+ writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
+ write_nop(docptr);
+
+ /* start the erasure */
+ writew(DOC_CMD_ERASECYCLE2, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ usleep_range(500, 1000); /* erasure is long; take a snooze */
+ poll_status(doc);
+ writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
+ writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ read_progstatus(doc);
+
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+ poll_status(doc);
+ write_nop(docptr);
+
+ return nand->waitfunc(mtd, nand);
+}
+
+static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
+ const uint8_t *buf, bool use_ecc)
+{
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint8_t ecc_buf[8];
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ writew(DOC_ECCCONF0_ECC_ENABLE |
+ DOC_ECCCONF0_UNKNOWN |
+ DOCG4_BCH_SIZE,
+ docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+
+ /* write the page data */
+ docg4_write_buf16(mtd, buf, DOCG4_PAGE_SIZE);
+
+ /* oob bytes 0 through 5 are written to I/O reg */
+ docg4_write_buf16(mtd, nand->oob_poi, 6);
+
+ /* oob byte 6 written to a separate reg */
+ writew(nand->oob_poi[6], docptr + DOCG4_OOB_6_7);
+
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* write hw-generated ecc bytes to oob */
+ if (likely(use_ecc == true)) {
+ /* oob byte 7 is hamming code */
+ uint8_t hamming = readb(docptr + DOC_HAMMINGPARITY);
+ hamming = readb(docptr + DOC_HAMMINGPARITY); /* 2nd read */
+ writew(hamming, docptr + DOCG4_OOB_6_7);
+ write_nop(docptr);
+
+ /* read the 7 bch bytes from ecc regs */
+ read_hw_ecc(docptr, ecc_buf);
+ ecc_buf[7] = 0; /* clear the "page written" flag */
+ }
+
+ /* write user-supplied bytes to oob */
+ else {
+ writew(nand->oob_poi[7], docptr + DOCG4_OOB_6_7);
+ write_nop(docptr);
+ memcpy(ecc_buf, &nand->oob_poi[8], 8);
+ }
+
+ docg4_write_buf16(mtd, ecc_buf, 8);
+ write_nop(docptr);
+ write_nop(docptr);
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+
+ return 0;
+}
+
+static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
+ const uint8_t *buf, int oob_required)
+{
+ return write_page(mtd, nand, buf, false);
+}
+
+static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
+ const uint8_t *buf, int oob_required)
+{
+ return write_page(mtd, nand, buf, true);
+}
+
+static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
+ int page)
+{
+ /*
+ * Writing oob-only is not really supported, because MLC nand must write
+ * oob bytes at the same time as page data. Nonetheless, we save the
+ * oob buffer contents here, and then write it along with the page data
+ * if the same page is subsequently written. This allows user space
+ * utilities that write the oob data prior to the page data to work
+ * (e.g., nandwrite). The disadvantage is that, if the intention was to
+ * write oob only, the operation is quietly ignored. Also, oob can get
+ * corrupted if two concurrent processes are running nandwrite.
+ */
+
+ /* note that bytes 7..14 are hw generated hamming/ecc and overwritten */
+ struct docg4_priv *doc = nand->priv;
+ doc->oob_page = page;
+ memcpy(doc->oob_buf, nand->oob_poi, 16);
+ return 0;
+}
+
+static int __init read_factory_bbt(struct mtd_info *mtd)
+{
+ /*
+ * The device contains a read-only factory bad block table. Read it and
+ * update the memory-based bbt accordingly.
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
+ uint8_t *buf;
+ int i, block;
+ __u32 eccfailed_stats = mtd->ecc_stats.failed;
+
+ buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ read_page_prologue(mtd, g4_addr);
+ docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
+
+ /*
+ * If no memory-based bbt was created, exit. This will happen if module
+ * parameter ignore_badblocks is set. Then why even call this function?
+ * For an unknown reason, block erase always fails if it's the first
+ * operation after device power-up. The above read ensures it never is.
+ * Ugly, I know.
+ */
+ if (nand->bbt == NULL) /* no memory-based bbt */
+ goto exit;
+
+ if (mtd->ecc_stats.failed > eccfailed_stats) {
+ /*
+ * Whoops, an ecc failure occurred reading the factory bbt.
+ * It is stored redundantly, so we get another chance.
+ */
+ eccfailed_stats = mtd->ecc_stats.failed;
+ docg4_read_page(mtd, nand, buf, 0, DOCG4_REDUNDANT_BBT_PAGE);
+ if (mtd->ecc_stats.failed > eccfailed_stats) {
+ dev_warn(doc->dev,
+ "The factory bbt could not be read!\n");
+ goto exit;
+ }
+ }
+
+ /*
+ * Parse factory bbt and update memory-based bbt. Factory bbt format is
+ * simple: one bit per block, block numbers increase left to right (msb
+ * to lsb). Bit clear means bad block.
+ */
+ for (i = block = 0; block < DOCG4_NUMBLOCKS; block += 8, i++) {
+ int bitnum;
+ unsigned long bits = ~buf[i];
+ for_each_set_bit(bitnum, &bits, 8) {
+ int badblock = block + 7 - bitnum;
+ nand->bbt[badblock / 4] |=
+ 0x03 << ((badblock % 4) * 2);
+ mtd->ecc_stats.badblocks++;
+ dev_notice(doc->dev, "factory-marked bad block: %d\n",
+ badblock);
+ }
+ }
+ exit:
+ kfree(buf);
+ return 0;
+}
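
A hypothetical standalone sketch of the per-byte decoding described above (a clear bit marks a bad block, and block numbers run msb to lsb within the byte), using a plain loop in place of for_each_set_bit():

#include <assert.h>

int main(void)
{
	unsigned char bbt_byte = 0xfb;	/* bit 2 clear: exactly one bad block */
	int base_block = 0;		/* first block covered by this byte */
	int bit, nbad = 0, bad = -1;

	for (bit = 0; bit < 8; bit++) {
		if (!(bbt_byte & (1 << bit))) {		/* clear bit = bad block */
			bad = base_block + 7 - bit;	/* msb-to-lsb block order */
			nbad++;
		}
	}
	assert(nbad == 1 && bad == 5);	/* block 5 is factory-marked bad */
	return 0;
}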
+
+static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ /*
+ * Mark a block as bad. Bad blocks are marked in the oob area of the
+ * first page of the block. The default scan_bbt() in the nand
+ * infrastructure code works fine for building the memory-based bbt
+ * during initialization, as does the nand infrastructure function that
+ * checks if a block is bad by reading the bbt. This function replaces
+ * the nand default because writes to oob-only are not supported.
+ */
+
+ int ret, i;
+ uint8_t *buf;
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ struct nand_bbt_descr *bbtd = nand->badblock_pattern;
+ int page = (int)(ofs >> nand->page_shift);
+ uint32_t g4_addr = mtd_to_docg4_address(page, 0);
+
+ dev_dbg(doc->dev, "%s: %08llx\n", __func__, ofs);
+
+ if (unlikely(ofs & (DOCG4_BLOCK_SIZE - 1)))
+ dev_warn(doc->dev, "%s: ofs %llx not start of block!\n",
+ __func__, ofs);
+
+ /* allocate blank buffer for page data */
+ buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ /* write bit-wise negation of pattern to oob buffer */
+ memset(nand->oob_poi, 0xff, mtd->oobsize);
+ for (i = 0; i < bbtd->len; i++)
+ nand->oob_poi[bbtd->offs + i] = ~bbtd->pattern[i];
+
+ /* write first page of block */
+ write_page_prologue(mtd, g4_addr);
+ docg4_write_page(mtd, nand, buf, 1);
+ ret = pageprog(mtd);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ /* only called when module_param ignore_badblocks is set */
+ return 0;
+}
+
+static int docg4_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ /*
+ * Put the device into "deep power-down" mode. Note that CE# must be
+ * deasserted for this to take effect. The xscale, e.g., can be
+ * configured to float this signal when the processor enters power-down,
+ * and a suitable pull-up ensures its deassertion.
+ */
+
+ int i;
+ uint8_t pwr_down;
+ struct docg4_priv *doc = platform_get_drvdata(pdev);
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ /* poll the register that tells us we're ready to go to sleep */
+ for (i = 0; i < 10; i++) {
+ pwr_down = readb(docptr + DOC_POWERMODE);
+ if (pwr_down & DOC_POWERDOWN_READY)
+ break;
+ usleep_range(1000, 4000);
+ }
+
+ if (!(pwr_down & DOC_POWERDOWN_READY)) {
+ dev_err(doc->dev, "suspend failed; "
+ "timeout polling DOC_POWERDOWN_READY\n");
+ return -EIO;
+ }
+
+ writew(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN,
+ docptr + DOC_ASICMODE);
+ writew(~(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN),
+ docptr + DOC_ASICMODECONFIRM);
+
+ write_nop(docptr);
+
+ return 0;
+}
+
+static int docg4_resume(struct platform_device *pdev)
+{
+
+ /*
+ * Exit power-down. Twelve consecutive reads of the address below
+ * accomplishes this, assuming CE# has been asserted.
+ */
+
+ struct docg4_priv *doc = platform_get_drvdata(pdev);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ for (i = 0; i < 12; i++)
+ readb(docptr + 0x1fff);
+
+ return 0;
+}
+
+static void __init init_mtd_structs(struct mtd_info *mtd)
+{
+ /* initialize mtd and nand data structures */
+
+ /*
+ * Note that some of the following initializations are not usually
+ * required within a nand driver because they are performed by the nand
+ * infrastructure code as part of nand_scan(). In this case they need
+ * to be initialized here because we skip the call to nand_scan_ident() (the
+ * first half of nand_scan()). The call to nand_scan_ident() is skipped
+ * because for this device the chip id is not read in the manner of a
+ * standard nand device. Unfortunately, nand_scan_ident() does other
+ * things as well, such as call nand_set_defaults().
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+
+ mtd->size = DOCG4_CHIP_SIZE;
+ mtd->name = "Msys_Diskonchip_G4";
+ mtd->writesize = DOCG4_PAGE_SIZE;
+ mtd->erasesize = DOCG4_BLOCK_SIZE;
+ mtd->oobsize = DOCG4_OOB_SIZE;
+ nand->chipsize = DOCG4_CHIP_SIZE;
+ nand->chip_shift = DOCG4_CHIP_SHIFT;
+ nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT;
+ nand->chip_delay = 20;
+ nand->page_shift = DOCG4_PAGE_SHIFT;
+ nand->pagemask = 0x3ffff;
+ nand->badblockpos = NAND_LARGE_BADBLOCK_POS;
+ nand->badblockbits = 8;
+ nand->ecc.layout = &docg4_oobinfo;
+ nand->ecc.mode = NAND_ECC_HW_SYNDROME;
+ nand->ecc.size = DOCG4_PAGE_SIZE;
+ nand->ecc.prepad = 8;
+ nand->ecc.bytes = 8;
+ nand->ecc.strength = DOCG4_T;
+ nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
+ nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
+ nand->controller = &nand->hwcontrol;
+ spin_lock_init(&nand->controller->lock);
+ init_waitqueue_head(&nand->controller->wq);
+
+ /* methods */
+ nand->cmdfunc = docg4_command;
+ nand->waitfunc = docg4_wait;
+ nand->select_chip = docg4_select_chip;
+ nand->read_byte = docg4_read_byte;
+ nand->block_markbad = docg4_block_markbad;
+ nand->read_buf = docg4_read_buf;
+ nand->write_buf = docg4_write_buf16;
+ nand->erase = docg4_erase_block;
+ nand->ecc.read_page = docg4_read_page;
+ nand->ecc.write_page = docg4_write_page;
+ nand->ecc.read_page_raw = docg4_read_page_raw;
+ nand->ecc.write_page_raw = docg4_write_page_raw;
+ nand->ecc.read_oob = docg4_read_oob;
+ nand->ecc.write_oob = docg4_write_oob;
+
+ /*
+ * The way the nand infrastructure code is written, a memory-based bbt
+ * is not created if NAND_SKIP_BBTSCAN is set. With no memory bbt,
+ * nand->block_bad() is used. So when ignoring bad blocks, we skip the
+ * scan and define a dummy block_bad() which always returns 0.
+ */
+ if (ignore_badblocks) {
+ nand->options |= NAND_SKIP_BBTSCAN;
+ nand->block_bad = docg4_block_neverbad;
+ }
+
+}
+
+static int __init read_id_reg(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t id1, id2;
+
+ /* check for presence of g4 chip by reading id registers */
+ id1 = readw(docptr + DOC_CHIPID);
+ id1 = readw(docptr + DOCG4_MYSTERY_REG);
+ id2 = readw(docptr + DOC_CHIPID_INV);
+ id2 = readw(docptr + DOCG4_MYSTERY_REG);
+
+ if (id1 == DOCG4_IDREG1_VALUE && id2 == DOCG4_IDREG2_VALUE) {
+ dev_info(doc->dev,
+ "NAND device: 128MiB Diskonchip G4 detected\n");
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+
+static int __init probe_docg4(struct platform_device *pdev)
+{
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ void __iomem *virtadr;
+ struct docg4_priv *doc;
+ int len, retval;
+ struct resource *r;
+ struct device *dev = &pdev->dev;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(dev, "no io memory resource defined!\n");
+ return -ENODEV;
+ }
+
+ virtadr = ioremap(r->start, resource_size(r));
+ if (!virtadr) {
+ dev_err(dev, "Diskonchip ioremap failed: %pR\n", r);
+ return -EIO;
+ }
+
+ len = sizeof(struct mtd_info) + sizeof(struct nand_chip) +
+ sizeof(struct docg4_priv);
+ mtd = kzalloc(len, GFP_KERNEL);
+ if (mtd == NULL) {
+ retval = -ENOMEM;
+ goto fail;
+ }
+ nand = (struct nand_chip *) (mtd + 1);
+ doc = (struct docg4_priv *) (nand + 1);
+ mtd->priv = nand;
+ nand->priv = doc;
+ mtd->owner = THIS_MODULE;
+ doc->virtadr = virtadr;
+ doc->dev = dev;
+
+ init_mtd_structs(mtd);
+
+ /* initialize kernel bch algorithm */
+ doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
+ if (doc->bch == NULL) {
+ retval = -EINVAL;
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, doc);
+
+ reset(mtd);
+ retval = read_id_reg(mtd);
+ if (retval == -ENODEV) {
+ dev_warn(dev, "No diskonchip G4 device found.\n");
+ goto fail;
+ }
+
+ retval = nand_scan_tail(mtd);
+ if (retval)
+ goto fail;
+
+ retval = read_factory_bbt(mtd);
+ if (retval)
+ goto fail;
+
+ retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
+ if (retval)
+ goto fail;
+
+ doc->mtd = mtd;
+ return 0;
+
+ fail:
+ iounmap(virtadr);
+ if (mtd) {
+ /* re-declarations avoid compiler warning */
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ nand_release(mtd); /* deletes partitions and mtd devices */
+ free_bch(doc->bch);
+ kfree(mtd);
+ }
+
+ return retval;
+}
+
+static int __exit cleanup_docg4(struct platform_device *pdev)
+{
+ struct docg4_priv *doc = platform_get_drvdata(pdev);
+ nand_release(doc->mtd);
+ free_bch(doc->bch);
+ kfree(doc->mtd);
+ iounmap(doc->virtadr);
+ return 0;
+}
+
+static struct platform_driver docg4_driver = {
+ .driver = {
+ .name = "docg4",
+ .owner = THIS_MODULE,
+ },
+ .suspend = docg4_suspend,
+ .resume = docg4_resume,
+ .remove = __exit_p(cleanup_docg4),
+};
+
+module_platform_driver_probe(docg4_driver, probe_docg4);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mike Dunn");
+MODULE_DESCRIPTION("M-Systems DiskOnChip G4 device driver");
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
deleted file mode 100644
index 8400d0f6dad..00000000000
--- a/drivers/mtd/nand/edb7312.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * drivers/mtd/nand/edb7312.c
- *
- * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
- *
- * Derived from drivers/mtd/nand/autcpu12.c
- * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * CLEP7312 board which utilizes the Toshiba TC58V64AFT part. This is
- * a 64Mibit (8MiB x 8 bits) NAND flash device.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <mach/hardware.h> /* for CLPS7111_VIRT_BASE */
-#include <asm/sizes.h>
-#include <asm/hardware/clps7111.h>
-
-/*
- * MTD structure for EDB7312 board
- */
-static struct mtd_info *ep7312_mtd = NULL;
-
-/*
- * Values specific to the EDB7312 board (used with EP7312 processor)
- */
-#define EP7312_FIO_PBASE 0x10000000 /* Phys address of flash */
-#define EP7312_PXDR 0x0001 /*
- * IO offset to Port B data register
- * where the CLE, ALE and NCE pins
- * are wired to.
- */
-#define EP7312_PXDDR 0x0041 /*
- * IO offset to Port B data direction
- * register so we can control the IO
- * lines.
- */
-
-/*
- * Module stuff
- */
-
-static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE;
-static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR;
-static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR;
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info[] = {
- {.name = "EP7312 Nand Flash",
- .offset = 0,
- .size = 8 * 1024 * 1024}
-};
-
-#define NUM_PARTITIONS 1
-
-/*
- * hardware specific access to control-lines
- *
- * NAND_NCE: bit 0 -> bit 6 (bit 7 = 1)
- * NAND_CLE: bit 1 -> bit 4
- * NAND_ALE: bit 2 -> bit 5
- */
-static void ep7312_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
- unsigned char bits = 0x80;
-
- bits |= (ctrl & (NAND_CLE | NAND_ALE)) << 3;
- bits |= (ctrl & NAND_NCE) ? 0x00 : 0x40;
-
- clps_writeb((clps_readb(ep7312_pxdr) & 0xF0) | bits,
- ep7312_pxdr);
- }
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-/*
- * read device ready pin
- */
-static int ep7312_device_ready(struct mtd_info *mtd)
-{
- return 1;
-}
-
-const char *part_probes[] = { "cmdlinepart", NULL };
-
-/*
- * Main initialization routine
- */
-static int __init ep7312_init(void)
-{
- struct nand_chip *this;
- const char *part_type = 0;
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = 0;
- void __iomem *ep7312_fio_base;
-
- /* Allocate memory for MTD device structure and private data */
- ep7312_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!ep7312_mtd) {
- printk("Unable to allocate EDB7312 NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* map physical address */
- ep7312_fio_base = ioremap(ep7312_fio_pbase, SZ_1K);
- if (!ep7312_fio_base) {
- printk("ioremap EDB7312 NAND flash failed\n");
- kfree(ep7312_mtd);
- return -EIO;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&ep7312_mtd[1]);
-
- /* Initialize structures */
- memset(ep7312_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ep7312_mtd->priv = this;
- ep7312_mtd->owner = THIS_MODULE;
-
- /*
- * Set GPIO Port B control register so that the pins are configured
- * to be outputs for controlling the NAND flash.
- */
- clps_writeb(0xf0, ep7312_pxddr);
-
- /* insert callbacks */
- this->IO_ADDR_R = ep7312_fio_base;
- this->IO_ADDR_W = ep7312_fio_base;
- this->cmd_ctrl = ep7312_hwcontrol;
- this->dev_ready = ep7312_device_ready;
- /* 15 us command delay time */
- this->chip_delay = 15;
-
- /* Scan to find existence of the device */
- if (nand_scan(ep7312_mtd, 1)) {
- iounmap((void *)ep7312_fio_base);
- kfree(ep7312_mtd);
- return -ENXIO;
- }
- ep7312_mtd->name = "edb7312-nand";
- mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
- if (mtd_parts_nb == 0) {
- mtd_parts = partition_info;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
-
- /* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- mtd_device_register(ep7312_mtd, mtd_parts, mtd_parts_nb);
-
- /* Return happy */
- return 0;
-}
-
-module_init(ep7312_init);
-
-/*
- * Clean up routine
- */
-static void __exit ep7312_cleanup(void)
-{
- struct nand_chip *this = (struct nand_chip *)&ep7312_mtd[1];
-
- /* Release resources, unregister device */
- nand_release(ap7312_mtd);
-
- /* Release io resource */
- iounmap(this->IO_ADDR_R);
-
- /* Free the MTD device structure */
- kfree(ep7312_mtd);
-}
-
-module_exit(ep7312_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
-MODULE_DESCRIPTION("MTD map driver for Cogent EDB7312 board");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 33d8aad8bba..545a5c002f0 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -24,10 +24,10 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -75,7 +75,7 @@ struct fsl_elbc_fcm_ctrl {
unsigned int use_mdr; /* Non zero if the MDR is to be set */
unsigned int oob; /* Non zero if operating on OOB data */
unsigned int counter; /* counter for the initializations */
- char *oob_poi; /* Place to write ECC after read back */
+ unsigned int max_bitflips; /* Saved during READ0 cmd */
};
/* These map to the positions used by the FCM hardware ECC generator */
@@ -109,20 +109,6 @@ static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
};
/*
- * fsl_elbc_oob_lp_eccm* specify that LP NAND's OOB free area starts at offset
- * 1, so we have to adjust bad block pattern. This pattern should be used for
- * x8 chips only. So far hardware does not support x16 chips anyway.
- */
-static u8 scan_ff_pattern[] = { 0xff, };
-
-static struct nand_bbt_descr largepage_memorybased = {
- .options = 0,
- .offs = 0,
- .len = 1,
- .pattern = scan_ff_pattern,
-};
-
-/*
* ELBC may use HW ECC, so that OOB offsets, that NAND core uses for bbt,
* interfere with ECC positions, that's why we implement our own descriptors.
* OOB {11, 5}, works for both SP and LP chips, with ECCM = 1 and ECCM = 0.
@@ -167,15 +153,22 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
elbc_fcm_ctrl->page = page_addr;
- out_be32(&lbc->fbar,
- page_addr >> (chip->phys_erase_shift - chip->page_shift));
-
if (priv->page_size) {
+ /*
+ * large page size chip : FPAR[PI] save the lowest 6 bits,
+ * FBAR[BLK] save the other bits.
+ */
+ out_be32(&lbc->fbar, page_addr >> 6);
out_be32(&lbc->fpar,
((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
(oob ? FPAR_LP_MS : 0) | column);
buf_num = (page_addr & 1) << 2;
} else {
+ /*
+ * small page size chip : FPAR[PI] save the lowest 5 bits,
+ * FBAR[BLK] save the other bits.
+ */
+ out_be32(&lbc->fbar, page_addr >> 5);
out_be32(&lbc->fpar,
((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
(oob ? FPAR_SP_MS : 0) | column);
@@ -244,6 +237,32 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
return -EIO;
}
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return 0;
+
+ elbc_fcm_ctrl->max_bitflips = 0;
+
+ if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
+ uint32_t lteccr = in_be32(&lbc->lteccr);
+ /*
+ * if command was a full page read and the ELBC
+ * has the LTECCR register, then bits 12-15 (ppc order) of
+ * LTECCR indicates which 512 byte sub-pages had fixed errors.
+ * bits 28-31 are uncorrectable errors, marked elsewhere.
+ * for small page nand only 1 bit is used.
+ * if the ELBC doesn't have the lteccr register, it reads 0.
+ * FIXME: 4 bits can be corrected on NANDs with 2k pages, so
+ * count the number of sub-pages with bitflips and update
+ * ecc_stats.corrected accordingly.
+ */
+ if (lteccr & 0x000F000F)
+ out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
+ if (lteccr & 0x000F0000) {
+ mtd->ecc_stats.corrected++;
+ elbc_fcm_ctrl->max_bitflips = 1;
+ }
+ }
+
return 0;
}
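
For reference, the LTECCR masks used above follow PowerPC big-endian bit numbering, where bit 0 is the most-significant bit of the 32-bit register; 0x000F0000 covers bits 12-15 (corrected sub-pages) and 0x0000000F covers bits 28-31 (uncorrectable errors), together forming the 0x000F000F clear mask. A hypothetical standalone check (helper name is illustrative only):

#include <assert.h>
#include <stdint.h>

/* build a mask from an inclusive range of PowerPC-numbered bits (0 = MSB) */
static uint32_t ppc_bit_mask(int first, int last)
{
	uint32_t mask = 0;
	int bit;

	for (bit = first; bit <= last; bit++)
		mask |= 1u << (31 - bit);
	return mask;
}

int main(void)
{
	assert(ppc_bit_mask(12, 15) == 0x000F0000);	/* corrected sub-pages */
	assert(ppc_bit_mask(28, 31) == 0x0000000F);	/* uncorrectable errors */
	return 0;
}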
@@ -331,20 +350,22 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
fsl_elbc_run_command(mtd);
return;
- /* READID must read all 5 possible bytes while CEB is active */
case NAND_CMD_READID:
- dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
+ case NAND_CMD_PARAM:
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command);
out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_UA << FIR_OP1_SHIFT) |
(FIR_OP_RBW << FIR_OP2_SHIFT));
- out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
- /* nand_get_flash_type() reads 8 bytes of entire ID string */
- out_be32(&lbc->fbcr, 8);
- elbc_fcm_ctrl->read_bytes = 8;
+ out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT);
+ /*
+ * although currently it's 8 bytes for READID, we always read
+ * the maximum 256 bytes (for PARAM)
+ */
+ out_be32(&lbc->fbcr, 256);
+ elbc_fcm_ctrl->read_bytes = 256;
elbc_fcm_ctrl->use_mdr = 1;
- elbc_fcm_ctrl->mdr = 0;
-
+ elbc_fcm_ctrl->mdr = column;
set_addr(mtd, 0, 0, 0);
fsl_elbc_run_command(mtd);
return;
@@ -389,9 +410,17 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
page_addr, column);
elbc_fcm_ctrl->column = column;
- elbc_fcm_ctrl->oob = 0;
elbc_fcm_ctrl->use_mdr = 1;
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ elbc_fcm_ctrl->oob = 1;
+ } else {
+ WARN_ON(column != 0);
+ elbc_fcm_ctrl->oob = 0;
+ }
+
fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
(NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
(NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
@@ -416,16 +445,12 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(FIR_OP_CW1 << FIR_OP6_SHIFT) |
(FIR_OP_RS << FIR_OP7_SHIFT));
- if (column >= mtd->writesize) {
+ if (elbc_fcm_ctrl->oob)
/* OOB area --> READOOB */
- column -= mtd->writesize;
fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
- elbc_fcm_ctrl->oob = 1;
- } else {
- WARN_ON(column != 0);
+ else
/* First 256 bytes --> READ0 */
fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
- }
}
out_be32(&lbc->fcr, fcr);
@@ -435,7 +460,6 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
case NAND_CMD_PAGEPROG: {
- int full_page;
dev_vdbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
"writing %d bytes.\n", elbc_fcm_ctrl->index);
@@ -445,34 +469,13 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
* write so the HW generates the ECC.
*/
if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
- elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) {
- out_be32(&lbc->fbcr, elbc_fcm_ctrl->index);
- full_page = 0;
- } else {
+ elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
+ out_be32(&lbc->fbcr,
+ elbc_fcm_ctrl->index - elbc_fcm_ctrl->column);
+ else
out_be32(&lbc->fbcr, 0);
- full_page = 1;
- }
fsl_elbc_run_command(mtd);
-
- /* Read back the page in order to fill in the ECC for the
- * caller. Is this really needed?
- */
- if (full_page && elbc_fcm_ctrl->oob_poi) {
- out_be32(&lbc->fbcr, 3);
- set_addr(mtd, 6, page_addr, 1);
-
- elbc_fcm_ctrl->read_bytes = mtd->writesize + 9;
-
- fsl_elbc_do_read(chip, 1);
- fsl_elbc_run_command(mtd);
-
- memcpy_fromio(elbc_fcm_ctrl->oob_poi + 6,
- &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], 3);
- elbc_fcm_ctrl->index += 3;
- }
-
- elbc_fcm_ctrl->oob_poi = NULL;
return;
}
@@ -597,41 +600,6 @@ static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
len, avail);
}
-/*
- * Verify buffer against the FCM Controller Data Buffer
- */
-static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct nand_chip *chip = mtd->priv;
- struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
- int i;
-
- if (len < 0) {
- dev_err(priv->dev, "write_buf of %d bytes", len);
- return -EINVAL;
- }
-
- if ((unsigned int)len >
- elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index) {
- dev_err(priv->dev,
- "verify_buf beyond end of buffer "
- "(%d requested, %u available)\n",
- len, elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
-
- elbc_fcm_ctrl->index = elbc_fcm_ctrl->read_bytes;
- return -EINVAL;
- }
-
- for (i = 0; i < len; i++)
- if (in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index + i])
- != buf[i])
- break;
-
- elbc_fcm_ctrl->index += len;
- return i == len && elbc_fcm_ctrl->status == LTESR_CC ? 0 : -EIO;
-}
-
/* This function is called after Program and Erase Operations to
* check for success or failure.
*/
@@ -664,9 +632,7 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
if (chip->pagemask & 0xff000000)
al++;
- /* add to ECCM mode set in fsl_elbc_init */
- priv->fmr |= (12 << FMR_CWTO_SHIFT) | /* Timeout > 12 ms */
- (al << FMR_AL_SHIFT);
+ priv->fmr |= al << FMR_AL_SHIFT;
dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
chip->numchips);
@@ -684,8 +650,6 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
chip->page_shift);
dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
chip->phys_erase_shift);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->ecclayout = %p\n",
- chip->ecclayout);
dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
chip->ecc.mode);
dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
@@ -719,7 +683,6 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
&fsl_elbc_oob_lp_eccm1 :
&fsl_elbc_oob_lp_eccm0;
- chip->badblock_pattern = &largepage_memorybased;
}
} else {
dev_err(priv->dev,
@@ -731,34 +694,46 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
return 0;
}
-static int fsl_elbc_read_page(struct mtd_info *mtd,
- struct nand_chip *chip,
- uint8_t *buf,
- int page)
+static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
{
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+
fsl_elbc_read_buf(mtd, buf, mtd->writesize);
- fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ if (oob_required)
+ fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
mtd->ecc_stats.failed++;
- return 0;
+ return elbc_fcm_ctrl->max_bitflips;
}
/* ECC will be calculated automatically, and errors will be detected in
* waitfunc.
*/
-static void fsl_elbc_write_page(struct mtd_info *mtd,
- struct nand_chip *chip,
- const uint8_t *buf)
+static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
- struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+ fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+ fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, uint32_t data_len,
+ const uint8_t *buf, int oob_required)
+{
fsl_elbc_write_buf(mtd, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- elbc_fcm_ctrl->oob_poi = chip->oob_poi;
+ return 0;
}
static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
@@ -774,15 +749,16 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
priv->mtd.priv = chip;
priv->mtd.owner = THIS_MODULE;
- /* Set the ECCM according to the settings in bootloader.*/
- priv->fmr = in_be32(&lbc->fmr) & FMR_ECCM;
+ /* set timeout to maximum */
+ priv->fmr = 15 << FMR_CWTO_SHIFT;
+ if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS)
+ priv->fmr |= FMR_ECCM;
/* fill in nand_chip structure */
/* set up function call table */
chip->read_byte = fsl_elbc_read_byte;
chip->write_buf = fsl_elbc_write_buf;
chip->read_buf = fsl_elbc_read_buf;
- chip->verify_buf = fsl_elbc_verify_buf;
chip->select_chip = fsl_elbc_select_chip;
chip->cmdfunc = fsl_elbc_cmdfunc;
chip->waitfunc = fsl_elbc_wait;
@@ -791,14 +767,14 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->bbt_md = &bbt_mirror_descr;
/* set up nand options */
- chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR |
- NAND_USE_FLASH_BBT;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
chip->controller = &elbc_fcm_ctrl->controller;
chip->priv = priv;
chip->ecc.read_page = fsl_elbc_read_page;
chip->ecc.write_page = fsl_elbc_write_page;
+ chip->ecc.write_subpage = fsl_elbc_write_subpage;
/* If CS Base Register selects full hardware ECC then use it */
if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
@@ -809,6 +785,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
&fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0;
chip->ecc.size = 512;
chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
} else {
/* otherwise fall back to default software ECC */
chip->ecc.mode = NAND_ECC_SOFT;
@@ -829,26 +806,26 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
elbc_fcm_ctrl->chips[priv->bank] = NULL;
kfree(priv);
- kfree(elbc_fcm_ctrl);
return 0;
}
static DEFINE_MUTEX(fsl_elbc_nand_mutex);
-static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
+static int fsl_elbc_nand_probe(struct platform_device *pdev)
{
struct fsl_lbc_regs __iomem *lbc;
struct fsl_elbc_mtd *priv;
struct resource res;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
static const char *part_probe_types[]
- = { "cmdlinepart", "RedBoot", NULL };
- struct mtd_partition *parts;
+ = { "cmdlinepart", "RedBoot", "ofpart", NULL };
int ret;
int bank;
struct device *dev;
struct device_node *node = pdev->dev.of_node;
+ struct mtd_part_parser_data ppdata;
+ ppdata.of_node = pdev->dev.of_node;
if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
return -ENODEV;
lbc = fsl_lbc_ctrl_dev->regs;
@@ -883,7 +860,6 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
if (!fsl_lbc_ctrl_dev->nand) {
elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL);
if (!elbc_fcm_ctrl) {
- dev_err(dev, "failed to allocate memory\n");
mutex_unlock(&fsl_elbc_nand_mutex);
ret = -ENOMEM;
goto err;
@@ -901,7 +877,8 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
elbc_fcm_ctrl->chips[bank] = priv;
priv->bank = bank;
priv->ctrl = fsl_lbc_ctrl_dev;
- priv->dev = dev;
+ priv->dev = &pdev->dev;
+ dev_set_drvdata(priv->dev, priv);
priv->vbase = ioremap(res.start, resource_size(&res));
if (!priv->vbase) {
@@ -910,7 +887,7 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
goto err;
}
- priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
+ priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
if (!priv->mtd.name) {
ret = -ENOMEM;
goto err;
@@ -934,17 +911,8 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
/* First look for RedBoot table or partitions on the command
* line, these take precedence over device tree information */
- ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0);
- if (ret < 0)
- goto err;
-
- if (ret == 0) {
- ret = of_mtd_parse_partitions(priv->dev, node, &parts);
- if (ret < 0)
- goto err;
- }
-
- mtd_device_register(&priv->mtd, parts, ret);
+ mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata,
+ NULL, 0);
printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
(unsigned long long)res.start, priv->bank);
@@ -957,11 +925,10 @@ err:
static int fsl_elbc_nand_remove(struct platform_device *pdev)
{
- int i;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
- for (i = 0; i < MAX_BANKS; i++)
- if (elbc_fcm_ctrl->chips[i])
- fsl_elbc_chip_remove(elbc_fcm_ctrl->chips[i]);
+ struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
+
+ fsl_elbc_chip_remove(priv);
mutex_lock(&fsl_elbc_nand_mutex);
elbc_fcm_ctrl->counter--;
@@ -990,18 +957,7 @@ static struct platform_driver fsl_elbc_nand_driver = {
.remove = fsl_elbc_nand_remove,
};
-static int __init fsl_elbc_nand_init(void)
-{
- return platform_driver_register(&fsl_elbc_nand_driver);
-}
-
-static void __exit fsl_elbc_nand_exit(void)
-{
- platform_driver_unregister(&fsl_elbc_nand_driver);
-}
-
-module_init(fsl_elbc_nand_init);
-module_exit(fsl_elbc_nand_exit);
+module_platform_driver(fsl_elbc_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Freescale");
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
new file mode 100644
index 00000000000..2338124dd05
--- /dev/null
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -0,0 +1,1181 @@
+/*
+ * Freescale Integrated Flash Controller NAND driver
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc
+ *
+ * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/fsl_ifc.h>
+
+#define FSL_IFC_V1_1_0 0x01010000
+#define ERR_BYTE 0xFF /* Value returned for read
+ bytes when read failed */
+#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
+ for IFC NAND Machine */
+
+struct fsl_ifc_ctrl;
+
+/* mtd information per set */
+struct fsl_ifc_mtd {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct fsl_ifc_ctrl *ctrl;
+
+ struct device *dev;
+ int bank; /* Chip select bank number */
+ unsigned int bufnum_mask; /* bufnum = page & bufnum_mask */
+ u8 __iomem *vbase; /* Chip select base virtual address */
+};
+
+/* overview of the fsl ifc controller */
+struct fsl_ifc_nand_ctrl {
+ struct nand_hw_control controller;
+ struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
+
+ void __iomem *addr; /* Address of assigned IFC buffer */
+ unsigned int page; /* Last page written to / read from */
+ unsigned int read_bytes;/* Number of bytes read during command */
+ unsigned int column; /* Saved column from SEQIN */
+ unsigned int index; /* Pointer to next byte to 'read' */
+ unsigned int oob; /* Non zero if operating on OOB data */
+ unsigned int eccread; /* Non zero for a full-page ECC read */
+ unsigned int counter; /* counter for the initializations */
+ unsigned int max_bitflips; /* Saved during READ0 cmd */
+};
+
+static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
+
+/* 512-byte page with 4-bit ECC, 8-bit */
+static struct nand_ecclayout oob_512_8bit_ecc4 = {
+ .eccbytes = 8,
+ .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
+ .oobfree = { {0, 5}, {6, 2} },
+};
+
+/* 512-byte page with 4-bit ECC, 16-bit */
+static struct nand_ecclayout oob_512_16bit_ecc4 = {
+ .eccbytes = 8,
+ .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
+ .oobfree = { {2, 6}, },
+};
+
+/* 2048-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_2048_ecc4 = {
+ .eccbytes = 32,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ },
+ .oobfree = { {2, 6}, {40, 24} },
+};
+
+/* 4096-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_4096_ecc4 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ },
+ .oobfree = { {2, 6}, {72, 56} },
+};
+
+/* 4096-byte page size with 8-bit ECC -- requires 218-byte OOB */
+static struct nand_ecclayout oob_4096_ecc8 = {
+ .eccbytes = 128,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ },
+ .oobfree = { {2, 6}, {136, 82} },
+};
+
+/* 8192-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_8192_ecc4 = {
+ .eccbytes = 128,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ },
+ .oobfree = { {2, 6}, {136, 208} },
+};
+
+/* 8192-byte page size with 8-bit ECC -- requires 218-byte OOB */
+static struct nand_ecclayout oob_8192_ecc8 = {
+ .eccbytes = 256,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 258, 259, 260, 261, 262, 263,
+ },
+ .oobfree = { {2, 6}, {264, 80} },
+};
+
+/*
+ * Generic flash bbt descriptors
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 2, /* 0 on 8-bit small page */
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 2, /* 0 on 8-bit small page */
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+/*
+ * Set up the IFC hardware block and page address fields, and the ifc nand
+ * structure addr field to point to the correct IFC buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ int buf_num;
+
+ ifc_nand_ctrl->page = page_addr;
+ /* Program ROW0/COL0 */
+ iowrite32be(page_addr, &ifc->ifc_nand.row0);
+ iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
+
+ buf_num = page_addr & priv->bufnum_mask;
+
+ ifc_nand_ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2);
+ ifc_nand_ctrl->index = column;
+
+ /* for OOB data point to the second half of the buffer */
+ if (oob)
+ ifc_nand_ctrl->index += mtd->writesize;
+}
+
+static int is_blank(struct mtd_info *mtd, unsigned int bufnum)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2);
+ u32 __iomem *mainarea = (u32 __iomem *)addr;
+ u8 __iomem *oob = addr + mtd->writesize;
+ int i;
+
+ for (i = 0; i < mtd->writesize / 4; i++) {
+ if (__raw_readl(&mainarea[i]) != 0xffffffff)
+ return 0;
+ }
+
+ for (i = 0; i < chip->ecc.layout->eccbytes; i++) {
+ int pos = chip->ecc.layout->eccpos[i];
+
+ if (__raw_readb(&oob[pos]) != 0xff)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* returns the number of ecc errors reported for the given internal buffer */
+static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
+ u32 *eccstat, unsigned int bufnum)
+{
+ u32 reg = eccstat[bufnum / 4];
+ int errors;
+
+ errors = (reg >> ((3 - bufnum % 4) * 8)) & 15;
+
+ return errors;
+}
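
A hypothetical standalone restatement of that extraction, showing how a packed ECCSTAT word is sliced into per-buffer 4-bit error counts (the helper name is illustrative, not part of the driver):

#include <assert.h>
#include <stdint.h>

/* pull the 4-bit error count for one buffer out of the packed eccstat words */
static int errs_for_buf(const uint32_t *eccstat, unsigned int bufnum)
{
	uint32_t reg = eccstat[bufnum / 4];

	return (reg >> ((3 - bufnum % 4) * 8)) & 15;
}

int main(void)
{
	/* buffer 1 reports 2 corrected errors, buffer 3 reports 15 (uncorrectable) */
	uint32_t eccstat[1] = { 0x0002000f };

	assert(errs_for_buf(eccstat, 1) == 2);
	assert(errs_for_buf(eccstat, 3) == 15);
	return 0;
}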
+
+/*
+ * execute IFC NAND command and wait for it to complete
+ */
+static void fsl_ifc_run_command(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ u32 eccstat[4];
+ int i;
+
+ /* set the chip select for NAND Transaction */
+ iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT,
+ &ifc->ifc_nand.nand_csel);
+
+ dev_vdbg(priv->dev,
+ "%s: fir0=%08x fcr0=%08x\n",
+ __func__,
+ ioread32be(&ifc->ifc_nand.nand_fir0),
+ ioread32be(&ifc->ifc_nand.nand_fcr0));
+
+ ctrl->nand_stat = 0;
+
+ /* start read/write seq */
+ iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+ IFC_TIMEOUT_MSECS * HZ/1000);
+
+ /* ctrl->nand_stat will be updated from IRQ context */
+ if (!ctrl->nand_stat)
+ dev_err(priv->dev, "Controller is not responding\n");
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_FTOER)
+ dev_err(priv->dev, "NAND Flash Timeout Error\n");
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
+ dev_err(priv->dev, "NAND Flash Write Protect Error\n");
+
+ nctrl->max_bitflips = 0;
+
+ if (nctrl->eccread) {
+ int errors;
+ int bufnum = nctrl->page & priv->bufnum_mask;
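+		/* ECC status is reported per ECC sector; map this buffer to its range of sectors */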
+ int sector = bufnum * chip->ecc.steps;
+ int sector_end = sector + chip->ecc.steps - 1;
+
+ for (i = sector / 4; i <= sector_end / 4; i++)
+ eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]);
+
+ for (i = sector; i <= sector_end; i++) {
+ errors = check_read_ecc(mtd, ctrl, eccstat, i);
+
+ if (errors == 15) {
+ /*
+ * Uncorrectable error.
+ * OK only if the whole page is blank.
+ *
+				 * We disable ECCER reporting due to erratum
+				 * IFC-A002770, so report it now if we see an
+				 * uncorrectable error in ECCSTAT.
+ */
+ if (!is_blank(mtd, bufnum))
+ ctrl->nand_stat |=
+ IFC_NAND_EVTER_STAT_ECCER;
+ break;
+ }
+
+ mtd->ecc_stats.corrected += errors;
+ nctrl->max_bitflips = max_t(unsigned int,
+ nctrl->max_bitflips,
+ errors);
+ }
+
+ nctrl->eccread = 0;
+ }
+}
+
+static void fsl_ifc_do_read(struct nand_chip *chip,
+ int oob,
+ struct mtd_info *mtd)
+{
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+
+ /* Program FIR/IFC_NAND_FCR0 for Small/Large page */
+ if (mtd->writesize > 512) {
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+
+ iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
+ } else {
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+
+ if (oob)
+ iowrite32be(NAND_CMD_READOOB <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ else
+ iowrite32be(NAND_CMD_READ0 <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ }
+}
+
+/* cmdfunc sends commands to the IFC NAND Machine */
+static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+			int column, int page_addr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+
+ /* clear the read buffer */
+ ifc_nand_ctrl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ ifc_nand_ctrl->index = 0;
+
+ switch (command) {
+	/* READ0 reads the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ0:
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, page_addr, 0);
+
+ ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ ifc_nand_ctrl->index += column;
+
+ if (chip->ecc.mode == NAND_ECC_HW)
+ ifc_nand_ctrl->eccread = 1;
+
+ fsl_ifc_do_read(chip, 0, mtd);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, column, page_addr, 1);
+
+ ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+ fsl_ifc_do_read(chip, 1, mtd);
+ fsl_ifc_run_command(mtd);
+
+ return;
+
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM: {
+ int timing = IFC_FIR_OP_RB;
+ if (command == NAND_CMD_PARAM)
+ timing = IFC_FIR_OP_RBCD;
+
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (timing << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(column, &ifc->ifc_nand.row3);
+
+ /*
+ * although currently it's 8 bytes for READID, we always read
+		 * the maximum 256 bytes (for PARAM)
+ */
+ iowrite32be(256, &ifc->ifc_nand.nand_fbcr);
+ ifc_nand_ctrl->read_bytes = 256;
+
+ set_addr(mtd, 0, 0, 0);
+ fsl_ifc_run_command(mtd);
+ return;
+ }
+
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ set_addr(mtd, 0, page_addr, 0);
+ return;
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+
+ iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
+
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+ ifc_nand_ctrl->read_bytes = 0;
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN: {
+ u32 nand_fcr0;
+ ifc_nand_ctrl->column = column;
+ ifc_nand_ctrl->oob = 0;
+
+ if (mtd->writesize > 512) {
+ nand_fcr0 =
+ (NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
+
+ iowrite32be(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_RDSTAT <<
+ IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
+ } else {
+ nand_fcr0 = ((NAND_CMD_PAGEPROG <<
+ IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN <<
+ IFC_NAND_FCR0_CMD2_SHIFT) |
+ (NAND_CMD_STATUS <<
+ IFC_NAND_FCR0_CMD3_SHIFT));
+
+ iowrite32be(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_RDSTAT <<
+ IFC_NAND_FIR1_OP7_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
+
+ if (column >= mtd->writesize)
+ nand_fcr0 |=
+ NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT;
+ else
+ nand_fcr0 |=
+ NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT;
+ }
+
+ if (column >= mtd->writesize) {
+ /* OOB area --> READOOB */
+ column -= mtd->writesize;
+ ifc_nand_ctrl->oob = 1;
+ }
+ iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
+ set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
+ return;
+ }
+
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG: {
+ if (ifc_nand_ctrl->oob) {
+ iowrite32be(ifc_nand_ctrl->index -
+ ifc_nand_ctrl->column,
+ &ifc->ifc_nand.nand_fbcr);
+ } else {
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+ }
+
+ fsl_ifc_run_command(mtd);
+ return;
+ }
+
+ case NAND_CMD_STATUS:
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, 0, 0);
+ ifc_nand_ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
+ /*
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ if (chip->options & NAND_BUSWIDTH_16)
+ setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+ else
+ setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+ return;
+
+ case NAND_CMD_RESET:
+ iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ default:
+ dev_err(priv->dev, "%s: error, unsupported command 0x%x.\n",
+ __func__, command);
+ }
+}
+
+static void fsl_ifc_select_chip(struct mtd_info *mtd, int chip)
+{
+ /* The hardware does not seem to support multiple
+ * chips per bank.
+ */
+}
+
+/*
+ * Write buf to the IFC NAND Controller Data Buffer
+ */
+static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+ if (len <= 0) {
+ dev_err(priv->dev, "%s: len %d bytes", __func__, len);
+ return;
+ }
+
+ if ((unsigned int)len > bufsize - ifc_nand_ctrl->index) {
+ dev_err(priv->dev,
+ "%s: beyond end of buffer (%d requested, %u available)\n",
+ __func__, len, bufsize - ifc_nand_ctrl->index);
+ len = bufsize - ifc_nand_ctrl->index;
+ }
+
+ memcpy_toio(ifc_nand_ctrl->addr + ifc_nand_ctrl->index, buf, len);
+ ifc_nand_ctrl->index += len;
+}
+
+/*
+ * Read a byte from the IFC hardware buffer;
+ * read function for 8-bit buswidth
+ */
+static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ unsigned int offset;
+
+ /*
+ * If there are still bytes in the IFC buffer, then use the
+ * next byte.
+ */
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+ offset = ifc_nand_ctrl->index++;
+ return in_8(ifc_nand_ctrl->addr + offset);
+ }
+
+ dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+ return ERR_BYTE;
+}
+
+/*
+ * Read two bytes from the IFC hardware buffer
+ * read function for 16-bit buswidth
+ */
+static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ uint16_t data;
+
+ /*
+ * If there are still bytes in the IFC buffer, then use the
+ * next byte.
+ */
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+ data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
+ ifc_nand_ctrl->index += 2;
+ return (uint8_t) data;
+ }
+
+ dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+ return ERR_BYTE;
+}
+
+/*
+ * Read from the IFC Controller Data Buffer
+ */
+static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ int avail;
+
+ if (len < 0) {
+ dev_err(priv->dev, "%s: len %d bytes", __func__, len);
+ return;
+ }
+
+ avail = min((unsigned int)len,
+ ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
+ memcpy_fromio(buf, ifc_nand_ctrl->addr + ifc_nand_ctrl->index, avail);
+ ifc_nand_ctrl->index += avail;
+
+ if (len > avail)
+ dev_err(priv->dev,
+ "%s: beyond end of buffer (%d requested, %d available)\n",
+ __func__, len, avail);
+}
+
+/*
+ * This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ u32 nand_fsr;
+
+ /* Use READ_STATUS command, but wait for the device to be ready */
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, 0, 0);
+ ifc_nand_ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
+ nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr);
+
+ /*
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ return nand_fsr | NAND_STATUS_WP;
+}
+
+static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+
+ fsl_ifc_read_buf(mtd, buf, mtd->writesize);
+ if (oob_required)
+ fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
+ dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
+
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ mtd->ecc_stats.failed++;
+
+ return nctrl->max_bitflips;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ fsl_ifc_write_buf(mtd, buf, mtd->writesize);
+ fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+
+ dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
+ chip->numchips);
+ dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
+ chip->chipsize);
+ dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
+ chip->pagemask);
+ dev_dbg(priv->dev, "%s: nand->chip_delay = %d\n", __func__,
+ chip->chip_delay);
+ dev_dbg(priv->dev, "%s: nand->badblockpos = %d\n", __func__,
+ chip->badblockpos);
+ dev_dbg(priv->dev, "%s: nand->chip_shift = %d\n", __func__,
+ chip->chip_shift);
+ dev_dbg(priv->dev, "%s: nand->page_shift = %d\n", __func__,
+ chip->page_shift);
+ dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
+ chip->phys_erase_shift);
+ dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,
+ chip->ecc.mode);
+ dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
+ chip->ecc.steps);
+ dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
+ chip->ecc.bytes);
+ dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
+ chip->ecc.total);
+ dev_dbg(priv->dev, "%s: nand->ecc.layout = %p\n", __func__,
+ chip->ecc.layout);
+ dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
+ dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
+ dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
+ mtd->erasesize);
+ dev_dbg(priv->dev, "%s: mtd->writesize = %d\n", __func__,
+ mtd->writesize);
+ dev_dbg(priv->dev, "%s: mtd->oobsize = %d\n", __func__,
+ mtd->oobsize);
+
+ return 0;
+}
+
+static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
+{
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
+ uint32_t cs = priv->bank;
+
+ /* Save CSOR and CSOR_ext */
+ csor = ioread32be(&ifc->csor_cs[cs].csor);
+ csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext);
+
+	/* change PageSize to 8K and SpareSize to 1K */
+ csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
+ iowrite32be(csor_8k, &ifc->csor_cs[cs].csor);
+ iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext);
+
+ /* READID */
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(0x0, &ifc->ifc_nand.row3);
+
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr);
+
+ /* Program ROW0/COL0 */
+ iowrite32be(0x0, &ifc->ifc_nand.row0);
+ iowrite32be(0x0, &ifc->ifc_nand.col0);
+
+ /* set the chip select for NAND Transaction */
+ iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
+
+ /* start read seq */
+ iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+ IFC_TIMEOUT_MSECS * HZ/1000);
+
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
+
+ /* Restore CSOR and CSOR_ext */
+ iowrite32be(csor, &ifc->csor_cs[cs].csor);
+ iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext);
+}
+
+static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
+{
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ struct nand_chip *chip = &priv->chip;
+ struct nand_ecclayout *layout;
+ u32 csor, ver;
+
+ /* Fill in fsl_ifc_mtd structure */
+ priv->mtd.priv = chip;
+ priv->mtd.owner = THIS_MODULE;
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+ if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+ chip->read_byte = fsl_ifc_read_byte16;
+ else
+ chip->read_byte = fsl_ifc_read_byte;
+
+ chip->write_buf = fsl_ifc_write_buf;
+ chip->read_buf = fsl_ifc_read_buf;
+ chip->select_chip = fsl_ifc_select_chip;
+ chip->cmdfunc = fsl_ifc_cmdfunc;
+ chip->waitfunc = fsl_ifc_wait;
+
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+
+ iowrite32be(0x0, &ifc->ifc_nand.ncfgr);
+
+ /* set up nand options */
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ chip->options = NAND_NO_SUBPAGE_WRITE;
+
+ if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+ chip->read_byte = fsl_ifc_read_byte16;
+ chip->options |= NAND_BUSWIDTH_16;
+ } else {
+ chip->read_byte = fsl_ifc_read_byte;
+ }
+
+ chip->controller = &ifc_nand_ctrl->controller;
+ chip->priv = priv;
+
+ chip->ecc.read_page = fsl_ifc_read_page;
+ chip->ecc.write_page = fsl_ifc_write_page;
+
+ csor = ioread32be(&ifc->csor_cs[priv->bank].csor);
+
+ /* Hardware generates ECC per 512 Bytes */
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 8;
+ chip->ecc.strength = 4;
+
+ switch (csor & CSOR_NAND_PGS_MASK) {
+ case CSOR_NAND_PGS_512:
+ if (chip->options & NAND_BUSWIDTH_16) {
+ layout = &oob_512_16bit_ecc4;
+ } else {
+ layout = &oob_512_8bit_ecc4;
+
+ /* Avoid conflict with bad block marker */
+ bbt_main_descr.offs = 0;
+ bbt_mirror_descr.offs = 0;
+ }
+
+ priv->bufnum_mask = 15;
+ break;
+
+ case CSOR_NAND_PGS_2K:
+ layout = &oob_2048_ecc4;
+ priv->bufnum_mask = 3;
+ break;
+
+ case CSOR_NAND_PGS_4K:
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
+ CSOR_NAND_ECC_MODE_4) {
+ layout = &oob_4096_ecc4;
+ } else {
+ layout = &oob_4096_ecc8;
+ chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
+ }
+
+ priv->bufnum_mask = 1;
+ break;
+
+ case CSOR_NAND_PGS_8K:
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
+ CSOR_NAND_ECC_MODE_4) {
+ layout = &oob_8192_ecc4;
+ } else {
+ layout = &oob_8192_ecc8;
+ chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
+ }
+
+ priv->bufnum_mask = 0;
+ break;
+
+ default:
+ dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
+ return -ENODEV;
+ }
+
+ /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
+ if (csor & CSOR_NAND_ECC_DEC_EN) {
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.layout = layout;
+ } else {
+ chip->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ ver = ioread32be(&ifc->ifc_rev);
+ if (ver == FSL_IFC_V1_1_0)
+ fsl_ifc_sram_init(priv);
+
+ return 0;
+}
+
+static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
+{
+ nand_release(&priv->mtd);
+
+ kfree(priv->mtd.name);
+
+ if (priv->vbase)
+ iounmap(priv->vbase);
+
+ ifc_nand_ctrl->chips[priv->bank] = NULL;
+
+ return 0;
+}
+
+static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
+ phys_addr_t addr)
+{
+ u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr);
+
+ if (!(cspr & CSPR_V))
+ return 0;
+ if ((cspr & CSPR_MSEL) != CSPR_MSEL_NAND)
+ return 0;
+
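+	/* match if the programmed chip-select base address equals the requested address */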
+ return (cspr & CSPR_BA) == convert_ifc_address(addr);
+}
+
+static DEFINE_MUTEX(fsl_ifc_nand_mutex);
+
+static int fsl_ifc_nand_probe(struct platform_device *dev)
+{
+ struct fsl_ifc_regs __iomem *ifc;
+ struct fsl_ifc_mtd *priv;
+ struct resource res;
+ static const char *part_probe_types[]
+ = { "cmdlinepart", "RedBoot", "ofpart", NULL };
+ int ret;
+ int bank;
+ struct device_node *node = dev->dev.of_node;
+ struct mtd_part_parser_data ppdata;
+
+ ppdata.of_node = dev->dev.of_node;
+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
+ return -ENODEV;
+ ifc = fsl_ifc_ctrl_dev->regs;
+
+ /* get, allocate and map the memory resource */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(&dev->dev, "%s: failed to get resource\n", __func__);
+ return ret;
+ }
+
+ /* find which chip select it is connected to */
+ for (bank = 0; bank < FSL_IFC_BANK_COUNT; bank++) {
+ if (match_bank(ifc, bank, res.start))
+ break;
+ }
+
+ if (bank >= FSL_IFC_BANK_COUNT) {
+ dev_err(&dev->dev, "%s: address did not match any chip selects\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_lock(&fsl_ifc_nand_mutex);
+ if (!fsl_ifc_ctrl_dev->nand) {
+ ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
+ if (!ifc_nand_ctrl) {
+ mutex_unlock(&fsl_ifc_nand_mutex);
+ return -ENOMEM;
+ }
+
+ ifc_nand_ctrl->read_bytes = 0;
+ ifc_nand_ctrl->index = 0;
+ ifc_nand_ctrl->addr = NULL;
+ fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;
+
+ spin_lock_init(&ifc_nand_ctrl->controller.lock);
+ init_waitqueue_head(&ifc_nand_ctrl->controller.wq);
+ } else {
+ ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
+ }
+ mutex_unlock(&fsl_ifc_nand_mutex);
+
+ ifc_nand_ctrl->chips[bank] = priv;
+ priv->bank = bank;
+ priv->ctrl = fsl_ifc_ctrl_dev;
+ priv->dev = &dev->dev;
+
+ priv->vbase = ioremap(res.start, resource_size(&res));
+ if (!priv->vbase) {
+ dev_err(priv->dev, "%s: failed to map chip region\n", __func__);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev_set_drvdata(priv->dev, priv);
+
+ iowrite32be(IFC_NAND_EVTER_EN_OPC_EN |
+ IFC_NAND_EVTER_EN_FTOER_EN |
+ IFC_NAND_EVTER_EN_WPER_EN,
+ &ifc->ifc_nand.nand_evter_en);
+
+ /* enable NAND Machine Interrupts */
+ iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN |
+ IFC_NAND_EVTER_INTR_FTOERIR_EN |
+ IFC_NAND_EVTER_INTR_WPERIR_EN,
+ &ifc->ifc_nand.nand_evter_intr_en);
+ priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
+ if (!priv->mtd.name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = fsl_ifc_chip_init(priv);
+ if (ret)
+ goto err;
+
+ ret = nand_scan_ident(&priv->mtd, 1, NULL);
+ if (ret)
+ goto err;
+
+ ret = fsl_ifc_chip_init_tail(&priv->mtd);
+ if (ret)
+ goto err;
+
+ ret = nand_scan_tail(&priv->mtd);
+ if (ret)
+ goto err;
+
+	/* First look for RedBoot table or partitions on the command
+	 * line; these take precedence over device tree information */
+ mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata,
+ NULL, 0);
+
+ dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
+ (unsigned long long)res.start, priv->bank);
+ return 0;
+
+err:
+ fsl_ifc_chip_remove(priv);
+ return ret;
+}
+
+static int fsl_ifc_nand_remove(struct platform_device *dev)
+{
+ struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
+
+ fsl_ifc_chip_remove(priv);
+
+ mutex_lock(&fsl_ifc_nand_mutex);
+ ifc_nand_ctrl->counter--;
+ if (!ifc_nand_ctrl->counter) {
+ fsl_ifc_ctrl_dev->nand = NULL;
+ kfree(ifc_nand_ctrl);
+ }
+ mutex_unlock(&fsl_ifc_nand_mutex);
+
+ return 0;
+}
+
+static const struct of_device_id fsl_ifc_nand_match[] = {
+ {
+ .compatible = "fsl,ifc-nand",
+ },
+ {}
+};
+
+static struct platform_driver fsl_ifc_nand_driver = {
+ .driver = {
+ .name = "fsl,ifc-nand",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_ifc_nand_match,
+ },
+ .probe = fsl_ifc_nand_probe,
+ .remove = fsl_ifc_nand_remove,
+};
+
+module_platform_driver(fsl_ifc_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale");
+MODULE_DESCRIPTION("Freescale Integrated Flash Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 23752fd5bc5..4d203e84e8c 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -18,6 +18,7 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/mtd.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/io.h>
@@ -152,13 +153,13 @@ static void fun_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
fun_wait_rnb(fun);
}
-static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
- const struct device_node *upm_np,
- const struct resource *io_res)
+static int fun_chip_init(struct fsl_upm_nand *fun,
+ const struct device_node *upm_np,
+ const struct resource *io_res)
{
int ret;
struct device_node *flash_np;
- static const char *part_types[] = { "cmdlinepart", NULL, };
+ struct mtd_part_parser_data ppdata;
fun->chip.IO_ADDR_R = fun->io_base;
fun->chip.IO_ADDR_W = fun->io_base;
@@ -192,22 +193,16 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
if (ret)
goto err;
- ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
-
-#ifdef CONFIG_MTD_OF_PARTS
- if (ret == 0) {
- ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts);
- if (ret < 0)
- goto err;
- }
-#endif
- ret = mtd_device_register(&fun->mtd, fun->parts, ret);
+ ppdata.of_node = flash_np;
+ ret = mtd_device_parse_register(&fun->mtd, NULL, &ppdata, NULL, 0);
err:
of_node_put(flash_np);
+ if (ret)
+ kfree(fun->mtd.name);
return ret;
}
-static int __devinit fun_probe(struct platform_device *ofdev)
+static int fun_probe(struct platform_device *ofdev)
{
struct fsl_upm_nand *fun;
struct resource io_res;
@@ -324,7 +319,7 @@ err1:
return ret;
}
-static int __devexit fun_remove(struct platform_device *ofdev)
+static int fun_remove(struct platform_device *ofdev)
{
struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
int i;
@@ -356,20 +351,10 @@ static struct platform_driver of_fun_driver = {
.of_match_table = of_fun_match,
},
.probe = fun_probe,
- .remove = __devexit_p(fun_remove),
+ .remove = fun_remove,
};
-static int __init fun_module_init(void)
-{
- return platform_driver_register(&of_fun_driver);
-}
-module_init(fun_module_init);
-
-static void __exit fun_module_exit(void)
-{
- platform_driver_unregister(&of_fun_driver);
-}
-module_exit(fun_module_exit);
+module_platform_driver(of_fun_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index e9b275ac381..1550692973d 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -17,6 +17,10 @@
*/
#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -27,6 +31,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -34,7 +39,7 @@
#include <linux/amba/bus.h>
#include <mtd/mtd-abi.h>
-static struct nand_ecclayout fsmc_ecc1_layout = {
+static struct nand_ecclayout fsmc_ecc1_128_layout = {
.eccbytes = 24,
.eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52,
66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116},
@@ -50,7 +55,127 @@ static struct nand_ecclayout fsmc_ecc1_layout = {
}
};
-static struct nand_ecclayout fsmc_ecc4_lp_layout = {
+static struct nand_ecclayout fsmc_ecc1_64_layout = {
+ .eccbytes = 12,
+ .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52},
+ .oobfree = {
+ {.offset = 8, .length = 8},
+ {.offset = 24, .length = 8},
+ {.offset = 40, .length = 8},
+ {.offset = 56, .length = 8},
+ }
+};
+
+static struct nand_ecclayout fsmc_ecc1_16_layout = {
+ .eccbytes = 3,
+ .eccpos = {2, 3, 4},
+ .oobfree = {
+ {.offset = 8, .length = 8},
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 8192 bytes & OOBsize 256 bytes. 13*16 bytes
+ * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 46
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_256_layout = {
+ .eccbytes = 208,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78,
+ 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94,
+ 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110,
+ 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126,
+ 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142,
+ 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158,
+ 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174,
+ 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190,
+ 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206,
+ 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222,
+ 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238,
+ 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 3},
+ {.offset = 79, .length = 3},
+ {.offset = 95, .length = 3},
+ {.offset = 111, .length = 3},
+ {.offset = 127, .length = 3},
+ {.offset = 143, .length = 3},
+ {.offset = 159, .length = 3},
+ {.offset = 175, .length = 3},
+ {.offset = 191, .length = 3},
+ {.offset = 207, .length = 3},
+ {.offset = 223, .length = 3},
+ {.offset = 239, .length = 3},
+ {.offset = 255, .length = 1}
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes. 13*8 bytes
+ * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 118
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_224_layout = {
+ .eccbytes = 104,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78,
+ 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94,
+ 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110,
+ 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 3},
+ {.offset = 79, .length = 3},
+ {.offset = 95, .length = 3},
+ {.offset = 111, .length = 3},
+ {.offset = 127, .length = 97}
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 128 bytes. 13*8 bytes
+ * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 22
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_128_layout = {
.eccbytes = 104,
.eccpos = { 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14,
@@ -82,6 +207,45 @@ static struct nand_ecclayout fsmc_ecc4_lp_layout = {
};
/*
+ * ECC4 layout for NAND of pagesize 2048 bytes & OOBsize 64 bytes. 13*4 bytes of
+ * OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 10
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_64_layout = {
+ .eccbytes = 52,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 1},
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 512 bytes & OOBsize 16 bytes. 13 bytes of
+ * OOB size is reserved for ECC, Byte no. 4 & 5 reserved for bad block and one
+ * byte is free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_16_layout = {
+ .eccbytes = 13,
+ .eccpos = { 0, 1, 2, 3, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14
+ },
+ .oobfree = {
+ {.offset = 15, .length = 1},
+ }
+};
+
+/*
* ECC placement definitions in oobfree type format.
* There are 13 bytes of ecc for every 512 byte block and it has to be read
* consecutively and immediately after the 512 byte data block for hardware to
@@ -103,16 +267,6 @@ static struct fsmc_eccplace fsmc_ecc4_lp_place = {
}
};
-static struct nand_ecclayout fsmc_ecc4_sp_layout = {
- .eccbytes = 13,
- .eccpos = { 0, 1, 2, 3, 6, 7, 8,
- 9, 10, 11, 12, 13, 14
- },
- .oobfree = {
- {.offset = 15, .length = 1},
- }
-};
-
static struct fsmc_eccplace fsmc_ecc4_sp_place = {
.eccplace = {
{.offset = 0, .length = 4},
@@ -120,67 +274,6 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
}
};
-/*
- * Default partition tables to be used if the partition information not
- * provided through platform data.
- *
- * Default partition layout for small page(= 512 bytes) devices
- * Size for "Root file system" is updated in driver based on actual device size
- */
-static struct mtd_partition partition_info_16KB_blk[] = {
- {
- .name = "X-loader",
- .offset = 0,
- .size = 4*0x4000,
- },
- {
- .name = "U-Boot",
- .offset = 0x10000,
- .size = 20*0x4000,
- },
- {
- .name = "Kernel",
- .offset = 0x60000,
- .size = 256*0x4000,
- },
- {
- .name = "Root File System",
- .offset = 0x460000,
- .size = 0,
- },
-};
-
-/*
- * Default partition layout for large page(> 512 bytes) devices
- * Size for "Root file system" is updated in driver based on actual device size
- */
-static struct mtd_partition partition_info_128KB_blk[] = {
- {
- .name = "X-loader",
- .offset = 0,
- .size = 4*0x20000,
- },
- {
- .name = "U-Boot",
- .offset = 0x80000,
- .size = 12*0x20000,
- },
- {
- .name = "Kernel",
- .offset = 0x200000,
- .size = 48*0x20000,
- },
- {
- .name = "Root File System",
- .offset = 0x800000,
- .size = 0,
- },
-};
-
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
-
/**
* struct fsmc_nand_data - structure for FSMC NAND device state
*
@@ -194,6 +287,11 @@ const char *part_probes[] = { "cmdlinepart", NULL };
* @bank: Bank number for probed device.
* @clk: Clock structure for FSMC.
*
+ * @read_dma_chan: DMA channel for read access
+ * @write_dma_chan: DMA channel for write access to NAND
+ * @dma_access_complete: Completion structure
+ *
+ * @data_pa: NAND Physical port for Data.
* @data_va: NAND port for Data.
* @cmd_va: NAND port for Command.
* @addr_va: NAND port for Address.
@@ -208,13 +306,18 @@ struct fsmc_nand_data {
struct fsmc_eccplace *ecc_place;
unsigned int bank;
+ struct device *dev;
+ enum access_mode mode;
struct clk *clk;
- struct resource *resregs;
- struct resource *rescmd;
- struct resource *resaddr;
- struct resource *resdata;
+ /* DMA related objects */
+ struct dma_chan *read_dma_chan;
+ struct dma_chan *write_dma_chan;
+ struct completion dma_access_complete;
+ struct fsmc_nand_timings *dev_timings;
+
+ dma_addr_t data_pa;
void __iomem *data_va;
void __iomem *cmd_va;
void __iomem *addr_va;
@@ -258,34 +361,35 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
struct nand_chip *this = mtd->priv;
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
- struct fsmc_regs *regs = host->regs_va;
+ void __iomem *regs = host->regs_va;
unsigned int bank = host->bank;
if (ctrl & NAND_CTRL_CHANGE) {
+ u32 pc;
+
if (ctrl & NAND_CLE) {
- this->IO_ADDR_R = (void __iomem *)host->cmd_va;
- this->IO_ADDR_W = (void __iomem *)host->cmd_va;
+ this->IO_ADDR_R = host->cmd_va;
+ this->IO_ADDR_W = host->cmd_va;
} else if (ctrl & NAND_ALE) {
- this->IO_ADDR_R = (void __iomem *)host->addr_va;
- this->IO_ADDR_W = (void __iomem *)host->addr_va;
+ this->IO_ADDR_R = host->addr_va;
+ this->IO_ADDR_W = host->addr_va;
} else {
- this->IO_ADDR_R = (void __iomem *)host->data_va;
- this->IO_ADDR_W = (void __iomem *)host->data_va;
+ this->IO_ADDR_R = host->data_va;
+ this->IO_ADDR_W = host->data_va;
}
- if (ctrl & NAND_NCE) {
- writel(readl(&regs->bank_regs[bank].pc) | FSMC_ENABLE,
- &regs->bank_regs[bank].pc);
- } else {
- writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ENABLE,
- &regs->bank_regs[bank].pc);
- }
+ pc = readl(FSMC_NAND_REG(regs, bank, PC));
+ if (ctrl & NAND_NCE)
+ pc |= FSMC_ENABLE;
+ else
+ pc &= ~FSMC_ENABLE;
+ writel_relaxed(pc, FSMC_NAND_REG(regs, bank, PC));
}
mb();
if (cmd != NAND_CMD_NONE)
- writeb(cmd, this->IO_ADDR_W);
+ writeb_relaxed(cmd, this->IO_ADDR_W);
}
/*
@@ -294,22 +398,46 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
* This routine initializes timing parameters related to NAND memory access in
* FSMC registers
*/
-static void __init fsmc_nand_setup(struct fsmc_regs *regs, uint32_t bank,
- uint32_t busw)
+static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
+ uint32_t busw, struct fsmc_nand_timings *timings)
{
uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
+ uint32_t tclr, tar, thiz, thold, twait, tset;
+ struct fsmc_nand_timings *tims;
+ struct fsmc_nand_timings default_timings = {
+ .tclr = FSMC_TCLR_1,
+ .tar = FSMC_TAR_1,
+ .thiz = FSMC_THIZ_1,
+ .thold = FSMC_THOLD_4,
+ .twait = FSMC_TWAIT_6,
+ .tset = FSMC_TSET_0,
+ };
+
+ if (timings)
+ tims = timings;
+ else
+ tims = &default_timings;
+
+ tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
+ tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
+ thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
+ thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
+ twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
+ tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
if (busw)
- writel(value | FSMC_DEVWID_16, &regs->bank_regs[bank].pc);
+ writel_relaxed(value | FSMC_DEVWID_16,
+ FSMC_NAND_REG(regs, bank, PC));
else
- writel(value | FSMC_DEVWID_8, &regs->bank_regs[bank].pc);
-
- writel(readl(&regs->bank_regs[bank].pc) | FSMC_TCLR_1 | FSMC_TAR_1,
- &regs->bank_regs[bank].pc);
- writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
- &regs->bank_regs[bank].comm);
- writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
- &regs->bank_regs[bank].attrib);
+ writel_relaxed(value | FSMC_DEVWID_8,
+ FSMC_NAND_REG(regs, bank, PC));
+
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
+ FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(thiz | thold | twait | tset,
+ FSMC_NAND_REG(regs, bank, COMM));
+ writel_relaxed(thiz | thold | twait | tset,
+ FSMC_NAND_REG(regs, bank, ATTRIB));
}
/*
@@ -319,15 +447,15 @@ static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
{
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
- struct fsmc_regs *regs = host->regs_va;
+ void __iomem *regs = host->regs_va;
uint32_t bank = host->bank;
- writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCPLEN_256,
- &regs->bank_regs[bank].pc);
- writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCEN,
- &regs->bank_regs[bank].pc);
- writel(readl(&regs->bank_regs[bank].pc) | FSMC_ECCEN,
- &regs->bank_regs[bank].pc);
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
+ FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
+ FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
+ FSMC_NAND_REG(regs, bank, PC));
}
/*
@@ -340,37 +468,42 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
{
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
- struct fsmc_regs *regs = host->regs_va;
+ void __iomem *regs = host->regs_va;
uint32_t bank = host->bank;
uint32_t ecc_tmp;
unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
do {
- if (readl(&regs->bank_regs[bank].sts) & FSMC_CODE_RDY)
+ if (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
break;
else
cond_resched();
} while (!time_after_eq(jiffies, deadline));
- ecc_tmp = readl(&regs->bank_regs[bank].ecc1);
+ if (time_after_eq(jiffies, deadline)) {
+ dev_err(host->dev, "calculate ecc timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
ecc[0] = (uint8_t) (ecc_tmp >> 0);
ecc[1] = (uint8_t) (ecc_tmp >> 8);
ecc[2] = (uint8_t) (ecc_tmp >> 16);
ecc[3] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(&regs->bank_regs[bank].ecc2);
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
ecc[4] = (uint8_t) (ecc_tmp >> 0);
ecc[5] = (uint8_t) (ecc_tmp >> 8);
ecc[6] = (uint8_t) (ecc_tmp >> 16);
ecc[7] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(&regs->bank_regs[bank].ecc3);
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
ecc[8] = (uint8_t) (ecc_tmp >> 0);
ecc[9] = (uint8_t) (ecc_tmp >> 8);
ecc[10] = (uint8_t) (ecc_tmp >> 16);
ecc[11] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(&regs->bank_regs[bank].sts);
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
ecc[12] = (uint8_t) (ecc_tmp >> 16);
return 0;
@@ -386,11 +519,11 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
{
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
- struct fsmc_regs *regs = host->regs_va;
+ void __iomem *regs = host->regs_va;
uint32_t bank = host->bank;
uint32_t ecc_tmp;
- ecc_tmp = readl(&regs->bank_regs[bank].ecc1);
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
ecc[0] = (uint8_t) (ecc_tmp >> 0);
ecc[1] = (uint8_t) (ecc_tmp >> 8);
ecc[2] = (uint8_t) (ecc_tmp >> 16);
@@ -398,11 +531,176 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
return 0;
}
+/* Count the number of 0 bits in buff, up to a maximum of max_bits */
+static int count_written_bits(uint8_t *buff, int size, int max_bits)
+{
+ int k, written_bits = 0;
+
+ for (k = 0; k < size; k++) {
+ written_bits += hweight8(~buff[k]);
+ if (written_bits > max_bits)
+ break;
+ }
+
+ return written_bits;
+}
+
+static void dma_complete(void *param)
+{
+ struct fsmc_nand_data *host = param;
+
+ complete(&host->dma_access_complete);
+}
+
+static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
+ enum dma_data_direction direction)
+{
+ struct dma_chan *chan;
+ struct dma_device *dma_dev;
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t dma_dst, dma_src, dma_addr;
+ dma_cookie_t cookie;
+ unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int ret;
+
+ if (direction == DMA_TO_DEVICE)
+ chan = host->write_dma_chan;
+ else if (direction == DMA_FROM_DEVICE)
+ chan = host->read_dma_chan;
+ else
+ return -EINVAL;
+
+ dma_dev = chan->device;
+ dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
+
+ if (direction == DMA_TO_DEVICE) {
+ dma_src = dma_addr;
+ dma_dst = host->data_pa;
+ } else {
+ dma_src = host->data_pa;
+ dma_dst = dma_addr;
+ }
+
+ tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
+ len, flags);
+ if (!tx) {
+ dev_err(host->dev, "device_prep_dma_memcpy error\n");
+ ret = -EIO;
+ goto unmap_dma;
+ }
+
+ tx->callback = dma_complete;
+ tx->callback_param = host;
+ cookie = tx->tx_submit(tx);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(host->dev, "dma_submit_error %d\n", cookie);
+ goto unmap_dma;
+ }
+
+ dma_async_issue_pending(chan);
+
+ ret =
+ wait_for_completion_timeout(&host->dma_access_complete,
+ msecs_to_jiffies(3000));
+ if (ret <= 0) {
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dev_err(host->dev, "wait_for_completion_timeout\n");
+ if (!ret)
+ ret = -ETIMEDOUT;
+ goto unmap_dma;
+ }
+
+ ret = 0;
+
+unmap_dma:
+ dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
+
+ return ret;
+}
+
+/*
+ * fsmc_write_buf - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+ IS_ALIGNED(len, sizeof(uint32_t))) {
+ uint32_t *p = (uint32_t *)buf;
+ len = len >> 2;
+ for (i = 0; i < len; i++)
+ writel_relaxed(p[i], chip->IO_ADDR_W);
+ } else {
+ for (i = 0; i < len; i++)
+ writeb_relaxed(buf[i], chip->IO_ADDR_W);
+ }
+}
+
+/*
+ * fsmc_read_buf - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+ IS_ALIGNED(len, sizeof(uint32_t))) {
+ uint32_t *p = (uint32_t *)buf;
+ len = len >> 2;
+ for (i = 0; i < len; i++)
+ p[i] = readl_relaxed(chip->IO_ADDR_R);
+ } else {
+ for (i = 0; i < len; i++)
+ buf[i] = readb_relaxed(chip->IO_ADDR_R);
+ }
+}
+
+/*
+ * fsmc_read_buf_dma - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct fsmc_nand_data *host;
+
+ host = container_of(mtd, struct fsmc_nand_data, mtd);
+ dma_xfer(host, buf, len, DMA_FROM_DEVICE);
+}
+
+/*
+ * fsmc_write_buf_dma - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct fsmc_nand_data *host;
+
+ host = container_of(mtd, struct fsmc_nand_data, mtd);
+ dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
+}
+
/*
* fsmc_read_page_hwecc
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller expects OOB data read to chip->oob_poi
* @page: page number to read
*
* This routine is needed for fsmc version 8 as reading from NAND chip has to be
@@ -412,7 +710,7 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
* max of 8 bits)
*/
static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
@@ -431,9 +729,9 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
*/
uint16_t ecc_oob[7];
uint8_t *oob = (uint8_t *)&ecc_oob[0];
+ unsigned int max_bitflips = 0;
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
-
chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
chip->ecc.hwctl(mtd, NAND_ECC_READ);
chip->read_buf(mtd, p, eccsize);
@@ -444,31 +742,35 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
group++;
/*
- * length is intentionally kept a higher multiple of 2
- * to read at least 13 bytes even in case of 16 bit NAND
- * devices
- */
- len = roundup(len, 2);
+ * length is intentionally kept a higher multiple of 2
+ * to read at least 13 bytes even in case of 16 bit NAND
+ * devices
+ */
+ if (chip->options & NAND_BUSWIDTH_16)
+ len = roundup(len, 2);
+
chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
chip->read_buf(mtd, oob + j, len);
j += len;
}
- memcpy(&ecc_code[i], oob, 13);
+ memcpy(&ecc_code[i], oob, chip->ecc.bytes);
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/*
- * fsmc_correct_data
+ * fsmc_bch8_correct_data
* @mtd: mtd info structure
* @dat: buffer of read data
* @read_ecc: ecc read from device spare area
@@ -477,19 +779,51 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
* calc_ecc is a 104 bit information containing maximum of 8 error
* offset informations of 13 bits each in 512 bytes of read data.
*/
-static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
+static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
uint8_t *read_ecc, uint8_t *calc_ecc)
{
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
- struct fsmc_regs *regs = host->regs_va;
+ struct nand_chip *chip = mtd->priv;
+ void __iomem *regs = host->regs_va;
unsigned int bank = host->bank;
- uint16_t err_idx[8];
- uint64_t ecc_data[2];
+ uint32_t err_idx[8];
uint32_t num_err, i;
+ uint32_t ecc1, ecc2, ecc3, ecc4;
- /* The calculated ecc is actually the correction index in data */
- memcpy(ecc_data, calc_ecc, 13);
+ num_err = (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
+
+ /* no bit flipping */
+ if (likely(num_err == 0))
+ return 0;
+
+ /* too many errors */
+ if (unlikely(num_err > 8)) {
+ /*
+ * This is a temporary erase check. A newly erased page read
+ * would result in an ecc error because the oob data is also
+		 * erased to FF and the calculated ecc for all-FF data is not
+ * FF..FF.
+ * This is a workaround to skip performing correction in case
+ * data is FF..FF
+ *
+ * Logic:
+		 * For every page, each bit written as 0 is counted until the
+		 * number of such bits exceeds 8 (the maximum correction
+		 * capability of FSMC for each 512 + 13 bytes)
+ */
+
+ int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
+ int bits_data = count_written_bits(dat, chip->ecc.size, 8);
+
+ if ((bits_ecc + bits_data) <= 8) {
+ if (bits_data)
+ memset(dat, 0xff, chip->ecc.size);
+ return bits_data;
+ }
+
+ return -EBADMSG;
+ }
/*
* ------------------- calc_ecc[] bit wise -----------|--13 bits--|
@@ -500,27 +834,26 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
* uint64_t array and error offset indexes are populated in err_idx
* array
*/
- for (i = 0; i < 8; i++) {
- if (i == 4) {
- err_idx[4] = ((ecc_data[1] & 0x1) << 12) | ecc_data[0];
- ecc_data[1] >>= 1;
- continue;
- }
- err_idx[i] = (ecc_data[i/4] & 0x1FFF);
- ecc_data[i/4] >>= 13;
- }
-
- num_err = (readl(&regs->bank_regs[bank].sts) >> 10) & 0xF;
-
- if (num_err == 0xF)
- return -EBADMSG;
+ ecc1 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc2 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
+ ecc3 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
+ ecc4 = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
+
+ err_idx[0] = (ecc1 >> 0) & 0x1FFF;
+ err_idx[1] = (ecc1 >> 13) & 0x1FFF;
+ err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
+ err_idx[3] = (ecc2 >> 7) & 0x1FFF;
+ err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
+ err_idx[5] = (ecc3 >> 1) & 0x1FFF;
+ err_idx[6] = (ecc3 >> 14) & 0x1FFF;
+ err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);
i = 0;
while (num_err--) {
change_bit(0, (unsigned long *)&err_idx[i]);
change_bit(1, (unsigned long *)&err_idx[i]);
- if (err_idx[i] <= 512 * 8) {
+ if (err_idx[i] < chip->ecc.size * 8) {
change_bit(err_idx[i], (unsigned long *)dat);
i++;
}
@@ -528,6 +861,58 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
return i;
}
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ chan->private = slave;
+ return true;
+}
+
+#ifdef CONFIG_OF
+static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ u32 val;
+
+ /* Set default NAND width to 8 bits */
+ pdata->width = 8;
+ if (!of_property_read_u32(np, "bank-width", &val)) {
+ if (val == 2) {
+ pdata->width = 16;
+ } else if (val != 1) {
+ dev_err(&pdev->dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
+ }
+ }
+ if (of_get_property(np, "nand-skip-bbtscan", NULL))
+ pdata->options = NAND_SKIP_BBTSCAN;
+
+ pdata->nand_timings = devm_kzalloc(&pdev->dev,
+ sizeof(*pdata->nand_timings), GFP_KERNEL);
+ if (!pdata->nand_timings)
+ return -ENOMEM;
+ of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
+ sizeof(*pdata->nand_timings));
+
+ /* Set default NAND bank to 0 */
+ pdata->bank = 0;
+ if (!of_property_read_u32(np, "bank", &val)) {
+ if (val > 3) {
+ dev_err(&pdev->dev, "invalid bank %u\n", val);
+ return -EINVAL;
+ }
+ pdata->bank = val;
+ }
+ return 0;
+}
+#else
+static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ return -ENOSYS;
+}
+#endif
+
/*
* fsmc_nand_probe - Probe function
* @pdev: platform device structure
@@ -535,102 +920,68 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
static int __init fsmc_nand_probe(struct platform_device *pdev)
{
struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node __maybe_unused *np = pdev->dev.of_node;
+ struct mtd_part_parser_data ppdata = {};
struct fsmc_nand_data *host;
struct mtd_info *mtd;
struct nand_chip *nand;
- struct fsmc_regs *regs;
struct resource *res;
+ dma_cap_mask_t mask;
int ret = 0;
u32 pid;
int i;
+ if (np) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ pdev->dev.platform_data = pdata;
+ ret = fsmc_nand_probe_config_dt(pdev, np);
+ if (ret) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -ENODEV;
+ }
+ }
+
if (!pdata) {
dev_err(&pdev->dev, "platform data is NULL\n");
return -EINVAL;
}
/* Allocate memory for the device structure (and zero it) */
- host = kzalloc(sizeof(*host), GFP_KERNEL);
- if (!host) {
- dev_err(&pdev->dev, "failed to allocate device structure\n");
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
return -ENOMEM;
- }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
- if (!res) {
- ret = -EIO;
- goto err_probe1;
- }
-
- host->resdata = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!host->resdata) {
- ret = -EIO;
- goto err_probe1;
- }
-
- host->data_va = ioremap(res->start, resource_size(res));
- if (!host->data_va) {
- ret = -EIO;
- goto err_probe1;
- }
-
- host->resaddr = request_mem_region(res->start + PLAT_NAND_ALE,
- resource_size(res), pdev->name);
- if (!host->resaddr) {
- ret = -EIO;
- goto err_probe1;
- }
-
- host->addr_va = ioremap(res->start + PLAT_NAND_ALE, resource_size(res));
- if (!host->addr_va) {
- ret = -EIO;
- goto err_probe1;
- }
-
- host->rescmd = request_mem_region(res->start + PLAT_NAND_CLE,
- resource_size(res), pdev->name);
- if (!host->rescmd) {
- ret = -EIO;
- goto err_probe1;
- }
-
- host->cmd_va = ioremap(res->start + PLAT_NAND_CLE, resource_size(res));
- if (!host->cmd_va) {
- ret = -EIO;
- goto err_probe1;
- }
+ host->data_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->data_va))
+ return PTR_ERR(host->data_va);
+
+ host->data_pa = (dma_addr_t)res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
+ host->addr_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->addr_va))
+ return PTR_ERR(host->addr_va);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
+ host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->cmd_va))
+ return PTR_ERR(host->cmd_va);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
- if (!res) {
- ret = -EIO;
- goto err_probe1;
- }
-
- host->resregs = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!host->resregs) {
- ret = -EIO;
- goto err_probe1;
- }
-
- host->regs_va = ioremap(res->start, resource_size(res));
- if (!host->regs_va) {
- ret = -EIO;
- goto err_probe1;
- }
+ host->regs_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->regs_va))
+ return PTR_ERR(host->regs_va);
host->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "failed to fetch block clock\n");
- ret = PTR_ERR(host->clk);
- host->clk = NULL;
- goto err_probe1;
+ return PTR_ERR(host->clk);
}
- ret = clk_enable(host->clk);
+ ret = clk_prepare_enable(host->clk);
if (ret)
- goto err_probe1;
+ goto err_clk_prepare_enable;
/*
* This device ID is actually a common AMBA ID as used on the
@@ -646,7 +997,14 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
host->bank = pdata->bank;
host->select_chip = pdata->select_bank;
- regs = host->regs_va;
+ host->partitions = pdata->partitions;
+ host->nr_partitions = pdata->nr_partitions;
+ host->dev = &pdev->dev;
+ host->dev_timings = pdata->nand_timings;
+ host->mode = pdata->mode;
+
+ if (host->mode == USE_DMA_ACCESS)
+ init_completion(&host->dma_access_complete);
/* Link all private pointers */
mtd = &host->mtd;
@@ -665,21 +1023,53 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
nand->ecc.size = 512;
nand->options = pdata->options;
nand->select_chip = fsmc_select_chip;
+ nand->badblockbits = 7;
if (pdata->width == FSMC_NAND_BW16)
nand->options |= NAND_BUSWIDTH_16;
- fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16);
+ switch (host->mode) {
+ case USE_DMA_ACCESS:
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ host->read_dma_chan = dma_request_channel(mask, filter,
+ pdata->read_dma_priv);
+ if (!host->read_dma_chan) {
+ dev_err(&pdev->dev, "Unable to get read dma channel\n");
+ goto err_req_read_chnl;
+ }
+ host->write_dma_chan = dma_request_channel(mask, filter,
+ pdata->write_dma_priv);
+ if (!host->write_dma_chan) {
+ dev_err(&pdev->dev, "Unable to get write dma channel\n");
+ goto err_req_write_chnl;
+ }
+ nand->read_buf = fsmc_read_buf_dma;
+ nand->write_buf = fsmc_write_buf_dma;
+ break;
+
+ default:
+ case USE_WORD_ACCESS:
+ nand->read_buf = fsmc_read_buf;
+ nand->write_buf = fsmc_write_buf;
+ break;
+ }
+
+ fsmc_nand_setup(host->regs_va, host->bank,
+ nand->options & NAND_BUSWIDTH_16,
+ host->dev_timings);
if (AMBA_REV_BITS(host->pid) >= 8) {
nand->ecc.read_page = fsmc_read_page_hwecc;
nand->ecc.calculate = fsmc_read_hwecc_ecc4;
- nand->ecc.correct = fsmc_correct_data;
+ nand->ecc.correct = fsmc_bch8_correct_data;
nand->ecc.bytes = 13;
+ nand->ecc.strength = 8;
} else {
nand->ecc.calculate = fsmc_read_hwecc_ecc1;
nand->ecc.correct = nand_correct_data;
nand->ecc.bytes = 3;
+ nand->ecc.strength = 1;
}
/*
@@ -688,19 +1078,52 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
if (nand_scan_ident(&host->mtd, 1, NULL)) {
ret = -ENXIO;
dev_err(&pdev->dev, "No NAND Device found!\n");
- goto err_probe;
+ goto err_scan_ident;
}
if (AMBA_REV_BITS(host->pid) >= 8) {
- if (host->mtd.writesize == 512) {
- nand->ecc.layout = &fsmc_ecc4_sp_layout;
+ switch (host->mtd.oobsize) {
+ case 16:
+ nand->ecc.layout = &fsmc_ecc4_16_layout;
host->ecc_place = &fsmc_ecc4_sp_place;
- } else {
- nand->ecc.layout = &fsmc_ecc4_lp_layout;
+ break;
+ case 64:
+ nand->ecc.layout = &fsmc_ecc4_64_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ case 128:
+ nand->ecc.layout = &fsmc_ecc4_128_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ case 224:
+ nand->ecc.layout = &fsmc_ecc4_224_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ case 256:
+ nand->ecc.layout = &fsmc_ecc4_256_layout;
host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ default:
+ dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ BUG();
}
} else {
- nand->ecc.layout = &fsmc_ecc1_layout;
+ switch (host->mtd.oobsize) {
+ case 16:
+ nand->ecc.layout = &fsmc_ecc1_16_layout;
+ break;
+ case 64:
+ nand->ecc.layout = &fsmc_ecc1_64_layout;
+ break;
+ case 128:
+ nand->ecc.layout = &fsmc_ecc1_128_layout;
+ break;
+ default:
+ dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ BUG();
+ }
}
/* Second stage of scan to fill MTD data-structures */
@@ -716,65 +1139,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
* platform data,
* default partition information present in driver.
*/
-#ifdef CONFIG_MTD_CMDLINE_PARTS
/*
- * Check if partition info passed via command line
+ * Check for partition info passed
*/
host->mtd.name = "nand";
- host->nr_partitions = parse_mtd_partitions(&host->mtd, part_probes,
- &host->partitions, 0);
- if (host->nr_partitions <= 0) {
-#endif
- /*
- * Check if partition info passed via command line
- */
- if (pdata->partitions) {
- host->partitions = pdata->partitions;
- host->nr_partitions = pdata->nr_partitions;
- } else {
- struct mtd_partition *partition;
- int i;
-
- /* Select the default partitions info */
- switch (host->mtd.size) {
- case 0x01000000:
- case 0x02000000:
- case 0x04000000:
- host->partitions = partition_info_16KB_blk;
- host->nr_partitions =
- sizeof(partition_info_16KB_blk) /
- sizeof(struct mtd_partition);
- break;
- case 0x08000000:
- case 0x10000000:
- case 0x20000000:
- case 0x40000000:
- host->partitions = partition_info_128KB_blk;
- host->nr_partitions =
- sizeof(partition_info_128KB_blk) /
- sizeof(struct mtd_partition);
- break;
- default:
- ret = -ENXIO;
- pr_err("Unsupported NAND size\n");
- goto err_probe;
- }
-
- partition = host->partitions;
- for (i = 0; i < host->nr_partitions; i++, partition++) {
- if (partition->size == 0) {
- partition->size = host->mtd.size -
- partition->offset;
- break;
- }
- }
- }
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- }
-#endif
-
- ret = mtd_device_register(&host->mtd, host->partitions,
- host->nr_partitions);
+ ppdata.of_node = np;
+ ret = mtd_device_parse_register(&host->mtd, NULL, &ppdata,
+ host->partitions, host->nr_partitions);
if (ret)
goto err_probe;
@@ -783,32 +1154,16 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
return 0;
err_probe:
- clk_disable(host->clk);
-err_probe1:
- if (host->clk)
- clk_put(host->clk);
- if (host->regs_va)
- iounmap(host->regs_va);
- if (host->resregs)
- release_mem_region(host->resregs->start,
- resource_size(host->resregs));
- if (host->cmd_va)
- iounmap(host->cmd_va);
- if (host->rescmd)
- release_mem_region(host->rescmd->start,
- resource_size(host->rescmd));
- if (host->addr_va)
- iounmap(host->addr_va);
- if (host->resaddr)
- release_mem_region(host->resaddr->start,
- resource_size(host->resaddr));
- if (host->data_va)
- iounmap(host->data_va);
- if (host->resdata)
- release_mem_region(host->resdata->start,
- resource_size(host->resdata));
-
- kfree(host);
+err_scan_ident:
+ if (host->mode == USE_DMA_ACCESS)
+ dma_release_channel(host->write_dma_chan);
+err_req_write_chnl:
+ if (host->mode == USE_DMA_ACCESS)
+ dma_release_channel(host->read_dma_chan);
+err_req_read_chnl:
+ clk_disable_unprepare(host->clk);
+err_clk_prepare_enable:
+ clk_put(host->clk);
return ret;
}
@@ -819,52 +1174,51 @@ static int fsmc_nand_remove(struct platform_device *pdev)
{
struct fsmc_nand_data *host = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
-
if (host) {
- mtd_device_unregister(&host->mtd);
- clk_disable(host->clk);
- clk_put(host->clk);
+ nand_release(&host->mtd);
- iounmap(host->regs_va);
- release_mem_region(host->resregs->start,
- resource_size(host->resregs));
- iounmap(host->cmd_va);
- release_mem_region(host->rescmd->start,
- resource_size(host->rescmd));
- iounmap(host->addr_va);
- release_mem_region(host->resaddr->start,
- resource_size(host->resaddr));
- iounmap(host->data_va);
- release_mem_region(host->resdata->start,
- resource_size(host->resdata));
-
- kfree(host);
+ if (host->mode == USE_DMA_ACCESS) {
+ dma_release_channel(host->write_dma_chan);
+ dma_release_channel(host->read_dma_chan);
+ }
+ clk_disable_unprepare(host->clk);
+ clk_put(host->clk);
}
+
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int fsmc_nand_suspend(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
if (host)
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
return 0;
}
static int fsmc_nand_resume(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
- if (host)
- clk_enable(host->clk);
+ if (host) {
+ clk_prepare_enable(host->clk);
+ fsmc_nand_setup(host->regs_va, host->bank,
+ host->nand.options & NAND_BUSWIDTH_16,
+ host->dev_timings);
+ }
return 0;
}
+#endif
-static const struct dev_pm_ops fsmc_nand_pm_ops = {
- .suspend = fsmc_nand_suspend,
- .resume = fsmc_nand_resume,
+static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id fsmc_nand_id_table[] = {
+ { .compatible = "st,spear600-fsmc-nand" },
+ { .compatible = "stericsson,fsmc-nand" },
+ {}
};
+MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
#endif
static struct platform_driver fsmc_nand_driver = {
@@ -872,24 +1226,12 @@ static struct platform_driver fsmc_nand_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "fsmc-nand",
-#ifdef CONFIG_PM
+ .of_match_table = of_match_ptr(fsmc_nand_id_table),
.pm = &fsmc_nand_pm_ops,
-#endif
},
};
-static int __init fsmc_nand_init(void)
-{
- return platform_driver_probe(&fsmc_nand_driver,
- fsmc_nand_probe);
-}
-module_init(fsmc_nand_init);
-
-static void __exit fsmc_nand_exit(void)
-{
- platform_driver_unregister(&fsmc_nand_driver);
-}
-module_exit(fsmc_nand_exit);
+module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 2c2060b2800..117ce333fdd 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -17,7 +17,7 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
+#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -27,6 +27,9 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand-gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
struct gpiomtd {
void __iomem *io_sync;
@@ -83,231 +86,201 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
gpio_nand_dosync(gpiomtd);
}
-static void gpio_nand_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+static int gpio_nand_devready(struct mtd_info *mtd)
{
- struct nand_chip *this = mtd->priv;
+ struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
- writesb(this->IO_ADDR_W, buf, len);
+ return gpio_get_value(gpiomtd->plat.gpio_rdy);
}
-static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_nand_id_table[] = {
+ { .compatible = "gpio-control-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
+
+static int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
{
- struct nand_chip *this = mtd->priv;
+ u32 val;
- readsb(this->IO_ADDR_R, buf, len);
-}
+ if (!dev->of_node)
+ return -ENODEV;
-static int gpio_nand_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- unsigned char read, *p = (unsigned char *) buf;
- int i, err = 0;
-
- for (i = 0; i < len; i++) {
- read = readb(this->IO_ADDR_R);
- if (read != p[i]) {
- pr_debug("%s: err at %d (read %04x vs %04x)\n",
- __func__, i, read, p[i]);
- err = -EFAULT;
+ if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
+ if (val == 2) {
+ plat->options |= NAND_BUSWIDTH_16;
+ } else if (val != 1) {
+ dev_err(dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
}
}
- return err;
-}
-static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
- int len)
-{
- struct nand_chip *this = mtd->priv;
+ plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
+ plat->gpio_nce = of_get_gpio(dev->of_node, 1);
+ plat->gpio_ale = of_get_gpio(dev->of_node, 2);
+ plat->gpio_cle = of_get_gpio(dev->of_node, 3);
+ plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
- if (IS_ALIGNED((unsigned long)buf, 2)) {
- writesw(this->IO_ADDR_W, buf, len>>1);
- } else {
- int i;
- unsigned short *ptr = (unsigned short *)buf;
+ if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
+ plat->chip_delay = val;
- for (i = 0; i < len; i += 2, ptr++)
- writew(*ptr, this->IO_ADDR_W);
- }
+ return 0;
}
-static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
+static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
- struct nand_chip *this = mtd->priv;
+ struct resource *r;
+ u64 addr;
- if (IS_ALIGNED((unsigned long)buf, 2)) {
- readsw(this->IO_ADDR_R, buf, len>>1);
- } else {
- int i;
- unsigned short *ptr = (unsigned short *)buf;
+ if (of_property_read_u64(pdev->dev.of_node,
+ "gpio-control-nand,io-sync-reg", &addr))
+ return NULL;
- for (i = 0; i < len; i += 2, ptr++)
- *ptr = readw(this->IO_ADDR_R);
- }
-}
+ r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
-static int gpio_nand_verifybuf16(struct mtd_info *mtd, const u_char *buf,
- int len)
+ r->start = addr;
+ r->end = r->start + 0x3;
+ r->flags = IORESOURCE_MEM;
+
+ return r;
+}
+#else /* CONFIG_OF */
+static inline int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
{
- struct nand_chip *this = mtd->priv;
- unsigned short read, *p = (unsigned short *) buf;
- int i, err = 0;
- len >>= 1;
-
- for (i = 0; i < len; i++) {
- read = readw(this->IO_ADDR_R);
- if (read != p[i]) {
- pr_debug("%s: err at %d (read %04x vs %04x)\n",
- __func__, i, read, p[i]);
- err = -EFAULT;
- }
- }
- return err;
+ return -ENOSYS;
}
-
-static int gpio_nand_devready(struct mtd_info *mtd)
+static inline struct resource *
+gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
- struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
- return gpio_get_value(gpiomtd->plat.gpio_rdy);
+ return NULL;
}
+#endif /* CONFIG_OF */
-static int __devexit gpio_nand_remove(struct platform_device *dev)
+static inline int gpio_nand_get_config(const struct device *dev,
+ struct gpio_nand_platdata *plat)
{
- struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
- struct resource *res;
+ int ret = gpio_nand_get_config_of(dev, plat);
- nand_release(&gpiomtd->mtd_info);
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 1);
- iounmap(gpiomtd->io_sync);
- if (res)
- release_mem_region(res->start, resource_size(res));
+ if (!ret)
+ return ret;
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- iounmap(gpiomtd->nand_chip.IO_ADDR_R);
- release_mem_region(res->start, resource_size(res));
+ if (dev_get_platdata(dev)) {
+ memcpy(plat, dev_get_platdata(dev), sizeof(*plat));
+ return 0;
+ }
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
- gpio_set_value(gpiomtd->plat.gpio_nce, 1);
+ return -EINVAL;
+}
- gpio_free(gpiomtd->plat.gpio_cle);
- gpio_free(gpiomtd->plat.gpio_ale);
- gpio_free(gpiomtd->plat.gpio_nce);
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_free(gpiomtd->plat.gpio_nwp);
- gpio_free(gpiomtd->plat.gpio_rdy);
+static inline struct resource *
+gpio_nand_get_io_sync(struct platform_device *pdev)
+{
+ struct resource *r = gpio_nand_get_io_sync_of(pdev);
- kfree(gpiomtd);
+ if (r)
+ return r;
- return 0;
+ return platform_get_resource(pdev, IORESOURCE_MEM, 1);
}
-static void __iomem *request_and_remap(struct resource *res, size_t size,
- const char *name, int *err)
+static int gpio_nand_remove(struct platform_device *pdev)
{
- void __iomem *ptr;
+ struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
- if (!request_mem_region(res->start, resource_size(res), name)) {
- *err = -EBUSY;
- return NULL;
- }
+ nand_release(&gpiomtd->mtd_info);
- ptr = ioremap(res->start, size);
- if (!ptr) {
- release_mem_region(res->start, resource_size(res));
- *err = -ENOMEM;
- }
- return ptr;
+ if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+ gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+ gpio_set_value(gpiomtd->plat.gpio_nce, 1);
+
+ return 0;
}
-static int __devinit gpio_nand_probe(struct platform_device *dev)
+static int gpio_nand_probe(struct platform_device *pdev)
{
struct gpiomtd *gpiomtd;
- struct nand_chip *this;
- struct resource *res0, *res1;
- int ret;
-
- if (!dev->dev.platform_data)
- return -EINVAL;
+ struct nand_chip *chip;
+ struct resource *res;
+ struct mtd_part_parser_data ppdata = {};
+ int ret = 0;
- res0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res0)
+ if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev))
return -EINVAL;
- gpiomtd = kzalloc(sizeof(*gpiomtd), GFP_KERNEL);
- if (gpiomtd == NULL) {
- dev_err(&dev->dev, "failed to create NAND MTD\n");
+ gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
+ if (!gpiomtd)
return -ENOMEM;
- }
- this = &gpiomtd->nand_chip;
- this->IO_ADDR_R = request_and_remap(res0, 2, "NAND", &ret);
- if (!this->IO_ADDR_R) {
- dev_err(&dev->dev, "unable to map NAND\n");
- goto err_map;
- }
+ chip = &gpiomtd->nand_chip;
- res1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
- if (res1) {
- gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret);
- if (!gpiomtd->io_sync) {
- dev_err(&dev->dev, "unable to map sync NAND\n");
- goto err_sync;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(chip->IO_ADDR_R))
+ return PTR_ERR(chip->IO_ADDR_R);
+
+ res = gpio_nand_get_io_sync(pdev);
+ if (res) {
+ gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpiomtd->io_sync))
+ return PTR_ERR(gpiomtd->io_sync);
}
- memcpy(&gpiomtd->plat, dev->dev.platform_data, sizeof(gpiomtd->plat));
+ ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat);
+ if (ret)
+ return ret;
- ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE");
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce, "NAND NCE");
if (ret)
- goto err_nce;
+ return ret;
gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
+
if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
- ret = gpio_request(gpiomtd->plat.gpio_nwp, "NAND NWP");
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp,
+ "NAND NWP");
if (ret)
- goto err_nwp;
- gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
+ return ret;
}
- ret = gpio_request(gpiomtd->plat.gpio_ale, "NAND ALE");
+
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE");
if (ret)
- goto err_ale;
+ return ret;
gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
- ret = gpio_request(gpiomtd->plat.gpio_cle, "NAND CLE");
+
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE");
if (ret)
- goto err_cle;
+ return ret;
gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
- ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
- if (ret)
- goto err_rdy;
- gpio_direction_input(gpiomtd->plat.gpio_rdy);
-
-
- this->IO_ADDR_W = this->IO_ADDR_R;
- this->ecc.mode = NAND_ECC_SOFT;
- this->options = gpiomtd->plat.options;
- this->chip_delay = gpiomtd->plat.chip_delay;
-
- /* install our routines */
- this->cmd_ctrl = gpio_nand_cmd_ctrl;
- this->dev_ready = gpio_nand_devready;
-
- if (this->options & NAND_BUSWIDTH_16) {
- this->read_buf = gpio_nand_readbuf16;
- this->write_buf = gpio_nand_writebuf16;
- this->verify_buf = gpio_nand_verifybuf16;
- } else {
- this->read_buf = gpio_nand_readbuf;
- this->write_buf = gpio_nand_writebuf;
- this->verify_buf = gpio_nand_verifybuf;
+
+ if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
+ "NAND RDY");
+ if (ret)
+ return ret;
+ gpio_direction_input(gpiomtd->plat.gpio_rdy);
+ chip->dev_ready = gpio_nand_devready;
}
- /* set the mtd private data for the nand driver */
- gpiomtd->mtd_info.priv = this;
- gpiomtd->mtd_info.owner = THIS_MODULE;
+ chip->IO_ADDR_W = chip->IO_ADDR_R;
+ chip->ecc.mode = NAND_ECC_SOFT;
+ chip->options = gpiomtd->plat.options;
+ chip->chip_delay = gpiomtd->plat.chip_delay;
+ chip->cmd_ctrl = gpio_nand_cmd_ctrl;
+
+ gpiomtd->mtd_info.priv = chip;
+ gpiomtd->mtd_info.owner = THIS_MODULE;
+
+ platform_set_drvdata(pdev, gpiomtd);
+
+ if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+ gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
if (nand_scan(&gpiomtd->mtd_info, 1)) {
- dev_err(&dev->dev, "no nand chips found?\n");
ret = -ENXIO;
goto err_wp;
}
@@ -316,34 +289,17 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
gpiomtd->plat.adjust_parts(&gpiomtd->plat,
gpiomtd->mtd_info.size);
- mtd_device_register(&gpiomtd->mtd_info, gpiomtd->plat.parts,
- gpiomtd->plat.num_parts);
- platform_set_drvdata(dev, gpiomtd);
-
- return 0;
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(&gpiomtd->mtd_info, NULL, &ppdata,
+ gpiomtd->plat.parts,
+ gpiomtd->plat.num_parts);
+ if (!ret)
+ return 0;
err_wp:
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
- gpio_free(gpiomtd->plat.gpio_rdy);
-err_rdy:
- gpio_free(gpiomtd->plat.gpio_cle);
-err_cle:
- gpio_free(gpiomtd->plat.gpio_ale);
-err_ale:
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_free(gpiomtd->plat.gpio_nwp);
-err_nwp:
- gpio_free(gpiomtd->plat.gpio_nce);
-err_nce:
- iounmap(gpiomtd->io_sync);
- if (res1)
- release_mem_region(res1->start, resource_size(res1));
-err_sync:
- iounmap(gpiomtd->nand_chip.IO_ADDR_R);
- release_mem_region(res0->start, resource_size(res0));
-err_map:
- kfree(gpiomtd);
+
return ret;
}
@@ -352,23 +308,12 @@ static struct platform_driver gpio_nand_driver = {
.remove = gpio_nand_remove,
.driver = {
.name = "gpio-nand",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(gpio_nand_id_table),
},
};
-static int __init gpio_nand_init(void)
-{
- printk(KERN_INFO "GPIO NAND driver, © 2004 Simtec Electronics\n");
-
- return platform_driver_register(&gpio_nand_driver);
-}
-
-static void __exit gpio_nand_exit(void)
-{
- platform_driver_unregister(&gpio_nand_driver);
-}
-
-module_init(gpio_nand_init);
-module_exit(gpio_nand_exit);
+module_platform_driver(gpio_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/mtd/nand/gpmi-nand/Makefile b/drivers/mtd/nand/gpmi-nand/Makefile
new file mode 100644
index 00000000000..3a462487c35
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
+gpmi_nand-objs += gpmi-nand.o
+gpmi_nand-objs += gpmi-lib.o
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
new file mode 100644
index 00000000000..05bb91f2f4c
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -0,0 +1,128 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __GPMI_NAND_BCH_REGS_H
+#define __GPMI_NAND_BCH_REGS_H
+
+#define HW_BCH_CTRL 0x00000000
+#define HW_BCH_CTRL_SET 0x00000004
+#define HW_BCH_CTRL_CLR 0x00000008
+#define HW_BCH_CTRL_TOG 0x0000000c
+
+#define BM_BCH_CTRL_COMPLETE_IRQ_EN (1 << 8)
+#define BM_BCH_CTRL_COMPLETE_IRQ (1 << 0)
+
+#define HW_BCH_STATUS0 0x00000010
+#define HW_BCH_MODE 0x00000020
+#define HW_BCH_ENCODEPTR 0x00000030
+#define HW_BCH_DATAPTR 0x00000040
+#define HW_BCH_METAPTR 0x00000050
+#define HW_BCH_LAYOUTSELECT 0x00000070
+
+#define HW_BCH_FLASH0LAYOUT0 0x00000080
+
+#define BP_BCH_FLASH0LAYOUT0_NBLOCKS 24
+#define BM_BCH_FLASH0LAYOUT0_NBLOCKS (0xff << BP_BCH_FLASH0LAYOUT0_NBLOCKS)
+#define BF_BCH_FLASH0LAYOUT0_NBLOCKS(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_NBLOCKS) & BM_BCH_FLASH0LAYOUT0_NBLOCKS)
+
+#define BP_BCH_FLASH0LAYOUT0_META_SIZE 16
+#define BM_BCH_FLASH0LAYOUT0_META_SIZE (0xff << BP_BCH_FLASH0LAYOUT0_META_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_META_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_META_SIZE)\
+ & BM_BCH_FLASH0LAYOUT0_META_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT0_ECC0 12
+#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0 11
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
+#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0) \
+ : (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) \
+ & BM_BCH_FLASH0LAYOUT0_ECC0) \
+ )
+
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14 10
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14 \
+ (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT0_GF(v, x) \
+ ((GPMI_IS_MX6(x) && ((v) == 14)) \
+ ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14) \
+ : 0 \
+ )
+
+#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
+#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
+ (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
+ (0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
+ : ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
+ )
+
+#define HW_BCH_FLASH0LAYOUT1 0x00000090
+
+#define BP_BCH_FLASH0LAYOUT1_PAGE_SIZE 16
+#define BM_BCH_FLASH0LAYOUT1_PAGE_SIZE \
+ (0xffff << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) \
+ & BM_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT1_ECCN 12
+#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN 11
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
+#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN) \
+ : (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) \
+ & BM_BCH_FLASH0LAYOUT1_ECCN) \
+ )
+
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14 10
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14 \
+ (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT1_GF(v, x) \
+ ((GPMI_IS_MX6(x) && ((v) == 14)) \
+ ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14) \
+ : 0 \
+ )
+
+#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
+#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
+ (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
+ (0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+ : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+ )
+
+#define HW_BCH_VERSION 0x00000160
+#endif
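As an aside (illustrative only, not part of this patch): the BF_* helpers above are meant to be OR-ed together to build the layout registers, as the later bch_set_geometry() does. A minimal sketch with hypothetical geometry values for a small-page layout might look like this:

static u32 example_flash0layout0(struct gpmi_nand_data *this)
{
	/* Hypothetical geometry: 4 chunks of 512 bytes, 10 bytes of metadata,
	 * BCH strength 8 (the ECC0 field takes strength / 2), GF(2^13).
	 */
	unsigned int nblocks = 3;	/* ecc_chunk_count - 1 */
	unsigned int meta = 10;
	unsigned int ecc0 = 4;
	unsigned int gf_len = 13;
	unsigned int chunk = 512;

	return BF_BCH_FLASH0LAYOUT0_NBLOCKS(nblocks)
		| BF_BCH_FLASH0LAYOUT0_META_SIZE(meta)
		| BF_BCH_FLASH0LAYOUT0_ECC0(ecc0, this)
		| BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
		| BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(chunk, this);
}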
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
new file mode 100644
index 00000000000..87e658ce23e
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -0,0 +1,1355 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include "gpmi-nand.h"
+#include "gpmi-regs.h"
+#include "bch-regs.h"
+
+static struct timing_threshod timing_default_threshold = {
+ .max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >>
+ BP_GPMI_TIMING0_DATA_SETUP),
+ .internal_data_setup_in_ns = 0,
+ .max_sample_delay_factor = (BM_GPMI_CTRL1_RDN_DELAY >>
+ BP_GPMI_CTRL1_RDN_DELAY),
+ .max_dll_clock_period_in_ns = 32,
+ .max_dll_delay_in_ns = 16,
+};
+
+#define MXS_SET_ADDR 0x4
+#define MXS_CLR_ADDR 0x8
+/*
+ * Clear the bit and poll it cleared. This is usually called with
+ * a reset address and mask being either SFTRST(bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+ int timeout = 0x400;
+
+ /* clear the bit */
+ writel(mask, addr + MXS_CLR_ADDR);
+
+ /*
+	 * SFTRST needs 3 GPMI clocks to settle; the reference manual
+	 * recommends waiting 1us.
+ */
+ udelay(1);
+
+ /* poll the bit becoming clear */
+ while ((readl(addr) & mask) && --timeout)
+ /* nothing */;
+
+ return !timeout;
+}
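For context (an illustrative sketch, not part of this patch): MXS-style peripherals mirror each register at SET/CLR offsets, which is what MXS_SET_ADDR and MXS_CLR_ADDR encode, so individual bits can be set or cleared without a read-modify-write. Hypothetical helpers built on that convention:

static inline void mxs_set_bits(void __iomem *reg, u32 mask)
{
	writel(mask, reg + MXS_SET_ADDR);	/* bits in mask are set */
}

static inline void mxs_clr_bits(void __iomem *reg, u32 mask)
{
	writel(mask, reg + MXS_CLR_ADDR);	/* bits in mask are cleared */
}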
+
+#define MODULE_CLKGATE (1 << 30)
+#define MODULE_SFTRST (1 << 31)
+/*
+ * The current mxs_reset_block() will do two things:
+ * [1] enable the module.
+ * [2] reset the module.
+ *
+ * In most of the cases, it's ok.
+ * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
+ * If you try to soft reset the BCH block, it becomes unusable until
+ * the next hard reset. This case occurs in the NAND boot mode. When the board
+ * boots from NAND, the ROM in the chip initializes the BCH block itself.
+ * So if the driver tries to reset the BCH again, the BCH will not work anymore
+ * and you will see a DMA timeout. The bug has been fixed in later chips,
+ * such as the MX28.
+ *
+ * To avoid this bug, just add a new parameter `just_enable` for
+ * the mxs_reset_block(), and rewrite it here.
+ */
+static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
+{
+ int ret;
+ int timeout = 0x400;
+
+ /* clear and poll SFTRST */
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear CLKGATE */
+ writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
+
+ if (!just_enable) {
+ /* set SFTRST to reset the block */
+ writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
+ udelay(1);
+
+ /* poll CLKGATE becoming set */
+ while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
+ /* nothing */;
+ if (unlikely(!timeout))
+ goto error;
+ }
+
+ /* clear and poll SFTRST */
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear and poll CLKGATE */
+ ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
+ if (unlikely(ret))
+ goto error;
+
+ return 0;
+
+error:
+ pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
+ return -ETIMEDOUT;
+}
+
+static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
+{
+ struct clk *clk;
+ int ret;
+ int i;
+
+ for (i = 0; i < GPMI_CLK_MAX; i++) {
+ clk = this->resources.clock[i];
+ if (!clk)
+ break;
+
+ if (v) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto err_clk;
+ } else {
+ clk_disable_unprepare(clk);
+ }
+ }
+ return 0;
+
+err_clk:
+ for (; i > 0; i--)
+ clk_disable_unprepare(this->resources.clock[i - 1]);
+ return ret;
+}
+
+#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
+#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
+
+int gpmi_init(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ int ret;
+
+ ret = gpmi_enable_clk(this);
+ if (ret)
+ goto err_out;
+ ret = gpmi_reset_block(r->gpmi_regs, false);
+ if (ret)
+ goto err_out;
+
+ /*
+ * Reset BCH here, too. We got failures otherwise :(
+ * See later BCH reset for explanation of MX23 handling
+ */
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ if (ret)
+ goto err_out;
+
+
+ /* Choose NAND mode. */
+ writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+ /* Set the IRQ polarity. */
+ writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
+ r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Disable Write-Protection. */
+ writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Select BCH ECC. */
+ writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /*
+ * Decouple the chip select from dma channel. We use dma0 for all
+ * the chips.
+ */
+ writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ gpmi_disable_clk(this);
+ return 0;
+err_out:
+ return ret;
+}
+
+/* This function is very useful. It is called only when a bug occurs. */
+void gpmi_dump_info(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct bch_geometry *geo = &this->bch_geometry;
+ u32 reg;
+ int i;
+
+ dev_err(this->dev, "Show GPMI registers :\n");
+ for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
+ reg = readl(r->gpmi_regs + i * 0x10);
+ dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ }
+
+ /* start to print out the BCH info */
+ dev_err(this->dev, "Show BCH registers :\n");
+ for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
+ reg = readl(r->bch_regs + i * 0x10);
+ dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ }
+ dev_err(this->dev, "BCH Geometry :\n"
+ "GF length : %u\n"
+ "ECC Strength : %u\n"
+ "Page Size in Bytes : %u\n"
+ "Metadata Size in Bytes : %u\n"
+ "ECC Chunk Size in Bytes: %u\n"
+ "ECC Chunk Count : %u\n"
+ "Payload Size in Bytes : %u\n"
+ "Auxiliary Size in Bytes: %u\n"
+ "Auxiliary Status Offset: %u\n"
+ "Block Mark Byte Offset : %u\n"
+ "Block Mark Bit Offset : %u\n",
+ geo->gf_len,
+ geo->ecc_strength,
+ geo->page_size,
+ geo->metadata_size,
+ geo->ecc_chunk_size,
+ geo->ecc_chunk_count,
+ geo->payload_size,
+ geo->auxiliary_size,
+ geo->auxiliary_status_offset,
+ geo->block_mark_byte_offset,
+ geo->block_mark_bit_offset);
+}
+
+/* Configures the geometry for BCH. */
+int bch_set_geometry(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct bch_geometry *bch_geo = &this->bch_geometry;
+ unsigned int block_count;
+ unsigned int block_size;
+ unsigned int metadata_size;
+ unsigned int ecc_strength;
+ unsigned int page_size;
+ unsigned int gf_len;
+ int ret;
+
+ if (common_nfc_set_geometry(this))
+ return !0;
+
+ block_count = bch_geo->ecc_chunk_count - 1;
+ block_size = bch_geo->ecc_chunk_size;
+ metadata_size = bch_geo->metadata_size;
+ ecc_strength = bch_geo->ecc_strength >> 1;
+ page_size = bch_geo->page_size;
+ gf_len = bch_geo->gf_len;
+
+ ret = gpmi_enable_clk(this);
+ if (ret)
+ goto err_out;
+
+ /*
+ * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
+ * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
+ * On the other hand, the MX28 needs the reset, because one case has been
+ * seen where the BCH produced ECC errors constantly after 10000
+	 * consecutive reboots. The latter case has not been seen on the MX23
+	 * yet, but we do not know whether it could happen there as well.
+ */
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ if (ret)
+ goto err_out;
+
+ /* Configure layout 0. */
+ writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
+ | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
+ | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
+ | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
+ | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
+ r->bch_regs + HW_BCH_FLASH0LAYOUT0);
+
+ writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
+ | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
+ | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
+ | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
+ r->bch_regs + HW_BCH_FLASH0LAYOUT1);
+
+ /* Set *all* chip selects to use layout 0. */
+ writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
+
+ /* Enable interrupts. */
+ writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
+ r->bch_regs + HW_BCH_CTRL_SET);
+
+ gpmi_disable_clk(this);
+ return 0;
+err_out:
+ return ret;
+}
+
+/* Converts time in nanoseconds to cycles. */
+static unsigned int ns_to_cycles(unsigned int time,
+ unsigned int period, unsigned int min)
+{
+ unsigned int k;
+
+ k = (time + period - 1) / period;
+ return max(k, min);
+}
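A quick worked example of ns_to_cycles() (not part of the patch): with a 10ns clock period the helper rounds up and then clamps to the given minimum:

/* ns_to_cycles(18, 10, 1) -> (18 + 10 - 1) / 10 = 2 cycles          */
/* ns_to_cycles(10, 10, 1) -> (10 + 10 - 1) / 10 = 1 cycle           */
/* ns_to_cycles(0,  10, 1) -> max(0, 1)          = 1 cycle (clamped) */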
+
+#define DEF_MIN_PROP_DELAY 5
+#define DEF_MAX_PROP_DELAY 9
+/* Apply timing to current hardware conditions. */
+static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
+ struct gpmi_nfc_hardware_timing *hw)
+{
+ struct timing_threshod *nfc = &timing_default_threshold;
+ struct resources *r = &this->resources;
+ struct nand_chip *nand = &this->nand;
+ struct nand_timing target = this->timing;
+ bool improved_timing_is_available;
+ unsigned long clock_frequency_in_hz;
+ unsigned int clock_period_in_ns;
+ bool dll_use_half_periods;
+ unsigned int dll_delay_shift;
+ unsigned int max_sample_delay_in_ns;
+ unsigned int address_setup_in_cycles;
+ unsigned int data_setup_in_ns;
+ unsigned int data_setup_in_cycles;
+ unsigned int data_hold_in_cycles;
+ int ideal_sample_delay_in_ns;
+ unsigned int sample_delay_factor;
+ int tEYE;
+ unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
+ unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;
+
+ /*
+ * If there are multiple chips, we need to relax the timings to allow
+ * for signal distortion due to higher capacitance.
+ */
+ if (nand->numchips > 2) {
+ target.data_setup_in_ns += 10;
+ target.data_hold_in_ns += 10;
+ target.address_setup_in_ns += 10;
+ } else if (nand->numchips > 1) {
+ target.data_setup_in_ns += 5;
+ target.data_hold_in_ns += 5;
+ target.address_setup_in_ns += 5;
+ }
+
+ /* Check if improved timing information is available. */
+ improved_timing_is_available =
+ (target.tREA_in_ns >= 0) &&
+ (target.tRLOH_in_ns >= 0) &&
+ (target.tRHOH_in_ns >= 0);
+
+ /* Inspect the clock. */
+ nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]);
+ clock_frequency_in_hz = nfc->clock_frequency_in_hz;
+ clock_period_in_ns = NSEC_PER_SEC / clock_frequency_in_hz;
+
+ /*
+ * The NFC quantizes setup and hold parameters in terms of clock cycles.
+ * Here, we quantize the setup and hold timing parameters to the
+ * next-highest clock period to make sure we apply at least the
+ * specified times.
+ *
+ * For data setup and data hold, the hardware interprets a value of zero
+ * as the largest possible delay. This is not what's intended by a zero
+ * in the input parameter, so we impose a minimum of one cycle.
+ */
+ data_setup_in_cycles = ns_to_cycles(target.data_setup_in_ns,
+ clock_period_in_ns, 1);
+ data_hold_in_cycles = ns_to_cycles(target.data_hold_in_ns,
+ clock_period_in_ns, 1);
+ address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
+ clock_period_in_ns, 0);
+
+ /*
+ * The clock's period affects the sample delay in a number of ways:
+ *
+ * (1) The NFC HAL tells us the maximum clock period the sample delay
+ * DLL can tolerate. If the clock period is greater than half that
+ * maximum, we must configure the DLL to be driven by half periods.
+ *
+ * (2) We need to convert from an ideal sample delay, in ns, to a
+ * "sample delay factor," which the NFC uses. This factor depends on
+ * whether we're driving the DLL with full or half periods.
+ * Paraphrasing the reference manual:
+ *
+ * AD = SDF x 0.125 x RP
+ *
+ * where:
+ *
+ * AD is the applied delay, in ns.
+ * SDF is the sample delay factor, which is dimensionless.
+ * RP is the reference period, in ns, which is a full clock period
+ * if the DLL is being driven by full periods, or half that if
+ * the DLL is being driven by half periods.
+ *
+ * Let's re-arrange this in a way that's more useful to us:
+ *
+ * 8
+ * SDF = AD x ----
+ * RP
+ *
+ * The reference period is either the clock period or half that, so this
+ * is:
+ *
+ * 8 AD x DDF
+ * SDF = AD x ----- = --------
+ * f x P P
+ *
+ * where:
+ *
+ * f is 1 or 1/2, depending on how we're driving the DLL.
+ * P is the clock period.
+ * DDF is the DLL Delay Factor, a dimensionless value that
+ * incorporates all the constants in the conversion.
+ *
+ * DDF will be either 8 or 16, both of which are powers of two. We can
+ * reduce the cost of this conversion by using bit shifts instead of
+ * multiplication or division. Thus:
+ *
+ * AD << DDS
+ * SDF = ---------
+ * P
+ *
+ * or
+ *
+ * AD = (SDF >> DDS) x P
+ *
+ * where:
+ *
+ * DDS is the DLL Delay Shift, the logarithm to base 2 of the DDF.
+ */
+ if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
+ dll_use_half_periods = true;
+ dll_delay_shift = 3 + 1;
+ } else {
+ dll_use_half_periods = false;
+ dll_delay_shift = 3;
+ }
+
+ /*
+ * Compute the maximum sample delay the NFC allows, under current
+ * conditions. If the clock is running too slowly, no sample delay is
+ * possible.
+ */
+ if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
+ max_sample_delay_in_ns = 0;
+ else {
+ /*
+ * Compute the delay implied by the largest sample delay factor
+ * the NFC allows.
+ */
+ max_sample_delay_in_ns =
+ (nfc->max_sample_delay_factor * clock_period_in_ns) >>
+ dll_delay_shift;
+
+ /*
+		 * Check if the implied sample delay is larger than the NFC
+ * actually allows.
+ */
+ if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
+ max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
+ }
+
+ /*
+ * Check if improved timing information is available. If not, we have to
+ * use a less-sophisticated algorithm.
+ */
+ if (!improved_timing_is_available) {
+ /*
+ * Fold the read setup time required by the NFC into the ideal
+ * sample delay.
+ */
+ ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
+ nfc->internal_data_setup_in_ns;
+
+ /*
+ * The ideal sample delay may be greater than the maximum
+ * allowed by the NFC. If so, we can trade off sample delay time
+ * for more data setup time.
+ *
+ * In each iteration of the following loop, we add a cycle to
+ * the data setup time and subtract a corresponding amount from
+		 * the sample delay until we've satisfied the constraints or
+ * can't do any better.
+ */
+ while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+
+ data_setup_in_cycles++;
+ ideal_sample_delay_in_ns -= clock_period_in_ns;
+
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+
+ }
+
+ /*
+ * Compute the sample delay factor that corresponds most closely
+ * to the ideal sample delay. If the result is too large for the
+ * NFC, use the maximum value.
+ *
+ * Notice that we use the ns_to_cycles function to compute the
+ * sample delay factor. We do this because the form of the
+ * computation is the same as that for calculating cycles.
+ */
+ sample_delay_factor =
+ ns_to_cycles(
+ ideal_sample_delay_in_ns << dll_delay_shift,
+ clock_period_in_ns, 0);
+
+ if (sample_delay_factor > nfc->max_sample_delay_factor)
+ sample_delay_factor = nfc->max_sample_delay_factor;
+
+ /* Skip to the part where we return our results. */
+ goto return_results;
+ }
+
+ /*
+ * If control arrives here, we have more detailed timing information,
+ * so we can use a better algorithm.
+ */
+
+ /*
+ * Fold the read setup time required by the NFC into the maximum
+ * propagation delay.
+ */
+ max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;
+
+ /*
+ * Earlier, we computed the number of clock cycles required to satisfy
+ * the data setup time. Now, we need to know the actual nanoseconds.
+ */
+ data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;
+
+ /*
+ * Compute tEYE, the width of the data eye when reading from the NAND
+ * Flash. The eye width is fundamentally determined by the data setup
+ * time, perturbed by propagation delays and some characteristics of the
+ * NAND Flash device.
+ *
+ * start of the eye = max_prop_delay + tREA
+ * end of the eye = min_prop_delay + tRHOH + data_setup
+ */
+ tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
+ (int)data_setup_in_ns;
+
+ tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;
+
+ /*
+ * The eye must be open. If it's not, we can try to open it by
+ * increasing its main forcer, the data setup time.
+ *
+ * In each iteration of the following loop, we increase the data setup
+ * time by a single clock cycle. We do this until either the eye is
+ * open or we run into NFC limits.
+ */
+ while ((tEYE <= 0) &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+ /* Give a cycle to data setup. */
+ data_setup_in_cycles++;
+ /* Synchronize the data setup time with the cycles. */
+ data_setup_in_ns += clock_period_in_ns;
+ /* Adjust tEYE accordingly. */
+ tEYE += clock_period_in_ns;
+ }
+
+ /*
+ * When control arrives here, the eye is open. The ideal time to sample
+ * the data is in the center of the eye:
+ *
+ * end of the eye + start of the eye
+ * --------------------------------- - data_setup
+ * 2
+ *
+ * After some algebra, this simplifies to the code immediately below.
+ */
+ ideal_sample_delay_in_ns =
+ ((int)max_prop_delay_in_ns +
+ (int)target.tREA_in_ns +
+ (int)min_prop_delay_in_ns +
+ (int)target.tRHOH_in_ns -
+ (int)data_setup_in_ns) >> 1;
+
+ /*
+ * The following figure illustrates some aspects of a NAND Flash read:
+ *
+ *
+ * __ _____________________________________
+ * RDN \_________________/
+ *
+ * <---- tEYE ----->
+ * /-----------------\
+ * Read Data ----------------------------< >---------
+ * \-----------------/
+ * ^ ^ ^ ^
+ * | | | |
+ * |<--Data Setup -->|<--Delay Time -->| |
+ * | | | |
+ * | | |
+ * | |<-- Quantized Delay Time -->|
+ * | | |
+ *
+ *
+ * We have some issues we must now address:
+ *
+ * (1) The *ideal* sample delay time must not be negative. If it is, we
+ * jam it to zero.
+ *
+ * (2) The *ideal* sample delay time must not be greater than that
+ * allowed by the NFC. If it is, we can increase the data setup
+ * time, which will reduce the delay between the end of the data
+ * setup and the center of the eye. It will also make the eye
+ * larger, which might help with the next issue...
+ *
+ * (3) The *quantized* sample delay time must not fall either before the
+ * eye opens or after it closes (the latter is the problem
+ * illustrated in the above figure).
+ */
+
+ /* Jam a negative ideal sample delay to zero. */
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+
+ /*
+ * Extend the data setup as needed to reduce the ideal sample delay
+ * below the maximum permitted by the NFC.
+ */
+ while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+
+ /* Give a cycle to data setup. */
+ data_setup_in_cycles++;
+ /* Synchronize the data setup time with the cycles. */
+ data_setup_in_ns += clock_period_in_ns;
+ /* Adjust tEYE accordingly. */
+ tEYE += clock_period_in_ns;
+
+ /*
+ * Decrease the ideal sample delay by one half cycle, to keep it
+ * in the middle of the eye.
+ */
+ ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
+
+ /* Jam a negative ideal sample delay to zero. */
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+ }
+
+ /*
+ * Compute the sample delay factor that corresponds to the ideal sample
+ * delay. If the result is too large, then use the maximum allowed
+ * value.
+ *
+ * Notice that we use the ns_to_cycles function to compute the sample
+ * delay factor. We do this because the form of the computation is the
+ * same as that for calculating cycles.
+ */
+ sample_delay_factor =
+ ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
+ clock_period_in_ns, 0);
+
+ if (sample_delay_factor > nfc->max_sample_delay_factor)
+ sample_delay_factor = nfc->max_sample_delay_factor;
+
+ /*
+ * These macros conveniently encapsulate a computation we'll use to
+ * continuously evaluate whether or not the data sample delay is inside
+ * the eye.
+ */
+ #define IDEAL_DELAY ((int) ideal_sample_delay_in_ns)
+
+ #define QUANTIZED_DELAY \
+ ((int) ((sample_delay_factor * clock_period_in_ns) >> \
+ dll_delay_shift))
+
+ #define DELAY_ERROR (abs(QUANTIZED_DELAY - IDEAL_DELAY))
+
+ #define SAMPLE_IS_NOT_WITHIN_THE_EYE (DELAY_ERROR > (tEYE >> 1))
+
+ /*
+ * While the quantized sample time falls outside the eye, reduce the
+ * sample delay or extend the data setup to move the sampling point back
+ * toward the eye. Do not allow the number of data setup cycles to
+ * exceed the maximum allowed by the NFC.
+ */
+ while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+ /*
+ * If control arrives here, the quantized sample delay falls
+ * outside the eye. Check if it's before the eye opens, or after
+ * the eye closes.
+ */
+ if (QUANTIZED_DELAY > IDEAL_DELAY) {
+ /*
+ * If control arrives here, the quantized sample delay
+ * falls after the eye closes. Decrease the quantized
+ * delay time and then go back to re-evaluate.
+ */
+ if (sample_delay_factor != 0)
+ sample_delay_factor--;
+ continue;
+ }
+
+ /*
+ * If control arrives here, the quantized sample delay falls
+ * before the eye opens. Shift the sample point by increasing
+ * data setup time. This will also make the eye larger.
+ */
+
+ /* Give a cycle to data setup. */
+ data_setup_in_cycles++;
+ /* Synchronize the data setup time with the cycles. */
+ data_setup_in_ns += clock_period_in_ns;
+ /* Adjust tEYE accordingly. */
+ tEYE += clock_period_in_ns;
+
+ /*
+ * Decrease the ideal sample delay by one half cycle, to keep it
+ * in the middle of the eye.
+ */
+ ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
+
+ /* ...and one less period for the delay time. */
+ ideal_sample_delay_in_ns -= clock_period_in_ns;
+
+ /* Jam a negative ideal sample delay to zero. */
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+
+ /*
+ * We have a new ideal sample delay, so re-compute the quantized
+ * delay.
+ */
+ sample_delay_factor =
+ ns_to_cycles(
+ ideal_sample_delay_in_ns << dll_delay_shift,
+ clock_period_in_ns, 0);
+
+ if (sample_delay_factor > nfc->max_sample_delay_factor)
+ sample_delay_factor = nfc->max_sample_delay_factor;
+ }
+
+ /* Control arrives here when we're ready to return our results. */
+return_results:
+ hw->data_setup_in_cycles = data_setup_in_cycles;
+ hw->data_hold_in_cycles = data_hold_in_cycles;
+ hw->address_setup_in_cycles = address_setup_in_cycles;
+ hw->use_half_periods = dll_use_half_periods;
+ hw->sample_delay_factor = sample_delay_factor;
+ hw->device_busy_timeout = GPMI_DEFAULT_BUSY_TIMEOUT;
+ hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
+
+ /* Return success. */
+ return 0;
+}
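To make the SDF conversion in the comment above concrete (a worked example with assumed numbers, not taken from the patch): with a 10ns GPMI clock period the DLL runs on full periods, so DDS = 3, and an ideal sample delay of 10ns gives:

/* sample_delay_factor = ns_to_cycles(AD << DDS, P, 0)            */
/*                     = ns_to_cycles(10 << 3, 10, 0) = 8         */
/* which the hardware applies as AD = (SDF >> DDS) x P            */
/*                                  = (8 >> 3) x 10ns = 10ns      */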
+
+/*
+ * <1> Firstly, we should know what the GPMI-clock means.
+ * The GPMI-clock is the internal clock in the gpmi nand controller.
+ * If you set the gpmi nand controller to 100MHz, the GPMI-clock's period
+ * is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
+ *
+ * <2> Secondly, we should know the frequency on the nand chip pins.
+ * The frequency on the nand chip pins is derived from the GPMI-clock.
+ * We can get it from the following equation:
+ *
+ * F = G / (DS + DH)
+ *
+ * F : the frequency on the nand chip pins.
+ * G : the GPMI clock, such as 100MHz.
+ * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
+ * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
+ *
+ * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
+ * the nand EDO (Extended Data Out) timing can be applied.
+ * The GPMI implements a feedback read strobe to sample the read data.
+ * The feedback read strobe can be delayed to support the nand EDO timing,
+ * where the read strobe may deassert before the read data is valid, and
+ * the read data remains valid for some time after the read strobe.
+ *
+ * The following figure illustrates some aspects of a NAND Flash read:
+ *
+ * |<---tREA---->|
+ * | |
+ * | | |
+ * |<--tRP-->| |
+ * | | |
+ * __ ___|__________________________________
+ * RDN \________/ |
+ * |
+ * /---------\
+ * Read Data --------------< >---------
+ * \---------/
+ * | |
+ * |<-D->|
+ * FeedbackRDN ________ ____________
+ * \___________/
+ *
+ * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
+ *
+ *
+ * <4> Now, we begin to describe how to compute the right RDN_DELAY.
+ *
+ * 4.1) From the aspect of the nand chip pins:
+ * Delay = (tREA + C - tRP) {1}
+ *
+ * tREA : the maximum read access time. From the ONFI nand standards,
+ * we know that tREA is 16ns in mode 5 and 20ns in mode 4.
+ * Please check it at www.onfi.org.
+ * C : a constant to adjust the delay. The default is 4.
+ * tRP : the read pulse width.
+ * Specified by the HW_GPMI_TIMING0:DATA_SETUP:
+ * tRP = (GPMI-clock-period) * DATA_SETUP
+ *
+ * 4.2) From the aspect of the GPMI nand controller:
+ * Delay = RDN_DELAY * 0.125 * RP {2}
+ *
+ * RP : the DLL reference period.
+ * if (GPMI-clock-period > DLL_THRESHOLD)
+ * RP = GPMI-clock-period / 2;
+ * else
+ * RP = GPMI-clock-period;
+ *
+ * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
+ * is greater than DLL_THRESHOLD. On other SoCs, the DLL_THRESHOLD
+ * is 16ns, but on the mx6q we use 12ns.
+ *
+ * 4.3) since {1} equals {2}, we get:
+ *
+ * (tREA + 4 - tRP) * 8
+ * RDN_DELAY = --------------------- {3}
+ * RP
+ *
+ * 4.4) We only support the fastest asynchronous mode of ONFI nand.
+ * For some ONFI nand chips, mode 4 is the fastest mode,
+ * while for others mode 5 is the fastest mode.
+ * So we only support mode 4 and mode 5; there is no need to
+ * support other modes.
+ */
+static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,
+ struct gpmi_nfc_hardware_timing *hw)
+{
+ struct resources *r = &this->resources;
+ unsigned long rate = clk_get_rate(r->clock[0]);
+ int mode = this->timing_mode;
+ int dll_threshold = this->devdata->max_chain_delay;
+ unsigned long delay;
+ unsigned long clk_period;
+ int t_rea;
+ int c = 4;
+ int t_rp;
+ int rp;
+
+ /*
+ * [1] for GPMI_HW_GPMI_TIMING0:
+ * The async mode requires 40MHz for mode 4, 50MHz for mode 5.
+ * The GPMI can support 100MHz at most. So if we want to
+ * get the 40MHz or 50MHz, we have to set DS=1, DH=1.
+ * Set the ADDRESS_SETUP to 0 in mode 4.
+ */
+ hw->data_setup_in_cycles = 1;
+ hw->data_hold_in_cycles = 1;
+ hw->address_setup_in_cycles = ((mode == 5) ? 1 : 0);
+
+ /* [2] for GPMI_HW_GPMI_TIMING1 */
+ hw->device_busy_timeout = 0x9000;
+
+ /* [3] for GPMI_HW_GPMI_CTRL1 */
+ hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
+
+ /*
+	 * Scale the numerator and denominator in {3} by 10.
+	 * This gives us a more accurate result.
+ */
+ clk_period = NSEC_PER_SEC / (rate / 10);
+ dll_threshold *= 10;
+ t_rea = ((mode == 5) ? 16 : 20) * 10;
+ c *= 10;
+
+ t_rp = clk_period * 1; /* DATA_SETUP is 1 */
+
+ if (clk_period > dll_threshold) {
+ hw->use_half_periods = 1;
+ rp = clk_period / 2;
+ } else {
+ hw->use_half_periods = 0;
+ rp = clk_period;
+ }
+
+ /*
+	 * Multiply the numerator by 10 so we can round off:
+	 * 7.8 rounds up to 8; 7.4 rounds down to 7.
+ */
+ delay = (((t_rea + c - t_rp) * 8) * 10) / rp;
+ delay = (delay + 5) / 10;
+
+ hw->sample_delay_factor = delay;
+}
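A worked instance of equation {3} (assumed numbers, for illustration only): in mode 5 the GPMI clock is set to 100MHz, so with DS = DH = 1 the pin frequency is F = 100MHz / 2 = 50MHz and the clock period is 10ns; with tRP = 10ns, tREA = 16ns, C = 4 and the 12ns mx6q threshold (so RP stays at the full 10ns period):

/* RDN_DELAY = (tREA + C - tRP) * 8 / RP = (16 + 4 - 10) * 8 / 10 = 8     */
/* The x10 fixed-point arithmetic above yields the same value:            */
/* delay = ((160 + 40 - 100) * 8 * 10) / 100 = 80;  (80 + 5) / 10 = 8     */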
+
+static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
+{
+ struct resources *r = &this->resources;
+ struct nand_chip *nand = &this->nand;
+ struct mtd_info *mtd = &this->mtd;
+ uint8_t *feature;
+ unsigned long rate;
+ int ret;
+
+ feature = kzalloc(ONFI_SUBFEATURE_PARAM_LEN, GFP_KERNEL);
+ if (!feature)
+ return -ENOMEM;
+
+ nand->select_chip(mtd, 0);
+
+	/* [1] send the SET FEATURE command to the NAND */
+ feature[0] = mode;
+ ret = nand->onfi_set_features(mtd, nand,
+ ONFI_FEATURE_ADDR_TIMING_MODE, feature);
+ if (ret)
+ goto err_out;
+
+ /* [2] send GET FEATURE command to double-check the timing mode */
+ memset(feature, 0, ONFI_SUBFEATURE_PARAM_LEN);
+ ret = nand->onfi_get_features(mtd, nand,
+ ONFI_FEATURE_ADDR_TIMING_MODE, feature);
+ if (ret || feature[0] != mode)
+ goto err_out;
+
+ nand->select_chip(mtd, -1);
+
+ /* [3] set the main IO clock, 100MHz for mode 5, 80MHz for mode 4. */
+ rate = (mode == 5) ? 100000000 : 80000000;
+ clk_set_rate(r->clock[0], rate);
+
+ /* Let the gpmi_begin() re-compute the timing again. */
+ this->flags &= ~GPMI_TIMING_INIT_OK;
+
+ this->flags |= GPMI_ASYNC_EDO_ENABLED;
+ this->timing_mode = mode;
+ kfree(feature);
+ dev_info(this->dev, "enable the asynchronous EDO mode %d\n", mode);
+ return 0;
+
+err_out:
+ nand->select_chip(mtd, -1);
+ kfree(feature);
+	dev_err(this->dev, "mode:%d, failed to set feature\n", mode);
+ return -EINVAL;
+}
+
+int gpmi_extra_init(struct gpmi_nand_data *this)
+{
+ struct nand_chip *chip = &this->nand;
+
+ /* Enable the asynchronous EDO feature. */
+ if (GPMI_IS_MX6(this) && chip->onfi_version) {
+ int mode = onfi_get_async_timing_mode(chip);
+
+ /* We only support the timing mode 4 and mode 5. */
+ if (mode & ONFI_TIMING_MODE_5)
+ mode = 5;
+ else if (mode & ONFI_TIMING_MODE_4)
+ mode = 4;
+ else
+ return 0;
+
+ return enable_edo_mode(this, mode);
+ }
+ return 0;
+}
+
+/* Begin the I/O */
+void gpmi_begin(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ void __iomem *gpmi_regs = r->gpmi_regs;
+ unsigned int clock_period_in_ns;
+ uint32_t reg;
+ unsigned int dll_wait_time_in_us;
+ struct gpmi_nfc_hardware_timing hw;
+ int ret;
+
+ /* Enable the clock. */
+ ret = gpmi_enable_clk(this);
+ if (ret) {
+		dev_err(this->dev, "failed to enable the clock\n");
+ goto err_out;
+ }
+
+ /* Only initialize the timing once */
+ if (this->flags & GPMI_TIMING_INIT_OK)
+ return;
+ this->flags |= GPMI_TIMING_INIT_OK;
+
+ if (this->flags & GPMI_ASYNC_EDO_ENABLED)
+ gpmi_compute_edo_timing(this, &hw);
+ else
+ gpmi_nfc_compute_hardware_timing(this, &hw);
+
+ /* [1] Set HW_GPMI_TIMING0 */
+ reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
+ BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) |
+ BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles);
+
+ writel(reg, gpmi_regs + HW_GPMI_TIMING0);
+
+ /* [2] Set HW_GPMI_TIMING1 */
+ writel(BF_GPMI_TIMING1_BUSY_TIMEOUT(hw.device_busy_timeout),
+ gpmi_regs + HW_GPMI_TIMING1);
+
+ /* [3] The following code is to set the HW_GPMI_CTRL1. */
+
+ /* Set the WRN_DLY_SEL */
+ writel(BM_GPMI_CTRL1_WRN_DLY_SEL, gpmi_regs + HW_GPMI_CTRL1_CLR);
+ writel(BF_GPMI_CTRL1_WRN_DLY_SEL(hw.wrn_dly_sel),
+ gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD. */
+ writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+ /* Clear out the DLL control fields. */
+ reg = BM_GPMI_CTRL1_RDN_DELAY | BM_GPMI_CTRL1_HALF_PERIOD;
+ writel(reg, gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+ /* If no sample delay is called for, return immediately. */
+ if (!hw.sample_delay_factor)
+ return;
+
+ /* Set RDN_DELAY or HALF_PERIOD. */
+ reg = ((hw.use_half_periods) ? BM_GPMI_CTRL1_HALF_PERIOD : 0)
+ | BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor);
+
+ writel(reg, gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* At last, we enable the DLL. */
+ writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /*
+ * After we enable the GPMI DLL, we have to wait 64 clock cycles before
+ * we can use the GPMI. Calculate the amount of time we need to wait,
+ * in microseconds.
+ */
+ clock_period_in_ns = NSEC_PER_SEC / clk_get_rate(r->clock[0]);
+ dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;
+
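+	/*
+	 * For example (assuming the 100MHz EDO clock): the period is 10ns,
+	 * so 64 cycles are 640ns, which truncates to 0us and is then
+	 * clamped to the 1us minimum below.
+	 */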
+ if (!dll_wait_time_in_us)
+ dll_wait_time_in_us = 1;
+
+ /* Wait for the DLL to settle. */
+ udelay(dll_wait_time_in_us);
+
+err_out:
+ return;
+}
+
+void gpmi_end(struct gpmi_nand_data *this)
+{
+ gpmi_disable_clk(this);
+}
+
+/* Clears a BCH interrupt. */
+void gpmi_clear_bch(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
+}
+
+/* Returns the Ready/Busy status of the given chip. */
+int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
+{
+ struct resources *r = &this->resources;
+ uint32_t mask = 0;
+ uint32_t reg = 0;
+
+ if (GPMI_IS_MX23(this)) {
+ mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
+ reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
+ } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) {
+ /*
+ * In the imx6, all the ready/busy pins are bound
+ * together. So we only need to check chip 0.
+ */
+ if (GPMI_IS_MX6(this))
+ chip = 0;
+
+ /* MX28 shares the same R/B register as MX6Q. */
+ mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
+ reg = readl(r->gpmi_regs + HW_GPMI_STAT);
+ } else
+		dev_err(this->dev, "unknown arch.\n");
+ return reg & mask;
+}
+
+static inline void set_dma_type(struct gpmi_nand_data *this,
+ enum dma_ops_type type)
+{
+ this->last_dma_type = this->dma_type;
+ this->dma_type = type;
+}
+
+int gpmi_send_command(struct gpmi_nand_data *this)
+{
+ struct dma_chan *channel = get_dma_chan(this);
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sgl;
+ int chip = this->current_chip;
+ u32 pio[3];
+
+ /* [1] send out the PIO words */
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
+ | BM_GPMI_CTRL0_ADDRESS_INCREMENT
+ | BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
+ pio[1] = pio[2] = 0;
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
+ if (!desc)
+ return -EINVAL;
+
+ /* [2] send out the COMMAND + ADDRESS string stored in @buffer */
+ sgl = &this->cmd_sgl;
+
+ sg_init_one(sgl, this->cmd_buffer, this->command_length);
+ dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
+ desc = dmaengine_prep_slave_sg(channel,
+ sgl, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ /* [3] submit the DMA */
+ set_dma_type(this, DMA_FOR_COMMAND);
+ return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_send_data(struct gpmi_nand_data *this)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ uint32_t command_mode;
+ uint32_t address;
+ u32 pio[2];
+
+ /* [1] PIO */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+ pio[1] = 0;
+ desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
+ if (!desc)
+ return -EINVAL;
+
+ /* [2] send DMA request */
+ prepare_data_dma(this, DMA_TO_DEVICE);
+ desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
+ 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ /* [3] submit the DMA */
+ set_dma_type(this, DMA_FOR_WRITE_DATA);
+ return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_read_data(struct gpmi_nand_data *this)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ u32 pio[2];
+
+ /* [1] : send PIO */
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+ | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+ pio[1] = 0;
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
+ if (!desc)
+ return -EINVAL;
+
+ /* [2] : send DMA request */
+ prepare_data_dma(this, DMA_FROM_DEVICE);
+ desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
+ 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ /* [3] : submit the DMA */
+ set_dma_type(this, DMA_FOR_READ_DATA);
+ return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_send_page(struct gpmi_nand_data *this,
+ dma_addr_t payload, dma_addr_t auxiliary)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ uint32_t command_mode;
+ uint32_t address;
+ uint32_t ecc_command;
+ uint32_t buffer_mask;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ u32 pio[6];
+
+ /* A DMA descriptor that does an ECC page read. */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+ ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
+ buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
+ BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(0);
+ pio[1] = 0;
+ pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+ | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
+ | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
+ pio[3] = geo->page_size;
+ pio[4] = payload;
+ pio[5] = auxiliary;
+
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE,
+ DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
+ return start_dma_with_bch_irq(this, desc);
+}
+
+int gpmi_read_page(struct gpmi_nand_data *this,
+ dma_addr_t payload, dma_addr_t auxiliary)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ uint32_t command_mode;
+ uint32_t address;
+ uint32_t ecc_command;
+ uint32_t buffer_mask;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ u32 pio[6];
+
+ /* [1] Wait for the chip to report ready. */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(0);
+ pio[1] = 0;
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio, 2,
+ DMA_TRANS_NONE, 0);
+ if (!desc)
+ return -EINVAL;
+
+ /* [2] Enable the BCH block and read. */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+ ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
+ buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
+ | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
+
+ pio[1] = 0;
+ pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+ | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
+ | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
+ pio[3] = geo->page_size;
+ pio[4] = payload;
+ pio[5] = auxiliary;
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ /* [3] Disable the BCH block */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
+ pio[1] = 0;
+ pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio, 3,
+ DMA_TRANS_NONE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ /* [4] submit the DMA */
+ set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
+ return start_dma_with_bch_irq(this, desc);
+}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
new file mode 100644
index 00000000000..f638cd8077c
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -0,0 +1,1844 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
+#include "gpmi-nand.h"
+#include "bch-regs.h"
+
+/* Resource names for the GPMI NAND driver. */
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
+
+/* Add our own BBT descriptor. */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr gpmi_bbt_descr = {
+ .options = 0,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern
+};
+
+/*
+ * We may change the layout if we can get the ECC info from the datasheet;
+ * otherwise we will use all of the (page + OOB).
+ */
+static struct nand_ecclayout gpmi_hw_ecclayout = {
+ .eccbytes = 0,
+ .eccpos = { 0, },
+ .oobfree = { {.offset = 0, .length = 0} }
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx23 = {
+ .type = IS_MX23,
+ .bch_max_ecc_strength = 20,
+ .max_chain_delay = 16,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx28 = {
+ .type = IS_MX28,
+ .bch_max_ecc_strength = 20,
+ .max_chain_delay = 16,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx6q = {
+ .type = IS_MX6Q,
+ .bch_max_ecc_strength = 40,
+ .max_chain_delay = 12,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx6sx = {
+ .type = IS_MX6SX,
+ .bch_max_ecc_strength = 62,
+ .max_chain_delay = 12,
+};
+
+static irqreturn_t bch_irq(int irq, void *cookie)
+{
+ struct gpmi_nand_data *this = cookie;
+
+ gpmi_clear_bch(this);
+ complete(&this->bch_done);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Calculate the ECC strength by hand:
+ * E : The ECC strength.
+ * G : the length of Galois Field.
+ * N : the chunk count per page.
+ * O : the oobsize of the NAND chip.
+ * M : the metadata size per page.
+ *
+ * The formula is :
+ * E * G * N
+ * ------------ <= (O - M)
+ * 8
+ *
+ * So, we get E by:
+ * (O - M) * 8
+ * E <= -------------
+ * G * N
+ */
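+/*
+ * A worked example with hypothetical values (not taken from any real chip):
+ * for a 2KiB page with a 64-byte OOB, M = 10, G = 13 and N = 4 chunks of
+ * 512 bytes, E <= ((64 - 10) * 8) / (13 * 4) = 432 / 52, so the integer
+ * division below yields 8, which is already even.
+ */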
+static inline int get_ecc_strength(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ int ecc_strength;
+
+ ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
+ / (geo->gf_len * geo->ecc_chunk_count);
+
+	/* We need the largest even number that does not exceed it. */
+ return round_down(ecc_strength, 2);
+}
+
+static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+
+ /* Do the sanity check. */
+ if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
+ /* The mx23/mx28 only support the GF13. */
+ if (geo->gf_len == 14)
+ return false;
+ }
+ return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
+}
+
+/*
+ * If we can get the ECC information from the NAND chip, we do not
+ * need to calculate it ourselves.
+ *
+ * We may have available oob space in this case.
+ */
+static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = mtd->priv;
+ struct nand_oobfree *of = gpmi_hw_ecclayout.oobfree;
+ unsigned int block_mark_bit_offset;
+
+ if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
+ return false;
+
+ switch (chip->ecc_step_ds) {
+ case SZ_512:
+ geo->gf_len = 13;
+ break;
+ case SZ_1K:
+ geo->gf_len = 14;
+ break;
+ default:
+ dev_err(this->dev,
+ "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
+ chip->ecc_strength_ds, chip->ecc_step_ds);
+ return false;
+ }
+ geo->ecc_chunk_size = chip->ecc_step_ds;
+ geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
+ if (!gpmi_check_ecc(this))
+ return false;
+
+ /* Keep the C >= O */
+ if (geo->ecc_chunk_size < mtd->oobsize) {
+ dev_err(this->dev,
+ "unsupported nand chip. ecc size: %d, oob size : %d\n",
+ chip->ecc_step_ds, mtd->oobsize);
+ return false;
+ }
+
+ /* The default value, see comment in the legacy_set_geometry(). */
+ geo->metadata_size = 10;
+
+ geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
+
+ /*
+	 * A NAND chip with a 2K page (512-byte data chunks) is shown below:
+ *
+ * | P |
+ * |<----------------------------------------------------->|
+ * | |
+ * | (Block Mark) |
+ * | P' | | | |
+ * |<-------------------------------------------->| D | | O' |
+ * | |<---->| |<--->|
+ * V V V V V
+ * +---+----------+-+----------+-+----------+-+----------+-+-----+
+ * | M | data |E| data |E| data |E| data |E| |
+ * +---+----------+-+----------+-+----------+-+----------+-+-----+
+ * ^ ^
+ * | O |
+ * |<------------>|
+ * | |
+ *
+ * P : the page size for BCH module.
+ * E : The ECC strength.
+ * G : the length of Galois Field.
+	 * N : the chunk count per page.
+	 * M : the metadata size per page.
+ * C : the ecc chunk size, aka the "data" above.
+ * P': the nand chip's page size.
+ * O : the nand chip's oob size.
+ * O': the free oob.
+ *
+ * The formula for P is :
+ *
+ * E * G * N
+ * P = ------------ + P' + M
+ * 8
+ *
+ * The position of block mark moves forward in the ECC-based view
+ * of page, and the delta is:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M)
+ * 8
+ *
+	 * Please see the comment in legacy_set_geometry().
+	 * With the condition C >= O, we still get the same result.
+ * So the bit position of the physical block mark within the ECC-based
+ * view of the page is :
+ * (P' - D) * 8
+ */
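+	/*
+	 * A worked example with hypothetical values: for a 2KiB page with
+	 * a 64-byte OOB, E = 8, G = 13 and N = 4, the BCH page size is
+	 * 2048 + 10 + (13 * 8 * 4) / 8 = 2110 bytes, which leaves
+	 * 64 - (2110 - 2048) = 2 free OOB bytes.
+	 */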
+ geo->page_size = mtd->writesize + geo->metadata_size +
+ (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
+
+ /* The available oob size we have. */
+ if (geo->page_size < mtd->writesize + mtd->oobsize) {
+ of->offset = geo->page_size - mtd->writesize;
+ of->length = mtd->oobsize - of->offset;
+ }
+
+ geo->payload_size = mtd->writesize;
+
+ geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
+ geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
+ + ALIGN(geo->ecc_chunk_count, 4);
+
+ if (!this->swap_block_mark)
+ return true;
+
+ /* For bit swap. */
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ + geo->metadata_size * 8);
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+ return true;
+}
+
+static int legacy_set_geometry(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ unsigned int metadata_size;
+ unsigned int status_size;
+ unsigned int block_mark_bit_offset;
+
+ /*
+	 * The size of the metadata can be changed, though we set it to 10
+	 * bytes for now. It can't be too large, because we have to leave
+	 * enough space for the BCH data.
+ */
+ geo->metadata_size = 10;
+
+ /* The default for the length of Galois Field. */
+ geo->gf_len = 13;
+
+ /* The default for chunk size. */
+ geo->ecc_chunk_size = 512;
+ while (geo->ecc_chunk_size < mtd->oobsize) {
+ geo->ecc_chunk_size *= 2; /* keep C >= O */
+ geo->gf_len = 14;
+ }
+
+ geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
+
+ /* We use the same ECC strength for all chunks. */
+ geo->ecc_strength = get_ecc_strength(this);
+ if (!gpmi_check_ecc(this)) {
+ dev_err(this->dev,
+			"We cannot support this NAND chip: its required ECC strength (%d) is beyond our capability (%d).\n",
+			geo->ecc_strength,
+			this->devdata->bch_max_ecc_strength);
+ return -EINVAL;
+ }
+
+ geo->page_size = mtd->writesize + mtd->oobsize;
+ geo->payload_size = mtd->writesize;
+
+ /*
+ * The auxiliary buffer contains the metadata and the ECC status. The
+ * metadata is padded to the nearest 32-bit boundary. The ECC status
+ * contains one byte for every ECC chunk, and is also padded to the
+ * nearest 32-bit boundary.
+ */
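+	/*
+	 * For example (hypothetical 10-byte metadata and 4 ECC chunks):
+	 * the metadata pads to 12 bytes and the status bytes pad to 4,
+	 * giving a 16-byte auxiliary buffer with the status at offset 12.
+	 */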
+ metadata_size = ALIGN(geo->metadata_size, 4);
+ status_size = ALIGN(geo->ecc_chunk_count, 4);
+
+ geo->auxiliary_size = metadata_size + status_size;
+ geo->auxiliary_status_offset = metadata_size;
+
+ if (!this->swap_block_mark)
+ return 0;
+
+ /*
+ * We need to compute the byte and bit offsets of
+ * the physical block mark within the ECC-based view of the page.
+ *
+	 * A NAND chip with a 2K page is shown below:
+ * (Block Mark)
+ * | |
+ * | D |
+ * |<---->|
+ * V V
+ * +---+----------+-+----------+-+----------+-+----------+-+
+ * | M | data |E| data |E| data |E| data |E|
+ * +---+----------+-+----------+-+----------+-+----------+-+
+ *
+ * The position of block mark moves forward in the ECC-based view
+ * of page, and the delta is:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M)
+ * 8
+ *
+	 * Combining the formula that computes the ECC strength with the
+	 * condition C >= O (C is the ECC chunk size), it is easy to
+	 * deduce the following result:
+ *
+ * E * G (O - M) C - M C - M
+ * ----------- <= ------- <= -------- < ---------
+ * 8 N N (N - 1)
+ *
+ * So, we get:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M) < C
+ * 8
+ *
+ * The above inequality means the position of block mark
+ * within the ECC-based view of the page is still in the data chunk,
+ * and it's NOT in the ECC bits of the chunk.
+ *
+ * Use the following to compute the bit position of the
+ * physical block mark within the ECC-based view of the page:
+ * (page_size - D) * 8
+ *
+ * --Huang Shijie
+ */
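+	/*
+	 * A worked example with hypothetical values (2KiB page, E = 8,
+	 * G = 13, N = 4, M = 10): D = (8 * 13 * 3) / 8 + 10 = 49 bytes,
+	 * so the mark sits at bit (2048 - 49) * 8 = 15992 of the ECC-based
+	 * view, i.e. byte 1999, bit 0.
+	 */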
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ + geo->metadata_size * 8);
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+ return 0;
+}
+
+int common_nfc_set_geometry(struct gpmi_nand_data *this)
+{
+ if (of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc")
+ && set_geometry_by_ecc_info(this))
+ return 0;
+ return legacy_set_geometry(this);
+}
+
+struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
+{
+ /* We use the DMA channel 0 to access all the nand chips. */
+ return this->dma_chans[0];
+}
+
+/* Can we use the upper's buffer directly for DMA? */
+void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
+{
+ struct scatterlist *sgl = &this->data_sgl;
+ int ret;
+
+ /* first try to map the upper buffer directly */
+ if (virt_addr_valid(this->upper_buf) &&
+ !object_is_on_stack(this->upper_buf)) {
+ sg_init_one(sgl, this->upper_buf, this->upper_len);
+ ret = dma_map_sg(this->dev, sgl, 1, dr);
+ if (ret == 0)
+ goto map_fail;
+
+ this->direct_dma_map_ok = true;
+ return;
+ }
+
+map_fail:
+ /* We have to use our own DMA buffer. */
+ sg_init_one(sgl, this->data_buffer_dma, this->upper_len);
+
+ if (dr == DMA_TO_DEVICE)
+ memcpy(this->data_buffer_dma, this->upper_buf, this->upper_len);
+
+ dma_map_sg(this->dev, sgl, 1, dr);
+
+ this->direct_dma_map_ok = false;
+}
+
+/* This will be called after the DMA operation is finished. */
+static void dma_irq_callback(void *param)
+{
+ struct gpmi_nand_data *this = param;
+ struct completion *dma_c = &this->dma_done;
+
+ switch (this->dma_type) {
+ case DMA_FOR_COMMAND:
+ dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
+ break;
+
+ case DMA_FOR_READ_DATA:
+ dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
+ if (this->direct_dma_map_ok == false)
+ memcpy(this->upper_buf, this->data_buffer_dma,
+ this->upper_len);
+ break;
+
+ case DMA_FOR_WRITE_DATA:
+ dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
+ break;
+
+ case DMA_FOR_READ_ECC_PAGE:
+ case DMA_FOR_WRITE_ECC_PAGE:
+		/* We have to wait for the BCH interrupt to finish. */
+ break;
+
+ default:
+		dev_err(this->dev, "unexpected DMA operation type\n");
+ }
+
+ complete(dma_c);
+}
+
+int start_dma_without_bch_irq(struct gpmi_nand_data *this,
+ struct dma_async_tx_descriptor *desc)
+{
+ struct completion *dma_c = &this->dma_done;
+ int err;
+
+ init_completion(dma_c);
+
+ desc->callback = dma_irq_callback;
+ desc->callback_param = this;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(get_dma_chan(this));
+
+ /* Wait for the interrupt from the DMA block. */
+ err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
+ if (!err) {
+ dev_err(this->dev, "DMA timeout, last DMA :%d\n",
+ this->last_dma_type);
+ gpmi_dump_info(this);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/*
+ * This function is used when reading or writing pages through BCH.
+ * It waits for the BCH interrupt for at most ONE second.
+ * We actually have to wait for two interrupts:
+ *   [1] first the DMA interrupt, and
+ *   [2] then the BCH interrupt.
+ */
+int start_dma_with_bch_irq(struct gpmi_nand_data *this,
+ struct dma_async_tx_descriptor *desc)
+{
+ struct completion *bch_c = &this->bch_done;
+ int err;
+
+ /* Prepare to receive an interrupt from the BCH block. */
+ init_completion(bch_c);
+
+ /* start the DMA */
+ start_dma_without_bch_irq(this, desc);
+
+ /* Wait for the interrupt from the BCH block. */
+ err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
+ if (!err) {
+ dev_err(this->dev, "BCH timeout, last DMA :%d\n",
+ this->last_dma_type);
+ gpmi_dump_info(this);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static int acquire_register_block(struct gpmi_nand_data *this,
+ const char *res_name)
+{
+ struct platform_device *pdev = this->pdev;
+ struct resources *res = &this->resources;
+ struct resource *r;
+ void __iomem *p;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
+ p = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
+ res->gpmi_regs = p;
+ else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
+ res->bch_regs = p;
+ else
+ dev_err(this->dev, "unknown resource name : %s\n", res_name);
+
+ return 0;
+}
+
+static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
+{
+ struct platform_device *pdev = this->pdev;
+ const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
+ struct resource *r;
+ int err;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
+ if (!r) {
+ dev_err(this->dev, "Can't get resource for %s\n", res_name);
+ return -ENODEV;
+ }
+
+ err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
+ if (err)
+ dev_err(this->dev, "error requesting BCH IRQ\n");
+
+ return err;
+}
+
+static void release_dma_channels(struct gpmi_nand_data *this)
+{
+ unsigned int i;
+ for (i = 0; i < DMA_CHANS; i++)
+ if (this->dma_chans[i]) {
+ dma_release_channel(this->dma_chans[i]);
+ this->dma_chans[i] = NULL;
+ }
+}
+
+static int acquire_dma_channels(struct gpmi_nand_data *this)
+{
+ struct platform_device *pdev = this->pdev;
+ struct dma_chan *dma_chan;
+
+ /* request dma channel */
+ dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
+ if (!dma_chan) {
+ dev_err(this->dev, "Failed to request DMA channel.\n");
+ goto acquire_err;
+ }
+
+ this->dma_chans[0] = dma_chan;
+ return 0;
+
+acquire_err:
+ release_dma_channels(this);
+ return -EINVAL;
+}
+
+static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
+ "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
+};
+
+static int gpmi_get_clks(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ char **extra_clks = NULL;
+ struct clk *clk;
+ int err, i;
+
+	/* The main clock is stored in the first slot. */
+ r->clock[0] = devm_clk_get(this->dev, "gpmi_io");
+ if (IS_ERR(r->clock[0])) {
+ err = PTR_ERR(r->clock[0]);
+ goto err_clock;
+ }
+
+ /* Get extra clocks */
+ if (GPMI_IS_MX6(this))
+ extra_clks = extra_clks_for_mx6q;
+ if (!extra_clks)
+ return 0;
+
+ for (i = 1; i < GPMI_CLK_MAX; i++) {
+ if (extra_clks[i - 1] == NULL)
+ break;
+
+ clk = devm_clk_get(this->dev, extra_clks[i - 1]);
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ goto err_clock;
+ }
+
+ r->clock[i] = clk;
+ }
+
+ if (GPMI_IS_MX6(this))
+ /*
+ * Set the default value for the gpmi clock.
+ *
+		 * If you want to use an ONFI NAND running in the
+		 * Synchronous Mode, change the clock rate as needed.
+ */
+ clk_set_rate(r->clock[0], 22000000);
+
+ return 0;
+
+err_clock:
+	dev_dbg(this->dev, "failed to find the clocks\n");
+ return err;
+}
+
+static int acquire_resources(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_bch_irq(this, bch_irq);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_dma_channels(this);
+ if (ret)
+ goto exit_regs;
+
+ ret = gpmi_get_clks(this);
+ if (ret)
+ goto exit_clock;
+ return 0;
+
+exit_clock:
+ release_dma_channels(this);
+exit_regs:
+ return ret;
+}
+
+static void release_resources(struct gpmi_nand_data *this)
+{
+ release_dma_channels(this);
+}
+
+static int init_hardware(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ /*
+ * This structure contains the "safe" GPMI timing that should succeed
+ * with any NAND Flash device
+ * (although, with less-than-optimal performance).
+ */
+ struct nand_timing safe_timing = {
+ .data_setup_in_ns = 80,
+ .data_hold_in_ns = 60,
+ .address_setup_in_ns = 25,
+ .gpmi_sample_delay_in_ns = 6,
+ .tREA_in_ns = -1,
+ .tRLOH_in_ns = -1,
+ .tRHOH_in_ns = -1,
+ };
+
+	/* Initialize the hardware. */
+ ret = gpmi_init(this);
+ if (ret)
+ return ret;
+
+ this->timing = safe_timing;
+ return 0;
+}
+
+static int read_page_prepare(struct gpmi_nand_data *this,
+ void *destination, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ void **use_virt, dma_addr_t *use_phys)
+{
+ struct device *dev = this->dev;
+
+ if (virt_addr_valid(destination)) {
+ dma_addr_t dest_phys;
+
+ dest_phys = dma_map_single(dev, destination,
+ length, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dest_phys)) {
+ if (alt_size < length) {
+ dev_err(dev, "Alternate buffer is too small\n");
+ return -ENOMEM;
+ }
+ goto map_failed;
+ }
+ *use_virt = destination;
+ *use_phys = dest_phys;
+ this->direct_dma_map_ok = true;
+ return 0;
+ }
+
+map_failed:
+ *use_virt = alt_virt;
+ *use_phys = alt_phys;
+ this->direct_dma_map_ok = false;
+ return 0;
+}
+
+static inline void read_page_end(struct gpmi_nand_data *this,
+ void *destination, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ void *used_virt, dma_addr_t used_phys)
+{
+ if (this->direct_dma_map_ok)
+ dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
+}
+
+static inline void read_page_swap_end(struct gpmi_nand_data *this,
+ void *destination, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ void *used_virt, dma_addr_t used_phys)
+{
+ if (!this->direct_dma_map_ok)
+ memcpy(destination, alt_virt, length);
+}
+
+static int send_page_prepare(struct gpmi_nand_data *this,
+ const void *source, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ const void **use_virt, dma_addr_t *use_phys)
+{
+ struct device *dev = this->dev;
+
+ if (virt_addr_valid(source)) {
+ dma_addr_t source_phys;
+
+ source_phys = dma_map_single(dev, (void *)source, length,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, source_phys)) {
+ if (alt_size < length) {
+ dev_err(dev, "Alternate buffer is too small\n");
+ return -ENOMEM;
+ }
+ goto map_failed;
+ }
+ *use_virt = source;
+ *use_phys = source_phys;
+ return 0;
+ }
+map_failed:
+ /*
+ * Copy the content of the source buffer into the alternate
+ * buffer and set up the return values accordingly.
+ */
+ memcpy(alt_virt, source, length);
+
+ *use_virt = alt_virt;
+ *use_phys = alt_phys;
+ return 0;
+}
+
+static void send_page_end(struct gpmi_nand_data *this,
+ const void *source, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ const void *used_virt, dma_addr_t used_phys)
+{
+ struct device *dev = this->dev;
+ if (used_virt == source)
+ dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
+}
+
+static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+
+ if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
+ dma_free_coherent(dev, this->page_buffer_size,
+ this->page_buffer_virt,
+ this->page_buffer_phys);
+ kfree(this->cmd_buffer);
+ kfree(this->data_buffer_dma);
+
+ this->cmd_buffer = NULL;
+ this->data_buffer_dma = NULL;
+ this->page_buffer_virt = NULL;
+ this->page_buffer_size = 0;
+}
+
+/* Allocate the DMA buffers */
+static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct device *dev = this->dev;
+ struct mtd_info *mtd = &this->mtd;
+
+ /* [1] Allocate a command buffer. PAGE_SIZE is enough. */
+ this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
+ if (this->cmd_buffer == NULL)
+ goto error_alloc;
+
+ /*
+ * [2] Allocate a read/write data buffer.
+	 *     gpmi_alloc_dma_buffer() can be called twice: we allocate a
+	 *     PAGE_SIZE buffer when it is called before nand_scan_ident(),
+	 *     and a buffer of the real NAND page size when it is called
+	 *     afterwards.
+ */
+ this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (this->data_buffer_dma == NULL)
+ goto error_alloc;
+
+ /*
+ * [3] Allocate the page buffer.
+ *
+ * Both the payload buffer and the auxiliary buffer must appear on
+ * 32-bit boundaries. We presume the size of the payload buffer is a
+ * power of two and is much larger than four, which guarantees the
+ * auxiliary buffer will appear on a 32-bit boundary.
+ */
+ this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
+ this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
+ &this->page_buffer_phys, GFP_DMA);
+ if (!this->page_buffer_virt)
+ goto error_alloc;
+
+
+ /* Slice up the page buffer. */
+ this->payload_virt = this->page_buffer_virt;
+ this->payload_phys = this->page_buffer_phys;
+ this->auxiliary_virt = this->payload_virt + geo->payload_size;
+ this->auxiliary_phys = this->payload_phys + geo->payload_size;
+ return 0;
+
+error_alloc:
+ gpmi_free_dma_buffer(this);
+ return -ENOMEM;
+}
+
+static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+ int ret;
+
+ /*
+ * Every operation begins with a command byte and a series of zero or
+ * more address bytes. These are distinguished by either the Address
+ * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
+ * asserted. When MTD is ready to execute the command, it will deassert
+ * both latch enables.
+ *
+ * Rather than run a separate DMA operation for every single byte, we
+ * queue them up and run a single DMA operation for the entire series
+ * of command and data bytes. NAND_CMD_NONE means the END of the queue.
+ */
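+	/*
+	 * For instance, a typical page read first queues the 0x00 command
+	 * byte and its address bytes here, one call at a time; the whole
+	 * string then goes out as a single DMA transfer once MTD deasserts
+	 * both latch enables.
+	 */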
+ if ((ctrl & (NAND_ALE | NAND_CLE))) {
+ if (data != NAND_CMD_NONE)
+ this->cmd_buffer[this->command_length++] = data;
+ return;
+ }
+
+ if (!this->command_length)
+ return;
+
+ ret = gpmi_send_command(this);
+ if (ret)
+ dev_err(this->dev, "Chip: %u, Error %d\n",
+ this->current_chip, ret);
+
+ this->command_length = 0;
+}
+
+static int gpmi_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ return gpmi_is_ready(this, this->current_chip);
+}
+
+static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ if ((this->current_chip < 0) && (chipnr >= 0))
+ gpmi_begin(this);
+ else if ((this->current_chip >= 0) && (chipnr < 0))
+ gpmi_end(this);
+
+ this->current_chip = chipnr;
+}
+
+static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ dev_dbg(this->dev, "len is %d\n", len);
+ this->upper_buf = buf;
+ this->upper_len = len;
+
+ gpmi_read_data(this);
+}
+
+static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ dev_dbg(this->dev, "len is %d\n", len);
+ this->upper_buf = (uint8_t *)buf;
+ this->upper_len = len;
+
+ gpmi_send_data(this);
+}
+
+static uint8_t gpmi_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+ uint8_t *buf = this->data_buffer_dma;
+
+ gpmi_read_buf(mtd, buf, 1);
+ return buf[0];
+}
+
+/*
+ * Handles block mark swapping.
+ * It can be called either to swap the block mark or to swap it back,
+ * because the operations are the same.
+ */
+static void block_mark_swapping(struct gpmi_nand_data *this,
+ void *payload, void *auxiliary)
+{
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ unsigned char *p;
+ unsigned char *a;
+ unsigned int bit;
+ unsigned char mask;
+ unsigned char from_data;
+ unsigned char from_oob;
+
+ if (!this->swap_block_mark)
+ return;
+
+ /*
+ * If control arrives here, we're swapping. Make some convenience
+ * variables.
+ */
+ bit = nfc_geo->block_mark_bit_offset;
+ p = payload + nfc_geo->block_mark_byte_offset;
+ a = auxiliary;
+
+ /*
+ * Get the byte from the data area that overlays the block mark. Since
+ * the ECC engine applies its own view to the bits in the page, the
+ * physical block mark won't (in general) appear on a byte boundary in
+ * the data.
+ */
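+	/*
+	 * For instance, with a hypothetical bit offset of 2, from_data is
+	 * built from the upper six bits of p[0] and the lower two bits of
+	 * p[1].
+	 */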
+ from_data = (p[0] >> bit) | (p[1] << (8 - bit));
+
+ /* Get the byte from the OOB. */
+ from_oob = a[0];
+
+ /* Swap them. */
+ a[0] = from_data;
+
+ mask = (0x1 << bit) - 1;
+ p[0] = (p[0] & mask) | (from_oob << bit);
+
+ mask = ~0 << bit;
+ p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
+}
+
+static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ void *payload_virt;
+ dma_addr_t payload_phys;
+ void *auxiliary_virt;
+ dma_addr_t auxiliary_phys;
+ unsigned int i;
+ unsigned char *status;
+ unsigned int max_bitflips = 0;
+ int ret;
+
+ dev_dbg(this->dev, "page number is : %d\n", page);
+ ret = read_page_prepare(this, buf, nfc_geo->payload_size,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ &payload_virt, &payload_phys);
+ if (ret) {
+ dev_err(this->dev, "Inadequate DMA buffer\n");
+ ret = -ENOMEM;
+ return ret;
+ }
+ auxiliary_virt = this->auxiliary_virt;
+ auxiliary_phys = this->auxiliary_phys;
+
+ /* go! */
+ ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
+ read_page_end(this, buf, nfc_geo->payload_size,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+ if (ret) {
+ dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
+ return ret;
+ }
+
+ /* handle the block mark swapping */
+ block_mark_swapping(this, payload_virt, auxiliary_virt);
+
+ /* Loop over status bytes, accumulating ECC status. */
+ status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+
+ for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
+ if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
+ continue;
+
+ if (*status == STATUS_UNCORRECTABLE) {
+ mtd->ecc_stats.failed++;
+ continue;
+ }
+ mtd->ecc_stats.corrected += *status;
+ max_bitflips = max_t(unsigned int, max_bitflips, *status);
+ }
+
+ if (oob_required) {
+ /*
+ * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
+ * for details about our policy for delivering the OOB.
+ *
+ * We fill the caller's buffer with set bits, and then copy the
+		 * block mark to the caller's buffer. Note that, if block mark
+ * swapping was necessary, it has already been done, so we can
+ * rely on the first byte of the auxiliary buffer to contain
+ * the block mark.
+ */
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+ chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+ }
+
+ read_page_swap_end(this, buf, nfc_geo->payload_size,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+
+ return max_bitflips;
+}
+
+/* Fake a virtual small page for the subpage read */
+static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offs, uint32_t len, uint8_t *buf, int page)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ void __iomem *bch_regs = this->resources.bch_regs;
+ struct bch_geometry old_geo = this->bch_geometry;
+ struct bch_geometry *geo = &this->bch_geometry;
+ int size = chip->ecc.size; /* ECC chunk size */
+ int meta, n, page_size;
+ u32 r1_old, r2_old, r1_new, r2_new;
+ unsigned int max_bitflips;
+ int first, last, marker_pos;
+ int ecc_parity_size;
+ int col = 0;
+
+ /* The size of ECC parity */
+ ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
+
+ /* Align it with the chunk size */
+ first = offs / size;
+ last = (offs + len - 1) / size;
+
+ /*
+ * Find the chunk which contains the Block Marker. If this chunk is
+ * in the range of [first, last], we have to read out the whole page.
+	 * Why? Because we have swapped the data at the Block Marker's
+	 * position into the metadata, which is bound to chunk 0.
+ */
+ marker_pos = geo->block_mark_byte_offset / size;
+ if (last >= marker_pos && first <= marker_pos) {
+ dev_dbg(this->dev, "page:%d, first:%d, last:%d, marker at:%d\n",
+ page, first, last, marker_pos);
+ return gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+ }
+
+ meta = geo->metadata_size;
+ if (first) {
+ col = meta + (size + ecc_parity_size) * first;
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, col, -1);
+
+ meta = 0;
+ buf = buf + first * size;
+ }
+
+ /* Save the old environment */
+ r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
+ r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
+
+ /* change the BCH registers and bch_geometry{} */
+ n = last - first + 1;
+ page_size = meta + (size + ecc_parity_size) * n;
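+	/*
+	 * A worked example with hypothetical values (512-byte chunks,
+	 * 13 bytes of parity, 10-byte metadata): reading 512 bytes at
+	 * offset 1024 gives first = last = 2, so col = 10 + 525 * 2 = 1060,
+	 * n = 1 and the temporary BCH page_size is 0 + 525 * 1 = 525 bytes.
+	 */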
+
+ r1_new &= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS |
+ BM_BCH_FLASH0LAYOUT0_META_SIZE);
+ r1_new |= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1)
+ | BF_BCH_FLASH0LAYOUT0_META_SIZE(meta);
+ writel(r1_new, bch_regs + HW_BCH_FLASH0LAYOUT0);
+
+ r2_new &= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE;
+ r2_new |= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size);
+ writel(r2_new, bch_regs + HW_BCH_FLASH0LAYOUT1);
+
+ geo->ecc_chunk_count = n;
+ geo->payload_size = n * size;
+ geo->page_size = page_size;
+ geo->auxiliary_status_offset = ALIGN(meta, 4);
+
+ dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
+ page, offs, len, col, first, n, page_size);
+
+ /* Read the subpage now */
+ this->swap_block_mark = false;
+ max_bitflips = gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+
+ /* Restore */
+ writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
+ writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1);
+ this->bch_geometry = old_geo;
+ this->swap_block_mark = true;
+
+ return max_bitflips;
+}
+
+static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ const void *payload_virt;
+ dma_addr_t payload_phys;
+ const void *auxiliary_virt;
+ dma_addr_t auxiliary_phys;
+ int ret;
+
+ dev_dbg(this->dev, "ecc write page.\n");
+ if (this->swap_block_mark) {
+ /*
+ * If control arrives here, we're doing block mark swapping.
+ * Since we can't modify the caller's buffers, we must copy them
+ * into our own.
+ */
+ memcpy(this->payload_virt, buf, mtd->writesize);
+ payload_virt = this->payload_virt;
+ payload_phys = this->payload_phys;
+
+ memcpy(this->auxiliary_virt, chip->oob_poi,
+ nfc_geo->auxiliary_size);
+ auxiliary_virt = this->auxiliary_virt;
+ auxiliary_phys = this->auxiliary_phys;
+
+ /* Handle block mark swapping. */
+ block_mark_swapping(this,
+ (void *) payload_virt, (void *) auxiliary_virt);
+ } else {
+ /*
+ * If control arrives here, we're not doing block mark swapping,
+		 * so we can try to use the caller's buffers.
+ */
+ ret = send_page_prepare(this,
+ buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ &payload_virt, &payload_phys);
+ if (ret) {
+ dev_err(this->dev, "Inadequate payload DMA buffer\n");
+ return 0;
+ }
+
+ ret = send_page_prepare(this,
+ chip->oob_poi, mtd->oobsize,
+ this->auxiliary_virt, this->auxiliary_phys,
+ nfc_geo->auxiliary_size,
+ &auxiliary_virt, &auxiliary_phys);
+ if (ret) {
+ dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");
+ goto exit_auxiliary;
+ }
+ }
+
+ /* Ask the NFC. */
+ ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
+ if (ret)
+ dev_err(this->dev, "Error in ECC-based write: %d\n", ret);
+
+ if (!this->swap_block_mark) {
+ send_page_end(this, chip->oob_poi, mtd->oobsize,
+ this->auxiliary_virt, this->auxiliary_phys,
+ nfc_geo->auxiliary_size,
+ auxiliary_virt, auxiliary_phys);
+exit_auxiliary:
+ send_page_end(this, buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+ }
+
+ return 0;
+}
+
+/*
+ * There are several places in this driver where we have to handle the OOB and
+ * block marks. This is the function where things are the most complicated, so
+ * this is where we try to explain it all. All the other places refer back to
+ * here.
+ *
+ * These are the rules, in order of decreasing importance:
+ *
+ * 1) Nothing the caller does can be allowed to imperil the block mark.
+ *
+ * 2) In read operations, the first byte of the OOB we return must reflect the
+ * true state of the block mark, no matter where that block mark appears in
+ * the physical page.
+ *
+ * 3) ECC-based read operations return an OOB full of set bits (since we never
+ * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
+ * return).
+ *
+ * 4) "Raw" read operations return a direct view of the physical bytes in the
+ * page, using the conventional definition of which bytes are data and which
+ * are OOB. This gives the caller a way to see the actual, physical bytes
+ * in the page, without the distortions applied by our ECC engine.
+ *
+ *
+ * What we do for this specific read operation depends on two questions:
+ *
+ * 1) Are we doing a "raw" read, or an ECC-based read?
+ *
+ * 2) Are we using block mark swapping or transcription?
+ *
+ * There are four cases, illustrated by the following Karnaugh map:
+ *
+ * | Raw | ECC-based |
+ * -------------+-------------------------+-------------------------+
+ * | Read the conventional | |
+ * | OOB at the end of the | |
+ * Swapping | page and return it. It | |
+ * | contains exactly what | |
+ * | we want. | Read the block mark and |
+ * -------------+-------------------------+ return it in a buffer |
+ * | Read the conventional | full of set bits. |
+ * | OOB at the end of the | |
+ * | page and also the block | |
+ * Transcribing | mark in the metadata. | |
+ * | Copy the block mark | |
+ * | into the first byte of | |
+ * | the OOB. | |
+ * -------------+-------------------------+-------------------------+
+ *
+ * Note that we break rule #4 in the Transcribing/Raw case because we're not
+ * giving an accurate view of the actual, physical bytes in the page (we're
+ * overwriting the block mark). That's OK because it's more important to follow
+ * rule #2.
+ *
+ * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
+ * easy. When reading a page, for example, the NAND Flash MTD code calls our
+ * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
+ * ECC-based or raw view of the page is implicit in which function it calls
+ * (there is a similar pair of ECC-based/raw functions for writing).
+ *
+ * FIXME: The following paragraph is incorrect, now that there exist
+ * ecc.read_oob_raw and ecc.write_oob_raw functions.
+ *
+ * Since MTD assumes the OOB is not covered by ECC, there is no pair of
+ * ECC-based/raw functions for reading or writing the OOB. The fact that the
+ * caller wants an ECC-based or raw view of the page is not propagated down to
+ * this driver.
+ */
+static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct gpmi_nand_data *this = chip->priv;
+
+ dev_dbg(this->dev, "page number is %d\n", page);
+ /* clear the OOB buffer */
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+
+ /* Read out the conventional OOB. */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /*
+ * Now, we want to make sure the block mark is correct. In the
+ * Swapping/Raw case, we already have it. Otherwise, we need to
+ * explicitly read it.
+ */
+ if (!this->swap_block_mark) {
+ /* Read the block mark into the first byte of the OOB buffer. */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ chip->oob_poi[0] = chip->read_byte(mtd);
+ }
+
+ return 0;
+}
+
+static int
+gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ struct nand_oobfree *of = mtd->ecclayout->oobfree;
+ int status = 0;
+
+ /* Do we have available oob area? */
+ if (!of->length)
+ return -EPERM;
+
+ if (!nand_is_slc(chip))
+ return -EPERM;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of->offset, page);
+ chip->write_buf(mtd, chip->oob_poi + of->offset, of->length);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+ int ret = 0;
+ uint8_t *block_mark;
+ int column, page, status, chipnr;
+
+ chipnr = (int)(ofs >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ column = this->swap_block_mark ? mtd->writesize : 0;
+
+ /* Write the block mark. */
+ block_mark = this->data_buffer_dma;
+ block_mark[0] = 0; /* bad block marker */
+
+ /* Shift to get page */
+ page = (int)(ofs >> chip->page_shift);
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
+ chip->write_buf(mtd, block_mark, 1);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ ret = -EIO;
+
+ chip->select_chip(mtd, -1);
+
+ return ret;
+}
+
+static int nand_boot_set_geometry(struct gpmi_nand_data *this)
+{
+ struct boot_rom_geometry *geometry = &this->rom_geometry;
+
+ /*
+ * Set the boot block stride size.
+ *
+ * In principle, we should be reading this from the OTP bits, since
+ * that's where the ROM is going to get it. In fact, we don't have any
+ * way to read the OTP bits, so we go with the default and hope for the
+ * best.
+ */
+ geometry->stride_size_in_pages = 64;
+
+ /*
+ * Set the search area stride exponent.
+ *
+ * In principle, we should be reading this from the OTP bits, since
+ * that's where the ROM is going to get it. In fact, we don't have any
+ * way to read the OTP bits, so we go with the default and hope for the
+ * best.
+ */
+ geometry->search_area_stride_exponent = 2;
+ return 0;
+}
+
+static const char *fingerprint = "STMP";
+static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
+{
+ struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+ struct device *dev = this->dev;
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = &this->nand;
+ unsigned int search_area_size_in_strides;
+ unsigned int stride;
+ unsigned int page;
+ uint8_t *buffer = chip->buffers->databuf;
+ int saved_chip_number;
+ int found_an_ncb_fingerprint = false;
+
+ /* Compute the number of strides in a search area. */
+ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+
+ saved_chip_number = this->current_chip;
+ chip->select_chip(mtd, 0);
+
+ /*
+ * Loop through the first search area, looking for the NCB fingerprint.
+ */
+ dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
+
+ for (stride = 0; stride < search_area_size_in_strides; stride++) {
+ /* Compute the page addresses. */
+ page = stride * rom_geo->stride_size_in_pages;
+
+ dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
+
+ /*
+ * Read the NCB fingerprint. The fingerprint is four bytes long
+ * and starts in the 12th byte of the page.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
+ chip->read_buf(mtd, buffer, strlen(fingerprint));
+
+ /* Look for the fingerprint. */
+ if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
+ found_an_ncb_fingerprint = true;
+ break;
+ }
+
+ }
+
+ chip->select_chip(mtd, saved_chip_number);
+
+ if (found_an_ncb_fingerprint)
+ dev_dbg(dev, "\tFound a fingerprint\n");
+ else
+ dev_dbg(dev, "\tNo fingerprint found\n");
+ return found_an_ncb_fingerprint;
+}
+
+/* Writes a transcription stamp. */
+static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+ struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = &this->nand;
+ unsigned int block_size_in_pages;
+ unsigned int search_area_size_in_strides;
+ unsigned int search_area_size_in_pages;
+ unsigned int search_area_size_in_blocks;
+ unsigned int block;
+ unsigned int stride;
+ unsigned int page;
+ uint8_t *buffer = chip->buffers->databuf;
+ int saved_chip_number;
+ int status;
+
+ /* Compute the search area geometry. */
+ block_size_in_pages = mtd->erasesize / mtd->writesize;
+ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+ search_area_size_in_pages = search_area_size_in_strides *
+ rom_geo->stride_size_in_pages;
+ search_area_size_in_blocks =
+ (search_area_size_in_pages + (block_size_in_pages - 1)) /
+ block_size_in_pages;
+
+ dev_dbg(dev, "Search Area Geometry :\n");
+ dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
+ dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
+ dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);
+
+ /* Select chip 0. */
+ saved_chip_number = this->current_chip;
+ chip->select_chip(mtd, 0);
+
+ /* Loop over blocks in the first search area, erasing them. */
+ dev_dbg(dev, "Erasing the search area...\n");
+
+ for (block = 0; block < search_area_size_in_blocks; block++) {
+ /* Compute the page address. */
+ page = block * block_size_in_pages;
+
+ /* Erase this block. */
+ dev_dbg(dev, "\tErasing block 0x%x\n", block);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+ /* Wait for the erase to finish. */
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ dev_err(dev, "[%s] Erase failed.\n", __func__);
+ }
+
+ /* Write the NCB fingerprint into the page buffer. */
+ memset(buffer, ~0, mtd->writesize);
+ memcpy(buffer + 12, fingerprint, strlen(fingerprint));
+
+ /* Loop through the first search area, writing NCB fingerprints. */
+ dev_dbg(dev, "Writing NCB fingerprints...\n");
+ for (stride = 0; stride < search_area_size_in_strides; stride++) {
+ /* Compute the page addresses. */
+ page = stride * rom_geo->stride_size_in_pages;
+
+ /* Write the first page of the current stride. */
+ dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+ chip->ecc.write_page_raw(mtd, chip, buffer, 0);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ /* Wait for the write to finish. */
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ dev_err(dev, "[%s] Write failed.\n", __func__);
+ }
+
+ /* Deselect chip 0. */
+ chip->select_chip(mtd, saved_chip_number);
+ return 0;
+}
+
+static int mx23_boot_init(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = &this->mtd;
+ unsigned int block_count;
+ unsigned int block;
+ int chipnr;
+ int page;
+ loff_t byte;
+ uint8_t block_mark;
+ int ret = 0;
+
+ /*
+ * If control arrives here, we can't use block mark swapping, which
+ * means we're forced to use transcription. First, scan for the
+ * transcription stamp. If we find it, then we don't have to do
+ * anything -- the block marks are already transcribed.
+ */
+ if (mx23_check_transcription_stamp(this))
+ return 0;
+
+ /*
+	 * If control arrives here, we couldn't find a transcription stamp,
+	 * so we presume the block marks are in the conventional location.
+ */
+ dev_dbg(dev, "Transcribing bad block marks...\n");
+
+ /* Compute the number of blocks in the entire medium. */
+ block_count = chip->chipsize >> chip->phys_erase_shift;
+
+ /*
+ * Loop over all the blocks in the medium, transcribing block marks as
+ * we go.
+ */
+ for (block = 0; block < block_count; block++) {
+ /*
+ * Compute the chip, page and byte addresses for this block's
+ * conventional mark.
+ */
+ chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
+ page = block << (chip->phys_erase_shift - chip->page_shift);
+ byte = block << chip->phys_erase_shift;
+
+ /* Send the command to read the conventional block mark. */
+ chip->select_chip(mtd, chipnr);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ block_mark = chip->read_byte(mtd);
+ chip->select_chip(mtd, -1);
+
+ /*
+ * Check if the block is marked bad. If so, we need to mark it
+ * again, but this time the result will be a mark in the
+ * location where we transcribe block marks.
+ */
+ if (block_mark != 0xff) {
+ dev_dbg(dev, "Transcribing mark in block %u\n", block);
+ ret = chip->block_markbad(mtd, byte);
+ if (ret)
+				dev_err(dev, "Failed to mark block bad with ret %d\n",
+					ret);
+ }
+ }
+
+ /* Write the stamp that indicates we've transcribed the block marks. */
+ mx23_write_transcription_stamp(this);
+ return 0;
+}
+
+static int nand_boot_init(struct gpmi_nand_data *this)
+{
+ nand_boot_set_geometry(this);
+
+ /* This is ROM arch-specific initialization done before the BBT scanning. */
+ if (GPMI_IS_MX23(this))
+ return mx23_boot_init(this);
+ return 0;
+}
+
+static int gpmi_set_geometry(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ /* Free the temporary DMA memory for reading ID. */
+ gpmi_free_dma_buffer(this);
+
+ /* Set up the NFC geometry which is used by BCH. */
+ ret = bch_set_geometry(this);
+ if (ret) {
+ dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
+ return ret;
+ }
+
+ /* Allocate the new DMA buffers according to the page size and OOB size. */
+ return gpmi_alloc_dma_buffer(this);
+}
+
+static void gpmi_nand_exit(struct gpmi_nand_data *this)
+{
+ nand_release(&this->mtd);
+ gpmi_free_dma_buffer(this);
+}
+
+static int gpmi_init_last(struct gpmi_nand_data *this)
+{
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = mtd->priv;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct bch_geometry *bch_geo = &this->bch_geometry;
+ int ret;
+
+ /* Set up swap_block_mark; it must be set before gpmi_set_geometry() is called. */
+ this->swap_block_mark = !GPMI_IS_MX23(this);
+
+ /* Set up the medium geometry */
+ ret = gpmi_set_geometry(this);
+ if (ret)
+ return ret;
+
+ /* Init the nand_ecc_ctrl{} */
+ ecc->read_page = gpmi_ecc_read_page;
+ ecc->write_page = gpmi_ecc_write_page;
+ ecc->read_oob = gpmi_ecc_read_oob;
+ ecc->write_oob = gpmi_ecc_write_oob;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = bch_geo->ecc_chunk_size;
+ ecc->strength = bch_geo->ecc_strength;
+ ecc->layout = &gpmi_hw_ecclayout;
+
+ /*
+ * We only enable the subpage read when:
+ * (1) the chip is imx6, and
+ * (2) the size of the ECC parity is byte aligned.
+ */
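+ /*
+ * For example (illustrative numbers): gf_len = 13 with ecc_strength = 8
+ * gives 104 parity bits = 13 bytes per chunk, which is byte aligned, so
+ * subpage reads can be enabled; gf_len = 13 with ecc_strength = 14 gives
+ * 182 bits, which is not.
+ */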
+ if (GPMI_IS_MX6(this) &&
+ ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
+ ecc->read_subpage = gpmi_ecc_read_subpage;
+ chip->options |= NAND_SUBPAGE_READ;
+ }
+
+ /*
+ * Try to enable the extra features, such as EDO or Sync mode.
+ *
+ * We do not check the return value here: even if enabling the extra
+ * features fails, the driver can still run in the normal way.
+ */
+ gpmi_extra_init(this);
+
+ return 0;
+}
+
+static int gpmi_nand_init(struct gpmi_nand_data *this)
+{
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = &this->nand;
+ struct mtd_part_parser_data ppdata = {};
+ int ret;
+
+ /* init current chip */
+ this->current_chip = -1;
+
+ /* init the MTD data structures */
+ mtd->priv = chip;
+ mtd->name = "gpmi-nand";
+ mtd->owner = THIS_MODULE;
+
+ /* Init the nand_chip{}; we do not support a 16-bit NAND Flash bus. */
+ chip->priv = this;
+ chip->select_chip = gpmi_select_chip;
+ chip->cmd_ctrl = gpmi_cmd_ctrl;
+ chip->dev_ready = gpmi_dev_ready;
+ chip->read_byte = gpmi_read_byte;
+ chip->read_buf = gpmi_read_buf;
+ chip->write_buf = gpmi_write_buf;
+ chip->badblock_pattern = &gpmi_bbt_descr;
+ chip->block_markbad = gpmi_block_markbad;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ if (of_get_nand_on_flash_bbt(this->dev->of_node))
+ chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+
+ /*
+ * Allocate a temporary DMA buffer for reading ID in the
+ * nand_scan_ident().
+ */
+ this->bch_geometry.payload_size = 1024;
+ this->bch_geometry.auxiliary_size = 128;
+ ret = gpmi_alloc_dma_buffer(this);
+ if (ret)
+ goto err_out;
+
+ ret = nand_scan_ident(mtd, GPMI_IS_MX6(this) ? 2 : 1, NULL);
+ if (ret)
+ goto err_out;
+
+ ret = gpmi_init_last(this);
+ if (ret)
+ goto err_out;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ goto err_out;
+
+ ret = nand_boot_init(this);
+ if (ret)
+ goto err_out;
+ chip->scan_bbt(mtd);
+
+ ppdata.of_node = this->pdev->dev.of_node;
+ ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+ if (ret)
+ goto err_out;
+ return 0;
+
+err_out:
+ gpmi_nand_exit(this);
+ return ret;
+}
+
+static const struct of_device_id gpmi_nand_id_table[] = {
+ {
+ .compatible = "fsl,imx23-gpmi-nand",
+ .data = (void *)&gpmi_devdata_imx23,
+ }, {
+ .compatible = "fsl,imx28-gpmi-nand",
+ .data = (void *)&gpmi_devdata_imx28,
+ }, {
+ .compatible = "fsl,imx6q-gpmi-nand",
+ .data = (void *)&gpmi_devdata_imx6q,
+ }, {
+ .compatible = "fsl,imx6sx-gpmi-nand",
+ .data = (void *)&gpmi_devdata_imx6sx,
+ }, {}
+};
+MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
+
+static int gpmi_nand_probe(struct platform_device *pdev)
+{
+ struct gpmi_nand_data *this;
+ const struct of_device_id *of_id;
+ int ret;
+
+ this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
+ if (!this)
+ return -ENOMEM;
+
+ of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
+ if (of_id) {
+ this->devdata = of_id->data;
+ } else {
+ dev_err(&pdev->dev, "Failed to find the right device id.\n");
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, this);
+ this->pdev = pdev;
+ this->dev = &pdev->dev;
+
+ ret = acquire_resources(this);
+ if (ret)
+ goto exit_acquire_resources;
+
+ ret = init_hardware(this);
+ if (ret)
+ goto exit_nfc_init;
+
+ ret = gpmi_nand_init(this);
+ if (ret)
+ goto exit_nfc_init;
+
+ dev_info(this->dev, "driver registered.\n");
+
+ return 0;
+
+exit_nfc_init:
+ release_resources(this);
+exit_acquire_resources:
+ dev_err(this->dev, "driver registration failed: %d\n", ret);
+
+ return ret;
+}
+
+static int gpmi_nand_remove(struct platform_device *pdev)
+{
+ struct gpmi_nand_data *this = platform_get_drvdata(pdev);
+
+ gpmi_nand_exit(this);
+ release_resources(this);
+ return 0;
+}
+
+static struct platform_driver gpmi_nand_driver = {
+ .driver = {
+ .name = "gpmi-nand",
+ .of_match_table = gpmi_nand_id_table,
+ },
+ .probe = gpmi_nand_probe,
+ .remove = gpmi_nand_remove,
+};
+module_platform_driver(gpmi_nand_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
new file mode 100644
index 00000000000..32c6ba49f98
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -0,0 +1,305 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
+#define __DRIVERS_MTD_NAND_GPMI_NAND_H
+
+#include <linux/mtd/nand.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+
+#define GPMI_CLK_MAX 5 /* MX6Q needs five clocks */
+struct resources {
+ void __iomem *gpmi_regs;
+ void __iomem *bch_regs;
+ unsigned int dma_low_channel;
+ unsigned int dma_high_channel;
+ struct clk *clock[GPMI_CLK_MAX];
+};
+
+/**
+ * struct bch_geometry - BCH geometry description.
+ * @gf_len:                   The length of the Galois field (e.g., 13 or 14).
+ * @ecc_strength: A number that describes the strength of the ECC
+ * algorithm.
+ * @page_size: The size, in bytes, of a physical page, including
+ * both data and OOB.
+ * @metadata_size: The size, in bytes, of the metadata.
+ * @ecc_chunk_size: The size, in bytes, of a single ECC chunk. Note
+ * the first chunk in the page includes both data and
+ * metadata, so it's a bit larger than this value.
+ * @ecc_chunk_count:          The number of ECC chunks in the page.
+ * @payload_size: The size, in bytes, of the payload buffer.
+ * @auxiliary_size: The size, in bytes, of the auxiliary buffer.
+ * @auxiliary_status_offset: The offset into the auxiliary buffer at which
+ * the ECC status appears.
+ * @block_mark_byte_offset: The byte offset in the ECC-based page view at
+ * which the underlying physical block mark appears.
+ * @block_mark_bit_offset: The bit offset into the ECC-based page view at
+ * which the underlying physical block mark appears.
+ */
+struct bch_geometry {
+ unsigned int gf_len;
+ unsigned int ecc_strength;
+ unsigned int page_size;
+ unsigned int metadata_size;
+ unsigned int ecc_chunk_size;
+ unsigned int ecc_chunk_count;
+ unsigned int payload_size;
+ unsigned int auxiliary_size;
+ unsigned int auxiliary_status_offset;
+ unsigned int block_mark_byte_offset;
+ unsigned int block_mark_bit_offset;
+};
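+
+/*
+ * Illustrative example (not taken from a specific chip): a 2048+64 byte page
+ * split into 512-byte ECC chunks gives ecc_chunk_size = 512,
+ * ecc_chunk_count = 4 and page_size = 2112; the actual values are computed
+ * at run time by bch_set_geometry().
+ */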
+
+/**
+ * struct boot_rom_geometry - Boot ROM geometry description.
+ * @stride_size_in_pages: The size of a boot block stride, in pages.
+ * @search_area_stride_exponent: The logarithm to base 2 of the size of a
+ * search area in boot block strides.
+ */
+struct boot_rom_geometry {
+ unsigned int stride_size_in_pages;
+ unsigned int search_area_stride_exponent;
+};
+
+/* DMA operations types */
+enum dma_ops_type {
+ DMA_FOR_COMMAND = 1,
+ DMA_FOR_READ_DATA,
+ DMA_FOR_WRITE_DATA,
+ DMA_FOR_READ_ECC_PAGE,
+ DMA_FOR_WRITE_ECC_PAGE
+};
+
+/**
+ * struct nand_timing - Fundamental timing attributes for NAND.
+ * @data_setup_in_ns: The data setup time, in nanoseconds. Usually the
+ * maximum of tDS and tWP. A negative value
+ * indicates this characteristic isn't known.
+ * @data_hold_in_ns: The data hold time, in nanoseconds. Usually the
+ * maximum of tDH, tWH and tREH. A negative value
+ * indicates this characteristic isn't known.
+ * @address_setup_in_ns: The address setup time, in nanoseconds. Usually
+ * the maximum of tCLS, tCS and tALS. A negative
+ * value indicates this characteristic isn't known.
+ * @gpmi_sample_delay_in_ns: A GPMI-specific timing parameter. A negative value
+ * indicates this characteristic isn't known.
+ * @tREA_in_ns: tREA, in nanoseconds, from the data sheet. A
+ * negative value indicates this characteristic isn't
+ * known.
+ * @tRLOH_in_ns: tRLOH, in nanoseconds, from the data sheet. A
+ * negative value indicates this characteristic isn't
+ * known.
+ * @tRHOH_in_ns: tRHOH, in nanoseconds, from the data sheet. A
+ * negative value indicates this characteristic isn't
+ * known.
+ */
+struct nand_timing {
+ int8_t data_setup_in_ns;
+ int8_t data_hold_in_ns;
+ int8_t address_setup_in_ns;
+ int8_t gpmi_sample_delay_in_ns;
+ int8_t tREA_in_ns;
+ int8_t tRLOH_in_ns;
+ int8_t tRHOH_in_ns;
+};
+
+enum gpmi_type {
+ IS_MX23,
+ IS_MX28,
+ IS_MX6Q,
+ IS_MX6SX
+};
+
+struct gpmi_devdata {
+ enum gpmi_type type;
+ int bch_max_ecc_strength;
+ int max_chain_delay; /* See the async EDO mode */
+};
+
+struct gpmi_nand_data {
+ /* flags */
+#define GPMI_ASYNC_EDO_ENABLED (1 << 0)
+#define GPMI_TIMING_INIT_OK (1 << 1)
+ int flags;
+ const struct gpmi_devdata *devdata;
+
+ /* System Interface */
+ struct device *dev;
+ struct platform_device *pdev;
+
+ /* Resources */
+ struct resources resources;
+
+ /* Flash Hardware */
+ struct nand_timing timing;
+ int timing_mode;
+
+ /* BCH */
+ struct bch_geometry bch_geometry;
+ struct completion bch_done;
+
+ /* NAND Boot issue */
+ bool swap_block_mark;
+ struct boot_rom_geometry rom_geometry;
+
+ /* MTD / NAND */
+ struct nand_chip nand;
+ struct mtd_info mtd;
+
+ /* General-use Variables */
+ int current_chip;
+ unsigned int command_length;
+
+ /* passed from upper layer */
+ uint8_t *upper_buf;
+ int upper_len;
+
+ /* for DMA operations */
+ bool direct_dma_map_ok;
+
+ struct scatterlist cmd_sgl;
+ char *cmd_buffer;
+
+ struct scatterlist data_sgl;
+ char *data_buffer_dma;
+
+ void *page_buffer_virt;
+ dma_addr_t page_buffer_phys;
+ unsigned int page_buffer_size;
+
+ void *payload_virt;
+ dma_addr_t payload_phys;
+
+ void *auxiliary_virt;
+ dma_addr_t auxiliary_phys;
+
+ /* DMA channels */
+#define DMA_CHANS 8
+ struct dma_chan *dma_chans[DMA_CHANS];
+ enum dma_ops_type last_dma_type;
+ enum dma_ops_type dma_type;
+ struct completion dma_done;
+
+ /* private */
+ void *private;
+};
+
+/**
+ * struct gpmi_nfc_hardware_timing - GPMI hardware timing parameters.
+ * @data_setup_in_cycles: The data setup time, in cycles.
+ * @data_hold_in_cycles: The data hold time, in cycles.
+ * @address_setup_in_cycles: The address setup time, in cycles.
+ * @device_busy_timeout: The timeout waiting for NAND Ready/Busy,
+ * this value is the number of cycles multiplied
+ * by 4096.
+ * @use_half_periods: Indicates the clock is running slowly, so the
+ * NFC DLL should use half-periods.
+ * @sample_delay_factor: The sample delay factor.
+ * @wrn_dly_sel: The delay on the GPMI write strobe.
+ */
+struct gpmi_nfc_hardware_timing {
+ /* for HW_GPMI_TIMING0 */
+ uint8_t data_setup_in_cycles;
+ uint8_t data_hold_in_cycles;
+ uint8_t address_setup_in_cycles;
+
+ /* for HW_GPMI_TIMING1 */
+ uint16_t device_busy_timeout;
+#define GPMI_DEFAULT_BUSY_TIMEOUT	0x500 /* default busy timeout value. */
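+/* Per the description above, the default 0x500 corresponds to 0x500 * 4096 = 5242880 GPMI clock cycles. */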
+
+ /* for HW_GPMI_CTRL1 */
+ bool use_half_periods;
+ uint8_t sample_delay_factor;
+ uint8_t wrn_dly_sel;
+};
+
+/**
+ * struct timing_threshod - Timing threshold
+ * @max_data_setup_cycles: The maximum number of data setup cycles that
+ * can be expressed in the hardware.
+ * @internal_data_setup_in_ns: The time, in ns, that the NFC hardware requires
+ * for data read internal setup. In the Reference
+ * Manual, see the chapter "High-Speed NAND
+ * Timing" for more details.
+ * @max_sample_delay_factor: The maximum sample delay factor that can be
+ * expressed in the hardware.
+ * @max_dll_clock_period_in_ns: The maximum period of the GPMI clock that the
+ * sample delay DLL hardware can possibly work
+ * with (the DLL is unusable with longer periods).
+ * If the full-cycle period is greater than HALF
+ * this value, the DLL must be configured to use
+ * half-periods.
+ * @max_dll_delay_in_ns: The maximum amount of delay, in ns, that the
+ * DLL can implement.
+ * @clock_frequency_in_hz: The clock frequency, in Hz, during the current
+ * I/O transaction. If no I/O transaction is in
+ * progress, this is the clock frequency during
+ * the most recent I/O transaction.
+ */
+struct timing_threshod {
+ const unsigned int max_chip_count;
+ const unsigned int max_data_setup_cycles;
+ const unsigned int internal_data_setup_in_ns;
+ const unsigned int max_sample_delay_factor;
+ const unsigned int max_dll_clock_period_in_ns;
+ const unsigned int max_dll_delay_in_ns;
+ unsigned long clock_frequency_in_hz;
+
+};
+
+/* Common Services */
+extern int common_nfc_set_geometry(struct gpmi_nand_data *);
+extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
+extern void prepare_data_dma(struct gpmi_nand_data *,
+ enum dma_data_direction dr);
+extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
+extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
+
+/* GPMI-NAND helper function library */
+extern int gpmi_init(struct gpmi_nand_data *);
+extern int gpmi_extra_init(struct gpmi_nand_data *);
+extern void gpmi_clear_bch(struct gpmi_nand_data *);
+extern void gpmi_dump_info(struct gpmi_nand_data *);
+extern int bch_set_geometry(struct gpmi_nand_data *);
+extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
+extern int gpmi_send_command(struct gpmi_nand_data *);
+extern void gpmi_begin(struct gpmi_nand_data *);
+extern void gpmi_end(struct gpmi_nand_data *);
+extern int gpmi_read_data(struct gpmi_nand_data *);
+extern int gpmi_send_data(struct gpmi_nand_data *);
+extern int gpmi_send_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
+extern int gpmi_read_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
+
+/* BCH : Status Block Completion Codes */
+#define STATUS_GOOD 0x00
+#define STATUS_ERASED 0xff
+#define STATUS_UNCORRECTABLE 0xfe
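+
+/*
+ * The BCH block writes one status byte per ECC chunk into the auxiliary
+ * buffer (at auxiliary_status_offset); a value below STATUS_UNCORRECTABLE is
+ * the number of bitflips corrected in that chunk.
+ */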
+
+/* Use the devdata to distinguish different Archs. */
+#define GPMI_IS_MX23(x) ((x)->devdata->type == IS_MX23)
+#define GPMI_IS_MX28(x) ((x)->devdata->type == IS_MX28)
+#define GPMI_IS_MX6Q(x) ((x)->devdata->type == IS_MX6Q)
+#define GPMI_IS_MX6SX(x) ((x)->devdata->type == IS_MX6SX)
+
+#define GPMI_IS_MX6(x) (GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x))
+#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
new file mode 100644
index 00000000000..82114cdc833
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
@@ -0,0 +1,187 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __GPMI_NAND_GPMI_REGS_H
+#define __GPMI_NAND_GPMI_REGS_H
+
+#define HW_GPMI_CTRL0 0x00000000
+#define HW_GPMI_CTRL0_SET 0x00000004
+#define HW_GPMI_CTRL0_CLR 0x00000008
+#define HW_GPMI_CTRL0_TOG 0x0000000c
+
+#define BP_GPMI_CTRL0_COMMAND_MODE 24
+#define BM_GPMI_CTRL0_COMMAND_MODE (3 << BP_GPMI_CTRL0_COMMAND_MODE)
+#define BF_GPMI_CTRL0_COMMAND_MODE(v) \
+ (((v) << BP_GPMI_CTRL0_COMMAND_MODE) & BM_GPMI_CTRL0_COMMAND_MODE)
+#define BV_GPMI_CTRL0_COMMAND_MODE__WRITE 0x0
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ 0x1
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ_AND_COMPARE 0x2
+#define BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY 0x3
+
+#define BM_GPMI_CTRL0_WORD_LENGTH (1 << 23)
+#define BV_GPMI_CTRL0_WORD_LENGTH__16_BIT 0x0
+#define BV_GPMI_CTRL0_WORD_LENGTH__8_BIT 0x1
+
+/*
+ * Difference in LOCK_CS between imx23 and imx28:
+ * This bit may impact the power consumption, so some chips
+ * do not set it.
+ */
+#define MX23_BP_GPMI_CTRL0_LOCK_CS 22
+#define MX28_BP_GPMI_CTRL0_LOCK_CS 27
+#define LOCK_CS_ENABLE 0x1
+#define BF_GPMI_CTRL0_LOCK_CS(v, x) 0x0
+
+/* Difference in CS between imx23 and imx28 */
+#define BP_GPMI_CTRL0_CS 20
+#define MX23_BM_GPMI_CTRL0_CS (3 << BP_GPMI_CTRL0_CS)
+#define MX28_BM_GPMI_CTRL0_CS (7 << BP_GPMI_CTRL0_CS)
+#define BF_GPMI_CTRL0_CS(v, x) (((v) << BP_GPMI_CTRL0_CS) & \
+ (GPMI_IS_MX23((x)) \
+ ? MX23_BM_GPMI_CTRL0_CS \
+ : MX28_BM_GPMI_CTRL0_CS))
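+/*
+ * Example (hypothetical values): on an imx28, BF_GPMI_CTRL0_CS(2, this), with
+ * 'this' being the struct gpmi_nand_data pointer, evaluates to 2 << 20 =
+ * 0x200000, i.e. chip select 2 in HW_GPMI_CTRL0.
+ */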
+
+#define BP_GPMI_CTRL0_ADDRESS 17
+#define BM_GPMI_CTRL0_ADDRESS (3 << BP_GPMI_CTRL0_ADDRESS)
+#define BF_GPMI_CTRL0_ADDRESS(v) \
+ (((v) << BP_GPMI_CTRL0_ADDRESS) & BM_GPMI_CTRL0_ADDRESS)
+#define BV_GPMI_CTRL0_ADDRESS__NAND_DATA 0x0
+#define BV_GPMI_CTRL0_ADDRESS__NAND_CLE 0x1
+#define BV_GPMI_CTRL0_ADDRESS__NAND_ALE 0x2
+
+#define BM_GPMI_CTRL0_ADDRESS_INCREMENT (1 << 16)
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__DISABLED 0x0
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__ENABLED 0x1
+
+#define BP_GPMI_CTRL0_XFER_COUNT 0
+#define BM_GPMI_CTRL0_XFER_COUNT (0xffff << BP_GPMI_CTRL0_XFER_COUNT)
+#define BF_GPMI_CTRL0_XFER_COUNT(v) \
+ (((v) << BP_GPMI_CTRL0_XFER_COUNT) & BM_GPMI_CTRL0_XFER_COUNT)
+
+#define HW_GPMI_COMPARE 0x00000010
+
+#define HW_GPMI_ECCCTRL 0x00000020
+#define HW_GPMI_ECCCTRL_SET 0x00000024
+#define HW_GPMI_ECCCTRL_CLR 0x00000028
+#define HW_GPMI_ECCCTRL_TOG 0x0000002c
+
+#define BP_GPMI_ECCCTRL_ECC_CMD 13
+#define BM_GPMI_ECCCTRL_ECC_CMD (3 << BP_GPMI_ECCCTRL_ECC_CMD)
+#define BF_GPMI_ECCCTRL_ECC_CMD(v) \
+ (((v) << BP_GPMI_ECCCTRL_ECC_CMD) & BM_GPMI_ECCCTRL_ECC_CMD)
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE 0x0
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE 0x1
+
+#define BM_GPMI_ECCCTRL_ENABLE_ECC (1 << 12)
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__ENABLE 0x1
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__DISABLE 0x0
+
+#define BP_GPMI_ECCCTRL_BUFFER_MASK 0
+#define BM_GPMI_ECCCTRL_BUFFER_MASK (0x1ff << BP_GPMI_ECCCTRL_BUFFER_MASK)
+#define BF_GPMI_ECCCTRL_BUFFER_MASK(v) \
+ (((v) << BP_GPMI_ECCCTRL_BUFFER_MASK) & BM_GPMI_ECCCTRL_BUFFER_MASK)
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY 0x100
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE 0x1FF
+
+#define HW_GPMI_ECCCOUNT 0x00000030
+#define HW_GPMI_PAYLOAD 0x00000040
+#define HW_GPMI_AUXILIARY 0x00000050
+#define HW_GPMI_CTRL1 0x00000060
+#define HW_GPMI_CTRL1_SET 0x00000064
+#define HW_GPMI_CTRL1_CLR 0x00000068
+#define HW_GPMI_CTRL1_TOG 0x0000006c
+
+#define BP_GPMI_CTRL1_DECOUPLE_CS 24
+#define BM_GPMI_CTRL1_DECOUPLE_CS (1 << BP_GPMI_CTRL1_DECOUPLE_CS)
+
+#define BP_GPMI_CTRL1_WRN_DLY_SEL 22
+#define BM_GPMI_CTRL1_WRN_DLY_SEL (0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL)
+#define BF_GPMI_CTRL1_WRN_DLY_SEL(v) \
+ (((v) << BP_GPMI_CTRL1_WRN_DLY_SEL) & BM_GPMI_CTRL1_WRN_DLY_SEL)
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS 0x0
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_6_TO_10NS 0x1
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_7_TO_12NS 0x2
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY 0x3
+
+#define BM_GPMI_CTRL1_BCH_MODE (1 << 18)
+
+#define BP_GPMI_CTRL1_DLL_ENABLE 17
+#define BM_GPMI_CTRL1_DLL_ENABLE (1 << BP_GPMI_CTRL1_DLL_ENABLE)
+
+#define BP_GPMI_CTRL1_HALF_PERIOD 16
+#define BM_GPMI_CTRL1_HALF_PERIOD (1 << BP_GPMI_CTRL1_HALF_PERIOD)
+
+#define BP_GPMI_CTRL1_RDN_DELAY 12
+#define BM_GPMI_CTRL1_RDN_DELAY (0xf << BP_GPMI_CTRL1_RDN_DELAY)
+#define BF_GPMI_CTRL1_RDN_DELAY(v) \
+ (((v) << BP_GPMI_CTRL1_RDN_DELAY) & BM_GPMI_CTRL1_RDN_DELAY)
+
+#define BM_GPMI_CTRL1_DEV_RESET (1 << 3)
+#define BV_GPMI_CTRL1_DEV_RESET__ENABLED 0x0
+#define BV_GPMI_CTRL1_DEV_RESET__DISABLED 0x1
+
+#define BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY (1 << 2)
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVELOW 0x0
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVEHIGH 0x1
+
+#define BM_GPMI_CTRL1_CAMERA_MODE (1 << 1)
+#define BV_GPMI_CTRL1_GPMI_MODE__NAND 0x0
+#define BV_GPMI_CTRL1_GPMI_MODE__ATA 0x1
+
+#define BM_GPMI_CTRL1_GPMI_MODE (1 << 0)
+
+#define HW_GPMI_TIMING0 0x00000070
+
+#define BP_GPMI_TIMING0_ADDRESS_SETUP 16
+#define BM_GPMI_TIMING0_ADDRESS_SETUP (0xff << BP_GPMI_TIMING0_ADDRESS_SETUP)
+#define BF_GPMI_TIMING0_ADDRESS_SETUP(v) \
+ (((v) << BP_GPMI_TIMING0_ADDRESS_SETUP) & BM_GPMI_TIMING0_ADDRESS_SETUP)
+
+#define BP_GPMI_TIMING0_DATA_HOLD 8
+#define BM_GPMI_TIMING0_DATA_HOLD (0xff << BP_GPMI_TIMING0_DATA_HOLD)
+#define BF_GPMI_TIMING0_DATA_HOLD(v) \
+ (((v) << BP_GPMI_TIMING0_DATA_HOLD) & BM_GPMI_TIMING0_DATA_HOLD)
+
+#define BP_GPMI_TIMING0_DATA_SETUP 0
+#define BM_GPMI_TIMING0_DATA_SETUP (0xff << BP_GPMI_TIMING0_DATA_SETUP)
+#define BF_GPMI_TIMING0_DATA_SETUP(v) \
+ (((v) << BP_GPMI_TIMING0_DATA_SETUP) & BM_GPMI_TIMING0_DATA_SETUP)
+
+#define HW_GPMI_TIMING1 0x00000080
+#define BP_GPMI_TIMING1_BUSY_TIMEOUT 16
+#define BM_GPMI_TIMING1_BUSY_TIMEOUT (0xffff << BP_GPMI_TIMING1_BUSY_TIMEOUT)
+#define BF_GPMI_TIMING1_BUSY_TIMEOUT(v) \
+ (((v) << BP_GPMI_TIMING1_BUSY_TIMEOUT) & BM_GPMI_TIMING1_BUSY_TIMEOUT)
+
+#define HW_GPMI_TIMING2 0x00000090
+#define HW_GPMI_DATA 0x000000a0
+
+/* MX28 uses this to detect READY. */
+#define HW_GPMI_STAT 0x000000b0
+#define MX28_BP_GPMI_STAT_READY_BUSY 24
+#define MX28_BM_GPMI_STAT_READY_BUSY (0xff << MX28_BP_GPMI_STAT_READY_BUSY)
+#define MX28_BF_GPMI_STAT_READY_BUSY(v) \
+ (((v) << MX28_BP_GPMI_STAT_READY_BUSY) & MX28_BM_GPMI_STAT_READY_BUSY)
+
+/* MX23 uses this to detect READY. */
+#define HW_GPMI_DEBUG 0x000000c0
+#define MX23_BP_GPMI_DEBUG_READY0 28
+#define MX23_BM_GPMI_DEBUG_READY0 (1 << MX23_BP_GPMI_DEBUG_READY0)
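+
+/*
+ * Sketch (not the driver's exact code) of how a ready check can use these
+ * bits: on MX28,
+ *   readl(gpmi_regs + HW_GPMI_STAT) & MX28_BF_GPMI_STAT_READY_BUSY(1 << chip)
+ * is non-zero when the selected chip reports ready.
+ */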
+#endif
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
deleted file mode 100644
index 02a03e67109..00000000000
--- a/drivers/mtd/nand/h1910.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * drivers/mtd/nand/h1910.c
- *
- * Copyright (C) 2003 Joshua Wise (joshua@joshuawise.com)
- *
- * Derived from drivers/mtd/nand/edb7312.c
- * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
- * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * iPAQ h1910 board which utilizes the Samsung K9F2808 part. This is
- * a 128Mibit (16MiB x 8 bits) NAND flash device.
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <mach/hardware.h> /* for CLPS7111_VIRT_BASE */
-#include <asm/sizes.h>
-#include <mach/h1900-gpio.h>
-#include <mach/ipaq.h>
-
-/*
- * MTD structure for EDB7312 board
- */
-static struct mtd_info *h1910_nand_mtd = NULL;
-
-/*
- * Module stuff
- */
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info[] = {
- {name:"h1910 NAND Flash",
- offset:0,
- size:16 * 1024 * 1024}
-};
-
-#define NUM_PARTITIONS 1
-
-/*
- * hardware specific access to control-lines
- *
- * NAND_NCE: bit 0 - don't care
- * NAND_CLE: bit 1 - address bit 2
- * NAND_ALE: bit 2 - address bit 3
- */
-static void h1910_hwcontrol(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W | ((ctrl & 0x6) << 1));
-}
-
-/*
- * read device ready pin
- */
-#if 0
-static int h1910_device_ready(struct mtd_info *mtd)
-{
- return (GPLR(55) & GPIO_bit(55));
-}
-#endif
-
-/*
- * Main initialization routine
- */
-static int __init h1910_init(void)
-{
- struct nand_chip *this;
- const char *part_type = 0;
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = 0;
- void __iomem *nandaddr;
-
- if (!machine_is_h1900())
- return -ENODEV;
-
- nandaddr = ioremap(0x08000000, 0x1000);
- if (!nandaddr) {
- printk("Failed to ioremap nand flash.\n");
- return -ENOMEM;
- }
-
- /* Allocate memory for MTD device structure and private data */
- h1910_nand_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!h1910_nand_mtd) {
- printk("Unable to allocate h1910 NAND MTD device structure.\n");
- iounmap((void *)nandaddr);
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&h1910_nand_mtd[1]);
-
- /* Initialize structures */
- memset(h1910_nand_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- h1910_nand_mtd->priv = this;
- h1910_nand_mtd->owner = THIS_MODULE;
-
- /*
- * Enable VPEN
- */
- GPSR(37) = GPIO_bit(37);
-
- /* insert callbacks */
- this->IO_ADDR_R = nandaddr;
- this->IO_ADDR_W = nandaddr;
- this->cmd_ctrl = h1910_hwcontrol;
- this->dev_ready = NULL; /* unknown whether that was correct or not so we will just do it like this */
- /* 15 us command delay time */
- this->chip_delay = 50;
- this->ecc.mode = NAND_ECC_SOFT;
- this->options = NAND_NO_AUTOINCR;
-
- /* Scan to find existence of the device */
- if (nand_scan(h1910_nand_mtd, 1)) {
- printk(KERN_NOTICE "No NAND device - returning -ENXIO\n");
- kfree(h1910_nand_mtd);
- iounmap((void *)nandaddr);
- return -ENXIO;
- }
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- mtd_parts_nb = parse_cmdline_partitions(h1910_nand_mtd, &mtd_parts, "h1910-nand");
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-#endif
- if (mtd_parts_nb == 0) {
- mtd_parts = partition_info;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
-
- /* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- mtd_device_register(h1910_nand_mtd, mtd_parts, mtd_parts_nb);
-
- /* Return happy */
- return 0;
-}
-
-module_init(h1910_init);
-
-/*
- * Clean up routine
- */
-static void __exit h1910_cleanup(void)
-{
- struct nand_chip *this = (struct nand_chip *)&h1910_nand_mtd[1];
-
- /* Release resources, unregister device */
- nand_release(h1910_nand_mtd);
-
- /* Release io resource */
- iounmap((void *)this->IO_ADDR_W);
-
- /* Free the MTD device structure */
- kfree(h1910_nand_mtd);
-}
-
-module_exit(h1910_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joshua Wise <joshua at joshuawise dot com>");
-MODULE_DESCRIPTION("NAND flash driver for iPAQ h1910");
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 6e813daed06..a2c804de156 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -52,9 +52,10 @@
#define JZ_NAND_CTRL_ENABLE_CHIP(x) BIT((x) << 1)
#define JZ_NAND_CTRL_ASSERT_CHIP(x) BIT(((x) << 1) + 1)
+#define JZ_NAND_CTRL_ASSERT_CHIP_MASK 0xaa
-#define JZ_NAND_MEM_ADDR_OFFSET 0x10000
#define JZ_NAND_MEM_CMD_OFFSET 0x08000
+#define JZ_NAND_MEM_ADDR_OFFSET 0x10000
struct jz_nand {
struct mtd_info mtd;
@@ -62,8 +63,11 @@ struct jz_nand {
void __iomem *base;
struct resource *mem;
- void __iomem *bank_base;
- struct resource *bank_mem;
+ unsigned char banks[JZ_NAND_NUM_BANKS];
+ void __iomem *bank_base[JZ_NAND_NUM_BANKS];
+ struct resource *bank_mem[JZ_NAND_NUM_BANKS];
+
+ int selected_bank;
struct jz_nand_platform_data *pdata;
bool is_reading;
@@ -74,26 +78,50 @@ static inline struct jz_nand *mtd_to_jz_nand(struct mtd_info *mtd)
return container_of(mtd, struct jz_nand, mtd);
}
+static void jz_nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ struct nand_chip *chip = mtd->priv;
+ uint32_t ctrl;
+ int banknr;
+
+ ctrl = readl(nand->base + JZ_REG_NAND_CTRL);
+ ctrl &= ~JZ_NAND_CTRL_ASSERT_CHIP_MASK;
+
+ if (chipnr == -1) {
+ banknr = -1;
+ } else {
+ banknr = nand->banks[chipnr] - 1;
+ chip->IO_ADDR_R = nand->bank_base[banknr];
+ chip->IO_ADDR_W = nand->bank_base[banknr];
+ }
+ writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
+
+ nand->selected_bank = banknr;
+}
+
static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
struct jz_nand *nand = mtd_to_jz_nand(mtd);
struct nand_chip *chip = mtd->priv;
uint32_t reg;
+ void __iomem *bank_base = nand->bank_base[nand->selected_bank];
+
+ BUG_ON(nand->selected_bank < 0);
if (ctrl & NAND_CTRL_CHANGE) {
BUG_ON((ctrl & NAND_ALE) && (ctrl & NAND_CLE));
if (ctrl & NAND_ALE)
- chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_ADDR_OFFSET;
+ bank_base += JZ_NAND_MEM_ADDR_OFFSET;
else if (ctrl & NAND_CLE)
- chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_CMD_OFFSET;
- else
- chip->IO_ADDR_W = nand->bank_base;
+ bank_base += JZ_NAND_MEM_CMD_OFFSET;
+ chip->IO_ADDR_W = bank_base;
reg = readl(nand->base + JZ_REG_NAND_CTRL);
if (ctrl & NAND_NCE)
- reg |= JZ_NAND_CTRL_ASSERT_CHIP(0);
+ reg |= JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank);
else
- reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(0);
+ reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank);
writel(reg, nand->base + JZ_REG_NAND_CTRL);
}
if (dat != NAND_CMD_NONE)
@@ -251,12 +279,8 @@ static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
return 0;
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-static const char *part_probes[] = {"cmdline", NULL};
-#endif
-
static int jz_nand_ioremap_resource(struct platform_device *pdev,
- const char *name, struct resource **res, void __iomem **base)
+ const char *name, struct resource **res, void *__iomem *base)
{
int ret;
@@ -292,29 +316,112 @@ err:
return ret;
}
-static int __devinit jz_nand_probe(struct platform_device *pdev)
+static inline void jz_nand_iounmap_resource(struct resource *res,
+ void __iomem *base)
+{
+ iounmap(base);
+ release_mem_region(res->start, resource_size(res));
+}
+
+static int jz_nand_detect_bank(struct platform_device *pdev,
+ struct jz_nand *nand, unsigned char bank,
+ size_t chipnr, uint8_t *nand_maf_id,
+ uint8_t *nand_dev_id)
+{
+ int ret;
+ int gpio;
+ char gpio_name[9];
+ char res_name[6];
+ uint32_t ctrl;
+ struct mtd_info *mtd = &nand->mtd;
+ struct nand_chip *chip = &nand->chip;
+
+ /* Request GPIO port. */
+ gpio = JZ_GPIO_MEM_CS0 + bank - 1;
+ sprintf(gpio_name, "NAND CS%d", bank);
+ ret = gpio_request(gpio, gpio_name);
+ if (ret) {
+ dev_warn(&pdev->dev,
+ "Failed to request %s gpio %d: %d\n",
+ gpio_name, gpio, ret);
+ goto notfound_gpio;
+ }
+
+ /* Request I/O resource. */
+ sprintf(res_name, "bank%d", bank);
+ ret = jz_nand_ioremap_resource(pdev, res_name,
+ &nand->bank_mem[bank - 1],
+ &nand->bank_base[bank - 1]);
+ if (ret)
+ goto notfound_resource;
+
+ /* Enable chip in bank. */
+ jz_gpio_set_function(gpio, JZ_GPIO_FUNC_MEM_CS0);
+ ctrl = readl(nand->base + JZ_REG_NAND_CTRL);
+ ctrl |= JZ_NAND_CTRL_ENABLE_CHIP(bank - 1);
+ writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
+
+ if (chipnr == 0) {
+ /* Detect first chip. */
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ goto notfound_id;
+
+ /* Retrieve the IDs from the first chip. */
+ chip->select_chip(mtd, 0);
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ *nand_maf_id = chip->read_byte(mtd);
+ *nand_dev_id = chip->read_byte(mtd);
+ } else {
+ /* Detect additional chip. */
+ chip->select_chip(mtd, chipnr);
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ if (*nand_maf_id != chip->read_byte(mtd)
+ || *nand_dev_id != chip->read_byte(mtd)) {
+ ret = -ENODEV;
+ goto notfound_id;
+ }
+
+ /* Update size of the MTD. */
+ chip->numchips++;
+ mtd->size += chip->chipsize;
+ }
+
+ dev_info(&pdev->dev, "Found chip %i on bank %i\n", chipnr, bank);
+ return 0;
+
+notfound_id:
+ dev_info(&pdev->dev, "No chip found on bank %i\n", bank);
+ ctrl &= ~(JZ_NAND_CTRL_ENABLE_CHIP(bank - 1));
+ writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
+ jz_gpio_set_function(gpio, JZ_GPIO_FUNC_NONE);
+ jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
+ nand->bank_base[bank - 1]);
+notfound_resource:
+ gpio_free(gpio);
+notfound_gpio:
+ return ret;
+}
+
+static int jz_nand_probe(struct platform_device *pdev)
{
int ret;
struct jz_nand *nand;
struct nand_chip *chip;
struct mtd_info *mtd;
- struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
- struct mtd_partition *partition_info;
- int num_partitions = 0;
+ struct jz_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ size_t chipnr, bank_idx;
+ uint8_t nand_maf_id = 0, nand_dev_id = 0;
nand = kzalloc(sizeof(*nand), GFP_KERNEL);
- if (!nand) {
- dev_err(&pdev->dev, "Failed to allocate device structure.\n");
+ if (!nand)
return -ENOMEM;
- }
ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base);
if (ret)
goto err_free;
- ret = jz_nand_ioremap_resource(pdev, "bank", &nand->bank_mem,
- &nand->bank_base);
- if (ret)
- goto err_iounmap_mmio;
if (pdata && gpio_is_valid(pdata->busy_gpio)) {
ret = gpio_request(pdata->busy_gpio, "NAND busy pin");
@@ -322,7 +429,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Failed to request busy gpio %d: %d\n",
pdata->busy_gpio, ret);
- goto err_iounmap_mem;
+ goto err_iounmap_mmio;
}
}
@@ -338,28 +445,58 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
chip->ecc.mode = NAND_ECC_HW_OOB_FIRST;
chip->ecc.size = 512;
chip->ecc.bytes = 9;
+ chip->ecc.strength = 4;
if (pdata)
chip->ecc.layout = pdata->ecc_layout;
chip->chip_delay = 50;
chip->cmd_ctrl = jz_nand_cmd_ctrl;
+ chip->select_chip = jz_nand_select_chip;
if (pdata && gpio_is_valid(pdata->busy_gpio))
chip->dev_ready = jz_nand_dev_ready;
- chip->IO_ADDR_R = nand->bank_base;
- chip->IO_ADDR_W = nand->bank_base;
-
nand->pdata = pdata;
platform_set_drvdata(pdev, nand);
- writel(JZ_NAND_CTRL_ENABLE_CHIP(0), nand->base + JZ_REG_NAND_CTRL);
-
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret) {
- dev_err(&pdev->dev, "Failed to scan nand\n");
- goto err_gpio_free;
+ /* We are going to autodetect NAND chips in the banks specified in the
+ * platform data. Although nand_scan_ident() can detect multiple chips,
+ * it requires those chips to be numbered consecutively, which is not
+ * always the case for external memory banks. And a fixed chip-to-bank
+ * mapping is not practical either, since for example Dingoo units
+ * produced at different times have NAND chips in different banks.
+ */
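+ /* For example, pdata->banks = { 2, 3, 0, 0 } probes banks 2 and 3 and
+ * stops at the first zero entry.
+ */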
+ chipnr = 0;
+ for (bank_idx = 0; bank_idx < JZ_NAND_NUM_BANKS; bank_idx++) {
+ unsigned char bank;
+
+ /* If there is no platform data, look for NAND in bank 1,
+ * which is the most likely bank since it is the only one
+ * that can be booted from.
+ */
+ bank = pdata ? pdata->banks[bank_idx] : bank_idx ^ 1;
+ if (bank == 0)
+ break;
+ if (bank > JZ_NAND_NUM_BANKS) {
+ dev_warn(&pdev->dev,
+ "Skipping non-existing bank: %d\n", bank);
+ continue;
+ }
+ /* The detection routine will directly or indirectly call
+ * jz_nand_select_chip(), so nand->banks has to contain the
+ * bank we're checking.
+ */
+ nand->banks[chipnr] = bank;
+ if (jz_nand_detect_bank(pdev, nand, bank, chipnr,
+ &nand_maf_id, &nand_dev_id) == 0)
+ chipnr++;
+ else
+ nand->banks[chipnr] = 0;
+ }
+ if (chipnr == 0) {
+ dev_err(&pdev->dev, "No NAND chips found\n");
+ goto err_gpio_busy;
}
if (pdata && pdata->ident_callback) {
@@ -369,19 +506,13 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
ret = nand_scan_tail(mtd);
if (ret) {
- dev_err(&pdev->dev, "Failed to scan nand\n");
- goto err_gpio_free;
+ dev_err(&pdev->dev, "Failed to scan NAND\n");
+ goto err_unclaim_banks;
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- num_partitions = parse_mtd_partitions(mtd, part_probes,
- &partition_info, 0);
-#endif
- if (num_partitions <= 0 && pdata) {
- num_partitions = pdata->num_partitions;
- partition_info = pdata->partitions;
- }
- ret = mtd_device_register(mtd, partition_info, num_partitions);
+ ret = mtd_device_parse_register(mtd, NULL, NULL,
+ pdata ? pdata->partitions : NULL,
+ pdata ? pdata->num_partitions : 0);
if (ret) {
dev_err(&pdev->dev, "Failed to add mtd device\n");
@@ -393,34 +524,49 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
return 0;
err_nand_release:
- nand_release(&nand->mtd);
-err_gpio_free:
- platform_set_drvdata(pdev, NULL);
- gpio_free(pdata->busy_gpio);
-err_iounmap_mem:
- iounmap(nand->bank_base);
+ nand_release(mtd);
+err_unclaim_banks:
+ while (chipnr--) {
+ unsigned char bank = nand->banks[chipnr];
+ gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
+ jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
+ nand->bank_base[bank - 1]);
+ }
+ writel(0, nand->base + JZ_REG_NAND_CTRL);
+err_gpio_busy:
+ if (pdata && gpio_is_valid(pdata->busy_gpio))
+ gpio_free(pdata->busy_gpio);
err_iounmap_mmio:
- iounmap(nand->base);
+ jz_nand_iounmap_resource(nand->mem, nand->base);
err_free:
kfree(nand);
return ret;
}
-static int __devexit jz_nand_remove(struct platform_device *pdev)
+static int jz_nand_remove(struct platform_device *pdev)
{
struct jz_nand *nand = platform_get_drvdata(pdev);
+ struct jz_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ size_t i;
nand_release(&nand->mtd);
/* Deassert and disable all chips */
writel(0, nand->base + JZ_REG_NAND_CTRL);
- iounmap(nand->bank_base);
- release_mem_region(nand->bank_mem->start, resource_size(nand->bank_mem));
- iounmap(nand->base);
- release_mem_region(nand->mem->start, resource_size(nand->mem));
+ for (i = 0; i < JZ_NAND_NUM_BANKS; ++i) {
+ unsigned char bank = nand->banks[i];
+ if (bank != 0) {
+ jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
+ nand->bank_base[bank - 1]);
+ gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
+ }
+ }
+ if (pdata && gpio_is_valid(pdata->busy_gpio))
+ gpio_free(pdata->busy_gpio);
+
+ jz_nand_iounmap_resource(nand->mem, nand->base);
- platform_set_drvdata(pdev, NULL);
kfree(nand);
return 0;
@@ -428,24 +574,14 @@ static int __devexit jz_nand_remove(struct platform_device *pdev)
static struct platform_driver jz_nand_driver = {
.probe = jz_nand_probe,
- .remove = __devexit_p(jz_nand_remove),
+ .remove = jz_nand_remove,
.driver = {
.name = "jz4740-nand",
.owner = THIS_MODULE,
},
};
-static int __init jz_nand_init(void)
-{
- return platform_driver_register(&jz_nand_driver);
-}
-module_init(jz_nand_init);
-
-static void __exit jz_nand_exit(void)
-{
- platform_driver_unregister(&jz_nand_driver);
-}
-module_exit(jz_nand_exit);
+module_platform_driver(jz_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
new file mode 100644
index 00000000000..687478c9f09
--- /dev/null
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -0,0 +1,896 @@
+/*
+ * Driver for NAND MLC Controller in LPC32xx
+ *
+ * Author: Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright © 2011 WORK Microwave GmbH
+ * Copyright © 2011, 2012 Roland Stigge
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * NAND Flash Controller Operation:
+ * - Read: Auto Decode
+ * - Write: Auto Encode
+ * - Tested Page Sizes: 2048, 4096
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_mtd.h>
+#include <linux/of_gpio.h>
+#include <linux/mtd/lpc32xx_mlc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mtd/nand_ecc.h>
+
+#define DRV_NAME "lpc32xx_mlc"
+
+/**********************************************************************
+* MLC NAND controller register offsets
+**********************************************************************/
+
+#define MLC_BUFF(x) (x + 0x00000)
+#define MLC_DATA(x) (x + 0x08000)
+#define MLC_CMD(x) (x + 0x10000)
+#define MLC_ADDR(x) (x + 0x10004)
+#define MLC_ECC_ENC_REG(x) (x + 0x10008)
+#define MLC_ECC_DEC_REG(x) (x + 0x1000C)
+#define MLC_ECC_AUTO_ENC_REG(x) (x + 0x10010)
+#define MLC_ECC_AUTO_DEC_REG(x) (x + 0x10014)
+#define MLC_RPR(x) (x + 0x10018)
+#define MLC_WPR(x) (x + 0x1001C)
+#define MLC_RUBP(x) (x + 0x10020)
+#define MLC_ROBP(x) (x + 0x10024)
+#define MLC_SW_WP_ADD_LOW(x) (x + 0x10028)
+#define MLC_SW_WP_ADD_HIG(x) (x + 0x1002C)
+#define MLC_ICR(x) (x + 0x10030)
+#define MLC_TIME_REG(x) (x + 0x10034)
+#define MLC_IRQ_MR(x) (x + 0x10038)
+#define MLC_IRQ_SR(x) (x + 0x1003C)
+#define MLC_LOCK_PR(x) (x + 0x10044)
+#define MLC_ISR(x) (x + 0x10048)
+#define MLC_CEH(x) (x + 0x1004C)
+
+/**********************************************************************
+* MLC_CMD bit definitions
+**********************************************************************/
+#define MLCCMD_RESET 0xFF
+
+/**********************************************************************
+* MLC_ICR bit definitions
+**********************************************************************/
+#define MLCICR_WPROT (1 << 3)
+#define MLCICR_LARGEBLOCK (1 << 2)
+#define MLCICR_LONGADDR (1 << 1)
+#define MLCICR_16BIT (1 << 0) /* unsupported by LPC32x0! */
+
+/**********************************************************************
+* MLC_TIME_REG bit definitions
+**********************************************************************/
+#define MLCTIMEREG_TCEA_DELAY(n) (((n) & 0x03) << 24)
+#define MLCTIMEREG_BUSY_DELAY(n) (((n) & 0x1F) << 19)
+#define MLCTIMEREG_NAND_TA(n) (((n) & 0x07) << 16)
+#define MLCTIMEREG_RD_HIGH(n) (((n) & 0x0F) << 12)
+#define MLCTIMEREG_RD_LOW(n) (((n) & 0x0F) << 8)
+#define MLCTIMEREG_WR_HIGH(n) (((n) & 0x0F) << 4)
+#define MLCTIMEREG_WR_LOW(n) (((n) & 0x0F) << 0)
+
+/**********************************************************************
+* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
+**********************************************************************/
+#define MLCIRQ_NAND_READY (1 << 5)
+#define MLCIRQ_CONTROLLER_READY (1 << 4)
+#define MLCIRQ_DECODE_FAILURE (1 << 3)
+#define MLCIRQ_DECODE_ERROR (1 << 2)
+#define MLCIRQ_ECC_READY (1 << 1)
+#define MLCIRQ_WRPROT_FAULT (1 << 0)
+
+/**********************************************************************
+* MLC_LOCK_PR bit definitions
+**********************************************************************/
+#define MLCLOCKPR_MAGIC 0xA25E
+
+/**********************************************************************
+* MLC_ISR bit definitions
+**********************************************************************/
+#define MLCISR_DECODER_FAILURE (1 << 6)
+#define MLCISR_ERRORS ((1 << 4) | (1 << 5))
+#define MLCISR_ERRORS_DETECTED (1 << 3)
+#define MLCISR_ECC_READY (1 << 2)
+#define MLCISR_CONTROLLER_READY (1 << 1)
+#define MLCISR_NAND_READY (1 << 0)
+
+/**********************************************************************
+* MLC_CEH bit definitions
+**********************************************************************/
+#define MLCCEH_NORMAL (1 << 0)
+
+struct lpc32xx_nand_cfg_mlc {
+ uint32_t tcea_delay;
+ uint32_t busy_delay;
+ uint32_t nand_ta;
+ uint32_t rd_high;
+ uint32_t rd_low;
+ uint32_t wr_high;
+ uint32_t wr_low;
+ int wp_gpio;
+ struct mtd_partition *parts;
+ unsigned num_parts;
+};
+
+static struct nand_ecclayout lpc32xx_nand_oob = {
+ .eccbytes = 40,
+ .eccpos = { 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
+ .oobfree = {
+ { .offset = 0,
+ .length = 6, },
+ { .offset = 16,
+ .length = 6, },
+ { .offset = 32,
+ .length = 6, },
+ { .offset = 48,
+ .length = 6, },
+ },
+};
+
+static struct nand_bbt_descr lpc32xx_nand_bbt = {
+ .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
+ NAND_BBT_WRITE,
+ .pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
+ .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
+ NAND_BBT_WRITE,
+ .pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+struct lpc32xx_nand_host {
+ struct nand_chip nand_chip;
+ struct lpc32xx_mlc_platform_data *pdata;
+ struct clk *clk;
+ struct mtd_info mtd;
+ void __iomem *io_base;
+ int irq;
+ struct lpc32xx_nand_cfg_mlc *ncfg;
+ struct completion comp_nand;
+ struct completion comp_controller;
+ uint32_t llptr;
+ /*
+ * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
+ */
+ dma_addr_t oob_buf_phy;
+ /*
+ * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
+ */
+ uint8_t *oob_buf;
+ /* Physical address of DMA base address */
+ dma_addr_t io_base_phy;
+
+ struct completion comp_dma;
+ struct dma_chan *dma_chan;
+ struct dma_slave_config dma_slave_config;
+ struct scatterlist sgl;
+ uint8_t *dma_buf;
+ uint8_t *dummy_buf;
+ int mlcsubpages; /* number of 512-byte subpages */
+};
+
+/*
+ * Activate/Deactivate DMA Operation:
+ *
+ * Using the PL080 DMA Controller for transferring the 512 byte subpages
+ * instead of doing readl() / writel() in a loop slows it down significantly.
+ * Measurements via getnstimeofday() of 512-byte subpage reads reveal:
+ *
+ * - readl() of 128 x 32 bits in a loop: ~20us
+ * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
+ * - DMA read of 512 bytes (32 bit, no bursts): ~100us
+ *
+ * This applies to the transfer itself. In the DMA case: only the
+ * wait_for_completion() (DMA setup _not_ included).
+ *
+ * Note that the 512-byte subpage transfer is done directly from/to a
+ * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
+ * 2048-byte page) is spent waiting for the NAND IRQ anyway (the NAND
+ * controller transferring data between its internal buffer and the NAND
+ * chip).
+ *
+ * Therefore, using the PL080 DMA is disabled by default, for now.
+ *
+ */
+static int use_dma;
+
+static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
+{
+ uint32_t clkrate, tmp;
+
+ /* Reset MLC controller */
+ writel(MLCCMD_RESET, MLC_CMD(host->io_base));
+ udelay(1000);
+
+ /* Get base clock for MLC block */
+ clkrate = clk_get_rate(host->clk);
+ if (clkrate == 0)
+ clkrate = 104000000;
+
+ /* Unlock MLC_ICR
+ * (among others, will be locked again automatically) */
+ writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
+
+ /* Configure MLC Controller: Large Block, 5 Byte Address */
+ tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
+ writel(tmp, MLC_ICR(host->io_base));
+
+ /* Unlock MLC_TIME_REG
+ * (among others, will be locked again automatically) */
+ writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
+
+ /* Compute clock setup values, see LPC and NAND manual */
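+ /*
+ * Each field is clkrate divided by the corresponding configuration value
+ * (plus one cycle of margin where "+ 1" appears below); for example
+ * (sketch), with the 104 MHz fallback clock and a configured value of
+ * 26000000, a "+ 1" field holds 104000000 / 26000000 + 1 = 5.
+ */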
+ tmp = 0;
+ tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
+ tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
+ tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
+ tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
+ tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
+ tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
+ tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
+ writel(tmp, MLC_TIME_REG(host->io_base));
+
+ /* Enable IRQ for CONTROLLER_READY and NAND_READY */
+ writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
+ MLC_IRQ_MR(host->io_base));
+
+ /* Normal nCE operation: nCE controlled by controller */
+ writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
+}
+
+/*
+ * Hardware specific access to control lines
+ */
+static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct lpc32xx_nand_host *host = nand_chip->priv;
+
+ if (cmd != NAND_CMD_NONE) {
+ if (ctrl & NAND_CLE)
+ writel(cmd, MLC_CMD(host->io_base));
+ else
+ writel(cmd, MLC_ADDR(host->io_base));
+ }
+}
+
+/*
+ * Read Device Ready (NAND device _and_ controller ready)
+ */
+static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct lpc32xx_nand_host *host = nand_chip->priv;
+
+ if ((readb(MLC_ISR(host->io_base)) &
+ (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
+ (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
+ return 1;
+
+ return 0;
+}
+
+static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
+{
+ uint8_t sr;
+
+ /* Clear interrupt flag by reading status */
+ sr = readb(MLC_IRQ_SR(host->io_base));
+ if (sr & MLCIRQ_NAND_READY)
+ complete(&host->comp_nand);
+ if (sr & MLCIRQ_CONTROLLER_READY)
+ complete(&host->comp_controller);
+
+ return IRQ_HANDLED;
+}
+
+static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
+ goto exit;
+
+ wait_for_completion(&host->comp_nand);
+
+ while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
+ /* Seems to be delayed sometimes by controller */
+ dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
+ cpu_relax();
+ }
+
+exit:
+ return NAND_STATUS_READY;
+}
+
+static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
+ struct nand_chip *chip)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
+ goto exit;
+
+ wait_for_completion(&host->comp_controller);
+
+ while (!(readb(MLC_ISR(host->io_base)) &
+ MLCISR_CONTROLLER_READY)) {
+ dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
+ cpu_relax();
+ }
+
+exit:
+ return NAND_STATUS_READY;
+}
+
+static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ lpc32xx_waitfunc_nand(mtd, chip);
+ lpc32xx_waitfunc_controller(mtd, chip);
+
+ return NAND_STATUS_READY;
+}
+
+/*
+ * Enable NAND write protect
+ */
+static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 0);
+}
+
+/*
+ * Disable NAND write protect
+ */
+static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 1);
+}
+
+static void lpc32xx_dma_complete_func(void *completion)
+{
+ complete(completion);
+}
+
+static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
+ enum dma_transfer_direction dir)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+ struct dma_async_tx_descriptor *desc;
+ int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int res;
+
+ sg_init_one(&host->sgl, mem, len);
+
+ res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ if (res != 1) {
+ dev_err(mtd->dev.parent, "Failed to map sg list\n");
+ return -ENXIO;
+ }
+ desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
+ flags);
+ if (!desc) {
+ dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
+ goto out1;
+ }
+
+ init_completion(&host->comp_dma);
+ desc->callback = lpc32xx_dma_complete_func;
+ desc->callback_param = &host->comp_dma;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(host->dma_chan);
+
+ wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));
+
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return 0;
+out1:
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return -ENXIO;
+}
+
+static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+ int i, j;
+ uint8_t *oobbuf = chip->oob_poi;
+ uint32_t mlc_isr;
+ int res;
+ uint8_t *dma_buf;
+ bool dma_mapped;
+
+ if ((void *)buf <= high_memory) {
+ dma_buf = buf;
+ dma_mapped = true;
+ } else {
+ dma_buf = host->dma_buf;
+ dma_mapped = false;
+ }
+
+ /* Writing Command and Address */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ /* For all sub-pages */
+ for (i = 0; i < host->mlcsubpages; i++) {
+ /* Start Auto Decode Command */
+ writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));
+
+ /* Wait for Controller Ready */
+ lpc32xx_waitfunc_controller(mtd, chip);
+
+ /* Check ECC Error status */
+ mlc_isr = readl(MLC_ISR(host->io_base));
+ if (mlc_isr & MLCISR_DECODER_FAILURE) {
+ mtd->ecc_stats.failed++;
+ dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
+ } else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
+ mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
+ }
+
+ /* Read 512 + 16 Bytes */
+ if (use_dma) {
+ res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
+ DMA_DEV_TO_MEM);
+ if (res)
+ return res;
+ } else {
+ for (j = 0; j < (512 >> 2); j++) {
+ *((uint32_t *)(buf)) =
+ readl(MLC_BUFF(host->io_base));
+ buf += 4;
+ }
+ }
+ for (j = 0; j < (16 >> 2); j++) {
+ *((uint32_t *)(oobbuf)) =
+ readl(MLC_BUFF(host->io_base));
+ oobbuf += 4;
+ }
+ }
+
+ if (use_dma && !dma_mapped)
+ memcpy(buf, dma_buf, mtd->writesize);
+
+ return 0;
+}
+
+static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+ const uint8_t *oobbuf = chip->oob_poi;
+ uint8_t *dma_buf = (uint8_t *)buf;
+ int res;
+ int i, j;
+
+ if (use_dma && (void *)buf >= high_memory) {
+ dma_buf = host->dma_buf;
+ memcpy(dma_buf, buf, mtd->writesize);
+ }
+
+ for (i = 0; i < host->mlcsubpages; i++) {
+ /* Start Encode */
+ writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
+
+ /* Write 512 + 6 Bytes to Buffer */
+ if (use_dma) {
+ res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
+ DMA_MEM_TO_DEV);
+ if (res)
+ return res;
+ } else {
+ for (j = 0; j < (512 >> 2); j++) {
+ writel(*((uint32_t *)(buf)),
+ MLC_BUFF(host->io_base));
+ buf += 4;
+ }
+ }
+ writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
+ oobbuf += 4;
+ writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
+ oobbuf += 12;
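+ /*
+ * Only the first 6 OOB bytes of each 16-byte OOB chunk are user data
+ * (cf. lpc32xx_nand_oob above); the remaining 10 bytes per chunk are
+ * filled with ECC by the Auto Encode step below.
+ */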
+
+ /* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
+ writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));
+
+ /* Wait for Controller Ready */
+ lpc32xx_waitfunc_controller(mtd, chip);
+ }
+ return 0;
+}
+
+static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ /* Read whole page - necessary with MLC controller! */
+ lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);
+
+ return 0;
+}
+
+static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ /* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
+ return 0;
+}
+
+/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
+static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
+{
+ /* Always enabled! */
+}
+
+static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
+{
+ struct mtd_info *mtd = &host->mtd;
+ dma_cap_mask_t mask;
+
+ if (!host->pdata || !host->pdata->dma_filter) {
+ dev_err(mtd->dev.parent, "no DMA platform data\n");
+ return -ENOENT;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
+ "nand-mlc");
+ if (!host->dma_chan) {
+ dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Set direction to a sensible value even if the dmaengine driver
+ * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
+ * driver complains about it as an "alien transfer direction".
+ */
+ host->dma_slave_config.direction = DMA_DEV_TO_MEM;
+ host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.src_maxburst = 128;
+ host->dma_slave_config.dst_maxburst = 128;
+ /* DMA controller does flow control: */
+ host->dma_slave_config.device_fc = false;
+ host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
+ host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
+ if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
+ dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
+ goto out1;
+ }
+
+ return 0;
+out1:
+ dma_release_channel(host->dma_chan);
+ return -ENXIO;
+}
+
+static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
+{
+ struct lpc32xx_nand_cfg_mlc *ncfg;
+ struct device_node *np = dev->of_node;
+
+ ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
+ if (!ncfg)
+ return NULL;
+
+ of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
+ of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
+ of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
+ of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
+ of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
+ of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
+ of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);
+
+ if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
+ !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
+ !ncfg->wr_low) {
+ dev_err(dev, "chip parameters not specified correctly\n");
+ return NULL;
+ }
+
+ ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
+
+ return ncfg;
+}
+
+/*
+ * Probe for NAND controller
+ */
+static int lpc32xx_nand_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ struct resource *rc;
+ int res;
+ struct mtd_part_parser_data ppdata = {};
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ if (IS_ERR(host->io_base))
+ return PTR_ERR(host->io_base);
+
+ host->io_base_phy = rc->start;
+
+ mtd = &host->mtd;
+ nand_chip = &host->nand_chip;
+ if (pdev->dev.of_node)
+ host->ncfg = lpc32xx_parse_dt(&pdev->dev);
+ if (!host->ncfg) {
+ dev_err(&pdev->dev,
+ "Missing or bad NAND config from device tree\n");
+ return -ENOENT;
+ }
+ if (host->ncfg->wp_gpio == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (gpio_is_valid(host->ncfg->wp_gpio) &&
+ gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
+ dev_err(&pdev->dev, "GPIO not available\n");
+ return -EBUSY;
+ }
+ lpc32xx_wp_disable(host);
+
+ host->pdata = dev_get_platdata(&pdev->dev);
+
+ nand_chip->priv = host; /* link the private data structures */
+ mtd->priv = nand_chip;
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
+
+ /* Get NAND clock */
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "Clock initialization failure\n");
+ res = -ENOENT;
+ goto err_exit1;
+ }
+ clk_enable(host->clk);
+
+ nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+ nand_chip->dev_ready = lpc32xx_nand_device_ready;
+ nand_chip->chip_delay = 25; /* us */
+ nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
+ nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);
+
+ /* Init NAND controller */
+ lpc32xx_nand_setup(host);
+
+ platform_set_drvdata(pdev, host);
+
+ /* Initialize function pointers */
+ nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
+ nand_chip->ecc.read_page_raw = lpc32xx_read_page;
+ nand_chip->ecc.read_page = lpc32xx_read_page;
+ nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
+ nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
+ nand_chip->ecc.write_oob = lpc32xx_write_oob;
+ nand_chip->ecc.read_oob = lpc32xx_read_oob;
+ nand_chip->ecc.strength = 4;
+ nand_chip->waitfunc = lpc32xx_waitfunc;
+
+ nand_chip->options = NAND_NO_SUBPAGE_WRITE;
+ nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+ nand_chip->bbt_td = &lpc32xx_nand_bbt;
+ nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
+
+ /* bitflip_threshold defaults to ecc_strength anyway.
+ * Unfortunately, it is only set later, in add_mtd_device(). While it is
+ * still 0, it causes bad block table scanning errors in
+ * nand_scan_tail(), so set it up here. */
+ mtd->bitflip_threshold = nand_chip->ecc.strength;
+
+ if (use_dma) {
+ res = lpc32xx_dma_setup(host);
+ if (res) {
+ res = -EIO;
+ goto err_exit2;
+ }
+ }
+
+ /*
+ * Scan to find the existence of the device and to
+ * determine the type of NAND device (small block or large block)
+ */
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ res = -ENXIO;
+ goto err_exit3;
+ }
+
+ host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
+ if (!host->dma_buf) {
+ res = -ENOMEM;
+ goto err_exit3;
+ }
+
+ host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
+ if (!host->dummy_buf) {
+ res = -ENOMEM;
+ goto err_exit3;
+ }
+
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = mtd->writesize;
+ nand_chip->ecc.layout = &lpc32xx_nand_oob;
+ host->mlcsubpages = mtd->writesize / 512;
+
+ /* initially clear interrupt status */
+ readb(MLC_IRQ_SR(host->io_base));
+
+ init_completion(&host->comp_nand);
+ init_completion(&host->comp_controller);
+
+ host->irq = platform_get_irq(pdev, 0);
+ if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
+ dev_err(&pdev->dev, "failed to get platform irq\n");
+ res = -EINVAL;
+ goto err_exit3;
+ }
+
+ if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
+ IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
+ dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
+ res = -ENXIO;
+ goto err_exit3;
+ }
+
+ /*
+ * Fills out all the uninitialized function pointers with the defaults
+ * and scans for a bad block table if appropriate.
+ */
+ if (nand_scan_tail(mtd)) {
+ res = -ENXIO;
+ goto err_exit4;
+ }
+
+ mtd->name = DRV_NAME;
+
+ ppdata.of_node = pdev->dev.of_node;
+ res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
+ host->ncfg->num_parts);
+ if (!res)
+ return res;
+
+ nand_release(mtd);
+
+err_exit4:
+ free_irq(host->irq, host);
+err_exit3:
+ if (use_dma)
+ dma_release_channel(host->dma_chan);
+err_exit2:
+ clk_disable(host->clk);
+ clk_put(host->clk);
+err_exit1:
+ lpc32xx_wp_enable(host);
+ gpio_free(host->ncfg->wp_gpio);
+
+ return res;
+}
+
+/*
+ * Remove NAND device
+ */
+static int lpc32xx_nand_remove(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = &host->mtd;
+
+ nand_release(mtd);
+ free_irq(host->irq, host);
+ if (use_dma)
+ dma_release_channel(host->dma_chan);
+
+ clk_disable(host->clk);
+ clk_put(host->clk);
+
+ lpc32xx_wp_enable(host);
+ gpio_free(host->ncfg->wp_gpio);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_nand_resume(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Re-enable NAND clock */
+ clk_enable(host->clk);
+
+ /* Fresh init of NAND controller */
+ lpc32xx_nand_setup(host);
+
+ /* Disable write protect */
+ lpc32xx_wp_disable(host);
+
+ return 0;
+}
+
+static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Enable write protect for safety */
+ lpc32xx_wp_enable(host);
+
+ /* Disable clock */
+ clk_disable(host->clk);
+ return 0;
+}
+
+#else
+#define lpc32xx_nand_resume NULL
+#define lpc32xx_nand_suspend NULL
+#endif
+
+static const struct of_device_id lpc32xx_nand_match[] = {
+ { .compatible = "nxp,lpc3220-mlc" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
+
+static struct platform_driver lpc32xx_nand_driver = {
+ .probe = lpc32xx_nand_probe,
+ .remove = lpc32xx_nand_remove,
+ .resume = lpc32xx_nand_resume,
+ .suspend = lpc32xx_nand_suspend,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = lpc32xx_nand_match,
+ },
+};
+
+module_platform_driver(lpc32xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
new file mode 100644
index 00000000000..53a6742e3da
--- /dev/null
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -0,0 +1,1018 @@
+/*
+ * NXP LPC32XX NAND SLC driver
+ *
+ * Authors:
+ * Kevin Wells <kevin.wells@nxp.com>
+ * Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright © 2011 NXP Semiconductors
+ * Copyright © 2012 Roland Stigge
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_mtd.h>
+#include <linux/of_gpio.h>
+#include <linux/mtd/lpc32xx_slc.h>
+
+#define LPC32XX_MODNAME "lpc32xx-nand"
+
+/**********************************************************************
+* SLC NAND controller register offsets
+**********************************************************************/
+
+#define SLC_DATA(x) (x + 0x000)
+#define SLC_ADDR(x) (x + 0x004)
+#define SLC_CMD(x) (x + 0x008)
+#define SLC_STOP(x) (x + 0x00C)
+#define SLC_CTRL(x) (x + 0x010)
+#define SLC_CFG(x) (x + 0x014)
+#define SLC_STAT(x) (x + 0x018)
+#define SLC_INT_STAT(x) (x + 0x01C)
+#define SLC_IEN(x) (x + 0x020)
+#define SLC_ISR(x) (x + 0x024)
+#define SLC_ICR(x) (x + 0x028)
+#define SLC_TAC(x) (x + 0x02C)
+#define SLC_TC(x) (x + 0x030)
+#define SLC_ECC(x) (x + 0x034)
+#define SLC_DMA_DATA(x) (x + 0x038)
+
+/**********************************************************************
+* slc_ctrl register definitions
+**********************************************************************/
+#define SLCCTRL_SW_RESET (1 << 2) /* Reset the NAND controller bit */
+#define SLCCTRL_ECC_CLEAR (1 << 1) /* Reset ECC bit */
+#define SLCCTRL_DMA_START (1 << 0) /* Start DMA channel bit */
+
+/**********************************************************************
+* slc_cfg register definitions
+**********************************************************************/
+#define SLCCFG_CE_LOW (1 << 5) /* Force CE low bit */
+#define SLCCFG_DMA_ECC (1 << 4) /* Enable DMA ECC bit */
+#define SLCCFG_ECC_EN (1 << 3) /* ECC enable bit */
+#define SLCCFG_DMA_BURST (1 << 2) /* DMA burst bit */
+#define SLCCFG_DMA_DIR (1 << 1) /* DMA write(0)/read(1) bit */
+#define SLCCFG_WIDTH (1 << 0) /* External device width, 0=8bit */
+
+/**********************************************************************
+* slc_stat register definitions
+**********************************************************************/
+#define SLCSTAT_DMA_FIFO (1 << 2) /* DMA FIFO has data bit */
+#define SLCSTAT_SLC_FIFO (1 << 1) /* SLC FIFO has data bit */
+#define SLCSTAT_NAND_READY (1 << 0) /* NAND device is ready bit */
+
+/**********************************************************************
+* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
+**********************************************************************/
+#define SLCSTAT_INT_TC (1 << 1) /* Transfer count bit */
+#define SLCSTAT_INT_RDY_EN (1 << 0) /* Ready interrupt bit */
+
+/**********************************************************************
+* slc_tac register definitions
+**********************************************************************/
+/* Clock setting for RDY write sample wait time in 2*n clocks */
+#define SLCTAC_WDR(n) (((n) & 0xF) << 28)
+/* Write pulse width in clock cycles, 1 to 16 clocks */
+#define SLCTAC_WWIDTH(n) (((n) & 0xF) << 24)
+/* Write hold time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_WHOLD(n) (((n) & 0xF) << 20)
+/* Write setup time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_WSETUP(n) (((n) & 0xF) << 16)
+/* Clock setting for RDY read sample wait time in 2*n clocks */
+#define SLCTAC_RDR(n) (((n) & 0xF) << 12)
+/* Read pulse width in clock cycles, 1 to 16 clocks */
+#define SLCTAC_RWIDTH(n) (((n) & 0xF) << 8)
+/* Read hold time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_RHOLD(n) (((n) & 0xF) << 4)
+/* Read setup time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_RSETUP(n) (((n) & 0xF) << 0)
+
+/**********************************************************************
+* slc_ecc register definitions
+**********************************************************************/
+/* ECC line and column parity fetch macros */
+#define SLCECC_TO_LINEPAR(n) (((n) >> 6) & 0x7FFF)
+#define SLCECC_TO_COLPAR(n) ((n) & 0x3F)
+
+/*
+ * DMA requires storage space for the DMA local buffer and the hardware ECC
+ * storage area. The DMA local buffer is only used when the caller's
+ * buffer cannot be DMA-mapped directly (e.g. it lies in highmem).
+ */
+#define LPC32XX_DMA_DATA_SIZE 4096
+#define LPC32XX_ECC_SAVE_SIZE ((4096 / 256) * 4)
+
+/* Number of bytes used for ECC stored in NAND per 256 bytes */
+#define LPC32XX_SLC_DEV_ECC_BYTES 3
+
+/*
+ * If the NAND base clock frequency can't be fetched, this frequency will be
+ * used instead as the base. This rate is used to setup the timing registers
+ * used for NAND accesses.
+ */
+#define LPC32XX_DEF_BUS_RATE 133250000
+
+/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
+#define LPC32XX_DMA_TIMEOUT 100
+
+/*
+ * NAND ECC Layout for small page NAND devices
+ * Note: For large and huge page devices, the default layouts are used
+ */
+static struct nand_ecclayout lpc32xx_nand_oob_16 = {
+ .eccbytes = 6,
+ .eccpos = {10, 11, 12, 13, 14, 15},
+ .oobfree = {
+ { .offset = 0, .length = 4 },
+ { .offset = 6, .length = 4 },
+ },
+};
+
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+/*
+ * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
+ * Note: Large page devices use the default layout
+ */
+static struct nand_bbt_descr bbt_smallpage_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+/*
+ * NAND platform configuration structure
+ */
+struct lpc32xx_nand_cfg_slc {
+ uint32_t wdr_clks;
+ uint32_t wwidth;
+ uint32_t whold;
+ uint32_t wsetup;
+ uint32_t rdr_clks;
+ uint32_t rwidth;
+ uint32_t rhold;
+ uint32_t rsetup;
+ bool use_bbt;
+ int wp_gpio;
+ struct mtd_partition *parts;
+ unsigned num_parts;
+};
+
+struct lpc32xx_nand_host {
+ struct nand_chip nand_chip;
+ struct lpc32xx_slc_platform_data *pdata;
+ struct clk *clk;
+ struct mtd_info mtd;
+ void __iomem *io_base;
+ struct lpc32xx_nand_cfg_slc *ncfg;
+
+ struct completion comp;
+ struct dma_chan *dma_chan;
+ uint32_t dma_buf_len;
+ struct dma_slave_config dma_slave_config;
+ struct scatterlist sgl;
+
+ /*
+ * DMA and CPU addresses of ECC work area and data buffer
+ */
+ uint32_t *ecc_buf;
+ uint8_t *data_buf;
+ dma_addr_t io_base_dma;
+};
+
+static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
+{
+ uint32_t clkrate, tmp;
+
+ /* Reset SLC controller */
+ writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
+ udelay(1000);
+
+ /* Basic setup */
+ writel(0, SLC_CFG(host->io_base));
+ writel(0, SLC_IEN(host->io_base));
+ writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
+ SLC_ICR(host->io_base));
+
+ /* Get base clock for SLC block */
+ clkrate = clk_get_rate(host->clk);
+ if (clkrate == 0)
+ clkrate = LPC32XX_DEF_BUS_RATE;
+
+ /* Compute clock setup values */
+ tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
+ SLCTAC_WWIDTH(1 + (clkrate / host->ncfg->wwidth)) |
+ SLCTAC_WHOLD(1 + (clkrate / host->ncfg->whold)) |
+ SLCTAC_WSETUP(1 + (clkrate / host->ncfg->wsetup)) |
+ SLCTAC_RDR(host->ncfg->rdr_clks) |
+ SLCTAC_RWIDTH(1 + (clkrate / host->ncfg->rwidth)) |
+ SLCTAC_RHOLD(1 + (clkrate / host->ncfg->rhold)) |
+ SLCTAC_RSETUP(1 + (clkrate / host->ncfg->rsetup));
+ writel(tmp, SLC_TAC(host->io_base));
+}
+
+/*
+ * Hardware specific access to control lines
+ */
+static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ uint32_t tmp;
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ /* Does CE state need to be changed? */
+ tmp = readl(SLC_CFG(host->io_base));
+ if (ctrl & NAND_NCE)
+ tmp |= SLCCFG_CE_LOW;
+ else
+ tmp &= ~SLCCFG_CE_LOW;
+ writel(tmp, SLC_CFG(host->io_base));
+
+ if (cmd != NAND_CMD_NONE) {
+ if (ctrl & NAND_CLE)
+ writel(cmd, SLC_CMD(host->io_base));
+ else
+ writel(cmd, SLC_ADDR(host->io_base));
+ }
+}
+
+/*
+ * Read the Device Ready pin
+ */
+static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+ int rdy = 0;
+
+ if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
+ rdy = 1;
+
+ return rdy;
+}
+
+/*
+ * Enable NAND write protect
+ */
+static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 0);
+}
+
+/*
+ * Disable NAND write protect
+ */
+static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 1);
+}
+
+/*
+ * Prepares SLC for transfers with H/W ECC enabled
+ */
+static void lpc32xx_nand_ecc_enable(struct mtd_info *mtd, int mode)
+{
+ /* Hardware ECC is enabled automatically by the controller as needed */
+}
+
+/*
+ * Calculates the ECC for the data
+ */
+static int lpc32xx_nand_ecc_calculate(struct mtd_info *mtd,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ /*
+ * ECC is calculated automatically in hardware during syndrome read
+ * and write operations, so it doesn't need to be calculated here.
+ */
+ return 0;
+}
+
+/*
+ * Read a single byte from NAND device
+ */
+static uint8_t lpc32xx_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ return (uint8_t)readl(SLC_DATA(host->io_base));
+}
+
+/*
+ * Simple device read without ECC
+ */
+static void lpc32xx_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ /* Direct device read with no ECC */
+ while (len-- > 0)
+ *buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
+}
+
+/*
+ * Simple device write without ECC
+ */
+static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ /* Direct device write with no ECC */
+ while (len-- > 0)
+ writel((uint32_t)*buf++, SLC_DATA(host->io_base));
+}
+
+/*
+ * Read the OOB data from the device without ECC using FIFO method
+ */
+static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/*
+ * Write the OOB data to the device without ECC using FIFO method
+ */
+static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ int status;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /* Send command to program the OOB data */
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/*
+ * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
+ */
+static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
+{
+ int i;
+
+ for (i = 0; i < (count * 3); i += 3) {
+ uint32_t ce = ecc[i / 3];
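+ /* Shift and invert the raw hardware ECC value to get the 3 bytes stored in the spare area */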
+ ce = ~(ce << 2) & 0xFFFFFF;
+ spare[i + 2] = (uint8_t)(ce & 0xFF);
+ ce >>= 8;
+ spare[i + 1] = (uint8_t)(ce & 0xFF);
+ ce >>= 8;
+ spare[i] = (uint8_t)(ce & 0xFF);
+ }
+}
+
+static void lpc32xx_dma_complete_func(void *completion)
+{
+ complete(completion);
+}
+
+static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
+ void *mem, int len, enum dma_transfer_direction dir)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+ struct dma_async_tx_descriptor *desc;
+ int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int res;
+
+ host->dma_slave_config.direction = dir;
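+ /* src and dst both point at the SLC register; only the one matching 'dir' is used */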
+ host->dma_slave_config.src_addr = dma;
+ host->dma_slave_config.dst_addr = dma;
+ host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.src_maxburst = 4;
+ host->dma_slave_config.dst_maxburst = 4;
+ /* DMA controller does flow control: */
+ host->dma_slave_config.device_fc = false;
+ if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
+ dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
+ return -ENXIO;
+ }
+
+ sg_init_one(&host->sgl, mem, len);
+
+ res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ if (res != 1) {
+ dev_err(mtd->dev.parent, "Failed to map sg list\n");
+ return -ENXIO;
+ }
+ desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
+ flags);
+ if (!desc) {
+ dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
+ goto out1;
+ }
+
+ init_completion(&host->comp);
+ desc->callback = lpc32xx_dma_complete_func;
+ desc->callback_param = &host->comp;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(host->dma_chan);
+
+ wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
+
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+
+ return 0;
+out1:
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return -ENXIO;
+}
+
+/*
+ * DMA read/write transfers with ECC support
+ */
+static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
+ int read)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+ int i, status = 0;
+ unsigned long timeout;
+ int res;
+ enum dma_transfer_direction dir =
+ read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+ uint8_t *dma_buf;
+ bool dma_mapped;
+
+ if ((void *)buf <= high_memory) {
+ dma_buf = buf;
+ dma_mapped = true;
+ } else {
+ dma_buf = host->data_buf;
+ dma_mapped = false;
+ if (!read)
+ memcpy(host->data_buf, buf, mtd->writesize);
+ }
+
+ if (read) {
+ writel(readl(SLC_CFG(host->io_base)) |
+ SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
+ SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
+ } else {
+ writel((readl(SLC_CFG(host->io_base)) |
+ SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
+ ~SLCCFG_DMA_DIR,
+ SLC_CFG(host->io_base));
+ }
+
+ /* Clear initial ECC */
+ writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));
+
+ /* Transfer size is data area only */
+ writel(mtd->writesize, SLC_TC(host->io_base));
+
+ /* Start transfer in the NAND controller */
+ writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
+ SLC_CTRL(host->io_base));
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ /* Data */
+ res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
+ dma_buf + i * chip->ecc.size,
+ mtd->writesize / chip->ecc.steps, dir);
+ if (res)
+ return res;
+
+ /* Always _read_ ECC */
+ if (i == chip->ecc.steps - 1)
+ break;
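+ /* The ECC word of the last subpage is read from SLC_ECC after the transfer completes */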
+ if (!read) /* ECC availability delayed on write */
+ udelay(10);
+ res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
+ &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
+ if (res)
+ return res;
+ }
+
+ /*
+ * According to NXP, the DMA can be finished here, but the NAND
+ * controller may still have buffered data. Since porting to the
+ * dmaengine DMA driver (amba-pl080), the DMA-FIFO-empty condition
+ * appears to always hold in tests. Keep the check for safety
+ * reasons for now.
+ */
+ if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
+ dev_warn(mtd->dev.parent, "FIFO not empty!\n");
+ timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
+ while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
+ time_before(jiffies, timeout))
+ cpu_relax();
+ if (!time_before(jiffies, timeout)) {
+ dev_err(mtd->dev.parent, "FIFO held data too long\n");
+ status = -EIO;
+ }
+ }
+
+ /* Read last calculated ECC value */
+ if (!read)
+ udelay(10);
+ host->ecc_buf[chip->ecc.steps - 1] =
+ readl(SLC_ECC(host->io_base));
+
+ /* Flush DMA */
+ dmaengine_terminate_all(host->dma_chan);
+
+ if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
+ readl(SLC_TC(host->io_base))) {
+ /* Something is left in the FIFO, something is wrong */
+ dev_err(mtd->dev.parent, "DMA FIFO failure\n");
+ status = -EIO;
+ }
+
+ /* Stop DMA & HW ECC */
+ writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
+ SLC_CTRL(host->io_base));
+ writel(readl(SLC_CFG(host->io_base)) &
+ ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
+ SLCCFG_DMA_BURST), SLC_CFG(host->io_base));
+
+ if (!dma_mapped && read)
+ memcpy(buf, host->data_buf, mtd->writesize);
+
+ return status;
+}
+
+/*
+ * Read the data and OOB data from the device, use ECC correction with the
+ * data, disable ECC for the OOB data
+ */
+static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+ int stat, i, status;
+ uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
+
+ /* Issue read command */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ /* Read data and oob, calculate ECC */
+ status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
+
+ /* Get OOB data */
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /* Convert to stored ECC format */
+ lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);
+
+ /* Pointer to ECC data retrieved from NAND spare area */
+ oobecc = chip->oob_poi + chip->ecc.layout->eccpos[0];
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ stat = chip->ecc.correct(mtd, buf, oobecc,
+ &tmpecc[i * chip->ecc.bytes]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+
+ buf += chip->ecc.size;
+ oobecc += chip->ecc.bytes;
+ }
+
+ return status;
+}
+
+/*
+ * Read the data and OOB data from the device, no ECC correction with the
+ * data or OOB data
+ */
+static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
+{
+ /* Issue read command */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ /* Raw reads can just use the FIFO interface */
+ chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/*
+ * Write the data and OOB data to the device, use ECC with the data,
+ * disable ECC for the OOB data
+ */
+static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+ uint8_t *pb = chip->oob_poi + chip->ecc.layout->eccpos[0];
+ int error;
+
+ /* Write data, calculate ECC on outbound data */
+ error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
+ if (error)
+ return error;
+
+ /*
+ * The calculated ECC needs some manual work done to it before
+ * committing it to NAND. Process the calculated ECC and place
+ * the resultant values directly into the OOB buffer.
+ */
+ lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
+
+ /* Write ECC data to device */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+/*
+ * Write the data and OOB data to the device, no ECC correction with the
+ * data or OOB data
+ */
+static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required)
+{
+ /* Raw writes can just use the FIFO interface */
+ chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
+{
+ struct mtd_info *mtd = &host->mtd;
+ dma_cap_mask_t mask;
+
+ if (!host->pdata || !host->pdata->dma_filter) {
+ dev_err(mtd->dev.parent, "no DMA platform data\n");
+ return -ENOENT;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
+ "nand-slc");
+ if (!host->dma_chan) {
+ dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
+{
+ struct lpc32xx_nand_cfg_slc *ncfg;
+ struct device_node *np = dev->of_node;
+
+ ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
+ if (!ncfg)
+ return NULL;
+
+ of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
+ of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
+ of_property_read_u32(np, "nxp,whold", &ncfg->whold);
+ of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
+ of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
+ of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
+ of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
+ of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);
+
+ if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
+ !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
+ !ncfg->rhold || !ncfg->rsetup) {
+ dev_err(dev, "chip parameters not specified correctly\n");
+ return NULL;
+ }
+
+ ncfg->use_bbt = of_get_nand_on_flash_bbt(np);
+ ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
+
+ return ncfg;
+}
+
+/*
+ * Probe for NAND controller
+ */
+static int lpc32xx_nand_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct resource *rc;
+ struct mtd_part_parser_data ppdata = {};
+ int res;
+
+ rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (rc == NULL) {
+ dev_err(&pdev->dev, "No memory resource found for device\n");
+ return -EBUSY;
+ }
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+ host->io_base_dma = rc->start;
+
+ host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ if (IS_ERR(host->io_base))
+ return PTR_ERR(host->io_base);
+
+ if (pdev->dev.of_node)
+ host->ncfg = lpc32xx_parse_dt(&pdev->dev);
+ if (!host->ncfg) {
+ dev_err(&pdev->dev,
+ "Missing or bad NAND config from device tree\n");
+ return -ENOENT;
+ }
+ if (host->ncfg->wp_gpio == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
+ host->ncfg->wp_gpio, "NAND WP")) {
+ dev_err(&pdev->dev, "GPIO not available\n");
+ return -EBUSY;
+ }
+ lpc32xx_wp_disable(host);
+
+ host->pdata = dev_get_platdata(&pdev->dev);
+
+ mtd = &host->mtd;
+ chip = &host->nand_chip;
+ chip->priv = host;
+ mtd->priv = chip;
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
+
+ /* Get NAND clock */
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "Clock failure\n");
+ res = -ENOENT;
+ goto err_exit1;
+ }
+ clk_enable(host->clk);
+
+ /* Set NAND IO addresses and command/ready functions */
+ chip->IO_ADDR_R = SLC_DATA(host->io_base);
+ chip->IO_ADDR_W = SLC_DATA(host->io_base);
+ chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+ chip->dev_ready = lpc32xx_nand_device_ready;
+ chip->chip_delay = 20; /* 20us command delay time */
+
+ /* Init NAND controller */
+ lpc32xx_nand_setup(host);
+
+ platform_set_drvdata(pdev, host);
+
+ /* NAND callbacks for LPC32xx SLC hardware */
+ chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+ chip->read_byte = lpc32xx_nand_read_byte;
+ chip->read_buf = lpc32xx_nand_read_buf;
+ chip->write_buf = lpc32xx_nand_write_buf;
+ chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
+ chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
+ chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
+ chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
+ chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
+ chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
+ chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
+ chip->ecc.correct = nand_correct_data;
+ chip->ecc.strength = 1;
+ chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
+
+ /* bitflip_threshold defaults to ecc_strength anyway.
+ * Unfortunately, it is only set later, in add_mtd_device(). While it is
+ * still 0, it causes bad block table scanning errors in
+ * nand_scan_tail(), so set it up here. */
+ mtd->bitflip_threshold = chip->ecc.strength;
+
+ /*
+ * Allocate a large enough buffer for a single huge page plus
+ * extra space for the spare area and ECC storage area
+ */
+ host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
+ host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
+ GFP_KERNEL);
+ if (host->data_buf == NULL) {
+ res = -ENOMEM;
+ goto err_exit2;
+ }
+
+ res = lpc32xx_nand_dma_setup(host);
+ if (res) {
+ res = -EIO;
+ goto err_exit2;
+ }
+
+ /* Find NAND device */
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ res = -ENXIO;
+ goto err_exit3;
+ }
+
+ /* OOB and ECC CPU and DMA work areas */
+ host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
+
+ /*
+ * Small page FLASH has a unique OOB layout, but large and huge
+ * page FLASH use the standard layout. Small page FLASH uses a
+ * custom BBT marker layout.
+ */
+ if (mtd->writesize <= 512)
+ chip->ecc.layout = &lpc32xx_nand_oob_16;
+
+ /* These sizes remain the same regardless of page size */
+ chip->ecc.size = 256;
+ chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
+ chip->ecc.prepad = chip->ecc.postpad = 0;
+
+ /* Avoid extra scan if using BBT, setup BBT support */
+ if (host->ncfg->use_bbt) {
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ /*
+ * Use a custom BBT marker setup for small page FLASH that
+ * won't interfere with the ECC layout. Large and huge page
+ * FLASH use the standard layout.
+ */
+ if (mtd->writesize <= 512) {
+ chip->bbt_td = &bbt_smallpage_main_descr;
+ chip->bbt_md = &bbt_smallpage_mirror_descr;
+ }
+ }
+
+ /*
+ * Fills out all the uninitialized function pointers with the defaults
+ */
+ if (nand_scan_tail(mtd)) {
+ res = -ENXIO;
+ goto err_exit3;
+ }
+
+ mtd->name = "nxp_lpc3220_slc";
+ ppdata.of_node = pdev->dev.of_node;
+ res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
+ host->ncfg->num_parts);
+ if (!res)
+ return res;
+
+ nand_release(mtd);
+
+err_exit3:
+ dma_release_channel(host->dma_chan);
+err_exit2:
+ clk_disable(host->clk);
+err_exit1:
+ lpc32xx_wp_enable(host);
+
+ return res;
+}
+
+/*
+ * Remove NAND device.
+ */
+static int lpc32xx_nand_remove(struct platform_device *pdev)
+{
+ uint32_t tmp;
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = &host->mtd;
+
+ nand_release(mtd);
+ dma_release_channel(host->dma_chan);
+
+ /* Force CE high */
+ tmp = readl(SLC_CTRL(host->io_base));
+ tmp &= ~SLCCFG_CE_LOW;
+ writel(tmp, SLC_CTRL(host->io_base));
+
+ clk_disable(host->clk);
+ lpc32xx_wp_enable(host);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_nand_resume(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Re-enable NAND clock */
+ clk_enable(host->clk);
+
+ /* Fresh init of NAND controller */
+ lpc32xx_nand_setup(host);
+
+ /* Disable write protect */
+ lpc32xx_wp_disable(host);
+
+ return 0;
+}
+
+static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
+{
+ uint32_t tmp;
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Force CE high */
+ tmp = readl(SLC_CTRL(host->io_base));
+ tmp &= ~SLCCFG_CE_LOW;
+ writel(tmp, SLC_CTRL(host->io_base));
+
+ /* Enable write protect for safety */
+ lpc32xx_wp_enable(host);
+
+ /* Disable clock */
+ clk_disable(host->clk);
+
+ return 0;
+}
+
+#else
+#define lpc32xx_nand_resume NULL
+#define lpc32xx_nand_suspend NULL
+#endif
+
+static const struct of_device_id lpc32xx_nand_match[] = {
+ { .compatible = "nxp,lpc3220-slc" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
+
+static struct platform_driver lpc32xx_nand_driver = {
+ .probe = lpc32xx_nand_probe,
+ .remove = lpc32xx_nand_remove,
+ .resume = lpc32xx_nand_resume,
+ .suspend = lpc32xx_nand_suspend,
+ .driver = {
+ .name = LPC32XX_MODNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = lpc32xx_nand_match,
+ },
+};
+
+module_platform_driver(lpc32xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 2f7c930872f..e78841a2dcc 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -30,13 +30,14 @@
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/mpc5121.h>
@@ -131,8 +132,6 @@ struct mpc5121_nfc_prv {
static void mpc5121_nfc_done(struct mtd_info *mtd);
-static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
-
/* Read NFC register */
static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
{
@@ -508,27 +507,6 @@ static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
}
-/* Compare buffer with NAND flash */
-static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- u_char tmp[256];
- uint bsize;
-
- while (len) {
- bsize = min(len, 256);
- mpc5121_nfc_read_buf(mtd, tmp, bsize);
-
- if (memcmp(buf, tmp, bsize))
- return 1;
-
- buf += bsize;
- len -= bsize;
- }
-
- return 0;
-}
-
/* Read byte from NFC buffers */
static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
{
@@ -640,29 +618,28 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
struct nand_chip *chip = mtd->priv;
struct mpc5121_nfc_prv *prv = chip->priv;
- if (prv->clk) {
- clk_disable(prv->clk);
- clk_put(prv->clk);
- }
+ if (prv->clk)
+ clk_disable_unprepare(prv->clk);
if (prv->csreg)
iounmap(prv->csreg);
}
-static int __devinit mpc5121_nfc_probe(struct platform_device *op)
+static int mpc5121_nfc_probe(struct platform_device *op)
{
struct device_node *rootnode, *dn = op->dev.of_node;
+ struct clk *clk;
struct device *dev = &op->dev;
struct mpc5121_nfc_prv *prv;
struct resource res;
struct mtd_info *mtd;
- struct mtd_partition *parts;
struct nand_chip *chip;
unsigned long regs_paddr, regs_size;
const __be32 *chips_no;
int resettime = 0;
int retval = 0;
int rev, len;
+ struct mtd_part_parser_data ppdata;
/*
* Check SoC revision. This driver supports only NFC
@@ -675,10 +652,8 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
}
prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
- if (!prv) {
- dev_err(dev, "Memory exhausted!\n");
+ if (!prv)
return -ENOMEM;
- }
mtd = &prv->mtd;
chip = &prv->chip;
@@ -713,7 +688,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
}
regs_paddr = res.start;
- regs_size = res.end - res.start + 1;
+ regs_size = resource_size(&res);
if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
dev_err(dev, "Error requesting memory region!\n");
@@ -727,15 +702,15 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
}
mtd->name = "MPC5121 NAND";
+ ppdata.of_node = dn;
chip->dev_ready = mpc5121_nfc_dev_ready;
chip->cmdfunc = mpc5121_nfc_command;
chip->read_byte = mpc5121_nfc_read_byte;
chip->read_word = mpc5121_nfc_read_word;
chip->read_buf = mpc5121_nfc_read_buf;
chip->write_buf = mpc5121_nfc_write_buf;
- chip->verify_buf = mpc5121_nfc_verify_buf;
chip->select_chip = mpc5121_nfc_select_chip;
- chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
chip->ecc.mode = NAND_ECC_SOFT;
/* Support external chip-select logic on ADS5121 board */
@@ -753,14 +728,18 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
of_node_put(rootnode);
/* Enable NFC clock */
- prv->clk = clk_get(dev, "nfc_clk");
- if (IS_ERR(prv->clk)) {
+ clk = devm_clk_get(dev, "ipg");
+ if (IS_ERR(clk)) {
dev_err(dev, "Unable to acquire NFC clock!\n");
- retval = PTR_ERR(prv->clk);
+ retval = PTR_ERR(clk);
goto error;
}
-
- clk_enable(prv->clk);
+ retval = clk_prepare_enable(clk);
+ if (retval) {
+ dev_err(dev, "Unable to enable NFC clock!\n");
+ goto error;
+ }
+ prv->clk = clk;
/* Reset NAND Flash controller */
nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
@@ -804,7 +783,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
/* Detect NAND chips */
if (nand_scan(mtd, be32_to_cpup(chips_no))) {
dev_err(dev, "NAND Flash not found !\n");
- devm_free_irq(dev, prv->irq, mtd);
retval = -ENXIO;
goto error;
}
@@ -829,7 +807,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
default:
dev_err(dev, "Unsupported NAND flash!\n");
- devm_free_irq(dev, prv->irq, mtd);
retval = -ENXIO;
goto error;
}
@@ -837,22 +814,9 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
dev_set_drvdata(dev, mtd);
/* Register device in MTD */
- retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
-#ifdef CONFIG_MTD_OF_PARTS
- if (retval == 0)
- retval = of_mtd_parse_partitions(dev, dn, &parts);
-#endif
- if (retval < 0) {
- dev_err(dev, "Error parsing MTD partitions!\n");
- devm_free_irq(dev, prv->irq, mtd);
- retval = -EINVAL;
- goto error;
- }
-
- retval = mtd_device_register(mtd, parts, retval);
+ retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
if (retval) {
dev_err(dev, "Error adding MTD device!\n");
- devm_free_irq(dev, prv->irq, mtd);
goto error;
}
@@ -862,28 +826,25 @@ error:
return retval;
}
-static int __devexit mpc5121_nfc_remove(struct platform_device *op)
+static int mpc5121_nfc_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct mtd_info *mtd = dev_get_drvdata(dev);
- struct nand_chip *chip = mtd->priv;
- struct mpc5121_nfc_prv *prv = chip->priv;
nand_release(mtd);
- devm_free_irq(dev, prv->irq, mtd);
mpc5121_nfc_free(dev, mtd);
return 0;
}
-static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
+static struct of_device_id mpc5121_nfc_match[] = {
{ .compatible = "fsl,mpc5121-nfc", },
{},
};
static struct platform_driver mpc5121_nfc_driver = {
.probe = mpc5121_nfc_probe,
- .remove = __devexit_p(mpc5121_nfc_remove),
+ .remove = mpc5121_nfc_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
@@ -891,19 +852,7 @@ static struct platform_driver mpc5121_nfc_driver = {
},
};
-static int __init mpc5121_nfc_init(void)
-{
- return platform_driver_register(&mpc5121_nfc_driver);
-}
-
-module_init(mpc5121_nfc_init);
-
-static void __exit mpc5121_nfc_cleanup(void)
-{
- platform_driver_unregister(&mpc5121_nfc_driver);
-}
-
-module_exit(mpc5121_nfc_cleanup);
+module_platform_driver(mpc5121_nfc_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 90df34c4d26..dba262bf766 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -32,18 +32,15 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
#include <asm/mach/flash.h>
-#include <mach/mxc_nand.h>
-#include <mach/hardware.h>
+#include <linux/platform_data/mtd-mxc_nand.h>
#define DRIVER_NAME "mxc_nand"
-#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
-#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
-#define nfc_is_v3_2() cpu_is_mx51()
-#define nfc_is_v3() nfc_is_v3_2()
-
/* Addresses for NFC registers */
#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00)
#define NFC_V1_V2_BUF_ADDR (host->regs + 0x04)
@@ -120,7 +117,7 @@
#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4)
#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5)
#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6)
-#define NFC_V3_CONFIG2_PPB(x) (((x) & 0x3) << 7)
+#define NFC_V3_CONFIG2_PPB(x, shift) (((x) & 0x3) << shift)
#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12)
#define NFC_V3_CONFIG2_INT_MSK (1 << 15)
#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24)
@@ -140,14 +137,48 @@
#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34)
+struct mxc_nand_host;
+
+struct mxc_nand_devtype_data {
+ void (*preset)(struct mtd_info *);
+ void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_page)(struct mtd_info *, unsigned int);
+ void (*send_read_id)(struct mxc_nand_host *);
+ uint16_t (*get_dev_status)(struct mxc_nand_host *);
+ int (*check_int)(struct mxc_nand_host *);
+ void (*irq_control)(struct mxc_nand_host *, int);
+ u32 (*get_ecc_status)(struct mxc_nand_host *);
+ struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k;
+ void (*select_chip)(struct mtd_info *mtd, int chip);
+ int (*correct_data)(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc);
+
+ /*
+ * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
+ * (CONFIG1:INT_MSK is set). To handle this the driver uses
+ * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK
+ */
+ int irqpending_quirk;
+ int needs_ip;
+
+ size_t regs_offset;
+ size_t spare0_offset;
+ size_t axi_offset;
+
+ int spare_len;
+ int eccbytes;
+ int eccsize;
+ int ppb_shift;
+};
+
struct mxc_nand_host {
struct mtd_info mtd;
struct nand_chip nand;
- struct mtd_partition *parts;
struct device *dev;
- void *spare0;
- void *main_area0;
+ void __iomem *spare0;
+ void __iomem *main_area0;
void __iomem *base;
void __iomem *regs;
@@ -164,16 +195,9 @@ struct mxc_nand_host {
uint8_t *data_buf;
unsigned int buf_start;
- int spare_len;
-
- void (*preset)(struct mtd_info *);
- void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
- void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
- void (*send_page)(struct mtd_info *, unsigned int);
- void (*send_read_id)(struct mxc_nand_host *);
- uint16_t (*get_dev_status)(struct mxc_nand_host *);
- int (*check_int)(struct mxc_nand_host *);
- void (*irq_control)(struct mxc_nand_host *, int);
+
+ const struct mxc_nand_devtype_data *devtype_data;
+ struct mxc_nand_platform_data pdata;
};
/* OOB placement block for use with hardware ecc generation */
@@ -243,20 +267,27 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
}
};
-static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char * const part_probes[] = {
+ "cmdlinepart", "RedBoot", "ofpart", NULL };
-static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
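+/* Copy from NFC I/O memory using only 32-bit reads */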
+static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
{
- struct mxc_nand_host *host = dev_id;
-
- if (!host->check_int(host))
- return IRQ_NONE;
+ int i;
+ u32 *t = trg;
+ const __iomem u32 *s = src;
- host->irq_control(host, 0);
+ for (i = 0; i < (size >> 2); i++)
+ *t++ = __raw_readl(s++);
+}
- complete(&host->op_completion);
+static void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+ int i;
+ u32 __iomem *t = trg;
+ const u32 *s = src;
- return IRQ_HANDLED;
+ for (i = 0; i < (size >> 2); i++)
+ __raw_writel(*s++, t++);
}
static int check_int_v3(struct mxc_nand_host *host)
@@ -281,26 +312,12 @@ static int check_int_v1_v2(struct mxc_nand_host *host)
if (!(tmp & NFC_V1_V2_CONFIG2_INT))
return 0;
- if (!cpu_is_mx21())
+ if (!host->devtype_data->irqpending_quirk)
writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
return 1;
}
-/*
- * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit
- * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the
- * driver can enable/disable the irq line rather than simply masking the
- * interrupts.
- */
-static void irq_control_mx21(struct mxc_nand_host *host, int activate)
-{
- if (activate)
- enable_irq(host->irq);
- else
- disable_irq_nosync(host->irq);
-}
-
static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
{
uint16_t tmp;
@@ -329,6 +346,47 @@ static void irq_control_v3(struct mxc_nand_host *host, int activate)
writel(tmp, NFC_V3_CONFIG2);
}
+static void irq_control(struct mxc_nand_host *host, int activate)
+{
+ if (host->devtype_data->irqpending_quirk) {
+ if (activate)
+ enable_irq(host->irq);
+ else
+ disable_irq_nosync(host->irq);
+ } else {
+ host->devtype_data->irq_control(host, activate);
+ }
+}
+
+static u32 get_ecc_status_v1(struct mxc_nand_host *host)
+{
+ return readw(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v2(struct mxc_nand_host *host)
+{
+ return readl(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v3(struct mxc_nand_host *host)
+{
+ return readl(NFC_V3_ECC_STATUS_RESULT);
+}
+
+static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+{
+ struct mxc_nand_host *host = dev_id;
+
+ if (!host->devtype_data->check_int(host))
+ return IRQ_NONE;
+
+ irq_control(host, 0);
+
+ complete(&host->op_completion);
+
+ return IRQ_HANDLED;
+}
+
/* This function polls the NANDFC to wait for the basic operation to
* complete by checking the INT bit of config2 register.
*/
@@ -337,21 +395,20 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
int max_retries = 8000;
if (useirq) {
- if (!host->check_int(host)) {
- INIT_COMPLETION(host->op_completion);
- host->irq_control(host, 1);
+ if (!host->devtype_data->check_int(host)) {
+ reinit_completion(&host->op_completion);
+ irq_control(host, 1);
wait_for_completion(&host->op_completion);
}
} else {
while (max_retries-- > 0) {
- if (host->check_int(host))
+ if (host->devtype_data->check_int(host))
break;
udelay(1);
}
if (max_retries < 0)
- DEBUG(MTD_DEBUG_LEVEL0, "%s: INT not set\n",
- __func__);
+ pr_debug("%s: INT not set\n", __func__);
}
}
@@ -371,12 +428,12 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
* waits for completion. */
static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
{
- DEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+ pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
writew(cmd, NFC_V1_V2_FLASH_CMD);
writew(NFC_CMD, NFC_V1_V2_CONFIG2);
- if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
+ if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) {
int max_retries = 100;
/* Reset completion is indicated by NFC_CONFIG2 */
/* being set to 0 */
@@ -387,8 +444,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
udelay(1);
}
if (max_retries < 0)
- DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n",
- __func__);
+ pr_debug("%s: RESET failed\n", __func__);
} else {
/* Wait for operation to complete */
wait_op_done(host, useirq);
@@ -411,7 +467,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
* a NAND command. */
static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
{
- DEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x %d)\n", addr, islast);
+ pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
writew(addr, NFC_V1_V2_FLASH_ADDR);
writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
@@ -436,13 +492,27 @@ static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
wait_op_done(host, false);
}
-static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
+static void send_page_v2(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ /* NANDFC buffer 0 is used for page read/write */
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+ writew(ops, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+}
+
+static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
int bufs, i;
- if (nfc_is_v1() && mtd->writesize > 512)
+ if (mtd->writesize > 512)
bufs = 4;
else
bufs = 1;
@@ -461,12 +531,23 @@ static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
static void send_read_id_v3(struct mxc_nand_host *host)
{
+ struct nand_chip *this = &host->nand;
+
/* Read ID into main buffer */
writel(NFC_ID, NFC_V3_LAUNCH);
wait_op_done(host, true);
- memcpy(host->data_buf, host->main_area0, 16);
+ memcpy32_fromio(host->data_buf, host->main_area0, 16);
+
+ if (this->options & NAND_BUSWIDTH_16) {
+ /* compress the ID info */
+ host->data_buf[1] = host->data_buf[2];
+ host->data_buf[2] = host->data_buf[4];
+ host->data_buf[3] = host->data_buf[6];
+ host->data_buf[4] = host->data_buf[8];
+ host->data_buf[5] = host->data_buf[10];
+ }
}
/* Request the NANDFC to perform a read of the NAND device ID. */
@@ -482,7 +563,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
/* Wait for operation to complete */
wait_op_done(host, true);
- memcpy(host->data_buf, host->main_area0, 16);
+ memcpy32_fromio(host->data_buf, host->main_area0, 16);
if (this->options & NAND_BUSWIDTH_16) {
/* compress the ID info */
@@ -558,11 +639,10 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
* additional correction. 2-Bit errors cannot be corrected by
* HW ECC, so we need to return failure
*/
- uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT);
+ uint16_t ecc_status = get_ecc_status_v1(host);
if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
- DEBUG(MTD_DEBUG_LEVEL0,
- "MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+ pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
return -1;
}
@@ -584,10 +664,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
no_subpages = mtd->writesize >> 9;
- if (nfc_is_v21())
- ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT);
- else
- ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT);
+ ecc_stat = host->devtype_data->get_ecc_status(host);
do {
err = ecc_stat & ecc_bit_mask;
@@ -600,7 +677,6 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
ecc_stat >>= 4;
} while (--no_subpages);
- mtd->ecc_stats.corrected += ret;
pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
return ret;
@@ -620,7 +696,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
/* Check for status request */
if (host->status_request)
- return host->get_dev_status(host) & 0xFF;
+ return host->devtype_data->get_dev_status(host) & 0xFF;
ret = *(uint8_t *)(host->data_buf + host->buf_start);
host->buf_start++;
@@ -676,17 +752,9 @@ static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
host->buf_start += n;
}
-/* Used by the upper layer to verify the data in NAND Flash
- * with the data in the buf. */
-static int mxc_nand_verify_buf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- return -EFAULT;
-}
-
/* This function is used by upper layer for select and
* deselect of the NAND chip */
-static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
+static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
@@ -694,7 +762,7 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
if (chip == -1) {
/* Disable the NFC clock */
if (host->clk_act) {
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
host->clk_act = 0;
}
return;
@@ -702,14 +770,33 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
if (!host->clk_act) {
/* Enable the NFC clock */
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
host->clk_act = 1;
}
+}
+
+static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
- if (nfc_is_v21()) {
- host->active_cs = chip;
- writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+ if (chip == -1) {
+ /* Disable the NFC clock */
+ if (host->clk_act) {
+ clk_disable_unprepare(host->clk);
+ host->clk_act = 0;
+ }
+ return;
}
+
+ if (!host->clk_act) {
+ /* Enable the NFC clock */
+ clk_prepare_enable(host->clk);
+ host->clk_act = 1;
+ }
+
+ host->active_cs = chip;
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
}
/*
@@ -722,23 +809,23 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
u16 i, j;
u16 n = mtd->writesize >> 9;
u8 *d = host->data_buf + mtd->writesize;
- u8 *s = host->spare0;
- u16 t = host->spare_len;
+ u8 __iomem *s = host->spare0;
+ u16 t = host->devtype_data->spare_len;
j = (mtd->oobsize / n >> 1) << 1;
if (bfrom) {
for (i = 0; i < n - 1; i++)
- memcpy(d + i * j, s + i * t, j);
+ memcpy32_fromio(d + i * j, s + i * t, j);
/* the last section */
- memcpy(d + i * j, s + i * t, mtd->oobsize - i * j);
+ memcpy32_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
} else {
for (i = 0; i < n - 1; i++)
- memcpy(&s[i * t], &d[i * j], j);
+ memcpy32_toio(&s[i * t], &d[i * j], j);
/* the last section */
- memcpy(&s[i * t], &d[i * j], mtd->oobsize - i * j);
+ memcpy32_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
}
}
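/*
 * Example layout (not from the patch): the controller keeps one spare area
 * per 512 byte subpage, spaced spare_len apart in its SRAM, while the mtd
 * layer wants the OOB data contiguous behind the page data. Assuming a 2k
 * page, 64 bytes of OOB and spare_len == 64 (v2.1), the loop above moves
 * four 16 byte chunks:
 *
 *	data_buf + 2048 + i * 16  <->  spare0 + i * 64,	i = 0..3
 */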
@@ -755,34 +842,44 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
* perform a read/write buf operation, the saved column
* address is used to index into the full page.
*/
- host->send_addr(host, 0, page_addr == -1);
+ host->devtype_data->send_addr(host, 0, page_addr == -1);
if (mtd->writesize > 512)
/* another col addr cycle for 2k page */
- host->send_addr(host, 0, false);
+ host->devtype_data->send_addr(host, 0, false);
}
/* Write out page address, if necessary */
if (page_addr != -1) {
/* paddr_0 - p_addr_7 */
- host->send_addr(host, (page_addr & 0xff), false);
+ host->devtype_data->send_addr(host, (page_addr & 0xff), false);
if (mtd->writesize > 512) {
if (mtd->size >= 0x10000000) {
/* paddr_8 - paddr_15 */
- host->send_addr(host, (page_addr >> 8) & 0xff, false);
- host->send_addr(host, (page_addr >> 16) & 0xff, true);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff,
+ false);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 16) & 0xff,
+ true);
} else
/* paddr_8 - paddr_15 */
- host->send_addr(host, (page_addr >> 8) & 0xff, true);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff, true);
} else {
/* One more address cycle for higher density devices */
if (mtd->size >= 0x4000000) {
/* paddr_8 - paddr_15 */
- host->send_addr(host, (page_addr >> 8) & 0xff, false);
- host->send_addr(host, (page_addr >> 16) & 0xff, true);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff,
+ false);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 16) & 0xff,
+ true);
} else
/* paddr_8 - paddr_15 */
- host->send_addr(host, (page_addr >> 8) & 0xff, true);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff, true);
}
}
}
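/*
 * Summary (not from the patch): the column cycles are sent as 0 because the
 * driver always transfers whole pages and applies the column offset in
 * software via host->buf_start, so the cycle count issued here depends only
 * on the page and chip size:
 *
 *	512 byte page, chip <  64 MiB:  1 column + 2 row cycles
 *	512 byte page, chip >= 64 MiB:  1 column + 3 row cycles
 *	2k page,       chip <  256 MiB: 2 column + 2 row cycles
 *	2k page,       chip >= 256 MiB: 2 column + 3 row cycles
 */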
@@ -804,7 +901,35 @@ static int get_eccsize(struct mtd_info *mtd)
return 8;
}
-static void preset_v1_v2(struct mtd_info *mtd)
+static void preset_v1(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t config1 = 0;
+
+ if (nand_chip->ecc.mode == NAND_ECC_HW)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+ if (!host->devtype_data->irqpending_quirk)
+ config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ host->eccsize = 1;
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+ /* preset operation */
+
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, NFC_V1_V2_CONFIG);
+
+ /* Blocks to be unlocked */
+ writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
+ writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static void preset_v2(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
@@ -813,13 +938,12 @@ static void preset_v1_v2(struct mtd_info *mtd)
if (nand_chip->ecc.mode == NAND_ECC_HW)
config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
- if (nfc_is_v21())
- config1 |= NFC_V2_CONFIG1_FP_INT;
+ config1 |= NFC_V2_CONFIG1_FP_INT;
- if (!cpu_is_mx21())
+ if (!host->devtype_data->irqpending_quirk)
config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
- if (nfc_is_v21() && mtd->writesize) {
+ if (mtd->writesize) {
uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
host->eccsize = get_eccsize(mtd);
@@ -838,20 +962,14 @@ static void preset_v1_v2(struct mtd_info *mtd)
writew(0x2, NFC_V1_V2_CONFIG);
/* Blocks to be unlocked */
- if (nfc_is_v21()) {
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
- } else if (nfc_is_v1()) {
- writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
- writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR);
- } else
- BUG();
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
/* Unlock Block Command for given address range */
writew(0x4, NFC_V1_V2_WRPROT);
@@ -902,7 +1020,9 @@ static void preset_v3(struct mtd_info *mtd)
}
if (mtd->writesize) {
- config2 |= NFC_V3_CONFIG2_PPB(ffs(mtd->erasesize / mtd->writesize) - 6);
+ config2 |= NFC_V3_CONFIG2_PPB(
+ ffs(mtd->erasesize / mtd->writesize) - 6,
+ host->devtype_data->ppb_shift);
host->eccsize = get_eccsize(mtd);
if (host->eccsize == 8)
config2 |= NFC_V3_CONFIG2_ECC_MODE_8;
@@ -932,8 +1052,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
- DEBUG(MTD_DEBUG_LEVEL3,
- "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+ pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
command, column, page_addr);
/* Reset command state information */
@@ -942,15 +1061,15 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
/* Command pre-processing step */
switch (command) {
case NAND_CMD_RESET:
- host->preset(mtd);
- host->send_cmd(host, command, false);
+ host->devtype_data->preset(mtd);
+ host->devtype_data->send_cmd(host, command, false);
break;
case NAND_CMD_STATUS:
host->buf_start = 0;
host->status_request = true;
- host->send_cmd(host, command, true);
+ host->devtype_data->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
@@ -963,15 +1082,17 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
command = NAND_CMD_READ0; /* only READ0 is valid */
- host->send_cmd(host, command, false);
+ host->devtype_data->send_cmd(host, command, false);
mxc_do_addr_cycle(mtd, column, page_addr);
if (mtd->writesize > 512)
- host->send_cmd(host, NAND_CMD_READSTART, true);
+ host->devtype_data->send_cmd(host,
+ NAND_CMD_READSTART, true);
- host->send_page(mtd, NFC_OUTPUT);
+ host->devtype_data->send_page(mtd, NFC_OUTPUT);
- memcpy(host->data_buf, host->main_area0, mtd->writesize);
+ memcpy32_fromio(host->data_buf, host->main_area0,
+ mtd->writesize);
copy_spare(mtd, true);
break;
@@ -982,28 +1103,28 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
host->buf_start = column;
- host->send_cmd(host, command, false);
+ host->devtype_data->send_cmd(host, command, false);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
case NAND_CMD_PAGEPROG:
- memcpy(host->main_area0, host->data_buf, mtd->writesize);
+ memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
copy_spare(mtd, false);
- host->send_page(mtd, NFC_INPUT);
- host->send_cmd(host, command, true);
+ host->devtype_data->send_page(mtd, NFC_INPUT);
+ host->devtype_data->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
case NAND_CMD_READID:
- host->send_cmd(host, command, true);
+ host->devtype_data->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
- host->send_read_id(host);
+ host->devtype_data->send_read_id(host);
host->buf_start = column;
break;
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
- host->send_cmd(host, command, false);
+ host->devtype_data->send_cmd(host, command, false);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
@@ -1037,23 +1158,256 @@ static struct nand_bbt_descr bbt_mirror_descr = {
.pattern = mirror_pattern,
};
-static int __init mxcnd_probe(struct platform_device *pdev)
+/* v1 + irqpending_quirk: i.MX21 */
+static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
+ .preset = preset_v1,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v1,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v1,
+ .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv1_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v1,
+ .irqpending_quirk = 1,
+ .needs_ip = 0,
+ .regs_offset = 0xe00,
+ .spare0_offset = 0x800,
+ .spare_len = 16,
+ .eccbytes = 3,
+ .eccsize = 1,
+};
+
+/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
+static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
+ .preset = preset_v1,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v1,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v1,
+ .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv1_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v1,
+ .irqpending_quirk = 0,
+ .needs_ip = 0,
+ .regs_offset = 0xe00,
+ .spare0_offset = 0x800,
+ .axi_offset = 0,
+ .spare_len = 16,
+ .eccbytes = 3,
+ .eccsize = 1,
+};
+
+/* v21: i.MX25, i.MX35 */
+static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
+ .preset = preset_v2,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v2,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v2,
+ .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv2_hw_eccoob_4k,
+ .select_chip = mxc_nand_select_chip_v2,
+ .correct_data = mxc_nand_correct_data_v2_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 0,
+ .regs_offset = 0x1e00,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0,
+ .spare_len = 64,
+ .eccbytes = 9,
+ .eccsize = 0,
+};
+
+/* v3.2a: i.MX51 */
+static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
+ .preset = preset_v3,
+ .send_cmd = send_cmd_v3,
+ .send_addr = send_addr_v3,
+ .send_page = send_page_v3,
+ .send_read_id = send_read_id_v3,
+ .get_dev_status = get_dev_status_v3,
+ .check_int = check_int_v3,
+ .irq_control = irq_control_v3,
+ .get_ecc_status = get_ecc_status_v3,
+ .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v2_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 1,
+ .regs_offset = 0,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0x1e00,
+ .spare_len = 64,
+ .eccbytes = 0,
+ .eccsize = 0,
+ .ppb_shift = 7,
+};
+
+/* v3.2b: i.MX53 */
+static const struct mxc_nand_devtype_data imx53_nand_devtype_data = {
+ .preset = preset_v3,
+ .send_cmd = send_cmd_v3,
+ .send_addr = send_addr_v3,
+ .send_page = send_page_v3,
+ .send_read_id = send_read_id_v3,
+ .get_dev_status = get_dev_status_v3,
+ .check_int = check_int_v3,
+ .irq_control = irq_control_v3,
+ .get_ecc_status = get_ecc_status_v3,
+ .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v2_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 1,
+ .regs_offset = 0,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0x1e00,
+ .spare_len = 64,
+ .eccbytes = 0,
+ .eccsize = 0,
+ .ppb_shift = 8,
+};
+
+static inline int is_imx21_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx21_nand_devtype_data;
+}
+
+static inline int is_imx27_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx27_nand_devtype_data;
+}
+
+static inline int is_imx25_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx25_nand_devtype_data;
+}
+
+static inline int is_imx51_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx51_nand_devtype_data;
+}
+
+static inline int is_imx53_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx53_nand_devtype_data;
+}
+
+static struct platform_device_id mxcnd_devtype[] = {
+ {
+ .name = "imx21-nand",
+ .driver_data = (kernel_ulong_t) &imx21_nand_devtype_data,
+ }, {
+ .name = "imx27-nand",
+ .driver_data = (kernel_ulong_t) &imx27_nand_devtype_data,
+ }, {
+ .name = "imx25-nand",
+ .driver_data = (kernel_ulong_t) &imx25_nand_devtype_data,
+ }, {
+ .name = "imx51-nand",
+ .driver_data = (kernel_ulong_t) &imx51_nand_devtype_data,
+ }, {
+ .name = "imx53-nand",
+ .driver_data = (kernel_ulong_t) &imx53_nand_devtype_data,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxcnd_devtype);
+
+#ifdef CONFIG_OF_MTD
+static const struct of_device_id mxcnd_dt_ids[] = {
+ {
+ .compatible = "fsl,imx21-nand",
+ .data = &imx21_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx27-nand",
+ .data = &imx27_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx25-nand",
+ .data = &imx25_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx51-nand",
+ .data = &imx51_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx53-nand",
+ .data = &imx53_nand_devtype_data,
+ },
+ { /* sentinel */ }
+};
+
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+ struct device_node *np = host->dev->of_node;
+ struct mxc_nand_platform_data *pdata = &host->pdata;
+ const struct of_device_id *of_id =
+ of_match_device(mxcnd_dt_ids, host->dev);
+ int buswidth;
+
+ if (!np)
+ return 1;
+
+ if (of_get_nand_ecc_mode(np) >= 0)
+ pdata->hw_ecc = 1;
+
+ pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
+
+ buswidth = of_get_nand_bus_width(np);
+ if (buswidth < 0)
+ return buswidth;
+
+ pdata->width = buswidth / 8;
+
+ host->devtype_data = of_id->data;
+
+ return 0;
+}
+#else
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+ return 1;
+}
+#endif
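/*
 * Example (not from the patch): a device tree node matched by mxcnd_dt_ids
 * above could look like the sketch below; the reg/interrupts values are
 * placeholders, while the nand-* properties are the generic OF NAND
 * bindings parsed by of_get_nand_ecc_mode(), of_get_nand_bus_width() and
 * of_get_nand_on_flash_bbt():
 *
 *	nand@d8000000 {
 *		compatible = "fsl,imx27-nand";
 *		reg = <0xd8000000 0x1000>;
 *		interrupts = <29>;
 *		nand-bus-width = <8>;
 *		nand-ecc-mode = "hw";
 *		nand-on-flash-bbt;
 *	};
 */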
+
+static int mxcnd_probe(struct platform_device *pdev)
{
struct nand_chip *this;
struct mtd_info *mtd;
- struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
struct mxc_nand_host *host;
struct resource *res;
- int err = 0, __maybe_unused nr_parts = 0;
- struct nand_ecclayout *oob_smallpage, *oob_largepage;
+ int err = 0;
/* Allocate memory for MTD device structure and private data */
- host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
- NAND_MAX_OOBSIZE, GFP_KERNEL);
+ host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host),
+ GFP_KERNEL);
if (!host)
return -ENOMEM;
- host->data_buf = (uint8_t *)(host + 1);
+ /* allocate a temporary buffer for the nand_scan_ident() */
+ host->data_buf = devm_kzalloc(&pdev->dev, PAGE_SIZE, GFP_KERNEL);
+ if (!host->data_buf)
+ return -ENOMEM;
host->dev = &pdev->dev;
/* structures must be linked */
@@ -1070,159 +1424,142 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->priv = host;
this->dev_ready = mxc_nand_dev_ready;
this->cmdfunc = mxc_nand_command;
- this->select_chip = mxc_nand_select_chip;
this->read_byte = mxc_nand_read_byte;
this->read_word = mxc_nand_read_word;
this->write_buf = mxc_nand_write_buf;
this->read_buf = mxc_nand_read_buf;
- this->verify_buf = mxc_nand_verify_buf;
- host->clk = clk_get(&pdev->dev, "nfc");
- if (IS_ERR(host->clk)) {
- err = PTR_ERR(host->clk);
- goto eclk;
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
+
+ err = mxcnd_probe_dt(host);
+ if (err > 0) {
+ struct mxc_nand_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ host->pdata = *pdata;
+ host->devtype_data = (struct mxc_nand_devtype_data *)
+ pdev->id_entry->driver_data;
+ } else {
+ err = -ENODEV;
+ }
}
+ if (err < 0)
+ return err;
- clk_enable(host->clk);
- host->clk_act = 1;
+ if (host->devtype_data->needs_ip) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->regs_ip = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->regs_ip))
+ return PTR_ERR(host->regs_ip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -ENODEV;
- goto eres;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
}
- host->base = ioremap(res->start, resource_size(res));
- if (!host->base) {
- err = -ENOMEM;
- goto eres;
- }
+ host->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->base))
+ return PTR_ERR(host->base);
host->main_area0 = host->base;
- if (nfc_is_v1() || nfc_is_v21()) {
- host->preset = preset_v1_v2;
- host->send_cmd = send_cmd_v1_v2;
- host->send_addr = send_addr_v1_v2;
- host->send_page = send_page_v1_v2;
- host->send_read_id = send_read_id_v1_v2;
- host->get_dev_status = get_dev_status_v1_v2;
- host->check_int = check_int_v1_v2;
- if (cpu_is_mx21())
- host->irq_control = irq_control_mx21;
- else
- host->irq_control = irq_control_v1_v2;
- }
+ if (host->devtype_data->regs_offset)
+ host->regs = host->base + host->devtype_data->regs_offset;
+ host->spare0 = host->base + host->devtype_data->spare0_offset;
+ if (host->devtype_data->axi_offset)
+ host->regs_axi = host->base + host->devtype_data->axi_offset;
- if (nfc_is_v21()) {
- host->regs = host->base + 0x1e00;
- host->spare0 = host->base + 0x1000;
- host->spare_len = 64;
- oob_smallpage = &nandv2_hw_eccoob_smallpage;
- oob_largepage = &nandv2_hw_eccoob_largepage;
- this->ecc.bytes = 9;
- } else if (nfc_is_v1()) {
- host->regs = host->base + 0xe00;
- host->spare0 = host->base + 0x800;
- host->spare_len = 16;
- oob_smallpage = &nandv1_hw_eccoob_smallpage;
- oob_largepage = &nandv1_hw_eccoob_largepage;
- this->ecc.bytes = 3;
- host->eccsize = 1;
- } else if (nfc_is_v3_2()) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- err = -ENODEV;
- goto eirq;
- }
- host->regs_ip = ioremap(res->start, resource_size(res));
- if (!host->regs_ip) {
- err = -ENOMEM;
- goto eirq;
- }
- host->regs_axi = host->base + 0x1e00;
- host->spare0 = host->base + 0x1000;
- host->spare_len = 64;
- host->preset = preset_v3;
- host->send_cmd = send_cmd_v3;
- host->send_addr = send_addr_v3;
- host->send_page = send_page_v3;
- host->send_read_id = send_read_id_v3;
- host->check_int = check_int_v3;
- host->get_dev_status = get_dev_status_v3;
- host->irq_control = irq_control_v3;
- oob_smallpage = &nandv2_hw_eccoob_smallpage;
- oob_largepage = &nandv2_hw_eccoob_largepage;
- } else
- BUG();
+ this->ecc.bytes = host->devtype_data->eccbytes;
+ host->eccsize = host->devtype_data->eccsize;
+ this->select_chip = host->devtype_data->select_chip;
this->ecc.size = 512;
- this->ecc.layout = oob_smallpage;
+ this->ecc.layout = host->devtype_data->ecclayout_512;
- if (pdata->hw_ecc) {
+ if (host->pdata.hw_ecc) {
this->ecc.calculate = mxc_nand_calculate_ecc;
this->ecc.hwctl = mxc_nand_enable_hwecc;
- if (nfc_is_v1())
- this->ecc.correct = mxc_nand_correct_data_v1;
- else
- this->ecc.correct = mxc_nand_correct_data_v2_v3;
+ this->ecc.correct = host->devtype_data->correct_data;
this->ecc.mode = NAND_ECC_HW;
} else {
this->ecc.mode = NAND_ECC_SOFT;
}
- /* NAND bus width determines access funtions used by upper layer */
- if (pdata->width == 2)
+ /* NAND bus width determines access functions used by upper layer */
+ if (host->pdata.width == 2)
this->options |= NAND_BUSWIDTH_16;
- if (pdata->flash_bbt) {
+ if (host->pdata.flash_bbt) {
this->bbt_td = &bbt_main_descr;
this->bbt_md = &bbt_mirror_descr;
/* update flash based bbt */
- this->options |= NAND_USE_FLASH_BBT;
+ this->bbt_options |= NAND_BBT_USE_FLASH;
}
init_completion(&host->op_completion);
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0)
+ return host->irq;
/*
- * mask the interrupt. For i.MX21 explicitely call
- * irq_control_v1_v2 to use the mask bit. We can't call
- * disable_irq_nosync() for an interrupt we do not own yet.
+ * Use host->devtype_data->irq_control() here instead of irq_control()
+ * because we must not disable_irq_nosync without having requested the
+ * irq.
*/
- if (cpu_is_mx21())
- irq_control_v1_v2(host, 0);
- else
- host->irq_control(host, 0);
+ host->devtype_data->irq_control(host, 0);
- err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
+ err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq,
+ 0, DRIVER_NAME, host);
if (err)
- goto eirq;
+ return err;
- host->irq_control(host, 0);
+ err = clk_prepare_enable(host->clk);
+ if (err)
+ return err;
+ host->clk_act = 1;
/*
- * Now that the interrupt is disabled make sure the interrupt
- * mask bit is cleared on i.MX21. Otherwise we can't read
- * the interrupt status bit on this machine.
+ * Now that we "own" the interrupt make sure the interrupt mask bit is
+ * cleared on i.MX21. Otherwise we can't read the interrupt status bit
+ * on this machine.
*/
- if (cpu_is_mx21())
- irq_control_v1_v2(host, 1);
+ if (host->devtype_data->irqpending_quirk) {
+ disable_irq_nosync(host->irq);
+ host->devtype_data->irq_control(host, 1);
+ }
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
+ if (nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL)) {
err = -ENXIO;
goto escan;
}
+ /* allocate the right size buffer now */
+ devm_kfree(&pdev->dev, (void *)host->data_buf);
+ host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize,
+ GFP_KERNEL);
+ if (!host->data_buf) {
+ err = -ENOMEM;
+ goto escan;
+ }
+
/* Call preset again, with correct writesize this time */
- host->preset(mtd);
+ host->devtype_data->preset(mtd);
if (mtd->writesize == 2048)
- this->ecc.layout = oob_largepage;
- if (nfc_is_v21() && mtd->writesize == 4096)
- this->ecc.layout = &nandv2_hw_eccoob_4k;
+ this->ecc.layout = host->devtype_data->ecclayout_2k;
+ else if (mtd->writesize == 4096)
+ this->ecc.layout = host->devtype_data->ecclayout_4k;
+
+ if (this->ecc.mode == NAND_ECC_HW) {
+ if (is_imx21_nfc(host) || is_imx27_nfc(host))
+ this->ecc.strength = 1;
+ else
+ this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
+ }
/* second phase scan */
if (nand_scan_tail(mtd)) {
@@ -1231,49 +1568,31 @@ static int __init mxcnd_probe(struct platform_device *pdev)
}
/* Register the partitions */
- nr_parts =
- parse_mtd_partitions(mtd, part_probes, &host->parts, 0);
- if (nr_parts > 0)
- mtd_device_register(mtd, host->parts, nr_parts);
- else if (pdata->parts)
- mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
- else {
- pr_info("Registering %s as whole device\n", mtd->name);
- mtd_device_register(mtd, NULL, 0);
- }
+ mtd_device_parse_register(mtd, part_probes,
+ &(struct mtd_part_parser_data){
+ .of_node = pdev->dev.of_node,
+ },
+ host->pdata.parts,
+ host->pdata.nr_parts);
platform_set_drvdata(pdev, host);
return 0;
escan:
- free_irq(host->irq, host);
-eirq:
- if (host->regs_ip)
- iounmap(host->regs_ip);
- iounmap(host->base);
-eres:
- clk_put(host->clk);
-eclk:
- kfree(host);
+ if (host->clk_act)
+ clk_disable_unprepare(host->clk);
return err;
}
-static int __devexit mxcnd_remove(struct platform_device *pdev)
+static int mxcnd_remove(struct platform_device *pdev)
{
struct mxc_nand_host *host = platform_get_drvdata(pdev);
- clk_put(host->clk);
-
- platform_set_drvdata(pdev, NULL);
-
nand_release(&host->mtd);
- free_irq(host->irq, host);
- if (host->regs_ip)
- iounmap(host->regs_ip);
- iounmap(host->base);
- kfree(host);
+ if (host->clk_act)
+ clk_disable_unprepare(host->clk);
return 0;
}
@@ -1281,23 +1600,14 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
static struct platform_driver mxcnd_driver = {
.driver = {
.name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(mxcnd_dt_ids),
},
- .remove = __devexit_p(mxcnd_remove),
+ .id_table = mxcnd_devtype,
+ .probe = mxcnd_probe,
+ .remove = mxcnd_remove,
};
-
-static int __init mxc_nd_init(void)
-{
- return platform_driver_probe(&mxcnd_driver, mxcnd_probe);
-}
-
-static void __exit mxc_nd_cleanup(void)
-{
- /* Unregister the device structure */
- platform_driver_unregister(&mxcnd_driver);
-}
-
-module_init(mxc_nd_init);
-module_exit(mxc_nd_cleanup);
+module_platform_driver(mxcnd_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MXC NAND MTD driver");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a46e9bb847b..4f3e80c68a2 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4,7 +4,6 @@
* Overview:
* This is the generic MTD driver for NAND flash devices. It should be
* capable of working with almost all NAND chips currently available.
- * Basic support for AG-AND chips is provided.
*
* Additional technical information is available on
* http://www.linux-mtd.infradead.org/doc/nand.html
@@ -21,9 +20,7 @@
* TODO:
* Enable cached programming for 2k page size chips
* Check, if mtd->ecctype should be set to MTD_ECC_HW
- * if we have HW ecc support.
- * The AG-AND chips have nice features for speed improvement,
- * which are not supported yet. Read / program 4 pages in one go.
+ * if we have HW ECC support.
* BBT table is not serialized, has to be fixed
*
* This program is free software; you can redistribute it and/or modify
@@ -32,12 +29,15 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -93,8 +93,7 @@ static struct nand_ecclayout nand_oob_128 = {
.length = 78} }
};
-static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd,
- int new_state);
+static int nand_get_device(struct mtd_info *mtd, int new_state);
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops);
@@ -112,22 +111,14 @@ static int check_offs_len(struct mtd_info *mtd,
int ret = 0;
/* Start address must align on block boundary */
- if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
+ if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
+ pr_debug("%s: unaligned address\n", __func__);
ret = -EINVAL;
}
/* Length must align on block boundary */
- if (len & ((1 << chip->phys_erase_shift) - 1)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
- __func__);
- ret = -EINVAL;
- }
-
- /* Do not allow past end of device */
- if (ofs + len > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n",
- __func__);
+ if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
+ pr_debug("%s: length not block aligned\n", __func__);
ret = -EINVAL;
}
@@ -136,17 +127,14 @@ static int check_offs_len(struct mtd_info *mtd,
/**
* nand_release_device - [GENERIC] release chip
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*
- * Deselect, release chip lock and wake up anyone waiting on the device
+ * Release chip lock and wake up anyone waiting on the device.
*/
static void nand_release_device(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
- /* De-select the NAND device */
- chip->select_chip(mtd, -1);
-
/* Release the controller and the chip */
spin_lock(&chip->controller->lock);
chip->controller->active = NULL;
@@ -157,9 +145,9 @@ static void nand_release_device(struct mtd_info *mtd)
/**
* nand_read_byte - [DEFAULT] read one byte from the chip
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*
- * Default read function for 8bit buswith
+ * Default read function for 8bit buswidth
*/
static uint8_t nand_read_byte(struct mtd_info *mtd)
{
@@ -168,11 +156,12 @@ static uint8_t nand_read_byte(struct mtd_info *mtd)
}
/**
- * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip
- * @mtd: MTD device structure
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth with endianness conversion.
*
- * Default read function for 16bit buswith with
- * endianess conversion
*/
static uint8_t nand_read_byte16(struct mtd_info *mtd)
{
@@ -182,10 +171,9 @@ static uint8_t nand_read_byte16(struct mtd_info *mtd)
/**
* nand_read_word - [DEFAULT] read one word from the chip
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*
- * Default read function for 16bit buswith without
- * endianess conversion
+ * Default read function for 16bit buswidth without endianness conversion.
*/
static u16 nand_read_word(struct mtd_info *mtd)
{
@@ -195,8 +183,8 @@ static u16 nand_read_word(struct mtd_info *mtd)
/**
* nand_select_chip - [DEFAULT] control CE line
- * @mtd: MTD device structure
- * @chipnr: chipnumber to select, -1 for deselect
+ * @mtd: MTD device structure
+ * @chipnr: chipnumber to select, -1 for deselect
*
* Default select function for 1 chip devices.
*/
@@ -217,134 +205,127 @@ static void nand_select_chip(struct mtd_info *mtd, int chipnr)
}
/**
- * nand_write_buf - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
+ * nand_write_byte - [DEFAULT] write single byte to chip
+ * @mtd: MTD device structure
+ * @byte: value to write
*
- * Default write function for 8bit buswith
+ * Default function to write a byte to I/O[7:0]
*/
-static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
{
- int i;
struct nand_chip *chip = mtd->priv;
- for (i = 0; i < len; i++)
- writeb(buf[i], chip->IO_ADDR_W);
+ chip->write_buf(mtd, &byte, 1);
}
/**
- * nand_read_buf - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
+ * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
+ * @mtd: MTD device structure
+ * @byte: value to write
*
- * Default read function for 8bit buswith
+ * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
*/
-static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
{
- int i;
struct nand_chip *chip = mtd->priv;
+ uint16_t word = byte;
- for (i = 0; i < len; i++)
- buf[i] = readb(chip->IO_ADDR_R);
+ /*
+ * It's not entirely clear what should happen to I/O[15:8] when writing
+ * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
+ *
+ * When the host supports a 16-bit bus width, only data is
+ * transferred at the 16-bit width. All address and command line
+ * transfers shall use only the lower 8-bits of the data bus. During
+ * command transfers, the host may place any value on the upper
+ * 8-bits of the data bus. During address transfers, the host shall
+ * set the upper 8-bits of the data bus to 00h.
+ *
+ * One user of the write_byte callback is nand_onfi_set_features. The
+ * four parameters are specified to be written to I/O[7:0], but this is
+ * neither an address nor a command transfer. Let's assume a 0 on the
+ * upper I/O lines is OK.
+ */
+ chip->write_buf(mtd, (uint8_t *)&word, 2);
}
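/*
 * Usage sketch (not from the patch; the exact helper code may differ): a
 * caller such as the ONFI SET FEATURES helper mentioned above pushes its
 * four parameter bytes through this hook one at a time,
 *
 *	chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature_addr, -1);
 *	for (i = 0; i < 4; i++)
 *		chip->write_byte(mtd, params[i]);
 *
 * so on a 16-bit chip each byte goes out as a word with 0 in the upper half.
 */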
/**
- * nand_verify_buf - [DEFAULT] Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
+ * nand_write_buf - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
*
- * Default verify function for 8bit buswith
+ * Default write function for 8bit buswidth.
*/
-static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
- int i;
struct nand_chip *chip = mtd->priv;
- for (i = 0; i < len; i++)
- if (buf[i] != readb(chip->IO_ADDR_R))
- return -EFAULT;
- return 0;
+ iowrite8_rep(chip->IO_ADDR_W, buf, len);
}
/**
- * nand_write_buf16 - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
+ * nand_read_buf - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
*
- * Default write function for 16bit buswith
+ * Default read function for 8bit buswidth.
*/
-static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
- int i;
struct nand_chip *chip = mtd->priv;
- u16 *p = (u16 *) buf;
- len >>= 1;
-
- for (i = 0; i < len; i++)
- writew(p[i], chip->IO_ADDR_W);
+ ioread8_rep(chip->IO_ADDR_R, buf, len);
}
/**
- * nand_read_buf16 - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
+ * nand_write_buf16 - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
*
- * Default read function for 16bit buswith
+ * Default write function for 16bit buswidth.
*/
-static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
+static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
{
- int i;
struct nand_chip *chip = mtd->priv;
u16 *p = (u16 *) buf;
- len >>= 1;
- for (i = 0; i < len; i++)
- p[i] = readw(chip->IO_ADDR_R);
+ iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
}
/**
- * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
+ * nand_read_buf16 - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
*
- * Default verify function for 16bit buswith
+ * Default read function for 16bit buswidth.
*/
-static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
- int i;
struct nand_chip *chip = mtd->priv;
u16 *p = (u16 *) buf;
- len >>= 1;
- for (i = 0; i < len; i++)
- if (p[i] != readw(chip->IO_ADDR_R))
- return -EFAULT;
-
- return 0;
+ ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
}
/**
* nand_block_bad - [DEFAULT] Read bad block marker from the chip
- * @mtd: MTD device structure
- * @ofs: offset from device start
- * @getchip: 0, if the chip is already selected
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
*
* Check, if the block is bad.
*/
static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
- int page, chipnr, res = 0;
+ int page, chipnr, res = 0, i = 0;
struct nand_chip *chip = mtd->priv;
u16 bad;
- if (chip->options & NAND_BBT_SCANLASTPAGE)
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
ofs += mtd->erasesize - mtd->writesize;
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
@@ -352,89 +333,131 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
if (getchip) {
chipnr = (int)(ofs >> chip->chip_shift);
- nand_get_device(chip, mtd, FL_READING);
+ nand_get_device(mtd, FL_READING);
/* Select the NAND device */
chip->select_chip(mtd, chipnr);
}
- if (chip->options & NAND_BUSWIDTH_16) {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE,
- page);
- bad = cpu_to_le16(chip->read_word(mtd));
- if (chip->badblockpos & 0x1)
- bad >>= 8;
- else
- bad &= 0xFF;
- } else {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
- bad = chip->read_byte(mtd);
- }
+ do {
+ if (chip->options & NAND_BUSWIDTH_16) {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB,
+ chip->badblockpos & 0xFE, page);
+ bad = cpu_to_le16(chip->read_word(mtd));
+ if (chip->badblockpos & 0x1)
+ bad >>= 8;
+ else
+ bad &= 0xFF;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
+ page);
+ bad = chip->read_byte(mtd);
+ }
- if (likely(chip->badblockbits == 8))
- res = bad != 0xFF;
- else
- res = hweight8(bad) < chip->badblockbits;
+ if (likely(chip->badblockbits == 8))
+ res = bad != 0xFF;
+ else
+ res = hweight8(bad) < chip->badblockbits;
+ ofs += mtd->writesize;
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+ i++;
+ } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
- if (getchip)
+ if (getchip) {
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
+ }
return res;
}
/**
- * nand_default_block_markbad - [DEFAULT] mark a block bad
- * @mtd: MTD device structure
- * @ofs: offset from device start
+ * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
*
- * This is the default implementation, which can be overridden by
- * a hardware specific driver.
-*/
+ * This is the default implementation, which can be overridden by a hardware
+ * specific driver. It provides the details for writing a bad block marker to a
+ * block.
+ */
static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops;
uint8_t buf[2] = { 0, 0 };
- int block, ret, i = 0;
+ int ret = 0, res, i = 0;
- if (chip->options & NAND_BBT_SCANLASTPAGE)
+ ops.datbuf = NULL;
+ ops.oobbuf = buf;
+ ops.ooboffs = chip->badblockpos;
+ if (chip->options & NAND_BUSWIDTH_16) {
+ ops.ooboffs &= ~0x01;
+ ops.len = ops.ooblen = 2;
+ } else {
+ ops.len = ops.ooblen = 1;
+ }
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ /* Write to first/last page(s) if necessary */
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
ofs += mtd->erasesize - mtd->writesize;
+ do {
+ res = nand_do_write_oob(mtd, ofs, &ops);
+ if (!ret)
+ ret = res;
- /* Get block number */
- block = (int)(ofs >> chip->bbt_erase_shift);
- if (chip->bbt)
- chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+ i++;
+ ofs += mtd->writesize;
+ } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
- /* Do we have a flash based bad block table ? */
- if (chip->options & NAND_USE_FLASH_BBT)
- ret = nand_update_bbt(mtd, ofs);
- else {
- nand_get_device(chip, mtd, FL_WRITING);
+ return ret;
+}
- /* Write to first two pages and to byte 1 and 6 if necessary.
- * If we write to more than one location, the first error
- * encountered quits the procedure. We write two bytes per
- * location, so we dont have to mess with 16 bit access.
- */
- do {
- chip->ops.len = chip->ops.ooblen = 2;
- chip->ops.datbuf = NULL;
- chip->ops.oobbuf = buf;
- chip->ops.ooboffs = chip->badblockpos & ~0x01;
-
- ret = nand_do_write_oob(mtd, ofs, &chip->ops);
-
- if (!ret && (chip->options & NAND_BBT_SCANBYTE1AND6)) {
- chip->ops.ooboffs = NAND_SMALL_BADBLOCK_POS
- & ~0x01;
- ret = nand_do_write_oob(mtd, ofs, &chip->ops);
- }
- i++;
- ofs += mtd->writesize;
- } while (!ret && (chip->options & NAND_BBT_SCAN2NDPAGE) &&
- i < 2);
+/**
+ * nand_block_markbad_lowlevel - mark a block bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This function performs the generic NAND bad block marking steps (i.e., bad
+ * block table(s) and/or marker(s)). We only allow the hardware driver to
+ * specify how to write bad block markers to OOB (chip->block_markbad).
+ *
+ * We try operations in the following order:
+ * (1) erase the affected block, to allow OOB marker to be written cleanly
+ * (2) write bad block marker to OOB area of affected block (unless flag
+ * NAND_BBT_NO_OOB_BBM is present)
+ * (3) update the BBT
+ * Note that we retain the first error encountered in (2) or (3), finish the
+ * procedures, and dump the error in the end.
+*/
+static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+ int res, ret = 0;
+
+ if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
+ struct erase_info einfo;
+ /* Attempt erase before marking OOB */
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = ofs;
+ einfo.len = 1ULL << chip->phys_erase_shift;
+ nand_erase_nand(mtd, &einfo, 0);
+
+ /* Write bad block marker to OOB */
+ nand_get_device(mtd, FL_WRITING);
+ ret = chip->block_markbad(mtd, ofs);
nand_release_device(mtd);
}
+
+ /* Mark block bad in BBT */
+ if (chip->bbt) {
+ res = nand_markbad_bbt(mtd, ofs);
+ if (!ret)
+ ret = res;
+ }
+
if (!ret)
mtd->ecc_stats.badblocks++;
@@ -443,16 +466,16 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
/**
* nand_check_wp - [GENERIC] check if the chip is write protected
- * @mtd: MTD device structure
- * Check, if the device is write protected
+ * @mtd: MTD device structure
*
- * The function expects, that the device is already selected
+ * Check, if the device is write protected. The function expects, that the
+ * device is already selected.
*/
static int nand_check_wp(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
- /* broken xD cards report WP despite being writable */
+ /* Broken xD cards report WP despite being writable */
if (chip->options & NAND_BROKEN_XD)
return 0;
@@ -463,10 +486,10 @@ static int nand_check_wp(struct mtd_info *mtd)
/**
* nand_block_checkbad - [GENERIC] Check if a block is marked bad
- * @mtd: MTD device structure
- * @ofs: offset from device start
- * @getchip: 0, if the chip is already selected
- * @allowbbt: 1, if its allowed to access the bbt area
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
+ * @allowbbt: 1, if it's allowed to access the bbt area
*
* Check, if the block is bad. Either by reading the bad block table or
* calling of the scan function.
@@ -485,8 +508,8 @@ static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
/**
* panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
- * @mtd: MTD device structure
- * @timeo: Timeout
+ * @mtd: MTD device structure
+ * @timeo: Timeout
*
* Helper function for nand_wait_ready used when needing to wait in interrupt
* context.
@@ -505,21 +528,18 @@ static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
}
}
-/*
- * Wait for the ready pin, after a command
- * The timeout is catched later.
- */
+/* Wait for the ready pin, after a command. The timeout is caught later. */
void nand_wait_ready(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
- unsigned long timeo = jiffies + 2;
+ unsigned long timeo = jiffies + msecs_to_jiffies(20);
/* 400ms timeout */
if (in_interrupt() || oops_in_progress)
return panic_nand_wait_ready(mtd, 400);
led_trigger_event(nand_led_trigger, LED_FULL);
- /* wait until command is processed or timeout occures */
+ /* Wait until command is processed or timeout occurs */
do {
if (chip->dev_ready(mtd))
break;
@@ -531,13 +551,13 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
/**
* nand_command - [DEFAULT] Send command to NAND device
- * @mtd: MTD device structure
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
*
- * Send command to NAND device. This function is used for small page
- * devices (256/512 Bytes per page)
+ * Send command to NAND device. This function is used for small page devices
+ * (512 Bytes per page).
*/
static void nand_command(struct mtd_info *mtd, unsigned int command,
int column, int page_addr)
@@ -545,9 +565,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
register struct nand_chip *chip = mtd->priv;
int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
- /*
- * Write out the command to the device.
- */
+ /* Write out the command to the device */
if (command == NAND_CMD_SEQIN) {
int readcmd;
@@ -567,14 +585,13 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
}
chip->cmd_ctrl(mtd, command, ctrl);
- /*
- * Address cycle, when necessary
- */
+ /* Address cycle, when necessary */
ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
/* Serially input address */
if (column != -1) {
/* Adjust columns for 16 bit buswidth */
- if (chip->options & NAND_BUSWIDTH_16)
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
column >>= 1;
chip->cmd_ctrl(mtd, column, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
@@ -590,8 +607,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
/*
- * program and erase have their own busy handlers
- * status and sequential in needs no delay
+ * Program and erase have their own busy handlers; status and sequential
+ * in need no delay
*/
switch (command) {
@@ -625,8 +642,10 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
return;
}
}
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
ndelay(100);
nand_wait_ready(mtd);
@@ -634,14 +653,14 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
/**
* nand_command_lp - [DEFAULT] Send command to NAND large page device
- * @mtd: MTD device structure
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
*
* Send command to NAND device. This is the version for the new large page
- * devices We dont have the separate regions as we have in the small page
- * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
+ * devices. We don't have the separate regions as we have in the small page
+ * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
*/
static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
int column, int page_addr)
@@ -655,8 +674,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
/* Command latch cycle */
- chip->cmd_ctrl(mtd, command & 0xff,
- NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
if (column != -1 || page_addr != -1) {
int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@@ -664,7 +682,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
/* Serially input address */
if (column != -1) {
/* Adjust columns for 16 bit buswidth */
- if (chip->options & NAND_BUSWIDTH_16)
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
column >>= 1;
chip->cmd_ctrl(mtd, column, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
@@ -683,8 +702,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
/*
- * program and erase have their own busy handlers
- * status, sequential in, and deplete1 need no delay
+ * Program and erase have their own busy handlers; status, sequential
+ * in, and deplete1 need no delay.
*/
switch (command) {
@@ -695,18 +714,6 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
case NAND_CMD_SEQIN:
case NAND_CMD_RNDIN:
case NAND_CMD_STATUS:
- case NAND_CMD_DEPLETE1:
- return;
-
- /*
- * read error status commands require only a short delay
- */
- case NAND_CMD_STATUS_ERROR:
- case NAND_CMD_STATUS_ERROR0:
- case NAND_CMD_STATUS_ERROR1:
- case NAND_CMD_STATUS_ERROR2:
- case NAND_CMD_STATUS_ERROR3:
- udelay(chip->chip_delay);
return;
case NAND_CMD_RESET:
@@ -739,7 +746,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
default:
/*
* If we don't have access to the busy pin, we apply the given
- * command delay
+ * command delay.
*/
if (!chip->dev_ready) {
udelay(chip->chip_delay);
@@ -747,8 +754,10 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
}
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
ndelay(100);
nand_wait_ready(mtd);
@@ -756,31 +765,31 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
/**
* panic_nand_get_device - [GENERIC] Get chip for selected access
- * @chip: the nand chip descriptor
- * @mtd: MTD device structure
- * @new_state: the state which is requested
+ * @chip: the nand chip descriptor
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
*
* Used when in panic, no locks are taken.
*/
static void panic_nand_get_device(struct nand_chip *chip,
struct mtd_info *mtd, int new_state)
{
- /* Hardware controller shared among independend devices */
+ /* Hardware controller shared among independent devices */
chip->controller->active = chip;
chip->state = new_state;
}
/**
* nand_get_device - [GENERIC] Get chip for selected access
- * @chip: the nand chip descriptor
- * @mtd: MTD device structure
- * @new_state: the state which is requested
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
*
* Get the device and lock it for exclusive access
*/
static int
-nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
+nand_get_device(struct mtd_info *mtd, int new_state)
{
+ struct nand_chip *chip = mtd->priv;
spinlock_t *lock = &chip->controller->lock;
wait_queue_head_t *wq = &chip->controller->wq;
DECLARE_WAITQUEUE(wait, current);
@@ -812,10 +821,10 @@ retry:
}
/**
- * panic_nand_wait - [GENERIC] wait until the command is done
- * @mtd: MTD device structure
- * @chip: NAND chip structure
- * @timeo: Timeout
+ * panic_nand_wait - [GENERIC] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @timeo: timeout
*
* Wait for command done. This is a helper function for nand_wait used when
* we are in interrupt context. May happen when in panic and trying to write
@@ -838,39 +847,34 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_wait - [DEFAULT] wait until the command is done
- * @mtd: MTD device structure
- * @chip: NAND chip structure
+ * nand_wait - [DEFAULT] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
*
- * Wait for command done. This applies to erase and program only
- * Erase can take up to 400ms and program up to 20ms according to
- * general NAND and SmartMedia specs
+ * Wait for command done. This applies to erase and program only. Erase can
+ * take up to 400ms and program up to 20ms according to general NAND and
+ * SmartMedia specs.
*/
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
- unsigned long timeo = jiffies;
int status, state = chip->state;
-
- if (state == FL_ERASING)
- timeo += (HZ * 400) / 1000;
- else
- timeo += (HZ * 20) / 1000;
+ unsigned long timeo = (state == FL_ERASING ? 400 : 20);
led_trigger_event(nand_led_trigger, LED_FULL);
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in any
+ * case on any machine.
+ */
ndelay(100);
- if ((state == FL_ERASING) && (chip->options & NAND_IS_AND))
- chip->cmdfunc(mtd, NAND_CMD_STATUS_MULTI, -1, -1);
- else
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
if (in_interrupt() || oops_in_progress)
panic_nand_wait(mtd, chip, timeo);
else {
+ timeo = jiffies + msecs_to_jiffies(timeo);
while (time_before(jiffies, timeo)) {
if (chip->dev_ready) {
if (chip->dev_ready(mtd))
@@ -885,21 +889,22 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
led_trigger_event(nand_led_trigger, LED_OFF);
status = (int)chip->read_byte(mtd);
+ /* This can happen in case of a timeout or buggy dev_ready */
+ WARN_ON(!(status & NAND_STATUS_READY));
return status;
}
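/*
 * Note (not from the patch): callers interpret the returned status word via
 * the NAND status bits, e.g. the erase and program paths treat
 *
 *	if (status & NAND_STATUS_FAIL)
 *
 * as a failed operation, while NAND_STATUS_READY (checked by the WARN_ON
 * above) only indicates that the device has left the busy state.
 */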
/**
* __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
- *
* @mtd: mtd info
* @ofs: offset to start unlock from
* @len: length to unlock
- * @invert: when = 0, unlock the range of blocks within the lower and
- * upper boundary address
- * when = 1, unlock the range of blocks outside the boundaries
- * of the lower and upper boundary address
+ * @invert: when = 0, unlock the range of blocks within the lower and
+ * upper boundary address
+ * when = 1, unlock the range of blocks outside the boundaries
+ * of the lower and upper boundary address
*
- * return - unlock status
+ * Returns unlock status.
*/
static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
uint64_t len, int invert)
@@ -919,10 +924,9 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
/* Call wait ready function */
status = chip->waitfunc(mtd, chip);
- udelay(1000);
/* See if device thinks it succeeded */
- if (status & 0x01) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
+ if (status & NAND_STATUS_FAIL) {
+ pr_debug("%s: error status = 0x%08x\n",
__func__, status);
ret = -EIO;
}
@@ -932,12 +936,11 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
/**
* nand_unlock - [REPLACEABLE] unlocks specified locked blocks
- *
* @mtd: mtd info
* @ofs: offset to start unlock from
* @len: length to unlock
*
- * return - unlock status
+ * Returns unlock status.
*/
int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
@@ -945,7 +948,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
int chipnr;
struct nand_chip *chip = mtd->priv;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
__func__, (unsigned long long)ofs, len);
if (check_offs_len(mtd, ofs, len))
@@ -955,7 +958,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
if (ofs + len == mtd->size)
len -= mtd->erasesize;
- nand_get_device(chip, mtd, FL_UNLOCKING);
+ nand_get_device(mtd, FL_UNLOCKING);
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
@@ -964,7 +967,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+ pr_debug("%s: device is write protected!\n",
__func__);
ret = -EIO;
goto out;
@@ -973,6 +976,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
ret = __nand_unlock(mtd, ofs, len, 0);
out:
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
return ret;
@@ -981,18 +985,16 @@ EXPORT_SYMBOL(nand_unlock);
/**
* nand_lock - [REPLACEABLE] locks all blocks present in the device
- *
* @mtd: mtd info
* @ofs: offset to start unlock from
* @len: length to unlock
*
- * return - lock status
- *
- * This feature is not supported in many NAND parts. 'Micron' NAND parts
- * do have this feature, but it allows only to lock all blocks, not for
- * specified range for block.
+ * This feature is not supported in many NAND parts. 'Micron' NAND parts do
+ * have this feature, but it allows only locking all blocks, not a specified
+ * range of blocks. Implementing the 'lock' feature by making use of 'unlock',
+ * for now.
*
- * Implementing 'lock' feature by making use of 'unlock', for now.
+ * Returns lock status.
*/
int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
@@ -1000,13 +1002,13 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
int chipnr, status, page;
struct nand_chip *chip = mtd->priv;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
__func__, (unsigned long long)ofs, len);
if (check_offs_len(mtd, ofs, len))
ret = -EINVAL;
- nand_get_device(chip, mtd, FL_LOCKING);
+ nand_get_device(mtd, FL_LOCKING);
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
@@ -1015,7 +1017,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+ pr_debug("%s: device is write protected!\n",
__func__);
status = MTD_ERASE_FAILED;
ret = -EIO;
@@ -1028,10 +1030,9 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Call wait ready function */
status = chip->waitfunc(mtd, chip);
- udelay(1000);
/* See if device thinks it succeeded */
- if (status & 0x01) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
+ if (status & NAND_STATUS_FAIL) {
+ pr_debug("%s: error status = 0x%08x\n",
__func__, status);
ret = -EIO;
goto out;
@@ -1040,6 +1041,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
ret = __nand_unlock(mtd, ofs, len, 0x1);
out:
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
return ret;
@@ -1047,34 +1049,37 @@ out:
EXPORT_SYMBOL(nand_lock);
/**
- * nand_read_page_raw - [Intern] read raw page data without ecc
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @page: page number to read
+ * nand_read_page_raw - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
*
- * Not for syndrome calculating ecc controllers, which use a special oob layout
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
chip->read_buf(mtd, buf, mtd->writesize);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ if (oob_required)
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
}
/**
- * nand_read_page_raw_syndrome - [Intern] read raw page data without ecc
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @page: page number to read
+ * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
*
* We need a special oob layout and handling even when OOB isn't used.
*/
static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip,
- uint8_t *buf, int page)
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1107,14 +1112,15 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
}
/**
- * nand_read_page_swecc - [REPLACABLE] software ecc based page read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @page: page number to read
+ * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
*/
static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1123,8 +1129,9 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *ecc_calc = chip->buffers->ecccalc;
uint8_t *ecc_code = chip->buffers->ecccode;
uint32_t *eccpos = chip->ecc.layout->eccpos;
+ unsigned int max_bitflips = 0;
- chip->ecc.read_page_raw(mtd, chip, buf, page);
+ chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
@@ -1139,24 +1146,28 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
int stat;
stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/**
- * nand_read_subpage - [REPLACABLE] software ecc based sub-page read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @data_offs: offset of requested data within the page
- * @readlen: data length
- * @bufpoi: buffer to store read data
+ * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @data_offs: offset of requested data within the page
+ * @readlen: data length
+ * @bufpoi: buffer to store read data
+ * @page: page number to read
*/
static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
+ uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
+ int page)
{
int start_step, end_step, num_steps;
uint32_t *eccpos = chip->ecc.layout->eccpos;
@@ -1164,14 +1175,16 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
int data_col_addr, i, gaps = 0;
int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
- int index = 0;
+ int index;
+ unsigned int max_bitflips = 0;
- /* Column address wihin the page aligned to ECC size (256bytes). */
+ /* Column address within the page aligned to ECC size (256 bytes) */
start_step = data_offs / chip->ecc.size;
end_step = (data_offs + readlen - 1) / chip->ecc.size;
num_steps = end_step - start_step + 1;
+ index = start_step * chip->ecc.bytes;
- /* Data size aligned to ECC ecc.size*/
+ /* Data size aligned to ecc.size */
datafrag_len = num_steps * chip->ecc.size;
eccfrag_len = num_steps * chip->ecc.bytes;
@@ -1183,16 +1196,16 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
p = bufpoi + data_col_addr;
chip->read_buf(mtd, p, datafrag_len);
- /* Calculate ECC */
+ /* Calculate ECC */
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
- /* The performance is faster if to position offsets
- according to ecc.pos. Let make sure here that
- there are no gaps in ecc positions */
+ /*
+ * Performance is better if we position offsets according to
+ * ecc.pos. Let's make sure that there are no gaps in ECC positions.
+ */
for (i = 0; i < eccfrag_len - 1; i++) {
- if (eccpos[i + start_step * chip->ecc.bytes] + 1 !=
- eccpos[i + start_step * chip->ecc.bytes + 1]) {
+ if (eccpos[i + index] + 1 != eccpos[i + index + 1]) {
gaps = 1;
break;
}
@@ -1201,10 +1214,10 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
} else {
- /* send the command to read the particular ecc bytes */
- /* take care about buswidth alignment in read_buf */
- index = start_step * chip->ecc.bytes;
-
+ /*
+ * Send the command to read the particular ECC bytes; take care
+ * about buswidth alignment in read_buf.
+ */
aligned_pos = eccpos[index] & ~(busw - 1);
aligned_len = eccfrag_len;
if (eccpos[index] & (busw - 1))
@@ -1226,25 +1239,28 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
stat = chip->ecc.correct(mtd, p,
&chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/**
- * nand_read_page_hwecc - [REPLACABLE] hardware ecc based page read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @page: page number to read
+ * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
*
- * Not for syndrome calculating ecc controllers which need a special oob layout
+ * Not for syndrome calculating ECC controllers which need a special oob layout.
*/
static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1253,6 +1269,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *ecc_calc = chip->buffers->ecccalc;
uint8_t *ecc_code = chip->buffers->ecccode;
uint32_t *eccpos = chip->ecc.layout->eccpos;
+ unsigned int max_bitflips = 0;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1271,30 +1288,32 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int stat;
stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/**
- * nand_read_page_hwecc_oob_first - [REPLACABLE] hw ecc, read oob first
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @page: page number to read
+ * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
*
- * Hardware ECC for large page chips, require OOB to be read first.
- * For this ECC mode, the write_page method is re-used from ECC_HW.
- * These methods read/write ECC from the OOB area, unlike the
- * ECC_HW_SYNDROME support with multiple ECC steps, follows the
- * "infix ECC" scheme and reads/writes ECC from the data area, by
- * overwriting the NAND manufacturer bad block markings.
+ * Hardware ECC for large page chips, which require OOB to be read first. For
+ * this ECC mode, the write_page method is re-used from ECC_HW. These methods
+ * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
+ * multiple ECC steps, which follows the "infix ECC" scheme and reads/writes
+ * ECC from the data area, overwriting the NAND manufacturer bad block markings.
*/
static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int page)
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1303,6 +1322,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
uint8_t *ecc_code = chip->buffers->ecccode;
uint32_t *eccpos = chip->ecc.layout->eccpos;
uint8_t *ecc_calc = chip->buffers->ecccalc;
+ unsigned int max_bitflips = 0;
/* Read the OOB area first */
chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
@@ -1320,32 +1340,36 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/**
- * nand_read_page_syndrome - [REPLACABLE] hardware ecc syndrom based page read
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @page: page number to read
+ * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
*
- * The hw generator calculates the error syndrome automatically. Therefor
- * we need a special oob layout and handling.
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
*/
static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
uint8_t *oob = chip->oob_poi;
+ unsigned int max_bitflips = 0;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
@@ -1362,10 +1386,12 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
chip->read_buf(mtd, oob, eccbytes);
stat = chip->ecc.correct(mtd, p, oob, NULL);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
oob += eccbytes;
@@ -1380,33 +1406,33 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
if (i)
chip->read_buf(mtd, oob, i);
- return 0;
+ return max_bitflips;
}
/**
- * nand_transfer_oob - [Internal] Transfer oob to client buffer
- * @chip: nand chip structure
- * @oob: oob destination address
- * @ops: oob ops structure
- * @len: size of oob to transfer
+ * nand_transfer_oob - [INTERN] Transfer oob to client buffer
+ * @chip: nand chip structure
+ * @oob: oob destination address
+ * @ops: oob ops structure
+ * @len: size of oob to transfer
*/
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
struct mtd_oob_ops *ops, size_t len)
{
switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_RAW:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
memcpy(oob, chip->oob_poi + ops->ooboffs, len);
return oob + len;
- case MTD_OOB_AUTO: {
+ case MTD_OPS_AUTO_OOB: {
struct nand_oobfree *free = chip->ecc.layout->oobfree;
uint32_t boffs = 0, roffs = ops->ooboffs;
size_t bytes = 0;
for (; free->length && len; free++, len -= bytes) {
- /* Read request not from offset 0 ? */
+ /* Read request not from offset 0? */
if (unlikely(roffs)) {
if (roffs >= free->length) {
roffs -= free->length;
@@ -1432,31 +1458,53 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
}
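
For illustration only, a minimal sketch of how a client might exercise the MTD_OPS_AUTO_OOB path handled above; the mtd_read_oob() accessor and the helper name are assumptions, not part of this patch.

static int example_read_free_oob(struct mtd_info *mtd, loff_t from,
				 uint8_t *oobbuf, size_t ooblen)
{
	/*
	 * Sketch only: request the ECC-free OOB bytes of one page.
	 * MTD_OPS_AUTO_OOB packs just the bytes listed in ecc.layout->oobfree,
	 * which is exactly the repacking nand_transfer_oob() performs above.
	 */
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_AUTO_OOB,
		.ooblen	= ooblen,
		.oobbuf	= oobbuf,
		/* .datbuf left NULL: OOB-only read */
	};

	return mtd_read_oob(mtd, from, &ops);
}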
/**
- * nand_do_read_ops - [Internal] Read data with ECC
+ * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
+ * @mtd: MTD device structure
+ * @retry_mode: the retry mode to use
*
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob ops structure
+ * Some vendors supply a special command to shift the Vt threshold, to be used
+ * when there are too many bitflips in a page (i.e., ECC error). After setting
+ * a new threshold, the host should retry reading the page.
+ */
+static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ pr_debug("setting READ RETRY mode %d\n", retry_mode);
+
+ if (retry_mode >= chip->read_retries)
+ return -EINVAL;
+
+ if (!chip->setup_read_retry)
+ return -EOPNOTSUPP;
+
+ return chip->setup_read_retry(mtd, retry_mode);
+}
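
As a sketch of the provider side (not part of this patch), a chip driver could wire up these hooks roughly as below. The ONFI feature address 0x89, the retry count of 8 and the onfi_set_features() hook are assumptions, loosely modelled on a Micron-style vendor read-retry scheme.

static int example_setup_read_retry(struct mtd_info *mtd, int retry_mode)
{
	struct nand_chip *chip = mtd->priv;
	/* Assumed vendor scheme: one SET FEATURES byte selects the mode. */
	uint8_t feature[4] = { retry_mode, 0, 0, 0 };

	/* Shift the read threshold; mode 0 restores the default. */
	return chip->onfi_set_features(mtd, chip, 0x89, feature);
}

static void example_init_read_retry(struct nand_chip *chip)
{
	chip->read_retries = 8;			/* assumed mode count */
	chip->setup_read_retry = example_setup_read_retry;
}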
+
+/**
+ * nand_do_read_ops - [INTERN] Read data with ECC
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob ops structure
*
* Internal function. Called with chip held.
*/
static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
- int chipnr, page, realpage, col, bytes, aligned;
+ int chipnr, page, realpage, col, bytes, aligned, oob_required;
struct nand_chip *chip = mtd->priv;
- struct mtd_ecc_stats stats;
- int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
- int sndcmd = 1;
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
- uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ?
+ uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
mtd->oobavail : mtd->oobsize;
uint8_t *bufpoi, *oob, *buf;
-
- stats = mtd->ecc_stats;
+ int use_bufpoi;
+ unsigned int max_bitflips = 0;
+ int retry_mode = 0;
+ bool ecc_fail = false;
chipnr = (int)(from >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
@@ -1468,45 +1516,72 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
buf = ops->datbuf;
oob = ops->oobbuf;
+ oob_required = oob ? 1 : 0;
while (1) {
+ unsigned int ecc_failures = mtd->ecc_stats.failed;
+
bytes = min(mtd->writesize - col, readlen);
aligned = (bytes == mtd->writesize);
- /* Is the current page in the buffer ? */
+ if (!aligned)
+ use_bufpoi = 1;
+ else if (chip->options & NAND_USE_BOUNCE_BUFFER)
+ use_bufpoi = !virt_addr_valid(buf);
+ else
+ use_bufpoi = 0;
+
+ /* Is the current page in the buffer? */
if (realpage != chip->pagebuf || oob) {
- bufpoi = aligned ? buf : chip->buffers->databuf;
+ bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
- if (likely(sndcmd)) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- sndcmd = 0;
- }
+ if (use_bufpoi && aligned)
+ pr_debug("%s: using read bounce buffer for buf@%p\n",
+ __func__, buf);
- /* Now read the page into the buffer */
- if (unlikely(ops->mode == MTD_OOB_RAW))
- ret = chip->ecc.read_page_raw(mtd, chip,
- bufpoi, page);
- else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
+read_retry:
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+
+ /*
+ * Now read the page into the buffer. Absent an error,
+ * the read methods return max bitflips per ecc step.
+ */
+ if (unlikely(ops->mode == MTD_OPS_RAW))
+ ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
+ oob_required,
+ page);
+ else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
+ !oob)
ret = chip->ecc.read_subpage(mtd, chip,
- col, bytes, bufpoi);
+ col, bytes, bufpoi,
+ page);
else
ret = chip->ecc.read_page(mtd, chip, bufpoi,
- page);
- if (ret < 0)
+ oob_required, page);
+ if (ret < 0) {
+ if (use_bufpoi)
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
break;
+ }
+
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
/* Transfer not aligned data */
- if (!aligned) {
- if (!NAND_SUBPAGE_READ(chip) && !oob &&
- !(mtd->ecc_stats.failed - stats.failed))
+ if (use_bufpoi) {
+ if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
+ !(mtd->ecc_stats.failed - ecc_failures) &&
+ (ops->mode != MTD_OPS_RAW)) {
chip->pagebuf = realpage;
+ chip->pagebuf_bitflips = ret;
+ } else {
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+ }
memcpy(buf, chip->buffers->databuf + col, bytes);
}
- buf += bytes;
-
if (unlikely(oob)) {
-
int toread = min(oobreadlen, max_oobsize);
if (toread) {
@@ -1516,30 +1591,53 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
}
}
- if (!(chip->options & NAND_NO_READRDY)) {
- /*
- * Apply delay or wait for ready/busy pin. Do
- * this before the AUTOINCR check, so no
- * problems arise if a chip which does auto
- * increment is marked as NOAUTOINCR by the
- * board driver.
- */
+ if (chip->options & NAND_NEED_READRDY) {
+ /* Apply delay or wait for ready/busy pin */
if (!chip->dev_ready)
udelay(chip->chip_delay);
else
nand_wait_ready(mtd);
}
+
+ if (mtd->ecc_stats.failed - ecc_failures) {
+ if (retry_mode + 1 < chip->read_retries) {
+ retry_mode++;
+ ret = nand_setup_read_retry(mtd,
+ retry_mode);
+ if (ret < 0)
+ break;
+
+ /* Reset failures; retry */
+ mtd->ecc_stats.failed = ecc_failures;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_fail = true;
+ }
+ }
+
+ buf += bytes;
} else {
memcpy(buf, chip->buffers->databuf + col, bytes);
buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ chip->pagebuf_bitflips);
}
readlen -= bytes;
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ ret = nand_setup_read_retry(mtd, 0);
+ if (ret < 0)
+ break;
+ retry_mode = 0;
+ }
+
if (!readlen)
break;
- /* For subsequent reads align to page boundary. */
+ /* For subsequent reads align to page boundary */
col = 0;
/* Increment page address */
realpage++;
@@ -1551,92 +1649,72 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, -1);
chip->select_chip(mtd, chipnr);
}
-
- /* Check, if the chip supports auto page increment
- * or if we have hit a block boundary.
- */
- if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
- sndcmd = 1;
}
+ chip->select_chip(mtd, -1);
ops->retlen = ops->len - (size_t) readlen;
if (oob)
ops->oobretlen = ops->ooblen - oobreadlen;
- if (ret)
+ if (ret < 0)
return ret;
- if (mtd->ecc_stats.failed - stats.failed)
+ if (ecc_fail)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ return max_bitflips;
}
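
For context only, a rough sketch of what the generic MTD read path is expected to do with the max_bitflips value returned above; the _read and bitflip_threshold names are assumptions about the MTD core, not part of this patch.

static int example_mtd_read_caller(struct mtd_info *mtd, loff_t from,
				   size_t len, size_t *retlen, u_char *buf)
{
	int max_bitflips = mtd->_read(mtd, from, len, retlen, buf);

	if (max_bitflips < 0)
		return max_bitflips;	/* hard error, e.g. -EBADMSG */

	/* Report "correctable but degrading" data once a threshold is hit. */
	return max_bitflips >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}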
/**
* nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
- * @mtd: MTD device structure
- * @from: offset to read from
- * @len: number of bytes to read
- * @retlen: pointer to variable to store the number of read bytes
- * @buf: the databuffer to put data
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put data
*
- * Get hold of the chip and call nand_do_read
+ * Get hold of the chip and call nand_do_read.
*/
static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, uint8_t *buf)
{
- struct nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops;
int ret;
- /* Do not allow reads past end of device */
- if ((from + len) > mtd->size)
- return -EINVAL;
- if (!len)
- return 0;
-
- nand_get_device(chip, mtd, FL_READING);
-
- chip->ops.len = len;
- chip->ops.datbuf = buf;
- chip->ops.oobbuf = NULL;
-
- ret = nand_do_read_ops(mtd, from, &chip->ops);
-
- *retlen = chip->ops.retlen;
-
+ nand_get_device(mtd, FL_READING);
+ ops.len = len;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ret = nand_do_read_ops(mtd, from, &ops);
+ *retlen = ops.retlen;
nand_release_device(mtd);
-
return ret;
}
/**
- * nand_read_oob_std - [REPLACABLE] the most common OOB data read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to read
- * @sndcmd: flag whether to issue read command or not
+ * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
*/
static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
- if (sndcmd) {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- sndcmd = 0;
- }
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return sndcmd;
+ return 0;
}
/**
- * nand_read_oob_syndrome - [REPLACABLE] OOB data read function for HW ECC
+ * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
* with syndromes
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to read
- * @sndcmd: flag whether to issue read command or not
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
*/
static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
uint8_t *buf = chip->oob_poi;
int length = mtd->oobsize;
@@ -1663,14 +1741,14 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
if (length > 0)
chip->read_buf(mtd, bufpoi, length);
- return 1;
+ return 0;
}
/**
- * nand_write_oob_std - [REPLACABLE] the most common OOB data write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to write
+ * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
*/
static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
int page)
@@ -1690,11 +1768,11 @@ static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_write_oob_syndrome - [REPLACABLE] OOB data write function for HW ECC
- * with syndrome - only for large page flash !
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to write
+ * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
+ * with syndrome - only for large page flash
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
*/
static int nand_write_oob_syndrome(struct mtd_info *mtd,
struct nand_chip *chip, int page)
@@ -1749,34 +1827,37 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
}
/**
- * nand_do_read_oob - [Intern] NAND read out-of-band
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob operations description structure
+ * nand_do_read_oob - [INTERN] NAND read out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operations description structure
*
- * NAND read out-of-band data from the spare area
+ * NAND read out-of-band data from the spare area.
*/
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
- int page, realpage, chipnr, sndcmd = 1;
+ int page, realpage, chipnr;
struct nand_chip *chip = mtd->priv;
- int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+ struct mtd_ecc_stats stats;
int readlen = ops->ooblen;
int len;
uint8_t *buf = ops->oobbuf;
+ int ret = 0;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08Lx, len = %i\n",
+ pr_debug("%s: from = 0x%08Lx, len = %i\n",
__func__, (unsigned long long)from, readlen);
- if (ops->mode == MTD_OOB_AUTO)
+ stats = mtd->ecc_stats;
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
len = chip->ecc.layout->oobavail;
else
len = mtd->oobsize;
if (unlikely(ops->ooboffs >= len)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start read "
- "outside oob\n", __func__);
+ pr_debug("%s: attempt to start read outside oob\n",
+ __func__);
return -EINVAL;
}
@@ -1784,8 +1865,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
if (unlikely(from >= mtd->size ||
ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
(from >> chip->page_shift)) * len)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read beyond end "
- "of device\n", __func__);
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
return -EINVAL;
}
@@ -1797,18 +1878,19 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
page = realpage & chip->pagemask;
while (1) {
- sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
+ if (ops->mode == MTD_OPS_RAW)
+ ret = chip->ecc.read_oob_raw(mtd, chip, page);
+ else
+ ret = chip->ecc.read_oob(mtd, chip, page);
+
+ if (ret < 0)
+ break;
len = min(len, readlen);
buf = nand_transfer_oob(chip, buf, ops, len);
- if (!(chip->options & NAND_NO_READRDY)) {
- /*
- * Apply delay or wait for ready/busy pin. Do this
- * before the AUTOINCR check, so no problems arise if a
- * chip which does auto increment is marked as
- * NOAUTOINCR by the board driver.
- */
+ if (chip->options & NAND_NEED_READRDY) {
+ /* Apply delay or wait for ready/busy pin */
if (!chip->dev_ready)
udelay(chip->chip_delay);
else
@@ -1829,47 +1911,48 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, -1);
chip->select_chip(mtd, chipnr);
}
-
- /* Check, if the chip supports auto page increment
- * or if we have hit a block boundary.
- */
- if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
- sndcmd = 1;
}
+ chip->select_chip(mtd, -1);
- ops->oobretlen = ops->ooblen;
- return 0;
+ ops->oobretlen = ops->ooblen - readlen;
+
+ if (ret < 0)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
}
/**
* nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob operation description structure
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
*
- * NAND read data and/or out-of-band data
+ * NAND read data and/or out-of-band data.
*/
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
- struct nand_chip *chip = mtd->priv;
int ret = -ENOTSUPP;
ops->retlen = 0;
/* Do not allow reads past end of device */
if (ops->datbuf && (from + ops->len) > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read "
- "beyond end of device\n", __func__);
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
return -EINVAL;
}
- nand_get_device(chip, mtd, FL_READING);
+ nand_get_device(mtd, FL_READING);
switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_AUTO:
- case MTD_OOB_RAW:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
break;
default:
@@ -1888,31 +1971,36 @@ out:
/**
- * nand_write_page_raw - [Intern] raw page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
+ * nand_write_page_raw - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*
- * Not for syndrome calculating ecc controllers, which use a special oob layout
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
-static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
chip->write_buf(mtd, buf, mtd->writesize);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ if (oob_required)
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
}
/**
- * nand_write_page_raw_syndrome - [Intern] raw page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
+ * nand_write_page_raw_syndrome - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*
* We need a special oob layout and handling even when ECC isn't checked.
*/
-static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
+static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1928,7 +2016,7 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
oob += chip->ecc.prepad;
}
- chip->read_buf(mtd, oob, eccbytes);
+ chip->write_buf(mtd, oob, eccbytes);
oob += eccbytes;
if (chip->ecc.postpad) {
@@ -1940,15 +2028,18 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
size = mtd->oobsize - (oob - chip->oob_poi);
if (size)
chip->write_buf(mtd, oob, size);
+
+ return 0;
}
/**
- * nand_write_page_swecc - [REPLACABLE] software ecc based page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
+ * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*/
-static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1957,24 +2048,25 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *p = buf;
uint32_t *eccpos = chip->ecc.layout->eccpos;
- /* Software ecc calculation */
+ /* Software ECC calculation */
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
for (i = 0; i < chip->ecc.total; i++)
chip->oob_poi[eccpos[i]] = ecc_calc[i];
- chip->ecc.write_page_raw(mtd, chip, buf);
+ return chip->ecc.write_page_raw(mtd, chip, buf, 1);
}
/**
- * nand_write_page_hwecc - [REPLACABLE] hardware ecc based page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
+ * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*/
-static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1993,19 +2085,85 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
chip->oob_poi[eccpos[i]] = ecc_calc[i];
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
}
+
/**
- * nand_write_page_syndrome - [REPLACABLE] hardware ecc syndrom based page write
+ * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
* @mtd: mtd info structure
* @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
* @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ */
+static int nand_write_subpage_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *buf,
+ int oob_required)
+{
+ uint8_t *oob_buf = chip->oob_poi;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint32_t start_step = offset / ecc_size;
+ uint32_t end_step = (offset + data_len - 1) / ecc_size;
+ int oob_bytes = mtd->oobsize / ecc_steps;
+ int step, i;
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* configure controller for WRITE access */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* write data (untouched subpages already masked by 0xFF) */
+ chip->write_buf(mtd, buf, ecc_size);
+
+ /* mask ECC of un-touched subpages by padding 0xFF */
+ if ((step < start_step) || (step > end_step))
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ chip->ecc.calculate(mtd, buf, ecc_calc);
+
+ /* mask OOB of un-touched subpages by padding 0xFF */
+ /* if oob_required, preserve OOB metadata of written subpage */
+ if (!oob_required || (step < start_step) || (step > end_step))
+ memset(oob_buf, 0xff, oob_bytes);
+
+ buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ oob_buf += oob_bytes;
+ }
+
+ /* copy calculated ECC for the whole page to chip->oob_poi */
+ /* this includes the masked value (0xFF) for unwritten subpages */
+ ecc_calc = chip->buffers->ecccalc;
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ /* write OOB buffer to NAND device */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
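
A worked example of the subpage masking above, with an assumed geometry:

/*
 * Worked example (assumed geometry, illustration only): writesize = 2048,
 * ecc.size = 512, so ecc.steps = 4. A subpage write of 1024 bytes at
 * column 512 gives:
 *
 *	start_step = 512 / 512              = 1
 *	end_step   = (512 + 1024 - 1) / 512 = 2
 *
 * Steps 1 and 2 get real ECC (and, if oob_required, real OOB); steps 0
 * and 3 keep 0xFF in both ECC and OOB so the untouched subpages are not
 * disturbed by the program operation.
 */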
+
+
+/**
+ * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*
- * The hw generator calculates the error syndrome automatically. Therefor
- * we need a special oob layout and handling.
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
*/
-static void nand_write_page_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
+static int nand_write_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -2037,42 +2195,61 @@ static void nand_write_page_syndrome(struct mtd_info *mtd,
i = mtd->oobsize - (oob - chip->oob_poi);
if (i)
chip->write_buf(mtd, oob, i);
+
+ return 0;
}
/**
* nand_write_page - [REPLACEABLE] write one page
- * @mtd: MTD device structure
- * @chip: NAND chip descriptor
- * @buf: the data to write
- * @page: page number to write
- * @cached: cached programming
- * @raw: use _raw version of write_page
+ * @mtd: MTD device structure
+ * @chip: NAND chip descriptor
+ * @offset: address offset within the page
+ * @data_len: length of actual data to be written
+ * @buf: the data to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ * @cached: cached programming
+ * @raw: use _raw version of write_page
*/
static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int page, int cached, int raw)
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw)
{
- int status;
+ int status, subpage;
+
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
+ chip->ecc.write_subpage)
+ subpage = offset || (data_len < mtd->writesize);
+ else
+ subpage = 0;
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
if (unlikely(raw))
- chip->ecc.write_page_raw(mtd, chip, buf);
+ status = chip->ecc.write_page_raw(mtd, chip, buf,
+ oob_required);
+ else if (subpage)
+ status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
+ buf, oob_required);
else
- chip->ecc.write_page(mtd, chip, buf);
+ status = chip->ecc.write_page(mtd, chip, buf, oob_required);
+
+ if (status < 0)
+ return status;
/*
- * Cached progamming disabled for now, Not sure if its worth the
- * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s)
+ * Cached programming disabled for now. Not sure if it's worth the
+ * trouble. The speed gain is not very impressive (2.3->2.6 MiB/s).
*/
cached = 0;
- if (!cached || !(chip->options & NAND_CACHEPRG)) {
+ if (!cached || !NAND_HAS_CACHEPROG(chip)) {
chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
status = chip->waitfunc(mtd, chip);
/*
* See if operation failed and additional status checks are
- * available
+ * available.
*/
if ((status & NAND_STATUS_FAIL) && (chip->errstat))
status = chip->errstat(mtd, chip, FL_WRITING, status,
@@ -2085,40 +2262,41 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
status = chip->waitfunc(mtd, chip);
}
-#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
- /* Send command to read back the data */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
- if (chip->verify_buf(mtd, buf, mtd->writesize))
- return -EIO;
-#endif
return 0;
}
/**
- * nand_fill_oob - [Internal] Transfer client buffer to oob
- * @chip: nand chip structure
- * @oob: oob data buffer
- * @len: oob data write length
- * @ops: oob ops structure
+ * nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @mtd: MTD device structure
+ * @oob: oob data buffer
+ * @len: oob data write length
+ * @ops: oob ops structure
*/
-static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
- struct mtd_oob_ops *ops)
+static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
{
+ struct nand_chip *chip = mtd->priv;
+
+ /*
+ * Initialise to all 0xFF, to avoid the possibility of left over OOB
+ * data from a previous OOB read.
+ */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+
switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_RAW:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
memcpy(chip->oob_poi + ops->ooboffs, oob, len);
return oob + len;
- case MTD_OOB_AUTO: {
+ case MTD_OPS_AUTO_OOB: {
struct nand_oobfree *free = chip->ecc.layout->oobfree;
uint32_t boffs = 0, woffs = ops->ooboffs;
size_t bytes = 0;
for (; free->length && len; free++, len -= bytes) {
- /* Write request not from offset 0 ? */
+ /* Write request not from offset 0? */
if (unlikely(woffs)) {
if (woffs >= free->length) {
woffs -= free->length;
@@ -2146,12 +2324,12 @@ static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
/**
- * nand_do_write_ops - [Internal] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operations description structure
+ * nand_do_write_ops - [INTERN] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operations description structure
*
- * NAND write with ECC
+ * NAND write with ECC.
*/
static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
@@ -2161,36 +2339,35 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
uint32_t writelen = ops->len;
uint32_t oobwritelen = ops->ooblen;
- uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ?
+ uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
mtd->oobavail : mtd->oobsize;
uint8_t *oob = ops->oobbuf;
uint8_t *buf = ops->datbuf;
- int ret, subpage;
+ int ret;
+ int oob_required = oob ? 1 : 0;
ops->retlen = 0;
if (!writelen)
return 0;
- /* reject writes, which are not page aligned */
+ /* Reject writes which are not page aligned */
if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
- printk(KERN_NOTICE "%s: Attempt to write not "
- "page aligned data\n", __func__);
+ pr_notice("%s: attempt to write non page aligned data\n",
+ __func__);
return -EINVAL;
}
column = to & (mtd->writesize - 1);
- subpage = column || (writelen & (mtd->writesize - 1));
-
- if (subpage && oob)
- return -EINVAL;
chipnr = (int)(to >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
/* Check, if it is write protected */
- if (nand_check_wp(mtd))
- return -EIO;
+ if (nand_check_wp(mtd)) {
+ ret = -EIO;
+ goto err_out;
+ }
realpage = (int)(to >> chip->page_shift);
page = realpage & chip->pagemask;
@@ -2201,23 +2378,33 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
(chip->pagebuf << chip->page_shift) < (to + ops->len))
chip->pagebuf = -1;
- /* If we're not given explicit OOB data, let it be 0xFF */
- if (likely(!oob))
- memset(chip->oob_poi, 0xff, mtd->oobsize);
-
/* Don't allow multipage oob writes with offset */
- if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
- return -EINVAL;
+ if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
while (1) {
int bytes = mtd->writesize;
int cached = writelen > bytes && page != blockmask;
uint8_t *wbuf = buf;
+ int use_bufpoi;
+ int part_pagewr = (column || writelen < (mtd->writesize - 1));
- /* Partial page write ? */
- if (unlikely(column || writelen < (mtd->writesize - 1))) {
+ if (part_pagewr)
+ use_bufpoi = 1;
+ else if (chip->options & NAND_USE_BOUNCE_BUFFER)
+ use_bufpoi = !virt_addr_valid(buf);
+ else
+ use_bufpoi = 0;
+
+ /* Partial page write, or need to use bounce buffer? */
+ if (use_bufpoi) {
+ pr_debug("%s: using write bounce buffer for buf@%p\n",
+ __func__, buf);
cached = 0;
- bytes = min_t(int, bytes - column, (int) writelen);
+ if (part_pagewr)
+ bytes = min_t(int, bytes - column, writelen);
chip->pagebuf = -1;
memset(chip->buffers->databuf, 0xff, mtd->writesize);
memcpy(&chip->buffers->databuf[column], buf, bytes);
@@ -2226,12 +2413,15 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (unlikely(oob)) {
size_t len = min(oobwritelen, oobmaxlen);
- oob = nand_fill_oob(chip, oob, len, ops);
+ oob = nand_fill_oob(mtd, oob, len, ops);
oobwritelen -= len;
+ } else {
+ /* We still need to erase leftover OOB data */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
}
-
- ret = chip->write_page(mtd, chip, wbuf, page, cached,
- (ops->mode == MTD_OOB_RAW));
+ ret = chip->write_page(mtd, chip, column, bytes, wbuf,
+ oob_required, page, cached,
+ (ops->mode == MTD_OPS_RAW));
if (ret)
break;
@@ -2255,16 +2445,19 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
ops->retlen = ops->len - writelen;
if (unlikely(oob))
ops->oobretlen = ops->ooblen;
+
+err_out:
+ chip->select_chip(mtd, -1);
return ret;
}
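
For illustration only, how a controller driver might opt into the bounce-buffer path used by the read and write loops above; the helper name is hypothetical and not part of this patch.

static void example_enable_bounce_buffer(struct nand_chip *chip)
{
	/*
	 * Sketch only: a driver whose read_buf/write_buf DMAs straight from
	 * the caller's buffer sets this flag, so buffers that fail
	 * virt_addr_valid() (e.g. vmalloc'ed ones) are staged through
	 * chip->buffers->databuf instead.
	 */
	chip->options |= NAND_USE_BOUNCE_BUFFER;
}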
/**
* panic_nand_write - [MTD Interface] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
*
* NAND write with ECC. Used when performing writes in interrupt context, this
* may for example be called by mtdoops when writing an oops while in panic.
@@ -2273,74 +2466,60 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
struct nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops;
int ret;
- /* Do not allow reads past end of device */
- if ((to + len) > mtd->size)
- return -EINVAL;
- if (!len)
- return 0;
-
- /* Wait for the device to get ready. */
+ /* Wait for the device to get ready */
panic_nand_wait(mtd, chip, 400);
- /* Grab the device. */
+ /* Grab the device */
panic_nand_get_device(chip, mtd, FL_WRITING);
- chip->ops.len = len;
- chip->ops.datbuf = (uint8_t *)buf;
- chip->ops.oobbuf = NULL;
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ops.oobbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_write_ops(mtd, to, &chip->ops);
+ ret = nand_do_write_ops(mtd, to, &ops);
- *retlen = chip->ops.retlen;
+ *retlen = ops.retlen;
return ret;
}
/**
* nand_write - [MTD Interface] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
*
- * NAND write with ECC
+ * NAND write with ECC.
*/
static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
- struct nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops;
int ret;
- /* Do not allow reads past end of device */
- if ((to + len) > mtd->size)
- return -EINVAL;
- if (!len)
- return 0;
-
- nand_get_device(chip, mtd, FL_WRITING);
-
- chip->ops.len = len;
- chip->ops.datbuf = (uint8_t *)buf;
- chip->ops.oobbuf = NULL;
-
- ret = nand_do_write_ops(mtd, to, &chip->ops);
-
- *retlen = chip->ops.retlen;
-
+ nand_get_device(mtd, FL_WRITING);
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ops.oobbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ret = nand_do_write_ops(mtd, to, &ops);
+ *retlen = ops.retlen;
nand_release_device(mtd);
-
return ret;
}
/**
* nand_do_write_oob - [MTD Interface] NAND write out-of-band
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operation description structure
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
*
- * NAND write out-of-band
+ * NAND write out-of-band.
*/
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
@@ -2348,24 +2527,24 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
int chipnr, page, status, len;
struct nand_chip *chip = mtd->priv;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
+ pr_debug("%s: to = 0x%08x, len = %i\n",
__func__, (unsigned int)to, (int)ops->ooblen);
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
len = chip->ecc.layout->oobavail;
else
len = mtd->oobsize;
/* Do not allow write past end of page */
if ((ops->ooboffs + ops->ooblen) > len) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write "
- "past end of page\n", __func__);
+ pr_debug("%s: attempt to write past end of page\n",
+ __func__);
return -EINVAL;
}
if (unlikely(ops->ooboffs >= len)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start "
- "write outside oob\n", __func__);
+ pr_debug("%s: attempt to start write outside oob\n",
+ __func__);
return -EINVAL;
}
@@ -2374,8 +2553,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
ops->ooboffs + ops->ooblen >
((mtd->size >> chip->page_shift) -
(to >> chip->page_shift)) * len)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond "
- "end of device\n", __func__);
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
return -EINVAL;
}
@@ -2394,17 +2573,23 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
/* Check, if it is write protected */
- if (nand_check_wp(mtd))
+ if (nand_check_wp(mtd)) {
+ chip->select_chip(mtd, -1);
return -EROFS;
+ }
/* Invalidate the page cache, if we write to the cached page */
if (page == chip->pagebuf)
chip->pagebuf = -1;
- memset(chip->oob_poi, 0xff, mtd->oobsize);
- nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
- status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
- memset(chip->oob_poi, 0xff, mtd->oobsize);
+ nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
+
+ if (ops->mode == MTD_OPS_RAW)
+ status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
+ else
+ status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
+
+ chip->select_chip(mtd, -1);
if (status)
return status;
@@ -2416,31 +2601,30 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
/**
* nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operation description structure
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
*/
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
- struct nand_chip *chip = mtd->priv;
int ret = -ENOTSUPP;
ops->retlen = 0;
/* Do not allow writes past end of device */
if (ops->datbuf && (to + ops->len) > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond "
- "end of device\n", __func__);
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
return -EINVAL;
}
- nand_get_device(chip, mtd, FL_WRITING);
+ nand_get_device(mtd, FL_WRITING);
switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_AUTO:
- case MTD_OOB_RAW:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
break;
default:
@@ -2458,80 +2642,58 @@ out:
}
/**
- * single_erease_cmd - [GENERIC] NAND standard block erase command function
- * @mtd: MTD device structure
- * @page: the page address of the block which will be erased
+ * single_erase - [GENERIC] NAND standard block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
*
- * Standard erase command for NAND chips
+ * Standard erase command for NAND chips. Returns NAND status.
*/
-static void single_erase_cmd(struct mtd_info *mtd, int page)
+static int single_erase(struct mtd_info *mtd, int page)
{
struct nand_chip *chip = mtd->priv;
/* Send commands to erase a block */
chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
-}
-/**
- * multi_erease_cmd - [GENERIC] AND specific block erase command function
- * @mtd: MTD device structure
- * @page: the page address of the block which will be erased
- *
- * AND multi block erase command function
- * Erase 4 consecutive blocks
- */
-static void multi_erase_cmd(struct mtd_info *mtd, int page)
-{
- struct nand_chip *chip = mtd->priv;
- /* Send commands to erase a block */
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+ return chip->waitfunc(mtd, chip);
}
/**
* nand_erase - [MTD Interface] erase block(s)
- * @mtd: MTD device structure
- * @instr: erase instruction
+ * @mtd: MTD device structure
+ * @instr: erase instruction
*
- * Erase one ore more blocks
+ * Erase one or more blocks.
*/
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
return nand_erase_nand(mtd, instr, 0);
}
-#define BBT_PAGE_MASK 0xffffff3f
/**
- * nand_erase_nand - [Internal] erase block(s)
- * @mtd: MTD device structure
- * @instr: erase instruction
- * @allowbbt: allow erasing the bbt area
+ * nand_erase_nand - [INTERN] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @allowbbt: allow erasing the bbt area
*
- * Erase one ore more blocks
+ * Erase one or more blocks.
*/
int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
int allowbbt)
{
int page, status, pages_per_block, ret, chipnr;
struct nand_chip *chip = mtd->priv;
- loff_t rewrite_bbt[NAND_MAX_CHIPS] = {0};
- unsigned int bbt_masked_page = 0xffffffff;
loff_t len;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
- __func__, (unsigned long long)instr->addr,
- (unsigned long long)instr->len);
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)instr->addr,
+ (unsigned long long)instr->len);
if (check_offs_len(mtd, instr->addr, instr->len))
return -EINVAL;
- instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
-
/* Grab the lock and see if the device is available */
- nand_get_device(chip, mtd, FL_ERASING);
+ nand_get_device(mtd, FL_ERASING);
/* Shift to get first page */
page = (int)(instr->addr >> chip->page_shift);
@@ -2545,49 +2707,36 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
- __func__);
+ pr_debug("%s: device is write protected!\n",
+ __func__);
instr->state = MTD_ERASE_FAILED;
goto erase_exit;
}
- /*
- * If BBT requires refresh, set the BBT page mask to see if the BBT
- * should be rewritten. Otherwise the mask is set to 0xffffffff which
- * can not be matched. This is also done when the bbt is actually
- * erased to avoid recusrsive updates
- */
- if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
- bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
-
/* Loop through the pages */
len = instr->len;
instr->state = MTD_ERASING;
while (len) {
- /*
- * heck if we have a bad block, we do not erase bad blocks !
- */
+ /* Check if we have a bad block; we do not erase bad blocks! */
if (nand_block_checkbad(mtd, ((loff_t) page) <<
chip->page_shift, 0, allowbbt)) {
- printk(KERN_WARNING "%s: attempt to erase a bad block "
- "at page 0x%08x\n", __func__, page);
+ pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
+ __func__, page);
instr->state = MTD_ERASE_FAILED;
goto erase_exit;
}
/*
* Invalidate the page cache, if we erase the block which
- * contains the current cached page
+ * contains the current cached page.
*/
if (page <= chip->pagebuf && chip->pagebuf <
(page + pages_per_block))
chip->pagebuf = -1;
- chip->erase_cmd(mtd, page & chip->pagemask);
-
- status = chip->waitfunc(mtd, chip);
+ status = chip->erase(mtd, page & chip->pagemask);
/*
* See if operation failed and additional status checks are
@@ -2599,25 +2748,16 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
/* See if block erase succeeded */
if (status & NAND_STATUS_FAIL) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Failed erase, "
- "page 0x%08x\n", __func__, page);
+ pr_debug("%s: failed erase, page 0x%08x\n",
+ __func__, page);
instr->state = MTD_ERASE_FAILED;
instr->fail_addr =
((loff_t)page << chip->page_shift);
goto erase_exit;
}
- /*
- * If BBT requires refresh, set the BBT rewrite flag to the
- * page being erased
- */
- if (bbt_masked_page != 0xffffffff &&
- (page & BBT_PAGE_MASK) == bbt_masked_page)
- rewrite_bbt[chipnr] =
- ((loff_t)page << chip->page_shift);
-
/* Increment page address and decrement length */
- len -= (1 << chip->phys_erase_shift);
+ len -= (1ULL << chip->phys_erase_shift);
page += pages_per_block;
/* Check, if we cross a chip boundary */
@@ -2625,15 +2765,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
chipnr++;
chip->select_chip(mtd, -1);
chip->select_chip(mtd, chipnr);
-
- /*
- * If BBT requires refresh and BBT-PERCHIP, set the BBT
- * page mask to see if this BBT should be rewritten
- */
- if (bbt_masked_page != 0xffffffff &&
- (chip->bbt_td->options & NAND_BBT_PERCHIP))
- bbt_masked_page = chip->bbt_td->pages[chipnr] &
- BBT_PAGE_MASK;
}
}
instr->state = MTD_ERASE_DONE;
@@ -2643,100 +2774,129 @@ erase_exit:
ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
/* Deselect and wake up anyone waiting on the device */
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
/* Do call back function */
if (!ret)
mtd_erase_callback(instr);
- /*
- * If BBT requires refresh and erase was successful, rewrite any
- * selected bad block tables
- */
- if (bbt_masked_page == 0xffffffff || ret)
- return ret;
-
- for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
- if (!rewrite_bbt[chipnr])
- continue;
- /* update the BBT for chip */
- DEBUG(MTD_DEBUG_LEVEL0, "%s: nand_update_bbt "
- "(%d:0x%0llx 0x%0x)\n", __func__, chipnr,
- rewrite_bbt[chipnr], chip->bbt_td->pages[chipnr]);
- nand_update_bbt(mtd, rewrite_bbt[chipnr]);
- }
-
/* Return more or less happy */
return ret;
}
/**
* nand_sync - [MTD Interface] sync
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*
- * Sync is actually a wait for chip ready function
+ * Sync is actually a wait for chip ready function.
*/
static void nand_sync(struct mtd_info *mtd)
{
- struct nand_chip *chip = mtd->priv;
-
- DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
+ pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
- nand_get_device(chip, mtd, FL_SYNCING);
+ nand_get_device(mtd, FL_SYNCING);
/* Release it and go back */
nand_release_device(mtd);
}
/**
* nand_block_isbad - [MTD Interface] Check if block at offset is bad
- * @mtd: MTD device structure
- * @offs: offset relative to mtd start
+ * @mtd: MTD device structure
+ * @offs: offset relative to mtd start
*/
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
- /* Check for invalid offset */
- if (offs > mtd->size)
- return -EINVAL;
-
return nand_block_checkbad(mtd, offs, 1, 0);
}
/**
* nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
- * @mtd: MTD device structure
- * @ofs: offset relative to mtd start
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
*/
static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
- struct nand_chip *chip = mtd->priv;
int ret;
ret = nand_block_isbad(mtd, ofs);
if (ret) {
- /* If it was bad already, return success and do nothing. */
+ /* If it was bad already, return success and do nothing */
if (ret > 0)
return 0;
return ret;
}
- return chip->block_markbad(mtd, ofs);
+ return nand_block_markbad_lowlevel(mtd, ofs);
+}
+
+/**
+ * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
+ * @mtd: MTD device structure
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four-byte array.
+ */
+static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
+ int addr, uint8_t *subfeature_param)
+{
+ int status;
+ int i;
+
+ if (!chip->onfi_version ||
+ !(le16_to_cpu(chip->onfi_params.opt_cmd)
+ & ONFI_OPT_CMD_SET_GET_FEATURES))
+ return -EINVAL;
+
+ chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->write_byte(mtd, subfeature_param[i]);
+
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+ return 0;
+}
+
+/**
+ * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
+ * @mtd: MTD device structure
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four-byte array.
+ */
+static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
+ int addr, uint8_t *subfeature_param)
+{
+ int i;
+
+ if (!chip->onfi_version ||
+ !(le16_to_cpu(chip->onfi_params.opt_cmd)
+ & ONFI_OPT_CMD_SET_GET_FEATURES))
+ return -EINVAL;
+
+ /* clear the sub feature parameters */
+ memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
+
+ chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ *subfeature_param++ = chip->read_byte(mtd);
+ return 0;
}
/**
* nand_suspend - [MTD Interface] Suspend the NAND flash
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*/
static int nand_suspend(struct mtd_info *mtd)
{
- struct nand_chip *chip = mtd->priv;
-
- return nand_get_device(chip, mtd, FL_PM_SUSPENDED);
+ return nand_get_device(mtd, FL_PM_SUSPENDED);
}
/**
* nand_resume - [MTD Interface] Resume the NAND flash
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*/
static void nand_resume(struct mtd_info *mtd)
{
@@ -2745,13 +2905,11 @@ static void nand_resume(struct mtd_info *mtd)
if (chip->state == FL_PM_SUSPENDED)
nand_release_device(mtd);
else
- printk(KERN_ERR "%s called for a chip which is not "
- "in suspended state\n", __func__);
+ pr_err("%s called for a chip which is not in suspended state\n",
+ __func__);
}
-/*
- * Set default functions
- */
+/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip, int busw)
{
/* check for proper chip_delay setup, set 20us if not */
@@ -2768,7 +2926,15 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
if (!chip->select_chip)
chip->select_chip = nand_select_chip;
- if (!chip->read_byte)
+
+ /* set for ONFI nand */
+ if (!chip->onfi_set_features)
+ chip->onfi_set_features = nand_onfi_set_features;
+ if (!chip->onfi_get_features)
+ chip->onfi_get_features = nand_onfi_get_features;
+
+ /* If called twice, pointers that depend on busw may need to be reset */
+ if (!chip->read_byte || chip->read_byte == nand_read_byte)
chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
if (!chip->read_word)
chip->read_word = nand_read_word;
@@ -2776,12 +2942,12 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
chip->block_bad = nand_block_bad;
if (!chip->block_markbad)
chip->block_markbad = nand_default_block_markbad;
- if (!chip->write_buf)
+ if (!chip->write_buf || chip->write_buf == nand_write_buf)
chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
- if (!chip->read_buf)
+ if (!chip->write_byte || chip->write_byte == nand_write_byte)
+ chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
+ if (!chip->read_buf || chip->read_buf == nand_read_buf)
chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
- if (!chip->verify_buf)
- chip->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf;
if (!chip->scan_bbt)
chip->scan_bbt = nand_default_bbt;
@@ -2793,23 +2959,21 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
}
-/*
- * sanitize ONFI strings so we can safely print them
- */
+/* Sanitize ONFI strings so we can safely print them */
static void sanitize_string(uint8_t *s, size_t len)
{
ssize_t i;
- /* null terminate */
+ /* Null terminate */
s[len - 1] = 0;
- /* remove non printable chars */
+ /* Remove non printable chars */
for (i = 0; i < len - 1; i++) {
if (s[i] < ' ' || s[i] > 127)
s[i] = '?';
}
- /* remove trailing spaces */
+ /* Remove trailing spaces */
strim(s);
}
@@ -2825,37 +2989,133 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
return crc;
}
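/*
 * A standalone sketch (hypothetical helper names, not part of this patch) of
 * the CRC-16 used to validate ONFI parameter-page copies, assuming the usual
 * ONFI convention: polynomial 0x8005, initial value 0x4F4E (ONFI_CRC_BASE),
 * CRC computed over bytes 0..253 of each 256-byte copy and compared against
 * the little-endian value stored in bytes 254..255. The JEDEC parameter page
 * below reuses the same CRC over bytes 0..509 of its larger copies.
 */
#include <stdint.h>
#include <stddef.h>

static uint16_t crc16_onfi_sketch(uint16_t crc, const uint8_t *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++ << 8;
		for (i = 0; i < 8; i++)
			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
	}
	return crc;
}

/* Returns 1 when a 256-byte ONFI parameter-page copy carries a valid CRC. */
static int onfi_param_page_crc_ok(const uint8_t page[256])
{
	uint16_t stored = page[254] | (page[255] << 8);

	return crc16_onfi_sketch(0x4F4E, page, 254) == stored;
}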
+/* Parse the Extended Parameter Page. */
+static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
+ struct nand_chip *chip, struct nand_onfi_params *p)
+{
+ struct onfi_ext_param_page *ep;
+ struct onfi_ext_section *s;
+ struct onfi_ext_ecc_info *ecc;
+ uint8_t *cursor;
+ int ret = -EINVAL;
+ int len;
+ int i;
+
+ len = le16_to_cpu(p->ext_param_page_length) * 16;
+ ep = kmalloc(len, GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ /* Send our own NAND_CMD_PARAM. */
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+
+ /* Use the Change Read Column command to skip the ONFI param pages. */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ sizeof(*p) * p->num_of_param_pages, -1);
+
+ /* Read out the Extended Parameter Page. */
+ chip->read_buf(mtd, (uint8_t *)ep, len);
+ if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
+ != le16_to_cpu(ep->crc))) {
+ pr_debug("fail in the CRC.\n");
+ goto ext_out;
+ }
+
+ /*
+ * Check the signature.
+ * This does not strictly follow the ONFI spec; it may change in the future.
+ */
+ if (strncmp(ep->sig, "EPPS", 4)) {
+ pr_debug("The signature is invalid.\n");
+ goto ext_out;
+ }
+
+ /* Find the ECC section. */
+ cursor = (uint8_t *)(ep + 1);
+ for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
+ s = ep->sections + i;
+ if (s->type == ONFI_SECTION_TYPE_2)
+ break;
+ cursor += s->length * 16;
+ }
+ if (i == ONFI_EXT_SECTION_MAX) {
+ pr_debug("We can not find the ECC section.\n");
+ goto ext_out;
+ }
+
+ /* Get the info we want. */
+ ecc = (struct onfi_ext_ecc_info *)cursor;
+
+ if (!ecc->codeword_size) {
+ pr_debug("Invalid codeword size\n");
+ goto ext_out;
+ }
+
+ chip->ecc_strength_ds = ecc->ecc_bits;
+ chip->ecc_step_ds = 1 << ecc->codeword_size;
+ ret = 0;
+
+ext_out:
+ kfree(ep);
+ return ret;
+}
+
+static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
+{
+ struct nand_chip *chip = mtd->priv;
+ uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
+
+ return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
+ feature);
+}
+
+/*
+ * Configure chip properties from Micron vendor-specific ONFI table
+ */
+static void nand_onfi_detect_micron(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
+
+ if (le16_to_cpu(p->vendor_revision) < 1)
+ return;
+
+ chip->read_retries = micron->read_retry_options;
+ chip->setup_read_retry = nand_setup_read_retry_micron;
+}
+
/*
- * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise
+ * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
*/
static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
- int busw)
+ int *busw)
{
struct nand_onfi_params *p = &chip->onfi_params;
- int i;
+ int i, j;
int val;
- /* try ONFI for unknow chip or LP */
+ /* Try ONFI for unknown chip or LP */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
return 0;
- printk(KERN_INFO "ONFI flash detected\n");
chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
for (i = 0; i < 3; i++) {
- chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
+ for (j = 0; j < sizeof(*p); j++)
+ ((uint8_t *)p)[j] = chip->read_byte(mtd);
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
le16_to_cpu(p->crc)) {
- printk(KERN_INFO "ONFI param page %d valid\n", i);
break;
}
}
- if (i == 3)
+ if (i == 3) {
+ pr_err("Could not find valid ONFI parameter page; aborting\n");
return 0;
+ }
- /* check version */
+ /* Check version */
val = le16_to_cpu(p->revision);
if (val & (1 << 5))
chip->onfi_version = 23;
@@ -2867,12 +3127,9 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
chip->onfi_version = 20;
else if (val & (1 << 1))
chip->onfi_version = 10;
- else
- chip->onfi_version = 0;
if (!chip->onfi_version) {
- printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
- __func__, val);
+ pr_info("unsupported ONFI version: %d\n", val);
return 0;
}
@@ -2880,40 +3137,457 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
sanitize_string(p->model, sizeof(p->model));
if (!mtd->name)
mtd->name = p->model;
+
mtd->writesize = le32_to_cpu(p->byte_per_page);
- mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
+
+ /*
+ * pages_per_block and blocks_per_lun may not be a power-of-2 size
+ * (don't ask me who thought of this...). MTD assumes that these
+ * dimensions will be power-of-2, so just truncate the remaining area.
+ */
+ mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize *= mtd->writesize;
+
mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
- chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
- busw = 0;
- if (le16_to_cpu(p->features) & 1)
- busw = NAND_BUSWIDTH_16;
- chip->options &= ~NAND_CHIPOPTIONS_MSK;
- chip->options |= (NAND_NO_READRDY |
- NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
+ /* See erasesize comment */
+ chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+ chip->bits_per_cell = p->bits_per_cell;
+
+ if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
+ *busw = NAND_BUSWIDTH_16;
+ else
+ *busw = 0;
+
+ if (p->ecc_bits != 0xff) {
+ chip->ecc_strength_ds = p->ecc_bits;
+ chip->ecc_step_ds = 512;
+ } else if (chip->onfi_version >= 21 &&
+ (onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
+
+ /*
+ * nand_flash_detect_ext_param_page() uses the Change Read Column
+ * command, which may not be supported by the current chip->cmdfunc,
+ * so try to switch to the large-page variant now. We do not replace
+ * a user-supplied command function.
+ */
+ if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+ chip->cmdfunc = nand_command_lp;
+
+ /* The Extended Parameter Page is supported since ONFI 2.1. */
+ if (nand_flash_detect_ext_param_page(mtd, chip, p))
+ pr_warn("Failed to detect ONFI extended param page\n");
+ } else {
+ pr_warn("Could not retrieve ONFI ECC requirements\n");
+ }
+
+ if (p->jedec_id == NAND_MFR_MICRON)
+ nand_onfi_detect_micron(chip, p);
return 1;
}
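/*
 * A standalone sketch (hypothetical numbers, not part of this patch) of the
 * power-of-2 truncation used above for non-power-of-2 ONFI geometry: a part
 * reporting 96 pages per block of 4096 bytes gives fls(96) = 7, so the block
 * is treated as 1 << 6 = 64 pages and the erase size becomes 256KiB.
 */
#include <stdio.h>

static int fls_sketch(unsigned int x)
{
	int r = 0;		/* 1-based index of the highest set bit */

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int pages_per_block = 96, writesize = 4096;
	unsigned int erasesize;

	erasesize = (1u << (fls_sketch(pages_per_block) - 1)) * writesize;
	printf("erasesize = %u\n", erasesize);	/* 262144 */
	return 0;
}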
/*
- * Get the flash and manufacturer id and lookup if the type is supported
+ * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
+ */
+static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip,
+ int *busw)
+{
+ struct nand_jedec_params *p = &chip->jedec_params;
+ struct jedec_ecc_info *ecc;
+ int val;
+ int i, j;
+
+ /* Try JEDEC for unknown chip or LP */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
+ if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
+ chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
+ chip->read_byte(mtd) != 'C')
+ return 0;
+
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < sizeof(*p); j++)
+ ((uint8_t *)p)[j] = chip->read_byte(mtd);
+
+ if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
+ le16_to_cpu(p->crc))
+ break;
+ }
+
+ if (i == 3) {
+ pr_err("Could not find valid JEDEC parameter page; aborting\n");
+ return 0;
+ }
+
+ /* Check version */
+ val = le16_to_cpu(p->revision);
+ if (val & (1 << 2))
+ chip->jedec_version = 10;
+ else if (val & (1 << 1))
+ chip->jedec_version = 1; /* vendor specific version */
+
+ if (!chip->jedec_version) {
+ pr_info("unsupported JEDEC version: %d\n", val);
+ return 0;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ if (!mtd->name)
+ mtd->name = p->model;
+
+ mtd->writesize = le32_to_cpu(p->byte_per_page);
+
+ /* Please refer to the comment in nand_flash_detect_onfi(). */
+ mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize *= mtd->writesize;
+
+ mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+
+ /* Please refer to the comment in nand_flash_detect_onfi(). */
+ chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+ chip->bits_per_cell = p->bits_per_cell;
+
+ if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
+ *busw = NAND_BUSWIDTH_16;
+ else
+ *busw = 0;
+
+ /* ECC info */
+ ecc = &p->ecc_info[0];
+
+ if (ecc->codeword_size >= 9) {
+ chip->ecc_strength_ds = ecc->ecc_bits;
+ chip->ecc_step_ds = 1 << ecc->codeword_size;
+ } else {
+ pr_warn("Invalid codeword size\n");
+ }
+
+ return 1;
+}
+
+/*
+ * nand_id_has_period - Check if an ID string has a given wraparound period
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ * @period: the period of repetition
+ *
+ * Check if an ID string is repeated within a given sequence of bytes at
+ * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
+ * period of 3). This is a helper function for nand_id_len(). Returns non-zero
+ * if the repetition has a period of @period; otherwise, returns zero.
+ */
+static int nand_id_has_period(u8 *id_data, int arrlen, int period)
+{
+ int i, j;
+ for (i = 0; i < period; i++)
+ for (j = i + period; j < arrlen; j += period)
+ if (id_data[i] != id_data[j])
+ return 0;
+ return 1;
+}
+
+/*
+ * nand_id_len - Get the length of an ID string returned by CMD_READID
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ *
+ * Returns the length of the ID string, according to known wraparound/trailing
+ * zero patterns. If no pattern exists, returns the length of the array.
+ */
+static int nand_id_len(u8 *id_data, int arrlen)
+{
+ int last_nonzero, period;
+
+ /* Find last non-zero byte */
+ for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
+ if (id_data[last_nonzero])
+ break;
+
+ /* All zeros */
+ if (last_nonzero < 0)
+ return 0;
+
+ /* Calculate wraparound period */
+ for (period = 1; period < arrlen; period++)
+ if (nand_id_has_period(id_data, arrlen, period))
+ break;
+
+ /* There's a repeated pattern */
+ if (period < arrlen)
+ return period;
+
+ /* There are trailing zeros */
+ if (last_nonzero < arrlen - 1)
+ return last_nonzero + 1;
+
+ /* No pattern detected */
+ return arrlen;
+}
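/*
 * A standalone sketch of the ID-length heuristic above, using the period-3
 * example from the comment ({0x20, 0x01, 0x7F, 0x20}) plus a hypothetical ID
 * with trailing zeros; the helpers are restated here only so the example is
 * self-contained, not taken verbatim from this patch.
 */
#include <stdio.h>

static int has_period(const unsigned char *id, int arrlen, int period)
{
	int i, j;

	for (i = 0; i < period; i++)
		for (j = i + period; j < arrlen; j += period)
			if (id[i] != id[j])
				return 0;
	return 1;
}

static int id_len(const unsigned char *id, int arrlen)
{
	int last, period;

	for (last = arrlen - 1; last >= 0 && !id[last]; last--)
		;
	if (last < 0)
		return 0;			/* all zeros */
	for (period = 1; period < arrlen; period++)
		if (has_period(id, arrlen, period))
			return period;		/* wraparound detected */
	return (last < arrlen - 1) ? last + 1 : arrlen;
}

int main(void)
{
	unsigned char wrap[4] = { 0x20, 0x01, 0x7F, 0x20 };
	unsigned char tail[8] = { 0xad, 0xf1, 0x80, 0x1d, 0, 0, 0, 0 };

	printf("%d %d\n", id_len(wrap, 4), id_len(tail, 8));	/* 3 4 */
	return 0;
}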
+
+/* Extract the number of bits per cell from the 3rd byte of the extended ID */
+static int nand_get_bits_per_cell(u8 cellinfo)
+{
+ int bits;
+
+ bits = cellinfo & NAND_CI_CELLTYPE_MSK;
+ bits >>= NAND_CI_CELLTYPE_SHIFT;
+ return bits + 1;
+}
+
+/*
+ * Many new NAND chips share similar device ID codes, which represent the size of the
+ * chip. The rest of the parameters must be decoded according to generic or
+ * manufacturer-specific "extended ID" decoding patterns.
+ */
+static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 id_data[8], int *busw)
+{
+ int extid, id_len;
+ /* The 3rd id byte holds MLC / multichip data */
+ chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ /* The 4th id byte is the important one */
+ extid = id_data[3];
+
+ id_len = nand_id_len(id_data, 8);
+
+ /*
+ * Field definitions are in the following datasheets:
+ * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
+ * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
+ * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22)
+ *
+ * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
+ * ID to decide what to do.
+ */
+ if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
+ !nand_is_slc(chip) && id_data[5] != 0x00) {
+ /* Calc pagesize */
+ mtd->writesize = 2048 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
+ case 1:
+ mtd->oobsize = 128;
+ break;
+ case 2:
+ mtd->oobsize = 218;
+ break;
+ case 3:
+ mtd->oobsize = 400;
+ break;
+ case 4:
+ mtd->oobsize = 436;
+ break;
+ case 5:
+ mtd->oobsize = 512;
+ break;
+ case 6:
+ mtd->oobsize = 640;
+ break;
+ case 7:
+ default: /* Other cases are "reserved" (unknown) */
+ mtd->oobsize = 1024;
+ break;
+ }
+ extid >>= 2;
+ /* Calc blocksize */
+ mtd->erasesize = (128 * 1024) <<
+ (((extid >> 1) & 0x04) | (extid & 0x03));
+ *busw = 0;
+ } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
+ !nand_is_slc(chip)) {
+ unsigned int tmp;
+
+ /* Calc pagesize */
+ mtd->writesize = 2048 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
+ case 0:
+ mtd->oobsize = 128;
+ break;
+ case 1:
+ mtd->oobsize = 224;
+ break;
+ case 2:
+ mtd->oobsize = 448;
+ break;
+ case 3:
+ mtd->oobsize = 64;
+ break;
+ case 4:
+ mtd->oobsize = 32;
+ break;
+ case 5:
+ mtd->oobsize = 16;
+ break;
+ default:
+ mtd->oobsize = 640;
+ break;
+ }
+ extid >>= 2;
+ /* Calc blocksize */
+ tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
+ if (tmp < 0x03)
+ mtd->erasesize = (128 * 1024) << tmp;
+ else if (tmp == 0x03)
+ mtd->erasesize = 768 * 1024;
+ else
+ mtd->erasesize = (64 * 1024) << tmp;
+ *busw = 0;
+ } else {
+ /* Calc pagesize */
+ mtd->writesize = 1024 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ mtd->oobsize = (8 << (extid & 0x01)) *
+ (mtd->writesize >> 9);
+ extid >>= 2;
+ /* Calc blocksize. Blocksize is multiples of 64KiB */
+ mtd->erasesize = (64 * 1024) << (extid & 0x03);
+ extid >>= 2;
+ /* Get buswidth information */
+ *busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+
+ /*
+ * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per
+ * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
+ * follows:
+ * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
+ * 110b -> 24nm
+ * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
+ */
+ if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
+ nand_is_slc(chip) &&
+ (id_data[5] & 0x7) == 0x6 /* 24nm */ &&
+ !(id_data[4] & 0x80) /* !BENAND */) {
+ mtd->oobsize = 32 * mtd->writesize >> 9;
+ }
+
+ }
+}
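/*
 * A worked sketch of the generic (non-Samsung/Hynix) branch above, for a
 * hypothetical 4th ID byte of 0x15 on an 8-bit part (not taken from this
 * patch): 2048-byte pages, 64-byte OOB, 128KiB blocks, x8 bus.
 */
#include <stdio.h>

int main(void)
{
	unsigned int extid = 0x15;
	unsigned int writesize, oobsize, erasesize, is_x16;

	writesize = 1024 << (extid & 0x03);			/* 2048 */
	extid >>= 2;
	oobsize = (8 << (extid & 0x01)) * (writesize >> 9);	/* 64 */
	extid >>= 2;
	erasesize = (64 * 1024) << (extid & 0x03);		/* 131072 */
	extid >>= 2;
	is_x16 = extid & 0x01;					/* 0 -> x8 bus */

	printf("%u %u %u %s\n", writesize, oobsize, erasesize,
	       is_x16 ? "x16" : "x8");
	return 0;
}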
+
+/*
+ * Old devices have chip data hardcoded in the device ID table. nand_decode_id
+ * decodes a matching ID table entry and assigns the MTD size parameters for
+ * the chip.
+ */
+static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
+ struct nand_flash_dev *type, u8 id_data[8],
+ int *busw)
+{
+ int maf_id = id_data[0];
+
+ mtd->erasesize = type->erasesize;
+ mtd->writesize = type->pagesize;
+ mtd->oobsize = mtd->writesize / 32;
+ *busw = type->options & NAND_BUSWIDTH_16;
+
+ /* All legacy ID NAND are small-page, SLC */
+ chip->bits_per_cell = 1;
+
+ /*
+ * Check for Spansion/AMD ID + repeating 5th, 6th byte since
+ * some Spansion chips have erasesize that conflicts with size
+ * listed in nand_ids table.
+ * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
+ */
+ if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
+ && id_data[6] == 0x00 && id_data[7] == 0x00
+ && mtd->writesize == 512) {
+ mtd->erasesize = 128 * 1024;
+ mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
+ }
+}
+
+/*
+ * Set the bad block marker/indicator (BBM/BBI) patterns according to some
+ * heuristic patterns using various detected parameters (e.g., manufacturer,
+ * page size, cell-type information).
+ */
+static void nand_decode_bbm_options(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 id_data[8])
+{
+ int maf_id = id_data[0];
+
+ /* Set the bad block position */
+ if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
+ chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
+ else
+ chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
+
+ /*
+ * Bad block marker is stored in the last page of each block on Samsung
+ * and Hynix MLC devices; stored in first two pages of each block on
+ * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
+ * AMD/Spansion, and Macronix. All others scan only the first page.
+ */
+ if (!nand_is_slc(chip) &&
+ (maf_id == NAND_MFR_SAMSUNG ||
+ maf_id == NAND_MFR_HYNIX))
+ chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
+ else if ((nand_is_slc(chip) &&
+ (maf_id == NAND_MFR_SAMSUNG ||
+ maf_id == NAND_MFR_HYNIX ||
+ maf_id == NAND_MFR_TOSHIBA ||
+ maf_id == NAND_MFR_AMD ||
+ maf_id == NAND_MFR_MACRONIX)) ||
+ (mtd->writesize == 2048 &&
+ maf_id == NAND_MFR_MICRON))
+ chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+}
+
+static inline bool is_full_id_nand(struct nand_flash_dev *type)
+{
+ return type->id_len;
+}
+
+static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
+ struct nand_flash_dev *type, u8 *id_data, int *busw)
+{
+ if (!strncmp(type->id, id_data, type->id_len)) {
+ mtd->writesize = type->pagesize;
+ mtd->erasesize = type->erasesize;
+ mtd->oobsize = type->oobsize;
+
+ chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ chip->chipsize = (uint64_t)type->chipsize << 20;
+ chip->options |= type->options;
+ chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
+ chip->ecc_step_ds = NAND_ECC_STEP(type);
+
+ *busw = type->options & NAND_BUSWIDTH_16;
+
+ if (!mtd->name)
+ mtd->name = type->name;
+
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Get the flash and manufacturer id and lookup if the type is supported.
*/
static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
struct nand_chip *chip,
- int busw,
int *maf_id, int *dev_id,
struct nand_flash_dev *type)
{
+ int busw;
int i, maf_idx;
u8 id_data[8];
- int ret;
/* Select the device */
chip->select_chip(mtd, 0);
/*
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
- * after power-up
+ * after power-up.
*/
chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
@@ -2924,7 +3598,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
*maf_id = chip->read_byte(mtd);
*dev_id = chip->read_byte(mtd);
- /* Try again to make sure, as some systems the bus-hold or other
+ /*
+ * Try again to make sure, as on some systems the bus-hold or other
* interface concerns can cause random data which looks like a
* possibly credible NAND flash to appear. If the two results do
* not match, ignore the device completely.
@@ -2932,37 +3607,38 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- for (i = 0; i < 2; i++)
+ /* Read entire ID string */
+ for (i = 0; i < 8; i++)
id_data[i] = chip->read_byte(mtd);
if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
- printk(KERN_INFO "%s: second ID read did not match "
- "%02x,%02x against %02x,%02x\n", __func__,
- *maf_id, *dev_id, id_data[0], id_data[1]);
+ pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
+ *maf_id, *dev_id, id_data[0], id_data[1]);
return ERR_PTR(-ENODEV);
}
if (!type)
type = nand_flash_ids;
- for (; type->name != NULL; type++)
- if (*dev_id == type->id)
- break;
+ for (; type->name != NULL; type++) {
+ if (is_full_id_nand(type)) {
+ if (find_full_id_nand(mtd, chip, type, id_data, &busw))
+ goto ident_done;
+ } else if (*dev_id == type->dev_id) {
+ break;
+ }
+ }
chip->onfi_version = 0;
if (!type->name || !type->pagesize) {
- /* Check is chip is ONFI compliant */
- ret = nand_flash_detect_onfi(mtd, chip, busw);
- if (ret)
+ /* Check if the chip is ONFI compliant */
+ if (nand_flash_detect_onfi(mtd, chip, &busw))
goto ident_done;
- }
-
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
- /* Read entire ID string */
- for (i = 0; i < 8; i++)
- id_data[i] = chip->read_byte(mtd);
+ /* Check if the chip is JEDEC compliant */
+ if (nand_flash_detect_jedec(mtd, chip, &busw))
+ goto ident_done;
+ }
if (!type->name)
return ERR_PTR(-ENODEV);
@@ -2973,125 +3649,54 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->chipsize = (uint64_t)type->chipsize << 20;
if (!type->pagesize && chip->init_size) {
- /* set the pagesize, oobsize, erasesize by the driver*/
+ /* Set the pagesize, oobsize, erasesize by the driver */
busw = chip->init_size(mtd, chip, id_data);
} else if (!type->pagesize) {
- int extid;
- /* The 3rd id byte holds MLC / multichip data */
- chip->cellinfo = id_data[2];
- /* The 4th id byte is the important one */
- extid = id_data[3];
-
- /*
- * Field definitions are in the following datasheets:
- * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
- * New style (6 byte ID): Samsung K9GBG08U0M (p.40)
- *
- * Check for wraparound + Samsung ID + nonzero 6th byte
- * to decide what to do.
- */
- if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
- id_data[0] == NAND_MFR_SAMSUNG &&
- (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
- id_data[5] != 0x00) {
- /* Calc pagesize */
- mtd->writesize = 2048 << (extid & 0x03);
- extid >>= 2;
- /* Calc oobsize */
- switch (extid & 0x03) {
- case 1:
- mtd->oobsize = 128;
- break;
- case 2:
- mtd->oobsize = 218;
- break;
- case 3:
- mtd->oobsize = 400;
- break;
- default:
- mtd->oobsize = 436;
- break;
- }
- extid >>= 2;
- /* Calc blocksize */
- mtd->erasesize = (128 * 1024) <<
- (((extid >> 1) & 0x04) | (extid & 0x03));
- busw = 0;
- } else {
- /* Calc pagesize */
- mtd->writesize = 1024 << (extid & 0x03);
- extid >>= 2;
- /* Calc oobsize */
- mtd->oobsize = (8 << (extid & 0x01)) *
- (mtd->writesize >> 9);
- extid >>= 2;
- /* Calc blocksize. Blocksize is multiples of 64KiB */
- mtd->erasesize = (64 * 1024) << (extid & 0x03);
- extid >>= 2;
- /* Get buswidth information */
- busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
- }
+ /* Decode parameters from extended ID */
+ nand_decode_ext_id(mtd, chip, id_data, &busw);
} else {
- /*
- * Old devices have chip data hardcoded in the device id table
- */
- mtd->erasesize = type->erasesize;
- mtd->writesize = type->pagesize;
- mtd->oobsize = mtd->writesize / 32;
- busw = type->options & NAND_BUSWIDTH_16;
-
- /*
- * Check for Spansion/AMD ID + repeating 5th, 6th byte since
- * some Spansion chips have erasesize that conflicts with size
- * listed in nand_ids table
- * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
- */
- if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 &&
- id_data[5] == 0x00 && id_data[6] == 0x00 &&
- id_data[7] == 0x00 && mtd->writesize == 512) {
- mtd->erasesize = 128 * 1024;
- mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
- }
+ nand_decode_id(mtd, chip, type, id_data, &busw);
}
- /* Get chip options, preserve non chip based options */
- chip->options &= ~NAND_CHIPOPTIONS_MSK;
- chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
+ /* Get chip options */
+ chip->options |= type->options;
- /* Check if chip is a not a samsung device. Do not clear the
- * options for chips which are not having an extended id.
+ /*
+ * Check if chip is not a Samsung device. Do not clear the
+ * options for chips which do not have an extended id.
*/
if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
ident_done:
- /*
- * Set chip as a default. Board drivers can override it, if necessary
- */
- chip->options |= NAND_NO_AUTOINCR;
-
/* Try to identify manufacturer */
for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
if (nand_manuf_ids[maf_idx].id == *maf_id)
break;
}
- /*
- * Check, if buswidth is correct. Hardware drivers should set
- * chip correct !
- */
- if (busw != (chip->options & NAND_BUSWIDTH_16)) {
- printk(KERN_INFO "NAND device: Manufacturer ID:"
- " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
- *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
- printk(KERN_WARNING "NAND bus width %d instead %d bit\n",
- (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
- busw ? 16 : 8);
+ if (chip->options & NAND_BUSWIDTH_AUTO) {
+ WARN_ON(chip->options & NAND_BUSWIDTH_16);
+ chip->options |= busw;
+ nand_set_defaults(chip, busw);
+ } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ /*
+ * Check, if buswidth is correct. Hardware drivers should set
+ * chip correct!
+ */
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ *maf_id, *dev_id);
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name);
+ pr_warn("bus width %d instead %d bit\n",
+ (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
+ busw ? 16 : 8);
return ERR_PTR(-EINVAL);
}
+ nand_decode_bbm_options(mtd, chip, id_data);
+
/* Calculate the address shift from the page size */
chip->page_shift = ffs(mtd->writesize) - 1;
- /* Convert chipsize to number of pages per chip -1. */
+ /* Convert chipsize to number of pages per chip -1 */
chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
chip->bbt_erase_shift = chip->phys_erase_shift =
@@ -3104,96 +3709,65 @@ ident_done:
}
chip->badblockbits = 8;
+ chip->erase = single_erase;
- /* Set the bad block position */
- if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
- chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
- else
- chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
-
- /*
- * Bad block marker is stored in the last page of each block
- * on Samsung and Hynix MLC devices; stored in first two pages
- * of each block on Micron devices with 2KiB pages and on
- * SLC Samsung, Hynix, Toshiba and AMD/Spansion. All others scan
- * only the first page.
- */
- if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
- (*maf_id == NAND_MFR_SAMSUNG ||
- *maf_id == NAND_MFR_HYNIX))
- chip->options |= NAND_BBT_SCANLASTPAGE;
- else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
- (*maf_id == NAND_MFR_SAMSUNG ||
- *maf_id == NAND_MFR_HYNIX ||
- *maf_id == NAND_MFR_TOSHIBA ||
- *maf_id == NAND_MFR_AMD)) ||
- (mtd->writesize == 2048 &&
- *maf_id == NAND_MFR_MICRON))
- chip->options |= NAND_BBT_SCAN2NDPAGE;
-
- /*
- * Numonyx/ST 2K pages, x8 bus use BOTH byte 1 and 6
- */
- if (!(busw & NAND_BUSWIDTH_16) &&
- *maf_id == NAND_MFR_STMICRO &&
- mtd->writesize == 2048) {
- chip->options |= NAND_BBT_SCANBYTE1AND6;
- chip->badblockpos = 0;
- }
-
- /* Check for AND chips with 4 page planes */
- if (chip->options & NAND_4PAGE_ARRAY)
- chip->erase_cmd = multi_erase_cmd;
- else
- chip->erase_cmd = single_erase_cmd;
-
- /* Do not replace user supplied command function ! */
+ /* Do not replace user supplied command function! */
if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
chip->cmdfunc = nand_command_lp;
- /* TODO onfi flash name */
- printk(KERN_INFO "NAND device: Manufacturer ID:"
- " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
- nand_manuf_ids[maf_idx].name,
- chip->onfi_version ? chip->onfi_params.model : type->name);
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ *maf_id, *dev_id);
+ if (chip->onfi_version)
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+ chip->onfi_params.model);
+ else if (chip->jedec_version)
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+ chip->jedec_params.model);
+ else
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+ type->name);
+
+ pr_info("%dMiB, %s, page size: %d, OOB size: %d\n",
+ (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
+ mtd->writesize, mtd->oobsize);
return type;
}
/**
* nand_scan_ident - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
- * @maxchips: Number of chips to scan for
- * @table: Alternative NAND ID table
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
+ * @table: alternative NAND ID table
*
- * This is the first phase of the normal nand_scan() function. It
- * reads the flash ID and sets up MTD fields accordingly.
+ * This is the first phase of the normal nand_scan() function. It reads the
+ * flash ID and sets up MTD fields accordingly.
*
* The mtd->owner field must be set to the module of the caller.
*/
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_flash_dev *table)
{
- int i, busw, nand_maf_id, nand_dev_id;
+ int i, nand_maf_id, nand_dev_id;
struct nand_chip *chip = mtd->priv;
struct nand_flash_dev *type;
- /* Get buswidth to select the correct functions */
- busw = chip->options & NAND_BUSWIDTH_16;
/* Set the default functions */
- nand_set_defaults(chip, busw);
+ nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
/* Read the flash type */
- type = nand_get_flash_type(mtd, chip, busw,
- &nand_maf_id, &nand_dev_id, table);
+ type = nand_get_flash_type(mtd, chip, &nand_maf_id,
+ &nand_dev_id, table);
if (IS_ERR(type)) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
- printk(KERN_WARNING "No NAND device found.\n");
+ pr_warn("No NAND device found\n");
chip->select_chip(mtd, -1);
return PTR_ERR(type);
}
+ chip->select_chip(mtd, -1);
+
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
chip->select_chip(mtd, i);
@@ -3203,11 +3777,14 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
/* Read manufacturer and device IDs */
if (nand_maf_id != chip->read_byte(mtd) ||
- nand_dev_id != chip->read_byte(mtd))
+ nand_dev_id != chip->read_byte(mtd)) {
+ chip->select_chip(mtd, -1);
break;
+ }
+ chip->select_chip(mtd, -1);
}
if (i > 1)
- printk(KERN_INFO "%d NAND chips detected\n", i);
+ pr_info("%d chips detected\n", i);
/* Store the number of chips and calc total size for mtd */
chip->numchips = i;
@@ -3217,48 +3794,97 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
}
EXPORT_SYMBOL(nand_scan_ident);
+/*
+ * Check if the chip configuration meets the datasheet requirements.
+ *
+ * If our configuration corrects A bits per B bytes and the minimum
+ * required correction level is X bits per Y bytes, then we must ensure
+ * both of the following are true:
+ *
+ * (1) A / B >= X / Y
+ * (2) A >= X
+ *
+ * Requirement (1) ensures we can correct for the required bitflip density.
+ * Requirement (2) ensures we can correct even when all bitflips are clumped
+ * in the same sector.
+ */
+static bool nand_ecc_strength_good(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int corr, ds_corr;
+
+ if (ecc->size == 0 || chip->ecc_step_ds == 0)
+ /* Not enough information */
+ return true;
+
+ /*
+ * We get the number of corrected bits per page to compare
+ * the correction density.
+ */
+ corr = (mtd->writesize * ecc->strength) / ecc->size;
+ ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
+
+ return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
+}
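/*
 * A worked sketch of the two checks above, with hypothetical numbers rather
 * than values from this patch: a controller correcting 8 bits per 1024-byte
 * step on a 4KiB-page chip whose datasheet requires 4 bits per 512 bytes
 * gives corr = 4096 * 8 / 1024 = 32 and ds_corr = 4096 * 4 / 512 = 32, and
 * 8 >= 4, so the configuration is considered strong enough.
 */
#include <stdbool.h>
#include <stdio.h>

static bool strength_good(int writesize, int strength, int step,
			  int ds_strength, int ds_step)
{
	int corr = (writesize * strength) / step;
	int ds_corr = (writesize * ds_strength) / ds_step;

	return corr >= ds_corr && strength >= ds_strength;
}

int main(void)
{
	printf("%s\n", strength_good(4096, 8, 1024, 4, 512) ?
	       "ok" : "too weak");
	return 0;
}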
/**
* nand_scan_tail - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*
- * This is the second phase of the normal nand_scan() function. It
- * fills out all the uninitialized function pointers with the defaults
- * and scans for a bad block table if appropriate.
+ * This is the second phase of the normal nand_scan() function. It fills out
+ * all the uninitialized function pointers with the defaults and scans for a
+ * bad block table if appropriate.
*/
int nand_scan_tail(struct mtd_info *mtd)
{
int i;
struct nand_chip *chip = mtd->priv;
-
- if (!(chip->options & NAND_OWN_BUFFERS))
- chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL);
- if (!chip->buffers)
- return -ENOMEM;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct nand_buffers *nbuf;
+
+ /* New bad blocks should be marked in OOB, flash-based BBT, or both */
+ BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
+ !(chip->bbt_options & NAND_BBT_USE_FLASH));
+
+ if (!(chip->options & NAND_OWN_BUFFERS)) {
+ nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
+ + mtd->oobsize * 3, GFP_KERNEL);
+ if (!nbuf)
+ return -ENOMEM;
+ nbuf->ecccalc = (uint8_t *)(nbuf + 1);
+ nbuf->ecccode = nbuf->ecccalc + mtd->oobsize;
+ nbuf->databuf = nbuf->ecccode + mtd->oobsize;
+
+ chip->buffers = nbuf;
+ } else {
+ if (!chip->buffers)
+ return -ENOMEM;
+ }
/* Set the internal oob buffer location, just after the page data */
chip->oob_poi = chip->buffers->databuf + mtd->writesize;
/*
- * If no default placement scheme is given, select an appropriate one
+ * If no default placement scheme is given, select an appropriate one.
*/
- if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) {
+ if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
switch (mtd->oobsize) {
case 8:
- chip->ecc.layout = &nand_oob_8;
+ ecc->layout = &nand_oob_8;
break;
case 16:
- chip->ecc.layout = &nand_oob_16;
+ ecc->layout = &nand_oob_16;
break;
case 64:
- chip->ecc.layout = &nand_oob_64;
+ ecc->layout = &nand_oob_64;
break;
case 128:
- chip->ecc.layout = &nand_oob_128;
+ ecc->layout = &nand_oob_128;
break;
default:
- printk(KERN_WARNING "No oob scheme defined for "
- "oobsize %d\n", mtd->oobsize);
+ pr_warn("No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
BUG();
}
}
@@ -3267,166 +3893,179 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->write_page = nand_write_page;
/*
- * check ECC mode, default to software if 3byte/512byte hardware ECC is
+ * Check ECC mode, default to software if 3byte/512byte hardware ECC is
* selected and we have 256 byte pagesize fallback to software ECC
*/
- switch (chip->ecc.mode) {
+ switch (ecc->mode) {
case NAND_ECC_HW_OOB_FIRST:
/* Similar to NAND_ECC_HW, but a separate read_page handle */
- if (!chip->ecc.calculate || !chip->ecc.correct ||
- !chip->ecc.hwctl) {
- printk(KERN_WARNING "No ECC functions supplied; "
- "Hardware ECC not possible\n");
+ if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
+ pr_warn("No ECC functions supplied; "
+ "hardware ECC not possible\n");
BUG();
}
- if (!chip->ecc.read_page)
- chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_hwecc_oob_first;
case NAND_ECC_HW:
- /* Use standard hwecc read page function ? */
- if (!chip->ecc.read_page)
- chip->ecc.read_page = nand_read_page_hwecc;
- if (!chip->ecc.write_page)
- chip->ecc.write_page = nand_write_page_hwecc;
- if (!chip->ecc.read_page_raw)
- chip->ecc.read_page_raw = nand_read_page_raw;
- if (!chip->ecc.write_page_raw)
- chip->ecc.write_page_raw = nand_write_page_raw;
- if (!chip->ecc.read_oob)
- chip->ecc.read_oob = nand_read_oob_std;
- if (!chip->ecc.write_oob)
- chip->ecc.write_oob = nand_write_oob_std;
+ /* Use standard hwecc read page function? */
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_hwecc;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_hwecc;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_std;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->read_subpage)
+ ecc->read_subpage = nand_read_subpage;
+ if (!ecc->write_subpage)
+ ecc->write_subpage = nand_write_subpage_hwecc;
case NAND_ECC_HW_SYNDROME:
- if ((!chip->ecc.calculate || !chip->ecc.correct ||
- !chip->ecc.hwctl) &&
- (!chip->ecc.read_page ||
- chip->ecc.read_page == nand_read_page_hwecc ||
- !chip->ecc.write_page ||
- chip->ecc.write_page == nand_write_page_hwecc)) {
- printk(KERN_WARNING "No ECC functions supplied; "
- "Hardware ECC not possible\n");
+ if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
+ (!ecc->read_page ||
+ ecc->read_page == nand_read_page_hwecc ||
+ !ecc->write_page ||
+ ecc->write_page == nand_write_page_hwecc)) {
+ pr_warn("No ECC functions supplied; "
+ "hardware ECC not possible\n");
BUG();
}
- /* Use standard syndrome read/write page function ? */
- if (!chip->ecc.read_page)
- chip->ecc.read_page = nand_read_page_syndrome;
- if (!chip->ecc.write_page)
- chip->ecc.write_page = nand_write_page_syndrome;
- if (!chip->ecc.read_page_raw)
- chip->ecc.read_page_raw = nand_read_page_raw_syndrome;
- if (!chip->ecc.write_page_raw)
- chip->ecc.write_page_raw = nand_write_page_raw_syndrome;
- if (!chip->ecc.read_oob)
- chip->ecc.read_oob = nand_read_oob_syndrome;
- if (!chip->ecc.write_oob)
- chip->ecc.write_oob = nand_write_oob_syndrome;
-
- if (mtd->writesize >= chip->ecc.size)
+ /* Use standard syndrome read/write page function? */
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_syndrome;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_syndrome;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw_syndrome;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw_syndrome;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_syndrome;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_syndrome;
+
+ if (mtd->writesize >= ecc->size) {
+ if (!ecc->strength) {
+ pr_warn("Driver must set ecc.strength when using hardware ECC\n");
+ BUG();
+ }
break;
- printk(KERN_WARNING "%d byte HW ECC not possible on "
- "%d byte page size, fallback to SW ECC\n",
- chip->ecc.size, mtd->writesize);
- chip->ecc.mode = NAND_ECC_SOFT;
+ }
+ pr_warn("%d byte HW ECC not possible on "
+ "%d byte page size, fallback to SW ECC\n",
+ ecc->size, mtd->writesize);
+ ecc->mode = NAND_ECC_SOFT;
case NAND_ECC_SOFT:
- chip->ecc.calculate = nand_calculate_ecc;
- chip->ecc.correct = nand_correct_data;
- chip->ecc.read_page = nand_read_page_swecc;
- chip->ecc.read_subpage = nand_read_subpage;
- chip->ecc.write_page = nand_write_page_swecc;
- chip->ecc.read_page_raw = nand_read_page_raw;
- chip->ecc.write_page_raw = nand_write_page_raw;
- chip->ecc.read_oob = nand_read_oob_std;
- chip->ecc.write_oob = nand_write_oob_std;
- if (!chip->ecc.size)
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
+ ecc->calculate = nand_calculate_ecc;
+ ecc->correct = nand_correct_data;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->size)
+ ecc->size = 256;
+ ecc->bytes = 3;
+ ecc->strength = 1;
break;
case NAND_ECC_SOFT_BCH:
if (!mtd_nand_has_bch()) {
- printk(KERN_WARNING "CONFIG_MTD_ECC_BCH not enabled\n");
+ pr_warn("CONFIG_MTD_NAND_ECC_BCH not enabled\n");
BUG();
}
- chip->ecc.calculate = nand_bch_calculate_ecc;
- chip->ecc.correct = nand_bch_correct_data;
- chip->ecc.read_page = nand_read_page_swecc;
- chip->ecc.read_subpage = nand_read_subpage;
- chip->ecc.write_page = nand_write_page_swecc;
- chip->ecc.read_page_raw = nand_read_page_raw;
- chip->ecc.write_page_raw = nand_write_page_raw;
- chip->ecc.read_oob = nand_read_oob_std;
- chip->ecc.write_oob = nand_write_oob_std;
+ ecc->calculate = nand_bch_calculate_ecc;
+ ecc->correct = nand_bch_correct_data;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
/*
* Board driver should supply ecc.size and ecc.bytes values to
* select how many bits are correctable; see nand_bch_init()
- * for details.
- * Otherwise, default to 4 bits for large page devices
+ * for details. Otherwise, default to 4 bits for large page
+ * devices.
*/
- if (!chip->ecc.size && (mtd->oobsize >= 64)) {
- chip->ecc.size = 512;
- chip->ecc.bytes = 7;
+ if (!ecc->size && (mtd->oobsize >= 64)) {
+ ecc->size = 512;
+ ecc->bytes = 7;
}
- chip->ecc.priv = nand_bch_init(mtd,
- chip->ecc.size,
- chip->ecc.bytes,
- &chip->ecc.layout);
- if (!chip->ecc.priv) {
- printk(KERN_WARNING "BCH ECC initialization failed!\n");
+ ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
+ &ecc->layout);
+ if (!ecc->priv) {
+ pr_warn("BCH ECC initialization failed!\n");
BUG();
}
+ ecc->strength = ecc->bytes * 8 / fls(8 * ecc->size);
break;
case NAND_ECC_NONE:
- printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. "
- "This is not recommended !!\n");
- chip->ecc.read_page = nand_read_page_raw;
- chip->ecc.write_page = nand_write_page_raw;
- chip->ecc.read_oob = nand_read_oob_std;
- chip->ecc.read_page_raw = nand_read_page_raw;
- chip->ecc.write_page_raw = nand_write_page_raw;
- chip->ecc.write_oob = nand_write_oob_std;
- chip->ecc.size = mtd->writesize;
- chip->ecc.bytes = 0;
+ pr_warn("NAND_ECC_NONE selected by board driver. "
+ "This is not recommended!\n");
+ ecc->read_page = nand_read_page_raw;
+ ecc->write_page = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->write_oob = nand_write_oob_std;
+ ecc->size = mtd->writesize;
+ ecc->bytes = 0;
+ ecc->strength = 0;
break;
default:
- printk(KERN_WARNING "Invalid NAND_ECC_MODE %d\n",
- chip->ecc.mode);
+ pr_warn("Invalid NAND_ECC_MODE %d\n", ecc->mode);
BUG();
}
+ /* For many systems, the standard OOB write also works for raw */
+ if (!ecc->read_oob_raw)
+ ecc->read_oob_raw = ecc->read_oob;
+ if (!ecc->write_oob_raw)
+ ecc->write_oob_raw = ecc->write_oob;
+
/*
* The number of bytes available for a client to place data into
- * the out of band area
+ * the out of band area.
*/
- chip->ecc.layout->oobavail = 0;
- for (i = 0; chip->ecc.layout->oobfree[i].length
- && i < ARRAY_SIZE(chip->ecc.layout->oobfree); i++)
- chip->ecc.layout->oobavail +=
- chip->ecc.layout->oobfree[i].length;
- mtd->oobavail = chip->ecc.layout->oobavail;
+ ecc->layout->oobavail = 0;
+ for (i = 0; ecc->layout->oobfree[i].length
+ && i < ARRAY_SIZE(ecc->layout->oobfree); i++)
+ ecc->layout->oobavail += ecc->layout->oobfree[i].length;
+ mtd->oobavail = ecc->layout->oobavail;
+
+ /* ECC sanity check: warn if it's too weak */
+ if (!nand_ecc_strength_good(mtd))
+ pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
+ mtd->name);
/*
* Set the number of read / write steps for one page depending on ECC
- * mode
+ * mode.
*/
- chip->ecc.steps = mtd->writesize / chip->ecc.size;
- if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
- printk(KERN_WARNING "Invalid ecc parameters\n");
+ ecc->steps = mtd->writesize / ecc->size;
+ if (ecc->steps * ecc->size != mtd->writesize) {
+ pr_warn("Invalid ECC parameters\n");
BUG();
}
- chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
+ ecc->total = ecc->steps * ecc->bytes;
- /*
- * Allow subpage writes up to ecc.steps. Not possible for MLC
- * FLASH.
- */
- if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
- !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
- switch (chip->ecc.steps) {
+ /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
+ switch (ecc->steps) {
case 2:
mtd->subpage_sft = 1;
break;
@@ -3442,35 +4081,53 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Initialize state */
chip->state = FL_READY;
- /* De-select the device */
- chip->select_chip(mtd, -1);
-
/* Invalidate the pagebuffer reference */
chip->pagebuf = -1;
+ /* Large page NAND with SOFT_ECC should support subpage reads */
+ switch (ecc->mode) {
+ case NAND_ECC_SOFT:
+ case NAND_ECC_SOFT_BCH:
+ if (chip->page_shift > 9)
+ chip->options |= NAND_SUBPAGE_READ;
+ break;
+
+ default:
+ break;
+ }
+
/* Fill in remaining MTD driver data */
- mtd->type = MTD_NANDFLASH;
+ mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
MTD_CAP_NANDFLASH;
- mtd->erase = nand_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
- mtd->read = nand_read;
- mtd->write = nand_write;
- mtd->panic_write = panic_nand_write;
- mtd->read_oob = nand_read_oob;
- mtd->write_oob = nand_write_oob;
- mtd->sync = nand_sync;
- mtd->lock = NULL;
- mtd->unlock = NULL;
- mtd->suspend = nand_suspend;
- mtd->resume = nand_resume;
- mtd->block_isbad = nand_block_isbad;
- mtd->block_markbad = nand_block_markbad;
+ mtd->_erase = nand_erase;
+ mtd->_point = NULL;
+ mtd->_unpoint = NULL;
+ mtd->_read = nand_read;
+ mtd->_write = nand_write;
+ mtd->_panic_write = panic_nand_write;
+ mtd->_read_oob = nand_read_oob;
+ mtd->_write_oob = nand_write_oob;
+ mtd->_sync = nand_sync;
+ mtd->_lock = NULL;
+ mtd->_unlock = NULL;
+ mtd->_suspend = nand_suspend;
+ mtd->_resume = nand_resume;
+ mtd->_block_isbad = nand_block_isbad;
+ mtd->_block_markbad = nand_block_markbad;
mtd->writebufsize = mtd->writesize;
- /* propagate ecc.layout to mtd_info */
- mtd->ecclayout = chip->ecc.layout;
+ /* propagate ecc info to mtd_info */
+ mtd->ecclayout = ecc->layout;
+ mtd->ecc_strength = ecc->strength;
+ mtd->ecc_step_size = ecc->size;
+ /*
+ * Initialize bitflip_threshold to its default prior scan_bbt() call.
+ * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
+ * properly set.
+ */
+ if (!mtd->bitflip_threshold)
+ mtd->bitflip_threshold = mtd->ecc_strength;
/* Check, if we should skip the bad block table scan */
if (chip->options & NAND_SKIP_BBTSCAN)
@@ -3481,9 +4138,11 @@ int nand_scan_tail(struct mtd_info *mtd)
}
EXPORT_SYMBOL(nand_scan_tail);
-/* is_module_text_address() isn't exported, and it's mostly a pointless
+/*
+ * is_module_text_address() isn't exported, and it's mostly a pointless
* test if this is a module _anyway_ -- they'd have to try _really_ hard
- * to call us from in-kernel code if the core NAND support is modular. */
+ * to call us from in-kernel code if the core NAND support is modular.
+ */
#ifdef MODULE
#define caller_is_module() (1)
#else
@@ -3493,15 +4152,13 @@ EXPORT_SYMBOL(nand_scan_tail);
/**
* nand_scan - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
- * @maxchips: Number of chips to scan for
- *
- * This fills out all the uninitialized function pointers
- * with the defaults.
- * The flash ID is read and the mtd/chip structures are
- * filled with the appropriate values.
- * The mtd->owner field must be set to the module of the caller
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
*
+ * This fills out all the uninitialized function pointers with the defaults.
+ * The flash ID is read and the mtd/chip structures are filled with the
+ * appropriate values. The mtd->owner field must be set to the module of the
+ * caller.
*/
int nand_scan(struct mtd_info *mtd, int maxchips)
{
@@ -3509,8 +4166,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
/* Many callers got this wrong, so check for it for a while... */
if (!mtd->owner && caller_is_module()) {
- printk(KERN_CRIT "%s called with NULL mtd->owner!\n",
- __func__);
+ pr_crit("%s called with NULL mtd->owner!\n", __func__);
BUG();
}
@@ -3523,8 +4179,8 @@ EXPORT_SYMBOL(nand_scan);
/**
* nand_release - [NAND Interface] Free resources held by the NAND device
- * @mtd: MTD device structure
-*/
+ * @mtd: MTD device structure
+ */
void nand_release(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index ccbeaa1e4a8..7f0c3b4c2a4 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -4,7 +4,7 @@
* Overview:
* Bad block table support for the NAND driver
*
- * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
+ * Copyright © 2004 Thomas Gleixner (tglx@linutronix.de)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -14,7 +14,7 @@
*
* When nand_scan_bbt is called, then it tries to find the bad block table
* depending on the options in the BBT descriptor(s). If no flash based BBT
- * (NAND_USE_FLASH_BBT) is specified then the device is scanned for factory
+ * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory
* marked good / bad blocks. This information is used to create a memory BBT.
* Once a new bad block is discovered then the "factory" information is updated
* on the device.
@@ -22,7 +22,7 @@
* BBT on flash. If a BBT is found then the contents are read and the memory
* based BBT is created. If a mirrored BBT is selected then the mirror is
* searched too and the versions are compared. If the mirror has a greater
- * version number than the mirror BBT is used to build the memory based BBT.
+ * version number, then the mirror BBT is used to build the memory based BBT.
* If the tables are not versioned, then we "or" the bad block information.
* If one of the BBTs is out of date or does not exist it is (re)created.
* If no BBT exists at all then the device is scanned for factory marked
@@ -36,9 +36,9 @@
* The table is marked in the OOB area with an ident pattern and a version
* number which indicates which of both tables is more up to date. If the NAND
* controller needs the complete OOB area for the ECC information then the
- * option NAND_USE_FLASH_BBT_NO_OOB should be used: it moves the ident pattern
- * and the version byte into the data area and the OOB area will remain
- * untouched.
+ * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
+ * course): it moves the ident pattern and the version byte into the data area
+ * and the OOB area will remain untouched.
*
* The table uses 2 bits per block
* 11b: block is good
@@ -62,126 +62,90 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/bbm.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
+#include <linux/string.h>
-static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
+#define BBT_BLOCK_GOOD 0x00
+#define BBT_BLOCK_WORN 0x01
+#define BBT_BLOCK_RESERVED 0x02
+#define BBT_BLOCK_FACTORY_BAD 0x03
+
+#define BBT_ENTRY_MASK 0x03
+#define BBT_ENTRY_SHIFT 2
+
+static int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
+
+static inline uint8_t bbt_get_entry(struct nand_chip *chip, int block)
{
- int ret;
+ uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
+ entry >>= (block & BBT_ENTRY_MASK) * 2;
+ return entry & BBT_ENTRY_MASK;
+}
- ret = memcmp(buf, td->pattern, td->len);
- if (!ret)
- return ret;
- return -1;
+static inline void bbt_mark_entry(struct nand_chip *chip, int block,
+ uint8_t mark)
+{
+ uint8_t msk = (mark & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);
+ chip->bbt[block >> BBT_ENTRY_SHIFT] |= msk;
+}
+
+static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
+{
+ if (memcmp(buf, td->pattern, td->len))
+ return -1;
+ return 0;
}
/**
* check_pattern - [GENERIC] check if a pattern is in the buffer
- * @buf: the buffer to search
- * @len: the length of buffer to search
- * @paglen: the pagelength
- * @td: search pattern descriptor
- *
- * Check for a pattern at the given place. Used to search bad block
- * tables and good / bad block identifiers.
- * If the SCAN_EMPTY option is set then check, if all bytes except the
- * pattern area contain 0xff
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
*
-*/
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers.
+ */
static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
{
- int i, end = 0;
- uint8_t *p = buf;
-
if (td->options & NAND_BBT_NO_OOB)
return check_pattern_no_oob(buf, td);
- end = paglen + td->offs;
- if (td->options & NAND_BBT_SCANEMPTY) {
- for (i = 0; i < end; i++) {
- if (p[i] != 0xff)
- return -1;
- }
- }
- p += end;
-
/* Compare the pattern */
- for (i = 0; i < td->len; i++) {
- if (p[i] != td->pattern[i])
- return -1;
- }
+ if (memcmp(buf + paglen + td->offs, td->pattern, td->len))
+ return -1;
- /* Check both positions 1 and 6 for pattern? */
- if (td->options & NAND_BBT_SCANBYTE1AND6) {
- if (td->options & NAND_BBT_SCANEMPTY) {
- p += td->len;
- end += NAND_SMALL_BADBLOCK_POS - td->offs;
- /* Check region between positions 1 and 6 */
- for (i = 0; i < NAND_SMALL_BADBLOCK_POS - td->offs - td->len;
- i++) {
- if (*p++ != 0xff)
- return -1;
- }
- }
- else {
- p += NAND_SMALL_BADBLOCK_POS - td->offs;
- }
- /* Compare the pattern */
- for (i = 0; i < td->len; i++) {
- if (p[i] != td->pattern[i])
- return -1;
- }
- }
-
- if (td->options & NAND_BBT_SCANEMPTY) {
- p += td->len;
- end += td->len;
- for (i = end; i < len; i++) {
- if (*p++ != 0xff)
- return -1;
- }
- }
return 0;
}
/**
* check_short_pattern - [GENERIC] check if a pattern is in the buffer
- * @buf: the buffer to search
- * @td: search pattern descriptor
- *
- * Check for a pattern at the given place. Used to search bad block
- * tables and good / bad block identifiers. Same as check_pattern, but
- * no optional empty check
+ * @buf: the buffer to search
+ * @td: search pattern descriptor
*
-*/
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers. Same as check_pattern, but no optional empty
+ * check.
+ */
static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
{
- int i;
- uint8_t *p = buf;
-
/* Compare the pattern */
- for (i = 0; i < td->len; i++) {
- if (p[td->offs + i] != td->pattern[i])
- return -1;
- }
- /* Need to check location 1 AND 6? */
- if (td->options & NAND_BBT_SCANBYTE1AND6) {
- for (i = 0; i < td->len; i++) {
- if (p[NAND_SMALL_BADBLOCK_POS + i] != td->pattern[i])
- return -1;
- }
- }
+ if (memcmp(buf + td->offs, td->pattern, td->len))
+ return -1;
return 0;
}
/**
* add_marker_len - compute the length of the marker in data area
- * @td: BBT descriptor used for computation
+ * @td: BBT descriptor used for computation
*
- * The length will be 0 if the markeris located in OOB area.
+ * The length will be 0 if the marker is located in OOB area.
*/
static u32 add_marker_len(struct nand_bbt_descr *td)
{
@@ -198,34 +162,33 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
/**
* read_bbt - [GENERIC] Read the bad block table starting from page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @page: the starting page
- * @num: the number of bbt descriptors to read
- * @td: the bbt describtion table
- * @offs: offset in the memory table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td:	the bbt description table
+ * @offs: block number offset in the table
*
* Read the bad block table starting from page.
- *
*/
static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
struct nand_bbt_descr *td, int offs)
{
- int res, i, j, act = 0;
+ int res, ret = 0, i, j, act = 0;
struct nand_chip *this = mtd->priv;
size_t retlen, len, totlen;
loff_t from;
int bits = td->options & NAND_BBT_NRBITS_MSK;
- uint8_t msk = (uint8_t) ((1 << bits) - 1);
+ uint8_t msk = (uint8_t)((1 << bits) - 1);
u32 marker_len;
int reserved_block_code = td->reserved_block_code;
totlen = (num * bits) >> 3;
marker_len = add_marker_len(td);
- from = ((loff_t) page) << this->page_shift;
+ from = ((loff_t)page) << this->page_shift;
while (totlen) {
- len = min(totlen, (size_t) (1 << this->bbt_erase_shift));
+ len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
if (marker_len) {
/*
* In case the BBT marker is not in the OOB area it
@@ -235,58 +198,72 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
from += marker_len;
marker_len = 0;
}
- res = mtd->read(mtd, from, len, &retlen, buf);
+ res = mtd_read(mtd, from, len, &retlen, buf);
if (res < 0) {
- if (retlen != len) {
- printk(KERN_INFO "nand_bbt: Error reading bad block table\n");
+ if (mtd_is_eccerr(res)) {
+ pr_info("nand_bbt: ECC error in BBT at "
+ "0x%012llx\n", from & ~mtd->writesize);
+ return res;
+ } else if (mtd_is_bitflip(res)) {
+ pr_info("nand_bbt: corrected error in BBT at "
+ "0x%012llx\n", from & ~mtd->writesize);
+ ret = res;
+ } else {
+ pr_info("nand_bbt: error reading BBT\n");
return res;
}
- printk(KERN_WARNING "nand_bbt: ECC error while reading bad block table\n");
}
/* Analyse data */
for (i = 0; i < len; i++) {
uint8_t dat = buf[i];
- for (j = 0; j < 8; j += bits, act += 2) {
+ for (j = 0; j < 8; j += bits, act++) {
uint8_t tmp = (dat >> j) & msk;
if (tmp == msk)
continue;
if (reserved_block_code && (tmp == reserved_block_code)) {
- printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n",
- (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
- this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
+ pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
+ (loff_t)(offs + act) <<
+ this->bbt_erase_shift);
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_RESERVED);
mtd->ecc_stats.bbtblocks++;
continue;
}
- /* Leave it for now, if its matured we can move this
- * message to MTD_DEBUG_LEVEL0 */
- printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n",
- (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
- /* Factory marked bad or worn out ? */
+ /*
+ * Leave it for now, if it's matured we can
+ * move this message to pr_debug.
+ */
+ pr_info("nand_read_bbt: bad block at 0x%012llx\n",
+ (loff_t)(offs + act) <<
+ this->bbt_erase_shift);
+ /* Factory marked bad or worn out? */
if (tmp == 0)
- this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_FACTORY_BAD);
else
- this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06);
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_WORN);
mtd->ecc_stats.badblocks++;
}
}
totlen -= len;
from += len;
}
- return 0;
+ return ret;
}
/**
* read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @chip: read the table for a specific chip, -1 read all chips.
- * Applies only if NAND_BBT_PERCHIP option is set
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 read all chips; applies only if
+ * NAND_BBT_PERCHIP option is set
*
- * Read the bad block table for all chips starting at a given page
- * We assume that the bbt bits are in consecutive order.
-*/
+ * Read the bad block table for all chips starting at a given page. We assume
+ * that the bbt bits are in consecutive order.
+ */
static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
{
struct nand_chip *this = mtd->priv;
@@ -301,7 +278,7 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
td, offs);
if (res)
return res;
- offs += this->chipsize >> (this->bbt_erase_shift + 2);
+ offs += this->chipsize >> this->bbt_erase_shift;
}
} else {
res = read_bbt(mtd, buf, td->pages[0],
@@ -312,10 +289,8 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
return 0;
}
-/*
- * BBT marker is in the first page, no OOB.
- */
-static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+/* BBT marker is in the first page, no OOB */
+static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
struct nand_bbt_descr *td)
{
size_t retlen;
@@ -325,70 +300,73 @@ static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
if (td->options & NAND_BBT_VERSION)
len++;
- return mtd->read(mtd, offs, len, &retlen, buf);
+ return mtd_read(mtd, offs, len, &retlen, buf);
}
-/*
- * Scan read raw data from flash
+/**
+ * scan_read_oob - [GENERIC] Scan data+OOB region to buffer
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @offs: offset at which to scan
+ * @len: length of data region to read
+ *
+ * Scan read data from data+OOB. May traverse multiple pages, interleaving
+ * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
+ * ECC condition (error or bitflip). May quit on the first (non-ECC) error.
*/
-static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
size_t len)
{
struct mtd_oob_ops ops;
- int res;
+ int res, ret = 0;
- ops.mode = MTD_OOB_RAW;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
-
while (len > 0) {
- if (len <= mtd->writesize) {
- ops.oobbuf = buf + len;
- ops.datbuf = buf;
- ops.len = len;
- return mtd->read_oob(mtd, offs, &ops);
- } else {
- ops.oobbuf = buf + mtd->writesize;
- ops.datbuf = buf;
- ops.len = mtd->writesize;
- res = mtd->read_oob(mtd, offs, &ops);
+ ops.datbuf = buf;
+ ops.len = min(len, (size_t)mtd->writesize);
+ ops.oobbuf = buf + ops.len;
- if (res)
+ res = mtd_read_oob(mtd, offs, &ops);
+ if (res) {
+ if (!mtd_is_bitflip_or_eccerr(res))
return res;
+ else if (mtd_is_eccerr(res) || !ret)
+ ret = res;
}
buf += mtd->oobsize + mtd->writesize;
len -= mtd->writesize;
+ offs += mtd->writesize;
}
- return 0;
+ return ret;
}
-static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
size_t len, struct nand_bbt_descr *td)
{
if (td->options & NAND_BBT_NO_OOB)
- return scan_read_raw_data(mtd, buf, offs, td);
+ return scan_read_data(mtd, buf, offs, td);
else
- return scan_read_raw_oob(mtd, buf, offs, len);
+ return scan_read_oob(mtd, buf, offs, len);
}
-/*
- * Scan write data with oob to flash
- */
+/* Scan write data with oob to flash */
static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
uint8_t *buf, uint8_t *oob)
{
struct mtd_oob_ops ops;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
ops.datbuf = buf;
ops.oobbuf = oob;
ops.len = len;
- return mtd->write_oob(mtd, offs, &ops);
+ return mtd_write_oob(mtd, offs, &ops);
}
static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
@@ -402,65 +380,41 @@ static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
/**
* read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md: descriptor for the bad block table mirror
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
*
- * Read the bad block table(s) for all chips starting at a given page
- * We assume that the bbt bits are in consecutive order.
- *
-*/
-static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
- struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+ * Read the bad block table(s) for all chips starting at a given page. We
+ * assume that the bbt bits are in consecutive order.
+ */
+static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md)
{
struct nand_chip *this = mtd->priv;
/* Read the primary version, if available */
if (td->options & NAND_BBT_VERSION) {
- scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
+ scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
mtd->writesize, td);
td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
- printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
- td->pages[0], td->version[0]);
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ td->pages[0], td->version[0]);
}
/* Read the mirror version, if available */
if (md && (md->options & NAND_BBT_VERSION)) {
- scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
- mtd->writesize, td);
+ scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
+ mtd->writesize, md);
md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
- printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
- md->pages[0], md->version[0]);
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ md->pages[0], md->version[0]);
}
- return 1;
}
-/*
- * Scan a given block full
- */
-static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
- loff_t offs, uint8_t *buf, size_t readlen,
- int scanlen, int len)
-{
- int ret, j;
-
- ret = scan_read_raw_oob(mtd, buf, offs, readlen);
- if (ret)
- return ret;
-
- for (j = 0; j < len; j++, buf += scanlen) {
- if (check_pattern(buf, scanlen, mtd->writesize, bd))
- return 1;
- }
- return 0;
-}
-
-/*
- * Scan a given block partially
- */
+/* Scan a given block partially */
static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
- loff_t offs, uint8_t *buf, int len)
+ loff_t offs, uint8_t *buf, int numpages)
{
struct mtd_oob_ops ops;
int j, ret;
@@ -469,16 +423,16 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
ops.oobbuf = buf;
ops.ooboffs = 0;
ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
- for (j = 0; j < len; j++) {
+ for (j = 0; j < numpages; j++) {
/*
- * Read the full oob until read_oob is fixed to
- * handle single byte reads for 16 bit
- * buswidth
+ * Read the full oob until read_oob is fixed to handle single
+ * byte reads for 16 bit buswidth.
*/
- ret = mtd->read_oob(mtd, offs, &ops);
- if (ret)
+ ret = mtd_read_oob(mtd, offs, &ops);
+ /* Ignore ECC errors when checking for BBM */
+ if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
if (check_short_pattern(buf, bd))
@@ -491,86 +445,65 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
/**
* create_bbt - [GENERIC] Create a bad block table by scanning the device
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @bd: descriptor for the good/bad block search pattern
- * @chip: create the table for a specific chip, -1 read all chips.
- * Applies only if NAND_BBT_PERCHIP option is set
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips; applies only
+ * if NAND_BBT_PERCHIP option is set
*
- * Create a bad block table by scanning the device
- * for the given good/bad block identify pattern
+ * Create a bad block table by scanning the device for the given good/bad block
+ * identify pattern.
*/
static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
struct nand_bbt_descr *bd, int chip)
{
struct nand_chip *this = mtd->priv;
- int i, numblocks, len, scanlen;
+ int i, numblocks, numpages;
int startblock;
loff_t from;
- size_t readlen;
- printk(KERN_INFO "Scanning device for bad blocks\n");
+ pr_info("Scanning device for bad blocks\n");
- if (bd->options & NAND_BBT_SCANALLPAGES)
- len = 1 << (this->bbt_erase_shift - this->page_shift);
- else if (bd->options & NAND_BBT_SCAN2NDPAGE)
- len = 2;
+ if (bd->options & NAND_BBT_SCAN2NDPAGE)
+ numpages = 2;
else
- len = 1;
-
- if (!(bd->options & NAND_BBT_SCANEMPTY)) {
- /* We need only read few bytes from the OOB area */
- scanlen = 0;
- readlen = bd->len;
- } else {
- /* Full page content should be read */
- scanlen = mtd->writesize + mtd->oobsize;
- readlen = len * mtd->writesize;
- }
+ numpages = 1;
if (chip == -1) {
- /* Note that numblocks is 2 * (real numblocks) here, see i+=2
- * below as it makes shifting and masking less painful */
- numblocks = mtd->size >> (this->bbt_erase_shift - 1);
+ numblocks = mtd->size >> this->bbt_erase_shift;
startblock = 0;
from = 0;
} else {
if (chip >= this->numchips) {
- printk(KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n",
+ pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
chip + 1, this->numchips);
return -EINVAL;
}
- numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
+ numblocks = this->chipsize >> this->bbt_erase_shift;
startblock = chip * numblocks;
numblocks += startblock;
- from = (loff_t)startblock << (this->bbt_erase_shift - 1);
+ from = (loff_t)startblock << this->bbt_erase_shift;
}
- if (this->options & NAND_BBT_SCANLASTPAGE)
- from += mtd->erasesize - (mtd->writesize * len);
+ if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
+ from += mtd->erasesize - (mtd->writesize * numpages);
- for (i = startblock; i < numblocks;) {
+ for (i = startblock; i < numblocks; i++) {
int ret;
BUG_ON(bd->options & NAND_BBT_NO_OOB);
- if (bd->options & NAND_BBT_SCANALLPAGES)
- ret = scan_block_full(mtd, bd, from, buf, readlen,
- scanlen, len);
- else
- ret = scan_block_fast(mtd, bd, from, buf, len);
-
+ ret = scan_block_fast(mtd, bd, from, buf, numpages);
if (ret < 0)
return ret;
if (ret) {
- this->bbt[i >> 3] |= 0x03 << (i & 0x6);
- printk(KERN_WARNING "Bad eraseblock %d at 0x%012llx\n",
- i >> 1, (unsigned long long)from);
+ bbt_mark_entry(this, i, BBT_BLOCK_FACTORY_BAD);
+ pr_warn("Bad eraseblock %d at 0x%012llx\n",
+ i, (unsigned long long)from);
mtd->ecc_stats.badblocks++;
}
- i += 2;
from += (1 << this->bbt_erase_shift);
}
return 0;
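
With the "numblocks is 2 * real numblocks" trick gone, the shift arithmetic in
create_bbt() maps offsets and block numbers directly; a throwaway illustration
with made-up geometry:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t chipsize = 256ULL << 20;	/* pretend 256 MiB chip */
		unsigned int bbt_erase_shift = 17;	/* 128 KiB eraseblocks */

		int numblocks = (int)(chipsize >> bbt_erase_shift);
		uint64_t from = (uint64_t)5 << bbt_erase_shift;

		/* prints "2048 blocks, block 5 starts at 0xa0000" */
		printf("%d blocks, block 5 starts at 0x%llx\n",
		       numblocks, (unsigned long long)from);
		return 0;
	}
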
@@ -578,31 +511,29 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
/**
* search_bbt - [GENERIC] scan the device for a specific bad block table
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
*
- * Read the bad block table by searching for a given ident pattern.
- * Search is preformed either from the beginning up or from the end of
- * the device downwards. The search starts always at the start of a
- * block.
- * If the option NAND_BBT_PERCHIP is given, each chip is searched
- * for a bbt, which contains the bad block information of this chip.
- * This is necessary to provide support for certain DOC devices.
+ * Read the bad block table by searching for a given ident pattern. Search is
+ * performed either from the beginning up or from the end of the device
+ * downwards. The search starts always at the start of a block. If the option
+ * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
+ * the bad block information of this chip. This is necessary to provide support
+ * for certain DOC devices.
*
- * The bbt ident pattern resides in the oob area of the first page
- * in a block.
+ * The bbt ident pattern resides in the oob area of the first page in a block.
*/
static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
{
struct nand_chip *this = mtd->priv;
int i, chips;
- int bits, startblock, block, dir;
+ int startblock, block, dir;
int scanlen = mtd->writesize + mtd->oobsize;
int bbtblocks;
int blocktopage = this->bbt_erase_shift - this->page_shift;
- /* Search direction top -> down ? */
+ /* Search direction top -> down? */
if (td->options & NAND_BBT_LASTBLOCK) {
startblock = (mtd->size >> this->bbt_erase_shift) - 1;
dir = -1;
@@ -611,7 +542,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
dir = 1;
}
- /* Do we have a bbt per chip ? */
+ /* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chips = this->numchips;
bbtblocks = this->chipsize >> this->bbt_erase_shift;
@@ -621,9 +552,6 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
bbtblocks = mtd->size >> this->bbt_erase_shift;
}
- /* Number of bits for each erase block in the bbt */
- bits = td->options & NAND_BBT_NRBITS_MSK;
-
for (i = 0; i < chips; i++) {
/* Reset version information */
td->version[i] = 0;
@@ -635,7 +563,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
/* Read first page */
- scan_read_raw(mtd, buf, offs, mtd->writesize, td);
+ scan_read(mtd, buf, offs, mtd->writesize, td);
if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
td->pages[i] = actblock << blocktopage;
if (td->options & NAND_BBT_VERSION) {
@@ -650,24 +578,26 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
/* Check, if we found a bbt for each requested chip */
for (i = 0; i < chips; i++) {
if (td->pages[i] == -1)
- printk(KERN_WARNING "Bad block table not found for chip %d\n", i);
+ pr_warn("Bad block table not found for chip %d\n", i);
else
- printk(KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i],
- td->version[i]);
+ pr_info("Bad block table found at page %d, version "
+ "0x%02X\n", td->pages[i], td->version[i]);
}
return 0;
}
/**
* search_read_bbts - [GENERIC] scan the device for bad block table(s)
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md: descriptor for the bad block table mirror
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
*
- * Search and read the bad block table(s)
-*/
-static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+ * Search and read the bad block table(s).
+ */
+static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td,
+ struct nand_bbt_descr *md)
{
/* Search the primary table */
search_bbt(mtd, buf, td);
@@ -675,32 +605,27 @@ static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt
/* Search the mirror table */
if (md)
search_bbt(mtd, buf, md);
-
- /* Force result check */
- return 1;
}
/**
* write_bbt - [GENERIC] (Re)write the bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
*
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md: descriptor for the bad block table mirror
- * @chipsel: selector for a specific chip, -1 for all
- *
- * (Re)write the bad block table
- *
-*/
+ * (Re)write the bad block table.
+ */
static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md,
int chipsel)
{
struct nand_chip *this = mtd->priv;
struct erase_info einfo;
- int i, j, res, chip = 0;
+ int i, res, chip = 0;
int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
- int nrchips, bbtoffs, pageoffs, ooboffs;
+ int nrchips, pageoffs, ooboffs;
uint8_t msk[4];
uint8_t rcode = td->reserved_block_code;
size_t retlen, len = 0;
@@ -710,14 +635,14 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
ops.ooblen = mtd->oobsize;
ops.ooboffs = 0;
ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
if (!rcode)
rcode = 0xff;
- /* Write bad block table per chip rather than per device ? */
+ /* Write bad block table per chip rather than per device? */
if (td->options & NAND_BBT_PERCHIP) {
numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
- /* Full device write or specific chip ? */
+ /* Full device write or specific chip? */
if (chipsel == -1) {
nrchips = this->numchips;
} else {
@@ -731,8 +656,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
/* Loop through the chips */
for (; chip < nrchips; chip++) {
-
- /* There was already a version of the table, reuse the page
+ /*
+ * There was already a version of the table, reuse the page
* This applies for absolute placement too, as we have the
* page nr. in td->pages.
*/
@@ -741,8 +666,10 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
goto write;
}
- /* Automatic placement of the bad block table */
- /* Search direction top -> down ? */
+ /*
+ * Automatic placement of the bad block table. Search direction
+ * top -> down?
+ */
if (td->options & NAND_BBT_LASTBLOCK) {
startblock = numblocks * (chip + 1) - 1;
dir = -1;
@@ -754,10 +681,9 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
for (i = 0; i < td->maxblocks; i++) {
int block = startblock + dir * i;
/* Check, if the block is bad */
- switch ((this->bbt[block >> 2] >>
- (2 * (block & 0x03))) & 0x03) {
- case 0x01:
- case 0x03:
+ switch (bbt_get_entry(this, block)) {
+ case BBT_BLOCK_WORN:
+ case BBT_BLOCK_FACTORY_BAD:
continue;
}
page = block <<
@@ -766,7 +692,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
if (!md || md->pages[chip] != page)
goto write;
}
- printk(KERN_ERR "No space left to write bad block table\n");
+ pr_err("No space left to write bad block table\n");
return -ENOSPC;
write:
@@ -789,31 +715,27 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
default: return -EINVAL;
}
- bbtoffs = chip * (numblocks >> 2);
-
- to = ((loff_t) page) << this->page_shift;
+ to = ((loff_t)page) << this->page_shift;
- /* Must we save the block contents ? */
+ /* Must we save the block contents? */
if (td->options & NAND_BBT_SAVECONTENT) {
/* Make it block aligned */
- to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1));
+ to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
len = 1 << this->bbt_erase_shift;
- res = mtd->read(mtd, to, len, &retlen, buf);
+ res = mtd_read(mtd, to, len, &retlen, buf);
if (res < 0) {
if (retlen != len) {
- printk(KERN_INFO "nand_bbt: Error "
- "reading block for writing "
- "the bad block table\n");
+ pr_info("nand_bbt: error reading block "
+ "for writing the bad block table\n");
return res;
}
- printk(KERN_WARNING "nand_bbt: ECC error "
- "while reading block for writing "
- "bad block table\n");
+ pr_warn("nand_bbt: ECC error while reading "
+ "block for writing bad block table\n");
}
/* Read oob data */
ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
ops.oobbuf = &buf[len];
- res = mtd->read_oob(mtd, to + mtd->writesize, &ops);
+ res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
if (res < 0 || ops.oobretlen != ops.ooblen)
goto outerr;
@@ -821,19 +743,19 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
pageoffs = page - (int)(to >> this->page_shift);
offs = pageoffs << this->page_shift;
/* Preset the bbt area with 0xff */
- memset(&buf[offs], 0xff, (size_t) (numblocks >> sft));
+ memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
ooboffs = len + (pageoffs * mtd->oobsize);
} else if (td->options & NAND_BBT_NO_OOB) {
ooboffs = 0;
offs = td->len;
- /* the version byte */
+ /* The version byte */
if (td->options & NAND_BBT_VERSION)
offs++;
/* Calc length */
- len = (size_t) (numblocks >> sft);
+ len = (size_t)(numblocks >> sft);
len += offs;
- /* Make it page aligned ! */
+ /* Make it page aligned! */
len = ALIGN(len, mtd->writesize);
/* Preset the buffer with 0xff */
memset(buf, 0xff, len);
@@ -841,8 +763,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
memcpy(buf, td->pattern, td->len);
} else {
/* Calc length */
- len = (size_t) (numblocks >> sft);
- /* Make it page aligned ! */
+ len = (size_t)(numblocks >> sft);
+ /* Make it page aligned! */
len = ALIGN(len, mtd->writesize);
/* Preset the buffer with 0xff */
memset(buf, 0xff, len +
@@ -856,17 +778,13 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
if (td->options & NAND_BBT_VERSION)
buf[ooboffs + td->veroffs] = td->version[chip];
- /* walk through the memory table */
- for (i = 0; i < numblocks;) {
+ /* Walk through the memory table */
+ for (i = 0; i < numblocks; i++) {
uint8_t dat;
- dat = this->bbt[bbtoffs + (i >> 2)];
- for (j = 0; j < 4; j++, i++) {
- int sftcnt = (i << (3 - sft)) & sftmsk;
- /* Do not store the reserved bbt blocks ! */
- buf[offs + (i >> sft)] &=
- ~(msk[dat & 0x03] << sftcnt);
- dat >>= 2;
- }
+ int sftcnt = (i << (3 - sft)) & sftmsk;
+ dat = bbt_get_entry(this, chip * numblocks + i);
+ /* Do not store the reserved bbt blocks! */
+ buf[offs + (i >> sft)] &= ~(msk[dat] << sftcnt);
}
memset(&einfo, 0, sizeof(einfo));
@@ -883,8 +801,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
if (res < 0)
goto outerr;
- printk(KERN_DEBUG "Bad block table written to 0x%012llx, version "
- "0x%02X\n", (unsigned long long)to, td->version[chip]);
+ pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
+ (unsigned long long)to, td->version[chip]);
/* Mark it as used */
td->pages[chip] = page;
@@ -892,48 +810,45 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
return 0;
outerr:
- printk(KERN_WARNING
- "nand_bbt: Error while writing bad block table %d\n", res);
+ pr_warn("nand_bbt: error while writing bad block table %d\n", res);
return res;
}
/**
* nand_memory_bbt - [GENERIC] create a memory based bad block table
- * @mtd: MTD device structure
- * @bd: descriptor for the good/bad block search pattern
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
*
- * The function creates a memory based bbt by scanning the device
- * for manufacturer / software marked good / bad blocks
-*/
+ * The function creates a memory based bbt by scanning the device for
+ * manufacturer / software marked good / bad blocks.
+ */
static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct nand_chip *this = mtd->priv;
- bd->options &= ~NAND_BBT_SCANEMPTY;
return create_bbt(mtd, this->buffers->databuf, bd, -1);
}
/**
* check_create - [GENERIC] create and write bbt(s) if necessary
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @bd: descriptor for the good/bad block search pattern
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
*
- * The function checks the results of the previous call to read_bbt
- * and creates / updates the bbt(s) if necessary
- * Creation is necessary if no bbt was found for the chip/device
- * Update is necessary if one of the tables is missing or the
- * version nr. of one table is less than the other
-*/
+ * The function checks the results of the previous call to read_bbt and creates
+ * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
+ * for the chip/device. Update is necessary if one of the tables is missing or
+ * the version nr. of one table is less than the other.
+ */
static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
{
- int i, chips, writeops, chipsel, res;
+ int i, chips, writeops, create, chipsel, res, res2;
struct nand_chip *this = mtd->priv;
struct nand_bbt_descr *td = this->bbt_td;
struct nand_bbt_descr *md = this->bbt_md;
struct nand_bbt_descr *rd, *rd2;
- /* Do we have a bbt per chip ? */
+ /* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP)
chips = this->numchips;
else
@@ -941,86 +856,98 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
for (i = 0; i < chips; i++) {
writeops = 0;
+ create = 0;
rd = NULL;
rd2 = NULL;
- /* Per chip or per device ? */
+ res = res2 = 0;
+ /* Per chip or per device? */
chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
- /* Mirrored table available ? */
+ /* Mirrored table available? */
if (md) {
if (td->pages[i] == -1 && md->pages[i] == -1) {
+ create = 1;
writeops = 0x03;
- goto create;
- }
-
- if (td->pages[i] == -1) {
+ } else if (td->pages[i] == -1) {
rd = md;
- td->version[i] = md->version[i];
- writeops = 1;
- goto writecheck;
- }
-
- if (md->pages[i] == -1) {
+ writeops = 0x01;
+ } else if (md->pages[i] == -1) {
rd = td;
- md->version[i] = td->version[i];
- writeops = 2;
- goto writecheck;
- }
-
- if (td->version[i] == md->version[i]) {
+ writeops = 0x02;
+ } else if (td->version[i] == md->version[i]) {
rd = td;
if (!(td->options & NAND_BBT_VERSION))
rd2 = md;
- goto writecheck;
- }
-
- if (((int8_t) (td->version[i] - md->version[i])) > 0) {
+ } else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
rd = td;
- md->version[i] = td->version[i];
- writeops = 2;
+ writeops = 0x02;
} else {
rd = md;
- td->version[i] = md->version[i];
- writeops = 1;
+ writeops = 0x01;
}
-
- goto writecheck;
-
} else {
if (td->pages[i] == -1) {
+ create = 1;
writeops = 0x01;
- goto create;
+ } else {
+ rd = td;
}
- rd = td;
- goto writecheck;
}
- create:
- /* Create the bad block table by scanning the device ? */
- if (!(td->options & NAND_BBT_CREATE))
- continue;
- /* Create the table in memory by scanning the chip(s) */
- if (!(this->options & NAND_CREATE_EMPTY_BBT))
- create_bbt(mtd, buf, bd, chipsel);
-
- td->version[i] = 1;
- if (md)
- md->version[i] = 1;
- writecheck:
- /* read back first ? */
- if (rd)
- read_abs_bbt(mtd, buf, rd, chipsel);
- /* If they weren't versioned, read both. */
- if (rd2)
- read_abs_bbt(mtd, buf, rd2, chipsel);
-
- /* Write the bad block table to the device ? */
+ if (create) {
+ /* Create the bad block table by scanning the device? */
+ if (!(td->options & NAND_BBT_CREATE))
+ continue;
+
+ /* Create the table in memory by scanning the chip(s) */
+ if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
+ create_bbt(mtd, buf, bd, chipsel);
+
+ td->version[i] = 1;
+ if (md)
+ md->version[i] = 1;
+ }
+
+ /* Read back first? */
+ if (rd) {
+ res = read_abs_bbt(mtd, buf, rd, chipsel);
+ if (mtd_is_eccerr(res)) {
+ /* Mark table as invalid */
+ rd->pages[i] = -1;
+ rd->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+ /* If they weren't versioned, read both */
+ if (rd2) {
+ res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
+ if (mtd_is_eccerr(res2)) {
+ /* Mark table as invalid */
+ rd2->pages[i] = -1;
+ rd2->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+
+ /* Scrub the flash table(s)? */
+ if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
+ writeops = 0x03;
+
+ /* Update version numbers before writing */
+ if (md) {
+ td->version[i] = max(td->version[i], md->version[i]);
+ md->version[i] = td->version[i];
+ }
+
+ /* Write the bad block table to the device? */
if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
res = write_bbt(mtd, buf, td, md, chipsel);
if (res < 0)
return res;
}
- /* Write the mirror bad block table to the device ? */
+ /* Write the mirror bad block table to the device? */
if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
res = write_bbt(mtd, buf, md, td, chipsel);
if (res < 0)
@@ -1032,20 +959,19 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
/**
* mark_bbt_regions - [GENERIC] mark the bad block table regions
- * @mtd: MTD device structure
- * @td: bad block table descriptor
+ * @mtd: MTD device structure
+ * @td: bad block table descriptor
*
- * The bad block table regions are marked as "bad" to prevent
- * accidental erasures / writes. The regions are identified by
- * the mark 0x02.
-*/
+ * The bad block table regions are marked as "bad" to prevent accidental
+ * erasures / writes. The regions are identified by the mark 0x02.
+ */
static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
{
struct nand_chip *this = mtd->priv;
int i, j, chips, block, nrblocks, update;
- uint8_t oldval, newval;
+ uint8_t oldval;
- /* Do we have a bbt per chip ? */
+ /* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chips = this->numchips;
nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
@@ -1060,12 +986,12 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
if (td->pages[i] == -1)
continue;
block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
- block <<= 1;
- oldval = this->bbt[(block >> 3)];
- newval = oldval | (0x2 << (block & 0x06));
- this->bbt[(block >> 3)] = newval;
- if ((oldval != newval) && td->reserved_block_code)
- nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1));
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if ((oldval != BBT_BLOCK_RESERVED) &&
+ td->reserved_block_code)
+ nand_update_bbt(mtd, (loff_t)block <<
+ this->bbt_erase_shift);
continue;
}
update = 0;
@@ -1073,27 +999,28 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
block = ((i + 1) * nrblocks) - td->maxblocks;
else
block = i * nrblocks;
- block <<= 1;
for (j = 0; j < td->maxblocks; j++) {
- oldval = this->bbt[(block >> 3)];
- newval = oldval | (0x2 << (block & 0x06));
- this->bbt[(block >> 3)] = newval;
- if (oldval != newval)
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if (oldval != BBT_BLOCK_RESERVED)
update = 1;
- block += 2;
+ block++;
}
- /* If we want reserved blocks to be recorded to flash, and some
- new ones have been marked, then we need to update the stored
- bbts. This should only happen once. */
+ /*
+ * If we want reserved blocks to be recorded to flash, and some
+ * new ones have been marked, then we need to update the stored
+ * bbts. This should only happen once.
+ */
if (update && td->reserved_block_code)
- nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
+ nand_update_bbt(mtd, (loff_t)(block - 1) <<
+ this->bbt_erase_shift);
}
}
/**
* verify_bbt_descr - verify the bad block description
- * @mtd: MTD device structure
- * @bd: the table to verify
+ * @mtd: MTD device structure
+ * @bd: the table to verify
*
* This functions performs a few sanity checks on the bad block description
* table.
@@ -1111,16 +1038,16 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
pattern_len = bd->len;
bits = bd->options & NAND_BBT_NRBITS_MSK;
- BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) &&
- !(this->options & NAND_USE_FLASH_BBT));
+ BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
+ !(this->bbt_options & NAND_BBT_USE_FLASH));
BUG_ON(!bits);
if (bd->options & NAND_BBT_VERSION)
pattern_len++;
if (bd->options & NAND_BBT_NO_OOB) {
- BUG_ON(!(this->options & NAND_USE_FLASH_BBT));
- BUG_ON(!(this->options & NAND_USE_FLASH_BBT_NO_OOB));
+ BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
BUG_ON(bd->offs);
if (bd->options & NAND_BBT_VERSION)
BUG_ON(bd->veroffs != bd->len);
@@ -1140,18 +1067,16 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
/**
* nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
- * @mtd: MTD device structure
- * @bd: descriptor for the good/bad block search pattern
- *
- * The function checks, if a bad block table(s) is/are already
- * available. If not it scans the device for manufacturer
- * marked good / bad blocks and writes the bad block table(s) to
- * the selected place.
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
*
- * The bad block table memory is allocated here. It must be freed
- * by calling the nand_free_bbt function.
+ * The function checks if a bad block table(s) is/are already available. If
+ * not it scans the device for manufacturer marked good / bad blocks and writes
+ * the bad block table(s) to the selected place.
*
-*/
+ * The bad block table memory is allocated here. It must be freed by calling
+ * the nand_free_bbt function.
+ */
int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct nand_chip *this = mtd->priv;
@@ -1161,19 +1086,21 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
struct nand_bbt_descr *md = this->bbt_md;
len = mtd->size >> (this->bbt_erase_shift + 2);
- /* Allocate memory (2bit per block) and clear the memory bad block table */
+ /*
+ * Allocate memory (2bit per block) and clear the memory bad block
+ * table.
+ */
this->bbt = kzalloc(len, GFP_KERNEL);
- if (!this->bbt) {
- printk(KERN_ERR "nand_scan_bbt: Out of memory\n");
+ if (!this->bbt)
return -ENOMEM;
- }
- /* If no primary table decriptor is given, scan the device
- * to build a memory based bad block table
+ /*
+	 * If no primary table descriptor is given, scan the device to build a
+ * memory based bad block table.
*/
if (!td) {
if ((res = nand_memory_bbt(mtd, bd))) {
- printk(KERN_ERR "nand_bbt: Can't scan flash and build the RAM-based BBT\n");
+ pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
kfree(this->bbt);
this->bbt = NULL;
}
@@ -1187,22 +1114,20 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
len += (len >> this->page_shift) * mtd->oobsize;
buf = vmalloc(len);
if (!buf) {
- printk(KERN_ERR "nand_bbt: Out of memory\n");
kfree(this->bbt);
this->bbt = NULL;
return -ENOMEM;
}
- /* Is the bbt at a given page ? */
+ /* Is the bbt at a given page? */
if (td->options & NAND_BBT_ABSPAGE) {
- res = read_abs_bbts(mtd, buf, td, md);
+ read_abs_bbts(mtd, buf, td, md);
} else {
/* Search the bad block table using a pattern in oob */
- res = search_read_bbts(mtd, buf, td, md);
+ search_read_bbts(mtd, buf, td, md);
}
- if (res)
- res = check_create(mtd, buf, bd);
+ res = check_create(mtd, buf, bd);
/* Prevent the bbt regions from erasing / writing */
mark_bbt_region(mtd, td);
@@ -1214,16 +1139,16 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
}
/**
- * nand_update_bbt - [NAND Interface] update bad block table(s)
- * @mtd: MTD device structure
- * @offs: the offset of the newly marked block
+ * nand_update_bbt - update bad block table(s)
+ * @mtd: MTD device structure
+ * @offs: the offset of the newly marked block
*
- * The function updates the bad block table(s)
-*/
-int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
+ * The function updates the bad block table(s).
+ */
+static int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
{
struct nand_chip *this = mtd->priv;
- int len, res = 0, writeops = 0;
+ int len, res = 0;
int chip, chipsel;
uint8_t *buf;
struct nand_bbt_descr *td = this->bbt_td;
@@ -1236,14 +1161,10 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
len = (1 << this->bbt_erase_shift);
len += (len >> this->page_shift) * mtd->oobsize;
buf = kmalloc(len, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "nand_update_bbt: Out of memory\n");
+ if (!buf)
return -ENOMEM;
- }
-
- writeops = md != NULL ? 0x03 : 0x01;
- /* Do we have a bbt per chip ? */
+ /* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chip = (int)(offs >> this->chip_shift);
chipsel = chip;
@@ -1256,14 +1177,14 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
if (md)
md->version[chip]++;
- /* Write the bad block table to the device ? */
- if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+ /* Write the bad block table to the device? */
+ if (td->options & NAND_BBT_WRITE) {
res = write_bbt(mtd, buf, td, md, chipsel);
if (res < 0)
goto out;
}
- /* Write the mirror bad block table to the device ? */
- if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+ /* Write the mirror bad block table to the device? */
+ if (md && (md->options & NAND_BBT_WRITE)) {
res = write_bbt(mtd, buf, md, td, chipsel);
}
@@ -1272,21 +1193,13 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
return res;
}
-/* Define some generic bad / good block scan pattern which are used
- * while scanning a device for factory marked good / bad blocks. */
+/*
+ * Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
-static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
-
-static struct nand_bbt_descr agand_flashbased = {
- .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
- .offs = 0x20,
- .len = 6,
- .pattern = scan_agand_pattern
-};
-
-/* Generic flash bbt decriptors
-*/
+/* Generic flash bbt descriptors */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
@@ -1296,7 +1209,7 @@ static struct nand_bbt_descr bbt_main_descr = {
.offs = 8,
.len = 4,
.veroffs = 12,
- .maxblocks = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
.pattern = bbt_pattern
};
@@ -1306,55 +1219,51 @@ static struct nand_bbt_descr bbt_mirror_descr = {
.offs = 8,
.len = 4,
.veroffs = 12,
- .maxblocks = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
.pattern = mirror_pattern
};
-static struct nand_bbt_descr bbt_main_no_bbt_descr = {
+static struct nand_bbt_descr bbt_main_no_oob_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
| NAND_BBT_NO_OOB,
.len = 4,
.veroffs = 4,
- .maxblocks = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
.pattern = bbt_pattern
};
-static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
+static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
| NAND_BBT_NO_OOB,
.len = 4,
.veroffs = 4,
- .maxblocks = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
.pattern = mirror_pattern
};
-#define BBT_SCAN_OPTIONS (NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE | \
- NAND_BBT_SCANBYTE1AND6)
+#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
/**
- * nand_create_default_bbt_descr - [Internal] Creates a BBT descriptor structure
- * @this: NAND chip to create descriptor for
+ * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
+ * @this: NAND chip to create descriptor for
*
* This function allocates and initializes a nand_bbt_descr for BBM detection
- * based on the properties of "this". The new descriptor is stored in
+ * based on the properties of @this. The new descriptor is stored in
* this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
* passed to this function.
- *
*/
-static int nand_create_default_bbt_descr(struct nand_chip *this)
+static int nand_create_badblock_pattern(struct nand_chip *this)
{
struct nand_bbt_descr *bd;
if (this->badblock_pattern) {
- printk(KERN_WARNING "BBT descr already allocated; not replacing.\n");
+ pr_warn("Bad block pattern already allocated; not replacing\n");
return -EINVAL;
}
bd = kzalloc(sizeof(*bd), GFP_KERNEL);
- if (!bd) {
- printk(KERN_ERR "nand_create_default_bbt_descr: Out of memory\n");
+ if (!bd)
return -ENOMEM;
- }
- bd->options = this->options & BBT_SCAN_OPTIONS;
+ bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
bd->offs = this->badblockpos;
bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
bd->pattern = scan_ff_pattern;
@@ -1365,40 +1274,23 @@ static int nand_create_default_bbt_descr(struct nand_chip *this)
/**
* nand_default_bbt - [NAND Interface] Select a default bad block table for the device
- * @mtd: MTD device structure
- *
- * This function selects the default bad block table
- * support for the device and calls the nand_scan_bbt function
+ * @mtd: MTD device structure
*
-*/
+ * This function selects the default bad block table support for the device and
+ * calls the nand_scan_bbt function.
+ */
int nand_default_bbt(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
+ int ret;
- /* Default for AG-AND. We must use a flash based
- * bad block table as the devices have factory marked
- * _good_ blocks. Erasing those blocks leads to loss
- * of the good / bad information, so we _must_ store
- * this information in a good / bad table during
- * startup
- */
- if (this->options & NAND_IS_AND) {
- /* Use the default pattern descriptors */
- if (!this->bbt_td) {
- this->bbt_td = &bbt_main_descr;
- this->bbt_md = &bbt_mirror_descr;
- }
- this->options |= NAND_USE_FLASH_BBT;
- return nand_scan_bbt(mtd, &agand_flashbased);
- }
-
- /* Is a flash based bad block table requested ? */
- if (this->options & NAND_USE_FLASH_BBT) {
+ /* Is a flash based bad block table requested? */
+ if (this->bbt_options & NAND_BBT_USE_FLASH) {
/* Use the default pattern descriptors */
if (!this->bbt_td) {
- if (this->options & NAND_USE_FLASH_BBT_NO_OOB) {
- this->bbt_td = &bbt_main_no_bbt_descr;
- this->bbt_md = &bbt_mirror_no_bbt_descr;
+ if (this->bbt_options & NAND_BBT_NO_OOB) {
+ this->bbt_td = &bbt_main_no_oob_descr;
+ this->bbt_md = &bbt_mirror_no_oob_descr;
} else {
this->bbt_td = &bbt_main_descr;
this->bbt_md = &bbt_mirror_descr;
@@ -1409,42 +1301,64 @@ int nand_default_bbt(struct mtd_info *mtd)
this->bbt_md = NULL;
}
- if (!this->badblock_pattern)
- nand_create_default_bbt_descr(this);
+ if (!this->badblock_pattern) {
+ ret = nand_create_badblock_pattern(this);
+ if (ret)
+ return ret;
+ }
return nand_scan_bbt(mtd, this->badblock_pattern);
}
/**
* nand_isbad_bbt - [NAND Interface] Check if a block is bad
- * @mtd: MTD device structure
- * @offs: offset in the device
- * @allowbbt: allow access to bad block table region
- *
-*/
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ */
int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
{
struct nand_chip *this = mtd->priv;
- int block;
- uint8_t res;
+ int block, res;
- /* Get block number * 2 */
- block = (int)(offs >> (this->bbt_erase_shift - 1));
- res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
+ block = (int)(offs >> this->bbt_erase_shift);
+ res = bbt_get_entry(this, block);
- DEBUG(MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
- (unsigned int)offs, block >> 1, res);
+ pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: "
+ "(block %d) 0x%02x\n",
+ (unsigned int)offs, block, res);
- switch ((int)res) {
- case 0x00:
+ switch (res) {
+ case BBT_BLOCK_GOOD:
return 0;
- case 0x01:
+ case BBT_BLOCK_WORN:
return 1;
- case 0x02:
+ case BBT_BLOCK_RESERVED:
return allowbbt ? 0 : 1;
}
return 1;
}
+/**
+ * nand_markbad_bbt - [NAND Interface] Mark a block bad in the BBT
+ * @mtd: MTD device structure
+ * @offs: offset of the bad block
+ */
+int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *this = mtd->priv;
+ int block, ret = 0;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+
+ /* Mark bad block in memory */
+ bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+ /* Update flash-based bad block table */
+ if (this->bbt_options & NAND_BBT_USE_FLASH)
+ ret = nand_update_bbt(mtd, offs);
+
+ return ret;
+}
+
EXPORT_SYMBOL(nand_scan_bbt);
-EXPORT_SYMBOL(nand_default_bbt);
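
Roughly how the new nand_markbad_bbt() ends up being exercised from the MTD side
once a flash-based table is enabled; a sketch only, with error handling trimmed
and the helper name invented (mtd_block_isbad()/mtd_block_markbad() are the
generic wrappers of this era):

	#include <linux/kernel.h>
	#include <linux/mtd/mtd.h>

	static void example_retire_block(struct mtd_info *mtd, loff_t offs)
	{
		if (mtd_block_isbad(mtd, offs))
			return;			/* already recorded in the BBT */

		if (mtd_block_markbad(mtd, offs))
			pr_warn("failed to mark block at 0x%llx bad\n",
				(unsigned long long)offs);
	}
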
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
index 0f931e75711..3803e0bba23 100644
--- a/drivers/mtd/nand/nand_bch.c
+++ b/drivers/mtd/nand/nand_bch.c
@@ -93,8 +93,8 @@ int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
/* else error in ecc, no action needed */
- DEBUG(MTD_DEBUG_LEVEL0, "%s: corrected bitflip %u\n",
- __func__, errloc[i]);
+ pr_debug("%s: corrected bitflip %u\n", __func__,
+ errloc[i]);
}
} else if (count < 0) {
printk(KERN_ERR "ecc unrecoverable error\n");
diff --git a/drivers/mtd/nand/nand_bcm_umi.c b/drivers/mtd/nand/nand_bcm_umi.c
deleted file mode 100644
index 46a6bc9c4b7..00000000000
--- a/drivers/mtd/nand/nand_bcm_umi.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*****************************************************************************
-* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
-*
-* Unless you and Broadcom execute a separate written software license
-* agreement governing use of this software, this software is licensed to you
-* under the terms of the GNU General Public License version 2, available at
-* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
-*
-* Notwithstanding the above, under no circumstances may you combine this
-* software in any way with any other Broadcom software provided under a
-* license other than the GPL, without Broadcom's express prior written
-* consent.
-*****************************************************************************/
-
-/* ---- Include Files ---------------------------------------------------- */
-#include <mach/reg_umi.h>
-#include "nand_bcm_umi.h"
-#ifdef BOOT0_BUILD
-#include <uart.h>
-#endif
-
-/* ---- External Variable Declarations ----------------------------------- */
-/* ---- External Function Prototypes ------------------------------------- */
-/* ---- Public Variables ------------------------------------------------- */
-/* ---- Private Constants and Types -------------------------------------- */
-/* ---- Private Function Prototypes -------------------------------------- */
-/* ---- Private Variables ------------------------------------------------ */
-/* ---- Private Functions ------------------------------------------------ */
-
-#if NAND_ECC_BCH
-/****************************************************************************
-* nand_bch_ecc_flip_bit - Routine to flip an errored bit
-*
-* PURPOSE:
-* This is a helper routine that flips the bit (0 -> 1 or 1 -> 0) of the
-* errored bit specified
-*
-* PARAMETERS:
-* datap - Container that holds the 512 byte data
-* errorLocation - Location of the bit that needs to be flipped
-*
-* RETURNS:
-* None
-****************************************************************************/
-static void nand_bcm_umi_bch_ecc_flip_bit(uint8_t *datap, int errorLocation)
-{
- int locWithinAByte = (errorLocation & REG_UMI_BCH_ERR_LOC_BYTE) >> 0;
- int locWithinAWord = (errorLocation & REG_UMI_BCH_ERR_LOC_WORD) >> 3;
- int locWithinAPage = (errorLocation & REG_UMI_BCH_ERR_LOC_PAGE) >> 5;
-
- uint8_t errorByte = 0;
- uint8_t byteMask = 1 << locWithinAByte;
-
- /* BCH uses big endian, need to change the location
- * bits to little endian */
- locWithinAWord = 3 - locWithinAWord;
-
- errorByte = datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord];
-
-#ifdef BOOT0_BUILD
- puthexs("\nECC Correct Offset: ",
- locWithinAPage * sizeof(uint32_t) + locWithinAWord);
- puthexs(" errorByte:", errorByte);
- puthex8(" Bit: ", locWithinAByte);
-#endif
-
- if (errorByte & byteMask) {
- /* bit needs to be cleared */
- errorByte &= ~byteMask;
- } else {
- /* bit needs to be set */
- errorByte |= byteMask;
- }
-
- /* write back the value with the fixed bit */
- datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord] = errorByte;
-}
-
-/****************************************************************************
-* nand_correct_page_bch - Routine to correct bit errors when reading NAND
-*
-* PURPOSE:
-* This routine reads the BCH registers to determine if there are any bit
-* errors during the read of the last 512 bytes of data + ECC bytes. If
-* errors exists, the routine fixes it.
-*
-* PARAMETERS:
-* datap - Container that holds the 512 byte data
-*
-* RETURNS:
-* 0 or greater = Number of errors corrected
-* (No errors are found or errors have been fixed)
-* -1 = Error(s) cannot be fixed
-****************************************************************************/
-int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
- int numEccBytes)
-{
- int numErrors;
- int errorLocation;
- int idx;
- uint32_t regValue;
-
- /* wait for read ECC to be valid */
- regValue = nand_bcm_umi_bch_poll_read_ecc_calc();
-
- /*
- * read the control status register to determine if there
- * are error'ed bits
- * see if errors are correctible
- */
- if ((regValue & REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR) > 0) {
- int i;
-
- for (i = 0; i < numEccBytes; i++) {
- if (readEccData[i] != 0xff) {
- /* errors cannot be fixed, return -1 */
- return -1;
- }
- }
- /* If ECC is unprogrammed then we can't correct,
- * assume everything OK */
- return 0;
- }
-
- if ((regValue & REG_UMI_BCH_CTRL_STATUS_CORR_ERR) == 0) {
- /* no errors */
- return 0;
- }
-
- /*
- * Fix errored bits by doing the following:
- * 1. Read the number of errors in the control and status register
- * 2. Read the error location registers that corresponds to the number
- * of errors reported
- * 3. Invert the bit in the data
- */
- numErrors = (regValue & REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR) >> 20;
-
- for (idx = 0; idx < numErrors; idx++) {
- errorLocation =
- REG_UMI_BCH_ERR_LOC_ADDR(idx) & REG_UMI_BCH_ERR_LOC_MASK;
-
- /* Flip bit */
- nand_bcm_umi_bch_ecc_flip_bit(datap, errorLocation);
- }
- /* Errors corrected */
- return numErrors;
-}
-#endif
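
The correction loop above reduces to decoding one packed error-location word into a byte offset and a bit position, then inverting that bit. A standalone sketch of that decode, assuming the field layout the removed REG_UMI_BCH_ERR_LOC_* shifts imply (bits 0-2 = bit within byte, bits 3-4 = byte within a 32-bit word, higher bits = word within the sector; the mask widths below are illustrative) and the same big-to-little-endian byte swap:

#include <stdint.h>

/* Illustrative masks mirroring the removed REG_UMI_BCH_ERR_LOC_* layout. */
#define ERR_LOC_BYTE	0x0007	/* bit position within a byte      */
#define ERR_LOC_WORD	0x0018	/* byte position within a word     */
#define ERR_LOC_PAGE	0x0fe0	/* 32-bit word within the sector   */

static void bch_flip_bit(uint8_t *data, unsigned int loc)
{
	unsigned int bit  = (loc & ERR_LOC_BYTE) >> 0;
	unsigned int byte = (loc & ERR_LOC_WORD) >> 3;
	unsigned int word = (loc & ERR_LOC_PAGE) >> 5;

	/* the BCH engine reports bytes in big-endian order within a word */
	byte = 3 - byte;

	data[word * 4 + byte] ^= (uint8_t)(1u << bit);	/* invert the bad bit */
}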
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
deleted file mode 100644
index 198b304d6f7..00000000000
--- a/drivers/mtd/nand/nand_bcm_umi.h
+++ /dev/null
@@ -1,337 +0,0 @@
-/*****************************************************************************
-* Copyright 2003 - 2009 Broadcom Corporation. All rights reserved.
-*
-* Unless you and Broadcom execute a separate written software license
-* agreement governing use of this software, this software is licensed to you
-* under the terms of the GNU General Public License version 2, available at
-* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
-*
-* Notwithstanding the above, under no circumstances may you combine this
-* software in any way with any other Broadcom software provided under a
-* license other than the GPL, without Broadcom's express prior written
-* consent.
-*****************************************************************************/
-#ifndef NAND_BCM_UMI_H
-#define NAND_BCM_UMI_H
-
-/* ---- Include Files ---------------------------------------------------- */
-#include <mach/reg_umi.h>
-#include <mach/reg_nand.h>
-#include <cfg_global.h>
-
-/* ---- Constants and Types ---------------------------------------------- */
-#if (CFG_GLOBAL_CHIP_FAMILY == CFG_GLOBAL_CHIP_FAMILY_BCMRING)
-#define NAND_ECC_BCH (CFG_GLOBAL_CHIP_REV > 0xA0)
-#else
-#define NAND_ECC_BCH 0
-#endif
-
-#define CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES 13
-
-#if NAND_ECC_BCH
-#ifdef BOOT0_BUILD
-#define NAND_ECC_NUM_BYTES 13
-#else
-#define NAND_ECC_NUM_BYTES CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES
-#endif
-#else
-#define NAND_ECC_NUM_BYTES 3
-#endif
-
-#define NAND_DATA_ACCESS_SIZE 512
-
-/* ---- Variable Externs ------------------------------------------ */
-/* ---- Function Prototypes --------------------------------------- */
-int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
- int numEccBytes);
-
-/* Check in device is ready */
-static inline int nand_bcm_umi_dev_ready(void)
-{
- return REG_UMI_NAND_RCSR & REG_UMI_NAND_RCSR_RDY;
-}
-
-/* Wait until device is ready */
-static inline void nand_bcm_umi_wait_till_ready(void)
-{
- while (nand_bcm_umi_dev_ready() == 0)
- ;
-}
-
-/* Enable Hamming ECC */
-static inline void nand_bcm_umi_hamming_enable_hwecc(void)
-{
- /* disable and reset ECC, 512 byte page */
- REG_UMI_NAND_ECC_CSR &= ~(REG_UMI_NAND_ECC_CSR_ECC_ENABLE |
- REG_UMI_NAND_ECC_CSR_256BYTE);
- /* enable ECC */
- REG_UMI_NAND_ECC_CSR |= REG_UMI_NAND_ECC_CSR_ECC_ENABLE;
-}
-
-#if NAND_ECC_BCH
-/* BCH ECC specifics */
-#define ECC_BITS_PER_CORRECTABLE_BIT 13
-
-/* Enable BCH Read ECC */
-static inline void nand_bcm_umi_bch_enable_read_hwecc(void)
-{
- /* disable and reset ECC */
- REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
- /* Turn on ECC */
- REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
-}
-
-/* Enable BCH Write ECC */
-static inline void nand_bcm_umi_bch_enable_write_hwecc(void)
-{
- /* disable and reset ECC */
- REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID;
- /* Turn on ECC */
- REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN;
-}
-
-/* Config number of BCH ECC bytes */
-static inline void nand_bcm_umi_bch_config_ecc(uint8_t numEccBytes)
-{
- uint32_t nValue;
- uint32_t tValue;
- uint32_t kValue;
- uint32_t numBits = numEccBytes * 8;
-
- /* disable and reset ECC */
- REG_UMI_BCH_CTRL_STATUS =
- REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID |
- REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
-
- /* Every correctible bit requires 13 ECC bits */
- tValue = (uint32_t) (numBits / ECC_BITS_PER_CORRECTABLE_BIT);
-
- /* Total data in number of bits for generating and computing BCH ECC */
- nValue = (NAND_DATA_ACCESS_SIZE + numEccBytes) * 8;
-
- /* K parameter is used internally. K = N - (T * 13) */
- kValue = nValue - (tValue * ECC_BITS_PER_CORRECTABLE_BIT);
-
- /* Write the settings */
- REG_UMI_BCH_N = nValue;
- REG_UMI_BCH_T = tValue;
- REG_UMI_BCH_K = kValue;
-}
-
-/* Pause during ECC read calculation to skip bytes in OOB */
-static inline void nand_bcm_umi_bch_pause_read_ecc_calc(void)
-{
- REG_UMI_BCH_CTRL_STATUS =
- REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN |
- REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC;
-}
-
-/* Resume during ECC read calculation after skipping bytes in OOB */
-static inline void nand_bcm_umi_bch_resume_read_ecc_calc(void)
-{
- REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
-}
-
-/* Poll read ECC calc to check when hardware completes */
-static inline uint32_t nand_bcm_umi_bch_poll_read_ecc_calc(void)
-{
- uint32_t regVal;
-
- do {
- /* wait for ECC to be valid */
- regVal = REG_UMI_BCH_CTRL_STATUS;
- } while ((regVal & REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID) == 0);
-
- return regVal;
-}
-
-/* Poll write ECC calc to check when hardware completes */
-static inline void nand_bcm_umi_bch_poll_write_ecc_calc(void)
-{
- /* wait for ECC to be valid */
- while ((REG_UMI_BCH_CTRL_STATUS & REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID)
- == 0)
- ;
-}
-
-/* Read the OOB and ECC, for kernel write OOB to a buffer */
-#if defined(__KERNEL__) && !defined(STANDALONE)
-static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
- uint8_t *eccCalc, int numEccBytes, uint8_t *oobp)
-#else
-static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
- uint8_t *eccCalc, int numEccBytes)
-#endif
-{
- int eccPos = 0;
- int numToRead = 16; /* There are 16 bytes per sector in the OOB */
-
- /* ECC is already paused when this function is called */
- if (pageSize != NAND_DATA_ACCESS_SIZE) {
- /* skip BI */
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
-#else
- REG_NAND_DATA8;
-#endif
- numToRead--;
- }
-
- while (numToRead > numEccBytes) {
- /* skip free oob region */
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
-#else
- REG_NAND_DATA8;
-#endif
- numToRead--;
- }
-
- if (pageSize == NAND_DATA_ACCESS_SIZE) {
- /* read ECC bytes before BI */
- nand_bcm_umi_bch_resume_read_ecc_calc();
-
- while (numToRead > 11) {
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp = REG_NAND_DATA8;
- eccCalc[eccPos++] = *oobp;
- oobp++;
-#else
- eccCalc[eccPos++] = REG_NAND_DATA8;
-#endif
- numToRead--;
- }
-
- nand_bcm_umi_bch_pause_read_ecc_calc();
-
- if (numToRead == 11) {
- /* read BI */
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
-#else
- REG_NAND_DATA8;
-#endif
- numToRead--;
- }
-
- }
- /* read ECC bytes */
- nand_bcm_umi_bch_resume_read_ecc_calc();
- while (numToRead) {
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp = REG_NAND_DATA8;
- eccCalc[eccPos++] = *oobp;
- oobp++;
-#else
- eccCalc[eccPos++] = REG_NAND_DATA8;
-#endif
- numToRead--;
- }
-}
-
-/* Helper function to write ECC */
-static inline void NAND_BCM_UMI_ECC_WRITE(int numEccBytes, int eccBytePos,
- uint8_t *oobp, uint8_t eccVal)
-{
- if (eccBytePos <= numEccBytes)
- *oobp = eccVal;
-}
-
-/* Write OOB with ECC */
-static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
- uint8_t *oobp, int numEccBytes)
-{
- uint32_t eccVal = 0xffffffff;
-
- /* wait for write ECC to be valid */
- nand_bcm_umi_bch_poll_write_ecc_calc();
-
- /*
- ** Get the hardware ecc from the 32-bit result registers.
- ** Read after 512 byte accesses. Format B3B2B1B0
- ** where B3 = ecc3, etc.
- */
-
- if (pageSize == NAND_DATA_ACCESS_SIZE) {
- /* Now fill in the ECC bytes */
- if (numEccBytes >= 13)
- eccVal = REG_UMI_BCH_WR_ECC_3;
-
- /* Usually we skip CM in oob[0,1] */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[0],
- (eccVal >> 16) & 0xff);
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[1],
- (eccVal >> 8) & 0xff);
-
- /* Write ECC in oob[2,3,4] */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[2],
- eccVal & 0xff); /* ECC 12 */
-
- if (numEccBytes >= 9)
- eccVal = REG_UMI_BCH_WR_ECC_2;
-
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[3],
- (eccVal >> 24) & 0xff); /* ECC11 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[4],
- (eccVal >> 16) & 0xff); /* ECC10 */
-
- /* Always Skip BI in oob[5] */
- } else {
- /* Always Skip BI in oob[0] */
-
- /* Now fill in the ECC bytes */
- if (numEccBytes >= 13)
- eccVal = REG_UMI_BCH_WR_ECC_3;
-
- /* Usually skip CM in oob[1,2] */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[1],
- (eccVal >> 16) & 0xff);
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[2],
- (eccVal >> 8) & 0xff);
-
- /* Write ECC in oob[3-15] */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[3],
- eccVal & 0xff); /* ECC12 */
-
- if (numEccBytes >= 9)
- eccVal = REG_UMI_BCH_WR_ECC_2;
-
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[4],
- (eccVal >> 24) & 0xff); /* ECC11 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[5],
- (eccVal >> 16) & 0xff); /* ECC10 */
- }
-
- /* Fill in the remainder of ECC locations */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 10, &oobp[6],
- (eccVal >> 8) & 0xff); /* ECC9 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 9, &oobp[7],
- eccVal & 0xff); /* ECC8 */
-
- if (numEccBytes >= 5)
- eccVal = REG_UMI_BCH_WR_ECC_1;
-
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 8, &oobp[8],
- (eccVal >> 24) & 0xff); /* ECC7 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 7, &oobp[9],
- (eccVal >> 16) & 0xff); /* ECC6 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 6, &oobp[10],
- (eccVal >> 8) & 0xff); /* ECC5 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 5, &oobp[11],
- eccVal & 0xff); /* ECC4 */
-
- if (numEccBytes >= 1)
- eccVal = REG_UMI_BCH_WR_ECC_0;
-
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 4, &oobp[12],
- (eccVal >> 24) & 0xff); /* ECC3 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 3, &oobp[13],
- (eccVal >> 16) & 0xff); /* ECC2 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 2, &oobp[14],
- (eccVal >> 8) & 0xff); /* ECC1 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 1, &oobp[15],
- eccVal & 0xff); /* ECC0 */
-}
-#endif
-
-#endif /* NAND_BCM_UMI_H */
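
The nand_bcm_umi_bch_config_ecc() helper being removed programs the controller from three derived values: T (correctable bits, at 13 ECC bits each), N (total codeword bits) and K (data bits). A minimal host-side sketch of the same arithmetic, using the 512-byte sector and 13-byte ECC constants defined above:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_BYTES		512
#define BITS_PER_CORR_BIT	13

int main(void)
{
	uint32_t ecc_bytes = 13;	/* CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES */
	uint32_t t = (ecc_bytes * 8) / BITS_PER_CORR_BIT;	/* correctable bits */
	uint32_t n = (SECTOR_BYTES + ecc_bytes) * 8;		/* codeword length  */
	uint32_t k = n - t * BITS_PER_CORR_BIT;			/* data bits        */

	/* 13 ECC bytes -> t = 8, n = 4200, k = 4096 */
	printf("BCH(n=%u, k=%u, t=%u)\n", n, k, t);
	return 0;
}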
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 271b8e735e8..97c4c0216c9 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -55,8 +55,7 @@ struct mtd_info;
#define MODULE_AUTHOR(x) /* x */
#define MODULE_DESCRIPTION(x) /* x */
-#define printk printf
-#define KERN_ERR ""
+#define pr_err printf
#endif
/*
@@ -110,7 +109,7 @@ static const char bitsperbyte[256] = {
/*
* addressbits is a lookup table to filter out the bits from the xor-ed
- * ecc data that identify the faulty location.
+ * ECC data that identify the faulty location.
* this is only used for repairing parity
* see the comments in nand_correct_data for more details
*/
@@ -153,7 +152,7 @@ static const char addressbits[256] = {
* __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
* block
* @buf: input buffer with raw data
- * @eccsize: data bytes per ecc step (256 or 512)
+ * @eccsize: data bytes per ECC step (256 or 512)
* @code: output buffer with ECC
*/
void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
@@ -348,7 +347,7 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
rp17 = (par ^ rp16) & 0xff;
/*
- * Finally calculate the ecc bits.
+ * Finally calculate the ECC bits.
* Again here it might seem that there are performance optimisations
* possible, but benchmarks showed that on the system this is developed
* the code below is the fastest
@@ -436,7 +435,7 @@ EXPORT_SYMBOL(nand_calculate_ecc);
* @buf: raw data read from the chip
* @read_ecc: ECC from the chip
* @calc_ecc: the ECC calculated from raw data
- * @eccsize: data bytes per ecc step (256 or 512)
+ * @eccsize: data bytes per ECC step (256 or 512)
*
* Detect and correct a 1 bit error for eccsize byte block
*/
@@ -505,9 +504,9 @@ int __nand_correct_data(unsigned char *buf,
}
/* count nr of bits; use table lookup, faster than calculating it */
if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
- return 1; /* error in ecc data; no action needed */
+ return 1; /* error in ECC data; no action needed */
- printk(KERN_ERR "uncorrectable error : ");
+ pr_err("%s: uncorrectable ECC error\n", __func__);
return -1;
}
EXPORT_SYMBOL(__nand_correct_data);
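
The pr_err() path above is the last step of __nand_correct_data(): XOR the stored and recalculated ECC bytes and decide whether the result looks like no error, a hit in the ECC bytes themselves, or a single correctable data bit. A simplified, popcount-based sketch of that classification (the in-kernel code additionally checks that complementary parity bits differ pairwise and uses the addressbits table to locate the flipped bit; this version only shows the shape of the decision):

#include <stdint.h>

static int count_bits(uint32_t x)
{
	int n = 0;

	while (x) {
		n += x & 1;
		x >>= 1;
	}
	return n;
}

/* 0 = clean, 1 = error in the ECC bytes, 2 = correctable data bit, -1 = uncorrectable */
static int classify_ecc(const uint8_t *read_ecc, const uint8_t *calc_ecc,
			unsigned int eccsize /* 256 or 512 */)
{
	uint32_t syn = (read_ecc[0] ^ calc_ecc[0]) |
		       (read_ecc[1] ^ calc_ecc[1]) << 8 |
		       (read_ecc[2] ^ calc_ecc[2]) << 16;
	int nbits = count_bits(syn);

	if (syn == 0)
		return 0;
	if (nbits == 1)
		return 1;
	if (nbits == (eccsize == 256 ? 11 : 12))
		return 2;
	return -1;
}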
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 00cf1b0d605..3d7c89fc103 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -10,162 +10,156 @@
*/
#include <linux/module.h>
#include <linux/mtd/nand.h>
+#include <linux/sizes.h>
+
+#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
+#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
+
+#define SP_OPTIONS NAND_NEED_READRDY
+#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
+
/*
-* Chip ID list
-*
-* Name. ID code, pagesize, chipsize in MegaByte, eraseblock size,
-* options
-*
-* Pagesize; 0, 256, 512
-* 0 get this information from the extended chip ID
-+ 256 256 Byte page size
-* 512 512 Byte page size
-*/
+ * The chip ID list:
+ * name, device ID, page size, chip size in MiB, eraseblock size, options
+ *
+ * If page size and eraseblock size are 0, the sizes are taken from the
+ * extended chip ID.
+ */
struct nand_flash_dev nand_flash_ids[] = {
-
-#ifdef CONFIG_MTD_NAND_MUSEUM_IDS
- {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0},
- {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0},
- {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0},
- {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0},
- {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0},
- {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0},
- {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0},
- {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0},
- {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0},
- {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0},
-
- {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0},
- {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0},
- {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16},
- {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16},
-#endif
-
- {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0},
- {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0},
- {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},
-
- {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0},
- {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0},
- {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16},
-
- {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0},
- {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0},
- {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16},
-
- {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0},
- {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0},
- {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0},
- {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16},
-
- {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0},
+ /*
+ * Some incompatible NAND chips share device ID's and so must be
+ * listed by full ID. We list them first so that we can easily identify
+ * the most specific match.
+ */
+ {"TC58NVG2S0F 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
+ {"TC58NVG3S0F 8G 3.3V 8-bit",
+ { .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
+ SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
+ {"TC58NVG5D2 32G 3.3V 8-bit",
+ { .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
+ SZ_8K, SZ_4K, SZ_1M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+ {"TC58NVG6D2 64G 3.3V 8-bit",
+ { .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
+ SZ_8K, SZ_8K, SZ_2M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+ {"SDTNRGAMA 64G 3.3V 8-bit",
+ { .id = {0x45, 0xde, 0x94, 0x93, 0x76, 0x50} },
+ SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
+
+ LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS),
+
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit", 0x33, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit", 0x73, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit", 0x35, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit", 0x75, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit", 0x36, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0x76, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x78, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x39, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit", 0x79, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS),
/*
- * These are the new chips with large page size. The pagesize and the
- * erasesize is determined from the extended id bytes
+ * These are the new chips with large page size. Their page size and
+ * eraseblock size are determined from the extended ID bytes.
*/
-#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
-#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
- /*512 Megabit */
- {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS},
- {"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS},
- {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS},
- {"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS},
- {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16},
- {"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16},
- {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16},
- {"NAND 64MiB 3,3V 16-bit", 0xC0, 0, 64, 0, LP_OPTIONS16},
+ /* 512 Megabit */
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xD0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0, 64, LP_OPTIONS16),
/* 1 Gigabit */
- {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS},
- {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS},
- {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS},
- {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16},
- {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16},
- {"NAND 128MiB 1,8V 16-bit", 0xAD, 0, 128, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit", 0xA1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xF1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xD1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16),
/* 2 Gigabit */
- {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, LP_OPTIONS},
- {"NAND 256MiB 3,3V 8-bit", 0xDA, 0, 256, 0, LP_OPTIONS},
- {"NAND 256MiB 1,8V 16-bit", 0xBA, 0, 256, 0, LP_OPTIONS16},
- {"NAND 256MiB 3,3V 16-bit", 0xCA, 0, 256, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit", 0xAA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit", 0xDA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16),
/* 4 Gigabit */
- {"NAND 512MiB 1,8V 8-bit", 0xAC, 0, 512, 0, LP_OPTIONS},
- {"NAND 512MiB 3,3V 8-bit", 0xDC, 0, 512, 0, LP_OPTIONS},
- {"NAND 512MiB 1,8V 16-bit", 0xBC, 0, 512, 0, LP_OPTIONS16},
- {"NAND 512MiB 3,3V 16-bit", 0xCC, 0, 512, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit", 0xAC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit", 0xDC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16),
/* 8 Gigabit */
- {"NAND 1GiB 1,8V 8-bit", 0xA3, 0, 1024, 0, LP_OPTIONS},
- {"NAND 1GiB 3,3V 8-bit", 0xD3, 0, 1024, 0, LP_OPTIONS},
- {"NAND 1GiB 1,8V 16-bit", 0xB3, 0, 1024, 0, LP_OPTIONS16},
- {"NAND 1GiB 3,3V 16-bit", 0xC3, 0, 1024, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit", 0xA3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit", 0xD3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),
/* 16 Gigabit */
- {"NAND 2GiB 1,8V 8-bit", 0xA5, 0, 2048, 0, LP_OPTIONS},
- {"NAND 2GiB 3,3V 8-bit", 0xD5, 0, 2048, 0, LP_OPTIONS},
- {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, LP_OPTIONS16},
- {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit", 0xA5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit", 0xD5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16),
/* 32 Gigabit */
- {"NAND 4GiB 1,8V 8-bit", 0xA7, 0, 4096, 0, LP_OPTIONS},
- {"NAND 4GiB 3,3V 8-bit", 0xD7, 0, 4096, 0, LP_OPTIONS},
- {"NAND 4GiB 1,8V 16-bit", 0xB7, 0, 4096, 0, LP_OPTIONS16},
- {"NAND 4GiB 3,3V 16-bit", 0xC7, 0, 4096, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit", 0xA7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit", 0xD7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),
/* 64 Gigabit */
- {"NAND 8GiB 1,8V 8-bit", 0xAE, 0, 8192, 0, LP_OPTIONS},
- {"NAND 8GiB 3,3V 8-bit", 0xDE, 0, 8192, 0, LP_OPTIONS},
- {"NAND 8GiB 1,8V 16-bit", 0xBE, 0, 8192, 0, LP_OPTIONS16},
- {"NAND 8GiB 3,3V 16-bit", 0xCE, 0, 8192, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit", 0xAE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit", 0xDE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16),
/* 128 Gigabit */
- {"NAND 16GiB 1,8V 8-bit", 0x1A, 0, 16384, 0, LP_OPTIONS},
- {"NAND 16GiB 3,3V 8-bit", 0x3A, 0, 16384, 0, LP_OPTIONS},
- {"NAND 16GiB 1,8V 16-bit", 0x2A, 0, 16384, 0, LP_OPTIONS16},
- {"NAND 16GiB 3,3V 16-bit", 0x4A, 0, 16384, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit", 0x1A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit", 0x3A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16),
/* 256 Gigabit */
- {"NAND 32GiB 1,8V 8-bit", 0x1C, 0, 32768, 0, LP_OPTIONS},
- {"NAND 32GiB 3,3V 8-bit", 0x3C, 0, 32768, 0, LP_OPTIONS},
- {"NAND 32GiB 1,8V 16-bit", 0x2C, 0, 32768, 0, LP_OPTIONS16},
- {"NAND 32GiB 3,3V 16-bit", 0x4C, 0, 32768, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit", 0x1C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit", 0x3C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16),
/* 512 Gigabit */
- {"NAND 64GiB 1,8V 8-bit", 0x1E, 0, 65536, 0, LP_OPTIONS},
- {"NAND 64GiB 3,3V 8-bit", 0x3E, 0, 65536, 0, LP_OPTIONS},
- {"NAND 64GiB 1,8V 16-bit", 0x2E, 0, 65536, 0, LP_OPTIONS16},
- {"NAND 64GiB 3,3V 16-bit", 0x4E, 0, 65536, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit", 0x1E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit", 0x3E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16),
- /*
- * Renesas AND 1 Gigabit. Those chips do not support extended id and
- * have a strange page/block layout ! The chosen minimum erasesize is
- * 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page
- * planes 1 block = 2 pages, but due to plane arrangement the blocks
- * 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7 Anyway JFFS2 would
- * increase the eraseblock size so we chose a combined one which can be
- * erased in one go There are more speed improvements for reads and
- * writes possible, but not implemented now
- */
- {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000,
- NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY |
- BBT_AUTO_REFRESH
- },
-
- {NULL,}
+ {NULL}
};
-/*
-* Manufacturer ID list
-*/
+/* Manufacturer IDs */
struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_TOSHIBA, "Toshiba"},
{NAND_MFR_SAMSUNG, "Samsung"},
@@ -175,7 +169,11 @@ struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_STMICRO, "ST Micro"},
{NAND_MFR_HYNIX, "Hynix"},
{NAND_MFR_MICRON, "Micron"},
- {NAND_MFR_AMD, "AMD"},
+ {NAND_MFR_AMD, "AMD/Spansion"},
+ {NAND_MFR_MACRONIX, "Macronix"},
+ {NAND_MFR_EON, "Eon"},
+ {NAND_MFR_SANDISK, "SanDisk"},
+ {NAND_MFR_INTEL, "Intel"},
{0x0, "Unknown"}
};
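
The table above is matched against the device ID reported by the chip; entries with a zero page/erase size defer to the extended ID bytes, and the full-ID entries at the top win when every ID byte matches. A self-contained sketch of the legacy-ID lookup, using a deliberately simplified stand-in struct (field names here are illustrative, not the kernel's struct nand_flash_dev):

#include <stddef.h>
#include <stdint.h>

struct flash_id {			/* simplified stand-in, not the kernel struct */
	const char *name;
	uint8_t dev_id;
	unsigned int pagesize;		/* 0 means: decode the extended ID bytes */
	unsigned int chipsize_mb;
	unsigned int erasesize;
};

static const struct flash_id table[] = {
	{ "NAND 64MiB 3,3V 8-bit",  0xF2, 0,   64,  0 },	/* extended-ID chip   */
	{ "NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000 },	/* legacy small page  */
	{ NULL }
};

static const struct flash_id *lookup(uint8_t dev_id)
{
	const struct flash_id *p;

	for (p = table; p->name; p++)
		if (p->dev_id == dev_id)
			return p;
	return NULL;	/* unknown chip: fall back to ONFI / full-ID probing */
}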
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 357e8c5252a..4f0d83648e5 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -28,7 +28,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -42,6 +42,8 @@
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
/* Default simulator parameters values */
#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
@@ -105,7 +107,6 @@ static char *weakblocks = NULL;
static char *weakpages = NULL;
static unsigned int bitflips = 0;
static char *gravepages = NULL;
-static unsigned int rptwear = 0;
static unsigned int overridesize = 0;
static char *cache_file = NULL;
static unsigned int bbt;
@@ -130,7 +131,6 @@ module_param(weakblocks, charp, 0400);
module_param(weakpages, charp, 0400);
module_param(bitflips, uint, 0400);
module_param(gravepages, charp, 0400);
-module_param(rptwear, uint, 0400);
module_param(overridesize, uint, 0400);
module_param(cache_file, charp, 0400);
module_param(bbt, uint, 0400);
@@ -162,7 +162,6 @@ MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (z
MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
" separated by commas e.g. 1401:2 means page 1401"
" can be read only twice before failing");
-MODULE_PARM_DESC(rptwear, "Number of erases between reporting wear, if not zero");
MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
"The size is specified in erase blocks and as the exponent of a power of two"
" e.g. 5 means a size of 32 erase blocks");
@@ -206,7 +205,7 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
/* Calculate the page offset in flash RAM image by (row, column) address */
#define NS_RAW_OFFSET(ns) \
- (((ns)->regs.row << (ns)->geom.pgshift) + ((ns)->regs.row * (ns)->geom.oobsz) + (ns)->regs.column)
+ (((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column)
/* Calculate the OOB offset in flash RAM image by (row, column) address */
#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
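
The simplified NS_RAW_OFFSET computes the same value as the old form (row << pgshift equals row * pgsz), just via the precomputed page-plus-OOB size: each simulated page occupies pgsz + oobsz bytes in the cache image, and the OOB of a page sits right after its data. A quick worked example with a common 2048+64 geometry:

#include <stdio.h>

int main(void)
{
	unsigned int pgsz = 2048, oobsz = 64;	/* typical large-page geometry  */
	unsigned int pgszoob = pgsz + oobsz;	/* bytes per page in the image  */
	unsigned int row = 3, column = 0;

	unsigned long raw = (unsigned long)row * pgszoob + column;
	unsigned long oob = raw + pgsz;

	printf("page 3 data at %lu, its OOB at %lu\n", raw, oob);	/* 6336, 8384 */
	return 0;
}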
@@ -219,7 +218,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
#define STATE_CMD_STATUS 0x00000007 /* read status */
-#define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */
#define STATE_CMD_SEQIN 0x00000009 /* sequential data input */
#define STATE_CMD_READID 0x0000000A /* read ID */
#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
@@ -264,15 +262,13 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
#define NS_OPER_STATES 6 /* Maximum number of states in operation */
#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
-#define OPT_PAGE256 0x00000001 /* 256-byte page chips */
#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
-#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */
#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
-#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
+#define OPT_SMALLPAGE (OPT_PAGE512) /* 512-byte page chips */
/* Remove action bits from state */
#define NS_STATE(x) ((x) & ~ACTION_MASK)
@@ -287,6 +283,11 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
#define NS_MAX_HELD_PAGES 16
+struct nandsim_debug_info {
+ struct dentry *dfs_root;
+ struct dentry *dfs_wear_report;
+};
+
/*
* A union to represent flash memory contents and flash buffer.
*/
@@ -335,7 +336,6 @@ struct nandsim {
uint pgsec; /* number of pages per sector */
uint secshift; /* bits number in sector size */
uint pgshift; /* bits number in page size */
- uint oobshift; /* bits number in OOB size */
uint pgaddrbytes; /* bytes per page address */
uint secaddrbytes; /* bytes per sector address */
uint idbytes; /* the number ID bytes that this chip outputs */
@@ -362,10 +362,12 @@ struct nandsim {
/* Fields needed when using a cache file */
struct file *cfile; /* Open file */
- unsigned char *pages_written; /* Which pages have been written */
+ unsigned long *pages_written; /* Which pages have been written */
void *file_buf;
struct page *held_pages[NS_MAX_HELD_PAGES];
int held_cnt;
+
+ struct nandsim_debug_info dbg;
};
/*
@@ -401,8 +403,6 @@ static struct nandsim_operations {
{OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
/* Read status */
{OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
- /* Read multi-plane status */
- {OPT_SMARTMEDIA, {STATE_CMD_STATUS_M, STATE_DATAOUT_STATUS_M, STATE_READY}},
/* Read ID */
{OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
/* Large page devices read page */
@@ -443,12 +443,122 @@ static LIST_HEAD(grave_pages);
static unsigned long *erase_block_wear = NULL;
static unsigned int wear_eb_count = 0;
static unsigned long total_wear = 0;
-static unsigned int rptwear_cnt = 0;
/* MTD structure for NAND controller */
static struct mtd_info *nsmtd;
-static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
+static int nandsim_debugfs_show(struct seq_file *m, void *private)
+{
+ unsigned long wmin = -1, wmax = 0, avg;
+ unsigned long deciles[10], decile_max[10], tot = 0;
+ unsigned int i;
+
+ /* Calc wear stats */
+ for (i = 0; i < wear_eb_count; ++i) {
+ unsigned long wear = erase_block_wear[i];
+ if (wear < wmin)
+ wmin = wear;
+ if (wear > wmax)
+ wmax = wear;
+ tot += wear;
+ }
+
+ for (i = 0; i < 9; ++i) {
+ deciles[i] = 0;
+ decile_max[i] = (wmax * (i + 1) + 5) / 10;
+ }
+ deciles[9] = 0;
+ decile_max[9] = wmax;
+ for (i = 0; i < wear_eb_count; ++i) {
+ int d;
+ unsigned long wear = erase_block_wear[i];
+ for (d = 0; d < 10; ++d)
+ if (wear <= decile_max[d]) {
+ deciles[d] += 1;
+ break;
+ }
+ }
+ avg = tot / wear_eb_count;
+
+ /* Output wear report */
+ seq_printf(m, "Total numbers of erases: %lu\n", tot);
+ seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count);
+ seq_printf(m, "Average number of erases: %lu\n", avg);
+ seq_printf(m, "Maximum number of erases: %lu\n", wmax);
+ seq_printf(m, "Minimum number of erases: %lu\n", wmin);
+ for (i = 0; i < 10; ++i) {
+ unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
+ if (from > decile_max[i])
+ continue;
+ seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
+ from,
+ decile_max[i],
+ deciles[i]);
+ }
+
+ return 0;
+}
+
+static int nandsim_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nandsim_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations dfs_fops = {
+ .open = nandsim_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * nandsim_debugfs_create - initialize debugfs
+ * @dev: nandsim device description object
+ *
+ * This function creates all debugfs files for the nandsim device @dev. Returns zero in
REPLACED_BY_GR_REPLACE
+ * case of success and a negative error code in case of failure.
+ */
+static int nandsim_debugfs_create(struct nandsim *dev)
+{
+ struct nandsim_debug_info *dbg = &dev->dbg;
+ struct dentry *dent;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ dent = debugfs_create_dir("nandsim", NULL);
+ if (IS_ERR_OR_NULL(dent)) {
+ int err = dent ? -ENODEV : PTR_ERR(dent);
+
+ NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n",
+ err);
+ return err;
+ }
+ dbg->dfs_root = dent;
+
+ dent = debugfs_create_file("wear_report", S_IRUSR,
+ dbg->dfs_root, dev, &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ dbg->dfs_wear_report = dent;
+
+ return 0;
+
+out_remove:
+ debugfs_remove_recursive(dbg->dfs_root);
+ err = dent ? PTR_ERR(dent) : -ENODEV;
+ return err;
+}
+
+/**
+ * nandsim_debugfs_remove - destroy all debugfs files
+ */
+static void nandsim_debugfs_remove(struct nandsim *ns)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove_recursive(ns->dbg.dfs_root);
+}
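
With rptwear gone, the wear histogram is produced on demand when the debugfs file is read (nandsim/wear_report under the debugfs mount point). The decile loop in nandsim_debugfs_show() is easier to follow in isolation; a standalone sketch of the same binning over a toy wear array:

#include <stdio.h>

int main(void)
{
	unsigned long wear[] = { 3, 7, 7, 12, 0, 9, 15, 15 };	/* per-eraseblock counts */
	unsigned int n = sizeof(wear) / sizeof(wear[0]);
	unsigned long wmax = 0, deciles[10] = { 0 }, decile_max[10];
	unsigned int i, d;

	for (i = 0; i < n; i++)
		if (wear[i] > wmax)
			wmax = wear[i];

	/* bucket d covers counts up to roughly (d+1)/10 of the maximum */
	for (d = 0; d < 10; d++)
		decile_max[d] = (d == 9) ? wmax : (wmax * (d + 1) + 5) / 10;

	for (i = 0; i < n; i++)
		for (d = 0; d < 10; d++)
			if (wear[i] <= decile_max[d]) {
				deciles[d]++;
				break;
			}

	for (d = 0; d < 10; d++)
		printf("<= %lu erases: %lu blocks\n", decile_max[d], deciles[d]);
	return 0;
}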
/*
* Allocate array of page pointers, create slab allocation for an array
@@ -465,17 +575,18 @@ static int alloc_device(struct nandsim *ns)
cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
if (IS_ERR(cfile))
return PTR_ERR(cfile);
- if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
+ if (!(cfile->f_mode & FMODE_CAN_READ)) {
NS_ERR("alloc_device: cache file not readable\n");
err = -EINVAL;
goto err_close;
}
- if (!cfile->f_op->write && !cfile->f_op->aio_write) {
+ if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
NS_ERR("alloc_device: cache file not writeable\n");
err = -EINVAL;
goto err_close;
}
- ns->pages_written = vzalloc(ns->geom.pgnum);
+ ns->pages_written = vzalloc(BITS_TO_LONGS(ns->geom.pgnum) *
+ sizeof(unsigned long));
if (!ns->pages_written) {
NS_ERR("alloc_device: unable to allocate pages written array\n");
err = -ENOMEM;
@@ -542,15 +653,7 @@ static void free_device(struct nandsim *ns)
static char *get_partition_name(int i)
{
- char buf[64];
- sprintf(buf, "NAND simulator partition %d", i);
- return kstrdup(buf, GFP_KERNEL);
-}
-
-static uint64_t divide(uint64_t n, uint32_t d)
-{
- do_div(n, d);
- return n;
+ return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
}
/*
@@ -581,20 +684,16 @@ static int init_nandsim(struct mtd_info *mtd)
ns->geom.oobsz = mtd->oobsize;
ns->geom.secsz = mtd->erasesize;
ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
- ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
+ ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
ns->geom.secshift = ffs(ns->geom.secsz) - 1;
ns->geom.pgshift = chip->page_shift;
- ns->geom.oobshift = ffs(ns->geom.oobsz) - 1;
ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
ns->options = 0;
- if (ns->geom.pgsz == 256) {
- ns->options |= OPT_PAGE256;
- }
- else if (ns->geom.pgsz == 512) {
- ns->options |= (OPT_PAGE512 | OPT_AUTOINCR);
+ if (ns->geom.pgsz == 512) {
+ ns->options |= OPT_PAGE512;
if (ns->busw == 8)
ns->options |= OPT_PAGE512_8BIT;
} else if (ns->geom.pgsz == 2048) {
@@ -659,14 +758,6 @@ static int init_nandsim(struct mtd_info *mtd)
ns->nbparts += 1;
}
- /* Detect how many ID bytes the NAND chip outputs */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (second_id_byte != nand_flash_ids[i].id)
- continue;
- if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
- ns->options |= OPT_AUTOINCR;
- }
-
if (ns->busw == 16)
NS_WARN("16-bit flashes support wasn't tested\n");
@@ -680,7 +771,7 @@ static int init_nandsim(struct mtd_info *mtd)
printk("bus width: %u\n", ns->busw);
printk("bits in sector size: %u\n", ns->geom.secshift);
printk("bits in page size: %u\n", ns->geom.pgshift);
- printk("bits in OOB size: %u\n", ns->geom.oobshift);
+ printk("bits in OOB size: %u\n", ffs(ns->geom.oobsz) - 1);
printk("flash size with OOB: %llu KiB\n",
(unsigned long long)ns->geom.totszoob >> 10);
printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
@@ -737,7 +828,7 @@ static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
return -EINVAL;
}
offset = erase_block_no * ns->geom.secsz;
- if (mtd->block_markbad(mtd, offset)) {
+ if (mtd_block_markbad(mtd, offset)) {
NS_ERR("invalid badblocks.\n");
return -EINVAL;
}
@@ -922,9 +1013,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
{
size_t mem;
- if (!rptwear)
- return 0;
- wear_eb_count = divide(mtd->size, mtd->erasesize);
+ wear_eb_count = div_u64(mtd->size, mtd->erasesize);
mem = wear_eb_count * sizeof(unsigned long);
if (mem / sizeof(unsigned long) != wear_eb_count) {
NS_ERR("Too many erase blocks for wear reporting\n");
@@ -940,64 +1029,18 @@ static int setup_wear_reporting(struct mtd_info *mtd)
static void update_wear(unsigned int erase_block_no)
{
- unsigned long wmin = -1, wmax = 0, avg;
- unsigned long deciles[10], decile_max[10], tot = 0;
- unsigned int i;
-
if (!erase_block_wear)
return;
total_wear += 1;
+ /*
+ * TODO: Notify this through a debugfs entry,
+ * instead of showing an error message.
+ */
if (total_wear == 0)
NS_ERR("Erase counter total overflow\n");
erase_block_wear[erase_block_no] += 1;
if (erase_block_wear[erase_block_no] == 0)
NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
- rptwear_cnt += 1;
- if (rptwear_cnt < rptwear)
- return;
- rptwear_cnt = 0;
- /* Calc wear stats */
- for (i = 0; i < wear_eb_count; ++i) {
- unsigned long wear = erase_block_wear[i];
- if (wear < wmin)
- wmin = wear;
- if (wear > wmax)
- wmax = wear;
- tot += wear;
- }
- for (i = 0; i < 9; ++i) {
- deciles[i] = 0;
- decile_max[i] = (wmax * (i + 1) + 5) / 10;
- }
- deciles[9] = 0;
- decile_max[9] = wmax;
- for (i = 0; i < wear_eb_count; ++i) {
- int d;
- unsigned long wear = erase_block_wear[i];
- for (d = 0; d < 10; ++d)
- if (wear <= decile_max[d]) {
- deciles[d] += 1;
- break;
- }
- }
- avg = tot / wear_eb_count;
- /* Output wear report */
- NS_INFO("*** Wear Report ***\n");
- NS_INFO("Total numbers of erases: %lu\n", tot);
- NS_INFO("Number of erase blocks: %u\n", wear_eb_count);
- NS_INFO("Average number of erases: %lu\n", avg);
- NS_INFO("Maximum number of erases: %lu\n", wmax);
- NS_INFO("Minimum number of erases: %lu\n", wmin);
- for (i = 0; i < 10; ++i) {
- unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
- if (from > decile_max[i])
- continue;
- NS_INFO("Number of ebs with erase counts from %lu to %lu : %lu\n",
- from,
- decile_max[i],
- deciles[i]);
- }
- NS_INFO("*** End of Wear Report ***\n");
}
/*
@@ -1020,8 +1063,6 @@ static char *get_state_name(uint32_t state)
return "STATE_CMD_ERASE1";
case STATE_CMD_STATUS:
return "STATE_CMD_STATUS";
- case STATE_CMD_STATUS_M:
- return "STATE_CMD_STATUS_M";
case STATE_CMD_SEQIN:
return "STATE_CMD_SEQIN";
case STATE_CMD_READID:
@@ -1086,7 +1127,6 @@ static int check_command(int cmd)
case NAND_CMD_RNDOUTSTART:
return 0;
- case NAND_CMD_STATUS_MULTI:
default:
return 1;
}
@@ -1112,8 +1152,6 @@ static uint32_t get_state_by_command(unsigned command)
return STATE_CMD_ERASE1;
case NAND_CMD_STATUS:
return STATE_CMD_STATUS;
- case NAND_CMD_STATUS_MULTI:
- return STATE_CMD_STATUS_M;
case NAND_CMD_SEQIN:
return STATE_CMD_SEQIN;
case NAND_CMD_READID:
@@ -1349,40 +1387,32 @@ static void clear_memalloc(int memalloc)
current->flags &= ~PF_MEMALLOC;
}
-static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
{
- mm_segment_t old_fs;
ssize_t tx;
int err, memalloc;
- err = get_pages(ns, file, count, *pos);
+ err = get_pages(ns, file, count, pos);
if (err)
return err;
- old_fs = get_fs();
- set_fs(get_ds());
memalloc = set_memalloc();
- tx = vfs_read(file, (char __user *)buf, count, pos);
+ tx = kernel_read(file, pos, buf, count);
clear_memalloc(memalloc);
- set_fs(old_fs);
put_pages(ns);
return tx;
}
-static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
{
- mm_segment_t old_fs;
ssize_t tx;
int err, memalloc;
- err = get_pages(ns, file, count, *pos);
+ err = get_pages(ns, file, count, pos);
if (err)
return err;
- old_fs = get_fs();
- set_fs(get_ds());
memalloc = set_memalloc();
- tx = vfs_write(file, (char __user *)buf, count, pos);
+ tx = kernel_write(file, buf, count, pos);
clear_memalloc(memalloc);
- set_fs(old_fs);
put_pages(ns);
return tx;
}
@@ -1403,29 +1433,26 @@ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
}
-int do_read_error(struct nandsim *ns, int num)
+static int do_read_error(struct nandsim *ns, int num)
{
unsigned int page_no = ns->regs.row;
if (read_error(page_no)) {
- int i;
- memset(ns->buf.byte, 0xFF, num);
- for (i = 0; i < num; ++i)
- ns->buf.byte[i] = random32();
+ prandom_bytes(ns->buf.byte, num);
NS_WARN("simulating read error in page %u\n", page_no);
return 1;
}
return 0;
}
-void do_bit_flips(struct nandsim *ns, int num)
+static void do_bit_flips(struct nandsim *ns, int num)
{
- if (bitflips && random32() < (1 << 22)) {
+ if (bitflips && prandom_u32() < (1 << 22)) {
int flips = 1;
if (bitflips > 1)
- flips = (random32() % (int) bitflips) + 1;
+ flips = (prandom_u32() % (int) bitflips) + 1;
while (flips--) {
- int pos = random32() % (num * 8);
+ int pos = prandom_u32() % (num * 8);
ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
NS_WARN("read_page: flipping bit %d in page %d "
"reading from %d ecc: corrected=%u failed=%u\n",
@@ -1443,7 +1470,7 @@ static void read_page(struct nandsim *ns, int num)
union ns_mem *mypage;
if (ns->cfile) {
- if (!ns->pages_written[ns->regs.row]) {
+ if (!test_bit(ns->regs.row, ns->pages_written)) {
NS_DBG("read_page: page %d not written\n", ns->regs.row);
memset(ns->buf.byte, 0xFF, num);
} else {
@@ -1454,8 +1481,8 @@ static void read_page(struct nandsim *ns, int num)
ns->regs.row, ns->regs.column + ns->regs.off);
if (do_read_error(ns, num))
return;
- pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
- tx = read_file(ns, ns->cfile, ns->buf.byte, num, &pos);
+ pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
+ tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
if (tx != num) {
NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
return;
@@ -1489,9 +1516,9 @@ static void erase_sector(struct nandsim *ns)
if (ns->cfile) {
for (i = 0; i < ns->geom.pgsec; i++)
- if (ns->pages_written[ns->regs.row + i]) {
+ if (__test_and_clear_bit(ns->regs.row + i,
+ ns->pages_written)) {
NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
- ns->pages_written[ns->regs.row + i] = 0;
}
return;
}
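
pages_written is now a bitmap rather than a byte array, which is why the allocation earlier in this patch becomes BITS_TO_LONGS(pgnum) * sizeof(unsigned long) and the per-page accesses become test_bit()/__set_bit()/__test_and_clear_bit(). A userspace sketch of the same one-bit-per-page bookkeeping (hand-rolled here; the driver itself uses the kernel bitops):

#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define LONGS_FOR(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct page_map {
	unsigned long *written;		/* one bit per simulated page */
	unsigned int pages;
};

static int page_map_init(struct page_map *m, unsigned int pages)
{
	m->written = calloc(LONGS_FOR(pages), sizeof(unsigned long));
	m->pages = pages;
	return m->written ? 0 : -1;
}

static void mark_written(struct page_map *m, unsigned int page)
{
	m->written[page / BITS_PER_LONG] |= 1ul << (page % BITS_PER_LONG);
}

static int is_written(const struct page_map *m, unsigned int page)
{
	return (m->written[page / BITS_PER_LONG] >> (page % BITS_PER_LONG)) & 1;
}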
@@ -1517,20 +1544,19 @@ static int prog_page(struct nandsim *ns, int num)
u_char *pg_off;
if (ns->cfile) {
- loff_t off, pos;
+ loff_t off;
ssize_t tx;
int all;
NS_DBG("prog_page: writing page %d\n", ns->regs.row);
pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
- off = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
- if (!ns->pages_written[ns->regs.row]) {
+ off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
+ if (!test_bit(ns->regs.row, ns->pages_written)) {
all = 1;
memset(ns->file_buf, 0xff, ns->geom.pgszoob);
} else {
all = 0;
- pos = off;
- tx = read_file(ns, ns->cfile, pg_off, num, &pos);
+ tx = read_file(ns, ns->cfile, pg_off, num, off);
if (tx != num) {
NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
@@ -1539,16 +1565,15 @@ static int prog_page(struct nandsim *ns, int num)
for (i = 0; i < num; i++)
pg_off[i] &= ns->buf.byte[i];
if (all) {
- pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
- tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, &pos);
+ loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
+ tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos);
if (tx != ns->geom.pgszoob) {
NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
}
- ns->pages_written[ns->regs.row] = 1;
+ __set_bit(ns->regs.row, ns->pages_written);
} else {
- pos = off;
- tx = write_file(ns, ns->cfile, pg_off, num, &pos);
+ tx = write_file(ns, ns->cfile, pg_off, num, off);
if (tx != num) {
NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
@@ -1936,20 +1961,8 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd)
if (ns->regs.count == ns->regs.num) {
NS_DBG("read_byte: all bytes were read\n");
- /*
- * The OPT_AUTOINCR allows to read next consecutive pages without
- * new read operation cycle.
- */
- if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
- ns->regs.count = 0;
- if (ns->regs.row + 1 < ns->geom.pgnum)
- ns->regs.row += 1;
- NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
- do_state_action(ns, ACTION_CPY);
- }
- else if (NS_STATE(ns->nxstate) == STATE_READY)
+ if (NS_STATE(ns->nxstate) == STATE_READY)
switch_state(ns);
-
}
return outb;
@@ -2203,33 +2216,13 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
ns->regs.count += len;
if (ns->regs.count == ns->regs.num) {
- if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
- ns->regs.count = 0;
- if (ns->regs.row + 1 < ns->geom.pgnum)
- ns->regs.row += 1;
- NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
- do_state_action(ns, ACTION_CPY);
- }
- else if (NS_STATE(ns->nxstate) == STATE_READY)
+ if (NS_STATE(ns->nxstate) == STATE_READY)
switch_state(ns);
}
return;
}
-static int ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- ns_nand_read_buf(mtd, (u_char *)&ns_verify_buf[0], len);
-
- if (!memcmp(buf, &ns_verify_buf[0], len)) {
- NS_DBG("verify_buf: the buffer is OK\n");
- return 0;
- } else {
- NS_DBG("verify_buf: the buffer is wrong\n");
- return -EFAULT;
- }
-}
-
/*
* Module initialization function
*/
@@ -2264,7 +2257,6 @@ static int __init ns_init_module(void)
chip->dev_ready = ns_device_ready;
chip->write_buf = ns_nand_write_buf;
chip->read_buf = ns_nand_read_buf;
- chip->verify_buf = ns_nand_verify_buf;
chip->read_word = ns_nand_read_word;
chip->ecc.mode = NAND_ECC_SOFT;
/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
@@ -2273,9 +2265,9 @@ static int __init ns_init_module(void)
switch (bbt) {
case 2:
- chip->options |= NAND_USE_FLASH_BBT_NO_OOB;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
case 1:
- chip->options |= NAND_USE_FLASH_BBT;
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
case 0:
break;
default:
@@ -2293,7 +2285,7 @@ static int __init ns_init_module(void)
nand->geom.idbytes = 2;
nand->regs.status = NS_STATUS_OK(nand);
nand->nxstate = STATE_UNKNOWN;
- nand->options |= OPT_PAGE256; /* temporary value */
+ nand->options |= OPT_PAGE512; /* temporary value */
nand->ids[0] = first_id_byte;
nand->ids[1] = second_id_byte;
nand->ids[2] = third_id_byte;
@@ -2361,6 +2353,7 @@ static int __init ns_init_module(void)
uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
if (new_size >> overridesize != nsmtd->erasesize) {
NS_ERR("overridesize is too big\n");
+ retval = -EINVAL;
goto err_exit;
}
/* N.B. This relies on nand_scan not doing anything with the size before we change it */
@@ -2373,10 +2366,13 @@ static int __init ns_init_module(void)
if ((retval = setup_wear_reporting(nsmtd)) != 0)
goto err_exit;
+ if ((retval = nandsim_debugfs_create(nand)) != 0)
+ goto err_exit;
+
if ((retval = init_nandsim(nsmtd)) != 0)
goto err_exit;
- if ((retval = nand_default_bbt(nsmtd)) != 0)
+ if ((retval = chip->scan_bbt(nsmtd)) != 0)
goto err_exit;
if ((retval = parse_badblocks(nand, nsmtd)) != 0)
@@ -2412,6 +2408,7 @@ static void __exit ns_cleanup_module(void)
struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
int i;
+ nandsim_debugfs_remove(ns);
free_nandsim(ns); /* Free nandsim private resources */
nand_release(nsmtd); /* Unregister driver */
for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ea2dea8a9c8..69eaba690a9 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -30,6 +30,7 @@
#include <linux/mtd/ndfc.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/io.h>
@@ -42,7 +43,6 @@ struct ndfc_controller {
struct nand_chip chip;
int chip_select;
struct nand_hw_control ndfc_control;
- struct mtd_partition *parts;
};
static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
@@ -141,31 +141,15 @@ static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
out_be32(ndfc->ndfcbase + NDFC_DATA, *p++);
}
-static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
- struct nand_chip *chip = mtd->priv;
- struct ndfc_controller *ndfc = chip->priv;
- uint32_t *p = (uint32_t *) buf;
-
- for(;len > 0; len -= 4)
- if (*p++ != in_be32(ndfc->ndfcbase + NDFC_DATA))
- return -EFAULT;
- return 0;
-}
-
/*
* Initialize chip structure
*/
static int ndfc_chip_init(struct ndfc_controller *ndfc,
struct device_node *node)
{
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- static const char *part_types[] = { "cmdlinepart", NULL };
-#else
- static const char *part_types[] = { NULL };
-#endif
struct device_node *flash_np;
struct nand_chip *chip = &ndfc->chip;
+ struct mtd_part_parser_data ppdata;
int ret;
chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
@@ -177,13 +161,13 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
chip->controller = &ndfc->ndfc_control;
chip->read_buf = ndfc_read_buf;
chip->write_buf = ndfc_write_buf;
- chip->verify_buf = ndfc_verify_buf;
chip->ecc.correct = nand_correct_data;
chip->ecc.hwctl = ndfc_enable_hwecc;
chip->ecc.calculate = ndfc_calculate_ecc;
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.size = 256;
chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
chip->priv = ndfc;
ndfc->mtd.priv = chip;
@@ -193,6 +177,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
if (!flash_np)
return -ENODEV;
+ ppdata.of_node = flash_np;
ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
dev_name(&ndfc->ofdev->dev), flash_np->name);
if (!ndfc->mtd.name) {
@@ -204,18 +189,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
if (ret)
goto err;
- ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0);
- if (ret < 0)
- goto err;
-
- if (ret == 0) {
- ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np,
- &ndfc->parts);
- if (ret < 0)
- goto err;
- }
-
- ret = mtd_device_register(&ndfc->mtd, ndfc->parts, ret);
+ ret = mtd_device_parse_register(&ndfc->mtd, NULL, &ppdata, NULL, 0);
err:
of_node_put(flash_np);
@@ -224,7 +198,7 @@ err:
return ret;
}
-static int __devinit ndfc_probe(struct platform_device *ofdev)
+static int ndfc_probe(struct platform_device *ofdev)
{
struct ndfc_controller *ndfc;
const __be32 *reg;
@@ -283,11 +257,12 @@ static int __devinit ndfc_probe(struct platform_device *ofdev)
return 0;
}
-static int __devexit ndfc_remove(struct platform_device *ofdev)
+static int ndfc_remove(struct platform_device *ofdev)
{
struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
nand_release(&ndfc->mtd);
+ kfree(ndfc->mtd.name);
return 0;
}
@@ -305,21 +280,10 @@ static struct platform_driver ndfc_driver = {
.of_match_table = ndfc_match,
},
.probe = ndfc_probe,
- .remove = __devexit_p(ndfc_remove),
+ .remove = ndfc_remove,
};
-static int __init ndfc_nand_init(void)
-{
- return platform_driver_register(&ndfc_driver);
-}
-
-static void __exit ndfc_nand_exit(void)
-{
- platform_driver_unregister(&ndfc_driver);
-}
-
-module_init(ndfc_nand_init);
-module_exit(ndfc_nand_exit);
+module_platform_driver(ndfc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
deleted file mode 100644
index b6a5c86ab31..00000000000
--- a/drivers/mtd/nand/nomadik_nand.c
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * drivers/mtd/nand/nomadik_nand.c
- *
- * Overview:
- * Driver for on-board NAND flash on Nomadik Platforms
- *
- * Copyright © 2007 STMicroelectronics Pvt. Ltd.
- * Author: Sachin Verma <sachin.verma@st.com>
- *
- * Copyright © 2009 Alessandro Rubini
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/partitions.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <mach/nand.h>
-#include <mach/fsmc.h>
-
-#include <mtd/mtd-abi.h>
-
-struct nomadik_nand_host {
- struct mtd_info mtd;
- struct nand_chip nand;
- void __iomem *data_va;
- void __iomem *cmd_va;
- void __iomem *addr_va;
- struct nand_bbt_descr *bbt_desc;
-};
-
-static struct nand_ecclayout nomadik_ecc_layout = {
- .eccbytes = 3 * 4,
- .eccpos = { /* each subpage has 16 bytes: pos 2,3,4 hosts ECC */
- 0x02, 0x03, 0x04,
- 0x12, 0x13, 0x14,
- 0x22, 0x23, 0x24,
- 0x32, 0x33, 0x34},
- /* let's keep bytes 5,6,7 for us, just in case we change ECC algo */
- .oobfree = { {0x08, 0x08}, {0x18, 0x08}, {0x28, 0x08}, {0x38, 0x08} },
-};
-
-static void nomadik_ecc_control(struct mtd_info *mtd, int mode)
-{
- /* No need to enable hw ecc, it's on by default */
-}
-
-static void nomadik_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
- struct nand_chip *nand = mtd->priv;
- struct nomadik_nand_host *host = nand->priv;
-
- if (cmd == NAND_CMD_NONE)
- return;
-
- if (ctrl & NAND_CLE)
- writeb(cmd, host->cmd_va);
- else
- writeb(cmd, host->addr_va);
-}
-
-static int nomadik_nand_probe(struct platform_device *pdev)
-{
- struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
- struct nomadik_nand_host *host;
- struct mtd_info *mtd;
- struct nand_chip *nand;
- struct resource *res;
- int ret = 0;
-
- /* Allocate memory for the device structure (and zero it) */
- host = kzalloc(sizeof(struct nomadik_nand_host), GFP_KERNEL);
- if (!host) {
- dev_err(&pdev->dev, "Failed to allocate device structure.\n");
- return -ENOMEM;
- }
-
- /* Call the client's init function, if any */
- if (pdata->init)
- ret = pdata->init();
- if (ret < 0) {
- dev_err(&pdev->dev, "Init function failed\n");
- goto err;
- }
-
- /* ioremap three regions */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
- if (!res) {
- ret = -EIO;
- goto err_unmap;
- }
- host->addr_va = ioremap(res->start, resource_size(res));
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
- if (!res) {
- ret = -EIO;
- goto err_unmap;
- }
- host->data_va = ioremap(res->start, resource_size(res));
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
- if (!res) {
- ret = -EIO;
- goto err_unmap;
- }
- host->cmd_va = ioremap(res->start, resource_size(res));
-
- if (!host->addr_va || !host->data_va || !host->cmd_va) {
- ret = -ENOMEM;
- goto err_unmap;
- }
-
- /* Link all private pointers */
- mtd = &host->mtd;
- nand = &host->nand;
- mtd->priv = nand;
- nand->priv = host;
-
- host->mtd.owner = THIS_MODULE;
- nand->IO_ADDR_R = host->data_va;
- nand->IO_ADDR_W = host->data_va;
- nand->cmd_ctrl = nomadik_cmd_ctrl;
-
- /*
- * This stanza declares ECC_HW but uses soft routines. It's because
- * HW claims to make the calculation but not the correction. However,
- * I haven't managed to get the desired data out of it until now.
- */
- nand->ecc.mode = NAND_ECC_SOFT;
- nand->ecc.layout = &nomadik_ecc_layout;
- nand->ecc.hwctl = nomadik_ecc_control;
- nand->ecc.size = 512;
- nand->ecc.bytes = 3;
-
- nand->options = pdata->options;
-
- /*
- * Scan to find existence of the device
- */
- if (nand_scan(&host->mtd, 1)) {
- ret = -ENXIO;
- goto err_unmap;
- }
-
- mtd_device_register(&host->mtd, pdata->parts, pdata->nparts);
-
- platform_set_drvdata(pdev, host);
- return 0;
-
- err_unmap:
- if (host->cmd_va)
- iounmap(host->cmd_va);
- if (host->data_va)
- iounmap(host->data_va);
- if (host->addr_va)
- iounmap(host->addr_va);
- err:
- kfree(host);
- return ret;
-}
-
-/*
- * Clean up routine
- */
-static int nomadik_nand_remove(struct platform_device *pdev)
-{
- struct nomadik_nand_host *host = platform_get_drvdata(pdev);
- struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
-
- if (pdata->exit)
- pdata->exit();
-
- if (host) {
- iounmap(host->cmd_va);
- iounmap(host->data_va);
- iounmap(host->addr_va);
- kfree(host);
- }
- return 0;
-}
-
-static int nomadik_nand_suspend(struct device *dev)
-{
- struct nomadik_nand_host *host = dev_get_drvdata(dev);
- int ret = 0;
- if (host)
- ret = host->mtd.suspend(&host->mtd);
- return ret;
-}
-
-static int nomadik_nand_resume(struct device *dev)
-{
- struct nomadik_nand_host *host = dev_get_drvdata(dev);
- if (host)
- host->mtd.resume(&host->mtd);
- return 0;
-}
-
-static const struct dev_pm_ops nomadik_nand_pm_ops = {
- .suspend = nomadik_nand_suspend,
- .resume = nomadik_nand_resume,
-};
-
-static struct platform_driver nomadik_nand_driver = {
- .probe = nomadik_nand_probe,
- .remove = nomadik_nand_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = "nomadik_nand",
- .pm = &nomadik_nand_pm_ops,
- },
-};
-
-static int __init nand_nomadik_init(void)
-{
- pr_info("Nomadik NAND driver\n");
- return platform_driver_register(&nomadik_nand_driver);
-}
-
-static void __exit nand_nomadik_exit(void)
-{
- platform_driver_unregister(&nomadik_nand_driver);
-}
-
-module_init(nand_nomadik_init);
-module_exit(nand_nomadik_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)");
-MODULE_DESCRIPTION("NAND driver for Nomadik Platform");
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 9c30a0b0317..e8a5fffd6ab 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -10,7 +10,6 @@
*/
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -112,22 +111,6 @@ static void nuc900_nand_write_buf(struct mtd_info *mtd,
write_data_reg(nand, buf[i]);
}
-static int nuc900_verify_buf(struct mtd_info *mtd,
- const unsigned char *buf, int len)
-{
- int i;
- struct nuc900_nand *nand;
-
- nand = container_of(mtd, struct nuc900_nand, mtd);
-
- for (i = 0; i < len; i++) {
- if (buf[i] != (unsigned char)read_data_reg(nand))
- return -EFAULT;
- }
-
- return 0;
-}
-
static int nuc900_check_rb(struct nuc900_nand *nand)
{
unsigned int val;
@@ -168,7 +151,8 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
if (column != -1 || page_addr != -1) {
if (column != -1) {
- if (chip->options & NAND_BUSWIDTH_16)
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
column >>= 1;
write_addr_reg(nand, column);
write_addr_reg(nand, column >> 8 | ENDADDR);
@@ -193,15 +177,6 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
case NAND_CMD_SEQIN:
case NAND_CMD_RNDIN:
case NAND_CMD_STATUS:
- case NAND_CMD_DEPLETE1:
- return;
-
- case NAND_CMD_STATUS_ERROR:
- case NAND_CMD_STATUS_ERROR0:
- case NAND_CMD_STATUS_ERROR1:
- case NAND_CMD_STATUS_ERROR2:
- case NAND_CMD_STATUS_ERROR3:
- udelay(chip->chip_delay);
return;
case NAND_CMD_RESET:
@@ -250,7 +225,7 @@ static void nuc900_nand_enable(struct nuc900_nand *nand)
val = __raw_readl(nand->reg + REG_FMICSR);
if (!(val & NAND_EN))
- __raw_writel(val | NAND_EN, REG_FMICSR);
+ __raw_writel(val | NAND_EN, nand->reg + REG_FMICSR);
val = __raw_readl(nand->reg + REG_SMCSR);
@@ -262,16 +237,14 @@ static void nuc900_nand_enable(struct nuc900_nand *nand)
spin_unlock(&nand->lock);
}
-static int __devinit nuc900_nand_probe(struct platform_device *pdev)
+static int nuc900_nand_probe(struct platform_device *pdev)
{
struct nuc900_nand *nuc900_nand;
struct nand_chip *chip;
- int retval;
struct resource *res;
- retval = 0;
-
- nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL);
+ nuc900_nand = devm_kzalloc(&pdev->dev, sizeof(struct nuc900_nand),
+ GFP_KERNEL);
if (!nuc900_nand)
return -ENOMEM;
chip = &(nuc900_nand->chip);
@@ -280,11 +253,9 @@ static int __devinit nuc900_nand_probe(struct platform_device *pdev)
nuc900_nand->mtd.owner = THIS_MODULE;
spin_lock_init(&nuc900_nand->lock);
- nuc900_nand->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(nuc900_nand->clk)) {
- retval = -ENOENT;
- goto fail1;
- }
+ nuc900_nand->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nuc900_nand->clk))
+ return -ENOENT;
clk_enable(nuc900_nand->clk);
chip->cmdfunc = nuc900_nand_command_lp;
@@ -292,89 +263,48 @@ static int __devinit nuc900_nand_probe(struct platform_device *pdev)
chip->read_byte = nuc900_nand_read_byte;
chip->write_buf = nuc900_nand_write_buf;
chip->read_buf = nuc900_nand_read_buf;
- chip->verify_buf = nuc900_verify_buf;
chip->chip_delay = 50;
chip->options = 0;
chip->ecc.mode = NAND_ECC_SOFT;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- retval = -ENXIO;
- goto fail1;
- }
-
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- retval = -EBUSY;
- goto fail1;
- }
-
- nuc900_nand->reg = ioremap(res->start, resource_size(res));
- if (!nuc900_nand->reg) {
- retval = -ENOMEM;
- goto fail2;
- }
+ nuc900_nand->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nuc900_nand->reg))
+ return PTR_ERR(nuc900_nand->reg);
nuc900_nand_enable(nuc900_nand);
- if (nand_scan(&(nuc900_nand->mtd), 1)) {
- retval = -ENXIO;
- goto fail3;
- }
+ if (nand_scan(&(nuc900_nand->mtd), 1))
+ return -ENXIO;
mtd_device_register(&(nuc900_nand->mtd), partitions,
ARRAY_SIZE(partitions));
platform_set_drvdata(pdev, nuc900_nand);
- return retval;
-
-fail3: iounmap(nuc900_nand->reg);
-fail2: release_mem_region(res->start, resource_size(res));
-fail1: kfree(nuc900_nand);
- return retval;
+ return 0;
}
-static int __devexit nuc900_nand_remove(struct platform_device *pdev)
+static int nuc900_nand_remove(struct platform_device *pdev)
{
struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
- struct resource *res;
-
- iounmap(nuc900_nand->reg);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
+ nand_release(&nuc900_nand->mtd);
clk_disable(nuc900_nand->clk);
- clk_put(nuc900_nand->clk);
-
- kfree(nuc900_nand);
-
- platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver nuc900_nand_driver = {
.probe = nuc900_nand_probe,
- .remove = __devexit_p(nuc900_nand_remove),
+ .remove = nuc900_nand_remove,
.driver = {
.name = "nuc900-fmi",
.owner = THIS_MODULE,
},
};
-static int __init nuc900_nand_init(void)
-{
- return platform_driver_register(&nuc900_nand_driver);
-}
-
-static void __exit nuc900_nand_exit(void)
-{
- platform_driver_unregister(&nuc900_nand_driver);
-}
-
-module_init(nuc900_nand_init);
-module_exit(nuc900_nand_exit);
+module_platform_driver(nuc900_nand_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
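The nuc900 conversion above replaces explicit request_mem_region()/ioremap()/clk_get()/kfree() error unwinding with device-managed (devm_*) allocations, which the driver core releases automatically when probe fails or the device is unbound. A minimal sketch of that pattern, for a hypothetical bar device rather than the actual nuc900 code:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct bar_priv {
	void __iomem *regs;
	struct clk *clk;
};

static int bar_probe(struct platform_device *pdev)
{
	struct bar_priv *priv;
	struct resource *res;

	/* freed automatically on probe failure or device removal */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* requests the region and maps it; both undone automatically */
	priv->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	/* the clock reference is likewise dropped automatically */
	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	platform_set_drvdata(pdev, priv);
	return 0;
}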
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 0db2c0e7656..f0ed92e210a 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -9,20 +9,26 @@
*/
#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/omap-dma.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
-#include <plat/dma.h>
-#include <plat/gpmc.h>
-#include <plat/nand.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/platform_data/elm.h>
+
+#include <linux/platform_data/mtd-nand-omap2.h>
#define DRIVER_NAME "omap2-nand"
#define OMAP_NAND_TIMEOUT_MS 5000
@@ -94,44 +100,139 @@
#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
-static const char *part_probes[] = { "cmdlinepart", NULL };
+#define PREFETCH_CONFIG1_CS_SHIFT 24
+#define ECC_CONFIG_CS_SHIFT 1
+#define CS_MASK 0x7
+#define ENABLE_PREFETCH (0x1 << 7)
+#define DMA_MPU_MODE_SHIFT 2
+#define ECCSIZE0_SHIFT 12
+#define ECCSIZE1_SHIFT 22
+#define ECC1RESULTSIZE 0x1
+#define ECCCLEAR 0x100
+#define ECC1 0x1
+#define PREFETCH_FIFOTHRESHOLD_MAX 0x40
+#define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
+#define PREFETCH_STATUS_COUNT(val) (val & 0x00003fff)
+#define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
+#define STATUS_BUFF_EMPTY 0x00000001
+
+#define OMAP24XX_DMA_GPMC 4
+
+#define SECTOR_BYTES 512
+/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
+#define BCH4_BIT_PAD 4
+
+/* GPMC ecc engine settings for read */
+#define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */
+#define BCH8R_ECC_SIZE0 0x1a /* ecc_size0 = 26 */
+#define BCH8R_ECC_SIZE1 0x2 /* ecc_size1 = 2 */
+#define BCH4R_ECC_SIZE0 0xd /* ecc_size0 = 13 */
+#define BCH4R_ECC_SIZE1 0x3 /* ecc_size1 = 3 */
+
+/* GPMC ecc engine settings for write */
+#define BCH_WRAPMODE_6 6 /* BCH wrap mode 6 */
+#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
+#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */
+
+#define BADBLOCK_MARKER_LENGTH 2
+
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
+ 0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
+ 0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
+ 0x07, 0x0e};
+static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
+ 0xac, 0x6b, 0xff, 0x99, 0x7b};
+static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
+#endif
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
-/* Define some generic bad / good block scan pattern which are used
- * while scanning a device for factory marked good / bad blocks
- */
-static uint8_t scan_ff_pattern[] = { 0xff };
-static struct nand_bbt_descr bb_descrip_flashbased = {
- .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
- .offs = 0,
- .len = 1,
- .pattern = scan_ff_pattern,
-};
-
struct omap_nand_info {
struct nand_hw_control controller;
struct omap_nand_platform_data *pdata;
struct mtd_info mtd;
- struct mtd_partition *parts;
struct nand_chip nand;
struct platform_device *pdev;
int gpmc_cs;
unsigned long phys_base;
+ enum omap_ecc ecc_opt;
struct completion comp;
- int dma_ch;
- int gpmc_irq;
+ struct dma_chan *dma;
+ int gpmc_irq_fifo;
+ int gpmc_irq_count;
enum {
OMAP_NAND_IO_READ = 0, /* read */
OMAP_NAND_IO_WRITE, /* write */
} iomode;
u_char *buf;
int buf_len;
+ struct gpmc_nand_regs reg;
+ /* fields specific for BCHx_HW ECC scheme */
+ struct device *elm_dev;
+ struct device_node *of_node;
};
/**
+ * omap_prefetch_enable - configures and starts prefetch transfer
+ * @cs: cs (chip select) number
+ * @fifo_th: fifo threshold to be used for read/ write
+ * @dma_mode: dma mode enable (1) or disable (0)
+ * @u32_count: number of bytes to be transferred
+ * @is_write: prefetch read (0) or post write (1) mode
+ * @info: NAND controller data, used to reach the GPMC prefetch registers
+ */
+static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
+ unsigned int u32_count, int is_write, struct omap_nand_info *info)
+{
+ u32 val;
+
+ if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
+ return -1;
+
+ if (readl(info->reg.gpmc_prefetch_control))
+ return -EBUSY;
+
+ /* Set the amount of bytes to be prefetched */
+ writel(u32_count, info->reg.gpmc_prefetch_config2);
+
+ /* Set dma/mpu mode, the prefetch read / post write and
+ * enable the engine. Also set which CS has requested it.
+ */
+ val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
+ PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
+ (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
+ writel(val, info->reg.gpmc_prefetch_config1);
+
+ /* Start the prefetch engine */
+ writel(0x1, info->reg.gpmc_prefetch_control);
+
+ return 0;
+}
+
+/**
+ * omap_prefetch_reset - disables and stops the prefetch engine
+ */
+static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
+{
+ u32 config1;
+
+ /* check if the same module/cs is trying to reset */
+ config1 = readl(info->reg.gpmc_prefetch_config1);
+ if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
+ return -EINVAL;
+
+ /* Stop the PFPW engine */
+ writel(0x0, info->reg.gpmc_prefetch_control);
+
+ /* Reset/disable the PFPW engine */
+ writel(0x0, info->reg.gpmc_prefetch_config1);
+
+ return 0;
+}
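To make the register packing in omap_prefetch_enable() above concrete, here is a standalone worked example of the PREFETCH_CONFIG1 value; it only mirrors the shifts and masks defined in this patch and is not driver code:

#include <stdint.h>
#include <stdio.h>

/* Same field layout as omap_prefetch_enable() uses for gpmc_prefetch_config1 */
#define PREFETCH_CONFIG1_CS_SHIFT	24
#define ENABLE_PREFETCH			(0x1 << 7)
#define DMA_MPU_MODE_SHIFT		2
#define PREFETCH_FIFOTHRESHOLD(val)	((val) << 8)

static uint32_t prefetch_config1(int cs, int fifo_th, int dma_mode, int is_write)
{
	return (cs << PREFETCH_CONFIG1_CS_SHIFT) |
	       PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
	       (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write);
}

int main(void)
{
	/* CS0, 64-byte FIFO threshold, CPU-driven (non-DMA) prefetch read */
	printf("0x%08x\n", prefetch_config1(0, 0x40, 0, 0));	/* 0x00004080 */
	return 0;
}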
+
+/**
* omap_hwcontrol - hardware specific access to control-lines
* @mtd: MTD device structure
* @cmd: command to device
@@ -149,13 +250,13 @@ static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
if (cmd != NAND_CMD_NONE) {
if (ctrl & NAND_CLE)
- gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
+ writeb(cmd, info->reg.gpmc_nand_command);
else if (ctrl & NAND_ALE)
- gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
+ writeb(cmd, info->reg.gpmc_nand_address);
else /* NAND_NCE */
- gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
+ writeb(cmd, info->reg.gpmc_nand_data);
}
}
@@ -189,7 +290,8 @@ static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
iowrite8(*p++, info->nand.IO_ADDR_W);
/* wait until buffer is available for write */
do {
- status = gpmc_read_status(GPMC_STATUS_BUFFER);
+ status = readl(info->reg.gpmc_status) &
+ STATUS_BUFF_EMPTY;
} while (!status);
}
}
@@ -226,7 +328,8 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
iowrite16(*p++, info->nand.IO_ADDR_W);
/* wait until buffer is available for write */
do {
- status = gpmc_read_status(GPMC_STATUS_BUFFER);
+ status = readl(info->reg.gpmc_status) &
+ STATUS_BUFF_EMPTY;
} while (!status);
}
}
@@ -256,8 +359,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
@@ -266,14 +369,15 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
omap_read_buf8(mtd, (u_char *)p, len);
} else {
do {
- r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ r_count = readl(info->reg.gpmc_prefetch_status);
+ r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
r_count = r_count >> 2;
ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
p += r_count;
len -= r_count << 2;
} while (len);
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
}
}
@@ -292,6 +396,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
int i = 0, ret = 0;
u16 *p = (u16 *)buf;
unsigned long tim, limit;
+ u32 val;
/* take care of subpage writes */
if (len % 2 != 0) {
@@ -301,8 +406,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
@@ -311,7 +416,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
omap_write_buf8(mtd, (u_char *)p, len);
} else {
while (len) {
- w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ w_count = readl(info->reg.gpmc_prefetch_status);
+ w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
w_count = w_count >> 1;
for (i = 0; (i < w_count) && len; i++, len -= 2)
iowrite16(*p++, info->nand.IO_ADDR_W);
@@ -320,27 +426,28 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
tim = 0;
limit = (loops_per_jiffy *
msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
- while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ do {
cpu_relax();
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = PREFETCH_STATUS_COUNT(val);
+ } while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
}
}
/*
- * omap_nand_dma_cb: callback on the completion of dma transfer
- * @lch: logical channel
- * @ch_satuts: channel status
+ * omap_nand_dma_callback: callback on the completion of dma transfer
* @data: pointer to completion data structure
*/
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
+static void omap_nand_dma_callback(void *data)
{
complete((struct completion *) data);
}
/*
- * omap_nand_dma_transfer: configer and start dma transfer
+ * omap_nand_dma_transfer: configure and start dma transfer
* @mtd: MTD device structure
* @addr: virtual address in RAM of source/destination
* @len: number of data bytes to be transferred
@@ -351,17 +458,14 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
+ struct dma_async_tx_descriptor *tx;
enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
DMA_FROM_DEVICE;
- dma_addr_t dma_addr;
- int ret;
+ struct scatterlist sg;
unsigned long tim, limit;
-
- /* The fifo depth is 64 bytes max.
- * But configure the FIFO-threahold to 32 to get a sync at each frame
- * and frame length is 32 bytes.
- */
- int buf_len = len >> 6;
+ unsigned n;
+ int ret;
+ u32 val;
if (addr >= high_memory) {
struct page *p1;
@@ -375,54 +479,53 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
}
- dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
- if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
+ sg_init_one(&sg, addr, len);
+ n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
+ if (n == 0) {
dev_err(&info->pdev->dev,
"Couldn't DMA map a %d byte buffer\n", len);
goto out_copy;
}
- if (is_write) {
- omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- info->phys_base, 0, 0);
- omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- dma_addr, 0, 0);
- omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
- 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
- OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
- } else {
- omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- info->phys_base, 0, 0);
- omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- dma_addr, 0, 0);
- omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
- 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
- OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
- }
+ tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
+ is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx)
+ goto out_copy_unmap;
+
+ tx->callback = omap_nand_dma_callback;
+ tx->callback_param = &info->comp;
+ dmaengine_submit(tx);
+
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
if (ret)
/* PFPW engine is busy, use cpu copy method */
- goto out_copy;
+ goto out_copy_unmap;
init_completion(&info->comp);
-
- omap_start_dma(info->dma_ch);
+ dma_async_issue_pending(info->dma);
/* setup and start DMA using dma_addr */
wait_for_completion(&info->comp);
tim = 0;
limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
- while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+
+ do {
cpu_relax();
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = PREFETCH_STATUS_COUNT(val);
+ } while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
- dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+ dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
return 0;
+out_copy_unmap:
+ dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
if (info->nand.options & NAND_BUSWIDTH_16)
is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -465,7 +568,7 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
}
/*
- * omap_nand_irq - GMPC irq handler
+ * omap_nand_irq - GPMC irq handler
* @this_irq: gpmc irq number
* @dev: omap_nand_info structure pointer is passed here
*/
@@ -473,13 +576,12 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
struct omap_nand_info *info = (struct omap_nand_info *) dev;
u32 bytes;
- u32 irq_stat;
- irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
- bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ bytes = readl(info->reg.gpmc_prefetch_status);
+ bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
- if (irq_stat & 0x2)
+ if (this_irq == info->gpmc_irq_count)
goto done;
if (info->buf_len && (info->buf_len < bytes))
@@ -496,20 +598,17 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
(u32 *)info->buf, bytes >> 2);
info->buf = info->buf + bytes;
- if (irq_stat & 0x2)
+ if (this_irq == info->gpmc_irq_count)
goto done;
}
- gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
return IRQ_HANDLED;
done:
complete(&info->comp);
- /* disable irq */
- gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
- /* clear status */
- gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+ disable_irq_nosync(info->gpmc_irq_fifo);
+ disable_irq_nosync(info->gpmc_irq_count);
return IRQ_HANDLED;
}
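The DMA hunks above move omap_nand_dma_transfer() from the legacy OMAP-private DMA calls to the generic dmaengine slave API (dma_map_sg, dmaengine_prep_slave_sg, dmaengine_submit, dma_async_issue_pending). A minimal sketch of that request/prep/submit/wait sequence, with hypothetical names and trimmed error handling, not the actual driver code:

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void xfer_done(void *data)
{
	complete(data);		/* wakes the waiter below */
}

/* One slave transfer: map the buffer, prepare a descriptor, run it, wait. */
static int do_slave_xfer(struct dma_chan *chan, void *buf, size_t len,
			 enum dma_transfer_direction dir)
{
	enum dma_data_direction map_dir = (dir == DMA_MEM_TO_DEV) ?
					  DMA_TO_DEVICE : DMA_FROM_DEVICE;
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_async_tx_descriptor *tx;
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(chan->device->dev, &sg, 1, map_dir))
		return -EIO;

	tx = dmaengine_prep_slave_sg(chan, &sg, 1, dir,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		goto unmap;

	tx->callback = xfer_done;
	tx->callback_param = &done;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);	/* actually start the transfer */

	wait_for_completion(&done);
unmap:
	dma_unmap_sg(chan->device->dev, &sg, 1, map_dir);
	return tx ? 0 : -EIO;
}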
@@ -536,22 +635,22 @@ static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
init_completion(&info->comp);
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy;
info->buf_len = len;
- /* enable irq */
- gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
- (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ enable_irq(info->gpmc_irq_count);
+ enable_irq(info->gpmc_irq_fifo);
/* waiting for read to complete */
wait_for_completion(&info->comp);
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
return;
out_copy:
@@ -574,6 +673,7 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
struct omap_nand_info, mtd);
int ret = 0;
unsigned long tim, limit;
+ u32 val;
if (len <= mtd->oobsize) {
omap_write_buf_pref(mtd, buf, len);
@@ -585,27 +685,31 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
init_completion(&info->comp);
/* configure and start prefetch transfer : size=24 */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy;
info->buf_len = len;
- /* enable irq */
- gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
- (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ enable_irq(info->gpmc_irq_count);
+ enable_irq(info->gpmc_irq_fifo);
/* waiting for write to complete */
wait_for_completion(&info->comp);
+
+ /* wait for the data to be flushed out before resetting the prefetch */
tim = 0;
limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
- while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ do {
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = PREFETCH_STATUS_COUNT(val);
cpu_relax();
+ } while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
return;
out_copy:
@@ -616,27 +720,6 @@ out_copy:
}
/**
- * omap_verify_buf - Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
- */
-static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
-{
- struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
- mtd);
- u16 *p = (u16 *) buf;
-
- len >>= 1;
- while (len--) {
- if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
* gen_true_ecc - This function will generate true ECC value
* @ecc_buf: buffer to store ecc code
*
@@ -744,12 +827,12 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
case 1:
/* Uncorrectable error */
- DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
+ pr_debug("ECC UNCORRECTED_ERROR 1\n");
return -1;
case 11:
/* UN-Correctable error */
- DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
+ pr_debug("ECC UNCORRECTED_ERROR B\n");
return -1;
case 12:
@@ -766,8 +849,8 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
- DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
- "offset: %d, bit: %d\n", find_byte, find_bit);
+ pr_debug("Correcting single bit ECC error at offset: "
+ "%d, bit: %d\n", find_byte, find_bit);
page_data[find_byte] ^= (1 << find_bit);
@@ -779,7 +862,7 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
ecc_data2[2] == 0)
return 0;
}
- DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
+ pr_debug("UNCORRECTED_ERROR default\n");
return -1;
}
}
@@ -845,7 +928,20 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
{
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
- return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
+ u32 val;
+
+ val = readl(info->reg.gpmc_ecc_config);
+ if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
+ return -EINVAL;
+
+ /* read ecc result */
+ val = readl(info->reg.gpmc_ecc1_result);
+ *ecc_code++ = val; /* P128e, ..., P1e */
+ *ecc_code++ = val >> 16; /* P128o, ..., P1o */
+ /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+ *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+
+ return 0;
}
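As an aside on the hunk above: all three Hamming ECC bytes are carved out of the single 32-bit GPMC ECC1_RESULT register. A standalone illustration of that unpacking (plain C with a made-up sample value, not the driver code):

#include <stdint.h>
#include <stdio.h>

/* Same bit carving as omap_calculate_ecc() above */
static void unpack_ecc1_result(uint32_t val, uint8_t ecc[3])
{
	ecc[0] = val & 0xff;		/* P128e, ..., P1e */
	ecc[1] = (val >> 16) & 0xff;	/* P128o, ..., P1o */
	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
	ecc[2] = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
}

int main(void)
{
	uint8_t ecc[3];

	unpack_ecc1_result(0x03120a55, ecc);	/* arbitrary sample value */
	printf("%02x %02x %02x\n", ecc[0], ecc[1], ecc[2]);
	return 0;
}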
/**
@@ -859,8 +955,34 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
mtd);
struct nand_chip *chip = mtd->priv;
unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ u32 val;
+
+ /* clear ecc and enable bits */
+ val = ECCCLEAR | ECC1;
+ writel(val, info->reg.gpmc_ecc_control);
- gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
+ /* program ecc and result sizes */
+ val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
+ ECC1RESULTSIZE);
+ writel(val, info->reg.gpmc_ecc_size_config);
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+ break;
+ case NAND_ECC_READSYN:
+ writel(ECCCLEAR, info->reg.gpmc_ecc_control);
+ break;
+ default:
+ dev_info(&info->pdev->dev,
+ "error: unrecognized Mode[%d]!\n", mode);
+ break;
+ }
+
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ writel(val, info->reg.gpmc_ecc_config);
}
/**
@@ -881,21 +1003,22 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
unsigned long timeo = jiffies;
- int status = NAND_STATUS_FAIL, state = this->state;
+ int status, state = this->state;
if (state == FL_ERASING)
- timeo += (HZ * 400) / 1000;
+ timeo += msecs_to_jiffies(400);
else
- timeo += (HZ * 20) / 1000;
+ timeo += msecs_to_jiffies(20);
- gpmc_nand_write(info->gpmc_cs,
- GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
+ writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
while (time_before(jiffies, timeo)) {
- status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
+ status = readb(info->reg.gpmc_nand_data);
if (status & NAND_STATUS_READY)
break;
cond_resched();
}
+
+ status = readb(info->reg.gpmc_nand_data);
return status;
}
@@ -909,38 +1032,618 @@ static int omap_dev_ready(struct mtd_info *mtd)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
- val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+ val = readl(info->reg.gpmc_status);
+
if ((val & 0x100) == 0x100) {
- /* Clear IRQ Interrupt */
- val |= 0x100;
- val &= ~(0x0);
- gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
+ return 1;
} else {
- unsigned int cnt = 0;
- while (cnt++ < 0x1FF) {
- if ((val & 0x100) == 0x100)
- return 0;
- val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+ return 0;
+ }
+}
+
+/**
+ * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ *
+ * When using BCH, sector size is hardcoded to 512 bytes.
+ * Wrapping mode 6 is used for both reading and writing if the ELM module
+ * is not used for error correction.
+ * On writing,
+ * eccsize0 = 0 (no additional protected byte in spare area)
+ * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
+ */
+static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
+{
+ unsigned int bch_type;
+ unsigned int dev_width, nsectors;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ enum omap_ecc ecc_opt = info->ecc_opt;
+ struct nand_chip *chip = mtd->priv;
+ u32 val, wr_mode;
+ unsigned int ecc_size1, ecc_size0;
+
+ /* GPMC configurations for calculating ECC */
+ switch (ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ bch_type = 0;
+ nsectors = 1;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ } else {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ }
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_type = 0;
+ nsectors = chip->ecc.steps;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = BCH_WRAPMODE_1;
+ ecc_size0 = BCH4R_ECC_SIZE0;
+ ecc_size1 = BCH4R_ECC_SIZE1;
+ } else {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ }
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ bch_type = 1;
+ nsectors = 1;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ } else {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ }
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_type = 1;
+ nsectors = chip->ecc.steps;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = BCH_WRAPMODE_1;
+ ecc_size0 = BCH8R_ECC_SIZE0;
+ ecc_size1 = BCH8R_ECC_SIZE1;
+ } else {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ }
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ bch_type = 0x2;
+ nsectors = chip->ecc.steps;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = 0x01;
+ ecc_size0 = 52; /* ECC bits in nibbles per sector */
+ ecc_size1 = 0; /* non-ECC bits in nibbles per sector */
+ } else {
+ wr_mode = 0x01;
+ ecc_size0 = 0; /* extra bits in nibbles per sector */
+ ecc_size1 = 52; /* OOB bits in nibbles per sector */
+ }
+ break;
+ default:
+ return;
+ }
+
+ writel(ECC1, info->reg.gpmc_ecc_control);
+
+ /* Configure ecc size for BCH */
+ val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
+ writel(val, info->reg.gpmc_ecc_size_config);
+
+ dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+
+ /* BCH configuration */
+ val = ((1 << 16) | /* enable BCH */
+ (bch_type << 12) | /* BCH4/BCH8/BCH16 */
+ (wr_mode << 8) | /* wrap mode */
+ (dev_width << 7) | /* bus width */
+ (((nsectors-1) & 0x7) << 4) | /* number of sectors */
+ (info->gpmc_cs << 1) | /* ECC CS */
+ (0x1)); /* enable ECC */
+
+ writel(val, info->reg.gpmc_ecc_config);
+
+ /* Clear ecc and enable bits */
+ writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+}
+
+static u8 bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f};
+static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
+ 0x97, 0x79, 0xe5, 0x24, 0xb5};
+
+/**
+ * omap_calculate_ecc_bch - Generate bytes of ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Support calculating of BCH4/8 ecc vectors for the page
+ */
+static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ int eccbytes = info->nand.ecc.bytes;
+ struct gpmc_nand_regs *gpmc_regs = &info->reg;
+ u8 *ecc_code;
+ unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+ u32 val;
+ int i, j;
+
+ nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
+ for (i = 0; i < nsectors; i++) {
+ ecc_code = ecc_calc;
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
+ bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
+ *ecc_code++ = (bch_val4 & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val3 & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val2 & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val1 & 0xFF);
+ break;
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val2 & 0xF) << 4) |
+ ((bch_val1 >> 28) & 0xF);
+ *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val1 & 0xF) << 4);
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ val = readl(gpmc_regs->gpmc_bch_result6[i]);
+ ecc_code[0] = ((val >> 8) & 0xFF);
+ ecc_code[1] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result5[i]);
+ ecc_code[2] = ((val >> 24) & 0xFF);
+ ecc_code[3] = ((val >> 16) & 0xFF);
+ ecc_code[4] = ((val >> 8) & 0xFF);
+ ecc_code[5] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result4[i]);
+ ecc_code[6] = ((val >> 24) & 0xFF);
+ ecc_code[7] = ((val >> 16) & 0xFF);
+ ecc_code[8] = ((val >> 8) & 0xFF);
+ ecc_code[9] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result3[i]);
+ ecc_code[10] = ((val >> 24) & 0xFF);
+ ecc_code[11] = ((val >> 16) & 0xFF);
+ ecc_code[12] = ((val >> 8) & 0xFF);
+ ecc_code[13] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result2[i]);
+ ecc_code[14] = ((val >> 24) & 0xFF);
+ ecc_code[15] = ((val >> 16) & 0xFF);
+ ecc_code[16] = ((val >> 8) & 0xFF);
+ ecc_code[17] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result1[i]);
+ ecc_code[18] = ((val >> 24) & 0xFF);
+ ecc_code[19] = ((val >> 16) & 0xFF);
+ ecc_code[20] = ((val >> 8) & 0xFF);
+ ecc_code[21] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result0[i]);
+ ecc_code[22] = ((val >> 24) & 0xFF);
+ ecc_code[23] = ((val >> 16) & 0xFF);
+ ecc_code[24] = ((val >> 8) & 0xFF);
+ ecc_code[25] = ((val >> 0) & 0xFF);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* ECC scheme specific syndrome customizations */
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch4_polynomial[j];
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Set 8th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch8_polynomial[j];
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* Set 14th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ecc_calc += eccbytes;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+/**
+ * erased_sector_bitflips - count bit flips
+ * @data: data sector buffer
+ * @oob: oob buffer
+ * @info: omap_nand_info
+ *
+ * Check whether the bit flips in an erased page fall below the correctable
+ * level. If so, report the page as erased with correctable bit flips;
+ * otherwise report it as an uncorrectable page.
+ */
+static int erased_sector_bitflips(u_char *data, u_char *oob,
+ struct omap_nand_info *info)
+{
+ int flip_bits = 0, i;
+
+ for (i = 0; i < info->nand.ecc.size; i++) {
+ flip_bits += hweight8(~data[i]);
+ if (flip_bits > info->nand.ecc.strength)
+ return 0;
+ }
+
+ for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
+ flip_bits += hweight8(~oob[i]);
+ if (flip_bits > info->nand.ecc.strength)
+ return 0;
+ }
+
+ /*
+ * Bit flips fall within the correctable level.
+ * Fill data area with 0xFF
+ */
+ if (flip_bits) {
+ memset(data, 0xFF, info->nand.ecc.size);
+ memset(oob, 0xFF, info->nand.ecc.bytes);
+ }
+
+ return flip_bits;
+}
+
+/**
+ * omap_elm_correct_data - corrects page data area in case error reported
+ * @mtd: MTD device structure
+ * @data: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * The calculated ECC vector is reported as zero for error-free pages.
+ * For a non-zero ECC vector, first filter out erased pages, and then
+ * process the data via the ELM to detect bit-flips.
+ */
+static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_ecc_ctrl *ecc = &info->nand.ecc;
+ int eccsteps = info->nand.ecc.steps;
+ int i , j, stat = 0;
+ int eccflag, actual_eccbytes;
+ struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
+ u_char *ecc_vec = calc_ecc;
+ u_char *spare_ecc = read_ecc;
+ u_char *erased_ecc_vec;
+ u_char *buf;
+ int bitflip_count;
+ bool is_error_reported = false;
+ u32 bit_pos, byte_pos, error_max, pos;
+ int err;
+
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* omit 7th ECC byte reserved for ROM code compatibility */
+ actual_eccbytes = ecc->bytes - 1;
+ erased_ecc_vec = bch4_vector;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* omit 14th ECC byte reserved for ROM code compatibility */
+ actual_eccbytes = ecc->bytes - 1;
+ erased_ecc_vec = bch8_vector;
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ actual_eccbytes = ecc->bytes;
+ erased_ecc_vec = bch16_vector;
+ break;
+ default:
+ pr_err("invalid driver configuration\n");
+ return -EINVAL;
+ }
+
+ /* Initialize elm error vector to zero */
+ memset(err_vec, 0, sizeof(err_vec));
+
+ for (i = 0; i < eccsteps ; i++) {
+ eccflag = 0; /* initialize eccflag */
+
+ /*
+ * Check whether any error was reported; a non-zero
+ * calculated ECC indicates an error.
+ */
+ for (j = 0; j < actual_eccbytes; j++) {
+ if (calc_ecc[j] != 0) {
+ eccflag = 1; /* non zero ecc, error present */
+ break;
+ }
+ }
+
+ if (eccflag == 1) {
+ if (memcmp(calc_ecc, erased_ecc_vec,
+ actual_eccbytes) == 0) {
+ /*
+ * calc_ecc[] matches pattern for ECC(all 0xff)
+ * so this is definitely an erased-page
+ */
+ } else {
+ buf = &data[info->nand.ecc.size * i];
+ /*
+ * count number of 0-bits in read_buf.
+ * This check can be removed once a similar
+ * check is introduced in generic NAND driver
+ */
+ bitflip_count = erased_sector_bitflips(
+ buf, read_ecc, info);
+ if (bitflip_count) {
+ /*
+ * number of 0-bits within ECC limits
+ * So this may be an erased-page
+ */
+ stat += bitflip_count;
+ } else {
+ /*
+ * Too many 0-bits. It may be a
+ * - programmed-page, OR
+ * - erased-page with many bit-flips
+ * So this page requires check by ELM
+ */
+ err_vec[i].error_reported = true;
+ is_error_reported = true;
+ }
+ }
+ }
+
+ /* Update the ecc vector */
+ calc_ecc += ecc->bytes;
+ read_ecc += ecc->bytes;
+ }
+
+ /* Check if any error reported */
+ if (!is_error_reported)
+ return stat;
+
+ /* Decode BCH error using ELM module */
+ elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
+
+ err = 0;
+ for (i = 0; i < eccsteps; i++) {
+ if (err_vec[i].error_uncorrectable) {
+ pr_err("nand: uncorrectable bit-flips found\n");
+ err = -EBADMSG;
+ } else if (err_vec[i].error_reported) {
+ for (j = 0; j < err_vec[i].error_count; j++) {
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Add 4 bits to take care of padding */
+ pos = err_vec[i].error_loc[j] +
+ BCH4_BIT_PAD;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ case OMAP_ECC_BCH16_CODE_HW:
+ pos = err_vec[i].error_loc[j];
+ break;
+ default:
+ return -EINVAL;
+ }
+ error_max = (ecc->size + actual_eccbytes) * 8;
+ /* Calculate bit position of error */
+ bit_pos = pos % 8;
+
+ /* Calculate byte position of error */
+ byte_pos = (error_max - pos - 1) / 8;
+
+ if (pos < error_max) {
+ if (byte_pos < 512) {
+ pr_debug("bitflip@dat[%d]=%x\n",
+ byte_pos, data[byte_pos]);
+ data[byte_pos] ^= 1 << bit_pos;
+ } else {
+ pr_debug("bitflip@oob[%d]=%x\n",
+ (byte_pos - 512),
+ spare_ecc[byte_pos - 512]);
+ spare_ecc[byte_pos - 512] ^=
+ 1 << bit_pos;
+ }
+ } else {
+ pr_err("invalid bit-flip @ %d:%d\n",
+ byte_pos, bit_pos);
+ err = -EBADMSG;
+ }
+ }
}
+
+ /* Update number of correctable errors */
+ stat += err_vec[i].error_count;
+
+ /* Update page data with sector size */
+ data += ecc->size;
+ spare_ecc += ecc->bytes;
}
- return 1;
+ return (err) ? err : stat;
}
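One detail in omap_elm_correct_data() above that is easy to misread: a reported error location pos is turned into byte_pos = (error_max - pos - 1) / 8 and bit_pos = pos % 8, i.e. positions are counted back from the end of the data-plus-ECC area, and byte_pos >= 512 lands in the spare area. A small standalone check of that arithmetic with illustrative BCH8 numbers (not driver code):

#include <stdio.h>

int main(void)
{
	/* BCH8: 512-byte sector plus 13 usable ECC bytes, as in the driver */
	const int ecc_size = 512, actual_eccbytes = 13;
	const int error_max = (ecc_size + actual_eccbytes) * 8;	/* 4200 bits */
	int pos;

	/* a few sample positions and where they land */
	for (pos = error_max - 1; pos >= error_max - 17; pos -= 8) {
		int byte_pos = (error_max - pos - 1) / 8;
		int bit_pos = pos % 8;

		printf("pos %4d -> byte %3d, bit %d (%s)\n", pos, byte_pos,
		       bit_pos, byte_pos < 512 ? "data" : "spare");
	}
	return 0;
}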
-static int __devinit omap_nand_probe(struct platform_device *pdev)
+/**
+ * omap_write_page_bch - BCH ecc based write page function for entire page
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ *
+ * Custom write-page method that supports writing multiple sectors in one shot
+ */
+static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ int i;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* Write data */
+ chip->write_buf(mtd, buf, mtd->writesize);
+
+ /* Update ecc vector from GPMC result registers */
+ chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ /* Write ecc vector to OOB area */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+/**
+ * omap_read_page_bch - BCH ecc based page read function for entire page
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * For the BCH ECC scheme, the GPMC is used for syndrome calculation and the
+ * ELM module for error correction.
+ * Custom method supporting ELM error correction and multi-sector reads:
+ * the page data area is read together with the OOB data while the ECC
+ * engine is enabled, and the ECC vector is updated after the OOB read.
+ * For error-free pages the ECC vector is reported as zero.
+ */
+static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *oob = &chip->oob_poi[eccpos[0]];
+ uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0];
+ int stat;
+ unsigned int max_bitflips = 0;
+
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+
+ /* Read data */
+ chip->read_buf(mtd, buf, mtd->writesize);
+
+ /* Read oob bytes */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1);
+ chip->read_buf(mtd, oob, chip->ecc.total);
+
+ /* Calculate ecc bytes */
+ chip->ecc.calculate(mtd, buf, ecc_calc);
+
+ memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total);
+
+ stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+
+ return max_bitflips;
+}
+
+/**
+ * is_elm_present - checks for presence of ELM module by scanning DT nodes
+ * @info: NAND device structure containing platform data
+ * @elm_node: ELM device-tree node from the platform data
+ * @bch_type: 0x0=BCH4, 0x1=BCH8, 0x2=BCH16
+ */
+static int is_elm_present(struct omap_nand_info *info,
+ struct device_node *elm_node, enum bch_ecc bch_type)
+{
+ struct platform_device *pdev;
+ struct nand_ecc_ctrl *ecc = &info->nand.ecc;
+ int err;
+ /* check whether elm-id is passed via DT */
+ if (!elm_node) {
+ pr_err("nand: error: ELM DT node not found\n");
+ return -ENODEV;
+ }
+ pdev = of_find_device_by_node(elm_node);
+ /* check whether ELM device is registered */
+ if (!pdev) {
+ pr_err("nand: error: ELM device not found\n");
+ return -ENODEV;
+ }
+ /* ELM module available, now configure it */
+ info->elm_dev = &pdev->dev;
+ err = elm_config(info->elm_dev, bch_type,
+ (info->mtd.writesize / ecc->size), ecc->size, ecc->bytes);
+
+ return err;
+}
+#endif /* CONFIG_MTD_NAND_OMAP_BCH */
+
+static int omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
struct omap_nand_platform_data *pdata;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ struct nand_ecclayout *ecclayout;
int err;
- int i, offset;
-
- pdata = pdev->dev.platform_data;
+ int i;
+ dma_cap_mask_t mask;
+ unsigned sig;
+ unsigned oob_index;
+ struct resource *res;
+ struct mtd_part_parser_data ppdata = {};
+
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata == NULL) {
dev_err(&pdev->dev, "platform data missing\n");
return -ENODEV;
}
- info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
+ GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -949,197 +1652,413 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
spin_lock_init(&info->controller.lock);
init_waitqueue_head(&info->controller.wq);
- info->pdev = pdev;
-
+ info->pdev = pdev;
info->gpmc_cs = pdata->cs;
- info->phys_base = pdata->phys_base;
-
- info->mtd.priv = &info->nand;
- info->mtd.name = dev_name(&pdev->dev);
- info->mtd.owner = THIS_MODULE;
+ info->reg = pdata->reg;
+ info->of_node = pdata->of_node;
+ info->ecc_opt = pdata->ecc_opt;
+ mtd = &info->mtd;
+ mtd->priv = &info->nand;
+ mtd->name = dev_name(&pdev->dev);
+ mtd->owner = THIS_MODULE;
+ nand_chip = &info->nand;
+ nand_chip->ecc.priv = NULL;
+ nand_chip->options |= NAND_SKIP_BBTSCAN;
- info->nand.options = pdata->devsize;
- info->nand.options |= NAND_SKIP_BBTSCAN;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nand_chip->IO_ADDR_R))
+ return PTR_ERR(nand_chip->IO_ADDR_R);
- /* NAND write protect off */
- gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
+ info->phys_base = res->start;
- if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
- pdev->dev.driver->name)) {
- err = -EBUSY;
- goto out_free_info;
- }
-
- info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
- if (!info->nand.IO_ADDR_R) {
- err = -ENOMEM;
- goto out_release_mem_region;
- }
+ nand_chip->controller = &info->controller;
- info->nand.controller = &info->controller;
-
- info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
- info->nand.cmd_ctrl = omap_hwcontrol;
+ nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
+ nand_chip->cmd_ctrl = omap_hwcontrol;
/*
* If RDY/BSY line is connected to OMAP then use the omap ready
- * funcrtion and the generic nand_wait function which reads the status
- * register after monitoring the RDY/BSY line.Otherwise use a standard
+ * function and the generic nand_wait function which reads the status
+ * register after monitoring the RDY/BSY line. Otherwise use a standard
* chip delay which is slightly more than tR (AC Timing) of the NAND
* device and read status register until you get a failure or success
*/
if (pdata->dev_ready) {
- info->nand.dev_ready = omap_dev_ready;
- info->nand.chip_delay = 0;
+ nand_chip->dev_ready = omap_dev_ready;
+ nand_chip->chip_delay = 0;
} else {
- info->nand.waitfunc = omap_wait;
- info->nand.chip_delay = 50;
+ nand_chip->waitfunc = omap_wait;
+ nand_chip->chip_delay = 50;
+ }
+
+ /* scan NAND device connected to chip controller */
+ nand_chip->options |= pdata->devsize & NAND_BUSWIDTH_16;
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ pr_err("nand device scan failed, may be bus-width mismatch\n");
+ err = -ENXIO;
+ goto return_error;
}
+ /* check for small page devices */
+ if ((mtd->oobsize < 64) && (pdata->ecc_opt != OMAP_ECC_HAM1_CODE_HW)) {
+ pr_err("small page devices are not supported\n");
+ err = -EINVAL;
+ goto return_error;
+ }
+
+ /* re-populate low-level callbacks based on xfer modes */
switch (pdata->xfer_type) {
case NAND_OMAP_PREFETCH_POLLED:
- info->nand.read_buf = omap_read_buf_pref;
- info->nand.write_buf = omap_write_buf_pref;
+ nand_chip->read_buf = omap_read_buf_pref;
+ nand_chip->write_buf = omap_write_buf_pref;
break;
case NAND_OMAP_POLLED:
- if (info->nand.options & NAND_BUSWIDTH_16) {
- info->nand.read_buf = omap_read_buf16;
- info->nand.write_buf = omap_write_buf16;
- } else {
- info->nand.read_buf = omap_read_buf8;
- info->nand.write_buf = omap_write_buf8;
- }
+ /* Use nand_base defaults for {read,write}_buf */
break;
case NAND_OMAP_PREFETCH_DMA:
- err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
- omap_nand_dma_cb, &info->comp, &info->dma_ch);
- if (err < 0) {
- info->dma_ch = -1;
- dev_err(&pdev->dev, "DMA request failed!\n");
- goto out_release_mem_region;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ sig = OMAP24XX_DMA_GPMC;
+ info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+ if (!info->dma) {
+ dev_err(&pdev->dev, "DMA engine request failed\n");
+ err = -ENXIO;
+ goto return_error;
} else {
- omap_set_dma_dest_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
- omap_set_dma_src_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
-
- info->nand.read_buf = omap_read_buf_dma_pref;
- info->nand.write_buf = omap_write_buf_dma_pref;
+ struct dma_slave_config cfg;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = info->phys_base;
+ cfg.dst_addr = info->phys_base;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 16;
+ cfg.dst_maxburst = 16;
+ err = dmaengine_slave_config(info->dma, &cfg);
+ if (err) {
+ dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
+ err);
+ goto return_error;
+ }
+ nand_chip->read_buf = omap_read_buf_dma_pref;
+ nand_chip->write_buf = omap_write_buf_dma_pref;
}
break;
case NAND_OMAP_PREFETCH_IRQ:
- err = request_irq(pdata->gpmc_irq,
- omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+ info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
+ if (info->gpmc_irq_fifo <= 0) {
+ dev_err(&pdev->dev, "error getting fifo irq\n");
+ err = -ENODEV;
+ goto return_error;
+ }
+ err = devm_request_irq(&pdev->dev, info->gpmc_irq_fifo,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-fifo", info);
if (err) {
dev_err(&pdev->dev, "requesting irq(%d) error:%d",
- pdata->gpmc_irq, err);
- goto out_release_mem_region;
- } else {
- info->gpmc_irq = pdata->gpmc_irq;
- info->nand.read_buf = omap_read_buf_irq_pref;
- info->nand.write_buf = omap_write_buf_irq_pref;
+ info->gpmc_irq_fifo, err);
+ info->gpmc_irq_fifo = 0;
+ goto return_error;
+ }
+
+ info->gpmc_irq_count = platform_get_irq(pdev, 1);
+ if (info->gpmc_irq_count <= 0) {
+ dev_err(&pdev->dev, "error getting count irq\n");
+ err = -ENODEV;
+ goto return_error;
}
+ err = devm_request_irq(&pdev->dev, info->gpmc_irq_count,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-count", info);
+ if (err) {
+ dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+ info->gpmc_irq_count, err);
+ info->gpmc_irq_count = 0;
+ goto return_error;
+ }
+
+ nand_chip->read_buf = omap_read_buf_irq_pref;
+ nand_chip->write_buf = omap_write_buf_irq_pref;
+
break;
default:
dev_err(&pdev->dev,
"xfer_type(%d) not supported!\n", pdata->xfer_type);
err = -EINVAL;
- goto out_release_mem_region;
+ goto return_error;
}
- info->nand.verify_buf = omap_verify_buf;
-
- /* selsect the ecc type */
- if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
- info->nand.ecc.mode = NAND_ECC_SOFT;
- else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
- (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
- info->nand.ecc.bytes = 3;
- info->nand.ecc.size = 512;
- info->nand.ecc.calculate = omap_calculate_ecc;
- info->nand.ecc.hwctl = omap_enable_hwecc;
- info->nand.ecc.correct = omap_correct_data;
- info->nand.ecc.mode = NAND_ECC_HW;
- }
+ /* populate MTD interface based on ECC scheme */
+ nand_chip->ecc.layout = &omap_oobinfo;
+ ecclayout = &omap_oobinfo;
+ switch (info->ecc_opt) {
+ case OMAP_ECC_HAM1_CODE_HW:
+ pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.bytes = 3;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.strength = 1;
+ nand_chip->ecc.calculate = omap_calculate_ecc;
+ nand_chip->ecc.hwctl = omap_enable_hwecc;
+ nand_chip->ecc.correct = omap_correct_data;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ if (nand_chip->options & NAND_BUSWIDTH_16)
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ else
+ oob_index = 1;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* no reserved-marker in ecclayout for this ecc-scheme */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ break;
- /* DIP switches on some boards change between 8 and 16 bit
- * bus widths for flash. Try the other width if the first try fails.
- */
- if (nand_scan_ident(&info->mtd, 1, NULL)) {
- info->nand.options ^= NAND_BUSWIDTH_16;
- if (nand_scan_ident(&info->mtd, 1, NULL)) {
- err = -ENXIO;
- goto out_release_mem_region;
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+#ifdef CONFIG_MTD_NAND_ECC_BCH
+ pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.bytes = 7;
+ nand_chip->ecc.strength = 4;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
+ nand_chip->ecc.correct = nand_bch_correct_data;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+ ecclayout->eccpos[i] = oob_index;
+ if (((i + 1) % nand_chip->ecc.bytes) == 0)
+ oob_index++;
}
- }
-
- /* rom code layout */
- if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
-
- if (info->nand.options & NAND_BUSWIDTH_16)
- offset = 2;
- else {
- offset = 1;
- info->nand.badblock_pattern = &bb_descrip_flashbased;
+ /* include reserved-marker in ecclayout->oobfree calculation */
+ ecclayout->oobfree->offset = 1 +
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ /* software bch library is used for locating errors */
+ nand_chip->ecc.priv = nand_bch_init(mtd,
+ nand_chip->ecc.size,
+ nand_chip->ecc.bytes,
+ &nand_chip->ecc.layout);
+ if (!nand_chip->ecc.priv) {
+ pr_err("nand: error: unable to use s/w BCH library\n");
+ err = -EINVAL;
}
- omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
- for (i = 0; i < omap_oobinfo.eccbytes; i++)
- omap_oobinfo.eccpos[i] = i+offset;
-
- omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
- omap_oobinfo.oobfree->length = info->mtd.oobsize -
- (offset + omap_oobinfo.eccbytes);
+ break;
+#else
+ pr_err("nand: error: CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+ err = -EINVAL;
+ goto return_error;
+#endif
+
+ case OMAP_ECC_BCH4_CODE_HW:
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+ pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ /* 14th bit is kept reserved for ROM-code compatibility */
+ nand_chip->ecc.bytes = 7 + 1;
+ nand_chip->ecc.strength = 4;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
+ nand_chip->ecc.correct = omap_elm_correct_data;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.read_page = omap_read_page_bch;
+ nand_chip->ecc.write_page = omap_write_page_bch;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ /* This ECC scheme requires ELM H/W block */
+ if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) {
+ pr_err("nand: error: could not initialize ELM\n");
+ err = -ENODEV;
+ goto return_error;
+ }
+ break;
+#else
+ pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
+ err = -EINVAL;
+ goto return_error;
+#endif
+
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+#ifdef CONFIG_MTD_NAND_ECC_BCH
+ pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.bytes = 13;
+ nand_chip->ecc.strength = 8;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
+ nand_chip->ecc.correct = nand_bch_correct_data;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+ ecclayout->eccpos[i] = oob_index;
+ if (((i + 1) % nand_chip->ecc.bytes) == 0)
+ oob_index++;
+ }
+ /* include reserved-marker in ecclayout->oobfree calculation */
+ ecclayout->oobfree->offset = 1 +
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ /* software bch library is used for locating errors */
+ nand_chip->ecc.priv = nand_bch_init(mtd,
+ nand_chip->ecc.size,
+ nand_chip->ecc.bytes,
+ &nand_chip->ecc.layout);
+ if (!nand_chip->ecc.priv) {
+ pr_err("nand: error: unable to use s/w BCH library\n");
+ err = -EINVAL;
+ goto return_error;
+ }
+ break;
+#else
+ pr_err("nand: error: CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+ err = -EINVAL;
+ goto return_error;
+#endif
+
+ case OMAP_ECC_BCH8_CODE_HW:
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+ pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ /* 14th bit is kept reserved for ROM-code compatibility */
+ nand_chip->ecc.bytes = 13 + 1;
+ nand_chip->ecc.strength = 8;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
+ nand_chip->ecc.correct = omap_elm_correct_data;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.read_page = omap_read_page_bch;
+ nand_chip->ecc.write_page = omap_write_page_bch;
+ /* This ECC scheme requires ELM H/W block */
+ err = is_elm_present(info, pdata->elm_of_node, BCH8_ECC);
+ if (err < 0) {
+ pr_err("nand: error: could not initialize ELM\n");
+ goto return_error;
+ }
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ break;
+#else
+ pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
+ err = -EINVAL;
+ goto return_error;
+#endif
+
+ case OMAP_ECC_BCH16_CODE_HW:
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+ pr_info("using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.bytes = 26;
+ nand_chip->ecc.strength = 16;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
+ nand_chip->ecc.correct = omap_elm_correct_data;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.read_page = omap_read_page_bch;
+ nand_chip->ecc.write_page = omap_write_page_bch;
+ /* This ECC scheme requires ELM H/W block */
+ err = is_elm_present(info, pdata->elm_of_node, BCH16_ECC);
+ if (err < 0) {
+ pr_err("ELM is required for this ECC scheme\n");
+ goto return_error;
+ }
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ break;
+#else
+ pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
+ err = -EINVAL;
+ goto return_error;
+#endif
+ default:
+ pr_err("nand: error: invalid or unsupported ECC scheme\n");
+ err = -EINVAL;
+ goto return_error;
+ }
- info->nand.ecc.layout = &omap_oobinfo;
+ /* all OOB bytes from oobfree->offset till the end of OOB are free */
+ ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
+ /* check if NAND device's OOB is enough to store ECC signatures */
+ if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
+ pr_err("not enough OOB bytes required = %d, available=%d\n",
+ ecclayout->eccbytes, mtd->oobsize);
+ err = -EINVAL;
+ goto return_error;
}
/* second phase scan */
- if (nand_scan_tail(&info->mtd)) {
+ if (nand_scan_tail(mtd)) {
err = -ENXIO;
- goto out_release_mem_region;
+ goto return_error;
}
- err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
- if (err > 0)
- mtd_device_register(&info->mtd, info->parts, err);
- else if (pdata->parts)
- mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
- else
- mtd_device_register(&info->mtd, NULL, 0);
+ ppdata.of_node = pdata->of_node;
+ mtd_device_parse_register(mtd, NULL, &ppdata, pdata->parts,
+ pdata->nr_parts);
- platform_set_drvdata(pdev, &info->mtd);
+ platform_set_drvdata(pdev, mtd);
return 0;
-out_release_mem_region:
- release_mem_region(info->phys_base, NAND_IO_SIZE);
-out_free_info:
- kfree(info);
-
+return_error:
+ if (info->dma)
+ dma_release_channel(info->dma);
+ if (nand_chip->ecc.priv) {
+ nand_bch_free(nand_chip->ecc.priv);
+ nand_chip->ecc.priv = NULL;
+ }
return err;
}
static int omap_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct nand_chip *nand_chip = mtd->priv;
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
-
- platform_set_drvdata(pdev, NULL);
- if (info->dma_ch != -1)
- omap_free_dma(info->dma_ch);
-
- if (info->gpmc_irq)
- free_irq(info->gpmc_irq, info);
-
- /* Release NAND device, its internal structures and partitions */
- nand_release(&info->mtd);
- iounmap(info->nand.IO_ADDR_R);
- kfree(&info->mtd);
+ if (nand_chip->ecc.priv) {
+ nand_bch_free(nand_chip->ecc.priv);
+ nand_chip->ecc.priv = NULL;
+ }
+ if (info->dma)
+ dma_release_channel(info->dma);
+ nand_release(mtd);
return 0;
}
@@ -1152,20 +2071,7 @@ static struct platform_driver omap_nand_driver = {
},
};
-static int __init omap_nand_init(void)
-{
- pr_info("%s driver initializing\n", DRIVER_NAME);
-
- return platform_driver_register(&omap_nand_driver);
-}
-
-static void __exit omap_nand_exit(void)
-{
- platform_driver_unregister(&omap_nand_driver);
-}
-
-module_init(omap_nand_init);
-module_exit(omap_nand_exit);
+module_platform_driver(omap_nand_driver);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
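
The ECC layout arithmetic repeated in the omap2 switch cases above follows a single pattern: eccbytes = ecc.bytes * (mtd->writesize / ecc.size), placed in OOB right after the bad-block marker, with oobfree starting at the byte after the last ECC position. A minimal stand-alone sketch of that arithmetic for the BCH8_CODE_HW case, assuming a 2 KiB page and a 2-byte bad-block marker (both values are assumptions for illustration, not taken from the patch):

#include <stdio.h>

/* Illustrative only: mirrors the BCH8_CODE_HW layout math above. */
int main(void)
{
	const int writesize = 2048;   /* assumed page size */
	const int ecc_size  = 512;    /* bytes protected per ECC step */
	const int ecc_bytes = 13 + 1; /* BCH8 syndrome + reserved byte */
	const int marker    = 2;      /* assumed bad-block marker length */
	int eccbytes = ecc_bytes * (writesize / ecc_size);

	printf("eccbytes=%d eccpos=%d..%d oobfree->offset=%d\n",
	       eccbytes, marker, marker + eccbytes - 1, marker + eccbytes);
	return 0;
}

With a 64-byte OOB this leaves 64 - 58 = 6 free bytes, which is why the probe path rejects devices whose OOB cannot hold eccbytes plus the marker.
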
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 7794d0680f9..471b4df3a5a 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -13,15 +13,15 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <asm/io.h>
#include <asm/sizes.h>
-#include <mach/hardware.h>
-#include <plat/orion_nand.h>
-
-static const char *part_probes[] = { "cmdlinepart", NULL };
+#include <linux/platform_data/mtd-orion_nand.h>
static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
@@ -76,17 +76,17 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
static int __init orion_nand_probe(struct platform_device *pdev)
{
struct mtd_info *mtd;
+ struct mtd_part_parser_data ppdata = {};
struct nand_chip *nc;
struct orion_nand_data *board;
struct resource *res;
+ struct clk *clk;
void __iomem *io_base;
int ret = 0;
- struct mtd_partition *partitions = NULL;
- int num_part = 0;
+ u32 val = 0;
nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
if (!nc) {
- printk(KERN_ERR "orion_nand: failed to allocate device structure.\n");
ret = -ENOMEM;
goto no_res;
}
@@ -100,12 +100,37 @@ static int __init orion_nand_probe(struct platform_device *pdev)
io_base = ioremap(res->start, resource_size(res));
if (!io_base) {
- printk(KERN_ERR "orion_nand: ioremap failed\n");
+ dev_err(&pdev->dev, "ioremap failed\n");
ret = -EIO;
goto no_res;
}
- board = pdev->dev.platform_data;
+ if (pdev->dev.of_node) {
+ board = devm_kzalloc(&pdev->dev, sizeof(struct orion_nand_data),
+ GFP_KERNEL);
+ if (!board) {
+ ret = -ENOMEM;
+ goto no_res;
+ }
+ if (!of_property_read_u32(pdev->dev.of_node, "cle", &val))
+ board->cle = (u8)val;
+ else
+ board->cle = 0;
+ if (!of_property_read_u32(pdev->dev.of_node, "ale", &val))
+ board->ale = (u8)val;
+ else
+ board->ale = 1;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "bank-width", &val))
+ board->width = (u8)val * 8;
+ else
+ board->width = 8;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "chip-delay", &val))
+ board->chip_delay = (u8)val;
+ } else {
+ board = dev_get_platdata(&pdev->dev);
+ }
mtd->priv = nc;
mtd->owner = THIS_MODULE;
@@ -119,6 +144,10 @@ static int __init orion_nand_probe(struct platform_device *pdev)
if (board->chip_delay)
nc->chip_delay = board->chip_delay;
+ WARN(board->width > 16,
+ "%d bit bus width out of range",
+ board->width);
+
if (board->width == 16)
nc->options |= NAND_BUSWIDTH_16;
@@ -127,22 +156,23 @@ static int __init orion_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mtd);
+ /* Not all platforms can gate the clock, so it is not
+ an error if the clock does not exist. */
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ }
+
if (nand_scan(mtd, 1)) {
ret = -ENXIO;
goto no_dev;
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "orion_nand";
- num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
-#endif
- /* If cmdline partitions have been passed, let them be used */
- if (num_part <= 0) {
- num_part = board->nr_parts;
- partitions = board->parts;
- }
-
- ret = mtd_device_register(mtd, partitions, num_part);
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(mtd, NULL, &ppdata,
+ board->parts, board->nr_parts);
if (ret) {
nand_release(mtd);
goto no_dev;
@@ -151,7 +181,10 @@ static int __init orion_nand_probe(struct platform_device *pdev)
return 0;
no_dev:
- platform_set_drvdata(pdev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ }
iounmap(io_base);
no_res:
kfree(nc);
@@ -159,10 +192,11 @@ no_res:
return ret;
}
-static int __devexit orion_nand_remove(struct platform_device *pdev)
+static int orion_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct nand_chip *nc = mtd->priv;
+ struct clk *clk;
nand_release(mtd);
@@ -170,29 +204,32 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
kfree(nc);
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ }
+
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id orion_nand_of_match_table[] = {
+ { .compatible = "marvell,orion-nand", },
+ {},
+};
+#endif
+
static struct platform_driver orion_nand_driver = {
- .remove = __devexit_p(orion_nand_remove),
+ .remove = orion_nand_remove,
.driver = {
.name = "orion_nand",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(orion_nand_of_match_table),
},
};
-static int __init orion_nand_init(void)
-{
- return platform_driver_probe(&orion_nand_driver, orion_nand_probe);
-}
-
-static void __exit orion_nand_exit(void)
-{
- platform_driver_unregister(&orion_nand_driver);
-}
-
-module_init(orion_nand_init);
-module_exit(orion_nand_exit);
+module_platform_driver_probe(orion_nand_driver, orion_nand_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tzachi Perelstein");
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index b1aa41b8a4e..2c98f9da747 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -23,11 +23,12 @@
#undef DEBUG
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
@@ -89,7 +90,7 @@ int pasemi_device_ready(struct mtd_info *mtd)
return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
}
-static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
+static int pasemi_nand_probe(struct platform_device *ofdev)
{
struct pci_dev *pdev;
struct device_node *np = ofdev->dev.of_node;
@@ -155,7 +156,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
chip->ecc.mode = NAND_ECC_SOFT;
/* Enable the following for a flash based bad block table */
- chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
/* Scan to find existence of the device */
if (nand_scan(pasemi_nand_mtd, 1)) {
@@ -184,7 +185,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
return err;
}
-static int __devexit pasemi_nand_remove(struct platform_device *ofdev)
+static int pasemi_nand_remove(struct platform_device *ofdev)
{
struct nand_chip *chip;
@@ -221,7 +222,7 @@ MODULE_DEVICE_TABLE(of, pasemi_nand_match);
static struct platform_driver pasemi_nand_driver =
{
.driver = {
- .name = (char*)driver_name,
+ .name = driver_name,
.owner = THIS_MODULE,
.of_match_table = pasemi_nand_match,
},
@@ -229,17 +230,7 @@ static struct platform_driver pasemi_nand_driver =
.remove = pasemi_nand_remove,
};
-static int __init pasemi_nand_init(void)
-{
- return platform_driver_register(&pasemi_nand_driver);
-}
-module_init(pasemi_nand_init);
-
-static void __exit pasemi_nand_exit(void)
-{
- platform_driver_unregister(&pasemi_nand_driver);
-}
-module_exit(pasemi_nand_exit);
+module_platform_driver(pasemi_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 633c04bf76f..0b068a5c0bf 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -21,49 +22,42 @@ struct plat_nand_data {
struct nand_chip chip;
struct mtd_info mtd;
void __iomem *io_base;
- int nr_parts;
- struct mtd_partition *parts;
};
+static const char *part_probe_types[] = { "cmdlinepart", NULL };
+
/*
* Probe for the NAND device.
*/
-static int __devinit plat_nand_probe(struct platform_device *pdev)
+static int plat_nand_probe(struct platform_device *pdev)
{
- struct platform_nand_data *pdata = pdev->dev.platform_data;
+ struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mtd_part_parser_data ppdata;
struct plat_nand_data *data;
struct resource *res;
+ const char **part_types;
int err = 0;
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform_nand_data is missing\n");
+ return -EINVAL;
+ }
+
if (pdata->chip.nr_chips < 1) {
dev_err(&pdev->dev, "invalid number of chips specified\n");
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
/* Allocate memory for the device structure (and zero it) */
- data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL);
- if (!data) {
- dev_err(&pdev->dev, "failed to allocate device structure.\n");
+ data = devm_kzalloc(&pdev->dev, sizeof(struct plat_nand_data),
+ GFP_KERNEL);
+ if (!data)
return -ENOMEM;
- }
-
- if (!request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev))) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- err = -EBUSY;
- goto out_free;
- }
- data->io_base = ioremap(res->start, resource_size(res));
- if (data->io_base == NULL) {
- dev_err(&pdev->dev, "ioremap failed\n");
- err = -EIO;
- goto out_release_io;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->io_base))
+ return PTR_ERR(data->io_base);
data->chip.priv = &data;
data->mtd.priv = &data->chip;
@@ -77,8 +71,10 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
data->chip.select_chip = pdata->ctrl.select_chip;
data->chip.write_buf = pdata->ctrl.write_buf;
data->chip.read_buf = pdata->ctrl.read_buf;
+ data->chip.read_byte = pdata->ctrl.read_byte;
data->chip.chip_delay = pdata->chip.chip_delay;
data->chip.options |= pdata->chip.options;
+ data->chip.bbt_options |= pdata->chip.bbt_options;
data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
data->chip.ecc.layout = pdata->chip.ecclayout;
@@ -99,23 +95,12 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
goto out;
}
- if (pdata->chip.part_probe_types) {
- err = parse_mtd_partitions(&data->mtd,
- pdata->chip.part_probe_types,
- &data->parts, 0);
- if (err > 0) {
- mtd_device_register(&data->mtd, data->parts, err);
- return 0;
- }
- }
- if (pdata->chip.set_parts)
- pdata->chip.set_parts(data->mtd.size, &pdata->chip);
- if (pdata->chip.partitions) {
- data->parts = pdata->chip.partitions;
- err = mtd_device_register(&data->mtd, data->parts,
- pdata->chip.nr_partitions);
- } else
- err = mtd_device_register(&data->mtd, NULL, 0);
+ part_types = pdata->chip.part_probe_types ? : part_probe_types;
+
+ ppdata.of_node = pdev->dev.of_node;
+ err = mtd_device_parse_register(&data->mtd, part_types, &ppdata,
+ pdata->chip.partitions,
+ pdata->chip.nr_partitions);
if (!err)
return err;
@@ -124,59 +109,41 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
out:
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
- platform_set_drvdata(pdev, NULL);
- iounmap(data->io_base);
-out_release_io:
- release_mem_region(res->start, resource_size(res));
-out_free:
- kfree(data);
return err;
}
/*
* Remove a NAND device.
*/
-static int __devexit plat_nand_remove(struct platform_device *pdev)
+static int plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
- struct platform_nand_data *pdata = pdev->dev.platform_data;
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
nand_release(&data->mtd);
- if (data->parts && data->parts != pdata->chip.partitions)
- kfree(data->parts);
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
- iounmap(data->io_base);
- release_mem_region(res->start, resource_size(res));
- kfree(data);
return 0;
}
+static const struct of_device_id plat_nand_match[] = {
+ { .compatible = "gen_nand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, plat_nand_match);
+
static struct platform_driver plat_nand_driver = {
- .probe = plat_nand_probe,
- .remove = __devexit_p(plat_nand_remove),
- .driver = {
- .name = "gen_nand",
- .owner = THIS_MODULE,
+ .probe = plat_nand_probe,
+ .remove = plat_nand_remove,
+ .driver = {
+ .name = "gen_nand",
+ .owner = THIS_MODULE,
+ .of_match_table = plat_nand_match,
},
};
-static int __init plat_nand_init(void)
-{
- return platform_driver_register(&plat_nand_driver);
-}
-
-static void __exit plat_nand_exit(void)
-{
- platform_driver_unregister(&plat_nand_driver);
-}
-
-module_init(plat_nand_init);
-module_exit(plat_nand_exit);
+module_platform_driver(plat_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool");
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
deleted file mode 100644
index 3bbb796b451..00000000000
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * drivers/mtd/nand/ppchameleonevb.c
- *
- * Copyright (C) 2003 DAVE Srl (info@wawnet.biz)
- *
- * Derived from drivers/mtd/nand/edb7312.c
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash devices found on the
- * PPChameleon/PPChameleonEVB system.
- * PPChameleon options (autodetected):
- * - BA model: no NAND
- * - ME model: 32MB (Samsung K9F5608U0B)
- * - HI model: 128MB (Samsung K9F1G08UOM)
- * PPChameleonEVB options:
- * - 32MB (Samsung K9F5608U0B)
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <platforms/PPChameleonEVB.h>
-
-#undef USE_READY_BUSY_PIN
-#define USE_READY_BUSY_PIN
-/* see datasheets (tR) */
-#define NAND_BIG_DELAY_US 25
-#define NAND_SMALL_DELAY_US 10
-
-/* handy sizes */
-#define SZ_4M 0x00400000
-#define NAND_SMALL_SIZE 0x02000000
-#define NAND_MTD_NAME "ppchameleon-nand"
-#define NAND_EVB_MTD_NAME "ppchameleonevb-nand"
-
-/* GPIO pins used to drive NAND chip mounted on processor module */
-#define NAND_nCE_GPIO_PIN (0x80000000 >> 1)
-#define NAND_CLE_GPIO_PIN (0x80000000 >> 2)
-#define NAND_ALE_GPIO_PIN (0x80000000 >> 3)
-#define NAND_RB_GPIO_PIN (0x80000000 >> 4)
-/* GPIO pins used to drive NAND chip mounted on EVB */
-#define NAND_EVB_nCE_GPIO_PIN (0x80000000 >> 14)
-#define NAND_EVB_CLE_GPIO_PIN (0x80000000 >> 15)
-#define NAND_EVB_ALE_GPIO_PIN (0x80000000 >> 16)
-#define NAND_EVB_RB_GPIO_PIN (0x80000000 >> 31)
-
-/*
- * MTD structure for PPChameleonEVB board
- */
-static struct mtd_info *ppchameleon_mtd = NULL;
-static struct mtd_info *ppchameleonevb_mtd = NULL;
-
-/*
- * Module stuff
- */
-static unsigned long ppchameleon_fio_pbase = CFG_NAND0_PADDR;
-static unsigned long ppchameleonevb_fio_pbase = CFG_NAND1_PADDR;
-
-#ifdef MODULE
-module_param(ppchameleon_fio_pbase, ulong, 0);
-module_param(ppchameleonevb_fio_pbase, ulong, 0);
-#else
-__setup("ppchameleon_fio_pbase=", ppchameleon_fio_pbase);
-__setup("ppchameleonevb_fio_pbase=", ppchameleonevb_fio_pbase);
-#endif
-
-/*
- * Define static partitions for flash devices
- */
-static struct mtd_partition partition_info_hi[] = {
- { .name = "PPChameleon HI Nand Flash",
- .offset = 0,
- .size = 128 * 1024 * 1024
- }
-};
-
-static struct mtd_partition partition_info_me[] = {
- { .name = "PPChameleon ME Nand Flash",
- .offset = 0,
- .size = 32 * 1024 * 1024
- }
-};
-
-static struct mtd_partition partition_info_evb[] = {
- { .name = "PPChameleonEVB Nand Flash",
- .offset = 0,
- .size = 32 * 1024 * 1024
- }
-};
-
-#define NUM_PARTITIONS 1
-
-extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id);
-
-/*
- * hardware specific access to control-lines
- */
-static void ppchameleon_hwcontrol(struct mtd_info *mtdinfo, int cmd,
- unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
-#error Missing headerfiles. No way to fix this. -tglx
- switch (cmd) {
- case NAND_CTL_SETCLE:
- MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_CLRCLE:
- MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_SETALE:
- MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_CLRALE:
- MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_SETNCE:
- MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_CLRNCE:
- MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND0_PADDR);
- break;
- }
- }
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-static void ppchameleonevb_hwcontrol(struct mtd_info *mtdinfo, int cmd,
- unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
-#error Missing headerfiles. No way to fix this. -tglx
- switch (cmd) {
- case NAND_CTL_SETCLE:
- MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_CLRCLE:
- MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_SETALE:
- MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_CLRALE:
- MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_SETNCE:
- MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_CLRNCE:
- MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND1_PADDR);
- break;
- }
- }
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-#ifdef USE_READY_BUSY_PIN
-/*
- * read device ready pin
- */
-static int ppchameleon_device_ready(struct mtd_info *minfo)
-{
- if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_RB_GPIO_PIN)
- return 1;
- return 0;
-}
-
-static int ppchameleonevb_device_ready(struct mtd_info *minfo)
-{
- if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_EVB_RB_GPIO_PIN)
- return 1;
- return 0;
-}
-#endif
-
-const char *part_probes[] = { "cmdlinepart", NULL };
-const char *part_probes_evb[] = { "cmdlinepart", NULL };
-
-/*
- * Main initialization routine
- */
-static int __init ppchameleonevb_init(void)
-{
- struct nand_chip *this;
- const char *part_type = 0;
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = 0;
- void __iomem *ppchameleon_fio_base;
- void __iomem *ppchameleonevb_fio_base;
-
- /*********************************
- * Processor module NAND (if any) *
- *********************************/
- /* Allocate memory for MTD device structure and private data */
- ppchameleon_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!ppchameleon_mtd) {
- printk("Unable to allocate PPChameleon NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* map physical address */
- ppchameleon_fio_base = ioremap(ppchameleon_fio_pbase, SZ_4M);
- if (!ppchameleon_fio_base) {
- printk("ioremap PPChameleon NAND flash failed\n");
- kfree(ppchameleon_mtd);
- return -EIO;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&ppchameleon_mtd[1]);
-
- /* Initialize structures */
- memset(ppchameleon_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ppchameleon_mtd->priv = this;
- ppchameleon_mtd->owner = THIS_MODULE;
-
- /* Initialize GPIOs */
- /* Pin mapping for NAND chip */
- /*
- CE GPIO_01
- CLE GPIO_02
- ALE GPIO_03
- R/B GPIO_04
- */
- /* output select */
- out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xC0FFFFFF);
- /* three-state select */
- out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xC0FFFFFF);
- /* enable output driver */
- out_be32((volatile unsigned *)GPIO0_TCR,
- in_be32((volatile unsigned *)GPIO0_TCR) | NAND_nCE_GPIO_PIN | NAND_CLE_GPIO_PIN | NAND_ALE_GPIO_PIN);
-#ifdef USE_READY_BUSY_PIN
- /* three-state select */
- out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFF3FFFFF);
- /* high-impedecence */
- out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & (~NAND_RB_GPIO_PIN));
- /* input select */
- out_be32((volatile unsigned *)GPIO0_ISR1H,
- (in_be32((volatile unsigned *)GPIO0_ISR1H) & 0xFF3FFFFF) | 0x00400000);
-#endif
-
- /* insert callbacks */
- this->IO_ADDR_R = ppchameleon_fio_base;
- this->IO_ADDR_W = ppchameleon_fio_base;
- this->cmd_ctrl = ppchameleon_hwcontrol;
-#ifdef USE_READY_BUSY_PIN
- this->dev_ready = ppchameleon_device_ready;
-#endif
- this->chip_delay = NAND_BIG_DELAY_US;
- /* ECC mode */
- this->ecc.mode = NAND_ECC_SOFT;
-
- /* Scan to find existence of the device (it could not be mounted) */
- if (nand_scan(ppchameleon_mtd, 1)) {
- iounmap((void *)ppchameleon_fio_base);
- ppchameleon_fio_base = NULL;
- kfree(ppchameleon_mtd);
- goto nand_evb_init;
- }
-#ifndef USE_READY_BUSY_PIN
- /* Adjust delay if necessary */
- if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
- this->chip_delay = NAND_SMALL_DELAY_US;
-#endif
-
- ppchameleon_mtd->name = "ppchameleon-nand";
- mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-
- if (mtd_parts_nb == 0) {
- if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
- mtd_parts = partition_info_me;
- else
- mtd_parts = partition_info_hi;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
-
- /* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- mtd_device_register(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
-
- nand_evb_init:
- /****************************
- * EVB NAND (always present) *
- ****************************/
- /* Allocate memory for MTD device structure and private data */
- ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!ppchameleonevb_mtd) {
- printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n");
- if (ppchameleon_fio_base)
- iounmap(ppchameleon_fio_base);
- return -ENOMEM;
- }
-
- /* map physical address */
- ppchameleonevb_fio_base = ioremap(ppchameleonevb_fio_pbase, SZ_4M);
- if (!ppchameleonevb_fio_base) {
- printk("ioremap PPChameleonEVB NAND flash failed\n");
- kfree(ppchameleonevb_mtd);
- if (ppchameleon_fio_base)
- iounmap(ppchameleon_fio_base);
- return -EIO;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&ppchameleonevb_mtd[1]);
-
- /* Initialize structures */
- memset(ppchameleonevb_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ppchameleonevb_mtd->priv = this;
-
- /* Initialize GPIOs */
- /* Pin mapping for NAND chip */
- /*
- CE GPIO_14
- CLE GPIO_15
- ALE GPIO_16
- R/B GPIO_31
- */
- /* output select */
- out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xFFFFFFF0);
- out_be32((volatile unsigned *)GPIO0_OSRL, in_be32((volatile unsigned *)GPIO0_OSRL) & 0x3FFFFFFF);
- /* three-state select */
- out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFFFFFFF0);
- out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0x3FFFFFFF);
- /* enable output driver */
- out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) | NAND_EVB_nCE_GPIO_PIN |
- NAND_EVB_CLE_GPIO_PIN | NAND_EVB_ALE_GPIO_PIN);
-#ifdef USE_READY_BUSY_PIN
- /* three-state select */
- out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0xFFFFFFFC);
- /* high-impedecence */
- out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & (~NAND_EVB_RB_GPIO_PIN));
- /* input select */
- out_be32((volatile unsigned *)GPIO0_ISR1L,
- (in_be32((volatile unsigned *)GPIO0_ISR1L) & 0xFFFFFFFC) | 0x00000001);
-#endif
-
- /* insert callbacks */
- this->IO_ADDR_R = ppchameleonevb_fio_base;
- this->IO_ADDR_W = ppchameleonevb_fio_base;
- this->cmd_ctrl = ppchameleonevb_hwcontrol;
-#ifdef USE_READY_BUSY_PIN
- this->dev_ready = ppchameleonevb_device_ready;
-#endif
- this->chip_delay = NAND_SMALL_DELAY_US;
-
- /* ECC mode */
- this->ecc.mode = NAND_ECC_SOFT;
-
- /* Scan to find existence of the device */
- if (nand_scan(ppchameleonevb_mtd, 1)) {
- iounmap((void *)ppchameleonevb_fio_base);
- kfree(ppchameleonevb_mtd);
- if (ppchameleon_fio_base)
- iounmap(ppchameleon_fio_base);
- return -ENXIO;
- }
-
- ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
- mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-
- if (mtd_parts_nb == 0) {
- mtd_parts = partition_info_evb;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
-
- /* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- mtd_device_register(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
-
- /* Return happy */
- return 0;
-}
-
-module_init(ppchameleonevb_init);
-
-/*
- * Clean up routine
- */
-static void __exit ppchameleonevb_cleanup(void)
-{
- struct nand_chip *this;
-
- /* Release resources, unregister device(s) */
- nand_release(ppchameleon_mtd);
- nand_release(ppchameleonevb_mtd);
-
- /* Release iomaps */
- this = (struct nand_chip *) &ppchameleon_mtd[1];
- iounmap((void *) this->IO_ADDR_R);
- this = (struct nand_chip *) &ppchameleonevb_mtd[1];
- iounmap((void *) this->IO_ADDR_R);
-
- /* Free the MTD device structure */
- kfree (ppchameleon_mtd);
- kfree (ppchameleonevb_mtd);
-}
-module_exit(ppchameleonevb_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("DAVE Srl <support-ppchameleon@dave-tech.it>");
-MODULE_DESCRIPTION("MTD map driver for DAVE Srl PPChameleonEVB board");
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 1fb3b3a8058..96b0b1d27df 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -7,6 +7,8 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
+ *
+ * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
*/
#include <linux/kernel.h>
@@ -22,14 +24,31 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
+
+#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
+#define ARCH_HAS_DMA
+#endif
+#ifdef ARCH_HAS_DMA
#include <mach/dma.h>
-#include <plat/pxa3xx_nand.h>
+#endif
+
+#include <linux/platform_data/mtd-nand-pxa3xx.h>
#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
#define NAND_STOP_DELAY (2 * HZ/50)
#define PAGE_CHUNK_SIZE (2048)
+/*
+ * Define a buffer size for the initial command that detects the flash device:
+ * STATUS, READID and PARAM. The largest of these is the PARAM command,
+ * needing 256 bytes.
+ */
+#define INIT_BUFFER_SIZE 256
+
/* registers and bit definitions */
#define NDCR (0x00) /* Control register */
#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
@@ -38,6 +57,7 @@
#define NDPCR (0x18) /* Page Count Register */
#define NDBDR0 (0x1C) /* Bad Block Register 0 */
#define NDBDR1 (0x20) /* Bad Block Register 1 */
+#define NDECCCTRL (0x28) /* ECC control */
#define NDDB (0x40) /* Data Buffer */
#define NDCB0 (0x48) /* Command Buffer0 */
#define NDCB1 (0x4C) /* Command Buffer1 */
@@ -64,6 +84,9 @@
#define NDCR_INT_MASK (0xFFF)
#define NDSR_MASK (0xfff)
+#define NDSR_ERR_CNT_OFF (16)
+#define NDSR_ERR_CNT_MASK (0x1f)
+#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY (0x1 << 12)
#define NDSR_FLASH_RDY (0x1 << 11)
#define NDSR_CS0_PAGED (0x1 << 10)
@@ -72,15 +95,18 @@
#define NDSR_CS1_CMDD (0x1 << 7)
#define NDSR_CS0_BBD (0x1 << 6)
#define NDSR_CS1_BBD (0x1 << 5)
-#define NDSR_DBERR (0x1 << 4)
-#define NDSR_SBERR (0x1 << 3)
+#define NDSR_UNCORERR (0x1 << 4)
+#define NDSR_CORERR (0x1 << 3)
#define NDSR_WRDREQ (0x1 << 2)
#define NDSR_RDDREQ (0x1 << 1)
#define NDSR_WRCMDREQ (0x1)
+#define NDCB0_LEN_OVRD (0x1 << 28)
#define NDCB0_ST_ROW_EN (0x1 << 26)
#define NDCB0_AUTO_RS (0x1 << 25)
#define NDCB0_CSEL (0x1 << 24)
+#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
+#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC (0x1 << 20)
@@ -91,25 +117,34 @@
#define NDCB0_CMD1_MASK (0xff)
#define NDCB0_ADDR_CYC_SHIFT (16)
+#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
+#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
+#define EXT_CMD_TYPE_READ 4 /* Read */
+#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
+#define EXT_CMD_TYPE_FINAL 3 /* Final command */
+#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
+#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
+
/* macros for registers read/write */
#define nand_writel(info, off, val) \
- __raw_writel((val), (info)->mmio_base + (off))
+ writel_relaxed((val), (info)->mmio_base + (off))
#define nand_readl(info, off) \
- __raw_readl((info)->mmio_base + (off))
+ readl_relaxed((info)->mmio_base + (off))
/* error code and state */
enum {
ERR_NONE = 0,
ERR_DMABUSERR = -1,
ERR_SENDCMD = -2,
- ERR_DBERR = -3,
+ ERR_UNCORERR = -3,
ERR_BBERR = -4,
- ERR_SBERR = -5,
+ ERR_CORERR = -5,
};
enum {
STATE_IDLE = 0,
+ STATE_PREPARED,
STATE_CMD_HANDLE,
STATE_DMA_READING,
STATE_DMA_WRITING,
@@ -120,21 +155,42 @@ enum {
STATE_READY,
};
-struct pxa3xx_nand_info {
- struct nand_chip nand_chip;
+enum pxa3xx_nand_variant {
+ PXA3XX_NAND_VARIANT_PXA,
+ PXA3XX_NAND_VARIANT_ARMADA370,
+};
+
+struct pxa3xx_nand_host {
+ struct nand_chip chip;
+ struct mtd_info *mtd;
+ void *info_data;
+ /* page size of attached chip */
+ int use_ecc;
+ int cs;
+
+ /* calculated from pxa3xx_nand_flash data */
+ unsigned int col_addr_cycles;
+ unsigned int row_addr_cycles;
+ size_t read_id_bytes;
+
+};
+
+struct pxa3xx_nand_info {
struct nand_hw_control controller;
struct platform_device *pdev;
- struct pxa3xx_nand_cmdset *cmdset;
struct clk *clk;
void __iomem *mmio_base;
unsigned long mmio_phys;
+ struct completion cmd_complete, dev_ready;
unsigned int buf_start;
unsigned int buf_count;
+ unsigned int buf_size;
+ unsigned int data_buff_pos;
+ unsigned int oob_buff_pos;
- struct mtd_info *mtd;
/* DMA information */
int drcmr_dat;
int drcmr_cmd;
@@ -142,67 +198,51 @@ struct pxa3xx_nand_info {
unsigned char *data_buff;
unsigned char *oob_buff;
dma_addr_t data_buff_phys;
- size_t data_buff_size;
int data_dma_ch;
struct pxa_dma_desc *data_desc;
dma_addr_t data_desc_addr;
- uint32_t reg_ndcr;
-
- /* saved column/page_addr during CMD_SEQIN */
- int seqin_column;
- int seqin_page_addr;
-
- /* relate to the command */
+ struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
unsigned int state;
+ /*
+ * This driver supports NFCv1 (as found in PXA SoC)
+ * and NFCv2 (as found in Armada 370/XP SoC).
+ */
+ enum pxa3xx_nand_variant variant;
+
+ int cs;
int use_ecc; /* use HW ECC ? */
+ int ecc_bch; /* using BCH ECC? */
int use_dma; /* use DMA ? */
- int is_ready;
-
- unsigned int page_size; /* page size of attached chip */
- unsigned int data_size; /* data size in FIFO */
+ int use_spare; /* use spare ? */
+ int need_wait;
+
+ unsigned int data_size; /* data to be read from FIFO */
+ unsigned int chunk_size; /* split commands chunk size */
+ unsigned int oob_size;
+ unsigned int spare_size;
+ unsigned int ecc_size;
+ unsigned int ecc_err_cnt;
+ unsigned int max_bitflips;
int retcode;
- struct completion cmd_complete;
+
+ /* cached register value */
+ uint32_t reg_ndcr;
+ uint32_t ndtr0cs0;
+ uint32_t ndtr1cs0;
/* generated NDCBx register values */
uint32_t ndcb0;
uint32_t ndcb1;
uint32_t ndcb2;
-
- /* timing calcuted from setting */
- uint32_t ndtr0cs0;
- uint32_t ndtr1cs0;
-
- /* calculated from pxa3xx_nand_flash data */
- size_t oob_size;
- size_t read_id_bytes;
-
- unsigned int col_addr_cycles;
- unsigned int row_addr_cycles;
+ uint32_t ndcb3;
};
-static int use_dma = 1;
+static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
-/*
- * Default NAND flash controller configuration setup by the
- * bootloader. This configuration is used only when pdata->keep_config is set
- */
-static struct pxa3xx_nand_cmdset default_cmdset = {
- .read1 = 0x3000,
- .read2 = 0x0050,
- .program = 0x1080,
- .read_status = 0x0070,
- .read_id = 0x0090,
- .erase = 0xD060,
- .reset = 0x00FF,
- .lock = 0x002A,
- .unlock = 0x2423,
- .lock_status = 0x007A,
-};
-
static struct pxa3xx_nand_timing timing[] = {
{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
{ 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
@@ -222,11 +262,67 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
};
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_mirror_pattern
+};
+
+static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
+ .eccbytes = 32,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = { {2, 30} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
+ .eccbytes = 64,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127},
+ /* Bootrom looks in bytes 0 & 5 for bad blocks */
+ .oobfree = { {6, 26}, { 64, 32} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
+ .eccbytes = 128,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = { }
+};
+
/* Define a default flash type setting serve as flash detecting only */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
-const char *mtd_names[] = {"pxa3xx_nand-0", NULL};
-
#define NDTR0_tCH(c) (min((c), 7) << 19)
#define NDTR0_tCS(c) (min((c), 7) << 16)
#define NDTR0_tWH(c) (min((c), 7) << 11)
@@ -241,9 +337,33 @@ const char *mtd_names[] = {"pxa3xx_nand-0", NULL};
/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
-static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
+static const struct of_device_id pxa3xx_nand_dt_ids[] = {
+ {
+ .compatible = "marvell,pxa3xx-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_PXA,
+ },
+ {
+ .compatible = "marvell,armada370-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
+
+static enum pxa3xx_nand_variant
+pxa3xx_nand_get_variant(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+ if (!of_id)
+ return PXA3XX_NAND_VARIANT_PXA;
+ return (enum pxa3xx_nand_variant)of_id->data;
+}
+
+static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
const struct pxa3xx_nand_timing *t)
{
+ struct pxa3xx_nand_info *info = host->info_data;
unsigned long nand_clk = clk_get_rate(info->clk);
uint32_t ndtr0, ndtr1;
@@ -264,24 +384,23 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
nand_writel(info, NDTR1CS0, ndtr1);
}
-static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
+/*
+ * Set the data and OOB size, depending on the selected
+ * spare and ECC configuration.
+ * Only applicable to READ0, READOOB and PAGEPROG commands.
+ */
+static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
+ struct mtd_info *mtd)
{
int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
- info->data_size = info->page_size;
- if (!oob_enable) {
- info->oob_size = 0;
+ info->data_size = mtd->writesize;
+ if (!oob_enable)
return;
- }
- switch (info->page_size) {
- case 2048:
- info->oob_size = (info->use_ecc) ? 40 : 64;
- break;
- case 512:
- info->oob_size = (info->use_ecc) ? 8 : 16;
- break;
- }
+ info->oob_size = info->spare_size;
+ if (!info->use_ecc)
+ info->oob_size += info->ecc_size;
}
/**
@@ -295,8 +414,27 @@ static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
uint32_t ndcr;
ndcr = info->reg_ndcr;
- ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
- ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
+
+ if (info->use_ecc) {
+ ndcr |= NDCR_ECC_EN;
+ if (info->ecc_bch)
+ nand_writel(info, NDECCCTRL, 0x1);
+ } else {
+ ndcr &= ~NDCR_ECC_EN;
+ if (info->ecc_bch)
+ nand_writel(info, NDECCCTRL, 0x0);
+ }
+
+ if (info->use_dma)
+ ndcr |= NDCR_DMA_EN;
+ else
+ ndcr &= ~NDCR_DMA_EN;
+
+ if (info->use_spare)
+ ndcr |= NDCR_SPARE_EN;
+ else
+ ndcr &= ~NDCR_SPARE_EN;
+
ndcr |= NDCR_ND_RUN;
/* clear status bits and run */
@@ -325,7 +463,8 @@ static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
nand_writel(info, NDSR, NDSR_MASK);
}
-static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
+static void __maybe_unused
+enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
uint32_t ndcr;
@@ -343,28 +482,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
+ unsigned int do_bytes = min(info->data_size, info->chunk_size);
+
switch (info->state) {
case STATE_PIO_WRITING:
- __raw_writesl(info->mmio_base + NDDB, info->data_buff,
- DIV_ROUND_UP(info->data_size, 4));
+ __raw_writesl(info->mmio_base + NDDB,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(do_bytes, 4));
+
if (info->oob_size > 0)
- __raw_writesl(info->mmio_base + NDDB, info->oob_buff,
- DIV_ROUND_UP(info->oob_size, 4));
+ __raw_writesl(info->mmio_base + NDDB,
+ info->oob_buff + info->oob_buff_pos,
+ DIV_ROUND_UP(info->oob_size, 4));
break;
case STATE_PIO_READING:
- __raw_readsl(info->mmio_base + NDDB, info->data_buff,
- DIV_ROUND_UP(info->data_size, 4));
+ __raw_readsl(info->mmio_base + NDDB,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(do_bytes, 4));
+
if (info->oob_size > 0)
- __raw_readsl(info->mmio_base + NDDB, info->oob_buff,
- DIV_ROUND_UP(info->oob_size, 4));
+ __raw_readsl(info->mmio_base + NDDB,
+ info->oob_buff + info->oob_buff_pos,
+ DIV_ROUND_UP(info->oob_size, 4));
break;
default:
- printk(KERN_ERR "%s: invalid state %d\n", __func__,
+ dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
info->state);
BUG();
}
+
+ /* Update buffer pointers for multi-page read/write */
+ info->data_buff_pos += do_bytes;
+ info->oob_buff_pos += info->oob_size;
+ info->data_size -= do_bytes;
}
+#ifdef ARCH_HAS_DMA
static void start_data_dma(struct pxa3xx_nand_info *info)
{
struct pxa_dma_desc *desc = info->data_desc;
@@ -385,7 +538,7 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
break;
default:
- printk(KERN_ERR "%s: invalid state %d\n", __func__,
+ dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
info->state);
BUG();
}
@@ -411,18 +564,46 @@ static void pxa3xx_nand_data_dma_irq(int channel, void *data)
enable_int(info, NDCR_INT_MASK);
nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
+#else
+static void start_data_dma(struct pxa3xx_nand_info *info)
+{}
+#endif
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
struct pxa3xx_nand_info *info = devid;
- unsigned int status, is_completed = 0;
+ unsigned int status, is_completed = 0, is_ready = 0;
+ unsigned int ready, cmd_done;
+
+ if (info->cs == 0) {
+ ready = NDSR_FLASH_RDY;
+ cmd_done = NDSR_CS0_CMDD;
+ } else {
+ ready = NDSR_RDY;
+ cmd_done = NDSR_CS1_CMDD;
+ }
status = nand_readl(info, NDSR);
- if (status & NDSR_DBERR)
- info->retcode = ERR_DBERR;
- if (status & NDSR_SBERR)
- info->retcode = ERR_SBERR;
+ if (status & NDSR_UNCORERR)
+ info->retcode = ERR_UNCORERR;
+ if (status & NDSR_CORERR) {
+ info->retcode = ERR_CORERR;
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
+ info->ecc_bch)
+ info->ecc_err_cnt = NDSR_ERR_CNT(status);
+ else
+ info->ecc_err_cnt = 1;
+
+ /*
+ * Each chunk composing a page is corrected independently,
+ * and we need to store the maximum number of corrected bitflips
+ * to return it to the MTD layer in ecc.read_page().
+ */
+ info->max_bitflips = max_t(unsigned int,
+ info->max_bitflips,
+ info->ecc_err_cnt);
+ }
if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
/* whether use dma to transfer data */
if (info->use_dma) {
@@ -437,38 +618,47 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
handle_data_pio(info);
}
}
- if (status & NDSR_CS0_CMDD) {
+ if (status & cmd_done) {
info->state = STATE_CMD_DONE;
is_completed = 1;
}
- if (status & NDSR_FLASH_RDY) {
- info->is_ready = 1;
+ if (status & ready) {
info->state = STATE_READY;
+ is_ready = 1;
}
if (status & NDSR_WRCMDREQ) {
nand_writel(info, NDSR, NDSR_WRCMDREQ);
status &= ~NDSR_WRCMDREQ;
info->state = STATE_CMD_HANDLE;
+
+ /*
+ * Command buffer registers NDCB{0-2} (and optionally NDCB3)
+ * must be loaded by writing either 12 or 16 bytes directly
+ * to NDCB0, four bytes at a time.
+ *
+ * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
+ * but each NDCBx register can be read.
+ */
nand_writel(info, NDCB0, info->ndcb0);
nand_writel(info, NDCB0, info->ndcb1);
nand_writel(info, NDCB0, info->ndcb2);
+
+ /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ nand_writel(info, NDCB0, info->ndcb3);
}
/* clear NDSR to let the controller exit the IRQ */
nand_writel(info, NDSR, status);
if (is_completed)
complete(&info->cmd_complete);
+ if (is_ready)
+ complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
return IRQ_HANDLED;
}
-static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
-{
- struct pxa3xx_nand_info *info = mtd->priv;
- return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
-}
-
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
for (; len > 0; len--)
@@ -477,34 +667,53 @@ static inline int is_buf_blank(uint8_t *buf, size_t len)
return 1;
}
-static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
- uint16_t column, int page_addr)
+static void set_command_address(struct pxa3xx_nand_info *info,
+ unsigned int page_size, uint16_t column, int page_addr)
{
- uint16_t cmd;
- int addr_cycle, exec_cmd, ndcb0;
- struct mtd_info *mtd = info->mtd;
+ /* small page addr setting */
+ if (page_size < PAGE_CHUNK_SIZE) {
+ info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
+ | (column & 0xFF);
- ndcb0 = 0;
- addr_cycle = 0;
- exec_cmd = 1;
+ info->ndcb2 = 0;
+ } else {
+ info->ndcb1 = ((page_addr & 0xFFFF) << 16)
+ | (column & 0xFFFF);
+
+ if (page_addr & 0xFF0000)
+ info->ndcb2 = (page_addr & 0xFF0000) >> 16;
+ else
+ info->ndcb2 = 0;
+ }
+}
+
+static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
+{
+ struct pxa3xx_nand_host *host = info->host[info->cs];
+ struct mtd_info *mtd = host->mtd;
/* reset data and oob column point to handle data */
info->buf_start = 0;
info->buf_count = 0;
info->oob_size = 0;
+ info->data_buff_pos = 0;
+ info->oob_buff_pos = 0;
info->use_ecc = 0;
- info->is_ready = 0;
+ info->use_spare = 1;
info->retcode = ERR_NONE;
+ info->ecc_err_cnt = 0;
+ info->ndcb3 = 0;
+ info->need_wait = 0;
switch (command) {
case NAND_CMD_READ0:
case NAND_CMD_PAGEPROG:
info->use_ecc = 1;
case NAND_CMD_READOOB:
- pxa3xx_set_datasize(info);
+ pxa3xx_set_datasize(info, mtd);
break;
- case NAND_CMD_SEQIN:
- exec_cmd = 0;
+ case NAND_CMD_PARAM:
+ info->use_spare = 0;
break;
default:
info->ndcb1 = 0;
@@ -512,49 +721,90 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
break;
}
- info->ndcb0 = ndcb0;
- addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles
- + info->col_addr_cycles);
+ /*
+ * If we are about to issue a read command, or about to set
+ * the write address, then clean the data buffer.
+ */
+ if (command == NAND_CMD_READ0 ||
+ command == NAND_CMD_READOOB ||
+ command == NAND_CMD_SEQIN) {
+
+ info->buf_count = mtd->writesize + mtd->oobsize;
+ memset(info->data_buff, 0xFF, info->buf_count);
+ }
+
+}
+
+static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
+ int ext_cmd_type, uint16_t column, int page_addr)
+{
+ int addr_cycle, exec_cmd;
+ struct pxa3xx_nand_host *host;
+ struct mtd_info *mtd;
+
+ host = info->host[info->cs];
+ mtd = host->mtd;
+ addr_cycle = 0;
+ exec_cmd = 1;
+
+ if (info->cs != 0)
+ info->ndcb0 = NDCB0_CSEL;
+ else
+ info->ndcb0 = 0;
+
+ if (command == NAND_CMD_SEQIN)
+ exec_cmd = 0;
+
+ addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
+ + host->col_addr_cycles);
switch (command) {
case NAND_CMD_READOOB:
case NAND_CMD_READ0:
- cmd = info->cmdset->read1;
+ info->buf_start = column;
+ info->ndcb0 |= NDCB0_CMD_TYPE(0)
+ | addr_cycle
+ | NAND_CMD_READ0;
+
if (command == NAND_CMD_READOOB)
- info->buf_start = mtd->writesize + column;
- else
- info->buf_start = column;
+ info->buf_start += mtd->writesize;
- if (unlikely(info->page_size < PAGE_CHUNK_SIZE))
- info->ndcb0 |= NDCB0_CMD_TYPE(0)
- | addr_cycle
- | (cmd & NDCB0_CMD1_MASK);
- else
- info->ndcb0 |= NDCB0_CMD_TYPE(0)
- | NDCB0_DBC
- | addr_cycle
- | cmd;
+ /*
+ * Multiple page read needs an 'extended command type' field,
+ * which is either naked-read or last-read according to the
+ * state.
+ */
+ if (mtd->writesize == PAGE_CHUNK_SIZE) {
+ info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
+ } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
+ | NDCB0_LEN_OVRD
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+ info->ndcb3 = info->chunk_size +
+ info->oob_size;
+ }
+
+ set_command_address(info, mtd->writesize, column, page_addr);
+ break;
case NAND_CMD_SEQIN:
- /* small page addr setting */
- if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) {
- info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
- | (column & 0xFF);
- info->ndcb2 = 0;
- } else {
- info->ndcb1 = ((page_addr & 0xFFFF) << 16)
- | (column & 0xFFFF);
+ info->buf_start = column;
+ set_command_address(info, mtd->writesize, 0, page_addr);
- if (page_addr & 0xFF0000)
- info->ndcb2 = (page_addr & 0xFF0000) >> 16;
- else
- info->ndcb2 = 0;
+ /*
+ * Multiple page programming needs to execute the initial
+ * SEQIN command that sets the page address.
+ */
+ if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+ | addr_cycle
+ | command;
+ /* No data transfer in this case */
+ info->data_size = 0;
+ exec_cmd = 1;
}
-
- info->buf_count = mtd->writesize + mtd->oobsize;
- memset(info->data_buff, 0xFF, info->buf_count);
-
break;
case NAND_CMD_PAGEPROG:
@@ -564,49 +814,85 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
break;
}
- cmd = info->cmdset->program;
- info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
- | NDCB0_AUTO_RS
- | NDCB0_ST_ROW_EN
- | NDCB0_DBC
- | cmd
- | addr_cycle;
+ /* Second command setting for large pages */
+ if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ /*
+ * Multiple page write uses the 'extended command'
+ * field. This can be used to issue a command dispatch
+ * or a naked-write depending on the current stage.
+ */
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_LEN_OVRD
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+ info->ndcb3 = info->chunk_size +
+ info->oob_size;
+
+ /*
+ * This is the command dispatch that completes a chunked
+ * page program operation.
+ */
+ if (info->data_size == 0) {
+ info->ndcb0 = NDCB0_CMD_TYPE(0x1)
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+ | command;
+ info->ndcb1 = 0;
+ info->ndcb2 = 0;
+ info->ndcb3 = 0;
+ }
+ } else {
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_AUTO_RS
+ | NDCB0_ST_ROW_EN
+ | NDCB0_DBC
+ | (NAND_CMD_PAGEPROG << 8)
+ | NAND_CMD_SEQIN
+ | addr_cycle;
+ }
+ break;
+
+ case NAND_CMD_PARAM:
+ info->buf_count = 256;
+ info->ndcb0 |= NDCB0_CMD_TYPE(0)
+ | NDCB0_ADDR_CYC(1)
+ | NDCB0_LEN_OVRD
+ | command;
+ info->ndcb1 = (column & 0xFF);
+ info->ndcb3 = 256;
+ info->data_size = 256;
break;
case NAND_CMD_READID:
- cmd = info->cmdset->read_id;
- info->buf_count = info->read_id_bytes;
+ info->buf_count = host->read_id_bytes;
info->ndcb0 |= NDCB0_CMD_TYPE(3)
| NDCB0_ADDR_CYC(1)
- | cmd;
+ | command;
+ info->ndcb1 = (column & 0xFF);
info->data_size = 8;
break;
case NAND_CMD_STATUS:
- cmd = info->cmdset->read_status;
info->buf_count = 1;
info->ndcb0 |= NDCB0_CMD_TYPE(4)
| NDCB0_ADDR_CYC(1)
- | cmd;
+ | command;
info->data_size = 8;
break;
case NAND_CMD_ERASE1:
- cmd = info->cmdset->erase;
info->ndcb0 |= NDCB0_CMD_TYPE(2)
| NDCB0_AUTO_RS
| NDCB0_ADDR_CYC(3)
| NDCB0_DBC
- | cmd;
+ | (NAND_CMD_ERASE2 << 8)
+ | NAND_CMD_ERASE1;
info->ndcb1 = page_addr;
info->ndcb2 = 0;
break;
case NAND_CMD_RESET:
- cmd = info->cmdset->reset;
info->ndcb0 |= NDCB0_CMD_TYPE(5)
- | cmd;
+ | command;
break;
@@ -616,18 +902,19 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
default:
exec_cmd = 0;
- printk(KERN_ERR "pxa3xx-nand: non-supported"
- " command %x\n", command);
+ dev_err(&info->pdev->dev, "non-supported command %x\n",
+ command);
break;
}
return exec_cmd;
}
-static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
- int column, int page_addr)
+static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
int ret, exec_cmd;
/*
@@ -638,62 +925,191 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
if (info->reg_ndcr & NDCR_DWIDTH_M)
column /= 2;
- exec_cmd = prepare_command_pool(info, command, column, page_addr);
+ /*
+	 * A different NAND chip may be attached to each chip select, so if
+	 * the chip select has changed, reprogram the timing registers.
+ */
+ if (info->cs != host->cs) {
+ info->cs = host->cs;
+ nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+ nand_writel(info, NDTR1CS0, info->ndtr1cs0);
+ }
+
+ prepare_start_command(info, command);
+
+ info->state = STATE_PREPARED;
+ exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
+
if (exec_cmd) {
init_completion(&info->cmd_complete);
+ init_completion(&info->dev_ready);
+ info->need_wait = 1;
pxa3xx_nand_start(info);
ret = wait_for_completion_timeout(&info->cmd_complete,
CHIP_DELAY_TIMEOUT);
if (!ret) {
- printk(KERN_ERR "Wait time out!!!\n");
+ dev_err(&info->pdev->dev, "Wait time out!!!\n");
/* Stop State Machine for next command cycle */
pxa3xx_nand_stop(info);
}
- info->state = STATE_IDLE;
}
+ info->state = STATE_IDLE;
+}
+
+static void nand_cmdfunc_extended(struct mtd_info *mtd,
+ const unsigned command,
+ int column, int page_addr)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ int ret, exec_cmd, ext_cmd_type;
+
+ /*
+ * if this is a x16 device then convert the input
+ * "byte" address into a "word" address appropriate
+ * for indexing a word-oriented device
+ */
+ if (info->reg_ndcr & NDCR_DWIDTH_M)
+ column /= 2;
+
+ /*
+	 * A different NAND chip may be attached to each chip select, so if
+	 * the chip select has changed, reprogram the timing registers.
+ */
+ if (info->cs != host->cs) {
+ info->cs = host->cs;
+ nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+ nand_writel(info, NDTR1CS0, info->ndtr1cs0);
+ }
+
+ /* Select the extended command for the first command */
+ switch (command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ ext_cmd_type = EXT_CMD_TYPE_MONO;
+ break;
+ case NAND_CMD_SEQIN:
+ ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+ break;
+ case NAND_CMD_PAGEPROG:
+ ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+ break;
+ default:
+ ext_cmd_type = 0;
+ break;
+ }
+
+ prepare_start_command(info, command);
+
+ /*
+ * Prepare the "is ready" completion before starting a command
+ * transaction sequence. If the command is not executed the
+ * completion will be completed, see below.
+ *
+ * We can do that inside the loop because the command variable
+ * is invariant and thus so is the exec_cmd.
+ */
+ info->need_wait = 1;
+ init_completion(&info->dev_ready);
+ do {
+ info->state = STATE_PREPARED;
+ exec_cmd = prepare_set_command(info, command, ext_cmd_type,
+ column, page_addr);
+ if (!exec_cmd) {
+ info->need_wait = 0;
+ complete(&info->dev_ready);
+ break;
+ }
+
+ init_completion(&info->cmd_complete);
+ pxa3xx_nand_start(info);
+
+ ret = wait_for_completion_timeout(&info->cmd_complete,
+ CHIP_DELAY_TIMEOUT);
+ if (!ret) {
+ dev_err(&info->pdev->dev, "Wait time out!!!\n");
+ /* Stop State Machine for next command cycle */
+ pxa3xx_nand_stop(info);
+ break;
+ }
+
+ /* Check if the sequence is complete */
+ if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
+ break;
+
+ /*
+		 * After a split program command sequence has issued the final
+		 * command dispatch, the command sequence is complete.
+ */
+ if (info->data_size == 0 &&
+ command == NAND_CMD_PAGEPROG &&
+ ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
+ break;
+
+ if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
+ /* Last read: issue a 'last naked read' */
+ if (info->data_size == info->chunk_size)
+ ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
+ else
+ ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+
+ /*
+		 * If a split program command has no more data to transfer,
+		 * the command dispatch must be issued to complete it.
+ */
+ } else if (command == NAND_CMD_PAGEPROG &&
+ info->data_size == 0) {
+ ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+ }
+ } while (1);
+
+ info->state = STATE_IDLE;
}
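
As an aside (not part of the patch), the chunked read handled by nand_cmdfunc_extended() above reduces to a simple progression of extended command types: a monolithic read for the first command, naked reads for intermediate chunks, and a last naked read for the final chunk. The standalone C sketch below only models that selection; the enum names, chunk size, and page size are hypothetical stand-ins for the driver's EXT_CMD_TYPE_* values and controller FIFO geometry.

#include <stdio.h>

enum ext_type { EXT_MONO, EXT_NAKED_RW, EXT_LAST_RW };

static const char *ext_name(enum ext_type t)
{
	switch (t) {
	case EXT_MONO:     return "monolithic read";
	case EXT_NAKED_RW: return "naked read";
	default:           return "last naked read";
	}
}

int main(void)
{
	int chunk_size = 2048;        /* hypothetical controller chunk (FIFO) size */
	int data_size  = 4096;        /* hypothetical 4 KiB page, read in chunks */
	enum ext_type ext = EXT_MONO; /* first command of the sequence */

	while (data_size > 0) {
		printf("issue chunk as '%s', %d bytes still to read\n",
		       ext_name(ext), data_size);
		data_size -= chunk_size;
		if (data_size <= 0)
			break;
		/* Mirror the driver's choice for the next chunk. */
		ext = (data_size == chunk_size) ? EXT_LAST_RW : EXT_NAKED_RW;
	}
	return 0;
}
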
-static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
+static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
chip->write_buf(mtd, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
}
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int page)
+ struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
chip->read_buf(mtd, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- if (info->retcode == ERR_SBERR) {
- switch (info->use_ecc) {
- case 1:
- mtd->ecc_stats.corrected++;
- break;
- case 0:
- default:
- break;
- }
- } else if (info->retcode == ERR_DBERR) {
+ if (info->retcode == ERR_CORERR && info->use_ecc) {
+ mtd->ecc_stats.corrected += info->ecc_err_cnt;
+
+ } else if (info->retcode == ERR_UNCORERR) {
/*
* for blank page (all 0xff), HW will calculate its ECC as
* 0, which is different from the ECC information within
- * OOB, ignore such double bit errors
+ * OOB, ignore such uncorrectable errors
*/
if (is_buf_blank(buf, mtd->writesize))
+ info->retcode = ERR_NONE;
+ else
mtd->ecc_stats.failed++;
}
- return 0;
+ return info->max_bitflips;
}
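
The blank-page handling in pxa3xx_nand_read_page_hwecc() above hinges on one check: a reported uncorrectable error is ignored when the whole page reads back as 0xFF, because the controller computes the ECC of an erased page as 0, which never matches the all-0xFF OOB. A minimal, standalone sketch of that decision follows (not part of the patch); buf_is_blank() and the fixed 2048-byte page are illustrative assumptions, not the driver's code.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the driver's is_buf_blank() helper. */
static bool buf_is_blank(const uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (buf[i] != 0xFF)
			return false;
	return true;
}

int main(void)
{
	uint8_t page[2048];
	bool hw_reported_uncorrectable = true;  /* pretend the ECC engine flagged it */

	memset(page, 0xFF, sizeof(page));       /* erased (blank) page */

	if (hw_reported_uncorrectable && buf_is_blank(page, sizeof(page)))
		printf("blank page: ignore the uncorrectable-ECC report\n");
	else if (hw_reported_uncorrectable)
		printf("real uncorrectable error: bump ecc_stats.failed\n");

	return 0;
}
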
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
char retval = 0xFF;
if (info->buf_start < info->buf_count)
@@ -705,7 +1121,8 @@ static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
u16 retval = 0xFFFF;
if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
@@ -717,7 +1134,8 @@ static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
memcpy(buf, info->data_buff + info->buf_start, real_len);
@@ -727,19 +1145,14 @@ static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
const uint8_t *buf, int len)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
memcpy(info->data_buff + info->buf_start, buf, real_len);
info->buf_start += real_len;
}
-static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
- const uint8_t *buf, int len)
-{
- return 0;
-}
-
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
return;
@@ -747,107 +1160,121 @@ static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ int ret;
+
+ if (info->need_wait) {
+ ret = wait_for_completion_timeout(&info->dev_ready,
+ CHIP_DELAY_TIMEOUT);
+ info->need_wait = 0;
+ if (!ret) {
+ dev_err(&info->pdev->dev, "Ready time out!!!\n");
+ return NAND_STATUS_FAIL;
+ }
+ }
/* pxa3xx_nand_send_command has waited for command complete */
if (this->state == FL_WRITING || this->state == FL_ERASING) {
if (info->retcode == ERR_NONE)
return 0;
- else {
- /*
- * any error make it return 0x01 which will tell
- * the caller the erase and write fail
- */
- return 0x01;
- }
+ else
+ return NAND_STATUS_FAIL;
}
- return 0;
+ return NAND_STATUS_READY;
}
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
const struct pxa3xx_nand_flash *f)
{
struct platform_device *pdev = info->pdev;
- struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct pxa3xx_nand_host *host = info->host[info->cs];
uint32_t ndcr = 0x0; /* enable all interrupts */
- if (f->page_size != 2048 && f->page_size != 512)
+ if (f->page_size != 2048 && f->page_size != 512) {
+ dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
return -EINVAL;
+ }
- if (f->flash_width != 16 && f->flash_width != 8)
+ if (f->flash_width != 16 && f->flash_width != 8) {
+ dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
return -EINVAL;
+ }
/* calculate flash information */
- info->cmdset = &default_cmdset;
- info->page_size = f->page_size;
- info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
+ host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
/* calculate addressing information */
- info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
+ host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
if (f->num_blocks * f->page_per_block > 65536)
- info->row_addr_cycles = 3;
+ host->row_addr_cycles = 3;
else
- info->row_addr_cycles = 2;
+ host->row_addr_cycles = 2;
ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
- ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0;
+ ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
- ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes);
+ ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
ndcr |= NDCR_SPARE_EN; /* enable spare by default */
info->reg_ndcr = ndcr;
- pxa3xx_nand_set_timing(info, f->timing);
+ pxa3xx_nand_set_timing(host, f->timing);
return 0;
}
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
+ /*
+	 * Chip select 0 is hard-coded here: keep_config is not supported
+	 * when more than one chip is attached to the controller.
+ */
+ struct pxa3xx_nand_host *host = info->host[0];
uint32_t ndcr = nand_readl(info, NDCR);
- info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
- /* set info fields needed to read id */
- info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
- info->reg_ndcr = ndcr;
- info->cmdset = &default_cmdset;
+ if (ndcr & NDCR_PAGE_SZ) {
+ /* Controller's FIFO size */
+ info->chunk_size = 2048;
+ host->read_id_bytes = 4;
+ } else {
+ info->chunk_size = 512;
+ host->read_id_bytes = 2;
+ }
+
+ /* Set an initial chunk size */
+ info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
-
return 0;
}
-/* the maximum possible buffer size for large page with OOB data
- * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
- * data buffer and the DMA descriptor
- */
-#define MAX_BUFF_SIZE PAGE_SIZE
-
+#ifdef ARCH_HAS_DMA
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
struct platform_device *pdev = info->pdev;
- int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);
+ int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
if (use_dma == 0) {
- info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
if (info->data_buff == NULL)
return -ENOMEM;
return 0;
}
- info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
+ info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
&info->data_buff_phys, GFP_KERNEL);
if (info->data_buff == NULL) {
dev_err(&pdev->dev, "failed to allocate dma buffer\n");
return -ENOMEM;
}
- info->data_buff_size = MAX_BUFF_SIZE;
info->data_desc = (void *)info->data_buff + data_desc_offset;
info->data_desc_addr = info->data_buff_phys + data_desc_offset;
@@ -855,60 +1282,168 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
pxa3xx_nand_data_dma_irq, info);
if (info->data_dma_ch < 0) {
dev_err(&pdev->dev, "failed to request data dma\n");
- dma_free_coherent(&pdev->dev, info->data_buff_size,
+ dma_free_coherent(&pdev->dev, info->buf_size,
info->data_buff, info->data_buff_phys);
return info->data_dma_ch;
}
+ /*
+ * Now that DMA buffers are allocated we turn on
+ * DMA proper for I/O operations.
+ */
+ info->use_dma = 1;
return 0;
}
+static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
+{
+ struct platform_device *pdev = info->pdev;
+ if (info->use_dma) {
+ pxa_free_dma(info->data_dma_ch);
+ dma_free_coherent(&pdev->dev, info->buf_size,
+ info->data_buff, info->data_buff_phys);
+ } else {
+ kfree(info->data_buff);
+ }
+}
+#else
+static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
+{
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+ if (info->data_buff == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
+{
+ kfree(info->data_buff);
+}
+#endif
+
static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
{
- struct mtd_info *mtd = info->mtd;
- struct nand_chip *chip = mtd->priv;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int ret;
+
+ mtd = info->host[info->cs]->mtd;
+ chip = mtd->priv;
/* use the common timing to make a try */
- pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
+ ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
+ if (ret)
+ return ret;
+
chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
- if (info->is_ready)
- return 1;
- else
- return 0;
+ ret = chip->waitfunc(mtd, chip);
+ if (ret & NAND_STATUS_FAIL)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int pxa_ecc_init(struct pxa3xx_nand_info *info,
+ struct nand_ecc_ctrl *ecc,
+ int strength, int ecc_stepsize, int page_size)
+{
+ if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
+ info->chunk_size = 2048;
+ info->spare_size = 40;
+ info->ecc_size = 24;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = 512;
+ ecc->strength = 1;
+
+ } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
+ info->chunk_size = 512;
+ info->spare_size = 8;
+ info->ecc_size = 8;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = 512;
+ ecc->strength = 1;
+
+ /*
+ * Required ECC: 4-bit correction per 512 bytes
+ * Select: 16-bit correction per 2048 bytes
+ */
+ } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
+ info->ecc_bch = 1;
+ info->chunk_size = 2048;
+ info->spare_size = 32;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_2KB_bch4bit;
+ ecc->strength = 16;
+
+ } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
+ info->ecc_bch = 1;
+ info->chunk_size = 2048;
+ info->spare_size = 32;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_4KB_bch4bit;
+ ecc->strength = 16;
+
+ /*
+ * Required ECC: 8-bit correction per 512 bytes
+ * Select: 16-bit correction per 1024 bytes
+ */
+ } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
+ info->ecc_bch = 1;
+ info->chunk_size = 1024;
+ info->spare_size = 0;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_4KB_bch8bit;
+ ecc->strength = 16;
+ } else {
+ dev_err(&info->pdev->dev,
+ "ECC strength %d at page size %d is not supported\n",
+ strength, page_size);
+ return -ENODEV;
+ }
+
+ dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
+ ecc->strength, ecc->size);
+ return 0;
}
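
pxa_ecc_init() above is effectively a lookup from the requested (strength, step size, page size) triple to a chunk/spare/ECC geometry and the strength actually programmed. The standalone sketch below (not part of the patch) restates that mapping as a table, with the numeric values copied from the function; the struct and helper names are hypothetical, and the table-driven form is just one way of expressing the same choices.

#include <stddef.h>
#include <stdio.h>

/* Illustrative table of the (strength, step, page) -> geometry choices. */
struct ecc_choice {
	int strength, step, page;      /* requested */
	int chunk, spare, ecc_bytes;   /* selected geometry */
	int out_strength;              /* strength actually programmed */
};

static const struct ecc_choice choices[] = {
	{ 1, 512, 2048, 2048, 40, 24,  1 },
	{ 1, 512,  512,  512,  8,  8,  1 },
	{ 4, 512, 2048, 2048, 32, 32, 16 },
	{ 4, 512, 4096, 2048, 32, 32, 16 },
	{ 8, 512, 4096, 1024,  0, 32, 16 },
};

int main(void)
{
	int strength = 4, step = 512, page = 4096;  /* example request */
	size_t i;

	for (i = 0; i < sizeof(choices) / sizeof(choices[0]); i++) {
		const struct ecc_choice *c = &choices[i];

		if (c->strength == strength && c->step == step && c->page == page) {
			printf("chunk %d, spare %d, ecc %d, strength %d\n",
			       c->chunk, c->spare, c->ecc_bytes, c->out_strength);
			return 0;
		}
	}
	printf("unsupported ECC request\n");
	return 1;
}
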
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
struct platform_device *pdev = info->pdev;
- struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
- struct nand_flash_dev pxa3xx_flash_ids[2] = { {NULL,}, {NULL,} };
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
const struct pxa3xx_nand_flash *f = NULL;
struct nand_chip *chip = mtd->priv;
uint32_t id = -1;
uint64_t chipsize;
int i, ret, num;
+ uint16_t ecc_strength, ecc_step;
if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
goto KEEP_CONFIG;
ret = pxa3xx_nand_sensing(info);
- if (!ret) {
- kfree(mtd);
- info->mtd = NULL;
- printk(KERN_INFO "There is no nand chip on cs 0!\n");
+ if (ret) {
+ dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
+ info->cs);
- return -EINVAL;
+ return ret;
}
chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
id = *((uint16_t *)(info->data_buff));
if (id != 0)
- printk(KERN_INFO "Detect a flash id %x\n", id);
+ dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
else {
- kfree(mtd);
- info->mtd = NULL;
- printk(KERN_WARNING "Read out ID 0, potential timing set wrong!!\n");
+ dev_warn(&info->pdev->dev,
+ "Read out ID 0, potential timing set wrong!!\n");
return -EINVAL;
}
@@ -926,143 +1461,218 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
}
if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
- kfree(mtd);
- info->mtd = NULL;
- printk(KERN_ERR "ERROR!! flash not defined!!!\n");
+ dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
return -EINVAL;
}
- pxa3xx_nand_config_flash(info, f);
+ ret = pxa3xx_nand_config_flash(info, f);
+ if (ret) {
+ dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
+ return ret;
+ }
+
pxa3xx_flash_ids[0].name = f->name;
- pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff;
+ pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
pxa3xx_flash_ids[0].pagesize = f->page_size;
chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
if (f->flash_width == 16)
pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
+ pxa3xx_flash_ids[1].name = NULL;
+ def = pxa3xx_flash_ids;
KEEP_CONFIG:
- if (nand_scan_ident(mtd, 1, pxa3xx_flash_ids))
+ if (info->reg_ndcr & NDCR_DWIDTH_M)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ /* Device detection must be done with ECC disabled */
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ nand_writel(info, NDECCCTRL, 0x0);
+
+ if (nand_scan_ident(mtd, 1, def))
return -ENODEV;
+
+ if (pdata->flash_bbt) {
+ /*
+ * We'll use a bad block table stored in-flash and don't
+ * allow writing the bad block marker to the flash.
+ */
+ chip->bbt_options |= NAND_BBT_USE_FLASH |
+ NAND_BBT_NO_OOB_BBM;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /*
+	 * If the page size is bigger than the FIFO size, check that we
+	 * are given the right variant and then switch to the extended
+	 * (aka split) command handling.
+ */
+ if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
+ chip->cmdfunc = nand_cmdfunc_extended;
+ } else {
+ dev_err(&info->pdev->dev,
+ "unsupported page size on this variant\n");
+ return -ENODEV;
+ }
+ }
+
+ if (pdata->ecc_strength && pdata->ecc_step_size) {
+ ecc_strength = pdata->ecc_strength;
+ ecc_step = pdata->ecc_step_size;
+ } else {
+ ecc_strength = chip->ecc_strength_ds;
+ ecc_step = chip->ecc_step_ds;
+ }
+
+ /* Set default ECC strength requirements on non-ONFI devices */
+ if (ecc_strength < 1 && ecc_step < 1) {
+ ecc_strength = 1;
+ ecc_step = 512;
+ }
+
+ ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
+ ecc_step, mtd->writesize);
+ if (ret)
+ return ret;
+
/* calculate addressing information */
- info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1;
- info->oob_buff = info->data_buff + mtd->writesize;
- if ((mtd->size >> chip->page_shift) > 65536)
- info->row_addr_cycles = 3;
+ if (mtd->writesize >= 2048)
+ host->col_addr_cycles = 2;
else
- info->row_addr_cycles = 2;
- mtd->name = mtd_names[0];
- chip->ecc.mode = NAND_ECC_HW;
- chip->ecc.size = f->page_size;
+ host->col_addr_cycles = 1;
+
+ /* release the initial buffer */
+ kfree(info->data_buff);
- chip->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;
- chip->options |= NAND_NO_AUTOINCR;
- chip->options |= NAND_NO_READRDY;
+ /* allocate the real data + oob buffer */
+ info->buf_size = mtd->writesize + mtd->oobsize;
+ ret = pxa3xx_nand_init_buff(info);
+ if (ret)
+ return ret;
+ info->oob_buff = info->data_buff + mtd->writesize;
+ if ((mtd->size >> chip->page_shift) > 65536)
+ host->row_addr_cycles = 3;
+ else
+ host->row_addr_cycles = 2;
return nand_scan_tail(mtd);
}
-static
-struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev)
+static int alloc_nand_resource(struct platform_device *pdev)
{
+ struct pxa3xx_nand_platform_data *pdata;
struct pxa3xx_nand_info *info;
- struct nand_chip *chip;
+ struct pxa3xx_nand_host *host;
+ struct nand_chip *chip = NULL;
struct mtd_info *mtd;
struct resource *r;
- int ret, irq;
+ int ret, irq, cs;
- mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
- GFP_KERNEL);
- if (!mtd) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
- return NULL;
- }
+ pdata = dev_get_platdata(&pdev->dev);
+ info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
+ sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- info = (struct pxa3xx_nand_info *)(&mtd[1]);
- chip = (struct nand_chip *)(&mtd[1]);
info->pdev = pdev;
- info->mtd = mtd;
- mtd->priv = info;
- mtd->owner = THIS_MODULE;
-
- chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
- chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
- chip->controller = &info->controller;
- chip->waitfunc = pxa3xx_nand_waitfunc;
- chip->select_chip = pxa3xx_nand_select_chip;
- chip->dev_ready = pxa3xx_nand_dev_ready;
- chip->cmdfunc = pxa3xx_nand_cmdfunc;
- chip->read_word = pxa3xx_nand_read_word;
- chip->read_byte = pxa3xx_nand_read_byte;
- chip->read_buf = pxa3xx_nand_read_buf;
- chip->write_buf = pxa3xx_nand_write_buf;
- chip->verify_buf = pxa3xx_nand_verify_buf;
+ info->variant = pxa3xx_nand_get_variant(pdev);
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ mtd = (struct mtd_info *)((unsigned int)&info[1] +
+ (sizeof(*mtd) + sizeof(*host)) * cs);
+ chip = (struct nand_chip *)(&mtd[1]);
+ host = (struct pxa3xx_nand_host *)chip;
+ info->host[cs] = host;
+ host->mtd = mtd;
+ host->cs = cs;
+ host->info_data = info;
+ mtd->priv = host;
+ mtd->owner = THIS_MODULE;
+
+ chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
+ chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
+ chip->controller = &info->controller;
+ chip->waitfunc = pxa3xx_nand_waitfunc;
+ chip->select_chip = pxa3xx_nand_select_chip;
+ chip->read_word = pxa3xx_nand_read_word;
+ chip->read_byte = pxa3xx_nand_read_byte;
+ chip->read_buf = pxa3xx_nand_read_buf;
+ chip->write_buf = pxa3xx_nand_write_buf;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ chip->cmdfunc = nand_cmdfunc;
+ }
spin_lock_init(&chip->controller->lock);
init_waitqueue_head(&chip->controller->wq);
- info->clk = clk_get(&pdev->dev, NULL);
+ info->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(info->clk)) {
dev_err(&pdev->dev, "failed to get nand clock\n");
- ret = PTR_ERR(info->clk);
- goto fail_free_mtd;
- }
- clk_enable(info->clk);
-
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no resource defined for data DMA\n");
- ret = -ENXIO;
- goto fail_put_clk;
+ return PTR_ERR(info->clk);
}
- info->drcmr_dat = r->start;
+ ret = clk_prepare_enable(info->clk);
+ if (ret < 0)
+ return ret;
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (r == NULL) {
- dev_err(&pdev->dev, "no resource defined for command DMA\n");
- ret = -ENXIO;
- goto fail_put_clk;
+ if (use_dma) {
+ /*
+ * This is a dirty hack to make this driver work from
+ * devicetree bindings. It can be removed once we have
+		 * a proper DMA controller framework for DT.
+ */
+ if (pdev->dev.of_node &&
+ of_machine_is_compatible("marvell,pxa3xx")) {
+ info->drcmr_dat = 97;
+ info->drcmr_cmd = 99;
+ } else {
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev,
+ "no resource defined for data DMA\n");
+ ret = -ENXIO;
+ goto fail_disable_clk;
+ }
+ info->drcmr_dat = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (r == NULL) {
+ dev_err(&pdev->dev,
+ "no resource defined for cmd DMA\n");
+ ret = -ENXIO;
+ goto fail_disable_clk;
+ }
+ info->drcmr_cmd = r->start;
+ }
}
- info->drcmr_cmd = r->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no IRQ resource defined\n");
ret = -ENXIO;
- goto fail_put_clk;
+ goto fail_disable_clk;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no IO memory resource defined\n");
- ret = -ENODEV;
- goto fail_put_clk;
- }
-
- r = request_mem_region(r->start, resource_size(r), pdev->name);
- if (r == NULL) {
- dev_err(&pdev->dev, "failed to request memory resource\n");
- ret = -EBUSY;
- goto fail_put_clk;
- }
-
- info->mmio_base = ioremap(r->start, resource_size(r));
- if (info->mmio_base == NULL) {
- dev_err(&pdev->dev, "ioremap() failed\n");
- ret = -ENODEV;
- goto fail_free_res;
+ info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(info->mmio_base)) {
+ ret = PTR_ERR(info->mmio_base);
+ goto fail_disable_clk;
}
info->mmio_phys = r->start;
- ret = pxa3xx_nand_init_buff(info);
- if (ret)
- goto fail_free_io;
+ /* Allocate a buffer to allow flash detection */
+ info->buf_size = INIT_BUFFER_SIZE;
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+ if (info->data_buff == NULL) {
+ ret = -ENOMEM;
+ goto fail_disable_clk;
+ }
/* initialize all interrupts to be disabled */
disable_int(info, NDSR_MASK);
- ret = request_irq(irq, pxa3xx_nand_irq, IRQF_DISABLED,
- pdev->name, info);
+ ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ\n");
goto fail_free_buf;
@@ -1070,118 +1680,189 @@ struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- return info;
+ return 0;
fail_free_buf:
free_irq(irq, info);
- if (use_dma) {
- pxa_free_dma(info->data_dma_ch);
- dma_free_coherent(&pdev->dev, info->data_buff_size,
- info->data_buff, info->data_buff_phys);
- } else
- kfree(info->data_buff);
-fail_free_io:
- iounmap(info->mmio_base);
-fail_free_res:
- release_mem_region(r->start, resource_size(r));
-fail_put_clk:
- clk_disable(info->clk);
- clk_put(info->clk);
-fail_free_mtd:
- kfree(mtd);
- return NULL;
+ kfree(info->data_buff);
+fail_disable_clk:
+ clk_disable_unprepare(info->clk);
+ return ret;
}
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
- struct mtd_info *mtd = info->mtd;
- struct resource *r;
- int irq;
+ struct pxa3xx_nand_platform_data *pdata;
+ int irq, cs;
- platform_set_drvdata(pdev, NULL);
+ if (!info)
+ return 0;
+
+ pdata = dev_get_platdata(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq >= 0)
free_irq(irq, info);
- if (use_dma) {
- pxa_free_dma(info->data_dma_ch);
- dma_free_writecombine(&pdev->dev, info->data_buff_size,
- info->data_buff, info->data_buff_phys);
- } else
- kfree(info->data_buff);
+ pxa3xx_nand_free_buff(info);
- iounmap(info->mmio_base);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, resource_size(r));
+ clk_disable_unprepare(info->clk);
- clk_disable(info->clk);
- clk_put(info->clk);
+ for (cs = 0; cs < pdata->num_cs; cs++)
+ nand_release(info->host[cs]->mtd);
+ return 0;
+}
+
+static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
+{
+ struct pxa3xx_nand_platform_data *pdata;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+
+ if (!of_id)
+ return 0;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
+ pdata->enable_arbiter = 1;
+ if (of_get_property(np, "marvell,nand-keep-config", NULL))
+ pdata->keep_config = 1;
+ of_property_read_u32(np, "num-cs", &pdata->num_cs);
+ pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
+
+ pdata->ecc_strength = of_get_nand_ecc_strength(np);
+ if (pdata->ecc_strength < 0)
+ pdata->ecc_strength = 0;
+
+ pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
+ if (pdata->ecc_step_size < 0)
+ pdata->ecc_step_size = 0;
+
+ pdev->dev.platform_data = pdata;
- if (mtd) {
- mtd_device_unregister(mtd);
- kfree(mtd);
- }
return 0;
}
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
struct pxa3xx_nand_platform_data *pdata;
+ struct mtd_part_parser_data ppdata = {};
struct pxa3xx_nand_info *info;
+ int ret, cs, probe_success;
+
+#ifndef ARCH_HAS_DMA
+ if (use_dma) {
+ use_dma = 0;
+ dev_warn(&pdev->dev,
+ "This platform can't do DMA on this device\n");
+ }
+#endif
+ ret = pxa3xx_nand_probe_dt(pdev);
+ if (ret)
+ return ret;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data defined\n");
return -ENODEV;
}
- info = alloc_nand_resource(pdev);
- if (info == NULL)
- return -ENOMEM;
-
- if (pxa3xx_nand_scan(info->mtd)) {
- dev_err(&pdev->dev, "failed to scan nand\n");
- pxa3xx_nand_remove(pdev);
- return -ENODEV;
+ ret = alloc_nand_resource(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "alloc nand resource failed\n");
+ return ret;
}
- if (mtd_has_cmdlinepart()) {
- const char *probes[] = { "cmdlinepart", NULL };
- struct mtd_partition *parts;
- int nr_parts;
+ info = platform_get_drvdata(pdev);
+ probe_success = 0;
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ struct mtd_info *mtd = info->host[cs]->mtd;
- nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0);
+ /*
+		 * The mtd name matches the one used in the 'mtdparts' kernel
+		 * parameter. Changing it would break existing user mtd
+		 * partition configurations.
+ */
+ mtd->name = "pxa3xx_nand-0";
+ info->cs = cs;
+ ret = pxa3xx_nand_scan(mtd);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
+ cs);
+ continue;
+ }
- if (nr_parts)
- return mtd_device_register(info->mtd, parts, nr_parts);
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(mtd, NULL,
+ &ppdata, pdata->parts[cs],
+ pdata->nr_parts[cs]);
+ if (!ret)
+ probe_success = 1;
}
- return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
+ if (!probe_success) {
+ pxa3xx_nand_remove(pdev);
+ return -ENODEV;
+ }
+
+ return 0;
}
#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
{
struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
- struct mtd_info *mtd = info->mtd;
+ struct pxa3xx_nand_platform_data *pdata;
+ struct mtd_info *mtd;
+ int cs;
+ pdata = dev_get_platdata(&pdev->dev);
if (info->state) {
dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
return -EAGAIN;
}
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ mtd = info->host[cs]->mtd;
+ mtd_suspend(mtd);
+ }
+
return 0;
}
static int pxa3xx_nand_resume(struct platform_device *pdev)
{
struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
- struct mtd_info *mtd = info->mtd;
+ struct pxa3xx_nand_platform_data *pdata;
+ struct mtd_info *mtd;
+ int cs;
- nand_writel(info, NDTR0CS0, info->ndtr0cs0);
- nand_writel(info, NDTR1CS0, info->ndtr1cs0);
- clk_enable(info->clk);
+ pdata = dev_get_platdata(&pdev->dev);
+	/* Don't handle interrupts until the mtd resume routines have run */
+ disable_int(info, NDCR_INT_MASK);
+
+ /*
+	 * Set the chip select to an invalid value so the driver resets
+	 * the timing according to the current chip select at the
+	 * beginning of cmdfunc.
+ */
+ info->cs = 0xff;
+
+ /*
+	 * As the spec says, NDSR is updated to 0x1800 when the nand
+	 * clock is disabled and re-enabled. To prevent that from
+	 * corrupting the driver's state machine, clear all status
+	 * bits before resuming.
+ */
+ nand_writel(info, NDSR, NDSR_MASK);
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ mtd = info->host[cs]->mtd;
+ mtd_resume(mtd);
+ }
return 0;
}
@@ -1193,6 +1874,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
static struct platform_driver pxa3xx_nand_driver = {
.driver = {
.name = "pxa3xx-nand",
+ .of_match_table = pxa3xx_nand_dt_ids,
},
.probe = pxa3xx_nand_probe,
.remove = pxa3xx_nand_remove,
@@ -1200,17 +1882,7 @@ static struct platform_driver pxa3xx_nand_driver = {
.resume = pxa3xx_nand_resume,
};
-static int __init pxa3xx_nand_init(void)
-{
- return platform_driver_register(&pxa3xx_nand_driver);
-}
-module_init(pxa3xx_nand_init);
-
-static void __exit pxa3xx_nand_exit(void)
-{
- platform_driver_unregister(&pxa3xx_nand_driver);
-}
-module_exit(pxa3xx_nand_exit);
+module_platform_driver(pxa3xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index cae2e013c98..baea83f4dea 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -22,7 +22,7 @@
#include "r852.h"
-static int r852_enable_dma = 1;
+static bool r852_enable_dma = 1;
module_param(r852_enable_dma, bool, S_IRUGO);
MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
@@ -181,7 +181,7 @@ static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
/* Set dma direction */
dev->dma_dir = do_read;
dev->dma_stage = 1;
- INIT_COMPLETION(dev->dma_done);
+ reinit_completion(&dev->dma_done);
dbg_verbose("doing dma %s ", do_read ? "read" : "write");
@@ -229,7 +229,7 @@ static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
/*
* Program data lines of the nand chip to send data to it
*/
-void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
struct r852_device *dev = r852_get_dev(mtd);
uint32_t reg;
@@ -245,7 +245,7 @@ void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
}
/* write DWORD chunks - faster */
- while (len) {
+ while (len >= 4) {
reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
r852_write_reg_dword(dev, R852_DATALINE, reg);
buf += 4;
@@ -254,14 +254,16 @@ void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
}
/* write rest */
- while (len)
+ while (len > 0) {
r852_write_reg(dev, R852_DATALINE, *buf++);
+ len--;
+ }
}
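
The r852_write_buf() fix above splits the copy into whole 32-bit words (while len >= 4) followed by a byte tail, instead of looping on DWORDs for any non-zero length and reading past the end of the caller's buffer. A minimal, standalone model of that loop structure is sketched below (not part of the patch); the register writers are stand-ins that merely count accesses.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for the hardware register writers; here they only count. */
static int dword_writes, byte_writes;
static void write_dword(uint32_t v) { (void)v; dword_writes++; }
static void write_byte(uint8_t v)   { (void)v; byte_writes++;  }

/* Same shape as the fixed loop: whole 32-bit words first, then the tail. */
static void write_buf(const uint8_t *buf, int len)
{
	while (len >= 4) {
		uint32_t reg = buf[0] | (buf[1] << 8) | (buf[2] << 16) |
			       ((uint32_t)buf[3] << 24);

		write_dword(reg);
		buf += 4;
		len -= 4;
	}
	while (len > 0) {
		write_byte(*buf++);
		len--;
	}
}

int main(void)
{
	uint8_t data[11];

	memset(data, 0xA5, sizeof(data));
	write_buf(data, sizeof(data));
	printf("%d dword writes, %d byte writes\n", dword_writes, byte_writes);
	return 0;
}
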
/*
* Read data lines of the nand chip to retrieve data
*/
-void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct r852_device *dev = r852_get_dev(mtd);
uint32_t reg;
@@ -309,31 +311,10 @@ static uint8_t r852_read_byte(struct mtd_info *mtd)
return r852_read_reg(dev, R852_DATALINE);
}
-
-/*
- * Readback the buffer to verify it
- */
-int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
- struct r852_device *dev = r852_get_dev(mtd);
-
- /* We can't be sure about anything here... */
- if (dev->card_unstable)
- return -1;
-
- /* This will never happen, unless you wired up a nand chip
- with > 512 bytes page size to the reader */
- if (len > SM_SECTOR_SIZE)
- return 0;
-
- r852_read_buf(mtd, dev->tmp_buffer, len);
- return memcmp(buf, dev->tmp_buffer, len);
-}
-
/*
* Control several chip lines & send commands
*/
-void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+static void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
struct r852_device *dev = r852_get_dev(mtd);
@@ -378,7 +359,7 @@ void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
* Wait till card is ready.
* based on nand_wait, but returns errors on DMA error
*/
-int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
+static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
struct r852_device *dev = chip->priv;
@@ -407,7 +388,7 @@ int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
* Check if card is ready
*/
-int r852_ready(struct mtd_info *mtd)
+static int r852_ready(struct mtd_info *mtd)
{
struct r852_device *dev = r852_get_dev(mtd);
return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
@@ -418,7 +399,7 @@ int r852_ready(struct mtd_info *mtd)
* Set ECC engine mode
*/
-void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
+static void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
{
struct r852_device *dev = r852_get_dev(mtd);
@@ -450,7 +431,7 @@ void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
* Calculate ECC, only used for writes
*/
-int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
+static int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
uint8_t *ecc_code)
{
struct r852_device *dev = r852_get_dev(mtd);
@@ -482,7 +463,7 @@ int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
* Correct the data using ECC, hw did almost everything for us
*/
-int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
+static int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
uint8_t *read_ecc, uint8_t *calc_ecc)
{
uint16_t ecc_reg;
@@ -539,21 +520,18 @@ exit:
* nand_read_oob_syndrome assumes we can send column address - we can't
*/
static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
- if (sndcmd) {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- sndcmd = 0;
- }
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return sndcmd;
+ return 0;
}
/*
* Start the nand engine
*/
-void r852_engine_enable(struct r852_device *dev)
+static void r852_engine_enable(struct r852_device *dev)
{
if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
@@ -571,7 +549,7 @@ void r852_engine_enable(struct r852_device *dev)
* Stop the nand engine
*/
-void r852_engine_disable(struct r852_device *dev)
+static void r852_engine_disable(struct r852_device *dev)
{
r852_write_reg_dword(dev, R852_HW, 0);
r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
@@ -581,7 +559,7 @@ void r852_engine_disable(struct r852_device *dev)
* Test if card is present
*/
-void r852_card_update_present(struct r852_device *dev)
+static void r852_card_update_present(struct r852_device *dev)
{
unsigned long flags;
uint8_t reg;
@@ -596,7 +574,7 @@ void r852_card_update_present(struct r852_device *dev)
* Update card detection IRQ state according to current card state
* which is read in r852_card_update_present
*/
-void r852_update_card_detect(struct r852_device *dev)
+static void r852_update_card_detect(struct r852_device *dev)
{
int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
dev->card_unstable = 0;
@@ -610,8 +588,8 @@ void r852_update_card_detect(struct r852_device *dev)
r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
}
-ssize_t r852_media_type_show(struct device *sys_dev,
- struct device_attribute *attr, char *buf)
+static ssize_t r852_media_type_show(struct device *sys_dev,
+ struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
struct r852_device *dev = r852_get_dev(mtd);
@@ -621,11 +599,11 @@ ssize_t r852_media_type_show(struct device *sys_dev,
return strlen(data);
}
-DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
+static DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
/* Detect properties of card in slot */
-void r852_update_media_status(struct r852_device *dev)
+static void r852_update_media_status(struct r852_device *dev)
{
uint8_t reg;
unsigned long flags;
@@ -654,7 +632,7 @@ void r852_update_media_status(struct r852_device *dev)
* Register the nand device
* Called when the card is detected
*/
-int r852_register_nand_device(struct r852_device *dev)
+static int r852_register_nand_device(struct r852_device *dev)
{
dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
@@ -692,7 +670,7 @@ error1:
* Unregister the card
*/
-void r852_unregister_nand_device(struct r852_device *dev)
+static void r852_unregister_nand_device(struct r852_device *dev)
{
if (!dev->card_registred)
return;
@@ -706,7 +684,7 @@ void r852_unregister_nand_device(struct r852_device *dev)
}
/* Card state updater */
-void r852_card_detect_work(struct work_struct *work)
+static void r852_card_detect_work(struct work_struct *work)
{
struct r852_device *dev =
container_of(work, struct r852_device, card_detect_work.work);
@@ -845,7 +823,7 @@ out:
return ret;
}
-int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
int error;
struct nand_chip *chip;
@@ -885,12 +863,12 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
chip->read_byte = r852_read_byte;
chip->read_buf = r852_read_buf;
chip->write_buf = r852_write_buf;
- chip->verify_buf = r852_verify_buf;
/* ecc */
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
chip->ecc.size = R852_DMA_LEN;
chip->ecc.bytes = SM_OOB_SIZE;
+ chip->ecc.strength = 2;
chip->ecc.hwctl = r852_ecc_hwctl;
chip->ecc.calculate = r852_ecc_calculate;
chip->ecc.correct = r852_ecc_correct;
@@ -985,7 +963,7 @@ error1:
return error;
}
-void r852_remove(struct pci_dev *pci_dev)
+static void r852_remove(struct pci_dev *pci_dev)
{
struct r852_device *dev = pci_get_drvdata(pci_dev);
@@ -1016,7 +994,7 @@ void r852_remove(struct pci_dev *pci_dev)
pci_disable_device(pci_dev);
}
-void r852_shutdown(struct pci_dev *pci_dev)
+static void r852_shutdown(struct pci_dev *pci_dev)
{
struct r852_device *dev = pci_get_drvdata(pci_dev);
@@ -1026,8 +1004,8 @@ void r852_shutdown(struct pci_dev *pci_dev)
pci_disable_device(pci_dev);
}
-#ifdef CONFIG_PM
-int r852_suspend(struct device *device)
+#ifdef CONFIG_PM_SLEEP
+static int r852_suspend(struct device *device)
{
struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
@@ -1048,7 +1026,7 @@ int r852_suspend(struct device *device)
return 0;
}
-int r852_resume(struct device *device)
+static int r852_resume(struct device *device)
{
struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
@@ -1079,9 +1057,6 @@ int r852_resume(struct device *device)
r852_update_card_detect(dev);
return 0;
}
-#else
-#define r852_suspend NULL
-#define r852_resume NULL
#endif
static const struct pci_device_id r852_pci_id_tbl[] = {
@@ -1092,7 +1067,7 @@ static const struct pci_device_id r852_pci_id_tbl[] = {
MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
-SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
+static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
static struct pci_driver r852_pci_driver = {
.name = DRV_NAME,
@@ -1103,18 +1078,7 @@ static struct pci_driver r852_pci_driver = {
.driver.pm = &r852_pm_ops,
};
-static __init int r852_module_init(void)
-{
- return pci_register_driver(&r852_pci_driver);
-}
-
-static void __exit r852_module_exit(void)
-{
- pci_unregister_driver(&r852_pci_driver);
-}
-
-module_init(r852_module_init);
-module_exit(r852_module_exit);
+module_pci_driver(r852_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
deleted file mode 100644
index c9f9127ff77..00000000000
--- a/drivers/mtd/nand/rtc_from4.c
+++ /dev/null
@@ -1,624 +0,0 @@
-/*
- * drivers/mtd/nand/rtc_from4.c
- *
- * Copyright (C) 2004 Red Hat, Inc.
- *
- * Derived from drivers/mtd/nand/spia.c
- * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the AG-AND flash device found on the
- * Renesas Technology Corp. Flash ROM 4-slot interface board (FROM_BOARD4),
- * which utilizes the Renesas HN29V1G91T-30 part.
- * This chip is a 1 GBibit (128MiB x 8 bits) AG-AND flash device.
- */
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/rslib.h>
-#include <linux/bitrev.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-
-/*
- * MTD structure for Renesas board
- */
-static struct mtd_info *rtc_from4_mtd = NULL;
-
-#define RTC_FROM4_MAX_CHIPS 2
-
-/* HS77x9 processor register defines */
-#define SH77X9_BCR1 ((volatile unsigned short *)(0xFFFFFF60))
-#define SH77X9_BCR2 ((volatile unsigned short *)(0xFFFFFF62))
-#define SH77X9_WCR1 ((volatile unsigned short *)(0xFFFFFF64))
-#define SH77X9_WCR2 ((volatile unsigned short *)(0xFFFFFF66))
-#define SH77X9_MCR ((volatile unsigned short *)(0xFFFFFF68))
-#define SH77X9_PCR ((volatile unsigned short *)(0xFFFFFF6C))
-#define SH77X9_FRQCR ((volatile unsigned short *)(0xFFFFFF80))
-
-/*
- * Values specific to the Renesas Technology Corp. FROM_BOARD4 (used with HS77x9 processor)
- */
-/* Address where flash is mapped */
-#define RTC_FROM4_FIO_BASE 0x14000000
-
-/* CLE and ALE are tied to address lines 5 & 4, respectively */
-#define RTC_FROM4_CLE (1 << 5)
-#define RTC_FROM4_ALE (1 << 4)
-
-/* address lines A24-A22 used for chip selection */
-#define RTC_FROM4_NAND_ADDR_SLOT3 (0x00800000)
-#define RTC_FROM4_NAND_ADDR_SLOT4 (0x00C00000)
-#define RTC_FROM4_NAND_ADDR_FPGA (0x01000000)
-/* mask address lines A24-A22 used for chip selection */
-#define RTC_FROM4_NAND_ADDR_MASK (RTC_FROM4_NAND_ADDR_SLOT3 | RTC_FROM4_NAND_ADDR_SLOT4 | RTC_FROM4_NAND_ADDR_FPGA)
-
-/* FPGA status register for checking device ready (bit zero) */
-#define RTC_FROM4_FPGA_SR (RTC_FROM4_NAND_ADDR_FPGA | 0x00000002)
-#define RTC_FROM4_DEVICE_READY 0x0001
-
-/* FPGA Reed-Solomon ECC Control register */
-
-#define RTC_FROM4_RS_ECC_CTL (RTC_FROM4_NAND_ADDR_FPGA | 0x00000050)
-#define RTC_FROM4_RS_ECC_CTL_CLR (1 << 7)
-#define RTC_FROM4_RS_ECC_CTL_GEN (1 << 6)
-#define RTC_FROM4_RS_ECC_CTL_FD_E (1 << 5)
-
-/* FPGA Reed-Solomon ECC code base */
-#define RTC_FROM4_RS_ECC (RTC_FROM4_NAND_ADDR_FPGA | 0x00000060)
-#define RTC_FROM4_RS_ECCN (RTC_FROM4_NAND_ADDR_FPGA | 0x00000080)
-
-/* FPGA Reed-Solomon ECC check register */
-#define RTC_FROM4_RS_ECC_CHK (RTC_FROM4_NAND_ADDR_FPGA | 0x00000070)
-#define RTC_FROM4_RS_ECC_CHK_ERROR (1 << 7)
-
-#define ERR_STAT_ECC_AVAILABLE 0x20
-
-/* Undefine for software ECC */
-#define RTC_FROM4_HWECC 1
-
-/* Define as 1 for no virtual erase blocks (in JFFS2) */
-#define RTC_FROM4_NO_VIRTBLOCKS 0
-
-/*
- * Module stuff
- */
-static void __iomem *rtc_from4_fio_base = (void *)P2SEGADDR(RTC_FROM4_FIO_BASE);
-
-static const struct mtd_partition partition_info[] = {
- {
- .name = "Renesas flash partition 1",
- .offset = 0,
- .size = MTDPART_SIZ_FULL},
-};
-
-#define NUM_PARTITIONS 1
-
-/*
- * hardware specific flash bbt decriptors
- * Note: this is to allow debugging by disabling
- * NAND_BBT_CREATE and/or NAND_BBT_WRITE
- *
- */
-static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
-static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
-
-static struct nand_bbt_descr rtc_from4_bbt_main_descr = {
- .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
- .offs = 40,
- .len = 4,
- .veroffs = 44,
- .maxblocks = 4,
- .pattern = bbt_pattern
-};
-
-static struct nand_bbt_descr rtc_from4_bbt_mirror_descr = {
- .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
- .offs = 40,
- .len = 4,
- .veroffs = 44,
- .maxblocks = 4,
- .pattern = mirror_pattern
-};
-
-#ifdef RTC_FROM4_HWECC
-
-/* the Reed Solomon control structure */
-static struct rs_control *rs_decoder;
-
-/*
- * hardware specific Out Of Band information
- */
-static struct nand_ecclayout rtc_from4_nand_oobinfo = {
- .eccbytes = 32,
- .eccpos = {
- 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31},
- .oobfree = {{32, 32}}
-};
-
-#endif
-
-/*
- * rtc_from4_hwcontrol - hardware specific access to control-lines
- * @mtd: MTD device structure
- * @cmd: hardware control command
- *
- * Address lines (A5 and A4) are used to control Command and Address Latch
- * Enable on this board, so set the read/write address appropriately.
- *
- * Chip Enable is also controlled by the Chip Select (CS5) and
- * Address lines (A24-A22), so no action is required here.
- *
- */
-static void rtc_from4_hwcontrol(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
-{
- struct nand_chip *chip = (mtd->priv);
-
- if (cmd == NAND_CMD_NONE)
- return;
-
- if (ctrl & NAND_CLE)
- writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_CLE);
- else
- writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_ALE);
-}
-
-/*
- * rtc_from4_nand_select_chip - hardware specific chip select
- * @mtd: MTD device structure
- * @chip: Chip to select (0 == slot 3, 1 == slot 4)
- *
- * The chip select is based on address lines A24-A22.
- * This driver uses flash slots 3 and 4 (A23-A22).
- *
- */
-static void rtc_from4_nand_select_chip(struct mtd_info *mtd, int chip)
-{
- struct nand_chip *this = mtd->priv;
-
- this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R & ~RTC_FROM4_NAND_ADDR_MASK);
- this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_NAND_ADDR_MASK);
-
- switch (chip) {
-
- case 0: /* select slot 3 chip */
- this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT3);
- this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT3);
- break;
- case 1: /* select slot 4 chip */
- this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT4);
- this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT4);
- break;
-
- }
-}
-
-/*
- * rtc_from4_nand_device_ready - hardware specific ready/busy check
- * @mtd: MTD device structure
- *
- * This board provides the Ready/Busy state in the status register
- * of the FPGA. Bit zero indicates the RDY(1)/BSY(0) signal.
- *
- */
-static int rtc_from4_nand_device_ready(struct mtd_info *mtd)
-{
- unsigned short status;
-
- status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_FPGA_SR));
-
- return (status & RTC_FROM4_DEVICE_READY);
-
-}
-
-/*
- * deplete - code to perform device recovery in case there was a power loss
- * @mtd: MTD device structure
- * @chip: Chip to select (0 == slot 3, 1 == slot 4)
- *
- * If there was a sudden loss of power during an erase operation, a
- * "device recovery" operation must be performed when power is restored
- * to ensure correct operation. This routine performs the required steps
- * for the requested chip.
- *
- * See page 86 of the data sheet for details.
- *
- */
-static void deplete(struct mtd_info *mtd, int chip)
-{
- struct nand_chip *this = mtd->priv;
-
- /* wait until device is ready */
- while (!this->dev_ready(mtd)) ;
-
- this->select_chip(mtd, chip);
-
- /* Send the commands for device recovery, phase 1 */
- this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0000);
- this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1);
-
- /* Send the commands for device recovery, phase 2 */
- this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0004);
- this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1);
-
-}
-
-#ifdef RTC_FROM4_HWECC
-/*
- * rtc_from4_enable_hwecc - hardware specific hardware ECC enable function
- * @mtd: MTD device structure
- * @mode: I/O mode; read or write
- *
- * enable hardware ECC for data read or write
- *
- */
-static void rtc_from4_enable_hwecc(struct mtd_info *mtd, int mode)
-{
- volatile unsigned short *rs_ecc_ctl = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CTL);
- unsigned short status;
-
- switch (mode) {
- case NAND_ECC_READ:
- status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_FD_E;
-
- *rs_ecc_ctl = status;
- break;
-
- case NAND_ECC_READSYN:
- status = 0x00;
-
- *rs_ecc_ctl = status;
- break;
-
- case NAND_ECC_WRITE:
- status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_GEN | RTC_FROM4_RS_ECC_CTL_FD_E;
-
- *rs_ecc_ctl = status;
- break;
-
- default:
- BUG();
- break;
- }
-
-}
-
-/*
- * rtc_from4_calculate_ecc - hardware specific code to read ECC code
- * @mtd: MTD device structure
- * @dat: buffer containing the data to generate ECC codes
- * @ecc_code ECC codes calculated
- *
- * The ECC code is calculated by the FPGA. All we have to do is read the values
- * from the FPGA registers.
- *
- * Note: We read from the inverted registers, since data is inverted before
- * the code is calculated. So all 0xff data (blank page) results in all 0xff rs code
- *
- */
-static void rtc_from4_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
-{
- volatile unsigned short *rs_eccn = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECCN);
- unsigned short value;
- int i;
-
- for (i = 0; i < 8; i++) {
- value = *rs_eccn;
- ecc_code[i] = (unsigned char)value;
- rs_eccn++;
- }
- ecc_code[7] |= 0x0f; /* set the last four bits (not used) */
-}
-
-/*
- * rtc_from4_correct_data - hardware specific code to correct data using ECC code
- * @mtd: MTD device structure
- * @buf: buffer containing the data to generate ECC codes
- * @ecc1 ECC codes read
- * @ecc2 ECC codes calculated
- *
- * The FPGA tells us fast, if there's an error or not. If no, we go back happy
- * else we read the ecc results from the fpga and call the rs library to decode
- * and hopefully correct the error.
- *
- */
-static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_char *ecc1, u_char *ecc2)
-{
- int i, j, res;
- unsigned short status;
- uint16_t par[6], syn[6];
- uint8_t ecc[8];
- volatile unsigned short *rs_ecc;
-
- status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CHK));
-
- if (!(status & RTC_FROM4_RS_ECC_CHK_ERROR)) {
- return 0;
- }
-
- /* Read the syndrom pattern from the FPGA and correct the bitorder */
- rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC);
- for (i = 0; i < 8; i++) {
- ecc[i] = bitrev8(*rs_ecc);
- rs_ecc++;
- }
-
- /* convert into 6 10bit syndrome fields */
- par[5] = rs_decoder->index_of[(((uint16_t) ecc[0] >> 0) & 0x0ff) | (((uint16_t) ecc[1] << 8) & 0x300)];
- par[4] = rs_decoder->index_of[(((uint16_t) ecc[1] >> 2) & 0x03f) | (((uint16_t) ecc[2] << 6) & 0x3c0)];
- par[3] = rs_decoder->index_of[(((uint16_t) ecc[2] >> 4) & 0x00f) | (((uint16_t) ecc[3] << 4) & 0x3f0)];
- par[2] = rs_decoder->index_of[(((uint16_t) ecc[3] >> 6) & 0x003) | (((uint16_t) ecc[4] << 2) & 0x3fc)];
- par[1] = rs_decoder->index_of[(((uint16_t) ecc[5] >> 0) & 0x0ff) | (((uint16_t) ecc[6] << 8) & 0x300)];
- par[0] = (((uint16_t) ecc[6] >> 2) & 0x03f) | (((uint16_t) ecc[7] << 6) & 0x3c0);
-
- /* Convert to computable syndrome */
- for (i = 0; i < 6; i++) {
- syn[i] = par[0];
- for (j = 1; j < 6; j++)
- if (par[j] != rs_decoder->nn)
- syn[i] ^= rs_decoder->alpha_to[rs_modnn(rs_decoder, par[j] + i * j)];
-
- /* Convert to index form */
- syn[i] = rs_decoder->index_of[syn[i]];
- }
-
- /* Let the library code do its magic. */
- res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL);
- if (res > 0) {
- DEBUG(MTD_DEBUG_LEVEL0, "rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res);
- }
- return res;
-}
-
-/**
- * rtc_from4_errstat - perform additional error status checks
- * @mtd: MTD device structure
- * @this: NAND chip structure
- * @state: state of the operation
- * @status: status code returned from read status
- * @page: startpage inside the chip, must be called with (page & this->pagemask)
- *
- * Perform additional error status checks on erase and write failures
- * to determine if errors are correctable. For this device, correctable
- * 1-bit errors on erase and write are considered acceptable.
- *
- * note: see pages 34..37 of data sheet for details.
- *
- */
-static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this,
- int state, int status, int page)
-{
- int er_stat = 0;
- int rtn, retlen;
- size_t len;
- uint8_t *buf;
- int i;
-
- this->cmdfunc(mtd, NAND_CMD_STATUS_CLEAR, -1, -1);
-
- if (state == FL_ERASING) {
-
- for (i = 0; i < 4; i++) {
- if (!(status & 1 << (i + 1)))
- continue;
- this->cmdfunc(mtd, (NAND_CMD_STATUS_ERROR + i + 1),
- -1, -1);
- rtn = this->read_byte(mtd);
- this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
-
- /* err_ecc_not_avail */
- if (!(rtn & ERR_STAT_ECC_AVAILABLE))
- er_stat |= 1 << (i + 1);
- }
-
- } else if (state == FL_WRITING) {
-
- unsigned long corrected = mtd->ecc_stats.corrected;
-
- /* single bank write logic */
- this->cmdfunc(mtd, NAND_CMD_STATUS_ERROR, -1, -1);
- rtn = this->read_byte(mtd);
- this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
-
- if (!(rtn & ERR_STAT_ECC_AVAILABLE)) {
- /* err_ecc_not_avail */
- er_stat |= 1 << 1;
- goto out;
- }
-
- len = mtd->writesize;
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "rtc_from4_errstat: Out of memory!\n");
- er_stat = 1;
- goto out;
- }
-
- /* recovery read */
- rtn = nand_do_read(mtd, page, len, &retlen, buf);
-
- /* if read failed or > 1-bit error corrected */
- if (rtn || (mtd->ecc_stats.corrected - corrected) > 1)
- er_stat |= 1 << 1;
- kfree(buf);
- }
-out:
- rtn = status;
- if (er_stat == 0) { /* if ECC is available */
- rtn = (status & ~NAND_STATUS_FAIL); /* clear the error bit */
- }
-
- return rtn;
-}
-#endif
-
-/*
- * Main initialization routine
- */
-static int __init rtc_from4_init(void)
-{
- struct nand_chip *this;
- unsigned short bcr1, bcr2, wcr2;
- int i;
- int ret;
-
- /* Allocate memory for MTD device structure and private data */
- rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!rtc_from4_mtd) {
- printk("Unable to allocate Renesas NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&rtc_from4_mtd[1]);
-
- /* Initialize structures */
- memset(rtc_from4_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- rtc_from4_mtd->priv = this;
- rtc_from4_mtd->owner = THIS_MODULE;
-
- /* set area 5 to PCMCIA mode to meet the tDH spec (data hold time; 9 ns min) */
- bcr1 = *SH77X9_BCR1 & ~0x0002;
- bcr1 |= 0x0002;
- *SH77X9_BCR1 = bcr1;
-
- /* set */
- bcr2 = *SH77X9_BCR2 & ~0x0c00;
- bcr2 |= 0x0800;
- *SH77X9_BCR2 = bcr2;
-
- /* set area 5 wait states */
- wcr2 = *SH77X9_WCR2 & ~0x1c00;
- wcr2 |= 0x1c00;
- *SH77X9_WCR2 = wcr2;
-
- /* Set address of NAND IO lines */
- this->IO_ADDR_R = rtc_from4_fio_base;
- this->IO_ADDR_W = rtc_from4_fio_base;
- /* Set address of hardware control function */
- this->cmd_ctrl = rtc_from4_hwcontrol;
- /* Set address of chip select function */
- this->select_chip = rtc_from4_nand_select_chip;
- /* command delay time (in us) */
- this->chip_delay = 100;
- /* return the status of the Ready/Busy line */
- this->dev_ready = rtc_from4_nand_device_ready;
-
-#ifdef RTC_FROM4_HWECC
- printk(KERN_INFO "rtc_from4_init: using hardware ECC detection.\n");
-
- this->ecc.mode = NAND_ECC_HW_SYNDROME;
- this->ecc.size = 512;
- this->ecc.bytes = 8;
- /* return the status of extra status and ECC checks */
- this->errstat = rtc_from4_errstat;
- /* set the nand_oobinfo to support FPGA H/W error detection */
- this->ecc.layout = &rtc_from4_nand_oobinfo;
- this->ecc.hwctl = rtc_from4_enable_hwecc;
- this->ecc.calculate = rtc_from4_calculate_ecc;
- this->ecc.correct = rtc_from4_correct_data;
-
- /* We could create the decoder on demand if memory is a concern;
- * this way we have it handy if an error happens.
- *
- * Symbol size is 10 (bits)
- * Primitive polynomial is x^10+x^3+1
- * first consecutive root is 0
- * primitive element to generate roots = 1
- * generator polynomial degree = 6
- */
- rs_decoder = init_rs(10, 0x409, 0, 1, 6);
- if (!rs_decoder) {
- printk(KERN_ERR "Could not create a RS decoder\n");
- ret = -ENOMEM;
- goto err_1;
- }
-#else
- printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n");
-
- this->ecc.mode = NAND_ECC_SOFT;
-#endif
-
- /* set the bad block tables to support debugging */
- this->bbt_td = &rtc_from4_bbt_main_descr;
- this->bbt_md = &rtc_from4_bbt_mirror_descr;
-
- /* Scan to find existence of the device */
- if (nand_scan(rtc_from4_mtd, RTC_FROM4_MAX_CHIPS)) {
- ret = -ENXIO;
- goto err_2;
- }
-
- /* Perform 'device recovery' for each chip in case there was a power loss. */
- for (i = 0; i < this->numchips; i++) {
- deplete(rtc_from4_mtd, i);
- }
-
-#if RTC_FROM4_NO_VIRTBLOCKS
- /* use a smaller erase block to minimize wasted space when a block is bad */
- /* note: this uses eight times as much RAM as using the default and makes */
- /* mounts take four times as long. */
- rtc_from4_mtd->flags |= MTD_NO_VIRTBLOCKS;
-#endif
-
- /* Register the partitions */
- ret = mtd_device_register(rtc_from4_mtd, partition_info,
- NUM_PARTITIONS);
- if (ret)
- goto err_3;
-
- /* Return happy */
- return 0;
-err_3:
- nand_release(rtc_from4_mtd);
-err_2:
- free_rs(rs_decoder);
-err_1:
- kfree(rtc_from4_mtd);
- return ret;
-}
-
-module_init(rtc_from4_init);
-
-/*
- * Clean up routine
- */
-static void __exit rtc_from4_cleanup(void)
-{
- /* Release resource, unregister partitions */
- nand_release(rtc_from4_mtd);
-
- /* Free the MTD device structure */
- kfree(rtc_from4_mtd);
-
-#ifdef RTC_FROM4_HWECC
- /* Free the reed solomon resources */
- if (rs_decoder) {
- free_rs(rs_decoder);
- }
-#endif
-}
-
-module_exit(rtc_from4_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("d.marlin <dmarlin@redhat.com");
-MODULE_DESCRIPTION("Board-specific glue layer for AG-AND flash on Renesas FROM_BOARD4");
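Editor's note on the Reed-Solomon handling removed with rtc_from4.c: the FPGA hands back eight ECC bytes that actually encode six 10-bit parity symbols, and the par[5]..par[0] expressions above merely unpack that bit stream before five of the symbols are converted to index form for decode_rs8(). A minimal stand-alone sketch of just that unpacking, in plain C outside the kernel and without the rs_decoder->index_of conversion (the helper name rs_unpack_10bit is illustrative, not from the driver):

#include <stdint.h>

/*
 * Illustrative only: unpack eight ECC bytes from the FPGA into six raw
 * 10-bit Reed-Solomon symbols, mirroring the par[] expressions in the
 * removed rtc_from4_correct_data(). The driver additionally maps
 * par[5]..par[1] through rs_decoder->index_of before use.
 */
static void rs_unpack_10bit(const uint8_t ecc[8], uint16_t par[6])
{
	par[5] = ((uint16_t)ecc[0]        & 0x0ff) | (((uint16_t)ecc[1] << 8) & 0x300);
	par[4] = (((uint16_t)ecc[1] >> 2) & 0x03f) | (((uint16_t)ecc[2] << 6) & 0x3c0);
	par[3] = (((uint16_t)ecc[2] >> 4) & 0x00f) | (((uint16_t)ecc[3] << 4) & 0x3f0);
	par[2] = (((uint16_t)ecc[3] >> 6) & 0x003) | (((uint16_t)ecc[4] << 2) & 0x3fc);
	par[1] = ((uint16_t)ecc[5]        & 0x0ff) | (((uint16_t)ecc[6] << 8) & 0x300);
	par[0] = (((uint16_t)ecc[6] >> 2) & 0x03f) | (((uint16_t)ecc[7] << 6) & 0x3c0);
}

Each 10-bit symbol straddles a byte boundary, which is why every field is built from a right-shifted low byte OR-ed with a masked, left-shifted high byte.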
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 4405468f196..79acbb8691b 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -21,15 +21,17 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) "nand-s3c2410: " fmt
+
#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -43,23 +45,42 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-
-#include <plat/regs-nand.h>
-#include <plat/nand.h>
-
-#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
-static int hardware_ecc = 1;
-#else
-static int hardware_ecc = 0;
-#endif
-
-#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
-static const int clock_stop = 1;
-#else
-static const int clock_stop = 0;
-#endif
-
+#include <linux/platform_data/mtd-nand-s3c2410.h>
+
+#define S3C2410_NFREG(x) (x)
+
+#define S3C2410_NFCONF S3C2410_NFREG(0x00)
+#define S3C2410_NFCMD S3C2410_NFREG(0x04)
+#define S3C2410_NFADDR S3C2410_NFREG(0x08)
+#define S3C2410_NFDATA S3C2410_NFREG(0x0C)
+#define S3C2410_NFSTAT S3C2410_NFREG(0x10)
+#define S3C2410_NFECC S3C2410_NFREG(0x14)
+#define S3C2440_NFCONT S3C2410_NFREG(0x04)
+#define S3C2440_NFCMD S3C2410_NFREG(0x08)
+#define S3C2440_NFADDR S3C2410_NFREG(0x0C)
+#define S3C2440_NFDATA S3C2410_NFREG(0x10)
+#define S3C2440_NFSTAT S3C2410_NFREG(0x20)
+#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C)
+#define S3C2412_NFSTAT S3C2410_NFREG(0x28)
+#define S3C2412_NFMECC0 S3C2410_NFREG(0x34)
+#define S3C2410_NFCONF_EN (1<<15)
+#define S3C2410_NFCONF_INITECC (1<<12)
+#define S3C2410_NFCONF_nFCE (1<<11)
+#define S3C2410_NFCONF_TACLS(x) ((x)<<8)
+#define S3C2410_NFCONF_TWRPH0(x) ((x)<<4)
+#define S3C2410_NFCONF_TWRPH1(x) ((x)<<0)
+#define S3C2410_NFSTAT_BUSY (1<<0)
+#define S3C2440_NFCONF_TACLS(x) ((x)<<12)
+#define S3C2440_NFCONF_TWRPH0(x) ((x)<<8)
+#define S3C2440_NFCONF_TWRPH1(x) ((x)<<4)
+#define S3C2440_NFCONT_INITECC (1<<4)
+#define S3C2440_NFCONT_nFCE (1<<1)
+#define S3C2440_NFCONT_ENABLE (1<<0)
+#define S3C2440_NFSTAT_READY (1<<0)
+#define S3C2412_NFCONF_NANDBOOT (1<<31)
+#define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5)
+#define S3C2412_NFCONT_nFCE0 (1<<1)
+#define S3C2412_NFSTAT_READY (1<<0)
/* new oob placement block for use with hardware ecc generation
*/
@@ -109,9 +130,8 @@ enum s3c_nand_clk_state {
 * @mtds: An array of MTD instances on this controller.
* @platform: The platform data for this board.
* @device: The platform device we bound to.
- * @area: The IO area resource that came from request_mem_region().
* @clk: The clock resource for this controller.
- * @regs: The area mapped for the hardware registers described by @area.
+ * @regs: The area mapped for the hardware registers.
* @sel_reg: Pointer to the register controlling the NAND selection.
* @sel_bit: The bit in @sel_reg to select the NAND chip.
* @mtd_count: The number of MTDs created from this controller.
@@ -128,7 +148,6 @@ struct s3c2410_nand_info {
/* device info */
struct device *device;
- struct resource *area;
struct clk *clk;
void __iomem *regs;
void __iomem *sel_reg;
@@ -164,12 +183,16 @@ static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev)
static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
{
- return dev->dev.platform_data;
+ return dev_get_platdata(&dev->dev);
}
static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
{
- return clock_stop;
+#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
+ return 1;
+#else
+ return 0;
+#endif
}
/**
@@ -215,7 +238,8 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
pr_debug("result %d from %ld, %d\n", result, clk, wanted);
if (result > max) {
- printk("%d ns is too big for current clock rate %ld\n", wanted, clk);
+ pr_err("%d ns is too big for current clock rate %ld\n",
+ wanted, clk);
return -1;
}
@@ -225,7 +249,7 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
return result;
}
-#define to_ns(ticks,clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
+#define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
/* controller setup */
@@ -268,7 +292,8 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
}
dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
- tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate));
+ tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate),
+ twrph1, to_ns(twrph1, clkrate));
switch (info->cpu_type) {
case TYPE_S3C2410:
@@ -325,13 +350,13 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
if (ret < 0)
return ret;
- switch (info->cpu_type) {
- case TYPE_S3C2410:
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
default:
break;
- case TYPE_S3C2440:
- case TYPE_S3C2412:
+ case TYPE_S3C2440:
+ case TYPE_S3C2412:
/* enable the controller and de-assert nFCE */
writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
@@ -450,6 +475,7 @@ static int s3c2412_nand_devready(struct mtd_info *mtd)
/* ECC handling functions */
+#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
u_char *read_ecc, u_char *calc_ecc)
{
@@ -463,10 +489,8 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
diff1 = read_ecc[1] ^ calc_ecc[1];
diff2 = read_ecc[2] ^ calc_ecc[2];
- pr_debug("%s: rd %02x%02x%02x calc %02x%02x%02x diff %02x%02x%02x\n",
- __func__,
- read_ecc[0], read_ecc[1], read_ecc[2],
- calc_ecc[0], calc_ecc[1], calc_ecc[2],
+ pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n",
+ __func__, 3, read_ecc, 3, calc_ecc,
diff0, diff1, diff2);
if (diff0 == 0 && diff1 == 0 && diff2 == 0)
@@ -546,7 +570,8 @@ static void s3c2412_nand_enable_hwecc(struct mtd_info *mtd, int mode)
unsigned long ctrl;
ctrl = readl(info->regs + S3C2440_NFCONT);
- writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC, info->regs + S3C2440_NFCONT);
+ writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC,
+ info->regs + S3C2440_NFCONT);
}
static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
@@ -558,7 +583,8 @@ static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
}
-static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
@@ -566,13 +592,13 @@ static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u
ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
- pr_debug("%s: returning ecc %02x%02x%02x\n", __func__,
- ecc_code[0], ecc_code[1], ecc_code[2]);
+ pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
return 0;
}
-static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
@@ -581,12 +607,13 @@ static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u
ecc_code[1] = ecc >> 8;
ecc_code[2] = ecc >> 16;
- pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n", ecc_code[0], ecc_code[1], ecc_code[2]);
+ pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
return 0;
}
-static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
@@ -599,6 +626,7 @@ static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u
return 0;
}
+#endif
/* override the standard functions for a little more speed. We can
* use read/write block to move the data buffers to/from the controller
@@ -625,13 +653,15 @@ static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
}
}
-static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
+ int len)
{
struct nand_chip *this = mtd->priv;
writesb(this->IO_ADDR_W, buf, len);
}
-static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
+ int len)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
@@ -675,7 +705,8 @@ static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
CPUFREQ_TRANSITION_NOTIFIER);
}
-static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+static inline void
+s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
{
cpufreq_unregister_notifier(&info->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
@@ -687,7 +718,8 @@ static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
return 0;
}
-static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+static inline void
+s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
{
}
#endif
@@ -698,8 +730,6 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
{
struct s3c2410_nand_info *info = to_nand_info(pdev);
- platform_set_drvdata(pdev, NULL);
-
if (info == NULL)
return 0;
@@ -717,53 +747,28 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
nand_release(&ptr->mtd);
}
-
- kfree(info->mtds);
}
/* free the common resources */
- if (info->clk != NULL && !IS_ERR(info->clk)) {
+ if (!IS_ERR(info->clk))
s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
- clk_put(info->clk);
- }
-
- if (info->regs != NULL) {
- iounmap(info->regs);
- info->regs = NULL;
- }
-
- if (info->area != NULL) {
- release_resource(info->area);
- kfree(info->area);
- info->area = NULL;
- }
-
- kfree(info);
return 0;
}
-const char *part_probes[] = { "cmdlinepart", NULL };
static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *mtd,
struct s3c2410_nand_set *set)
{
- struct mtd_partition *part_info;
- int nr_part = 0;
-
- if (set == NULL)
- return mtd_device_register(&mtd->mtd, NULL, 0);
+ if (set) {
+ mtd->mtd.name = set->name;
- mtd->mtd.name = set->name;
- nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0);
-
- if (nr_part <= 0 && set->nr_partitions > 0) {
- nr_part = set->nr_partitions;
- part_info = set->partitions;
+ return mtd_device_parse_register(&mtd->mtd, NULL, NULL,
+ set->partitions, set->nr_partitions);
}
- return mtd_device_register(&mtd->mtd, part_info, nr_part);
+ return -ENODEV;
}
/**
@@ -821,7 +826,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
dev_info(info->device, "System booted from NAND\n");
break;
- }
+ }
chip->IO_ADDR_R = chip->IO_ADDR_W;
@@ -830,31 +835,31 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
nmtd->mtd.owner = THIS_MODULE;
nmtd->set = set;
- if (hardware_ecc) {
- chip->ecc.calculate = s3c2410_nand_calculate_ecc;
- chip->ecc.correct = s3c2410_nand_correct_data;
- chip->ecc.mode = NAND_ECC_HW;
-
- switch (info->cpu_type) {
- case TYPE_S3C2410:
- chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
- chip->ecc.calculate = s3c2410_nand_calculate_ecc;
- break;
+#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ chip->ecc.correct = s3c2410_nand_correct_data;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.strength = 1;
- case TYPE_S3C2412:
- chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
- chip->ecc.calculate = s3c2412_nand_calculate_ecc;
- break;
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ break;
- case TYPE_S3C2440:
- chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
- chip->ecc.calculate = s3c2440_nand_calculate_ecc;
- break;
+ case TYPE_S3C2412:
+ chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2412_nand_calculate_ecc;
+ break;
- }
- } else {
- chip->ecc.mode = NAND_ECC_SOFT;
+ case TYPE_S3C2440:
+ chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+ break;
}
+#else
+ chip->ecc.mode = NAND_ECC_SOFT;
+#endif
if (set->ecc_layout != NULL)
chip->ecc.layout = set->ecc_layout;
@@ -880,8 +885,10 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
/* If you use u-boot BBT creation code, specifying this flag will
* let the kernel fish out the BBT from the NAND, and also skip the
* full NAND scan that can take 1/2s or so. Little things... */
- if (set->flash_bbt)
- chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
+ if (set->flash_bbt) {
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->options |= NAND_SKIP_BBTSCAN;
+ }
}
/**
@@ -906,7 +913,7 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
if (chip->ecc.mode != NAND_ECC_HW)
return;
- /* change the behaviour depending on wether we are using
+ /* change the behaviour depending on whether we are using
* the large or small page nand device */
if (chip->page_shift > 10) {
@@ -929,7 +936,7 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
static int s3c24xx_nand_probe(struct platform_device *pdev)
{
struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
- enum s3c_cpu_type cpu_type;
+ enum s3c_cpu_type cpu_type;
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
struct s3c2410_nand_set *sets;
@@ -943,9 +950,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
pr_debug("s3c2410_nand_probe(%p)\n", pdev);
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (info == NULL) {
- dev_err(&pdev->dev, "no memory for flash info\n");
err = -ENOMEM;
goto exit_error;
}
@@ -957,7 +963,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* get the clock source and enable it */
- info->clk = clk_get(&pdev->dev, "nand");
+ info->clk = devm_clk_get(&pdev->dev, "nand");
if (IS_ERR(info->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
err = -ENOENT;
@@ -969,25 +975,16 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* allocate and map the resource */
/* currently we assume we have the one resource */
- res = pdev->resource;
+ res = pdev->resource;
size = resource_size(res);
- info->area = request_mem_region(res->start, size, pdev->name);
-
- if (info->area == NULL) {
- dev_err(&pdev->dev, "cannot reserve register region\n");
- err = -ENOENT;
- goto exit_error;
- }
-
- info->device = &pdev->dev;
- info->platform = plat;
- info->regs = ioremap(res->start, size);
- info->cpu_type = cpu_type;
+ info->device = &pdev->dev;
+ info->platform = plat;
+ info->cpu_type = cpu_type;
- if (info->regs == NULL) {
- dev_err(&pdev->dev, "cannot reserve register region\n");
- err = -EIO;
+ info->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->regs)) {
+ err = PTR_ERR(info->regs);
goto exit_error;
}
@@ -1007,9 +1004,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* allocate our information */
size = nr_sets * sizeof(*info->mtds);
- info->mtds = kzalloc(size, GFP_KERNEL);
+ info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (info->mtds == NULL) {
- dev_err(&pdev->dev, "failed to allocate mtd storage\n");
err = -ENOMEM;
goto exit_error;
}
@@ -1019,7 +1015,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
nmtd = info->mtds;
for (setno = 0; setno < nr_sets; setno++, nmtd++) {
- pr_debug("initialising set %d (%p, info %p)\n", setno, nmtd, info);
+ pr_debug("initialising set %d (%p, info %p)\n",
+ setno, nmtd, info);
s3c2410_nand_init_chip(info, nmtd, sets);
@@ -1142,20 +1139,7 @@ static struct platform_driver s3c24xx_nand_driver = {
},
};
-static int __init s3c2410_nand_init(void)
-{
- printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
-
- return platform_driver_register(&s3c24xx_nand_driver);
-}
-
-static void __exit s3c2410_nand_exit(void)
-{
- platform_driver_unregister(&s3c24xx_nand_driver);
-}
-
-module_init(s3c2410_nand_init);
-module_exit(s3c2410_nand_exit);
+module_platform_driver(s3c24xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 93b1f74321c..c0670237e7a 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -23,10 +23,20 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -42,11 +52,17 @@ static struct nand_ecclayout flctl_4secc_oob_16 = {
};
static struct nand_ecclayout flctl_4secc_oob_64 = {
- .eccbytes = 10,
- .eccpos = {48, 49, 50, 51, 52, 53, 54, 55, 56, 57},
+ .eccbytes = 4 * 10,
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
.oobfree = {
- {.offset = 60,
- . length = 4} },
+ {.offset = 2, .length = 4},
+ {.offset = 16, .length = 6},
+ {.offset = 32, .length = 6},
+ {.offset = 48, .length = 6} },
};
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
@@ -60,15 +76,15 @@ static struct nand_bbt_descr flctl_4secc_smallpage = {
static struct nand_bbt_descr flctl_4secc_largepage = {
.options = NAND_BBT_SCAN2NDPAGE,
- .offs = 58,
+ .offs = 0,
.len = 2,
.pattern = scan_ff_pattern,
};
static void empty_fifo(struct sh_flctl *flctl)
{
- writel(0x000c0000, FLINTDMACR(flctl)); /* FIFO Clear */
- writel(0x00000000, FLINTDMACR(flctl)); /* Clear Error flags */
+ writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
+ writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}
static void start_translation(struct sh_flctl *flctl)
@@ -97,6 +113,84 @@ static void wait_completion(struct sh_flctl *flctl)
writeb(0x0, FLTRCR(flctl));
}
+static void flctl_dma_complete(void *param)
+{
+ struct sh_flctl *flctl = param;
+
+ complete(&flctl->dma_complete);
+}
+
+static void flctl_release_dma(struct sh_flctl *flctl)
+{
+ if (flctl->chan_fifo0_rx) {
+ dma_release_channel(flctl->chan_fifo0_rx);
+ flctl->chan_fifo0_rx = NULL;
+ }
+ if (flctl->chan_fifo0_tx) {
+ dma_release_channel(flctl->chan_fifo0_tx);
+ flctl->chan_fifo0_tx = NULL;
+ }
+}
+
+static void flctl_setup_dma(struct sh_flctl *flctl)
+{
+ dma_cap_mask_t mask;
+ struct dma_slave_config cfg;
+ struct platform_device *pdev = flctl->pdev;
+ struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ if (!pdata)
+ return;
+
+ if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
+ return;
+
+ /* We can only either use DMA for both Tx and Rx or not use it at all */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)(uintptr_t)pdata->slave_id_fifo0_tx);
+ dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
+ flctl->chan_fifo0_tx);
+
+ if (!flctl->chan_fifo0_tx)
+ return;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.slave_id = pdata->slave_id_fifo0_tx;
+ cfg.direction = DMA_MEM_TO_DEV;
+ cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
+ cfg.src_addr = 0;
+ ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
+ if (ret < 0)
+ goto err;
+
+ flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)(uintptr_t)pdata->slave_id_fifo0_rx);
+ dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
+ flctl->chan_fifo0_rx);
+
+ if (!flctl->chan_fifo0_rx)
+ goto err;
+
+ cfg.slave_id = pdata->slave_id_fifo0_rx;
+ cfg.direction = DMA_DEV_TO_MEM;
+ cfg.dst_addr = 0;
+ cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
+ ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
+ if (ret < 0)
+ goto err;
+
+ init_completion(&flctl->dma_complete);
+
+ return;
+
+err:
+ flctl_release_dma(flctl);
+}
+
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -157,27 +251,56 @@ static void wait_wfifo_ready(struct sh_flctl *flctl)
timeout_error(flctl, __func__);
}
-static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number)
+static enum flctl_ecc_res_t wait_recfifo_ready
+ (struct sh_flctl *flctl, int sector_number)
{
uint32_t timeout = LOOP_TIMEOUT_MAX;
- int checked[4];
void __iomem *ecc_reg[4];
int i;
+ int state = FL_SUCCESS;
uint32_t data, size;
- memset(checked, 0, sizeof(checked));
-
+ /*
+ * First this loop checks in FLDTCNTR if we are ready to read out the
+ * oob data. This is the case if either all went fine without errors or
+ * if the bottom part of the loop corrected the errors or marked them as
+ * uncorrectable and the controller is given time to push the data into
+ * the FIFO.
+ */
while (timeout--) {
+ /* check if all is ok and we can read out the OOB */
size = readl(FLDTCNTR(flctl)) >> 24;
- if (size & 0xFF)
- return 0; /* success */
+ if ((size & 0xFF) == 4)
+ return state;
+
+ /* check if a correction code has been calculated */
+ if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
+ /*
+ * either we wait for the fifo to be filled or a
+ * correction pattern is being generated
+ */
+ udelay(1);
+ continue;
+ }
- if (readl(FL4ECCCR(flctl)) & _4ECCFA)
- return 1; /* can't correct */
+ /* check for an uncorrectable error */
+ if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
+ /* check if we face a non-empty page */
+ for (i = 0; i < 512; i++) {
+ if (flctl->done_buff[i] != 0xff) {
+ state = FL_ERROR; /* can't correct */
+ break;
+ }
+ }
- udelay(1);
- if (!(readl(FL4ECCCR(flctl)) & _4ECCEND))
+ if (state == FL_SUCCESS)
+ dev_dbg(&flctl->pdev->dev,
+ "reading empty sector %d, ecc error ignored\n",
+ sector_number);
+
+ writel(0, FL4ECCCR(flctl));
continue;
+ }
/* start error correction */
ecc_reg[0] = FL4ECCRESULT0(flctl);
@@ -186,28 +309,26 @@ static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number)
ecc_reg[3] = FL4ECCRESULT3(flctl);
for (i = 0; i < 3; i++) {
+ uint8_t org;
+ unsigned int index;
+
data = readl(ecc_reg[i]);
- if (data != INIT_FL4ECCRESULT_VAL && !checked[i]) {
- uint8_t org;
- int index;
-
- if (flctl->page_size)
- index = (512 * sector_number) +
- (data >> 16);
- else
- index = data >> 16;
-
- org = flctl->done_buff[index];
- flctl->done_buff[index] = org ^ (data & 0xFF);
- checked[i] = 1;
- }
- }
+ if (flctl->page_size)
+ index = (512 * sector_number) +
+ (data >> 16);
+ else
+ index = data >> 16;
+
+ org = flctl->done_buff[index];
+ flctl->done_buff[index] = org ^ (data & 0xFF);
+ }
+ state = FL_REPAIRABLE;
writel(0, FL4ECCCR(flctl));
}
timeout_error(flctl, __func__);
- return 1; /* timeout */
+ return FL_TIMEOUT; /* timeout */
}
static void wait_wecfifo_ready(struct sh_flctl *flctl)
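Editor's note: the hunk above turns wait_recfifo_ready() into a small state machine (FL_SUCCESS / FL_REPAIRABLE / FL_ERROR / FL_TIMEOUT) and applies the controller's correction records in place. A rough stand-alone sketch of just the correction step, assuming — as the code above implies — that each FL4ECCRESULTn word carries a byte index in its upper half and an XOR mask in its low byte (apply_4secc_results is an illustrative name, not a driver function):

#include <stdint.h>
#include <stddef.h>

/*
 * Illustrative only: apply up to three FLCTL correction records to a page
 * buffer, the way the reworked wait_recfifo_ready() does. large_page
 * corresponds to flctl->page_size; sectors are 512 bytes each.
 */
static void apply_4secc_results(uint8_t *buf, const uint32_t result[3],
				int sector, int large_page)
{
	int i;

	for (i = 0; i < 3; i++) {
		size_t index = result[i] >> 16;	/* byte offset within the sector */

		if (large_page)
			index += 512 * (size_t)sector;

		buf[index] ^= (uint8_t)(result[i] & 0xff);	/* flip the bad bits */
	}
}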
@@ -225,6 +346,70 @@ static void wait_wecfifo_ready(struct sh_flctl *flctl)
timeout_error(flctl, __func__);
}
+static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
+ int len, enum dma_data_direction dir)
+{
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan;
+ enum dma_transfer_direction tr_dir;
+ dma_addr_t dma_addr;
+ dma_cookie_t cookie = -EINVAL;
+ uint32_t reg;
+ int ret;
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = flctl->chan_fifo0_rx;
+ tr_dir = DMA_DEV_TO_MEM;
+ } else {
+ chan = flctl->chan_fifo0_tx;
+ tr_dir = DMA_MEM_TO_DEV;
+ }
+
+ dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
+
+ if (dma_addr)
+ desc = dmaengine_prep_slave_single(chan, dma_addr, len,
+ tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (desc) {
+ reg = readl(FLINTDMACR(flctl));
+ reg |= DREQ0EN;
+ writel(reg, FLINTDMACR(flctl));
+
+ desc->callback = flctl_dma_complete;
+ desc->callback_param = flctl;
+ cookie = dmaengine_submit(desc);
+
+ dma_async_issue_pending(chan);
+ } else {
+ /* DMA failed, fall back to PIO */
+ flctl_release_dma(flctl);
+ dev_warn(&flctl->pdev->dev,
+ "DMA failed, falling back to PIO\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret =
+ wait_for_completion_timeout(&flctl->dma_complete,
+ msecs_to_jiffies(3000));
+
+ if (ret <= 0) {
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
+ }
+
+out:
+ reg = readl(FLINTDMACR(flctl));
+ reg &= ~DREQ0EN;
+ writel(reg, FLINTDMACR(flctl));
+
+ dma_unmap_single(chan->device->dev, dma_addr, len, dir);
+
+ /* ret > 0 is success */
+ return ret;
+}
+
static void read_datareg(struct sh_flctl *flctl, int offset)
{
unsigned long data;
@@ -240,50 +425,84 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
int i, len_4align;
unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
- void *fifo_addr = (void *)FLDTFIFO(flctl);
len_4align = (rlen + 3) / 4;
+ /* initiate DMA transfer */
+ if (flctl->chan_fifo0_rx && rlen >= 32 &&
+ flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
+ goto convert; /* DMA success */
+
+ /* do polling transfer */
for (i = 0; i < len_4align; i++) {
wait_rfifo_ready(flctl);
- buf[i] = readl(fifo_addr);
- buf[i] = be32_to_cpu(buf[i]);
+ buf[i] = readl(FLDTFIFO(flctl));
}
+
+convert:
+ for (i = 0; i < len_4align; i++)
+ buf[i] = be32_to_cpu(buf[i]);
}
-static int read_ecfiforeg(struct sh_flctl *flctl, uint8_t *buff, int sector)
+static enum flctl_ecc_res_t read_ecfiforeg
+ (struct sh_flctl *flctl, uint8_t *buff, int sector)
{
int i;
+ enum flctl_ecc_res_t res;
unsigned long *ecc_buf = (unsigned long *)buff;
- void *fifo_addr = (void *)FLECFIFO(flctl);
- for (i = 0; i < 4; i++) {
- if (wait_recfifo_ready(flctl , sector))
- return 1;
- ecc_buf[i] = readl(fifo_addr);
- ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
+ res = wait_recfifo_ready(flctl , sector);
+
+ if (res != FL_ERROR) {
+ for (i = 0; i < 4; i++) {
+ ecc_buf[i] = readl(FLECFIFO(flctl));
+ ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
+ }
}
- return 0;
+ return res;
}
-static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+static void write_fiforeg(struct sh_flctl *flctl, int rlen,
+ unsigned int offset)
{
int i, len_4align;
- unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
- void *fifo_addr = (void *)FLDTFIFO(flctl);
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
len_4align = (rlen + 3) / 4;
for (i = 0; i < len_4align; i++) {
wait_wfifo_ready(flctl);
- writel(cpu_to_be32(data[i]), fifo_addr);
+ writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
+ }
+}
+
+static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
+ unsigned int offset)
+{
+ int i, len_4align;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+ len_4align = (rlen + 3) / 4;
+
+ for (i = 0; i < len_4align; i++)
+ buf[i] = cpu_to_be32(buf[i]);
+
+ /* initiate DMA transfer */
+ if (flctl->chan_fifo0_tx && rlen >= 32 &&
+ flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
+ return; /* DMA success */
+
+ /* do polling transfer */
+ for (i = 0; i < len_4align; i++) {
+ wait_wecfifo_ready(flctl);
+ writel(buf[i], FLECFIFO(flctl));
}
}
static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- uint32_t flcmncr_val = readl(FLCMNCR(flctl)) & ~SEL_16BIT;
+ uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
uint32_t flcmdcr_val, addr_len_bytes = 0;
/* Set SNAND bit if page size is 2048byte */
@@ -303,6 +522,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
break;
case NAND_CMD_READ0:
case NAND_CMD_READOOB:
+ case NAND_CMD_RNDOUT:
addr_len_bytes = flctl->rw_ADRCNT;
flcmdcr_val |= CDSRC_E;
if (flctl->chip.options & NAND_BUSWIDTH_16)
@@ -320,6 +540,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
break;
case NAND_CMD_READID:
flcmncr_val &= ~SNAND_E;
+ flcmdcr_val |= CDSRC_E;
addr_len_bytes = ADRCNT_1;
break;
case NAND_CMD_STATUS:
@@ -341,75 +562,67 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
}
static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- struct sh_flctl *flctl = mtd_to_flctl(mtd);
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
- chip->read_buf(mtd, p, eccsize);
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- if (flctl->hwecc_cant_correct[i])
- mtd->ecc_stats.failed++;
- else
- mtd->ecc_stats.corrected += 0;
- }
-
+ chip->read_buf(mtd, buf, mtd->writesize);
+ if (oob_required)
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
}
-static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- const uint8_t *p = buf;
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
- chip->write_buf(mtd, p, eccsize);
+ chip->write_buf(mtd, buf, mtd->writesize);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
}
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
int sector, page_sectors;
+ enum flctl_ecc_res_t ecc_result;
- if (flctl->page_size)
- page_sectors = 4;
- else
- page_sectors = 1;
-
- writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
- FLCMNCR(flctl));
+ page_sectors = flctl->page_size ? 4 : 1;
set_cmd_regs(mtd, NAND_CMD_READ0,
(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
- for (sector = 0; sector < page_sectors; sector++) {
- int ret;
+ writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
+ FLCMNCR(flctl));
+ writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+ writel(page_addr << 2, FLADR(flctl));
- empty_fifo(flctl);
- writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
- writel(page_addr << 2 | sector, FLADR(flctl));
+ empty_fifo(flctl);
+ start_translation(flctl);
- start_translation(flctl);
+ for (sector = 0; sector < page_sectors; sector++) {
read_fiforeg(flctl, 512, 512 * sector);
- ret = read_ecfiforeg(flctl,
+ ecc_result = read_ecfiforeg(flctl,
&flctl->done_buff[mtd->writesize + 16 * sector],
sector);
- if (ret)
- flctl->hwecc_cant_correct[sector] = 1;
-
- writel(0x0, FL4ECCCR(flctl));
- wait_completion(flctl);
+ switch (ecc_result) {
+ case FL_REPAIRABLE:
+ dev_info(&flctl->pdev->dev,
+ "applied ecc on page 0x%x", page_addr);
+ flctl->mtd.ecc_stats.corrected++;
+ break;
+ case FL_ERROR:
+ dev_warn(&flctl->pdev->dev,
+ "page 0x%x contains corrupted data\n",
+ page_addr);
+ flctl->mtd.ecc_stats.failed++;
+ break;
+ default:
+ ;
+ }
}
+
+ wait_completion(flctl);
+
writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
FLCMNCR(flctl));
}
@@ -417,30 +630,20 @@ static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int page_sectors = flctl->page_size ? 4 : 1;
+ int i;
set_cmd_regs(mtd, NAND_CMD_READ0,
(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
empty_fifo(flctl);
- if (flctl->page_size) {
- int i;
- /* In case that the page size is 2k */
- for (i = 0; i < 16 * 3; i++)
- flctl->done_buff[i] = 0xFF;
- set_addr(mtd, 3 * 528 + 512, page_addr);
+ for (i = 0; i < page_sectors; i++) {
+ set_addr(mtd, (512 + 16) * i + 512 , page_addr);
writel(16, FLDTCNTR(flctl));
start_translation(flctl);
- read_fiforeg(flctl, 16, 16 * 3);
- wait_completion(flctl);
- } else {
- /* In case that the page size is 512b */
- set_addr(mtd, 512, page_addr);
- writel(16, FLDTCNTR(flctl));
-
- start_translation(flctl);
- read_fiforeg(flctl, 16, 0);
+ read_fiforeg(flctl, 16, 16 * i);
wait_completion(flctl);
}
}
@@ -448,34 +651,26 @@ static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
static void execmd_write_page_sector(struct mtd_info *mtd)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int i, page_addr = flctl->seqin_page_addr;
+ int page_addr = flctl->seqin_page_addr;
int sector, page_sectors;
- if (flctl->page_size)
- page_sectors = 4;
- else
- page_sectors = 1;
-
- writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
+ page_sectors = flctl->page_size ? 4 : 1;
set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
- for (sector = 0; sector < page_sectors; sector++) {
- empty_fifo(flctl);
- writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
- writel(page_addr << 2 | sector, FLADR(flctl));
+ empty_fifo(flctl);
+ writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
+ writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+ writel(page_addr << 2, FLADR(flctl));
+ start_translation(flctl);
- start_translation(flctl);
+ for (sector = 0; sector < page_sectors; sector++) {
write_fiforeg(flctl, 512, 512 * sector);
-
- for (i = 0; i < 4; i++) {
- wait_wecfifo_ready(flctl); /* wait for write ready */
- writel(0xFFFFFFFF, FLECFIFO(flctl));
- }
- wait_completion(flctl);
+ write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
}
+ wait_completion(flctl);
writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}
@@ -485,18 +680,12 @@ static void execmd_write_oob(struct mtd_info *mtd)
int page_addr = flctl->seqin_page_addr;
int sector, page_sectors;
- if (flctl->page_size) {
- sector = 3;
- page_sectors = 4;
- } else {
- sector = 0;
- page_sectors = 1;
- }
+ page_sectors = flctl->page_size ? 4 : 1;
set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
- for (; sector < page_sectors; sector++) {
+ for (sector = 0; sector < page_sectors; sector++) {
empty_fifo(flctl);
set_addr(mtd, sector * 528 + 512, page_addr);
writel(16, FLDTCNTR(flctl)); /* set read size */
@@ -513,6 +702,8 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
struct sh_flctl *flctl = mtd_to_flctl(mtd);
uint32_t read_cmd = 0;
+ pm_runtime_get_sync(&flctl->pdev->dev);
+
flctl->read_bytes = 0;
if (command != NAND_CMD_PAGEPROG)
flctl->index = 0;
@@ -525,7 +716,6 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
execmd_read_page_sector(mtd, page_addr);
break;
}
- empty_fifo(flctl);
if (flctl->page_size)
set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
| command);
@@ -547,7 +737,6 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
break;
}
- empty_fifo(flctl);
if (flctl->page_size) {
set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
| NAND_CMD_READ0);
@@ -559,15 +748,35 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
flctl->read_bytes = mtd->oobsize;
goto read_normal_exit;
+ case NAND_CMD_RNDOUT:
+ if (flctl->hwecc)
+ break;
+
+ if (flctl->page_size)
+ set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
+ | command);
+ else
+ set_cmd_regs(mtd, command, command);
+
+ set_addr(mtd, column, 0);
+
+ flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
+ goto read_normal_exit;
+
case NAND_CMD_READID:
- empty_fifo(flctl);
set_cmd_regs(mtd, command, command);
- set_addr(mtd, 0, 0);
- flctl->read_bytes = 4;
+ /* READID is always performed using an 8-bit bus */
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ column <<= 1;
+ set_addr(mtd, column, 0);
+
+ flctl->read_bytes = 8;
writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ empty_fifo(flctl);
start_translation(flctl);
- read_datareg(flctl, 0); /* read and end */
+ read_fiforeg(flctl, flctl->read_bytes, 0);
+ wait_completion(flctl);
break;
case NAND_CMD_ERASE1:
@@ -650,29 +859,57 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
default:
break;
}
- return;
+ goto runtime_exit;
read_normal_exit:
writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ empty_fifo(flctl);
start_translation(flctl);
read_fiforeg(flctl, flctl->read_bytes, 0);
wait_completion(flctl);
+runtime_exit:
+ pm_runtime_put_sync(&flctl->pdev->dev);
return;
}
static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- uint32_t flcmncr_val = readl(FLCMNCR(flctl));
+ int ret;
switch (chipnr) {
case -1:
- flcmncr_val &= ~CE0_ENABLE;
- writel(flcmncr_val, FLCMNCR(flctl));
+ flctl->flcmncr_base &= ~CE0_ENABLE;
+
+ pm_runtime_get_sync(&flctl->pdev->dev);
+ writel(flctl->flcmncr_base, FLCMNCR(flctl));
+
+ if (flctl->qos_request) {
+ dev_pm_qos_remove_request(&flctl->pm_qos);
+ flctl->qos_request = 0;
+ }
+
+ pm_runtime_put_sync(&flctl->pdev->dev);
break;
case 0:
- flcmncr_val |= CE0_ENABLE;
- writel(flcmncr_val, FLCMNCR(flctl));
+ flctl->flcmncr_base |= CE0_ENABLE;
+
+ if (!flctl->qos_request) {
+ ret = dev_pm_qos_add_request(&flctl->pdev->dev,
+ &flctl->pm_qos,
+ DEV_PM_QOS_RESUME_LATENCY,
+ 100);
+ if (ret < 0)
+ dev_err(&flctl->pdev->dev,
+ "PM QoS request failed: %d\n", ret);
+ flctl->qos_request = 1;
+ }
+
+ if (flctl->holden) {
+ pm_runtime_get_sync(&flctl->pdev->dev);
+ writel(HOLDEN, FLHOLDCR(flctl));
+ pm_runtime_put_sync(&flctl->pdev->dev);
+ }
break;
default:
BUG();
@@ -682,57 +919,36 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int i, index = flctl->index;
- for (i = 0; i < len; i++)
- flctl->done_buff[index + i] = buf[i];
+ memcpy(&flctl->done_buff[flctl->index], buf, len);
flctl->index += len;
}
static uint8_t flctl_read_byte(struct mtd_info *mtd)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int index = flctl->index;
uint8_t data;
- data = flctl->done_buff[index];
+ data = flctl->done_buff[flctl->index];
flctl->index++;
return data;
}
static uint16_t flctl_read_word(struct mtd_info *mtd)
{
- struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int index = flctl->index;
- uint16_t data;
- uint16_t *buf = (uint16_t *)&flctl->done_buff[index];
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];
- data = *buf;
- flctl->index += 2;
- return data;
+ flctl->index += 2;
+ return *buf;
}
static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
- int i;
-
- for (i = 0; i < len; i++)
- buf[i] = flctl_read_byte(mtd);
-}
-
-static int flctl_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
-
- for (i = 0; i < len; i++)
- if (buf[i] != flctl_read_byte(mtd))
- return -EFAULT;
- return 0;
-}
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
-static void flctl_register_init(struct sh_flctl *flctl, unsigned long val)
-{
- writel(val, FLCMNCR(flctl));
+ memcpy(buf, &flctl->done_buff[flctl->index], len);
+ flctl->index += len;
}
static int flctl_chip_init_tail(struct mtd_info *mtd)
@@ -781,13 +997,13 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
chip->ecc.size = 512;
chip->ecc.bytes = 10;
+ chip->ecc.strength = 4;
chip->ecc.read_page = flctl_read_page_hwecc;
chip->ecc.write_page = flctl_write_page_hwecc;
chip->ecc.mode = NAND_ECC_HW;
/* 4 symbols ECC enabled */
- writel(readl(FLCMNCR(flctl)) | _4ECCEN | ECCPOS2 | ECCPOS_02,
- FLCMNCR(flctl));
+ flctl->flcmncr_base |= _4ECCEN;
} else {
chip->ecc.mode = NAND_ECC_SOFT;
}
@@ -795,37 +1011,114 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
return 0;
}
-static int __devinit flctl_probe(struct platform_device *pdev)
+static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
+{
+ struct sh_flctl *flctl = dev_id;
+
+ dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
+ writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
+
+ return IRQ_HANDLED;
+}
+
+struct flctl_soc_config {
+ unsigned long flcmncr_val;
+ unsigned has_hwecc:1;
+ unsigned use_holden:1;
+};
+
+static struct flctl_soc_config flctl_sh7372_config = {
+ .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
+ .has_hwecc = 1,
+ .use_holden = 1,
+};
+
+static const struct of_device_id of_flctl_match[] = {
+ { .compatible = "renesas,shmobile-flctl-sh7372",
+ .data = &flctl_sh7372_config },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_flctl_match);
+
+static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
+{
+ const struct of_device_id *match;
+ struct flctl_soc_config *config;
+ struct sh_flctl_platform_data *pdata;
+ struct device_node *dn = dev->of_node;
+ int ret;
+
+ match = of_match_device(of_flctl_match, dev);
+ if (match)
+ config = (struct flctl_soc_config *)match->data;
+ else {
+ dev_err(dev, "%s: no OF configuration attached\n", __func__);
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ /* set SoC specific options */
+ pdata->flcmncr_val = config->flcmncr_val;
+ pdata->has_hwecc = config->has_hwecc;
+ pdata->use_holden = config->use_holden;
+
+ /* parse user defined options */
+ ret = of_get_nand_bus_width(dn);
+ if (ret == 16)
+ pdata->flcmncr_val |= SEL_16BIT;
+ else if (ret != 8) {
+ dev_err(dev, "%s: invalid bus width\n", __func__);
+ return NULL;
+ }
+
+ return pdata;
+}
+
+static int flctl_probe(struct platform_device *pdev)
{
struct resource *res;
struct sh_flctl *flctl;
struct mtd_info *flctl_mtd;
struct nand_chip *nand;
struct sh_flctl_platform_data *pdata;
- int ret = -ENXIO;
+ int ret;
+ int irq;
+ struct mtd_part_parser_data ppdata = {};
- pdata = pdev->dev.platform_data;
- if (pdata == NULL) {
- dev_err(&pdev->dev, "no platform data defined\n");
- return -EINVAL;
- }
-
- flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
- if (!flctl) {
- dev_err(&pdev->dev, "failed to allocate driver data\n");
+ flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
+ if (!flctl)
return -ENOMEM;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get I/O memory\n");
- goto err;
+ flctl->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(flctl->reg))
+ return PTR_ERR(flctl->reg);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get flste irq data\n");
+ return -ENXIO;
}
- flctl->reg = ioremap(res->start, resource_size(res));
- if (flctl->reg == NULL) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- goto err;
+ ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
+ "flste", flctl);
+ if (ret) {
+ dev_err(&pdev->dev, "request interrupt failed.\n");
+ return ret;
+ }
+
+ if (pdev->dev.of_node)
+ pdata = flctl_parse_dt(&pdev->dev);
+ else
+ pdata = dev_get_platdata(&pdev->dev);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no setup data defined\n");
+ return -EINVAL;
}
platform_set_drvdata(pdev, flctl);
@@ -834,10 +1127,9 @@ static int __devinit flctl_probe(struct platform_device *pdev)
flctl_mtd->priv = nand;
flctl->pdev = pdev;
flctl->hwecc = pdata->has_hwecc;
-
- flctl_register_init(flctl, pdata->flcmncr_val);
-
- nand->options = NAND_NO_AUTOINCR;
+ flctl->holden = pdata->use_holden;
+ flctl->flcmncr_base = pdata->flcmncr_val;
+ flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
/* Set address of hardware control function */
/* 20 us command delay time */
@@ -846,7 +1138,6 @@ static int __devinit flctl_probe(struct platform_device *pdev)
nand->read_byte = flctl_read_byte;
nand->write_buf = flctl_write_buf;
nand->read_buf = flctl_read_buf;
- nand->verify_buf = flctl_verify_buf;
nand->select_chip = flctl_select_chip;
nand->cmdfunc = flctl_cmdfunc;
@@ -855,33 +1146,42 @@ static int __devinit flctl_probe(struct platform_device *pdev)
nand->read_word = flctl_read_word;
}
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ flctl_setup_dma(flctl);
+
ret = nand_scan_ident(flctl_mtd, 1, NULL);
if (ret)
- goto err;
+ goto err_chip;
ret = flctl_chip_init_tail(flctl_mtd);
if (ret)
- goto err;
+ goto err_chip;
ret = nand_scan_tail(flctl_mtd);
if (ret)
- goto err;
+ goto err_chip;
- mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(flctl_mtd, NULL, &ppdata, pdata->parts,
+ pdata->nr_parts);
return 0;
-err:
- kfree(flctl);
+err_chip:
+ flctl_release_dma(flctl);
+ pm_runtime_disable(&pdev->dev);
return ret;
}
-static int __devexit flctl_remove(struct platform_device *pdev)
+static int flctl_remove(struct platform_device *pdev)
{
struct sh_flctl *flctl = platform_get_drvdata(pdev);
+ flctl_release_dma(flctl);
nand_release(&flctl->mtd);
- kfree(flctl);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -891,21 +1191,11 @@ static struct platform_driver flctl_driver = {
.driver = {
.name = "sh_flctl",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_flctl_match),
},
};
-static int __init flctl_nand_init(void)
-{
- return platform_driver_probe(&flctl_driver, flctl_probe);
-}
-
-static void __exit flctl_nand_cleanup(void)
-{
- platform_driver_unregister(&flctl_driver);
-}
-
-module_init(flctl_nand_init);
-module_exit(flctl_nand_cleanup);
+module_platform_driver_probe(flctl_driver, flctl_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 19e24ed089e..e81059b5838 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -103,20 +103,16 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat,
return readb(sharpsl->io + ECCCNTR) != 0;
}
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
/*
* Main initialization routine
*/
-static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
+static int sharpsl_nand_probe(struct platform_device *pdev)
{
struct nand_chip *this;
- struct mtd_partition *sharpsl_partition_info;
- int nr_partitions;
struct resource *r;
int err = 0;
struct sharpsl_nand *sharpsl;
- struct sharpsl_nand_platform_data *data = pdev->dev.platform_data;
+ struct sharpsl_nand_platform_data *data = dev_get_platdata(&pdev->dev);
if (!data) {
dev_err(&pdev->dev, "no platform data!\n");
@@ -125,10 +121,8 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
/* Allocate memory for MTD device structure and private data */
sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
- if (!sharpsl) {
- printk("Unable to allocate SharpSL NAND MTD device structure.\n");
+ if (!sharpsl)
return -ENOMEM;
- }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -140,7 +134,7 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
/* map physical address */
sharpsl->io = ioremap(r->start, resource_size(r));
if (!sharpsl->io) {
- printk("ioremap to access Sharp SL NAND chip failed\n");
+ dev_err(&pdev->dev, "ioremap to access Sharp SL NAND chip failed\n");
err = -EIO;
goto err_ioremap;
}
@@ -171,6 +165,7 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
this->ecc.mode = NAND_ECC_HW;
this->ecc.size = 256;
this->ecc.bytes = 3;
+ this->ecc.strength = 1;
this->badblock_pattern = data->badblock_pattern;
this->ecc.layout = data->ecc_layout;
this->ecc.hwctl = sharpsl_nand_enable_hwecc;
@@ -184,14 +179,9 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
/* Register the partitions */
sharpsl->mtd.name = "sharpsl-nand";
- nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
- if (nr_partitions <= 0) {
- nr_partitions = data->nr_partitions;
- sharpsl_partition_info = data->partitions;
- }
- err = mtd_device_register(&sharpsl->mtd, sharpsl_partition_info,
- nr_partitions);
+ err = mtd_device_parse_register(&sharpsl->mtd, NULL, NULL,
+ data->partitions, data->nr_partitions);
if (err)
goto err_add;
@@ -202,7 +192,6 @@ err_add:
nand_release(&sharpsl->mtd);
err_scan:
- platform_set_drvdata(pdev, NULL);
iounmap(sharpsl->io);
err_ioremap:
err_get_res:
@@ -213,15 +202,13 @@ err_get_res:
/*
* Clean up routine
*/
-static int __devexit sharpsl_nand_remove(struct platform_device *pdev)
+static int sharpsl_nand_remove(struct platform_device *pdev)
{
struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
/* Release resources, unregister device */
nand_release(&sharpsl->mtd);
- platform_set_drvdata(pdev, NULL);
-
iounmap(sharpsl->io);
/* Free the MTD device structure */
@@ -236,20 +223,10 @@ static struct platform_driver sharpsl_nand_driver = {
.owner = THIS_MODULE,
},
.probe = sharpsl_nand_probe,
- .remove = __devexit_p(sharpsl_nand_remove),
+ .remove = sharpsl_nand_remove,
};
-static int __init sharpsl_nand_init(void)
-{
- return platform_driver_register(&sharpsl_nand_driver);
-}
-module_init(sharpsl_nand_init);
-
-static void __exit sharpsl_nand_exit(void)
-{
- platform_driver_unregister(&sharpsl_nand_driver);
-}
-module_exit(sharpsl_nand_exit);
+module_platform_driver(sharpsl_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index b6332e83b28..e06b5e5d328 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -8,6 +8,8 @@
*/
#include <linux/kernel.h>
#include <linux/mtd/nand.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
#include "sm_common.h"
static struct nand_ecclayout nand_oob_sm = {
@@ -40,71 +42,62 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_oob_ops ops;
struct sm_oob oob;
- int ret, error = 0;
+ int ret;
memset(&oob, -1, SM_OOB_SIZE);
oob.block_status = 0x0F;
/* As long as this function is called on erase block boundaries
it will work correctly for 256 byte nand */
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
ops.oobbuf = (void *)&oob;
ops.datbuf = NULL;
- ret = mtd->write_oob(mtd, ofs, &ops);
+ ret = mtd_write_oob(mtd, ofs, &ops);
if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
printk(KERN_NOTICE
"sm_common: can't mark sector at %i as bad\n",
(int)ofs);
- error = -EIO;
- } else
- mtd->ecc_stats.badblocks++;
+ return -EIO;
+ }
- return error;
+ return 0;
}
-
static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
- {"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0},
- {"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0},
- {"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0},
- {"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0},
- {"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0},
- {"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM},
- {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0},
- {"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0},
- {"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0},
- {"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM},
- {"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0},
- {"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM},
- {"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
- {"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM},
- {"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
- {"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM},
- {"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
- {"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM},
- {"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
- {"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM},
- {"SmartMedia 256MiB 3,3V", 0x71, 512, 256, 0x4000 },
- {"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM},
- {NULL,}
+ LEGACY_ID_NAND("SmartMedia 2MiB 3,3V ROM", 0x5d, 2, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3V", 0xe3, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3/5V", 0xe5, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 5V", 0x6b, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3V ROM", 0xd5, 4, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 8MiB 3,3V", 0xe6, 8, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 8MiB 3,3V ROM", 0xd6, 8, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 16MiB 3,3V ROM", 0x57, 16, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 32MiB 3,3V ROM", 0x58, 32, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 64MiB 3,3V ROM", 0xd9, 64, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 128MiB 3,3V ROM", 0xda, 128, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 256MiB 3, 3V", 0x71, 256, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 256MiB 3,3V ROM", 0x5b, 256, SZ_16K, NAND_ROM),
+ {NULL}
};
-#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
static struct nand_flash_dev nand_xd_flash_ids[] = {
-
- {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
- {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
- {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
- {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
- {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM},
- {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM},
- {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM},
- {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM},
- {NULL,}
+ LEGACY_ID_NAND("xD 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 256MiB 3,3V", 0x71, 256, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 512MiB 3,3V", 0xdc, 512, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 1GiB 3,3V", 0xd3, 1024, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 2GiB 3,3V", 0xd5, 2048, SZ_16K, NAND_BROKEN_XD),
+ {NULL}
};
int sm_register_device(struct mtd_info *mtd, int smartmedia)
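The ID tables above now use the LEGACY_ID_NAND() helper from <linux/mtd/nand.h>, which fixes the page size at 512 bytes and takes the chip size in MiB plus the erase size, so each row only carries the per-device fields. As a rough illustration (the array name is made up), the old row {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0} is equivalent to:

    #include <linux/mtd/nand.h>
    #include <linux/sizes.h>

    static struct nand_flash_dev example_ids[] = {
            /* name, device ID, chip size in MiB, erase size, options */
            LEGACY_ID_NAND("SmartMedia 4MiB 3,3V", 0xe3, 4, SZ_8K, 0),
            {NULL}
    };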
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index ca2d0555729..fe8058a4505 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -15,6 +15,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/io.h>
@@ -98,24 +99,6 @@ static uint16_t socrates_nand_read_word(struct mtd_info *mtd)
return word;
}
-/**
- * socrates_nand_verify_buf - Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
- */
-static int socrates_nand_verify_buf(struct mtd_info *mtd, const u8 *buf,
- int len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- if (buf[i] != socrates_nand_read_byte(mtd))
- return -EFAULT;
- }
- return 0;
-}
-
/*
* Hardware specific access to control-lines
*/
@@ -155,32 +138,25 @@ static int socrates_nand_device_ready(struct mtd_info *mtd)
return 1;
}
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
/*
* Probe for the NAND device.
*/
-static int __devinit socrates_nand_probe(struct platform_device *ofdev)
+static int socrates_nand_probe(struct platform_device *ofdev)
{
struct socrates_nand_host *host;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int res;
- struct mtd_partition *partitions = NULL;
- int num_partitions = 0;
+ struct mtd_part_parser_data ppdata;
/* Allocate memory for the device structure (and zero it) */
- host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
- if (!host) {
- printk(KERN_ERR
- "socrates_nand: failed to allocate device structure.\n");
+ host = devm_kzalloc(&ofdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
return -ENOMEM;
- }
host->io_base = of_iomap(ofdev->dev.of_node, 0);
if (host->io_base == NULL) {
- printk(KERN_ERR "socrates_nand: ioremap failed\n");
- kfree(host);
+ dev_err(&ofdev->dev, "ioremap failed\n");
return -EIO;
}
@@ -193,6 +169,7 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
mtd->name = "socrates_nand";
mtd->owner = THIS_MODULE;
mtd->dev.parent = &ofdev->dev;
+ ppdata.of_node = ofdev->dev.of_node;
/*should never be accessed directly */
nand_chip->IO_ADDR_R = (void *)0xdeadbeef;
@@ -203,7 +180,6 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
nand_chip->read_word = socrates_nand_read_word;
nand_chip->write_buf = socrates_nand_write_buf;
nand_chip->read_buf = socrates_nand_read_buf;
- nand_chip->verify_buf = socrates_nand_verify_buf;
nand_chip->dev_ready = socrates_nand_device_ready;
nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */
@@ -225,52 +201,28 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
goto out;
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- num_partitions = parse_mtd_partitions(mtd, part_probes,
- &partitions, 0);
- if (num_partitions < 0) {
- res = num_partitions;
- goto release;
- }
-#endif
-
- if (num_partitions == 0) {
- num_partitions = of_mtd_parse_partitions(&ofdev->dev,
- ofdev->dev.of_node,
- &partitions);
- if (num_partitions < 0) {
- res = num_partitions;
- goto release;
- }
- }
-
- res = mtd_device_register(mtd, partitions, num_partitions);
+ res = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
if (!res)
return res;
-release:
nand_release(mtd);
out:
- dev_set_drvdata(&ofdev->dev, NULL);
iounmap(host->io_base);
- kfree(host);
return res;
}
/*
* Remove a NAND device.
*/
-static int __devexit socrates_nand_remove(struct platform_device *ofdev)
+static int socrates_nand_remove(struct platform_device *ofdev)
{
struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
struct mtd_info *mtd = &host->mtd;
nand_release(mtd);
- dev_set_drvdata(&ofdev->dev, NULL);
iounmap(host->io_base);
- kfree(host);
return 0;
}
@@ -292,21 +244,10 @@ static struct platform_driver socrates_nand_driver = {
.of_match_table = socrates_nand_match,
},
.probe = socrates_nand_probe,
- .remove = __devexit_p(socrates_nand_remove),
+ .remove = socrates_nand_remove,
};
-static int __init socrates_nand_init(void)
-{
- return platform_driver_register(&socrates_nand_driver);
-}
-
-static void __exit socrates_nand_exit(void)
-{
- platform_driver_unregister(&socrates_nand_driver);
-}
-
-module_init(socrates_nand_init);
-module_exit(socrates_nand_exit);
+module_platform_driver(socrates_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ilya Yanok");
diff --git a/drivers/mtd/nand/spia.c b/drivers/mtd/nand/spia.c
deleted file mode 100644
index bef76cd7c24..00000000000
--- a/drivers/mtd/nand/spia.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * drivers/mtd/nand/spia.c
- *
- * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
- *
- *
- * 10-29-2001 TG change to support hardwarespecific access
- * to controllines (due to change in nand.c)
- * page_cache added
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * SPIA board which utilizes the Toshiba TC58V64AFT part. This is
- * a 64Mibit (8MiB x 8 bits) NAND flash device.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-
-/*
- * MTD structure for SPIA board
- */
-static struct mtd_info *spia_mtd = NULL;
-
-/*
- * Values specific to the SPIA board (used with EP7212 processor)
- */
-#define SPIA_IO_BASE 0xd0000000 /* Start of EP7212 IO address space */
-#define SPIA_FIO_BASE 0xf0000000 /* Address where flash is mapped */
-#define SPIA_PEDR 0x0080 /*
- * IO offset to Port E data register
- * where the CLE, ALE and NCE pins
- * are wired to.
- */
-#define SPIA_PEDDR 0x00c0 /*
- * IO offset to Port E data direction
- * register so we can control the IO
- * lines.
- */
-
-/*
- * Module stuff
- */
-
-static int spia_io_base = SPIA_IO_BASE;
-static int spia_fio_base = SPIA_FIO_BASE;
-static int spia_pedr = SPIA_PEDR;
-static int spia_peddr = SPIA_PEDDR;
-
-module_param(spia_io_base, int, 0);
-module_param(spia_fio_base, int, 0);
-module_param(spia_pedr, int, 0);
-module_param(spia_peddr, int, 0);
-
-/*
- * Define partitions for flash device
- */
-static const struct mtd_partition partition_info[] = {
- {
- .name = "SPIA flash partition 1",
- .offset = 0,
- .size = 2 * 1024 * 1024},
- {
- .name = "SPIA flash partition 2",
- .offset = 2 * 1024 * 1024,
- .size = 6 * 1024 * 1024}
-};
-
-#define NUM_PARTITIONS 2
-
-/*
- * hardware specific access to control-lines
- *
- * ctrl:
- * NAND_CNE: bit 0 -> bit 2
- * NAND_CLE: bit 1 -> bit 0
- * NAND_ALE: bit 2 -> bit 1
- */
-static void spia_hwcontrol(struct mtd_info *mtd, int cmd)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
- void __iomem *addr = spia_io_base + spia_pedr;
- unsigned char bits;
-
- bits = (ctrl & NAND_CNE) << 2;
- bits |= (ctrl & NAND_CLE | NAND_ALE) >> 1;
- writeb((readb(addr) & ~0x7) | bits, addr);
- }
-
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-/*
- * Main initialization routine
- */
-static int __init spia_init(void)
-{
- struct nand_chip *this;
-
- /* Allocate memory for MTD device structure and private data */
- spia_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!spia_mtd) {
- printk("Unable to allocate SPIA NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&spia_mtd[1]);
-
- /* Initialize structures */
- memset(spia_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- spia_mtd->priv = this;
- spia_mtd->owner = THIS_MODULE;
-
- /*
- * Set GPIO Port E control register so that the pins are configured
- * to be outputs for controlling the NAND flash.
- */
- (*(volatile unsigned char *)(spia_io_base + spia_peddr)) = 0x07;
-
- /* Set address of NAND IO lines */
- this->IO_ADDR_R = (void __iomem *)spia_fio_base;
- this->IO_ADDR_W = (void __iomem *)spia_fio_base;
- /* Set address of hardware control function */
- this->cmd_ctrl = spia_hwcontrol;
- /* 15 us command delay time */
- this->chip_delay = 15;
-
- /* Scan to find existence of the device */
- if (nand_scan(spia_mtd, 1)) {
- kfree(spia_mtd);
- return -ENXIO;
- }
-
- /* Register the partitions */
- mtd_device_register(spia_mtd, partition_info, NUM_PARTITIONS);
-
- /* Return happy */
- return 0;
-}
-
-module_init(spia_init);
-
-/*
- * Clean up routine
- */
-static void __exit spia_cleanup(void)
-{
- /* Release resources, unregister device */
- nand_release(spia_mtd);
-
- /* Free the MTD device structure */
- kfree(spia_mtd);
-}
-
-module_exit(spia_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com");
-MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on SPIA board");
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 11e8371b568..fb8fd35fa66 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -121,9 +121,6 @@ struct tmio_nand {
#define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd)
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/*--------------------------------------------------------------------------*/
@@ -259,18 +256,6 @@ static void tmio_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
}
-static int
-tmio_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct tmio_nand *tmio = mtd_to_tmio(mtd);
- u16 *p = (u16 *) buf;
-
- for (len >>= 1; len; len--)
- if (*(p++) != tmio_ioread16(tmio->fcr + FCR_DATA))
- return -EFAULT;
- return 0;
-}
-
static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode)
{
struct tmio_nand *tmio = mtd_to_tmio(mtd);
@@ -372,7 +357,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
static int tmio_probe(struct platform_device *dev)
{
- struct tmio_nand_data *data = dev->dev.platform_data;
+ struct tmio_nand_data *data = dev_get_platdata(&dev->dev);
struct resource *fcr = platform_get_resource(dev,
IORESOURCE_MEM, 0);
struct resource *ccr = platform_get_resource(dev,
@@ -381,18 +366,14 @@ static int tmio_probe(struct platform_device *dev)
struct tmio_nand *tmio;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
- struct mtd_partition *parts;
- int nbparts = 0;
int retval;
if (data == NULL)
dev_warn(&dev->dev, "NULL platform data!\n");
- tmio = kzalloc(sizeof *tmio, GFP_KERNEL);
- if (!tmio) {
- retval = -ENOMEM;
- goto err_kzalloc;
- }
+ tmio = devm_kzalloc(&dev->dev, sizeof(*tmio), GFP_KERNEL);
+ if (!tmio)
+ return -ENOMEM;
tmio->dev = dev;
@@ -402,22 +383,18 @@ static int tmio_probe(struct platform_device *dev)
mtd->priv = nand_chip;
mtd->name = "tmio-nand";
- tmio->ccr = ioremap(ccr->start, resource_size(ccr));
- if (!tmio->ccr) {
- retval = -EIO;
- goto err_iomap_ccr;
- }
+ tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr));
+ if (!tmio->ccr)
+ return -EIO;
tmio->fcr_base = fcr->start & 0xfffff;
- tmio->fcr = ioremap(fcr->start, resource_size(fcr));
- if (!tmio->fcr) {
- retval = -EIO;
- goto err_iomap_fcr;
- }
+ tmio->fcr = devm_ioremap(&dev->dev, fcr->start, resource_size(fcr));
+ if (!tmio->fcr)
+ return -EIO;
retval = tmio_hw_init(dev, tmio);
if (retval)
- goto err_hwinit;
+ return retval;
/* Set address of NAND IO lines */
nand_chip->IO_ADDR_R = tmio->fcr;
@@ -429,12 +406,12 @@ static int tmio_probe(struct platform_device *dev)
nand_chip->read_byte = tmio_nand_read_byte;
nand_chip->write_buf = tmio_nand_write_buf;
nand_chip->read_buf = tmio_nand_read_buf;
- nand_chip->verify_buf = tmio_nand_verify_buf;
/* set eccmode using hardware ECC */
nand_chip->ecc.mode = NAND_ECC_HW;
nand_chip->ecc.size = 512;
nand_chip->ecc.bytes = 6;
+ nand_chip->ecc.strength = 2;
nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
nand_chip->ecc.correct = tmio_nand_correct_data;
@@ -445,8 +422,8 @@ static int tmio_probe(struct platform_device *dev)
/* 15 us command delay time */
nand_chip->chip_delay = 15;
- retval = request_irq(irq, &tmio_irq,
- IRQF_DISABLED, dev_name(&dev->dev), tmio);
+ retval = devm_request_irq(&dev->dev, irq, &tmio_irq, 0,
+ dev_name(&dev->dev), tmio);
if (retval) {
dev_err(&dev->dev, "request_irq error %d\n", retval);
goto err_irq;
@@ -458,35 +435,19 @@ static int tmio_probe(struct platform_device *dev)
/* Scan to find existence of the device */
if (nand_scan(mtd, 1)) {
retval = -ENODEV;
- goto err_scan;
+ goto err_irq;
}
/* Register the partitions */
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
-#endif
- if (nbparts <= 0 && data) {
- parts = data->partition;
- nbparts = data->num_partitions;
- }
-
- retval = mtd_device_register(mtd, parts, nbparts);
+ retval = mtd_device_parse_register(mtd, NULL, NULL,
+ data ? data->partition : NULL,
+ data ? data->num_partitions : 0);
if (!retval)
return retval;
nand_release(mtd);
-err_scan:
- if (tmio->irq)
- free_irq(tmio->irq, tmio);
err_irq:
tmio_hw_stop(dev, tmio);
-err_hwinit:
- iounmap(tmio->fcr);
-err_iomap_fcr:
- iounmap(tmio->ccr);
-err_iomap_ccr:
- kfree(tmio);
-err_kzalloc:
return retval;
}
@@ -495,12 +456,7 @@ static int tmio_remove(struct platform_device *dev)
struct tmio_nand *tmio = platform_get_drvdata(dev);
nand_release(&tmio->mtd);
- if (tmio->irq)
- free_irq(tmio->irq, tmio);
tmio_hw_stop(dev, tmio);
- iounmap(tmio->fcr);
- iounmap(tmio->ccr);
- kfree(tmio);
return 0;
}
@@ -544,18 +500,7 @@ static struct platform_driver tmio_driver = {
.resume = tmio_resume,
};
-static int __init tmio_init(void)
-{
- return platform_driver_register(&tmio_driver);
-}
-
-static void __exit tmio_exit(void)
-{
- platform_driver_unregister(&tmio_driver);
-}
-
-module_init(tmio_init);
-module_exit(tmio_exit);
+module_platform_driver(tmio_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index bfba4e39a6c..c1622a5ba81 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -9,6 +9,7 @@
* (C) Copyright TOSHIBA CORPORATION 2004-2007
* All Rights Reserved.
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -74,7 +75,6 @@ struct txx9ndfmc_drvdata {
unsigned char hold; /* in gbusclock */
unsigned char spw; /* in gbusclock */
struct nand_hw_control hw_control;
- struct mtd_partition *parts[MAX_TXX9NDFMC_DEV];
};
static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
@@ -87,7 +87,7 @@ static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
static void __iomem *ndregaddr(struct platform_device *dev, unsigned int reg)
{
struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
- struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
return drvdata->base + (reg << plat->shift);
}
@@ -132,25 +132,13 @@ static void txx9ndfmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
*buf++ = __raw_readl(ndfdtr);
}
-static int txx9ndfmc_verify_buf(struct mtd_info *mtd, const uint8_t *buf,
- int len)
-{
- struct platform_device *dev = mtd_to_platdev(mtd);
- void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
-
- while (len--)
- if (*buf++ != (uint8_t)__raw_readl(ndfdtr))
- return -EFAULT;
- return 0;
-}
-
static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd,
unsigned int ctrl)
{
struct nand_chip *chip = mtd->priv;
struct txx9ndfmc_priv *txx9_priv = chip->priv;
struct platform_device *dev = txx9_priv->dev;
- struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
if (ctrl & NAND_CTRL_CHANGE) {
u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
@@ -237,7 +225,7 @@ static void txx9ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
static void txx9ndfmc_initialize(struct platform_device *dev)
{
- struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
int tmout = 100;
@@ -286,27 +274,20 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
static int __init txx9ndfmc_probe(struct platform_device *dev)
{
- struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
- static const char *probes[] = { "cmdlinepart", NULL };
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
int hold, spw;
int i;
struct txx9ndfmc_drvdata *drvdata;
unsigned long gbusclk = plat->gbus_clock;
struct resource *res;
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- if (!devm_request_mem_region(&dev->dev, res->start,
- resource_size(res), dev_name(&dev->dev)))
- return -EBUSY;
- drvdata->base = devm_ioremap(&dev->dev, res->start,
- resource_size(res));
- if (!drvdata->base)
- return -EBUSY;
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ drvdata->base = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
hold = plat->hold ?: 20; /* tDH */
spw = plat->spw ?: 90; /* max(tREADID, tWP, tRP) */
@@ -333,17 +314,13 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
struct txx9ndfmc_priv *txx9_priv;
struct nand_chip *chip;
struct mtd_info *mtd;
- int nr_parts;
if (!(plat->ch_mask & (1 << i)))
continue;
txx9_priv = kzalloc(sizeof(struct txx9ndfmc_priv),
GFP_KERNEL);
- if (!txx9_priv) {
- dev_err(&dev->dev, "Unable to allocate "
- "TXx9 NDFMC MTD device structure.\n");
+ if (!txx9_priv)
continue;
- }
chip = &txx9_priv->chip;
mtd = &txx9_priv->mtd;
mtd->owner = THIS_MODULE;
@@ -353,7 +330,6 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
chip->read_byte = txx9ndfmc_read_byte;
chip->read_buf = txx9ndfmc_read_buf;
chip->write_buf = txx9ndfmc_write_buf;
- chip->verify_buf = txx9ndfmc_verify_buf;
chip->cmd_ctrl = txx9ndfmc_cmd_ctrl;
chip->dev_ready = txx9ndfmc_dev_ready;
chip->ecc.calculate = txx9ndfmc_calculate_ecc;
@@ -363,6 +339,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
/* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */
chip->ecc.size = 256;
chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
chip->chip_delay = 100;
chip->controller = &drvdata->hw_control;
@@ -393,9 +370,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
}
mtd->name = txx9_priv->mtdname;
- nr_parts = parse_mtd_partitions(mtd, probes,
- &drvdata->parts[i], 0);
- mtd_device_register(mtd, drvdata->parts[i], nr_parts);
+ mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
drvdata->mtds[i] = mtd;
}
@@ -407,7 +382,6 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
int i;
- platform_set_drvdata(dev, NULL);
if (!drvdata)
return 0;
for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
@@ -421,7 +395,6 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
txx9_priv = chip->priv;
nand_release(mtd);
- kfree(drvdata->parts[i]);
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
@@ -448,18 +421,7 @@ static struct platform_driver txx9ndfmc_driver = {
},
};
-static int __init txx9ndfmc_init(void)
-{
- return platform_driver_probe(&txx9ndfmc_driver, txx9ndfmc_probe);
-}
-
-static void __exit txx9ndfmc_exit(void)
-{
- platform_driver_unregister(&txx9ndfmc_driver);
-}
-
-module_init(txx9ndfmc_init);
-module_exit(txx9ndfmc_exit);
+module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
new file mode 100644
index 00000000000..3f81dc8f214
--- /dev/null
+++ b/drivers/mtd/nand/xway_nand.c
@@ -0,0 +1,201 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright © 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/mtd/nand.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+
+#include <lantiq_soc.h>
+
+/* nand registers */
+#define EBU_ADDSEL1 0x24
+#define EBU_NAND_CON 0xB0
+#define EBU_NAND_WAIT 0xB4
+#define EBU_NAND_ECC0 0xB8
+#define EBU_NAND_ECC_AC 0xBC
+
+/* nand commands */
+#define NAND_CMD_ALE (1 << 2)
+#define NAND_CMD_CLE (1 << 3)
+#define NAND_CMD_CS (1 << 4)
+#define NAND_WRITE_CMD_RESET 0xff
+#define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE)
+#define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE)
+#define NAND_WRITE_DATA (NAND_CMD_CS)
+#define NAND_READ_DATA (NAND_CMD_CS)
+#define NAND_WAIT_WR_C (1 << 3)
+#define NAND_WAIT_RD (0x1)
+
+/* we need to tell the EBU which address we mapped the nand to */
+#define ADDSEL1_MASK(x) (x << 4)
+#define ADDSEL1_REGEN 1
+
+/* we need to tell the EBU that we have nand attached and set it up properly */
+#define BUSCON1_SETUP (1 << 22)
+#define BUSCON1_BCGEN_RES (0x3 << 12)
+#define BUSCON1_WAITWRC2 (2 << 8)
+#define BUSCON1_WAITRDC2 (2 << 6)
+#define BUSCON1_HOLDC1 (1 << 4)
+#define BUSCON1_RECOVC1 (1 << 2)
+#define BUSCON1_CMULT4 1
+
+#define NAND_CON_CE (1 << 20)
+#define NAND_CON_OUT_CS1 (1 << 10)
+#define NAND_CON_IN_CS1 (1 << 8)
+#define NAND_CON_PRE_P (1 << 7)
+#define NAND_CON_WP_P (1 << 6)
+#define NAND_CON_SE_P (1 << 5)
+#define NAND_CON_CS_P (1 << 4)
+#define NAND_CON_CSMUX (1 << 1)
+#define NAND_CON_NANDM 1
+
+static void xway_reset_chip(struct nand_chip *chip)
+{
+ unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
+ unsigned long flags;
+
+ nandaddr &= ~NAND_WRITE_ADDR;
+ nandaddr |= NAND_WRITE_CMD;
+
+ /* finish with a reset */
+ spin_lock_irqsave(&ebu_lock, flags);
+ writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr);
+ while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
+ spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+static void xway_select_chip(struct mtd_info *mtd, int chip)
+{
+
+ switch (chip) {
+ case -1:
+ ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
+ ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+ break;
+ case 0:
+ ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
+ ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
+ unsigned long flags;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR);
+ if (ctrl & NAND_CLE)
+ nandaddr |= NAND_WRITE_CMD;
+ else
+ nandaddr |= NAND_WRITE_ADDR;
+ this->IO_ADDR_W = (void __iomem *) nandaddr;
+ }
+
+ if (cmd != NAND_CMD_NONE) {
+ spin_lock_irqsave(&ebu_lock, flags);
+ writeb(cmd, this->IO_ADDR_W);
+ while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
+ spin_unlock_irqrestore(&ebu_lock, flags);
+ }
+}
+
+static int xway_dev_ready(struct mtd_info *mtd)
+{
+ return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD;
+}
+
+static unsigned char xway_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long nandaddr = (unsigned long) this->IO_ADDR_R;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA));
+ spin_unlock_irqrestore(&ebu_lock, flags);
+
+ return ret;
+}
+
+static int xway_nand_probe(struct platform_device *pdev)
+{
+ struct nand_chip *this = platform_get_drvdata(pdev);
+ unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
+ const __be32 *cs = of_get_property(pdev->dev.of_node,
+ "lantiq,cs", NULL);
+ u32 cs_flag = 0;
+
+	/* load our CS from the DT: use CS 1 if the property is 1, otherwise default to CS 0 */
+ if (cs && (*cs == 1))
+ cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
+
+ /* setup the EBU to run in NAND mode on our base addr */
+ ltq_ebu_w32(CPHYSADDR(nandaddr)
+ | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
+
+ ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
+ | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
+ | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
+
+ ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
+ | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
+ | cs_flag, EBU_NAND_CON);
+
+ /* finish with a reset */
+ xway_reset_chip(this);
+
+ return 0;
+}
+
+/* allow users to override the partition in DT using the cmdline */
+static const char *part_probes[] = { "cmdlinepart", "ofpart", NULL };
+
+static struct platform_nand_data xway_nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_delay = 30,
+ .part_probe_types = part_probes,
+ },
+ .ctrl = {
+ .probe = xway_nand_probe,
+ .cmd_ctrl = xway_cmd_ctrl,
+ .dev_ready = xway_dev_ready,
+ .select_chip = xway_select_chip,
+ .read_byte = xway_read_byte,
+ }
+};
+
+/*
+ * Try to find the node inside the DT. If it is available, attach our
+ * platform_nand_data to it.
+ */
+static int __init xway_register_nand(void)
+{
+ struct device_node *node;
+ struct platform_device *pdev;
+
+ node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway");
+ if (!node)
+ return -ENOENT;
+ pdev = of_find_device_by_node(node);
+ if (!pdev)
+ return -EINVAL;
+ pdev->dev.platform_data = &xway_nand_data;
+ of_node_put(node);
+ return 0;
+}
+
+subsys_initcall(xway_register_nand);
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index b155666acfb..46f27de018c 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -50,27 +50,18 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
struct NFTLrecord *nftl;
unsigned long temp;
- if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
+ if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
return;
/* OK, this is moderately ugly. But probably safe. Alternatives? */
if (memcmp(mtd->name, "DiskOnChip", 10))
return;
- if (!mtd->block_isbad) {
- printk(KERN_ERR
-"NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
-"Please use the new diskonchip driver under the NAND subsystem.\n");
- return;
- }
-
- DEBUG(MTD_DEBUG_LEVEL1, "NFTL: add_mtd for %s\n", mtd->name);
+ pr_debug("NFTL: add_mtd for %s\n", mtd->name);
nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
- if (!nftl) {
- printk(KERN_WARNING "NFTL: out of memory for data structures\n");
+ if (!nftl)
return;
- }
nftl->mbd.mtd = mtd;
nftl->mbd.devnum = -1;
@@ -132,7 +123,7 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
{
struct NFTLrecord *nftl = (void *)dev;
- DEBUG(MTD_DEBUG_LEVEL1, "NFTL: remove_dev (i=%d)\n", dev->devnum);
+ pr_debug("NFTL: remove_dev (i=%d)\n", dev->devnum);
del_mtd_blktrans_dev(dev);
kfree(nftl->ReplUnitTable);
@@ -149,13 +140,13 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
struct mtd_oob_ops ops;
int res;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & mask;
ops.ooblen = len;
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->read_oob(mtd, offs & ~mask, &ops);
+ res = mtd_read_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -170,13 +161,13 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
struct mtd_oob_ops ops;
int res;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & mask;
ops.ooblen = len;
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->write_oob(mtd, offs & ~mask, &ops);
+ res = mtd_write_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -193,14 +184,14 @@ static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
struct mtd_oob_ops ops;
int res;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & mask;
ops.ooblen = mtd->oobsize;
ops.oobbuf = oob;
ops.datbuf = buf;
ops.len = len;
- res = mtd->write_oob(mtd, offs & ~mask, &ops);
+ res = mtd_write_oob(mtd, offs & ~mask, &ops);
*retlen = ops.retlen;
return res;
}
@@ -220,7 +211,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
/* Normally, we force a fold to happen before we run out of free blocks completely */
if (!desperate && nftl->numfreeEUNs < 2) {
- DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n");
+ pr_debug("NFTL_findfreeblock: there are too few free EUNs\n");
return BLOCK_NIL;
}
@@ -291,8 +282,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
if (block == 2) {
foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1;
if (foldmark == FOLD_MARK_IN_PROGRESS) {
- DEBUG(MTD_DEBUG_LEVEL1,
- "Write Inhibited on EUN %d\n", thisEUN);
+ pr_debug("Write Inhibited on EUN %d\n", thisEUN);
inplace = 0;
} else {
/* There's no other reason not to do inplace,
@@ -357,7 +347,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
if (BlockLastState[block] != SECTOR_FREE &&
BlockMap[block] != BLOCK_NIL &&
BlockMap[block] != targetEUN) {
- DEBUG(MTD_DEBUG_LEVEL1, "Setting inplace to 0. VUC %d, "
+ pr_debug("Setting inplace to 0. VUC %d, "
"block %d was %x lastEUN, "
"and is in EUN %d (%s) %d\n",
thisVUC, block, BlockLastState[block],
@@ -373,14 +363,14 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) &&
BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] !=
SECTOR_FREE) {
- DEBUG(MTD_DEBUG_LEVEL1, "Pending write not free in EUN %d. "
+ pr_debug("Pending write not free in EUN %d. "
"Folding out of place.\n", targetEUN);
inplace = 0;
}
}
if (!inplace) {
- DEBUG(MTD_DEBUG_LEVEL1, "Cannot fold Virtual Unit Chain %d in place. "
+ pr_debug("Cannot fold Virtual Unit Chain %d in place. "
"Trying out-of-place\n", thisVUC);
/* We need to find a targetEUN to fold into. */
targetEUN = NFTL_findfreeblock(nftl, 1);
@@ -410,7 +400,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
and the Erase Unit into which we are supposed to be copying.
Go for it.
*/
- DEBUG(MTD_DEBUG_LEVEL1,"Folding chain %d into unit %d\n", thisVUC, targetEUN);
+ pr_debug("Folding chain %d into unit %d\n", thisVUC, targetEUN);
for (block = 0; block < nftl->EraseSize / 512 ; block++) {
unsigned char movebuf[512];
int ret;
@@ -426,12 +416,17 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
if (BlockMap[block] == BLOCK_NIL)
continue;
- ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
- 512, &retlen, movebuf);
- if (ret < 0 && ret != -EUCLEAN) {
- ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block])
- + (block * 512), 512, &retlen,
- movebuf);
+ ret = mtd_read(mtd,
+ (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ 512,
+ &retlen,
+ movebuf);
+ if (ret < 0 && !mtd_is_bitflip(ret)) {
+ ret = mtd_read(mtd,
+ (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ 512,
+ &retlen,
+ movebuf);
if (ret != -EIO)
printk("Error went away on retry.\n");
}
@@ -457,7 +452,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
has duplicate chains, we need to free one of the chains because it's not necessary any more.
*/
thisEUN = nftl->EUNtable[thisVUC];
- DEBUG(MTD_DEBUG_LEVEL1,"Want to erase\n");
+ pr_debug("Want to erase\n");
/* For each block in the old chain (except the targetEUN of course),
free it and make it available for future use */
@@ -570,7 +565,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
(writeEUN * nftl->EraseSize) + blockofs,
8, &retlen, (char *)&bci);
- DEBUG(MTD_DEBUG_LEVEL2, "Status of block %d in EUN %d is %x\n",
+ pr_debug("Status of block %d in EUN %d is %x\n",
block , writeEUN, le16_to_cpu(bci.Status));
status = bci.Status | bci.Status1;
@@ -623,7 +618,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
but they are reserved for when we're
desperate. Well, now we're desperate.
*/
- DEBUG(MTD_DEBUG_LEVEL1, "Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC);
+ pr_debug("Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC);
writeEUN = NFTL_findfreeblock(nftl, 1);
}
if (writeEUN == BLOCK_NIL) {
@@ -774,9 +769,9 @@ static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
} else {
loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs;
size_t retlen;
- int res = mtd->read(mtd, ptr, 512, &retlen, buffer);
+ int res = mtd_read(mtd, ptr, 512, &retlen, buffer);
- if (res < 0 && res != -EUCLEAN)
+ if (res < 0 && !mtd_is_bitflip(res))
return -EIO;
}
return 0;
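nftlcore now goes through the mtd_read()/mtd_write_oob() wrappers and uses mtd_is_bitflip() instead of comparing against -EUCLEAN directly; a bitflip return means the data was corrected by ECC and is still usable. A hedged sketch of the read path, with an invented helper:

    #include <linux/mtd/mtd.h>

    static int example_read_sector(struct mtd_info *mtd, loff_t ofs, u_char *buf)
    {
            size_t retlen;
            int ret;

            ret = mtd_read(mtd, ofs, 512, &retlen, buf);
            if (ret < 0 && !mtd_is_bitflip(ret))
                    return -EIO;    /* real read error */

            return 0;               /* ok, possibly with corrected bitflips */
    }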
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index e3cd1ffad2f..51b9d6af307 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -32,7 +32,7 @@
/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
* various device information of the NFTL partition and Bad Unit Table. Update
- * the ReplUnitTable[] table accroding to the Bad Unit Table. ReplUnitTable[]
+ * the ReplUnitTable[] table according to the Bad Unit Table. ReplUnitTable[]
* is used for management of Erase Unit in other routines in nftl.c and nftlmount.c
*/
static int find_boot_record(struct NFTLrecord *nftl)
@@ -63,8 +63,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
/* Check for ANAND header first. Then can whinge if it's found but later
checks fail */
- ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
- &retlen, buf);
+ ret = mtd_read(mtd, block * nftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
/* We ignore ret in case the ECC of the MediaHeader is invalid
(which is apparently acceptable) */
if (retlen != SECTORSIZE) {
@@ -242,7 +242,8 @@ The new DiskOnChip driver already scanned the bad block table. Just query it.
if (buf[i & (SECTORSIZE - 1)] != 0xff)
nftl->ReplUnitTable[i] = BLOCK_RESERVED;
#endif
- if (nftl->mbd.mtd->block_isbad(nftl->mbd.mtd, i * nftl->EraseSize))
+ if (mtd_block_isbad(nftl->mbd.mtd,
+ i * nftl->EraseSize))
nftl->ReplUnitTable[i] = BLOCK_RESERVED;
}
@@ -274,7 +275,7 @@ static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int
int i;
for (i = 0; i < len; i += SECTORSIZE) {
- if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
+ if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
return -1;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
return -1;
@@ -297,7 +298,7 @@ static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int
*
* Return: 0 when succeed, -1 on error.
*
- * ToDo: 1. Is it neceressary to check_free_sector after erasing ??
+ * ToDo: 1. Is it necessary to check_free_sector after erasing ??
*/
int NFTL_formatblock(struct NFTLrecord *nftl, int block)
{
@@ -326,7 +327,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
instr->mtd = nftl->mbd.mtd;
instr->addr = block * nftl->EraseSize;
instr->len = nftl->EraseSize;
- mtd->erase(mtd, instr);
+ mtd_erase(mtd, instr);
if (instr->state == MTD_ERASE_FAILED) {
printk("Error while formatting block %d\n", block);
@@ -337,7 +338,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
nb_erases = le32_to_cpu(uci.WearInfo);
nb_erases++;
- /* wrap (almost impossible with current flashs) or free block */
+ /* wrap (almost impossible with current flash) or free block */
if (nb_erases == 0)
nb_erases = 1;
@@ -355,7 +356,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
fail:
/* could not format, update the bad block table (caller is responsible
for setting the ReplUnitTable to BLOCK_RESERVED on failure) */
- nftl->mbd.mtd->block_markbad(nftl->mbd.mtd, instr->addr);
+ mtd_block_markbad(nftl->mbd.mtd, instr->addr);
return -1;
}
@@ -363,10 +364,10 @@ fail:
* Mark as 'IGNORE' each incorrect sector. This check is only done if the chain
* was being folded when NFTL was interrupted.
*
- * The check_free_sectors in this function is neceressary. There is a possible
+ * The check_free_sectors in this function is necessary. There is a possible
* situation that after writing the Data area, the Block Control Information is
* not updated according (due to power failure or something) which leaves the block
- * in an umconsistent state. So we have to check if a block is really FREE in this
+ * in an inconsistent state. So we have to check if a block is really FREE in this
* case. */
static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block)
{
@@ -428,7 +429,7 @@ static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block)
for (;;) {
length++;
- /* avoid infinite loops, although this is guaranted not to
+ /* avoid infinite loops, although this is guaranteed not to
happen because of the previous checks */
if (length >= nftl->nb_blocks) {
printk("nftl: length too long %d !\n", length);
@@ -447,11 +448,11 @@ static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block)
/* format_chain: Format an invalid Virtual Unit chain. It frees all the Erase Units in a
* Virtual Unit Chain, i.e. all the units are disconnected.
*
- * It is not stricly correct to begin from the first block of the chain because
+ * It is not strictly correct to begin from the first block of the chain because
* if we stop the code, we may see again a valid chain if there was a first_block
* flag in a block inside it. But is it really a problem ?
*
- * FixMe: Figure out what the last statesment means. What if power failure when we are
+ * FixMe: Figure out what the last statement means. What if power failure when we are
* in the for (;;) loop formatting blocks ??
*/
static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
@@ -485,7 +486,7 @@ static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
* totally free (only 0xff).
*
* Definition: Free Erase Unit -- A properly erased/formatted Free Erase Unit should have meet the
- * following critia:
+ * following criteria:
* 1. */
static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
{
@@ -502,7 +503,7 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
if (erase_mark != ERASE_MARK) {
/* if no erase mark, the block must be totally free. This is
- possible in two cases : empty filsystem or interrupted erase (very unlikely) */
+ possible in two cases : empty filesystem or interrupted erase (very unlikely) */
if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0)
return -1;
@@ -544,7 +545,7 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
/* get_fold_mark: Read fold mark from Unit Control Information #2, we use FOLD_MARK_IN_PROGRESS
* to indicate that we are in the progression of a Virtual Unit Chain folding. If the UCI #2
* is FOLD_MARK_IN_PROGRESS when mounting the NFTL, the (previous) folding process is interrupted
- * for some reason. A clean up/check of the VUC is neceressary in this case.
+ * for some reason. A clean up/check of the VUC is necessary in this case.
*
* WARNING: return 0 if read error
*/
@@ -657,7 +658,7 @@ int NFTL_mount(struct NFTLrecord *s)
printk("Block %d: incorrect logical block: %d expected: %d\n",
block, logical_block, first_logical_block);
/* the chain is incorrect : we must format it,
- but we need to read it completly */
+ but we need to read it completely */
do_format_chain = 1;
}
if (is_first_block) {
@@ -669,7 +670,7 @@ int NFTL_mount(struct NFTLrecord *s)
printk("Block %d: incorrectly marked as first block in chain\n",
block);
/* the chain is incorrect : we must format it,
- but we need to read it completly */
+ but we need to read it completely */
do_format_chain = 1;
} else {
printk("Block %d: folding in progress - ignoring first block flag\n",
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index a996718fa6b..aa26c32e1bc 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -20,19 +20,36 @@
#include <linux/slab.h>
#include <linux/mtd/partitions.h>
-int __devinit of_mtd_parse_partitions(struct device *dev,
- struct device_node *node,
- struct mtd_partition **pparts)
+static bool node_has_compatible(struct device_node *pp)
{
+ return of_get_property(pp, "compatible", NULL);
+}
+
+static int parse_ofpart_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct device_node *node;
const char *partname;
struct device_node *pp;
int nr_parts, i;
+
+ if (!data)
+ return 0;
+
+ node = data->of_node;
+ if (!node)
+ return 0;
+
/* First count the subnodes */
- pp = NULL;
nr_parts = 0;
- while ((pp = of_get_next_child(node, pp)))
+ for_each_child_of_node(node, pp) {
+ if (node_has_compatible(pp))
+ continue;
+
nr_parts++;
+ }
if (nr_parts == 0)
return 0;
@@ -41,11 +58,14 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
if (!*pparts)
return -ENOMEM;
- pp = NULL;
i = 0;
- while ((pp = of_get_next_child(node, pp))) {
+ for_each_child_of_node(node, pp) {
const __be32 *reg;
int len;
+ int a_cells, s_cells;
+
+ if (node_has_compatible(pp))
+ continue;
reg = of_get_property(pp, "reg", &len);
if (!reg) {
@@ -53,23 +73,28 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
continue;
}
- (*pparts)[i].offset = be32_to_cpu(reg[0]);
- (*pparts)[i].size = be32_to_cpu(reg[1]);
+ a_cells = of_n_addr_cells(pp);
+ s_cells = of_n_size_cells(pp);
+ (*pparts)[i].offset = of_read_number(reg, a_cells);
+ (*pparts)[i].size = of_read_number(reg + a_cells, s_cells);
partname = of_get_property(pp, "label", &len);
if (!partname)
partname = of_get_property(pp, "name", &len);
- (*pparts)[i].name = (char *)partname;
+ (*pparts)[i].name = partname;
if (of_get_property(pp, "read-only", &len))
- (*pparts)[i].mask_flags = MTD_WRITEABLE;
+ (*pparts)[i].mask_flags |= MTD_WRITEABLE;
+
+ if (of_get_property(pp, "lock", &len))
+ (*pparts)[i].mask_flags |= MTD_POWERUP_LOCK;
i++;
}
if (!i) {
of_node_put(pp);
- dev_err(dev, "No valid partition found on %s\n", node->full_name);
+ pr_err("No valid partition found on %s\n", node->full_name);
kfree(*pparts);
*pparts = NULL;
return -EINVAL;
@@ -77,6 +102,97 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
return nr_parts;
}
-EXPORT_SYMBOL(of_mtd_parse_partitions);
+
+static struct mtd_part_parser ofpart_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = parse_ofpart_partitions,
+ .name = "ofpart",
+};
+
+static int parse_ofoldpart_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct device_node *dp;
+ int i, plen, nr_parts;
+ const struct {
+ __be32 offset, len;
+ } *part;
+ const char *names;
+
+ if (!data)
+ return 0;
+
+ dp = data->of_node;
+ if (!dp)
+ return 0;
+
+ part = of_get_property(dp, "partitions", &plen);
+ if (!part)
+ return 0; /* No partitions found */
+
+ pr_warning("Device tree uses obsolete partition map binding: %s\n",
+ dp->full_name);
+
+ nr_parts = plen / sizeof(part[0]);
+
+ *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
+ if (!*pparts)
+ return -ENOMEM;
+
+ names = of_get_property(dp, "partition-names", &plen);
+
+ for (i = 0; i < nr_parts; i++) {
+ (*pparts)[i].offset = be32_to_cpu(part->offset);
+ (*pparts)[i].size = be32_to_cpu(part->len) & ~1;
+ /* bit 0 set signifies read only partition */
+ if (be32_to_cpu(part->len) & 1)
+ (*pparts)[i].mask_flags = MTD_WRITEABLE;
+
+ if (names && (plen > 0)) {
+ int len = strlen(names) + 1;
+
+ (*pparts)[i].name = names;
+ plen -= len;
+ names += len;
+ } else {
+ (*pparts)[i].name = "unnamed";
+ }
+
+ part++;
+ }
+
+ return nr_parts;
+}
+
+static struct mtd_part_parser ofoldpart_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = parse_ofoldpart_partitions,
+ .name = "ofoldpart",
+};
+
+static int __init ofpart_parser_init(void)
+{
+ register_mtd_parser(&ofpart_parser);
+ register_mtd_parser(&ofoldpart_parser);
+ return 0;
+}
+
+static void __exit ofpart_parser_exit(void)
+{
+ deregister_mtd_parser(&ofpart_parser);
+ deregister_mtd_parser(&ofoldpart_parser);
+}
+
+module_init(ofpart_parser_init);
+module_exit(ofpart_parser_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
+MODULE_AUTHOR("Vitaly Wool, David Gibson");
+/*
+ * When MTD core cannot find the requested parser, it tries to load the module
+ * with the same name. Since we provide the ofoldpart parser, we should have
+ * the corresponding alias.
+ */
+MODULE_ALIAS("ofoldpart");
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 772ad296661..ab2607273e8 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -1,6 +1,7 @@
menuconfig MTD_ONENAND
tristate "OneNAND Device Support"
depends on MTD
+ depends on HAS_IOMEM
help
This enables support for accessing all type of OneNAND flash
devices. For further information see
@@ -39,7 +40,6 @@ config MTD_ONENAND_SAMSUNG
config MTD_ONENAND_OTP
bool "OneNAND OTP Support"
- select HAVE_MTD_OTP
help
One Block of the NAND Flash Array memory is reserved as
a One-Time Programmable Block memory area.
@@ -67,10 +67,4 @@ config MTD_ONENAND_2X_PROGRAM
And more recent chips
-config MTD_ONENAND_SIM
- tristate "OneNAND simulator support"
- help
- The simulator may simulate various OneNAND flash chips for the
- OneNAND MTD layer.
-
endif # MTD_ONENAND
diff --git a/drivers/mtd/onenand/Makefile b/drivers/mtd/onenand/Makefile
index 2b7884c7577..9d6540e8b3d 100644
--- a/drivers/mtd/onenand/Makefile
+++ b/drivers/mtd/onenand/Makefile
@@ -10,7 +10,4 @@ obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o
obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o
-# Simulator
-obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o
-
onenand-objs = onenand_base.o onenand_bbt.o
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 2d70d354d84..093c29ac1a1 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -13,7 +13,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
@@ -30,18 +29,15 @@
*/
#define DRIVER_NAME "onenand-flash"
-static const char *part_probes[] = { "cmdlinepart", NULL, };
-
struct onenand_info {
struct mtd_info mtd;
- struct mtd_partition *parts;
struct onenand_chip onenand;
};
-static int __devinit generic_onenand_probe(struct platform_device *pdev)
+static int generic_onenand_probe(struct platform_device *pdev)
{
struct onenand_info *info;
- struct onenand_platform_data *pdata = pdev->dev.platform_data;
+ struct onenand_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *res = pdev->resource;
unsigned long size = resource_size(res);
int err;
@@ -61,7 +57,7 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev)
goto out_release_mem_region;
}
- info->onenand.mmcontrol = pdata ? pdata->mmcontrol : 0;
+ info->onenand.mmcontrol = pdata ? pdata->mmcontrol : NULL;
info->onenand.irq = platform_get_irq(pdev, 0);
info->mtd.name = dev_name(&pdev->dev);
@@ -73,13 +69,9 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev)
goto out_iounmap;
}
- err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
- if (err > 0)
- mtd_device_register(&info->mtd, info->parts, err);
- else if (err <= 0 && pdata && pdata->parts)
- mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
- else
- err = mtd_device_register(&info->mtd, NULL, 0);
+ err = mtd_device_parse_register(&info->mtd, NULL, NULL,
+ pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
platform_set_drvdata(pdev, info);
@@ -95,16 +87,13 @@ out_free_info:
return err;
}
-static int __devexit generic_onenand_remove(struct platform_device *pdev)
+static int generic_onenand_remove(struct platform_device *pdev)
{
struct onenand_info *info = platform_get_drvdata(pdev);
struct resource *res = pdev->resource;
unsigned long size = resource_size(res);
- platform_set_drvdata(pdev, NULL);
-
if (info) {
- mtd_device_unregister(&info->mtd);
onenand_release(&info->mtd);
release_mem_region(res->start, size);
iounmap(info->onenand.base);
@@ -120,24 +109,12 @@ static struct platform_driver generic_onenand_driver = {
.owner = THIS_MODULE,
},
.probe = generic_onenand_probe,
- .remove = __devexit_p(generic_onenand_remove),
+ .remove = generic_onenand_remove,
};
-MODULE_ALIAS("platform:" DRIVER_NAME);
-
-static int __init generic_onenand_init(void)
-{
- return platform_driver_register(&generic_onenand_driver);
-}
-
-static void __exit generic_onenand_exit(void)
-{
- platform_driver_unregister(&generic_onenand_driver);
-}
-
-module_init(generic_onenand_init);
-module_exit(generic_onenand_exit);
+module_platform_driver(generic_onenand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on generic boards");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index a916dec2921..d945473c388 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -25,7 +25,6 @@
#include <linux/device.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
@@ -38,26 +37,22 @@
#include <linux/regulator/consumer.h>
#include <asm/mach/flash.h>
-#include <plat/gpmc.h>
-#include <plat/onenand.h>
-#include <mach/gpio.h>
+#include <linux/platform_data/mtd-onenand-omap2.h>
+#include <asm/gpio.h>
-#include <plat/dma.h>
-
-#include <plat/board.h>
+#include <linux/omap-dma.h>
#define DRIVER_NAME "omap2-onenand"
-#define ONENAND_IO_SIZE SZ_128K
#define ONENAND_BUFRAM_SIZE (1024 * 5)
struct omap2_onenand {
struct platform_device *pdev;
int gpmc_cs;
unsigned long phys_base;
+ unsigned int mem_size;
int gpio_irq;
struct mtd_info mtd;
- struct mtd_partition *parts;
struct onenand_chip onenand;
struct completion irq_done;
struct completion dma_done;
@@ -65,10 +60,9 @@ struct omap2_onenand {
int freq;
int (*setup)(void __iomem *base, int *freq_ptr);
struct regulator *regulator;
+ u8 flags;
};
-static const char *part_probes[] = { "cmdlinepart", NULL, };
-
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
struct omap2_onenand *c = data;
@@ -159,12 +153,12 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
syscfg |= ONENAND_SYS_CFG1_IOBE;
write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
- if (cpu_is_omap34xx())
+ if (c->flags & ONENAND_IN_OMAP34XX)
/* Add a delay to let GPIO settle */
syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
}
- INIT_COMPLETION(c->irq_done);
+ reinit_completion(&c->irq_done);
if (c->gpio_irq) {
result = gpio_get_value(c->gpio_irq);
if (result == -1) {
@@ -354,7 +348,7 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_dst, 0, 0);
- INIT_COMPLETION(c->dma_done);
+ reinit_completion(&c->dma_done);
omap_start_dma(c->dma_channel);
timeout = jiffies + msecs_to_jiffies(20);
@@ -425,7 +419,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_dst, 0, 0);
- INIT_COMPLETION(c->dma_done);
+ reinit_completion(&c->dma_done);
omap_start_dma(c->dma_channel);
timeout = jiffies + msecs_to_jiffies(20);
@@ -450,13 +444,19 @@ out_copy:
#else
-int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count);
+static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ return -ENOSYS;
+}
-int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count);
+static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer,
+ int offset, size_t count)
+{
+ return -ENOSYS;
+}
#endif
@@ -498,7 +498,7 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_dst, 0, 0);
- INIT_COMPLETION(c->dma_done);
+ reinit_completion(&c->dma_done);
omap_start_dma(c->dma_channel);
wait_for_completion(&c->dma_done);
@@ -543,7 +543,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_dst, 0, 0);
- INIT_COMPLETION(c->dma_done);
+ reinit_completion(&c->dma_done);
omap_start_dma(c->dma_channel);
wait_for_completion(&c->dma_done);
@@ -554,40 +554,24 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
#else
-int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count);
-
-int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count);
-
-#endif
-
-static struct platform_driver omap2_onenand_driver;
-
-static int __adjust_timing(struct device *dev, void *data)
+static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
{
- int ret = 0;
- struct omap2_onenand *c;
-
- c = dev_get_drvdata(dev);
-
- BUG_ON(c->setup == NULL);
-
- /* DMA is not in use so this is all that is needed */
- /* Revisit for OMAP3! */
- ret = c->setup(c->onenand.base, &c->freq);
-
- return ret;
+ return -ENOSYS;
}
-int omap2_onenand_rephase(void)
+static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer,
+ int offset, size_t count)
{
- return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
- NULL, __adjust_timing);
+ return -ENOSYS;
}
+#endif
+
+static struct platform_driver omap2_onenand_driver;
+
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -623,14 +607,16 @@ static int omap2_onenand_disable(struct mtd_info *mtd)
return ret;
}
-static int __devinit omap2_onenand_probe(struct platform_device *pdev)
+static int omap2_onenand_probe(struct platform_device *pdev)
{
struct omap_onenand_platform_data *pdata;
struct omap2_onenand *c;
struct onenand_chip *this;
int r;
+ struct resource *res;
+ struct mtd_part_parser_data ppdata = {};
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata == NULL) {
dev_err(&pdev->dev, "platform data missing\n");
return -ENODEV;
@@ -642,6 +628,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
init_completion(&c->irq_done);
init_completion(&c->dma_done);
+ c->flags = pdata->flags;
c->gpmc_cs = pdata->cs;
c->gpio_irq = pdata->gpio_irq;
c->dma_channel = pdata->dma_channel;
@@ -650,20 +637,24 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
c->gpio_irq = 0;
}
- r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
- if (r < 0) {
- dev_err(&pdev->dev, "Cannot request GPMC CS\n");
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ r = -EINVAL;
+ dev_err(&pdev->dev, "error getting memory resource\n");
goto err_kfree;
}
- if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
+ c->phys_base = res->start;
+ c->mem_size = resource_size(res);
+
+ if (request_mem_region(c->phys_base, c->mem_size,
pdev->dev.driver->name) == NULL) {
- dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
- "size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
+ dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
+ c->phys_base, c->mem_size);
r = -EBUSY;
- goto err_free_cs;
+ goto err_kfree;
}
- c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
+ c->onenand.base = ioremap(c->phys_base, c->mem_size);
if (c->onenand.base == NULL) {
r = -ENOMEM;
goto err_release_mem_region;
@@ -728,7 +719,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
this = &c->onenand;
if (c->dma_channel >= 0) {
this->wait = omap2_onenand_wait;
- if (cpu_is_omap34xx()) {
+ if (c->flags & ONENAND_IN_OMAP34XX) {
this->read_bufferram = omap3_onenand_read_bufferram;
this->write_bufferram = omap3_onenand_write_bufferram;
} else {
@@ -741,6 +732,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
c->regulator = regulator_get(&pdev->dev, "vonenand");
if (IS_ERR(c->regulator)) {
dev_err(&pdev->dev, "Failed to get regulator\n");
+ r = PTR_ERR(c->regulator);
goto err_release_dma;
}
c->onenand.enable = omap2_onenand_enable;
@@ -753,13 +745,10 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
- r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
- if (r > 0)
- r = mtd_device_register(&c->mtd, c->parts, r);
- else if (pdata->parts != NULL)
- r = mtd_device_register(&c->mtd, pdata->parts, pdata->nr_parts);
- else
- r = mtd_device_register(&c->mtd, NULL, 0);
+ ppdata.of_node = pdata->of_node;
+ r = mtd_device_parse_register(&c->mtd, NULL, &ppdata,
+ pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
if (r)
goto err_release_onenand;
@@ -782,17 +771,14 @@ err_release_gpio:
err_iounmap:
iounmap(c->onenand.base);
err_release_mem_region:
- release_mem_region(c->phys_base, ONENAND_IO_SIZE);
-err_free_cs:
- gpmc_cs_free(c->gpmc_cs);
+ release_mem_region(c->phys_base, c->mem_size);
err_kfree:
- kfree(c->parts);
kfree(c);
return r;
}
-static int __devexit omap2_onenand_remove(struct platform_device *pdev)
+static int omap2_onenand_remove(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -801,15 +787,12 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
if (c->dma_channel != -1)
omap_free_dma(c->dma_channel);
omap2_onenand_shutdown(pdev);
- platform_set_drvdata(pdev, NULL);
if (c->gpio_irq) {
free_irq(gpio_to_irq(c->gpio_irq), c);
gpio_free(c->gpio_irq);
}
iounmap(c->onenand.base);
- release_mem_region(c->phys_base, ONENAND_IO_SIZE);
- gpmc_cs_free(c->gpmc_cs);
- kfree(c->parts);
+ release_mem_region(c->phys_base, c->mem_size);
kfree(c);
return 0;
@@ -817,7 +800,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
- .remove = __devexit_p(omap2_onenand_remove),
+ .remove = omap2_onenand_remove,
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
@@ -825,19 +808,7 @@ static struct platform_driver omap2_onenand_driver = {
},
};
-static int __init omap2_onenand_init(void)
-{
- printk(KERN_INFO "OneNAND driver initializing\n");
- return platform_driver_register(&omap2_onenand_driver);
-}
-
-static void __exit omap2_onenand_exit(void)
-{
- platform_driver_unregister(&omap2_onenand_driver);
-}
-
-module_init(omap2_onenand_init);
-module_exit(omap2_onenand_exit);
+module_platform_driver(omap2_onenand_driver);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index ac9e959802a..635ee002769 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -1015,7 +1014,7 @@ static void onenand_release_device(struct mtd_info *mtd)
}
/**
- * onenand_transfer_auto_oob - [Internal] oob auto-placement transfer
+ * onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer
* @param mtd MTD device structure
* @param buf destination address
* @param column oob offset to read from
@@ -1079,7 +1078,7 @@ static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
return status;
/* check if we failed due to uncorrectable error */
- if (status != -EBADMSG && status != ONENAND_BBT_READ_ECC_ERROR)
+ if (!mtd_is_eccerr(status) && status != ONENAND_BBT_READ_ECC_ERROR)
return status;
/* check if address lies in MLC region */
@@ -1122,10 +1121,10 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
int ret = 0;
int writesize = this->writesize;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
- __func__, (unsigned int) from, (int) len);
+ pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+ (int)len);
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
oobsize = this->ecclayout->oobavail;
else
oobsize = mtd->oobsize;
@@ -1159,7 +1158,7 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
if (unlikely(ret))
ret = onenand_recover_lsb(mtd, from, ret);
onenand_update_bufferram(mtd, from, !ret);
- if (ret == -EBADMSG)
+ if (mtd_is_eccerr(ret))
ret = 0;
if (ret)
break;
@@ -1170,7 +1169,7 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
thisooblen = oobsize - oobcolumn;
thisooblen = min_t(int, thisooblen, ooblen - oobread);
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
else
this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
@@ -1201,7 +1200,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+ return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
}
/**
@@ -1226,10 +1226,10 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
int ret = 0, boundary = 0;
int writesize = this->writesize;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
- __func__, (unsigned int) from, (int) len);
+ pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+ (int)len);
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
oobsize = this->ecclayout->oobavail;
else
oobsize = mtd->oobsize;
@@ -1255,7 +1255,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
this->command(mtd, ONENAND_CMD_READ, from, writesize);
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
- if (ret == -EBADMSG)
+ if (mtd_is_eccerr(ret))
ret = 0;
}
}
@@ -1291,7 +1291,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
thisooblen = oobsize - oobcolumn;
thisooblen = min_t(int, thisooblen, ooblen - oobread);
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
else
this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
@@ -1315,7 +1315,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
/* Now wait for load */
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
- if (ret == -EBADMSG)
+ if (mtd_is_eccerr(ret))
ret = 0;
}
@@ -1333,7 +1333,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+ return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
}
/**
@@ -1351,19 +1352,19 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
struct mtd_ecc_stats stats;
int read = 0, thislen, column, oobsize;
size_t len = ops->ooblen;
- mtd_oob_mode_t mode = ops->mode;
+ unsigned int mode = ops->mode;
u_char *buf = ops->oobbuf;
int ret = 0, readcmd;
from += ops->ooboffs;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
- __func__, (unsigned int) from, (int) len);
+ pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+ (int)len);
/* Initialize return length value */
ops->oobretlen = 0;
- if (mode == MTD_OOB_AUTO)
+ if (mode == MTD_OPS_AUTO_OOB)
oobsize = this->ecclayout->oobavail;
else
oobsize = mtd->oobsize;
@@ -1403,13 +1404,13 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
if (unlikely(ret))
ret = onenand_recover_lsb(mtd, from, ret);
- if (ret && ret != -EBADMSG) {
+ if (ret && !mtd_is_eccerr(ret)) {
printk(KERN_ERR "%s: read failed = 0x%x\n",
__func__, ret);
break;
}
- if (mode == MTD_OOB_AUTO)
+ if (mode == MTD_OPS_AUTO_OOB)
onenand_transfer_auto_oob(mtd, buf, column, thislen);
else
this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
@@ -1487,10 +1488,10 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
int ret;
switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_AUTO:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
break;
- case MTD_OOB_RAW:
+ case MTD_OPS_RAW:
/* Not implemented yet */
default:
return -EINVAL;
@@ -1576,8 +1577,8 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
size_t len = ops->ooblen;
u_char *buf = ops->oobbuf;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %zi\n",
- __func__, (unsigned int) from, len);
+ pr_debug("%s: from = 0x%08x, len = %zi\n", __func__, (unsigned int)from,
+ len);
/* Initialize return value */
ops->oobretlen = 0;
@@ -1750,18 +1751,8 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Wait for any existing operation to clear */
onenand_panic_wait(mtd);
- DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
- __func__, (unsigned int) to, (int) len);
-
- /* Initialize retlen, in case of early exit */
- *retlen = 0;
-
- /* Do not allow writes past end of device */
- if (unlikely((to + len) > mtd->size)) {
- printk(KERN_ERR "%s: Attempt write to past end of device\n",
- __func__);
- return -EINVAL;
- }
+ pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+ (int)len);
/* Reject writes, which are not page aligned */
if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
@@ -1821,7 +1812,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
}
/**
- * onenand_fill_auto_oob - [Internal] oob auto-placement transfer
+ * onenand_fill_auto_oob - [INTERN] oob auto-placement transfer
* @param mtd MTD device structure
* @param oob_buf oob buffer
* @param buf source address
@@ -1883,20 +1874,13 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
u_char *oobbuf;
int ret = 0, cmd;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
- __func__, (unsigned int) to, (int) len);
+ pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+ (int)len);
/* Initialize retlen, in case of early exit */
ops->retlen = 0;
ops->oobretlen = 0;
- /* Do not allow writes past end of device */
- if (unlikely((to + len) > mtd->size)) {
- printk(KERN_ERR "%s: Attempt write to past end of device\n",
- __func__);
- return -EINVAL;
- }
-
/* Reject writes, which are not page aligned */
if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
@@ -1908,7 +1892,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
if (!len)
return 0;
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
oobsize = this->ecclayout->oobavail;
else
oobsize = mtd->oobsize;
@@ -1945,7 +1929,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
/* We send data to spare ram with oobsize
* to prevent byte access */
memset(oobbuf, 0xff, mtd->oobsize);
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen);
else
memcpy(oobbuf + oobcolumn, oob, thisooblen);
@@ -2055,7 +2039,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
/**
- * onenand_write_oob_nolock - [Internal] OneNAND write out-of-band
+ * onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band
* @param mtd MTD device structure
* @param to offset to write to
* @param len number of bytes to write
@@ -2074,17 +2058,17 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
u_char *oobbuf;
size_t len = ops->ooblen;
const u_char *buf = ops->oobbuf;
- mtd_oob_mode_t mode = ops->mode;
+ unsigned int mode = ops->mode;
to += ops->ooboffs;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
- __func__, (unsigned int) to, (int) len);
+ pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+ (int)len);
/* Initialize retlen, in case of early exit */
ops->oobretlen = 0;
- if (mode == MTD_OOB_AUTO)
+ if (mode == MTD_OPS_AUTO_OOB)
oobsize = this->ecclayout->oobavail;
else
oobsize = mtd->oobsize;
@@ -2128,7 +2112,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
/* We send data to spare ram with oobsize
* to prevent byte access */
memset(oobbuf, 0xff, mtd->oobsize);
- if (mode == MTD_OOB_AUTO)
+ if (mode == MTD_OPS_AUTO_OOB)
onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen);
else
memcpy(oobbuf + column, buf, thislen);
@@ -2217,10 +2201,10 @@ static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
int ret;
switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_AUTO:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
break;
- case MTD_OOB_RAW:
+ case MTD_OPS_RAW:
/* Not implemented yet */
default:
return -EINVAL;
@@ -2281,7 +2265,7 @@ static int onenand_multiblock_erase_verify(struct mtd_info *mtd,
}
/**
- * onenand_multiblock_erase - [Internal] erase block(s) using multiblock erase
+ * onenand_multiblock_erase - [INTERN] erase block(s) using multiblock erase
* @param mtd MTD device structure
* @param instr erase instruction
* @param region erase region
@@ -2397,7 +2381,7 @@ static int onenand_multiblock_erase(struct mtd_info *mtd,
/**
- * onenand_block_by_block_erase - [Internal] erase block(s) using regular erase
+ * onenand_block_by_block_erase - [INTERN] erase block(s) using regular erase
* @param mtd MTD device structure
* @param instr erase instruction
* @param region erase region
@@ -2489,14 +2473,9 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
struct mtd_erase_region_info *region = NULL;
loff_t region_offset = 0;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: start=0x%012llx, len=%llu\n", __func__,
- (unsigned long long) instr->addr, (unsigned long long) instr->len);
-
- /* Do not allow erase past end of device */
- if (unlikely((len + addr) > mtd->size)) {
- printk(KERN_ERR "%s: Erase past end of device\n", __func__);
- return -EINVAL;
- }
+ pr_debug("%s: start=0x%012llx, len=%llu\n", __func__,
+ (unsigned long long)instr->addr,
+ (unsigned long long)instr->len);
if (FLEXONENAND(this)) {
/* Find the eraseregion of this address */
@@ -2524,8 +2503,6 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
return -EINVAL;
}
- instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
-
/* Grab the lock and see if the device is available */
onenand_get_device(mtd, FL_ERASING);
@@ -2558,7 +2535,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
*/
static void onenand_sync(struct mtd_info *mtd)
{
- DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
+ pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
onenand_get_device(mtd, FL_SYNCING);
@@ -2578,10 +2555,6 @@ static int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
int ret;
- /* Check for invalid offset */
- if (ofs > mtd->size)
- return -EINVAL;
-
onenand_get_device(mtd, FL_READING);
ret = onenand_block_isbad_nolock(mtd, ofs, 0);
onenand_release_device(mtd);
@@ -2602,7 +2575,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct bbm_info *bbm = this->bbm;
u_char buf[2] = {0, 0};
struct mtd_oob_ops ops = {
- .mode = MTD_OOB_PLACE,
+ .mode = MTD_OPS_PLACE_OOB,
.ooblen = 2,
.oobbuf = buf,
.ooboffs = 0,
@@ -2632,7 +2605,6 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
*/
static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
- struct onenand_chip *this = mtd->priv;
int ret;
ret = onenand_block_isbad(mtd, ofs);
@@ -2644,7 +2616,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
onenand_get_device(mtd, FL_WRITING);
- ret = this->block_markbad(mtd, ofs);
+ ret = mtd_block_markbad(mtd, ofs);
onenand_release_device(mtd);
return ret;
}
@@ -2922,7 +2894,7 @@ static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr,
}
/**
- * onenand_otp_write_oob_nolock - [Internal] OneNAND write out-of-band, specific to OTP
+ * onenand_otp_write_oob_nolock - [INTERN] OneNAND write out-of-band, specific to OTP
* @param mtd MTD device structure
* @param to offset to write to
* @param len number of bytes to write
@@ -3170,7 +3142,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
this->wait(mtd, FL_RESETING);
} else {
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooblen = len;
ops.oobbuf = buf;
ops.ooboffs = 0;
@@ -3265,20 +3237,17 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
/**
* onenand_get_fact_prot_info - [MTD Interface] Read factory OTP info
* @param mtd MTD device structure
- * @param buf the databuffer to put/get data
* @param len number of bytes to read
+ * @param retlen pointer to variable to store the number of read bytes
+ * @param buf the databuffer to put/get data
*
* Read factory OTP info.
*/
-static int onenand_get_fact_prot_info(struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
+static int onenand_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
{
- size_t retlen;
- int ret;
-
- ret = onenand_otp_walk(mtd, 0, len, &retlen, (u_char *) buf, NULL, MTD_OTP_FACTORY);
-
- return ret ? : retlen;
+ return onenand_otp_walk(mtd, 0, len, retlen, (u_char *) buf, NULL,
+ MTD_OTP_FACTORY);
}
/**
@@ -3300,20 +3269,17 @@ static int onenand_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
/**
* onenand_get_user_prot_info - [MTD Interface] Read user OTP info
* @param mtd MTD device structure
- * @param buf the databuffer to put/get data
+ * @param retlen pointer to variable to store the number of read bytes
* @param len number of bytes to read
+ * @param buf the databuffer to put/get data
*
* Read user OTP info.
*/
-static int onenand_get_user_prot_info(struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
+static int onenand_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
{
- size_t retlen;
- int ret;
-
- ret = onenand_otp_walk(mtd, 0, len, &retlen, (u_char *) buf, NULL, MTD_OTP_USER);
-
- return ret ? : retlen;
+ return onenand_otp_walk(mtd, 0, len, retlen, (u_char *) buf, NULL,
+ MTD_OTP_USER);
}
/**
@@ -3429,6 +3395,19 @@ static void onenand_check_features(struct mtd_info *mtd)
else if (numbufs == 1) {
this->options |= ONENAND_HAS_4KB_PAGE;
this->options |= ONENAND_HAS_CACHE_PROGRAM;
+ /*
+ * There are two different 4KiB pagesize chips
+ * and no way to tell them apart from the H/W config values.
+ *
+ * To detect the correct NOP for each chip,
+ * check the version ID as a workaround.
+ *
+ * The currently known chips are:
+ * KFM4G16Q4M has NOP 4 with version ID 0x0131
+ * KFM4G16Q5M has NOP 1 with version ID 0x013e
+ */
+ if ((this->version_id & 0xf) == 0xe)
+ this->options |= ONENAND_HAS_NOP_1;
}
case ONENAND_DEVICE_DENSITY_2Gb:
@@ -3539,7 +3518,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
unsigned die, bdry;
- int ret, syscfg, locked;
+ int syscfg, locked;
/* Disable ECC */
syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
@@ -3550,7 +3529,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
this->wait(mtd, FL_SYNCING);
this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
- ret = this->wait(mtd, FL_READING);
+ this->wait(mtd, FL_READING);
bdry = this->read_word(this->base + ONENAND_DATARAM);
if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
@@ -3560,7 +3539,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- ret = this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETING);
printk(KERN_INFO "Die %d boundary: %d%s\n", die,
this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
@@ -3663,7 +3642,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
int i, ret;
int block;
struct mtd_oob_ops ops = {
- .mode = MTD_OOB_PLACE,
+ .mode = MTD_OPS_PLACE_OOB,
.ooboffs = 0,
.ooblen = mtd->oobsize,
.datbuf = NULL,
@@ -3704,7 +3683,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
* flexonenand_set_boundary - Writes the SLC boundary
* @param mtd - mtd info structure
*/
-int flexonenand_set_boundary(struct mtd_info *mtd, int die,
+static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
int boundary, int lock)
{
struct onenand_chip *this = mtd->priv;
@@ -3744,7 +3723,7 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
/* Check is boundary is locked */
this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
- ret = this->wait(mtd, FL_READING);
+ this->wait(mtd, FL_READING);
thisboundary = this->read_word(this->base + ONENAND_DATARAM);
if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
@@ -3845,7 +3824,7 @@ static int onenand_chip_probe(struct mtd_info *mtd)
static int onenand_probe(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
- int maf_id, dev_id, ver_id;
+ int dev_id, ver_id;
int density;
int ret;
@@ -3853,8 +3832,7 @@ static int onenand_probe(struct mtd_info *mtd)
if (ret)
return ret;
- /* Read manufacturer and device IDs from Register */
- maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
+ /* Device and version IDs from Register */
dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
@@ -4010,11 +3988,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
/* Allocate buffers, if necessary */
if (!this->page_buf) {
this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL);
- if (!this->page_buf) {
- printk(KERN_ERR "%s: Can't allocate page_buf\n",
- __func__);
+ if (!this->page_buf)
return -ENOMEM;
- }
#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
this->verify_buf = kzalloc(mtd->writesize, GFP_KERNEL);
if (!this->verify_buf) {
@@ -4027,8 +4002,6 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
if (!this->oob_buf) {
this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
if (!this->oob_buf) {
- printk(KERN_ERR "%s: Can't allocate oob_buf\n",
- __func__);
if (this->options & ONENAND_PAGEBUF_ALLOC) {
this->options &= ~ONENAND_PAGEBUF_ALLOC;
kfree(this->page_buf);
@@ -4054,6 +4027,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
this->ecclayout = &onenand_oob_128;
mtd->subpage_sft = 2;
}
+ if (ONENAND_IS_NOP_1(this))
+ mtd->subpage_sft = 0;
break;
case 64:
this->ecclayout = &onenand_oob_64;
@@ -4088,33 +4063,34 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->oobavail = this->ecclayout->oobavail;
mtd->ecclayout = this->ecclayout;
+ mtd->ecc_strength = 1;
/* Fill in remaining MTD driver data */
mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->erase = onenand_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
- mtd->read = onenand_read;
- mtd->write = onenand_write;
- mtd->read_oob = onenand_read_oob;
- mtd->write_oob = onenand_write_oob;
- mtd->panic_write = onenand_panic_write;
+ mtd->_erase = onenand_erase;
+ mtd->_point = NULL;
+ mtd->_unpoint = NULL;
+ mtd->_read = onenand_read;
+ mtd->_write = onenand_write;
+ mtd->_read_oob = onenand_read_oob;
+ mtd->_write_oob = onenand_write_oob;
+ mtd->_panic_write = onenand_panic_write;
#ifdef CONFIG_MTD_ONENAND_OTP
- mtd->get_fact_prot_info = onenand_get_fact_prot_info;
- mtd->read_fact_prot_reg = onenand_read_fact_prot_reg;
- mtd->get_user_prot_info = onenand_get_user_prot_info;
- mtd->read_user_prot_reg = onenand_read_user_prot_reg;
- mtd->write_user_prot_reg = onenand_write_user_prot_reg;
- mtd->lock_user_prot_reg = onenand_lock_user_prot_reg;
+ mtd->_get_fact_prot_info = onenand_get_fact_prot_info;
+ mtd->_read_fact_prot_reg = onenand_read_fact_prot_reg;
+ mtd->_get_user_prot_info = onenand_get_user_prot_info;
+ mtd->_read_user_prot_reg = onenand_read_user_prot_reg;
+ mtd->_write_user_prot_reg = onenand_write_user_prot_reg;
+ mtd->_lock_user_prot_reg = onenand_lock_user_prot_reg;
#endif
- mtd->sync = onenand_sync;
- mtd->lock = onenand_lock;
- mtd->unlock = onenand_unlock;
- mtd->suspend = onenand_suspend;
- mtd->resume = onenand_resume;
- mtd->block_isbad = onenand_block_isbad;
- mtd->block_markbad = onenand_block_markbad;
+ mtd->_sync = onenand_sync;
+ mtd->_lock = onenand_lock;
+ mtd->_unlock = onenand_unlock;
+ mtd->_suspend = onenand_suspend;
+ mtd->_resume = onenand_resume;
+ mtd->_block_isbad = onenand_block_isbad;
+ mtd->_block_markbad = onenand_block_markbad;
mtd->owner = THIS_MODULE;
mtd->writebufsize = mtd->writesize;
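Note, not part of the patch: the onenand_base hunks above rename the OOB modes (MTD_OOB_AUTO becomes MTD_OPS_AUTO_OOB and so on), test ECC results with mtd_is_eccerr(), and move the mtd_info callbacks behind the underscore-prefixed members so callers go through the mtd_*() wrappers. A caller-side sketch, with assumed names, of how the new constants and helpers fit together:

#include <linux/mtd/mtd.h>

static int example_read_oob(struct mtd_info *mtd, loff_t offs, u_char *buf)
{
	struct mtd_oob_ops ops = {
		.mode	 = MTD_OPS_AUTO_OOB,	/* was MTD_OOB_AUTO */
		.ooblen	 = mtd->oobavail,
		.oobbuf	 = buf,
		.ooboffs = 0,
	};
	int err = mtd_read_oob(mtd, offs, &ops);	/* dispatches to mtd->_read_oob */

	if (mtd_is_eccerr(err))		/* replaces open-coded err == -EBADMSG tests */
		pr_warn("uncorrectable ECC error at 0x%llx\n",
			(unsigned long long)offs);
	return err;
}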
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index fc2c16a0fd1..08d0085f3e9 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
+#include <linux/export.h>
/**
* check_short_pattern - [GENERIC] check if a pattern is in the buffer
@@ -80,7 +81,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
startblock = 0;
from = 0;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooblen = readlen;
ops.oobbuf = buf;
ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
@@ -132,7 +133,6 @@ static inline int onenand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_desc
{
struct onenand_chip *this = mtd->priv;
- bd->options &= ~NAND_BBT_SCANEMPTY;
return create_bbt(mtd, this->page_buf, bd, -1);
}
@@ -153,7 +153,7 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
block = (int) (onenand_block(this, offs) << 1);
res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
- DEBUG(MTD_DEBUG_LEVEL2, "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+ pr_debug("onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
(unsigned int) offs, block >> 1, res);
switch ((int) res) {
@@ -188,10 +188,8 @@ int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
len = this->chipsize >> (this->erase_shift + 2);
/* Allocate memory (2bit per block) and clear the memory bad block table */
bbm->bbt = kzalloc(len, GFP_KERNEL);
- if (!bbm->bbt) {
- printk(KERN_ERR "onenand_scan_bbt: Out of memory\n");
+ if (!bbm->bbt)
return -ENOMEM;
- }
/* Set the bad block position */
bbm->badblockpos = ONENAND_BADBLOCK_POS;
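Note, not part of the patch: the bbt change above, like the rest of the series, replaces the old DEBUG(MTD_DEBUG_LEVEL*, ...) macro with plain pr_debug(). A before/after sketch with an illustrative message; the new form is only emitted when DEBUG or dynamic debug is enabled:

/* old style, removed by this series: */
DEBUG(MTD_DEBUG_LEVEL2, "bbt info for offs 0x%08x\n", (unsigned int)offs);

/* new style used above: */
pr_debug("bbt info for offs 0x%08x\n", (unsigned int)offs);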
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
deleted file mode 100644
index 85399e3accd..00000000000
--- a/drivers/mtd/onenand/onenand_sim.c
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- * linux/drivers/mtd/onenand/onenand_sim.c
- *
- * The OneNAND simulator
- *
- * Copyright © 2005-2007 Samsung Electronics
- * Kyungmin Park <kyungmin.park@samsung.com>
- *
- * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
- * Flex-OneNAND simulator support
- * Copyright (C) Samsung Electronics, 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/onenand.h>
-
-#include <linux/io.h>
-
-#ifndef CONFIG_ONENAND_SIM_MANUFACTURER
-#define CONFIG_ONENAND_SIM_MANUFACTURER 0xec
-#endif
-
-#ifndef CONFIG_ONENAND_SIM_DEVICE_ID
-#define CONFIG_ONENAND_SIM_DEVICE_ID 0x04
-#endif
-
-#define CONFIG_FLEXONENAND ((CONFIG_ONENAND_SIM_DEVICE_ID >> 9) & 1)
-
-#ifndef CONFIG_ONENAND_SIM_VERSION_ID
-#define CONFIG_ONENAND_SIM_VERSION_ID 0x1e
-#endif
-
-#ifndef CONFIG_ONENAND_SIM_TECHNOLOGY_ID
-#define CONFIG_ONENAND_SIM_TECHNOLOGY_ID CONFIG_FLEXONENAND
-#endif
-
-/* Initial boundary values for Flex-OneNAND Simulator */
-#ifndef CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY
-#define CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY 0x01
-#endif
-
-#ifndef CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY
-#define CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY 0x01
-#endif
-
-static int manuf_id = CONFIG_ONENAND_SIM_MANUFACTURER;
-static int device_id = CONFIG_ONENAND_SIM_DEVICE_ID;
-static int version_id = CONFIG_ONENAND_SIM_VERSION_ID;
-static int technology_id = CONFIG_ONENAND_SIM_TECHNOLOGY_ID;
-static int boundary[] = {
- CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY,
- CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY,
-};
-
-struct onenand_flash {
- void __iomem *base;
- void __iomem *data;
-};
-
-#define ONENAND_CORE(flash) (flash->data)
-#define ONENAND_CORE_SPARE(flash, this, offset) \
- ((flash->data) + (this->chipsize) + (offset >> 5))
-
-#define ONENAND_MAIN_AREA(this, offset) \
- (this->base + ONENAND_DATARAM + offset)
-
-#define ONENAND_SPARE_AREA(this, offset) \
- (this->base + ONENAND_SPARERAM + offset)
-
-#define ONENAND_GET_WP_STATUS(this) \
- (readw(this->base + ONENAND_REG_WP_STATUS))
-
-#define ONENAND_SET_WP_STATUS(v, this) \
- (writew(v, this->base + ONENAND_REG_WP_STATUS))
-
-/* It has all 0xff chars */
-#define MAX_ONENAND_PAGESIZE (4096 + 128)
-static unsigned char *ffchars;
-
-#if CONFIG_FLEXONENAND
-#define PARTITION_NAME "Flex-OneNAND simulator partition"
-#else
-#define PARTITION_NAME "OneNAND simulator partition"
-#endif
-
-static struct mtd_partition os_partitions[] = {
- {
- .name = PARTITION_NAME,
- .offset = 0,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-/*
- * OneNAND simulator mtd
- */
-struct onenand_info {
- struct mtd_info mtd;
- struct mtd_partition *parts;
- struct onenand_chip onenand;
- struct onenand_flash flash;
-};
-
-static struct onenand_info *info;
-
-#define DPRINTK(format, args...) \
-do { \
- printk(KERN_DEBUG "%s[%d]: " format "\n", __func__, \
- __LINE__, ##args); \
-} while (0)
-
-/**
- * onenand_lock_handle - Handle Lock scheme
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- *
- * Send lock command to OneNAND device.
- * The lock scheme depends on chip type.
- */
-static void onenand_lock_handle(struct onenand_chip *this, int cmd)
-{
- int block_lock_scheme;
- int status;
-
- status = ONENAND_GET_WP_STATUS(this);
- block_lock_scheme = !(this->options & ONENAND_HAS_CONT_LOCK);
-
- switch (cmd) {
- case ONENAND_CMD_UNLOCK:
- case ONENAND_CMD_UNLOCK_ALL:
- if (block_lock_scheme)
- ONENAND_SET_WP_STATUS(ONENAND_WP_US, this);
- else
- ONENAND_SET_WP_STATUS(status | ONENAND_WP_US, this);
- break;
-
- case ONENAND_CMD_LOCK:
- if (block_lock_scheme)
- ONENAND_SET_WP_STATUS(ONENAND_WP_LS, this);
- else
- ONENAND_SET_WP_STATUS(status | ONENAND_WP_LS, this);
- break;
-
- case ONENAND_CMD_LOCK_TIGHT:
- if (block_lock_scheme)
- ONENAND_SET_WP_STATUS(ONENAND_WP_LTS, this);
- else
- ONENAND_SET_WP_STATUS(status | ONENAND_WP_LTS, this);
- break;
-
- default:
- break;
- }
-}
-
-/**
- * onenand_bootram_handle - Handle BootRAM area
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- *
- * Emulate BootRAM area. It is possible to do basic operation using BootRAM.
- */
-static void onenand_bootram_handle(struct onenand_chip *this, int cmd)
-{
- switch (cmd) {
- case ONENAND_CMD_READID:
- writew(manuf_id, this->base);
- writew(device_id, this->base + 2);
- writew(version_id, this->base + 4);
- break;
-
- default:
- /* REVIST: Handle other commands */
- break;
- }
-}
-
-/**
- * onenand_update_interrupt - Set interrupt register
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- *
- * Update interrupt register. The status depends on command.
- */
-static void onenand_update_interrupt(struct onenand_chip *this, int cmd)
-{
- int interrupt = ONENAND_INT_MASTER;
-
- switch (cmd) {
- case ONENAND_CMD_READ:
- case ONENAND_CMD_READOOB:
- interrupt |= ONENAND_INT_READ;
- break;
-
- case ONENAND_CMD_PROG:
- case ONENAND_CMD_PROGOOB:
- interrupt |= ONENAND_INT_WRITE;
- break;
-
- case ONENAND_CMD_ERASE:
- interrupt |= ONENAND_INT_ERASE;
- break;
-
- case ONENAND_CMD_RESET:
- interrupt |= ONENAND_INT_RESET;
- break;
-
- default:
- break;
- }
-
- writew(interrupt, this->base + ONENAND_REG_INTERRUPT);
-}
-
-/**
- * onenand_check_overwrite - Check if over-write happened
- * @dest: The destination pointer
- * @src: The source pointer
- * @count: The length to be check
- *
- * Returns: 0 on same, otherwise 1
- *
- * Compare the source with destination
- */
-static int onenand_check_overwrite(void *dest, void *src, size_t count)
-{
- unsigned int *s = (unsigned int *) src;
- unsigned int *d = (unsigned int *) dest;
- int i;
-
- count >>= 2;
- for (i = 0; i < count; i++)
- if ((*s++ ^ *d++) != 0)
- return 1;
-
- return 0;
-}
-
-/**
- * onenand_data_handle - Handle OneNAND Core and DataRAM
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- * @dataram: Which dataram used
- * @offset: The offset to OneNAND Core
- *
- * Copy data from OneNAND Core to DataRAM (read)
- * Copy data from DataRAM to OneNAND Core (write)
- * Erase the OneNAND Core (erase)
- */
-static void onenand_data_handle(struct onenand_chip *this, int cmd,
- int dataram, unsigned int offset)
-{
- struct mtd_info *mtd = &info->mtd;
- struct onenand_flash *flash = this->priv;
- int main_offset, spare_offset, die = 0;
- void __iomem *src;
- void __iomem *dest;
- unsigned int i;
- static int pi_operation;
- int erasesize, rgn;
-
- if (dataram) {
- main_offset = mtd->writesize;
- spare_offset = mtd->oobsize;
- } else {
- main_offset = 0;
- spare_offset = 0;
- }
-
- if (pi_operation) {
- die = readw(this->base + ONENAND_REG_START_ADDRESS2);
- die >>= ONENAND_DDP_SHIFT;
- }
-
- switch (cmd) {
- case FLEXONENAND_CMD_PI_ACCESS:
- pi_operation = 1;
- break;
-
- case ONENAND_CMD_RESET:
- pi_operation = 0;
- break;
-
- case ONENAND_CMD_READ:
- src = ONENAND_CORE(flash) + offset;
- dest = ONENAND_MAIN_AREA(this, main_offset);
- if (pi_operation) {
- writew(boundary[die], this->base + ONENAND_DATARAM);
- break;
- }
- memcpy(dest, src, mtd->writesize);
- /* Fall through */
-
- case ONENAND_CMD_READOOB:
- src = ONENAND_CORE_SPARE(flash, this, offset);
- dest = ONENAND_SPARE_AREA(this, spare_offset);
- memcpy(dest, src, mtd->oobsize);
- break;
-
- case ONENAND_CMD_PROG:
- src = ONENAND_MAIN_AREA(this, main_offset);
- dest = ONENAND_CORE(flash) + offset;
- if (pi_operation) {
- boundary[die] = readw(this->base + ONENAND_DATARAM);
- break;
- }
- /* To handle partial write */
- for (i = 0; i < (1 << mtd->subpage_sft); i++) {
- int off = i * this->subpagesize;
- if (!memcmp(src + off, ffchars, this->subpagesize))
- continue;
- if (memcmp(dest + off, ffchars, this->subpagesize) &&
- onenand_check_overwrite(dest + off, src + off, this->subpagesize))
- printk(KERN_ERR "over-write happened at 0x%08x\n", offset);
- memcpy(dest + off, src + off, this->subpagesize);
- }
- /* Fall through */
-
- case ONENAND_CMD_PROGOOB:
- src = ONENAND_SPARE_AREA(this, spare_offset);
- /* Check all data is 0xff chars */
- if (!memcmp(src, ffchars, mtd->oobsize))
- break;
-
- dest = ONENAND_CORE_SPARE(flash, this, offset);
- if (memcmp(dest, ffchars, mtd->oobsize) &&
- onenand_check_overwrite(dest, src, mtd->oobsize))
- printk(KERN_ERR "OOB: over-write happened at 0x%08x\n",
- offset);
- memcpy(dest, src, mtd->oobsize);
- break;
-
- case ONENAND_CMD_ERASE:
- if (pi_operation)
- break;
-
- if (FLEXONENAND(this)) {
- rgn = flexonenand_region(mtd, offset);
- erasesize = mtd->eraseregions[rgn].erasesize;
- } else
- erasesize = mtd->erasesize;
-
- memset(ONENAND_CORE(flash) + offset, 0xff, erasesize);
- memset(ONENAND_CORE_SPARE(flash, this, offset), 0xff,
- (erasesize >> 5));
- break;
-
- default:
- break;
- }
-}
-
-/**
- * onenand_command_handle - Handle command
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- *
- * Emulate OneNAND command.
- */
-static void onenand_command_handle(struct onenand_chip *this, int cmd)
-{
- unsigned long offset = 0;
- int block = -1, page = -1, bufferram = -1;
- int dataram = 0;
-
- switch (cmd) {
- case ONENAND_CMD_UNLOCK:
- case ONENAND_CMD_LOCK:
- case ONENAND_CMD_LOCK_TIGHT:
- case ONENAND_CMD_UNLOCK_ALL:
- onenand_lock_handle(this, cmd);
- break;
-
- case ONENAND_CMD_BUFFERRAM:
- /* Do nothing */
- return;
-
- default:
- block = (int) readw(this->base + ONENAND_REG_START_ADDRESS1);
- if (block & (1 << ONENAND_DDP_SHIFT)) {
- block &= ~(1 << ONENAND_DDP_SHIFT);
- /* The half of chip block */
- block += this->chipsize >> (this->erase_shift + 1);
- }
- if (cmd == ONENAND_CMD_ERASE)
- break;
-
- page = (int) readw(this->base + ONENAND_REG_START_ADDRESS8);
- page = (page >> ONENAND_FPA_SHIFT);
- bufferram = (int) readw(this->base + ONENAND_REG_START_BUFFER);
- bufferram >>= ONENAND_BSA_SHIFT;
- bufferram &= ONENAND_BSA_DATARAM1;
- dataram = (bufferram == ONENAND_BSA_DATARAM1) ? 1 : 0;
- break;
- }
-
- if (block != -1)
- offset = onenand_addr(this, block);
-
- if (page != -1)
- offset += page << this->page_shift;
-
- onenand_data_handle(this, cmd, dataram, offset);
-
- onenand_update_interrupt(this, cmd);
-}
-
-/**
- * onenand_writew - [OneNAND Interface] Emulate write operation
- * @value: value to write
- * @addr: address to write
- *
- * Write OneNAND register with value
- */
-static void onenand_writew(unsigned short value, void __iomem * addr)
-{
- struct onenand_chip *this = info->mtd.priv;
-
- /* BootRAM handling */
- if (addr < this->base + ONENAND_DATARAM) {
- onenand_bootram_handle(this, value);
- return;
- }
- /* Command handling */
- if (addr == this->base + ONENAND_REG_COMMAND)
- onenand_command_handle(this, value);
-
- writew(value, addr);
-}
-
-/**
- * flash_init - Initialize OneNAND simulator
- * @flash: OneNAND simulator data strucutres
- *
- * Initialize OneNAND simulator.
- */
-static int __init flash_init(struct onenand_flash *flash)
-{
- int density, size;
- int buffer_size;
-
- flash->base = kzalloc(131072, GFP_KERNEL);
- if (!flash->base) {
- printk(KERN_ERR "Unable to allocate base address.\n");
- return -ENOMEM;
- }
-
- density = device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
- density &= ONENAND_DEVICE_DENSITY_MASK;
- size = ((16 << 20) << density);
-
- ONENAND_CORE(flash) = vmalloc(size + (size >> 5));
- if (!ONENAND_CORE(flash)) {
- printk(KERN_ERR "Unable to allocate nand core address.\n");
- kfree(flash->base);
- return -ENOMEM;
- }
-
- memset(ONENAND_CORE(flash), 0xff, size + (size >> 5));
-
- /* Setup registers */
- writew(manuf_id, flash->base + ONENAND_REG_MANUFACTURER_ID);
- writew(device_id, flash->base + ONENAND_REG_DEVICE_ID);
- writew(version_id, flash->base + ONENAND_REG_VERSION_ID);
- writew(technology_id, flash->base + ONENAND_REG_TECHNOLOGY);
-
- if (density < 2 && (!CONFIG_FLEXONENAND))
- buffer_size = 0x0400; /* 1KiB page */
- else
- buffer_size = 0x0800; /* 2KiB page */
- writew(buffer_size, flash->base + ONENAND_REG_DATA_BUFFER_SIZE);
-
- return 0;
-}
-
-/**
- * flash_exit - Clean up OneNAND simulator
- * @flash: OneNAND simulator data structures
- *
- * Clean up OneNAND simulator.
- */
-static void flash_exit(struct onenand_flash *flash)
-{
- vfree(ONENAND_CORE(flash));
- kfree(flash->base);
-}
-
-static int __init onenand_sim_init(void)
-{
- /* Allocate all 0xff chars pointer */
- ffchars = kmalloc(MAX_ONENAND_PAGESIZE, GFP_KERNEL);
- if (!ffchars) {
- printk(KERN_ERR "Unable to allocate ff chars.\n");
- return -ENOMEM;
- }
- memset(ffchars, 0xff, MAX_ONENAND_PAGESIZE);
-
- /* Allocate OneNAND simulator mtd pointer */
- info = kzalloc(sizeof(struct onenand_info), GFP_KERNEL);
- if (!info) {
- printk(KERN_ERR "Unable to allocate core structures.\n");
- kfree(ffchars);
- return -ENOMEM;
- }
-
- /* Override write_word function */
- info->onenand.write_word = onenand_writew;
-
- if (flash_init(&info->flash)) {
- printk(KERN_ERR "Unable to allocate flash.\n");
- kfree(ffchars);
- kfree(info);
- return -ENOMEM;
- }
-
- info->parts = os_partitions;
-
- info->onenand.base = info->flash.base;
- info->onenand.priv = &info->flash;
-
- info->mtd.name = "OneNAND simulator";
- info->mtd.priv = &info->onenand;
- info->mtd.owner = THIS_MODULE;
-
- if (onenand_scan(&info->mtd, 1)) {
- flash_exit(&info->flash);
- kfree(ffchars);
- kfree(info);
- return -ENXIO;
- }
-
- mtd_device_register(&info->mtd, info->parts,
- ARRAY_SIZE(os_partitions));
-
- return 0;
-}
-
-static void __exit onenand_sim_exit(void)
-{
- struct onenand_chip *this = info->mtd.priv;
- struct onenand_flash *flash = this->priv;
-
- onenand_release(&info->mtd);
- flash_exit(flash);
- kfree(ffchars);
- kfree(info);
-}
-
-module_init(onenand_sim_init);
-module_exit(onenand_sim_exit);
-
-MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
-MODULE_DESCRIPTION("The OneNAND flash simulator");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 3306b5b3c73..efb819c3df2 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -23,11 +23,11 @@
#include <linux/mtd/partitions.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <asm/mach/flash.h>
-#include <plat/regs-onenand.h>
-#include <linux/io.h>
+#include "samsung.h"
enum soc_type {
TYPE_S3C6400,
@@ -147,7 +147,6 @@ struct s3c_onenand {
struct resource *dma_res;
unsigned long phys_base;
struct completion complete;
- struct mtd_partition *parts;
};
#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
@@ -157,8 +156,6 @@ struct s3c_onenand {
static struct s3c_onenand *onenand;
-static const char *part_probes[] = { "cmdlinepart", NULL, };
-
static inline int s3c_read_reg(int offset)
{
return readl(onenand->base + offset);
@@ -540,9 +537,9 @@ static int onenand_write_bufferram(struct mtd_info *mtd, int area,
return 0;
}
-static int (*s5pc110_dma_ops)(void *dst, void *src, size_t count, int direction);
+static int (*s5pc110_dma_ops)(dma_addr_t dst, dma_addr_t src, size_t count, int direction);
-static int s5pc110_dma_poll(void *dst, void *src, size_t count, int direction)
+static int s5pc110_dma_poll(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
{
void __iomem *base = onenand->dma_addr;
int status;
@@ -608,7 +605,7 @@ static irqreturn_t s5pc110_onenand_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int s5pc110_dma_irq(void *dst, void *src, size_t count, int direction)
+static int s5pc110_dma_irq(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
{
void __iomem *base = onenand->dma_addr;
int status;
@@ -689,7 +686,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
dev_err(dev, "Couldn't map a %d byte buffer for DMA\n", count);
goto normal;
}
- err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src,
+ err = s5pc110_dma_ops(dma_dst, dma_src,
count, S5PC110_DMA_DIR_READ);
if (page_dma)
@@ -870,15 +867,13 @@ static int s3c_onenand_probe(struct platform_device *pdev)
struct resource *r;
int size, err;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
/* No need to check pdata. the platform data is optional */
size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
mtd = kzalloc(size, GFP_KERNEL);
- if (!mtd) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (!mtd)
return -ENOMEM;
- }
onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
if (!onenand) {
@@ -926,7 +921,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!r) {
dev_err(&pdev->dev, "no buffer memory resource defined\n");
- return -ENOENT;
+ err = -ENOENT;
goto ahb_resource_failed;
}
@@ -967,7 +962,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!r) {
dev_err(&pdev->dev, "no dma memory resource defined\n");
- return -ENOENT;
+ err = -ENOENT;
goto dma_resource_failed;
}
@@ -1017,13 +1012,9 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
- err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
- if (err > 0)
- mtd_device_register(mtd, onenand->parts, err);
- else if (err <= 0 && pdata && pdata->parts)
- mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
- else
- err = mtd_device_register(mtd, NULL, 0);
+ err = mtd_device_parse_register(mtd, NULL, NULL,
+ pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
platform_set_drvdata(pdev, mtd);
@@ -1060,7 +1051,7 @@ onenand_fail:
return err;
}
-static int __devexit s3c_onenand_remove(struct platform_device *pdev)
+static int s3c_onenand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
@@ -1080,7 +1071,6 @@ static int __devexit s3c_onenand_remove(struct platform_device *pdev)
release_mem_region(onenand->base_res->start,
resource_size(onenand->base_res));
- platform_set_drvdata(pdev, NULL);
kfree(onenand->oob_buf);
kfree(onenand->page_buf);
kfree(onenand);
@@ -1137,21 +1127,10 @@ static struct platform_driver s3c_onenand_driver = {
},
.id_table = s3c_onenand_driver_ids,
.probe = s3c_onenand_probe,
- .remove = __devexit_p(s3c_onenand_remove),
+ .remove = s3c_onenand_remove,
};
-static int __init s3c_onenand_init(void)
-{
- return platform_driver_register(&s3c_onenand_driver);
-}
-
-static void __exit s3c_onenand_exit(void)
-{
- platform_driver_unregister(&s3c_onenand_driver);
-}
-
-module_init(s3c_onenand_init);
-module_exit(s3c_onenand_exit);
+module_platform_driver(s3c_onenand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
diff --git a/drivers/mtd/onenand/samsung.h b/drivers/mtd/onenand/samsung.h
new file mode 100644
index 00000000000..9016dc0136a
--- /dev/null
+++ b/drivers/mtd/onenand/samsung.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2008-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SAMSUNG_ONENAND_H__
+#define __SAMSUNG_ONENAND_H__
+
+/*
+ * OneNAND Controller
+ */
+#define MEM_CFG_OFFSET 0x0000
+#define BURST_LEN_OFFSET 0x0010
+#define MEM_RESET_OFFSET 0x0020
+#define INT_ERR_STAT_OFFSET 0x0030
+#define INT_ERR_MASK_OFFSET 0x0040
+#define INT_ERR_ACK_OFFSET 0x0050
+#define ECC_ERR_STAT_OFFSET 0x0060
+#define MANUFACT_ID_OFFSET 0x0070
+#define DEVICE_ID_OFFSET 0x0080
+#define DATA_BUF_SIZE_OFFSET 0x0090
+#define BOOT_BUF_SIZE_OFFSET 0x00A0
+#define BUF_AMOUNT_OFFSET 0x00B0
+#define TECH_OFFSET 0x00C0
+#define FBA_WIDTH_OFFSET 0x00D0
+#define FPA_WIDTH_OFFSET 0x00E0
+#define FSA_WIDTH_OFFSET 0x00F0
+#define TRANS_SPARE_OFFSET 0x0140
+#define DBS_DFS_WIDTH_OFFSET 0x0160
+#define INT_PIN_ENABLE_OFFSET 0x01A0
+#define ACC_CLOCK_OFFSET 0x01C0
+#define FLASH_VER_ID_OFFSET 0x01F0
+#define FLASH_AUX_CNTRL_OFFSET 0x0300 /* s3c64xx only */
+
+#define ONENAND_MEM_RESET_HOT 0x3
+#define ONENAND_MEM_RESET_COLD 0x2
+#define ONENAND_MEM_RESET_WARM 0x1
+
+#define CACHE_OP_ERR (1 << 13)
+#define RST_CMP (1 << 12)
+#define RDY_ACT (1 << 11)
+#define INT_ACT (1 << 10)
+#define UNSUP_CMD (1 << 9)
+#define LOCKED_BLK (1 << 8)
+#define BLK_RW_CMP (1 << 7)
+#define ERS_CMP (1 << 6)
+#define PGM_CMP (1 << 5)
+#define LOAD_CMP (1 << 4)
+#define ERS_FAIL (1 << 3)
+#define PGM_FAIL (1 << 2)
+#define INT_TO (1 << 1)
+#define LD_FAIL_ECC_ERR (1 << 0)
+
+#define TSRF (1 << 0)
+
+#endif
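Note, not part of the patch: the new samsung.h above carries the controller register layout that previously came from <plat/regs-onenand.h>. As an illustrative use of those definitions, and assuming 'base' is the ioremapped controller base from samsung.c, checking and acknowledging the interrupt/error status looks roughly like this:

static bool example_load_failed(void __iomem *base)
{
	u32 stat = readl(base + INT_ERR_STAT_OFFSET);

	writel(stat, base + INT_ERR_ACK_OFFSET);	/* ack the bits we saw */
	return stat & (LD_FAIL_ECC_ERR | LOCKED_BLK);
}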
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index 7a87d07cd79..5da911ebdf4 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -28,6 +28,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/module.h>
struct fis_image_desc {
unsigned char name[16]; // Null terminated name
@@ -56,8 +57,8 @@ static inline int redboot_checksum(struct fis_image_desc *img)
}
static int parse_redboot_partitions(struct mtd_info *master,
- struct mtd_partition **pparts,
- unsigned long fis_origin)
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
{
int nrparts = 0;
struct fis_image_desc *buf;
@@ -77,8 +78,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
if ( directory < 0 ) {
offset = master->size + directory * master->erasesize;
- while (master->block_isbad &&
- master->block_isbad(master, offset)) {
+ while (mtd_block_isbad(master, offset)) {
if (!offset) {
nogood:
printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
@@ -88,8 +88,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
}
} else {
offset = directory * master->erasesize;
- while (master->block_isbad &&
- master->block_isbad(master, offset)) {
+ while (mtd_block_isbad(master, offset)) {
offset += master->erasesize;
if (offset == master->size)
goto nogood;
@@ -103,8 +102,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
master->name, offset);
- ret = master->read(master, offset,
- master->erasesize, &retlen, (void *)buf);
+ ret = mtd_read(master, offset, master->erasesize, &retlen,
+ (void *)buf);
if (ret)
goto out;
@@ -197,11 +196,10 @@ static int parse_redboot_partitions(struct mtd_info *master,
goto out;
}
new_fl->img = &buf[i];
- if (fis_origin) {
- buf[i].flash_base -= fis_origin;
- } else {
- buf[i].flash_base &= master->size-1;
- }
+ if (data && data->origin)
+ buf[i].flash_base -= data->origin;
+ else
+ buf[i].flash_base &= master->size-1;
/* I'm sure the JFFS2 code has done me permanent damage.
* I now think the following is _normal_
@@ -297,9 +295,13 @@ static struct mtd_part_parser redboot_parser = {
.name = "RedBoot",
};
+/* mtd parsers will request the module by parser name */
+MODULE_ALIAS("RedBoot");
+
static int __init redboot_parser_init(void)
{
- return register_mtd_parser(&redboot_parser);
+ register_mtd_parser(&redboot_parser);
+ return 0;
}
static void __exit redboot_parser_exit(void)
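Note, not part of the patch: two related points in the redboot hunk above are that register_mtd_parser() no longer returns an error, and the new MODULE_ALIAS lets the MTD core load the parser module by its parser name. A condensed sketch of the resulting registration boilerplate for a hypothetical parser:

static int example_parse_fn(struct mtd_info *master,
			    struct mtd_partition **pparts,
			    struct mtd_part_parser_data *data)
{
	return 0;	/* no partitions found; illustrative stub */
}

static struct mtd_part_parser example_parser = {
	.owner	  = THIS_MODULE,
	.parse_fn = example_parse_fn,
	.name	  = "example",
};

static int __init example_parser_init(void)
{
	register_mtd_parser(&example_parser);	/* returns void now */
	return 0;
}
module_init(example_parser_init);

/* lets request_module() find this module from the parser name */
MODULE_ALIAS("example");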
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index cc4d1805b86..d1cbf26db2c 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -18,6 +18,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
+#include <linux/module.h>
#include <asm/types.h>
@@ -199,9 +200,9 @@ static int scan_header(struct partition *part)
part->sector_map[i] = -1;
for (i=0, blocks_found=0; i<part->total_blocks; i++) {
- rc = part->mbd.mtd->read(part->mbd.mtd,
- i * part->block_size, part->header_size,
- &retlen, (u_char*)part->header_cache);
+ rc = mtd_read(part->mbd.mtd, i * part->block_size,
+ part->header_size, &retlen,
+ (u_char *)part->header_cache);
if (!rc && retlen != part->header_size)
rc = -EIO;
@@ -249,8 +250,8 @@ static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *b
addr = part->sector_map[sector];
if (addr != -1) {
- rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
- &retlen, (u_char*)buf);
+ rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
@@ -303,9 +304,8 @@ static void erase_callback(struct erase_info *erase)
part->blocks[i].used_sectors = 0;
part->blocks[i].erases++;
- rc = part->mbd.mtd->write(part->mbd.mtd,
- part->blocks[i].offset, sizeof(magic), &retlen,
- (u_char*)&magic);
+ rc = mtd_write(part->mbd.mtd, part->blocks[i].offset, sizeof(magic),
+ &retlen, (u_char *)&magic);
if (!rc && retlen != sizeof(magic))
rc = -EIO;
@@ -341,7 +341,7 @@ static int erase_block(struct partition *part, int block)
part->blocks[block].state = BLOCK_ERASING;
part->blocks[block].free_sectors = 0;
- rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
+ rc = mtd_erase(part->mbd.mtd, erase);
if (rc) {
printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
@@ -371,9 +371,8 @@ static int move_block_contents(struct partition *part, int block_no, u_long *old
if (!map)
goto err2;
- rc = part->mbd.mtd->read(part->mbd.mtd,
- part->blocks[block_no].offset, part->header_size,
- &retlen, (u_char*)map);
+ rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
+ part->header_size, &retlen, (u_char *)map);
if (!rc && retlen != part->header_size)
rc = -EIO;
@@ -412,8 +411,8 @@ static int move_block_contents(struct partition *part, int block_no, u_long *old
}
continue;
}
- rc = part->mbd.mtd->read(part->mbd.mtd, addr,
- SECTOR_SIZE, &retlen, sector_data);
+ rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ sector_data);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
@@ -449,8 +448,7 @@ static int reclaim_block(struct partition *part, u_long *old_sector)
int rc;
/* we have a race if sync doesn't exist */
- if (part->mbd.mtd->sync)
- part->mbd.mtd->sync(part->mbd.mtd);
+ mtd_sync(part->mbd.mtd);
score = 0x7fffffff; /* MAX_INT */
best_block = -1;
@@ -562,8 +560,9 @@ static int find_writable_block(struct partition *part, u_long *old_sector)
}
}
- rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
- part->header_size, &retlen, (u_char*)part->header_cache);
+ rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
+ part->header_size, &retlen,
+ (u_char *)part->header_cache);
if (!rc && retlen != part->header_size)
rc = -EIO;
@@ -594,8 +593,8 @@ static int mark_sector_deleted(struct partition *part, u_long old_addr)
addr = part->blocks[block].offset +
(HEADER_MAP_OFFSET + offset) * sizeof(u16);
- rc = part->mbd.mtd->write(part->mbd.mtd, addr,
- sizeof(del), &retlen, (u_char*)&del);
+ rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
+ (u_char *)&del);
if (!rc && retlen != sizeof(del))
rc = -EIO;
@@ -603,8 +602,7 @@ static int mark_sector_deleted(struct partition *part, u_long old_addr)
if (rc) {
printk(KERN_ERR PREFIX "error writing '%s' at "
"0x%lx\n", part->mbd.mtd->name, addr);
- if (rc)
- goto err;
+ goto err;
}
if (block == part->current_block)
part->header_cache[offset + HEADER_MAP_OFFSET] = del;
@@ -667,8 +665,8 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
block->offset;
- rc = part->mbd.mtd->write(part->mbd.mtd,
- addr, SECTOR_SIZE, &retlen, (u_char*)buf);
+ rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
@@ -676,8 +674,7 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
if (rc) {
printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
part->mbd.mtd->name, addr);
- if (rc)
- goto err;
+ goto err;
}
part->sector_map[sector] = addr;
@@ -687,8 +684,8 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
part->header_cache[i + HEADER_MAP_OFFSET] = entry;
addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
- rc = part->mbd.mtd->write(part->mbd.mtd, addr,
- sizeof(entry), &retlen, (u_char*)&entry);
+ rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
+ (u_char *)&entry);
if (!rc && retlen != sizeof(entry))
rc = -EIO;
@@ -696,8 +693,7 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
if (rc) {
printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
part->mbd.mtd->name, addr);
- if (rc)
- goto err;
+ goto err;
}
block->used_sectors++;
block->free_sectors--;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index ed3d6cd2c6d..cf49c22673b 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -22,10 +22,10 @@
-struct workqueue_struct *cache_flush_workqueue;
+static struct workqueue_struct *cache_flush_workqueue;
static int cache_timeout = 1000;
-module_param(cache_timeout, bool, S_IRUGO);
+module_param(cache_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_timeout,
"Timeout (in ms) for cache flush (1000 ms default");
@@ -34,14 +34,14 @@ module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
-/* ------------------- sysfs attributtes ---------------------------------- */
+/* ------------------- sysfs attributes ---------------------------------- */
struct sm_sysfs_attribute {
struct device_attribute dev_attr;
char *data;
int len;
};
-ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
+static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sm_sysfs_attribute *sm_attr =
@@ -54,20 +54,17 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
#define NUM_ATTRIBUTES 1
#define SM_CIS_VENDOR_OFFSET 0x59
-struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
+static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
{
struct attribute_group *attr_group;
struct attribute **attributes;
struct sm_sysfs_attribute *vendor_attribute;
+ char *vendor;
- int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
- SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);
-
- char *vendor = kmalloc(vendor_len, GFP_KERNEL);
+ vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
+ SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
if (!vendor)
goto error1;
- memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
- vendor[vendor_len] = 0;
/* Initialize sysfs attributes */
vendor_attribute =
@@ -78,7 +75,7 @@ struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
sysfs_attr_init(&vendor_attribute->dev_attr.attr);
vendor_attribute->data = vendor;
- vendor_attribute->len = vendor_len;
+ vendor_attribute->len = strlen(vendor);
vendor_attribute->dev_attr.attr.name = "vendor";
vendor_attribute->dev_attr.attr.mode = S_IRUGO;
vendor_attribute->dev_attr.show = sm_attr_show;
@@ -107,7 +104,7 @@ error1:
return NULL;
}
-void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
+static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
{
struct attribute **attributes = ftl->disk_attributes->attrs;
int i;
@@ -138,7 +135,7 @@ static int sm_get_lba(uint8_t *lba)
if ((lba[0] & 0xF8) != 0x10)
return -2;
- /* check parity - endianess doesn't matter */
+ /* check parity - endianness doesn't matter */
if (hweight16(*(uint16_t *)lba) & 1)
return -2;
@@ -147,7 +144,7 @@ static int sm_get_lba(uint8_t *lba)
/*
- * Read LBA asscociated with block
+ * Read LBA associated with block
* returns -1, if block is erased
* returns -2 if error happens
*/
@@ -252,11 +249,11 @@ static int sm_read_sector(struct sm_ftl *ftl,
return 0;
}
- /* User might not need the oob, but we do for data vertification */
+ /* User might not need the oob, but we do for data verification */
if (!oob)
oob = &tmp_oob;
- ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
+ ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = SM_OOB_SIZE;
ops.oobbuf = (void *)oob;
@@ -276,12 +273,12 @@ again:
return ret;
}
- /* Unfortunelly, oob read will _always_ succeed,
+ /* Unfortunately, oob read will _always_ succeed,
despite card removal..... */
- ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+ ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Test for unknown errors */
- if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) {
+ if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
dbg("read of block %d at zone %d, failed due to error (%d)",
block, zone, ret);
goto again;
@@ -306,7 +303,7 @@ again:
}
/* Test ECC*/
- if (ret == -EBADMSG ||
+ if (mtd_is_eccerr(ret) ||
(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
dbg("read of block %d at zone %d, failed due to ECC error",
@@ -336,17 +333,16 @@ static int sm_write_sector(struct sm_ftl *ftl,
if (ftl->unstable)
return -EIO;
- ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
+ ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
ops.len = SM_SECTOR_SIZE;
ops.datbuf = buffer;
ops.ooboffs = 0;
ops.ooblen = SM_OOB_SIZE;
ops.oobbuf = (void *)oob;
- ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+ ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Now we assume that hardware will catch write bitflip errors */
- /* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
if (ret) {
dbg("write to block %d at zone %d, failed with error %d",
@@ -447,14 +443,14 @@ static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
/* We aren't checking the return value, because we don't care */
/* This also fails on fake xD cards, but I guess these won't expose
- any bad blocks till fail completly */
+ any bad blocks till fail completely */
for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}
/*
* Erase a block within a zone
- * If erase succedes, it updates free block fifo, otherwise marks block as bad
+ * If erase succeeds, it updates free block fifo, otherwise marks block as bad
*/
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
int put_free)
@@ -479,7 +475,7 @@ static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
return -EIO;
}
- if (mtd->erase(mtd, &erase)) {
+ if (mtd_erase(mtd, &erase)) {
sm_printk("erase of block %d in zone %d failed",
block, zone_num);
goto error;
@@ -510,7 +506,7 @@ static void sm_erase_callback(struct erase_info *self)
complete(&ftl->erase_completion);
}
-/* Throughtly test that block is valid. */
+/* Thoroughly test that block is valid. */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
int boffset;
@@ -526,7 +522,7 @@ static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
for (boffset = 0; boffset < ftl->block_size;
boffset += SM_SECTOR_SIZE) {
- /* This shoudn't happen anyway */
+ /* This shouldn't happen anyway */
if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
return -2;
@@ -572,7 +568,7 @@ static const uint8_t cis_signature[] = {
};
/* Find out media parameters.
* This ideally has to be based on nand id, but for now device size is enough */
-int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
+static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{
int i;
int size_in_megs = mtd->size / (1024 * 1024);
@@ -645,8 +641,8 @@ int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
return -ENODEV;
- /* We use these functions for IO */
- if (!mtd->read_oob || !mtd->write_oob)
+ /* We use OOB */
+ if (!mtd_has_oob(mtd))
return -ENODEV;
/* Find geometry information */
@@ -879,7 +875,7 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
}
/* Get and automatically initialize an FTL mapping for one zone */
-struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
+static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
{
struct ftl_zone *zone;
int error;
@@ -900,7 +896,7 @@ struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
/* ----------------- cache handling ------------------------------------------*/
/* Initialize the one block cache */
-void sm_cache_init(struct sm_ftl *ftl)
+static void sm_cache_init(struct sm_ftl *ftl)
{
ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
ftl->cache_clean = 1;
@@ -910,7 +906,7 @@ void sm_cache_init(struct sm_ftl *ftl)
}
/* Put sector in one block cache */
-void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
+static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
{
memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
@@ -918,7 +914,7 @@ void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
}
/* Read a sector from the cache */
-int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
+static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
{
if (test_bit(boffset / SM_SECTOR_SIZE,
&ftl->cache_data_invalid_bitmap))
@@ -929,7 +925,7 @@ int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
}
/* Write the cache to hardware */
-int sm_cache_flush(struct sm_ftl *ftl)
+static int sm_cache_flush(struct sm_ftl *ftl)
{
struct ftl_zone *zone;
@@ -1108,7 +1104,7 @@ static int sm_flush(struct mtd_blktrans_dev *dev)
}
/* outside interface: device is released */
-static int sm_release(struct mtd_blktrans_dev *dev)
+static void sm_release(struct mtd_blktrans_dev *dev)
{
struct sm_ftl *ftl = dev->priv;
@@ -1117,7 +1113,6 @@ static int sm_release(struct mtd_blktrans_dev *dev)
cancel_work_sync(&ftl->flush_work);
sm_cache_flush(ftl);
mutex_unlock(&ftl->mutex);
- return 0;
}
/* outside interface: get geometry */
@@ -1256,7 +1251,7 @@ static void sm_remove_dev(struct mtd_blktrans_dev *dev)
static struct mtd_blktrans_ops sm_ftl_ops = {
.name = "smblk",
- .major = -1,
+ .major = 0,
.part_bits = SM_FTL_PARTN_BITS,
.blksize = SM_SECTOR_SIZE,
.getgeo = sm_getgeo,
@@ -1276,10 +1271,10 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
static __init int sm_module_init(void)
{
int error = 0;
- cache_flush_workqueue = create_freezable_workqueue("smflush");
- if (IS_ERR(cache_flush_workqueue))
- return PTR_ERR(cache_flush_workqueue);
+ cache_flush_workqueue = create_freezable_workqueue("smflush");
+ if (!cache_flush_workqueue)
+ return -ENOMEM;
error = register_mtd_blktrans(&sm_ftl_ops);
if (error)
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
new file mode 100644
index 00000000000..f8acfa4310e
--- /dev/null
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -0,0 +1,17 @@
+menuconfig MTD_SPI_NOR
+ tristate "SPI-NOR device support"
+ depends on MTD
+ help
+	  This is the framework for SPI NOR flash. It can be used both by
+	  SPI device drivers and by the SPI-NOR device driver.
+
+if MTD_SPI_NOR
+
+config SPI_FSL_QUADSPI
+ tristate "Freescale Quad SPI controller"
+ depends on ARCH_MXC
+ help
+	  This enables support for the Freescale Quad SPI controller in master
+	  mode. Only SPI NOR flash is connected to this controller for now.
+
+endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
new file mode 100644
index 00000000000..6a7ce146224
--- /dev/null
+++ b/drivers/mtd/spi-nor/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
+obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
new file mode 100644
index 00000000000..8d659a2888d
--- /dev/null
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -0,0 +1,1009 @@
+/*
+ * Freescale QuadSPI driver.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/completion.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+
+/* The registers */
+#define QUADSPI_MCR 0x00
+#define QUADSPI_MCR_RESERVED_SHIFT 16
+#define QUADSPI_MCR_RESERVED_MASK (0xF << QUADSPI_MCR_RESERVED_SHIFT)
+#define QUADSPI_MCR_MDIS_SHIFT 14
+#define QUADSPI_MCR_MDIS_MASK (1 << QUADSPI_MCR_MDIS_SHIFT)
+#define QUADSPI_MCR_CLR_TXF_SHIFT 11
+#define QUADSPI_MCR_CLR_TXF_MASK (1 << QUADSPI_MCR_CLR_TXF_SHIFT)
+#define QUADSPI_MCR_CLR_RXF_SHIFT 10
+#define QUADSPI_MCR_CLR_RXF_MASK (1 << QUADSPI_MCR_CLR_RXF_SHIFT)
+#define QUADSPI_MCR_DDR_EN_SHIFT 7
+#define QUADSPI_MCR_DDR_EN_MASK (1 << QUADSPI_MCR_DDR_EN_SHIFT)
+#define QUADSPI_MCR_END_CFG_SHIFT 2
+#define QUADSPI_MCR_END_CFG_MASK (3 << QUADSPI_MCR_END_CFG_SHIFT)
+#define QUADSPI_MCR_SWRSTHD_SHIFT 1
+#define QUADSPI_MCR_SWRSTHD_MASK (1 << QUADSPI_MCR_SWRSTHD_SHIFT)
+#define QUADSPI_MCR_SWRSTSD_SHIFT 0
+#define QUADSPI_MCR_SWRSTSD_MASK (1 << QUADSPI_MCR_SWRSTSD_SHIFT)
+
+#define QUADSPI_IPCR 0x08
+#define QUADSPI_IPCR_SEQID_SHIFT 24
+#define QUADSPI_IPCR_SEQID_MASK (0xF << QUADSPI_IPCR_SEQID_SHIFT)
+
+#define QUADSPI_BUF0CR 0x10
+#define QUADSPI_BUF1CR 0x14
+#define QUADSPI_BUF2CR 0x18
+#define QUADSPI_BUFXCR_INVALID_MSTRID 0xe
+
+#define QUADSPI_BUF3CR 0x1c
+#define QUADSPI_BUF3CR_ALLMST_SHIFT 31
+#define QUADSPI_BUF3CR_ALLMST (1 << QUADSPI_BUF3CR_ALLMST_SHIFT)
+
+#define QUADSPI_BFGENCR 0x20
+#define QUADSPI_BFGENCR_PAR_EN_SHIFT 16
+#define QUADSPI_BFGENCR_PAR_EN_MASK (1 << (QUADSPI_BFGENCR_PAR_EN_SHIFT))
+#define QUADSPI_BFGENCR_SEQID_SHIFT 12
+#define QUADSPI_BFGENCR_SEQID_MASK (0xF << QUADSPI_BFGENCR_SEQID_SHIFT)
+
+#define QUADSPI_BUF0IND 0x30
+#define QUADSPI_BUF1IND 0x34
+#define QUADSPI_BUF2IND 0x38
+#define QUADSPI_SFAR 0x100
+
+#define QUADSPI_SMPR 0x108
+#define QUADSPI_SMPR_DDRSMP_SHIFT 16
+#define QUADSPI_SMPR_DDRSMP_MASK (7 << QUADSPI_SMPR_DDRSMP_SHIFT)
+#define QUADSPI_SMPR_FSDLY_SHIFT 6
+#define QUADSPI_SMPR_FSDLY_MASK (1 << QUADSPI_SMPR_FSDLY_SHIFT)
+#define QUADSPI_SMPR_FSPHS_SHIFT 5
+#define QUADSPI_SMPR_FSPHS_MASK (1 << QUADSPI_SMPR_FSPHS_SHIFT)
+#define QUADSPI_SMPR_HSENA_SHIFT 0
+#define QUADSPI_SMPR_HSENA_MASK (1 << QUADSPI_SMPR_HSENA_SHIFT)
+
+#define QUADSPI_RBSR 0x10c
+#define QUADSPI_RBSR_RDBFL_SHIFT 8
+#define QUADSPI_RBSR_RDBFL_MASK (0x3F << QUADSPI_RBSR_RDBFL_SHIFT)
+
+#define QUADSPI_RBCT 0x110
+#define QUADSPI_RBCT_WMRK_MASK 0x1F
+#define QUADSPI_RBCT_RXBRD_SHIFT 8
+#define QUADSPI_RBCT_RXBRD_USEIPS (0x1 << QUADSPI_RBCT_RXBRD_SHIFT)
+
+#define QUADSPI_TBSR 0x150
+#define QUADSPI_TBDR 0x154
+#define QUADSPI_SR 0x15c
+#define QUADSPI_SR_IP_ACC_SHIFT 1
+#define QUADSPI_SR_IP_ACC_MASK (0x1 << QUADSPI_SR_IP_ACC_SHIFT)
+#define QUADSPI_SR_AHB_ACC_SHIFT 2
+#define QUADSPI_SR_AHB_ACC_MASK (0x1 << QUADSPI_SR_AHB_ACC_SHIFT)
+
+#define QUADSPI_FR 0x160
+#define QUADSPI_FR_TFF_MASK 0x1
+
+#define QUADSPI_SFA1AD 0x180
+#define QUADSPI_SFA2AD 0x184
+#define QUADSPI_SFB1AD 0x188
+#define QUADSPI_SFB2AD 0x18c
+#define QUADSPI_RBDR 0x200
+
+#define QUADSPI_LUTKEY 0x300
+#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0
+
+#define QUADSPI_LCKCR 0x304
+#define QUADSPI_LCKER_LOCK 0x1
+#define QUADSPI_LCKER_UNLOCK 0x2
+
+#define QUADSPI_RSER 0x164
+#define QUADSPI_RSER_TFIE (0x1 << 0)
+
+#define QUADSPI_LUT_BASE 0x310
+
+/*
+ * The definition of the LUT register is shown below:
+ *
+ * ---------------------------------------------------
+ * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
+ * ---------------------------------------------------
+ */
+#define OPRND0_SHIFT 0
+#define PAD0_SHIFT 8
+#define INSTR0_SHIFT 10
+#define OPRND1_SHIFT 16
+
+/* Instruction set for the LUT register. */
+#define LUT_STOP 0
+#define LUT_CMD 1
+#define LUT_ADDR 2
+#define LUT_DUMMY 3
+#define LUT_MODE 4
+#define LUT_MODE2 5
+#define LUT_MODE4 6
+#define LUT_READ 7
+#define LUT_WRITE 8
+#define LUT_JMP_ON_CS 9
+#define LUT_ADDR_DDR 10
+#define LUT_MODE_DDR 11
+#define LUT_MODE2_DDR 12
+#define LUT_MODE4_DDR 13
+#define LUT_READ_DDR 14
+#define LUT_WRITE_DDR 15
+#define LUT_DATA_LEARN 16
+
+/*
+ * The PAD definitions for the LUT register.
+ *
+ * The pad stands for the number of IO lines (IO[0:3]) used.
+ * For example, a Quad read needs four IO lines, so you should
+ * set LUT_PAD4, which means we use four IO lines.
+ */
+#define LUT_PAD1 0
+#define LUT_PAD2 1
+#define LUT_PAD4 2
+
+/* Operands for the LUT register. */
+#define ADDR24BIT 0x18
+#define ADDR32BIT 0x20
+
+/* Macros for constructing the LUT register. */
+#define LUT0(ins, pad, opr) \
+ (((opr) << OPRND0_SHIFT) | ((LUT_##pad) << PAD0_SHIFT) | \
+ ((LUT_##ins) << INSTR0_SHIFT))
+
+#define LUT1(ins, pad, opr) (LUT0(ins, pad, opr) << OPRND1_SHIFT)
+
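+/*
+ * A worked example of the encoding (assuming the standard JEDEC opcode
+ * values 0x06 for SPINOR_OP_WREN and 0x05 for SPINOR_OP_RDSR):
+ *
+ *   LUT0(CMD, PAD1, SPINOR_OP_WREN)
+ *     = (0x06 << OPRND0_SHIFT) | (LUT_PAD1 << PAD0_SHIFT) | (LUT_CMD << INSTR0_SHIFT)
+ *     = 0x00000406
+ *
+ *   LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(READ, PAD1, 0x1)
+ *     = 0x1c010405
+ *
+ * i.e. one 32-bit LUT word can hold two instruction/pad/operand triplets.
+ */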
+/* other macros for LUT register. */
+#define QUADSPI_LUT(x) (QUADSPI_LUT_BASE + (x) * 4)
+#define QUADSPI_LUT_NUM 64
+
+/* SEQID -- we can have 16 seqids at most. */
+#define SEQID_QUAD_READ 0
+#define SEQID_WREN 1
+#define SEQID_WRDI 2
+#define SEQID_RDSR 3
+#define SEQID_SE 4
+#define SEQID_CHIP_ERASE 5
+#define SEQID_PP 6
+#define SEQID_RDID 7
+#define SEQID_WRSR 8
+#define SEQID_RDCR 9
+#define SEQID_EN4B 10
+#define SEQID_BRWR 11
+
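+/*
+ * Each sequence ID owns four consecutive LUT entries, so a sequence is
+ * programmed starting at QUADSPI_LUT(SEQID_xxx * 4); see the
+ * "lut_base = SEQID_PP * 4" style assignments in fsl_qspi_init_lut().
+ */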
+enum fsl_qspi_devtype {
+ FSL_QUADSPI_VYBRID,
+ FSL_QUADSPI_IMX6SX,
+};
+
+struct fsl_qspi_devtype_data {
+ enum fsl_qspi_devtype devtype;
+ int rxfifo;
+ int txfifo;
+};
+
+static struct fsl_qspi_devtype_data vybrid_data = {
+ .devtype = FSL_QUADSPI_VYBRID,
+ .rxfifo = 128,
+ .txfifo = 64
+};
+
+static struct fsl_qspi_devtype_data imx6sx_data = {
+ .devtype = FSL_QUADSPI_IMX6SX,
+ .rxfifo = 128,
+ .txfifo = 512
+};
+
+#define FSL_QSPI_MAX_CHIP 4
+struct fsl_qspi {
+ struct mtd_info mtd[FSL_QSPI_MAX_CHIP];
+ struct spi_nor nor[FSL_QSPI_MAX_CHIP];
+ void __iomem *iobase;
+ void __iomem *ahb_base; /* Used when read from AHB bus */
+ u32 memmap_phy;
+ struct clk *clk, *clk_en;
+ struct device *dev;
+ struct completion c;
+ struct fsl_qspi_devtype_data *devtype_data;
+ u32 nor_size;
+ u32 nor_num;
+ u32 clk_rate;
+ unsigned int chip_base_addr; /* We may support two chips. */
+};
+
+static inline int is_vybrid_qspi(struct fsl_qspi *q)
+{
+ return q->devtype_data->devtype == FSL_QUADSPI_VYBRID;
+}
+
+static inline int is_imx6sx_qspi(struct fsl_qspi *q)
+{
+ return q->devtype_data->devtype == FSL_QUADSPI_IMX6SX;
+}
+
+/*
+ * An IC bug makes us re-arrange the 32-bit data.
+ * Later chips, such as the IMX6SX, have fixed this bug.
+ */
+static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
+{
+ return is_vybrid_qspi(q) ? __swab32(a) : a;
+}
+
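+/*
+ * For illustration: on the Vybrid each 32-bit FIFO word is byte-swapped,
+ * so 0x12345678 is exchanged to 0x78563412, while on the i.MX6SX the
+ * value is passed through unchanged.
+ */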
+static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q)
+{
+ writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ writel(QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
+}
+
+static inline void fsl_qspi_lock_lut(struct fsl_qspi *q)
+{
+ writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ writel(QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
+}
+
+static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
+{
+ struct fsl_qspi *q = dev_id;
+ u32 reg;
+
+ /* clear interrupt */
+ reg = readl(q->iobase + QUADSPI_FR);
+ writel(reg, q->iobase + QUADSPI_FR);
+
+ if (reg & QUADSPI_FR_TFF_MASK)
+ complete(&q->c);
+
+ dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", q->chip_base_addr, reg);
+ return IRQ_HANDLED;
+}
+
+static void fsl_qspi_init_lut(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ int rxfifo = q->devtype_data->rxfifo;
+ u32 lut_base;
+ u8 cmd, addrlen, dummy;
+ int i;
+
+ fsl_qspi_unlock_lut(q);
+
+	/* Clear the entire LUT table */
+ for (i = 0; i < QUADSPI_LUT_NUM; i++)
+ writel(0, base + QUADSPI_LUT_BASE + i * 4);
+
+ /* Quad Read */
+ lut_base = SEQID_QUAD_READ * 4;
+
+ if (q->nor_size <= SZ_16M) {
+ cmd = SPINOR_OP_READ_1_1_4;
+ addrlen = ADDR24BIT;
+ dummy = 8;
+ } else {
+ /* use the 4-byte address */
+ cmd = SPINOR_OP_READ_1_1_4;
+ addrlen = ADDR32BIT;
+ dummy = 8;
+ }
+
+ writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ base + QUADSPI_LUT(lut_base));
+ writel(LUT0(DUMMY, PAD1, dummy) | LUT1(READ, PAD4, rxfifo),
+ base + QUADSPI_LUT(lut_base + 1));
+
+ /* Write enable */
+ lut_base = SEQID_WREN * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_WREN), base + QUADSPI_LUT(lut_base));
+
+ /* Page Program */
+ lut_base = SEQID_PP * 4;
+
+ if (q->nor_size <= SZ_16M) {
+ cmd = SPINOR_OP_PP;
+ addrlen = ADDR24BIT;
+ } else {
+ /* use the 4-byte address */
+ cmd = SPINOR_OP_PP;
+ addrlen = ADDR32BIT;
+ }
+
+ writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ base + QUADSPI_LUT(lut_base));
+ writel(LUT0(WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1));
+
+ /* Read Status */
+ lut_base = SEQID_RDSR * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(READ, PAD1, 0x1),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Erase a sector */
+ lut_base = SEQID_SE * 4;
+
+ if (q->nor_size <= SZ_16M) {
+ cmd = SPINOR_OP_SE;
+ addrlen = ADDR24BIT;
+ } else {
+ /* use the 4-byte address */
+ cmd = SPINOR_OP_SE;
+ addrlen = ADDR32BIT;
+ }
+
+ writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Erase the whole chip */
+ lut_base = SEQID_CHIP_ERASE * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE),
+ base + QUADSPI_LUT(lut_base));
+
+ /* READ ID */
+ lut_base = SEQID_RDID * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(READ, PAD1, 0x8),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Write Register */
+ lut_base = SEQID_WRSR * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(WRITE, PAD1, 0x2),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Read Configuration Register */
+ lut_base = SEQID_RDCR * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(READ, PAD1, 0x1),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Write disable */
+ lut_base = SEQID_WRDI * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_WRDI), base + QUADSPI_LUT(lut_base));
+
+ /* Enter 4 Byte Mode (Micron) */
+ lut_base = SEQID_EN4B * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_EN4B), base + QUADSPI_LUT(lut_base));
+
+ /* Enter 4 Byte Mode (Spansion) */
+ lut_base = SEQID_BRWR * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_BRWR), base + QUADSPI_LUT(lut_base));
+
+ fsl_qspi_lock_lut(q);
+}
+
+/* Get the SEQID for the command */
+static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
+{
+ switch (cmd) {
+ case SPINOR_OP_READ_1_1_4:
+ return SEQID_QUAD_READ;
+ case SPINOR_OP_WREN:
+ return SEQID_WREN;
+ case SPINOR_OP_WRDI:
+ return SEQID_WRDI;
+ case SPINOR_OP_RDSR:
+ return SEQID_RDSR;
+ case SPINOR_OP_SE:
+ return SEQID_SE;
+ case SPINOR_OP_CHIP_ERASE:
+ return SEQID_CHIP_ERASE;
+ case SPINOR_OP_PP:
+ return SEQID_PP;
+ case SPINOR_OP_RDID:
+ return SEQID_RDID;
+ case SPINOR_OP_WRSR:
+ return SEQID_WRSR;
+ case SPINOR_OP_RDCR:
+ return SEQID_RDCR;
+ case SPINOR_OP_EN4B:
+ return SEQID_EN4B;
+ case SPINOR_OP_BRWR:
+ return SEQID_BRWR;
+ default:
+ dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd);
+ break;
+ }
+ return -EINVAL;
+}
+
+static int
+fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
+{
+ void __iomem *base = q->iobase;
+ int seqid;
+ u32 reg, reg2;
+ int err;
+
+ init_completion(&q->c);
+ dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len:%d, cmd:%.2x\n",
+ q->chip_base_addr, addr, len, cmd);
+
+ /* save the reg */
+ reg = readl(base + QUADSPI_MCR);
+
+ writel(q->memmap_phy + q->chip_base_addr + addr, base + QUADSPI_SFAR);
+ writel(QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS,
+ base + QUADSPI_RBCT);
+ writel(reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR);
+
+ do {
+ reg2 = readl(base + QUADSPI_SR);
+ if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) {
+ udelay(1);
+ dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2);
+ continue;
+ }
+ break;
+ } while (1);
+
+ /* trigger the LUT now */
+ seqid = fsl_qspi_get_seqid(q, cmd);
+ writel((seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, base + QUADSPI_IPCR);
+
+ /* Wait for the interrupt. */
+ err = wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000));
+ if (!err) {
+ dev_err(q->dev,
+ "cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n",
+ cmd, addr, readl(base + QUADSPI_FR),
+ readl(base + QUADSPI_SR));
+ err = -ETIMEDOUT;
+ } else {
+ err = 0;
+ }
+
+ /* restore the MCR */
+ writel(reg, base + QUADSPI_MCR);
+
+ return err;
+}
+
+/* Read out the data from the QUADSPI_RBDR buffer registers. */
+static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf)
+{
+ u32 tmp;
+ int i = 0;
+
+ while (len > 0) {
+ tmp = readl(q->iobase + QUADSPI_RBDR + i * 4);
+ tmp = fsl_qspi_endian_xchg(q, tmp);
+ dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n",
+ q->chip_base_addr, tmp);
+
+ if (len >= 4) {
+ *((u32 *)rxbuf) = tmp;
+ rxbuf += 4;
+ } else {
+ memcpy(rxbuf, &tmp, len);
+ break;
+ }
+
+ len -= 4;
+ i++;
+ }
+}
+
+/*
+ * If we have changed the content of the flash by writing or erasing,
+ * we need to invalidate the AHB buffer. If we do not do so, we may read out
+ * the wrong data. The spec tells us to reset the AHB domain and the Serial
+ * Flash domain at the same time.
+ */
+static inline void fsl_qspi_invalid(struct fsl_qspi *q)
+{
+ u32 reg;
+
+ reg = readl(q->iobase + QUADSPI_MCR);
+ reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
+ writel(reg, q->iobase + QUADSPI_MCR);
+
+ /*
+	 * The minimum delay is 1 AHB clock + 2 SFCK clocks.
+	 * A delay of 1 us is enough.
+ */
+ udelay(1);
+
+ reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
+ writel(reg, q->iobase + QUADSPI_MCR);
+}
+
+static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
+ u8 opcode, unsigned int to, u32 *txbuf,
+ unsigned count, size_t *retlen)
+{
+ int ret, i, j;
+ u32 tmp;
+
+ dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n",
+ q->chip_base_addr, to, count);
+
+ /* clear the TX FIFO. */
+ tmp = readl(q->iobase + QUADSPI_MCR);
+ writel(tmp | QUADSPI_MCR_CLR_RXF_MASK, q->iobase + QUADSPI_MCR);
+
+ /* fill the TX data to the FIFO */
+ for (j = 0, i = ((count + 3) / 4); j < i; j++) {
+ tmp = fsl_qspi_endian_xchg(q, *txbuf);
+ writel(tmp, q->iobase + QUADSPI_TBDR);
+ txbuf++;
+ }
+
+ /* Trigger it */
+ ret = fsl_qspi_runcmd(q, opcode, to, count);
+
+ if (ret == 0 && retlen)
+ *retlen += count;
+
+ return ret;
+}
+
+static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
+{
+ int nor_size = q->nor_size;
+ void __iomem *base = q->iobase;
+
+ writel(nor_size + q->memmap_phy, base + QUADSPI_SFA1AD);
+ writel(nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD);
+ writel(nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD);
+ writel(nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD);
+}
+
+/*
+ * There are two different ways to read out the data from the flash:
+ * the "IP Command Read" and the "AHB Command Read".
+ *
+ * The IC designers suggest we use the "AHB Command Read", which is faster
+ * than the "IP Command Read". (What's more, there is a bug in the
+ * "IP Command Read" in the Vybrid.)
+ *
+ * After we set up the registers for the "AHB Command Read", we can use
+ * memcpy() to read the data directly. A miss in the buffer causes the
+ * controller to clear the buffer and use the sequence pointed to by
+ * QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
+ */
+static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ int seqid;
+
+	/* AHB configuration for access buffer 0/1/2. */
+ writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
+ writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
+ writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
+ writel(QUADSPI_BUF3CR_ALLMST, base + QUADSPI_BUF3CR);
+
+	/* We only use buffer 3 */
+ writel(0, base + QUADSPI_BUF0IND);
+ writel(0, base + QUADSPI_BUF1IND);
+ writel(0, base + QUADSPI_BUF2IND);
+
+ /* Set the default lut sequence for AHB Read. */
+ seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
+ writel(seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
+ q->iobase + QUADSPI_BFGENCR);
+}
+
+/* We use this function to do some basic init for spi_nor_scan(). */
+static int fsl_qspi_nor_setup(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ u32 reg;
+ int ret;
+
+	/* The default frequency; we will change it later. */
+ ret = clk_set_rate(q->clk, 66000000);
+ if (ret)
+ return ret;
+
+ /* Init the LUT table. */
+ fsl_qspi_init_lut(q);
+
+ /* Disable the module */
+ writel(QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
+ base + QUADSPI_MCR);
+
+ reg = readl(base + QUADSPI_SMPR);
+ writel(reg & ~(QUADSPI_SMPR_FSDLY_MASK
+ | QUADSPI_SMPR_FSPHS_MASK
+ | QUADSPI_SMPR_HSENA_MASK
+ | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
+
+ /* Enable the module */
+ writel(QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
+ base + QUADSPI_MCR);
+
+ /* enable the interrupt */
+ writel(QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
+
+ return 0;
+}
+
+static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
+{
+ unsigned long rate = q->clk_rate;
+ int ret;
+
+ if (is_imx6sx_qspi(q))
+ rate *= 4;
+
+ ret = clk_set_rate(q->clk, rate);
+ if (ret)
+ return ret;
+
+ /* Init the LUT table again. */
+ fsl_qspi_init_lut(q);
+
+ /* Init for AHB read */
+ fsl_qspi_init_abh_read(q);
+
+ return 0;
+}
+
+static struct of_device_id fsl_qspi_dt_ids[] = {
+ { .compatible = "fsl,vf610-qspi", .data = (void *)&vybrid_data, },
+ { .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
+
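+/*
+ * A minimal device tree sketch of what fsl_qspi_probe() below expects;
+ * the names come from the resource, clock and property lookups in the
+ * probe, while the addresses and the child flash node are illustrative
+ * only (interrupt and clock specifiers omitted):
+ *
+ *	qspi: quadspi@21e0000 {
+ *		compatible = "fsl,imx6sx-qspi";
+ *		reg = <0x021e0000 0x4000>, <0x60000000 0x10000000>;
+ *		reg-names = "QuadSPI", "QuadSPI-memory";
+ *		clock-names = "qspi_en", "qspi";
+ *
+ *		flash0: s25fl129p1@0 {
+ *			compatible = "spansion,s25fl129p1";
+ *			spi-max-frequency = <66000000>;
+ *		};
+ *	};
+ */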
+static void fsl_qspi_set_base_addr(struct fsl_qspi *q, struct spi_nor *nor)
+{
+ q->chip_base_addr = q->nor_size * (nor - q->nor);
+}
+
+static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ int ret;
+ struct fsl_qspi *q = nor->priv;
+
+ ret = fsl_qspi_runcmd(q, opcode, 0, len);
+ if (ret)
+ return ret;
+
+ fsl_qspi_read_data(q, len, buf);
+ return 0;
+}
+
+static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
+ int write_enable)
+{
+ struct fsl_qspi *q = nor->priv;
+ int ret;
+
+ if (!buf) {
+ ret = fsl_qspi_runcmd(q, opcode, 0, 1);
+ if (ret)
+ return ret;
+
+ if (opcode == SPINOR_OP_CHIP_ERASE)
+ fsl_qspi_invalid(q);
+
+ } else if (len > 0) {
+ ret = fsl_qspi_nor_write(q, nor, opcode, 0,
+ (u32 *)buf, len, NULL);
+ } else {
+ dev_err(q->dev, "invalid cmd %d\n", opcode);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void fsl_qspi_write(struct spi_nor *nor, loff_t to,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct fsl_qspi *q = nor->priv;
+
+ fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
+ (u32 *)buf, len, retlen);
+
+	/* invalidate the data in the AHB buffer. */
+ fsl_qspi_invalid(q);
+}
+
+static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct fsl_qspi *q = nor->priv;
+ u8 cmd = nor->read_opcode;
+ int ret;
+
+ dev_dbg(q->dev, "cmd [%x],read from (0x%p, 0x%.8x, 0x%.8x),len:%d\n",
+ cmd, q->ahb_base, q->chip_base_addr, (unsigned int)from, len);
+
+ /* Wait until the previous command is finished. */
+ ret = nor->wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ /* Read out the data directly from the AHB buffer.*/
+ memcpy(buf, q->ahb_base + q->chip_base_addr + from, len);
+
+ *retlen += len;
+ return 0;
+}
+
+static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs)
+{
+ struct fsl_qspi *q = nor->priv;
+ int ret;
+
+ dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n",
+ nor->mtd->erasesize / 1024, q->chip_base_addr, (u32)offs);
+
+	/* Wait until the previous write command is finished. */
+ ret = nor->wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ /* Send write enable, then erase commands. */
+ ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
+ if (ret)
+ return ret;
+
+ ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0);
+ if (ret)
+ return ret;
+
+ fsl_qspi_invalid(q);
+ return 0;
+}
+
+static int fsl_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct fsl_qspi *q = nor->priv;
+ int ret;
+
+ ret = clk_enable(q->clk_en);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(q->clk);
+ if (ret) {
+ clk_disable(q->clk_en);
+ return ret;
+ }
+
+ fsl_qspi_set_base_addr(q, nor);
+ return 0;
+}
+
+static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct fsl_qspi *q = nor->priv;
+
+ clk_disable(q->clk);
+ clk_disable(q->clk_en);
+}
+
+static int fsl_qspi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mtd_part_parser_data ppdata;
+ struct device *dev = &pdev->dev;
+ struct fsl_qspi *q;
+ struct resource *res;
+ struct spi_nor *nor;
+ struct mtd_info *mtd;
+ int ret, i = 0;
+ bool has_second_chip = false;
+ const struct of_device_id *of_id =
+ of_match_device(fsl_qspi_dt_ids, &pdev->dev);
+
+ q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ q->nor_num = of_get_child_count(dev->of_node);
+ if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP)
+ return -ENODEV;
+
+ /* find the resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
+ q->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(q->iobase)) {
+ ret = PTR_ERR(q->iobase);
+ goto map_failed;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "QuadSPI-memory");
+ q->ahb_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(q->ahb_base)) {
+ ret = PTR_ERR(q->ahb_base);
+ goto map_failed;
+ }
+ q->memmap_phy = res->start;
+
+ /* find the clocks */
+ q->clk_en = devm_clk_get(dev, "qspi_en");
+ if (IS_ERR(q->clk_en)) {
+ ret = PTR_ERR(q->clk_en);
+ goto map_failed;
+ }
+
+ q->clk = devm_clk_get(dev, "qspi");
+ if (IS_ERR(q->clk)) {
+ ret = PTR_ERR(q->clk);
+ goto map_failed;
+ }
+
+ ret = clk_prepare_enable(q->clk_en);
+ if (ret) {
+ dev_err(dev, "can not enable the qspi_en clock\n");
+ goto map_failed;
+ }
+
+ ret = clk_prepare_enable(q->clk);
+ if (ret) {
+ clk_disable_unprepare(q->clk_en);
+ dev_err(dev, "can not enable the qspi clock\n");
+ goto map_failed;
+ }
+
+ /* find the irq */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "failed to get the irq\n");
+ goto irq_failed;
+ }
+
+ ret = devm_request_irq(dev, ret,
+ fsl_qspi_irq_handler, 0, pdev->name, q);
+ if (ret) {
+ dev_err(dev, "failed to request irq.\n");
+ goto irq_failed;
+ }
+
+ q->dev = dev;
+ q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data;
+ platform_set_drvdata(pdev, q);
+
+ ret = fsl_qspi_nor_setup(q);
+ if (ret)
+ goto irq_failed;
+
+ if (of_get_property(np, "fsl,qspi-has-second-chip", NULL))
+ has_second_chip = true;
+
+ /* iterate the subnodes. */
+ for_each_available_child_of_node(dev->of_node, np) {
+ const struct spi_device_id *id;
+ char modalias[40];
+
+ /* skip the holes */
+ if (!has_second_chip)
+ i *= 2;
+
+ nor = &q->nor[i];
+ mtd = &q->mtd[i];
+
+ nor->mtd = mtd;
+ nor->dev = dev;
+ nor->priv = q;
+ mtd->priv = nor;
+
+ /* fill the hooks */
+ nor->read_reg = fsl_qspi_read_reg;
+ nor->write_reg = fsl_qspi_write_reg;
+ nor->read = fsl_qspi_read;
+ nor->write = fsl_qspi_write;
+ nor->erase = fsl_qspi_erase;
+
+ nor->prepare = fsl_qspi_prep;
+ nor->unprepare = fsl_qspi_unprep;
+
+ if (of_modalias_node(np, modalias, sizeof(modalias)) < 0)
+ goto map_failed;
+
+ id = spi_nor_match_id(modalias);
+ if (!id)
+ goto map_failed;
+
+ ret = of_property_read_u32(np, "spi-max-frequency",
+ &q->clk_rate);
+ if (ret < 0)
+ goto map_failed;
+
+ /* set the chip address for READID */
+ fsl_qspi_set_base_addr(q, nor);
+
+ ret = spi_nor_scan(nor, id, SPI_NOR_QUAD);
+ if (ret)
+ goto map_failed;
+
+ ppdata.of_node = np;
+ ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+ if (ret)
+ goto map_failed;
+
+ /* Set the correct NOR size now. */
+ if (q->nor_size == 0) {
+ q->nor_size = mtd->size;
+
+			/* Map the SPI NOR to an accessible address */
+ fsl_qspi_set_map_addr(q);
+ }
+
+ /*
+		 * The TX FIFO is 64 bytes in the Vybrid, but a Page Program
+		 * may write up to 256 bytes at a time. The write works in
+		 * units of the TX FIFO, not in units of the SPI NOR's page
+		 * size.
+		 *
+		 * So shrink spi_nor->page_size if it is larger than the
+		 * TX FIFO.
+ */
+ if (nor->page_size > q->devtype_data->txfifo)
+ nor->page_size = q->devtype_data->txfifo;
+
+ i++;
+ }
+
+	/* finish the rest of the init. */
+ ret = fsl_qspi_nor_setup_last(q);
+ if (ret)
+ goto last_init_failed;
+
+ clk_disable(q->clk);
+ clk_disable(q->clk_en);
+ dev_info(dev, "QuadSPI SPI NOR flash driver\n");
+ return 0;
+
+last_init_failed:
+ for (i = 0; i < q->nor_num; i++)
+ mtd_device_unregister(&q->mtd[i]);
+
+irq_failed:
+ clk_disable_unprepare(q->clk);
+ clk_disable_unprepare(q->clk_en);
+map_failed:
+ dev_err(dev, "Freescale QuadSPI probe failed\n");
+ return ret;
+}
+
+static int fsl_qspi_remove(struct platform_device *pdev)
+{
+ struct fsl_qspi *q = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < q->nor_num; i++)
+ mtd_device_unregister(&q->mtd[i]);
+
+ /* disable the hardware */
+ writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
+ writel(0x0, q->iobase + QUADSPI_RSER);
+
+ clk_unprepare(q->clk);
+ clk_unprepare(q->clk_en);
+ return 0;
+}
+
+static struct platform_driver fsl_qspi_driver = {
+ .driver = {
+ .name = "fsl-quadspi",
+ .bus = &platform_bus_type,
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_qspi_dt_ids,
+ },
+ .probe = fsl_qspi_probe,
+ .remove = fsl_qspi_remove,
+};
+module_platform_driver(fsl_qspi_driver);
+
+MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
+MODULE_AUTHOR("Freescale Semiconductor Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
new file mode 100644
index 00000000000..c713c865671
--- /dev/null
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -0,0 +1,1107 @@
+/*
+ * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
+ * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
+ *
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/math64.h>
+
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of_platform.h>
+#include <linux/spi/flash.h>
+#include <linux/mtd/spi-nor.h>
+
+/* Define max times to check status register before we give up. */
+#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
+
+#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
+
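+/*
+ * Example: the Macronix mx25l6405d entry in spi_nor_ids has jedec_id
+ * 0xc22017, so JEDEC_MFR() yields the manufacturer byte 0xc2
+ * (CFI_MFR_MACRONIX).
+ */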
+/*
+ * Read the status register and return its value.
+ * Returns a negative errno if an error occurred.
+ */
+static int read_sr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
+ if (ret < 0) {
+ pr_err("error %d reading SR\n", (int) ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
+ * Read the configuration register and return its value.
+ * Returns a negative errno if an error occurred.
+ */
+static int read_cr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error %d reading CR\n", ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
+ * Dummy cycle calculation for different types of read.
+ * It can be used to support more commands with
+ * different dummy cycle requirements.
+ */
+static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
+{
+ switch (nor->flash_read) {
+ case SPI_NOR_FAST:
+ case SPI_NOR_DUAL:
+ case SPI_NOR_QUAD:
+ return 1;
+ case SPI_NOR_NORMAL:
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * Write the status register with 1 byte of data.
+ * Returns negative if error occurred.
+ */
+static inline int write_sr(struct spi_nor *nor, u8 val)
+{
+ nor->cmd_buf[0] = val;
+ return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0);
+}
+
+/*
+ * Set write enable latch with Write Enable command.
+ * Returns negative if error occurred.
+ */
+static inline int write_enable(struct spi_nor *nor)
+{
+ return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
+}
+
+/*
+ * Send the write disable instruction to the chip.
+ */
+static inline int write_disable(struct spi_nor *nor)
+{
+ return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0, 0);
+}
+
+static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+ return mtd->priv;
+}
+
+/* Enable/disable 4-byte addressing mode. */
+static inline int set_4byte(struct spi_nor *nor, u32 jedec_id, int enable)
+{
+ int status;
+ bool need_wren = false;
+ u8 cmd;
+
+ switch (JEDEC_MFR(jedec_id)) {
+ case CFI_MFR_ST: /* Micron, actually */
+		/* Some Micron parts need the WREN command; all will accept it */
+ need_wren = true;
+ case CFI_MFR_MACRONIX:
+ case 0xEF /* winbond */:
+ if (need_wren)
+ write_enable(nor);
+
+ cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
+ status = nor->write_reg(nor, cmd, NULL, 0, 0);
+ if (need_wren)
+ write_disable(nor);
+
+ return status;
+ default:
+ /* Spansion style */
+ nor->cmd_buf[0] = enable << 7;
+ return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1, 0);
+ }
+}
+
+static int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+ unsigned long deadline;
+ int sr;
+
+ deadline = jiffies + MAX_READY_WAIT_JIFFIES;
+
+ do {
+ cond_resched();
+
+ sr = read_sr(nor);
+ if (sr < 0)
+ break;
+ else if (!(sr & SR_WIP))
+ return 0;
+ } while (!time_after_eq(jiffies, deadline));
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Service routine to read the status register until ready, or until a
+ * timeout occurs. Returns non-zero on error.
+ */
+static int wait_till_ready(struct spi_nor *nor)
+{
+ return nor->wait_till_ready(nor);
+}
+
+/*
+ * Erase the whole flash memory
+ *
+ * Returns 0 if successful, non-zero otherwise.
+ */
+static int erase_chip(struct spi_nor *nor)
+{
+ int ret;
+
+ dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd->size >> 10));
+
+	/* Wait until the previous write command is finished. */
+ ret = wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ /* Send write enable, then erase commands. */
+ write_enable(nor);
+
+ return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0, 0);
+}
+
+static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ int ret = 0;
+
+ mutex_lock(&nor->lock);
+
+ if (nor->prepare) {
+ ret = nor->prepare(nor, ops);
+ if (ret) {
+ dev_err(nor->dev, "failed in the preparation.\n");
+ mutex_unlock(&nor->lock);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ if (nor->unprepare)
+ nor->unprepare(nor, ops);
+ mutex_unlock(&nor->lock);
+}
+
+/*
+ * Erase an address range on the nor chip. The address range may extend
+ * over one or more erase sectors. Return an error if there is a problem
+ * erasing.
+ */
+static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ u32 addr, len;
+ uint32_t rem;
+ int ret;
+
+ dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
+ (long long)instr->len);
+
+ div_u64_rem(instr->len, mtd->erasesize, &rem);
+ if (rem)
+ return -EINVAL;
+
+ addr = instr->addr;
+ len = instr->len;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
+ if (ret)
+ return ret;
+
+ /* whole-chip erase? */
+ if (len == mtd->size) {
+ if (erase_chip(nor)) {
+ ret = -EIO;
+ goto erase_err;
+ }
+
+ /* REVISIT in some cases we could speed up erasing large regions
+ * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
+ * to use "small sector erase", but that's not always optimal.
+ */
+
+ /* "sector"-at-a-time erase */
+ } else {
+ while (len) {
+ if (nor->erase(nor, addr)) {
+ ret = -EIO;
+ goto erase_err;
+ }
+
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+ }
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return ret;
+
+erase_err:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
+ instr->state = MTD_ERASE_FAILED;
+ return ret;
+}
+
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ uint32_t offset = ofs;
+ uint8_t status_old, status_new;
+ int ret = 0;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
+ if (ret)
+ return ret;
+
+	/* Wait until the previous command is finished */
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto err;
+
+ status_old = read_sr(nor);
+
+ if (offset < mtd->size - (mtd->size / 2))
+ status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
+ else if (offset < mtd->size - (mtd->size / 4))
+ status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+ else if (offset < mtd->size - (mtd->size / 8))
+ status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+ else if (offset < mtd->size - (mtd->size / 16))
+ status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
+ else if (offset < mtd->size - (mtd->size / 32))
+ status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+ else if (offset < mtd->size - (mtd->size / 64))
+ status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
+ else
+ status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
+
+ /* Only modify protection if it will not unlock other areas */
+ if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) >
+ (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
+ write_enable(nor);
+ ret = write_sr(nor, status_new);
+ if (ret)
+ goto err;
+ }
+
+err:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+ return ret;
+}
+
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ uint32_t offset = ofs;
+ uint8_t status_old, status_new;
+ int ret = 0;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+ if (ret)
+ return ret;
+
+	/* Wait until the previous command is finished */
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto err;
+
+ status_old = read_sr(nor);
+
+ if (offset+len > mtd->size - (mtd->size / 64))
+ status_new = status_old & ~(SR_BP2 | SR_BP1 | SR_BP0);
+ else if (offset+len > mtd->size - (mtd->size / 32))
+ status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
+ else if (offset+len > mtd->size - (mtd->size / 16))
+ status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
+ else if (offset+len > mtd->size - (mtd->size / 8))
+ status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+ else if (offset+len > mtd->size - (mtd->size / 4))
+ status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
+ else if (offset+len > mtd->size - (mtd->size / 2))
+ status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+ else
+ status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+
+ /* Only modify protection if it will not lock other areas */
+ if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) <
+ (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
+ write_enable(nor);
+ ret = write_sr(nor, status_new);
+ if (ret)
+ goto err;
+ }
+
+err:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
+ return ret;
+}
+
+struct flash_info {
+ /* JEDEC id zero means "no ID" (most older chips); otherwise it has
+ * a high byte of zero plus three data bytes: the manufacturer id,
+ * then a two byte device id.
+ */
+ u32 jedec_id;
+ u16 ext_id;
+
+ /* The size listed here is what works with SPINOR_OP_SE, which isn't
+ * necessarily called a "sector" by the vendor.
+ */
+ unsigned sector_size;
+ u16 n_sectors;
+
+ u16 page_size;
+ u16 addr_width;
+
+ u16 flags;
+#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */
+#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
+#define SST_WRITE 0x04 /* use SST byte programming */
+#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
+#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
+#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
+#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
+};
+
+#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ ((kernel_ulong_t)&(struct flash_info) { \
+ .jedec_id = (_jedec_id), \
+ .ext_id = (_ext_id), \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags), \
+ })
+
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
+ ((kernel_ulong_t)&(struct flash_info) { \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = (_page_size), \
+ .addr_width = (_addr_width), \
+ .flags = (_flags), \
+ })
+
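+/*
+ * Sanity check when adding entries: the capacity implied by an INFO()
+ * entry is sector_size * n_sectors. For example the "w25q64" entry below,
+ * INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K), describes an 8 MiB part
+ * with the default 256-byte page size.
+ */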
+/* NOTE: double check command sets and memory organization when you add
+ * more nor chips. This current list focuses on newer chips, which
+ * have been converging on command sets that include JEDEC ID.
+ */
+const struct spi_device_id spi_nor_ids[] = {
+ /* Atmel -- some are (confusingly) marketed as "DataFlash" */
+ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
+ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
+
+ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
+ { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
+ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
+
+ { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
+ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
+ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
+ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+
+ { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
+
+ /* EON -- en25xxx */
+ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+
+ /* ESMT */
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
+
+ /* Everspin */
+ { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+
+ /* GigaDevice */
+ { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
+ { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
+
+ /* Intel/Numonyx -- xxxs33b */
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
+ { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
+ { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
+
+ /* Macronix */
+ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
+ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
+ { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
+
+ /* Micron */
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) },
+
+ /* PMC */
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
+ { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
+
+ /* Spansion -- single (large) sector size only, at least
+ * for the chips listed here (without boot sectors).
+ */
+ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
+ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
+ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
+ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
+ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
+ { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
+ { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
+ { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
+ { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
+ { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+
+ /* SST -- large erase sizes are "overlays", "sectors" are 4K */
+ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
+ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
+ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
+ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
+ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
+ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
+ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
+ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
+ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
+
+ /* ST Microelectronics -- newer production may have feature updates */
+ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
+ { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
+ { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
+ { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
+ { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
+ { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
+ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
+ { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
+ { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
+
+ { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
+ { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
+ { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
+ { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
+ { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
+ { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
+ { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
+ { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
+ { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
+
+ { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
+ { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
+ { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
+
+ { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
+ { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
+ { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
+
+ { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
+
+ /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+ { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
+ { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
+ { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
+ { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
+
+ /* Catalyst / On Semiconductor -- non-JEDEC */
+ { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { },
+};
+EXPORT_SYMBOL_GPL(spi_nor_ids);
+
+static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
+{
+ int tmp;
+ u8 id[5];
+ u32 jedec;
+ u16 ext_jedec;
+ struct flash_info *info;
+
+ tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, 5);
+ if (tmp < 0) {
+ dev_dbg(nor->dev, " error %d reading JEDEC ID\n", tmp);
+ return ERR_PTR(tmp);
+ }
+ jedec = id[0];
+ jedec = jedec << 8;
+ jedec |= id[1];
+ jedec = jedec << 8;
+ jedec |= id[2];
+
+ ext_jedec = id[3] << 8 | id[4];
+
+ for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
+ info = (void *)spi_nor_ids[tmp].driver_data;
+ if (info->jedec_id == jedec) {
+ if (info->ext_id == 0 || info->ext_id == ext_jedec)
+ return &spi_nor_ids[tmp];
+ }
+ }
+ dev_err(nor->dev, "unrecognized JEDEC id %06x\n", jedec);
+ return ERR_PTR(-ENODEV);
+}
+
+static const struct spi_device_id *jedec_probe(struct spi_nor *nor)
+{
+ return nor->read_id(nor);
+}
+
+static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
+ if (ret)
+ return ret;
+
+ ret = nor->read(nor, from, len, retlen, buf);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
+ return ret;
+}
+
+static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t actual;
+ int ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+ if (ret)
+ return ret;
+
+ /* Wait until finished previous write command. */
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto time_out;
+
+ write_enable(nor);
+
+ nor->sst_write_second = false;
+
+ actual = to % 2;
+ /* Start write from odd address. */
+ if (actual) {
+ nor->program_opcode = SPINOR_OP_BP;
+
+ /* write one byte. */
+ nor->write(nor, to, 1, retlen, buf);
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto time_out;
+ }
+ to += actual;
+
+ /* Write out most of the data here. */
+ for (; actual < len - 1; actual += 2) {
+ nor->program_opcode = SPINOR_OP_AAI_WP;
+
+ /* write two bytes. */
+ nor->write(nor, to, 2, retlen, buf + actual);
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto time_out;
+ to += 2;
+ nor->sst_write_second = true;
+ }
+ nor->sst_write_second = false;
+
+ write_disable(nor);
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto time_out;
+
+ /* Write out trailing byte if it exists. */
+ if (actual != len) {
+ write_enable(nor);
+
+ nor->program_opcode = SPINOR_OP_BP;
+ nor->write(nor, to, 1, retlen, buf + actual);
+
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto time_out;
+ write_disable(nor);
+ }
+time_out:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+ return ret;
+}
+
+/*
+ * Write an address range to the nor chip. Data must be written in
+ * FLASH_PAGESIZE chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ */
+static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ u32 page_offset, page_size, i;
+ int ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+ if (ret)
+ return ret;
+
+ /* Wait until finished previous write command. */
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto write_err;
+
+ write_enable(nor);
+
+ page_offset = to & (nor->page_size - 1);
+
+ /* do all the bytes fit onto one page? */
+ if (page_offset + len <= nor->page_size) {
+ nor->write(nor, to, len, retlen, buf);
+ } else {
+ /* the size of data remaining on the first page */
+ page_size = nor->page_size - page_offset;
+ nor->write(nor, to, page_size, retlen, buf);
+
+ /* write everything in nor->page_size chunks */
+ for (i = page_size; i < len; i += page_size) {
+ page_size = len - i;
+ if (page_size > nor->page_size)
+ page_size = nor->page_size;
+
+ wait_till_ready(nor);
+ write_enable(nor);
+
+ nor->write(nor, to + i, page_size, retlen, buf + i);
+ }
+ }
+
+write_err:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+ return 0;
+}
+
+static int macronix_quad_enable(struct spi_nor *nor)
+{
+ int ret, val;
+
+ val = read_sr(nor);
+ write_enable(nor);
+
+ nor->cmd_buf[0] = val | SR_QUAD_EN_MX;
+ nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0);
+
+ if (wait_till_ready(nor))
+ return 1;
+
+ ret = read_sr(nor);
+ if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
+ dev_err(nor->dev, "Macronix Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Write the status register and the configuration register with 2 bytes.
+ * The first byte will be written to the status register, while the
+ * second byte will be written to the configuration register.
+ * Return a negative value if an error occurred.
+ */
+static int write_sr_cr(struct spi_nor *nor, u16 val)
+{
+ nor->cmd_buf[0] = val & 0xff;
+ nor->cmd_buf[1] = (val >> 8);
+
+ return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2, 0);
+}
+
+static int spansion_quad_enable(struct spi_nor *nor)
+{
+ int ret;
+ int quad_en = CR_QUAD_EN_SPAN << 8;
+
+ write_enable(nor);
+
+ ret = write_sr_cr(nor, quad_en);
+ if (ret < 0) {
+ dev_err(nor->dev,
+ "error while writing configuration register\n");
+ return -EINVAL;
+ }
+
+ /* read back and check it */
+ ret = read_cr(nor);
+ if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+ dev_err(nor->dev, "Spansion Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_quad_mode(struct spi_nor *nor, u32 jedec_id)
+{
+ int status;
+
+ switch (JEDEC_MFR(jedec_id)) {
+ case CFI_MFR_MACRONIX:
+ status = macronix_quad_enable(nor);
+ if (status) {
+ dev_err(nor->dev, "Macronix quad-read not enabled\n");
+ return -EINVAL;
+ }
+ return status;
+ default:
+ status = spansion_quad_enable(nor);
+ if (status) {
+ dev_err(nor->dev, "Spansion quad-read not enabled\n");
+ return -EINVAL;
+ }
+ return status;
+ }
+}
+
+static int spi_nor_check(struct spi_nor *nor)
+{
+ if (!nor->dev || !nor->read || !nor->write ||
+ !nor->read_reg || !nor->write_reg || !nor->erase) {
+ pr_err("spi-nor: please fill all the necessary fields!\n");
+ return -EINVAL;
+ }
+
+ if (!nor->read_id)
+ nor->read_id = spi_nor_read_id;
+ if (!nor->wait_till_ready)
+ nor->wait_till_ready = spi_nor_wait_till_ready;
+
+ return 0;
+}
+
+int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id,
+ enum read_mode mode)
+{
+ struct flash_info *info;
+ struct flash_platform_data *data;
+ struct device *dev = nor->dev;
+ struct mtd_info *mtd = nor->mtd;
+ struct device_node *np = dev->of_node;
+ int ret;
+ int i;
+
+ ret = spi_nor_check(nor);
+ if (ret)
+ return ret;
+
+ /* Platform data helps sort out which chip type we have, as
+ * well as how this board partitions it. If we don't have
+ * a chip ID, try the JEDEC id commands; they'll work for most
+ * newer chips, even if we don't recognize the particular chip.
+ */
+ data = dev_get_platdata(dev);
+ if (data && data->type) {
+ const struct spi_device_id *plat_id;
+
+ for (i = 0; i < ARRAY_SIZE(spi_nor_ids) - 1; i++) {
+ plat_id = &spi_nor_ids[i];
+ if (strcmp(data->type, plat_id->name))
+ continue;
+ break;
+ }
+
+ if (i < ARRAY_SIZE(spi_nor_ids) - 1)
+ id = plat_id;
+ else
+ dev_warn(dev, "unrecognized id %s\n", data->type);
+ }
+
+ info = (void *)id->driver_data;
+
+ if (info->jedec_id) {
+ const struct spi_device_id *jid;
+
+ jid = jedec_probe(nor);
+ if (IS_ERR(jid)) {
+ return PTR_ERR(jid);
+ } else if (jid != id) {
+ /*
+ * JEDEC knows better, so overwrite platform ID. We
+ * can't trust partitions any longer, but we'll let
+ * mtd apply them anyway, since some partitions may be
+ * marked read-only, and we don't want to lose that
+ * information, even if it's not 100% accurate.
+ */
+ dev_warn(dev, "found %s, expected %s\n",
+ jid->name, id->name);
+ id = jid;
+ info = (void *)jid->driver_data;
+ }
+ }
+
+ mutex_init(&nor->lock);
+
+ /*
+ * Atmel, SST and Intel/Numonyx serial nor tend to power
+ * up with the software protection bits set
+ */
+
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL ||
+ JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL ||
+ JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) {
+ write_enable(nor);
+ write_sr(nor, 0);
+ }
+
+ if (data && data->name)
+ mtd->name = data->name;
+ else
+ mtd->name = dev_name(dev);
+
+ mtd->type = MTD_NORFLASH;
+ mtd->writesize = 1;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = info->sector_size * info->n_sectors;
+ mtd->_erase = spi_nor_erase;
+ mtd->_read = spi_nor_read;
+
+ /* nor protection support for STmicro chips */
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) {
+ mtd->_lock = spi_nor_lock;
+ mtd->_unlock = spi_nor_unlock;
+ }
+
+ /* sst nor chips use AAI word program */
+ if (info->flags & SST_WRITE)
+ mtd->_write = sst_write;
+ else
+ mtd->_write = spi_nor_write;
+
+ /* prefer "small sector" erase if possible */
+ if (info->flags & SECT_4K) {
+ nor->erase_opcode = SPINOR_OP_BE_4K;
+ mtd->erasesize = 4096;
+ } else if (info->flags & SECT_4K_PMC) {
+ nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
+ mtd->erasesize = 4096;
+ } else {
+ nor->erase_opcode = SPINOR_OP_SE;
+ mtd->erasesize = info->sector_size;
+ }
+
+ if (info->flags & SPI_NOR_NO_ERASE)
+ mtd->flags |= MTD_NO_ERASE;
+
+ mtd->dev.parent = dev;
+ nor->page_size = info->page_size;
+ mtd->writebufsize = nor->page_size;
+
+ if (np) {
+ /* If we were instantiated by DT, use it */
+ if (of_property_read_bool(np, "m25p,fast-read"))
+ nor->flash_read = SPI_NOR_FAST;
+ else
+ nor->flash_read = SPI_NOR_NORMAL;
+ } else {
+ /* If we weren't instantiated by DT, default to fast-read */
+ nor->flash_read = SPI_NOR_FAST;
+ }
+
+ /* Some devices cannot do fast-read, no matter what DT tells us */
+ if (info->flags & SPI_NOR_NO_FR)
+ nor->flash_read = SPI_NOR_NORMAL;
+
+ /* Quad/Dual-read mode takes precedence over fast/normal */
+ if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
+ ret = set_quad_mode(nor, info->jedec_id);
+ if (ret) {
+ dev_err(dev, "quad mode not supported\n");
+ return ret;
+ }
+ nor->flash_read = SPI_NOR_QUAD;
+ } else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) {
+ nor->flash_read = SPI_NOR_DUAL;
+ }
+
+ /* Default commands */
+ switch (nor->flash_read) {
+ case SPI_NOR_QUAD:
+ nor->read_opcode = SPINOR_OP_READ_1_1_4;
+ break;
+ case SPI_NOR_DUAL:
+ nor->read_opcode = SPINOR_OP_READ_1_1_2;
+ break;
+ case SPI_NOR_FAST:
+ nor->read_opcode = SPINOR_OP_READ_FAST;
+ break;
+ case SPI_NOR_NORMAL:
+ nor->read_opcode = SPINOR_OP_READ;
+ break;
+ default:
+ dev_err(dev, "No Read opcode defined\n");
+ return -EINVAL;
+ }
+
+ nor->program_opcode = SPINOR_OP_PP;
+
+ if (info->addr_width)
+ nor->addr_width = info->addr_width;
+ else if (mtd->size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_width = 4;
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) {
+ /* Dedicated 4-byte command set */
+ switch (nor->flash_read) {
+ case SPI_NOR_QUAD:
+ nor->read_opcode = SPINOR_OP_READ4_1_1_4;
+ break;
+ case SPI_NOR_DUAL:
+ nor->read_opcode = SPINOR_OP_READ4_1_1_2;
+ break;
+ case SPI_NOR_FAST:
+ nor->read_opcode = SPINOR_OP_READ4_FAST;
+ break;
+ case SPI_NOR_NORMAL:
+ nor->read_opcode = SPINOR_OP_READ4;
+ break;
+ }
+ nor->program_opcode = SPINOR_OP_PP_4B;
+ /* No small sector erase for 4-byte command set */
+ nor->erase_opcode = SPINOR_OP_SE_4B;
+ mtd->erasesize = info->sector_size;
+ } else
+ set_4byte(nor, info->jedec_id, 1);
+ } else {
+ nor->addr_width = 3;
+ }
+
+ nor->read_dummy = spi_nor_read_dummy_cycles(nor);
+
+ dev_info(dev, "%s (%lld Kbytes)\n", id->name,
+ (long long)mtd->size >> 10);
+
+ dev_dbg(dev,
+ "mtd .name = %s, .size = 0x%llx (%lldMiB), "
+ ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
+ mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
+ mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
+
+ if (mtd->numeraseregions)
+ for (i = 0; i < mtd->numeraseregions; i++)
+ dev_dbg(dev,
+ "mtd.eraseregions[%d] = { .offset = 0x%llx, "
+ ".erasesize = 0x%.8x (%uKiB), "
+ ".numblocks = %d }\n",
+ i, (long long)mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].erasesize / 1024,
+ mtd->eraseregions[i].numblocks);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_nor_scan);
+
+const struct spi_device_id *spi_nor_match_id(char *name)
+{
+ const struct spi_device_id *id = spi_nor_ids;
+
+ while (id->name[0]) {
+ if (!strcmp(name, id->name))
+ return id;
+ id++;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(spi_nor_match_id);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
+MODULE_AUTHOR("Mike Lavender");
+MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 5cd18979333..daf82ba7aba 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -122,9 +122,9 @@ static int get_valid_cis_sector(struct mtd_info *mtd)
* is not SSFDC formatted
*/
for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) {
- if (!mtd->block_isbad(mtd, offset)) {
- ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen,
- sect_buf);
+ if (!mtd_block_isbad(mtd, offset)) {
+ ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen,
+ sect_buf);
/* CIS pattern match on the sector buffer */
if (ret < 0 || retlen != SECTOR_SIZE) {
@@ -135,8 +135,7 @@ static int get_valid_cis_sector(struct mtd_info *mtd)
/* Found */
cis_sector = (int)(offset >> SECTOR_SHIFT);
} else {
- DEBUG(MTD_DEBUG_LEVEL1,
- "SSFDC_RO: CIS/IDI sector not found"
+ pr_debug("SSFDC_RO: CIS/IDI sector not found"
" on %s (mtd%d)\n", mtd->name,
mtd->index);
}
@@ -157,7 +156,7 @@ static int read_physical_sector(struct mtd_info *mtd, uint8_t *sect_buf,
size_t retlen;
loff_t offset = (loff_t)sect_no << SECTOR_SHIFT;
- ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
+ ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
if (ret < 0 || retlen != SECTOR_SIZE)
return -1;
@@ -170,13 +169,13 @@ static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
struct mtd_oob_ops ops;
int ret;
- ops.mode = MTD_OOB_RAW;
+ ops.mode = MTD_OPS_RAW;
ops.ooboffs = 0;
ops.ooblen = OOB_SIZE;
ops.oobbuf = buf;
ops.datbuf = NULL;
- ret = mtd->read_oob(mtd, offs, &ops);
+ ret = mtd_read_oob(mtd, offs, &ops);
if (ret < 0 || ops.oobretlen != OOB_SIZE)
return -1;
@@ -221,8 +220,7 @@ static int get_logical_address(uint8_t *oob_buf)
block_address >>= 1;
if (get_parity(block_address, 10) != parity) {
- DEBUG(MTD_DEBUG_LEVEL0,
- "SSFDC_RO: logical address field%d"
+ pr_debug("SSFDC_RO: logical address field%d"
"parity error(0x%04X)\n", j+1,
block_address);
} else {
@@ -235,7 +233,7 @@ static int get_logical_address(uint8_t *oob_buf)
if (!ok)
block_address = -2;
- DEBUG(MTD_DEBUG_LEVEL3, "SSFDC_RO: get_logical_address() %d\n",
+ pr_debug("SSFDC_RO: get_logical_address() %d\n",
block_address);
return block_address;
@@ -249,7 +247,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
int ret, block_address, phys_block;
struct mtd_info *mtd = ssfdc->mbd.mtd;
- DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: build_block_map() nblks=%d (%luK)\n",
+ pr_debug("SSFDC_RO: build_block_map() nblks=%d (%luK)\n",
ssfdc->map_len,
(unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024);
@@ -257,13 +255,12 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len;
phys_block++) {
offset = (unsigned long)phys_block * ssfdc->erase_size;
- if (mtd->block_isbad(mtd, offset))
+ if (mtd_block_isbad(mtd, offset))
continue; /* skip bad blocks */
ret = read_raw_oob(mtd, offset, oob_buf);
if (ret < 0) {
- DEBUG(MTD_DEBUG_LEVEL0,
- "SSFDC_RO: mtd read_oob() failed at %lu\n",
+ pr_debug("SSFDC_RO: mtd read_oob() failed at %lu\n",
offset);
return -1;
}
@@ -279,8 +276,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
ssfdc->logic_block_map[block_address] =
(unsigned short)phys_block;
- DEBUG(MTD_DEBUG_LEVEL2,
- "SSFDC_RO: build_block_map() phys_block=%d,"
+ pr_debug("SSFDC_RO: build_block_map() phys_block=%d,"
"logic_block_addr=%d, zone=%d\n",
phys_block, block_address, zone_index);
}
@@ -294,7 +290,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
int cis_sector;
/* Check for small page NAND flash */
- if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE ||
+ if (!mtd_type_is_nand(mtd) || mtd->oobsize != OOB_SIZE ||
mtd->size > UINT_MAX)
return;
@@ -304,11 +300,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
return;
ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL);
- if (!ssfdc) {
- printk(KERN_WARNING
- "SSFDC_RO: out of memory for data structures\n");
+ if (!ssfdc)
return;
- }
ssfdc->mbd.mtd = mtd;
ssfdc->mbd.devnum = -1;
@@ -319,8 +312,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
ssfdc->erase_size = mtd->erasesize;
ssfdc->map_len = (u32)mtd->size / mtd->erasesize;
- DEBUG(MTD_DEBUG_LEVEL1,
- "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
+ pr_debug("SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len,
DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE));
@@ -331,7 +323,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) /
((long)ssfdc->sectors * (long)ssfdc->heads));
- DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
+ pr_debug("SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
ssfdc->cylinders, ssfdc->heads , ssfdc->sectors,
(long)ssfdc->cylinders * (long)ssfdc->heads *
(long)ssfdc->sectors);
@@ -342,11 +334,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
/* Allocate logical block map */
ssfdc->logic_block_map = kmalloc(sizeof(ssfdc->logic_block_map[0]) *
ssfdc->map_len, GFP_KERNEL);
- if (!ssfdc->logic_block_map) {
- printk(KERN_WARNING
- "SSFDC_RO: out of memory for data structures\n");
+ if (!ssfdc->logic_block_map)
goto out_err;
- }
memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) *
ssfdc->map_len);
@@ -371,7 +360,7 @@ static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
{
struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
- DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: remove_dev (i=%d)\n", dev->devnum);
+ pr_debug("SSFDC_RO: remove_dev (i=%d)\n", dev->devnum);
del_mtd_blktrans_dev(dev);
kfree(ssfdc->logic_block_map);
@@ -387,8 +376,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
offset = (int)(logic_sect_no % sectors_per_block);
block_address = (int)(logic_sect_no / sectors_per_block);
- DEBUG(MTD_DEBUG_LEVEL3,
- "SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d,"
+ pr_debug("SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d,"
" block_addr=%d\n", logic_sect_no, sectors_per_block, offset,
block_address);
@@ -397,8 +385,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
block_address = ssfdc->logic_block_map[block_address];
- DEBUG(MTD_DEBUG_LEVEL3,
- "SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n",
+ pr_debug("SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n",
block_address);
if (block_address < 0xffff) {
@@ -407,8 +394,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
sect_no = (unsigned long)block_address * sectors_per_block +
offset;
- DEBUG(MTD_DEBUG_LEVEL3,
- "SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
+ pr_debug("SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
sect_no);
if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0)
@@ -424,7 +410,7 @@ static int ssfdcr_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
- DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n",
+ pr_debug("SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n",
ssfdc->cylinders, ssfdc->heads, ssfdc->sectors);
geo->heads = ssfdc->heads;
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
index b44dcab940d..937a829bb70 100644
--- a/drivers/mtd/tests/Makefile
+++ b/drivers/mtd/tests/Makefile
@@ -6,3 +6,13 @@ obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o
+
+mtd_oobtest-objs := oobtest.o mtd_test.o
+mtd_pagetest-objs := pagetest.o mtd_test.o
+mtd_readtest-objs := readtest.o mtd_test.o
+mtd_speedtest-objs := speedtest.o mtd_test.o
+mtd_stresstest-objs := stresstest.o mtd_test.o
+mtd_subpagetest-objs := subpagetest.o mtd_test.o
+mtd_torturetest-objs := torturetest.o mtd_test.o
+mtd_nandbiterrs-objs := nandbiterrs.o mtd_test.o
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index 70d6d7d0d65..e579f9027c4 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -1,63 +1,292 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/bitops.h>
-#include <linux/jiffies.h>
+#include <linux/slab.h>
#include <linux/mtd/nand_ecc.h>
-#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE)
+/*
+ * Test the implementation for software ECC
+ *
+ * No actual MTD device is needed, so we don't need to worry about losing
+ * important data by human error.
+ *
+ * This covers possible patterns of corruption which can be reliably corrected
+ * or detected.
+ */
+
+#if IS_ENABLED(CONFIG_MTD_NAND)
+
+struct nand_ecc_test {
+ const char *name;
+ void (*prepare)(void *, void *, void *, void *, const size_t);
+ int (*verify)(void *, void *, void *, const size_t);
+};
-static void inject_single_bit_error(void *data, size_t size)
+/*
+ * The reason for this __change_bit_le() instead of __change_bit() is to inject
+ * bit error properly within the region which is not a multiple of
+ * sizeof(unsigned long) on big-endian systems
+ */
+#ifdef __LITTLE_ENDIAN
+#define __change_bit_le(nr, addr) __change_bit(nr, addr)
+#elif defined(__BIG_ENDIAN)
+#define __change_bit_le(nr, addr) \
+ __change_bit((nr) ^ ((BITS_PER_LONG - 1) & ~0x7), addr)
+#else
+#error "Unknown byte order"
+#endif
+
+static void single_bit_error_data(void *error_data, void *correct_data,
+ size_t size)
{
- unsigned long offset = random32() % (size * BITS_PER_BYTE);
+ unsigned int offset = prandom_u32() % (size * BITS_PER_BYTE);
- __change_bit(offset, data);
+ memcpy(error_data, correct_data, size);
+ __change_bit_le(offset, error_data);
}
-static unsigned char data[512];
-static unsigned char error_data[512];
+static void double_bit_error_data(void *error_data, void *correct_data,
+ size_t size)
+{
+ unsigned int offset[2];
+
+ offset[0] = prandom_u32() % (size * BITS_PER_BYTE);
+ do {
+ offset[1] = prandom_u32() % (size * BITS_PER_BYTE);
+ } while (offset[0] == offset[1]);
+
+ memcpy(error_data, correct_data, size);
-static int nand_ecc_test(const size_t size)
+ __change_bit_le(offset[0], error_data);
+ __change_bit_le(offset[1], error_data);
+}
+
+static unsigned int random_ecc_bit(size_t size)
{
- unsigned char code[3];
- unsigned char error_code[3];
- char testname[30];
+ unsigned int offset = prandom_u32() % (3 * BITS_PER_BYTE);
+
+ if (size == 256) {
+ /*
+ * Don't inject a bit error into the insignificant bits (16th
+ * and 17th bit) in ECC code for 256 byte data block
+ */
+ while (offset == 16 || offset == 17)
+ offset = prandom_u32() % (3 * BITS_PER_BYTE);
+ }
- BUG_ON(sizeof(data) < size);
+ return offset;
+}
- sprintf(testname, "nand-ecc-%zu", size);
+static void single_bit_error_ecc(void *error_ecc, void *correct_ecc,
+ size_t size)
+{
+ unsigned int offset = random_ecc_bit(size);
- get_random_bytes(data, size);
+ memcpy(error_ecc, correct_ecc, 3);
+ __change_bit_le(offset, error_ecc);
+}
- memcpy(error_data, data, size);
- inject_single_bit_error(error_data, size);
+static void double_bit_error_ecc(void *error_ecc, void *correct_ecc,
+ size_t size)
+{
+ unsigned int offset[2];
- __nand_calculate_ecc(data, size, code);
- __nand_calculate_ecc(error_data, size, error_code);
- __nand_correct_data(error_data, code, error_code, size);
+ offset[0] = random_ecc_bit(size);
+ do {
+ offset[1] = random_ecc_bit(size);
+ } while (offset[0] == offset[1]);
- if (!memcmp(data, error_data, size)) {
- printk(KERN_INFO "mtd_nandecctest: ok - %s\n", testname);
+ memcpy(error_ecc, correct_ecc, 3);
+ __change_bit_le(offset[0], error_ecc);
+ __change_bit_le(offset[1], error_ecc);
+}
+
+static void no_bit_error(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ memcpy(error_data, correct_data, size);
+ memcpy(error_ecc, correct_ecc, 3);
+}
+
+static int no_bit_error_verify(void *error_data, void *error_ecc,
+ void *correct_data, const size_t size)
+{
+ unsigned char calc_ecc[3];
+ int ret;
+
+ __nand_calculate_ecc(error_data, size, calc_ecc);
+ ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
+ if (ret == 0 && !memcmp(correct_data, error_data, size))
return 0;
- }
- printk(KERN_ERR "mtd_nandecctest: not ok - %s\n", testname);
+ return -EINVAL;
+}
+
+static void single_bit_error_in_data(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ single_bit_error_data(error_data, correct_data, size);
+ memcpy(error_ecc, correct_ecc, 3);
+}
+
+static void single_bit_error_in_ecc(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ memcpy(error_data, correct_data, size);
+ single_bit_error_ecc(error_ecc, correct_ecc, size);
+}
+
+static int single_bit_error_correct(void *error_data, void *error_ecc,
+ void *correct_data, const size_t size)
+{
+ unsigned char calc_ecc[3];
+ int ret;
+
+ __nand_calculate_ecc(error_data, size, calc_ecc);
+ ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
+ if (ret == 1 && !memcmp(correct_data, error_data, size))
+ return 0;
+
+ return -EINVAL;
+}
+
+static void double_bit_error_in_data(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ double_bit_error_data(error_data, correct_data, size);
+ memcpy(error_ecc, correct_ecc, 3);
+}
+
+static void single_bit_error_in_data_and_ecc(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ single_bit_error_data(error_data, correct_data, size);
+ single_bit_error_ecc(error_ecc, correct_ecc, size);
+}
+
+static void double_bit_error_in_ecc(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ memcpy(error_data, correct_data, size);
+ double_bit_error_ecc(error_ecc, correct_ecc, size);
+}
+
+static int double_bit_error_detect(void *error_data, void *error_ecc,
+ void *correct_data, const size_t size)
+{
+ unsigned char calc_ecc[3];
+ int ret;
+
+ __nand_calculate_ecc(error_data, size, calc_ecc);
+ ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
- printk(KERN_DEBUG "hexdump of data:\n");
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
- data, size, false);
- printk(KERN_DEBUG "hexdump of error data:\n");
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ return (ret == -1) ? 0 : -EINVAL;
+}
+
+static const struct nand_ecc_test nand_ecc_test[] = {
+ {
+ .name = "no-bit-error",
+ .prepare = no_bit_error,
+ .verify = no_bit_error_verify,
+ },
+ {
+ .name = "single-bit-error-in-data-correct",
+ .prepare = single_bit_error_in_data,
+ .verify = single_bit_error_correct,
+ },
+ {
+ .name = "single-bit-error-in-ecc-correct",
+ .prepare = single_bit_error_in_ecc,
+ .verify = single_bit_error_correct,
+ },
+ {
+ .name = "double-bit-error-in-data-detect",
+ .prepare = double_bit_error_in_data,
+ .verify = double_bit_error_detect,
+ },
+ {
+ .name = "single-bit-error-in-data-and-ecc-detect",
+ .prepare = single_bit_error_in_data_and_ecc,
+ .verify = double_bit_error_detect,
+ },
+ {
+ .name = "double-bit-error-in-ecc-detect",
+ .prepare = double_bit_error_in_ecc,
+ .verify = double_bit_error_detect,
+ },
+};
+
+static void dump_data_ecc(void *error_data, void *error_ecc, void *correct_data,
+ void *correct_ecc, const size_t size)
+{
+ pr_info("hexdump of error data:\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
error_data, size, false);
+ print_hex_dump(KERN_INFO, "hexdump of error ecc: ",
+ DUMP_PREFIX_NONE, 16, 1, error_ecc, 3, false);
+
+ pr_info("hexdump of correct data:\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+ correct_data, size, false);
+ print_hex_dump(KERN_INFO, "hexdump of correct ecc: ",
+ DUMP_PREFIX_NONE, 16, 1, correct_ecc, 3, false);
+}
+
+static int nand_ecc_test_run(const size_t size)
+{
+ int i;
+ int err = 0;
+ void *error_data;
+ void *error_ecc;
+ void *correct_data;
+ void *correct_ecc;
- return -1;
+ error_data = kmalloc(size, GFP_KERNEL);
+ error_ecc = kmalloc(3, GFP_KERNEL);
+ correct_data = kmalloc(size, GFP_KERNEL);
+ correct_ecc = kmalloc(3, GFP_KERNEL);
+
+ if (!error_data || !error_ecc || !correct_data || !correct_ecc) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ prandom_bytes(correct_data, size);
+ __nand_calculate_ecc(correct_data, size, correct_ecc);
+
+ for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
+ nand_ecc_test[i].prepare(error_data, error_ecc,
+ correct_data, correct_ecc, size);
+ err = nand_ecc_test[i].verify(error_data, error_ecc,
+ correct_data, size);
+
+ if (err) {
+ pr_err("not ok - %s-%zd\n",
+ nand_ecc_test[i].name, size);
+ dump_data_ecc(error_data, error_ecc,
+ correct_data, correct_ecc, size);
+ break;
+ }
+ pr_info("ok - %s-%zd\n",
+ nand_ecc_test[i].name, size);
+ }
+error:
+ kfree(error_data);
+ kfree(error_ecc);
+ kfree(correct_data);
+ kfree(correct_ecc);
+
+ return err;
}
#else
-static int nand_ecc_test(const size_t size)
+static int nand_ecc_test_run(const size_t size)
{
return 0;
}
@@ -66,12 +295,13 @@ static int nand_ecc_test(const size_t size)
static int __init ecc_test_init(void)
{
- srandom32(jiffies);
+ int err;
- nand_ecc_test(256);
- nand_ecc_test(512);
+ err = nand_ecc_test_run(256);
+ if (err)
+ return err;
- return 0;
+ return nand_ecc_test_run(512);
}
static void __exit ecc_test_exit(void)
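The verify helpers above depend on the return-value convention of the software Hamming ECC: __nand_correct_data() reports 0 when data and ECC agree, a positive count (1 here) after repairing a single flipped bit in place, and -1 when the damage exceeds its single-bit-correct / double-bit-detect capability. A rough sketch of how a caller outside the test would typically consume that convention follows; check_block() is illustrative and not part of this patch.

#include <linux/errno.h>
#include <linux/mtd/nand_ecc.h>

static int check_block(unsigned char *buf, unsigned char *stored_ecc,
		       unsigned int size)
{
	unsigned char calc_ecc[3];
	int ret;

	__nand_calculate_ecc(buf, size, calc_ecc);
	ret = __nand_correct_data(buf, stored_ecc, calc_ecc, size);
	if (ret < 0)
		return -EBADMSG;	/* uncorrectable: detect only */

	return ret;			/* 0 = clean, 1 = corrected in buf */
}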
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
deleted file mode 100644
index 00b937e38c1..00000000000
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ /dev/null
@@ -1,632 +0,0 @@
-/*
- * Copyright (C) 2006-2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; see the file COPYING. If not, write to the Free Software
- * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Test page read and write on MTD device.
- *
- * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
- */
-
-#include <asm/div64.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/err.h>
-#include <linux/mtd/mtd.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-
-#define PRINT_PREF KERN_INFO "mtd_pagetest: "
-
-static int dev;
-module_param(dev, int, S_IRUGO);
-MODULE_PARM_DESC(dev, "MTD device number to use");
-
-static struct mtd_info *mtd;
-static unsigned char *twopages;
-static unsigned char *writebuf;
-static unsigned char *boundary;
-static unsigned char *bbt;
-
-static int pgsize;
-static int bufsize;
-static int ebcnt;
-static int pgcnt;
-static int errcnt;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
- next = next * 1103515245 + 12345;
- return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
- next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; ++i)
- buf[i] = simple_rand();
-}
-
-static int erase_eraseblock(int ebnum)
-{
- int err;
- struct erase_info ei;
- loff_t addr = ebnum * mtd->erasesize;
-
- memset(&ei, 0, sizeof(struct erase_info));
- ei.mtd = mtd;
- ei.addr = addr;
- ei.len = mtd->erasesize;
-
- err = mtd->erase(mtd, &ei);
- if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
- return err;
- }
-
- if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
- ebnum);
- return -EIO;
- }
-
- return 0;
-}
-
-static int write_eraseblock(int ebnum)
-{
- int err = 0;
- size_t written = 0;
- loff_t addr = ebnum * mtd->erasesize;
-
- set_random_data(writebuf, mtd->erasesize);
- cond_resched();
- err = mtd->write(mtd, addr, mtd->erasesize, &written, writebuf);
- if (err || written != mtd->erasesize)
- printk(PRINT_PREF "error: write failed at %#llx\n",
- (long long)addr);
-
- return err;
-}
-
-static int verify_eraseblock(int ebnum)
-{
- uint32_t j;
- size_t read = 0;
- int err = 0, i;
- loff_t addr0, addrn;
- loff_t addr = ebnum * mtd->erasesize;
-
- addr0 = 0;
- for (i = 0; i < ebcnt && bbt[i]; ++i)
- addr0 += mtd->erasesize;
-
- addrn = mtd->size;
- for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
- addrn -= mtd->erasesize;
-
- set_random_data(writebuf, mtd->erasesize);
- for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
- /* Do a read to set the internal dataRAMs to different data */
- err = mtd->read(mtd, addr0, bufsize, &read, twopages);
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)addr0);
- return err;
- }
- err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)(addrn - bufsize));
- return err;
- }
- memset(twopages, 0, bufsize);
- read = 0;
- err = mtd->read(mtd, addr, bufsize, &read, twopages);
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)addr);
- break;
- }
- if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
- (long long)addr);
- errcnt += 1;
- }
- }
- /* Check boundary between eraseblocks */
- if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
- unsigned long oldnext = next;
- /* Do a read to set the internal dataRAMs to different data */
- err = mtd->read(mtd, addr0, bufsize, &read, twopages);
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)addr0);
- return err;
- }
- err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)(addrn - bufsize));
- return err;
- }
- memset(twopages, 0, bufsize);
- read = 0;
- err = mtd->read(mtd, addr, bufsize, &read, twopages);
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)addr);
- return err;
- }
- memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
- set_random_data(boundary + pgsize, pgsize);
- if (memcmp(twopages, boundary, bufsize)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
- (long long)addr);
- errcnt += 1;
- }
- next = oldnext;
- }
- return err;
-}
-
-static int crosstest(void)
-{
- size_t read = 0;
- int err = 0, i;
- loff_t addr, addr0, addrn;
- unsigned char *pp1, *pp2, *pp3, *pp4;
-
- printk(PRINT_PREF "crosstest\n");
- pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
- if (!pp1) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
- return -ENOMEM;
- }
- pp2 = pp1 + pgsize;
- pp3 = pp2 + pgsize;
- pp4 = pp3 + pgsize;
- memset(pp1, 0, pgsize * 4);
-
- addr0 = 0;
- for (i = 0; i < ebcnt && bbt[i]; ++i)
- addr0 += mtd->erasesize;
-
- addrn = mtd->size;
- for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
- addrn -= mtd->erasesize;
-
- /* Read 2nd-to-last page to pp1 */
- read = 0;
- addr = addrn - pgsize - pgsize;
- err = mtd->read(mtd, addr, pgsize, &read, pp1);
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)addr);
- kfree(pp1);
- return err;
- }
-
- /* Read 3rd-to-last page to pp1 */
- read = 0;
- addr = addrn - pgsize - pgsize - pgsize;
- err = mtd->read(mtd, addr, pgsize, &read, pp1);
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)addr);
- kfree(pp1);
- return err;
- }
-
- /* Read first page to pp2 */
- read = 0;
- addr = addr0;
- printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
- err = mtd->read(mtd, addr, pgsize, &read, pp2);
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)addr);
- kfree(pp1);
- return err;
- }
-
- /* Read last page to pp3 */
- read = 0;
- addr = addrn - pgsize;
- printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
- err = mtd->read(mtd, addr, pgsize, &read, pp3);
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)addr);
- kfree(pp1);
- return err;
- }
-
- /* Read first page again to pp4 */
- read = 0;
- addr = addr0;
- printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
- err = mtd->read(mtd, addr, pgsize, &read, pp4);
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)addr);
- kfree(pp1);
- return err;
- }
-
- /* pp2 and pp4 should be the same */
- printk(PRINT_PREF "verifying pages read at %#llx match\n",
- (long long)addr0);
- if (memcmp(pp2, pp4, pgsize)) {
- printk(PRINT_PREF "verify failed!\n");
- errcnt += 1;
- } else if (!err)
- printk(PRINT_PREF "crosstest ok\n");
- kfree(pp1);
- return err;
-}
-
-static int erasecrosstest(void)
-{
- size_t read = 0, written = 0;
- int err = 0, i, ebnum, ebnum2;
- loff_t addr0;
- char *readbuf = twopages;
-
- printk(PRINT_PREF "erasecrosstest\n");
-
- ebnum = 0;
- addr0 = 0;
- for (i = 0; i < ebcnt && bbt[i]; ++i) {
- addr0 += mtd->erasesize;
- ebnum += 1;
- }
-
- ebnum2 = ebcnt - 1;
- while (ebnum2 && bbt[ebnum2])
- ebnum2 -= 1;
-
- printk(PRINT_PREF "erasing block %d\n", ebnum);
- err = erase_eraseblock(ebnum);
- if (err)
- return err;
-
- printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
- set_random_data(writebuf, pgsize);
- strcpy(writebuf, "There is no data like this!");
- err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
- if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
- (long long)addr0);
- return err ? err : -1;
- }
-
- printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
- memset(readbuf, 0, pgsize);
- err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)addr0);
- return err ? err : -1;
- }
-
- printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum);
- if (memcmp(writebuf, readbuf, pgsize)) {
- printk(PRINT_PREF "verify failed!\n");
- errcnt += 1;
- return -1;
- }
-
- printk(PRINT_PREF "erasing block %d\n", ebnum);
- err = erase_eraseblock(ebnum);
- if (err)
- return err;
-
- printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
- set_random_data(writebuf, pgsize);
- strcpy(writebuf, "There is no data like this!");
- err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
- if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
- (long long)addr0);
- return err ? err : -1;
- }
-
- printk(PRINT_PREF "erasing block %d\n", ebnum2);
- err = erase_eraseblock(ebnum2);
- if (err)
- return err;
-
- printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
- memset(readbuf, 0, pgsize);
- err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)addr0);
- return err ? err : -1;
- }
-
- printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum);
- if (memcmp(writebuf, readbuf, pgsize)) {
- printk(PRINT_PREF "verify failed!\n");
- errcnt += 1;
- return -1;
- }
-
- if (!err)
- printk(PRINT_PREF "erasecrosstest ok\n");
- return err;
-}
-
-static int erasetest(void)
-{
- size_t read = 0, written = 0;
- int err = 0, i, ebnum, ok = 1;
- loff_t addr0;
-
- printk(PRINT_PREF "erasetest\n");
-
- ebnum = 0;
- addr0 = 0;
- for (i = 0; i < ebcnt && bbt[i]; ++i) {
- addr0 += mtd->erasesize;
- ebnum += 1;
- }
-
- printk(PRINT_PREF "erasing block %d\n", ebnum);
- err = erase_eraseblock(ebnum);
- if (err)
- return err;
-
- printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
- set_random_data(writebuf, pgsize);
- err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
- if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
- (long long)addr0);
- return err ? err : -1;
- }
-
- printk(PRINT_PREF "erasing block %d\n", ebnum);
- err = erase_eraseblock(ebnum);
- if (err)
- return err;
-
- printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
- err = mtd->read(mtd, addr0, pgsize, &read, twopages);
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)addr0);
- return err ? err : -1;
- }
-
- printk(PRINT_PREF "verifying 1st page of block %d is all 0xff\n",
- ebnum);
- for (i = 0; i < pgsize; ++i)
- if (twopages[i] != 0xff) {
- printk(PRINT_PREF "verifying all 0xff failed at %d\n",
- i);
- errcnt += 1;
- ok = 0;
- break;
- }
-
- if (ok && !err)
- printk(PRINT_PREF "erasetest ok\n");
-
- return err;
-}
-
-static int is_block_bad(int ebnum)
-{
- loff_t addr = ebnum * mtd->erasesize;
- int ret;
-
- ret = mtd->block_isbad(mtd, addr);
- if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
- return ret;
-}
-
-static int scan_for_bad_eraseblocks(void)
-{
- int i, bad = 0;
-
- bbt = kzalloc(ebcnt, GFP_KERNEL);
- if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
- return -ENOMEM;
- }
-
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
- for (i = 0; i < ebcnt; ++i) {
- bbt[i] = is_block_bad(i) ? 1 : 0;
- if (bbt[i])
- bad += 1;
- cond_resched();
- }
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
- return 0;
-}
-
-static int __init mtd_pagetest_init(void)
-{
- int err = 0;
- uint64_t tmp;
- uint32_t i;
-
- printk(KERN_INFO "\n");
- printk(KERN_INFO "=================================================\n");
- printk(PRINT_PREF "MTD device: %d\n", dev);
-
- mtd = get_mtd_device(NULL, dev);
- if (IS_ERR(mtd)) {
- err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
- return err;
- }
-
- if (mtd->type != MTD_NANDFLASH) {
- printk(PRINT_PREF "this test requires NAND flash\n");
- goto out;
- }
-
- tmp = mtd->size;
- do_div(tmp, mtd->erasesize);
- ebcnt = tmp;
- pgcnt = mtd->erasesize / mtd->writesize;
- pgsize = mtd->writesize;
-
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
- "page size %u, count of eraseblocks %u, pages per "
- "eraseblock %u, OOB size %u\n",
- (unsigned long long)mtd->size, mtd->erasesize,
- pgsize, ebcnt, pgcnt, mtd->oobsize);
-
- err = -ENOMEM;
- bufsize = pgsize * 2;
- writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!writebuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
- goto out;
- }
- twopages = kmalloc(bufsize, GFP_KERNEL);
- if (!twopages) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
- goto out;
- }
- boundary = kmalloc(bufsize, GFP_KERNEL);
- if (!boundary) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
- goto out;
- }
-
- err = scan_for_bad_eraseblocks();
- if (err)
- goto out;
-
- /* Erase all eraseblocks */
- printk(PRINT_PREF "erasing whole device\n");
- for (i = 0; i < ebcnt; ++i) {
- if (bbt[i])
- continue;
- err = erase_eraseblock(i);
- if (err)
- goto out;
- cond_resched();
- }
- printk(PRINT_PREF "erased %u eraseblocks\n", i);
-
- /* Write all eraseblocks */
- simple_srand(1);
- printk(PRINT_PREF "writing whole device\n");
- for (i = 0; i < ebcnt; ++i) {
- if (bbt[i])
- continue;
- err = write_eraseblock(i);
- if (err)
- goto out;
- if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
- cond_resched();
- }
- printk(PRINT_PREF "written %u eraseblocks\n", i);
-
- /* Check all eraseblocks */
- simple_srand(1);
- printk(PRINT_PREF "verifying all eraseblocks\n");
- for (i = 0; i < ebcnt; ++i) {
- if (bbt[i])
- continue;
- err = verify_eraseblock(i);
- if (err)
- goto out;
- if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
- cond_resched();
- }
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
-
- err = crosstest();
- if (err)
- goto out;
-
- err = erasecrosstest();
- if (err)
- goto out;
-
- err = erasetest();
- if (err)
- goto out;
-
- printk(PRINT_PREF "finished with %d errors\n", errcnt);
-out:
-
- kfree(bbt);
- kfree(boundary);
- kfree(twopages);
- kfree(writebuf);
- put_mtd_device(mtd);
- if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
- printk(KERN_INFO "=================================================\n");
- return err;
-}
-module_init(mtd_pagetest_init);
-
-static void __exit mtd_pagetest_exit(void)
-{
- return;
-}
-module_exit(mtd_pagetest_exit);
-
-MODULE_DESCRIPTION("NAND page test");
-MODULE_AUTHOR("Adrian Hunter");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_test.c b/drivers/mtd/tests/mtd_test.c
new file mode 100644
index 00000000000..111ee46a742
--- /dev/null
+++ b/drivers/mtd/tests/mtd_test.c
@@ -0,0 +1,113 @@
+#define pr_fmt(fmt) "mtd_test: " fmt
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/printk.h>
+
+#include "mtd_test.h"
+
+int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum)
+{
+ int err;
+ struct erase_info ei;
+ loff_t addr = ebnum * mtd->erasesize;
+
+ memset(&ei, 0, sizeof(struct erase_info));
+ ei.mtd = mtd;
+ ei.addr = addr;
+ ei.len = mtd->erasesize;
+
+ err = mtd_erase(mtd, &ei);
+ if (err) {
+ pr_info("error %d while erasing EB %d\n", err, ebnum);
+ return err;
+ }
+
+ if (ei.state == MTD_ERASE_FAILED) {
+ pr_info("some erase error occurred at EB %d\n", ebnum);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int is_block_bad(struct mtd_info *mtd, unsigned int ebnum)
+{
+ int ret;
+ loff_t addr = ebnum * mtd->erasesize;
+
+ ret = mtd_block_isbad(mtd, addr);
+ if (ret)
+ pr_info("block %d is bad\n", ebnum);
+
+ return ret;
+}
+
+int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt)
+{
+ int i, bad = 0;
+
+ if (!mtd_can_have_bb(mtd))
+ return 0;
+
+ pr_info("scanning for bad eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ bbt[i] = is_block_bad(mtd, eb + i) ? 1 : 0;
+ if (bbt[i])
+ bad += 1;
+ cond_resched();
+ }
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
+
+ return 0;
+}
+
+int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt)
+{
+ int err;
+ unsigned int i;
+
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = mtdtest_erase_eraseblock(mtd, eb + i);
+ if (err)
+ return err;
+ cond_resched();
+ }
+
+ return 0;
+}
+
+int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
+{
+ size_t read;
+ int err;
+
+ err = mtd_read(mtd, addr, size, &read, buf);
+ /* Ignore corrected ECC errors */
+ if (mtd_is_bitflip(err))
+ err = 0;
+ if (!err && read != size)
+ err = -EIO;
+ if (err)
+ pr_err("error: read failed at %#llx\n", addr);
+
+ return err;
+}
+
+int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
+ const void *buf)
+{
+ size_t written;
+ int err;
+
+ err = mtd_write(mtd, addr, size, &written, buf);
+ if (!err && written != size)
+ err = -EIO;
+ if (err)
+ pr_err("error: write failed at %#llx\n", addr);
+
+ return err;
+}
diff --git a/drivers/mtd/tests/mtd_test.h b/drivers/mtd/tests/mtd_test.h
new file mode 100644
index 00000000000..f437c776c54
--- /dev/null
+++ b/drivers/mtd/tests/mtd_test.h
@@ -0,0 +1,11 @@
+#include <linux/mtd/mtd.h>
+
+int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum);
+int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt);
+int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt);
+
+int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf);
+int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
+ const void *buf);
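The header above is the complete contract of the shared helpers: a test allocates one byte per eraseblock for bbt, lets mtdtest_scan_for_bad_eraseblocks() fill it, skips the marked blocks in its own loops, and uses mtdtest_read()/mtdtest_write() so the -EUCLEAN and short-transfer handling is no longer open-coded in every test. A rough usage sketch follows; example_run() is illustrative and not part of this patch.

#include <linux/math64.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include "mtd_test.h"

static int example_run(struct mtd_info *mtd, void *buf)
{
	int ebcnt = div_u64(mtd->size, mtd->erasesize);
	unsigned char *bbt;
	int i, err;

	bbt = kzalloc(ebcnt, GFP_KERNEL);
	if (!bbt)
		return -ENOMEM;

	/* Mark bad eraseblocks once, then skip them below */
	err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		goto out;

	err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		goto out;

	for (i = 0; i < ebcnt; i++) {
		loff_t addr = (loff_t)i * mtd->erasesize;

		if (bbt[i])
			continue;
		err = mtdtest_write(mtd, addr, mtd->erasesize, buf);
		if (!err)
			err = mtdtest_read(mtd, addr, mtd->erasesize, buf);
		if (err)
			break;
	}
out:
	kfree(bbt);
	return err;
}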
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
new file mode 100644
index 00000000000..6f976159611
--- /dev/null
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -0,0 +1,428 @@
+/*
+ * Copyright © 2012 NetCommWireless
+ * Iwo Mergler <Iwo.Mergler@netcommwireless.com.au>
+ *
+ * Test for multi-bit error recovery on a NAND page. This mostly tests the
+ * ECC controller / driver.
+ *
+ * There are two test modes:
+ *
+ * 0 - artificially inserting bit errors until the ECC fails
+ * This is the default method and fairly quick. It should
+ * be independent of the quality of the FLASH.
+ *
+ * 1 - re-writing the same pattern repeatedly until the ECC fails.
+ * This method relies on the physics of NAND FLASH to eventually
+ * generate '0' bits if '1' has been written sufficient times.
+ * Depending on the NAND, the first bit errors will appear after
+ * 1000 or more writes and then will usually snowball, reaching the
+ * limits of the ECC quickly.
+ *
+ * The test stops after 10000 cycles, should your FLASH be
+ * exceptionally good and not generate bit errors before that. Try
+ * a different page in that case.
+ *
+ * Please note that neither of these tests will significantly 'use up' any
+ * FLASH endurance. Only a maximum of two erase operations will be performed.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mtd/mtd.h>
+#include <linux/err.h>
+#include <linux/mtd/nand.h>
+#include <linux/slab.h>
+#include "mtd_test.h"
+
+static int dev;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static unsigned page_offset;
+module_param(page_offset, uint, S_IRUGO);
+MODULE_PARM_DESC(page_offset, "Page number relative to dev start");
+
+static unsigned seed;
+module_param(seed, uint, S_IRUGO);
+MODULE_PARM_DESC(seed, "Random seed");
+
+static int mode;
+module_param(mode, int, S_IRUGO);
+MODULE_PARM_DESC(mode, "0=incremental errors, 1=overwrite test");
+
+static unsigned max_overwrite = 10000;
+
+static loff_t offset; /* Offset of the page we're using. */
+static unsigned eraseblock; /* Eraseblock number for our page. */
+
+/* We assume that the ECC can correct up to a certain number
+ * of biterrors per subpage. */
+static unsigned subsize; /* Size of subpages */
+static unsigned subcount; /* Number of subpages per page */
+
+static struct mtd_info *mtd; /* MTD device */
+
+static uint8_t *wbuffer; /* One page write / compare buffer */
+static uint8_t *rbuffer; /* One page read buffer */
+
+/* 'random' bytes from known offsets */
+static uint8_t hash(unsigned offset)
+{
+ unsigned v = offset;
+ unsigned char c;
+ v ^= 0x7f7edfd3;
+ v = v ^ (v >> 3);
+ v = v ^ (v >> 5);
+ v = v ^ (v >> 13);
+ c = v & 0xFF;
+ /* Reverse bits of result. */
+ c = (c & 0x0F) << 4 | (c & 0xF0) >> 4;
+ c = (c & 0x33) << 2 | (c & 0xCC) >> 2;
+ c = (c & 0x55) << 1 | (c & 0xAA) >> 1;
+ return c;
+}
+
+/* Writes wbuffer to page */
+static int write_page(int log)
+{
+ if (log)
+ pr_info("write_page\n");
+
+ return mtdtest_write(mtd, offset, mtd->writesize, wbuffer);
+}
+
+/* Re-writes the data area while leaving the OOB alone. */
+static int rewrite_page(int log)
+{
+ int err = 0;
+ struct mtd_oob_ops ops;
+
+ if (log)
+ pr_info("rewrite page\n");
+
+ ops.mode = MTD_OPS_RAW; /* No ECC */
+ ops.len = mtd->writesize;
+ ops.retlen = 0;
+ ops.ooblen = 0;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = wbuffer;
+ ops.oobbuf = NULL;
+
+ err = mtd_write_oob(mtd, offset, &ops);
+ if (err || ops.retlen != mtd->writesize) {
+ pr_err("error: write_oob failed (%d)\n", err);
+ if (!err)
+ err = -EIO;
+ }
+
+ return err;
+}
+
+/* Reads page into rbuffer. Returns number of corrected bit errors (>=0)
+ * or error (<0) */
+static int read_page(int log)
+{
+ int err = 0;
+ size_t read;
+ struct mtd_ecc_stats oldstats;
+
+ if (log)
+ pr_info("read_page\n");
+
+ /* Saving last mtd stats */
+ memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
+
+ err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer);
+ if (err == -EUCLEAN)
+ err = mtd->ecc_stats.corrected - oldstats.corrected;
+
+ if (err < 0 || read != mtd->writesize) {
+ pr_err("error: read failed at %#llx\n", (long long)offset);
+ if (err >= 0)
+ err = -EIO;
+ }
+
+ return err;
+}
+
+/* Verifies rbuffer against random sequence */
+static int verify_page(int log)
+{
+ unsigned i, errs = 0;
+
+ if (log)
+ pr_info("verify_page\n");
+
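+ /* The expected byte at each offset is regenerated from hash(i + seed). */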
+ for (i = 0; i < mtd->writesize; i++) {
+ if (rbuffer[i] != hash(i+seed)) {
+ pr_err("Error: page offset %u, expected %02x, got %02x\n",
+ i, hash(i+seed), rbuffer[i]);
+ errs++;
+ }
+ }
+
+ if (errs)
+ return -EIO;
+ else
+ return 0;
+}
+
+#define CBIT(v, n) ((v) & (1 << (n)))
+#define BCLR(v, n) ((v) = (v) & ~(1 << (n)))
+
+/* Finds the first '1' bit in wbuffer starting at offset 'byte'
+ * and sets it to '0'. */
+static int insert_biterror(unsigned byte)
+{
+ int bit;
+
+ while (byte < mtd->writesize) {
+ for (bit = 7; bit >= 0; bit--) {
+ if (CBIT(wbuffer[byte], bit)) {
+ BCLR(wbuffer[byte], bit);
+ pr_info("Inserted biterror @ %u/%u\n", byte, bit);
+ return 0;
+ }
+ }
+ byte++;
+ }
+ pr_err("biterror: Failed to find a '1' bit\n");
+ return -EIO;
+}
+
+/* Writes 'random' data to page and then introduces deliberate bit
+ * errors into the page, while verifying each step. */
+static int incremental_errors_test(void)
+{
+ int err = 0;
+ unsigned i;
+ unsigned errs_per_subpage = 0;
+
+ pr_info("incremental biterrors test\n");
+
+ for (i = 0; i < mtd->writesize; i++)
+ wbuffer[i] = hash(i+seed);
+
+ err = write_page(1);
+ if (err)
+ goto exit;
+
+ while (1) {
+
+ err = rewrite_page(1);
+ if (err)
+ goto exit;
+
+ err = read_page(1);
+ if (err > 0)
+ pr_info("Read reported %d corrected bit errors\n", err);
+ if (err < 0) {
+ pr_err("After %d biterrors per subpage, read reported error %d\n",
+ errs_per_subpage, err);
+ err = 0;
+ goto exit;
+ }
+
+ err = verify_page(1);
+ if (err) {
+ pr_err("ECC failure, read data is incorrect despite read success\n");
+ goto exit;
+ }
+
+ pr_info("Successfully corrected %d bit errors per subpage\n",
+ errs_per_subpage);
+
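+ /* Clear one more '1' bit in every subpage, then go around again. */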
+ for (i = 0; i < subcount; i++) {
+ err = insert_biterror(i * subsize);
+ if (err < 0)
+ goto exit;
+ }
+ errs_per_subpage++;
+ }
+
+exit:
+ return err;
+}
+
+
+/* Writes 'random' data to page and then re-writes that same data repeatedly.
+ * This eventually develops bit errors (bits written as '1' will slowly become
+ * '0'), which are corrected as far as the ECC is capable of. */
+static int overwrite_test(void)
+{
+ int err = 0;
+ unsigned i;
+ unsigned max_corrected = 0;
+ unsigned opno = 0;
+ /* We don't expect more than this many correctable bit errors per
+ * page. */
+ #define MAXBITS 512
+ static unsigned bitstats[MAXBITS]; /* bit error histogram. */
+
+ memset(bitstats, 0, sizeof(bitstats));
+
+ pr_info("overwrite biterrors test\n");
+
+ for (i = 0; i < mtd->writesize; i++)
+ wbuffer[i] = hash(i+seed);
+
+ err = write_page(1);
+ if (err)
+ goto exit;
+
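+ /* Keep re-writing the same page, without erasing in between, until the
+  * data can no longer be read back correctly or max_overwrite cycles
+  * have been done. */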
+ while (opno < max_overwrite) {
+
+ err = rewrite_page(0);
+ if (err)
+ break;
+
+ err = read_page(0);
+ if (err >= 0) {
+ if (err >= MAXBITS) {
+ pr_info("Implausible number of bit errors corrected\n");
+ err = -EIO;
+ break;
+ }
+ bitstats[err]++;
+ if (err > max_corrected) {
+ max_corrected = err;
+ pr_info("Read reported %d corrected bit errors\n",
+ err);
+ }
+ } else { /* err < 0 */
+ pr_info("Read reported error %d\n", err);
+ err = 0;
+ break;
+ }
+
+ err = verify_page(0);
+ if (err) {
+ bitstats[max_corrected] = opno;
+ pr_info("ECC failure, read data is incorrect despite read success\n");
+ break;
+ }
+
+ opno++;
+ }
+
+ /* At this point bitstats[0] contains the number of ops with no bit
+ * errors, bitstats[1] the number of ops with 1 bit error, etc. */
+ pr_info("Bit error histogram (%d operations total):\n", opno);
+ for (i = 0; i < max_corrected; i++)
+ pr_info("Page reads with %3d corrected bit errors: %d\n",
+ i, bitstats[i]);
+
+exit:
+ return err;
+}
+
+static int __init mtd_nandbiterrs_init(void)
+{
+ int err = 0;
+
+ printk("\n");
+ printk(KERN_INFO "==================================================\n");
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ goto exit_mtddev;
+ }
+
+ if (!mtd_type_is_nand(mtd)) {
+ pr_info("this test requires NAND flash\n");
+ err = -ENODEV;
+ goto exit_nand;
+ }
+
+ pr_info("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ mtd->writesize, mtd->oobsize);
+
+ subsize = mtd->writesize >> mtd->subpage_sft;
+ subcount = mtd->writesize / subsize;
+
+ pr_info("Device uses %d subpages of %d bytes\n", subcount, subsize);
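+ /* For example, a 2048-byte page with subpage_sft == 2 is treated as four
+  * 512-byte subpages. */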
+
+ offset = page_offset * mtd->writesize;
+ eraseblock = mtd_div_by_eb(offset, mtd);
+
+ pr_info("Using page=%u, offset=%llu, eraseblock=%u\n",
+ page_offset, offset, eraseblock);
+
+ wbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!wbuffer) {
+ err = -ENOMEM;
+ goto exit_wbuffer;
+ }
+
+ rbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!rbuffer) {
+ err = -ENOMEM;
+ goto exit_rbuffer;
+ }
+
+ err = mtdtest_erase_eraseblock(mtd, eraseblock);
+ if (err)
+ goto exit_error;
+
+ if (mode == 0)
+ err = incremental_errors_test();
+ else
+ err = overwrite_test();
+
+ if (err)
+ goto exit_error;
+
+ /* We leave the block un-erased in case of test failure. */
+ err = mtdtest_erase_eraseblock(mtd, eraseblock);
+ if (err)
+ goto exit_error;
+
+ err = -EIO;
+ pr_info("finished successfully.\n");
+ printk(KERN_INFO "==================================================\n");
+
+exit_error:
+ kfree(rbuffer);
+exit_rbuffer:
+ kfree(wbuffer);
+exit_wbuffer:
+ /* Nothing */
+exit_nand:
+ put_mtd_device(mtd);
+exit_mtddev:
+ return err;
+}
+
+static void __exit mtd_nandbiterrs_exit(void)
+{
+ return;
+}
+
+module_init(mtd_nandbiterrs_init);
+module_exit(mtd_nandbiterrs_exit);
+
+MODULE_DESCRIPTION("NAND bit error recovery test");
+MODULE_AUTHOR("Iwo Mergler");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/oobtest.c
index dec92ae6111..f19ab1acde1 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <asm/div64.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -27,10 +29,11 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/random.h>
-#define PRINT_PREF KERN_INFO "mtd_oobtest: "
+#include "mtd_test.h"
-static int dev;
+static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -46,70 +49,7 @@ static int use_offset;
static int use_len;
static int use_len_max;
static int vary_offset;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
- next = next * 1103515245 + 12345;
- return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
- next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; ++i)
- buf[i] = simple_rand();
-}
-
-static int erase_eraseblock(int ebnum)
-{
- int err;
- struct erase_info ei;
- loff_t addr = ebnum * mtd->erasesize;
-
- memset(&ei, 0, sizeof(struct erase_info));
- ei.mtd = mtd;
- ei.addr = addr;
- ei.len = mtd->erasesize;
-
- err = mtd->erase(mtd, &ei);
- if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
- return err;
- }
-
- if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
- ebnum);
- return -EIO;
- }
-
- return 0;
-}
-
-static int erase_whole_device(void)
-{
- int err;
- unsigned int i;
-
- printk(PRINT_PREF "erasing whole device\n");
- for (i = 0; i < ebcnt; ++i) {
- if (bbt[i])
- continue;
- err = erase_eraseblock(i);
- if (err)
- return err;
- cond_resched();
- }
- printk(PRINT_PREF "erased %u eraseblocks\n", i);
- return 0;
-}
+static struct rnd_state rnd_state;
static void do_vary_offset(void)
{
@@ -129,21 +69,21 @@ static int write_eraseblock(int ebnum)
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
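+ /* Generate the OOB data for the whole eraseblock up front; the verify
+  * pass re-seeds the PRNG with the same seed and regenerates it. */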
+ prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
- set_random_data(writebuf, use_len);
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = use_len;
ops.oobretlen = 0;
ops.ooboffs = use_offset;
ops.datbuf = NULL;
- ops.oobbuf = writebuf;
- err = mtd->write_oob(mtd, addr, &ops);
+ ops.oobbuf = writebuf + (use_len_max * i) + use_offset;
+ err = mtd_write_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
- printk(PRINT_PREF "error: writeoob failed at %#llx\n",
+ pr_err("error: writeoob failed at %#llx\n",
(long long)addr);
- printk(PRINT_PREF "error: use_len %d, use_offset %d\n",
+ pr_err("error: use_len %d, use_offset %d\n",
use_len, use_offset);
errcnt += 1;
return err ? err : -1;
@@ -160,7 +100,7 @@ static int write_whole_device(void)
int err;
unsigned int i;
- printk(PRINT_PREF "writing OOBs of whole device\n");
+ pr_info("writing OOBs of whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -168,10 +108,10 @@ static int write_whole_device(void)
if (err)
return err;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
return 0;
}
@@ -182,9 +122,9 @@ static int verify_eraseblock(int ebnum)
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
+ prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
- set_random_data(writebuf, use_len);
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = use_len;
@@ -192,26 +132,27 @@ static int verify_eraseblock(int ebnum)
ops.ooboffs = use_offset;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
- printk(PRINT_PREF "error: readoob failed at %#llx\n",
+ pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
return err ? err : -1;
}
- if (memcmp(readbuf, writebuf, use_len)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ if (memcmp(readbuf, writebuf + (use_len_max * i) + use_offset,
+ use_len)) {
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many errors\n");
+ pr_err("error: too many errors\n");
return -1;
}
}
if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
int k;
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail;
@@ -219,31 +160,32 @@ static int verify_eraseblock(int ebnum)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
- printk(PRINT_PREF "error: readoob failed at "
- "%#llx\n", (long long)addr);
+ pr_err("error: readoob failed at %#llx\n",
+ (long long)addr);
errcnt += 1;
return err ? err : -1;
}
- if (memcmp(readbuf + use_offset, writebuf, use_len)) {
- printk(PRINT_PREF "error: verify failed at "
- "%#llx\n", (long long)addr);
+ if (memcmp(readbuf + use_offset,
+ writebuf + (use_len_max * i) + use_offset,
+ use_len)) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many "
- "errors\n");
+ pr_err("error: too many errors\n");
return -1;
}
}
for (k = 0; k < use_offset; ++k)
if (readbuf[k] != 0xff) {
- printk(PRINT_PREF "error: verify 0xff "
+ pr_err("error: verify 0xff "
"failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too "
+ pr_err("error: too "
"many errors\n");
return -1;
}
@@ -251,12 +193,12 @@ static int verify_eraseblock(int ebnum)
for (k = use_offset + use_len;
k < mtd->ecclayout->oobavail; ++k)
if (readbuf[k] != 0xff) {
- printk(PRINT_PREF "error: verify 0xff "
+ pr_err("error: verify 0xff "
"failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too "
+ pr_err("error: too "
"many errors\n");
return -1;
}
@@ -275,8 +217,8 @@ static int verify_eraseblock_in_one_go(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
size_t len = mtd->ecclayout->oobavail * pgcnt;
- set_random_data(writebuf, len);
- ops.mode = MTD_OOB_AUTO;
+ prandom_bytes_state(&rnd_state, writebuf, len);
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = len;
@@ -284,19 +226,19 @@ static int verify_eraseblock_in_one_go(int ebnum)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != len) {
- printk(PRINT_PREF "error: readoob failed at %#llx\n",
+ pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
return err ? err : -1;
}
if (memcmp(readbuf, writebuf, len)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many errors\n");
+ pr_err("error: too many errors\n");
return -1;
}
}
@@ -309,7 +251,7 @@ static int verify_all_eraseblocks(void)
int err;
unsigned int i;
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -317,42 +259,10 @@ static int verify_all_eraseblocks(void)
if (err)
return err;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
- return 0;
-}
-
-static int is_block_bad(int ebnum)
-{
- int ret;
- loff_t addr = ebnum * mtd->erasesize;
-
- ret = mtd->block_isbad(mtd, addr);
- if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
- return ret;
-}
-
-static int scan_for_bad_eraseblocks(void)
-{
- int i, bad = 0;
-
- bbt = kmalloc(ebcnt, GFP_KERNEL);
- if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
- return -ENOMEM;
- }
-
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
- for (i = 0; i < ebcnt; ++i) {
- bbt[i] = is_block_bad(i) ? 1 : 0;
- if (bbt[i])
- bad += 1;
- cond_resched();
- }
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("verified %u eraseblocks\n", i);
return 0;
}
@@ -366,17 +276,24 @@ static int __init mtd_oobtest_init(void)
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
- printk(PRINT_PREF "MTD device: %d\n", dev);
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
- if (mtd->type != MTD_NANDFLASH) {
- printk(PRINT_PREF "this test requires NAND flash\n");
+ if (!mtd_type_is_nand(mtd)) {
+ pr_info("this test requires NAND flash\n");
goto out;
}
@@ -385,7 +302,7 @@ static int __init mtd_oobtest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / mtd->writesize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -393,17 +310,16 @@ static int __init mtd_oobtest_init(void)
err = -ENOMEM;
readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!readbuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ if (!readbuf)
goto out;
- }
writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!writebuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ if (!writebuf)
+ goto out;
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
goto out;
- }
- err = scan_for_bad_eraseblocks();
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -413,18 +329,18 @@ static int __init mtd_oobtest_init(void)
vary_offset = 0;
/* First test: write all OOB, read it back and verify */
- printk(PRINT_PREF "test 1 of 5\n");
+ pr_info("test 1 of 5\n");
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
- simple_srand(1);
+ prandom_seed_state(&rnd_state, 1);
err = write_whole_device();
if (err)
goto out;
- simple_srand(1);
+ prandom_seed_state(&rnd_state, 1);
err = verify_all_eraseblocks();
if (err)
goto out;
@@ -433,20 +349,20 @@ static int __init mtd_oobtest_init(void)
* Second test: write all OOB, a block at a time, read it back and
* verify.
*/
- printk(PRINT_PREF "test 2 of 5\n");
+ pr_info("test 2 of 5\n");
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
- simple_srand(3);
+ prandom_seed_state(&rnd_state, 3);
err = write_whole_device();
if (err)
goto out;
/* Check all eraseblocks */
- simple_srand(3);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ prandom_seed_state(&rnd_state, 3);
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -454,18 +370,18 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
/*
* Third test: write OOB at varying offsets and lengths, read it back
* and verify.
*/
- printk(PRINT_PREF "test 3 of 5\n");
+ pr_info("test 3 of 5\n");
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -474,7 +390,7 @@ static int __init mtd_oobtest_init(void)
use_len = mtd->ecclayout->oobavail;
use_len_max = mtd->ecclayout->oobavail;
vary_offset = 1;
- simple_srand(5);
+ prandom_seed_state(&rnd_state, 5);
err = write_whole_device();
if (err)
@@ -485,7 +401,7 @@ static int __init mtd_oobtest_init(void)
use_len = mtd->ecclayout->oobavail;
use_len_max = mtd->ecclayout->oobavail;
vary_offset = 1;
- simple_srand(5);
+ prandom_seed_state(&rnd_state, 5);
err = verify_all_eraseblocks();
if (err)
goto out;
@@ -496,9 +412,9 @@ static int __init mtd_oobtest_init(void)
vary_offset = 0;
/* Fourth test: try to write off end of device */
- printk(PRINT_PREF "test 4 of 5\n");
+ pr_info("test 4 of 5\n");
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -507,7 +423,7 @@ static int __init mtd_oobtest_init(void)
addr0 += mtd->erasesize;
/* Attempt to write off end of OOB */
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = 1;
@@ -515,19 +431,19 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = mtd->ecclayout->oobavail;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- printk(PRINT_PREF "attempting to start write past end of OOB\n");
- printk(PRINT_PREF "an error is expected...\n");
- err = mtd->write_oob(mtd, addr0, &ops);
+ pr_info("attempting to start write past end of OOB\n");
+ pr_info("an error is expected...\n");
+ err = mtd_write_oob(mtd, addr0, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: can write past end of OOB\n");
+ pr_err("error: can write past end of OOB\n");
errcnt += 1;
}
/* Attempt to read off end of OOB */
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = 1;
@@ -535,23 +451,23 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = mtd->ecclayout->oobavail;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- printk(PRINT_PREF "attempting to start read past end of OOB\n");
- printk(PRINT_PREF "an error is expected...\n");
- err = mtd->read_oob(mtd, addr0, &ops);
+ pr_info("attempting to start read past end of OOB\n");
+ pr_info("an error is expected...\n");
+ err = mtd_read_oob(mtd, addr0, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: can read past end of OOB\n");
+ pr_err("error: can read past end of OOB\n");
errcnt += 1;
}
if (bbt[ebcnt - 1])
- printk(PRINT_PREF "skipping end of device tests because last "
+ pr_info("skipping end of device tests because last "
"block is bad\n");
else {
/* Attempt to write off end of device */
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail + 1;
@@ -559,19 +475,19 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- printk(PRINT_PREF "attempting to write past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
- err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
+ pr_info("attempting to write past end of device\n");
+ pr_info("an error is expected...\n");
+ err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: wrote past end of device\n");
+ pr_err("error: wrote past end of device\n");
errcnt += 1;
}
/* Attempt to read off end of device */
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail + 1;
@@ -579,23 +495,23 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- printk(PRINT_PREF "attempting to read past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
- err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ pr_info("attempting to read past end of device\n");
+ pr_info("an error is expected...\n");
+ err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: read past end of device\n");
+ pr_err("error: read past end of device\n");
errcnt += 1;
}
- err = erase_eraseblock(ebcnt - 1);
+ err = mtdtest_erase_eraseblock(mtd, ebcnt - 1);
if (err)
goto out;
/* Attempt to write off end of device */
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail;
@@ -603,19 +519,19 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 1;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- printk(PRINT_PREF "attempting to write past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
- err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
+ pr_info("attempting to write past end of device\n");
+ pr_info("an error is expected...\n");
+ err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: wrote past end of device\n");
+ pr_err("error: wrote past end of device\n");
errcnt += 1;
}
/* Attempt to read off end of device */
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail;
@@ -623,29 +539,29 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 1;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- printk(PRINT_PREF "attempting to read past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
- err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ pr_info("attempting to read past end of device\n");
+ pr_info("an error is expected...\n");
+ err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: read past end of device\n");
+ pr_err("error: read past end of device\n");
errcnt += 1;
}
}
/* Fifth test: write / read across block boundaries */
- printk(PRINT_PREF "test 5 of 5\n");
+ pr_info("test 5 of 5\n");
/* Erase all eraseblocks */
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Write all eraseblocks */
- simple_srand(11);
- printk(PRINT_PREF "writing OOBs of whole device\n");
+ prandom_seed_state(&rnd_state, 11);
+ pr_info("writing OOBs of whole device\n");
for (i = 0; i < ebcnt - 1; ++i) {
int cnt = 2;
int pg;
@@ -653,37 +569,37 @@ static int __init mtd_oobtest_init(void)
if (bbt[i] || bbt[i + 1])
continue;
addr = (i + 1) * mtd->erasesize - mtd->writesize;
+ prandom_bytes_state(&rnd_state, writebuf, sz * cnt);
for (pg = 0; pg < cnt; ++pg) {
- set_random_data(writebuf, sz);
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = sz;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
- ops.oobbuf = writebuf;
- err = mtd->write_oob(mtd, addr, &ops);
+ ops.oobbuf = writebuf + pg * sz;
+ err = mtd_write_oob(mtd, addr, &ops);
if (err)
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock "
- "%u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
addr += mtd->writesize;
}
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
- simple_srand(11);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ prandom_seed_state(&rnd_state, 11);
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt - 1; ++i) {
if (bbt[i] || bbt[i + 1])
continue;
- set_random_data(writebuf, mtd->ecclayout->oobavail * 2);
+ prandom_bytes_state(&rnd_state, writebuf,
+ mtd->ecclayout->oobavail * 2);
addr = (i + 1) * mtd->erasesize - mtd->writesize;
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail * 2;
@@ -691,32 +607,32 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err)
goto out;
if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many errors\n");
+ pr_err("error: too many errors\n");
goto out;
}
}
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
- printk(PRINT_PREF "finished with %d errors\n", errcnt);
+ pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
kfree(writebuf);
kfree(readbuf);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/pagetest.c b/drivers/mtd/tests/pagetest.c
new file mode 100644
index 00000000000..ed2d3f656fd
--- /dev/null
+++ b/drivers/mtd/tests/pagetest.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Test page read and write on MTD device.
+ *
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/div64.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#include "mtd_test.h"
+
+static int dev = -EINVAL;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
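+/*
+ * Typical invocation (module name taken from the tests Makefile, shown
+ * purely as an example):
+ *
+ *   modprobe mtd_pagetest dev=0
+ */
+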
+static struct mtd_info *mtd;
+static unsigned char *twopages;
+static unsigned char *writebuf;
+static unsigned char *boundary;
+static unsigned char *bbt;
+
+static int pgsize;
+static int bufsize;
+static int ebcnt;
+static int pgcnt;
+static int errcnt;
+static struct rnd_state rnd_state;
+
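+/*
+ * The write pass and the verify pass seed rnd_state with the same value, so
+ * the expected page contents can be regenerated on the fly instead of being
+ * kept in memory.
+ */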
+static int write_eraseblock(int ebnum)
+{
+ loff_t addr = ebnum * mtd->erasesize;
+
+ prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
+ cond_resched();
+ return mtdtest_write(mtd, addr, mtd->erasesize, writebuf);
+}
+
+static int verify_eraseblock(int ebnum)
+{
+ uint32_t j;
+ int err = 0, i;
+ loff_t addr0, addrn;
+ loff_t addr = ebnum * mtd->erasesize;
+
+ addr0 = 0;
+ for (i = 0; i < ebcnt && bbt[i]; ++i)
+ addr0 += mtd->erasesize;
+
+ addrn = mtd->size;
+ for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
+ addrn -= mtd->erasesize;
+
+ prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
+ for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
+ /* Do a read to set the internal dataRAMs to different data */
+ err = mtdtest_read(mtd, addr0, bufsize, twopages);
+ if (err)
+ return err;
+ err = mtdtest_read(mtd, addrn - bufsize, bufsize, twopages);
+ if (err)
+ return err;
+ memset(twopages, 0, bufsize);
+ err = mtdtest_read(mtd, addr, bufsize, twopages);
+ if (err)
+ break;
+ if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ }
+ }
+ /* Check boundary between eraseblocks */
+ if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
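+ /* Save the PRNG state: peeking at the next eraseblock's data below must
+  * not disturb the stream that the remaining blocks are verified against. */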
+ struct rnd_state old_state = rnd_state;
+
+ /* Do a read to set the internal dataRAMs to different data */
+ err = mtdtest_read(mtd, addr0, bufsize, twopages);
+ if (err)
+ return err;
+ err = mtdtest_read(mtd, addrn - bufsize, bufsize, twopages);
+ if (err)
+ return err;
+ memset(twopages, 0, bufsize);
+ err = mtdtest_read(mtd, addr, bufsize, twopages);
+ if (err)
+ return err;
+ memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
+ prandom_bytes_state(&rnd_state, boundary + pgsize, pgsize);
+ if (memcmp(twopages, boundary, bufsize)) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ }
+ rnd_state = old_state;
+ }
+ return err;
+}
+
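+/*
+ * crosstest: read pages near the start and the end of the device in an
+ * interleaved pattern and check that re-reading the first page returns the
+ * same data both times, i.e. that reads do not interfere with each other.
+ */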
+static int crosstest(void)
+{
+ int err = 0, i;
+ loff_t addr, addr0, addrn;
+ unsigned char *pp1, *pp2, *pp3, *pp4;
+
+ pr_info("crosstest\n");
+ pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
+ if (!pp1)
+ return -ENOMEM;
+ pp2 = pp1 + pgsize;
+ pp3 = pp2 + pgsize;
+ pp4 = pp3 + pgsize;
+ memset(pp1, 0, pgsize * 4);
+
+ addr0 = 0;
+ for (i = 0; i < ebcnt && bbt[i]; ++i)
+ addr0 += mtd->erasesize;
+
+ addrn = mtd->size;
+ for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
+ addrn -= mtd->erasesize;
+
+ /* Read 2nd-to-last page to pp1 */
+ addr = addrn - pgsize - pgsize;
+ err = mtdtest_read(mtd, addr, pgsize, pp1);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* Read 3rd-to-last page to pp1 */
+ addr = addrn - pgsize - pgsize - pgsize;
+ err = mtdtest_read(mtd, addr, pgsize, pp1);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* Read first page to pp2 */
+ addr = addr0;
+ pr_info("reading page at %#llx\n", (long long)addr);
+ err = mtdtest_read(mtd, addr, pgsize, pp2);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* Read last page to pp3 */
+ addr = addrn - pgsize;
+ pr_info("reading page at %#llx\n", (long long)addr);
+ err = mtdtest_read(mtd, addr, pgsize, pp3);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* Read first page again to pp4 */
+ addr = addr0;
+ pr_info("reading page at %#llx\n", (long long)addr);
+ err = mtdtest_read(mtd, addr, pgsize, pp4);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* pp2 and pp4 should be the same */
+ pr_info("verifying pages read at %#llx match\n",
+ (long long)addr0);
+ if (memcmp(pp2, pp4, pgsize)) {
+ pr_err("verify failed!\n");
+ errcnt += 1;
+ } else if (!err)
+ pr_info("crosstest ok\n");
+ kfree(pp1);
+ return err;
+}
+
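+/*
+ * erasecrosstest: write and verify a marker page in the first good
+ * eraseblock, then write it again and check that erasing the last good
+ * eraseblock leaves that page intact.
+ */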
+static int erasecrosstest(void)
+{
+ int err = 0, i, ebnum, ebnum2;
+ loff_t addr0;
+ char *readbuf = twopages;
+
+ pr_info("erasecrosstest\n");
+
+ ebnum = 0;
+ addr0 = 0;
+ for (i = 0; i < ebcnt && bbt[i]; ++i) {
+ addr0 += mtd->erasesize;
+ ebnum += 1;
+ }
+
+ ebnum2 = ebcnt - 1;
+ while (ebnum2 && bbt[ebnum2])
+ ebnum2 -= 1;
+
+ pr_info("erasing block %d\n", ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
+ if (err)
+ return err;
+
+ pr_info("writing 1st page of block %d\n", ebnum);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
+ strcpy(writebuf, "There is no data like this!");
+ err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+ if (err)
+ return err;
+
+ pr_info("reading 1st page of block %d\n", ebnum);
+ memset(readbuf, 0, pgsize);
+ err = mtdtest_read(mtd, addr0, pgsize, readbuf);
+ if (err)
+ return err;
+
+ pr_info("verifying 1st page of block %d\n", ebnum);
+ if (memcmp(writebuf, readbuf, pgsize)) {
+ pr_err("verify failed!\n");
+ errcnt += 1;
+ return -1;
+ }
+
+ pr_info("erasing block %d\n", ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
+ if (err)
+ return err;
+
+ pr_info("writing 1st page of block %d\n", ebnum);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
+ strcpy(writebuf, "There is no data like this!");
+ err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+ if (err)
+ return err;
+
+ pr_info("erasing block %d\n", ebnum2);
+ err = mtdtest_erase_eraseblock(mtd, ebnum2);
+ if (err)
+ return err;
+
+ pr_info("reading 1st page of block %d\n", ebnum);
+ memset(readbuf, 0, pgsize);
+ err = mtdtest_read(mtd, addr0, pgsize, readbuf);
+ if (err)
+ return err;
+
+ pr_info("verifying 1st page of block %d\n", ebnum);
+ if (memcmp(writebuf, readbuf, pgsize)) {
+ pr_err("verify failed!\n");
+ errcnt += 1;
+ return -1;
+ }
+
+ if (!err)
+ pr_info("erasecrosstest ok\n");
+ return err;
+}
+
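+/*
+ * erasetest: write one page, erase its eraseblock and check that the page
+ * then reads back as all 0xff.
+ */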
+static int erasetest(void)
+{
+ int err = 0, i, ebnum, ok = 1;
+ loff_t addr0;
+
+ pr_info("erasetest\n");
+
+ ebnum = 0;
+ addr0 = 0;
+ for (i = 0; i < ebcnt && bbt[i]; ++i) {
+ addr0 += mtd->erasesize;
+ ebnum += 1;
+ }
+
+ pr_info("erasing block %d\n", ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
+ if (err)
+ return err;
+
+ pr_info("writing 1st page of block %d\n", ebnum);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
+ err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+ if (err)
+ return err;
+
+ pr_info("erasing block %d\n", ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
+ if (err)
+ return err;
+
+ pr_info("reading 1st page of block %d\n", ebnum);
+ err = mtdtest_read(mtd, addr0, pgsize, twopages);
+ if (err)
+ return err;
+
+ pr_info("verifying 1st page of block %d is all 0xff\n",
+ ebnum);
+ for (i = 0; i < pgsize; ++i)
+ if (twopages[i] != 0xff) {
+ pr_err("verifying all 0xff failed at %d\n",
+ i);
+ errcnt += 1;
+ ok = 0;
+ break;
+ }
+
+ if (ok && !err)
+ pr_info("erasetest ok\n");
+
+ return err;
+}
+
+static int __init mtd_pagetest_init(void)
+{
+ int err = 0;
+ uint64_t tmp;
+ uint32_t i;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ return err;
+ }
+
+ if (!mtd_type_is_nand(mtd)) {
+ pr_info("this test requires NAND flash\n");
+ goto out;
+ }
+
+ tmp = mtd->size;
+ do_div(tmp, mtd->erasesize);
+ ebcnt = tmp;
+ pgcnt = mtd->erasesize / mtd->writesize;
+ pgsize = mtd->writesize;
+
+ pr_info("MTD device size %llu, eraseblock size %u, "
+ "page size %u, count of eraseblocks %u, pages per "
+ "eraseblock %u, OOB size %u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ pgsize, ebcnt, pgcnt, mtd->oobsize);
+
+ err = -ENOMEM;
+ bufsize = pgsize * 2;
+ writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!writebuf)
+ goto out;
+ twopages = kmalloc(bufsize, GFP_KERNEL);
+ if (!twopages)
+ goto out;
+ boundary = kmalloc(bufsize, GFP_KERNEL);
+ if (!boundary)
+ goto out;
+
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Erase all eraseblocks */
+ pr_info("erasing whole device\n");
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+ pr_info("erased %u eraseblocks\n", ebcnt);
+
+ /* Write all eraseblocks */
+ prandom_seed_state(&rnd_state, 1);
+ pr_info("writing whole device\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock(i);
+ if (err)
+ goto out;
+ if (i % 256 == 0)
+ pr_info("written up to eraseblock %u\n", i);
+ cond_resched();
+ }
+ pr_info("written %u eraseblocks\n", i);
+
+ /* Check all eraseblocks */
+ prandom_seed_state(&rnd_state, 1);
+ pr_info("verifying all eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = verify_eraseblock(i);
+ if (err)
+ goto out;
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+ cond_resched();
+ }
+ pr_info("verified %u eraseblocks\n", i);
+
+ err = crosstest();
+ if (err)
+ goto out;
+
+ err = erasecrosstest();
+ if (err)
+ goto out;
+
+ err = erasetest();
+ if (err)
+ goto out;
+
+ pr_info("finished with %d errors\n", errcnt);
+out:
+
+ kfree(bbt);
+ kfree(boundary);
+ kfree(twopages);
+ kfree(writebuf);
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(mtd_pagetest_init);
+
+static void __exit mtd_pagetest_exit(void)
+{
+ return;
+}
+module_exit(mtd_pagetest_exit);
+
+MODULE_DESCRIPTION("NAND page test");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/readtest.c
index afe71aa15c4..626e66d0f7e 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/readtest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -27,9 +29,9 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_readtest: "
+#include "mtd_test.h"
-static int dev;
+static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -44,29 +46,22 @@ static int pgcnt;
static int read_eraseblock_by_page(int ebnum)
{
- size_t read = 0;
int i, ret, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
void *oobbuf = iobuf1;
for (i = 0; i < pgcnt; i++) {
- memset(buf, 0 , pgcnt);
- ret = mtd->read(mtd, addr, pgsize, &read, buf);
- if (ret == -EUCLEAN)
- ret = 0;
- if (ret || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- (long long)addr);
+ memset(buf, 0 , pgsize);
+ ret = mtdtest_read(mtd, addr, pgsize, buf);
+ if (ret) {
if (!err)
err = ret;
- if (!err)
- err = -EINVAL;
}
if (mtd->oobsize) {
struct mtd_oob_ops ops;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->oobsize;
@@ -74,9 +69,10 @@ static int read_eraseblock_by_page(int ebnum)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = oobbuf;
- ret = mtd->read_oob(mtd, addr, &ops);
- if (ret || ops.oobretlen != mtd->oobsize) {
- printk(PRINT_PREF "error: read oob failed at "
+ ret = mtd_read_oob(mtd, addr, &ops);
+ if ((ret && !mtd_is_bitflip(ret)) ||
+ ops.oobretlen != mtd->oobsize) {
+ pr_err("error: read oob failed at "
"%#llx\n", (long long)addr);
if (!err)
err = ret;
@@ -98,7 +94,7 @@ static void dump_eraseblock(int ebnum)
char line[128];
int pg, oob;
- printk(PRINT_PREF "dumping eraseblock %d\n", ebnum);
+ pr_info("dumping eraseblock %d\n", ebnum);
n = mtd->erasesize;
for (i = 0; i < n;) {
char *p = line;
@@ -111,7 +107,7 @@ static void dump_eraseblock(int ebnum)
}
if (!mtd->oobsize)
return;
- printk(PRINT_PREF "dumping oob from eraseblock %d\n", ebnum);
+ pr_info("dumping oob from eraseblock %d\n", ebnum);
n = mtd->oobsize;
for (pg = 0, i = 0; pg < pgcnt; pg++)
for (oob = 0; oob < n;) {
@@ -126,42 +122,6 @@ static void dump_eraseblock(int ebnum)
}
}
-static int is_block_bad(int ebnum)
-{
- loff_t addr = ebnum * mtd->erasesize;
- int ret;
-
- ret = mtd->block_isbad(mtd, addr);
- if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
- return ret;
-}
-
-static int scan_for_bad_eraseblocks(void)
-{
- int i, bad = 0;
-
- bbt = kzalloc(ebcnt, GFP_KERNEL);
- if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
- return -ENOMEM;
- }
-
- /* NOR flash does not implement block_isbad */
- if (mtd->block_isbad == NULL)
- return 0;
-
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
- for (i = 0; i < ebcnt; ++i) {
- bbt[i] = is_block_bad(i) ? 1 : 0;
- if (bbt[i])
- bad += 1;
- cond_resched();
- }
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
- return 0;
-}
-
static int __init mtd_readtest_init(void)
{
uint64_t tmp;
@@ -169,17 +129,23 @@ static int __init mtd_readtest_init(void)
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
- printk(PRINT_PREF "MTD device: %d\n", dev);
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: Cannot get MTD device\n");
+ pr_err("error: Cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
@@ -190,7 +156,7 @@ static int __init mtd_readtest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -198,22 +164,21 @@ static int __init mtd_readtest_init(void)
err = -ENOMEM;
iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!iobuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ if (!iobuf)
goto out;
- }
iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!iobuf1) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ if (!iobuf1)
goto out;
- }
- err = scan_for_bad_eraseblocks();
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Read all eraseblocks 1 page at a time */
- printk(PRINT_PREF "testing page read\n");
+ pr_info("testing page read\n");
for (i = 0; i < ebcnt; ++i) {
int ret;
@@ -229,9 +194,9 @@ static int __init mtd_readtest_init(void)
}
if (err)
- printk(PRINT_PREF "finished with errors\n");
+ pr_info("finished with errors\n");
else
- printk(PRINT_PREF "finished\n");
+ pr_info("finished\n");
out:
@@ -240,7 +205,7 @@ out:
kfree(bbt);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/speedtest.c
index 627d4e2466a..87ff6a29f84 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/speedtest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -26,10 +28,11 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/random.h>
-#define PRINT_PREF KERN_INFO "mtd_speedtest: "
+#include "mtd_test.h"
-static int dev;
+static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -47,52 +50,6 @@ static int ebcnt;
static int pgcnt;
static int goodebcnt;
static struct timeval start, finish;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
- next = next * 1103515245 + 12345;
- return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
- next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; ++i)
- buf[i] = simple_rand();
-}
-
-static int erase_eraseblock(int ebnum)
-{
- int err;
- struct erase_info ei;
- loff_t addr = ebnum * mtd->erasesize;
-
- memset(&ei, 0, sizeof(struct erase_info));
- ei.mtd = mtd;
- ei.addr = addr;
- ei.len = mtd->erasesize;
-
- err = mtd->erase(mtd, &ei);
- if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
- return err;
- }
-
- if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
- ebnum);
- return -EIO;
- }
-
- return 0;
-}
static int multiblock_erase(int ebnum, int blocks)
{
@@ -105,15 +62,15 @@ static int multiblock_erase(int ebnum, int blocks)
ei.addr = addr;
ei.len = mtd->erasesize * blocks;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n",
+ pr_err("error %d while erasing EB %d, blocks %d\n",
err, ebnum, blocks);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d,"
+ pr_err("some erase error occurred at EB %d,"
"blocks %d\n", ebnum, blocks);
return -EIO;
}
@@ -121,54 +78,23 @@ static int multiblock_erase(int ebnum, int blocks)
return 0;
}
-static int erase_whole_device(void)
-{
- int err;
- unsigned int i;
-
- for (i = 0; i < ebcnt; ++i) {
- if (bbt[i])
- continue;
- err = erase_eraseblock(i);
- if (err)
- return err;
- cond_resched();
- }
- return 0;
-}
-
static int write_eraseblock(int ebnum)
{
- size_t written = 0;
- int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- err = mtd->write(mtd, addr, mtd->erasesize, &written, iobuf);
- if (err || written != mtd->erasesize) {
- printk(PRINT_PREF "error: write failed at %#llx\n", addr);
- if (!err)
- err = -EINVAL;
- }
-
- return err;
+ return mtdtest_write(mtd, addr, mtd->erasesize, iobuf);
}
static int write_eraseblock_by_page(int ebnum)
{
- size_t written = 0;
int i, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < pgcnt; i++) {
- err = mtd->write(mtd, addr, pgsize, &written, buf);
- if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
- addr);
- if (!err)
- err = -EINVAL;
+ err = mtdtest_write(mtd, addr, pgsize, buf);
+ if (err)
break;
- }
addr += pgsize;
buf += pgsize;
}
@@ -178,74 +104,41 @@ static int write_eraseblock_by_page(int ebnum)
static int write_eraseblock_by_2pages(int ebnum)
{
- size_t written = 0, sz = pgsize * 2;
+ size_t sz = pgsize * 2;
int i, n = pgcnt / 2, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < n; i++) {
- err = mtd->write(mtd, addr, sz, &written, buf);
- if (err || written != sz) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
- addr);
- if (!err)
- err = -EINVAL;
+ err = mtdtest_write(mtd, addr, sz, buf);
+ if (err)
return err;
- }
addr += sz;
buf += sz;
}
- if (pgcnt % 2) {
- err = mtd->write(mtd, addr, pgsize, &written, buf);
- if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
- addr);
- if (!err)
- err = -EINVAL;
- }
- }
+ if (pgcnt % 2)
+ err = mtdtest_write(mtd, addr, pgsize, buf);
return err;
}
static int read_eraseblock(int ebnum)
{
- size_t read = 0;
- int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf);
- /* Ignore corrected ECC errors */
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != mtd->erasesize) {
- printk(PRINT_PREF "error: read failed at %#llx\n", addr);
- if (!err)
- err = -EINVAL;
- }
-
- return err;
+ return mtdtest_read(mtd, addr, mtd->erasesize, iobuf);
}
static int read_eraseblock_by_page(int ebnum)
{
- size_t read = 0;
int i, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < pgcnt; i++) {
- err = mtd->read(mtd, addr, pgsize, &read, buf);
- /* Ignore corrected ECC errors */
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- addr);
- if (!err)
- err = -EINVAL;
+ err = mtdtest_read(mtd, addr, pgsize, buf);
+ if (err)
break;
- }
addr += pgsize;
buf += pgsize;
}
@@ -255,53 +148,24 @@ static int read_eraseblock_by_page(int ebnum)
static int read_eraseblock_by_2pages(int ebnum)
{
- size_t read = 0, sz = pgsize * 2;
+ size_t sz = pgsize * 2;
int i, n = pgcnt / 2, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < n; i++) {
- err = mtd->read(mtd, addr, sz, &read, buf);
- /* Ignore corrected ECC errors */
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != sz) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- addr);
- if (!err)
- err = -EINVAL;
+ err = mtdtest_read(mtd, addr, sz, buf);
+ if (err)
return err;
- }
addr += sz;
buf += sz;
}
- if (pgcnt % 2) {
- err = mtd->read(mtd, addr, pgsize, &read, buf);
- /* Ignore corrected ECC errors */
- if (err == -EUCLEAN)
- err = 0;
- if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
- addr);
- if (!err)
- err = -EINVAL;
- }
- }
+ if (pgcnt % 2)
+ err = mtdtest_read(mtd, addr, pgsize, buf);
return err;
}
-static int is_block_bad(int ebnum)
-{
- loff_t addr = ebnum * mtd->erasesize;
- int ret;
-
- ret = mtd->block_isbad(mtd, addr);
- if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
- return ret;
-}
-
static inline void start_timing(void)
{
do_gettimeofday(&start);
@@ -326,33 +190,6 @@ static long calc_speed(void)
return k;
}
-static int scan_for_bad_eraseblocks(void)
-{
- int i, bad = 0;
-
- bbt = kzalloc(ebcnt, GFP_KERNEL);
- if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
- return -ENOMEM;
- }
-
- /* NOR flash does not implement block_isbad */
- if (mtd->block_isbad == NULL)
- goto out;
-
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
- for (i = 0; i < ebcnt; ++i) {
- bbt[i] = is_block_bad(i) ? 1 : 0;
- if (bbt[i])
- bad += 1;
- cond_resched();
- }
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
-out:
- goodebcnt = ebcnt - bad;
- return 0;
-}
-
static int __init mtd_speedtest_init(void)
{
int err, i, blocks, j, k;
@@ -361,20 +198,27 @@ static int __init mtd_speedtest_init(void)
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
if (count)
- printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count);
+ pr_info("MTD device: %d count: %d\n", dev, count);
else
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
@@ -385,7 +229,7 @@ static int __init mtd_speedtest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -396,24 +240,28 @@ static int __init mtd_speedtest_init(void)
err = -ENOMEM;
iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!iobuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ if (!iobuf)
goto out;
- }
- simple_srand(1);
- set_random_data(iobuf, mtd->erasesize);
+ prandom_bytes(iobuf, mtd->erasesize);
- err = scan_for_bad_eraseblocks();
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
+ for (i = 0; i < ebcnt; i++) {
+ if (!bbt[i])
+ goodebcnt++;
+ }
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Write all eraseblocks, 1 eraseblock at a time */
- printk(PRINT_PREF "testing eraseblock write speed\n");
+ pr_info("testing eraseblock write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -425,10 +273,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "eraseblock write speed is %ld KiB/s\n", speed);
+ pr_info("eraseblock write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 1 eraseblock at a time */
- printk(PRINT_PREF "testing eraseblock read speed\n");
+ pr_info("testing eraseblock read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -440,14 +288,14 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "eraseblock read speed is %ld KiB/s\n", speed);
+ pr_info("eraseblock read speed is %ld KiB/s\n", speed);
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Write all eraseblocks, 1 page at a time */
- printk(PRINT_PREF "testing page write speed\n");
+ pr_info("testing page write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -459,10 +307,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "page write speed is %ld KiB/s\n", speed);
+ pr_info("page write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 1 page at a time */
- printk(PRINT_PREF "testing page read speed\n");
+ pr_info("testing page read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -474,14 +322,14 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "page read speed is %ld KiB/s\n", speed);
+ pr_info("page read speed is %ld KiB/s\n", speed);
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Write all eraseblocks, 2 pages at a time */
- printk(PRINT_PREF "testing 2 page write speed\n");
+ pr_info("testing 2 page write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -493,10 +341,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "2 page write speed is %ld KiB/s\n", speed);
+ pr_info("2 page write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 2 pages at a time */
- printk(PRINT_PREF "testing 2 page read speed\n");
+ pr_info("testing 2 page read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -508,27 +356,22 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "2 page read speed is %ld KiB/s\n", speed);
+ pr_info("2 page read speed is %ld KiB/s\n", speed);
/* Erase all eraseblocks */
- printk(PRINT_PREF "Testing erase speed\n");
+ pr_info("Testing erase speed\n");
start_timing();
- for (i = 0; i < ebcnt; ++i) {
- if (bbt[i])
- continue;
- err = erase_eraseblock(i);
- if (err)
- goto out;
- cond_resched();
- }
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed);
+ pr_info("erase speed is %ld KiB/s\n", speed);
/* Multi-block erase all eraseblocks */
for (k = 1; k < 7; k++) {
blocks = 1 << k;
- printk(PRINT_PREF "Testing %dx multi-block erase speed\n",
+ pr_info("Testing %dx multi-block erase speed\n",
blocks);
start_timing();
for (i = 0; i < ebcnt; ) {
@@ -547,16 +390,16 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "%dx multi-block erase speed is %ld KiB/s\n",
+ pr_info("%dx multi-block erase speed is %ld KiB/s\n",
blocks, speed);
}
- printk(PRINT_PREF "finished\n");
+ pr_info("finished\n");
out:
kfree(iobuf);
kfree(bbt);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
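Editor's note: the speedtest conversion above replaces the driver's private erase and bad-block-scan routines with shared helpers declared in mtd_test.h. As a minimal sketch only, inferred from the call sites in this patch (mtd device, bad-block table, first block, block count) rather than taken from the shared mtd_test.c itself, the erase helper could look like this:

static int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd,
					  unsigned char *bbt,
					  unsigned int eb, int ebcnt)
{
	int err;
	unsigned int i;

	for (i = 0; i < ebcnt; ++i) {
		if (bbt[i])
			continue;	/* skip blocks the scan marked bad */
		err = mtdtest_erase_eraseblock(mtd, eb + i);
		if (err)
			return err;
		cond_resched();
	}
	return 0;
}

Factoring the loop out this way is what lets the "Testing erase speed" hunk above shrink from an open-coded loop to a single call.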
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/stresstest.c
index 531625fc925..c9d42cc2df1 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/stresstest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -27,10 +29,11 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
+#include <linux/random.h>
-#define PRINT_PREF KERN_INFO "mtd_stresstest: "
+#include "mtd_test.h"
-static int dev;
+static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -48,28 +51,13 @@ static int pgsize;
static int bufsize;
static int ebcnt;
static int pgcnt;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
- next = next * 1103515245 + 12345;
- return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
- next = seed;
-}
static int rand_eb(void)
{
- int eb;
+ unsigned int eb;
again:
- if (ebcnt < 32768)
- eb = simple_rand();
- else
- eb = (simple_rand() << 15) | simple_rand();
+ eb = prandom_u32();
/* Read or write up to 2 eraseblocks at a time - hence 'ebcnt - 1' */
eb %= (ebcnt - 1);
if (bbt[eb])
@@ -79,71 +67,27 @@ again:
static int rand_offs(void)
{
- int offs;
+ unsigned int offs;
- if (bufsize < 32768)
- offs = simple_rand();
- else
- offs = (simple_rand() << 15) | simple_rand();
+ offs = prandom_u32();
offs %= bufsize;
return offs;
}
static int rand_len(int offs)
{
- int len;
+ unsigned int len;
- if (bufsize < 32768)
- len = simple_rand();
- else
- len = (simple_rand() << 15) | simple_rand();
+ len = prandom_u32();
len %= (bufsize - offs);
return len;
}
-static int erase_eraseblock(int ebnum)
-{
- int err;
- struct erase_info ei;
- loff_t addr = ebnum * mtd->erasesize;
-
- memset(&ei, 0, sizeof(struct erase_info));
- ei.mtd = mtd;
- ei.addr = addr;
- ei.len = mtd->erasesize;
-
- err = mtd->erase(mtd, &ei);
- if (unlikely(err)) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
- return err;
- }
-
- if (unlikely(ei.state == MTD_ERASE_FAILED)) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
- ebnum);
- return -EIO;
- }
-
- return 0;
-}
-
-static int is_block_bad(int ebnum)
-{
- loff_t addr = ebnum * mtd->erasesize;
- int ret;
-
- ret = mtd->block_isbad(mtd, addr);
- if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
- return ret;
-}
-
static int do_read(void)
{
- size_t read = 0;
int eb = rand_eb();
int offs = rand_offs();
- int len = rand_len(offs), err;
+ int len = rand_len(offs);
loff_t addr;
if (bbt[eb + 1]) {
@@ -153,28 +97,17 @@ static int do_read(void)
len = mtd->erasesize - offs;
}
addr = eb * mtd->erasesize + offs;
- err = mtd->read(mtd, addr, len, &read, readbuf);
- if (err == -EUCLEAN)
- err = 0;
- if (unlikely(err || read != len)) {
- printk(PRINT_PREF "error: read failed at 0x%llx\n",
- (long long)addr);
- if (!err)
- err = -EINVAL;
- return err;
- }
- return 0;
+ return mtdtest_read(mtd, addr, len, readbuf);
}
static int do_write(void)
{
int eb = rand_eb(), offs, err, len;
- size_t written = 0;
loff_t addr;
offs = offsets[eb];
if (offs >= mtd->erasesize) {
- err = erase_eraseblock(eb);
+ err = mtdtest_erase_eraseblock(mtd, eb);
if (err)
return err;
offs = offsets[eb] = 0;
@@ -185,21 +118,16 @@ static int do_write(void)
if (bbt[eb + 1])
len = mtd->erasesize - offs;
else {
- err = erase_eraseblock(eb + 1);
+ err = mtdtest_erase_eraseblock(mtd, eb + 1);
if (err)
return err;
offsets[eb + 1] = 0;
}
}
addr = eb * mtd->erasesize + offs;
- err = mtd->write(mtd, addr, len, &written, writebuf);
- if (unlikely(err || written != len)) {
- printk(PRINT_PREF "error: write failed at 0x%llx\n",
- (long long)addr);
- if (!err)
- err = -EINVAL;
+ err = mtdtest_write(mtd, addr, len, writebuf);
+ if (unlikely(err))
return err;
- }
offs += len;
while (offs > mtd->erasesize) {
offsets[eb++] = mtd->erasesize;
@@ -211,37 +139,12 @@ static int do_write(void)
static int do_operation(void)
{
- if (simple_rand() & 1)
+ if (prandom_u32() & 1)
return do_read();
else
return do_write();
}
-static int scan_for_bad_eraseblocks(void)
-{
- int i, bad = 0;
-
- bbt = kzalloc(ebcnt, GFP_KERNEL);
- if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
- return -ENOMEM;
- }
-
- /* NOR flash does not implement block_isbad */
- if (mtd->block_isbad == NULL)
- return 0;
-
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
- for (i = 0; i < ebcnt; ++i) {
- bbt[i] = is_block_bad(i) ? 1 : 0;
- if (bbt[i])
- bad += 1;
- cond_resched();
- }
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
- return 0;
-}
-
static int __init mtd_stresstest_init(void)
{
int err;
@@ -250,17 +153,24 @@ static int __init mtd_stresstest_init(void)
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
- printk(PRINT_PREF "MTD device: %d\n", dev);
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
@@ -271,12 +181,18 @@ static int __init mtd_stresstest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
pgsize, ebcnt, pgcnt, mtd->oobsize);
+ if (ebcnt < 2) {
+ pr_err("error: need at least 2 eraseblocks\n");
+ err = -ENOSPC;
+ goto out_put_mtd;
+ }
+
/* Read or write up to 2 eraseblocks at a time */
bufsize = mtd->erasesize * 2;
@@ -284,40 +200,40 @@ static int __init mtd_stresstest_init(void)
readbuf = vmalloc(bufsize);
writebuf = vmalloc(bufsize);
offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL);
- if (!readbuf || !writebuf || !offsets) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ if (!readbuf || !writebuf || !offsets)
goto out;
- }
for (i = 0; i < ebcnt; i++)
offsets[i] = mtd->erasesize;
- simple_srand(current->pid);
- for (i = 0; i < bufsize; i++)
- writebuf[i] = simple_rand();
+ prandom_bytes(writebuf, bufsize);
- err = scan_for_bad_eraseblocks();
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Do operations */
- printk(PRINT_PREF "doing operations\n");
+ pr_info("doing operations\n");
for (op = 0; op < count; op++) {
if ((op & 1023) == 0)
- printk(PRINT_PREF "%d operations done\n", op);
+ pr_info("%d operations done\n", op);
err = do_operation();
if (err)
goto out;
cond_resched();
}
- printk(PRINT_PREF "finished, %d operations done\n", op);
+ pr_info("finished, %d operations done\n", op);
out:
kfree(offsets);
kfree(bbt);
vfree(writebuf);
vfree(readbuf);
+out_put_mtd:
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
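Editor's note: the stresstest hunks above drop the hand-rolled 15-bit LCG because prandom_u32() already returns a full 32-bit value, so no "glue two halves together" trick is needed for large devices. The sketch below condenses rand_eb()/rand_offs()/rand_len() into one hypothetical helper (pick_random_span is not part of the patch, it only illustrates the pattern); a plain modulo is acceptable here since the test does not need a perfectly uniform distribution:

static void pick_random_span(unsigned int ebcnt, unsigned int bufsize,
			     unsigned int *eb, unsigned int *offs,
			     unsigned int *len)
{
	/* The write path may spill into eraseblock *eb + 1, so the highest
	 * selectable block is ebcnt - 2, hence the "- 1". */
	*eb   = prandom_u32() % (ebcnt - 1);
	*offs = prandom_u32() % bufsize;
	*len  = prandom_u32() % (bufsize - *offs);
}

The new "ebcnt < 2" check added in mtd_stresstest_init() guarantees the modulus above is never zero.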
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/subpagetest.c
index 334eae53a3d..a876371ad41 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/subpagetest.c
@@ -19,6 +19,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -26,10 +28,11 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/random.h>
-#define PRINT_PREF KERN_INFO "mtd_subpagetest: "
+#include "mtd_test.h"
-static int dev;
+static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -43,104 +46,41 @@ static int bufsize;
static int ebcnt;
static int pgcnt;
static int errcnt;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
- next = next * 1103515245 + 12345;
- return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
- next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; ++i)
- buf[i] = simple_rand();
-}
+static struct rnd_state rnd_state;
static inline void clear_data(unsigned char *buf, size_t len)
{
memset(buf, 0, len);
}
-static int erase_eraseblock(int ebnum)
-{
- int err;
- struct erase_info ei;
- loff_t addr = ebnum * mtd->erasesize;
-
- memset(&ei, 0, sizeof(struct erase_info));
- ei.mtd = mtd;
- ei.addr = addr;
- ei.len = mtd->erasesize;
-
- err = mtd->erase(mtd, &ei);
- if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
- return err;
- }
-
- if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
- ebnum);
- return -EIO;
- }
-
- return 0;
-}
-
-static int erase_whole_device(void)
-{
- int err;
- unsigned int i;
-
- printk(PRINT_PREF "erasing whole device\n");
- for (i = 0; i < ebcnt; ++i) {
- if (bbt[i])
- continue;
- err = erase_eraseblock(i);
- if (err)
- return err;
- cond_resched();
- }
- printk(PRINT_PREF "erased %u eraseblocks\n", i);
- return 0;
-}
-
static int write_eraseblock(int ebnum)
{
- size_t written = 0;
+ size_t written;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- set_random_data(writebuf, subpgsize);
- err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
+ err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize) {
- printk(PRINT_PREF " write size: %#x\n", subpgsize);
- printk(PRINT_PREF " written: %#zx\n", written);
+ pr_err(" write size: %#x\n", subpgsize);
+ pr_err(" written: %#zx\n", written);
}
return err ? err : -1;
}
addr += subpgsize;
- set_random_data(writebuf, subpgsize);
- err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
+ err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize) {
- printk(PRINT_PREF " write size: %#x\n", subpgsize);
- printk(PRINT_PREF " written: %#zx\n", written);
+ pr_err(" write size: %#x\n", subpgsize);
+ pr_err(" written: %#zx\n", written);
}
return err ? err : -1;
}
@@ -150,22 +90,22 @@ static int write_eraseblock(int ebnum)
static int write_eraseblock2(int ebnum)
{
- size_t written = 0;
+ size_t written;
int err = 0, k;
loff_t addr = ebnum * mtd->erasesize;
for (k = 1; k < 33; ++k) {
if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
break;
- set_random_data(writebuf, subpgsize * k);
- err = mtd->write(mtd, addr, subpgsize * k, &written, writebuf);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
+ err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
if (unlikely(err || written != subpgsize * k)) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize) {
- printk(PRINT_PREF " write size: %#x\n",
+ pr_err(" write size: %#x\n",
subpgsize * k);
- printk(PRINT_PREF " written: %#08zx\n",
+ pr_err(" written: %#08zx\n",
written);
}
return err ? err : -1;
@@ -189,61 +129,59 @@ static void print_subpage(unsigned char *p)
static int verify_eraseblock(int ebnum)
{
- size_t read = 0;
+ size_t read;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- set_random_data(writebuf, subpgsize);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
clear_data(readbuf, subpgsize);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
- if (err == -EUCLEAN && read == subpgsize) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ if (mtd_is_bitflip(err) && read == subpgsize) {
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
- printk(PRINT_PREF "------------- written----------------\n");
+ pr_info("------------- written----------------\n");
print_subpage(writebuf);
- printk(PRINT_PREF "------------- read ------------------\n");
+ pr_info("------------- read ------------------\n");
print_subpage(readbuf);
- printk(PRINT_PREF "-------------------------------------\n");
+ pr_info("-------------------------------------\n");
errcnt += 1;
}
addr += subpgsize;
- set_random_data(writebuf, subpgsize);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
clear_data(readbuf, subpgsize);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
- if (err == -EUCLEAN && read == subpgsize) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ if (mtd_is_bitflip(err) && read == subpgsize) {
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_info("error: verify failed at %#llx\n",
(long long)addr);
- printk(PRINT_PREF "------------- written----------------\n");
+ pr_info("------------- written----------------\n");
print_subpage(writebuf);
- printk(PRINT_PREF "------------- read ------------------\n");
+ pr_info("------------- read ------------------\n");
print_subpage(readbuf);
- printk(PRINT_PREF "-------------------------------------\n");
+ pr_info("-------------------------------------\n");
errcnt += 1;
}
@@ -252,30 +190,29 @@ static int verify_eraseblock(int ebnum)
static int verify_eraseblock2(int ebnum)
{
- size_t read = 0;
+ size_t read;
int err = 0, k;
loff_t addr = ebnum * mtd->erasesize;
for (k = 1; k < 33; ++k) {
if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
break;
- set_random_data(writebuf, subpgsize * k);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
clear_data(readbuf, subpgsize * k);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
if (unlikely(err || read != subpgsize * k)) {
- if (err == -EUCLEAN && read == subpgsize * k) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ if (mtd_is_bitflip(err) && read == subpgsize * k) {
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at "
+ pr_err("error: read failed at "
"%#llx\n", (long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
@@ -288,28 +225,27 @@ static int verify_eraseblock2(int ebnum)
static int verify_eraseblock_ff(int ebnum)
{
uint32_t j;
- size_t read = 0;
+ size_t read;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
memset(writebuf, 0xff, subpgsize);
for (j = 0; j < mtd->erasesize / subpgsize; ++j) {
clear_data(readbuf, subpgsize);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
- if (err == -EUCLEAN && read == subpgsize) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ if (mtd_is_bitflip(err) && read == subpgsize) {
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at "
+ pr_err("error: read failed at "
"%#llx\n", (long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
- printk(PRINT_PREF "error: verify 0xff failed at "
+ pr_err("error: verify 0xff failed at "
"%#llx\n", (long long)addr);
errcnt += 1;
}
@@ -324,7 +260,7 @@ static int verify_all_eraseblocks_ff(void)
int err;
unsigned int i;
- printk(PRINT_PREF "verifying all eraseblocks for 0xff\n");
+ pr_info("verifying all eraseblocks for 0xff\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -332,42 +268,10 @@ static int verify_all_eraseblocks_ff(void)
if (err)
return err;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
- return 0;
-}
-
-static int is_block_bad(int ebnum)
-{
- loff_t addr = ebnum * mtd->erasesize;
- int ret;
-
- ret = mtd->block_isbad(mtd, addr);
- if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
- return ret;
-}
-
-static int scan_for_bad_eraseblocks(void)
-{
- int i, bad = 0;
-
- bbt = kzalloc(ebcnt, GFP_KERNEL);
- if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
- return -ENOMEM;
- }
-
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
- for (i = 0; i < ebcnt; ++i) {
- bbt[i] = is_block_bad(i) ? 1 : 0;
- if (bbt[i])
- bad += 1;
- cond_resched();
- }
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("verified %u eraseblocks\n", i);
return 0;
}
@@ -379,17 +283,24 @@ static int __init mtd_subpagetest_init(void)
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
- printk(PRINT_PREF "MTD device: %d\n", dev);
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
- if (mtd->type != MTD_NANDFLASH) {
- printk(PRINT_PREF "this test requires NAND flash\n");
+ if (!mtd_type_is_nand(mtd)) {
+ pr_info("this test requires NAND flash\n");
goto out;
}
@@ -399,7 +310,7 @@ static int __init mtd_subpagetest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / mtd->writesize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, subpage size %u, count of eraseblocks %u, "
"pages per eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -408,26 +319,25 @@ static int __init mtd_subpagetest_init(void)
err = -ENOMEM;
bufsize = subpgsize * 32;
writebuf = kmalloc(bufsize, GFP_KERNEL);
- if (!writebuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ if (!writebuf)
goto out;
- }
readbuf = kmalloc(bufsize, GFP_KERNEL);
- if (!readbuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ if (!readbuf)
+ goto out;
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
goto out;
- }
- err = scan_for_bad_eraseblocks();
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
- printk(PRINT_PREF "writing whole device\n");
- simple_srand(1);
+ pr_info("writing whole device\n");
+ prandom_seed_state(&rnd_state, 1);
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -435,13 +345,13 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
- simple_srand(1);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ prandom_seed_state(&rnd_state, 1);
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -449,12 +359,12 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -463,8 +373,8 @@ static int __init mtd_subpagetest_init(void)
goto out;
/* Write all eraseblocks */
- simple_srand(3);
- printk(PRINT_PREF "writing whole device\n");
+ prandom_seed_state(&rnd_state, 3);
+ pr_info("writing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -472,14 +382,14 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
- simple_srand(3);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ prandom_seed_state(&rnd_state, 3);
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -487,12 +397,12 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -500,7 +410,7 @@ static int __init mtd_subpagetest_init(void)
if (err)
goto out;
- printk(PRINT_PREF "finished with %d errors\n", errcnt);
+ pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
@@ -508,7 +418,7 @@ out:
kfree(writebuf);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
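Editor's note: the subpagetest conversion keeps its write/verify data reproducible by moving from the private LCG to a seeded struct rnd_state. Reseeding with the same constant before the verify pass makes prandom_bytes_state() regenerate exactly the byte stream that was written, so the test never has to keep a copy of the data. A minimal sketch of that pattern (buffer size and seed value are arbitrary here):

static void write_then_verify_sketch(u8 *buf, size_t len)
{
	struct rnd_state rs;

	prandom_seed_state(&rs, 1);		/* write pass */
	prandom_bytes_state(&rs, buf, len);
	/* ... write buf to the flash ... */

	prandom_seed_state(&rs, 1);		/* verify pass: same seed */
	prandom_bytes_state(&rs, buf, len);	/* regenerates the same bytes */
	/* ... read the flash back and memcmp() against buf ... */
}

This is why the hunks above reseed with prandom_seed_state(&rnd_state, 1) and later with seed 3 before both the write loop and the matching verify loop.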
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/torturetest.c
index 5c6c3d24890..eeab96973cf 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/torturetest.c
@@ -23,6 +23,8 @@
* damage caused by this program.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -30,8 +32,8 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include "mtd_test.h"
-#define PRINT_PREF KERN_INFO "mtd_torturetest: "
#define RETRIES 3
static int eb = 8;
@@ -46,7 +48,7 @@ static int pgcnt;
module_param(pgcnt, int, S_IRUGO);
MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)");
-static int dev;
+static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -92,42 +94,13 @@ static inline void stop_timing(void)
}
/*
- * Erase eraseblock number @ebnum.
- */
-static inline int erase_eraseblock(int ebnum)
-{
- int err;
- struct erase_info ei;
- loff_t addr = ebnum * mtd->erasesize;
-
- memset(&ei, 0, sizeof(struct erase_info));
- ei.mtd = mtd;
- ei.addr = addr;
- ei.len = mtd->erasesize;
-
- err = mtd->erase(mtd, &ei);
- if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
- return err;
- }
-
- if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
- ebnum);
- return -EIO;
- }
-
- return 0;
-}
-
-/*
 * Check that the contents of eraseblock number @ebnum are equivalent to the
* @buf buffer.
*/
static inline int check_eraseblock(int ebnum, unsigned char *buf)
{
int err, retries = 0;
- size_t read = 0;
+ size_t read;
loff_t addr = ebnum * mtd->erasesize;
size_t len = mtd->erasesize;
@@ -137,42 +110,42 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf)
}
retry:
- err = mtd->read(mtd, addr, len, &read, check_buf);
- if (err == -EUCLEAN)
- printk(PRINT_PREF "single bit flip occurred at EB %d "
+ err = mtd_read(mtd, addr, len, &read, check_buf);
+ if (mtd_is_bitflip(err))
+ pr_err("single bit flip occurred at EB %d "
"MTD reported that it was fixed.\n", ebnum);
else if (err) {
- printk(PRINT_PREF "error %d while reading EB %d, "
+ pr_err("error %d while reading EB %d, "
"read %zd\n", err, ebnum, read);
return err;
}
if (read != len) {
- printk(PRINT_PREF "failed to read %zd bytes from EB %d, "
+ pr_err("failed to read %zd bytes from EB %d, "
"read only %zd, but no error reported\n",
len, ebnum, read);
return -EIO;
}
if (memcmp(buf, check_buf, len)) {
- printk(PRINT_PREF "read wrong data from EB %d\n", ebnum);
+ pr_err("read wrong data from EB %d\n", ebnum);
report_corrupt(check_buf, buf);
if (retries++ < RETRIES) {
/* Try read again */
yield();
- printk(PRINT_PREF "re-try reading data from EB %d\n",
+ pr_info("re-try reading data from EB %d\n",
ebnum);
goto retry;
} else {
- printk(PRINT_PREF "retried %d times, still errors, "
+ pr_info("retried %d times, still errors, "
"give-up\n", RETRIES);
return -EINVAL;
}
}
if (retries != 0)
- printk(PRINT_PREF "only attempt number %d was OK (!!!)\n",
+ pr_info("only attempt number %d was OK (!!!)\n",
retries);
return 0;
@@ -181,7 +154,7 @@ retry:
static inline int write_pattern(int ebnum, void *buf)
{
int err;
- size_t written = 0;
+ size_t written;
loff_t addr = ebnum * mtd->erasesize;
size_t len = mtd->erasesize;
@@ -189,14 +162,14 @@ static inline int write_pattern(int ebnum, void *buf)
addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
len = pgcnt * pgsize;
}
- err = mtd->write(mtd, addr, len, &written, buf);
+ err = mtd_write(mtd, addr, len, &written, buf);
if (err) {
- printk(PRINT_PREF "error %d while writing EB %d, written %zd"
+ pr_err("error %d while writing EB %d, written %zd"
" bytes\n", err, ebnum, written);
return err;
}
if (written != len) {
- printk(PRINT_PREF "written only %zd bytes of %zd, but no error"
+ pr_info("written only %zd bytes of %zd, but no error"
" reported\n", written, len);
return -EIO;
}
@@ -207,63 +180,66 @@ static inline int write_pattern(int ebnum, void *buf)
static int __init tort_init(void)
{
int err = 0, i, infinite = !cycles_count;
- int bad_ebs[ebcnt];
+ unsigned char *bad_ebs;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
- printk(PRINT_PREF "Warning: this program is trying to wear out your "
+ pr_info("Warning: this program is trying to wear out your "
"flash, stop it if this is not wanted.\n");
- printk(PRINT_PREF "MTD device: %d\n", dev);
- printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n",
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
+ pr_info("torture %d eraseblocks (%d-%d) of mtd%d\n",
ebcnt, eb, eb + ebcnt - 1, dev);
if (pgcnt)
- printk(PRINT_PREF "torturing just %d pages per eraseblock\n",
+ pr_info("torturing just %d pages per eraseblock\n",
pgcnt);
- printk(PRINT_PREF "write verify %s\n", check ? "enabled" : "disabled");
+ pr_info("write verify %s\n", check ? "enabled" : "disabled");
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
pgsize = mtd->writesize;
if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {
- printk(PRINT_PREF "error: invalid pgcnt value %d\n", pgcnt);
+ pr_err("error: invalid pgcnt value %d\n", pgcnt);
goto out_mtd;
}
err = -ENOMEM;
patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!patt_5A5) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ if (!patt_5A5)
goto out_mtd;
- }
patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!patt_A5A) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ if (!patt_A5A)
goto out_patt_5A5;
- }
patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!patt_FF) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ if (!patt_FF)
goto out_patt_A5A;
- }
check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!check_buf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ if (!check_buf)
goto out_patt_FF;
- }
+
+ bad_ebs = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bad_ebs)
+ goto out_check_buf;
err = 0;
@@ -279,42 +255,16 @@ static int __init tort_init(void)
}
}
- /*
- * Check if there is a bad eraseblock among those we are going to test.
- */
- memset(&bad_ebs[0], 0, sizeof(int) * ebcnt);
- if (mtd->block_isbad) {
- for (i = eb; i < eb + ebcnt; i++) {
- err = mtd->block_isbad(mtd,
- (loff_t)i * mtd->erasesize);
-
- if (err < 0) {
- printk(PRINT_PREF "block_isbad() returned %d "
- "for EB %d\n", err, i);
- goto out;
- }
-
- if (err) {
- printk("EB %d is bad. Skip it.\n", i);
- bad_ebs[i - eb] = 1;
- }
- }
- }
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+ if (err)
+ goto out;
start_timing();
while (1) {
int i;
void *patt;
- /* Erase all eraseblocks */
- for (i = eb; i < eb + ebcnt; i++) {
- if (bad_ebs[i - eb])
- continue;
- err = erase_eraseblock(i);
- if (err)
- goto out;
- cond_resched();
- }
+ mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
/* Check if the eraseblocks contain only 0xFF bytes */
if (check) {
@@ -323,7 +273,7 @@ static int __init tort_init(void)
continue;
err = check_eraseblock(i, patt_FF);
if (err) {
- printk(PRINT_PREF "verify failed"
+ pr_info("verify failed"
" for 0xFF... pattern\n");
goto out;
}
@@ -356,7 +306,7 @@ static int __init tort_init(void)
patt = patt_A5A;
err = check_eraseblock(i, patt);
if (err) {
- printk(PRINT_PREF "verify failed for %s"
+ pr_info("verify failed for %s"
" pattern\n",
((eb + erase_cycles) & 1) ?
"0x55AA55..." : "0xAA55AA...");
@@ -374,7 +324,7 @@ static int __init tort_init(void)
stop_timing();
ms = (finish.tv_sec - start.tv_sec) * 1000 +
(finish.tv_usec - start.tv_usec) / 1000;
- printk(PRINT_PREF "%08u erase cycles done, took %lu "
+ pr_info("%08u erase cycles done, took %lu "
"milliseconds (%lu seconds)\n",
erase_cycles, ms, ms / 1000);
start_timing();
@@ -385,8 +335,10 @@ static int __init tort_init(void)
}
out:
- printk(PRINT_PREF "finished after %u erase cycles\n",
+ pr_info("finished after %u erase cycles\n",
erase_cycles);
+ kfree(bad_ebs);
+out_check_buf:
kfree(check_buf);
out_patt_FF:
kfree(patt_FF);
@@ -397,7 +349,7 @@ out_patt_5A5:
out_mtd:
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred during torturing\n", err);
+ pr_info("error %d occurred during torturing\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
@@ -435,9 +387,9 @@ static void report_corrupt(unsigned char *read, unsigned char *written)
&bits) >= 0)
pages++;
- printk(PRINT_PREF "verify fails on %d pages, %d bytes/%d bits\n",
+ pr_info("verify fails on %d pages, %d bytes/%d bits\n",
pages, bytes, bits);
- printk(PRINT_PREF "The following is a list of all differences between"
+ pr_info("The following is a list of all differences between"
" what was read from flash and what was expected\n");
for (i = 0; i < check_len; i += pgsize) {
@@ -451,7 +403,7 @@ static void report_corrupt(unsigned char *read, unsigned char *written)
printk("-------------------------------------------------------"
"----------------------------------\n");
- printk(PRINT_PREF "Page %zd has %d bytes/%d bits failing verify,"
+ pr_info("Page %zd has %d bytes/%d bits failing verify,"
" starting at offset 0x%x\n",
(mtd->erasesize - check_len + i) / pgsize,
bytes, bits, first);
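Editor's note: one change in tort_init() above is easy to miss: the bad-block table moves from an on-stack variable-length array (int bad_ebs[ebcnt]) to a heap allocation of one byte per block, which avoids unbounded kernel-stack usage and matches the unsigned char table that mtdtest_scan_for_bad_eraseblocks() fills. A condensed sketch of the resulting pattern (simplified relative to the patch, which routes the failure through its existing error labels):

static int scan_and_torture_sketch(struct mtd_info *mtd, unsigned int eb,
				   int ebcnt)
{
	unsigned char *bad_ebs;
	int err;

	bad_ebs = kzalloc(ebcnt, GFP_KERNEL);	/* zeroed: all blocks assumed good */
	if (!bad_ebs)
		return -ENOMEM;

	err = mtdtest_scan_for_bad_eraseblocks(mtd, bad_ebs, eb, ebcnt);
	if (err)
		goto out;
	/* ... torture loop, skipping block i when bad_ebs[i - eb] is set ... */
out:
	kfree(bad_ebs);
	return err;
}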
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 4dcc752a0c0..f0855ce08ed 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -27,20 +27,55 @@ config MTD_UBI_WL_THRESHOLD
life-cycle less than 10000, the threshold should be lessened (e.g.,
to 128 or 256, although it does not have to be power of 2).
-config MTD_UBI_BEB_RESERVE
- int "Percentage of reserved eraseblocks for bad eraseblocks handling"
- default 1
- range 0 25
+config MTD_UBI_BEB_LIMIT
+ int "Maximum expected bad eraseblock count per 1024 eraseblocks"
+ default 20
+ range 0 768
help
- If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI
- reserves some amount of physical eraseblocks to handle new bad
- eraseblocks. For example, if a flash physical eraseblock becomes bad,
- UBI uses these reserved physical eraseblocks to relocate the bad one.
- This option specifies how many physical eraseblocks will be reserved
- for bad eraseblock handling (percents of total number of good flash
- eraseblocks). If the underlying flash does not admit of bad
- eraseblocks (e.g. NOR flash), this value is ignored and nothing is
- reserved. Leave the default value if unsure.
+ This option specifies the maximum number of bad physical eraseblocks
+ UBI expects on the MTD device (per 1024 eraseblocks). If the underlying
+ flash does not admit of bad eraseblocks (e.g. NOR flash), this value
+ is ignored.
+
+ NAND datasheets often specify the minimum and maximum NVB (Number of
+ Valid Blocks) for the flashes' endurance lifetime. The maximum
+ expected bad eraseblocks per 1024 eraseblocks then can be calculated
+ as "1024 * (1 - MinNVB / MaxNVB)", which gives 20 for most NANDs
+ (MaxNVB is basically the total count of eraseblocks on the chip).
+
+ To put it differently, if this value is 20, UBI will try to reserve
+ about 1.9% of physical eraseblocks for bad blocks handling. And that
+ will be 1.9% of eraseblocks on the entire NAND chip, not just the MTD
+ partition UBI attaches. This means that if you have, say, a NAND
+ flash chip admits maximum 40 bad eraseblocks, and it is split on two
+ MTD partitions of the same size, UBI will reserve 40 eraseblocks when
+ attaching a partition.
+
+ This option can be overridden by the "mtd=" UBI module parameter or
+ by the "attach" ioctl.
+
+ Leave the default value if unsure.
+
+config MTD_UBI_FASTMAP
+ bool "UBI Fastmap (Experimental feature)"
+ default n
+ help
+ Important: this feature is experimental so far and the on-flash
+ format for fastmap may change in future kernel versions.
+
+ Fastmap is a mechanism which allows attaching an UBI device
+ in nearly constant time. Instead of scanning the whole MTD device it
+ only has to locate a checkpoint (called fastmap) on the device.
+ The on-flash fastmap contains all information needed to attach
+ the device. Using fastmap only makes sense on large devices where
+ attaching by scanning takes a long time. UBI will not automatically
+ install a fastmap on old images, but you can set the UBI module
+ parameter fm_autoconvert to 1 if you want it to. Please note that
+ fastmap-enabled images are still usable with UBI implementations without
+ fastmap support. On typical flash devices the whole fastmap fits
+ into one PEB. UBI will reserve PEBs to hold two fastmaps.
+
+ If in doubt, say "N".
config MTD_UBI_GLUEBI
tristate "MTD devices emulation driver (gluebi)"
@@ -52,12 +87,20 @@ config MTD_UBI_GLUEBI
work on top of UBI. Do not enable this unless you use legacy
software.
-config MTD_UBI_DEBUG
- bool "UBI debugging"
- depends on SYSFS
- select DEBUG_FS
- select KALLSYMS
+config MTD_UBI_BLOCK
+ bool "Read-only block devices on top of UBI volumes"
+ default n
+ depends on BLOCK
help
- This option enables UBI debugging.
+ This option enables read-only UBI block devices support. UBI block
+ devices will be layered on top of UBI volumes, which means that the
+ UBI driver will transparently handle things like bad eraseblocks and
+ bit-flips. You can put any block-oriented file system on top of UBI
+ volumes in read-only mode (e.g., ext4), but it is probably most
+ practical for read-only file systems, like squashfs.
+
+ When selected, this feature will be built into the UBI driver.
+
+ If in doubt, say "N".
endif # MTD_UBI
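Editor's note: to make the MTD_UBI_BEB_LIMIT help text above concrete, here is a worked illustration only (the function name is made up and UBI's real computation may round differently): with the default of 20 bad blocks per 1024, a chip with 4096 eraseblocks leads to 4096 * 20 / 1024 = 80 reserved PEBs, roughly 1.9% of the whole chip, regardless of how the chip is split into MTD partitions.

/* Illustrative only: approximate number of PEBs reserved for bad-block
 * handling, given the whole chip's eraseblock count and the per-1024 limit. */
static int beb_reserve_sketch(int chip_peb_count, int limit_per_1024)
{
	return chip_peb_count * limit_per_1024 / 1024;	/* 4096 * 20 / 1024 = 80 */
}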
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index c9302a5452b..4e3c3d70d8c 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -1,7 +1,8 @@
obj-$(CONFIG_MTD_UBI) += ubi.o
-ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o scan.o
-ubi-y += misc.o
+ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
+ubi-y += misc.o debug.o
+ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
+ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o
-ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/attach.c
index 2135a53732f..6f27d9a1be3 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/attach.c
@@ -19,21 +19,21 @@
*/
/*
- * UBI scanning sub-system.
+ * UBI attaching sub-system.
*
- * This sub-system is responsible for scanning the flash media, checking UBI
- * headers and providing complete information about the UBI flash image.
+ * This sub-system is responsible for attaching MTD devices and it also
+ * implements flash media scanning.
*
- * The scanning information is represented by a &struct ubi_scan_info' object.
- * Information about found volumes is represented by &struct ubi_scan_volume
+ * The attaching information is represented by a &struct ubi_attach_info
+ * object. Information about volumes is represented by &struct ubi_ainf_volume
* objects which are kept in volume RB-tree with root at the @volumes field.
* The RB-tree is indexed by the volume ID.
*
- * Scanned logical eraseblocks are represented by &struct ubi_scan_leb objects.
- * These objects are kept in per-volume RB-trees with the root at the
- * corresponding &struct ubi_scan_volume object. To put it differently, we keep
- * an RB-tree of per-volume objects and each of these objects is the root of
- * RB-tree of per-eraseblock objects.
+ * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
+ * objects are kept in per-volume RB-trees with the root at the corresponding
+ * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
+ * per-volume objects and each of these objects is the root of RB-tree of
+ * per-LEB objects.
*
* Corrupted physical eraseblocks are put to the @corr list, free physical
* eraseblocks are put to the @free list and the physical eraseblock to be
@@ -51,34 +51,35 @@
*
* 1. Corruptions caused by power cuts. These are expected corruptions and UBI
* tries to handle them gracefully, without printing too many warnings and
- * error messages. The idea is that we do not lose important data in these case
- * - we may lose only the data which was being written to the media just before
- * the power cut happened, and the upper layers (e.g., UBIFS) are supposed to
- * handle such data losses (e.g., by using the FS journal).
+ * error messages. The idea is that we do not lose important data in these
+ * cases - we may lose only the data which were being written to the media just
+ * before the power cut happened, and the upper layers (e.g., UBIFS) are
+ * supposed to handle such data losses (e.g., by using the FS journal).
*
* When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
* the reason is a power cut, UBI puts this PEB to the @erase list, and all
* PEBs in the @erase list are scheduled for erasure later.
*
* 2. Unexpected corruptions which are not caused by power cuts. During
- * scanning, such PEBs are put to the @corr list and UBI preserves them.
+ * attaching, such PEBs are put to the @corr list and UBI preserves them.
* Obviously, this lessens the amount of available PEBs, and if at some point
* UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
* about such PEBs every time the MTD device is attached.
*
* However, it is difficult to reliably distinguish between these types of
- * corruptions and UBI's strategy is as follows. UBI assumes corruption type 2
- * if the VID header is corrupted and the data area does not contain all 0xFFs,
- * and there were no bit-flips or integrity errors while reading the data area.
- * Otherwise UBI assumes corruption type 1. So the decision criteria are as
- * follows.
- * o If the data area contains only 0xFFs, there is no data, and it is safe
+ * corruptions and UBI's strategy is as follows (in case of attaching by
+ * scanning). UBI assumes corruption type 2 if the VID header is corrupted and
+ * the data area does not contain all 0xFFs, and there were no bit-flips or
+ * integrity errors (e.g., ECC errors in case of NAND) while reading the data
+ * area. Otherwise UBI assumes corruption type 1. So the decision criteria
+ * are as follows.
+ * o If the data area contains only 0xFFs, there are no data, and it is safe
* to just erase this PEB - this is corruption type 1.
* o If the data area has bit-flips or data integrity errors (ECC errors on
* NAND), it is probably a PEB which was being erased when power cut
* happened, so this is corruption type 1. However, this is just a guess,
* which might be wrong.
- * o Otherwise this it corruption type 2.
+ * o Otherwise this is corruption type 2.
*/
#include <linux/err.h>
@@ -88,11 +89,7 @@
#include <linux/random.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si);
-#else
-#define paranoid_check_si(ubi, si) 0
-#endif
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
/* Temporary variables used during scanning */
static struct ubi_ec_hdr *ech;
@@ -100,13 +97,18 @@ static struct ubi_vid_hdr *vidh;
/**
* add_to_list - add physical eraseblock to a list.
- * @si: scanning information
+ * @ai: attaching information
* @pnum: physical eraseblock number to add
+ * @vol_id: the last used volume id for the PEB
+ * @lnum: the last used LEB number for the PEB
* @ec: erase counter of the physical eraseblock
* @to_head: if not zero, add to the head of the list
* @list: the list to add to
*
- * This function adds physical eraseblock @pnum to free, erase, or alien lists.
+ * This function allocates a 'struct ubi_ainf_peb' object for physical
+ * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
+ * It stores the @lnum and @vol_id alongside, which can both be
+ * %UBI_UNKNOWN if they are not available, not readable, or not assigned.
* If @to_head is not zero, PEB will be added to the head of the list, which
* basically means it will be processed first later. E.g., we add corrupted
* PEBs (corrupted due to power cuts) to the head of the erase list to make
@@ -114,65 +116,68 @@ static struct ubi_vid_hdr *vidh;
* returns zero in case of success and a negative error code in case of
* failure.
*/
-static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, int to_head,
- struct list_head *list)
+static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
+ int lnum, int ec, int to_head, struct list_head *list)
{
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
- if (list == &si->free) {
+ if (list == &ai->free) {
dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
- } else if (list == &si->erase) {
+ } else if (list == &ai->erase) {
dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
- } else if (list == &si->alien) {
+ } else if (list == &ai->alien) {
dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
- si->alien_peb_count += 1;
+ ai->alien_peb_count += 1;
} else
BUG();
- seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
- if (!seb)
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
return -ENOMEM;
- seb->pnum = pnum;
- seb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->ec = ec;
if (to_head)
- list_add(&seb->u.list, list);
+ list_add(&aeb->u.list, list);
else
- list_add_tail(&seb->u.list, list);
+ list_add_tail(&aeb->u.list, list);
return 0;
}
/**
* add_corrupted - add a corrupted physical eraseblock.
- * @si: scanning information
+ * @ai: attaching information
* @pnum: physical eraseblock number to add
* @ec: erase counter of the physical eraseblock
*
- * This function adds corrupted physical eraseblock @pnum to the 'corr' list.
- * The corruption was presumably not caused by a power cut. Returns zero in
- * case of success and a negative error code in case of failure.
+ * This function allocates a 'struct ubi_ainf_peb' object for a corrupted
+ * physical eraseblock @pnum and adds it to the 'corr' list. The corruption
+ * was presumably not caused by a power cut. Returns zero in case of success
+ * and a negative error code in case of failure.
*/
-static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec)
+static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
{
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
- seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
- if (!seb)
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
return -ENOMEM;
- si->corr_peb_count += 1;
- seb->pnum = pnum;
- seb->ec = ec;
- list_add(&seb->u.list, &si->corr);
+ ai->corr_peb_count += 1;
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ list_add(&aeb->u.list, &ai->corr);
return 0;
}
/**
* validate_vid_hdr - check volume identifier header.
* @vid_hdr: the volume identifier header to check
- * @sv: information about the volume this logical eraseblock belongs to
+ * @av: information about the volume this logical eraseblock belongs to
* @pnum: physical eraseblock number the VID header came from
*
* This function checks that data stored in @vid_hdr is consistent. Returns
@@ -184,15 +189,15 @@ static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec)
* headers of the same volume.
*/
static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
- const struct ubi_scan_volume *sv, int pnum)
+ const struct ubi_ainf_volume *av, int pnum)
{
int vol_type = vid_hdr->vol_type;
int vol_id = be32_to_cpu(vid_hdr->vol_id);
int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
int data_pad = be32_to_cpu(vid_hdr->data_pad);
- if (sv->leb_count != 0) {
- int sv_vol_type;
+ if (av->leb_count != 0) {
+ int av_vol_type;
/*
* This is not the first logical eraseblock belonging to this
@@ -200,28 +205,28 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
* to the data in previous logical eraseblock headers.
*/
- if (vol_id != sv->vol_id) {
- dbg_err("inconsistent vol_id");
+ if (vol_id != av->vol_id) {
+ ubi_err("inconsistent vol_id");
goto bad;
}
- if (sv->vol_type == UBI_STATIC_VOLUME)
- sv_vol_type = UBI_VID_STATIC;
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av_vol_type = UBI_VID_STATIC;
else
- sv_vol_type = UBI_VID_DYNAMIC;
+ av_vol_type = UBI_VID_DYNAMIC;
- if (vol_type != sv_vol_type) {
- dbg_err("inconsistent vol_type");
+ if (vol_type != av_vol_type) {
+ ubi_err("inconsistent vol_type");
goto bad;
}
- if (used_ebs != sv->used_ebs) {
- dbg_err("inconsistent used_ebs");
+ if (used_ebs != av->used_ebs) {
+ ubi_err("inconsistent used_ebs");
goto bad;
}
- if (data_pad != sv->data_pad) {
- dbg_err("inconsistent data_pad");
+ if (data_pad != av->data_pad) {
+ ubi_err("inconsistent data_pad");
goto bad;
}
}
@@ -230,74 +235,74 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
bad:
ubi_err("inconsistent VID header at PEB %d", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_sv(sv);
+ ubi_dump_vid_hdr(vid_hdr);
+ ubi_dump_av(av);
return -EINVAL;
}
/**
- * add_volume - add volume to the scanning information.
- * @si: scanning information
+ * add_volume - add volume to the attaching information.
+ * @ai: attaching information
* @vol_id: ID of the volume to add
* @pnum: physical eraseblock number
* @vid_hdr: volume identifier header
*
* If the volume corresponding to the @vid_hdr logical eraseblock is already
- * present in the scanning information, this function does nothing. Otherwise
- * it adds corresponding volume to the scanning information. Returns a pointer
- * to the scanning volume object in case of success and a negative error code
- * in case of failure.
+ * present in the attaching information, this function does nothing. Otherwise
+ * it adds corresponding volume to the attaching information. Returns a pointer
+ * to the allocated "av" object in case of success and a negative error code in
+ * case of failure.
*/
-static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
- int pnum,
+static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
+ int vol_id, int pnum,
const struct ubi_vid_hdr *vid_hdr)
{
- struct ubi_scan_volume *sv;
- struct rb_node **p = &si->volumes.rb_node, *parent = NULL;
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
/* Walk the volume RB-tree to look if this volume is already present */
while (*p) {
parent = *p;
- sv = rb_entry(parent, struct ubi_scan_volume, rb);
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
- if (vol_id == sv->vol_id)
- return sv;
+ if (vol_id == av->vol_id)
+ return av;
- if (vol_id > sv->vol_id)
+ if (vol_id > av->vol_id)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
/* The volume is absent - add it */
- sv = kmalloc(sizeof(struct ubi_scan_volume), GFP_KERNEL);
- if (!sv)
+ av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+ if (!av)
return ERR_PTR(-ENOMEM);
- sv->highest_lnum = sv->leb_count = 0;
- sv->vol_id = vol_id;
- sv->root = RB_ROOT;
- sv->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
- sv->data_pad = be32_to_cpu(vid_hdr->data_pad);
- sv->compat = vid_hdr->compat;
- sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
+ av->highest_lnum = av->leb_count = 0;
+ av->vol_id = vol_id;
+ av->root = RB_ROOT;
+ av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ av->data_pad = be32_to_cpu(vid_hdr->data_pad);
+ av->compat = vid_hdr->compat;
+ av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
: UBI_STATIC_VOLUME;
- if (vol_id > si->highest_vol_id)
- si->highest_vol_id = vol_id;
+ if (vol_id > ai->highest_vol_id)
+ ai->highest_vol_id = vol_id;
- rb_link_node(&sv->rb, parent, p);
- rb_insert_color(&sv->rb, &si->volumes);
- si->vols_found += 1;
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+ ai->vols_found += 1;
dbg_bld("added volume %d", vol_id);
- return sv;
+ return av;
}
/**
- * compare_lebs - find out which logical eraseblock is newer.
+ * ubi_compare_lebs - find out which logical eraseblock is newer.
* @ubi: UBI device description object
- * @seb: first logical eraseblock to compare
+ * @aeb: first logical eraseblock to compare
* @pnum: physical eraseblock number of the second logical eraseblock to
* compare
* @vid_hdr: volume identifier header of the second logical eraseblock
@@ -306,7 +311,7 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
* case of success this function returns a positive value, in case of failure, a
* negative error code is returned. The success return codes use the following
* bits:
- * o bit 0 is cleared: the first PEB (described by @seb) is newer than the
+ * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
* second PEB (described by @pnum and @vid_hdr);
* o bit 0 is set: the second PEB is newer;
* o bit 1 is cleared: no bit-flips were detected in the newer LEB;
@@ -314,16 +319,15 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
* o bit 2 is cleared: the older LEB is not corrupted;
* o bit 2 is set: the older LEB is corrupted.
*/
-static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr)
{
- void *buf;
int len, err, second_is_newer, bitflips = 0, corrupted = 0;
uint32_t data_crc, crc;
struct ubi_vid_hdr *vh = NULL;
unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
- if (sqnum2 == seb->sqnum) {
+ if (sqnum2 == aeb->sqnum) {
/*
* This must be a really ancient UBI image which has been
* created before sequence numbers support has been added. At
@@ -332,12 +336,12 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
* support these images anymore. Well, those images still work,
* but only if no unclean reboots happened.
*/
- ubi_err("unsupported on-flash UBI format\n");
+ ubi_err("unsupported on-flash UBI format");
return -EINVAL;
}
/* Obviously the LEB with lower sequence counter is older */
- second_is_newer = !!(sqnum2 > seb->sqnum);
+ second_is_newer = (sqnum2 > aeb->sqnum);
/*
* Now we know which copy is newer. If the copy flag of the PEB with
@@ -356,7 +360,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
return 1;
}
} else {
- if (!seb->copy_flag) {
+ if (!aeb->copy_flag) {
/* It is not a copy, so it is newer */
dbg_bld("first PEB %d is newer, copy_flag is unset",
pnum);
@@ -367,14 +371,14 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
if (!vh)
return -ENOMEM;
- pnum = seb->pnum;
+ pnum = aeb->pnum;
err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
if (err) {
if (err == UBI_IO_BITFLIPS)
bitflips = 1;
else {
- dbg_err("VID of PEB %d header is bad, but it "
- "was OK earlier, err %d", pnum, err);
+ ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d",
+ pnum, err);
if (err > 0)
err = -EIO;
@@ -388,18 +392,14 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
/* Read the data of the copy and check the CRC */
len = be32_to_cpu(vid_hdr->data_size);
- buf = vmalloc(len);
- if (!buf) {
- err = -ENOMEM;
- goto out_free_vidh;
- }
- err = ubi_io_read_data(ubi, buf, pnum, 0, len);
- if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
- goto out_free_buf;
+ mutex_lock(&ubi->buf_mutex);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
+ goto out_unlock;
data_crc = be32_to_cpu(vid_hdr->data_crc);
- crc = crc32(UBI_CRC32_INIT, buf, len);
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
if (crc != data_crc) {
dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
pnum, crc, data_crc);
@@ -410,8 +410,8 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
dbg_bld("PEB %d CRC is OK", pnum);
bitflips = !!err;
}
+ mutex_unlock(&ubi->buf_mutex);
- vfree(buf);
ubi_free_vid_hdr(ubi, vh);
if (second_is_newer)
@@ -421,17 +421,17 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
return second_is_newer | (bitflips << 1) | (corrupted << 2);
-out_free_buf:
- vfree(buf);
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
out_free_vidh:
ubi_free_vid_hdr(ubi, vh);
return err;
}
/**
- * ubi_scan_add_used - add physical eraseblock to the scanning information.
+ * ubi_add_to_av - add used physical eraseblock to the attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @pnum: the physical eraseblock number
* @ec: erase counter
* @vid_hdr: the volume identifier header
@@ -444,14 +444,13 @@ out_free_vidh:
* to be picked, while the older one has to be dropped. This function returns
* zero in case of success and a negative error code in case of failure.
*/
-int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
- int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
- int bitflips)
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
{
int err, vol_id, lnum;
unsigned long long sqnum;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb;
struct rb_node **p, *parent = NULL;
vol_id = be32_to_cpu(vid_hdr->vol_id);
@@ -461,25 +460,25 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
pnum, vol_id, lnum, ec, sqnum, bitflips);
- sv = add_volume(si, vol_id, pnum, vid_hdr);
- if (IS_ERR(sv))
- return PTR_ERR(sv);
+ av = add_volume(ai, vol_id, pnum, vid_hdr);
+ if (IS_ERR(av))
+ return PTR_ERR(av);
- if (si->max_sqnum < sqnum)
- si->max_sqnum = sqnum;
+ if (ai->max_sqnum < sqnum)
+ ai->max_sqnum = sqnum;
/*
* Walk the RB-tree of logical eraseblocks of volume @vol_id to look
* if this is the first instance of this logical eraseblock or not.
*/
- p = &sv->root.rb_node;
+ p = &av->root.rb_node;
while (*p) {
int cmp_res;
parent = *p;
- seb = rb_entry(parent, struct ubi_scan_leb, u.rb);
- if (lnum != seb->lnum) {
- if (lnum < seb->lnum)
+ aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+ if (lnum != aeb->lnum) {
+ if (lnum < aeb->lnum)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
@@ -491,8 +490,8 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* logical eraseblock present.
*/
- dbg_bld("this LEB already exists: PEB %d, sqnum %llu, "
- "EC %d", seb->pnum, seb->sqnum, seb->ec);
+ dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
+ aeb->pnum, aeb->sqnum, aeb->ec);
/*
* Make sure that the logical eraseblocks have different
@@ -503,15 +502,15 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* sequence numbers. We still can attach these images, unless
* there is a need to distinguish between old and new
* eraseblocks, in which case we'll refuse the image in
- * 'compare_lebs()'. In other words, we attach old clean
+ * 'ubi_compare_lebs()'. In other words, we attach old clean
* images, but refuse attaching old images with duplicated
* logical eraseblocks because there was an unclean reboot.
*/
- if (seb->sqnum == sqnum && sqnum != 0) {
+ if (aeb->sqnum == sqnum && sqnum != 0) {
ubi_err("two LEBs with same sequence number %llu",
sqnum);
- ubi_dbg_dump_seb(seb, 0);
- ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_dump_aeb(aeb, 0);
+ ubi_dump_vid_hdr(vid_hdr);
return -EINVAL;
}
@@ -519,7 +518,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* Now we have to drop the older one and preserve the newer
* one.
*/
- cmp_res = compare_lebs(ubi, seb, pnum, vid_hdr);
+ cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
if (cmp_res < 0)
return cmp_res;
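The return value of ubi_compare_lebs() is a small bit mask, as the comment at the top of this file change describes: bit 0 is set when the second (newly found) copy is newer, bit 1 when bit-flips were seen and the PEB should be scrubbed, bit 2 when the older LEB is corrupted. The next hunk decodes it with cmp_res & 1, & 2 and & 4. A minimal standalone sketch of that decoding, separate from the patch itself:

#include <stdio.h>

/*
 * Sketch only: decode the bit mask returned by ubi_compare_lebs().
 * Bit 0 - the second (newly found) copy is newer;
 * bit 1 - bit-flips were detected, the PEB should be scrubbed;
 * bit 2 - the older LEB is corrupted.
 */
static void decode_cmp_res(int cmp_res)
{
	int second_is_newer = cmp_res & 1;
	int needs_scrub = !!(cmp_res & 2);
	int older_corrupted = !!(cmp_res & 4);

	printf("second newer: %d, scrub: %d, older corrupted: %d\n",
	       second_is_newer, needs_scrub, older_corrupted);
}

int main(void)
{
	decode_cmp_res(1 | 2);	/* newer copy, but it had bit-flips */
	return 0;
}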
@@ -528,23 +527,26 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* This logical eraseblock is newer than the one
* found earlier.
*/
- err = validate_vid_hdr(vid_hdr, sv, pnum);
+ err = validate_vid_hdr(vid_hdr, av, pnum);
if (err)
return err;
- err = add_to_list(si, seb->pnum, seb->ec, cmp_res & 4,
- &si->erase);
+ err = add_to_list(ai, aeb->pnum, aeb->vol_id,
+ aeb->lnum, aeb->ec, cmp_res & 4,
+ &ai->erase);
if (err)
return err;
- seb->ec = ec;
- seb->pnum = pnum;
- seb->scrub = ((cmp_res & 2) || bitflips);
- seb->copy_flag = vid_hdr->copy_flag;
- seb->sqnum = sqnum;
+ aeb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->scrub = ((cmp_res & 2) || bitflips);
+ aeb->copy_flag = vid_hdr->copy_flag;
+ aeb->sqnum = sqnum;
- if (sv->highest_lnum == lnum)
- sv->last_data_size =
+ if (av->highest_lnum == lnum)
+ av->last_data_size =
be32_to_cpu(vid_hdr->data_size);
return 0;
@@ -553,63 +555,64 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* This logical eraseblock is older than the one found
* previously.
*/
- return add_to_list(si, pnum, ec, cmp_res & 4,
- &si->erase);
+ return add_to_list(ai, pnum, vol_id, lnum, ec,
+ cmp_res & 4, &ai->erase);
}
}
/*
* We've met this logical eraseblock for the first time, add it to the
- * scanning information.
+ * attaching information.
*/
- err = validate_vid_hdr(vid_hdr, sv, pnum);
+ err = validate_vid_hdr(vid_hdr, av, pnum);
if (err)
return err;
- seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
- if (!seb)
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
return -ENOMEM;
- seb->ec = ec;
- seb->pnum = pnum;
- seb->lnum = lnum;
- seb->scrub = bitflips;
- seb->copy_flag = vid_hdr->copy_flag;
- seb->sqnum = sqnum;
-
- if (sv->highest_lnum <= lnum) {
- sv->highest_lnum = lnum;
- sv->last_data_size = be32_to_cpu(vid_hdr->data_size);
+ aeb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->scrub = bitflips;
+ aeb->copy_flag = vid_hdr->copy_flag;
+ aeb->sqnum = sqnum;
+
+ if (av->highest_lnum <= lnum) {
+ av->highest_lnum = lnum;
+ av->last_data_size = be32_to_cpu(vid_hdr->data_size);
}
- sv->leb_count += 1;
- rb_link_node(&seb->u.rb, parent, p);
- rb_insert_color(&seb->u.rb, &sv->root);
+ av->leb_count += 1;
+ rb_link_node(&aeb->u.rb, parent, p);
+ rb_insert_color(&aeb->u.rb, &av->root);
return 0;
}
/**
- * ubi_scan_find_sv - find volume in the scanning information.
- * @si: scanning information
+ * ubi_find_av - find volume in the attaching information.
+ * @ai: attaching information
* @vol_id: the requested volume ID
*
* This function returns a pointer to the volume description or %NULL if there
- * are no data about this volume in the scanning information.
+ * are no data about this volume in the attaching information.
*/
-struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
- int vol_id)
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id)
{
- struct ubi_scan_volume *sv;
- struct rb_node *p = si->volumes.rb_node;
+ struct ubi_ainf_volume *av;
+ struct rb_node *p = ai->volumes.rb_node;
while (p) {
- sv = rb_entry(p, struct ubi_scan_volume, rb);
+ av = rb_entry(p, struct ubi_ainf_volume, rb);
- if (vol_id == sv->vol_id)
- return sv;
+ if (vol_id == av->vol_id)
+ return av;
- if (vol_id > sv->vol_id)
+ if (vol_id > av->vol_id)
p = p->rb_left;
else
p = p->rb_right;
@@ -619,63 +622,34 @@ struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
}
/**
- * ubi_scan_find_seb - find LEB in the volume scanning information.
- * @sv: a pointer to the volume scanning information
- * @lnum: the requested logical eraseblock
- *
- * This function returns a pointer to the scanning logical eraseblock or %NULL
- * if there are no data about it in the scanning volume information.
+ * ubi_remove_av - delete attaching information about a volume.
+ * @ai: attaching information
+ * @av: the volume attaching information to delete
*/
-struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
- int lnum)
-{
- struct ubi_scan_leb *seb;
- struct rb_node *p = sv->root.rb_node;
-
- while (p) {
- seb = rb_entry(p, struct ubi_scan_leb, u.rb);
-
- if (lnum == seb->lnum)
- return seb;
-
- if (lnum > seb->lnum)
- p = p->rb_left;
- else
- p = p->rb_right;
- }
-
- return NULL;
-}
-
-/**
- * ubi_scan_rm_volume - delete scanning information about a volume.
- * @si: scanning information
- * @sv: the volume scanning information to delete
- */
-void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
struct rb_node *rb;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
- dbg_bld("remove scanning information about volume %d", sv->vol_id);
+ dbg_bld("remove attaching information about volume %d", av->vol_id);
- while ((rb = rb_first(&sv->root))) {
- seb = rb_entry(rb, struct ubi_scan_leb, u.rb);
- rb_erase(&seb->u.rb, &sv->root);
- list_add_tail(&seb->u.list, &si->erase);
+ while ((rb = rb_first(&av->root))) {
+ aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
+ rb_erase(&aeb->u.rb, &av->root);
+ list_add_tail(&aeb->u.list, &ai->erase);
}
- rb_erase(&sv->rb, &si->volumes);
- kfree(sv);
- si->vols_found -= 1;
+ rb_erase(&av->rb, &ai->volumes);
+ kfree(av);
+ ai->vols_found -= 1;
}
/**
- * ubi_scan_erase_peb - erase a physical eraseblock.
+ * early_erase_peb - erase a physical eraseblock.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @pnum: physical eraseblock number to erase;
- * @ec: erase counter value to write (%UBI_SCAN_UNKNOWN_EC if it is unknown)
+ * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
*
* This function erases physical eraseblock 'pnum', and writes the erase
* counter header to it. This function should only be used on UBI device
@@ -683,8 +657,8 @@ void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
- int pnum, int ec)
+static int early_erase_peb(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai, int pnum, int ec)
{
int err;
struct ubi_ec_hdr *ec_hdr;
@@ -716,9 +690,9 @@ out_free:
}
/**
- * ubi_scan_get_free_peb - get a free physical eraseblock.
+ * ubi_early_get_peb - get a free physical eraseblock.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns a free physical eraseblock. It is supposed to be
* called on the UBI initialization stages when the wear-leveling sub-system is
@@ -726,20 +700,20 @@ out_free:
* the lists, writes the EC header if it is needed, and removes it from the
* list.
*
- * This function returns scanning physical eraseblock information in case of
- * success and an error code in case of failure.
+ * This function returns a pointer to the "aeb" of the found free PEB in case
+ * of success and an error code in case of failure.
*/
-struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
- struct ubi_scan_info *si)
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
{
int err = 0;
- struct ubi_scan_leb *seb, *tmp_seb;
+ struct ubi_ainf_peb *aeb, *tmp_aeb;
- if (!list_empty(&si->free)) {
- seb = list_entry(si->free.next, struct ubi_scan_leb, u.list);
- list_del(&seb->u.list);
- dbg_bld("return free PEB %d, EC %d", seb->pnum, seb->ec);
- return seb;
+ if (!list_empty(&ai->free)) {
+ aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
+ list_del(&aeb->u.list);
+ dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
+ return aeb;
}
/*
@@ -748,18 +722,18 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
* so forth. We don't want to take care about bad eraseblocks here -
* they'll be handled later.
*/
- list_for_each_entry_safe(seb, tmp_seb, &si->erase, u.list) {
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
- err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1);
+ err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
if (err)
continue;
- seb->ec += 1;
- list_del(&seb->u.list);
- dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec);
- return seb;
+ aeb->ec += 1;
+ list_del(&aeb->u.list);
+ dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
+ return aeb;
}
ubi_err("no free eraseblocks");
@@ -769,7 +743,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
/**
* check_corruption - check the data area of PEB.
* @ubi: UBI device description object
- * @vid_hrd: the (corrupted) VID header of this PEB
+ * @vid_hdr: the (corrupted) VID header of this PEB
* @pnum: the physical eraseblock number to check
*
* This is a helper function which is used to distinguish between VID header
@@ -789,11 +763,11 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
int err;
mutex_lock(&ubi->buf_mutex);
- memset(ubi->peb_buf1, 0x00, ubi->leb_size);
+ memset(ubi->peb_buf, 0x00, ubi->leb_size);
- err = ubi_io_read(ubi, ubi->peb_buf1, pnum, ubi->leb_start,
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
ubi->leb_size);
- if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
/*
* Bit-flips or integrity errors while reading the data area.
* It is difficult to say for sure what type of corruption is
@@ -808,17 +782,17 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
if (err)
goto out_unlock;
- if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size))
+ if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
goto out_unlock;
- ubi_err("PEB %d contains corrupted VID header, and the data does not "
- "contain all 0xFF, this may be a non-UBI PEB or a severe VID "
- "header corruption which requires manual inspection", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- dbg_msg("hexdump of PEB %d offset %d, length %d",
- pnum, ubi->leb_start, ubi->leb_size);
+ ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
+ pnum);
+ ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
+ ubi_dump_vid_hdr(vid_hdr);
+ pr_err("hexdump of PEB %d offset %d, length %d",
+ pnum, ubi->leb_start, ubi->leb_size);
ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
- ubi->peb_buf1, ubi->leb_size, 1);
+ ubi->peb_buf, ubi->leb_size, 1);
err = 1;
out_unlock:
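check_corruption() above decides between a power-cut artifact and real corruption by asking whether the data area is still in the erased, all-0xFF state; the real code uses ubi_check_pattern() for that. A minimal standalone sketch, written under the assumption that it mirrors ubi_check_pattern()'s semantics:

#include <stdint.h>
#include <stddef.h>

/*
 * Sketch only: return 1 if every byte of @buf equals @patt, 0 otherwise.
 * check_corruption() calls the real ubi_check_pattern() with 0xFF to tell
 * an interrupted write (erased data area) from genuine corruption.
 */
static int check_pattern(const void *buf, uint8_t patt, size_t size)
{
	const uint8_t *p = buf;
	size_t i;

	for (i = 0; i < size; i++)
		if (p[i] != patt)
			return 0;
	return 1;
}

int main(void)
{
	uint8_t erased[4] = { 0xFF, 0xFF, 0xFF, 0xFF };
	uint8_t written[4] = { 0xFF, 0x12, 0xFF, 0xFF };

	/* Expect: erased buffer matches, partially written one does not. */
	return !(check_pattern(erased, 0xFF, 4) && !check_pattern(written, 0xFF, 4));
}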
@@ -827,19 +801,23 @@ out_unlock:
}
/**
- * process_eb - read, check UBI headers, and add them to scanning information.
+ * scan_peb - scan and process UBI headers of a PEB.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @pnum: the physical eraseblock number
+ * @vid: The volume ID of the found volume will be stored in this pointer
+ * @sqnum: The sqnum of the found volume will be stored in this pointer
*
- * This function returns a zero if the physical eraseblock was successfully
- * handled and a negative error code in case of failure.
+ * This function reads UBI headers of PEB @pnum, checks them, and adds
+ * information about this PEB to the corresponding list or RB-tree in the
+ * "attaching info" structure. Returns zero if the physical eraseblock was
+ * successfully handled and a negative error code in case of failure.
*/
-static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
- int pnum)
+static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int pnum, int *vid, unsigned long long *sqnum)
{
long long uninitialized_var(ec);
- int err, bitflips = 0, vol_id, ec_err = 0;
+ int err, bitflips = 0, vol_id = -1, ec_err = 0;
dbg_bld("scan PEB %d", pnum);
@@ -848,12 +826,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
if (err < 0)
return err;
else if (err) {
- /*
- * FIXME: this is actually duty of the I/O sub-system to
- * initialize this, but MTD does not provide enough
- * information.
- */
- si->bad_peb_count += 1;
+ ai->bad_peb_count += 1;
return 0;
}
@@ -867,13 +840,13 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
bitflips = 1;
break;
case UBI_IO_FF:
- si->empty_peb_count += 1;
- return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, 0,
- &si->erase);
+ ai->empty_peb_count += 1;
+ return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ UBI_UNKNOWN, 0, &ai->erase);
case UBI_IO_FF_BITFLIPS:
- si->empty_peb_count += 1;
- return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, 1,
- &si->erase);
+ ai->empty_peb_count += 1;
+ return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ UBI_UNKNOWN, 1, &ai->erase);
case UBI_IO_BAD_HDR_EBADMSG:
case UBI_IO_BAD_HDR:
/*
@@ -882,7 +855,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
* moved and EC be re-created.
*/
ec_err = err;
- ec = UBI_SCAN_UNKNOWN_EC;
+ ec = UBI_UNKNOWN;
bitflips = 1;
break;
default:
@@ -911,7 +884,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
*/
ubi_err("erase counter overflow, max is %d",
UBI_MAX_ERASECOUNTER);
- ubi_dbg_dump_ec_hdr(ech);
+ ubi_dump_ec_hdr(ech);
return -EINVAL;
}
@@ -927,13 +900,12 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
* number.
*/
image_seq = be32_to_cpu(ech->image_seq);
- if (!ubi->image_seq && image_seq)
+ if (!ubi->image_seq)
ubi->image_seq = image_seq;
- if (ubi->image_seq && image_seq &&
- ubi->image_seq != image_seq) {
- ubi_err("bad image sequence number %d in PEB %d, "
- "expected %d", image_seq, pnum, ubi->image_seq);
- ubi_dbg_dump_ec_hdr(ech);
+ if (image_seq && ubi->image_seq != image_seq) {
+ ubi_err("bad image sequence number %d in PEB %d, expected %d",
+ image_seq, pnum, ubi->image_seq);
+ ubi_dump_ec_hdr(ech);
return -EINVAL;
}
}
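The hunk above simplifies the image sequence number check: the first image_seq read from an EC header is adopted for the whole device, and any later PEB carrying a different non-zero image_seq aborts the attach (a zero image_seq is tolerated for old images). A standalone sketch of that rule, with made-up values:

#include <stdio.h>

/*
 * Sketch only: the first image_seq seen is adopted; a later PEB whose
 * non-zero image_seq differs makes the attach fail. Values are made up.
 */
int main(void)
{
	unsigned int ubi_image_seq = 0;
	unsigned int pebs[] = { 0x11223344, 0x11223344, 0, 0xdeadbeef };
	int i;

	for (i = 0; i < 4; i++) {
		unsigned int image_seq = pebs[i];

		if (!ubi_image_seq)
			ubi_image_seq = image_seq;
		if (image_seq && ubi_image_seq != image_seq) {
			printf("PEB %d: bad image_seq %#x, expected %#x\n",
			       i, image_seq, ubi_image_seq);
			return 1;
		}
	}
	return 0;
}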
@@ -957,7 +929,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
* PEB, but it is not marked as bad yet. This may also
* be a result of power cut during erasure.
*/
- si->maybe_bad_peb_count += 1;
+ ai->maybe_bad_peb_count += 1;
case UBI_IO_BAD_HDR:
if (ec_err)
/*
@@ -984,23 +956,27 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
return err;
else if (!err)
/* This corruption is caused by a power cut */
- err = add_to_list(si, pnum, ec, 1, &si->erase);
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 1, &ai->erase);
else
/* This is an unexpected corruption */
- err = add_corrupted(si, pnum, ec);
+ err = add_corrupted(ai, pnum, ec);
if (err)
return err;
goto adjust_mean_ec;
case UBI_IO_FF_BITFLIPS:
- err = add_to_list(si, pnum, ec, 1, &si->erase);
+ err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ ec, 1, &ai->erase);
if (err)
return err;
goto adjust_mean_ec;
case UBI_IO_FF:
- if (ec_err)
- err = add_to_list(si, pnum, ec, 1, &si->erase);
+ if (ec_err || bitflips)
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 1, &ai->erase);
else
- err = add_to_list(si, pnum, ec, 0, &si->free);
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 0, &ai->free);
if (err)
return err;
goto adjust_mean_ec;
@@ -1011,30 +987,38 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
}
vol_id = be32_to_cpu(vidh->vol_id);
+ if (vid)
+ *vid = vol_id;
+ if (sqnum)
+ *sqnum = be64_to_cpu(vidh->sqnum);
if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
int lnum = be32_to_cpu(vidh->lnum);
/* Unsupported internal volume */
switch (vidh->compat) {
case UBI_COMPAT_DELETE:
- ubi_msg("\"delete\" compatible internal volume %d:%d"
- " found, will remove it", vol_id, lnum);
- err = add_to_list(si, pnum, ec, 1, &si->erase);
+ if (vol_id != UBI_FM_SB_VOLUME_ID
+ && vol_id != UBI_FM_DATA_VOLUME_ID) {
+ ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
+ vol_id, lnum);
+ }
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 1, &ai->erase);
if (err)
return err;
return 0;
case UBI_COMPAT_RO:
- ubi_msg("read-only compatible internal volume %d:%d"
- " found, switch to read-only mode",
+ ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
vol_id, lnum);
ubi->ro_mode = 1;
break;
case UBI_COMPAT_PRESERVE:
- ubi_msg("\"preserve\" compatible internal volume %d:%d"
- " found", vol_id, lnum);
- err = add_to_list(si, pnum, ec, 0, &si->alien);
+ ubi_msg("\"preserve\" compatible internal volume %d:%d found",
+ vol_id, lnum);
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 0, &ai->alien);
if (err)
return err;
return 0;
@@ -1049,40 +1033,40 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
if (ec_err)
ubi_warn("valid VID header but corrupted EC header at PEB %d",
pnum);
- err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips);
+ err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
if (err)
return err;
adjust_mean_ec:
if (!ec_err) {
- si->ec_sum += ec;
- si->ec_count += 1;
- if (ec > si->max_ec)
- si->max_ec = ec;
- if (ec < si->min_ec)
- si->min_ec = ec;
+ ai->ec_sum += ec;
+ ai->ec_count += 1;
+ if (ec > ai->max_ec)
+ ai->max_ec = ec;
+ if (ec < ai->min_ec)
+ ai->min_ec = ec;
}
return 0;
}
/**
- * check_what_we_have - check what PEB were found by scanning.
+ * late_analysis - analyze the overall situation with PEB.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
- * This is a helper function which takes a look what PEBs were found by
- * scanning, and decides whether the flash is empty and should be formatted and
- * whether there are too many corrupted PEBs and we should not attach this
- * MTD device. Returns zero if we should proceed with attaching the MTD device,
- * and %-EINVAL if we should not.
+ * This is a helper function which takes a look at what PEBs we have after we
+ * gather information about all of them ("ai" is complete). It decides whether
+ * the flash is empty and should be formatted or whether there are too many
+ * corrupted PEBs and we should not attach this MTD device. Returns zero if we
+ * should proceed with attaching the MTD device, and %-EINVAL if we should not.
*/
-static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
+static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
int max_corr, peb_count;
- peb_count = ubi->peb_count - si->bad_peb_count - si->alien_peb_count;
+ peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
max_corr = peb_count / 20 ?: 8;
/*
@@ -1090,25 +1074,25 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
* unclean reboots. However, many of them may indicate some problems
* with the flash HW or driver.
*/
- if (si->corr_peb_count) {
+ if (ai->corr_peb_count) {
ubi_err("%d PEBs are corrupted and preserved",
- si->corr_peb_count);
- printk(KERN_ERR "Corrupted PEBs are:");
- list_for_each_entry(seb, &si->corr, u.list)
- printk(KERN_CONT " %d", seb->pnum);
- printk(KERN_CONT "\n");
+ ai->corr_peb_count);
+ pr_err("Corrupted PEBs are:");
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ pr_cont(" %d", aeb->pnum);
+ pr_cont("\n");
/*
* If too many PEBs are corrupted, we refuse attaching,
* otherwise, only print a warning.
*/
- if (si->corr_peb_count >= max_corr) {
+ if (ai->corr_peb_count >= max_corr) {
ubi_err("too many corrupted PEBs, refusing");
return -EINVAL;
}
}
- if (si->empty_peb_count + si->maybe_bad_peb_count == peb_count) {
+ if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
/*
* All PEBs are empty, or almost all - a couple PEBs look like
* they may be bad PEBs which were not marked as bad yet.
@@ -1124,14 +1108,13 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
* 2. Flash contains non-UBI data and we do not want to format
* it and destroy possibly important information.
*/
- if (si->maybe_bad_peb_count <= 2) {
- si->is_empty = 1;
+ if (ai->maybe_bad_peb_count <= 2) {
+ ai->is_empty = 1;
ubi_msg("empty MTD device detected");
get_random_bytes(&ubi->image_seq,
sizeof(ubi->image_seq));
} else {
- ubi_err("MTD device is not UBI-formatted and possibly "
- "contains non-UBI data - refusing it");
+ ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
return -EINVAL;
}
@@ -1141,61 +1124,137 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
}
/**
- * ubi_scan - scan an MTD device.
+ * destroy_av - free volume attaching information.
+ * @av: volume attaching information
+ * @ai: attaching information
+ *
+ * This function destroys the volume attaching information.
+ */
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *this = av->root.rb_node;
+
+ while (this) {
+ if (this->rb_left)
+ this = this->rb_left;
+ else if (this->rb_right)
+ this = this->rb_right;
+ else {
+ aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
+ this = rb_parent(this);
+ if (this) {
+ if (this->rb_left == &aeb->u.rb)
+ this->rb_left = NULL;
+ else
+ this->rb_right = NULL;
+ }
+
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ }
+ kfree(av);
+}
+
+/**
+ * destroy_ai - destroy attaching information.
+ * @ai: attaching information
+ */
+static void destroy_ai(struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb, *aeb_tmp;
+ struct ubi_ainf_volume *av;
+ struct rb_node *rb;
+
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+
+ /* Destroy the volume RB-tree */
+ rb = ai->volumes.rb_node;
+ while (rb) {
+ if (rb->rb_left)
+ rb = rb->rb_left;
+ else if (rb->rb_right)
+ rb = rb->rb_right;
+ else {
+ av = rb_entry(rb, struct ubi_ainf_volume, rb);
+
+ rb = rb_parent(rb);
+ if (rb) {
+ if (rb->rb_left == &av->rb)
+ rb->rb_left = NULL;
+ else
+ rb->rb_right = NULL;
+ }
+
+ destroy_av(ai, av);
+ }
+ }
+
+ if (ai->aeb_slab_cache)
+ kmem_cache_destroy(ai->aeb_slab_cache);
+
+ kfree(ai);
+}
+
+/**
+ * scan_all - scan entire MTD device.
* @ubi: UBI device description object
+ * @ai: attach info object
+ * @start: start scanning at this PEB
*
* This function does full scanning of an MTD device and returns complete
- * information about it. In case of failure, an error code is returned.
+ * information about it in form of a "struct ubi_attach_info" object. In case
+ * of failure, an error code is returned.
*/
-struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
+static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int start)
{
int err, pnum;
struct rb_node *rb1, *rb2;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb;
- struct ubi_scan_info *si;
-
- si = kzalloc(sizeof(struct ubi_scan_info), GFP_KERNEL);
- if (!si)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&si->corr);
- INIT_LIST_HEAD(&si->free);
- INIT_LIST_HEAD(&si->erase);
- INIT_LIST_HEAD(&si->alien);
- si->volumes = RB_ROOT;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb;
err = -ENOMEM;
- si->scan_leb_slab = kmem_cache_create("ubi_scan_leb_slab",
- sizeof(struct ubi_scan_leb),
- 0, 0, NULL);
- if (!si->scan_leb_slab)
- goto out_si;
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
- goto out_slab;
+ return err;
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
goto out_ech;
- for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+ for (pnum = start; pnum < ubi->peb_count; pnum++) {
cond_resched();
dbg_gen("process PEB %d", pnum);
- err = process_eb(ubi, si, pnum);
+ err = scan_peb(ubi, ai, pnum, NULL, NULL);
if (err < 0)
goto out_vidh;
}
- dbg_msg("scanning is finished");
+ ubi_msg("scanning is finished");
/* Calculate mean erase counter */
- if (si->ec_count)
- si->mean_ec = div_u64(si->ec_sum, si->ec_count);
+ if (ai->ec_count)
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
- err = check_what_we_have(ubi, si);
+ err = late_analysis(ubi, ai);
if (err)
goto out_vidh;
@@ -1203,280 +1262,375 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
* In case of unknown erase counter we use the mean erase counter
* value.
*/
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
}
- list_for_each_entry(seb, &si->free, u.list) {
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ list_for_each_entry(aeb, &ai->free, u.list) {
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
}
- list_for_each_entry(seb, &si->corr, u.list)
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
- list_for_each_entry(seb, &si->erase, u.list)
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
- err = paranoid_check_si(ubi, si);
+ err = self_check_ai(ubi, ai);
if (err)
goto out_vidh;
ubi_free_vid_hdr(ubi, vidh);
kfree(ech);
- return si;
+ return 0;
out_vidh:
ubi_free_vid_hdr(ubi, vidh);
out_ech:
kfree(ech);
-out_slab:
- kmem_cache_destroy(si->scan_leb_slab);
-out_si:
- ubi_scan_destroy_si(si);
- return ERR_PTR(err);
+ return err;
}
+#ifdef CONFIG_MTD_UBI_FASTMAP
+
/**
- * destroy_sv - free the scanning volume information
- * @sv: scanning volume information
- * @si: scanning information
+ * scan_fast - try to find a fastmap and attach from it.
+ * @ubi: UBI device description object
+ * @ai: attach info object
*
- * This function destroys the volume RB-tree (@sv->root) and the scanning
- * volume information.
+ * Returns 0 on success; negative return values indicate an internal
+ * error.
+ * UBI_NO_FASTMAP denotes that no fastmap was found.
+ * UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
*/
-static void destroy_sv(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
+static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
- struct ubi_scan_leb *seb;
- struct rb_node *this = sv->root.rb_node;
+ int err, pnum, fm_anchor = -1;
+ unsigned long long max_sqnum = 0;
- while (this) {
- if (this->rb_left)
- this = this->rb_left;
- else if (this->rb_right)
- this = this->rb_right;
- else {
- seb = rb_entry(this, struct ubi_scan_leb, u.rb);
- this = rb_parent(this);
- if (this) {
- if (this->rb_left == &seb->u.rb)
- this->rb_left = NULL;
- else
- this->rb_right = NULL;
- }
+ err = -ENOMEM;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ goto out;
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+ goto out_ech;
- kmem_cache_free(si->scan_leb_slab, seb);
+ for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
+ int vol_id = -1;
+ unsigned long long sqnum = -1;
+ cond_resched();
+
+ dbg_gen("process PEB %d", pnum);
+ err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
+ if (err < 0)
+ goto out_vidh;
+
+ if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
+ max_sqnum = sqnum;
+ fm_anchor = pnum;
}
}
- kfree(sv);
+
+ ubi_free_vid_hdr(ubi, vidh);
+ kfree(ech);
+
+ if (fm_anchor < 0)
+ return UBI_NO_FASTMAP;
+
+ return ubi_scan_fastmap(ubi, ai, fm_anchor);
+
+out_vidh:
+ ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+ kfree(ech);
+out:
+ return err;
+}
+
+#endif
+
+static struct ubi_attach_info *alloc_ai(const char *slab_name)
+{
+ struct ubi_attach_info *ai;
+
+ ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+ if (!ai)
+ return ai;
+
+ INIT_LIST_HEAD(&ai->corr);
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
+ ai->volumes = RB_ROOT;
+ ai->aeb_slab_cache = kmem_cache_create(slab_name,
+ sizeof(struct ubi_ainf_peb),
+ 0, 0, NULL);
+ if (!ai->aeb_slab_cache) {
+ kfree(ai);
+ ai = NULL;
+ }
+
+ return ai;
}
/**
- * ubi_scan_destroy_si - destroy scanning information.
- * @si: scanning information
+ * ubi_attach - attach an MTD device.
+ * @ubi: UBI device descriptor
+ * @force_scan: if set to non-zero attach by scanning
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
*/
-void ubi_scan_destroy_si(struct ubi_scan_info *si)
+int ubi_attach(struct ubi_device *ubi, int force_scan)
{
- struct ubi_scan_leb *seb, *seb_tmp;
- struct ubi_scan_volume *sv;
- struct rb_node *rb;
+ int err;
+ struct ubi_attach_info *ai;
- list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) {
- list_del(&seb->u.list);
- kmem_cache_free(si->scan_leb_slab, seb);
- }
- list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) {
- list_del(&seb->u.list);
- kmem_cache_free(si->scan_leb_slab, seb);
- }
- list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) {
- list_del(&seb->u.list);
- kmem_cache_free(si->scan_leb_slab, seb);
+ ai = alloc_ai("ubi_aeb_slab_cache");
+ if (!ai)
+ return -ENOMEM;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* On small flash devices we disable fastmap in any case. */
+ if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
+ ubi->fm_disabled = 1;
+ force_scan = 1;
}
- list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) {
- list_del(&seb->u.list);
- kmem_cache_free(si->scan_leb_slab, seb);
+
+ if (force_scan)
+ err = scan_all(ubi, ai, 0);
+ else {
+ err = scan_fast(ubi, ai);
+ if (err > 0) {
+ if (err != UBI_NO_FASTMAP) {
+ destroy_ai(ai);
+ ai = alloc_ai("ubi_aeb_slab_cache2");
+ if (!ai)
+ return -ENOMEM;
+
+ err = scan_all(ubi, ai, 0);
+ } else {
+ err = scan_all(ubi, ai, UBI_FM_MAX_START);
+ }
+ }
}
+#else
+ err = scan_all(ubi, ai, 0);
+#endif
+ if (err)
+ goto out_ai;
- /* Destroy the volume RB-tree */
- rb = si->volumes.rb_node;
- while (rb) {
- if (rb->rb_left)
- rb = rb->rb_left;
- else if (rb->rb_right)
- rb = rb->rb_right;
- else {
- sv = rb_entry(rb, struct ubi_scan_volume, rb);
+ ubi->bad_peb_count = ai->bad_peb_count;
+ ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
+ ubi->corr_peb_count = ai->corr_peb_count;
+ ubi->max_ec = ai->max_ec;
+ ubi->mean_ec = ai->mean_ec;
+ dbg_gen("max. sequence number: %llu", ai->max_sqnum);
- rb = rb_parent(rb);
- if (rb) {
- if (rb->rb_left == &sv->rb)
- rb->rb_left = NULL;
- else
- rb->rb_right = NULL;
- }
+ err = ubi_read_volume_table(ubi, ai);
+ if (err)
+ goto out_ai;
- destroy_sv(si, sv);
+ err = ubi_wl_init(ubi, ai);
+ if (err)
+ goto out_vtbl;
+
+ err = ubi_eba_init(ubi, ai);
+ if (err)
+ goto out_wl;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
+ struct ubi_attach_info *scan_ai;
+
+ scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
+ if (!scan_ai) {
+ err = -ENOMEM;
+ goto out_wl;
}
+
+ err = scan_all(ubi, scan_ai, 0);
+ if (err) {
+ destroy_ai(scan_ai);
+ goto out_wl;
+ }
+
+ err = self_check_eba(ubi, ai, scan_ai);
+ destroy_ai(scan_ai);
+
+ if (err)
+ goto out_wl;
}
+#endif
- kmem_cache_destroy(si->scan_leb_slab);
- kfree(si);
-}
+ destroy_ai(ai);
+ return 0;
-#ifdef CONFIG_MTD_UBI_DEBUG
+out_wl:
+ ubi_wl_close(ubi);
+out_vtbl:
+ ubi_free_internal_volumes(ubi);
+ vfree(ubi->vtbl);
+out_ai:
+ destroy_ai(ai);
+ return err;
+}
/**
- * paranoid_check_si - check the scanning information.
+ * self_check_ai - check the attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
- * This function returns zero if the scanning information is all right, and a
+ * This function returns zero if the attaching information is all right, and a
* negative error code if not or if an error occurred.
*/
-static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int pnum, err, vols_found = 0;
struct rb_node *rb1, *rb2;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb, *last_seb;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *last_aeb;
uint8_t *buf;
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
/*
- * At first, check that scanning information is OK.
+ * At first, check that attaching information is OK.
*/
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
int leb_count = 0;
cond_resched();
vols_found += 1;
- if (si->is_empty) {
+ if (ai->is_empty) {
ubi_err("bad is_empty flag");
- goto bad_sv;
+ goto bad_av;
}
- if (sv->vol_id < 0 || sv->highest_lnum < 0 ||
- sv->leb_count < 0 || sv->vol_type < 0 || sv->used_ebs < 0 ||
- sv->data_pad < 0 || sv->last_data_size < 0) {
+ if (av->vol_id < 0 || av->highest_lnum < 0 ||
+ av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
+ av->data_pad < 0 || av->last_data_size < 0) {
ubi_err("negative values");
- goto bad_sv;
+ goto bad_av;
}
- if (sv->vol_id >= UBI_MAX_VOLUMES &&
- sv->vol_id < UBI_INTERNAL_VOL_START) {
+ if (av->vol_id >= UBI_MAX_VOLUMES &&
+ av->vol_id < UBI_INTERNAL_VOL_START) {
ubi_err("bad vol_id");
- goto bad_sv;
+ goto bad_av;
}
- if (sv->vol_id > si->highest_vol_id) {
+ if (av->vol_id > ai->highest_vol_id) {
ubi_err("highest_vol_id is %d, but vol_id %d is there",
- si->highest_vol_id, sv->vol_id);
+ ai->highest_vol_id, av->vol_id);
goto out;
}
- if (sv->vol_type != UBI_DYNAMIC_VOLUME &&
- sv->vol_type != UBI_STATIC_VOLUME) {
+ if (av->vol_type != UBI_DYNAMIC_VOLUME &&
+ av->vol_type != UBI_STATIC_VOLUME) {
ubi_err("bad vol_type");
- goto bad_sv;
+ goto bad_av;
}
- if (sv->data_pad > ubi->leb_size / 2) {
+ if (av->data_pad > ubi->leb_size / 2) {
ubi_err("bad data_pad");
- goto bad_sv;
+ goto bad_av;
}
- last_seb = NULL;
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+ last_aeb = NULL;
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
cond_resched();
- last_seb = seb;
+ last_aeb = aeb;
leb_count += 1;
- if (seb->pnum < 0 || seb->ec < 0) {
+ if (aeb->pnum < 0 || aeb->ec < 0) {
ubi_err("negative values");
- goto bad_seb;
+ goto bad_aeb;
}
- if (seb->ec < si->min_ec) {
- ubi_err("bad si->min_ec (%d), %d found",
- si->min_ec, seb->ec);
- goto bad_seb;
+ if (aeb->ec < ai->min_ec) {
+ ubi_err("bad ai->min_ec (%d), %d found",
+ ai->min_ec, aeb->ec);
+ goto bad_aeb;
}
- if (seb->ec > si->max_ec) {
- ubi_err("bad si->max_ec (%d), %d found",
- si->max_ec, seb->ec);
- goto bad_seb;
+ if (aeb->ec > ai->max_ec) {
+ ubi_err("bad ai->max_ec (%d), %d found",
+ ai->max_ec, aeb->ec);
+ goto bad_aeb;
}
- if (seb->pnum >= ubi->peb_count) {
+ if (aeb->pnum >= ubi->peb_count) {
ubi_err("too high PEB number %d, total PEBs %d",
- seb->pnum, ubi->peb_count);
- goto bad_seb;
+ aeb->pnum, ubi->peb_count);
+ goto bad_aeb;
}
- if (sv->vol_type == UBI_STATIC_VOLUME) {
- if (seb->lnum >= sv->used_ebs) {
+ if (av->vol_type == UBI_STATIC_VOLUME) {
+ if (aeb->lnum >= av->used_ebs) {
ubi_err("bad lnum or used_ebs");
- goto bad_seb;
+ goto bad_aeb;
}
} else {
- if (sv->used_ebs != 0) {
+ if (av->used_ebs != 0) {
ubi_err("non-zero used_ebs");
- goto bad_seb;
+ goto bad_aeb;
}
}
- if (seb->lnum > sv->highest_lnum) {
+ if (aeb->lnum > av->highest_lnum) {
ubi_err("incorrect highest_lnum or lnum");
- goto bad_seb;
+ goto bad_aeb;
}
}
- if (sv->leb_count != leb_count) {
+ if (av->leb_count != leb_count) {
ubi_err("bad leb_count, %d objects in the tree",
leb_count);
- goto bad_sv;
+ goto bad_av;
}
- if (!last_seb)
+ if (!last_aeb)
continue;
- seb = last_seb;
+ aeb = last_aeb;
- if (seb->lnum != sv->highest_lnum) {
+ if (aeb->lnum != av->highest_lnum) {
ubi_err("bad highest_lnum");
- goto bad_seb;
+ goto bad_aeb;
}
}
- if (vols_found != si->vols_found) {
- ubi_err("bad si->vols_found %d, should be %d",
- si->vols_found, vols_found);
+ if (vols_found != ai->vols_found) {
+ ubi_err("bad ai->vols_found %d, should be %d",
+ ai->vols_found, vols_found);
goto out;
}
- /* Check that scanning information is correct */
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
- last_seb = NULL;
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+ /* Check that attaching information is correct */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ last_aeb = NULL;
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
int vol_type;
cond_resched();
- last_seb = seb;
+ last_aeb = aeb;
- err = ubi_io_read_vid_hdr(ubi, seb->pnum, vidh, 1);
+ err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
if (err && err != UBI_IO_BITFLIPS) {
ubi_err("VID header is not OK (%d)", err);
if (err > 0)
@@ -1486,52 +1640,52 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
- if (sv->vol_type != vol_type) {
+ if (av->vol_type != vol_type) {
ubi_err("bad vol_type");
goto bad_vid_hdr;
}
- if (seb->sqnum != be64_to_cpu(vidh->sqnum)) {
- ubi_err("bad sqnum %llu", seb->sqnum);
+ if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
+ ubi_err("bad sqnum %llu", aeb->sqnum);
goto bad_vid_hdr;
}
- if (sv->vol_id != be32_to_cpu(vidh->vol_id)) {
- ubi_err("bad vol_id %d", sv->vol_id);
+ if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
+ ubi_err("bad vol_id %d", av->vol_id);
goto bad_vid_hdr;
}
- if (sv->compat != vidh->compat) {
+ if (av->compat != vidh->compat) {
ubi_err("bad compat %d", vidh->compat);
goto bad_vid_hdr;
}
- if (seb->lnum != be32_to_cpu(vidh->lnum)) {
- ubi_err("bad lnum %d", seb->lnum);
+ if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err("bad lnum %d", aeb->lnum);
goto bad_vid_hdr;
}
- if (sv->used_ebs != be32_to_cpu(vidh->used_ebs)) {
- ubi_err("bad used_ebs %d", sv->used_ebs);
+ if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
+ ubi_err("bad used_ebs %d", av->used_ebs);
goto bad_vid_hdr;
}
- if (sv->data_pad != be32_to_cpu(vidh->data_pad)) {
- ubi_err("bad data_pad %d", sv->data_pad);
+ if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
+ ubi_err("bad data_pad %d", av->data_pad);
goto bad_vid_hdr;
}
}
- if (!last_seb)
+ if (!last_aeb)
continue;
- if (sv->highest_lnum != be32_to_cpu(vidh->lnum)) {
- ubi_err("bad highest_lnum %d", sv->highest_lnum);
+ if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err("bad highest_lnum %d", av->highest_lnum);
goto bad_vid_hdr;
}
- if (sv->last_data_size != be32_to_cpu(vidh->data_size)) {
- ubi_err("bad last_data_size %d", sv->last_data_size);
+ if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
+ ubi_err("bad last_data_size %d", av->last_data_size);
goto bad_vid_hdr;
}
}
@@ -1553,21 +1707,21 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
buf[pnum] = 1;
}
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb)
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
- buf[seb->pnum] = 1;
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ buf[aeb->pnum] = 1;
- list_for_each_entry(seb, &si->free, u.list)
- buf[seb->pnum] = 1;
+ list_for_each_entry(aeb, &ai->free, u.list)
+ buf[aeb->pnum] = 1;
- list_for_each_entry(seb, &si->corr, u.list)
- buf[seb->pnum] = 1;
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ buf[aeb->pnum] = 1;
- list_for_each_entry(seb, &si->erase, u.list)
- buf[seb->pnum] = 1;
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ buf[aeb->pnum] = 1;
- list_for_each_entry(seb, &si->alien, u.list)
- buf[seb->pnum] = 1;
+ list_for_each_entry(aeb, &ai->alien, u.list)
+ buf[aeb->pnum] = 1;
err = 0;
for (pnum = 0; pnum < ubi->peb_count; pnum++)
@@ -1581,25 +1735,23 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
goto out;
return 0;
-bad_seb:
- ubi_err("bad scanning information about LEB %d", seb->lnum);
- ubi_dbg_dump_seb(seb, 0);
- ubi_dbg_dump_sv(sv);
+bad_aeb:
+ ubi_err("bad attaching information about LEB %d", aeb->lnum);
+ ubi_dump_aeb(aeb, 0);
+ ubi_dump_av(av);
goto out;
-bad_sv:
- ubi_err("bad scanning information about volume %d", sv->vol_id);
- ubi_dbg_dump_sv(sv);
+bad_av:
+ ubi_err("bad attaching information about volume %d", av->vol_id);
+ ubi_dump_av(av);
goto out;
bad_vid_hdr:
- ubi_err("bad scanning information about volume %d", sv->vol_id);
- ubi_dbg_dump_sv(sv);
- ubi_dbg_dump_vid_hdr(vidh);
+ ubi_err("bad attaching information about volume %d", av->vol_id);
+ ubi_dump_av(av);
+ ubi_dump_vid_hdr(vidh);
out:
- ubi_dbg_dump_stack();
+ dump_stack();
return -EINVAL;
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
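One note on self_check_ai() before the new block driver below: its final consistency pass works by plain accounting. One byte is reserved per PEB, every PEB referenced by a volume RB-tree or by the free, corr, erase and alien lists is marked, and any PEB still unmarked afterwards means the attaching information lost track of it. A minimal standalone sketch of the idea (the PEB numbers are made up):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int peb_count = 8;
	int referenced[] = { 0, 1, 2, 4, 5, 6, 7 };	/* PEB 3 is missing */
	unsigned char *buf = calloc(peb_count, 1);
	int i, err = 0;

	if (!buf)
		return 1;
	/* Mark every PEB the attach information knows about */
	for (i = 0; i < (int)(sizeof(referenced) / sizeof(referenced[0])); i++)
		buf[referenced[i]] = 1;
	/* Anything left unmarked has been lost by the scan */
	for (i = 0; i < peb_count; i++)
		if (!buf[i]) {
			printf("PEB %d is not referenced\n", i);
			err = 1;
		}
	free(buf);
	return err;
}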
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
new file mode 100644
index 00000000000..8457df7ec5a
--- /dev/null
+++ b/drivers/mtd/ubi/block.c
@@ -0,0 +1,649 @@
+/*
+ * Copyright (c) 2014 Ezequiel Garcia
+ * Copyright (c) 2011 Free Electrons
+ *
+ * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2007
+ * Authors: Artem Bityutskiy, Frank Haverkamp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ */
+
+/*
+ * Read-only block devices on top of UBI volumes
+ *
+ * A simple implementation to allow a block device to be layered on top of a
+ * UBI volume. The implementation is provided by creating a static 1-to-1
+ * mapping between the block device and the UBI volume.
+ *
+ * The addressed byte is obtained from the addressed block sector, which is
+ * mapped linearly into the corresponding LEB:
+ *
+ * LEB number = addressed byte / LEB size
+ *
+ * This feature is compiled in the UBI core, and adds a 'block' parameter
+ * to allow early creation of block devices on top of UBI volumes. Runtime
+ * block creation/removal for UBI volumes is provided through two UBI ioctls:
+ * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/ubi.h>
+#include <linux/workqueue.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <asm/div64.h>
+
+#include "ubi-media.h"
+#include "ubi.h"
+
+/* Maximum number of supported devices */
+#define UBIBLOCK_MAX_DEVICES 32
+
+/* Maximum length of the 'block=' parameter */
+#define UBIBLOCK_PARAM_LEN 63
+
+/* Maximum number of comma-separated items in the 'block=' parameter */
+#define UBIBLOCK_PARAM_COUNT 2
+
+struct ubiblock_param {
+ int ubi_num;
+ int vol_id;
+ char name[UBIBLOCK_PARAM_LEN+1];
+};
+
+/* Numbers of elements set in the @ubiblock_param array */
+static int ubiblock_devs __initdata;
+
+/* MTD devices specification parameters */
+static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
+
+struct ubiblock {
+ struct ubi_volume_desc *desc;
+ int ubi_num;
+ int vol_id;
+ int refcnt;
+ int leb_size;
+
+ struct gendisk *gd;
+ struct request_queue *rq;
+
+ struct workqueue_struct *wq;
+ struct work_struct work;
+
+ struct mutex dev_mutex;
+ spinlock_t queue_lock;
+ struct list_head list;
+};
+
+/* Linked list of all ubiblock instances */
+static LIST_HEAD(ubiblock_devices);
+static DEFINE_MUTEX(devices_mutex);
+static int ubiblock_major;
+
+static int __init ubiblock_set_param(const char *val,
+ const struct kernel_param *kp)
+{
+ int i, ret;
+ size_t len;
+ struct ubiblock_param *param;
+ char buf[UBIBLOCK_PARAM_LEN];
+ char *pbuf = &buf[0];
+ char *tokens[UBIBLOCK_PARAM_COUNT];
+
+ if (!val)
+ return -EINVAL;
+
+ len = strnlen(val, UBIBLOCK_PARAM_LEN);
+ if (len == 0) {
+ ubi_warn("block: empty 'block=' parameter - ignored\n");
+ return 0;
+ }
+
+ if (len == UBIBLOCK_PARAM_LEN) {
+ ubi_err("block: parameter \"%s\" is too long, max. is %d\n",
+ val, UBIBLOCK_PARAM_LEN);
+ return -EINVAL;
+ }
+
+ strcpy(buf, val);
+
+ /* Get rid of the final newline */
+ if (buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
+ tokens[i] = strsep(&pbuf, ",");
+
+ param = &ubiblock_param[ubiblock_devs];
+ if (tokens[1]) {
+ /* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
+ ret = kstrtoint(tokens[0], 10, &param->ubi_num);
+ if (ret < 0)
+ return -EINVAL;
+
+ /* Second param can be a number or a name */
+ ret = kstrtoint(tokens[1], 10, &param->vol_id);
+ if (ret < 0) {
+ param->vol_id = -1;
+ strcpy(param->name, tokens[1]);
+ }
+
+ } else {
+ /* One parameter: must be device path */
+ strcpy(param->name, tokens[0]);
+ param->ubi_num = -1;
+ param->vol_id = -1;
+ }
+
+ ubiblock_devs++;
+
+ return 0;
+}
+
+static struct kernel_param_ops ubiblock_param_ops = {
+ .set = ubiblock_set_param,
+};
+module_param_cb(block, &ubiblock_param_ops, NULL, 0);
+MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
+ "Multiple \"block\" parameters may be specified.\n"
+ "UBI volumes may be specified by their number, name, or path to the device node.\n"
+ "Examples\n"
+ "Using the UBI volume path:\n"
+ "ubi.block=/dev/ubi0_0\n"
+ "Using the UBI device, and the volume name:\n"
+ "ubi.block=0,rootfs\n"
+ "Using both UBI device number and UBI volume number:\n"
+ "ubi.block=0,0\n");
+
+static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
+{
+ struct ubiblock *dev;
+
+ list_for_each_entry(dev, &ubiblock_devices, list)
+ if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
+ return dev;
+ return NULL;
+}
+
+static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer,
+ int leb, int offset, int len)
+{
+ int ret;
+
+ ret = ubi_read(dev->desc, leb, buffer, offset, len);
+ if (ret) {
+ ubi_err("%s ubi_read error %d",
+ dev->gd->disk_name, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int ubiblock_read(struct ubiblock *dev, char *buffer,
+ sector_t sec, int len)
+{
+ int ret, leb, offset;
+ int bytes_left = len;
+ int to_read = len;
+ u64 pos = sec << 9;
+
+ /* Get LEB:offset address to read from */
+ offset = do_div(pos, dev->leb_size);
+ leb = pos;
+
+ while (bytes_left) {
+ /*
+ * We can only read one LEB at a time. Therefore if the read
+ * length is larger than one LEB size, we split the operation.
+ */
+ if (offset + to_read > dev->leb_size)
+ to_read = dev->leb_size - offset;
+
+ ret = ubiblock_read_to_buf(dev, buffer, leb, offset, to_read);
+ if (ret)
+ return ret;
+
+ buffer += to_read;
+ bytes_left -= to_read;
+ to_read = bytes_left;
+ leb += 1;
+ offset = 0;
+ }
+ return 0;
+}
+
+static int do_ubiblock_request(struct ubiblock *dev, struct request *req)
+{
+ int len, ret;
+ sector_t sec;
+
+ if (req->cmd_type != REQ_TYPE_FS)
+ return -EIO;
+
+ if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
+ get_capacity(req->rq_disk))
+ return -EIO;
+
+ if (rq_data_dir(req) != READ)
+ return -ENOSYS; /* Write not implemented */
+
+ sec = blk_rq_pos(req);
+ len = blk_rq_cur_bytes(req);
+
+ /*
+ * Let's prevent the device from being removed while we're doing I/O
+ * work. Notice that this means we serialize all the I/O operations,
+ * but it's probably of no impact given that the NAND core serializes
+ * flash access anyway.
+ */
+ mutex_lock(&dev->dev_mutex);
+ ret = ubiblock_read(dev, bio_data(req->bio), sec, len);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static void ubiblock_do_work(struct work_struct *work)
+{
+ struct ubiblock *dev =
+ container_of(work, struct ubiblock, work);
+ struct request_queue *rq = dev->rq;
+ struct request *req;
+ int res;
+
+ spin_lock_irq(rq->queue_lock);
+
+ req = blk_fetch_request(rq);
+ while (req) {
+
+ spin_unlock_irq(rq->queue_lock);
+ res = do_ubiblock_request(dev, req);
+ spin_lock_irq(rq->queue_lock);
+
+ /*
+ * If we're done with this request,
+ * we need to fetch a new one
+ */
+ if (!__blk_end_request_cur(req, res))
+ req = blk_fetch_request(rq);
+ }
+
+ spin_unlock_irq(rq->queue_lock);
+}
+
+static void ubiblock_request(struct request_queue *rq)
+{
+ struct ubiblock *dev;
+ struct request *req;
+
+ dev = rq->queuedata;
+
+ if (!dev)
+ while ((req = blk_fetch_request(rq)) != NULL)
+ __blk_end_request_all(req, -ENODEV);
+ else
+ queue_work(dev->wq, &dev->work);
+}
+
+static int ubiblock_open(struct block_device *bdev, fmode_t mode)
+{
+ struct ubiblock *dev = bdev->bd_disk->private_data;
+ int ret;
+
+ mutex_lock(&dev->dev_mutex);
+ if (dev->refcnt > 0) {
+ /*
+ * The volume is already open, just increase the reference
+ * counter.
+ */
+ goto out_done;
+ }
+
+ /*
+ * We want users to be aware they should only mount us as read-only.
+ * It's just a paranoid check, as write requests will get rejected
+ * in any case.
+ */
+ if (mode & FMODE_WRITE) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
+ if (IS_ERR(dev->desc)) {
+ ubi_err("%s failed to open ubi volume %d_%d",
+ dev->gd->disk_name, dev->ubi_num, dev->vol_id);
+ ret = PTR_ERR(dev->desc);
+ dev->desc = NULL;
+ goto out_unlock;
+ }
+
+out_done:
+ dev->refcnt++;
+ mutex_unlock(&dev->dev_mutex);
+ return 0;
+
+out_unlock:
+ mutex_unlock(&dev->dev_mutex);
+ return ret;
+}
+
+static void ubiblock_release(struct gendisk *gd, fmode_t mode)
+{
+ struct ubiblock *dev = gd->private_data;
+
+ mutex_lock(&dev->dev_mutex);
+ dev->refcnt--;
+ if (dev->refcnt == 0) {
+ ubi_close_volume(dev->desc);
+ dev->desc = NULL;
+ }
+ mutex_unlock(&dev->dev_mutex);
+}
+
+static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ /* Some tools might require this information */
+ geo->heads = 1;
+ geo->cylinders = 1;
+ geo->sectors = get_capacity(bdev->bd_disk);
+ geo->start = 0;
+ return 0;
+}
+
+static const struct block_device_operations ubiblock_ops = {
+ .owner = THIS_MODULE,
+ .open = ubiblock_open,
+ .release = ubiblock_release,
+ .getgeo = ubiblock_getgeo,
+};
+
+int ubiblock_create(struct ubi_volume_info *vi)
+{
+ struct ubiblock *dev;
+ struct gendisk *gd;
+ int disk_capacity;
+ int ret;
+
+ /* Check that the volume isn't already handled */
+ mutex_lock(&devices_mutex);
+ if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
+ mutex_unlock(&devices_mutex);
+ return -EEXIST;
+ }
+ mutex_unlock(&devices_mutex);
+
+ dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ mutex_init(&dev->dev_mutex);
+
+ dev->ubi_num = vi->ubi_num;
+ dev->vol_id = vi->vol_id;
+ dev->leb_size = vi->usable_leb_size;
+
+ /* Initialize the gendisk of this ubiblock device */
+ gd = alloc_disk(1);
+ if (!gd) {
+ ubi_err("block: alloc_disk failed");
+ ret = -ENODEV;
+ goto out_free_dev;
+ }
+
+ gd->fops = &ubiblock_ops;
+ gd->major = ubiblock_major;
+ gd->first_minor = dev->ubi_num * UBI_MAX_VOLUMES + dev->vol_id;
+ gd->private_data = dev;
+ sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
+ disk_capacity = (vi->size * vi->usable_leb_size) >> 9;
+ set_capacity(gd, disk_capacity);
+ dev->gd = gd;
+
+ spin_lock_init(&dev->queue_lock);
+ dev->rq = blk_init_queue(ubiblock_request, &dev->queue_lock);
+ if (!dev->rq) {
+ ubi_err("block: blk_init_queue failed");
+ ret = -ENODEV;
+ goto out_put_disk;
+ }
+
+ dev->rq->queuedata = dev;
+ dev->gd->queue = dev->rq;
+
+ /*
+ * Create one workqueue per volume (per registered block device).
+ * Remember workqueues are cheap, they're not threads.
+ */
+ dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
+ if (!dev->wq) {
+ ret = -ENOMEM;
+ goto out_free_queue;
+ }
+ INIT_WORK(&dev->work, ubiblock_do_work);
+
+ mutex_lock(&devices_mutex);
+ list_add_tail(&dev->list, &ubiblock_devices);
+ mutex_unlock(&devices_mutex);
+
+ /* Must be the last step: anyone can call file ops from now on */
+ add_disk(dev->gd);
+ ubi_msg("%s created from ubi%d:%d(%s)",
+ dev->gd->disk_name, dev->ubi_num, dev->vol_id, vi->name);
+ return 0;
+
+out_free_queue:
+ blk_cleanup_queue(dev->rq);
+out_put_disk:
+ put_disk(dev->gd);
+out_free_dev:
+ kfree(dev);
+
+ return ret;
+}
+
+static void ubiblock_cleanup(struct ubiblock *dev)
+{
+ del_gendisk(dev->gd);
+ blk_cleanup_queue(dev->rq);
+ ubi_msg("%s released", dev->gd->disk_name);
+ put_disk(dev->gd);
+}
+
+int ubiblock_remove(struct ubi_volume_info *vi)
+{
+ struct ubiblock *dev;
+
+ mutex_lock(&devices_mutex);
+ dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
+ if (!dev) {
+ mutex_unlock(&devices_mutex);
+ return -ENODEV;
+ }
+
+ /* Found a device, let's lock it so we can check if it's busy */
+ mutex_lock(&dev->dev_mutex);
+ if (dev->refcnt > 0) {
+ mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+ return -EBUSY;
+ }
+
+ /* Remove from device list */
+ list_del(&dev->list);
+ mutex_unlock(&devices_mutex);
+
+ /* Flush pending work and stop this workqueue */
+ destroy_workqueue(dev->wq);
+
+ ubiblock_cleanup(dev);
+ mutex_unlock(&dev->dev_mutex);
+ kfree(dev);
+ return 0;
+}
+
+static void ubiblock_resize(struct ubi_volume_info *vi)
+{
+ struct ubiblock *dev;
+ int disk_capacity;
+
+ /*
+ * Need to lock the device list until we stop using the device,
+ * otherwise the device struct might get released in
+ * 'ubiblock_remove()'.
+ */
+ mutex_lock(&devices_mutex);
+ dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
+ if (!dev) {
+ mutex_unlock(&devices_mutex);
+ return;
+ }
+
+ mutex_lock(&dev->dev_mutex);
+ disk_capacity = (vi->size * vi->usable_leb_size) >> 9;
+ set_capacity(dev->gd, disk_capacity);
+ ubi_msg("%s resized to %d LEBs", dev->gd->disk_name, vi->size);
+ mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+}
+
+static int ubiblock_notify(struct notifier_block *nb,
+ unsigned long notification_type, void *ns_ptr)
+{
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (notification_type) {
+ case UBI_VOLUME_ADDED:
+ /*
+ * We want to enforce explicit block device creation for
+ * volumes, so when a volume is added we do nothing.
+ */
+ break;
+ case UBI_VOLUME_REMOVED:
+ ubiblock_remove(&nt->vi);
+ break;
+ case UBI_VOLUME_RESIZED:
+ ubiblock_resize(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ubiblock_notifier = {
+ .notifier_call = ubiblock_notify,
+};
+
+static struct ubi_volume_desc * __init
+open_volume_desc(const char *name, int ubi_num, int vol_id)
+{
+ if (ubi_num == -1)
+ /* No ubi num, name must be a vol device path */
+ return ubi_open_volume_path(name, UBI_READONLY);
+ else if (vol_id == -1)
+ /* No vol_id, must be vol_name */
+ return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
+ else
+ return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
+}
+
+static int __init ubiblock_create_from_param(void)
+{
+	int i, ret = 0;
+ struct ubiblock_param *p;
+ struct ubi_volume_desc *desc;
+ struct ubi_volume_info vi;
+
+ for (i = 0; i < ubiblock_devs; i++) {
+ p = &ubiblock_param[i];
+
+ desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
+ if (IS_ERR(desc)) {
+ ubi_err("block: can't open volume, err=%ld\n",
+ PTR_ERR(desc));
+ ret = PTR_ERR(desc);
+ break;
+ }
+
+ ubi_get_volume_info(desc, &vi);
+ ubi_close_volume(desc);
+
+ ret = ubiblock_create(&vi);
+ if (ret) {
+ ubi_err("block: can't add '%s' volume, err=%d\n",
+ vi.name, ret);
+ break;
+ }
+ }
+ return ret;
+}
+
+static void ubiblock_remove_all(void)
+{
+ struct ubiblock *next;
+ struct ubiblock *dev;
+
+ list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
+ /* Flush pending work and stop workqueue */
+ destroy_workqueue(dev->wq);
+ /* The module is being forcefully removed */
+ WARN_ON(dev->desc);
+ /* Remove from device list */
+ list_del(&dev->list);
+ ubiblock_cleanup(dev);
+ kfree(dev);
+ }
+}
+
+int __init ubiblock_init(void)
+{
+ int ret;
+
+ ubiblock_major = register_blkdev(0, "ubiblock");
+ if (ubiblock_major < 0)
+ return ubiblock_major;
+
+ /* Attach block devices from 'block=' module param */
+ ret = ubiblock_create_from_param();
+ if (ret)
+ goto err_remove;
+
+ /*
+ * Block devices are only created upon user requests, so we ignore
+ * existing volumes.
+ */
+ ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
+ if (ret)
+ goto err_unreg;
+ return 0;
+
+err_unreg:
+ unregister_blkdev(ubiblock_major, "ubiblock");
+err_remove:
+ ubiblock_remove_all();
+ return ret;
+}
+
+void __exit ubiblock_exit(void)
+{
+ ubi_unregister_volume_notifier(&ubiblock_notifier);
+ ubiblock_remove_all();
+ unregister_blkdev(ubiblock_major, "ubiblock");
+}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 65626c1c446..6e30a3c280d 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -27,10 +27,6 @@
* module load parameters or the kernel boot parameters. If MTD devices were
* specified, UBI does not attach any MTD device, but it is possible to do
* later using the "UBI control device".
- *
- * At the moment we only attach UBI devices by scanning, which will become a
- * bottleneck when flashes reach certain large size. Then one may improve UBI
- * and add other methods, although it does not seem to be easy to do.
*/
#include <linux/err.h>
@@ -40,15 +36,23 @@
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
+#include <linux/mtd/partitions.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/major.h>
#include "ubi.h"
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
+/* Maximum number of comma-separated items in the 'mtd=' parameter */
+#define MTD_PARAM_MAX_COUNT 4
+
+/* Maximum value for the number of bad PEBs per 1024 PEBs */
+#define MAX_MTD_UBI_BEB_LIMIT 768
+
#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
@@ -60,10 +64,13 @@
* @name: MTD character device node path, MTD device name, or MTD device number
* string
* @vid_hdr_offs: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
*/
struct mtd_dev_param {
char name[MTD_PARAM_LEN_MAX];
+ int ubi_num;
int vid_hdr_offs;
+ int max_beb_per1024;
};
/* Numbers of elements set in the @mtd_dev_param array */
@@ -71,7 +78,10 @@ static int __initdata mtd_devs;
/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
-
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/* UBI module parameter to enable fastmap automatically on non-fastmap images */
+static bool fm_autoconvert;
+#endif
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;
@@ -148,6 +158,19 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
ubi_do_get_device_info(ubi, &nt.di);
ubi_do_get_volume_info(ubi, vol, &nt.vi);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ switch (ntype) {
+ case UBI_VOLUME_ADDED:
+ case UBI_VOLUME_REMOVED:
+ case UBI_VOLUME_RESIZED:
+ case UBI_VOLUME_RENAMED:
+ if (ubi_update_fastmap(ubi)) {
+ ubi_err("Unable to update fastmap!");
+ ubi_ro_mode(ubi);
+ }
+ }
+#endif
return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}
@@ -554,10 +577,10 @@ static void uif_close(struct ubi_device *ubi)
}
/**
- * free_internal_volumes - free internal volumes.
+ * ubi_free_internal_volumes - free internal volumes.
* @ubi: UBI device description object
*/
-static void free_internal_volumes(struct ubi_device *ubi)
+void ubi_free_internal_volumes(struct ubi_device *ubi)
{
int i;
@@ -568,62 +591,38 @@ static void free_internal_volumes(struct ubi_device *ubi)
}
}
-/**
- * attach_by_scanning - attach an MTD device using scanning method.
- * @ubi: UBI device descriptor
- *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
- *
- * Note, currently this is the only method to attach UBI devices. Hopefully in
- * the future we'll have more scalable attaching methods and avoid full media
- * scanning. But even in this case scanning will be needed as a fall-back
- * attaching method if there are some on-flash table corruptions.
- */
-static int attach_by_scanning(struct ubi_device *ubi)
+static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
- int err;
- struct ubi_scan_info *si;
+ int limit, device_pebs;
+ uint64_t device_size;
- si = ubi_scan(ubi);
- if (IS_ERR(si))
- return PTR_ERR(si);
-
- ubi->bad_peb_count = si->bad_peb_count;
- ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
- ubi->corr_peb_count = si->corr_peb_count;
- ubi->max_ec = si->max_ec;
- ubi->mean_ec = si->mean_ec;
- ubi_msg("max. sequence number: %llu", si->max_sqnum);
-
- err = ubi_read_volume_table(ubi, si);
- if (err)
- goto out_si;
-
- err = ubi_wl_init_scan(ubi, si);
- if (err)
- goto out_vtbl;
+ if (!max_beb_per1024)
+ return 0;
- err = ubi_eba_init_scan(ubi, si);
- if (err)
- goto out_wl;
+ /*
+ * Here we are using size of the entire flash chip and
+ * not just the MTD partition size because the maximum
+ * number of bad eraseblocks is a percentage of the
+	 * whole device, and bad eraseblocks are not evenly
+ * distributed over the flash chip. So the worst case
+ * is that all the bad eraseblocks of the chip are in
+ * the MTD partition we are attaching (ubi->mtd).
+ */
+ device_size = mtd_get_device_size(ubi->mtd);
+ device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
+ limit = mult_frac(device_pebs, max_beb_per1024, 1024);
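+	/* Worked example (figures assumed): 4096 device PEBs with max_beb_per1024 == 20 give limit = 4096 * 20 / 1024 = 80 */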
- ubi_scan_destroy_si(si);
- return 0;
+ /* Round it up */
+ if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
+ limit += 1;
-out_wl:
- ubi_wl_close(ubi);
-out_vtbl:
- free_internal_volumes(ubi);
- vfree(ubi->vtbl);
-out_si:
- ubi_scan_destroy_si(si);
- return err;
+ return limit;
}
/**
* io_init - initialize I/O sub-system for a given UBI device.
* @ubi: UBI device description object
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
*
* If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
* assumed:
@@ -636,8 +635,11 @@ out_si:
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int io_init(struct ubi_device *ubi)
+static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
+ dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
+ dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
+
if (ubi->mtd->numeraseregions != 0) {
/*
* Some flashes have several erase regions. Different regions
@@ -664,8 +666,10 @@ static int io_init(struct ubi_device *ubi)
ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
ubi->flash_size = ubi->mtd->size;
- if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
+ if (mtd_can_have_bb(ubi->mtd)) {
ubi->bad_allowed = 1;
+ ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
+ }
if (ubi->mtd->type == MTD_NORFLASH) {
ubi_assert(ubi->mtd->writesize == 1);
@@ -707,11 +711,11 @@ static int io_init(struct ubi_device *ubi)
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
- dbg_msg("min_io_size %d", ubi->min_io_size);
- dbg_msg("max_write_size %d", ubi->max_write_size);
- dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
- dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
- dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
+ dbg_gen("min_io_size %d", ubi->min_io_size);
+ dbg_gen("max_write_size %d", ubi->max_write_size);
+ dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+ dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
+ dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
if (ubi->vid_hdr_offset == 0)
/* Default offset */
@@ -728,10 +732,10 @@ static int io_init(struct ubi_device *ubi)
ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
- dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
- dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
- dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift);
- dbg_msg("leb_start %d", ubi->leb_start);
+ dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
+ dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
+ dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
+ dbg_gen("leb_start %d", ubi->leb_start);
/* The shift must be aligned to 32-bit boundary */
if (ubi->vid_hdr_shift % 4) {
@@ -757,7 +761,7 @@ static int io_init(struct ubi_device *ubi)
ubi->max_erroneous = ubi->peb_count / 10;
if (ubi->max_erroneous < 16)
ubi->max_erroneous = 16;
- dbg_msg("max_erroneous %d", ubi->max_erroneous);
+ dbg_gen("max_erroneous %d", ubi->max_erroneous);
/*
* It may happen that EC and VID headers are situated in one minimal
@@ -765,36 +769,24 @@ static int io_init(struct ubi_device *ubi)
* read-only mode.
*/
if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
- ubi_warn("EC and VID headers are in the same minimal I/O unit, "
- "switch to read-only mode");
+ ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
ubi->ro_mode = 1;
}
ubi->leb_size = ubi->peb_size - ubi->leb_start;
if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
- ubi_msg("MTD device %d is write-protected, attach in "
- "read-only mode", ubi->mtd->index);
+ ubi_msg("MTD device %d is write-protected, attach in read-only mode",
+ ubi->mtd->index);
ubi->ro_mode = 1;
}
- ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
- ubi->peb_size, ubi->peb_size >> 10);
- ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
- ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
- if (ubi->hdrs_min_io_size != ubi->min_io_size)
- ubi_msg("sub-page size: %d",
- ubi->hdrs_min_io_size);
- ubi_msg("VID header offset: %d (aligned %d)",
- ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
- ubi_msg("data offset: %d", ubi->leb_start);
-
/*
- * Note, ideally, we have to initialize ubi->bad_peb_count here. But
+ * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
* unfortunately, MTD does not provide this information. We should loop
* over all physical eraseblocks and invoke mtd->block_is_bad() for
- * each physical eraseblock. So, we skip ubi->bad_peb_count
- * uninitialized and initialize it after scanning.
+ * each physical eraseblock. So, we leave @ubi->bad_peb_count
+ * uninitialized so far.
*/
return 0;
@@ -805,7 +797,7 @@ static int io_init(struct ubi_device *ubi)
* @ubi: UBI device description object
* @vol_id: ID of the volume to re-size
*
- * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
+ * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
* the volume table to the largest possible size. See comments in ubi-header.h
* for more description of the flag. Returns zero in case of success and a
* negative error code in case of failure.
@@ -816,6 +808,11 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
struct ubi_volume *vol = ubi->volumes[vol_id];
int err, old_reserved_pebs = vol->reserved_pebs;
+ if (ubi->ro_mode) {
+ ubi_warn("skip auto-resize because of R/O mode");
+ return 0;
+ }
+
/*
* Clear the auto-resize flag in the volume in-memory copy of the
* volume table, and 'ubi_resize_volume()' will propagate this change
@@ -830,8 +827,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* No available PEBs to re-size the volume, clear the flag on
* flash and exit.
*/
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol_id];
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
ubi_err("cannot clean auto-resize flag for volume %d",
@@ -857,6 +853,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* @mtd: MTD device description object
* @ubi_num: number to assign to the new UBI device
* @vid_hdr_offset: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
*
* This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
* to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -867,11 +864,18 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* Note, the invocations of this function has to be serialized by the
* @ubi_devices_mutex.
*/
-int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024)
{
struct ubi_device *ubi;
int i, err, ref = 0;
+ if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
+ return -EINVAL;
+
+ if (!max_beb_per1024)
+ max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
+
/*
* Check if we already have the same MTD device attached.
*
@@ -881,7 +885,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
if (ubi && mtd->index == ubi->mtd->index) {
- dbg_err("mtd%d is already attached to ubi%d",
+ ubi_err("mtd%d is already attached to ubi%d",
mtd->index, i);
return -EEXIST;
}
@@ -896,8 +900,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
* no sense to attach emulated MTD devices, so we prohibit this.
*/
if (mtd->type == MTD_UBIVOLUME) {
- ubi_err("refuse attaching mtd%d - it is already emulated on "
- "top of UBI", mtd->index);
+ ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
+ mtd->index);
return -EINVAL;
}
@@ -907,7 +911,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (!ubi_devices[ubi_num])
break;
if (ubi_num == UBI_MAX_DEVICES) {
- dbg_err("only %d UBI devices may be created",
+ ubi_err("only %d UBI devices may be created",
UBI_MAX_DEVICES);
return -ENFILE;
}
@@ -917,7 +921,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
/* Make sure ubi_num is not busy */
if (ubi_devices[ubi_num]) {
- dbg_err("ubi%d already exists", ubi_num);
+ ubi_err("ubi%d already exists", ubi_num);
return -EEXIST;
}
}
@@ -931,31 +935,61 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
ubi->vid_hdr_offset = vid_hdr_offset;
ubi->autoresize_vol_id = -1;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_pool.used = ubi->fm_pool.size = 0;
+ ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
+
+ /*
+ * fm_pool.max_size is 5% of the total number of PEBs but it's also
+ * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
+ */
+ ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
+ ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
+ if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
+ ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
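+	/* Example (sizes assumed): a 4096-PEB device gives (4096 / 100) * 5 = 200, used as-is when it falls between UBI_FM_MIN_POOL_SIZE and UBI_FM_MAX_POOL_SIZE */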
+
+ ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
+ ubi->fm_disabled = !fm_autoconvert;
+
+ if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
+ <= UBI_FM_MAX_START) {
+ ubi_err("More than %i PEBs are needed for fastmap, sorry.",
+ UBI_FM_MAX_START);
+ ubi->fm_disabled = 1;
+ }
+
+ ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+#else
+ ubi->fm_disabled = 1;
+#endif
mutex_init(&ubi->buf_mutex);
mutex_init(&ubi->ckvol_mutex);
mutex_init(&ubi->device_mutex);
spin_lock_init(&ubi->volumes_lock);
+ mutex_init(&ubi->fm_mutex);
+ init_rwsem(&ubi->fm_sem);
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
- dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb));
- dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
- err = io_init(ubi);
+ err = io_init(ubi, max_beb_per1024);
if (err)
goto out_free;
err = -ENOMEM;
- ubi->peb_buf1 = vmalloc(ubi->peb_size);
- if (!ubi->peb_buf1)
+ ubi->peb_buf = vmalloc(ubi->peb_size);
+ if (!ubi->peb_buf)
goto out_free;
- ubi->peb_buf2 = vmalloc(ubi->peb_size);
- if (!ubi->peb_buf2)
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_size = ubi_calc_fm_size(ubi);
+ ubi->fm_buf = vzalloc(ubi->fm_size);
+ if (!ubi->fm_buf)
goto out_free;
-
- err = attach_by_scanning(ubi);
+#endif
+ err = ubi_attach(ubi, 0);
if (err) {
- dbg_err("failed to attach by scanning, error %d", err);
+ ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
goto out_free;
}
@@ -969,31 +1003,36 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (err)
goto out_detach;
- ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
+ err = ubi_debugfs_init_dev(ubi);
+ if (err)
+ goto out_uif;
+
+ ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
if (IS_ERR(ubi->bgt_thread)) {
err = PTR_ERR(ubi->bgt_thread);
ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
err);
- goto out_uif;
+ goto out_debugfs;
}
- ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
- ubi_msg("MTD device name: \"%s\"", mtd->name);
- ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
- ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
- ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
- ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count);
- ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
- ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
- ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
- ubi_msg("number of user volumes: %d",
- ubi->vol_count - UBI_INT_VOL_COUNT);
- ubi_msg("available PEBs: %d", ubi->avail_pebs);
- ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
- ubi_msg("number of PEBs reserved for bad PEB handling: %d",
- ubi->beb_rsvd_pebs);
- ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
- ubi_msg("image sequence number: %d", ubi->image_seq);
+ ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
+ mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
+ ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
+ ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
+ ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
+ ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
+ ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
+ ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
+ ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
+ ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
+ ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
+ ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
+ ubi->vtbl_slots);
+ ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
+ ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
+ ubi->image_seq);
+ ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
+ ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
/*
* The below lock makes sure we do not race with 'ubi_thread()' which
@@ -1008,15 +1047,19 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
+out_debugfs:
+ ubi_debugfs_exit_dev(ubi);
out_uif:
+ get_device(&ubi->dev);
+ ubi_assert(ref);
uif_close(ubi);
out_detach:
ubi_wl_close(ubi);
- free_internal_volumes(ubi);
+ ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
out_free:
- vfree(ubi->peb_buf1);
- vfree(ubi->peb_buf2);
+ vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
if (ref)
put_device(&ubi->dev);
else
@@ -1065,8 +1108,12 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_assert(ubi_num == ubi->ubi_num);
ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
- dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
-
+ ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* If we don't write a new fastmap at detach time we lose all
+ * EC updates that have been made since the last written fastmap. */
+ ubi_update_fastmap(ubi);
+#endif
/*
* Before freeing anything, we have to stop the background thread to
* prevent it from doing anything on this device while we are freeing.
@@ -1080,13 +1127,15 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
*/
get_device(&ubi->dev);
+ ubi_debugfs_exit_dev(ubi);
uif_close(ubi);
+
ubi_wl_close(ubi);
- free_internal_volumes(ubi);
+ ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
- vfree(ubi->peb_buf1);
- vfree(ubi->peb_buf2);
+ vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
put_device(&ubi->dev);
return 0;
@@ -1196,8 +1245,15 @@ static int __init ubi_init(void)
ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
sizeof(struct ubi_wl_entry),
0, 0, NULL);
- if (!ubi_wl_entry_slab)
+ if (!ubi_wl_entry_slab) {
+ err = -ENOMEM;
goto out_dev_unreg;
+ }
+
+ err = ubi_debugfs_init();
+ if (err)
+ goto out_slab;
+
/* Attach MTD devices */
for (i = 0; i < mtd_devs; i++) {
@@ -1209,12 +1265,16 @@ static int __init ubi_init(void)
mtd = open_mtd_device(p->name);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- goto out_detach;
+ ubi_err("cannot open mtd %s, error %d", p->name, err);
+			/* See the comment below regarding ubi_is_module(). */
+ if (ubi_is_module())
+ goto out_detach;
+ continue;
}
mutex_lock(&ubi_devices_mutex);
- err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
- p->vid_hdr_offs);
+ err = ubi_attach_mtd_dev(mtd, p->ubi_num,
+ p->vid_hdr_offs, p->max_beb_per1024);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
ubi_err("cannot attach mtd%d", mtd->index);
@@ -1238,6 +1298,15 @@ static int __init ubi_init(void)
}
}
+ err = ubiblock_init();
+ if (err) {
+ ubi_err("block: cannot initialize, error %d", err);
+
+		/* See the comment above regarding ubi_is_module(). */
+ if (ubi_is_module())
+ goto out_detach;
+ }
+
return 0;
out_detach:
@@ -1247,6 +1316,8 @@ out_detach:
ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
mutex_unlock(&ubi_devices_mutex);
}
+ ubi_debugfs_exit();
+out_slab:
kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
misc_deregister(&ubi_ctrl_cdev);
@@ -1255,21 +1326,24 @@ out_version:
out_class:
class_destroy(ubi_class);
out:
- ubi_err("UBI error: cannot initialize UBI, error %d", err);
+ ubi_err("cannot initialize UBI, error %d", err);
return err;
}
-module_init(ubi_init);
+late_initcall(ubi_init);
static void __exit ubi_exit(void)
{
int i;
+ ubiblock_exit();
+
for (i = 0; i < UBI_MAX_DEVICES; i++)
if (ubi_devices[i]) {
mutex_lock(&ubi_devices_mutex);
ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
mutex_unlock(&ubi_devices_mutex);
}
+ ubi_debugfs_exit();
kmem_cache_destroy(ubi_wl_entry_slab);
misc_deregister(&ubi_ctrl_cdev);
class_remove_file(ubi_class, &ubi_version);
@@ -1291,8 +1365,7 @@ static int __init bytes_str_to_int(const char *str)
result = simple_strtoul(str, &endp, 0);
if (str == endp || result >= INT_MAX) {
- printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
- str);
+ ubi_err("incorrect bytes count: \"%s\"\n", str);
return -EINVAL;
}
@@ -1308,8 +1381,7 @@ static int __init bytes_str_to_int(const char *str)
case '\0':
break;
default:
- printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
- str);
+ ubi_err("incorrect bytes count: \"%s\"\n", str);
return -EINVAL;
}
@@ -1330,27 +1402,26 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
struct mtd_dev_param *p;
char buf[MTD_PARAM_LEN_MAX];
char *pbuf = &buf[0];
- char *tokens[2] = {NULL, NULL};
+ char *tokens[MTD_PARAM_MAX_COUNT], *token;
if (!val)
return -EINVAL;
if (mtd_devs == UBI_MAX_DEVICES) {
- printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
- UBI_MAX_DEVICES);
+ ubi_err("too many parameters, max. is %d\n",
+ UBI_MAX_DEVICES);
return -EINVAL;
}
len = strnlen(val, MTD_PARAM_LEN_MAX);
if (len == MTD_PARAM_LEN_MAX) {
- printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
- "max. is %d\n", val, MTD_PARAM_LEN_MAX);
+ ubi_err("parameter \"%s\" is too long, max. is %d\n",
+ val, MTD_PARAM_LEN_MAX);
return -EINVAL;
}
if (len == 0) {
- printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
- "ignored\n");
+ pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
return 0;
}
@@ -1360,42 +1431,69 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
if (buf[len - 1] == '\n')
buf[len - 1] = '\0';
- for (i = 0; i < 2; i++)
+ for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
tokens[i] = strsep(&pbuf, ",");
if (pbuf) {
- printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
- val);
+ ubi_err("too many arguments at \"%s\"\n", val);
return -EINVAL;
}
p = &mtd_dev_param[mtd_devs];
strcpy(&p->name[0], tokens[0]);
- if (tokens[1])
- p->vid_hdr_offs = bytes_str_to_int(tokens[1]);
+ token = tokens[1];
+ if (token) {
+ p->vid_hdr_offs = bytes_str_to_int(token);
+
+ if (p->vid_hdr_offs < 0)
+ return p->vid_hdr_offs;
+ }
+
+ token = tokens[2];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->max_beb_per1024);
- if (p->vid_hdr_offs < 0)
- return p->vid_hdr_offs;
+ if (err) {
+ ubi_err("bad value for max_beb_per1024 parameter: %s",
+ token);
+ return -EINVAL;
+ }
+ }
+
+ token = tokens[3];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->ubi_num);
+
+ if (err) {
+ ubi_err("bad value for ubi_num parameter: %s", token);
+ return -EINVAL;
+ }
+ } else
+ p->ubi_num = UBI_DEV_NUM_AUTO;
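+	/* A full parameter example (illustrative): "mtd=/dev/mtd1,0,20,3" -> name "/dev/mtd1", vid_hdr_offs 0, max_beb_per1024 20, ubi_num 3 */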
mtd_devs += 1;
return 0;
}
module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
-MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
- "mtd=<name|num|path>[,<vid_hdr_offs>].\n"
+MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num]]].\n"
"Multiple \"mtd\" parameters may be specified.\n"
- "MTD devices may be specified by their number, name, or "
- "path to the MTD character device node.\n"
- "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
- "header position to be used by UBI.\n"
- "Example 1: mtd=/dev/mtd0 - attach MTD device "
- "/dev/mtd0.\n"
- "Example 2: mtd=content,1984 mtd=4 - attach MTD device "
- "with name \"content\" using VID header offset 1984, and "
- "MTD device number 4 with default VID header offset.");
-
+ "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
+ "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n"
+ "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
+ __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
+ "Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
+ "\n"
+ "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
+ "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
+ "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
+ "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.\n"
+ "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
+#ifdef CONFIG_MTD_UBI_FASTMAP
+module_param(fm_autoconvert, bool, 0644);
+MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
+#endif
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 191f3bb3c41..7646220ca6e 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -63,7 +63,7 @@ static int get_exclusive(struct ubi_volume_desc *desc)
users = vol->readers + vol->writers + vol->exclusive;
ubi_assert(users > 0);
if (users > 1) {
- dbg_err("%d users for volume %d", users, vol->vol_id);
+ ubi_err("%d users for volume %d", users, vol->vol_id);
err = -EBUSY;
} else {
vol->readers = vol->writers = 0;
@@ -140,9 +140,9 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
vol->updating = 0;
vfree(vol->upd_buf);
} else if (vol->changing_leb) {
- dbg_gen("only %lld of %lld bytes received for atomic LEB change"
- " for volume %d:%d, cancel", vol->upd_received,
- vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
+ dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
+ vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
+ vol->vol_id);
vol->changing_leb = 0;
vfree(vol->upd_buf);
}
@@ -155,46 +155,27 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
- loff_t new_offset;
if (vol->updating) {
/* Update is in progress, seeking is prohibited */
- dbg_err("updating");
+ ubi_err("updating");
return -EBUSY;
}
- switch (origin) {
- case 0: /* SEEK_SET */
- new_offset = offset;
- break;
- case 1: /* SEEK_CUR */
- new_offset = file->f_pos + offset;
- break;
- case 2: /* SEEK_END */
- new_offset = vol->used_bytes + offset;
- break;
- default:
- return -EINVAL;
- }
-
- if (new_offset < 0 || new_offset > vol->used_bytes) {
- dbg_err("bad seek %lld", new_offset);
- return -EINVAL;
- }
-
- dbg_gen("seek volume %d, offset %lld, origin %d, new offset %lld",
- vol->vol_id, offset, origin, new_offset);
-
- file->f_pos = new_offset;
- return new_offset;
+ return fixed_size_llseek(file, offset, origin, vol->used_bytes);
}
-static int vol_cdev_fsync(struct file *file, int datasync)
+static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
+ int datasync)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_device *ubi = desc->vol->ubi;
-
- return ubi_sync(ubi->ubi_num);
+ struct inode *inode = file_inode(file);
+ int err;
+ mutex_lock(&inode->i_mutex);
+ err = ubi_sync(ubi->ubi_num);
+ mutex_unlock(&inode->i_mutex);
+ return err;
}
@@ -212,11 +193,11 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
count, *offp, vol->vol_id);
if (vol->updating) {
- dbg_err("updating");
+ ubi_err("updating");
return -EBUSY;
}
if (vol->upd_marker) {
- dbg_err("damaged volume, update marker is set");
+ ubi_err("damaged volume, update marker is set");
return -EBADF;
}
if (*offp == vol->used_bytes || count == 0)
@@ -296,7 +277,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
if (off & (ubi->min_io_size - 1)) {
- dbg_err("unaligned position");
+ ubi_err("unaligned position");
return -EINVAL;
}
@@ -305,7 +286,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
/* We can write only in fractions of the minimum I/O unit */
if (count & (ubi->min_io_size - 1)) {
- dbg_err("unaligned write length");
+ ubi_err("unaligned write length");
return -EINVAL;
}
@@ -330,8 +311,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
break;
}
- err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
- UBI_UNKNOWN);
+ err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len);
if (err)
break;
@@ -473,9 +453,6 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
req.bytes < 0 || req.lnum >= vol->usable_leb_size)
break;
- if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
- req.dtype != UBI_UNKNOWN)
- break;
err = get_exclusive(desc);
if (err < 0)
@@ -514,7 +491,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
if (err)
break;
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
break;
}
@@ -528,7 +505,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
err = -EFAULT;
break;
}
- err = ubi_leb_map(desc, req.lnum, req.dtype);
+ err = ubi_leb_map(desc, req.lnum);
break;
}
@@ -584,6 +561,26 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
+ /* Create a R/O block device on top of the UBI volume */
+ case UBI_IOCVOLCRBLK:
+ {
+ struct ubi_volume_info vi;
+
+ ubi_get_volume_info(desc, &vi);
+ err = ubiblock_create(&vi);
+ break;
+ }
+
+ /* Remove the R/O block device */
+ case UBI_IOCVOLRMBLK:
+ {
+ struct ubi_volume_info vi;
+
+ ubi_get_volume_info(desc, &vi);
+ err = ubiblock_remove(&vi);
+ break;
+ }
+
default:
err = -ENOTTY;
break;
@@ -628,6 +625,9 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
if (req->alignment != 1 && n)
goto bad;
+ if (!req->name[0] || !req->name_len)
+ goto bad;
+
if (req->name_len > UBI_VOL_NAME_MAX) {
err = -ENAMETOOLONG;
goto bad;
@@ -640,8 +640,8 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
return 0;
bad:
- dbg_err("bad volume creation request");
- ubi_dbg_dump_mkvol_req(req);
+ ubi_err("bad volume creation request");
+ ubi_dump_mkvol_req(req);
return err;
}
@@ -706,12 +706,12 @@ static int rename_volumes(struct ubi_device *ubi,
for (i = 0; i < req->count - 1; i++) {
for (n = i + 1; n < req->count; n++) {
if (req->ents[i].vol_id == req->ents[n].vol_id) {
- dbg_err("duplicated volume id %d",
+ ubi_err("duplicated volume id %d",
req->ents[i].vol_id);
return -EINVAL;
}
if (!strcmp(req->ents[i].name, req->ents[n].name)) {
- dbg_err("duplicated volume name \"%s\"",
+ ubi_err("duplicated volume name \"%s\"",
req->ents[i].name);
return -EINVAL;
}
@@ -731,10 +731,10 @@ static int rename_volumes(struct ubi_device *ubi,
goto out_free;
}
- re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
+ re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_READWRITE);
if (IS_ERR(re->desc)) {
err = PTR_ERR(re->desc);
- dbg_err("cannot open volume %d, error %d", vol_id, err);
+ ubi_err("cannot open volume %d, error %d", vol_id, err);
kfree(re);
goto out_free;
}
@@ -750,7 +750,7 @@ static int rename_volumes(struct ubi_device *ubi,
re->new_name_len = name_len;
memcpy(re->new_name, name, name_len);
list_add_tail(&re->list, &rename_list);
- dbg_msg("will rename volume %d from \"%s\" to \"%s\"",
+ dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
vol_id, re->desc->vol->name, name);
}
@@ -793,7 +793,7 @@ static int rename_volumes(struct ubi_device *ubi,
continue;
/* The volume exists but busy, or an error occurred */
- dbg_err("cannot open volume \"%s\", error %d",
+ ubi_err("cannot open volume \"%s\", error %d",
re->new_name, err);
goto out_free;
}
@@ -808,7 +808,7 @@ static int rename_volumes(struct ubi_device *ubi,
re1->remove = 1;
re1->desc = desc;
list_add(&re1->list, &rename_list);
- dbg_msg("will remove volume %d, name \"%s\"",
+ dbg_gen("will remove volume %d, name \"%s\"",
re1->desc->vol->vol_id, re1->desc->vol->name);
}
@@ -939,7 +939,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
{
struct ubi_rnvol_req *req;
- dbg_msg("re-name volumes");
+ dbg_gen("re-name volumes");
req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
if (!req) {
err = -ENOMEM;
@@ -1007,7 +1007,8 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
* 'ubi_attach_mtd_dev()'.
*/
mutex_lock(&ubi_devices_mutex);
- err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
+ err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
+ req.max_beb_per1024);
mutex_unlock(&ubi_devices_mutex);
if (err < 0)
put_mtd_device(mtd);
@@ -1023,7 +1024,7 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
{
int ubi_num;
- dbg_gen("dettach MTD device");
+ dbg_gen("detach MTD device");
err = get_user(ubi_num, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 2224cbe41dd..63cb1d7236c 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -18,225 +18,432 @@
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
-/*
- * Here we keep all the UBI debugging stuff which should normally be disabled
- * and compiled-out, but it is extremely helpful when hunting bugs or doing big
- * changes.
- */
-
-#ifdef CONFIG_MTD_UBI_DEBUG
-
#include "ubi.h"
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
-unsigned int ubi_chk_flags;
-unsigned int ubi_tst_flags;
-module_param_named(debug_chks, ubi_chk_flags, uint, S_IRUGO | S_IWUSR);
-module_param_named(debug_tsts, ubi_chk_flags, uint, S_IRUGO | S_IWUSR);
+/**
+ * ubi_dump_flash - dump a region of flash.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to dump
+ * @offset: the starting offset within the physical eraseblock to dump
+ * @len: the length of the region to dump
+ */
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
+{
+ int err;
+ size_t read;
+ void *buf;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
-MODULE_PARM_DESC(debug_chks, "Debug check flags");
-MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
+ buf = vmalloc(len);
+ if (!buf)
+ return;
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err && err != -EUCLEAN) {
+ ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
+ goto out;
+ }
+
+ ubi_msg("dumping %d bytes of data from PEB %d, offset %d",
+ len, pnum, offset);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+out:
+ vfree(buf);
+ return;
+}
/**
- * ubi_dbg_dump_ec_hdr - dump an erase counter header.
+ * ubi_dump_ec_hdr - dump an erase counter header.
* @ec_hdr: the erase counter header to dump
*/
-void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
{
- printk(KERN_DEBUG "Erase counter header dump:\n");
- printk(KERN_DEBUG "\tmagic %#08x\n",
- be32_to_cpu(ec_hdr->magic));
- printk(KERN_DEBUG "\tversion %d\n", (int)ec_hdr->version);
- printk(KERN_DEBUG "\tec %llu\n",
- (long long)be64_to_cpu(ec_hdr->ec));
- printk(KERN_DEBUG "\tvid_hdr_offset %d\n",
- be32_to_cpu(ec_hdr->vid_hdr_offset));
- printk(KERN_DEBUG "\tdata_offset %d\n",
- be32_to_cpu(ec_hdr->data_offset));
- printk(KERN_DEBUG "\timage_seq %d\n",
- be32_to_cpu(ec_hdr->image_seq));
- printk(KERN_DEBUG "\thdr_crc %#08x\n",
- be32_to_cpu(ec_hdr->hdr_crc));
- printk(KERN_DEBUG "erase counter header hexdump:\n");
+ pr_err("Erase counter header dump:\n");
+ pr_err("\tmagic %#08x\n", be32_to_cpu(ec_hdr->magic));
+ pr_err("\tversion %d\n", (int)ec_hdr->version);
+ pr_err("\tec %llu\n", (long long)be64_to_cpu(ec_hdr->ec));
+ pr_err("\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset));
+ pr_err("\tdata_offset %d\n", be32_to_cpu(ec_hdr->data_offset));
+ pr_err("\timage_seq %d\n", be32_to_cpu(ec_hdr->image_seq));
+ pr_err("\thdr_crc %#08x\n", be32_to_cpu(ec_hdr->hdr_crc));
+ pr_err("erase counter header hexdump:\n");
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
ec_hdr, UBI_EC_HDR_SIZE, 1);
}
/**
- * ubi_dbg_dump_vid_hdr - dump a volume identifier header.
+ * ubi_dump_vid_hdr - dump a volume identifier header.
* @vid_hdr: the volume identifier header to dump
*/
-void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
{
- printk(KERN_DEBUG "Volume identifier header dump:\n");
- printk(KERN_DEBUG "\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
- printk(KERN_DEBUG "\tversion %d\n", (int)vid_hdr->version);
- printk(KERN_DEBUG "\tvol_type %d\n", (int)vid_hdr->vol_type);
- printk(KERN_DEBUG "\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
- printk(KERN_DEBUG "\tcompat %d\n", (int)vid_hdr->compat);
- printk(KERN_DEBUG "\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
- printk(KERN_DEBUG "\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
- printk(KERN_DEBUG "\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
- printk(KERN_DEBUG "\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
- printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
- printk(KERN_DEBUG "\tsqnum %llu\n",
+ pr_err("Volume identifier header dump:\n");
+ pr_err("\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
+ pr_err("\tversion %d\n", (int)vid_hdr->version);
+ pr_err("\tvol_type %d\n", (int)vid_hdr->vol_type);
+ pr_err("\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
+ pr_err("\tcompat %d\n", (int)vid_hdr->compat);
+ pr_err("\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
+ pr_err("\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
+ pr_err("\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
+ pr_err("\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
+ pr_err("\tsqnum %llu\n",
(unsigned long long)be64_to_cpu(vid_hdr->sqnum));
- printk(KERN_DEBUG "\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
- printk(KERN_DEBUG "Volume identifier header hexdump:\n");
+ pr_err("\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
+ pr_err("Volume identifier header hexdump:\n");
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
vid_hdr, UBI_VID_HDR_SIZE, 1);
}
/**
- * ubi_dbg_dump_vol_info- dump volume information.
+ * ubi_dump_vol_info - dump volume information.
* @vol: UBI volume description object
*/
-void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
+void ubi_dump_vol_info(const struct ubi_volume *vol)
{
- printk(KERN_DEBUG "Volume information dump:\n");
- printk(KERN_DEBUG "\tvol_id %d\n", vol->vol_id);
- printk(KERN_DEBUG "\treserved_pebs %d\n", vol->reserved_pebs);
- printk(KERN_DEBUG "\talignment %d\n", vol->alignment);
- printk(KERN_DEBUG "\tdata_pad %d\n", vol->data_pad);
- printk(KERN_DEBUG "\tvol_type %d\n", vol->vol_type);
- printk(KERN_DEBUG "\tname_len %d\n", vol->name_len);
- printk(KERN_DEBUG "\tusable_leb_size %d\n", vol->usable_leb_size);
- printk(KERN_DEBUG "\tused_ebs %d\n", vol->used_ebs);
- printk(KERN_DEBUG "\tused_bytes %lld\n", vol->used_bytes);
- printk(KERN_DEBUG "\tlast_eb_bytes %d\n", vol->last_eb_bytes);
- printk(KERN_DEBUG "\tcorrupted %d\n", vol->corrupted);
- printk(KERN_DEBUG "\tupd_marker %d\n", vol->upd_marker);
+ pr_err("Volume information dump:\n");
+ pr_err("\tvol_id %d\n", vol->vol_id);
+ pr_err("\treserved_pebs %d\n", vol->reserved_pebs);
+ pr_err("\talignment %d\n", vol->alignment);
+ pr_err("\tdata_pad %d\n", vol->data_pad);
+ pr_err("\tvol_type %d\n", vol->vol_type);
+ pr_err("\tname_len %d\n", vol->name_len);
+ pr_err("\tusable_leb_size %d\n", vol->usable_leb_size);
+ pr_err("\tused_ebs %d\n", vol->used_ebs);
+ pr_err("\tused_bytes %lld\n", vol->used_bytes);
+ pr_err("\tlast_eb_bytes %d\n", vol->last_eb_bytes);
+ pr_err("\tcorrupted %d\n", vol->corrupted);
+ pr_err("\tupd_marker %d\n", vol->upd_marker);
if (vol->name_len <= UBI_VOL_NAME_MAX &&
strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
- printk(KERN_DEBUG "\tname %s\n", vol->name);
+ pr_err("\tname %s\n", vol->name);
} else {
- printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
vol->name[0], vol->name[1], vol->name[2],
vol->name[3], vol->name[4]);
}
}
/**
- * ubi_dbg_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
+ * ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
* @r: the object to dump
* @idx: volume table index
*/
-void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
{
int name_len = be16_to_cpu(r->name_len);
- printk(KERN_DEBUG "Volume table record %d dump:\n", idx);
- printk(KERN_DEBUG "\treserved_pebs %d\n",
- be32_to_cpu(r->reserved_pebs));
- printk(KERN_DEBUG "\talignment %d\n", be32_to_cpu(r->alignment));
- printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(r->data_pad));
- printk(KERN_DEBUG "\tvol_type %d\n", (int)r->vol_type);
- printk(KERN_DEBUG "\tupd_marker %d\n", (int)r->upd_marker);
- printk(KERN_DEBUG "\tname_len %d\n", name_len);
+ pr_err("Volume table record %d dump:\n", idx);
+ pr_err("\treserved_pebs %d\n", be32_to_cpu(r->reserved_pebs));
+ pr_err("\talignment %d\n", be32_to_cpu(r->alignment));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(r->data_pad));
+ pr_err("\tvol_type %d\n", (int)r->vol_type);
+ pr_err("\tupd_marker %d\n", (int)r->upd_marker);
+ pr_err("\tname_len %d\n", name_len);
if (r->name[0] == '\0') {
- printk(KERN_DEBUG "\tname NULL\n");
+ pr_err("\tname NULL\n");
return;
}
if (name_len <= UBI_VOL_NAME_MAX &&
strnlen(&r->name[0], name_len + 1) == name_len) {
- printk(KERN_DEBUG "\tname %s\n", &r->name[0]);
+ pr_err("\tname %s\n", &r->name[0]);
} else {
- printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
r->name[0], r->name[1], r->name[2], r->name[3],
r->name[4]);
}
- printk(KERN_DEBUG "\tcrc %#08x\n", be32_to_cpu(r->crc));
+ pr_err("\tcrc %#08x\n", be32_to_cpu(r->crc));
}
/**
- * ubi_dbg_dump_sv - dump a &struct ubi_scan_volume object.
- * @sv: the object to dump
+ * ubi_dump_av - dump a &struct ubi_ainf_volume object.
+ * @av: the object to dump
*/
-void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv)
+void ubi_dump_av(const struct ubi_ainf_volume *av)
{
- printk(KERN_DEBUG "Volume scanning information dump:\n");
- printk(KERN_DEBUG "\tvol_id %d\n", sv->vol_id);
- printk(KERN_DEBUG "\thighest_lnum %d\n", sv->highest_lnum);
- printk(KERN_DEBUG "\tleb_count %d\n", sv->leb_count);
- printk(KERN_DEBUG "\tcompat %d\n", sv->compat);
- printk(KERN_DEBUG "\tvol_type %d\n", sv->vol_type);
- printk(KERN_DEBUG "\tused_ebs %d\n", sv->used_ebs);
- printk(KERN_DEBUG "\tlast_data_size %d\n", sv->last_data_size);
- printk(KERN_DEBUG "\tdata_pad %d\n", sv->data_pad);
+ pr_err("Volume attaching information dump:\n");
+ pr_err("\tvol_id %d\n", av->vol_id);
+ pr_err("\thighest_lnum %d\n", av->highest_lnum);
+ pr_err("\tleb_count %d\n", av->leb_count);
+ pr_err("\tcompat %d\n", av->compat);
+ pr_err("\tvol_type %d\n", av->vol_type);
+ pr_err("\tused_ebs %d\n", av->used_ebs);
+ pr_err("\tlast_data_size %d\n", av->last_data_size);
+ pr_err("\tdata_pad %d\n", av->data_pad);
}
/**
- * ubi_dbg_dump_seb - dump a &struct ubi_scan_leb object.
- * @seb: the object to dump
+ * ubi_dump_aeb - dump a &struct ubi_ainf_peb object.
+ * @aeb: the object to dump
* @type: object type: 0 - not corrupted, 1 - corrupted
*/
-void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type)
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type)
{
- printk(KERN_DEBUG "eraseblock scanning information dump:\n");
- printk(KERN_DEBUG "\tec %d\n", seb->ec);
- printk(KERN_DEBUG "\tpnum %d\n", seb->pnum);
+ pr_err("eraseblock attaching information dump:\n");
+ pr_err("\tec %d\n", aeb->ec);
+ pr_err("\tpnum %d\n", aeb->pnum);
if (type == 0) {
- printk(KERN_DEBUG "\tlnum %d\n", seb->lnum);
- printk(KERN_DEBUG "\tscrub %d\n", seb->scrub);
- printk(KERN_DEBUG "\tsqnum %llu\n", seb->sqnum);
+ pr_err("\tlnum %d\n", aeb->lnum);
+ pr_err("\tscrub %d\n", aeb->scrub);
+ pr_err("\tsqnum %llu\n", aeb->sqnum);
}
}
/**
- * ubi_dbg_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
+ * ubi_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
* @req: the object to dump
*/
-void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req)
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
{
char nm[17];
- printk(KERN_DEBUG "Volume creation request dump:\n");
- printk(KERN_DEBUG "\tvol_id %d\n", req->vol_id);
- printk(KERN_DEBUG "\talignment %d\n", req->alignment);
- printk(KERN_DEBUG "\tbytes %lld\n", (long long)req->bytes);
- printk(KERN_DEBUG "\tvol_type %d\n", req->vol_type);
- printk(KERN_DEBUG "\tname_len %d\n", req->name_len);
+ pr_err("Volume creation request dump:\n");
+ pr_err("\tvol_id %d\n", req->vol_id);
+ pr_err("\talignment %d\n", req->alignment);
+ pr_err("\tbytes %lld\n", (long long)req->bytes);
+ pr_err("\tvol_type %d\n", req->vol_type);
+ pr_err("\tname_len %d\n", req->name_len);
memcpy(nm, req->name, 16);
nm[16] = 0;
- printk(KERN_DEBUG "\t1st 16 characters of name: %s\n", nm);
+ pr_err("\t1st 16 characters of name: %s\n", nm);
}
+/*
+ * Root directory for UBI stuff in debugfs. Contains sub-directories which
+ * contain the stuff specific to particular UBI devices.
+ */
+static struct dentry *dfs_rootdir;
+
/**
- * ubi_dbg_dump_flash - dump a region of flash.
+ * ubi_debugfs_init - create UBI debugfs directory.
+ *
+ * Create UBI debugfs directory. Returns zero in case of success and a negative
+ * error code in case of failure.
+ */
+int ubi_debugfs_init(void)
+{
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ dfs_rootdir = debugfs_create_dir("ubi", NULL);
+ if (IS_ERR_OR_NULL(dfs_rootdir)) {
+ int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
+
+ ubi_err("cannot create \"ubi\" debugfs directory, error %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_debugfs_exit - remove UBI debugfs directory.
+ */
+void ubi_debugfs_exit(void)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove(dfs_rootdir);
+}
+
+/* Read an UBI debugfs file */
+static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long ubi_num = (unsigned long)file->private_data;
+ struct dentry *dent = file->f_path.dentry;
+ struct ubi_device *ubi;
+ struct ubi_debug_info *d;
+ char buf[3];
+ int val;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ d = &ubi->dbg;
+
+ if (dent == d->dfs_chk_gen)
+ val = d->chk_gen;
+ else if (dent == d->dfs_chk_io)
+ val = d->chk_io;
+ else if (dent == d->dfs_disable_bgt)
+ val = d->disable_bgt;
+ else if (dent == d->dfs_emulate_bitflips)
+ val = d->emulate_bitflips;
+ else if (dent == d->dfs_emulate_io_failures)
+ val = d->emulate_io_failures;
+ else {
+ count = -EINVAL;
+ goto out;
+ }
+
+ if (val)
+ buf[0] = '1';
+ else
+ buf[0] = '0';
+ buf[1] = '\n';
+ buf[2] = 0x00;
+
+ count = simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+
+out:
+ ubi_put_device(ubi);
+ return count;
+}
+
+/* Write an UBI debugfs file */
+static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long ubi_num = (unsigned long)file->private_data;
+ struct dentry *dent = file->f_path.dentry;
+ struct ubi_device *ubi;
+ struct ubi_debug_info *d;
+ size_t buf_size;
+ char buf[8];
+ int val;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ d = &ubi->dbg;
+
+ buf_size = min_t(size_t, count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size)) {
+ count = -EFAULT;
+ goto out;
+ }
+
+ if (buf[0] == '1')
+ val = 1;
+ else if (buf[0] == '0')
+ val = 0;
+ else {
+ count = -EINVAL;
+ goto out;
+ }
+
+ if (dent == d->dfs_chk_gen)
+ d->chk_gen = val;
+ else if (dent == d->dfs_chk_io)
+ d->chk_io = val;
+ else if (dent == d->dfs_disable_bgt)
+ d->disable_bgt = val;
+ else if (dent == d->dfs_emulate_bitflips)
+ d->emulate_bitflips = val;
+ else if (dent == d->dfs_emulate_io_failures)
+ d->emulate_io_failures = val;
+ else
+ count = -EINVAL;
+
+out:
+ ubi_put_device(ubi);
+ return count;
+}
+
+/* File operations for all UBI debugfs files */
+static const struct file_operations dfs_fops = {
+ .read = dfs_file_read,
+ .write = dfs_file_write,
+ .open = simple_open,
+ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * ubi_debugfs_init_dev - initialize debugfs for an UBI device.
* @ubi: UBI device description object
- * @pnum: the physical eraseblock number to dump
- * @offset: the starting offset within the physical eraseblock to dump
- * @len: the length of the region to dump
+ *
+ * This function creates all debugfs files for UBI device @ubi. Returns zero in
+ * case of success and a negative error code in case of failure.
*/
-void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
+int ubi_debugfs_init_dev(struct ubi_device *ubi)
{
- int err;
- size_t read;
- void *buf;
- loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+ int err, n;
+ unsigned long ubi_num = ubi->ubi_num;
+ const char *fname;
+ struct dentry *dent;
+ struct ubi_debug_info *d = &ubi->dbg;
- buf = vmalloc(len);
- if (!buf)
- return;
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
- if (err && err != -EUCLEAN) {
- ubi_err("error %d while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, len, pnum, offset, read);
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
+ ubi->ubi_num);
+ if (n == UBI_DFS_DIR_LEN) {
+ /* The array size is too small */
+ fname = UBI_DFS_DIR_NAME;
+ dent = ERR_PTR(-EINVAL);
goto out;
}
- dbg_msg("dumping %d bytes of data from PEB %d, offset %d",
- len, pnum, offset);
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+ fname = d->dfs_dir_name;
+ dent = debugfs_create_dir(fname, dfs_rootdir);
+ if (IS_ERR_OR_NULL(dent))
+ goto out;
+ d->dfs_dir = dent;
+
+ fname = "chk_gen";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_gen = dent;
+
+ fname = "chk_io";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_io = dent;
+
+ fname = "tst_disable_bgt";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_disable_bgt = dent;
+
+ fname = "tst_emulate_bitflips";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_emulate_bitflips = dent;
+
+ fname = "tst_emulate_io_failures";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_emulate_io_failures = dent;
+
+ return 0;
+
+out_remove:
+ debugfs_remove_recursive(d->dfs_dir);
out:
- vfree(buf);
- return;
+ err = dent ? PTR_ERR(dent) : -ENODEV;
+ ubi_err("cannot create \"%s\" debugfs file or directory, error %d\n",
+ fname, err);
+ return err;
}
-#endif /* CONFIG_MTD_UBI_DEBUG */
+/**
+ * ubi_debugfs_exit_dev - free all debugfs files corresponding to device @ubi
+ * @ubi: UBI device description object
+ */
+void ubi_debugfs_exit_dev(struct ubi_device *ubi)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove_recursive(ubi->dbg.dfs_dir);
+}
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 3f1a09c5c43..cba89fcd158 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -21,37 +21,27 @@
#ifndef __UBI_DEBUG_H__
#define __UBI_DEBUG_H__
-struct ubi_ec_hdr;
-struct ubi_vid_hdr;
-struct ubi_volume;
-struct ubi_vtbl_record;
-struct ubi_scan_volume;
-struct ubi_scan_leb;
-struct ubi_mkvol_req;
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
-#ifdef CONFIG_MTD_UBI_DEBUG
#include <linux/random.h>
#define ubi_assert(expr) do { \
if (unlikely(!(expr))) { \
- printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
+ pr_crit("UBI assert failed in %s at %u (pid %d)\n", \
__func__, __LINE__, current->pid); \
- ubi_dbg_dump_stack(); \
+ dump_stack(); \
} \
} while (0)
-#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
-
-#define ubi_dbg_dump_stack() dump_stack()
-
-#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
+#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
print_hex_dump(l, ps, pt, r, g, b, len, a)
#define ubi_dbg_msg(type, fmt, ...) \
- pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
+ pr_debug("UBI DBG " type " (pid %d): " fmt "\n", current->pid, \
+ ##__VA_ARGS__)
-/* Just a debugging messages not related to any specific UBI subsystem */
-#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__)
/* General debugging messages */
#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
/* Messages from the eraseblock association sub-system */
@@ -63,155 +53,78 @@ struct ubi_mkvol_req;
/* Initialization and build messages */
#define dbg_bld(fmt, ...) ubi_dbg_msg("bld", fmt, ##__VA_ARGS__)
-void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
-void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
-void ubi_dbg_dump_vol_info(const struct ubi_volume *vol);
-void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
-void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv);
-void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
-void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
-void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
-
-extern unsigned int ubi_chk_flags;
-
-/*
- * Debugging check flags.
- *
- * UBI_CHK_GEN: general checks
- * UBI_CHK_IO: check writes and erases
- */
-enum {
- UBI_CHK_GEN = 0x1,
- UBI_CHK_IO = 0x2,
-};
-
-int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
-int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
- int offset, int len);
-
-extern unsigned int ubi_tst_flags;
-
-/*
- * Special testing flags.
- *
- * UBIFS_TST_DISABLE_BGT: disable the background thread
- * UBI_TST_EMULATE_BITFLIPS: emulate bit-flips
- * UBI_TST_EMULATE_WRITE_FAILURES: emulate write failures
- * UBI_TST_EMULATE_ERASE_FAILURES: emulate erase failures
- */
-enum {
- UBI_TST_DISABLE_BGT = 0x1,
- UBI_TST_EMULATE_BITFLIPS = 0x2,
- UBI_TST_EMULATE_WRITE_FAILURES = 0x4,
- UBI_TST_EMULATE_ERASE_FAILURES = 0x8,
-};
+void ubi_dump_vol_info(const struct ubi_volume *vol);
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
+void ubi_dump_av(const struct ubi_ainf_volume *av);
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type);
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req);
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
+ int len);
+int ubi_debugfs_init(void);
+void ubi_debugfs_exit(void);
+int ubi_debugfs_init_dev(struct ubi_device *ubi);
+void ubi_debugfs_exit_dev(struct ubi_device *ubi);
/**
* ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * @ubi: UBI device description object
*
* Returns non-zero if the UBI background thread is disabled for testing
* purposes.
*/
-static inline int ubi_dbg_is_bgt_disabled(void)
+static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
{
- return ubi_tst_flags & UBI_TST_DISABLE_BGT;
+ return ubi->dbg.disable_bgt;
}
/**
* ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
+ * @ubi: UBI device description object
*
* Returns non-zero if a bit-flip should be emulated, otherwise returns zero.
*/
-static inline int ubi_dbg_is_bitflip(void)
+static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
- if (ubi_tst_flags & UBI_TST_EMULATE_BITFLIPS)
- return !(random32() % 200);
+ if (ubi->dbg.emulate_bitflips)
+ return !(prandom_u32() % 200);
return 0;
}
/**
* ubi_dbg_is_write_failure - if it is time to emulate a write failure.
+ * @ubi: UBI device description object
*
* Returns non-zero if a write failure should be emulated, otherwise returns
* zero.
*/
-static inline int ubi_dbg_is_write_failure(void)
+static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
- if (ubi_tst_flags & UBI_TST_EMULATE_WRITE_FAILURES)
- return !(random32() % 500);
+ if (ubi->dbg.emulate_io_failures)
+ return !(prandom_u32() % 500);
return 0;
}
/**
* ubi_dbg_is_erase_failure - if its time to emulate an erase failure.
+ * @ubi: UBI device description object
*
* Returns non-zero if an erase failure should be emulated, otherwise returns
* zero.
*/
-static inline int ubi_dbg_is_erase_failure(void)
+static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
- if (ubi_tst_flags & UBI_TST_EMULATE_ERASE_FAILURES)
- return !(random32() % 400);
+ if (ubi->dbg.emulate_io_failures)
+ return !(prandom_u32() % 400);
return 0;
}
-#else
-
-/* Use "if (0)" to make compiler check arguments even if debugging is off */
-#define ubi_assert(expr) do { \
- if (0) { \
- printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
- __func__, __LINE__, current->pid); \
- } \
-} while (0)
-
-#define dbg_err(fmt, ...) do { \
- if (0) \
- ubi_err(fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define ubi_dbg_msg(fmt, ...) do { \
- if (0) \
- pr_debug(fmt "\n", ##__VA_ARGS__); \
-} while (0)
-
-#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_gen(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_eba(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_wl(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_io(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_bld(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-
-static inline void ubi_dbg_dump_stack(void) { return; }
-static inline void
-ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) { return; }
-static inline void
-ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) { return; }
-static inline void
-ubi_dbg_dump_vol_info(const struct ubi_volume *vol) { return; }
-static inline void
-ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) { return; }
-static inline void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) { return; }
-static inline void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb,
- int type) { return; }
-static inline void
-ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req) { return; }
-static inline void ubi_dbg_dump_flash(struct ubi_device *ubi,
- int pnum, int offset, int len) { return; }
-static inline void
-ubi_dbg_print_hex_dump(const char *l, const char *ps, int pt, int r,
- int g, const void *b, size_t len, bool a) { return; }
-
-static inline int ubi_dbg_is_bgt_disabled(void) { return 0; }
-static inline int ubi_dbg_is_bitflip(void) { return 0; }
-static inline int ubi_dbg_is_write_failure(void) { return 0; }
-static inline int ubi_dbg_is_erase_failure(void) { return 0; }
-static inline int ubi_dbg_check_all_ff(struct ubi_device *ubi,
- int pnum, int offset,
- int len) { return 0; }
-static inline int ubi_dbg_check_write(struct ubi_device *ubi,
- const void *buf, int pnum,
- int offset, int len) { return 0; }
+static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_io;
+}
-#endif /* !CONFIG_MTD_UBI_DEBUG */
+static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_gen;
+}
#endif /* !__UBI_DEBUG_H__ */
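
Editor's note: the inline helpers above fire pseudo-randomly and only when the matching per-device flag is set: roughly one call in 200 reports an emulated bit-flip, one in 500 a write failure, and one in 400 an erase failure. The standalone sketch below reproduces that sampling scheme, using rand() merely as a stand-in for the kernel's prandom_u32(); the call count and seed are illustrative only.

#include <stdio.h>
#include <stdlib.h>

/* Mirror of the "fire once every N calls on average" test. */
static int should_emulate(unsigned int one_in_n)
{
	return !((unsigned int)rand() % one_in_n);
}

int main(void)
{
	unsigned long calls = 1000000, bitflips = 0, wfail = 0, efail = 0;

	srand(42);
	for (unsigned long i = 0; i < calls; i++) {
		bitflips += should_emulate(200);	/* ubi_dbg_is_bitflip() */
		wfail += should_emulate(500);		/* ubi_dbg_is_write_failure() */
		efail += should_emulate(400);		/* ubi_dbg_is_erase_failure() */
	}
	printf("bit-flips %lu, write failures %lu, erase failures %lu out of %lu calls\n",
	       bitflips, wfail, efail, calls);
	return 0;
}
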
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 4be67181501..0e11671dadc 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -57,7 +57,7 @@
* global sequence counter value. It also increases the global sequence
* counter.
*/
-static unsigned long long next_sqnum(struct ubi_device *ubi)
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
{
unsigned long long sqnum;
@@ -340,8 +340,10 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
- err = ubi_wl_put_peb(ubi, pnum, 0);
+ up_read(&ubi->fm_sem);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
out_unlock:
leb_write_unlock(ubi, vol_id, lnum);
@@ -420,9 +422,8 @@ retry:
*/
if (err == UBI_IO_BAD_HDR_EBADMSG ||
err == UBI_IO_BAD_HDR) {
- ubi_warn("corrupted VID header at PEB "
- "%d, LEB %d:%d", pnum, vol_id,
- lnum);
+ ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
+ pnum, vol_id, lnum);
err = -EBADMSG;
} else
ubi_ro_mode(ubi);
@@ -443,7 +444,7 @@ retry:
if (err == UBI_IO_BITFLIPS) {
scrub = 1;
err = 0;
- } else if (err == -EBADMSG) {
+ } else if (mtd_is_eccerr(err)) {
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
goto out_unlock;
scrub = 1;
@@ -507,7 +508,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
return -ENOMEM;
retry:
- new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
+ new_pnum = ubi_wl_get_peb(ubi);
if (new_pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
return new_pnum;
@@ -522,25 +523,25 @@ retry:
goto out_put;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
if (err)
goto write_error;
data_size = offset + len;
mutex_lock(&ubi->buf_mutex);
- memset(ubi->peb_buf1 + offset, 0xFF, len);
+ memset(ubi->peb_buf + offset, 0xFF, len);
/* Read everything before the area where the write failure happened */
if (offset > 0) {
- err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
if (err && err != UBI_IO_BITFLIPS)
goto out_unlock;
}
- memcpy(ubi->peb_buf1 + offset, buf, len);
+ memcpy(ubi->peb_buf + offset, buf, len);
- err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
+ err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
if (err) {
mutex_unlock(&ubi->buf_mutex);
goto write_error;
@@ -549,8 +550,10 @@ retry:
mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = new_pnum;
- ubi_wl_put_peb(ubi, pnum, 1);
+ up_read(&ubi->fm_sem);
+ ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
ubi_msg("data was successfully recovered");
return 0;
@@ -558,7 +561,7 @@ retry:
out_unlock:
mutex_unlock(&ubi->buf_mutex);
out_put:
- ubi_wl_put_peb(ubi, new_pnum, 1);
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -568,7 +571,7 @@ write_error:
* get another one.
*/
ubi_warn("failed to write to PEB %d", new_pnum);
- ubi_wl_put_peb(ubi, new_pnum, 1);
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
if (++tries > UBI_IO_RETRIES) {
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -585,7 +588,6 @@ write_error:
* @buf: the data to write
* @offset: offset within the logical eraseblock where to write
* @len: how many bytes to write
- * @dtype: data type
*
* This function writes data to logical eraseblock @lnum of a dynamic volume
* @vol. Returns zero in case of success and a negative error code in case
@@ -593,7 +595,7 @@ write_error:
* written to the flash media, but may be some garbage.
*/
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
- const void *buf, int offset, int len, int dtype)
+ const void *buf, int offset, int len)
{
int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
@@ -634,14 +636,14 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
}
vid_hdr->vol_type = UBI_VID_DYNAMIC;
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
@@ -661,14 +663,15 @@ retry:
if (len) {
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
if (err) {
- ubi_warn("failed to write %d bytes at offset %d of "
- "LEB %d:%d, PEB %d", len, offset, vol_id,
- lnum, pnum);
+ ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
goto write_error;
}
}
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_sem);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -687,7 +690,7 @@ write_error:
* eraseblock, so just put it and request a new one. We assume that if
* this physical eraseblock went bad, the erase code will handle that.
*/
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
@@ -695,7 +698,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -707,7 +710,6 @@ write_error:
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
- * @dtype: data type
* @used_ebs: how many logical eraseblocks will this volume contain
*
* This function writes data to logical eraseblock @lnum of static volume
@@ -724,8 +726,7 @@ write_error:
* code in case of failure.
*/
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype,
- int used_ebs)
+ int lnum, const void *buf, int len, int used_ebs)
{
int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
@@ -750,7 +751,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -763,7 +764,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
vid_hdr->data_crc = cpu_to_be32(crc);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
@@ -788,7 +789,9 @@ retry:
}
ubi_assert(vol->eba_tbl[lnum] < 0);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_sem);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -807,7 +810,7 @@ write_error:
return err;
}
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
@@ -815,7 +818,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -827,7 +830,6 @@ write_error:
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
- * @dtype: data type
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
@@ -839,7 +841,7 @@ write_error:
* LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
*/
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype)
+ int lnum, const void *buf, int len)
{
int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
@@ -856,7 +858,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
return err;
- return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
}
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -868,7 +870,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
if (err)
goto out_mutex;
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -881,7 +883,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
vid_hdr->data_crc = cpu_to_be32(crc);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
err = pnum;
goto out_leb_unlock;
@@ -905,12 +907,14 @@ retry:
}
if (vol->eba_tbl[lnum] >= 0) {
- err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
if (err)
goto out_leb_unlock;
}
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_sem);
out_leb_unlock:
leb_write_unlock(ubi, vol_id, lnum);
@@ -930,13 +934,13 @@ write_error:
goto out_leb_unlock;
}
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
goto out_leb_unlock;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -979,7 +983,7 @@ static int is_error_sane(int err)
* physical eraseblock @to. The @vid_hdr buffer may be changed by this
* function. Returns:
* o %0 in case of success;
- * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_CANCEL_BITFLIPS, etc;
+ * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
* o a negative error code in case of failure.
*/
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
@@ -1028,12 +1032,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
* holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
* LEB is already locked, we just do not move it and return
- * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
+ * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
+ * we do not know the reasons of the contention - it may be just a
+ * normal I/O on this LEB, so we want to re-try.
*/
err = leb_write_trylock(ubi, vol_id, lnum);
if (err) {
dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
- return MOVE_CANCEL_RACE;
+ return MOVE_RETRY;
}
/*
@@ -1042,22 +1048,21 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* cancel it.
*/
if (vol->eba_tbl[lnum] != from) {
- dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to "
- "PEB %d, cancel", vol_id, lnum, from,
- vol->eba_tbl[lnum]);
+ dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
+ vol_id, lnum, from, vol->eba_tbl[lnum]);
err = MOVE_CANCEL_RACE;
goto out_unlock_leb;
}
/*
* OK, now the LEB is locked and we can safely start moving it. Since
- * this function utilizes the @ubi->peb_buf1 buffer which is shared
+ * this function utilizes the @ubi->peb_buf buffer which is shared
* with some other functions - we lock the buffer by taking the
* @ubi->buf_mutex.
*/
mutex_lock(&ubi->buf_mutex);
dbg_wl("read %d bytes of data", aldata_size);
- err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
if (err && err != UBI_IO_BITFLIPS) {
ubi_warn("error %d while reading data from PEB %d",
err, from);
@@ -1077,10 +1082,10 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
*/
if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
aldata_size = data_size =
- ubi_calc_data_len(ubi, ubi->peb_buf1, data_size);
+ ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
cond_resched();
- crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size);
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
cond_resched();
/*
@@ -1094,7 +1099,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
vid_hdr->data_size = cpu_to_be32(data_size);
vid_hdr->data_crc = cpu_to_be32(crc);
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
if (err) {
@@ -1109,17 +1114,17 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
if (err) {
if (err != UBI_IO_BITFLIPS) {
- ubi_warn("error %d while reading VID header back from "
- "PEB %d", err, to);
+ ubi_warn("error %d while reading VID header back from PEB %d",
+ err, to);
if (is_error_sane(err))
err = MOVE_TARGET_RD_ERR;
} else
- err = MOVE_CANCEL_BITFLIPS;
+ err = MOVE_TARGET_BITFLIPS;
goto out_unlock_buf;
}
if (data_size > 0) {
- err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
+ err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
if (err) {
if (err == -EIO)
err = MOVE_TARGET_WR_ERR;
@@ -1132,31 +1137,33 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* We've written the data and are going to read it back to make
* sure it was written correctly.
*/
-
- err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
+ memset(ubi->peb_buf, 0xFF, aldata_size);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
if (err) {
if (err != UBI_IO_BITFLIPS) {
- ubi_warn("error %d while reading data back "
- "from PEB %d", err, to);
+ ubi_warn("error %d while reading data back from PEB %d",
+ err, to);
if (is_error_sane(err))
err = MOVE_TARGET_RD_ERR;
} else
- err = MOVE_CANCEL_BITFLIPS;
+ err = MOVE_TARGET_BITFLIPS;
goto out_unlock_buf;
}
cond_resched();
- if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
- ubi_warn("read data back from PEB %d and it is "
- "different", to);
+ if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
+ ubi_warn("read data back from PEB %d and it is different",
+ to);
err = -EINVAL;
goto out_unlock_buf;
}
}
ubi_assert(vol->eba_tbl[lnum] == from);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = to;
+ up_read(&ubi->fm_sem);
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
@@ -1169,7 +1176,7 @@ out_unlock_leb:
* print_rsvd_warning - warn about not having enough reserved PEBs.
* @ubi: UBI device description object
*
- * This is a helper function for 'ubi_eba_init_scan()' which is called when UBI
+ * This is a helper function for 'ubi_eba_init()' which is called when UBI
* cannot reserve enough PEBs for bad block handling. This function makes a
* decision whether we have to print a warning or not. The algorithm is as
* follows:
@@ -1184,13 +1191,13 @@ out_unlock_leb:
* reported by real users.
*/
static void print_rsvd_warning(struct ubi_device *ubi,
- struct ubi_scan_info *si)
+ struct ubi_attach_info *ai)
{
/*
* The 1 << 18 (256KiB) number is picked randomly, just a reasonably
* large number to distinguish between newly flashed and used images.
*/
- if (si->max_sqnum > (1 << 18)) {
+ if (ai->max_sqnum > (1 << 18)) {
int min = ubi->beb_rsvd_level / 10;
if (!min)
@@ -1199,27 +1206,123 @@ static void print_rsvd_warning(struct ubi_device *ubi,
return;
}
- ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d,"
- " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
+ ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
+ ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
if (ubi->corr_peb_count)
ubi_warn("%d PEBs are corrupted and not used",
- ubi->corr_peb_count);
+ ubi->corr_peb_count);
+}
+
+/**
+ * self_check_eba - run a self check on the EBA table constructed by fastmap.
+ * @ubi: UBI device description object
+ * @ai_fastmap: UBI attach info object created by fastmap
+ * @ai_scan: UBI attach info object created by scanning
+ *
+ * Returns < 0 in case of an internal error, 0 otherwise.
+ * If a bad EBA table entry was found it will be printed out and
+ * ubi_assert() triggers.
+ */
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan)
+{
+ int i, j, num_volumes, ret = 0;
+ int **scan_eba, **fm_eba;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *rb;
+
+ num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+ scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
+ if (!scan_eba)
+ return -ENOMEM;
+
+ fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
+ if (!fm_eba) {
+ kfree(scan_eba);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_volumes; i++) {
+ vol = ubi->volumes[i];
+ if (!vol)
+ continue;
+
+ scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
+ GFP_KERNEL);
+ if (!scan_eba[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
+ GFP_KERNEL);
+ if (!fm_eba[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
+
+ av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+ scan_eba[i][aeb->lnum] = aeb->pnum;
+
+ av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+ fm_eba[i][aeb->lnum] = aeb->pnum;
+
+ for (j = 0; j < vol->reserved_pebs; j++) {
+ if (scan_eba[i][j] != fm_eba[i][j]) {
+ if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
+ fm_eba[i][j] == UBI_LEB_UNMAPPED)
+ continue;
+
+ ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
+ vol->vol_id, i, fm_eba[i][j],
+ scan_eba[i][j]);
+ ubi_assert(0);
+ }
+ }
+ }
+
+out_free:
+ for (i = 0; i < num_volumes; i++) {
+ if (!ubi->volumes[i])
+ continue;
+
+ kfree(scan_eba[i]);
+ kfree(fm_eba[i]);
+ }
+
+ kfree(scan_eba);
+ kfree(fm_eba);
+ return ret;
}
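
Editor's note: self_check_eba() above flattens both attach-info trees into per-volume LEB-to-PEB arrays and cross-checks them, counting a mismatch only when both sides claim a mapping. A minimal sketch of just that comparison step; the array contents and -1 "unmapped" marker are invented for illustration.

#include <stdio.h>

#define LEB_UNMAPPED	(-1)	/* stand-in for UBI_LEB_UNMAPPED */

/* Compare two per-volume LEB->PEB tables; entries unmapped on either
 * side are skipped, as in self_check_eba(). */
static int compare_eba(const int *scan, const int *fm, int nlebs, int vol_id)
{
	int j, mismatches = 0;

	for (j = 0; j < nlebs; j++) {
		if (scan[j] == fm[j])
			continue;
		if (scan[j] == LEB_UNMAPPED || fm[j] == LEB_UNMAPPED)
			continue;
		printf("LEB %d:%d is PEB %d instead of %d\n",
		       vol_id, j, fm[j], scan[j]);
		mismatches++;
	}
	return mismatches;
}

int main(void)
{
	int scan[] = { 10, LEB_UNMAPPED, 12, 13 };
	int fm[]   = { 10, 11,           99, LEB_UNMAPPED };

	printf("%d mismatch(es)\n", compare_eba(scan, fm, 4, 0));
	return 0;
}
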
/**
- * ubi_eba_init_scan - initialize the EBA sub-system using scanning information.
+ * ubi_eba_init - initialize the EBA sub-system using attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int i, j, err, num_volumes;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
struct rb_node *rb;
dbg_eba("initialize EBA sub-system");
@@ -1228,7 +1331,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
mutex_init(&ubi->alc_mutex);
ubi->ltree = RB_ROOT;
- ubi->global_sqnum = si->max_sqnum + 1;
+ ubi->global_sqnum = ai->max_sqnum + 1;
num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
for (i = 0; i < num_volumes; i++) {
@@ -1248,18 +1351,18 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
for (j = 0; j < vol->reserved_pebs; j++)
vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
- sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
- if (!sv)
+ av = ubi_find_av(ai, idx2vol_id(ubi, i));
+ if (!av)
continue;
- ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
- if (seb->lnum >= vol->reserved_pebs)
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ if (aeb->lnum >= vol->reserved_pebs)
/*
* This may happen in case of an unclean reboot
* during re-size.
*/
- ubi_scan_move_to_list(sv, seb, &si->erase);
- vol->eba_tbl[seb->lnum] = seb->pnum;
+ ubi_move_aeb_to_list(av, aeb, &ai->erase);
+ vol->eba_tbl[aeb->lnum] = aeb->pnum;
}
}
@@ -1281,7 +1384,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
if (ubi->avail_pebs < ubi->beb_rsvd_level) {
/* No enough free physical eraseblocks */
ubi->beb_rsvd_pebs = ubi->avail_pebs;
- print_rsvd_warning(ubi, si);
+ print_rsvd_warning(ubi, ai);
} else
ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
new file mode 100644
index 00000000000..0431b46d9fd
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap.c
@@ -0,0 +1,1567 @@
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ */
+
+#include <linux/crc32.h>
+#include "ubi.h"
+
+/**
+ * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
+ * @ubi: UBI device description object
+ */
+size_t ubi_calc_fm_size(struct ubi_device *ubi)
+{
+ size_t size;
+
+ size = sizeof(struct ubi_fm_hdr) + \
+ sizeof(struct ubi_fm_scan_pool) + \
+ sizeof(struct ubi_fm_scan_pool) + \
+ (ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
+ (sizeof(struct ubi_fm_eba) + \
+ (ubi->peb_count * sizeof(__be32))) + \
+ sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+ return roundup(size, ubi->leb_size);
+}
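
Editor's note: ubi_calc_fm_size() reserves room for the fastmap header, both scan pools, one erase-counter record per PEB, a flat EBA table with one 32-bit entry per PEB, and a volume header slot for every possible volume, then rounds up to whole LEBs. A rough worked example of that arithmetic follows; the record sizes and flash geometry are made up, the real values being sizeof() the ubi_fm_* structures, which are not shown in this hunk.

#include <stdio.h>

/* Hypothetical on-flash record sizes (stand-ins for the ubi_fm_* structs). */
#define FM_HDR_SZ	36
#define FM_POOL_SZ	160
#define FM_EC_SZ	8
#define FM_EBA_SZ	8
#define FM_VOLHDR_SZ	32
#define MAX_VOLUMES	128

static size_t roundup_to(size_t x, size_t mult)
{
	return ((x + mult - 1) / mult) * mult;
}

int main(void)
{
	size_t peb_count = 1024, leb_size = 126976;	/* example geometry */
	size_t size;

	size = FM_HDR_SZ + 2 * FM_POOL_SZ +
	       peb_count * FM_EC_SZ +
	       (FM_EBA_SZ + peb_count * sizeof(unsigned int)) +
	       FM_VOLHDR_SZ * MAX_VOLUMES;
	printf("raw fastmap size: %zu bytes -> %zu bytes (%zu LEB(s))\n",
	       size, roundup_to(size, leb_size),
	       roundup_to(size, leb_size) / leb_size);
	return 0;
}
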
+
+
+/**
+ * new_fm_vhdr - allocate a new volume header for fastmap usage.
+ * @ubi: UBI device description object
+ * @vol_id: the VID of the new header
+ *
+ * Returns a new struct ubi_vid_hdr on success.
+ * NULL indicates out of memory.
+ */
+static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
+{
+ struct ubi_vid_hdr *new;
+
+ new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!new)
+ goto out;
+
+ new->vol_type = UBI_VID_DYNAMIC;
+ new->vol_id = cpu_to_be32(vol_id);
+
+ /* UBI implementations without fastmap support have to delete the
+ * fastmap.
+ */
+ new->compat = UBI_COMPAT_DELETE;
+
+out:
+ return new;
+}
+
+/**
+ * add_aeb - create and add an attach erase block to a given list.
+ * @ai: UBI attach info object
+ * @list: the target list
+ * @pnum: PEB number of the new attach erase block
+ * @ec: erase counter of the new LEB
+ * @scrub: scrub this PEB after attaching
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
+ int pnum, int ec, int scrub)
+{
+ struct ubi_ainf_peb *aeb;
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ aeb->lnum = -1;
+ aeb->scrub = scrub;
+ aeb->copy_flag = aeb->sqnum = 0;
+
+ ai->ec_sum += aeb->ec;
+ ai->ec_count++;
+
+ if (ai->max_ec < aeb->ec)
+ ai->max_ec = aeb->ec;
+
+ if (ai->min_ec > aeb->ec)
+ ai->min_ec = aeb->ec;
+
+ list_add_tail(&aeb->u.list, list);
+
+ return 0;
+}
+
+/**
+ * add_vol - create and add a new volume to ubi_attach_info.
+ * @ai: ubi_attach_info object
+ * @vol_id: VID of the new volume
+ * @used_ebs: number of used EBS
+ * @data_pad: data padding value of the new volume
+ * @vol_type: volume type
+ * @last_eb_bytes: number of bytes in the last LEB
+ *
+ * Returns the new struct ubi_ainf_volume on success.
+ * NULL indicates an error.
+ */
+static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
+ int used_ebs, int data_pad, u8 vol_type,
+ int last_eb_bytes)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ while (*p) {
+ parent = *p;
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (vol_id > av->vol_id)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+ if (!av)
+ goto out;
+
+ av->highest_lnum = av->leb_count = 0;
+ av->vol_id = vol_id;
+ av->used_ebs = used_ebs;
+ av->data_pad = data_pad;
+ av->last_data_size = last_eb_bytes;
+ av->compat = 0;
+ av->vol_type = vol_type;
+ av->root = RB_ROOT;
+
+ dbg_bld("found volume (ID %i)", vol_id);
+
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+
+out:
+ return av;
+}
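
Editor's note: add_vol(), assign_aeb_to_av() and update_vol() all share the same red-black tree idiom: walk down from the root remembering the parent and the link slot, then hook the new node in with rb_link_node()/rb_insert_color(). The plain binary-search-tree sketch below shows just that descend-and-link walk; it omits rebalancing, uses the usual ascending ordering (which need not match the helper above exactly), and every name in it is made up.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in: the kernel embeds struct rb_node in struct
 * ubi_ainf_volume and finishes with rb_link_node(new, parent, p)
 * followed by rb_insert_color(); this sketch skips rebalancing. */
struct vol {
	int vol_id;
	struct vol *left, *right;
};

static struct vol *insert_vol(struct vol *root, int vol_id)
{
	struct vol **p = &root, *parent = NULL, *new;

	while (*p) {			/* descend, remembering the parent */
		parent = *p;
		p = vol_id < parent->vol_id ? &parent->left : &parent->right;
	}
	new = calloc(1, sizeof(*new));	/* link into the slot the walk left */
	new->vol_id = vol_id;
	*p = new;
	return root;
}

static void dump_in_order(const struct vol *v)
{
	if (!v)
		return;
	dump_in_order(v->left);
	printf("vol %d\n", v->vol_id);
	dump_in_order(v->right);
}

int main(void)
{
	struct vol *root = NULL;
	int ids[] = { 5, 1, 9, 3 };

	for (unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		root = insert_vol(root, ids[i]);
	dump_in_order(root);		/* prints volumes in ascending vol_id order */
	return 0;
}
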
+
+/**
+ * assign_aeb_to_av - assigns an AEB to a given ainf_volume and removes it
+ * from its original list.
+ * @ai: ubi_attach_info object
+ * @aeb: the AEB to be assigned
+ * @av: target scan volume
+ */
+static void assign_aeb_to_av(struct ubi_attach_info *ai,
+ struct ubi_ainf_peb *aeb,
+ struct ubi_ainf_volume *av)
+{
+ struct ubi_ainf_peb *tmp_aeb;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ p = &av->root.rb_node;
+ while (*p) {
+ parent = *p;
+
+ tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+ if (aeb->lnum != tmp_aeb->lnum) {
+ if (aeb->lnum < tmp_aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+
+ continue;
+ } else
+ break;
+ }
+
+ list_del(&aeb->u.list);
+ av->leb_count++;
+
+ rb_link_node(&aeb->u.rb, parent, p);
+ rb_insert_color(&aeb->u.rb, &av->root);
+}
+
+/**
+ * update_vol - inserts or updates a LEB which was found in a pool.
+ * @ubi: the UBI device object
+ * @ai: attach info object
+ * @av: the volume this LEB belongs to
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
+ struct ubi_ainf_peb *new_aeb)
+{
+ struct rb_node **p = &av->root.rb_node, *parent = NULL;
+ struct ubi_ainf_peb *aeb, *victim;
+ int cmp_res;
+
+ while (*p) {
+ parent = *p;
+ aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+
+ if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
+ if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+
+ continue;
+ }
+
+ /* This case can happen if the fastmap gets written
+ * because of a volume change (creation, deletion, ..).
+ * Then a PEB can be within the persistent EBA and the pool.
+ */
+ if (aeb->pnum == new_aeb->pnum) {
+ ubi_assert(aeb->lnum == new_aeb->lnum);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ return 0;
+ }
+
+ cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
+ if (cmp_res < 0)
+ return cmp_res;
+
+ /* new_aeb is newer */
+ if (cmp_res & 1) {
+ victim = kmem_cache_alloc(ai->aeb_slab_cache,
+ GFP_KERNEL);
+ if (!victim)
+ return -ENOMEM;
+
+ victim->ec = aeb->ec;
+ victim->pnum = aeb->pnum;
+ list_add_tail(&victim->u.list, &ai->erase);
+
+ if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
+ av->last_data_size = \
+ be32_to_cpu(new_vh->data_size);
+
+ dbg_bld("vol %i: AEB %i's PEB %i is the newer",
+ av->vol_id, aeb->lnum, new_aeb->pnum);
+
+ aeb->ec = new_aeb->ec;
+ aeb->pnum = new_aeb->pnum;
+ aeb->copy_flag = new_vh->copy_flag;
+ aeb->scrub = new_aeb->scrub;
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ /* new_aeb is older */
+ } else {
+ dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
+ av->vol_id, aeb->lnum, new_aeb->pnum);
+ list_add_tail(&new_aeb->u.list, &ai->erase);
+ }
+
+ return 0;
+ }
+ /* This LEB is new, let's add it to the volume */
+
+ if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
+ av->highest_lnum = be32_to_cpu(new_vh->lnum);
+ av->last_data_size = be32_to_cpu(new_vh->data_size);
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av->used_ebs = be32_to_cpu(new_vh->used_ebs);
+
+ av->leb_count++;
+
+ rb_link_node(&new_aeb->u.rb, parent, p);
+ rb_insert_color(&new_aeb->u.rb, &av->root);
+
+ return 0;
+}
+
+/**
+ * process_pool_aeb - we found a non-empty PEB in a pool.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_vid_hdr *new_vh,
+ struct ubi_ainf_peb *new_aeb)
+{
+ struct ubi_ainf_volume *av, *tmp_av = NULL;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+ int found = 0;
+
+ if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
+ be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ return 0;
+ }
+
+ /* Find the volume this AEB belongs to */
+ while (*p) {
+ parent = *p;
+ tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
+ p = &(*p)->rb_left;
+ else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
+ p = &(*p)->rb_right;
+ else {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found)
+ av = tmp_av;
+ else {
+ ubi_err("orphaned volume in fastmap pool!");
+ return UBI_BAD_FASTMAP;
+ }
+
+ ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
+
+ return update_vol(ubi, ai, av, new_vh, new_aeb);
+}
+
+/**
+ * unmap_peb - unmap a PEB.
+ * If fastmap detects a free PEB in the pool it has to check whether
+ * this PEB has been unmapped after writing the fastmap.
+ *
+ * @ai: UBI attach info object
+ * @pnum: The PEB to be unmapped
+ */
+static void unmap_peb(struct ubi_attach_info *ai, int pnum)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node *node, *node2;
+ struct ubi_ainf_peb *aeb;
+
+ for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
+ av = rb_entry(node, struct ubi_ainf_volume, rb);
+
+ for (node2 = rb_first(&av->root); node2;
+ node2 = rb_next(node2)) {
+ aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
+ if (aeb->pnum == pnum) {
+ rb_erase(&aeb->u.rb, &av->root);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ return;
+ }
+ }
+ }
+}
+
+/**
+ * scan_pool - scans a pool for changed (no longer empty) PEBs.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @pebs: an array of all PEB numbers in the pool to be scanned
+ * @pool_size: size of the pool (number of entries in @pebs)
+ * @max_sqnum: pointer to the maximal sequence number
+ * @eba_orphans: list of PEBs which need to be scanned
+ * @free: list of PEBs which are most likely free (and go into @ai->free)
+ *
+ * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
+ * < 0 indicates an internal error.
+ */
+static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int *pebs, int pool_size, unsigned long long *max_sqnum,
+ struct list_head *eba_orphans, struct list_head *free)
+{
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_ainf_peb *new_aeb, *tmp_aeb;
+ int i, pnum, err, found_orphan, ret = 0;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ return -ENOMEM;
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh) {
+ kfree(ech);
+ return -ENOMEM;
+ }
+
+ dbg_bld("scanning fastmap pool: size = %i", pool_size);
+
+ /*
+ * Now scan all PEBs in the pool to find changes which have been made
+ * after the creation of the fastmap
+ */
+ for (i = 0; i < pool_size; i++) {
+ int scrub = 0;
+ int image_seq;
+
+ pnum = be32_to_cpu(pebs[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ubi_err("bad PEB in fastmap pool!");
+ ret = UBI_BAD_FASTMAP;
+ goto out;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read EC header! PEB:%i err:%i",
+ pnum, err);
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ } else if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ /*
+ * Older UBI implementations have image_seq set to zero, so
+ * we shouldn't fail if image_seq == 0.
+ */
+ image_seq = be32_to_cpu(ech->image_seq);
+
+ if (image_seq && (image_seq != ubi->image_seq)) {
+ ubi_err("bad image seq: 0x%x, expected: 0x%x",
+ be32_to_cpu(ech->image_seq), ubi->image_seq);
+ ret = UBI_BAD_FASTMAP;
+ goto out;
+ }
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
+ unsigned long long ec = be64_to_cpu(ech->ec);
+ unmap_peb(ai, pnum);
+ dbg_bld("Adding PEB to free: %i", pnum);
+ if (err == UBI_IO_FF_BITFLIPS)
+ add_aeb(ai, free, pnum, ec, 1);
+ else
+ add_aeb(ai, free, pnum, ec, 0);
+ continue;
+ } else if (err == 0 || err == UBI_IO_BITFLIPS) {
+ dbg_bld("Found non empty PEB:%i in pool", pnum);
+
+ if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ found_orphan = 0;
+ list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
+ if (tmp_aeb->pnum == pnum) {
+ found_orphan = 1;
+ break;
+ }
+ }
+ if (found_orphan) {
+ list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ }
+
+ new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
+ GFP_KERNEL);
+ if (!new_aeb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ new_aeb->ec = be64_to_cpu(ech->ec);
+ new_aeb->pnum = pnum;
+ new_aeb->lnum = be32_to_cpu(vh->lnum);
+ new_aeb->sqnum = be64_to_cpu(vh->sqnum);
+ new_aeb->copy_flag = vh->copy_flag;
+ new_aeb->scrub = scrub;
+
+ if (*max_sqnum < new_aeb->sqnum)
+ *max_sqnum = new_aeb->sqnum;
+
+ err = process_pool_aeb(ubi, ai, vh, new_aeb);
+ if (err) {
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ }
+ } else {
+ /* We are paranoid and fall back to scanning mode */
+ ubi_err("fastmap pool PEBs contains damaged PEBs!");
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ }
+
+ }
+
+out:
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+ return ret;
+}
+
+/**
+ * count_fastmap_pebs - Counts the PEBs found by fastmap.
+ * @ai: The UBI attach info object
+ */
+static int count_fastmap_pebs(struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb;
+ struct ubi_ainf_volume *av;
+ struct rb_node *rb1, *rb2;
+ int n = 0;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ n++;
+
+ list_for_each_entry(aeb, &ai->free, u.list)
+ n++;
+
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ n++;
+
+ return n;
+}
+
+/**
+ * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info object
+ * @fm: the fastmap to be attached
+ *
+ * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
+ * < 0 indicates an internal error.
+ */
+static int ubi_attach_fastmap(struct ubi_device *ubi,
+ struct ubi_attach_info *ai,
+ struct ubi_fastmap_layout *fm)
+{
+ struct list_head used, eba_orphans, free;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
+ struct ubi_ec_hdr *ech;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmhdr;
+ struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+ struct ubi_fm_ec *fmec;
+ struct ubi_fm_volhdr *fmvhdr;
+ struct ubi_fm_eba *fm_eba;
+ int ret, i, j, pool_size, wl_pool_size;
+ size_t fm_pos = 0, fm_size = ubi->fm_size;
+ unsigned long long max_sqnum = 0;
+ void *fm_raw = ubi->fm_buf;
+
+ INIT_LIST_HEAD(&used);
+ INIT_LIST_HEAD(&free);
+ INIT_LIST_HEAD(&eba_orphans);
+ INIT_LIST_HEAD(&ai->corr);
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
+ ai->volumes = RB_ROOT;
+ ai->min_ec = UBI_MAX_ERASECOUNTER;
+
+ ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
+ sizeof(struct ubi_ainf_peb),
+ 0, 0, NULL);
+ if (!ai->aeb_slab_cache) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ fmsb = (struct ubi_fm_sb *)(fm_raw);
+ ai->max_sqnum = fmsb->sqnum;
+ fm_pos += sizeof(struct ubi_fm_sb);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
+ ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl1);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl2);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ pool_size = be16_to_cpu(fmpl1->size);
+ wl_pool_size = be16_to_cpu(fmpl2->size);
+ fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
+ fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
+
+ if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
+ ubi_err("bad pool size: %i", pool_size);
+ goto fail_bad;
+ }
+
+ if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
+ ubi_err("bad WL pool size: %i", wl_pool_size);
+ goto fail_bad;
+ }
+
+
+ if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_pool_size < 0) {
+ ubi_err("bad maximal pool size: %i", fm->max_pool_size);
+ goto fail_bad;
+ }
+
+ if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_wl_pool_size < 0) {
+ ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
+ goto fail_bad;
+ }
+
+ /* read EC values from free list */
+ for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ }
+
+ /* read EC values from used list */
+ for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ }
+
+ /* read EC values from scrub list */
+ for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ }
+
+ /* read EC values from erase list */
+ for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ }
+
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+ ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
+
+ /* Iterate over all volumes and read their EBA table */
+ for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
+ fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmvhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
+ ubi_err("bad fastmap vol header magic: 0x%x, " \
+ "expected: 0x%x",
+ be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
+ goto fail_bad;
+ }
+
+ av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
+ be32_to_cpu(fmvhdr->used_ebs),
+ be32_to_cpu(fmvhdr->data_pad),
+ fmvhdr->vol_type,
+ be32_to_cpu(fmvhdr->last_eb_bytes));
+
+ if (!av)
+ goto fail_bad;
+
+ ai->vols_found++;
+ if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
+ ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
+
+ fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fm_eba);
+ fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
+ ubi_err("bad fastmap EBA header magic: 0x%x, " \
+ "expected: 0x%x",
+ be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
+ goto fail_bad;
+ }
+
+ for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
+ int pnum = be32_to_cpu(fm_eba->pnum[j]);
+
+ if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
+ continue;
+
+ aeb = NULL;
+ list_for_each_entry(tmp_aeb, &used, u.list) {
+ if (tmp_aeb->pnum == pnum) {
+ aeb = tmp_aeb;
+ break;
+ }
+ }
+
+ /* This can happen if a PEB is already in an EBA known
+ * by this fastmap but the PEB itself is not in the used
+ * list.
+ * In this case the PEB can be within the fastmap pool
+ * or while writing the fastmap it was in the protection
+ * queue.
+ */
+ if (!aeb) {
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache,
+ GFP_KERNEL);
+ if (!aeb) {
+ ret = -ENOMEM;
+
+ goto fail;
+ }
+
+ aeb->lnum = j;
+ aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
+ aeb->ec = -1;
+ aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
+ list_add_tail(&aeb->u.list, &eba_orphans);
+ continue;
+ }
+
+ aeb->lnum = j;
+
+ if (av->highest_lnum <= aeb->lnum)
+ av->highest_lnum = aeb->lnum;
+
+ assign_aeb_to_av(ai, aeb, av);
+
+ dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
+ aeb->pnum, aeb->lnum, av->vol_id);
+ }
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
+ u.list) {
+ int err;
+
+ if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
+ ubi_err("bad PEB in fastmap EBA orphan list");
+ ret = UBI_BAD_FASTMAP;
+ kfree(ech);
+ goto fail;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read EC header! PEB:%i " \
+ "err:%i", tmp_aeb->pnum, err);
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ kfree(ech);
+
+ goto fail;
+ } else if (err == UBI_IO_BITFLIPS)
+ tmp_aeb->scrub = 1;
+
+ tmp_aeb->ec = be64_to_cpu(ech->ec);
+ assign_aeb_to_av(ai, tmp_aeb, av);
+ }
+
+ kfree(ech);
+ }
+
+ ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
+ &eba_orphans, &free);
+ if (ret)
+ goto fail;
+
+ ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
+ &eba_orphans, &free);
+ if (ret)
+ goto fail;
+
+ if (max_sqnum > ai->max_sqnum)
+ ai->max_sqnum = max_sqnum;
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
+ list_move_tail(&tmp_aeb->u.list, &ai->free);
+
+ ubi_assert(list_empty(&used));
+ ubi_assert(list_empty(&eba_orphans));
+ ubi_assert(list_empty(&free));
+
+ /*
+ * If fastmap is leaking PEBs (must not happen), raise a
+ * fat warning and fall back to scanning mode.
+ * We do this here because in ubi_wl_init() it's too late
+ * and we cannot fall back to scanning.
+ */
+ if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
+ ai->bad_peb_count - fm->used_blocks))
+ goto fail_bad;
+
+ return 0;
+
+fail_bad:
+ ret = UBI_BAD_FASTMAP;
+fail:
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
+ list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ }
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
+ list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ }
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
+ list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ }
+
+ return ret;
+}
+
+/**
+ * ubi_scan_fastmap - scan the fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info to be filled
+ * @fm_anchor: The fastmap starts at this PEB
+ *
+ * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
+ * UBI_BAD_FASTMAP if one was found but is not usable.
+ * < 0 indicates an internal error.
+ */
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int fm_anchor)
+{
+ struct ubi_fm_sb *fmsb, *fmsb2;
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_fastmap_layout *fm;
+ int i, used_blocks, pnum, ret = 0;
+ size_t fm_size;
+ __be32 crc, tmp_crc;
+ unsigned long long sqnum = 0;
+
+ mutex_lock(&ubi->fm_mutex);
+ memset(ubi->fm_buf, 0, ubi->fm_size);
+
+ fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
+ if (!fmsb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+ if (!fm) {
+ ret = -ENOMEM;
+ kfree(fmsb);
+ goto out;
+ }
+
+ ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
+ if (ret && ret != UBI_IO_BITFLIPS)
+ goto free_fm_sb;
+ else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[0] = 1;
+
+ if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
+ ubi_err("bad super block magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ if (fmsb->version != UBI_FM_FMT_VERSION) {
+ ubi_err("bad fastmap version: %i, expected: %i",
+ fmsb->version, UBI_FM_FMT_VERSION);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ used_blocks = be32_to_cpu(fmsb->used_blocks);
+ if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
+ ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ fm_size = ubi->leb_size * used_blocks;
+ if (fm_size != ubi->fm_size) {
+ ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
+ ubi->fm_size);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech) {
+ ret = -ENOMEM;
+ goto free_fm_sb;
+ }
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh) {
+ ret = -ENOMEM;
+ goto free_hdr;
+ }
+
+ for (i = 0; i < used_blocks; i++) {
+ int image_seq;
+
+ pnum = be32_to_cpu(fmsb->block_loc[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
+ i, pnum);
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ } else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[i] = 1;
+
+ image_seq = be32_to_cpu(ech->image_seq);
+ if (!ubi->image_seq)
+ ubi->image_seq = image_seq;
+
+ /*
+ * Older UBI implementations have image_seq set to zero, so
+ * we shouldn't fail if image_seq == 0.
+ */
+ if (image_seq && (image_seq != ubi->image_seq)) {
+ ubi_err("wrong image seq:%d instead of %d",
+ be32_to_cpu(ech->image_seq), ubi->image_seq);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i (PEB: %i)",
+ i, pnum);
+ goto free_hdr;
+ }
+
+ if (i == 0) {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
+ ubi_err("bad fastmap anchor vol_id: 0x%x," \
+ " expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_SB_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ } else {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
+ ubi_err("bad fastmap data vol_id: 0x%x," \
+ " expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_DATA_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ }
+
+ if (sqnum < be64_to_cpu(vh->sqnum))
+ sqnum = be64_to_cpu(vh->sqnum);
+
+ ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
+ ubi->leb_start, ubi->leb_size);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i (PEB: %i, " \
+ "err: %i)", i, pnum, ret);
+ goto free_hdr;
+ }
+ }
+
+ kfree(fmsb);
+ fmsb = NULL;
+
+ fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
+ tmp_crc = be32_to_cpu(fmsb2->data_crc);
+ fmsb2->data_crc = 0;
+ crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
+ if (crc != tmp_crc) {
+ ubi_err("fastmap data CRC is invalid");
+ ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ fmsb2->sqnum = sqnum;
+
+ fm->used_blocks = used_blocks;
+
+ ret = ubi_attach_fastmap(ubi, ai, fm);
+ if (ret) {
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ for (i = 0; i < used_blocks; i++) {
+ struct ubi_wl_entry *e;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e) {
+ while (i--)
+ kfree(fm->e[i]);
+
+ ret = -ENOMEM;
+ goto free_hdr;
+ }
+
+ e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
+ e->ec = be32_to_cpu(fmsb2->block_ec[i]);
+ fm->e[i] = e;
+ }
+
+ ubi->fm = fm;
+ ubi->fm_pool.max_size = ubi->fm->max_pool_size;
+ ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
+ ubi_msg("attached by fastmap");
+ ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+ ubi->fm_disabled = 0;
+
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+out:
+ mutex_unlock(&ubi->fm_mutex);
+ if (ret == UBI_BAD_FASTMAP)
+ ubi_err("Attach by fastmap failed, doing a full scan!");
+ return ret;
+
+free_hdr:
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+free_fm_sb:
+ kfree(fmsb);
+ kfree(fm);
+ goto out;
+}
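
Editor's note: ubi_scan_fastmap() above validates the assembled fastmap buffer by saving the on-flash data_crc, zeroing that field, recomputing CRC-32 over the whole buffer seeded with UBI_CRC32_INIT, and comparing. The sketch below mirrors that zero-then-recompute pattern on a toy structure; the local crc32_sketch() (reflected polynomial, all-ones seed, no final inversion) is only assumed to approximate the kernel's crc32() usage here, and the layout is invented.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Bitwise CRC-32, seeded by the caller, no final inversion --
 * a stand-in for the kernel's crc32(). */
static uint32_t crc32_sketch(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return crc;
}

/* Toy layout: a CRC field embedded in the data it protects, as with
 * the fastmap superblock's data_crc. Offsets and sizes are invented. */
struct toy_sb {
	uint32_t magic;
	uint32_t data_crc;
	uint8_t payload[24];
};

int main(void)
{
	struct toy_sb sb = { .magic = 0xa0a1a2a3, .payload = "fastmap payload" };
	uint32_t stored, calc;

	/* Writer side: CRC is computed with the field itself zeroed. */
	sb.data_crc = 0;
	sb.data_crc = crc32_sketch(0xFFFFFFFFu, &sb, sizeof(sb));

	/* Reader side, as in ubi_scan_fastmap(): save, zero, recompute. */
	stored = sb.data_crc;
	sb.data_crc = 0;
	calc = crc32_sketch(0xFFFFFFFFu, &sb, sizeof(sb));
	printf(calc == stored ? "fastmap CRC ok\n" : "fastmap CRC mismatch\n");
	return calc != stored;
}
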
+
+/**
+ * ubi_write_fastmap - writes a fastmap.
+ * @ubi: UBI device object
+ * @new_fm: the fastmap to be written
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int ubi_write_fastmap(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *new_fm)
+{
+ size_t fm_pos = 0;
+ void *fm_raw;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmh;
+ struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+ struct ubi_fm_ec *fec;
+ struct ubi_fm_volhdr *fvh;
+ struct ubi_fm_eba *feba;
+ struct rb_node *node;
+ struct ubi_wl_entry *wl_e;
+ struct ubi_volume *vol;
+ struct ubi_vid_hdr *avhdr, *dvhdr;
+ struct ubi_work *ubi_wrk;
+ int ret, i, j, free_peb_count, used_peb_count, vol_count;
+ int scrub_peb_count, erase_peb_count;
+
+ fm_raw = ubi->fm_buf;
+ memset(ubi->fm_buf, 0, ubi->fm_size);
+
+ avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!avhdr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
+ if (!dvhdr) {
+ ret = -ENOMEM;
+ goto out_kfree;
+ }
+
+ spin_lock(&ubi->volumes_lock);
+ spin_lock(&ubi->wl_lock);
+
+ fmsb = (struct ubi_fm_sb *)fm_raw;
+ fm_pos += sizeof(*fmsb);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmh);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
+ fmsb->version = UBI_FM_FMT_VERSION;
+ fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
+ /* the max sqnum will be filled in while *reading* the fastmap */
+ fmsb->sqnum = 0;
+
+ fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
+ free_peb_count = 0;
+ used_peb_count = 0;
+ scrub_peb_count = 0;
+ erase_peb_count = 0;
+ vol_count = 0;
+
+ fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl1);
+ fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
+ fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
+
+ for (i = 0; i < ubi->fm_pool.size; i++)
+ fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+
+ fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl2);
+ fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
+ fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
+
+ for (i = 0; i < ubi->fm_wl_pool.size; i++)
+ fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
+
+ for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
+ wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ free_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->free_peb_count = cpu_to_be32(free_peb_count);
+
+ for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
+ wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ used_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->used_peb_count = cpu_to_be32(used_peb_count);
+
+ for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
+ wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ scrub_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
+
+
+ list_for_each_entry(ubi_wrk, &ubi->works, list) {
+ if (ubi_is_erase_work(ubi_wrk)) {
+ wl_e = ubi_wrk->e;
+ ubi_assert(wl_e);
+
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ erase_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ }
+ fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
+
+ for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
+ vol = ubi->volumes[i];
+
+ if (!vol)
+ continue;
+
+ vol_count++;
+
+ fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fvh);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
+ fvh->vol_id = cpu_to_be32(vol->vol_id);
+ fvh->vol_type = vol->vol_type;
+ fvh->used_ebs = cpu_to_be32(vol->used_ebs);
+ fvh->data_pad = cpu_to_be32(vol->data_pad);
+ fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
+
+ ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
+ vol->vol_type == UBI_STATIC_VOLUME);
+
+ feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
+
+ feba->reserved_pebs = cpu_to_be32(j);
+ feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
+ }
+ fmh->vol_count = cpu_to_be32(vol_count);
+ fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
+
+ avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ avhdr->lnum = 0;
+
+ spin_unlock(&ubi->wl_lock);
+ spin_unlock(&ubi->volumes_lock);
+
+ dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
+ if (ret) {
+ ubi_err("unable to write vid_hdr to fastmap SB!");
+ goto out_kfree;
+ }
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
+ fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
+ }
+
+ fmsb->data_crc = 0;
+ fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
+ ubi->fm_size));
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ dvhdr->lnum = cpu_to_be32(i);
+ dbg_bld("writing fastmap data to PEB %i sqnum %llu",
+ new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
+ if (ret) {
+ ubi_err("unable to write vid_hdr to PEB %i!",
+ new_fm->e[i]->pnum);
+ goto out_kfree;
+ }
+ }
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
+ new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
+ if (ret) {
+ ubi_err("unable to write fastmap to PEB %i!",
+ new_fm->e[i]->pnum);
+ goto out_kfree;
+ }
+ }
+
+ ubi_assert(new_fm);
+ ubi->fm = new_fm;
+
+ dbg_bld("fastmap written!");
+
+out_kfree:
+ ubi_free_vid_hdr(ubi, avhdr);
+ ubi_free_vid_hdr(ubi, dvhdr);
+out:
+ return ret;
+}
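ubi_write_fastmap serializes the superblock, header, pools and per-PEB records back to back into fm_buf, advancing fm_pos after each record and converting every field to big-endian before the buffer hits the flash. A stand-alone sketch of that append-with-running-offset pattern follows; struct rec, put_be32() and the buffer size are invented for illustration and do not correspond to the UBI on-flash layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

/* Store a host-order value as big-endian, independent of host byte order. */
static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

struct rec {            /* hypothetical on-flash record: PEB number + erase counter */
	uint8_t pnum[4];
	uint8_t ec[4];
};

int main(void)
{
	uint8_t buf[256];
	size_t pos = 0;
	const uint32_t pebs[3] = { 17, 42, 99 };

	memset(buf, 0, sizeof(buf));

	for (size_t i = 0; i < 3; i++) {
		struct rec *r = (struct rec *)(buf + pos);

		put_be32(r->pnum, pebs[i]);
		put_be32(r->ec, 100 + (uint32_t)i);
		pos += sizeof(*r);
		assert(pos <= sizeof(buf));   /* mirrors ubi_assert(fm_pos <= ubi->fm_size) */
	}
	printf("serialized %zu bytes for 3 records\n", pos);
	return 0;
}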
+
+/**
+ * erase_block - Manually erase a PEB.
+ * @ubi: UBI device object
+ * @pnum: PEB to be erased
+ *
+ * Returns the new EC value on success, < 0 indicates an internal error.
+ */
+static int erase_block(struct ubi_device *ubi, int pnum)
+{
+ int ret;
+ struct ubi_ec_hdr *ec_hdr;
+ long long ec;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
+ if (ret < 0)
+ goto out;
+ else if (ret && ret != UBI_IO_BITFLIPS) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ubi_io_sync_erase(ubi, pnum, 0);
+ if (ret < 0)
+ goto out;
+
+ ec = be64_to_cpu(ec_hdr->ec);
+ ec += ret;
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ec_hdr->ec = cpu_to_be64(ec);
+ ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+ if (ret < 0)
+ goto out;
+
+ ret = ec;
+out:
+ kfree(ec_hdr);
+ return ret;
+}
+
+/**
+ * invalidate_fastmap - destroys a fastmap.
+ * @ubi: UBI device object
+ * @fm: the fastmap to be destroyed
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int invalidate_fastmap(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *fm)
+{
+ int ret;
+ struct ubi_vid_hdr *vh;
+
+ ret = erase_block(ubi, fm->e[0]->pnum);
+ if (ret < 0)
+ return ret;
+
+ vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!vh)
+ return -ENOMEM;
+
+ /* deleting the current fastmap SB is not enough, an old SB may exist,
+ * so create a (corrupted) SB such that fastmap will find it and fall
+ * back to scanning mode in any case */
+ vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
+
+ return ret;
+}
+
+/**
+ * ubi_update_fastmap - update the fastmap; called by UBI when a volume changes
+ * or a fastmap pool becomes full.
+ * @ubi: UBI device object
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+int ubi_update_fastmap(struct ubi_device *ubi)
+{
+ int ret, i;
+ struct ubi_fastmap_layout *new_fm, *old_fm;
+ struct ubi_wl_entry *tmp_e;
+
+ mutex_lock(&ubi->fm_mutex);
+
+ ubi_refill_pools(ubi);
+
+ if (ubi->ro_mode || ubi->fm_disabled) {
+ mutex_unlock(&ubi->fm_mutex);
+ return 0;
+ }
+
+ ret = ubi_ensure_anchor_pebs(ubi);
+ if (ret) {
+ mutex_unlock(&ubi->fm_mutex);
+ return ret;
+ }
+
+ new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+ if (!new_fm) {
+ mutex_unlock(&ubi->fm_mutex);
+ return -ENOMEM;
+ }
+
+ new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!new_fm->e[i]) {
+ while (i--)
+ kfree(new_fm->e[i]);
+
+ kfree(new_fm);
+ mutex_unlock(&ubi->fm_mutex);
+ return -ENOMEM;
+ }
+ }
+
+ old_fm = ubi->fm;
+ ubi->fm = NULL;
+
+ if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
+ ubi_err("fastmap too large");
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ spin_lock(&ubi->wl_lock);
+ tmp_e = ubi_wl_get_fm_peb(ubi, 0);
+ spin_unlock(&ubi->wl_lock);
+
+ if (!tmp_e && !old_fm) {
+ int j;
+ ubi_err("could not get any free erase block");
+
+ for (j = 1; j < i; j++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
+
+ ret = -ENOSPC;
+ goto err;
+ } else if (!tmp_e && old_fm) {
+ ret = erase_block(ubi, old_fm->e[i]->pnum);
+ if (ret < 0) {
+ int j;
+
+ for (j = 1; j < i; j++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j],
+ j, 0);
+
+ ubi_err("could not erase old fastmap PEB");
+ goto err;
+ }
+
+ new_fm->e[i]->pnum = old_fm->e[i]->pnum;
+ new_fm->e[i]->ec = old_fm->e[i]->ec;
+ } else {
+ new_fm->e[i]->pnum = tmp_e->pnum;
+ new_fm->e[i]->ec = tmp_e->ec;
+
+ if (old_fm)
+ ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
+ old_fm->to_be_tortured[i]);
+ }
+ }
+
+ spin_lock(&ubi->wl_lock);
+ tmp_e = ubi_wl_get_fm_peb(ubi, 1);
+ spin_unlock(&ubi->wl_lock);
+
+ if (old_fm) {
+ /* no fresh anchor PEB was found, reuse the old one */
+ if (!tmp_e) {
+ ret = erase_block(ubi, old_fm->e[0]->pnum);
+ if (ret < 0) {
+ int i;
+ ubi_err("could not erase old anchor PEB");
+
+ for (i = 1; i < new_fm->used_blocks; i++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[i],
+ i, 0);
+ goto err;
+ }
+
+ new_fm->e[0]->pnum = old_fm->e[0]->pnum;
+ new_fm->e[0]->ec = ret;
+ } else {
+ /* we've got a new anchor PEB, return the old one */
+ ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
+ old_fm->to_be_tortured[0]);
+
+ new_fm->e[0]->pnum = tmp_e->pnum;
+ new_fm->e[0]->ec = tmp_e->ec;
+ }
+ } else {
+ if (!tmp_e) {
+ int i;
+ ubi_err("could not find any anchor PEB");
+
+ for (i = 1; i < new_fm->used_blocks; i++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
+
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ new_fm->e[0]->pnum = tmp_e->pnum;
+ new_fm->e[0]->ec = tmp_e->ec;
+ }
+
+ down_write(&ubi->work_sem);
+ down_write(&ubi->fm_sem);
+ ret = ubi_write_fastmap(ubi, new_fm);
+ up_write(&ubi->fm_sem);
+ up_write(&ubi->work_sem);
+
+ if (ret)
+ goto err;
+
+out_unlock:
+ mutex_unlock(&ubi->fm_mutex);
+ kfree(old_fm);
+ return ret;
+
+err:
+ kfree(new_fm);
+
+ ubi_warn("Unable to write new fastmap, err=%i", ret);
+
+ ret = 0;
+ if (old_fm) {
+ ret = invalidate_fastmap(ubi, old_fm);
+ if (ret < 0)
+ ubi_err("Unable to invalidiate current fastmap!");
+ else if (ret)
+ ret = 0;
+ }
+ goto out_unlock;
+}
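When picking PEBs for the new fastmap, the update path prefers a fresh block from the wear-levelling pool, falls back to erasing and reusing the block the old fastmap occupied, and only reports -ENOSPC when neither is available. The sketch below models that three-way fallback with stub functions (get_free_peb, erase_peb); they merely stand in for ubi_wl_get_fm_peb() and erase_block() and are not real UBI calls.

#include <stdio.h>

#define NO_PEB (-1)

/* Stand-in stubs: a real implementation would ask the wear-levelling
 * subsystem for a block and physically erase the old one. */
static int get_free_peb(void)  { return NO_PEB; }          /* pretend the pool is empty */
static int erase_peb(int pnum) { return pnum >= 0 ? 0 : -1; }

/* Pick the PEB for one fastmap block: fresh block first, then the block the
 * previous fastmap used, otherwise report that there is no space. */
static int pick_fm_peb(int old_peb)
{
	int pnum = get_free_peb();

	if (pnum != NO_PEB)
		return pnum;             /* fresh PEB, the old one can be recycled */
	if (old_peb != NO_PEB && erase_peb(old_peb) == 0)
		return old_peb;          /* reuse the old fastmap PEB in place */
	return NO_PEB;                   /* caller maps this to -ENOSPC */
}

int main(void)
{
	printf("chosen PEB: %d\n", pick_fm_peb(7));       /* reuses old PEB 7 */
	printf("chosen PEB: %d\n", pick_fm_peb(NO_PEB));  /* nothing left: -1 */
	return 0;
}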
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 941bc3c05d6..b93807b4c45 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -41,7 +41,7 @@
#include "ubi-media.h"
#define err_msg(fmt, ...) \
- printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \
+ pr_err("gluebi (pid %d): %s: " fmt "\n", \
current->pid, __func__, ##__VA_ARGS__)
/**
@@ -171,21 +171,17 @@ static void gluebi_put_device(struct mtd_info *mtd)
static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, unsigned char *buf)
{
- int err = 0, lnum, offs, total_read;
+ int err = 0, lnum, offs, bytes_left;
struct gluebi_device *gluebi;
- if (len < 0 || from < 0 || from + len > mtd->size)
- return -EINVAL;
-
gluebi = container_of(mtd, struct gluebi_device, mtd);
-
lnum = div_u64_rem(from, mtd->erasesize, &offs);
- total_read = len;
- while (total_read) {
+ bytes_left = len;
+ while (bytes_left) {
size_t to_read = mtd->erasesize - offs;
- if (to_read > total_read)
- to_read = total_read;
+ if (to_read > bytes_left)
+ to_read = bytes_left;
err = ubi_read(gluebi->desc, lnum, buf, offs, to_read);
if (err)
@@ -193,11 +189,11 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
lnum += 1;
offs = 0;
- total_read -= to_read;
+ bytes_left -= to_read;
buf += to_read;
}
- *retlen = len - total_read;
+ *retlen = len - bytes_left;
return err;
}
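gluebi_read maps the MTD byte offset onto a starting LEB plus an in-block offset and then walks the request one eraseblock at a time, always restarting at offset 0 in the next block. Here is a user-space sketch of the same chunking loop, assuming a fixed 4 KiB eraseblock and a hypothetical read_leb() helper in place of ubi_read().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ERASESIZE 4096u   /* assumed eraseblock size for the sketch */

/* Hypothetical per-block read: here it just fills the buffer with the LEB number. */
static int read_leb(int lnum, void *buf, uint32_t offs, uint32_t len)
{
	(void)offs;
	memset(buf, lnum & 0xff, len);
	return 0;
}

/* Read `len` bytes starting at byte offset `from`, crossing LEB boundaries. */
static int chunked_read(uint64_t from, uint8_t *buf, size_t len, size_t *retlen)
{
	int err = 0, lnum = (int)(from / ERASESIZE);
	uint32_t offs = (uint32_t)(from % ERASESIZE);
	size_t left = len;

	while (left) {
		size_t chunk = ERASESIZE - offs;

		if (chunk > left)
			chunk = left;
		err = read_leb(lnum, buf, offs, (uint32_t)chunk);
		if (err)
			break;
		lnum++;          /* the next block always starts at offset 0 */
		offs = 0;
		left -= chunk;
		buf += chunk;
	}
	*retlen = len - left;    /* bytes actually read, even on partial failure */
	return err;
}

int main(void)
{
	uint8_t out[10000];
	size_t done;

	chunked_read(3000, out, sizeof(out), &done);
	printf("read %zu bytes starting inside LEB 0\n", done);
	return 0;
}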
@@ -215,40 +211,33 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
- int err = 0, lnum, offs, total_written;
+ int err = 0, lnum, offs, bytes_left;
struct gluebi_device *gluebi;
- if (len < 0 || to < 0 || len + to > mtd->size)
- return -EINVAL;
-
gluebi = container_of(mtd, struct gluebi_device, mtd);
-
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
-
lnum = div_u64_rem(to, mtd->erasesize, &offs);
if (len % mtd->writesize || offs % mtd->writesize)
return -EINVAL;
- total_written = len;
- while (total_written) {
+ bytes_left = len;
+ while (bytes_left) {
size_t to_write = mtd->erasesize - offs;
- if (to_write > total_written)
- to_write = total_written;
+ if (to_write > bytes_left)
+ to_write = bytes_left;
- err = ubi_write(gluebi->desc, lnum, buf, offs, to_write);
+ err = ubi_leb_write(gluebi->desc, lnum, buf, offs, to_write);
if (err)
break;
lnum += 1;
offs = 0;
- total_written -= to_write;
+ bytes_left -= to_write;
buf += to_write;
}
- *retlen = len - total_written;
+ *retlen = len - bytes_left;
return err;
}
@@ -265,21 +254,13 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
int err, i, lnum, count;
struct gluebi_device *gluebi;
- if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
- return -EINVAL;
- if (instr->len < 0 || instr->addr + instr->len > mtd->size)
- return -EINVAL;
if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
return -EINVAL;
lnum = mtd_div_by_eb(instr->addr, mtd);
count = mtd_div_by_eb(instr->len, mtd);
-
gluebi = container_of(mtd, struct gluebi_device, mtd);
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
-
for (i = 0; i < count - 1; i++) {
err = ubi_leb_unmap(gluebi->desc, lnum + i);
if (err)
@@ -340,11 +321,11 @@ static int gluebi_create(struct ubi_device_info *di,
mtd->owner = THIS_MODULE;
mtd->writesize = di->min_io_size;
mtd->erasesize = vi->usable_leb_size;
- mtd->read = gluebi_read;
- mtd->write = gluebi_write;
- mtd->erase = gluebi_erase;
- mtd->get_device = gluebi_get_device;
- mtd->put_device = gluebi_put_device;
+ mtd->_read = gluebi_read;
+ mtd->_write = gluebi_write;
+ mtd->_erase = gluebi_erase;
+ mtd->_get_device = gluebi_get_device;
+ mtd->_put_device = gluebi_put_device;
/*
* In case of a dynamic volume, MTD device size is just volume size. In
@@ -360,9 +341,8 @@ static int gluebi_create(struct ubi_device_info *di,
mutex_lock(&devices_mutex);
g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (g)
- err_msg("gluebi MTD device %d form UBI device %d volume %d "
- "already exists", g->mtd.index, vi->ubi_num,
- vi->vol_id);
+ err_msg("gluebi MTD device %d form UBI device %d volume %d already exists",
+ g->mtd.index, vi->ubi_num, vi->vol_id);
mutex_unlock(&devices_mutex);
if (mtd_device_register(mtd, NULL, 0)) {
@@ -395,8 +375,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
mutex_lock(&devices_mutex);
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
- err_msg("got remove notification for unknown UBI device %d "
- "volume %d", vi->ubi_num, vi->vol_id);
+ err_msg("got remove notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
err = -ENOENT;
} else if (gluebi->refcnt)
err = -EBUSY;
@@ -409,9 +389,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
mtd = &gluebi->mtd;
err = mtd_device_unregister(mtd);
if (err) {
- err_msg("cannot remove fake MTD device %d, UBI device %d, "
- "volume %d, error %d", mtd->index, gluebi->ubi_num,
- gluebi->vol_id, err);
+ err_msg("cannot remove fake MTD device %d, UBI device %d, volume %d, error %d",
+ mtd->index, gluebi->ubi_num, gluebi->vol_id, err);
mutex_lock(&devices_mutex);
list_add_tail(&gluebi->list, &gluebi_devices);
mutex_unlock(&devices_mutex);
@@ -441,8 +420,8 @@ static int gluebi_updated(struct ubi_volume_info *vi)
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
mutex_unlock(&devices_mutex);
- err_msg("got update notification for unknown UBI device %d "
- "volume %d", vi->ubi_num, vi->vol_id);
+ err_msg("got update notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
return -ENOENT;
}
@@ -468,8 +447,8 @@ static int gluebi_resized(struct ubi_volume_info *vi)
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
mutex_unlock(&devices_mutex);
- err_msg("got update notification for unknown UBI device %d "
- "volume %d", vi->ubi_num, vi->vol_id);
+ err_msg("got update notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
return -ENOENT;
}
gluebi->mtd.size = vi->used_bytes;
@@ -526,9 +505,9 @@ static void __exit ubi_gluebi_exit(void)
err = mtd_device_unregister(mtd);
if (err)
- err_msg("error %d while removing gluebi MTD device %d, "
- "UBI device %d, volume %d - ignoring", err,
- mtd->index, gluebi->ubi_num, gluebi->vol_id);
+ err_msg("error %d while removing gluebi MTD device %d, UBI device %d, volume %d - ignoring",
+ err, mtd->index, gluebi->ubi_num,
+ gluebi->vol_id);
kfree(mtd->name);
kfree(gluebi);
}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 8c1b1c7bc4a..d36134925d3 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -91,21 +91,15 @@
#include <linux/slab.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_ec_hdr *ec_hdr);
-static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_vid_hdr *vid_hdr);
-#else
-#define paranoid_check_not_bad(ubi, pnum) 0
-#define paranoid_check_peb_ec_hdr(ubi, pnum) 0
-#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0
-#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
-#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
-#endif
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr);
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr);
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len);
/**
* ubi_io_read - read data from a physical eraseblock.
@@ -142,7 +136,7 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
ubi_assert(len > 0);
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err)
return err;
@@ -170,11 +164,11 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err) {
- const char *errstr = (err == -EBADMSG) ? " (ECC error)" : "";
+ const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
- if (err == -EUCLEAN) {
+ if (mtd_is_bitflip(err)) {
/*
* -EUCLEAN is reported if there was a bit-flip which
* was corrected, so this is harmless.
@@ -183,36 +177,35 @@ retry:
* enabled. A corresponding message will be printed
* later, when it has been scrubbed.
*/
- dbg_msg("fixable bit-flip detected at PEB %d", pnum);
+ ubi_msg("fixable bit-flip detected at PEB %d", pnum);
ubi_assert(len == read);
return UBI_IO_BITFLIPS;
}
if (retries++ < UBI_IO_RETRIES) {
- dbg_io("error %d%s while reading %d bytes from PEB "
- "%d:%d, read only %zd bytes, retry",
- err, errstr, len, pnum, offset, read);
+ ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
+ err, errstr, len, pnum, offset, read);
yield();
goto retry;
}
- ubi_err("error %d%s while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, errstr, len, pnum, offset, read);
- ubi_dbg_dump_stack();
+ ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, errstr, len, pnum, offset, read);
+ dump_stack();
/*
* The driver should never return -EBADMSG if it failed to read
* all the requested data. But some buggy drivers might do
* this, so we change it to -EIO.
*/
- if (read != len && err == -EBADMSG) {
+ if (read != len && mtd_is_eccerr(err)) {
ubi_assert(0);
err = -EIO;
}
} else {
ubi_assert(len == read);
- if (ubi_dbg_is_bitflip()) {
+ if (ubi_dbg_is_bitflip(ubi)) {
dbg_gen("bit-flip (emulated)");
err = UBI_IO_BITFLIPS;
}
@@ -257,14 +250,12 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
return -EROFS;
}
- /* The below has to be compiled out if paranoid checks are disabled */
-
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err)
return err;
/* The area we are writing to has to contain all 0xFF bytes */
- err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
if (err)
return err;
@@ -273,33 +264,33 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
* We write to the data area of the physical eraseblock. Make
* sure it has valid EC and VID headers.
*/
- err = paranoid_check_peb_ec_hdr(ubi, pnum);
+ err = self_check_peb_ec_hdr(ubi, pnum);
if (err)
return err;
- err = paranoid_check_peb_vid_hdr(ubi, pnum);
+ err = self_check_peb_vid_hdr(ubi, pnum);
if (err)
return err;
}
- if (ubi_dbg_is_write_failure()) {
- dbg_err("cannot write %d bytes to PEB %d:%d "
- "(emulated)", len, pnum, offset);
- ubi_dbg_dump_stack();
+ if (ubi_dbg_is_write_failure(ubi)) {
+ ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
+ len, pnum, offset);
+ dump_stack();
return -EIO;
}
addr = (loff_t)pnum * ubi->peb_size + offset;
- err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf);
+ err = mtd_write(ubi->mtd, addr, len, &written, buf);
if (err) {
- ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
- "%zd bytes", err, len, pnum, offset, written);
- ubi_dbg_dump_stack();
- ubi_dbg_dump_flash(ubi, pnum, offset, len);
+ ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
+ err, len, pnum, offset, written);
+ dump_stack();
+ ubi_dump_flash(ubi, pnum, offset, len);
} else
ubi_assert(written == len);
if (!err) {
- err = ubi_dbg_check_write(ubi, buf, pnum, offset, len);
+ err = self_check_write(ubi, buf, pnum, offset, len);
if (err)
return err;
@@ -310,7 +301,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
offset += len;
len = ubi->peb_size - offset;
if (len)
- err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
}
return err;
@@ -361,16 +352,16 @@ retry:
ei.callback = erase_callback;
ei.priv = (unsigned long)&wq;
- err = ubi->mtd->erase(ubi->mtd, &ei);
+ err = mtd_erase(ubi->mtd, &ei);
if (err) {
if (retries++ < UBI_IO_RETRIES) {
- dbg_io("error %d while erasing PEB %d, retry",
- err, pnum);
+ ubi_warn("error %d while erasing PEB %d, retry",
+ err, pnum);
yield();
goto retry;
}
ubi_err("cannot erase PEB %d, error %d", pnum, err);
- ubi_dbg_dump_stack();
+ dump_stack();
return err;
}
@@ -383,21 +374,21 @@ retry:
if (ei.state == MTD_ERASE_FAILED) {
if (retries++ < UBI_IO_RETRIES) {
- dbg_io("error while erasing PEB %d, retry", pnum);
+ ubi_warn("error while erasing PEB %d, retry", pnum);
yield();
goto retry;
}
ubi_err("cannot erase PEB %d", pnum);
- ubi_dbg_dump_stack();
+ dump_stack();
return -EIO;
}
- err = ubi_dbg_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+ err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
if (err)
return err;
- if (ubi_dbg_is_erase_failure()) {
- dbg_err("cannot erase PEB %d (emulated)", pnum);
+ if (ubi_dbg_is_erase_failure(ubi)) {
+ ubi_err("cannot erase PEB %d (emulated)", pnum);
return -EIO;
}
@@ -431,11 +422,11 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
goto out;
/* Make sure the PEB contains only 0xFF bytes */
- err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
if (err)
goto out;
- err = ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size);
+ err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
if (err == 0) {
ubi_err("erased PEB %d, but a non-0xFF byte found",
pnum);
@@ -444,17 +435,17 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
}
/* Write a pattern and check it */
- memset(ubi->peb_buf1, patterns[i], ubi->peb_size);
- err = ubi_io_write(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+ memset(ubi->peb_buf, patterns[i], ubi->peb_size);
+ err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
if (err)
goto out;
- memset(ubi->peb_buf1, ~patterns[i], ubi->peb_size);
- err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+ memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
if (err)
goto out;
- err = ubi_check_pattern(ubi->peb_buf1, patterns[i],
+ err = ubi_check_pattern(ubi->peb_buf, patterns[i],
ubi->peb_size);
if (err == 0) {
ubi_err("pattern %x checking failed for PEB %d",
@@ -469,7 +460,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
out:
mutex_unlock(&ubi->buf_mutex);
- if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
/*
* If a bit-flip or data integrity error was detected, the test
* has not passed because it happened on a freshly erased
@@ -504,10 +495,12 @@ out:
*/
static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
{
- int err, err1;
+ int err;
size_t written;
loff_t addr;
uint32_t data = 0;
+ struct ubi_ec_hdr ec_hdr;
+
/*
* Note, we cannot generally define VID header buffers on stack,
* because of the way we deal with these buffers (see the header
@@ -518,53 +511,39 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
struct ubi_vid_hdr vid_hdr;
/*
+ * If VID or EC is valid, we have to corrupt them before erasing.
* It is important to first invalidate the EC header, and then the VID
* header. Otherwise a power cut may lead to valid EC header and
* invalid VID header, in which case UBI will treat this PEB as
- * corrupted and will try to preserve it, and print scary warnings (see
- * the header comment in scan.c for more information).
+ * corrupted and will try to preserve it, and print scary warnings.
*/
addr = (loff_t)pnum * ubi->peb_size;
- err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
- if (!err) {
- addr += ubi->vid_hdr_aloffset;
- err = ubi->mtd->write(ubi->mtd, addr, 4, &written,
- (void *)&data);
- if (!err)
- return 0;
+ err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
+ if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+ err != UBI_IO_FF) {
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+ if (err)
+ goto error;
}
- /*
- * We failed to write to the media. This was observed with Spansion
- * S29GL512N NOR flash. Most probably the previously eraseblock erasure
- * was interrupted at a very inappropriate moment, so it became
- * unwritable. In this case we probably anyway have garbage in this
- * PEB.
- */
- err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
- if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
- err1 == UBI_IO_FF) {
- struct ubi_ec_hdr ec_hdr;
-
- err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
- if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
- err1 == UBI_IO_FF)
- /*
- * Both VID and EC headers are corrupted, so we can
- * safely erase this PEB and not afraid that it will be
- * treated as a valid PEB in case of an unclean reboot.
- */
- return 0;
+ err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
+ if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+ err != UBI_IO_FF) {
+ addr += ubi->vid_hdr_aloffset;
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+ if (err)
+ goto error;
}
+ return 0;
+error:
/*
- * The PEB contains a valid VID header, but we cannot invalidate it.
- * Supposedly the flash media or the driver is screwed up, so return an
- * error.
+ * The PEB contains a valid VID or EC header, but we cannot invalidate
+ * it. Supposedly the flash media or the driver is screwed up, so
+ * return an error.
*/
- ubi_err("cannot invalidate PEB %d, write returned %d read returned %d",
- pnum, err, err1);
- ubi_dbg_dump_flash(ubi, pnum, 0, ubi->peb_size);
+ ubi_err("cannot invalidate PEB %d, write returned %d", pnum, err);
+ ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
return -EIO;
}
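The comment in nor_erase_prepare stresses the invalidation order: corrupt the EC header first, then the VID header, so an interrupted invalidation can never leave a block with a valid EC header and an invalid VID header, which the scanner would preserve as "corrupted". A toy model of that ordering argument follows, with each header reduced to a single validity flag; looks_corrupted() and the power-cut switch are purely illustrative.

#include <stdio.h>
#include <stdbool.h>

struct peb { bool ec_valid; bool vid_valid; };

/* The state the comment warns about: a valid EC header with an invalid VID
 * header makes the block look corrupted, so the scanner preserves it. */
static bool looks_corrupted(const struct peb *p)
{
	return p->ec_valid && !p->vid_valid;
}

/* Corrupt one header, optionally "lose power" before the second one. */
static struct peb invalidate(bool ec_first, bool power_cut_between)
{
	struct peb p = { true, true };

	if (ec_first)
		p.ec_valid = false;
	else
		p.vid_valid = false;
	if (!power_cut_between) {
		p.ec_valid = false;
		p.vid_valid = false;
	}
	return p;
}

int main(void)
{
	struct peb good = invalidate(true, true);    /* EC first, then power cut */
	struct peb bad  = invalidate(false, true);   /* VID first, then power cut */

	printf("EC first : looks corrupted = %d\n", looks_corrupted(&good));
	printf("VID first: looks corrupted = %d\n", looks_corrupted(&bad));
	return 0;
}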
@@ -590,7 +569,7 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err != 0)
return err;
@@ -635,7 +614,7 @@ int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
if (ubi->bad_allowed) {
int ret;
- ret = mtd->block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
+ ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
if (ret < 0)
ubi_err("error %d while checking if PEB %d is bad",
ret, pnum);
@@ -670,7 +649,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
if (!ubi->bad_allowed)
return 0;
- err = mtd->block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
+ err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
if (err)
ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
return err;
@@ -695,8 +674,7 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
leb_start = be32_to_cpu(ec_hdr->data_offset);
if (ec_hdr->version != UBI_VERSION) {
- ubi_err("node with incompatible UBI version found: "
- "this UBI version is %d, image version is %d",
+ ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
UBI_VERSION, (int)ec_hdr->version);
goto bad;
}
@@ -722,8 +700,8 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
bad:
ubi_err("bad EC header");
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
return 1;
}
@@ -760,7 +738,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
if (read_err) {
- if (read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG)
+ if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
return read_err;
/*
@@ -776,7 +754,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
magic = be32_to_cpu(ec_hdr->magic);
if (magic != UBI_EC_HDR_MAGIC) {
- if (read_err == -EBADMSG)
+ if (mtd_is_eccerr(read_err))
return UBI_IO_BAD_HDR_EBADMSG;
/*
@@ -787,10 +765,10 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
/* The physical eraseblock is supposedly empty */
if (verbose)
- ubi_warn("no EC header found at PEB %d, "
- "only 0xFF bytes", pnum);
- dbg_bld("no EC header found at PEB %d, "
- "only 0xFF bytes", pnum);
+ ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
if (!read_err)
return UBI_IO_FF;
else
@@ -802,12 +780,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
* 0xFF bytes. Report that the header is corrupted.
*/
if (verbose) {
- ubi_warn("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
- ubi_dbg_dump_ec_hdr(ec_hdr);
+ ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
+ ubi_dump_ec_hdr(ec_hdr);
}
- dbg_bld("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
return UBI_IO_BAD_HDR;
}
@@ -816,12 +794,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (hdr_crc != crc) {
if (verbose) {
- ubi_warn("bad EC header CRC at PEB %d, calculated "
- "%#08x, read %#08x", pnum, crc, hdr_crc);
- ubi_dbg_dump_ec_hdr(ec_hdr);
+ ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_dump_ec_hdr(ec_hdr);
}
- dbg_bld("bad EC header CRC at PEB %d, calculated "
- "%#08x, read %#08x", pnum, crc, hdr_crc);
+ dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
if (!read_err)
return UBI_IO_BAD_HDR;
@@ -875,7 +853,7 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
ec_hdr->hdr_crc = cpu_to_be32(crc);
- err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
if (err)
return err;
@@ -906,40 +884,40 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
int usable_leb_size = ubi->leb_size - data_pad;
if (copy_flag != 0 && copy_flag != 1) {
- dbg_err("bad copy_flag");
+ ubi_err("bad copy_flag");
goto bad;
}
if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
data_pad < 0) {
- dbg_err("negative values");
+ ubi_err("negative values");
goto bad;
}
if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
- dbg_err("bad vol_id");
+ ubi_err("bad vol_id");
goto bad;
}
if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
- dbg_err("bad compat");
+ ubi_err("bad compat");
goto bad;
}
if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
compat != UBI_COMPAT_REJECT) {
- dbg_err("bad compat");
+ ubi_err("bad compat");
goto bad;
}
if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
- dbg_err("bad vol_type");
+ ubi_err("bad vol_type");
goto bad;
}
if (data_pad >= ubi->leb_size / 2) {
- dbg_err("bad data_pad");
+ ubi_err("bad data_pad");
goto bad;
}
@@ -951,45 +929,45 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
* mapped logical eraseblocks.
*/
if (used_ebs == 0) {
- dbg_err("zero used_ebs");
+ ubi_err("zero used_ebs");
goto bad;
}
if (data_size == 0) {
- dbg_err("zero data_size");
+ ubi_err("zero data_size");
goto bad;
}
if (lnum < used_ebs - 1) {
if (data_size != usable_leb_size) {
- dbg_err("bad data_size");
+ ubi_err("bad data_size");
goto bad;
}
} else if (lnum == used_ebs - 1) {
if (data_size == 0) {
- dbg_err("bad data_size at last LEB");
+ ubi_err("bad data_size at last LEB");
goto bad;
}
} else {
- dbg_err("too high lnum");
+ ubi_err("too high lnum");
goto bad;
}
} else {
if (copy_flag == 0) {
if (data_crc != 0) {
- dbg_err("non-zero data CRC");
+ ubi_err("non-zero data CRC");
goto bad;
}
if (data_size != 0) {
- dbg_err("non-zero data_size");
+ ubi_err("non-zero data_size");
goto bad;
}
} else {
if (data_size == 0) {
- dbg_err("zero data_size of copy");
+ ubi_err("zero data_size of copy");
goto bad;
}
}
if (used_ebs != 0) {
- dbg_err("bad used_ebs");
+ ubi_err("bad used_ebs");
goto bad;
}
}
@@ -998,8 +976,8 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
bad:
ubi_err("bad VID header");
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
return 1;
}
@@ -1032,20 +1010,20 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
p = (char *)vid_hdr - ubi->vid_hdr_shift;
read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
- if (read_err && read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG)
+ if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
return read_err;
magic = be32_to_cpu(vid_hdr->magic);
if (magic != UBI_VID_HDR_MAGIC) {
- if (read_err == -EBADMSG)
+ if (mtd_is_eccerr(read_err))
return UBI_IO_BAD_HDR_EBADMSG;
if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
if (verbose)
- ubi_warn("no VID header found at PEB %d, "
- "only 0xFF bytes", pnum);
- dbg_bld("no VID header found at PEB %d, "
- "only 0xFF bytes", pnum);
+ ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
if (!read_err)
return UBI_IO_FF;
else
@@ -1053,12 +1031,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
}
if (verbose) {
- ubi_warn("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
- ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
+ ubi_dump_vid_hdr(vid_hdr);
}
- dbg_bld("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
return UBI_IO_BAD_HDR;
}
@@ -1067,12 +1045,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (hdr_crc != crc) {
if (verbose) {
- ubi_warn("bad CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
- ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_dump_vid_hdr(vid_hdr);
}
- dbg_bld("bad CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
+ dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
if (!read_err)
return UBI_IO_BAD_HDR;
else
@@ -1113,7 +1091,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
dbg_io("write VID header to PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- err = paranoid_check_peb_ec_hdr(ubi, pnum);
+ err = self_check_peb_ec_hdr(ubi, pnum);
if (err)
return err;
@@ -1122,7 +1100,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
vid_hdr->hdr_crc = cpu_to_be32(crc);
- err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
if (err)
return err;
@@ -1132,34 +1110,32 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
return err;
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_check_not_bad - ensure that a physical eraseblock is not bad.
+ * self_check_not_bad - ensure that a physical eraseblock is not bad.
* @ubi: UBI device description object
* @pnum: physical eraseblock number to check
*
* This function returns zero if the physical eraseblock is good, %-EINVAL if
* it is bad and a negative error code if an error occurred.
*/
-static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
{
int err;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi_dbg_chk_io(ubi))
return 0;
err = ubi_io_is_bad(ubi, pnum);
if (!err)
return err;
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_stack();
+ ubi_err("self-check failed for PEB %d", pnum);
+ dump_stack();
return err > 0 ? -EINVAL : err;
}
/**
- * paranoid_check_ec_hdr - check if an erase counter header is all right.
+ * self_check_ec_hdr - check if an erase counter header is all right.
* @ubi: UBI device description object
* @pnum: physical eraseblock number the erase counter header belongs to
* @ec_hdr: the erase counter header to check
@@ -1167,13 +1143,13 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
* This function returns zero if the erase counter header contains valid
* values, and %-EINVAL if not.
*/
-static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_ec_hdr *ec_hdr)
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr)
{
int err;
uint32_t magic;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi_dbg_chk_io(ubi))
return 0;
magic = be32_to_cpu(ec_hdr->magic);
@@ -1185,33 +1161,33 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
err = validate_ec_hdr(ubi, ec_hdr);
if (err) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
goto fail;
}
return 0;
fail:
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
return -EINVAL;
}
/**
- * paranoid_check_peb_ec_hdr - check erase counter header.
+ * self_check_peb_ec_hdr - check erase counter header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
* This function returns zero if the erase counter header is all right and
* a negative error code if not or if an error occurred.
*/
-static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
{
int err;
uint32_t crc, hdr_crc;
struct ubi_ec_hdr *ec_hdr;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi_dbg_chk_io(ubi))
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1219,21 +1195,21 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
return -ENOMEM;
err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
- if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
goto exit;
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
if (hdr_crc != crc) {
ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
err = -EINVAL;
goto exit;
}
- err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
exit:
kfree(ec_hdr);
@@ -1241,7 +1217,7 @@ exit:
}
/**
- * paranoid_check_vid_hdr - check that a volume identifier header is all right.
+ * self_check_vid_hdr - check that a volume identifier header is all right.
* @ubi: UBI device description object
* @pnum: physical eraseblock number the volume identifier header belongs to
* @vid_hdr: the volume identifier header to check
@@ -1249,13 +1225,13 @@ exit:
* This function returns zero if the volume identifier header is all right, and
* %-EINVAL if not.
*/
-static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_vid_hdr *vid_hdr)
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
{
int err;
uint32_t magic;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi_dbg_chk_io(ubi))
return 0;
magic = be32_to_cpu(vid_hdr->magic);
@@ -1267,36 +1243,36 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
err = validate_vid_hdr(ubi, vid_hdr);
if (err) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
goto fail;
}
return err;
fail:
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
return -EINVAL;
}
/**
- * paranoid_check_peb_vid_hdr - check volume identifier header.
+ * self_check_peb_vid_hdr - check volume identifier header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
* This function returns zero if the volume identifier header is all right,
* and a negative error code if not or if an error occurred.
*/
-static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
{
int err;
uint32_t crc, hdr_crc;
struct ubi_vid_hdr *vid_hdr;
void *p;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi_dbg_chk_io(ubi))
return 0;
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -1306,22 +1282,22 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
p = (char *)vid_hdr - ubi->vid_hdr_shift;
err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
- if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
goto exit;
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
- ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
+ ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
err = -EINVAL;
goto exit;
}
- err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
exit:
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -1329,7 +1305,7 @@ exit:
}
/**
- * ubi_dbg_check_write - make sure write succeeded.
+ * self_check_write - make sure write succeeded.
* @ubi: UBI device description object
* @buf: buffer with data which were written
* @pnum: physical eraseblock number the data were written to
@@ -1340,15 +1316,15 @@ exit:
* the original data buffer - the data have to match. Returns zero if the data
* match and a negative error code if not or in case of failure.
*/
-int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
- int offset, int len)
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len)
{
int err, i;
size_t read;
void *buf1;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi_dbg_chk_io(ubi))
return 0;
buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
@@ -1357,8 +1333,8 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
return 0;
}
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1);
- if (err && err != -EUCLEAN)
+ err = mtd_read(ubi->mtd, addr, len, &read, buf1);
+ if (err && !mtd_is_bitflip(err))
goto out_free;
for (i = 0; i < len; i++) {
@@ -1369,7 +1345,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
if (c == c1)
continue;
- ubi_err("paranoid check failed for PEB %d:%d, len %d",
+ ubi_err("self-check failed for PEB %d:%d, len %d",
pnum, offset, len);
ubi_msg("data differ at position %d", i);
dump_len = max_t(int, 128, len - i);
@@ -1381,7 +1357,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
i, i + dump_len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
buf1 + i, dump_len, 1);
- ubi_dbg_dump_stack();
+ dump_stack();
err = -EINVAL;
goto out_free;
}
@@ -1395,7 +1371,7 @@ out_free:
}
/**
- * ubi_dbg_check_all_ff - check that a region of flash is empty.
+ * ubi_self_check_all_ff - check that a region of flash is empty.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @offset: the starting offset within the physical eraseblock to check
@@ -1405,14 +1381,14 @@ out_free:
* @offset of the physical eraseblock @pnum, and a negative error code if not
* or if an error occurred.
*/
-int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
size_t read;
int err;
void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi_dbg_chk_io(ubi))
return 0;
buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
@@ -1421,17 +1397,17 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
return 0;
}
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
- if (err && err != -EUCLEAN) {
- ubi_err("error %d while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, len, pnum, offset, read);
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err && !mtd_is_bitflip(err)) {
+ ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
goto error;
}
err = ubi_check_pattern(buf, 0xFF, len);
if (err == 0) {
- ubi_err("flash region at PEB %d:%d, length %d does not "
- "contain all 0xFF bytes", pnum, offset, len);
+ ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
+ pnum, offset, len);
goto fail;
}
@@ -1439,14 +1415,12 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
return 0;
fail:
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
ubi_msg("hex dump of the %d-%d region", offset, offset + len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
err = -EINVAL;
error:
- ubi_dbg_dump_stack();
+ dump_stack();
vfree(buf);
return err;
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index d39716e5b20..3aac1acceeb 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -221,7 +221,7 @@ out_free:
kfree(desc);
out_put_ubi:
ubi_put_device(ubi);
- dbg_err("cannot open device %d, volume %d, error %d",
+ ubi_err("cannot open device %d, volume %d, error %d",
ubi_num, vol_id, err);
return ERR_PTR(err);
}
@@ -410,7 +410,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
return 0;
err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
- if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
+ if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
ubi_warn("mark volume %d as corrupted", vol_id);
vol->corrupted = 1;
}
@@ -426,11 +426,9 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
* @buf: data to write
* @offset: offset within the logical eraseblock where to write
* @len: how many bytes to write
- * @dtype: expected data type
*
* This function writes @len bytes of data from @buf to offset @offset of
- * logical eraseblock @lnum. The @dtype argument describes expected lifetime of
- * the data.
+ * logical eraseblock @lnum.
*
* This function takes care of physical eraseblock write failures. If write to
* the physical eraseblock write operation fails, the logical eraseblock is
@@ -447,7 +445,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
* returns immediately with %-EBADF code.
*/
int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
- int offset, int len, int dtype)
+ int offset, int len)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
@@ -466,17 +464,13 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (len == 0)
return 0;
- return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
}
EXPORT_SYMBOL_GPL(ubi_leb_write);
@@ -486,7 +480,6 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
* @lnum: logical eraseblock number to change
* @buf: data to write
* @len: how many bytes to write
- * @dtype: expected data type
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
@@ -497,7 +490,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
* code in case of failure.
*/
int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
- int len, int dtype)
+ int len)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
@@ -515,17 +508,13 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (len == 0)
return 0;
- return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
+ return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
}
EXPORT_SYMBOL_GPL(ubi_leb_change);
@@ -562,7 +551,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
if (err)
return err;
- return ubi_wl_flush(ubi);
+ return ubi_wl_flush(ubi, vol->vol_id, lnum);
}
EXPORT_SYMBOL_GPL(ubi_leb_erase);
@@ -626,7 +615,6 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
* ubi_leb_map - map logical eraseblock to a physical eraseblock.
* @desc: volume descriptor
* @lnum: logical eraseblock number
- * @dtype: expected data type
*
* This function maps an un-mapped logical eraseblock @lnum to a physical
* eraseblock. This means, that after a successful invocation of this
@@ -639,7 +627,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
* eraseblock is already mapped, and other negative error codes in case of
* other failures.
*/
-int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
@@ -652,17 +640,13 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
if (lnum < 0 || lnum >= vol->reserved_pebs)
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (vol->eba_tbl[lnum] >= 0)
return -EBADMSG;
- return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(ubi_leb_map);
@@ -714,14 +698,39 @@ int ubi_sync(int ubi_num)
if (!ubi)
return -ENODEV;
- if (ubi->mtd->sync)
- ubi->mtd->sync(ubi->mtd);
-
+ mtd_sync(ubi->mtd);
ubi_put_device(ubi);
return 0;
}
EXPORT_SYMBOL_GPL(ubi_sync);
+/**
+ * ubi_flush - flush UBI work queue.
+ * @ubi_num: UBI device to flush work queue
+ * @vol_id: volume id to flush for
+ * @lnum: logical eraseblock number to flush for
+ *
+ * This function executes all pending work for a particular volume id / logical
+ * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
+ * a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_flush(int ubi_num, int vol_id, int lnum)
+{
+ struct ubi_device *ubi;
+ int err = 0;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ err = ubi_wl_flush(ubi, vol_id, lnum);
+ ubi_put_device(ubi);
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_flush);
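ubi_flush only executes the queued work items that match the requested volume id / LEB pair, with %UBI_ALL acting as a wildcard for either field. The following sketch shows that matching rule over a plain array of pending items; WILDCARD and struct pending are made-up names, and the real work queue is a list drained under locks.

#include <stdio.h>

#define WILDCARD (-1)     /* stands in for UBI_ALL in this sketch */

struct pending { int vol_id; int lnum; };

/* An item matches when each requested field is either the wildcard or equal. */
static int matches(const struct pending *w, int vol_id, int lnum)
{
	return (vol_id == WILDCARD || w->vol_id == vol_id) &&
	       (lnum == WILDCARD || w->lnum == lnum);
}

int main(void)
{
	const struct pending queue[] = {
		{ 0, 3 }, { 0, 7 }, { 2, 3 },
	};
	size_t total = sizeof(queue) / sizeof(queue[0]);
	int flushed = 0;

	for (size_t i = 0; i < total; i++)
		if (matches(&queue[i], 0, WILDCARD))   /* flush everything of volume 0 */
			flushed++;
	printf("flushed %d of %zu pending items\n", flushed, total);
	return 0;
}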
+
BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
/**
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index ff2a65c37f6..f913d701a5b 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -81,7 +81,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
if (err) {
- if (err == -EBADMSG)
+ if (mtd_is_eccerr(err))
err = 1;
break;
}
@@ -92,16 +92,45 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
}
/**
- * ubi_calculate_rsvd_pool - calculate how many PEBs must be reserved for bad
+ * ubi_update_reserved - update bad eraseblock handling accounting data.
+ * @ubi: UBI device description object
+ *
+ * This function calculates the gap between the current number of PEBs reserved
+ * for bad eraseblock handling and the required level, and, if necessary,
+ * reserves more PEBs to fill that gap, according
+ * to availability. Should be called with ubi->volumes_lock held.
+ */
+void ubi_update_reserved(struct ubi_device *ubi)
+{
+ int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
+
+ if (need <= 0 || ubi->avail_pebs == 0)
+ return;
+
+ need = min_t(int, need, ubi->avail_pebs);
+ ubi->avail_pebs -= need;
+ ubi->rsvd_pebs += need;
+ ubi->beb_rsvd_pebs += need;
+ ubi_msg("reserved more %d PEBs for bad PEB handling", need);
+}
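ubi_update_reserved tops the bad-PEB reserve up only by the gap between the target level and what is already reserved, clamped to the PEBs that are still available. Below is a minimal sketch of that clamping arithmetic on a hypothetical counters struct; the field names mirror the driver's, but nothing else is taken from it.

#include <stdio.h>

struct counters {
	int beb_rsvd_level;   /* how many PEBs should be reserved */
	int beb_rsvd_pebs;    /* how many are reserved right now */
	int avail_pebs;       /* free PEBs we may still take */
};

/* Move up to `need` PEBs from the available pool into the bad-PEB reserve. */
static int top_up_reserve(struct counters *c)
{
	int need = c->beb_rsvd_level - c->beb_rsvd_pebs;

	if (need <= 0 || c->avail_pebs == 0)
		return 0;
	if (need > c->avail_pebs)
		need = c->avail_pebs;          /* clamp, like min_t() in the driver */
	c->avail_pebs -= need;
	c->beb_rsvd_pebs += need;
	return need;
}

int main(void)
{
	struct counters c = { .beb_rsvd_level = 40, .beb_rsvd_pebs = 25, .avail_pebs = 10 };

	printf("reserved %d more PEBs, %d still missing\n",
	       top_up_reserve(&c), c.beb_rsvd_level - c.beb_rsvd_pebs);
	return 0;
}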
+
+/**
+ * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
* eraseblock handling.
* @ubi: UBI device description object
*/
void ubi_calculate_reserved(struct ubi_device *ubi)
{
- ubi->beb_rsvd_level = ubi->good_peb_count/100;
- ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE;
- if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS)
- ubi->beb_rsvd_level = MIN_RESEVED_PEBS;
+ /*
+ * Calculate the actual number of PEBs currently needed to be reserved
+ * for future bad eraseblock handling.
+ */
+ ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
+ if (ubi->beb_rsvd_level < 0) {
+ ubi->beb_rsvd_level = 0;
+ ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
+ ubi->bad_peb_count, ubi->bad_peb_limit);
+ }
}
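/*
 * Illustrative numbers only: with bad_peb_limit == 40 (e.g. 2% of a
 * 2048-PEB device) and bad_peb_count == 12, beb_rsvd_level becomes 28.
 * Once bad_peb_count exceeds the limit, the level is clamped to 0 and only
 * PEBs that happen to be available are used for bad-PEB handling.
 */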
/**
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
deleted file mode 100644
index d48aef15ab5..00000000000
--- a/drivers/mtd/ubi/scan.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) International Business Machines Corp., 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Author: Artem Bityutskiy (Битюцкий Артём)
- */
-
-#ifndef __UBI_SCAN_H__
-#define __UBI_SCAN_H__
-
-/* The erase counter value for this physical eraseblock is unknown */
-#define UBI_SCAN_UNKNOWN_EC (-1)
-
-/**
- * struct ubi_scan_leb - scanning information about a physical eraseblock.
- * @ec: erase counter (%UBI_SCAN_UNKNOWN_EC if it is unknown)
- * @pnum: physical eraseblock number
- * @lnum: logical eraseblock number
- * @scrub: if this physical eraseblock needs scrubbing
- * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
- * @sqnum: sequence number
- * @u: unions RB-tree or @list links
- * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects
- * @u.list: link in one of the eraseblock lists
- *
- * One object of this type is allocated for each physical eraseblock during
- * scanning.
- */
-struct ubi_scan_leb {
- int ec;
- int pnum;
- int lnum;
- unsigned int scrub:1;
- unsigned int copy_flag:1;
- unsigned long long sqnum;
- union {
- struct rb_node rb;
- struct list_head list;
- } u;
-};
-
-/**
- * struct ubi_scan_volume - scanning information about a volume.
- * @vol_id: volume ID
- * @highest_lnum: highest logical eraseblock number in this volume
- * @leb_count: number of logical eraseblocks in this volume
- * @vol_type: volume type
- * @used_ebs: number of used logical eraseblocks in this volume (only for
- * static volumes)
- * @last_data_size: amount of data in the last logical eraseblock of this
- * volume (always equivalent to the usable logical eraseblock
- * size in case of dynamic volumes)
- * @data_pad: how many bytes at the end of logical eraseblocks of this volume
- * are not used (due to volume alignment)
- * @compat: compatibility flags of this volume
- * @rb: link in the volume RB-tree
- * @root: root of the RB-tree containing all the eraseblock belonging to this
- * volume (&struct ubi_scan_leb objects)
- *
- * One object of this type is allocated for each volume during scanning.
- */
-struct ubi_scan_volume {
- int vol_id;
- int highest_lnum;
- int leb_count;
- int vol_type;
- int used_ebs;
- int last_data_size;
- int data_pad;
- int compat;
- struct rb_node rb;
- struct rb_root root;
-};
-
-/**
- * struct ubi_scan_info - UBI scanning information.
- * @volumes: root of the volume RB-tree
- * @corr: list of corrupted physical eraseblocks
- * @free: list of free physical eraseblocks
- * @erase: list of physical eraseblocks which have to be erased
- * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
- * those belonging to "preserve"-compatible internal volumes)
- * @corr_peb_count: count of PEBs in the @corr list
- * @empty_peb_count: count of PEBs which are presumably empty (contain only
- * 0xFF bytes)
- * @alien_peb_count: count of PEBs in the @alien list
- * @bad_peb_count: count of bad physical eraseblocks
- * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
- * as bad yet, but which look like bad
- * @vols_found: number of volumes found during scanning
- * @highest_vol_id: highest volume ID
- * @is_empty: flag indicating whether the MTD device is empty or not
- * @min_ec: lowest erase counter value
- * @max_ec: highest erase counter value
- * @max_sqnum: highest sequence number value
- * @mean_ec: mean erase counter value
- * @ec_sum: a temporary variable used when calculating @mean_ec
- * @ec_count: a temporary variable used when calculating @mean_ec
- * @scan_leb_slab: slab cache for &struct ubi_scan_leb objects
- *
- * This data structure contains the result of scanning and may be used by other
- * UBI sub-systems to build final UBI data structures, further error-recovery
- * and so on.
- */
-struct ubi_scan_info {
- struct rb_root volumes;
- struct list_head corr;
- struct list_head free;
- struct list_head erase;
- struct list_head alien;
- int corr_peb_count;
- int empty_peb_count;
- int alien_peb_count;
- int bad_peb_count;
- int maybe_bad_peb_count;
- int vols_found;
- int highest_vol_id;
- int is_empty;
- int min_ec;
- int max_ec;
- unsigned long long max_sqnum;
- int mean_ec;
- uint64_t ec_sum;
- int ec_count;
- struct kmem_cache *scan_leb_slab;
-};
-
-struct ubi_device;
-struct ubi_vid_hdr;
-
-/*
- * ubi_scan_move_to_list - move a PEB from the volume tree to a list.
- *
- * @sv: volume scanning information
- * @seb: scanning eraseblock information
- * @list: the list to move to
- */
-static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv,
- struct ubi_scan_leb *seb,
- struct list_head *list)
-{
- rb_erase(&seb->u.rb, &sv->root);
- list_add_tail(&seb->u.list, list);
-}
-
-int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
- int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
- int bitflips);
-struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
- int vol_id);
-struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
- int lnum);
-void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv);
-struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
- struct ubi_scan_info *si);
-int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
- int pnum, int ec);
-struct ubi_scan_info *ubi_scan(struct ubi_device *ubi);
-void ubi_scan_destroy_si(struct ubi_scan_info *si);
-
-#endif /* !__UBI_SCAN_H__ */
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 6fb8ec2174a..ac2b24d1783 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -149,10 +149,10 @@ enum {
* The @image_seq field is used to validate a UBI image that has been prepared
* for a UBI device. The @image_seq value can be any value, but it must be the
* same on all eraseblocks. UBI will ensure that all new erase counter headers
- * also contain this value, and will check the value when scanning at start-up.
+ * also contain this value, and will check the value when attaching the flash.
* One way to make use of @image_seq is to increase its value by one every time
* an image is flashed over an existing image, then, if the flashing does not
- * complete, UBI will detect the error when scanning.
+ * complete, UBI will detect the error when attaching the media.
*/
struct ubi_ec_hdr {
__be32 magic;
@@ -298,8 +298,8 @@ struct ubi_vid_hdr {
#define UBI_INT_VOL_COUNT 1
/*
- * Starting ID of internal volumes. There is reserved room for 4096 internal
- * volumes.
+ * Starting ID of internal volumes: 0x7fffefff.
+ * There is reserved room for 4096 internal volumes.
*/
#define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096)
@@ -375,4 +375,141 @@ struct ubi_vtbl_record {
__be32 crc;
} __packed;
+/* UBI fastmap on-flash data structures */
+
+#define UBI_FM_SB_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 1)
+#define UBI_FM_DATA_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 2)
+
+/* fastmap on-flash data structure format version */
+#define UBI_FM_FMT_VERSION 1
+
+#define UBI_FM_SB_MAGIC 0x7B11D69F
+#define UBI_FM_HDR_MAGIC 0xD4B82EF7
+#define UBI_FM_VHDR_MAGIC 0xFA370ED1
+#define UBI_FM_POOL_MAGIC 0x67AF4D08
+#define UBI_FM_EBA_MAGIC 0xf0c040a8
+
+/* A fastmap super block can be located between PEB 0 and
+ * UBI_FM_MAX_START */
+#define UBI_FM_MAX_START 64
+
+/* A fastmap can use up to UBI_FM_MAX_BLOCKS PEBs */
+#define UBI_FM_MAX_BLOCKS 32
+
+/* 5% of the total number of PEBs have to be scanned while attaching
+ * from a fastmap.
+ * But the size of this pool is limited to be between UBI_FM_MIN_POOL_SIZE and
+ * UBI_FM_MAX_POOL_SIZE */
+#define UBI_FM_MIN_POOL_SIZE 8
+#define UBI_FM_MAX_POOL_SIZE 256
+
+#define UBI_FM_WL_POOL_SIZE 25
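/*
 * A sketch (not the exact wl.c code) of how the 5% rule above turns into a
 * pool size, clamped to the limits; "peb_count" stands for the total number
 * of PEBs of the device.
 */
static inline int example_fm_pool_size(int peb_count)
{
	int size = peb_count / 20;	/* 5% of all PEBs */

	size = max_t(int, size, UBI_FM_MIN_POOL_SIZE);
	return min_t(int, size, UBI_FM_MAX_POOL_SIZE);
}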
+
+/**
+ * struct ubi_fm_sb - UBI fastmap super block
+ * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
+ * @version: format version of this fastmap
+ * @data_crc: CRC over the fastmap data
+ * @used_blocks: number of PEBs used by this fastmap
+ * @block_loc: an array containing the location of all PEBs of the fastmap
+ * @block_ec: the erase counter of each used PEB
+ * @sqnum: highest sequence number value at the time the fastmap was taken
+ *
+ */
+struct ubi_fm_sb {
+ __be32 magic;
+ __u8 version;
+ __u8 padding1[3];
+ __be32 data_crc;
+ __be32 used_blocks;
+ __be32 block_loc[UBI_FM_MAX_BLOCKS];
+ __be32 block_ec[UBI_FM_MAX_BLOCKS];
+ __be64 sqnum;
+ __u8 padding2[32];
+} __packed;
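/*
 * A simplified sketch of how an attach-time reader might sanity-check a
 * fastmap super block candidate found in one of the first UBI_FM_MAX_START
 * PEBs. The real fastmap code does more work (CRC over the whole data set,
 * cross-checking block_loc/block_ec, and so on); the helper name is made up.
 */
static int example_check_fm_sb(const struct ubi_fm_sb *fmsb)
{
	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC)
		return UBI_NO_FASTMAP;
	if (fmsb->version != UBI_FM_FMT_VERSION)
		return UBI_BAD_FASTMAP;
	if (be32_to_cpu(fmsb->used_blocks) > UBI_FM_MAX_BLOCKS)
		return UBI_BAD_FASTMAP;
	return 0;
}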
+
+/**
+ * struct ubi_fm_hdr - header of the fastmap data set
+ * @magic: fastmap header magic number (%UBI_FM_HDR_MAGIC)
+ * @free_peb_count: number of free PEBs known by this fastmap
+ * @used_peb_count: number of used PEBs known by this fastmap
+ * @scrub_peb_count: number of PEBs known by this fastmap that have to be scrubbed
+ * @bad_peb_count: number of bad PEBs known by this fastmap
+ * @erase_peb_count: number of bad PEBs which have to be erased
+ * @vol_count: number of UBI volumes known by this fastmap
+ */
+struct ubi_fm_hdr {
+ __be32 magic;
+ __be32 free_peb_count;
+ __be32 used_peb_count;
+ __be32 scrub_peb_count;
+ __be32 bad_peb_count;
+ __be32 erase_peb_count;
+ __be32 vol_count;
+ __u8 padding[4];
+} __packed;
+
+/* struct ubi_fm_hdr is followed by two struct ubi_fm_scan_pool */
+
+/**
+ * struct ubi_fm_scan_pool - Fastmap pool PEBs to be scanned while attaching
+ * @magic: pool magic number (%UBI_FM_POOL_MAGIC)
+ * @size: current pool size
+ * @max_size: maximal pool size
+ * @pebs: an array containing the location of all PEBs in this pool
+ */
+struct ubi_fm_scan_pool {
+ __be32 magic;
+ __be16 size;
+ __be16 max_size;
+ __be32 pebs[UBI_FM_MAX_POOL_SIZE];
+ __be32 padding[4];
+} __packed;
+
+/* ubi_fm_scan_pool is followed by nfree+nused struct ubi_fm_ec records */
+
+/**
+ * struct ubi_fm_ec - stores the erase counter of a PEB
+ * @pnum: PEB number
+ * @ec: ec of this PEB
+ */
+struct ubi_fm_ec {
+ __be32 pnum;
+ __be32 ec;
+} __packed;
+
+/**
+ * struct ubi_fm_volhdr - Fastmap volume header
+ * It identifies the start of an EBA table.
+ * @magic: Fastmap volume header magic number (%UBI_FM_VHDR_MAGIC)
+ * @vol_id: volume id of the fastmapped volume
+ * @vol_type: type of the fastmapped volume
+ * @data_pad: data_pad value of the fastmapped volume
+ * @used_ebs: number of used LEBs within this volume
+ * @last_eb_bytes: number of bytes used in the last LEB
+ */
+struct ubi_fm_volhdr {
+ __be32 magic;
+ __be32 vol_id;
+ __u8 vol_type;
+ __u8 padding1[3];
+ __be32 data_pad;
+ __be32 used_ebs;
+ __be32 last_eb_bytes;
+ __u8 padding2[8];
+} __packed;
+
+/* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba record */
+
+/**
+ * struct ubi_fm_eba - denotes an association between a PEB and LEB
+ * @magic: EBA table magic number
+ * @reserved_pebs: number of table entries
+ * @pnum: PEB number of LEB (LEB is the index)
+ */
+struct ubi_fm_eba {
+ __be32 magic;
+ __be32 reserved_pebs;
+ __be32 pnum[0];
+} __packed;
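/*
 * Reading the table back is an index lookup: the LEB number is the array
 * index and the stored value is the PEB it maps to. A hypothetical helper,
 * assuming the record's magic and size were already validated:
 */
static inline int example_fm_leb_to_peb(const struct ubi_fm_eba *eba, int lnum)
{
	if (lnum < 0 || lnum >= be32_to_cpu(eba->reserved_pebs))
		return -EINVAL;
	return be32_to_cpu(eba->pnum[lnum]);
}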
#endif /* !__UBI_MEDIA_H__ */
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index c6c22295898..7bf416329c1 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -22,7 +22,6 @@
#ifndef __UBI_UBI_H__
#define __UBI_UBI_H__
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
@@ -43,8 +42,6 @@
#include <asm/pgtable.h>
#include "ubi-media.h"
-#include "scan.h"
-#include "debug.h"
/* Maximum number of supported UBI devices */
#define UBI_MAX_DEVICES 32
@@ -53,21 +50,21 @@
#define UBI_NAME_STR "ubi"
/* Normal UBI messages */
-#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
+#define ubi_msg(fmt, ...) pr_notice("UBI: " fmt "\n", ##__VA_ARGS__)
/* UBI warning messages */
-#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \
- __func__, ##__VA_ARGS__)
+#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n", \
+ __func__, ##__VA_ARGS__)
/* UBI error messages */
-#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \
+#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n", \
__func__, ##__VA_ARGS__)
-/* Lowest number PEBs reserved for bad PEB handling */
-#define MIN_RESEVED_PEBS 2
-
/* Background thread name pattern */
#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
-/* This marker in the EBA table means that the LEB is um-mapped */
+/*
+ * This marker in the EBA table means that the LEB is un-mapped.
+ * NOTE! It has to have the same value as %UBI_ALL.
+ */
#define UBI_LEB_UNMAPPED -1
/*
@@ -83,6 +80,16 @@
*/
#define UBI_PROT_QUEUE_LEN 10
+/* The volume ID/LEB number/erase counter is unknown */
+#define UBI_UNKNOWN -1
+
+/*
+ * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
+ * + 2 for the number + 1 for the trailing zero byte).
+ */
+#define UBI_DFS_DIR_NAME "ubi%d"
+#define UBI_DFS_DIR_LEN (3 + 2 + 1)
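/*
 * For instance, the debug code is expected to build the directory name for
 * device number "ubi_num" roughly like this (sketch only):
 *
 *	char name[UBI_DFS_DIR_LEN + 1];
 *
 *	snprintf(name, sizeof(name), UBI_DFS_DIR_NAME, ubi_num);
 */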
+
/*
* Error codes returned by the I/O sub-system.
*
@@ -119,15 +126,28 @@ enum {
* PEB
* MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
* PEB
- * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
+ * MOVE_TARGET_BITFLIPS: canceled because a bit-flip was detected in the
* target PEB
+ * MOVE_RETRY: retry scrubbing the PEB
*/
enum {
MOVE_CANCEL_RACE = 1,
MOVE_SOURCE_RD_ERR,
MOVE_TARGET_RD_ERR,
MOVE_TARGET_WR_ERR,
- MOVE_CANCEL_BITFLIPS,
+ MOVE_TARGET_BITFLIPS,
+ MOVE_RETRY,
+};
+
+/*
+ * Return codes of the fastmap sub-system
+ *
+ * UBI_NO_FASTMAP: No fastmap super block was found
+ * UBI_BAD_FASTMAP: A fastmap was found but it's unusable
+ */
+enum {
+ UBI_NO_FASTMAP = 1,
+ UBI_BAD_FASTMAP,
};
/**
@@ -196,6 +216,41 @@ struct ubi_rename_entry {
struct ubi_volume_desc;
/**
+ * struct ubi_fastmap_layout - in-memory fastmap data structure.
+ * @e: PEBs used by the current fastmap
+ * @to_be_tortured: if non-zero, this PEB has to be tortured
+ * @used_blocks: number of used PEBs
+ * @max_pool_size: maximal size of the user pool
+ * @max_wl_pool_size: maximal size of the pool used by the WL sub-system
+ */
+struct ubi_fastmap_layout {
+ struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
+ int to_be_tortured[UBI_FM_MAX_BLOCKS];
+ int used_blocks;
+ int max_pool_size;
+ int max_wl_pool_size;
+};
+
+/**
+ * struct ubi_fm_pool - in-memory fastmap pool
+ * @pebs: PEBs in this pool
+ * @used: number of used PEBs
+ * @size: total number of PEBs in this pool
+ * @max_size: maximal size of the pool
+ *
+ * A pool gets filled with up to @max_size PEBs.
+ * If all PEBs within the pool are used, a new fastmap will be written
+ * to the flash and the pool gets refilled with empty PEBs.
+ *
+ */
+struct ubi_fm_pool {
+ int pebs[UBI_FM_MAX_POOL_SIZE];
+ int used;
+ int size;
+ int max_size;
+};
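/*
 * A simplified sketch of the refill behaviour described above; the real
 * logic lives in the WL sub-system and also deals with locking and anchor
 * PEBs. The helper name is made up.
 */
static int example_take_peb_from_pool(struct ubi_device *ubi,
				      struct ubi_fm_pool *pool)
{
	if (pool->used == pool->size) {
		int err = ubi_update_fastmap(ubi);	/* refills the pools */

		if (err)
			return err;
	}

	return pool->pebs[pool->used++];
}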
+
+/**
* struct ubi_volume - UBI volume description data structure.
* @dev: device object to make use of the Linux device model
* @cdev: character device object to create character device
@@ -221,8 +276,6 @@ struct ubi_volume_desc;
* @upd_ebs: how many eraseblocks are expected to be updated
* @ch_lnum: LEB number which is being changing by the atomic LEB change
* operation
- * @ch_dtype: data persistency type which is being changing by the atomic LEB
- * change operation
* @upd_bytes: how many bytes are expected to be received for volume update or
* atomic LEB change
* @upd_received: how many bytes were already received for volume update or
@@ -269,7 +322,6 @@ struct ubi_volume {
int upd_ebs;
int ch_lnum;
- int ch_dtype;
long long upd_bytes;
long long upd_received;
void *upd_buf;
@@ -296,6 +348,37 @@ struct ubi_volume_desc {
struct ubi_wl_entry;
/**
+ * struct ubi_debug_info - debugging information for an UBI device.
+ *
+ * @chk_gen: if UBI general extra checks are enabled
+ * @chk_io: if UBI I/O extra checks are enabled
+ * @disable_bgt: disable the background task for testing purposes
+ * @emulate_bitflips: emulate bit-flips for testing purposes
+ * @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @dfs_dir_name: name of debugfs directory containing files of this UBI device
+ * @dfs_dir: direntry object of the UBI device debugfs directory
+ * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
+ * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_disable_bgt: debugfs knob to disable the background task
+ * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
+ * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
+ */
+struct ubi_debug_info {
+ unsigned int chk_gen:1;
+ unsigned int chk_io:1;
+ unsigned int disable_bgt:1;
+ unsigned int emulate_bitflips:1;
+ unsigned int emulate_io_failures:1;
+ char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_chk_gen;
+ struct dentry *dfs_chk_io;
+ struct dentry *dfs_disable_bgt;
+ struct dentry *dfs_emulate_bitflips;
+ struct dentry *dfs_emulate_io_failures;
+};
+
+/**
* struct ubi_device - UBI device description structure
* @dev: UBI device object to use the Linux device model
* @cdev: character device object to create character device
@@ -333,9 +416,21 @@ struct ubi_wl_entry;
* @ltree: the lock tree
* @alc_mutex: serializes "atomic LEB change" operations
*
+ * @fm_disabled: non-zero if fastmap is disabled (default)
+ * @fm: in-memory data structure of the currently used fastmap
+ * @fm_pool: in-memory data structure of the fastmap pool
+ * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
+ * sub-system
+ * @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf
+ * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
+ * @fm_size: fastmap size in bytes
+ * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
+ * @fm_work: fastmap work queue
+ *
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
* @free: RB-tree of free physical eraseblocks
+ * @free_count: Contains the number of elements in @free
* @scrub: RB-tree of physical eraseblocks which need scrubbing
* @pq: protection queue (contain physical eraseblocks which are temporarily
* protected from the wear-leveling worker)
@@ -360,6 +455,7 @@ struct ubi_wl_entry;
* @flash_size: underlying MTD device size (in bytes)
* @peb_count: count of physical eraseblocks on the MTD device
* @peb_size: physical eraseblock size
+ * @bad_peb_limit: top limit of expected bad physical eraseblocks
* @bad_peb_count: count of bad physical eraseblocks
* @good_peb_count: count of good physical eraseblocks
* @corr_peb_count: count of corrupted physical eraseblocks (preserved and not
@@ -386,10 +482,11 @@ struct ubi_wl_entry;
* time (MTD write buffer size)
* @mtd: MTD device descriptor
*
- * @peb_buf1: a buffer of PEB size used for different purposes
- * @peb_buf2: another buffer of PEB size used for different purposes
- * @buf_mutex: protects @peb_buf1 and @peb_buf2
+ * @peb_buf: a buffer of PEB size used for different purposes
+ * @buf_mutex: protects @peb_buf
* @ckvol_mutex: serializes static volume checking when opening
+ *
+ * @dbg: debugging information for this UBI device
*/
struct ubi_device {
struct cdev cdev;
@@ -406,6 +503,7 @@ struct ubi_device {
int avail_pebs;
int beb_rsvd_pebs;
int beb_rsvd_level;
+ int bad_peb_limit;
int autoresize_vol_id;
int vtbl_slots;
@@ -423,10 +521,22 @@ struct ubi_device {
struct rb_root ltree;
struct mutex alc_mutex;
+ /* Fastmap stuff */
+ int fm_disabled;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_fm_pool fm_pool;
+ struct ubi_fm_pool fm_wl_pool;
+ struct rw_semaphore fm_sem;
+ struct mutex fm_mutex;
+ void *fm_buf;
+ size_t fm_size;
+ struct work_struct fm_work;
+
/* Wear-leveling sub-system's stuff */
struct rb_root used;
struct rb_root erroneous;
struct rb_root free;
+ int free_count;
struct rb_root scrub;
struct list_head pq[UBI_PROT_QUEUE_LEN];
int pq_head;
@@ -468,12 +578,159 @@ struct ubi_device {
int max_write_size;
struct mtd_info *mtd;
- void *peb_buf1;
- void *peb_buf2;
+ void *peb_buf;
struct mutex buf_mutex;
struct mutex ckvol_mutex;
+
+ struct ubi_debug_info dbg;
+};
+
+/**
+ * struct ubi_ainf_peb - attach information about a physical eraseblock.
+ * @ec: erase counter (%UBI_UNKNOWN if it is unknown)
+ * @pnum: physical eraseblock number
+ * @vol_id: ID of the volume this LEB belongs to
+ * @lnum: logical eraseblock number
+ * @scrub: if this physical eraseblock needs scrubbing
+ * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
+ * @sqnum: sequence number
+ * @u: unions RB-tree or @list links
+ * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects
+ * @u.list: link in one of the eraseblock lists
+ *
+ * One object of this type is allocated for each physical eraseblock when
+ * attaching an MTD device. Note, if this PEB does not belong to any LEB /
+ * volume, the @vol_id and @lnum fields are initialized to %UBI_UNKNOWN.
+ */
+struct ubi_ainf_peb {
+ int ec;
+ int pnum;
+ int vol_id;
+ int lnum;
+ unsigned int scrub:1;
+ unsigned int copy_flag:1;
+ unsigned long long sqnum;
+ union {
+ struct rb_node rb;
+ struct list_head list;
+ } u;
+};
+
+/**
+ * struct ubi_ainf_volume - attaching information about a volume.
+ * @vol_id: volume ID
+ * @highest_lnum: highest logical eraseblock number in this volume
+ * @leb_count: number of logical eraseblocks in this volume
+ * @vol_type: volume type
+ * @used_ebs: number of used logical eraseblocks in this volume (only for
+ * static volumes)
+ * @last_data_size: amount of data in the last logical eraseblock of this
+ * volume (always equivalent to the usable logical eraseblock
+ * size in case of dynamic volumes)
+ * @data_pad: how many bytes at the end of logical eraseblocks of this volume
+ * are not used (due to volume alignment)
+ * @compat: compatibility flags of this volume
+ * @rb: link in the volume RB-tree
+ * @root: root of the RB-tree containing all the eraseblocks belonging to this
+ * volume (&struct ubi_ainf_peb objects)
+ *
+ * One object of this type is allocated for each volume when attaching an MTD
+ * device.
+ */
+struct ubi_ainf_volume {
+ int vol_id;
+ int highest_lnum;
+ int leb_count;
+ int vol_type;
+ int used_ebs;
+ int last_data_size;
+ int data_pad;
+ int compat;
+ struct rb_node rb;
+ struct rb_root root;
+};
+
+/**
+ * struct ubi_attach_info - MTD device attaching information.
+ * @volumes: root of the volume RB-tree
+ * @corr: list of corrupted physical eraseblocks
+ * @free: list of free physical eraseblocks
+ * @erase: list of physical eraseblocks which have to be erased
+ * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
+ * those belonging to "preserve"-compatible internal volumes)
+ * @corr_peb_count: count of PEBs in the @corr list
+ * @empty_peb_count: count of PEBs which are presumably empty (contain only
+ * 0xFF bytes)
+ * @alien_peb_count: count of PEBs in the @alien list
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
+ *                       as bad yet, but which look bad
+ * @vols_found: number of volumes found
+ * @highest_vol_id: highest volume ID
+ * @is_empty: flag indicating whether the MTD device is empty or not
+ * @min_ec: lowest erase counter value
+ * @max_ec: highest erase counter value
+ * @max_sqnum: highest sequence number value
+ * @mean_ec: mean erase counter value
+ * @ec_sum: a temporary variable used when calculating @mean_ec
+ * @ec_count: a temporary variable used when calculating @mean_ec
+ * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
+ *
+ * This data structure contains the result of attaching an MTD device and may
+ * be used by other UBI sub-systems to build final UBI data structures, further
+ * error-recovery and so on.
+ */
+struct ubi_attach_info {
+ struct rb_root volumes;
+ struct list_head corr;
+ struct list_head free;
+ struct list_head erase;
+ struct list_head alien;
+ int corr_peb_count;
+ int empty_peb_count;
+ int alien_peb_count;
+ int bad_peb_count;
+ int maybe_bad_peb_count;
+ int vols_found;
+ int highest_vol_id;
+ int is_empty;
+ int min_ec;
+ int max_ec;
+ unsigned long long max_sqnum;
+ int mean_ec;
+ uint64_t ec_sum;
+ int ec_count;
+ struct kmem_cache *aeb_slab_cache;
};
+/**
+ * struct ubi_work - UBI work description data structure.
+ * @list: a link in the list of pending works
+ * @func: worker function
+ * @e: physical eraseblock to erase
+ * @vol_id: the volume ID on which this erasure is being performed
+ * @lnum: the logical eraseblock number
+ * @torture: if the physical eraseblock has to be tortured
+ * @anchor: produce an anchor PEB to be used by fastmap
+ *
+ * The @func pointer points to the worker function. If the @cancel argument is
+ * not zero, the worker has to free the resources and exit immediately. The
+ * worker has to return zero in case of success and a negative error code in
+ * case of failure.
+ */
+struct ubi_work {
+ struct list_head list;
+ int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
+ /* The below fields are only relevant to erasure works */
+ struct ubi_wl_entry *e;
+ int vol_id;
+ int lnum;
+ int torture;
+ int anchor;
+};
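/*
 * A skeleton of a worker honouring the contract described above: when
 * "cancel" is non-zero it only releases its resources and returns. This is
 * a shape sketch, not one of the real workers (erase_worker() in wl.c is
 * the main user of this structure).
 */
static int example_worker(struct ubi_device *ubi, struct ubi_work *wrk,
			  int cancel)
{
	struct ubi_wl_entry *e = wrk->e;

	if (cancel) {
		kmem_cache_free(ubi_wl_entry_slab, e);
		kfree(wrk);
		return 0;
	}

	/* do the real work on e->pnum here, then free the work object */
	kfree(wrk);
	return 0;
}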
+
+#include "debug.h"
+
extern struct kmem_cache *ubi_wl_entry_slab;
extern const struct file_operations ubi_ctrl_cdev_operations;
extern const struct file_operations ubi_cdev_operations;
@@ -482,12 +739,23 @@ extern struct class *ubi_class;
extern struct mutex ubi_devices_mutex;
extern struct blocking_notifier_head ubi_notifiers;
+/* attach.c */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id);
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai);
+int ubi_attach(struct ubi_device *ubi, int force_scan);
+void ubi_destroy_ai(struct ubi_attach_info *ai);
+
/* vtbl.c */
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
struct ubi_vtbl_record *vtbl_rec);
int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
struct list_head *rename_list);
-int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai);
/* vmt.c */
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
@@ -511,6 +779,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
int length);
int ubi_check_volume(struct ubi_device *ubi, int vol_id);
+void ubi_update_reserved(struct ubi_device *ubi);
void ubi_calculate_reserved(struct ubi_device *ubi);
int ubi_check_pattern(const void *buf, uint8_t patt, int size);
@@ -520,24 +789,33 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
void *buf, int offset, int len, int check);
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
- const void *buf, int offset, int len, int dtype);
+ const void *buf, int offset, int len);
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype,
- int used_ebs);
+ int lnum, const void *buf, int len, int used_ebs);
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype);
+ int lnum, const void *buf, int len);
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr);
-int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan);
/* wl.c */
-int ubi_wl_get_peb(struct ubi_device *ubi, int dtype);
-int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture);
-int ubi_wl_flush(struct ubi_device *ubi);
+int ubi_wl_get_peb(struct ubi_device *ubi);
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture);
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum);
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
-int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
void ubi_wl_close(struct ubi_device *ubi);
int ubi_thread(void *u);
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
+ int lnum, int torture);
+int ubi_is_erase_work(struct ubi_work *wrk);
+void ubi_refill_pools(struct ubi_device *ubi);
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@@ -557,7 +835,8 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr);
/* build.c */
-int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024);
int ubi_detach_mtd_dev(int ubi_num, int anyway);
struct ubi_device *ubi_get_device(int ubi_num);
void ubi_put_device(struct ubi_device *ubi);
@@ -568,11 +847,41 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_notify_all(struct ubi_device *ubi, int ntype,
struct notifier_block *nb);
int ubi_enumerate_volumes(struct notifier_block *nb);
+void ubi_free_internal_volumes(struct ubi_device *ubi);
/* kapi.c */
void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_volume_info *vi);
+/* scan.c */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ int pnum, const struct ubi_vid_hdr *vid_hdr);
+
+/* fastmap.c */
+size_t ubi_calc_fm_size(struct ubi_device *ubi);
+int ubi_update_fastmap(struct ubi_device *ubi);
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int fm_anchor);
+
+/* block.c */
+#ifdef CONFIG_MTD_UBI_BLOCK
+int ubiblock_init(void);
+void ubiblock_exit(void);
+int ubiblock_create(struct ubi_volume_info *vi);
+int ubiblock_remove(struct ubi_volume_info *vi);
+#else
+static inline int ubiblock_init(void) { return 0; }
+static inline void ubiblock_exit(void) {}
+static inline int ubiblock_create(struct ubi_volume_info *vi)
+{
+ return -ENOSYS;
+}
+static inline int ubiblock_remove(struct ubi_volume_info *vi)
+{
+ return -ENOSYS;
+}
+#endif
+
/*
* ubi_rb_for_each_entry - walk an RB-tree.
@@ -588,6 +897,21 @@ void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
rb = rb_next(rb), \
pos = (rb ? container_of(rb, typeof(*pos), member) : NULL))
+/*
+ * ubi_move_aeb_to_list - move a PEB from the volume tree to a list.
+ *
+ * @av: volume attaching information
+ * @aeb: attaching eraseblock information
+ * @list: the list to move to
+ */
+static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av,
+ struct ubi_ainf_peb *aeb,
+ struct list_head *list)
+{
+ rb_erase(&aeb->u.rb, &av->root);
+ list_add_tail(&aeb->u.list, list);
+}
+
/**
* ubi_zalloc_vid_hdr - allocate a volume identifier header object.
* @ubi: UBI device description object
@@ -662,6 +986,7 @@ static inline void ubi_ro_mode(struct ubi_device *ubi)
if (!ubi->ro_mode) {
ubi->ro_mode = 1;
ubi_warn("switch to read-only mode");
+ dump_stack();
}
}
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 425bf5a3edd..ec2c2dc1c1c 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -64,8 +64,7 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
return 0;
}
- memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol->vol_id];
vtbl_rec.upd_marker = 1;
mutex_lock(&ubi->device_mutex);
@@ -93,8 +92,7 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_gen("clear update marker for volume %d", vol->vol_id);
- memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol->vol_id];
ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
vtbl_rec.upd_marker = 0;
@@ -147,7 +145,7 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
}
if (bytes == 0) {
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
if (err)
return err;
@@ -186,14 +184,12 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_gen("start changing LEB %d:%d, %u bytes",
vol->vol_id, req->lnum, req->bytes);
if (req->bytes == 0)
- return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
- req->dtype);
+ return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0);
vol->upd_bytes = req->bytes;
vol->upd_received = 0;
vol->changing_leb = 1;
vol->ch_lnum = req->lnum;
- vol->ch_dtype = req->dtype;
vol->upd_buf = vmalloc(req->bytes);
if (!vol->upd_buf)
@@ -246,8 +242,7 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
return 0;
}
- err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len,
- UBI_UNKNOWN);
+ err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
} else {
/*
* When writing static volume, and this is the last logical
@@ -259,8 +254,7 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
* contain zeros, not random trash.
*/
memset(buf + len, 0, vol->usable_leb_size - len);
- err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
- UBI_UNKNOWN, used_ebs);
+ err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, used_ebs);
}
return err;
@@ -365,7 +359,7 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
ubi_assert(vol->upd_received <= vol->upd_bytes);
if (vol->upd_received == vol->upd_bytes) {
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
if (err)
return err;
/* The update is finished, clear the update marker */
@@ -421,7 +415,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
len - vol->upd_bytes);
len = ubi_calc_data_len(ubi, vol->upd_buf, len);
err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
- vol->upd_buf, len, UBI_UNKNOWN);
+ vol->upd_buf, len);
if (err)
return err;
}
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 366eb70219a..8330703c098 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -26,13 +26,10 @@
#include <linux/err.h>
#include <linux/math64.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_volumes(struct ubi_device *ubi);
-#else
-#define paranoid_check_volumes(ubi) 0
-#endif
+static int self_check_volumes(struct ubi_device *ubi);
static ssize_t vol_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf);
@@ -226,7 +223,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
}
if (vol_id == UBI_VOL_NUM_AUTO) {
- dbg_err("out of volume IDs");
+ ubi_err("out of volume IDs");
err = -ENFILE;
goto out_unlock;
}
@@ -240,7 +237,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Ensure that this volume does not exist */
err = -EEXIST;
if (ubi->volumes[vol_id]) {
- dbg_err("volume %d already exists", vol_id);
+ ubi_err("volume %d already exists", vol_id);
goto out_unlock;
}
@@ -249,7 +246,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (ubi->volumes[i] &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
- dbg_err("volume \"%s\" exists (ID %d)", req->name, i);
+ ubi_err("volume \"%s\" exists (ID %d)", req->name, i);
goto out_unlock;
}
@@ -260,9 +257,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Reserve physical eraseblocks */
if (vol->reserved_pebs > ubi->avail_pebs) {
- dbg_err("not enough PEBs, only %d available", ubi->avail_pebs);
+ ubi_err("not enough PEBs, only %d available", ubi->avail_pebs);
if (ubi->corr_peb_count)
- dbg_err("%d PEBs are corrupted and not used",
+ ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
err = -ENOSPC;
goto out_unlock;
@@ -283,7 +280,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
* Finish all pending erases because there may be some LEBs belonging
* to the same volume ID.
*/
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
if (err)
goto out_acc;
@@ -359,8 +356,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
spin_unlock(&ubi->volumes_lock);
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
- if (paranoid_check_volumes(ubi))
- dbg_err("check failed while creating volume %d", vol_id);
+ self_check_volumes(ubi);
return err;
out_sysfs:
@@ -447,21 +443,13 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= reserved_pebs;
ubi->avail_pebs += reserved_pebs;
- i = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
- if (i > 0) {
- i = ubi->avail_pebs >= i ? i : ubi->avail_pebs;
- ubi->avail_pebs -= i;
- ubi->rsvd_pebs += i;
- ubi->beb_rsvd_pebs += i;
- if (i > 0)
- ubi_msg("reserve more %d PEBs", i);
- }
+ ubi_update_reserved(ubi);
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
- if (!no_vtbl && paranoid_check_volumes(ubi))
- dbg_err("check failed while removing volume %d", vol_id);
+ if (!no_vtbl)
+ self_check_volumes(ubi);
return err;
@@ -499,7 +487,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (vol->vol_type == UBI_STATIC_VOLUME &&
reserved_pebs < vol->used_ebs) {
- dbg_err("too small size %d, %d LEBs contain data",
+ ubi_err("too small size %d, %d LEBs contain data",
reserved_pebs, vol->used_ebs);
return -EINVAL;
}
@@ -528,10 +516,10 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (pebs > 0) {
spin_lock(&ubi->volumes_lock);
if (pebs > ubi->avail_pebs) {
- dbg_err("not enough PEBs: requested %d, available %d",
+ ubi_err("not enough PEBs: requested %d, available %d",
pebs, ubi->avail_pebs);
if (ubi->corr_peb_count)
- dbg_err("%d PEBs are corrupted and not used",
+ ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
spin_unlock(&ubi->volumes_lock);
err = -ENOSPC;
@@ -547,7 +535,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
}
/* Change volume table record */
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol_id];
vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
@@ -562,15 +550,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs += pebs;
ubi->avail_pebs -= pebs;
- pebs = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
- if (pebs > 0) {
- pebs = ubi->avail_pebs >= pebs ? pebs : ubi->avail_pebs;
- ubi->avail_pebs -= pebs;
- ubi->rsvd_pebs += pebs;
- ubi->beb_rsvd_pebs += pebs;
- if (pebs > 0)
- ubi_msg("reserve more %d PEBs", pebs);
- }
+ ubi_update_reserved(ubi);
for (i = 0; i < reserved_pebs; i++)
new_mapping[i] = vol->eba_tbl[i];
kfree(vol->eba_tbl);
@@ -587,8 +567,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
}
ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
- if (paranoid_check_volumes(ubi))
- dbg_err("check failed while re-sizing volume %d", vol_id);
+ self_check_volumes(ubi);
return err;
out_acc:
@@ -637,8 +616,8 @@ int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
}
}
- if (!err && paranoid_check_volumes(ubi))
- ;
+ if (!err)
+ self_check_volumes(ubi);
return err;
}
@@ -685,8 +664,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
return err;
}
- if (paranoid_check_volumes(ubi))
- dbg_err("check failed while adding volume %d", vol_id);
+ self_check_volumes(ubi);
return err;
out_cdev:
@@ -711,16 +689,14 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
volume_sysfs_close(vol);
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_check_volume - check volume information.
+ * self_check_volume - check volume information.
* @ubi: UBI device description object
* @vol_id: volume ID
*
* Returns zero if volume is all right and a negative error code if not.
*/
-static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
+static int self_check_volume(struct ubi_device *ubi, int vol_id)
{
int idx = vol_id2idx(ubi, vol_id);
int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
@@ -770,7 +746,7 @@ static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
}
if (vol->upd_marker && vol->corrupted) {
- dbg_err("update marker and corrupted simultaneously");
+ ubi_err("update marker and corrupted simultaneously");
goto fail;
}
@@ -852,34 +828,33 @@ static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
return 0;
fail:
- ubi_err("paranoid check failed for volume %d", vol_id);
+ ubi_err("self-check failed for volume %d", vol_id);
if (vol)
- ubi_dbg_dump_vol_info(vol);
- ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+ ubi_dump_vol_info(vol);
+ ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
dump_stack();
spin_unlock(&ubi->volumes_lock);
return -EINVAL;
}
/**
- * paranoid_check_volumes - check information about all volumes.
+ * self_check_volumes - check information about all volumes.
* @ubi: UBI device description object
*
* Returns zero if volumes are all right and a negative error code if not.
*/
-static int paranoid_check_volumes(struct ubi_device *ubi)
+static int self_check_volumes(struct ubi_device *ubi)
{
int i, err = 0;
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < ubi->vtbl_slots; i++) {
- err = paranoid_check_volume(ubi, i);
+ err = self_check_volume(ubi, i);
if (err)
break;
}
return err;
}
-#endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index fd3bf770f51..d77b1c1d7c7 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -37,16 +37,15 @@
* LEB 1. This scheme guarantees recoverability from unclean reboots.
*
* In this UBI implementation the on-flash volume table does not contain any
- * information about how many data static volumes contain. This information may
- * be found from the scanning data.
+ * information about how much data static volumes contain.
*
* But it would still be beneficial to store this information in the volume
* table. For example, suppose we have a static volume X, and all its physical
* eraseblocks became bad for some reasons. Suppose we are attaching the
- * corresponding MTD device, the scanning has found no logical eraseblocks
+ * corresponding MTD device, for some reason we find no logical eraseblocks
* corresponding to the volume X. According to the volume table volume X does
* exist. So we don't know whether it is just empty or all its physical
- * eraseblocks went bad. So we cannot alarm the user about this corruption.
+ * eraseblocks went bad. So we cannot alarm the user properly.
*
* The volume table also stores so-called "update marker", which is used for
* volume updates. Before updating the volume, the update marker is set, and
@@ -62,11 +61,7 @@
#include <asm/div64.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static void paranoid_vtbl_check(const struct ubi_device *ubi);
-#else
-#define paranoid_vtbl_check(ubi)
-#endif
+static void self_vtbl_check(const struct ubi_device *ubi);
/* Empty volume table record */
static struct ubi_vtbl_record empty_vtbl_record;
@@ -106,12 +101,12 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
return err;
err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
- ubi->vtbl_size, UBI_LONGTERM);
+ ubi->vtbl_size);
if (err)
return err;
}
- paranoid_vtbl_check(ubi);
+ self_vtbl_check(ubi);
return 0;
}
@@ -158,7 +153,7 @@ int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
return err;
err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
- ubi->vtbl_size, UBI_LONGTERM);
+ ubi->vtbl_size);
if (err)
return err;
}
@@ -197,7 +192,7 @@ static int vtbl_check(const struct ubi_device *ubi,
if (be32_to_cpu(vtbl[i].crc) != crc) {
ubi_err("bad CRC at record %u: %#08x, not %#08x",
i, crc, be32_to_cpu(vtbl[i].crc));
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[i], i);
return 1;
}
@@ -229,7 +224,7 @@ static int vtbl_check(const struct ubi_device *ubi,
n = ubi->leb_size % alignment;
if (data_pad != n) {
- dbg_err("bad data_pad, has to be %d", n);
+ ubi_err("bad data_pad, has to be %d", n);
err = 6;
goto bad;
}
@@ -245,7 +240,7 @@ static int vtbl_check(const struct ubi_device *ubi,
}
if (reserved_pebs > ubi->good_peb_count) {
- dbg_err("too large reserved_pebs %d, good PEBs %d",
+ ubi_err("too large reserved_pebs %d, good PEBs %d",
reserved_pebs, ubi->good_peb_count);
err = 9;
goto bad;
@@ -275,10 +270,10 @@ static int vtbl_check(const struct ubi_device *ubi,
if (len1 > 0 && len1 == len2 &&
!strncmp(vtbl[i].name, vtbl[n].name, len1)) {
- ubi_err("volumes %d and %d have the same name"
- " \"%s\"", i, n, vtbl[i].name);
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
- ubi_dbg_dump_vtbl_record(&vtbl[n], n);
+ ubi_err("volumes %d and %d have the same name \"%s\"",
+ i, n, vtbl[i].name);
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[n], n);
return -EINVAL;
}
}
@@ -288,75 +283,64 @@ static int vtbl_check(const struct ubi_device *ubi,
bad:
ubi_err("volume table check failed: record %d, error %d", i, err);
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[i], i);
return -EINVAL;
}
/**
* create_vtbl - create a copy of volume table.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @copy: number of the volume table copy
* @vtbl: contents of the volume table
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
+static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
int copy, void *vtbl)
{
int err, tries = 0;
- static struct ubi_vid_hdr *vid_hdr;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *new_seb, *old_seb = NULL;
+ struct ubi_vid_hdr *vid_hdr;
+ struct ubi_ainf_peb *new_aeb;
- ubi_msg("create volume table (copy #%d)", copy + 1);
+ dbg_gen("create volume table (copy #%d)", copy + 1);
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vid_hdr)
return -ENOMEM;
- /*
- * Check if there is a logical eraseblock which would have to contain
- * this volume table copy was found during scanning. It has to be wiped
- * out.
- */
- sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
- if (sv)
- old_seb = ubi_scan_find_seb(sv, copy);
-
retry:
- new_seb = ubi_scan_get_free_peb(ubi, si);
- if (IS_ERR(new_seb)) {
- err = PTR_ERR(new_seb);
+ new_aeb = ubi_early_get_peb(ubi, ai);
+ if (IS_ERR(new_aeb)) {
+ err = PTR_ERR(new_aeb);
goto out_free;
}
- vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE;
vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
vid_hdr->data_size = vid_hdr->used_ebs =
vid_hdr->data_pad = cpu_to_be32(0);
vid_hdr->lnum = cpu_to_be32(copy);
- vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum);
+ vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
/* The EC header is already there, write the VID header */
- err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
+ err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr);
if (err)
goto write_error;
/* Write the layout volume contents */
- err = ubi_io_write_data(ubi, vtbl, new_seb->pnum, 0, ubi->vtbl_size);
+ err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size);
if (err)
goto write_error;
/*
- * And add it to the scanning information. Don't delete the old
- * @old_seb as it will be deleted and freed in 'ubi_scan_add_used()'.
+ * And add it to the attaching information. Don't delete the old version
+ * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
*/
- err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
- vid_hdr, 0);
- kfree(new_seb);
+ err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -366,10 +350,10 @@ write_error:
* Probably this physical eraseblock went bad, try to pick
* another one.
*/
- list_add(&new_seb->u.list, &si->erase);
+ list_add(&new_aeb->u.list, &ai->erase);
goto retry;
}
- kfree(new_seb);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
out_free:
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -379,20 +363,20 @@ out_free:
/**
* process_lvol - process the layout volume.
* @ubi: UBI device description object
- * @si: scanning information
- * @sv: layout volume scanning information
+ * @ai: attaching information
+ * @av: layout volume attaching information
*
* This function is responsible for reading the layout volume, ensuring it is
* not corrupted, and recovering from corruptions if needed. Returns volume
* table in case of success and a negative error code in case of failure.
*/
static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
- struct ubi_scan_info *si,
- struct ubi_scan_volume *sv)
+ struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av)
{
int err;
struct rb_node *rb;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};
@@ -424,27 +408,27 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
dbg_gen("check layout volume");
/* Read both LEB 0 and LEB 1 into memory */
- ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
- leb[seb->lnum] = vzalloc(ubi->vtbl_size);
- if (!leb[seb->lnum]) {
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ leb[aeb->lnum] = vzalloc(ubi->vtbl_size);
+ if (!leb[aeb->lnum]) {
err = -ENOMEM;
goto out_free;
}
- err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
+ err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0,
ubi->vtbl_size);
- if (err == UBI_IO_BITFLIPS || err == -EBADMSG)
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
/*
* Scrub the PEB later. Note, -EBADMSG indicates an
* uncorrectable ECC error, but we have our own CRC and
* the data will be checked later. If the data is OK,
* the PEB will be scrubbed (because we set
- * seb->scrub). If the data is not OK, the contents of
+ * aeb->scrub). If the data is not OK, the contents of
* the PEB will be recovered from the second copy, and
- * seb->scrub will be cleared in
- * 'ubi_scan_add_used()'.
+ * aeb->scrub will be cleared in
+ * 'ubi_add_to_av()'.
*/
- seb->scrub = 1;
+ aeb->scrub = 1;
else if (err)
goto out_free;
}
@@ -463,7 +447,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
ubi->vtbl_size);
if (leb_corrupted[1]) {
ubi_warn("volume table copy #2 is corrupted");
- err = create_vtbl(ubi, si, 1, leb[0]);
+ err = create_vtbl(ubi, ai, 1, leb[0]);
if (err)
goto out_free;
ubi_msg("volume table was restored");
@@ -486,7 +470,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
}
ubi_warn("volume table copy #1 is corrupted");
- err = create_vtbl(ubi, si, 0, leb[1]);
+ err = create_vtbl(ubi, ai, 0, leb[1]);
if (err)
goto out_free;
ubi_msg("volume table was restored");
@@ -504,13 +488,13 @@ out_free:
/**
* create_empty_lvol - create empty layout volume.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns volume table contents in case of success and a
* negative error code in case of failure.
*/
static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
- struct ubi_scan_info *si)
+ struct ubi_attach_info *ai)
{
int i;
struct ubi_vtbl_record *vtbl;
@@ -525,7 +509,7 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
int err;
- err = create_vtbl(ubi, si, i, vtbl);
+ err = create_vtbl(ubi, ai, i, vtbl);
if (err) {
vfree(vtbl);
return ERR_PTR(err);
@@ -538,18 +522,19 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
/**
* init_volumes - initialize volume information for existing volumes.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @vtbl: volume table
*
* This function allocates volume description objects for existing volumes.
* Returns zero in case of success and a negative error code in case of
* failure.
*/
-static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
+static int init_volumes(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai,
const struct ubi_vtbl_record *vtbl)
{
int i, reserved_pebs = 0;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
for (i = 0; i < ubi->vtbl_slots; i++) {
@@ -577,8 +562,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
/* Auto re-size flag may be set only for one volume */
if (ubi->autoresize_vol_id != -1) {
- ubi_err("more than one auto-resize volume (%d "
- "and %d)", ubi->autoresize_vol_id, i);
+ ubi_err("more than one auto-resize volume (%d and %d)",
+ ubi->autoresize_vol_id, i);
kfree(vol);
return -EINVAL;
}
@@ -605,8 +590,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
}
/* Static volumes only */
- sv = ubi_scan_find_sv(si, i);
- if (!sv) {
+ av = ubi_find_av(ai, i);
+ if (!av) {
/*
* No eraseblocks belonging to this volume found. We
* don't actually know whether this static volume is
@@ -618,22 +603,22 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
continue;
}
- if (sv->leb_count != sv->used_ebs) {
+ if (av->leb_count != av->used_ebs) {
/*
* We found a static volume which misses several
* eraseblocks. Treat it as corrupted.
*/
ubi_warn("static volume %d misses %d LEBs - corrupted",
- sv->vol_id, sv->used_ebs - sv->leb_count);
+ av->vol_id, av->used_ebs - av->leb_count);
vol->corrupted = 1;
continue;
}
- vol->used_ebs = sv->used_ebs;
+ vol->used_ebs = av->used_ebs;
vol->used_bytes =
(long long)(vol->used_ebs - 1) * vol->usable_leb_size;
- vol->used_bytes += sv->last_data_size;
- vol->last_eb_bytes = sv->last_data_size;
+ vol->used_bytes += av->last_data_size;
+ vol->last_eb_bytes = av->last_data_size;
}
/* And add the layout volume */
@@ -642,7 +627,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
return -ENOMEM;
vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
- vol->alignment = 1;
+ vol->alignment = UBI_LAYOUT_VOLUME_ALIGN;
vol->vol_type = UBI_DYNAMIC_VOLUME;
vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
@@ -674,105 +659,104 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
}
/**
- * check_sv - check volume scanning information.
+ * check_av - check volume attaching information.
* @vol: UBI volume description object
- * @sv: volume scanning information
+ * @av: volume attaching information
*
- * This function returns zero if the volume scanning information is consistent
+ * This function returns zero if the volume attaching information is consistent
* to the data read from the volume table, and %-EINVAL if not.
*/
-static int check_sv(const struct ubi_volume *vol,
- const struct ubi_scan_volume *sv)
+static int check_av(const struct ubi_volume *vol,
+ const struct ubi_ainf_volume *av)
{
int err;
- if (sv->highest_lnum >= vol->reserved_pebs) {
+ if (av->highest_lnum >= vol->reserved_pebs) {
err = 1;
goto bad;
}
- if (sv->leb_count > vol->reserved_pebs) {
+ if (av->leb_count > vol->reserved_pebs) {
err = 2;
goto bad;
}
- if (sv->vol_type != vol->vol_type) {
+ if (av->vol_type != vol->vol_type) {
err = 3;
goto bad;
}
- if (sv->used_ebs > vol->reserved_pebs) {
+ if (av->used_ebs > vol->reserved_pebs) {
err = 4;
goto bad;
}
- if (sv->data_pad != vol->data_pad) {
+ if (av->data_pad != vol->data_pad) {
err = 5;
goto bad;
}
return 0;
bad:
- ubi_err("bad scanning information, error %d", err);
- ubi_dbg_dump_sv(sv);
- ubi_dbg_dump_vol_info(vol);
+ ubi_err("bad attaching information, error %d", err);
+ ubi_dump_av(av);
+ ubi_dump_vol_info(vol);
return -EINVAL;
}
/**
- * check_scanning_info - check that scanning information.
+ * check_attaching_info - check the attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* Even though we protect on-flash data by CRC checksums, we still don't trust
- * the media. This function ensures that scanning information is consistent to
- * the information read from the volume table. Returns zero if the scanning
+ * the media. This function ensures that attaching information is consistent with
+ * the information read from the volume table. Returns zero if the attaching
* information is OK and %-EINVAL if it is not.
*/
-static int check_scanning_info(const struct ubi_device *ubi,
- struct ubi_scan_info *si)
+static int check_attaching_info(const struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
{
int err, i;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
- if (si->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
- ubi_err("scanning found %d volumes, maximum is %d + %d",
- si->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
+ if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
+ ubi_err("found %d volumes while attaching, maximum is %d + %d",
+ ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
return -EINVAL;
}
- if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
- si->highest_vol_id < UBI_INTERNAL_VOL_START) {
- ubi_err("too large volume ID %d found by scanning",
- si->highest_vol_id);
+ if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
+ ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err("too large volume ID %d found", ai->highest_vol_id);
return -EINVAL;
}
for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
cond_resched();
- sv = ubi_scan_find_sv(si, i);
+ av = ubi_find_av(ai, i);
vol = ubi->volumes[i];
if (!vol) {
- if (sv)
- ubi_scan_rm_volume(si, sv);
+ if (av)
+ ubi_remove_av(ai, av);
continue;
}
if (vol->reserved_pebs == 0) {
ubi_assert(i < ubi->vtbl_slots);
- if (!sv)
+ if (!av)
continue;
/*
- * During scanning we found a volume which does not
+ * During attaching we found a volume which does not
* exist according to the information in the volume
* table. This must have happened due to an unclean
* reboot while the volume was being removed. Discard
* these eraseblocks.
*/
- ubi_msg("finish volume %d removal", sv->vol_id);
- ubi_scan_rm_volume(si, sv);
- } else if (sv) {
- err = check_sv(vol, sv);
+ ubi_msg("finish volume %d removal", av->vol_id);
+ ubi_remove_av(ai, av);
+ } else if (av) {
+ err = check_av(vol, av);
if (err)
return err;
}
@@ -784,16 +768,16 @@ static int check_scanning_info(const struct ubi_device *ubi,
/**
* ubi_read_volume_table - read the volume table.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function reads volume table, checks it, recover from errors if needed,
* or creates it if needed. Returns zero in case of success and a negative
* error code in case of failure.
*/
-int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int i, err;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
@@ -808,8 +792,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
- sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
- if (!sv) {
+ av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID);
+ if (!av) {
/*
* No logical eraseblocks belonging to the layout volume were
* found. This could mean that the flash is just empty. In
@@ -818,8 +802,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
* But if flash is not empty this must be a corruption or the
* MTD device just contains garbage.
*/
- if (si->is_empty) {
- ubi->vtbl = create_empty_lvol(ubi, si);
+ if (ai->is_empty) {
+ ubi->vtbl = create_empty_lvol(ubi, ai);
if (IS_ERR(ubi->vtbl))
return PTR_ERR(ubi->vtbl);
} else {
@@ -827,14 +811,14 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
return -EINVAL;
}
} else {
- if (sv->leb_count > UBI_LAYOUT_VOLUME_EBS) {
+ if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
/* This must not happen with proper UBI images */
- dbg_err("too many LEBs (%d) in layout volume",
- sv->leb_count);
+ ubi_err("too many LEBs (%d) in layout volume",
+ av->leb_count);
return -EINVAL;
}
- ubi->vtbl = process_lvol(ubi, si, sv);
+ ubi->vtbl = process_lvol(ubi, ai, av);
if (IS_ERR(ubi->vtbl))
return PTR_ERR(ubi->vtbl);
}
@@ -845,15 +829,15 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
* The layout volume is OK, initialize the corresponding in-RAM data
* structures.
*/
- err = init_volumes(ubi, si, ubi->vtbl);
+ err = init_volumes(ubi, ai, ubi->vtbl);
if (err)
goto out_free;
/*
- * Make sure that the scanning information is consistent to the
+ * Make sure that the attaching information is consistent to the
* information stored in the volume table.
*/
- err = check_scanning_info(ubi, si);
+ err = check_attaching_info(ubi, ai);
if (err)
goto out_free;
@@ -868,21 +852,17 @@ out_free:
return err;
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_vtbl_check - check volume table.
+ * self_vtbl_check - check volume table.
* @ubi: UBI device description object
*/
-static void paranoid_vtbl_check(const struct ubi_device *ubi)
+static void self_vtbl_check(const struct ubi_device *ubi)
{
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi_dbg_chk_gen(ubi))
return;
if (vtbl_check(ubi, ubi->vtbl)) {
- ubi_err("paranoid check failed");
+ ubi_err("self-check failed");
BUG();
}
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index ff2c4956eef..0f3425dac91 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -40,12 +40,6 @@
* physical eraseblocks with low erase counter to free physical eraseblocks
* with high erase counter.
*
- * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
- * an "optimal" physical eraseblock. For example, when it is known that the
- * physical eraseblock will be "put" soon because it contains short-term data,
- * the WL sub-system may pick a free physical eraseblock with low erase
- * counter, and so forth.
- *
* If the WL sub-system fails to erase a physical eraseblock, it marks it as
* bad.
*
@@ -69,8 +63,7 @@
* to the user; instead, we first want to let users fill them up with data;
*
* o there is a chance that the user will put the physical eraseblock very
- * soon, so it makes sense not to move it for some time, but wait; this is
- * especially important in case of "short term" physical eraseblocks.
+ * soon, so it makes sense not to move it for some time, but wait.
*
* Physical eraseblocks stay protected only for limited time. But the "time" is
* measured in erase cycles in this case. This is implemented with help of the
@@ -141,35 +134,46 @@
*/
#define WL_MAX_FAILURES 32
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root);
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
/**
- * struct ubi_work - UBI work description data structure.
- * @list: a link in the list of pending works
- * @func: worker function
- * @e: physical eraseblock to erase
- * @torture: if the physical eraseblock has to be tortured
- *
- * The @func pointer points to the worker function. If the @cancel argument is
- * not zero, the worker has to free the resources and exit immediately. The
- * worker has to return zero in case of success and a negative error code in
- * case of failure.
+ * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
+ * @wrk: the work description object
*/
-struct ubi_work {
- struct list_head list;
- int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
- /* The below fields are only relevant to erasure works */
- struct ubi_wl_entry *e;
- int torture;
-};
-
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
-static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
- struct rb_root *root);
-static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
+static void update_fastmap_work_fn(struct work_struct *wrk)
+{
+ struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
+ ubi_update_fastmap(ubi);
+}
+
+/**
+ * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
+ * @ubi: UBI device description object
+ * @pnum: the PEB to check
+ */
+static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
+{
+ int i;
+
+ if (!ubi->fm)
+ return 0;
+
+ for (i = 0; i < ubi->fm->used_blocks; i++)
+ if (ubi->fm->e[i]->pnum == pnum)
+ return 1;
+
+ return 0;
+}
#else
-#define paranoid_check_ec(ubi, pnum, ec) 0
-#define paranoid_check_in_wl_tree(e, root)
-#define paranoid_check_in_pq(ubi, e) 0
+static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
+{
+ return 0;
+}
#endif
/**
@@ -268,18 +272,16 @@ static int produce_free_peb(struct ubi_device *ubi)
{
int err;
- spin_lock(&ubi->wl_lock);
while (!ubi->free.rb_node) {
spin_unlock(&ubi->wl_lock);
dbg_wl("do one work synchronously");
err = do_work(ubi);
- if (err)
- return err;
spin_lock(&ubi->wl_lock);
+ if (err)
+ return err;
}
- spin_unlock(&ubi->wl_lock);
return 0;
}
@@ -346,19 +348,22 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
/**
* find_wl_entry - find wear-leveling entry closest to certain erase counter.
+ * @ubi: UBI device description object
* @root: the RB-tree where to look for
- * @max: highest possible erase counter
+ * @diff: maximum possible difference from the smallest erase counter
*
* This function looks for a wear leveling entry with erase counter closest to
- * @max and less than @max.
+ * min + @diff, where min is the smallest erase counter.
*/
-static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
+static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root, int diff)
{
struct rb_node *p;
- struct ubi_wl_entry *e;
+ struct ubi_wl_entry *e, *prev_e = NULL;
+ int max;
e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
- max += e->ec;
+ max = e->ec + diff;
p = root->rb_node;
while (p) {
@@ -369,39 +374,143 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
p = p->rb_left;
else {
p = p->rb_right;
+ prev_e = e;
e = e1;
}
}
+ /* If no fastmap has been written and this WL entry can be used
+ * as anchor PEB, hold it back and return the second best WL entry
+ * such that fastmap can use the anchor PEB later. */
+ if (prev_e && !ubi->fm_disabled &&
+ !ubi->fm && e->pnum < UBI_FM_MAX_START)
+ return prev_e;
+
return e;
}
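
A minimal, userspace-only sketch of the selection rule used by find_wl_entry() above: pick the entry whose erase counter is as close as possible to (lowest EC + @diff) without exceeding it. The RB-tree walk of the driver is replaced here by a scan over a sorted array; the function name and the array representation are assumptions for illustration only.

#include <stdio.h>

static int pick_ec(const int *sorted_ec, int n, int diff)
{
        int max = sorted_ec[0] + diff;   /* bound: lowest EC plus @diff */
        int best = sorted_ec[0];
        int i;

        for (i = 1; i < n; i++) {
                if (sorted_ec[i] > max)
                        break;           /* everything further is too worn */
                best = sorted_ec[i];     /* still within the bound - keep it */
        }
        return best;
}

int main(void)
{
        int ec[] = { 10, 12, 17, 40, 95 };

        /* With diff = 20 the bound is 30, so 17 is chosen. */
        printf("picked EC %d\n", pick_ec(ec, 5, 20));
        return 0;
}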
/**
- * ubi_wl_get_peb - get a physical eraseblock.
+ * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
* @ubi: UBI device description object
- * @dtype: type of data which will be stored in this physical eraseblock
+ * @root: the RB-tree where to look for
*
- * This function returns a physical eraseblock in case of success and a
- * negative error code in case of failure. Might sleep.
+ * This function looks for a wear leveling entry with medium erase counter,
+ * but not greater or equivalent than the lowest erase counter plus
+ * %WL_FREE_MAX_DIFF/2.
*/
-int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
+static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root)
{
- int err, medium_ec;
struct ubi_wl_entry *e, *first, *last;
- ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
- dtype == UBI_UNKNOWN);
+ first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+ last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
+
+ if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
+ e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* If no fastmap has been written and this WL entry can be used
+ * as anchor PEB, hold it back and return the second best
+ * WL entry such that fastmap can use the anchor PEB later. */
+ if (e && !ubi->fm_disabled && !ubi->fm &&
+ e->pnum < UBI_FM_MAX_START)
+ e = rb_entry(rb_next(root->rb_node),
+ struct ubi_wl_entry, u.rb);
+#endif
+ } else
+ e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
+
+ return e;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * find_anchor_wl_entry - find wear-leveling entry to be used as anchor PEB.
+ * @root: the RB-tree where to look for
+ */
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e, *victim = NULL;
+ int max_ec = UBI_MAX_ERASECOUNTER;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb) {
+ if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
+ victim = e;
+ max_ec = e->ec;
+ }
+ }
+
+ return victim;
+}
+
+static int anchor_pebs_avalible(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb)
+ if (e->pnum < UBI_FM_MAX_START)
+ return 1;
+
+ return 0;
+}
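
A small sketch of the anchor-selection idea behind find_anchor_wl_entry() and anchor_pebs_avalible(): among free PEBs whose physical number lies below the fastmap start limit, take the least worn one. Plain arrays stand in for the RB-tree; the constant value and the struct layout are illustrative assumptions.

#include <stdio.h>

#define FM_MAX_START 64   /* assumed stand-in for UBI_FM_MAX_START */

struct peb { int pnum; int ec; };

static const struct peb *pick_anchor(const struct peb *p, int n)
{
        const struct peb *victim = NULL;
        int i;

        for (i = 0; i < n; i++) {
                if (p[i].pnum >= FM_MAX_START)
                        continue;               /* not usable as anchor */
                if (!victim || p[i].ec < victim->ec)
                        victim = &p[i];         /* least worn candidate so far */
        }
        return victim;                          /* NULL if no anchor PEB exists */
}

int main(void)
{
        struct peb free_pebs[] = { {3, 40}, {70, 5}, {12, 22} };
        const struct peb *a = pick_anchor(free_pebs, 3);

        if (a)
                printf("anchor candidate: PEB %d (EC %d)\n", a->pnum, a->ec);
        return 0;
}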
+
+/**
+ * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * @ubi: UBI device description object
+ * @anchor: This PEB will be used as anchor PEB by fastmap
+ *
+ * The function returns a physical erase block with a given maximal number
+ * and removes it from the wl subsystem.
+ * Must be called with wl_lock held!
+ */
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
+{
+ struct ubi_wl_entry *e = NULL;
+
+ if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ goto out;
+
+ if (anchor)
+ e = find_anchor_wl_entry(&ubi->free);
+ else
+ e = find_mean_wl_entry(ubi, &ubi->free);
+
+ if (!e)
+ goto out;
+
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
+ /* remove it from the free list,
+ * the wl subsystem no longer knows this erase block */
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+out:
+ return e;
+}
+#endif
+
+/**
+ * __wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ */
+static int __wl_get_peb(struct ubi_device *ubi)
+{
+ int err;
+ struct ubi_wl_entry *e;
retry:
- spin_lock(&ubi->wl_lock);
if (!ubi->free.rb_node) {
if (ubi->works_count == 0) {
- ubi_assert(list_empty(&ubi->works));
ubi_err("no free eraseblocks");
- spin_unlock(&ubi->wl_lock);
+ ubi_assert(list_empty(&ubi->works));
return -ENOSPC;
}
- spin_unlock(&ubi->wl_lock);
err = produce_free_peb(ubi);
if (err < 0)
@@ -409,66 +518,187 @@ retry:
goto retry;
}
- switch (dtype) {
- case UBI_LONGTERM:
- /*
- * For long term data we pick a physical eraseblock with high
- * erase counter. But the highest erase counter we can pick is
- * bounded by the the lowest erase counter plus
- * %WL_FREE_MAX_DIFF.
- */
- e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- break;
- case UBI_UNKNOWN:
- /*
- * For unknown data we pick a physical eraseblock with medium
- * erase counter. But we by no means can pick a physical
- * eraseblock with erase counter greater or equivalent than the
- * lowest erase counter plus %WL_FREE_MAX_DIFF.
- */
- first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
- u.rb);
- last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
-
- if (last->ec - first->ec < WL_FREE_MAX_DIFF)
- e = rb_entry(ubi->free.rb_node,
- struct ubi_wl_entry, u.rb);
- else {
- medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
- e = find_wl_entry(&ubi->free, medium_ec);
- }
- break;
- case UBI_SHORTTERM:
- /*
- * For short term data we pick a physical eraseblock with the
- * lowest erase counter as we expect it will be erased soon.
- */
- e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
- break;
- default:
- BUG();
+ e = find_mean_wl_entry(ubi, &ubi->free);
+ if (!e) {
+ ubi_err("no free eraseblocks");
+ return -ENOSPC;
}
- paranoid_check_in_wl_tree(e, &ubi->free);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
/*
* Move the physical eraseblock to the protection queue where it will
* be protected from being moved for some time.
*/
rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+#ifndef CONFIG_MTD_UBI_FASTMAP
+ /* We have to enqueue e only if fastmap is disabled,
+ * if fastmap is enabled, prot_queue_add() will be called by
+ * ubi_wl_get_peb() after removing e from the pool. */
prot_queue_add(ubi, e);
+#endif
+ return e->pnum;
+}
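
The prot_queue_add() call above parks a freshly handed-out PEB for a fixed number of erase "ticks" before wear-leveling may touch it again. Below is a hedged, userspace-only model of that protection-queue idea: a ring of small fixed-size buckets stands in for the kernel list heads, and all sizes and names are assumptions for illustration.

#include <stdio.h>

#define PQ_LEN   10   /* protection time, measured in ticks        */
#define PQ_SLOT   4   /* max PEBs parked per tick (sketch only)    */

static int pq[PQ_LEN][PQ_SLOT];
static int pq_cnt[PQ_LEN];
static int pq_head;

static void pq_add(int pnum)
{
        int slot = (pq_head + PQ_LEN - 1) % PQ_LEN;  /* farthest slot from head */

        if (pq_cnt[slot] < PQ_SLOT)
                pq[slot][pq_cnt[slot]++] = pnum;
}

static void pq_tick(void)
{
        int i;

        for (i = 0; i < pq_cnt[pq_head]; i++)
                printf("PEB %d leaves protection\n", pq[pq_head][i]);
        pq_cnt[pq_head] = 0;
        pq_head = (pq_head + 1) % PQ_LEN;            /* one erase cycle passed */
}

int main(void)
{
        int t;

        pq_add(17);
        for (t = 0; t < PQ_LEN; t++)
                pq_tick();                           /* PEB 17 is released on the last tick */
        return 0;
}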
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * return_unused_pool_pebs - returns unused PEBs to the free tree.
+ * @ubi: UBI device description object
+ * @pool: fastmap pool description object
+ */
+static void return_unused_pool_pebs(struct ubi_device *ubi,
+ struct ubi_fm_pool *pool)
+{
+ int i;
+ struct ubi_wl_entry *e;
+
+ for (i = pool->used; i < pool->size; i++) {
+ e = ubi->lookuptbl[pool->pebs[i]];
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ }
+}
+
+/**
+ * refill_wl_pool - refills the fastmap pool used by the
+ * WL sub-system.
+ * @ubi: UBI device description object
+ */
+static void refill_wl_pool(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+
+ return_unused_pool_pebs(ubi, pool);
+
+ for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
+ if (!ubi->free.rb_node ||
+ (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ break;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+
+ pool->pebs[pool->size] = e->pnum;
+ }
+ pool->used = 0;
+}
+
+/**
+ * refill_wl_user_pool - refills the fastmap pool used by ubi_wl_get_peb.
+ * @ubi: UBI device description object
+ */
+static void refill_wl_user_pool(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+
+ return_unused_pool_pebs(ubi, pool);
+
+ for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
+ pool->pebs[pool->size] = __wl_get_peb(ubi);
+ if (pool->pebs[pool->size] < 0)
+ break;
+ }
+ pool->used = 0;
+}
+
+/**
+ * ubi_refill_pools - refills all fastmap PEB pools.
+ * @ubi: UBI device description object
+ */
+void ubi_refill_pools(struct ubi_device *ubi)
+{
+ spin_lock(&ubi->wl_lock);
+ refill_wl_pool(ubi);
+ refill_wl_user_pool(ubi);
+ spin_unlock(&ubi->wl_lock);
+}
+
+/* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of
+ * the fastmap pool.
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int ret;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+
+ if (!pool->size || !wl_pool->size || pool->used == pool->size ||
+ wl_pool->used == wl_pool->size)
+ ubi_update_fastmap(ubi);
+
+	/* we did not get a single free PEB */
+ if (!pool->size)
+ ret = -ENOSPC;
+ else {
+ spin_lock(&ubi->wl_lock);
+ ret = pool->pebs[pool->used++];
+ prot_queue_add(ubi, ubi->lookuptbl[ret]);
+ spin_unlock(&ubi->wl_lock);
+ }
+
+ return ret;
+}
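
A userspace sketch of the pool pattern ubi_wl_get_peb() follows above: PEBs are handed out from a pre-filled array, and only when the array is exhausted does the expensive refill step run. refill() below is a stand-in for ubi_update_fastmap()/ubi_refill_pools(); the pool layout mirrors the used/size counters, but everything else is an assumption for illustration.

#include <stdio.h>

#define POOL_MAX 8

struct pool { int pebs[POOL_MAX]; int size; int used; };

static int next_free = 100;     /* fake source of free PEB numbers */

static void refill(struct pool *p)
{
        for (p->size = 0; p->size < POOL_MAX; p->size++)
                p->pebs[p->size] = next_free++;
        p->used = 0;
}

static int pool_get(struct pool *p)
{
        if (!p->size || p->used == p->size)
                refill(p);              /* kernel: write a new fastmap here */
        if (!p->size)
                return -1;              /* kernel: -ENOSPC */
        return p->pebs[p->used++];
}

int main(void)
{
        struct pool p = { .size = 0, .used = 0 };
        int i;

        for (i = 0; i < 10; i++)
                printf("got PEB %d\n", pool_get(&p));
        return 0;
}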
+
+/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int pnum;
+
+ if (pool->used == pool->size || !pool->size) {
+ /* We cannot update the fastmap here because this
+ * function is called in atomic context.
+ * Let's fail here and refill/update it as soon as possible. */
+ schedule_work(&ubi->fm_work);
+ return NULL;
+ } else {
+ pnum = pool->pebs[pool->used++];
+ return ubi->lookuptbl[pnum];
+ }
+}
+#else
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ ubi->free_count--;
+ ubi_assert(ubi->free_count >= 0);
+ rb_erase(&e->u.rb, &ubi->free);
+
+ return e;
+}
+
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int peb, err;
+
+ spin_lock(&ubi->wl_lock);
+ peb = __wl_get_peb(ubi);
spin_unlock(&ubi->wl_lock);
- err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
- ubi->peb_size - ubi->vid_hdr_aloffset);
+ if (peb < 0)
+ return peb;
+
+ err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
+ ubi->peb_size - ubi->vid_hdr_aloffset);
if (err) {
- ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
+ ubi_err("new PEB %d does not contain all 0xFF bytes", peb);
return err;
}
- return e->pnum;
+ return peb;
}
+#endif
/**
* prot_queue_del - remove a physical eraseblock from the protection queue.
@@ -486,7 +716,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
if (!e)
return -ENODEV;
- if (paranoid_check_in_pq(ubi, e))
+ if (self_check_in_pq(ubi, e))
return -ENODEV;
list_del(&e->u.list);
@@ -512,7 +742,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
- err = paranoid_check_ec(ubi, e->pnum, e->ec);
+ err = self_check_ec(ubi, e->pnum, e->ec);
if (err)
return -EINVAL;
@@ -600,41 +830,72 @@ repeat:
}
/**
- * schedule_ubi_work - schedule a work.
+ * __schedule_ubi_work - schedule a work.
* @ubi: UBI device description object
* @wrk: the work to schedule
*
* This function adds a work defined by @wrk to the tail of the pending works
- * list.
+ * list. Can only be used if ubi->work_sem is already held in read mode!
*/
-static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
spin_lock(&ubi->wl_lock);
list_add_tail(&wrk->list, &ubi->works);
ubi_assert(ubi->works_count >= 0);
ubi->works_count += 1;
- if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled())
+ if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
}
+/**
+ * schedule_ubi_work - schedule a work.
+ * @ubi: UBI device description object
+ * @wrk: the work to schedule
+ *
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list.
+ */
+static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+{
+ down_read(&ubi->work_sem);
+ __schedule_ubi_work(ubi, wrk);
+ up_read(&ubi->work_sem);
+}
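
The split into __schedule_ubi_work() and schedule_ubi_work() above hinges on ubi->work_sem: producers enqueue work under a read lock, while a flusher that must wait for all in-flight producers takes the same lock for writing (see the down_write/up_write pair later in ubi_wl_flush()). A plain pthread rwlock model of that shape is sketched below; it is not UBI code and all names are assumptions.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;
static int works_count;

static void schedule_work(void)
{
        pthread_rwlock_rdlock(&work_sem);   /* like down_read(&ubi->work_sem) */
        works_count++;                      /* add to the pending list */
        pthread_rwlock_unlock(&work_sem);
}

static void wait_for_producers(void)
{
        /* Taking the lock for writing succeeds only once no producer
         * holds it for reading - the same barrier ubi_wl_flush() uses. */
        pthread_rwlock_wrlock(&work_sem);
        pthread_rwlock_unlock(&work_sem);
}

int main(void)
{
        schedule_work();
        wait_for_producers();
        printf("%d work(s) queued, no producer in flight\n", works_count);
        return 0;
}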
+
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int cancel);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_is_erase_work - checks whether a work is erase work.
+ * @wrk: The work object to be checked
+ */
+int ubi_is_erase_work(struct ubi_work *wrk)
+{
+ return wrk->func == erase_worker;
+}
+#endif
+
/**
* schedule_erase - schedule an erase work.
* @ubi: UBI device description object
* @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
*
* This function returns zero in case of success and a %-ENOMEM in case of
* failure.
*/
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
- int torture)
+ int vol_id, int lnum, int torture)
{
struct ubi_work *wl_wrk;
+ ubi_assert(e);
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
e->pnum, e->ec, torture);
@@ -644,6 +905,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
wl_wrk->func = &erase_worker;
wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
wl_wrk->torture = torture;
schedule_ubi_work(ubi, wl_wrk);
@@ -651,6 +914,79 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
}
/**
+ * do_sync_erase - run the erase worker synchronously.
+ * @ubi: UBI device description object
+ * @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ */
+static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int vol_id, int lnum, int torture)
+{
+ struct ubi_work *wl_wrk;
+
+ dbg_wl("sync erase of PEB %i", e->pnum);
+
+ wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wl_wrk)
+ return -ENOMEM;
+
+ wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
+ wl_wrk->torture = torture;
+
+ return erase_worker(ubi, wl_wrk, 0);
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
+ * sub-system.
+ * see: ubi_wl_put_peb()
+ *
+ * @ubi: UBI device description object
+ * @fm_e: physical eraseblock to return
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if this physical eraseblock has to be tortured
+ */
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
+ int lnum, int torture)
+{
+ struct ubi_wl_entry *e;
+ int vol_id, pnum = fm_e->pnum;
+
+ dbg_wl("PEB %d", pnum);
+
+ ubi_assert(pnum >= 0);
+ ubi_assert(pnum < ubi->peb_count);
+
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+
+ /* This can happen if we recovered from a fastmap the very
+	 * first time and are now writing a new one. In this case the wl system
+ * has never seen any PEB used by the original fastmap.
+ */
+ if (!e) {
+ e = fm_e;
+ ubi_assert(e->ec >= 0);
+ ubi->lookuptbl[pnum] = e;
+ } else {
+ e->ec = fm_e->ec;
+ kfree(fm_e);
+ }
+
+ spin_unlock(&ubi->wl_lock);
+
+ vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
+ return schedule_erase(ubi, e, vol_id, lnum, torture);
+}
+#endif
+
+/**
* wear_leveling_worker - wear-leveling worker function.
* @ubi: UBI device description object
* @wrk: the work object
@@ -665,6 +1001,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
int vol_id = -1, uninitialized_var(lnum);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ int anchor = wrk->anchor;
+#endif
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_hdr *vid_hdr;
@@ -698,21 +1037,46 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
goto out_cancel;
}
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* Check whether we need to produce an anchor PEB */
+ if (!anchor)
+ anchor = !anchor_pebs_avalible(&ubi->free);
+
+ if (anchor) {
+ e1 = find_anchor_wl_entry(&ubi->used);
+ if (!e1)
+ goto out_cancel;
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
+ rb_erase(&e1->u.rb, &ubi->used);
+ dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
+ } else if (!ubi->scrub.rb_node) {
+#else
if (!ubi->scrub.rb_node) {
+#endif
/*
* Now pick the least worn-out used physical eraseblock and a
* highly worn-out free physical eraseblock. If the erase
* counters differ much enough, start wear-leveling.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
dbg_wl("no WL needed: min used EC %d, max free EC %d",
e1->ec, e2->ec);
+
+ /* Give the unused PEB back */
+ wl_tree_add(e2, &ubi->free);
+ ubi->free_count++;
goto out_cancel;
}
- paranoid_check_in_wl_tree(e1, &ubi->used);
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("move PEB %d EC %d to PEB %d EC %d",
e1->pnum, e1->ec, e2->pnum, e2->ec);
@@ -720,14 +1084,15 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
/* Perform scrubbing */
scrubbing = 1;
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- paranoid_check_in_wl_tree(e1, &ubi->scrub);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->scrub);
rb_erase(&e1->u.rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
- paranoid_check_in_wl_tree(e2, &ubi->free);
- rb_erase(&e2->u.rb, &ubi->free);
ubi->move_from = e1;
ubi->move_to = e2;
spin_unlock(&ubi->wl_lock);
@@ -792,8 +1157,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
protect = 1;
goto out_not_moved;
}
-
- if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+ if (err == MOVE_RETRY) {
+ scrubbing = 1;
+ goto out_not_moved;
+ }
+ if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
err == MOVE_TARGET_RD_ERR) {
/*
* Target PEB had bit-flips or write error - torture it.
@@ -841,7 +1209,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- err = schedule_erase(ubi, e1, 0);
+ err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e1);
if (e2)
@@ -856,7 +1224,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
*/
dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
e2->pnum, vol_id, lnum);
- err = schedule_erase(ubi, e2, 0);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e2);
goto out_ro;
@@ -895,7 +1263,7 @@ out_not_moved:
spin_unlock(&ubi->wl_lock);
ubi_free_vid_hdr(ubi, vid_hdr);
- err = schedule_erase(ubi, e2, torture);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e2);
goto out_ro;
@@ -936,12 +1304,13 @@ out_cancel:
/**
* ensure_wear_leveling - schedule wear-leveling if it is needed.
* @ubi: UBI device description object
+ * @nested: set to non-zero if this function is called from the UBI worker
*
* This function checks if it is time to start wear-leveling and schedules it
* if yes. This function returns zero in case of success and a negative error
* code in case of failure.
*/
-static int ensure_wear_leveling(struct ubi_device *ubi)
+static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
int err = 0;
struct ubi_wl_entry *e1;
@@ -969,7 +1338,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
* %UBI_WL_THRESHOLD.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+ e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
goto out_unlock;
@@ -986,8 +1355,12 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
goto out_cancel;
}
+ wrk->anchor = 0;
wrk->func = &wear_leveling_worker;
- schedule_ubi_work(ubi, wrk);
+ if (nested)
+ __schedule_ubi_work(ubi, wrk);
+ else
+ schedule_ubi_work(ubi, wrk);
return err;
out_cancel:
@@ -998,6 +1371,38 @@ out_unlock:
return err;
}
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
+ * @ubi: UBI device description object
+ */
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+{
+ struct ubi_work *wrk;
+
+ spin_lock(&ubi->wl_lock);
+ if (ubi->wl_scheduled) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+ ubi->wl_scheduled = 1;
+ spin_unlock(&ubi->wl_lock);
+
+ wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wrk) {
+ spin_lock(&ubi->wl_lock);
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ return -ENOMEM;
+ }
+
+ wrk->anchor = 1;
+ wrk->func = &wear_leveling_worker;
+ schedule_ubi_work(ubi, wrk);
+ return 0;
+}
+#endif
+
/**
* erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
@@ -1013,7 +1418,10 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int cancel)
{
struct ubi_wl_entry *e = wl_wrk->e;
- int pnum = e->pnum, err, need;
+ int pnum = e->pnum;
+ int vol_id = wl_wrk->vol_id;
+ int lnum = wl_wrk->lnum;
+ int err, available_consumed = 0;
if (cancel) {
dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1022,7 +1430,10 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
return 0;
}
- dbg_wl("erase PEB %d EC %d", pnum, e->ec);
+ dbg_wl("erase PEB %d EC %d LEB %d:%d",
+ pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
+
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
@@ -1031,6 +1442,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
spin_unlock(&ubi->wl_lock);
/*
@@ -1040,33 +1452,34 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
serve_prot_queue(ubi);
/* And take care about wear-leveling */
- err = ensure_wear_leveling(ubi);
+ err = ensure_wear_leveling(ubi, 1);
return err;
}
ubi_err("failed to erase PEB %d, error %d", pnum, err);
kfree(wl_wrk);
- kmem_cache_free(ubi_wl_entry_slab, e);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) {
int err1;
/* Re-schedule the LEB for erasure */
- err1 = schedule_erase(ubi, e, 0);
+ err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
if (err1) {
err = err1;
goto out_ro;
}
return err;
- } else if (err != -EIO) {
+ }
+
+ kmem_cache_free(ubi_wl_entry_slab, e);
+ if (err != -EIO)
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
* this physical eraseblock for erasure again would cause
* errors again and again. Well, lets switch to R/O mode.
*/
goto out_ro;
- }
/* It is %-EIO, the PEB went bad */
@@ -1076,20 +1489,14 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
}
spin_lock(&ubi->volumes_lock);
- need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
- if (need > 0) {
- need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
- ubi->avail_pebs -= need;
- ubi->rsvd_pebs += need;
- ubi->beb_rsvd_pebs += need;
- if (need > 0)
- ubi_msg("reserve more %d PEBs", need);
- }
-
 	if (ubi->beb_rsvd_pebs == 0) {
- spin_unlock(&ubi->volumes_lock);
- ubi_err("no reserved physical eraseblocks");
- goto out_ro;
+ if (ubi->avail_pebs == 0) {
+ spin_unlock(&ubi->volumes_lock);
+ ubi_err("no reserved/available physical eraseblocks");
+ goto out_ro;
+ }
+ ubi->avail_pebs -= 1;
+ available_consumed = 1;
}
spin_unlock(&ubi->volumes_lock);
@@ -1099,19 +1506,36 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
goto out_ro;
spin_lock(&ubi->volumes_lock);
- ubi->beb_rsvd_pebs -= 1;
+ if (ubi->beb_rsvd_pebs > 0) {
+ if (available_consumed) {
+ /*
+ * The amount of reserved PEBs increased since we last
+ * checked.
+ */
+ ubi->avail_pebs += 1;
+ available_consumed = 0;
+ }
+ ubi->beb_rsvd_pebs -= 1;
+ }
ubi->bad_peb_count += 1;
ubi->good_peb_count -= 1;
ubi_calculate_reserved(ubi);
- if (ubi->beb_rsvd_pebs)
+ if (available_consumed)
+ ubi_warn("no PEBs in the reserved pool, used an available PEB");
+ else if (ubi->beb_rsvd_pebs)
ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
else
- ubi_warn("last PEB from the reserved pool was used");
+ ubi_warn("last PEB from the reserve was used");
spin_unlock(&ubi->volumes_lock);
return err;
out_ro:
+ if (available_consumed) {
+ spin_lock(&ubi->volumes_lock);
+ ubi->avail_pebs += 1;
+ spin_unlock(&ubi->volumes_lock);
+ }
ubi_ro_mode(ubi);
return err;
}
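
A condensed model of the reserve accounting the reworked erase_worker() performs when a PEB turns out to be bad: prefer the bad-block reserve, fall back to the general available pool, and give that borrowed PEB back if the reserve grew again in the meantime. Field names follow the driver; the struct itself and the simplified flow are assumptions for illustration.

#include <stdio.h>

struct counters { int beb_rsvd_pebs; int avail_pebs; int bad_peb_count; };

static int mark_bad(struct counters *c)
{
        int available_consumed = 0;

        if (c->beb_rsvd_pebs == 0) {
                if (c->avail_pebs == 0)
                        return -1;              /* nothing left: go read-only */
                c->avail_pebs--;                /* borrow from the general pool */
                available_consumed = 1;
        }

        /* ...the actual ubi_io_mark_bad() call would happen here... */

        if (c->beb_rsvd_pebs > 0) {
                if (available_consumed) {
                        c->avail_pebs++;        /* reserve grew back: return loan */
                        available_consumed = 0;
                }
                c->beb_rsvd_pebs--;
        }
        c->bad_peb_count++;
        return available_consumed;              /* 1 if the pool paid for it */
}

int main(void)
{
        struct counters c = { .beb_rsvd_pebs = 0, .avail_pebs = 2 };

        printf("available pool used: %d\n", mark_bad(&c));
        return 0;
}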
@@ -1119,6 +1543,8 @@ out_ro:
/**
* ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
* @ubi: UBI device description object
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
* @pnum: physical eraseblock to return
* @torture: if this physical eraseblock has to be tortured
*
@@ -1127,7 +1553,8 @@ out_ro:
* occurred to this @pnum and it has to be tested. This function returns zero
* in case of success, and a negative error code in case of failure.
*/
-int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture)
{
int err;
struct ubi_wl_entry *e;
@@ -1169,13 +1596,13 @@ retry:
return 0;
} else {
if (in_wl_tree(e, &ubi->used)) {
- paranoid_check_in_wl_tree(e, &ubi->used);
+ self_check_in_wl_tree(ubi, e, &ubi->used);
rb_erase(&e->u.rb, &ubi->used);
} else if (in_wl_tree(e, &ubi->scrub)) {
- paranoid_check_in_wl_tree(e, &ubi->scrub);
+ self_check_in_wl_tree(ubi, e, &ubi->scrub);
rb_erase(&e->u.rb, &ubi->scrub);
} else if (in_wl_tree(e, &ubi->erroneous)) {
- paranoid_check_in_wl_tree(e, &ubi->erroneous);
+ self_check_in_wl_tree(ubi, e, &ubi->erroneous);
rb_erase(&e->u.rb, &ubi->erroneous);
ubi->erroneous_peb_count -= 1;
ubi_assert(ubi->erroneous_peb_count >= 0);
@@ -1193,7 +1620,7 @@ retry:
}
spin_unlock(&ubi->wl_lock);
- err = schedule_erase(ubi, e, torture);
+ err = schedule_erase(ubi, e, vol_id, lnum, torture);
if (err) {
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->used);
@@ -1217,7 +1644,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
struct ubi_wl_entry *e;
- dbg_msg("schedule PEB %d for scrubbing", pnum);
+ ubi_msg("schedule PEB %d for scrubbing", pnum);
retry:
spin_lock(&ubi->wl_lock);
@@ -1242,7 +1669,7 @@ retry:
}
if (in_wl_tree(e, &ubi->used)) {
- paranoid_check_in_wl_tree(e, &ubi->used);
+ self_check_in_wl_tree(ubi, e, &ubi->used);
rb_erase(&e->u.rb, &ubi->used);
} else {
int err;
@@ -1263,29 +1690,60 @@ retry:
* Technically scrubbing is the same as wear-leveling, so it is done
* by the WL worker.
*/
- return ensure_wear_leveling(ubi);
+ return ensure_wear_leveling(ubi, 0);
}
/**
* ubi_wl_flush - flush all pending works.
* @ubi: UBI device description object
+ * @vol_id: the volume id to flush for
+ * @lnum: the logical eraseblock number to flush for
*
- * This function returns zero in case of success and a negative error code in
- * case of failure.
+ * This function executes all pending works for a particular volume id /
+ * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
+ * acts as a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
*/
-int ubi_wl_flush(struct ubi_device *ubi)
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
{
- int err;
+ int err = 0;
+ int found = 1;
/*
* Erase while the pending works queue is not empty, but not more than
* the number of currently pending works.
*/
- dbg_wl("flush (%d pending works)", ubi->works_count);
- while (ubi->works_count) {
- err = do_work(ubi);
- if (err)
- return err;
+ dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
+ vol_id, lnum, ubi->works_count);
+
+ while (found) {
+ struct ubi_work *wrk;
+ found = 0;
+
+ down_read(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
+ list_for_each_entry(wrk, &ubi->works, list) {
+ if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
+ (lnum == UBI_ALL || wrk->lnum == lnum)) {
+ list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ spin_unlock(&ubi->wl_lock);
+
+ err = wrk->func(ubi, wrk, 0);
+ if (err) {
+ up_read(&ubi->work_sem);
+ return err;
+ }
+
+ spin_lock(&ubi->wl_lock);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
}
/*
@@ -1295,18 +1753,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
down_write(&ubi->work_sem);
up_write(&ubi->work_sem);
- /*
- * And in case last was the WL worker and it canceled the LEB
- * movement, flush again.
- */
- while (ubi->works_count) {
- dbg_wl("flush more (%d pending works)", ubi->works_count);
- err = do_work(ubi);
- if (err)
- return err;
- }
-
- return 0;
+ return err;
}
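
A small sketch of the matching rule the reworked ubi_wl_flush() applies to the pending-work list: a work item is executed if its volume ID and LEB number both match the request, where UBI_ALL acts as a wildcard. The work list and the match helper below are illustrative assumptions.

#include <stdio.h>

#define UBI_ALL -1   /* wildcard, as used by the new flush interface */

struct work { int vol_id; int lnum; };

static int work_matches(const struct work *w, int vol_id, int lnum)
{
        return (vol_id == UBI_ALL || w->vol_id == vol_id) &&
               (lnum == UBI_ALL || w->lnum == lnum);
}

int main(void)
{
        struct work pending[] = { {0, 3}, {0, 7}, {2, 3} };
        int i;

        /* Flush everything queued for LEB 0:3 only. */
        for (i = 0; i < 3; i++)
                if (work_matches(&pending[i], 0, 3))
                        printf("run work for LEB %d:%d\n",
                               pending[i].vol_id, pending[i].lnum);
        return 0;
}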
/**
@@ -1364,7 +1811,7 @@ int ubi_thread(void *u)
spin_lock(&ubi->wl_lock);
if (list_empty(&ubi->works) || ubi->ro_mode ||
- !ubi->thread_enabled || ubi_dbg_is_bgt_disabled()) {
+ !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&ubi->wl_lock);
schedule();
@@ -1415,27 +1862,30 @@ static void cancel_pending(struct ubi_device *ubi)
}
/**
- * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
+ * ubi_wl_init - initialize the WL sub-system using attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns zero in case of success, and a negative error code in
* case of failure.
*/
-int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
- int err, i;
+ int err, i, reserved_pebs, found_pebs = 0;
struct rb_node *rb1, *rb2;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb, *tmp;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp;
struct ubi_wl_entry *e;
ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
spin_lock_init(&ubi->wl_lock);
mutex_init(&ubi->move_mutex);
init_rwsem(&ubi->work_sem);
- ubi->max_ec = si->max_ec;
+ ubi->max_ec = ai->max_ec;
INIT_LIST_HEAD(&ubi->works);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
+#endif
sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
@@ -1448,48 +1898,59 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
INIT_LIST_HEAD(&ubi->pq[i]);
ubi->pq_head = 0;
- list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
+ list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, 0)) {
+ if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
kmem_cache_free(ubi_wl_entry_slab, e);
goto out_free;
}
+
+ found_pebs++;
}
- list_for_each_entry(seb, &si->free, u.list) {
+ ubi->free_count = 0;
+ list_for_each_entry(aeb, &ai->free, u.list) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
ubi_assert(e->ec >= 0);
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+
ubi->lookuptbl[e->pnum] = e;
+
+ found_pebs++;
}
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
ubi->lookuptbl[e->pnum] = e;
- if (!seb->scrub) {
+
+ if (!aeb->scrub) {
dbg_wl("add PEB %d EC %d to the used tree",
e->pnum, e->ec);
wl_tree_add(e, &ubi->used);
@@ -1498,22 +1959,38 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
e->pnum, e->ec);
wl_tree_add(e, &ubi->scrub);
}
+
+ found_pebs++;
}
}
- if (ubi->avail_pebs < WL_RESERVED_PEBS) {
+ dbg_wl("found %i PEBs", found_pebs);
+
+ if (ubi->fm)
+ ubi_assert(ubi->good_peb_count == \
+ found_pebs + ubi->fm->used_blocks);
+ else
+ ubi_assert(ubi->good_peb_count == found_pebs);
+
+ reserved_pebs = WL_RESERVED_PEBS;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* Reserve enough LEBs to store two fastmaps. */
+ reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
+#endif
+
+ if (ubi->avail_pebs < reserved_pebs) {
ubi_err("no enough physical eraseblocks (%d, need %d)",
- ubi->avail_pebs, WL_RESERVED_PEBS);
+ ubi->avail_pebs, reserved_pebs);
if (ubi->corr_peb_count)
ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
goto out_free;
}
- ubi->avail_pebs -= WL_RESERVED_PEBS;
- ubi->rsvd_pebs += WL_RESERVED_PEBS;
+ ubi->avail_pebs -= reserved_pebs;
+ ubi->rsvd_pebs += reserved_pebs;
/* Schedule wear-leveling if needed */
- err = ensure_wear_leveling(ubi);
+ err = ensure_wear_leveling(ubi, 0);
if (err)
goto out_free;
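
The arithmetic behind the new reserved_pebs computation above: on top of the usual WL reserve, enough PEBs are set aside to hold two complete fastmaps. The numbers below are made-up example values; only the truncating integer division mirrors the code.

#include <stdio.h>

int main(void)
{
        int wl_reserved = 1;            /* stand-in for WL_RESERVED_PEBS   */
        int fm_size  = 256 * 1024;      /* assumed fastmap size in bytes   */
        int leb_size = 124 * 1024;      /* assumed usable LEB size         */
        int reserved = wl_reserved + (fm_size / leb_size) * 2;

        printf("reserving %d PEBs for WL and fastmap\n", reserved);
        return 0;
}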
@@ -1561,10 +2038,8 @@ void ubi_wl_close(struct ubi_device *ubi)
kfree(ubi->lookuptbl);
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
+ * self_check_ec - make sure that the erase counter of a PEB is correct.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @ec: the erase counter to check
@@ -1573,13 +2048,13 @@ void ubi_wl_close(struct ubi_device *ubi)
* is equivalent to @ec, and a negative error code if not or if an error
* occurred.
*/
-static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
int err;
long long read_ec;
struct ubi_ec_hdr *ec_hdr;
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1594,10 +2069,10 @@ static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
}
read_ec = be64_to_cpu(ec_hdr->ec);
- if (ec != read_ec) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ if (ec != read_ec && read_ec - ec > 1) {
+ ubi_err("self-check failed for PEB %d", pnum);
ubi_err("read EC is %lld, should be %d", read_ec, ec);
- ubi_dbg_dump_stack();
+ dump_stack();
err = 1;
} else
err = 0;
@@ -1608,42 +2083,44 @@ out_free:
}
/**
- * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
+ * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
+ * @ubi: UBI device description object
* @e: the wear-leveling entry to check
* @root: the root of the tree
*
* This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
* is not.
*/
-static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
- struct rb_root *root)
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root)
{
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
if (in_wl_tree(e, root))
return 0;
- ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
+ ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ",
e->pnum, e->ec, root);
- ubi_dbg_dump_stack();
+ dump_stack();
return -EINVAL;
}
/**
- * paranoid_check_in_pq - check if wear-leveling entry is in the protection
+ * self_check_in_pq - check if wear-leveling entry is in the protection
* queue.
* @ubi: UBI device description object
* @e: the wear-leveling entry to check
*
* This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
*/
-static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
{
struct ubi_wl_entry *p;
int i;
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
@@ -1651,10 +2128,8 @@ static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
if (p == e)
return 0;
- ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
+ ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
e->pnum, e->ec);
- ubi_dbg_dump_stack();
+ dump_stack();
return -EINVAL;
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */