Diffstat (limited to 'drivers/mtd/nand')
37 files changed, 6065 insertions, 560 deletions
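Note: a change repeated across many of the drivers below is the extra NULL argument passed to nand_scan_ident(). As a minimal sketch (not part of this patch), assuming the new third parameter is an optional driver-supplied flash ID table and that existing callers pass NULL to keep the previous lookup behaviour, a typical probe path now reads:

	/* illustrative only; struct and function names besides the two
	 * nand_scan_*() calls are assumptions, not taken from this patch */
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/nand.h>

	static int example_nand_probe(struct mtd_info *mtd)
	{
		int err;

		/* first scan: identify the chip; NULL selects the built-in ID table */
		err = nand_scan_ident(mtd, 1, NULL);
		if (err)
			return err;		/* no NAND chip found */

		/* ... driver-specific ECC and layout setup would go here ... */

		/* second scan: finish setup (bad block table, mtd ops) */
		return nand_scan_tail(mtd);
	}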
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 42e5ea49e97..98a04b3c952 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -2,11 +2,23 @@ menuconfig MTD_NAND tristate "NAND Device Support" depends on MTD select MTD_NAND_IDS + select MTD_NAND_ECC help This enables support for accessing all type of NAND flash devices. For further information see <http://www.linux-mtd.infradead.org/doc/nand.html>. +config MTD_NAND_ECC + tristate + +config MTD_NAND_ECC_SMC + bool "NAND ECC Smart Media byte order" + depends on MTD_NAND_ECC + default n + help + Software ECC according to the Smart Media Specification. + The original Linux implementation had byte 0 and 1 swapped. + if MTD_NAND config MTD_NAND_VERIFY_WRITE @@ -18,12 +30,9 @@ config MTD_NAND_VERIFY_WRITE device thinks the write was successful, a bit could have been flipped accidentally due to device wear or something else. -config MTD_NAND_ECC_SMC - bool "NAND ECC Smart Media byte order" +config MTD_SM_COMMON + tristate default n - help - Software ECC according to the Smart Media Specification. - The original Linux implementation had byte 0 and 1 swapped. config MTD_NAND_MUSEUM_IDS bool "Enable chip ids for obsolete ancient NAND devices" @@ -41,6 +50,23 @@ config MTD_NAND_AUTCPU12 This enables the driver for the autronix autcpu12 board to access the SmartMediaCard. +config MTD_NAND_DENALI + depends on PCI + tristate "Support Denali NAND controller on Intel Moorestown" + help + Enable the driver for NAND flash on Intel Moorestown, using the + Denali NAND controller core. + +config MTD_NAND_DENALI_SCRATCH_REG_ADDR + hex "Denali NAND size scratch register address" + default "0xFF108018" + help + Some platforms place the NAND chip size in a scratch register + because (some versions of) the driver aren't able to automatically + determine the size of certain chips. Set the address of the + scratch register here to enable this feature. On Intel Moorestown + boards, the scratch register is at 0xFF108018. + config MTD_NAND_EDB7312 tristate "Support for Cirrus Logic EBD7312 evaluation board" depends on ARCH_EDB7312 @@ -95,15 +121,21 @@ config MTD_NAND_OMAP_PREFETCH_DMA or in DMA interrupt mode. Say y for DMA mode or MPU mode will be used -config MTD_NAND_TS7250 - tristate "NAND Flash device on TS-7250 board" - depends on MACH_TS72XX - help - Support for NAND flash on Technologic Systems TS-7250 platform. - config MTD_NAND_IDS tristate +config MTD_NAND_RICOH + tristate "Ricoh xD card reader" + default n + depends on PCI + select MTD_SM_COMMON + help + Enable support for Ricoh R5C852 xD card reader + You also need to enable ether + NAND SSFDC (SmartMedia) read only translation layer' or new + expermental, readwrite + 'SmartMedia/xD new translation layer' + config MTD_NAND_AU1550 tristate "Au1550/1200 NAND support" depends on SOC_AU1200 || SOC_AU1550 @@ -358,8 +390,6 @@ config MTD_NAND_ATMEL_ECC_NONE If unsure, say N - endchoice - endchoice config MTD_NAND_PXA3xx @@ -442,6 +472,13 @@ config MTD_NAND_FSL_UPM Enables support for NAND Flash chips wired onto Freescale PowerPC processor localbus with User-Programmable Machine support. +config MTD_NAND_MPC5121_NFC + tristate "MPC5121 built-in NAND Flash Controller support" + depends on PPC_MPC512x + help + This enables the driver for the NAND flash controller on the + MPC5121 SoC. 
+ config MTD_NAND_MXC tristate "MXC NAND support" depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 @@ -481,11 +518,11 @@ config MTD_NAND_SOCRATES help Enables support for NAND Flash chips wired onto Socrates board. -config MTD_NAND_W90P910 - tristate "Support for NAND on w90p910 evaluation board." +config MTD_NAND_NUC900 + tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards." depends on ARCH_W90X900 && MTD_PARTITIONS help This enables the driver for the NAND Flash on evaluation board based - on w90p910. + on w90p910 / NUC9xx. endif # MTD_NAND diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 1407bd14401..e8ab884ba47 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -2,13 +2,16 @@ # linux/drivers/nand/Makefile # -obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o +obj-$(CONFIG_MTD_NAND) += nand.o +obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o +obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o obj-$(CONFIG_MTD_NAND_SPIA) += spia.o obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o +obj-$(CONFIG_MTD_NAND_DENALI) += denali.o obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o @@ -19,7 +22,6 @@ obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o obj-$(CONFIG_MTD_NAND_H1900) += h1910.o obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o -obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o @@ -39,8 +41,10 @@ obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o -obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o +obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o +obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o +obj-$(CONFIG_MTD_NAND_RICOH) += r852.o nand-objs := nand_base.o nand_bbt.o diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c index 2d6773281fd..8691e0482ed 100644 --- a/drivers/mtd/nand/alauda.c +++ b/drivers/mtd/nand/alauda.c @@ -49,7 +49,7 @@ #define TIMEOUT HZ -static struct usb_device_id alauda_table [] = { +static const struct usb_device_id alauda_table[] = { { USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */ { USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */ { } diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 524e6c9e067..04d30887ca7 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -474,7 +474,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev) } /* first scan to find the device and get the page size */ - if (nand_scan_ident(mtd, 1)) { + if (nand_scan_ident(mtd, 1, NULL)) { res = -ENXIO; goto err_scan_ident; } diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index 43d46e42404..3ffe05db492 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c @@ -451,7 +451,7 @@ static int __init au1xxx_nand_init(void) u32 nand_phys; /* Allocate memory for MTD device structure and private data */ - au1550_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); + au1550_mtd = 
kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); if (!au1550_mtd) { printk("Unable to allocate NAND MTD dev structure.\n"); return -ENOMEM; @@ -460,10 +460,6 @@ static int __init au1xxx_nand_init(void) /* Get pointer to private data */ this = (struct nand_chip *)(&au1550_mtd[1]); - /* Initialize structures */ - memset(au1550_mtd, 0, sizeof(struct mtd_info)); - memset(this, 0, sizeof(struct nand_chip)); - /* Link the private data with the MTD structure */ au1550_mtd->priv = this; au1550_mtd->owner = THIS_MODULE; @@ -544,7 +540,7 @@ static int __init au1xxx_nand_init(void) } nand_phys = (mem_staddr << 4) & 0xFFFC0000; - p_nand = (void __iomem *)ioremap(nand_phys, 0x1000); + p_nand = ioremap(nand_phys, 0x1000); /* make controller and MTD agree */ if (NAND_CS == 0) @@ -589,7 +585,7 @@ static int __init au1xxx_nand_init(void) return 0; outio: - iounmap((void *)p_nand); + iounmap(p_nand); outmem: kfree(au1550_mtd); @@ -610,7 +606,7 @@ static void __exit au1550_cleanup(void) kfree(au1550_mtd); /* Unmap */ - iounmap((void *)p_nand); + iounmap(p_nand); } module_exit(au1550_cleanup); diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c index c997f98eeb3..dfe262c726f 100644 --- a/drivers/mtd/nand/bcm_umi_nand.c +++ b/drivers/mtd/nand/bcm_umi_nand.c @@ -13,7 +13,6 @@ *****************************************************************************/ /* ---- Include Files ---------------------------------------------------- */ -#include <linux/version.h> #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> @@ -447,7 +446,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev) * layout we'll be using. */ - err = nand_scan_ident(board_mtd, 1); + err = nand_scan_ident(board_mtd, 1, NULL); if (err) { printk(KERN_ERR "nand_scan failed: %d\n", err); iounmap(bcm_umi_io_base); diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c index 8506e7e606f..2974995e194 100644 --- a/drivers/mtd/nand/bf5xx_nand.c +++ b/drivers/mtd/nand/bf5xx_nand.c @@ -68,6 +68,27 @@ #define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>" #define DRV_DESC "BF5xx on-chip NAND FLash Controller Driver" +/* NFC_STAT Masks */ +#define NBUSY 0x01 /* Not Busy */ +#define WB_FULL 0x02 /* Write Buffer Full */ +#define PG_WR_STAT 0x04 /* Page Write Pending */ +#define PG_RD_STAT 0x08 /* Page Read Pending */ +#define WB_EMPTY 0x10 /* Write Buffer Empty */ + +/* NFC_IRQSTAT Masks */ +#define NBUSYIRQ 0x01 /* Not Busy IRQ */ +#define WB_OVF 0x02 /* Write Buffer Overflow */ +#define WB_EDGE 0x04 /* Write Buffer Edge Detect */ +#define RD_RDY 0x08 /* Read Data Ready */ +#define WR_DONE 0x10 /* Page Write Done */ + +/* NFC_RST Masks */ +#define ECC_RST 0x01 /* ECC (and NFC counters) Reset */ + +/* NFC_PGCTL Masks */ +#define PG_RD_START 0x01 /* Page Read Start */ +#define PG_WR_START 0x02 /* Page Write Start */ + #ifdef CONFIG_MTD_NAND_BF5XX_HWECC static int hardware_ecc = 1; #else @@ -487,7 +508,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd, * transferred to generate the correct ECC register * values. 
*/ - bfin_write_NFC_RST(0x1); + bfin_write_NFC_RST(ECC_RST); SSYNC(); disable_dma(CH_NFC); @@ -497,7 +518,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd, set_dma_config(CH_NFC, 0x0); set_dma_start_addr(CH_NFC, (unsigned long) buf); -/* The DMAs have different size on BF52x and BF54x */ + /* The DMAs have different size on BF52x and BF54x */ #ifdef CONFIG_BF52x set_dma_x_count(CH_NFC, (page_size >> 1)); set_dma_x_modify(CH_NFC, 2); @@ -517,9 +538,9 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd, /* Start PAGE read/write operation */ if (is_read) - bfin_write_NFC_PGCTL(0x1); + bfin_write_NFC_PGCTL(PG_RD_START); else - bfin_write_NFC_PGCTL(0x2); + bfin_write_NFC_PGCTL(PG_WR_START); wait_for_completion(&info->dma_completion); } diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index e5a9f9ccea6..db1dfc5a1b1 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c @@ -762,7 +762,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev, cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK)); /* Scan to find existence of the device */ - if (nand_scan_ident(mtd, 2)) { + if (nand_scan_ident(mtd, 2, NULL)) { err = -ENXIO; goto out_irq; } @@ -849,7 +849,7 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev) kfree(mtd); } -static struct pci_device_id cafe_nand_tbl[] = { +static const struct pci_device_id cafe_nand_tbl[] = { { PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND, PCI_ANY_ID, PCI_ANY_ID }, { } diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 76e2dc8e62f..9c9d893affe 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -567,8 +567,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev) goto err_nomem; } - vaddr = ioremap(res1->start, res1->end - res1->start); - base = ioremap(res2->start, res2->end - res2->start); + vaddr = ioremap(res1->start, resource_size(res1)); + base = ioremap(res2->start, resource_size(res2)); if (!vaddr || !base) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -EINVAL; @@ -691,7 +691,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev) spin_unlock_irq(&davinci_nand_lock); /* Scan to find existence of the device(s) */ - ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1); + ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL); if (ret < 0) { dev_dbg(&pdev->dev, "no NAND chip(s) found\n"); goto err_scan; diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c new file mode 100644 index 00000000000..ca03428b59c --- /dev/null +++ b/drivers/mtd/nand/denali.c @@ -0,0 +1,2134 @@ +/* + * NAND Flash Controller Device Driver + * Copyright © 2009-2010, Intel Corporation and its suppliers. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ + +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/mtd/mtd.h> +#include <linux/module.h> + +#include "denali.h" + +MODULE_LICENSE("GPL"); + +/* We define a module parameter that allows the user to override + * the hardware and decide what timing mode should be used. + */ +#define NAND_DEFAULT_TIMINGS -1 + +static int onfi_timing_mode = NAND_DEFAULT_TIMINGS; +module_param(onfi_timing_mode, int, S_IRUGO); +MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting. -1 indicates" + " use default timings"); + +#define DENALI_NAND_NAME "denali-nand" + +/* We define a macro here that combines all interrupts this driver uses into + * a single constant value, for convenience. */ +#define DENALI_IRQ_ALL (INTR_STATUS0__DMA_CMD_COMP | \ + INTR_STATUS0__ECC_TRANSACTION_DONE | \ + INTR_STATUS0__ECC_ERR | \ + INTR_STATUS0__PROGRAM_FAIL | \ + INTR_STATUS0__LOAD_COMP | \ + INTR_STATUS0__PROGRAM_COMP | \ + INTR_STATUS0__TIME_OUT | \ + INTR_STATUS0__ERASE_FAIL | \ + INTR_STATUS0__RST_COMP | \ + INTR_STATUS0__ERASE_COMP) + +/* indicates whether or not the internal value for the flash bank is + valid or not */ +#define CHIP_SELECT_INVALID -1 + +#define SUPPORT_8BITECC 1 + +/* This macro divides two integers and rounds fractional values up + * to the nearest integer value. */ +#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y))) + +/* this macro allows us to convert from an MTD structure to our own + * device context (denali) structure. + */ +#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd) + +/* These constants are defined by the driver to enable common driver + configuration options. */ +#define SPARE_ACCESS 0x41 +#define MAIN_ACCESS 0x42 +#define MAIN_SPARE_ACCESS 0x43 + +#define DENALI_READ 0 +#define DENALI_WRITE 0x100 + +/* types of device accesses. We can issue commands and get status */ +#define COMMAND_CYCLE 0 +#define ADDR_CYCLE 1 +#define STATUS_CYCLE 2 + +/* this is a helper macro that allows us to + * format the bank into the proper bits for the controller */ +#define BANK(x) ((x) << 24) + +/* List of platforms this NAND controller has be integrated into */ +static const struct pci_device_id denali_pci_ids[] = { + { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 }, + { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST }, + { /* end: all zeroes */ } +}; + + +/* these are static lookup tables that give us easy access to + registers in the NAND controller. 
+ */ +static const uint32_t intr_status_addresses[4] = {INTR_STATUS0, + INTR_STATUS1, + INTR_STATUS2, + INTR_STATUS3}; + +static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0, + DEVICE_RESET__BANK1, + DEVICE_RESET__BANK2, + DEVICE_RESET__BANK3}; + +static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT, + INTR_STATUS1__TIME_OUT, + INTR_STATUS2__TIME_OUT, + INTR_STATUS3__TIME_OUT}; + +static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP, + INTR_STATUS1__RST_COMP, + INTR_STATUS2__RST_COMP, + INTR_STATUS3__RST_COMP}; + +/* specifies the debug level of the driver */ +static int nand_debug_level = 0; + +/* forward declarations */ +static void clear_interrupts(struct denali_nand_info *denali); +static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask); +static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask); +static uint32_t read_interrupt_status(struct denali_nand_info *denali); + +#define DEBUG_DENALI 0 + +/* This is a wrapper for writing to the denali registers. + * this allows us to create debug information so we can + * observe how the driver is programming the device. + * it uses standard linux convention for (val, addr) */ +static void denali_write32(uint32_t value, void *addr) +{ + iowrite32(value, addr); + +#if DEBUG_DENALI + printk(KERN_ERR "wrote: 0x%x -> 0x%x\n", value, (uint32_t)((uint32_t)addr & 0x1fff)); +#endif +} + +/* Certain operations for the denali NAND controller use an indexed mode to read/write + data. The operation is performed by writing the address value of the command to + the device memory followed by the data. This function abstracts this common + operation. +*/ +static void index_addr(struct denali_nand_info *denali, uint32_t address, uint32_t data) +{ + denali_write32(address, denali->flash_mem); + denali_write32(data, denali->flash_mem + 0x10); +} + +/* Perform an indexed read of the device */ +static void index_addr_read_data(struct denali_nand_info *denali, + uint32_t address, uint32_t *pdata) +{ + denali_write32(address, denali->flash_mem); + *pdata = ioread32(denali->flash_mem + 0x10); +} + +/* We need to buffer some data for some of the NAND core routines. + * The operations manage buffering that data. 
*/ +static void reset_buf(struct denali_nand_info *denali) +{ + denali->buf.head = denali->buf.tail = 0; +} + +static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte) +{ + BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf)); + denali->buf.buf[denali->buf.tail++] = byte; +} + +/* reads the status of the device */ +static void read_status(struct denali_nand_info *denali) +{ + uint32_t cmd = 0x0; + + /* initialize the data buffer to store status */ + reset_buf(denali); + + /* initiate a device status read */ + cmd = MODE_11 | BANK(denali->flash_bank); + index_addr(denali, cmd | COMMAND_CYCLE, 0x70); + denali_write32(cmd | STATUS_CYCLE, denali->flash_mem); + + /* update buffer with status value */ + write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10)); + +#if DEBUG_DENALI + printk("device reporting status value of 0x%2x\n", denali->buf.buf[0]); +#endif +} + +/* resets a specific device connected to the core */ +static void reset_bank(struct denali_nand_info *denali) +{ + uint32_t irq_status = 0; + uint32_t irq_mask = reset_complete[denali->flash_bank] | + operation_timeout[denali->flash_bank]; + int bank = 0; + + clear_interrupts(denali); + + bank = device_reset_banks[denali->flash_bank]; + denali_write32(bank, denali->flash_reg + DEVICE_RESET); + + irq_status = wait_for_irq(denali, irq_mask); + + if (irq_status & operation_timeout[denali->flash_bank]) + { + printk(KERN_ERR "reset bank failed.\n"); + } +} + +/* Reset the flash controller */ +static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali) +{ + uint32_t i; + + nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", + __FILE__, __LINE__, __func__); + + for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) + denali_write32(reset_complete[i] | operation_timeout[i], + denali->flash_reg + intr_status_addresses[i]); + + for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) { + denali_write32(device_reset_banks[i], denali->flash_reg + DEVICE_RESET); + while (!(ioread32(denali->flash_reg + intr_status_addresses[i]) & + (reset_complete[i] | operation_timeout[i]))) + ; + if (ioread32(denali->flash_reg + intr_status_addresses[i]) & + operation_timeout[i]) + nand_dbg_print(NAND_DBG_WARN, + "NAND Reset operation timed out on bank %d\n", i); + } + + for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) + denali_write32(reset_complete[i] | operation_timeout[i], + denali->flash_reg + intr_status_addresses[i]); + + return PASS; +} + +/* this routine calculates the ONFI timing values for a given mode and programs + * the clocking register accordingly. The mode is determined by the get_onfi_nand_para + routine. 
+ */ +static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode) +{ + uint16_t Trea[6] = {40, 30, 25, 20, 20, 16}; + uint16_t Trp[6] = {50, 25, 17, 15, 12, 10}; + uint16_t Treh[6] = {30, 15, 15, 10, 10, 7}; + uint16_t Trc[6] = {100, 50, 35, 30, 25, 20}; + uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15}; + uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5}; + uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25}; + uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70}; + uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100}; + uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100}; + uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60}; + uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15}; + + uint16_t TclsRising = 1; + uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid; + uint16_t dv_window = 0; + uint16_t en_lo, en_hi; + uint16_t acc_clks; + uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt; + + nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", + __FILE__, __LINE__, __func__); + + en_lo = CEIL_DIV(Trp[mode], CLK_X); + en_hi = CEIL_DIV(Treh[mode], CLK_X); +#if ONFI_BLOOM_TIME + if ((en_hi * CLK_X) < (Treh[mode] + 2)) + en_hi++; +#endif + + if ((en_lo + en_hi) * CLK_X < Trc[mode]) + en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X); + + if ((en_lo + en_hi) < CLK_MULTI) + en_lo += CLK_MULTI - en_lo - en_hi; + + while (dv_window < 8) { + data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode]; + + data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode]; + + data_invalid = + data_invalid_rhoh < + data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh; + + dv_window = data_invalid - Trea[mode]; + + if (dv_window < 8) + en_lo++; + } + + acc_clks = CEIL_DIV(Trea[mode], CLK_X); + + while (((acc_clks * CLK_X) - Trea[mode]) < 3) + acc_clks++; + + if ((data_invalid - acc_clks * CLK_X) < 2) + nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n", + __FILE__, __LINE__); + + addr_2_data = CEIL_DIV(Tadl[mode], CLK_X); + re_2_we = CEIL_DIV(Trhw[mode], CLK_X); + re_2_re = CEIL_DIV(Trhz[mode], CLK_X); + we_2_re = CEIL_DIV(Twhr[mode], CLK_X); + cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X); + if (!TclsRising) + cs_cnt = CEIL_DIV(Tcs[mode], CLK_X); + if (cs_cnt == 0) + cs_cnt = 1; + + if (Tcea[mode]) { + while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode]) + cs_cnt++; + } + +#if MODE5_WORKAROUND + if (mode == 5) + acc_clks = 5; +#endif + + /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */ + if ((ioread32(denali->flash_reg + MANUFACTURER_ID) == 0) && + (ioread32(denali->flash_reg + DEVICE_ID) == 0x88)) + acc_clks = 6; + + denali_write32(acc_clks, denali->flash_reg + ACC_CLKS); + denali_write32(re_2_we, denali->flash_reg + RE_2_WE); + denali_write32(re_2_re, denali->flash_reg + RE_2_RE); + denali_write32(we_2_re, denali->flash_reg + WE_2_RE); + denali_write32(addr_2_data, denali->flash_reg + ADDR_2_DATA); + denali_write32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT); + denali_write32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT); + denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT); +} + +/* configures the initial ECC settings for the controller */ +static void set_ecc_config(struct denali_nand_info *denali) +{ +#if SUPPORT_8BITECC + if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) < 4096) || + (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) <= 128)) + denali_write32(8, denali->flash_reg + ECC_CORRECTION); +#endif + + if ((ioread32(denali->flash_reg + ECC_CORRECTION) & ECC_CORRECTION__VALUE) + == 1) { + denali->dev_info.wECCBytesPerSector = 4; + 
denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected; + denali->dev_info.wNumPageSpareFlag = + denali->dev_info.wPageSpareSize - + denali->dev_info.wPageDataSize / + (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) * + denali->dev_info.wECCBytesPerSector + - denali->dev_info.wSpareSkipBytes; + } else { + denali->dev_info.wECCBytesPerSector = + (ioread32(denali->flash_reg + ECC_CORRECTION) & + ECC_CORRECTION__VALUE) * 13 / 8; + if ((denali->dev_info.wECCBytesPerSector) % 2 == 0) + denali->dev_info.wECCBytesPerSector += 2; + else + denali->dev_info.wECCBytesPerSector += 1; + + denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected; + denali->dev_info.wNumPageSpareFlag = denali->dev_info.wPageSpareSize - + denali->dev_info.wPageDataSize / + (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) * + denali->dev_info.wECCBytesPerSector + - denali->dev_info.wSpareSkipBytes; + } +} + +/* queries the NAND device to see what ONFI modes it supports. */ +static uint16_t get_onfi_nand_para(struct denali_nand_info *denali) +{ + int i; + uint16_t blks_lun_l, blks_lun_h, n_of_luns; + uint32_t blockperlun, id; + + denali_write32(DEVICE_RESET__BANK0, denali->flash_reg + DEVICE_RESET); + + while (!((ioread32(denali->flash_reg + INTR_STATUS0) & + INTR_STATUS0__RST_COMP) | + (ioread32(denali->flash_reg + INTR_STATUS0) & + INTR_STATUS0__TIME_OUT))) + ; + + if (ioread32(denali->flash_reg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) { + denali_write32(DEVICE_RESET__BANK1, denali->flash_reg + DEVICE_RESET); + while (!((ioread32(denali->flash_reg + INTR_STATUS1) & + INTR_STATUS1__RST_COMP) | + (ioread32(denali->flash_reg + INTR_STATUS1) & + INTR_STATUS1__TIME_OUT))) + ; + + if (ioread32(denali->flash_reg + INTR_STATUS1) & + INTR_STATUS1__RST_COMP) { + denali_write32(DEVICE_RESET__BANK2, + denali->flash_reg + DEVICE_RESET); + while (!((ioread32(denali->flash_reg + INTR_STATUS2) & + INTR_STATUS2__RST_COMP) | + (ioread32(denali->flash_reg + INTR_STATUS2) & + INTR_STATUS2__TIME_OUT))) + ; + + if (ioread32(denali->flash_reg + INTR_STATUS2) & + INTR_STATUS2__RST_COMP) { + denali_write32(DEVICE_RESET__BANK3, + denali->flash_reg + DEVICE_RESET); + while (!((ioread32(denali->flash_reg + INTR_STATUS3) & + INTR_STATUS3__RST_COMP) | + (ioread32(denali->flash_reg + INTR_STATUS3) & + INTR_STATUS3__TIME_OUT))) + ; + } else { + printk(KERN_ERR "Getting a time out for bank 2!\n"); + } + } else { + printk(KERN_ERR "Getting a time out for bank 1!\n"); + } + } + + denali_write32(INTR_STATUS0__TIME_OUT, denali->flash_reg + INTR_STATUS0); + denali_write32(INTR_STATUS1__TIME_OUT, denali->flash_reg + INTR_STATUS1); + denali_write32(INTR_STATUS2__TIME_OUT, denali->flash_reg + INTR_STATUS2); + denali_write32(INTR_STATUS3__TIME_OUT, denali->flash_reg + INTR_STATUS3); + + denali->dev_info.wONFIDevFeatures = + ioread32(denali->flash_reg + ONFI_DEVICE_FEATURES); + denali->dev_info.wONFIOptCommands = + ioread32(denali->flash_reg + ONFI_OPTIONAL_COMMANDS); + denali->dev_info.wONFITimingMode = + ioread32(denali->flash_reg + ONFI_TIMING_MODE); + denali->dev_info.wONFIPgmCacheTimingMode = + ioread32(denali->flash_reg + ONFI_PGM_CACHE_TIMING_MODE); + + n_of_luns = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) & + ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS; + blks_lun_l = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L); + blks_lun_h = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U); + + blockperlun = (blks_lun_h << 16) | blks_lun_l; + + denali->dev_info.wTotalBlocks = 
n_of_luns * blockperlun; + + if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) & + ONFI_TIMING_MODE__VALUE)) + return FAIL; + + for (i = 5; i > 0; i--) { + if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) & (0x01 << i)) + break; + } + + NAND_ONFi_Timing_Mode(denali, i); + + index_addr(denali, MODE_11 | 0, 0x90); + index_addr(denali, MODE_11 | 1, 0); + + for (i = 0; i < 3; i++) + index_addr_read_data(denali, MODE_11 | 2, &id); + + nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id); + + denali->dev_info.MLCDevice = id & 0x0C; + + /* By now, all the ONFI devices we know support the page cache */ + /* rw feature. So here we enable the pipeline_rw_ahead feature */ + /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */ + /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */ + + return PASS; +} + +static void get_samsung_nand_para(struct denali_nand_info *denali) +{ + uint8_t no_of_planes; + uint32_t blk_size; + uint64_t plane_size, capacity; + uint32_t id_bytes[5]; + int i; + + index_addr(denali, (uint32_t)(MODE_11 | 0), 0x90); + index_addr(denali, (uint32_t)(MODE_11 | 1), 0); + for (i = 0; i < 5; i++) + index_addr_read_data(denali, (uint32_t)(MODE_11 | 2), &id_bytes[i]); + + nand_dbg_print(NAND_DBG_DEBUG, + "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", + id_bytes[0], id_bytes[1], id_bytes[2], + id_bytes[3], id_bytes[4]); + + if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */ + /* Set timing register values according to datasheet */ + denali_write32(5, denali->flash_reg + ACC_CLKS); + denali_write32(20, denali->flash_reg + RE_2_WE); + denali_write32(12, denali->flash_reg + WE_2_RE); + denali_write32(14, denali->flash_reg + ADDR_2_DATA); + denali_write32(3, denali->flash_reg + RDWR_EN_LO_CNT); + denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT); + denali_write32(2, denali->flash_reg + CS_SETUP_CNT); + } + + no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2); + plane_size = (uint64_t)64 << ((id_bytes[4] & 0x70) >> 4); + blk_size = 64 << ((ioread32(denali->flash_reg + DEVICE_PARAM_1) & 0x30) >> 4); + capacity = (uint64_t)128 * plane_size * no_of_planes; + + do_div(capacity, blk_size); + denali->dev_info.wTotalBlocks = capacity; +} + +static void get_toshiba_nand_para(struct denali_nand_info *denali) +{ + void __iomem *scratch_reg; + uint32_t tmp; + + /* Workaround to fix a controller bug which reports a wrong */ + /* spare area size for some kind of Toshiba NAND device */ + if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) && + (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) { + denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); + tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) * + ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE); + denali_write32(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); +#if SUPPORT_15BITECC + denali_write32(15, denali->flash_reg + ECC_CORRECTION); +#elif SUPPORT_8BITECC + denali_write32(8, denali->flash_reg + ECC_CORRECTION); +#endif + } + + /* As Toshiba NAND can not provide it's block number, */ + /* so here we need user to provide the correct block */ + /* number in a scratch register before the Linux NAND */ + /* driver is loaded. 
If no valid value found in the scratch */ + /* register, then we use default block number value */ + scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE); + if (!scratch_reg) { + printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d", + __FILE__, __LINE__); + denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; + } else { + nand_dbg_print(NAND_DBG_WARN, + "Spectra: ioremap reg address: 0x%p\n", scratch_reg); + denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg); + if (denali->dev_info.wTotalBlocks < 512) + denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; + iounmap(scratch_reg); + } +} + +static void get_hynix_nand_para(struct denali_nand_info *denali) +{ + void __iomem *scratch_reg; + uint32_t main_size, spare_size; + + switch (denali->dev_info.wDeviceID) { + case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */ + case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */ + denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK); + denali_write32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE); + denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); + main_size = 4096 * ioread32(denali->flash_reg + DEVICES_CONNECTED); + spare_size = 224 * ioread32(denali->flash_reg + DEVICES_CONNECTED); + denali_write32(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE); + denali_write32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); + denali_write32(0, denali->flash_reg + DEVICE_WIDTH); +#if SUPPORT_15BITECC + denali_write32(15, denali->flash_reg + ECC_CORRECTION); +#elif SUPPORT_8BITECC + denali_write32(8, denali->flash_reg + ECC_CORRECTION); +#endif + denali->dev_info.MLCDevice = 1; + break; + default: + nand_dbg_print(NAND_DBG_WARN, + "Spectra: Unknown Hynix NAND (Device ID: 0x%x)." + "Will use default parameter values instead.\n", + denali->dev_info.wDeviceID); + } + + scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE); + if (!scratch_reg) { + printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d", + __FILE__, __LINE__); + denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; + } else { + nand_dbg_print(NAND_DBG_WARN, + "Spectra: ioremap reg address: 0x%p\n", scratch_reg); + denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg); + if (denali->dev_info.wTotalBlocks < 512) + denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; + iounmap(scratch_reg); + } +} + +/* determines how many NAND chips are connected to the controller. Note for + Intel CE4100 devices we don't support more than one device. + */ +static void find_valid_banks(struct denali_nand_info *denali) +{ + uint32_t id[LLD_MAX_FLASH_BANKS]; + int i; + + denali->total_used_banks = 1; + for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) { + index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90); + index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0); + index_addr_read_data(denali, (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]); + + nand_dbg_print(NAND_DBG_DEBUG, + "Return 1st ID for bank[%d]: %x\n", i, id[i]); + + if (i == 0) { + if (!(id[i] & 0x0ff)) + break; /* WTF? */ + } else { + if ((id[i] & 0x0ff) == (id[0] & 0x0ff)) + denali->total_used_banks++; + else + break; + } + } + + if (denali->platform == INTEL_CE4100) + { + /* Platform limitations of the CE4100 device limit + * users to a single chip solution for NAND. + * Multichip support is not enabled. 
+ */ + if (denali->total_used_banks != 1) + { + printk(KERN_ERR "Sorry, Intel CE4100 only supports " + "a single NAND device.\n"); + BUG(); + } + } + nand_dbg_print(NAND_DBG_DEBUG, + "denali->total_used_banks: %d\n", denali->total_used_banks); +} + +static void detect_partition_feature(struct denali_nand_info *denali) +{ + if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) { + if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) & + PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) { + denali->dev_info.wSpectraStartBlock = + ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) & + MIN_MAX_BANK_1__MIN_VALUE) * + denali->dev_info.wTotalBlocks) + + + (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) & + MIN_BLK_ADDR_1__VALUE); + + denali->dev_info.wSpectraEndBlock = + (((ioread32(denali->flash_reg + MIN_MAX_BANK_1) & + MIN_MAX_BANK_1__MAX_VALUE) >> 2) * + denali->dev_info.wTotalBlocks) + + + (ioread32(denali->flash_reg + MAX_BLK_ADDR_1) & + MAX_BLK_ADDR_1__VALUE); + + denali->dev_info.wTotalBlocks *= denali->total_used_banks; + + if (denali->dev_info.wSpectraEndBlock >= + denali->dev_info.wTotalBlocks) { + denali->dev_info.wSpectraEndBlock = + denali->dev_info.wTotalBlocks - 1; + } + + denali->dev_info.wDataBlockNum = + denali->dev_info.wSpectraEndBlock - + denali->dev_info.wSpectraStartBlock + 1; + } else { + denali->dev_info.wTotalBlocks *= denali->total_used_banks; + denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK; + denali->dev_info.wSpectraEndBlock = + denali->dev_info.wTotalBlocks - 1; + denali->dev_info.wDataBlockNum = + denali->dev_info.wSpectraEndBlock - + denali->dev_info.wSpectraStartBlock + 1; + } + } else { + denali->dev_info.wTotalBlocks *= denali->total_used_banks; + denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK; + denali->dev_info.wSpectraEndBlock = denali->dev_info.wTotalBlocks - 1; + denali->dev_info.wDataBlockNum = + denali->dev_info.wSpectraEndBlock - + denali->dev_info.wSpectraStartBlock + 1; + } +} + +static void dump_device_info(struct denali_nand_info *denali) +{ + nand_dbg_print(NAND_DBG_DEBUG, "denali->dev_info:\n"); + nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n", + denali->dev_info.wDeviceMaker); + nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n", + denali->dev_info.wDeviceID); + nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n", + denali->dev_info.wDeviceType); + nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n", + denali->dev_info.wSpectraStartBlock); + nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n", + denali->dev_info.wSpectraEndBlock); + nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n", + denali->dev_info.wTotalBlocks); + nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n", + denali->dev_info.wPagesPerBlock); + nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n", + denali->dev_info.wPageSize); + nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n", + denali->dev_info.wPageDataSize); + nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n", + denali->dev_info.wPageSpareSize); + nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n", + denali->dev_info.wNumPageSpareFlag); + nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n", + denali->dev_info.wECCBytesPerSector); + nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n", + denali->dev_info.wBlockSize); + nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n", + denali->dev_info.wBlockDataSize); + nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n", + denali->dev_info.wDataBlockNum); + nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n", + denali->dev_info.bPlaneNum); + 
nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n", + denali->dev_info.wDeviceMainAreaSize); + nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n", + denali->dev_info.wDeviceSpareAreaSize); + nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n", + denali->dev_info.wDevicesConnected); + nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n", + denali->dev_info.wDeviceWidth); + nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n", + denali->dev_info.wHWRevision); + nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n", + denali->dev_info.wHWFeatures); + nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n", + denali->dev_info.wONFIDevFeatures); + nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n", + denali->dev_info.wONFIOptCommands); + nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n", + denali->dev_info.wONFITimingMode); + nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n", + denali->dev_info.wONFIPgmCacheTimingMode); + nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n", + denali->dev_info.MLCDevice ? "Yes" : "No"); + nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n", + denali->dev_info.wSpareSkipBytes); + nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n", + denali->dev_info.nBitsInPageNumber); + nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n", + denali->dev_info.nBitsInPageDataSize); + nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n", + denali->dev_info.nBitsInBlockDataSize); +} + +static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali) +{ + uint16_t status = PASS; + uint8_t no_of_planes; + + nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", + __FILE__, __LINE__, __func__); + + denali->dev_info.wDeviceMaker = ioread32(denali->flash_reg + MANUFACTURER_ID); + denali->dev_info.wDeviceID = ioread32(denali->flash_reg + DEVICE_ID); + denali->dev_info.bDeviceParam0 = ioread32(denali->flash_reg + DEVICE_PARAM_0); + denali->dev_info.bDeviceParam1 = ioread32(denali->flash_reg + DEVICE_PARAM_1); + denali->dev_info.bDeviceParam2 = ioread32(denali->flash_reg + DEVICE_PARAM_2); + + denali->dev_info.MLCDevice = ioread32(denali->flash_reg + DEVICE_PARAM_0) & 0x0c; + + if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) & + ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */ + if (FAIL == get_onfi_nand_para(denali)) + return FAIL; + } else if (denali->dev_info.wDeviceMaker == 0xEC) { /* Samsung NAND */ + get_samsung_nand_para(denali); + } else if (denali->dev_info.wDeviceMaker == 0x98) { /* Toshiba NAND */ + get_toshiba_nand_para(denali); + } else if (denali->dev_info.wDeviceMaker == 0xAD) { /* Hynix NAND */ + get_hynix_nand_para(denali); + } else { + denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; + } + + nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:" + "acc_clks: %d, re_2_we: %d, we_2_re: %d," + "addr_2_data: %d, rdwr_en_lo_cnt: %d, " + "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n", + ioread32(denali->flash_reg + ACC_CLKS), + ioread32(denali->flash_reg + RE_2_WE), + ioread32(denali->flash_reg + WE_2_RE), + ioread32(denali->flash_reg + ADDR_2_DATA), + ioread32(denali->flash_reg + RDWR_EN_LO_CNT), + ioread32(denali->flash_reg + RDWR_EN_HI_CNT), + ioread32(denali->flash_reg + CS_SETUP_CNT)); + + denali->dev_info.wHWRevision = ioread32(denali->flash_reg + REVISION); + denali->dev_info.wHWFeatures = ioread32(denali->flash_reg + FEATURES); + + denali->dev_info.wDeviceMainAreaSize = + ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE); + denali->dev_info.wDeviceSpareAreaSize = + 
ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE); + + denali->dev_info.wPageDataSize = + ioread32(denali->flash_reg + LOGICAL_PAGE_DATA_SIZE); + + /* Note: When using the Micon 4K NAND device, the controller will report + * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes. + * And if force set it to 218 bytes, the controller can not work + * correctly. So just let it be. But keep in mind that this bug may + * cause + * other problems in future. - Yunpeng 2008-10-10 + */ + denali->dev_info.wPageSpareSize = + ioread32(denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); + + denali->dev_info.wPagesPerBlock = ioread32(denali->flash_reg + PAGES_PER_BLOCK); + + denali->dev_info.wPageSize = + denali->dev_info.wPageDataSize + denali->dev_info.wPageSpareSize; + denali->dev_info.wBlockSize = + denali->dev_info.wPageSize * denali->dev_info.wPagesPerBlock; + denali->dev_info.wBlockDataSize = + denali->dev_info.wPagesPerBlock * denali->dev_info.wPageDataSize; + + denali->dev_info.wDeviceWidth = ioread32(denali->flash_reg + DEVICE_WIDTH); + denali->dev_info.wDeviceType = + ((ioread32(denali->flash_reg + DEVICE_WIDTH) > 0) ? 16 : 8); + + denali->dev_info.wDevicesConnected = ioread32(denali->flash_reg + DEVICES_CONNECTED); + + denali->dev_info.wSpareSkipBytes = + ioread32(denali->flash_reg + SPARE_AREA_SKIP_BYTES) * + denali->dev_info.wDevicesConnected; + + denali->dev_info.nBitsInPageNumber = + ilog2(denali->dev_info.wPagesPerBlock); + denali->dev_info.nBitsInPageDataSize = + ilog2(denali->dev_info.wPageDataSize); + denali->dev_info.nBitsInBlockDataSize = + ilog2(denali->dev_info.wBlockDataSize); + + set_ecc_config(denali); + + no_of_planes = ioread32(denali->flash_reg + NUMBER_OF_PLANES) & + NUMBER_OF_PLANES__VALUE; + + switch (no_of_planes) { + case 0: + case 1: + case 3: + case 7: + denali->dev_info.bPlaneNum = no_of_planes + 1; + break; + default: + status = FAIL; + break; + } + + find_valid_banks(denali); + + detect_partition_feature(denali); + + dump_device_info(denali); + + /* If the user specified to override the default timings + * with a specific ONFI mode, we apply those changes here. 
+ */ + if (onfi_timing_mode != NAND_DEFAULT_TIMINGS) + { + NAND_ONFi_Timing_Mode(denali, onfi_timing_mode); + } + + return status; +} + +static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, + uint16_t INT_ENABLE) +{ + nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", + __FILE__, __LINE__, __func__); + + if (INT_ENABLE) + denali_write32(1, denali->flash_reg + GLOBAL_INT_ENABLE); + else + denali_write32(0, denali->flash_reg + GLOBAL_INT_ENABLE); +} + +/* validation function to verify that the controlling software is making + a valid request + */ +static inline bool is_flash_bank_valid(int flash_bank) +{ + return (flash_bank >= 0 && flash_bank < 4); +} + +static void denali_irq_init(struct denali_nand_info *denali) +{ + uint32_t int_mask = 0; + + /* Disable global interrupts */ + NAND_LLD_Enable_Disable_Interrupts(denali, false); + + int_mask = DENALI_IRQ_ALL; + + /* Clear all status bits */ + denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS0); + denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS1); + denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS2); + denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS3); + + denali_irq_enable(denali, int_mask); +} + +static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali) +{ + NAND_LLD_Enable_Disable_Interrupts(denali, false); + free_irq(irqnum, denali); +} + +static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask) +{ + denali_write32(int_mask, denali->flash_reg + INTR_EN0); + denali_write32(int_mask, denali->flash_reg + INTR_EN1); + denali_write32(int_mask, denali->flash_reg + INTR_EN2); + denali_write32(int_mask, denali->flash_reg + INTR_EN3); +} + +/* This function only returns when an interrupt that this driver cares about + * occurs. This is to reduce the overhead of servicing interrupts + */ +static inline uint32_t denali_irq_detected(struct denali_nand_info *denali) +{ + return (read_interrupt_status(denali) & DENALI_IRQ_ALL); +} + +/* Interrupts are cleared by writing a 1 to the appropriate status bit */ +static inline void clear_interrupt(struct denali_nand_info *denali, uint32_t irq_mask) +{ + uint32_t intr_status_reg = 0; + + intr_status_reg = intr_status_addresses[denali->flash_bank]; + + denali_write32(irq_mask, denali->flash_reg + intr_status_reg); +} + +static void clear_interrupts(struct denali_nand_info *denali) +{ + uint32_t status = 0x0; + spin_lock_irq(&denali->irq_lock); + + status = read_interrupt_status(denali); + +#if DEBUG_DENALI + denali->irq_debug_array[denali->idx++] = 0x30000000 | status; + denali->idx %= 32; +#endif + + denali->irq_status = 0x0; + spin_unlock_irq(&denali->irq_lock); +} + +static uint32_t read_interrupt_status(struct denali_nand_info *denali) +{ + uint32_t intr_status_reg = 0; + + intr_status_reg = intr_status_addresses[denali->flash_bank]; + + return ioread32(denali->flash_reg + intr_status_reg); +} + +#if DEBUG_DENALI +static void print_irq_log(struct denali_nand_info *denali) +{ + int i = 0; + + printk("ISR debug log index = %X\n", denali->idx); + for (i = 0; i < 32; i++) + { + printk("%08X: %08X\n", i, denali->irq_debug_array[i]); + } +} +#endif + +/* This is the interrupt service routine. It handles all interrupts + * sent to this device. Note that on CE4100, this is a shared + * interrupt. 
+ */ +static irqreturn_t denali_isr(int irq, void *dev_id) +{ + struct denali_nand_info *denali = dev_id; + uint32_t irq_status = 0x0; + irqreturn_t result = IRQ_NONE; + + spin_lock(&denali->irq_lock); + + /* check to see if a valid NAND chip has + * been selected. + */ + if (is_flash_bank_valid(denali->flash_bank)) + { + /* check to see if controller generated + * the interrupt, since this is a shared interrupt */ + if ((irq_status = denali_irq_detected(denali)) != 0) + { +#if DEBUG_DENALI + denali->irq_debug_array[denali->idx++] = 0x10000000 | irq_status; + denali->idx %= 32; + + printk("IRQ status = 0x%04x\n", irq_status); +#endif + /* handle interrupt */ + /* first acknowledge it */ + clear_interrupt(denali, irq_status); + /* store the status in the device context for someone + to read */ + denali->irq_status |= irq_status; + /* notify anyone who cares that it happened */ + complete(&denali->complete); + /* tell the OS that we've handled this */ + result = IRQ_HANDLED; + } + } + spin_unlock(&denali->irq_lock); + return result; +} +#define BANK(x) ((x) << 24) + +static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask) +{ + unsigned long comp_res = 0; + uint32_t intr_status = 0; + bool retry = false; + unsigned long timeout = msecs_to_jiffies(1000); + + do + { +#if DEBUG_DENALI + printk("waiting for 0x%x\n", irq_mask); +#endif + comp_res = wait_for_completion_timeout(&denali->complete, timeout); + spin_lock_irq(&denali->irq_lock); + intr_status = denali->irq_status; + +#if DEBUG_DENALI + denali->irq_debug_array[denali->idx++] = 0x20000000 | (irq_mask << 16) | intr_status; + denali->idx %= 32; +#endif + + if (intr_status & irq_mask) + { + denali->irq_status &= ~irq_mask; + spin_unlock_irq(&denali->irq_lock); +#if DEBUG_DENALI + if (retry) printk("status on retry = 0x%x\n", intr_status); +#endif + /* our interrupt was detected */ + break; + } + else + { + /* these are not the interrupts you are looking for - + need to wait again */ + spin_unlock_irq(&denali->irq_lock); +#if DEBUG_DENALI + print_irq_log(denali); + printk("received irq nobody cared: irq_status = 0x%x," + " irq_mask = 0x%x, timeout = %ld\n", intr_status, irq_mask, comp_res); +#endif + retry = true; + } + } while (comp_res != 0); + + if (comp_res == 0) + { + /* timeout */ + printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n", + intr_status, irq_mask); + + intr_status = 0; + } + return intr_status; +} + +/* This helper function setups the registers for ECC and whether or not + the spare area will be transfered. */ +static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, + bool transfer_spare) +{ + int ecc_en_flag = 0, transfer_spare_flag = 0; + + /* set ECC, transfer spare bits if needed */ + ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0; + transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0; + + /* Enable spare area/ECC per user's request. */ + denali_write32(ecc_en_flag, denali->flash_reg + ECC_ENABLE); + denali_write32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG); +} + +/* sends a pipeline command operation to the controller. See the Denali NAND + controller's user guide for more information (section 4.2.3.6). 
+ */ +static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en, + bool transfer_spare, int access_type, + int op) +{ + int status = PASS; + uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0, + irq_mask = 0; + + if (op == DENALI_READ) irq_mask = INTR_STATUS0__LOAD_COMP; + else if (op == DENALI_WRITE) irq_mask = 0; + else BUG(); + + setup_ecc_for_xfer(denali, ecc_en, transfer_spare); + +#if DEBUG_DENALI + spin_lock_irq(&denali->irq_lock); + denali->irq_debug_array[denali->idx++] = 0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) | (access_type << 4); + denali->idx %= 32; + spin_unlock_irq(&denali->irq_lock); +#endif + + + /* clear interrupts */ + clear_interrupts(denali); + + addr = BANK(denali->flash_bank) | denali->page; + + if (op == DENALI_WRITE && access_type != SPARE_ACCESS) + { + cmd = MODE_01 | addr; + denali_write32(cmd, denali->flash_mem); + } + else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) + { + /* read spare area */ + cmd = MODE_10 | addr; + index_addr(denali, (uint32_t)cmd, access_type); + + cmd = MODE_01 | addr; + denali_write32(cmd, denali->flash_mem); + } + else if (op == DENALI_READ) + { + /* setup page read request for access type */ + cmd = MODE_10 | addr; + index_addr(denali, (uint32_t)cmd, access_type); + + /* page 33 of the NAND controller spec indicates we should not + use the pipeline commands in Spare area only mode. So we + don't. + */ + if (access_type == SPARE_ACCESS) + { + cmd = MODE_01 | addr; + denali_write32(cmd, denali->flash_mem); + } + else + { + index_addr(denali, (uint32_t)cmd, 0x2000 | op | page_count); + + /* wait for command to be accepted + * can always use status0 bit as the mask is identical for each + * bank. */ + irq_status = wait_for_irq(denali, irq_mask); + + if (irq_status == 0) + { + printk(KERN_ERR "cmd, page, addr on timeout " + "(0x%x, 0x%x, 0x%x)\n", cmd, denali->page, addr); + status = FAIL; + } + else + { + cmd = MODE_01 | addr; + denali_write32(cmd, denali->flash_mem); + } + } + } + return status; +} + +/* helper function that simply writes a buffer to the flash */ +static int write_data_to_flash_mem(struct denali_nand_info *denali, const uint8_t *buf, + int len) +{ + uint32_t i = 0, *buf32; + + /* verify that the len is a multiple of 4. see comment in + * read_data_from_flash_mem() */ + BUG_ON((len % 4) != 0); + + /* write the data to the flash memory */ + buf32 = (uint32_t *)buf; + for (i = 0; i < len / 4; i++) + { + denali_write32(*buf32++, denali->flash_mem + 0x10); + } + return i*4; /* intent is to return the number of bytes read */ +} + +/* helper function that simply reads a buffer from the flash */ +static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *buf, + int len) +{ + uint32_t i = 0, *buf32; + + /* we assume that len will be a multiple of 4, if not + * it would be nice to know about it ASAP rather than + * have random failures... + * + * This assumption is based on the fact that this + * function is designed to be used to read flash pages, + * which are typically multiples of 4... 
+ */ + + BUG_ON((len % 4) != 0); + + /* transfer the data from the flash */ + buf32 = (uint32_t *)buf; + for (i = 0; i < len / 4; i++) + { + *buf32++ = ioread32(denali->flash_mem + 0x10); + } + return i*4; /* intent is to return the number of bytes read */ +} + +/* writes OOB data to the device */ +static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + uint32_t irq_status = 0; + uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP | + INTR_STATUS0__PROGRAM_FAIL; + int status = 0; + + denali->page = page; + + if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS, + DENALI_WRITE) == PASS) + { + write_data_to_flash_mem(denali, buf, mtd->oobsize); + +#if DEBUG_DENALI + spin_lock_irq(&denali->irq_lock); + denali->irq_debug_array[denali->idx++] = 0x80000000 | mtd->oobsize; + denali->idx %= 32; + spin_unlock_irq(&denali->irq_lock); +#endif + + + /* wait for operation to complete */ + irq_status = wait_for_irq(denali, irq_mask); + + if (irq_status == 0) + { + printk(KERN_ERR "OOB write failed\n"); + status = -EIO; + } + } + else + { + printk(KERN_ERR "unable to send pipeline command\n"); + status = -EIO; + } + return status; +} + +/* reads OOB data from the device */ +static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + uint32_t irq_mask = INTR_STATUS0__LOAD_COMP, irq_status = 0, addr = 0x0, cmd = 0x0; + + denali->page = page; + +#if DEBUG_DENALI + printk("read_oob %d\n", page); +#endif + if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS, + DENALI_READ) == PASS) + { + read_data_from_flash_mem(denali, buf, mtd->oobsize); + + /* wait for command to be accepted + * can always use status0 bit as the mask is identical for each + * bank. */ + irq_status = wait_for_irq(denali, irq_mask); + + if (irq_status == 0) + { + printk(KERN_ERR "page on OOB timeout %d\n", denali->page); + } + + /* We set the device back to MAIN_ACCESS here as I observed + * instability with the controller if you do a block erase + * and the last transaction was a SPARE_ACCESS. Block erase + * is reliable (according to the MTD test infrastructure) + * if you are in MAIN_ACCESS. + */ + addr = BANK(denali->flash_bank) | denali->page; + cmd = MODE_10 | addr; + index_addr(denali, (uint32_t)cmd, MAIN_ACCESS); + +#if DEBUG_DENALI + spin_lock_irq(&denali->irq_lock); + denali->irq_debug_array[denali->idx++] = 0x60000000 | mtd->oobsize; + denali->idx %= 32; + spin_unlock_irq(&denali->irq_lock); +#endif + } +} + +/* this function examines buffers to see if they contain data that + * indicate that the buffer is part of an erased region of flash. + */ +bool is_erased(uint8_t *buf, int len) +{ + int i = 0; + for (i = 0; i < len; i++) + { + if (buf[i] != 0xFF) + { + return false; + } + } + return true; +} +#define ECC_SECTOR_SIZE 512 + +#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12) +#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET)) +#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK) +#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO)) +#define ECC_ERR_DEVICE(x) ((x) & ERR_CORRECTION_INFO__DEVICE_NR >> 8) +#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) + +static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, + uint8_t *oobbuf, uint32_t irq_status) +{ + bool check_erased_page = false; + + if (irq_status & INTR_STATUS0__ECC_ERR) + { + /* read the ECC errors. 
we'll ignore them for now */ + uint32_t err_address = 0, err_correction_info = 0; + uint32_t err_byte = 0, err_sector = 0, err_device = 0; + uint32_t err_correction_value = 0; + + do + { + err_address = ioread32(denali->flash_reg + + ECC_ERROR_ADDRESS); + err_sector = ECC_SECTOR(err_address); + err_byte = ECC_BYTE(err_address); + + + err_correction_info = ioread32(denali->flash_reg + + ERR_CORRECTION_INFO); + err_correction_value = + ECC_CORRECTION_VALUE(err_correction_info); + err_device = ECC_ERR_DEVICE(err_correction_info); + + if (ECC_ERROR_CORRECTABLE(err_correction_info)) + { + /* offset in our buffer is computed as: + sector number * sector size + offset in + sector + */ + int offset = err_sector * ECC_SECTOR_SIZE + + err_byte; + if (offset < denali->mtd.writesize) + { + /* correct the ECC error */ + buf[offset] ^= err_correction_value; + denali->mtd.ecc_stats.corrected++; + } + else + { + /* bummer, couldn't correct the error */ + printk(KERN_ERR "ECC offset invalid\n"); + denali->mtd.ecc_stats.failed++; + } + } + else + { + /* if the error is not correctable, need to + * look at the page to see if it is an erased page. + * if so, then it's not a real ECC error */ + check_erased_page = true; + } + +#if DEBUG_DENALI + printk("Detected ECC error in page %d: err_addr = 0x%08x," + " info to fix is 0x%08x\n", denali->page, err_address, + err_correction_info); +#endif + } while (!ECC_LAST_ERR(err_correction_info)); + } + return check_erased_page; +} + +/* programs the controller to either enable/disable DMA transfers */ +static void denali_enable_dma(struct denali_nand_info *denali, bool en) +{ + uint32_t reg_val = 0x0; + + if (en) reg_val = DMA_ENABLE__FLAG; + + denali_write32(reg_val, denali->flash_reg + DMA_ENABLE); + ioread32(denali->flash_reg + DMA_ENABLE); +} + +/* setups the HW to perform the data DMA */ +static void denali_setup_dma(struct denali_nand_info *denali, int op) +{ + uint32_t mode = 0x0; + const int page_count = 1; + dma_addr_t addr = denali->buf.dma_buf; + + mode = MODE_10 | BANK(denali->flash_bank); + + /* DMA is a four step process */ + + /* 1. setup transfer type and # of pages */ + index_addr(denali, mode | denali->page, 0x2000 | op | page_count); + + /* 2. set memory high address bits 23:8 */ + index_addr(denali, mode | ((uint16_t)(addr >> 16) << 8), 0x2200); + + /* 3. set memory low address bits 23:8 */ + index_addr(denali, mode | ((uint16_t)addr << 8), 0x2300); + + /* 4. interrupt when complete, burst len = 64 bytes*/ + index_addr(denali, mode | 0x14000, 0x2400); +} + +/* writes a page. user specifies type, and this function handles the + configuration details. */ +static void write_page(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, bool raw_xfer) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + struct pci_dev *pci_dev = denali->dev; + + dma_addr_t addr = denali->buf.dma_buf; + size_t size = denali->mtd.writesize + denali->mtd.oobsize; + + uint32_t irq_status = 0; + uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP | + INTR_STATUS0__PROGRAM_FAIL; + + /* if it is a raw xfer, we want to disable ecc, and send + * the spare area. 
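+ * As the copy logic below shows, the non-raw path transfers only the
+ * main area and lets the controller hardware supply the ECC bytes,
+ * while the raw path also copies the caller's oob_poi verbatim: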
+ * !raw_xfer - enable ecc + * raw_xfer - transfer spare + */ + setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer); + + /* copy buffer into DMA buffer */ + memcpy(denali->buf.buf, buf, mtd->writesize); + + if (raw_xfer) + { + /* transfer the data to the spare area */ + memcpy(denali->buf.buf + mtd->writesize, + chip->oob_poi, + mtd->oobsize); + } + + pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE); + + clear_interrupts(denali); + denali_enable_dma(denali, true); + + denali_setup_dma(denali, DENALI_WRITE); + + /* wait for operation to complete */ + irq_status = wait_for_irq(denali, irq_mask); + + if (irq_status == 0) + { + printk(KERN_ERR "timeout on write_page (type = %d)\n", raw_xfer); + denali->status = + (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? NAND_STATUS_FAIL : + PASS; + } + + denali_enable_dma(denali, false); + pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE); +} + +/* NAND core entry points */ + +/* this is the callback that the NAND core calls to write a page. Since + writing a page with ECC or without is similar, all the work is done + by write_page above. */ +static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf) +{ + /* for regular page writes, we let HW handle all the ECC + * data written to the device. */ + write_page(mtd, chip, buf, false); +} + +/* This is the callback that the NAND core calls to write a page without ECC. + raw access is similiar to ECC page writes, so all the work is done in the + write_page() function above. + */ +static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf) +{ + /* for raw page writes, we want to disable ECC and simply write + whatever data is in the buffer. */ + write_page(mtd, chip, buf, true); +} + +static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, + int page) +{ + return write_oob_data(mtd, chip->oob_poi, page); +} + +static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, + int page, int sndcmd) +{ + read_oob_data(mtd, chip->oob_poi, page); + + return 0; /* notify NAND core to send command to + * NAND device. 
*/ +} + +static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int page) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + struct pci_dev *pci_dev = denali->dev; + + dma_addr_t addr = denali->buf.dma_buf; + size_t size = denali->mtd.writesize + denali->mtd.oobsize; + + uint32_t irq_status = 0; + uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE | + INTR_STATUS0__ECC_ERR; + bool check_erased_page = false; + + setup_ecc_for_xfer(denali, true, false); + + denali_enable_dma(denali, true); + pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE); + + clear_interrupts(denali); + denali_setup_dma(denali, DENALI_READ); + + /* wait for operation to complete */ + irq_status = wait_for_irq(denali, irq_mask); + + pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE); + + memcpy(buf, denali->buf.buf, mtd->writesize); + + check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status); + denali_enable_dma(denali, false); + + if (check_erased_page) + { + read_oob_data(&denali->mtd, chip->oob_poi, denali->page); + + /* check ECC failures that may have occurred on erased pages */ + if (check_erased_page) + { + if (!is_erased(buf, denali->mtd.writesize)) + { + denali->mtd.ecc_stats.failed++; + } + if (!is_erased(buf, denali->mtd.oobsize)) + { + denali->mtd.ecc_stats.failed++; + } + } + } + return 0; +} + +static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int page) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + struct pci_dev *pci_dev = denali->dev; + + dma_addr_t addr = denali->buf.dma_buf; + size_t size = denali->mtd.writesize + denali->mtd.oobsize; + + uint32_t irq_status = 0; + uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP; + + setup_ecc_for_xfer(denali, false, true); + denali_enable_dma(denali, true); + + pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE); + + clear_interrupts(denali); + denali_setup_dma(denali, DENALI_READ); + + /* wait for operation to complete */ + irq_status = wait_for_irq(denali, irq_mask); + + pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE); + + denali_enable_dma(denali, false); + + memcpy(buf, denali->buf.buf, mtd->writesize); + memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize); + + return 0; +} + +static uint8_t denali_read_byte(struct mtd_info *mtd) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + uint8_t result = 0xff; + + if (denali->buf.head < denali->buf.tail) + { + result = denali->buf.buf[denali->buf.head++]; + } + +#if DEBUG_DENALI + printk("read byte -> 0x%02x\n", result); +#endif + return result; +} + +static void denali_select_chip(struct mtd_info *mtd, int chip) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); +#if DEBUG_DENALI + printk("denali select chip %d\n", chip); +#endif + spin_lock_irq(&denali->irq_lock); + denali->flash_bank = chip; + spin_unlock_irq(&denali->irq_lock); +} + +static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + int status = denali->status; + denali->status = 0; + +#if DEBUG_DENALI + printk("waitfunc %d\n", status); +#endif + return status; +} + +static void denali_erase(struct mtd_info *mtd, int page) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + + uint32_t cmd = 0x0, irq_status = 0; + +#if DEBUG_DENALI + printk("erase page: %d\n", page); +#endif + /* clear interrupts */ + clear_interrupts(denali); + + /* setup page 
read request for access type */ + cmd = MODE_10 | BANK(denali->flash_bank) | page; + index_addr(denali, (uint32_t)cmd, 0x1); + + /* wait for erase to complete or failure to occur */ + irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP | + INTR_STATUS0__ERASE_FAIL); + + denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ? NAND_STATUS_FAIL : + PASS; +} + +static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, + int page) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + +#if DEBUG_DENALI + printk("cmdfunc: 0x%x %d %d\n", cmd, col, page); +#endif + switch (cmd) + { + case NAND_CMD_PAGEPROG: + break; + case NAND_CMD_STATUS: + read_status(denali); + break; + case NAND_CMD_READID: + reset_buf(denali); + if (denali->flash_bank < denali->total_used_banks) + { + /* write manufacturer information into nand + buffer for NAND subsystem to fetch. + */ + write_byte_to_buf(denali, denali->dev_info.wDeviceMaker); + write_byte_to_buf(denali, denali->dev_info.wDeviceID); + write_byte_to_buf(denali, denali->dev_info.bDeviceParam0); + write_byte_to_buf(denali, denali->dev_info.bDeviceParam1); + write_byte_to_buf(denali, denali->dev_info.bDeviceParam2); + } + else + { + int i; + for (i = 0; i < 5; i++) + write_byte_to_buf(denali, 0xff); + } + break; + case NAND_CMD_READ0: + case NAND_CMD_SEQIN: + denali->page = page; + break; + case NAND_CMD_RESET: + reset_bank(denali); + break; + case NAND_CMD_READOOB: + /* TODO: Read OOB data */ + break; + default: + printk(KERN_ERR ": unsupported command received 0x%x\n", cmd); + break; + } +} + +/* stubs for ECC functions not used by the NAND core */ +static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data, + uint8_t *ecc_code) +{ + printk(KERN_ERR "denali_ecc_calculate called unexpectedly\n"); + BUG(); + return -EIO; +} + +static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data, + uint8_t *read_ecc, uint8_t *calc_ecc) +{ + printk(KERN_ERR "denali_ecc_correct called unexpectedly\n"); + BUG(); + return -EIO; +} + +static void denali_ecc_hwctl(struct mtd_info *mtd, int mode) +{ + printk(KERN_ERR "denali_ecc_hwctl called unexpectedly\n"); + BUG(); +} +/* end NAND core entry points */ + +/* Initialization code to bring the device up to a known good state */ +static void denali_hw_init(struct denali_nand_info *denali) +{ + denali_irq_init(denali); + NAND_Flash_Reset(denali); + denali_write32(0x0F, denali->flash_reg + RB_PIN_ENABLED); + denali_write32(CHIP_EN_DONT_CARE__FLAG, denali->flash_reg + CHIP_ENABLE_DONT_CARE); + + denali_write32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES); + denali_write32(0xffff, denali->flash_reg + SPARE_AREA_MARKER); + + /* Should set value for these registers when init */ + denali_write32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); + denali_write32(1, denali->flash_reg + ECC_ENABLE); +} + +/* ECC layout for SLC devices. 
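+ * For a 2048-byte page (the size these layouts hard-code) ECC_SECTOR_SIZE
+ * is 512, so ECC_BYTES_SLC below evaluates to 4 * 4 = 16 bytes and
+ * ECC_BYTES_MLC to 14 * 4 = 56 bytes; the oobfree regions start there.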
Denali spec indicates SLC fixed at 4 bytes */ +#define ECC_BYTES_SLC 4 * (2048 / ECC_SECTOR_SIZE) +static struct nand_ecclayout nand_oob_slc = { + .eccbytes = 4, + .eccpos = { 0, 1, 2, 3 }, /* not used */ + .oobfree = {{ + .offset = ECC_BYTES_SLC, + .length = 64 - ECC_BYTES_SLC + }} +}; + +#define ECC_BYTES_MLC 14 * (2048 / ECC_SECTOR_SIZE) +static struct nand_ecclayout nand_oob_mlc_14bit = { + .eccbytes = 14, + .eccpos = { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */ + .oobfree = {{ + .offset = ECC_BYTES_MLC, + .length = 64 - ECC_BYTES_MLC + }} +}; + +static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; +static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' }; + +static struct nand_bbt_descr bbt_main_descr = { + .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE + | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, + .offs = 8, + .len = 4, + .veroffs = 12, + .maxblocks = 4, + .pattern = bbt_pattern, +}; + +static struct nand_bbt_descr bbt_mirror_descr = { + .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE + | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, + .offs = 8, + .len = 4, + .veroffs = 12, + .maxblocks = 4, + .pattern = mirror_pattern, +}; + +/* initalize driver data structures */ +void denali_drv_init(struct denali_nand_info *denali) +{ + denali->idx = 0; + + /* setup interrupt handler */ + /* the completion object will be used to notify + * the callee that the interrupt is done */ + init_completion(&denali->complete); + + /* the spinlock will be used to synchronize the ISR + * with any element that might be access shared + * data (interrupt status) */ + spin_lock_init(&denali->irq_lock); + + /* indicate that MTD has not selected a valid bank yet */ + denali->flash_bank = CHIP_SELECT_INVALID; + + /* initialize our irq_status variable to indicate no interrupts */ + denali->irq_status = 0; +} + +/* driver entry point */ +static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + int ret = -ENODEV; + resource_size_t csr_base, mem_base; + unsigned long csr_len, mem_len; + struct denali_nand_info *denali; + + nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", + __FILE__, __LINE__, __func__); + + denali = kzalloc(sizeof(*denali), GFP_KERNEL); + if (!denali) + return -ENOMEM; + + ret = pci_enable_device(dev); + if (ret) { + printk(KERN_ERR "Spectra: pci_enable_device failed.\n"); + goto failed_enable; + } + + if (id->driver_data == INTEL_CE4100) { + /* Due to a silicon limitation, we can only support + * ONFI timing mode 1 and below. + */ + if (onfi_timing_mode < -1 || onfi_timing_mode > 1) + { + printk("Intel CE4100 only supports ONFI timing mode 1 " + "or below\n"); + ret = -EINVAL; + goto failed_enable; + } + denali->platform = INTEL_CE4100; + mem_base = pci_resource_start(dev, 0); + mem_len = pci_resource_len(dev, 1); + csr_base = pci_resource_start(dev, 1); + csr_len = pci_resource_len(dev, 1); + } else { + denali->platform = INTEL_MRST; + csr_base = pci_resource_start(dev, 0); + csr_len = pci_resource_start(dev, 0); + mem_base = pci_resource_start(dev, 1); + mem_len = pci_resource_len(dev, 1); + if (!mem_len) { + mem_base = csr_base + csr_len; + mem_len = csr_len; + nand_dbg_print(NAND_DBG_WARN, + "Spectra: No second BAR for PCI device; assuming %08Lx\n", + (uint64_t)csr_base); + } + } + + /* Is 32-bit DMA supported? 
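+ * denali_setup_dma() programs the buffer address as two 16-bit halves,
+ * i.e. at most 32 address bits, so the probe insists on a 32-bit DMA
+ * mask and bails out if the platform cannot provide one.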
*/ + ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); + + if (ret) + { + printk(KERN_ERR "Spectra: no usable DMA configuration\n"); + goto failed_enable; + } + denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf, DENALI_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); + + if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) + { + printk(KERN_ERR "Spectra: failed to map DMA buffer\n"); + goto failed_enable; + } + + pci_set_master(dev); + denali->dev = dev; + + ret = pci_request_regions(dev, DENALI_NAND_NAME); + if (ret) { + printk(KERN_ERR "Spectra: Unable to request memory regions\n"); + goto failed_req_csr; + } + + denali->flash_reg = ioremap_nocache(csr_base, csr_len); + if (!denali->flash_reg) { + printk(KERN_ERR "Spectra: Unable to remap memory region\n"); + ret = -ENOMEM; + goto failed_remap_csr; + } + nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n", + (uint64_t)csr_base, denali->flash_reg, csr_len); + + denali->flash_mem = ioremap_nocache(mem_base, mem_len); + if (!denali->flash_mem) { + printk(KERN_ERR "Spectra: ioremap_nocache failed!"); + iounmap(denali->flash_reg); + ret = -ENOMEM; + goto failed_remap_csr; + } + + nand_dbg_print(NAND_DBG_WARN, + "Spectra: Remapped flash base address: " + "0x%p, len: %ld\n", + denali->flash_mem, csr_len); + + denali_hw_init(denali); + denali_drv_init(denali); + + nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq); + if (request_irq(dev->irq, denali_isr, IRQF_SHARED, + DENALI_NAND_NAME, denali)) { + printk(KERN_ERR "Spectra: Unable to allocate IRQ\n"); + ret = -ENODEV; + goto failed_request_irq; + } + + /* now that our ISR is registered, we can enable interrupts */ + NAND_LLD_Enable_Disable_Interrupts(denali, true); + + pci_set_drvdata(dev, denali); + + NAND_Read_Device_ID(denali); + + /* MTD supported page sizes vary by kernel. We validate our + kernel supports the device here. + */ + if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) + { + ret = -ENODEV; + printk(KERN_ERR "Spectra: device size not supported by this " + "version of MTD."); + goto failed_nand; + } + + nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:" + "acc_clks: %d, re_2_we: %d, we_2_re: %d," + "addr_2_data: %d, rdwr_en_lo_cnt: %d, " + "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n", + ioread32(denali->flash_reg + ACC_CLKS), + ioread32(denali->flash_reg + RE_2_WE), + ioread32(denali->flash_reg + WE_2_RE), + ioread32(denali->flash_reg + ADDR_2_DATA), + ioread32(denali->flash_reg + RDWR_EN_LO_CNT), + ioread32(denali->flash_reg + RDWR_EN_HI_CNT), + ioread32(denali->flash_reg + CS_SETUP_CNT)); + + denali->mtd.name = "Denali NAND"; + denali->mtd.owner = THIS_MODULE; + denali->mtd.priv = &denali->nand; + + /* register the driver with the NAND core subsystem */ + denali->nand.select_chip = denali_select_chip; + denali->nand.cmdfunc = denali_cmdfunc; + denali->nand.read_byte = denali_read_byte; + denali->nand.waitfunc = denali_waitfunc; + + /* scan for NAND devices attached to the controller + * this is the first stage in a two step process to register + * with the nand subsystem */ + if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) + { + ret = -ENXIO; + goto failed_nand; + } + + /* second stage of the NAND scan + * this stage requires information regarding ECC and + * bad block management. 
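+ * (nand_scan_ident() above discovered the chip and its geometry; once
+ * the ECC parameters and page callbacks below are filled in,
+ * nand_scan_tail() completes the registration with the NAND core.)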
*/ + + /* Bad block management */ + denali->nand.bbt_td = &bbt_main_descr; + denali->nand.bbt_md = &bbt_mirror_descr; + + /* skip the scan for now until we have OOB read and write support */ + denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN; + denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; + + if (denali->dev_info.MLCDevice) + { + denali->nand.ecc.layout = &nand_oob_mlc_14bit; + denali->nand.ecc.bytes = ECC_BYTES_MLC; + } + else /* SLC */ + { + denali->nand.ecc.layout = &nand_oob_slc; + denali->nand.ecc.bytes = ECC_BYTES_SLC; + } + + /* These functions are required by the NAND core framework, otherwise, + the NAND core will assert. However, we don't need them, so we'll stub + them out. */ + denali->nand.ecc.calculate = denali_ecc_calculate; + denali->nand.ecc.correct = denali_ecc_correct; + denali->nand.ecc.hwctl = denali_ecc_hwctl; + + /* override the default read operations */ + denali->nand.ecc.size = denali->mtd.writesize; + denali->nand.ecc.read_page = denali_read_page; + denali->nand.ecc.read_page_raw = denali_read_page_raw; + denali->nand.ecc.write_page = denali_write_page; + denali->nand.ecc.write_page_raw = denali_write_page_raw; + denali->nand.ecc.read_oob = denali_read_oob; + denali->nand.ecc.write_oob = denali_write_oob; + denali->nand.erase_cmd = denali_erase; + + if (nand_scan_tail(&denali->mtd)) + { + ret = -ENXIO; + goto failed_nand; + } + + ret = add_mtd_device(&denali->mtd); + if (ret) { + printk(KERN_ERR "Spectra: Failed to register MTD device: %d\n", ret); + goto failed_nand; + } + return 0; + + failed_nand: + denali_irq_cleanup(dev->irq, denali); + failed_request_irq: + iounmap(denali->flash_reg); + iounmap(denali->flash_mem); + failed_remap_csr: + pci_release_regions(dev); + failed_req_csr: + pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); + failed_enable: + kfree(denali); + return ret; +} + +/* driver exit point */ +static void denali_pci_remove(struct pci_dev *dev) +{ + struct denali_nand_info *denali = pci_get_drvdata(dev); + + nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n", + __FILE__, __LINE__, __func__); + + nand_release(&denali->mtd); + del_mtd_device(&denali->mtd); + + denali_irq_cleanup(dev->irq, denali); + + iounmap(denali->flash_reg); + iounmap(denali->flash_mem); + pci_release_regions(dev); + pci_disable_device(dev); + pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); + pci_set_drvdata(dev, NULL); + kfree(denali); +} + +MODULE_DEVICE_TABLE(pci, denali_pci_ids); + +static struct pci_driver denali_pci_driver = { + .name = DENALI_NAND_NAME, + .id_table = denali_pci_ids, + .probe = denali_pci_probe, + .remove = denali_pci_remove, +}; + +static int __devinit denali_init(void) +{ + printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__); + return pci_register_driver(&denali_pci_driver); +} + +/* Free memory */ +static void __devexit denali_exit(void) +{ + pci_unregister_driver(&denali_pci_driver); +} + +module_init(denali_init); +module_exit(denali_exit); diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h new file mode 100644 index 00000000000..422a29ab2f6 --- /dev/null +++ b/drivers/mtd/nand/denali.h @@ -0,0 +1,816 @@ +/* + * NAND Flash Controller Device Driver + * Copyright (c) 2009 - 2010, Intel Corporation and its suppliers. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#include <linux/mtd/nand.h> + +#define DEVICE_RESET 0x0 +#define DEVICE_RESET__BANK0 0x0001 +#define DEVICE_RESET__BANK1 0x0002 +#define DEVICE_RESET__BANK2 0x0004 +#define DEVICE_RESET__BANK3 0x0008 + +#define TRANSFER_SPARE_REG 0x10 +#define TRANSFER_SPARE_REG__FLAG 0x0001 + +#define LOAD_WAIT_CNT 0x20 +#define LOAD_WAIT_CNT__VALUE 0xffff + +#define PROGRAM_WAIT_CNT 0x30 +#define PROGRAM_WAIT_CNT__VALUE 0xffff + +#define ERASE_WAIT_CNT 0x40 +#define ERASE_WAIT_CNT__VALUE 0xffff + +#define INT_MON_CYCCNT 0x50 +#define INT_MON_CYCCNT__VALUE 0xffff + +#define RB_PIN_ENABLED 0x60 +#define RB_PIN_ENABLED__BANK0 0x0001 +#define RB_PIN_ENABLED__BANK1 0x0002 +#define RB_PIN_ENABLED__BANK2 0x0004 +#define RB_PIN_ENABLED__BANK3 0x0008 + +#define MULTIPLANE_OPERATION 0x70 +#define MULTIPLANE_OPERATION__FLAG 0x0001 + +#define MULTIPLANE_READ_ENABLE 0x80 +#define MULTIPLANE_READ_ENABLE__FLAG 0x0001 + +#define COPYBACK_DISABLE 0x90 +#define COPYBACK_DISABLE__FLAG 0x0001 + +#define CACHE_WRITE_ENABLE 0xa0 +#define CACHE_WRITE_ENABLE__FLAG 0x0001 + +#define CACHE_READ_ENABLE 0xb0 +#define CACHE_READ_ENABLE__FLAG 0x0001 + +#define PREFETCH_MODE 0xc0 +#define PREFETCH_MODE__PREFETCH_EN 0x0001 +#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0 + +#define CHIP_ENABLE_DONT_CARE 0xd0 +#define CHIP_EN_DONT_CARE__FLAG 0x01 + +#define ECC_ENABLE 0xe0 +#define ECC_ENABLE__FLAG 0x0001 + +#define GLOBAL_INT_ENABLE 0xf0 +#define GLOBAL_INT_EN_FLAG 0x01 + +#define WE_2_RE 0x100 +#define WE_2_RE__VALUE 0x003f + +#define ADDR_2_DATA 0x110 +#define ADDR_2_DATA__VALUE 0x003f + +#define RE_2_WE 0x120 +#define RE_2_WE__VALUE 0x003f + +#define ACC_CLKS 0x130 +#define ACC_CLKS__VALUE 0x000f + +#define NUMBER_OF_PLANES 0x140 +#define NUMBER_OF_PLANES__VALUE 0x0007 + +#define PAGES_PER_BLOCK 0x150 +#define PAGES_PER_BLOCK__VALUE 0xffff + +#define DEVICE_WIDTH 0x160 +#define DEVICE_WIDTH__VALUE 0x0003 + +#define DEVICE_MAIN_AREA_SIZE 0x170 +#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff + +#define DEVICE_SPARE_AREA_SIZE 0x180 +#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff + +#define TWO_ROW_ADDR_CYCLES 0x190 +#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001 + +#define MULTIPLANE_ADDR_RESTRICT 0x1a0 +#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001 + +#define ECC_CORRECTION 0x1b0 +#define ECC_CORRECTION__VALUE 0x001f + +#define READ_MODE 0x1c0 +#define READ_MODE__VALUE 0x000f + +#define WRITE_MODE 0x1d0 +#define WRITE_MODE__VALUE 0x000f + +#define COPYBACK_MODE 0x1e0 +#define COPYBACK_MODE__VALUE 0x000f + +#define RDWR_EN_LO_CNT 0x1f0 +#define RDWR_EN_LO_CNT__VALUE 0x001f + +#define RDWR_EN_HI_CNT 0x200 +#define RDWR_EN_HI_CNT__VALUE 0x001f + +#define MAX_RD_DELAY 0x210 +#define MAX_RD_DELAY__VALUE 0x000f + +#define CS_SETUP_CNT 0x220 +#define CS_SETUP_CNT__VALUE 0x001f + +#define SPARE_AREA_SKIP_BYTES 0x230 +#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f + +#define 
SPARE_AREA_MARKER 0x240 +#define SPARE_AREA_MARKER__VALUE 0xffff + +#define DEVICES_CONNECTED 0x250 +#define DEVICES_CONNECTED__VALUE 0x0007 + +#define DIE_MASK 0x260 +#define DIE_MASK__VALUE 0x00ff + +#define FIRST_BLOCK_OF_NEXT_PLANE 0x270 +#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff + +#define WRITE_PROTECT 0x280 +#define WRITE_PROTECT__FLAG 0x0001 + +#define RE_2_RE 0x290 +#define RE_2_RE__VALUE 0x003f + +#define MANUFACTURER_ID 0x300 +#define MANUFACTURER_ID__VALUE 0x00ff + +#define DEVICE_ID 0x310 +#define DEVICE_ID__VALUE 0x00ff + +#define DEVICE_PARAM_0 0x320 +#define DEVICE_PARAM_0__VALUE 0x00ff + +#define DEVICE_PARAM_1 0x330 +#define DEVICE_PARAM_1__VALUE 0x00ff + +#define DEVICE_PARAM_2 0x340 +#define DEVICE_PARAM_2__VALUE 0x00ff + +#define LOGICAL_PAGE_DATA_SIZE 0x350 +#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff + +#define LOGICAL_PAGE_SPARE_SIZE 0x360 +#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff + +#define REVISION 0x370 +#define REVISION__VALUE 0xffff + +#define ONFI_DEVICE_FEATURES 0x380 +#define ONFI_DEVICE_FEATURES__VALUE 0x003f + +#define ONFI_OPTIONAL_COMMANDS 0x390 +#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f + +#define ONFI_TIMING_MODE 0x3a0 +#define ONFI_TIMING_MODE__VALUE 0x003f + +#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0 +#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f + +#define ONFI_DEVICE_NO_OF_LUNS 0x3c0 +#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff +#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100 + +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0 +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff + +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0 +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff + +#define FEATURES 0x3f0 +#define FEATURES__N_BANKS 0x0003 +#define FEATURES__ECC_MAX_ERR 0x003c +#define FEATURES__DMA 0x0040 +#define FEATURES__CMD_DMA 0x0080 +#define FEATURES__PARTITION 0x0100 +#define FEATURES__XDMA_SIDEBAND 0x0200 +#define FEATURES__GPREG 0x0400 +#define FEATURES__INDEX_ADDR 0x0800 + +#define TRANSFER_MODE 0x400 +#define TRANSFER_MODE__VALUE 0x0003 + +#define INTR_STATUS0 0x410 +#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001 +#define INTR_STATUS0__ECC_ERR 0x0002 +#define INTR_STATUS0__DMA_CMD_COMP 0x0004 +#define INTR_STATUS0__TIME_OUT 0x0008 +#define INTR_STATUS0__PROGRAM_FAIL 0x0010 +#define INTR_STATUS0__ERASE_FAIL 0x0020 +#define INTR_STATUS0__LOAD_COMP 0x0040 +#define INTR_STATUS0__PROGRAM_COMP 0x0080 +#define INTR_STATUS0__ERASE_COMP 0x0100 +#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200 +#define INTR_STATUS0__LOCKED_BLK 0x0400 +#define INTR_STATUS0__UNSUP_CMD 0x0800 +#define INTR_STATUS0__INT_ACT 0x1000 +#define INTR_STATUS0__RST_COMP 0x2000 +#define INTR_STATUS0__PIPE_CMD_ERR 0x4000 +#define INTR_STATUS0__PAGE_XFER_INC 0x8000 + +#define INTR_EN0 0x420 +#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001 +#define INTR_EN0__ECC_ERR 0x0002 +#define INTR_EN0__DMA_CMD_COMP 0x0004 +#define INTR_EN0__TIME_OUT 0x0008 +#define INTR_EN0__PROGRAM_FAIL 0x0010 +#define INTR_EN0__ERASE_FAIL 0x0020 +#define INTR_EN0__LOAD_COMP 0x0040 +#define INTR_EN0__PROGRAM_COMP 0x0080 +#define INTR_EN0__ERASE_COMP 0x0100 +#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200 +#define INTR_EN0__LOCKED_BLK 0x0400 +#define INTR_EN0__UNSUP_CMD 0x0800 +#define INTR_EN0__INT_ACT 0x1000 +#define INTR_EN0__RST_COMP 0x2000 +#define INTR_EN0__PIPE_CMD_ERR 0x4000 +#define INTR_EN0__PAGE_XFER_INC 0x8000 + +#define PAGE_CNT0 0x430 +#define PAGE_CNT0__VALUE 0x00ff + +#define ERR_PAGE_ADDR0 0x440 +#define ERR_PAGE_ADDR0__VALUE 0xffff + +#define 
ERR_BLOCK_ADDR0 0x450 +#define ERR_BLOCK_ADDR0__VALUE 0xffff + +#define INTR_STATUS1 0x460 +#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001 +#define INTR_STATUS1__ECC_ERR 0x0002 +#define INTR_STATUS1__DMA_CMD_COMP 0x0004 +#define INTR_STATUS1__TIME_OUT 0x0008 +#define INTR_STATUS1__PROGRAM_FAIL 0x0010 +#define INTR_STATUS1__ERASE_FAIL 0x0020 +#define INTR_STATUS1__LOAD_COMP 0x0040 +#define INTR_STATUS1__PROGRAM_COMP 0x0080 +#define INTR_STATUS1__ERASE_COMP 0x0100 +#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200 +#define INTR_STATUS1__LOCKED_BLK 0x0400 +#define INTR_STATUS1__UNSUP_CMD 0x0800 +#define INTR_STATUS1__INT_ACT 0x1000 +#define INTR_STATUS1__RST_COMP 0x2000 +#define INTR_STATUS1__PIPE_CMD_ERR 0x4000 +#define INTR_STATUS1__PAGE_XFER_INC 0x8000 + +#define INTR_EN1 0x470 +#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001 +#define INTR_EN1__ECC_ERR 0x0002 +#define INTR_EN1__DMA_CMD_COMP 0x0004 +#define INTR_EN1__TIME_OUT 0x0008 +#define INTR_EN1__PROGRAM_FAIL 0x0010 +#define INTR_EN1__ERASE_FAIL 0x0020 +#define INTR_EN1__LOAD_COMP 0x0040 +#define INTR_EN1__PROGRAM_COMP 0x0080 +#define INTR_EN1__ERASE_COMP 0x0100 +#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200 +#define INTR_EN1__LOCKED_BLK 0x0400 +#define INTR_EN1__UNSUP_CMD 0x0800 +#define INTR_EN1__INT_ACT 0x1000 +#define INTR_EN1__RST_COMP 0x2000 +#define INTR_EN1__PIPE_CMD_ERR 0x4000 +#define INTR_EN1__PAGE_XFER_INC 0x8000 + +#define PAGE_CNT1 0x480 +#define PAGE_CNT1__VALUE 0x00ff + +#define ERR_PAGE_ADDR1 0x490 +#define ERR_PAGE_ADDR1__VALUE 0xffff + +#define ERR_BLOCK_ADDR1 0x4a0 +#define ERR_BLOCK_ADDR1__VALUE 0xffff + +#define INTR_STATUS2 0x4b0 +#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001 +#define INTR_STATUS2__ECC_ERR 0x0002 +#define INTR_STATUS2__DMA_CMD_COMP 0x0004 +#define INTR_STATUS2__TIME_OUT 0x0008 +#define INTR_STATUS2__PROGRAM_FAIL 0x0010 +#define INTR_STATUS2__ERASE_FAIL 0x0020 +#define INTR_STATUS2__LOAD_COMP 0x0040 +#define INTR_STATUS2__PROGRAM_COMP 0x0080 +#define INTR_STATUS2__ERASE_COMP 0x0100 +#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200 +#define INTR_STATUS2__LOCKED_BLK 0x0400 +#define INTR_STATUS2__UNSUP_CMD 0x0800 +#define INTR_STATUS2__INT_ACT 0x1000 +#define INTR_STATUS2__RST_COMP 0x2000 +#define INTR_STATUS2__PIPE_CMD_ERR 0x4000 +#define INTR_STATUS2__PAGE_XFER_INC 0x8000 + +#define INTR_EN2 0x4c0 +#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001 +#define INTR_EN2__ECC_ERR 0x0002 +#define INTR_EN2__DMA_CMD_COMP 0x0004 +#define INTR_EN2__TIME_OUT 0x0008 +#define INTR_EN2__PROGRAM_FAIL 0x0010 +#define INTR_EN2__ERASE_FAIL 0x0020 +#define INTR_EN2__LOAD_COMP 0x0040 +#define INTR_EN2__PROGRAM_COMP 0x0080 +#define INTR_EN2__ERASE_COMP 0x0100 +#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200 +#define INTR_EN2__LOCKED_BLK 0x0400 +#define INTR_EN2__UNSUP_CMD 0x0800 +#define INTR_EN2__INT_ACT 0x1000 +#define INTR_EN2__RST_COMP 0x2000 +#define INTR_EN2__PIPE_CMD_ERR 0x4000 +#define INTR_EN2__PAGE_XFER_INC 0x8000 + +#define PAGE_CNT2 0x4d0 +#define PAGE_CNT2__VALUE 0x00ff + +#define ERR_PAGE_ADDR2 0x4e0 +#define ERR_PAGE_ADDR2__VALUE 0xffff + +#define ERR_BLOCK_ADDR2 0x4f0 +#define ERR_BLOCK_ADDR2__VALUE 0xffff + +#define INTR_STATUS3 0x500 +#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001 +#define INTR_STATUS3__ECC_ERR 0x0002 +#define INTR_STATUS3__DMA_CMD_COMP 0x0004 +#define INTR_STATUS3__TIME_OUT 0x0008 +#define INTR_STATUS3__PROGRAM_FAIL 0x0010 +#define INTR_STATUS3__ERASE_FAIL 0x0020 +#define INTR_STATUS3__LOAD_COMP 0x0040 +#define INTR_STATUS3__PROGRAM_COMP 0x0080 +#define INTR_STATUS3__ERASE_COMP 
0x0100 +#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200 +#define INTR_STATUS3__LOCKED_BLK 0x0400 +#define INTR_STATUS3__UNSUP_CMD 0x0800 +#define INTR_STATUS3__INT_ACT 0x1000 +#define INTR_STATUS3__RST_COMP 0x2000 +#define INTR_STATUS3__PIPE_CMD_ERR 0x4000 +#define INTR_STATUS3__PAGE_XFER_INC 0x8000 + +#define INTR_EN3 0x510 +#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001 +#define INTR_EN3__ECC_ERR 0x0002 +#define INTR_EN3__DMA_CMD_COMP 0x0004 +#define INTR_EN3__TIME_OUT 0x0008 +#define INTR_EN3__PROGRAM_FAIL 0x0010 +#define INTR_EN3__ERASE_FAIL 0x0020 +#define INTR_EN3__LOAD_COMP 0x0040 +#define INTR_EN3__PROGRAM_COMP 0x0080 +#define INTR_EN3__ERASE_COMP 0x0100 +#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200 +#define INTR_EN3__LOCKED_BLK 0x0400 +#define INTR_EN3__UNSUP_CMD 0x0800 +#define INTR_EN3__INT_ACT 0x1000 +#define INTR_EN3__RST_COMP 0x2000 +#define INTR_EN3__PIPE_CMD_ERR 0x4000 +#define INTR_EN3__PAGE_XFER_INC 0x8000 + +#define PAGE_CNT3 0x520 +#define PAGE_CNT3__VALUE 0x00ff + +#define ERR_PAGE_ADDR3 0x530 +#define ERR_PAGE_ADDR3__VALUE 0xffff + +#define ERR_BLOCK_ADDR3 0x540 +#define ERR_BLOCK_ADDR3__VALUE 0xffff + +#define DATA_INTR 0x550 +#define DATA_INTR__WRITE_SPACE_AV 0x0001 +#define DATA_INTR__READ_DATA_AV 0x0002 + +#define DATA_INTR_EN 0x560 +#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001 +#define DATA_INTR_EN__READ_DATA_AV 0x0002 + +#define GPREG_0 0x570 +#define GPREG_0__VALUE 0xffff + +#define GPREG_1 0x580 +#define GPREG_1__VALUE 0xffff + +#define GPREG_2 0x590 +#define GPREG_2__VALUE 0xffff + +#define GPREG_3 0x5a0 +#define GPREG_3__VALUE 0xffff + +#define ECC_THRESHOLD 0x600 +#define ECC_THRESHOLD__VALUE 0x03ff + +#define ECC_ERROR_BLOCK_ADDRESS 0x610 +#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff + +#define ECC_ERROR_PAGE_ADDRESS 0x620 +#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff +#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000 + +#define ECC_ERROR_ADDRESS 0x630 +#define ECC_ERROR_ADDRESS__OFFSET 0x0fff +#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000 + +#define ERR_CORRECTION_INFO 0x640 +#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff +#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00 +#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000 +#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000 + +#define DMA_ENABLE 0x700 +#define DMA_ENABLE__FLAG 0x0001 + +#define IGNORE_ECC_DONE 0x710 +#define IGNORE_ECC_DONE__FLAG 0x0001 + +#define DMA_INTR 0x720 +#define DMA_INTR__TARGET_ERROR 0x0001 +#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002 +#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004 +#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008 +#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010 +#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020 + +#define DMA_INTR_EN 0x730 +#define DMA_INTR_EN__TARGET_ERROR 0x0001 +#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002 +#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004 +#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008 +#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010 +#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020 + +#define TARGET_ERR_ADDR_LO 0x740 +#define TARGET_ERR_ADDR_LO__VALUE 0xffff + +#define TARGET_ERR_ADDR_HI 0x750 +#define TARGET_ERR_ADDR_HI__VALUE 0xffff + +#define CHNL_ACTIVE 0x760 +#define CHNL_ACTIVE__CHANNEL0 0x0001 +#define CHNL_ACTIVE__CHANNEL1 0x0002 +#define CHNL_ACTIVE__CHANNEL2 0x0004 +#define CHNL_ACTIVE__CHANNEL3 0x0008 + +#define ACTIVE_SRC_ID 0x800 +#define ACTIVE_SRC_ID__VALUE 0x00ff + +#define PTN_INTR 0x810 +#define PTN_INTR__CONFIG_ERROR 0x0001 +#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002 +#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004 +#define 
PTN_INTR__ACCESS_ERROR_BANK2 0x0008 +#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010 +#define PTN_INTR__REG_ACCESS_ERROR 0x0020 + +#define PTN_INTR_EN 0x820 +#define PTN_INTR_EN__CONFIG_ERROR 0x0001 +#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002 +#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004 +#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008 +#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010 +#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020 + +#define PERM_SRC_ID_0 0x830 +#define PERM_SRC_ID_0__SRCID 0x00ff +#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800 +#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000 +#define PERM_SRC_ID_0__READ_ACTIVE 0x4000 +#define PERM_SRC_ID_0__PARTITION_VALID 0x8000 + +#define MIN_BLK_ADDR_0 0x840 +#define MIN_BLK_ADDR_0__VALUE 0xffff + +#define MAX_BLK_ADDR_0 0x850 +#define MAX_BLK_ADDR_0__VALUE 0xffff + +#define MIN_MAX_BANK_0 0x860 +#define MIN_MAX_BANK_0__MIN_VALUE 0x0003 +#define MIN_MAX_BANK_0__MAX_VALUE 0x000c + +#define PERM_SRC_ID_1 0x870 +#define PERM_SRC_ID_1__SRCID 0x00ff +#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800 +#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000 +#define PERM_SRC_ID_1__READ_ACTIVE 0x4000 +#define PERM_SRC_ID_1__PARTITION_VALID 0x8000 + +#define MIN_BLK_ADDR_1 0x880 +#define MIN_BLK_ADDR_1__VALUE 0xffff + +#define MAX_BLK_ADDR_1 0x890 +#define MAX_BLK_ADDR_1__VALUE 0xffff + +#define MIN_MAX_BANK_1 0x8a0 +#define MIN_MAX_BANK_1__MIN_VALUE 0x0003 +#define MIN_MAX_BANK_1__MAX_VALUE 0x000c + +#define PERM_SRC_ID_2 0x8b0 +#define PERM_SRC_ID_2__SRCID 0x00ff +#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800 +#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000 +#define PERM_SRC_ID_2__READ_ACTIVE 0x4000 +#define PERM_SRC_ID_2__PARTITION_VALID 0x8000 + +#define MIN_BLK_ADDR_2 0x8c0 +#define MIN_BLK_ADDR_2__VALUE 0xffff + +#define MAX_BLK_ADDR_2 0x8d0 +#define MAX_BLK_ADDR_2__VALUE 0xffff + +#define MIN_MAX_BANK_2 0x8e0 +#define MIN_MAX_BANK_2__MIN_VALUE 0x0003 +#define MIN_MAX_BANK_2__MAX_VALUE 0x000c + +#define PERM_SRC_ID_3 0x8f0 +#define PERM_SRC_ID_3__SRCID 0x00ff +#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800 +#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000 +#define PERM_SRC_ID_3__READ_ACTIVE 0x4000 +#define PERM_SRC_ID_3__PARTITION_VALID 0x8000 + +#define MIN_BLK_ADDR_3 0x900 +#define MIN_BLK_ADDR_3__VALUE 0xffff + +#define MAX_BLK_ADDR_3 0x910 +#define MAX_BLK_ADDR_3__VALUE 0xffff + +#define MIN_MAX_BANK_3 0x920 +#define MIN_MAX_BANK_3__MIN_VALUE 0x0003 +#define MIN_MAX_BANK_3__MAX_VALUE 0x000c + +#define PERM_SRC_ID_4 0x930 +#define PERM_SRC_ID_4__SRCID 0x00ff +#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800 +#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000 +#define PERM_SRC_ID_4__READ_ACTIVE 0x4000 +#define PERM_SRC_ID_4__PARTITION_VALID 0x8000 + +#define MIN_BLK_ADDR_4 0x940 +#define MIN_BLK_ADDR_4__VALUE 0xffff + +#define MAX_BLK_ADDR_4 0x950 +#define MAX_BLK_ADDR_4__VALUE 0xffff + +#define MIN_MAX_BANK_4 0x960 +#define MIN_MAX_BANK_4__MIN_VALUE 0x0003 +#define MIN_MAX_BANK_4__MAX_VALUE 0x000c + +#define PERM_SRC_ID_5 0x970 +#define PERM_SRC_ID_5__SRCID 0x00ff +#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800 +#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000 +#define PERM_SRC_ID_5__READ_ACTIVE 0x4000 +#define PERM_SRC_ID_5__PARTITION_VALID 0x8000 + +#define MIN_BLK_ADDR_5 0x980 +#define MIN_BLK_ADDR_5__VALUE 0xffff + +#define MAX_BLK_ADDR_5 0x990 +#define MAX_BLK_ADDR_5__VALUE 0xffff + +#define MIN_MAX_BANK_5 0x9a0 +#define MIN_MAX_BANK_5__MIN_VALUE 0x0003 +#define MIN_MAX_BANK_5__MAX_VALUE 0x000c + +#define PERM_SRC_ID_6 0x9b0 +#define PERM_SRC_ID_6__SRCID 
0x00ff +#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800 +#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000 +#define PERM_SRC_ID_6__READ_ACTIVE 0x4000 +#define PERM_SRC_ID_6__PARTITION_VALID 0x8000 + +#define MIN_BLK_ADDR_6 0x9c0 +#define MIN_BLK_ADDR_6__VALUE 0xffff + +#define MAX_BLK_ADDR_6 0x9d0 +#define MAX_BLK_ADDR_6__VALUE 0xffff + +#define MIN_MAX_BANK_6 0x9e0 +#define MIN_MAX_BANK_6__MIN_VALUE 0x0003 +#define MIN_MAX_BANK_6__MAX_VALUE 0x000c + +#define PERM_SRC_ID_7 0x9f0 +#define PERM_SRC_ID_7__SRCID 0x00ff +#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800 +#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000 +#define PERM_SRC_ID_7__READ_ACTIVE 0x4000 +#define PERM_SRC_ID_7__PARTITION_VALID 0x8000 + +#define MIN_BLK_ADDR_7 0xa00 +#define MIN_BLK_ADDR_7__VALUE 0xffff + +#define MAX_BLK_ADDR_7 0xa10 +#define MAX_BLK_ADDR_7__VALUE 0xffff + +#define MIN_MAX_BANK_7 0xa20 +#define MIN_MAX_BANK_7__MIN_VALUE 0x0003 +#define MIN_MAX_BANK_7__MAX_VALUE 0x000c + +/* flash.h */ +struct device_info_tag { + uint16_t wDeviceMaker; + uint16_t wDeviceID; + uint8_t bDeviceParam0; + uint8_t bDeviceParam1; + uint8_t bDeviceParam2; + uint32_t wDeviceType; + uint32_t wSpectraStartBlock; + uint32_t wSpectraEndBlock; + uint32_t wTotalBlocks; + uint16_t wPagesPerBlock; + uint16_t wPageSize; + uint16_t wPageDataSize; + uint16_t wPageSpareSize; + uint16_t wNumPageSpareFlag; + uint16_t wECCBytesPerSector; + uint32_t wBlockSize; + uint32_t wBlockDataSize; + uint32_t wDataBlockNum; + uint8_t bPlaneNum; + uint16_t wDeviceMainAreaSize; + uint16_t wDeviceSpareAreaSize; + uint16_t wDevicesConnected; + uint16_t wDeviceWidth; + uint16_t wHWRevision; + uint16_t wHWFeatures; + + uint16_t wONFIDevFeatures; + uint16_t wONFIOptCommands; + uint16_t wONFITimingMode; + uint16_t wONFIPgmCacheTimingMode; + + uint16_t MLCDevice; + uint16_t wSpareSkipBytes; + + uint8_t nBitsInPageNumber; + uint8_t nBitsInPageDataSize; + uint8_t nBitsInBlockDataSize; +}; + +/* ffsdefs.h */ +#define CLEAR 0 /*use this to clear a field instead of "fail"*/ +#define SET 1 /*use this to set a field instead of "pass"*/ +#define FAIL 1 /*failed flag*/ +#define PASS 0 /*success flag*/ +#define ERR -1 /*error flag*/ + +/* lld.h */ +#define GOOD_BLOCK 0 +#define DEFECTIVE_BLOCK 1 +#define READ_ERROR 2 + +#define CLK_X 5 +#define CLK_MULTI 4 + +/* ffsport.h */ +#define VERBOSE 1 + +#define NAND_DBG_WARN 1 +#define NAND_DBG_DEBUG 2 +#define NAND_DBG_TRACE 3 + +#ifdef VERBOSE +#define nand_dbg_print(level, args...) \ + do { \ + if (level <= nand_debug_level) \ + printk(KERN_ALERT args); \ + } while (0) +#else +#define nand_dbg_print(level, args...) +#endif + + +/* spectraswconfig.h */ +#define CMD_DMA 0 + +#define SPECTRA_PARTITION_ID 0 +/**** Block Table and Reserved Block Parameters *****/ +#define SPECTRA_START_BLOCK 3 +#define NUM_FREE_BLOCKS_GATE 30 + +/* KBV - Updated to LNW scratch register address */ +#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR +#define SCRATCH_REG_SIZE 64 + +#define GLOB_HWCTL_DEFAULT_BLKS 2048 + +#define SUPPORT_15BITECC 1 +#define SUPPORT_8BITECC 1 + +#define CUSTOM_CONF_PARAMS 0 + +#define ONFI_BLOOM_TIME 1 +#define MODE5_WORKAROUND 0 + +/* lld_nand.h */ +/* + * NAND Flash Controller Device Driver + * Copyright (c) 2009, Intel Corporation and its suppliers. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#ifndef _LLD_NAND_ +#define _LLD_NAND_ + +#define MODE_00 0x00000000 +#define MODE_01 0x04000000 +#define MODE_10 0x08000000 +#define MODE_11 0x0C000000 + + +#define DATA_TRANSFER_MODE 0 +#define PROTECTION_PER_BLOCK 1 +#define LOAD_WAIT_COUNT 2 +#define PROGRAM_WAIT_COUNT 3 +#define ERASE_WAIT_COUNT 4 +#define INT_MONITOR_CYCLE_COUNT 5 +#define READ_BUSY_PIN_ENABLED 6 +#define MULTIPLANE_OPERATION_SUPPORT 7 +#define PRE_FETCH_MODE 8 +#define CE_DONT_CARE_SUPPORT 9 +#define COPYBACK_SUPPORT 10 +#define CACHE_WRITE_SUPPORT 11 +#define CACHE_READ_SUPPORT 12 +#define NUM_PAGES_IN_BLOCK 13 +#define ECC_ENABLE_SELECT 14 +#define WRITE_ENABLE_2_READ_ENABLE 15 +#define ADDRESS_2_DATA 16 +#define READ_ENABLE_2_WRITE_ENABLE 17 +#define TWO_ROW_ADDRESS_CYCLES 18 +#define MULTIPLANE_ADDRESS_RESTRICT 19 +#define ACC_CLOCKS 20 +#define READ_WRITE_ENABLE_LOW_COUNT 21 +#define READ_WRITE_ENABLE_HIGH_COUNT 22 + +#define ECC_SECTOR_SIZE 512 +#define LLD_MAX_FLASH_BANKS 4 + +#define DENALI_BUF_SIZE NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE + +struct nand_buf +{ + int head; + int tail; + uint8_t buf[DENALI_BUF_SIZE]; + dma_addr_t dma_buf; +}; + +#define INTEL_CE4100 1 +#define INTEL_MRST 2 + +struct denali_nand_info { + struct mtd_info mtd; + struct nand_chip nand; + struct device_info_tag dev_info; + int flash_bank; /* currently selected chip */ + int status; + int platform; + struct nand_buf buf; + struct pci_dev *dev; + int total_used_banks; + uint32_t block; /* stored for future use */ + uint16_t page; + void __iomem *flash_reg; /* Mapped io reg base address */ + void __iomem *flash_mem; /* Mapped io reg base address */ + + /* elements used by ISR */ + struct completion complete; + spinlock_t irq_lock; + uint32_t irq_status; + int irq_debug_array[32]; + int idx; +}; + +static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali); +static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali); +static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, uint16_t INT_ENABLE); + +#endif /*_LLD_NAND_*/ + diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index ae30fb6eed9..3f38fb8e666 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c @@ -874,7 +874,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl, priv->ctrl = ctrl; priv->dev = ctrl->dev; - priv->vbase = ioremap(res.start, res.end - res.start + 1); + priv->vbase = ioremap(res.start, resource_size(&res)); if (!priv->vbase) { dev_err(ctrl->dev, "failed to map chip region\n"); ret = -ENOMEM; @@ -891,7 +891,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl, if (ret) goto err; - ret = nand_scan_ident(&priv->mtd, 1); + ret = nand_scan_ident(&priv->mtd, 1, NULL); if (ret) goto err; diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c index 4b96296af32..2d215ccb564 100644 --- a/drivers/mtd/nand/fsl_upm.c +++ b/drivers/mtd/nand/fsl_upm.c @@ -49,7 +49,10 @@ struct fsl_upm_nand { uint32_t wait_flags; }; -#define to_fsl_upm_nand(mtd) 
container_of(mtd, struct fsl_upm_nand, mtd) +static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo) +{ + return container_of(mtdinfo, struct fsl_upm_nand, mtd); +} static int fun_chip_ready(struct mtd_info *mtd) { @@ -303,7 +306,7 @@ static int __devinit fun_probe(struct of_device *ofdev, FSL_UPM_WAIT_WRITE_BYTE; fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start, - io_res.end - io_res.start + 1); + resource_size(&io_res)); if (!fun->io_base) { ret = -ENOMEM; goto err2; @@ -350,7 +353,7 @@ static int __devexit fun_remove(struct of_device *ofdev) return 0; } -static struct of_device_id of_fun_match[] = { +static const struct of_device_id of_fun_match[] = { { .compatible = "fsl,upm-nand" }, {}, }; diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c index 8f902e75aa8..0cde618bcc1 100644 --- a/drivers/mtd/nand/gpio.c +++ b/drivers/mtd/nand/gpio.c @@ -181,11 +181,11 @@ static int __devexit gpio_nand_remove(struct platform_device *dev) res = platform_get_resource(dev, IORESOURCE_MEM, 1); iounmap(gpiomtd->io_sync); if (res) - release_mem_region(res->start, res->end - res->start + 1); + release_mem_region(res->start, resource_size(res)); res = platform_get_resource(dev, IORESOURCE_MEM, 0); iounmap(gpiomtd->nand_chip.IO_ADDR_R); - release_mem_region(res->start, res->end - res->start + 1); + release_mem_region(res->start, resource_size(res)); if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) gpio_set_value(gpiomtd->plat.gpio_nwp, 0); @@ -208,14 +208,14 @@ static void __iomem *request_and_remap(struct resource *res, size_t size, { void __iomem *ptr; - if (!request_mem_region(res->start, res->end - res->start + 1, name)) { + if (!request_mem_region(res->start, resource_size(res), name)) { *err = -EBUSY; return NULL; } ptr = ioremap(res->start, size); if (!ptr) { - release_mem_region(res->start, res->end - res->start + 1); + release_mem_region(res->start, resource_size(res)); *err = -ENOMEM; } return ptr; @@ -338,10 +338,10 @@ err_nwp: err_nce: iounmap(gpiomtd->io_sync); if (res1) - release_mem_region(res1->start, res1->end - res1->start + 1); + release_mem_region(res1->start, resource_size(res1)); err_sync: iounmap(gpiomtd->nand_chip.IO_ADDR_R); - release_mem_region(res0->start, res0->end - res0->start + 1); + release_mem_region(res0->start, resource_size(res0)); err_map: kfree(gpiomtd); return ret; diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c new file mode 100644 index 00000000000..3d0867d829c --- /dev/null +++ b/drivers/mtd/nand/mpc5121_nfc.c @@ -0,0 +1,917 @@ +/* + * Copyright 2004-2008 Freescale Semiconductor, Inc. + * Copyright 2009 Semihalf. + * + * Approved as OSADL project by a majority of OSADL members and funded + * by OSADL membership fees in 2009; for details see www.osadl.org. + * + * Based on original driver from Freescale Semiconductor + * written by John Rigby <jrigby@freescale.com> on basis + * of drivers/mtd/nand/mxc_nand.c. Reworked and extended + * Piotr Ziecik <kosmo@semihalf.com>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301, USA. + */ + +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/gfp.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/partitions.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> + +#include <asm/mpc5121.h> + +/* Addresses for NFC MAIN RAM BUFFER areas */ +#define NFC_MAIN_AREA(n) ((n) * 0x200) + +/* Addresses for NFC SPARE BUFFER areas */ +#define NFC_SPARE_BUFFERS 8 +#define NFC_SPARE_LEN 0x40 +#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN)) + +/* MPC5121 NFC registers */ +#define NFC_BUF_ADDR 0x1E04 +#define NFC_FLASH_ADDR 0x1E06 +#define NFC_FLASH_CMD 0x1E08 +#define NFC_CONFIG 0x1E0A +#define NFC_ECC_STATUS1 0x1E0C +#define NFC_ECC_STATUS2 0x1E0E +#define NFC_SPAS 0x1E10 +#define NFC_WRPROT 0x1E12 +#define NFC_NF_WRPRST 0x1E18 +#define NFC_CONFIG1 0x1E1A +#define NFC_CONFIG2 0x1E1C +#define NFC_UNLOCKSTART_BLK0 0x1E20 +#define NFC_UNLOCKEND_BLK0 0x1E22 +#define NFC_UNLOCKSTART_BLK1 0x1E24 +#define NFC_UNLOCKEND_BLK1 0x1E26 +#define NFC_UNLOCKSTART_BLK2 0x1E28 +#define NFC_UNLOCKEND_BLK2 0x1E2A +#define NFC_UNLOCKSTART_BLK3 0x1E2C +#define NFC_UNLOCKEND_BLK3 0x1E2E + +/* Bit Definitions: NFC_BUF_ADDR */ +#define NFC_RBA_MASK (7 << 0) +#define NFC_ACTIVE_CS_SHIFT 5 +#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT) + +/* Bit Definitions: NFC_CONFIG */ +#define NFC_BLS_UNLOCKED (1 << 1) + +/* Bit Definitions: NFC_CONFIG1 */ +#define NFC_ECC_4BIT (1 << 0) +#define NFC_FULL_PAGE_DMA (1 << 1) +#define NFC_SPARE_ONLY (1 << 2) +#define NFC_ECC_ENABLE (1 << 3) +#define NFC_INT_MASK (1 << 4) +#define NFC_BIG_ENDIAN (1 << 5) +#define NFC_RESET (1 << 6) +#define NFC_CE (1 << 7) +#define NFC_ONE_CYCLE (1 << 8) +#define NFC_PPB_32 (0 << 9) +#define NFC_PPB_64 (1 << 9) +#define NFC_PPB_128 (2 << 9) +#define NFC_PPB_256 (3 << 9) +#define NFC_PPB_MASK (3 << 9) +#define NFC_FULL_PAGE_INT (1 << 11) + +/* Bit Definitions: NFC_CONFIG2 */ +#define NFC_COMMAND (1 << 0) +#define NFC_ADDRESS (1 << 1) +#define NFC_INPUT (1 << 2) +#define NFC_OUTPUT (1 << 3) +#define NFC_ID (1 << 4) +#define NFC_STATUS (1 << 5) +#define NFC_CMD_FAIL (1 << 15) +#define NFC_INT (1 << 15) + +/* Bit Definitions: NFC_WRPROT */ +#define NFC_WPC_LOCK_TIGHT (1 << 0) +#define NFC_WPC_LOCK (1 << 1) +#define NFC_WPC_UNLOCK (1 << 2) + +#define DRV_NAME "mpc5121_nfc" + +/* Timeouts */ +#define NFC_RESET_TIMEOUT 1000 /* 1 ms */ +#define NFC_TIMEOUT (HZ / 10) /* 1/10 s */ + +struct mpc5121_nfc_prv { + struct mtd_info mtd; + struct nand_chip chip; + int irq; + void __iomem *regs; + struct clk *clk; + wait_queue_head_t irq_waitq; + uint column; + int spareonly; + void __iomem *csreg; + struct device *dev; +}; + +static void mpc5121_nfc_done(struct mtd_info *mtd); + +#ifdef CONFIG_MTD_PARTITIONS +static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL }; +#endif + +/* Read NFC register */ +static inline u16 nfc_read(struct mtd_info *mtd, uint reg) +{ + struct nand_chip *chip = mtd->priv; + struct mpc5121_nfc_prv *prv = chip->priv; + + return in_be16(prv->regs + reg); +} + +/* Write NFC register */ +static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val) +{ + struct nand_chip *chip = mtd->priv; + struct mpc5121_nfc_prv *prv = 
chip->priv; + + out_be16(prv->regs + reg, val); +} + +/* Set bits in NFC register */ +static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits) +{ + nfc_write(mtd, reg, nfc_read(mtd, reg) | bits); +} + +/* Clear bits in NFC register */ +static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits) +{ + nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits); +} + +/* Invoke address cycle */ +static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr) +{ + nfc_write(mtd, NFC_FLASH_ADDR, addr); + nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS); + mpc5121_nfc_done(mtd); +} + +/* Invoke command cycle */ +static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd) +{ + nfc_write(mtd, NFC_FLASH_CMD, cmd); + nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND); + mpc5121_nfc_done(mtd); +} + +/* Send data from NFC buffers to NAND flash */ +static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd) +{ + nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); + nfc_write(mtd, NFC_CONFIG2, NFC_INPUT); + mpc5121_nfc_done(mtd); +} + +/* Receive data from NAND flash */ +static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd) +{ + nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); + nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT); + mpc5121_nfc_done(mtd); +} + +/* Receive ID from NAND flash */ +static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd) +{ + nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); + nfc_write(mtd, NFC_CONFIG2, NFC_ID); + mpc5121_nfc_done(mtd); +} + +/* Receive status from NAND flash */ +static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd) +{ + nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); + nfc_write(mtd, NFC_CONFIG2, NFC_STATUS); + mpc5121_nfc_done(mtd); +} + +/* NFC interrupt handler */ +static irqreturn_t mpc5121_nfc_irq(int irq, void *data) +{ + struct mtd_info *mtd = data; + struct nand_chip *chip = mtd->priv; + struct mpc5121_nfc_prv *prv = chip->priv; + + nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK); + wake_up(&prv->irq_waitq); + + return IRQ_HANDLED; +} + +/* Wait for operation complete */ +static void mpc5121_nfc_done(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd->priv; + struct mpc5121_nfc_prv *prv = chip->priv; + int rv; + + if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) { + nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK); + rv = wait_event_timeout(prv->irq_waitq, + (nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT); + + if (!rv) + dev_warn(prv->dev, + "Timeout while waiting for interrupt.\n"); + } + + nfc_clear(mtd, NFC_CONFIG2, NFC_INT); +} + +/* Do address cycle(s) */ +static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page) +{ + struct nand_chip *chip = mtd->priv; + u32 pagemask = chip->pagemask; + + if (column != -1) { + mpc5121_nfc_send_addr(mtd, column); + if (mtd->writesize > 512) + mpc5121_nfc_send_addr(mtd, column >> 8); + } + + if (page != -1) { + do { + mpc5121_nfc_send_addr(mtd, page & 0xFF); + page >>= 8; + pagemask >>= 8; + } while (pagemask); + } +} + +/* Control chip select signals */ +static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip) +{ + if (chip < 0) { + nfc_clear(mtd, NFC_CONFIG1, NFC_CE); + return; + } + + nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK); + nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) & + NFC_ACTIVE_CS_MASK); + nfc_set(mtd, NFC_CONFIG1, NFC_CE); +} + +/* Init external chip select logic on ADS5121 board */ +static int ads5121_chipselect_init(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd->priv; + struct mpc5121_nfc_prv *prv = chip->priv; + 
struct device_node *dn; + + dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld"); + if (dn) { + prv->csreg = of_iomap(dn, 0); + of_node_put(dn); + if (!prv->csreg) + return -ENOMEM; + + /* CPLD Register 9 controls NAND /CE Lines */ + prv->csreg += 9; + return 0; + } + + return -EINVAL; +} + +/* Control chips select signal on ADS5121 board */ +static void ads5121_select_chip(struct mtd_info *mtd, int chip) +{ + struct nand_chip *nand = mtd->priv; + struct mpc5121_nfc_prv *prv = nand->priv; + u8 v; + + v = in_8(prv->csreg); + v |= 0x0F; + + if (chip >= 0) { + mpc5121_nfc_select_chip(mtd, 0); + v &= ~(1 << chip); + } else + mpc5121_nfc_select_chip(mtd, -1); + + out_8(prv->csreg, v); +} + +/* Read NAND Ready/Busy signal */ +static int mpc5121_nfc_dev_ready(struct mtd_info *mtd) +{ + /* + * NFC handles ready/busy signal internally. Therefore, this function + * always returns status as ready. + */ + return 1; +} + +/* Write command to NAND flash */ +static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command, + int column, int page) +{ + struct nand_chip *chip = mtd->priv; + struct mpc5121_nfc_prv *prv = chip->priv; + + prv->column = (column >= 0) ? column : 0; + prv->spareonly = 0; + + switch (command) { + case NAND_CMD_PAGEPROG: + mpc5121_nfc_send_prog_page(mtd); + break; + /* + * NFC does not support sub-page reads and writes, + * so emulate them using full page transfers. + */ + case NAND_CMD_READ0: + column = 0; + break; + + case NAND_CMD_READ1: + prv->column += 256; + command = NAND_CMD_READ0; + column = 0; + break; + + case NAND_CMD_READOOB: + prv->spareonly = 1; + command = NAND_CMD_READ0; + column = 0; + break; + + case NAND_CMD_SEQIN: + mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page); + column = 0; + break; + + case NAND_CMD_ERASE1: + case NAND_CMD_ERASE2: + case NAND_CMD_READID: + case NAND_CMD_STATUS: + break; + + default: + return; + } + + mpc5121_nfc_send_cmd(mtd, command); + mpc5121_nfc_addr_cycle(mtd, column, page); + + switch (command) { + case NAND_CMD_READ0: + if (mtd->writesize > 512) + mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART); + mpc5121_nfc_send_read_page(mtd); + break; + + case NAND_CMD_READID: + mpc5121_nfc_send_read_id(mtd); + break; + + case NAND_CMD_STATUS: + mpc5121_nfc_send_read_status(mtd); + if (chip->options & NAND_BUSWIDTH_16) + prv->column = 1; + else + prv->column = 0; + break; + } +} + +/* Copy data from/to NFC spare buffers. */ +static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset, + u8 *buffer, uint size, int wr) +{ + struct nand_chip *nand = mtd->priv; + struct mpc5121_nfc_prv *prv = nand->priv; + uint o, s, sbsize, blksize; + + /* + * NAND spare area is available through NFC spare buffers. + * The NFC divides spare area into (page_size / 512) chunks. + * Each chunk is placed into separate spare memory area, using + * first (spare_size / num_of_chunks) bytes of the buffer. + * + * For NAND device in which the spare area is not divided fully + * by the number of chunks, number of used bytes in each spare + * buffer is rounded down to the nearest even number of bytes, + * and all remaining bytes are added to the last used spare area. + * + * For more information read section 26.6.10 of MPC5121e + * Microcontroller Reference Manual, Rev. 3. 
+ */ + + /* Calculate number of valid bytes in each spare buffer */ + sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1; + + while (size) { + /* Calculate spare buffer number */ + s = offset / sbsize; + if (s > NFC_SPARE_BUFFERS - 1) + s = NFC_SPARE_BUFFERS - 1; + + /* + * Calculate offset to requested data block in selected spare + * buffer and its size. + */ + o = offset - (s * sbsize); + blksize = min(sbsize - o, size); + + if (wr) + memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o, + buffer, blksize); + else + memcpy_fromio(buffer, + prv->regs + NFC_SPARE_AREA(s) + o, blksize); + + buffer += blksize; + offset += blksize; + size -= blksize; + }; +} + +/* Copy data from/to NFC main and spare buffers */ +static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len, + int wr) +{ + struct nand_chip *chip = mtd->priv; + struct mpc5121_nfc_prv *prv = chip->priv; + uint c = prv->column; + uint l; + + /* Handle spare area access */ + if (prv->spareonly || c >= mtd->writesize) { + /* Calculate offset from beginning of spare area */ + if (c >= mtd->writesize) + c -= mtd->writesize; + + prv->column += len; + mpc5121_nfc_copy_spare(mtd, c, buf, len, wr); + return; + } + + /* + * Handle main area access - limit copy length to prevent + * crossing main/spare boundary. + */ + l = min((uint)len, mtd->writesize - c); + prv->column += l; + + if (wr) + memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l); + else + memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l); + + /* Handle crossing main/spare boundary */ + if (l != len) { + buf += l; + len -= l; + mpc5121_nfc_buf_copy(mtd, buf, len, wr); + } +} + +/* Read data from NFC buffers */ +static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len) +{ + mpc5121_nfc_buf_copy(mtd, buf, len, 0); +} + +/* Write data to NFC buffers */ +static void mpc5121_nfc_write_buf(struct mtd_info *mtd, + const u_char *buf, int len) +{ + mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1); +} + +/* Compare buffer with NAND flash */ +static int mpc5121_nfc_verify_buf(struct mtd_info *mtd, + const u_char *buf, int len) +{ + u_char tmp[256]; + uint bsize; + + while (len) { + bsize = min(len, 256); + mpc5121_nfc_read_buf(mtd, tmp, bsize); + + if (memcmp(buf, tmp, bsize)) + return 1; + + buf += bsize; + len -= bsize; + } + + return 0; +} + +/* Read byte from NFC buffers */ +static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd) +{ + u8 tmp; + + mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp)); + + return tmp; +} + +/* Read word from NFC buffers */ +static u16 mpc5121_nfc_read_word(struct mtd_info *mtd) +{ + u16 tmp; + + mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp)); + + return tmp; +} + +/* + * Read NFC configuration from Reset Config Word + * + * NFC is configured during reset in basis of information stored + * in Reset Config Word. There is no other way to set NAND block + * size, spare size and bus width. 
+ */ +static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd->priv; + struct mpc5121_nfc_prv *prv = chip->priv; + struct mpc512x_reset_module *rm; + struct device_node *rmnode; + uint rcw_pagesize = 0; + uint rcw_sparesize = 0; + uint rcw_width; + uint rcwh; + uint romloc, ps; + + rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset"); + if (!rmnode) { + dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' " + "node in device tree!\n"); + return -ENODEV; + } + + rm = of_iomap(rmnode, 0); + if (!rm) { + dev_err(prv->dev, "Error mapping reset module node!\n"); + return -EBUSY; + } + + rcwh = in_be32(&rm->rcwhr); + + /* Bit 6: NFC bus width */ + rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1; + + /* Bit 7: NFC Page/Spare size */ + ps = (rcwh >> 7) & 0x1; + + /* Bits [22:21]: ROM Location */ + romloc = (rcwh >> 21) & 0x3; + + /* Decode RCW bits */ + switch ((ps << 2) | romloc) { + case 0x00: + case 0x01: + rcw_pagesize = 512; + rcw_sparesize = 16; + break; + case 0x02: + case 0x03: + rcw_pagesize = 4096; + rcw_sparesize = 128; + break; + case 0x04: + case 0x05: + rcw_pagesize = 2048; + rcw_sparesize = 64; + break; + case 0x06: + case 0x07: + rcw_pagesize = 4096; + rcw_sparesize = 218; + break; + } + + mtd->writesize = rcw_pagesize; + mtd->oobsize = rcw_sparesize; + if (rcw_width == 2) + chip->options |= NAND_BUSWIDTH_16; + + dev_notice(prv->dev, "Configured for " + "%u-bit NAND, page size %u " + "with %u spare.\n", + rcw_width * 8, rcw_pagesize, + rcw_sparesize); + iounmap(rm); + of_node_put(rmnode); + return 0; +} + +/* Free driver resources */ +static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd->priv; + struct mpc5121_nfc_prv *prv = chip->priv; + + if (prv->clk) { + clk_disable(prv->clk); + clk_put(prv->clk); + } + + if (prv->csreg) + iounmap(prv->csreg); +} + +static int __devinit mpc5121_nfc_probe(struct of_device *op, + const struct of_device_id *match) +{ + struct device_node *rootnode, *dn = op->node; + struct device *dev = &op->dev; + struct mpc5121_nfc_prv *prv; + struct resource res; + struct mtd_info *mtd; +#ifdef CONFIG_MTD_PARTITIONS + struct mtd_partition *parts; +#endif + struct nand_chip *chip; + unsigned long regs_paddr, regs_size; + const uint *chips_no; + int resettime = 0; + int retval = 0; + int rev, len; + + /* + * Check SoC revision. This driver supports only NFC + * in MPC5121 revision 2 and MPC5123 revision 3. 
+ */ + rev = (mfspr(SPRN_SVR) >> 4) & 0xF; + if ((rev != 2) && (rev != 3)) { + dev_err(dev, "SoC revision %u is not supported!\n", rev); + return -ENXIO; + } + + prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL); + if (!prv) { + dev_err(dev, "Memory exhausted!\n"); + return -ENOMEM; + } + + mtd = &prv->mtd; + chip = &prv->chip; + + mtd->priv = chip; + chip->priv = prv; + prv->dev = dev; + + /* Read NFC configuration from Reset Config Word */ + retval = mpc5121_nfc_read_hw_config(mtd); + if (retval) { + dev_err(dev, "Unable to read NFC config!\n"); + return retval; + } + + prv->irq = irq_of_parse_and_map(dn, 0); + if (prv->irq == NO_IRQ) { + dev_err(dev, "Error mapping IRQ!\n"); + return -EINVAL; + } + + retval = of_address_to_resource(dn, 0, &res); + if (retval) { + dev_err(dev, "Error parsing memory region!\n"); + return retval; + } + + chips_no = of_get_property(dn, "chips", &len); + if (!chips_no || len != sizeof(*chips_no)) { + dev_err(dev, "Invalid/missing 'chips' property!\n"); + return -EINVAL; + } + + regs_paddr = res.start; + regs_size = res.end - res.start + 1; + + if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) { + dev_err(dev, "Error requesting memory region!\n"); + return -EBUSY; + } + + prv->regs = devm_ioremap(dev, regs_paddr, regs_size); + if (!prv->regs) { + dev_err(dev, "Error mapping memory region!\n"); + return -ENOMEM; + } + + mtd->name = "MPC5121 NAND"; + chip->dev_ready = mpc5121_nfc_dev_ready; + chip->cmdfunc = mpc5121_nfc_command; + chip->read_byte = mpc5121_nfc_read_byte; + chip->read_word = mpc5121_nfc_read_word; + chip->read_buf = mpc5121_nfc_read_buf; + chip->write_buf = mpc5121_nfc_write_buf; + chip->verify_buf = mpc5121_nfc_verify_buf; + chip->select_chip = mpc5121_nfc_select_chip; + chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT; + chip->ecc.mode = NAND_ECC_SOFT; + + /* Support external chip-select logic on ADS5121 board */ + rootnode = of_find_node_by_path("/"); + if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) { + retval = ads5121_chipselect_init(mtd); + if (retval) { + dev_err(dev, "Chipselect init error!\n"); + of_node_put(rootnode); + return retval; + } + + chip->select_chip = ads5121_select_chip; + } + of_node_put(rootnode); + + /* Enable NFC clock */ + prv->clk = clk_get(dev, "nfc_clk"); + if (!prv->clk) { + dev_err(dev, "Unable to acquire NFC clock!\n"); + retval = -ENODEV; + goto error; + } + + clk_enable(prv->clk); + + /* Reset NAND Flash controller */ + nfc_set(mtd, NFC_CONFIG1, NFC_RESET); + while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) { + if (resettime++ >= NFC_RESET_TIMEOUT) { + dev_err(dev, "Timeout while resetting NFC!\n"); + retval = -EINVAL; + goto error; + } + + udelay(1); + } + + /* Enable write to NFC memory */ + nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED); + + /* Enable write to all NAND pages */ + nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000); + nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF); + nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK); + + /* + * Setup NFC: + * - Big Endian transfers, + * - Interrupt after full page read/write. 
+ */ + nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK | + NFC_FULL_PAGE_INT); + + /* Set spare area size */ + nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1); + + init_waitqueue_head(&prv->irq_waitq); + retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME, + mtd); + if (retval) { + dev_err(dev, "Error requesting IRQ!\n"); + goto error; + } + + /* Detect NAND chips */ + if (nand_scan(mtd, *chips_no)) { + dev_err(dev, "NAND Flash not found !\n"); + devm_free_irq(dev, prv->irq, mtd); + retval = -ENXIO; + goto error; + } + + /* Set erase block size */ + switch (mtd->erasesize / mtd->writesize) { + case 32: + nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32); + break; + + case 64: + nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64); + break; + + case 128: + nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128); + break; + + case 256: + nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256); + break; + + default: + dev_err(dev, "Unsupported NAND flash!\n"); + devm_free_irq(dev, prv->irq, mtd); + retval = -ENXIO; + goto error; + } + + dev_set_drvdata(dev, mtd); + + /* Register device in MTD */ +#ifdef CONFIG_MTD_PARTITIONS + retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0); +#ifdef CONFIG_MTD_OF_PARTS + if (retval == 0) + retval = of_mtd_parse_partitions(dev, dn, &parts); +#endif + if (retval < 0) { + dev_err(dev, "Error parsing MTD partitions!\n"); + devm_free_irq(dev, prv->irq, mtd); + retval = -EINVAL; + goto error; + } + + if (retval > 0) + retval = add_mtd_partitions(mtd, parts, retval); + else +#endif + retval = add_mtd_device(mtd); + + if (retval) { + dev_err(dev, "Error adding MTD device!\n"); + devm_free_irq(dev, prv->irq, mtd); + goto error; + } + + return 0; +error: + mpc5121_nfc_free(dev, mtd); + return retval; +} + +static int __devexit mpc5121_nfc_remove(struct of_device *op) +{ + struct device *dev = &op->dev; + struct mtd_info *mtd = dev_get_drvdata(dev); + struct nand_chip *chip = mtd->priv; + struct mpc5121_nfc_prv *prv = chip->priv; + + nand_release(mtd); + devm_free_irq(dev, prv->irq, mtd); + mpc5121_nfc_free(dev, mtd); + + return 0; +} + +static struct of_device_id mpc5121_nfc_match[] __devinitdata = { + { .compatible = "fsl,mpc5121-nfc", }, + {}, +}; + +static struct of_platform_driver mpc5121_nfc_driver = { + .match_table = mpc5121_nfc_match, + .probe = mpc5121_nfc_probe, + .remove = __devexit_p(mpc5121_nfc_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init mpc5121_nfc_init(void) +{ + return of_register_platform_driver(&mpc5121_nfc_driver); +} + +module_init(mpc5121_nfc_init); + +static void __exit mpc5121_nfc_cleanup(void) +{ + of_unregister_platform_driver(&mpc5121_nfc_driver); +} + +module_exit(mpc5121_nfc_cleanup); + +MODULE_AUTHOR("Freescale Semiconductor, Inc."); +MODULE_DESCRIPTION("MPC5121 NAND MTD driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index b2900d8406d..82e94389824 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -38,7 +38,7 @@ #define DRIVER_NAME "mxc_nand" #define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35()) -#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27()) +#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21()) /* Addresses for NFC registers */ #define NFC_BUF_SIZE 0xE00 @@ -168,11 +168,7 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id) { struct mxc_nand_host *host = dev_id; - uint16_t tmp; - - tmp = readw(host->regs + NFC_CONFIG1); - tmp |= NFC_INT_MSK; /* Disable interrupt */ - writew(tmp, host->regs + 
NFC_CONFIG1); + disable_irq_nosync(irq); wake_up(&host->irq_waitq); @@ -184,15 +180,13 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id) */ static void wait_op_done(struct mxc_nand_host *host, int useirq) { - uint32_t tmp; - int max_retries = 2000; + uint16_t tmp; + int max_retries = 8000; if (useirq) { if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) { - tmp = readw(host->regs + NFC_CONFIG1); - tmp &= ~NFC_INT_MSK; /* Enable interrupt */ - writew(tmp, host->regs + NFC_CONFIG1); + enable_irq(host->irq); wait_event(host->irq_waitq, readw(host->regs + NFC_CONFIG2) & NFC_INT); @@ -226,8 +220,23 @@ static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq) writew(cmd, host->regs + NFC_FLASH_CMD); writew(NFC_CMD, host->regs + NFC_CONFIG2); - /* Wait for operation to complete */ - wait_op_done(host, useirq); + if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) { + int max_retries = 100; + /* Reset completion is indicated by NFC_CONFIG2 */ + /* being set to 0 */ + while (max_retries-- > 0) { + if (readw(host->regs + NFC_CONFIG2) == 0) { + break; + } + udelay(1); + } + if (max_retries < 0) + DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n", + __func__); + } else { + /* Wait for operation to complete */ + wait_op_done(host, useirq); + } } /* This function sends an address (or partial address) to the @@ -542,6 +551,41 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr) } } +static void preset(struct mtd_info *mtd) +{ + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; + uint16_t tmp; + + /* enable interrupt, disable spare enable */ + tmp = readw(host->regs + NFC_CONFIG1); + tmp &= ~NFC_INT_MSK; + tmp &= ~NFC_SP_EN; + if (nand_chip->ecc.mode == NAND_ECC_HW) { + tmp |= NFC_ECC_EN; + } else { + tmp &= ~NFC_ECC_EN; + } + writew(tmp, host->regs + NFC_CONFIG1); + /* preset operation */ + + /* Unlock the internal RAM Buffer */ + writew(0x2, host->regs + NFC_CONFIG); + + /* Blocks to be unlocked */ + if (nfc_is_v21()) { + writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR); + writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR); + } else if (nfc_is_v1()) { + writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR); + writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR); + } else + BUG(); + + /* Unlock Block Command for given address range */ + writew(0x4, host->regs + NFC_WRPROT); +} + /* Used by the upper layer to write command to NAND Flash for * different operations to be carried out on NAND Flash */ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, @@ -559,6 +603,10 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, /* Command pre-processing step */ switch (command) { + case NAND_CMD_RESET: + send_cmd(host, command, false); + preset(mtd); + break; case NAND_CMD_STATUS: host->buf_start = 0; @@ -679,7 +727,6 @@ static int __init mxcnd_probe(struct platform_device *pdev) struct mxc_nand_platform_data *pdata = pdev->dev.platform_data; struct mxc_nand_host *host; struct resource *res; - uint16_t tmp; int err = 0, nr_parts = 0; struct nand_ecclayout *oob_smallpage, *oob_largepage; @@ -743,51 +790,17 @@ static int __init mxcnd_probe(struct platform_device *pdev) host->spare_len = 64; oob_smallpage = &nandv2_hw_eccoob_smallpage; oob_largepage = &nandv2_hw_eccoob_largepage; + this->ecc.bytes = 9; } else if (nfc_is_v1()) { host->regs = host->base; host->spare0 = host->base + 0x800; host->spare_len = 16; oob_smallpage = &nandv1_hw_eccoob_smallpage; oob_largepage = 
&nandv1_hw_eccoob_largepage; - } else - BUG(); - - /* disable interrupt and spare enable */ - tmp = readw(host->regs + NFC_CONFIG1); - tmp |= NFC_INT_MSK; - tmp &= ~NFC_SP_EN; - writew(tmp, host->regs + NFC_CONFIG1); - - init_waitqueue_head(&host->irq_waitq); - - host->irq = platform_get_irq(pdev, 0); - - err = request_irq(host->irq, mxc_nfc_irq, 0, DRIVER_NAME, host); - if (err) - goto eirq; - - /* Reset NAND */ - this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); - - /* preset operation */ - /* Unlock the internal RAM Buffer */ - writew(0x2, host->regs + NFC_CONFIG); - - /* Blocks to be unlocked */ - if (nfc_is_v21()) { - writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR); - writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR); - this->ecc.bytes = 9; - } else if (nfc_is_v1()) { - writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR); - writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR); this->ecc.bytes = 3; } else BUG(); - /* Unlock Block Command for given address range */ - writew(0x4, host->regs + NFC_WRPROT); - this->ecc.size = 512; this->ecc.layout = oob_smallpage; @@ -796,14 +809,8 @@ static int __init mxcnd_probe(struct platform_device *pdev) this->ecc.hwctl = mxc_nand_enable_hwecc; this->ecc.correct = mxc_nand_correct_data; this->ecc.mode = NAND_ECC_HW; - tmp = readw(host->regs + NFC_CONFIG1); - tmp |= NFC_ECC_EN; - writew(tmp, host->regs + NFC_CONFIG1); } else { this->ecc.mode = NAND_ECC_SOFT; - tmp = readw(host->regs + NFC_CONFIG1); - tmp &= ~NFC_ECC_EN; - writew(tmp, host->regs + NFC_CONFIG1); } /* NAND bus width determines access funtions used by upper layer */ @@ -817,8 +824,16 @@ static int __init mxcnd_probe(struct platform_device *pdev) this->options |= NAND_USE_FLASH_BBT; } + init_waitqueue_head(&host->irq_waitq); + + host->irq = platform_get_irq(pdev, 0); + + err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host); + if (err) + goto eirq; + /* first scan to find the device and get the page size */ - if (nand_scan_ident(mtd, 1)) { + if (nand_scan_ident(mtd, 1, NULL)) { err = -ENXIO; goto escan; } @@ -886,11 +901,14 @@ static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state) int ret = 0; DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n"); - if (mtd) { - ret = mtd->suspend(mtd); - /* Disable the NFC clock */ - clk_disable(host->clk); - } + + ret = mtd->suspend(mtd); + + /* + * nand_suspend locks the device for exclusive access, so + * the clock must already be off. 
+ */ + BUG_ON(!ret && host->clk_act); return ret; } @@ -904,11 +922,7 @@ static int mxcnd_resume(struct platform_device *pdev) DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n"); - if (mtd) { - /* Enable the NFC clock */ - clk_enable(host->clk); - mtd->resume(mtd); - } + mtd->resume(mtd); return ret; } diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 8f2958fe214..4a7b86423ee 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -108,6 +108,35 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, */ DEFINE_LED_TRIGGER(nand_led_trigger); +static int check_offs_len(struct mtd_info *mtd, + loff_t ofs, uint64_t len) +{ + struct nand_chip *chip = mtd->priv; + int ret = 0; + + /* Start address must align on block boundary */ + if (ofs & ((1 << chip->phys_erase_shift) - 1)) { + DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__); + ret = -EINVAL; + } + + /* Length must align on block boundary */ + if (len & ((1 << chip->phys_erase_shift) - 1)) { + DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n", + __func__); + ret = -EINVAL; + } + + /* Do not allow past end of device */ + if (ofs + len > mtd->size) { + DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n", + __func__); + ret = -EINVAL; + } + + return ret; +} + /** * nand_release_device - [GENERIC] release chip * @mtd: MTD device structure @@ -318,6 +347,9 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) struct nand_chip *chip = mtd->priv; u16 bad; + if (chip->options & NAND_BB_LAST_PAGE) + ofs += mtd->erasesize - mtd->writesize; + page = (int)(ofs >> chip->page_shift) & chip->pagemask; if (getchip) { @@ -335,14 +367,18 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) bad = cpu_to_le16(chip->read_word(mtd)); if (chip->badblockpos & 0x1) bad >>= 8; - if ((bad & 0xFF) != 0xff) - res = 1; + else + bad &= 0xFF; } else { chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page); - if (chip->read_byte(mtd) != 0xff) - res = 1; + bad = chip->read_byte(mtd); } + if (likely(chip->badblockbits == 8)) + res = bad != 0xFF; + else + res = hweight8(bad) < chip->badblockbits; + if (getchip) nand_release_device(mtd); @@ -363,6 +399,9 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) uint8_t buf[2] = { 0, 0 }; int block, ret; + if (chip->options & NAND_BB_LAST_PAGE) + ofs += mtd->erasesize - mtd->writesize; + /* Get block number */ block = (int)(ofs >> chip->bbt_erase_shift); if (chip->bbt) @@ -401,6 +440,11 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) static int nand_check_wp(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; + + /* broken xD cards report WP despite being writable */ + if (chip->options & NAND_BROKEN_XD) + return 0; + /* Check the WP bit */ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 
0 : 1; @@ -744,9 +788,6 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state) chip->state = FL_PM_SUSPENDED; spin_unlock(lock); return 0; - } else { - spin_unlock(lock); - return -EAGAIN; } } set_current_state(TASK_UNINTERRUPTIBLE); @@ -835,6 +876,168 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) } /** + * __nand_unlock - [REPLACEABLE] unlocks the specified locked blocks + * + * @param mtd - mtd info + * @param ofs - offset to start unlock from + * @param len - length to unlock + * @invert - when = 0, unlock the range of blocks within the lower and + * upper boundary address + * when = 1, unlock the range of blocks outside the boundaries + * of the lower and upper boundary address + * + * @return - unlock status + */ +static int __nand_unlock(struct mtd_info *mtd, loff_t ofs, + uint64_t len, int invert) +{ + int ret = 0; + int status, page; + struct nand_chip *chip = mtd->priv; + + /* Submit address of first page to unlock */ + page = ofs >> chip->page_shift; + chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask); + + /* Submit address of last page to unlock */ + page = (ofs + len) >> chip->page_shift; + chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1, + (page | invert) & chip->pagemask); + + /* Call wait ready function */ + status = chip->waitfunc(mtd, chip); + udelay(1000); + /* See if device thinks it succeeded */ + if (status & 0x01) { + DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n", + __func__, status); + ret = -EIO; + } + + return ret; +} + +/** + * nand_unlock - [REPLACEABLE] unlocks the specified locked blocks + * + * @param mtd - mtd info + * @param ofs - offset to start unlock from + * @param len - length to unlock + * + * @return - unlock status + */ +int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + int ret = 0; + int chipnr; + struct nand_chip *chip = mtd->priv; + + DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", + __func__, (unsigned long long)ofs, len); + + if (check_offs_len(mtd, ofs, len)) + ret = -EINVAL; + + /* Align to last block address if size addresses end of the device */ + if (ofs + len == mtd->size) + len -= mtd->erasesize; + + nand_get_device(chip, mtd, FL_UNLOCKING); + + /* Shift to get chip number */ + chipnr = ofs >> chip->chip_shift; + + chip->select_chip(mtd, chipnr); + + /* Check, if it is write protected */ + if (nand_check_wp(mtd)) { + DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", + __func__); + ret = -EIO; + goto out; + } + + ret = __nand_unlock(mtd, ofs, len, 0); + +out: + /* de-select the NAND device */ + chip->select_chip(mtd, -1); + + nand_release_device(mtd); + + return ret; +} + +/** + * nand_lock - [REPLACEABLE] locks all blocks present in the device + * + * @param mtd - mtd info + * @param ofs - offset to start unlock from + * @param len - length to unlock + * + * @return - lock status + * + * This feature is not supported in many NAND parts. 'Micron' NAND parts + * do have this feature, but it only allows locking all blocks, not a + * specified range of blocks. + * + * Implementing the 'lock' feature by making use of 'unlock', for now.
+ */ +int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + int ret = 0; + int chipnr, status, page; + struct nand_chip *chip = mtd->priv; + + DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", + __func__, (unsigned long long)ofs, len); + + if (check_offs_len(mtd, ofs, len)) + ret = -EINVAL; + + nand_get_device(chip, mtd, FL_LOCKING); + + /* Shift to get chip number */ + chipnr = ofs >> chip->chip_shift; + + chip->select_chip(mtd, chipnr); + + /* Check, if it is write protected */ + if (nand_check_wp(mtd)) { + DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", + __func__); + status = MTD_ERASE_FAILED; + ret = -EIO; + goto out; + } + + /* Submit address of first page to lock */ + page = ofs >> chip->page_shift; + chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask); + + /* Call wait ready function */ + status = chip->waitfunc(mtd, chip); + udelay(1000); + /* See if device thinks it succeeded */ + if (status & 0x01) { + DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n", + __func__, status); + ret = -EIO; + goto out; + } + + ret = __nand_unlock(mtd, ofs, len, 0x1); + +out: + /* de-select the NAND device */ + chip->select_chip(mtd, -1); + + nand_release_device(mtd); + + return ret; +} + +/** * nand_read_page_raw - [Intern] read raw page data without ecc * @mtd: mtd info structure * @chip: nand chip info structure @@ -1232,6 +1435,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, int ret = 0; uint32_t readlen = ops->len; uint32_t oobreadlen = ops->ooblen; + uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ? + mtd->oobavail : mtd->oobsize; + uint8_t *bufpoi, *oob, *buf; stats = mtd->ecc_stats; @@ -1282,18 +1488,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, buf += bytes; if (unlikely(oob)) { - /* Raw mode does data:oob:data:oob */ - if (ops->mode != MTD_OOB_RAW) { - int toread = min(oobreadlen, - chip->ecc.layout->oobavail); - if (toread) { - oob = nand_transfer_oob(chip, - oob, ops, toread); - oobreadlen -= toread; - } - } else - buf = nand_transfer_oob(chip, - buf, ops, mtd->oobsize); + + int toread = min(oobreadlen, max_oobsize); + + if (toread) { + oob = nand_transfer_oob(chip, + oob, ops, toread); + oobreadlen -= toread; + } } if (!(chip->options & NAND_NO_READRDY)) { @@ -1880,11 +2082,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, * @oob: oob data buffer * @ops: oob ops structure */ -static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, - struct mtd_oob_ops *ops) +static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len, + struct mtd_oob_ops *ops) { - size_t len = ops->ooblen; - switch(ops->mode) { case MTD_OOB_PLACE: @@ -1939,6 +2139,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, int chipnr, realpage, page, blockmask, column; struct nand_chip *chip = mtd->priv; uint32_t writelen = ops->len; + + uint32_t oobwritelen = ops->ooblen; + uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ? 
+ mtd->oobavail : mtd->oobsize; + uint8_t *oob = ops->oobbuf; uint8_t *buf = ops->datbuf; int ret, subpage; @@ -1980,6 +2185,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, if (likely(!oob)) memset(chip->oob_poi, 0xff, mtd->oobsize); + /* Don't allow multipage oob writes with offset */ + if (ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) + return -EINVAL; + while(1) { int bytes = mtd->writesize; int cached = writelen > bytes && page != blockmask; @@ -1995,8 +2204,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, wbuf = chip->buffers->databuf; } - if (unlikely(oob)) - oob = nand_fill_oob(chip, oob, ops); + if (unlikely(oob)) { + size_t len = min(oobwritelen, oobmaxlen); + oob = nand_fill_oob(chip, oob, len, ops); + oobwritelen -= len; + } ret = chip->write_page(mtd, chip, wbuf, page, cached, (ops->mode == MTD_OOB_RAW)); @@ -2170,7 +2382,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, chip->pagebuf = -1; memset(chip->oob_poi, 0xff, mtd->oobsize); - nand_fill_oob(chip, ops->oobbuf, ops); + nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops); status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); memset(chip->oob_poi, 0xff, mtd->oobsize); @@ -2293,25 +2505,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, __func__, (unsigned long long)instr->addr, (unsigned long long)instr->len); - /* Start address must align on block boundary */ - if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) { - DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__); + if (check_offs_len(mtd, instr->addr, instr->len)) return -EINVAL; - } - - /* Length must align on block boundary */ - if (instr->len & ((1 << chip->phys_erase_shift) - 1)) { - DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n", - __func__); - return -EINVAL; - } - - /* Do not allow erase past end of device */ - if ((instr->len + instr->addr) > mtd->size) { - DEBUG(MTD_DEBUG_LEVEL0, "%s: Erase past end of device\n", - __func__); - return -EINVAL; - } instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; @@ -2582,11 +2777,11 @@ static void nand_set_defaults(struct nand_chip *chip, int busw) */ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip, - int busw, int *maf_id) + int busw, int *maf_id, + struct nand_flash_dev *type) { - struct nand_flash_dev *type = NULL; int i, dev_id, maf_idx; - int tmp_id, tmp_manf; + u8 id_data[8]; /* Select the device */ chip->select_chip(mtd, 0); @@ -2612,27 +2807,26 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); - /* Read manufacturer and device IDs */ + /* Read entire ID string */ - tmp_manf = chip->read_byte(mtd); - tmp_id = chip->read_byte(mtd); + for (i = 0; i < 8; i++) + id_data[i] = chip->read_byte(mtd); - if (tmp_manf != *maf_id || tmp_id != dev_id) { + if (id_data[0] != *maf_id || id_data[1] != dev_id) { printk(KERN_INFO "%s: second ID read did not match " "%02x,%02x against %02x,%02x\n", __func__, - *maf_id, dev_id, tmp_manf, tmp_id); + *maf_id, dev_id, id_data[0], id_data[1]); return ERR_PTR(-ENODEV); } - /* Lookup the flash id */ - for (i = 0; nand_flash_ids[i].name != NULL; i++) { - if (dev_id == nand_flash_ids[i].id) { - type = &nand_flash_ids[i]; - break; - } - } - if (!type) + type = nand_flash_ids; + + for (; type->name != NULL; type++) + if (dev_id == type->id) + break; + + if (!type->name) return ERR_PTR(-ENODEV); if (!mtd->name) @@ -2644,21 +2838,45 @@ static struct nand_flash_dev 
*nand_get_flash_type(struct mtd_info *mtd, if (!type->pagesize) { int extid; /* The 3rd id byte holds MLC / multichip data */ - chip->cellinfo = chip->read_byte(mtd); + chip->cellinfo = id_data[2]; /* The 4th id byte is the important one */ - extid = chip->read_byte(mtd); - /* Calc pagesize */ - mtd->writesize = 1024 << (extid & 0x3); - extid >>= 2; - /* Calc oobsize */ - mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9); - extid >>= 2; - /* Calc blocksize. Blocksize is multiples of 64KiB */ - mtd->erasesize = (64 * 1024) << (extid & 0x03); - extid >>= 2; - /* Get buswidth information */ - busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0; + extid = id_data[3]; + /* + * Field definitions are in the following datasheets: + * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) + * New style (6 byte ID): Samsung K9GAG08U0D (p.40) + * + * Check for wraparound + Samsung ID + nonzero 6th byte + * to decide what to do. + */ + if (id_data[0] == id_data[6] && id_data[1] == id_data[7] && + id_data[0] == NAND_MFR_SAMSUNG && + id_data[5] != 0x00) { + /* Calc pagesize */ + mtd->writesize = 2048 << (extid & 0x03); + extid >>= 2; + /* Calc oobsize */ + mtd->oobsize = (extid & 0x03) == 0x01 ? 128 : 218; + extid >>= 2; + /* Calc blocksize */ + mtd->erasesize = (128 * 1024) << + (((extid >> 1) & 0x04) | (extid & 0x03)); + busw = 0; + } else { + /* Calc pagesize */ + mtd->writesize = 1024 << (extid & 0x03); + extid >>= 2; + /* Calc oobsize */ + mtd->oobsize = (8 << (extid & 0x01)) * + (mtd->writesize >> 9); + extid >>= 2; + /* Calc blocksize. Blocksize is multiples of 64KiB */ + mtd->erasesize = (64 * 1024) << (extid & 0x03); + extid >>= 2; + /* Get buswidth information */ + busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0; + } } else { /* * Old devices have chip data hardcoded in the device id table @@ -2704,6 +2922,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, /* Set the bad block position */ chip->badblockpos = mtd->writesize > 512 ? NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS; + chip->badblockbits = 8; /* Get chip options, preserve non chip based options */ chip->options &= ~NAND_CHIPOPTIONS_MSK; @@ -2720,6 +2939,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize) chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; + /* + * Bad block marker is stored in the last page of each block + * on Samsung and Hynix MLC devices + */ + if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && + (*maf_id == NAND_MFR_SAMSUNG || + *maf_id == NAND_MFR_HYNIX)) + chip->options |= NAND_BB_LAST_PAGE; + /* Check for AND chips with 4 page planes */ if (chip->options & NAND_4PAGE_ARRAY) chip->erase_cmd = multi_erase_cmd; @@ -2741,13 +2969,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, * nand_scan_ident - [NAND Interface] Scan for the NAND device * @mtd: MTD device structure * @maxchips: Number of chips to scan for + * @table: Alternative NAND ID table * * This is the first phase of the normal nand_scan() function. It * reads the flash ID and sets up MTD fields accordingly. * * The mtd->owner field must be set to the module of the caller. 
*/ -int nand_scan_ident(struct mtd_info *mtd, int maxchips) +int nand_scan_ident(struct mtd_info *mtd, int maxchips, + struct nand_flash_dev *table) { int i, busw, nand_maf_id; struct nand_chip *chip = mtd->priv; @@ -2759,7 +2989,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips) nand_set_defaults(chip, busw); /* Read the flash type */ - type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id); + type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id, table); if (IS_ERR(type)) { if (!(chip->options & NAND_SCAN_SILENT_NODEV)) @@ -2989,7 +3219,8 @@ int nand_scan_tail(struct mtd_info *mtd) /* Fill in remaining MTD driver data */ mtd->type = MTD_NANDFLASH; - mtd->flags = MTD_CAP_NANDFLASH; + mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM : + MTD_CAP_NANDFLASH; mtd->erase = nand_erase; mtd->point = NULL; mtd->unpoint = NULL; @@ -3050,7 +3281,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips) BUG(); } - ret = nand_scan_ident(mtd, maxchips); + ret = nand_scan_ident(mtd, maxchips, NULL); if (!ret) ret = nand_scan_tail(mtd); return ret; @@ -3077,6 +3308,8 @@ void nand_release(struct mtd_info *mtd) kfree(chip->buffers); } +EXPORT_SYMBOL_GPL(nand_lock); +EXPORT_SYMBOL_GPL(nand_unlock); EXPORT_SYMBOL_GPL(nand_scan); EXPORT_SYMBOL_GPL(nand_scan_ident); EXPORT_SYMBOL_GPL(nand_scan_tail); diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index 55c23e5cd21..ad97c0ce73b 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c @@ -237,15 +237,33 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs, size_t len) { struct mtd_oob_ops ops; + int res; ops.mode = MTD_OOB_RAW; ops.ooboffs = 0; ops.ooblen = mtd->oobsize; - ops.oobbuf = buf; - ops.datbuf = buf; - ops.len = len; - return mtd->read_oob(mtd, offs, &ops); + + while (len > 0) { + if (len <= mtd->writesize) { + ops.oobbuf = buf + len; + ops.datbuf = buf; + ops.len = len; + return mtd->read_oob(mtd, offs, &ops); + } else { + ops.oobbuf = buf + mtd->writesize; + ops.datbuf = buf; + ops.len = mtd->writesize; + res = mtd->read_oob(mtd, offs, &ops); + + if (res) + return res; + } + + buf += mtd->oobsize + mtd->writesize; + len -= mtd->writesize; + } + return 0; } /* @@ -414,6 +432,9 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, from = (loff_t)startblock << (this->bbt_erase_shift - 1); } + if (this->options & NAND_BB_LAST_PAGE) + from += mtd->erasesize - (mtd->writesize * len); + for (i = startblock; i < numblocks;) { int ret; diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h index 7cec2cd9785..198b304d6f7 100644 --- a/drivers/mtd/nand/nand_bcm_umi.h +++ b/drivers/mtd/nand/nand_bcm_umi.h @@ -167,18 +167,27 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize, int numToRead = 16; /* There are 16 bytes per sector in the OOB */ /* ECC is already paused when this function is called */ + if (pageSize != NAND_DATA_ACCESS_SIZE) { + /* skip BI */ +#if defined(__KERNEL__) && !defined(STANDALONE) + *oobp++ = REG_NAND_DATA8; +#else + REG_NAND_DATA8; +#endif + numToRead--; + } - if (pageSize == NAND_DATA_ACCESS_SIZE) { - while (numToRead > numEccBytes) { - /* skip free oob region */ + while (numToRead > numEccBytes) { + /* skip free oob region */ #if defined(__KERNEL__) && !defined(STANDALONE) - *oobp++ = REG_NAND_DATA8; + *oobp++ = REG_NAND_DATA8; #else - REG_NAND_DATA8; + REG_NAND_DATA8; #endif - numToRead--; - } + numToRead--; + } + if (pageSize == NAND_DATA_ACCESS_SIZE) { /* read ECC bytes before BI */ 
nand_bcm_umi_bch_resume_read_ecc_calc(); @@ -190,6 +199,7 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize, #else eccCalc[eccPos++] = REG_NAND_DATA8; #endif + numToRead--; } nand_bcm_umi_bch_pause_read_ecc_calc(); @@ -204,49 +214,18 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize, numToRead--; } - /* read ECC bytes */ - nand_bcm_umi_bch_resume_read_ecc_calc(); - while (numToRead) { -#if defined(__KERNEL__) && !defined(STANDALONE) - *oobp = REG_NAND_DATA8; - eccCalc[eccPos++] = *oobp; - oobp++; -#else - eccCalc[eccPos++] = REG_NAND_DATA8; -#endif - numToRead--; - } - } else { - /* skip BI */ + } + /* read ECC bytes */ + nand_bcm_umi_bch_resume_read_ecc_calc(); + while (numToRead) { #if defined(__KERNEL__) && !defined(STANDALONE) - *oobp++ = REG_NAND_DATA8; + *oobp = REG_NAND_DATA8; + eccCalc[eccPos++] = *oobp; + oobp++; #else - REG_NAND_DATA8; + eccCalc[eccPos++] = REG_NAND_DATA8; #endif numToRead--; - - while (numToRead > numEccBytes) { - /* skip free oob region */ -#if defined(__KERNEL__) && !defined(STANDALONE) - *oobp++ = REG_NAND_DATA8; -#else - REG_NAND_DATA8; -#endif - numToRead--; - } - - /* read ECC bytes */ - nand_bcm_umi_bch_resume_read_ecc_calc(); - while (numToRead) { -#if defined(__KERNEL__) && !defined(STANDALONE) - *oobp = REG_NAND_DATA8; - eccCalc[eccPos++] = *oobp; - oobp++; -#else - eccCalc[eccPos++] = REG_NAND_DATA8; -#endif - numToRead--; - } } } diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 69ee2c90eb0..89907ed9900 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -82,6 +82,7 @@ struct nand_flash_dev nand_flash_ids[] = { /* 1 Gigabit */ {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS}, {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS}, + {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS}, {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16}, {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16}, diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 7281000fef2..261337efe0e 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -80,6 +80,9 @@ #ifndef CONFIG_NANDSIM_DBG #define CONFIG_NANDSIM_DBG 0 #endif +#ifndef CONFIG_NANDSIM_MAX_PARTS +#define CONFIG_NANDSIM_MAX_PARTS 32 +#endif static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE; static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE; @@ -94,7 +97,7 @@ static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH; static uint do_delays = CONFIG_NANDSIM_DO_DELAYS; static uint log = CONFIG_NANDSIM_LOG; static uint dbg = CONFIG_NANDSIM_DBG; -static unsigned long parts[MAX_MTD_DEVICES]; +static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS]; static unsigned int parts_num; static char *badblocks = NULL; static char *weakblocks = NULL; @@ -135,8 +138,8 @@ MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read I MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)"); MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds"); MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)"); -MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)"); -MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanodeconds)"); +MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)"); +MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)"); MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)"); MODULE_PARM_DESC(do_delays, "Simulate NAND 
delays using busy-waits if not zero"); MODULE_PARM_DESC(log, "Perform logging if not zero"); @@ -288,7 +291,7 @@ union ns_mem { * The structure which describes all the internal simulator data. */ struct nandsim { - struct mtd_partition partitions[MAX_MTD_DEVICES]; + struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS]; unsigned int nbparts; uint busw; /* flash chip bus width (8 or 16) */ @@ -312,7 +315,7 @@ struct nandsim { union ns_mem buf; /* NAND flash "geometry" */ - struct nandsin_geometry { + struct { uint64_t totsz; /* total flash size, bytes */ uint32_t secsz; /* flash sector (erase block) size, bytes */ uint pgsz; /* NAND flash page size, bytes */ @@ -331,7 +334,7 @@ struct nandsim { } geom; /* NAND flash internal registers */ - struct nandsim_regs { + struct { unsigned command; /* the command register */ u_char status; /* the status register */ uint row; /* the page number */ @@ -342,7 +345,7 @@ struct nandsim { } regs; /* NAND flash lines state */ - struct ns_lines_status { + struct { int ce; /* chip Enable */ int cle; /* command Latch Enable */ int ale; /* address Latch Enable */ diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c index 1f6f741af5d..8c0b6937522 100644 --- a/drivers/mtd/nand/nomadik_nand.c +++ b/drivers/mtd/nand/nomadik_nand.c @@ -105,21 +105,21 @@ static int nomadik_nand_probe(struct platform_device *pdev) ret = -EIO; goto err_unmap; } - host->addr_va = ioremap(res->start, res->end - res->start + 1); + host->addr_va = ioremap(res->start, resource_size(res)); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); if (!res) { ret = -EIO; goto err_unmap; } - host->data_va = ioremap(res->start, res->end - res->start + 1); + host->data_va = ioremap(res->start, resource_size(res)); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd"); if (!res) { ret = -EIO; goto err_unmap; } - host->cmd_va = ioremap(res->start, res->end - res->start + 1); + host->cmd_va = ioremap(res->start, resource_size(res)); if (!host->addr_va || !host->data_va || !host->cmd_va) { ret = -ENOMEM; diff --git a/drivers/mtd/nand/w90p910_nand.c b/drivers/mtd/nand/nuc900_nand.c index 7680e731348..6eddf7361ed 100644 --- a/drivers/mtd/nand/w90p910_nand.c +++ b/drivers/mtd/nand/nuc900_nand.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009 Nuvoton technology corporation. + * Copyright © 2009 Nuvoton technology corporation. 
* * Wan ZongShun <mcuos.com@gmail.com> * @@ -55,7 +55,7 @@ #define write_addr_reg(dev, val) \ __raw_writel((val), (dev)->reg + REG_SMADDR) -struct w90p910_nand { +struct nuc900_nand { struct mtd_info mtd; struct nand_chip chip; void __iomem *reg; @@ -76,49 +76,49 @@ static const struct mtd_partition partitions[] = { } }; -static unsigned char w90p910_nand_read_byte(struct mtd_info *mtd) +static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd) { unsigned char ret; - struct w90p910_nand *nand; + struct nuc900_nand *nand; - nand = container_of(mtd, struct w90p910_nand, mtd); + nand = container_of(mtd, struct nuc900_nand, mtd); ret = (unsigned char)read_data_reg(nand); return ret; } -static void w90p910_nand_read_buf(struct mtd_info *mtd, - unsigned char *buf, int len) +static void nuc900_nand_read_buf(struct mtd_info *mtd, + unsigned char *buf, int len) { int i; - struct w90p910_nand *nand; + struct nuc900_nand *nand; - nand = container_of(mtd, struct w90p910_nand, mtd); + nand = container_of(mtd, struct nuc900_nand, mtd); for (i = 0; i < len; i++) buf[i] = (unsigned char)read_data_reg(nand); } -static void w90p910_nand_write_buf(struct mtd_info *mtd, - const unsigned char *buf, int len) +static void nuc900_nand_write_buf(struct mtd_info *mtd, + const unsigned char *buf, int len) { int i; - struct w90p910_nand *nand; + struct nuc900_nand *nand; - nand = container_of(mtd, struct w90p910_nand, mtd); + nand = container_of(mtd, struct nuc900_nand, mtd); for (i = 0; i < len; i++) write_data_reg(nand, buf[i]); } -static int w90p910_verify_buf(struct mtd_info *mtd, - const unsigned char *buf, int len) +static int nuc900_verify_buf(struct mtd_info *mtd, + const unsigned char *buf, int len) { int i; - struct w90p910_nand *nand; + struct nuc900_nand *nand; - nand = container_of(mtd, struct w90p910_nand, mtd); + nand = container_of(mtd, struct nuc900_nand, mtd); for (i = 0; i < len; i++) { if (buf[i] != (unsigned char)read_data_reg(nand)) @@ -128,7 +128,7 @@ static int w90p910_verify_buf(struct mtd_info *mtd, return 0; } -static int w90p910_check_rb(struct w90p910_nand *nand) +static int nuc900_check_rb(struct nuc900_nand *nand) { unsigned int val; spin_lock(&nand->lock); @@ -139,24 +139,24 @@ static int w90p910_check_rb(struct w90p910_nand *nand) return val; } -static int w90p910_nand_devready(struct mtd_info *mtd) +static int nuc900_nand_devready(struct mtd_info *mtd) { - struct w90p910_nand *nand; + struct nuc900_nand *nand; int ready; - nand = container_of(mtd, struct w90p910_nand, mtd); + nand = container_of(mtd, struct nuc900_nand, mtd); - ready = (w90p910_check_rb(nand)) ? 1 : 0; + ready = (nuc900_check_rb(nand)) ? 
1 : 0; return ready; } -static void w90p910_nand_command_lp(struct mtd_info *mtd, - unsigned int command, int column, int page_addr) +static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command, + int column, int page_addr) { register struct nand_chip *chip = mtd->priv; - struct w90p910_nand *nand; + struct nuc900_nand *nand; - nand = container_of(mtd, struct w90p910_nand, mtd); + nand = container_of(mtd, struct nuc900_nand, mtd); if (command == NAND_CMD_READOOB) { column += mtd->writesize; @@ -212,7 +212,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd, write_cmd_reg(nand, NAND_CMD_STATUS); write_cmd_reg(nand, command); - while (!w90p910_check_rb(nand)) + while (!nuc900_check_rb(nand)) ; return; @@ -241,7 +241,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd, } -static void w90p910_nand_enable(struct w90p910_nand *nand) +static void nuc900_nand_enable(struct nuc900_nand *nand) { unsigned int val; spin_lock(&nand->lock); @@ -262,37 +262,37 @@ static void w90p910_nand_enable(struct w90p910_nand *nand) spin_unlock(&nand->lock); } -static int __devinit w90p910_nand_probe(struct platform_device *pdev) +static int __devinit nuc900_nand_probe(struct platform_device *pdev) { - struct w90p910_nand *w90p910_nand; + struct nuc900_nand *nuc900_nand; struct nand_chip *chip; int retval; struct resource *res; retval = 0; - w90p910_nand = kzalloc(sizeof(struct w90p910_nand), GFP_KERNEL); - if (!w90p910_nand) + nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL); + if (!nuc900_nand) return -ENOMEM; - chip = &(w90p910_nand->chip); + chip = &(nuc900_nand->chip); - w90p910_nand->mtd.priv = chip; - w90p910_nand->mtd.owner = THIS_MODULE; - spin_lock_init(&w90p910_nand->lock); + nuc900_nand->mtd.priv = chip; + nuc900_nand->mtd.owner = THIS_MODULE; + spin_lock_init(&nuc900_nand->lock); - w90p910_nand->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(w90p910_nand->clk)) { + nuc900_nand->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(nuc900_nand->clk)) { retval = -ENOENT; goto fail1; } - clk_enable(w90p910_nand->clk); - - chip->cmdfunc = w90p910_nand_command_lp; - chip->dev_ready = w90p910_nand_devready; - chip->read_byte = w90p910_nand_read_byte; - chip->write_buf = w90p910_nand_write_buf; - chip->read_buf = w90p910_nand_read_buf; - chip->verify_buf = w90p910_verify_buf; + clk_enable(nuc900_nand->clk); + + chip->cmdfunc = nuc900_nand_command_lp; + chip->dev_ready = nuc900_nand_devready; + chip->read_byte = nuc900_nand_read_byte; + chip->write_buf = nuc900_nand_write_buf; + chip->read_buf = nuc900_nand_read_buf; + chip->verify_buf = nuc900_verify_buf; chip->chip_delay = 50; chip->options = 0; chip->ecc.mode = NAND_ECC_SOFT; @@ -308,75 +308,75 @@ static int __devinit w90p910_nand_probe(struct platform_device *pdev) goto fail1; } - w90p910_nand->reg = ioremap(res->start, resource_size(res)); - if (!w90p910_nand->reg) { + nuc900_nand->reg = ioremap(res->start, resource_size(res)); + if (!nuc900_nand->reg) { retval = -ENOMEM; goto fail2; } - w90p910_nand_enable(w90p910_nand); + nuc900_nand_enable(nuc900_nand); - if (nand_scan(&(w90p910_nand->mtd), 1)) { + if (nand_scan(&(nuc900_nand->mtd), 1)) { retval = -ENXIO; goto fail3; } - add_mtd_partitions(&(w90p910_nand->mtd), partitions, + add_mtd_partitions(&(nuc900_nand->mtd), partitions, ARRAY_SIZE(partitions)); - platform_set_drvdata(pdev, w90p910_nand); + platform_set_drvdata(pdev, nuc900_nand); return retval; -fail3: iounmap(w90p910_nand->reg); +fail3: iounmap(nuc900_nand->reg); fail2: release_mem_region(res->start, 
resource_size(res)); -fail1: kfree(w90p910_nand); +fail1: kfree(nuc900_nand); return retval; } -static int __devexit w90p910_nand_remove(struct platform_device *pdev) +static int __devexit nuc900_nand_remove(struct platform_device *pdev) { - struct w90p910_nand *w90p910_nand = platform_get_drvdata(pdev); + struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); struct resource *res; - iounmap(w90p910_nand->reg); + iounmap(nuc900_nand->reg); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); - clk_disable(w90p910_nand->clk); - clk_put(w90p910_nand->clk); + clk_disable(nuc900_nand->clk); + clk_put(nuc900_nand->clk); - kfree(w90p910_nand); + kfree(nuc900_nand); platform_set_drvdata(pdev, NULL); return 0; } -static struct platform_driver w90p910_nand_driver = { - .probe = w90p910_nand_probe, - .remove = __devexit_p(w90p910_nand_remove), +static struct platform_driver nuc900_nand_driver = { + .probe = nuc900_nand_probe, + .remove = __devexit_p(nuc900_nand_remove), .driver = { - .name = "w90p910-fmi", + .name = "nuc900-fmi", .owner = THIS_MODULE, }, }; -static int __init w90p910_nand_init(void) +static int __init nuc900_nand_init(void) { - return platform_driver_register(&w90p910_nand_driver); + return platform_driver_register(&nuc900_nand_driver); } -static void __exit w90p910_nand_exit(void) +static void __exit nuc900_nand_exit(void) { - platform_driver_unregister(&w90p910_nand_driver); + platform_driver_unregister(&nuc900_nand_driver); } -module_init(w90p910_nand_init); -module_exit(w90p910_nand_exit); +module_init(nuc900_nand_init); +module_exit(nuc900_nand_exit); MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); -MODULE_DESCRIPTION("w90p910 nand driver!"); +MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:w90p910-fmi"); +MODULE_ALIAS("platform:nuc900-fmi"); diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 7545568fce4..ee87325c771 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -292,11 +292,14 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len) u32 *p = (u32 *)buf; /* take care of subpage reads */ - for (; len % 4 != 0; ) { - *buf++ = __raw_readb(info->nand.IO_ADDR_R); - len--; + if (len % 4) { + if (info->nand.options & NAND_BUSWIDTH_16) + omap_read_buf16(mtd, buf, len % 4); + else + omap_read_buf8(mtd, buf, len % 4); + p = (u32 *) (buf + len % 4); + len -= len % 4; } - p = (u32 *) buf; /* configure and start prefetch transfer */ ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0); @@ -502,7 +505,7 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd, omap_write_buf_pref(mtd, buf, len); else /* start transfer in DMA mode */ - omap_nand_dma_transfer(mtd, buf, len, 0x1); + omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1); } /** @@ -1028,7 +1031,8 @@ out_free_info: static int omap_nand_remove(struct platform_device *pdev) { struct mtd_info *mtd = platform_get_drvdata(pdev); - struct omap_nand_info *info = mtd->priv; + struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, + mtd); platform_set_drvdata(pdev, NULL); if (use_dma) diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index d60fc5719fe..da6e7534305 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c @@ -80,6 +80,7 @@ static int __init orion_nand_probe(struct platform_device *pdev) struct mtd_info *mtd; struct nand_chip *nc; struct orion_nand_data *board; + struct resource 
*res; void __iomem *io_base; int ret = 0; #ifdef CONFIG_MTD_PARTITIONS @@ -95,8 +96,13 @@ static int __init orion_nand_probe(struct platform_device *pdev) } mtd = (struct mtd_info *)(nc + 1); - io_base = ioremap(pdev->resource[0].start, - pdev->resource[0].end - pdev->resource[0].start + 1); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -ENODEV; + goto no_res; + } + + io_base = ioremap(res->start, resource_size(res)); if (!io_base) { printk(KERN_ERR "orion_nand: ioremap failed\n"); ret = -EIO; @@ -120,6 +126,9 @@ static int __init orion_nand_probe(struct platform_device *pdev) if (board->width == 16) nc->options |= NAND_BUSWIDTH_16; + if (board->dev_ready) + nc->dev_ready = board->dev_ready; + platform_set_drvdata(pdev, mtd); if (nand_scan(mtd, 1)) { diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c index a8b9376cf32..090a05c12cb 100644 --- a/drivers/mtd/nand/pasemi_nand.c +++ b/drivers/mtd/nand/pasemi_nand.c @@ -209,7 +209,7 @@ static int __devexit pasemi_nand_remove(struct of_device *ofdev) return 0; } -static struct of_device_id pasemi_nand_match[] = +static const struct of_device_id pasemi_nand_match[] = { { .compatible = "pasemi,localbus-nand", diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 5d55152162c..e02fa4f0e3c 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -1320,6 +1320,17 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) goto fail_free_irq; } + if (mtd_has_cmdlinepart()) { + static const char *probes[] = { "cmdlinepart", NULL }; + struct mtd_partition *parts; + int nr_parts; + + nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0); + + if (nr_parts) + return add_mtd_partitions(mtd, parts, nr_parts); + } + return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); fail_free_irq: diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c new file mode 100644 index 00000000000..78a42329547 --- /dev/null +++ b/drivers/mtd/nand/r852.c @@ -0,0 +1,1140 @@ +/* + * Copyright © 2009 - Maxim Levitsky + * driver for Ricoh xD readers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/jiffies.h> +#include <linux/workqueue.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <asm/byteorder.h> +#include <linux/sched.h> +#include "sm_common.h" +#include "r852.h" + + +static int r852_enable_dma = 1; +module_param(r852_enable_dma, bool, S_IRUGO); +MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)"); + +static int debug; +module_param(debug, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "Debug level (0-2)"); + +/* read register */ +static inline uint8_t r852_read_reg(struct r852_device *dev, int address) +{ + uint8_t reg = readb(dev->mmio + address); + return reg; +} + +/* write register */ +static inline void r852_write_reg(struct r852_device *dev, + int address, uint8_t value) +{ + writeb(value, dev->mmio + address); + mmiowb(); +} + + +/* read dword sized register */ +static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address) +{ + uint32_t reg = le32_to_cpu(readl(dev->mmio + address)); + return reg; +} + +/* write dword sized register */ +static inline void r852_write_reg_dword(struct r852_device *dev, + int address, uint32_t value) +{ + writel(cpu_to_le32(value), dev->mmio + address); + mmiowb(); +} + +/* returns pointer to our private structure */ +static inline struct r852_device *r852_get_dev(struct mtd_info *mtd) +{ + struct nand_chip *chip = (struct nand_chip *)mtd->priv; + return (struct r852_device *)chip->priv; +} + + +/* check if controller supports dma */ +static void r852_dma_test(struct r852_device *dev) +{ + dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) & + (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2); + + if (!dev->dma_usable) + message("Non dma capable device detected, dma disabled"); + + if (!r852_enable_dma) { + message("disabling dma on user request"); + dev->dma_usable = 0; + } +} + +/* + * Enable dma. 
Enables ether first or second stage of the DMA, + * Expects dev->dma_dir and dev->dma_state be set + */ +static void r852_dma_enable(struct r852_device *dev) +{ + uint8_t dma_reg, dma_irq_reg; + + /* Set up dma settings */ + dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS); + dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY); + + if (dev->dma_dir) + dma_reg |= R852_DMA_READ; + + if (dev->dma_state == DMA_INTERNAL) { + dma_reg |= R852_DMA_INTERNAL; + /* Precaution to make sure HW doesn't write */ + /* to random kernel memory */ + r852_write_reg_dword(dev, R852_DMA_ADDR, + cpu_to_le32(dev->phys_bounce_buffer)); + } else { + dma_reg |= R852_DMA_MEMORY; + r852_write_reg_dword(dev, R852_DMA_ADDR, + cpu_to_le32(dev->phys_dma_addr)); + } + + /* Precaution: make sure write reached the device */ + r852_read_reg_dword(dev, R852_DMA_ADDR); + + r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg); + + /* Set dma irq */ + dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); + r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, + dma_irq_reg | + R852_DMA_IRQ_INTERNAL | + R852_DMA_IRQ_ERROR | + R852_DMA_IRQ_MEMORY); +} + +/* + * Disable dma, called from the interrupt handler, which specifies + * success of the operation via 'error' argument + */ +static void r852_dma_done(struct r852_device *dev, int error) +{ + WARN_ON(dev->dma_stage == 0); + + r852_write_reg_dword(dev, R852_DMA_IRQ_STA, + r852_read_reg_dword(dev, R852_DMA_IRQ_STA)); + + r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0); + r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0); + + /* Precaution to make sure HW doesn't write to random kernel memory */ + r852_write_reg_dword(dev, R852_DMA_ADDR, + cpu_to_le32(dev->phys_bounce_buffer)); + r852_read_reg_dword(dev, R852_DMA_ADDR); + + dev->dma_error = error; + dev->dma_stage = 0; + + if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer) + pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN, + dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); + complete(&dev->dma_done); +} + +/* + * Wait, till dma is done, which includes both phases of it + */ +static int r852_dma_wait(struct r852_device *dev) +{ + long timeout = wait_for_completion_timeout(&dev->dma_done, + msecs_to_jiffies(1000)); + if (!timeout) { + dbg("timeout waiting for DMA interrupt"); + return -ETIMEDOUT; + } + + return 0; +} + +/* + * Read/Write one page using dma. Only pages can be read (512 bytes) +*/ +static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read) +{ + int bounce = 0; + unsigned long flags; + int error; + + dev->dma_error = 0; + + /* Set dma direction */ + dev->dma_dir = do_read; + dev->dma_stage = 1; + + dbg_verbose("doing dma %s ", do_read ? "read" : "write"); + + /* Set intial dma state: for reading first fill on board buffer, + from device, for writes first fill the buffer from memory*/ + dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY; + + /* if incoming buffer is not page aligned, we should do bounce */ + if ((unsigned long)buf & (R852_DMA_LEN-1)) + bounce = 1; + + if (!bounce) { + dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf, + R852_DMA_LEN, + (do_read ? 
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE)); + + if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr)) + bounce = 1; + } + + if (bounce) { + dbg_verbose("dma: using bounce buffer"); + dev->phys_dma_addr = dev->phys_bounce_buffer; + if (!do_read) + memcpy(dev->bounce_buffer, buf, R852_DMA_LEN); + } + + /* Enable DMA */ + spin_lock_irqsave(&dev->irqlock, flags); + r852_dma_enable(dev); + spin_unlock_irqrestore(&dev->irqlock, flags); + + /* Wait till complete */ + error = r852_dma_wait(dev); + + if (error) { + r852_dma_done(dev, error); + return; + } + + if (do_read && bounce) + memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN); +} + +/* + * Program data lines of the nand chip to send data to it + */ +void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) +{ + struct r852_device *dev = r852_get_dev(mtd); + uint32_t reg; + + /* Don't allow any access to hardware if we suspect card removal */ + if (dev->card_unstable) + return; + + /* Special case for whole sector read */ + if (len == R852_DMA_LEN && dev->dma_usable) { + r852_do_dma(dev, (uint8_t *)buf, 0); + return; + } + + /* write DWORD chinks - faster */ + while (len) { + reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24; + r852_write_reg_dword(dev, R852_DATALINE, reg); + buf += 4; + len -= 4; + + } + + /* write rest */ + while (len) + r852_write_reg(dev, R852_DATALINE, *buf++); +} + +/* + * Read data lines of the nand chip to retrieve data + */ +void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) +{ + struct r852_device *dev = r852_get_dev(mtd); + uint32_t reg; + + if (dev->card_unstable) { + /* since we can't signal error here, at least, return + predictable buffer */ + memset(buf, 0, len); + return; + } + + /* special case for whole sector read */ + if (len == R852_DMA_LEN && dev->dma_usable) { + r852_do_dma(dev, buf, 1); + return; + } + + /* read in dword sized chunks */ + while (len >= 4) { + + reg = r852_read_reg_dword(dev, R852_DATALINE); + *buf++ = reg & 0xFF; + *buf++ = (reg >> 8) & 0xFF; + *buf++ = (reg >> 16) & 0xFF; + *buf++ = (reg >> 24) & 0xFF; + len -= 4; + } + + /* read the reset by bytes */ + while (len--) + *buf++ = r852_read_reg(dev, R852_DATALINE); +} + +/* + * Read one byte from nand chip + */ +static uint8_t r852_read_byte(struct mtd_info *mtd) +{ + struct r852_device *dev = r852_get_dev(mtd); + + /* Same problem as in r852_read_buf.... */ + if (dev->card_unstable) + return 0; + + return r852_read_reg(dev, R852_DATALINE); +} + + +/* + * Readback the buffer to verify it + */ +int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) +{ + struct r852_device *dev = r852_get_dev(mtd); + + /* We can't be sure about anything here... 
*/ + if (dev->card_unstable) + return -1; + + /* This will never happen, unless you wired up a nand chip + with > 512 bytes page size to the reader */ + if (len > SM_SECTOR_SIZE) + return 0; + + r852_read_buf(mtd, dev->tmp_buffer, len); + return memcmp(buf, dev->tmp_buffer, len); +} + +/* + * Control several chip lines & send commands + */ +void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl) +{ + struct r852_device *dev = r852_get_dev(mtd); + + if (dev->card_unstable) + return; + + if (ctrl & NAND_CTRL_CHANGE) { + + dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND | + R852_CTL_ON | R852_CTL_CARDENABLE); + + if (ctrl & NAND_ALE) + dev->ctlreg |= R852_CTL_DATA; + + if (ctrl & NAND_CLE) + dev->ctlreg |= R852_CTL_COMMAND; + + if (ctrl & NAND_NCE) + dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON); + else + dev->ctlreg &= ~R852_CTL_WRITE; + + /* when write is stareted, enable write access */ + if (dat == NAND_CMD_ERASE1) + dev->ctlreg |= R852_CTL_WRITE; + + r852_write_reg(dev, R852_CTL, dev->ctlreg); + } + + /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need + to set write mode */ + if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) { + dev->ctlreg |= R852_CTL_WRITE; + r852_write_reg(dev, R852_CTL, dev->ctlreg); + } + + if (dat != NAND_CMD_NONE) + r852_write_reg(dev, R852_DATALINE, dat); +} + +/* + * Wait till card is ready. + * based on nand_wait, but returns errors on DMA error + */ +int r852_wait(struct mtd_info *mtd, struct nand_chip *chip) +{ + struct r852_device *dev = (struct r852_device *)chip->priv; + + unsigned long timeout; + int status; + + timeout = jiffies + (chip->state == FL_ERASING ? + msecs_to_jiffies(400) : msecs_to_jiffies(20)); + + while (time_before(jiffies, timeout)) + if (chip->dev_ready(mtd)) + break; + + chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); + status = (int)chip->read_byte(mtd); + + /* Unfortunelly, no way to send detailed error status... 
*/ + if (dev->dma_error) { + status |= NAND_STATUS_FAIL; + dev->dma_error = 0; + } + return status; +} + +/* + * Check if card is ready + */ + +int r852_ready(struct mtd_info *mtd) +{ + struct r852_device *dev = r852_get_dev(mtd); + return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY); +} + + +/* + * Set ECC engine mode +*/ + +void r852_ecc_hwctl(struct mtd_info *mtd, int mode) +{ + struct r852_device *dev = r852_get_dev(mtd); + + if (dev->card_unstable) + return; + + switch (mode) { + case NAND_ECC_READ: + case NAND_ECC_WRITE: + /* enable ecc generation/check*/ + dev->ctlreg |= R852_CTL_ECC_ENABLE; + + /* flush ecc buffer */ + r852_write_reg(dev, R852_CTL, + dev->ctlreg | R852_CTL_ECC_ACCESS); + + r852_read_reg_dword(dev, R852_DATALINE); + r852_write_reg(dev, R852_CTL, dev->ctlreg); + return; + + case NAND_ECC_READSYN: + /* disable ecc generation */ + dev->ctlreg &= ~R852_CTL_ECC_ENABLE; + r852_write_reg(dev, R852_CTL, dev->ctlreg); + } +} + +/* + * Calculate ECC, only used for writes + */ + +int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat, + uint8_t *ecc_code) +{ + struct r852_device *dev = r852_get_dev(mtd); + struct sm_oob *oob = (struct sm_oob *)ecc_code; + uint32_t ecc1, ecc2; + + if (dev->card_unstable) + return 0; + + dev->ctlreg &= ~R852_CTL_ECC_ENABLE; + r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); + + ecc1 = r852_read_reg_dword(dev, R852_DATALINE); + ecc2 = r852_read_reg_dword(dev, R852_DATALINE); + + oob->ecc1[0] = (ecc1) & 0xFF; + oob->ecc1[1] = (ecc1 >> 8) & 0xFF; + oob->ecc1[2] = (ecc1 >> 16) & 0xFF; + + oob->ecc2[0] = (ecc2) & 0xFF; + oob->ecc2[1] = (ecc2 >> 8) & 0xFF; + oob->ecc2[2] = (ecc2 >> 16) & 0xFF; + + r852_write_reg(dev, R852_CTL, dev->ctlreg); + return 0; +} + +/* + * Correct the data using ECC, hw did almost everything for us + */ + +int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat, + uint8_t *read_ecc, uint8_t *calc_ecc) +{ + uint16_t ecc_reg; + uint8_t ecc_status, err_byte; + int i, error = 0; + + struct r852_device *dev = r852_get_dev(mtd); + + if (dev->card_unstable) + return 0; + + r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); + ecc_reg = r852_read_reg_dword(dev, R852_DATALINE); + r852_write_reg(dev, R852_CTL, dev->ctlreg); + + for (i = 0 ; i <= 1 ; i++) { + + ecc_status = (ecc_reg >> 8) & 0xFF; + + /* ecc uncorrectable error */ + if (ecc_status & R852_ECC_FAIL) { + dbg("ecc: unrecoverable error, in half %d", i); + error = -1; + goto exit; + } + + /* correctable error */ + if (ecc_status & R852_ECC_CORRECTABLE) { + + err_byte = ecc_reg & 0xFF; + dbg("ecc: recoverable error, " + "in half %d, byte %d, bit %d", i, + err_byte, ecc_status & R852_ECC_ERR_BIT_MSK); + + dat[err_byte] ^= + 1 << (ecc_status & R852_ECC_ERR_BIT_MSK); + error++; + } + + dat += 256; + ecc_reg >>= 16; + } +exit: + return error; +} + +/* + * This is copy of nand_read_oob_std + * nand_read_oob_syndrome assumes we can send column address - we can't + */ +static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip, + int page, int sndcmd) +{ + if (sndcmd) { + chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); + sndcmd = 0; + } + chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); + return sndcmd; +} + +/* + * Start the nand engine + */ + +void r852_engine_enable(struct r852_device *dev) +{ + if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) { + r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); + r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); + } else { + r852_write_reg_dword(dev, R852_HW, 
R852_HW_ENABLED); + r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); + } + msleep(300); + r852_write_reg(dev, R852_CTL, 0); +} + + +/* + * Stop the nand engine + */ + +void r852_engine_disable(struct r852_device *dev) +{ + r852_write_reg_dword(dev, R852_HW, 0); + r852_write_reg(dev, R852_CTL, R852_CTL_RESET); +} + +/* + * Test if card is present + */ + +void r852_card_update_present(struct r852_device *dev) +{ + unsigned long flags; + uint8_t reg; + + spin_lock_irqsave(&dev->irqlock, flags); + reg = r852_read_reg(dev, R852_CARD_STA); + dev->card_detected = !!(reg & R852_CARD_STA_PRESENT); + spin_unlock_irqrestore(&dev->irqlock, flags); +} + +/* + * Update card detection IRQ state according to current card state + * which is read in r852_card_update_present + */ +void r852_update_card_detect(struct r852_device *dev) +{ + int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); + dev->card_unstable = 0; + + card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT); + card_detect_reg |= R852_CARD_IRQ_GENABLE; + + card_detect_reg |= dev->card_detected ? + R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT; + + r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg); +} + +ssize_t r852_media_type_show(struct device *sys_dev, + struct device_attribute *attr, char *buf) +{ + struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev); + struct r852_device *dev = r852_get_dev(mtd); + char *data = dev->sm ? "smartmedia" : "xd"; + + strcpy(buf, data); + return strlen(data); +} + +DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL); + + +/* Detect properties of card in slot */ +void r852_update_media_status(struct r852_device *dev) +{ + uint8_t reg; + unsigned long flags; + int readonly; + + spin_lock_irqsave(&dev->irqlock, flags); + if (!dev->card_detected) { + message("card removed"); + spin_unlock_irqrestore(&dev->irqlock, flags); + return ; + } + + readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO; + reg = r852_read_reg(dev, R852_DMA_CAP); + dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT); + + message("detected %s %s card in slot", + dev->sm ? "SmartMedia" : "xD", + readonly ? 
"readonly" : "writeable"); + + dev->readonly = readonly; + spin_unlock_irqrestore(&dev->irqlock, flags); +} + +/* + * Register the nand device + * Called when the card is detected + */ +int r852_register_nand_device(struct r852_device *dev) +{ + dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL); + + if (!dev->mtd) + goto error1; + + WARN_ON(dev->card_registred); + + dev->mtd->owner = THIS_MODULE; + dev->mtd->priv = dev->chip; + dev->mtd->dev.parent = &dev->pci_dev->dev; + + if (dev->readonly) + dev->chip->options |= NAND_ROM; + + r852_engine_enable(dev); + + if (sm_register_device(dev->mtd, dev->sm)) + goto error2; + + if (device_create_file(&dev->mtd->dev, &dev_attr_media_type)) + message("can't create media type sysfs attribute"); + + dev->card_registred = 1; + return 0; +error2: + kfree(dev->mtd); +error1: + /* Force card redetect */ + dev->card_detected = 0; + return -1; +} + +/* + * Unregister the card + */ + +void r852_unregister_nand_device(struct r852_device *dev) +{ + if (!dev->card_registred) + return; + + device_remove_file(&dev->mtd->dev, &dev_attr_media_type); + nand_release(dev->mtd); + r852_engine_disable(dev); + dev->card_registred = 0; + kfree(dev->mtd); + dev->mtd = NULL; +} + +/* Card state updater */ +void r852_card_detect_work(struct work_struct *work) +{ + struct r852_device *dev = + container_of(work, struct r852_device, card_detect_work.work); + + r852_card_update_present(dev); + dev->card_unstable = 0; + + /* False alarm */ + if (dev->card_detected == dev->card_registred) + goto exit; + + /* Read media properties */ + r852_update_media_status(dev); + + /* Register the card */ + if (dev->card_detected) + r852_register_nand_device(dev); + else + r852_unregister_nand_device(dev); +exit: + /* Update detection logic */ + r852_update_card_detect(dev); +} + +/* Ack + disable IRQ generation */ +static void r852_disable_irqs(struct r852_device *dev) +{ + uint8_t reg; + reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); + r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK); + + reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); + r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, + reg & ~R852_DMA_IRQ_MASK); + + r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK); + r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK); +} + +/* Interrupt handler */ +static irqreturn_t r852_irq(int irq, void *data) +{ + struct r852_device *dev = (struct r852_device *)data; + + uint8_t card_status, dma_status; + unsigned long flags; + irqreturn_t ret = IRQ_NONE; + + spin_lock_irqsave(&dev->irqlock, flags); + + /* We can recieve shared interrupt while pci is suspended + in that case reads will return 0xFFFFFFFF.... 
*/ + if (dev->insuspend) + goto out; + + /* handle card detection interrupts first */ + card_status = r852_read_reg(dev, R852_CARD_IRQ_STA); + r852_write_reg(dev, R852_CARD_IRQ_STA, card_status); + + if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) { + + ret = IRQ_HANDLED; + dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT); + + /* we shouldn't recieve any interrupts if we wait for card + to settle */ + WARN_ON(dev->card_unstable); + + /* disable irqs while card is unstable */ + /* this will timeout DMA if active, but better that garbage */ + r852_disable_irqs(dev); + + if (dev->card_unstable) + goto out; + + /* let, card state to settle a bit, and then do the work */ + dev->card_unstable = 1; + queue_delayed_work(dev->card_workqueue, + &dev->card_detect_work, msecs_to_jiffies(100)); + goto out; + } + + + /* Handle dma interrupts */ + dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA); + r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status); + + if (dma_status & R852_DMA_IRQ_MASK) { + + ret = IRQ_HANDLED; + + if (dma_status & R852_DMA_IRQ_ERROR) { + dbg("recieved dma error IRQ"); + r852_dma_done(dev, -EIO); + goto out; + } + + /* recieved DMA interrupt out of nowhere? */ + WARN_ON_ONCE(dev->dma_stage == 0); + + if (dev->dma_stage == 0) + goto out; + + /* done device access */ + if (dev->dma_state == DMA_INTERNAL && + (dma_status & R852_DMA_IRQ_INTERNAL)) { + + dev->dma_state = DMA_MEMORY; + dev->dma_stage++; + } + + /* done memory DMA */ + if (dev->dma_state == DMA_MEMORY && + (dma_status & R852_DMA_IRQ_MEMORY)) { + dev->dma_state = DMA_INTERNAL; + dev->dma_stage++; + } + + /* Enable 2nd half of dma dance */ + if (dev->dma_stage == 2) + r852_dma_enable(dev); + + /* Operation done */ + if (dev->dma_stage == 3) + r852_dma_done(dev, 0); + goto out; + } + + /* Handle unknown interrupts */ + if (dma_status) + dbg("bad dma IRQ status = %x", dma_status); + + if (card_status & ~R852_CARD_STA_CD) + dbg("strange card status = %x", card_status); + +out: + spin_unlock_irqrestore(&dev->irqlock, flags); + return ret; +} + +int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) +{ + int error; + struct nand_chip *chip; + struct r852_device *dev; + + /* pci initialization */ + error = pci_enable_device(pci_dev); + + if (error) + goto error1; + + pci_set_master(pci_dev); + + error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); + if (error) + goto error2; + + error = pci_request_regions(pci_dev, DRV_NAME); + + if (error) + goto error3; + + error = -ENOMEM; + + /* init nand chip, but register it only on card insert */ + chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL); + + if (!chip) + goto error4; + + /* commands */ + chip->cmd_ctrl = r852_cmdctl; + chip->waitfunc = r852_wait; + chip->dev_ready = r852_ready; + + /* I/O */ + chip->read_byte = r852_read_byte; + chip->read_buf = r852_read_buf; + chip->write_buf = r852_write_buf; + chip->verify_buf = r852_verify_buf; + + /* ecc */ + chip->ecc.mode = NAND_ECC_HW_SYNDROME; + chip->ecc.size = R852_DMA_LEN; + chip->ecc.bytes = SM_OOB_SIZE; + chip->ecc.hwctl = r852_ecc_hwctl; + chip->ecc.calculate = r852_ecc_calculate; + chip->ecc.correct = r852_ecc_correct; + + /* TODO: hack */ + chip->ecc.read_oob = r852_read_oob; + + /* init our device structure */ + dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL); + + if (!dev) + goto error5; + + chip->priv = dev; + dev->chip = chip; + dev->pci_dev = pci_dev; + pci_set_drvdata(pci_dev, dev); + + dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN, + 
&dev->phys_bounce_buffer); + + if (!dev->bounce_buffer) + goto error6; + + + error = -ENODEV; + dev->mmio = pci_ioremap_bar(pci_dev, 0); + + if (!dev->mmio) + goto error7; + + error = -ENOMEM; + dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL); + + if (!dev->tmp_buffer) + goto error8; + + init_completion(&dev->dma_done); + + dev->card_workqueue = create_freezeable_workqueue(DRV_NAME); + + if (!dev->card_workqueue) + goto error9; + + INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work); + + /* shutdown everything - precation */ + r852_engine_disable(dev); + r852_disable_irqs(dev); + + r852_dma_test(dev); + + /*register irq handler*/ + error = -ENODEV; + if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED, + DRV_NAME, dev)) + goto error10; + + dev->irq = pci_dev->irq; + spin_lock_init(&dev->irqlock); + + /* kick initial present test */ + dev->card_detected = 0; + r852_card_update_present(dev); + queue_delayed_work(dev->card_workqueue, + &dev->card_detect_work, 0); + + + printk(KERN_NOTICE DRV_NAME ": driver loaded succesfully\n"); + return 0; + +error10: + destroy_workqueue(dev->card_workqueue); +error9: + kfree(dev->tmp_buffer); +error8: + pci_iounmap(pci_dev, dev->mmio); +error7: + pci_free_consistent(pci_dev, R852_DMA_LEN, + dev->bounce_buffer, dev->phys_bounce_buffer); +error6: + kfree(dev); +error5: + kfree(chip); +error4: + pci_release_regions(pci_dev); +error3: +error2: + pci_disable_device(pci_dev); +error1: + return error; +} + +void r852_remove(struct pci_dev *pci_dev) +{ + struct r852_device *dev = pci_get_drvdata(pci_dev); + + /* Stop detect workqueue - + we are going to unregister the device anyway*/ + cancel_delayed_work_sync(&dev->card_detect_work); + destroy_workqueue(dev->card_workqueue); + + /* Unregister the device, this might make more IO */ + r852_unregister_nand_device(dev); + + /* Stop interrupts */ + r852_disable_irqs(dev); + synchronize_irq(dev->irq); + free_irq(dev->irq, dev); + + /* Cleanup */ + kfree(dev->tmp_buffer); + pci_iounmap(pci_dev, dev->mmio); + pci_free_consistent(pci_dev, R852_DMA_LEN, + dev->bounce_buffer, dev->phys_bounce_buffer); + + kfree(dev->chip); + kfree(dev); + + /* Shutdown the PCI device */ + pci_release_regions(pci_dev); + pci_disable_device(pci_dev); +} + +void r852_shutdown(struct pci_dev *pci_dev) +{ + struct r852_device *dev = pci_get_drvdata(pci_dev); + + cancel_delayed_work_sync(&dev->card_detect_work); + r852_disable_irqs(dev); + synchronize_irq(dev->irq); + pci_disable_device(pci_dev); +} + +#ifdef CONFIG_PM +int r852_suspend(struct device *device) +{ + struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); + unsigned long flags; + + if (dev->ctlreg & R852_CTL_CARDENABLE) + return -EBUSY; + + /* First make sure the detect work is gone */ + cancel_delayed_work_sync(&dev->card_detect_work); + + /* Turn off the interrupts and stop the device */ + r852_disable_irqs(dev); + r852_engine_disable(dev); + + spin_lock_irqsave(&dev->irqlock, flags); + dev->insuspend = 1; + spin_unlock_irqrestore(&dev->irqlock, flags); + + /* At that point, even if interrupt handler is running, it will quit */ + /* So wait for this to happen explictly */ + synchronize_irq(dev->irq); + + /* If card was pulled off just during the suspend, which is very + unlikely, we will remove it on resume, it too late now + anyway... 
*/ + dev->card_unstable = 0; + + pci_save_state(to_pci_dev(device)); + return pci_prepare_to_sleep(to_pci_dev(device)); +} + +int r852_resume(struct device *device) +{ + struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); + unsigned long flags; + + /* Turn on the hardware */ + pci_back_from_sleep(to_pci_dev(device)); + pci_restore_state(to_pci_dev(device)); + + r852_disable_irqs(dev); + r852_card_update_present(dev); + r852_engine_disable(dev); + + + /* Now its safe for IRQ to run */ + spin_lock_irqsave(&dev->irqlock, flags); + dev->insuspend = 0; + spin_unlock_irqrestore(&dev->irqlock, flags); + + + /* If card status changed, just do the work */ + if (dev->card_detected != dev->card_registred) { + dbg("card was %s during low power state", + dev->card_detected ? "added" : "removed"); + + queue_delayed_work(dev->card_workqueue, + &dev->card_detect_work, 1000); + return 0; + } + + /* Otherwise, initialize the card */ + if (dev->card_registred) { + r852_engine_enable(dev); + dev->chip->select_chip(dev->mtd, 0); + dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1); + dev->chip->select_chip(dev->mtd, -1); + } + + /* Program card detection IRQ */ + r852_update_card_detect(dev); + return 0; +} +#else +#define r852_suspend NULL +#define r852_resume NULL +#endif + +static const struct pci_device_id r852_pci_id_tbl[] = { + + { PCI_VDEVICE(RICOH, 0x0852), }, + { }, +}; + +MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl); + +SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume); + + +static struct pci_driver r852_pci_driver = { + .name = DRV_NAME, + .id_table = r852_pci_id_tbl, + .probe = r852_probe, + .remove = r852_remove, + .shutdown = r852_shutdown, + .driver.pm = &r852_pm_ops, +}; + +static __init int r852_module_init(void) +{ + return pci_register_driver(&r852_pci_driver); +} + +static void __exit r852_module_exit(void) +{ + pci_unregister_driver(&r852_pci_driver); +} + +module_init(r852_module_init); +module_exit(r852_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); +MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver"); diff --git a/drivers/mtd/nand/r852.h b/drivers/mtd/nand/r852.h new file mode 100644 index 00000000000..8096cc280c7 --- /dev/null +++ b/drivers/mtd/nand/r852.h @@ -0,0 +1,163 @@ +/* + * Copyright © 2009 - Maxim Levitsky + * driver for Ricoh xD readers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/pci.h> +#include <linux/completion.h> +#include <linux/workqueue.h> +#include <linux/mtd/nand.h> +#include <linux/spinlock.h> + + +/* nand interface + ecc + byte write/read does one cycle on nand data lines. + dword write/read does 4 cycles + if R852_CTL_ECC_ACCESS is set in R852_CTL, then dword read reads + results of ecc correction, if DMA read was done before. 
+ If write was done two dword reads read generated ecc checksums +*/ +#define R852_DATALINE 0x00 + +/* control register */ +#define R852_CTL 0x04 +#define R852_CTL_COMMAND 0x01 /* send command (#CLE)*/ +#define R852_CTL_DATA 0x02 /* read/write data (#ALE)*/ +#define R852_CTL_ON 0x04 /* only seem to controls the hd led, */ + /* but has to be set on start...*/ +#define R852_CTL_RESET 0x08 /* unknown, set only on start once*/ +#define R852_CTL_CARDENABLE 0x10 /* probably (#CE) - always set*/ +#define R852_CTL_ECC_ENABLE 0x20 /* enable ecc engine */ +#define R852_CTL_ECC_ACCESS 0x40 /* read/write ecc via reg #0*/ +#define R852_CTL_WRITE 0x80 /* set when performing writes (#WP) */ + +/* card detection status */ +#define R852_CARD_STA 0x05 + +#define R852_CARD_STA_CD 0x01 /* state of #CD line, same as 0x04 */ +#define R852_CARD_STA_RO 0x02 /* card is readonly */ +#define R852_CARD_STA_PRESENT 0x04 /* card is present (#CD) */ +#define R852_CARD_STA_ABSENT 0x08 /* card is absent */ +#define R852_CARD_STA_BUSY 0x80 /* card is busy - (#R/B) */ + +/* card detection irq status & enable*/ +#define R852_CARD_IRQ_STA 0x06 /* IRQ status */ +#define R852_CARD_IRQ_ENABLE 0x07 /* IRQ enable */ + +#define R852_CARD_IRQ_CD 0x01 /* fire when #CD lights, same as 0x04*/ +#define R852_CARD_IRQ_REMOVE 0x04 /* detect card removal */ +#define R852_CARD_IRQ_INSERT 0x08 /* detect card insert */ +#define R852_CARD_IRQ_UNK1 0x10 /* unknown */ +#define R852_CARD_IRQ_GENABLE 0x80 /* general enable */ +#define R852_CARD_IRQ_MASK 0x1D + + + +/* hardware enable */ +#define R852_HW 0x08 +#define R852_HW_ENABLED 0x01 /* hw enabled */ +#define R852_HW_UNKNOWN 0x80 + + +/* dma capabilities */ +#define R852_DMA_CAP 0x09 +#define R852_SMBIT 0x20 /* if set with bit #6 or bit #7, then */ + /* hw is smartmedia */ +#define R852_DMA1 0x40 /* if set w/bit #7, dma is supported */ +#define R852_DMA2 0x80 /* if set w/bit #6, dma is supported */ + + +/* physical DMA address - 32 bit value*/ +#define R852_DMA_ADDR 0x0C + + +/* dma settings */ +#define R852_DMA_SETTINGS 0x10 +#define R852_DMA_MEMORY 0x01 /* (memory <-> internal hw buffer) */ +#define R852_DMA_READ 0x02 /* 0 = write, 1 = read */ +#define R852_DMA_INTERNAL 0x04 /* (internal hw buffer <-> card) */ + +/* dma IRQ status */ +#define R852_DMA_IRQ_STA 0x14 + +/* dma IRQ enable */ +#define R852_DMA_IRQ_ENABLE 0x18 + +#define R852_DMA_IRQ_MEMORY 0x01 /* (memory <-> internal hw buffer) */ +#define R852_DMA_IRQ_ERROR 0x02 /* error did happen */ +#define R852_DMA_IRQ_INTERNAL 0x04 /* (internal hw buffer <-> card) */ +#define R852_DMA_IRQ_MASK 0x07 /* mask of all IRQ bits */ + + +/* ECC syndrome format - read from reg #0 will return two copies of these for + each half of the page. 
+ first byte is error byte location, and second, bit location + flags */ +#define R852_ECC_ERR_BIT_MSK 0x07 /* error bit location */ +#define R852_ECC_CORRECT 0x10 /* no errors - (guessed) */ +#define R852_ECC_CORRECTABLE 0x20 /* correctable error exist */ +#define R852_ECC_FAIL 0x40 /* non correctable error detected */ + +#define R852_DMA_LEN 512 + +#define DMA_INTERNAL 0 +#define DMA_MEMORY 1 + +struct r852_device { + void __iomem *mmio; /* mmio */ + struct mtd_info *mtd; /* mtd backpointer */ + struct nand_chip *chip; /* nand chip backpointer */ + struct pci_dev *pci_dev; /* pci backpointer */ + + /* dma area */ + dma_addr_t phys_dma_addr; /* bus address of buffer*/ + struct completion dma_done; /* data transfer done */ + + dma_addr_t phys_bounce_buffer; /* bus address of bounce buffer */ + uint8_t *bounce_buffer; /* virtual address of bounce buffer */ + + int dma_dir; /* 1 = read, 0 = write */ + int dma_stage; /* 0 - idle, 1 - first step, + 2 - second step */ + + int dma_state; /* 0 = internal, 1 = memory */ + int dma_error; /* dma errors */ + int dma_usable; /* is it possible to use dma */ + + /* card status area */ + struct delayed_work card_detect_work; + struct workqueue_struct *card_workqueue; + int card_registred; /* card registered with mtd */ + int card_detected; /* card detected in slot */ + int card_unstable; /* whenever the card is inserted, + is not known yet */ + int readonly; /* card is readonly */ + int sm; /* Is card smartmedia */ + + /* interrupt handling */ + spinlock_t irqlock; /* IRQ protecting lock */ + int irq; /* irq num */ + int insuspend; /* device is suspended */ + + /* misc */ + void *tmp_buffer; /* temporary buffer */ + uint8_t ctlreg; /* cached contents of control reg */ +}; + +#define DRV_NAME "r852" + + +#define dbg(format, ...) \ + if (debug) \ + printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__) + +#define dbg_verbose(format, ...) \ + if (debug > 1) \ + printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__) + + +#define message(format, ...) 
\ + printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__) diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index fa6e9c7fe51..239aadfd01b 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c @@ -929,14 +929,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev) pr_debug("s3c2410_nand_probe(%p)\n", pdev); - info = kmalloc(sizeof(*info), GFP_KERNEL); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) { dev_err(&pdev->dev, "no memory for flash info\n"); err = -ENOMEM; goto exit_error; } - memset(info, 0, sizeof(*info)); platform_set_drvdata(pdev, info); spin_lock_init(&info->controller.lock); @@ -957,7 +956,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev) /* currently we assume we have the one resource */ res = pdev->resource; - size = res->end - res->start + 1; + size = resource_size(res); info->area = request_mem_region(res->start, size, pdev->name); @@ -994,15 +993,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev) /* allocate our information */ size = nr_sets * sizeof(*info->mtds); - info->mtds = kmalloc(size, GFP_KERNEL); + info->mtds = kzalloc(size, GFP_KERNEL); if (info->mtds == NULL) { dev_err(&pdev->dev, "failed to allocate mtd storage\n"); err = -ENOMEM; goto exit_error; } - memset(info->mtds, 0, size); - /* initialise all possible chips */ nmtd = info->mtds; @@ -1013,7 +1010,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev) s3c2410_nand_init_chip(info, nmtd, sets); nmtd->scan_res = nand_scan_ident(&nmtd->mtd, - (sets) ? sets->nr_chips : 1); + (sets) ? sets->nr_chips : 1, + NULL); if (nmtd->scan_res == 0) { s3c2410_nand_update_chip(info, nmtd); diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index 34752fce079..546c2f0eb2e 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c @@ -855,7 +855,7 @@ static int __devinit flctl_probe(struct platform_device *pdev) nand->read_word = flctl_read_word; } - ret = nand_scan_ident(flctl_mtd, 1); + ret = nand_scan_ident(flctl_mtd, 1, NULL); if (ret) goto err; diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c new file mode 100644 index 00000000000..ac80fb362e6 --- /dev/null +++ b/drivers/mtd/nand/sm_common.c @@ -0,0 +1,148 @@ +/* + * Copyright © 2009 - Maxim Levitsky + * Common routines & support for xD format + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <linux/kernel.h> +#include <linux/mtd/nand.h> +#include "sm_common.h" + +static struct nand_ecclayout nand_oob_sm = { + .eccbytes = 6, + .eccpos = {8, 9, 10, 13, 14, 15}, + .oobfree = { + {.offset = 0 , .length = 4}, /* reserved */ + {.offset = 6 , .length = 2}, /* LBA1 */ + {.offset = 11, .length = 2} /* LBA2 */ + } +}; + +/* NOTE: This layout is is not compatabable with SmartMedia, */ +/* because the 256 byte devices have page depenent oob layout */ +/* However it does preserve the bad block markers */ +/* If you use smftl, it will bypass this and work correctly */ +/* If you not, then you break SmartMedia compliance anyway */ + +static struct nand_ecclayout nand_oob_sm_small = { + .eccbytes = 3, + .eccpos = {0, 1, 2}, + .oobfree = { + {.offset = 3 , .length = 2}, /* reserved */ + {.offset = 6 , .length = 2}, /* LBA1 */ + } +}; + + +static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs) +{ + struct mtd_oob_ops ops; + struct sm_oob oob; + int ret, error = 0; + + memset(&oob, -1, SM_OOB_SIZE); + oob.block_status = 0x0F; + + /* As long as this function is called on erase block boundaries + it will work correctly for 256 byte nand */ + ops.mode = MTD_OOB_PLACE; + ops.ooboffs = 0; + ops.ooblen = mtd->oobsize; + ops.oobbuf = (void *)&oob; + ops.datbuf = NULL; + + + ret = mtd->write_oob(mtd, ofs, &ops); + if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) { + printk(KERN_NOTICE + "sm_common: can't mark sector at %i as bad\n", + (int)ofs); + error = -EIO; + } else + mtd->ecc_stats.badblocks++; + + return error; +} + + +static struct nand_flash_dev nand_smartmedia_flash_ids[] = { + {"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0}, + {"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0}, + {"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0}, + {"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0}, + {"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0}, + {"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM}, + {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0}, + {"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0}, + {"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0}, + {"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM}, + {"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0}, + {"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM}, + {"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0}, + {"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM}, + {"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0}, + {"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM}, + {"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0}, + {"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM}, + {"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0}, + {"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM}, + {"SmartMedia 256MiB 3,3V", 0x71, 512, 256, 0x4000 }, + {"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM}, + {NULL,} +}; + +#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD) +static struct nand_flash_dev nand_xd_flash_ids[] = { + + {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0}, + {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0}, + {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0}, + {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0}, + {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM}, + {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM}, + {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM}, + {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM}, + {NULL,} +}; + +int sm_register_device(struct mtd_info *mtd, int smartmedia) +{ + struct nand_chip *chip = (struct 
nand_chip *)mtd->priv; + int ret; + + chip->options |= NAND_SKIP_BBTSCAN; + + /* Scan for card properties */ + ret = nand_scan_ident(mtd, 1, smartmedia ? + nand_smartmedia_flash_ids : nand_xd_flash_ids); + + if (ret) + return ret; + + /* Bad block marker postion */ + chip->badblockpos = 0x05; + chip->badblockbits = 7; + chip->block_markbad = sm_block_markbad; + + /* ECC layout */ + if (mtd->writesize == SM_SECTOR_SIZE) + chip->ecc.layout = &nand_oob_sm; + else if (mtd->writesize == SM_SMALL_PAGE) + chip->ecc.layout = &nand_oob_sm_small; + else + return -ENODEV; + + ret = nand_scan_tail(mtd); + + if (ret) + return ret; + + return add_mtd_device(mtd); +} +EXPORT_SYMBOL_GPL(sm_register_device); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); +MODULE_DESCRIPTION("Common SmartMedia/xD functions"); diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h new file mode 100644 index 00000000000..00f4a83359b --- /dev/null +++ b/drivers/mtd/nand/sm_common.h @@ -0,0 +1,61 @@ +/* + * Copyright © 2009 - Maxim Levitsky + * Common routines & support for SmartMedia/xD format + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/bitops.h> +#include <linux/mtd/mtd.h> + +/* Full oob structure as written on the flash */ +struct sm_oob { + uint32_t reserved; + uint8_t data_status; + uint8_t block_status; + uint8_t lba_copy1[2]; + uint8_t ecc2[3]; + uint8_t lba_copy2[2]; + uint8_t ecc1[3]; +} __attribute__((packed)); + + +/* one sector is always 512 bytes, but it can consist of two nand pages */ +#define SM_SECTOR_SIZE 512 + +/* oob area is also 16 bytes, but might be from two pages */ +#define SM_OOB_SIZE 16 + +/* This is maximum zone size, and all devices that have more that one zone + have this size */ +#define SM_MAX_ZONE_SIZE 1024 + +/* support for small page nand */ +#define SM_SMALL_PAGE 256 +#define SM_SMALL_OOB_SIZE 8 + + +extern int sm_register_device(struct mtd_info *mtd, int smartmedia); + + +static inline int sm_sector_valid(struct sm_oob *oob) +{ + return hweight16(oob->data_status) >= 5; +} + +static inline int sm_block_valid(struct sm_oob *oob) +{ + return hweight16(oob->block_status) >= 7; +} + +static inline int sm_block_erased(struct sm_oob *oob) +{ + static const uint32_t erased_pattern[4] = { + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }; + + /* First test for erased block */ + if (!memcmp(oob, erased_pattern, sizeof(*oob))) + return 1; + return 0; +} diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c index a4519a7bd68..b37cbde6e7d 100644 --- a/drivers/mtd/nand/socrates_nand.c +++ b/drivers/mtd/nand/socrates_nand.c @@ -220,7 +220,7 @@ static int __devinit socrates_nand_probe(struct of_device *ofdev, dev_set_drvdata(&ofdev->dev, host); /* first scan to find the device and get the page size */ - if (nand_scan_ident(mtd, 1)) { + if (nand_scan_ident(mtd, 1, NULL)) { res = -ENXIO; goto out; } @@ -290,7 +290,7 @@ static int __devexit socrates_nand_remove(struct of_device *ofdev) return 0; } -static struct of_device_id socrates_nand_match[] = +static const struct of_device_id socrates_nand_match[] = { { .compatible = "abb,socrates-nand", diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c index fa28f01ae00..3041d1f7ae3 100644 --- a/drivers/mtd/nand/tmio_nand.c +++ b/drivers/mtd/nand/tmio_nand.c @@ -319,7 +319,7 @@ static int 
tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf, static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio) { - struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; + struct mfd_cell *cell = dev_get_platdata(&dev->dev); int ret; if (cell->enable) { @@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio) static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio) { - struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; + struct mfd_cell *cell = dev_get_platdata(&dev->dev); tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE); if (cell->disable) @@ -372,7 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio) static int tmio_probe(struct platform_device *dev) { - struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; + struct mfd_cell *cell = dev_get_platdata(&dev->dev); struct tmio_nand_data *data = cell->driver_data; struct resource *fcr = platform_get_resource(dev, IORESOURCE_MEM, 0); @@ -405,14 +405,14 @@ static int tmio_probe(struct platform_device *dev) mtd->priv = nand_chip; mtd->name = "tmio-nand"; - tmio->ccr = ioremap(ccr->start, ccr->end - ccr->start + 1); + tmio->ccr = ioremap(ccr->start, resource_size(ccr)); if (!tmio->ccr) { retval = -EIO; goto err_iomap_ccr; } tmio->fcr_base = fcr->start & 0xfffff; - tmio->fcr = ioremap(fcr->start, fcr->end - fcr->start + 1); + tmio->fcr = ioremap(fcr->start, resource_size(fcr)); if (!tmio->fcr) { retval = -EIO; goto err_iomap_fcr; @@ -516,7 +516,7 @@ static int tmio_remove(struct platform_device *dev) #ifdef CONFIG_PM static int tmio_suspend(struct platform_device *dev, pm_message_t state) { - struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; + struct mfd_cell *cell = dev_get_platdata(&dev->dev); if (cell->suspend) cell->suspend(dev); @@ -527,7 +527,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state) static int tmio_resume(struct platform_device *dev) { - struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; + struct mfd_cell *cell = dev_get_platdata(&dev->dev); /* FIXME - is this required or merely another attack of the broken * SHARP platform? Looks suspicious. diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c deleted file mode 100644 index 0f5562aeedc..00000000000 --- a/drivers/mtd/nand/ts7250.c +++ /dev/null @@ -1,207 +0,0 @@ -/* - * drivers/mtd/nand/ts7250.c - * - * Copyright (C) 2004 Technologic Systems (support@embeddedARM.com) - * - * Derived from drivers/mtd/nand/edb7312.c - * Copyright (C) 2004 Marius Gröger (mag@sysgo.de) - * - * Derived from drivers/mtd/nand/autcpu12.c - * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Overview: - * This is a device driver for the NAND flash device found on the - * TS-7250 board which utilizes a Samsung 32 Mbyte part. 
- */ - -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/nand.h> -#include <linux/mtd/partitions.h> -#include <linux/io.h> - -#include <mach/hardware.h> -#include <mach/ts72xx.h> - -#include <asm/sizes.h> -#include <asm/mach-types.h> - -/* - * MTD structure for TS7250 board - */ -static struct mtd_info *ts7250_mtd = NULL; - -#ifdef CONFIG_MTD_PARTITIONS -static const char *part_probes[] = { "cmdlinepart", NULL }; - -#define NUM_PARTITIONS 3 - -/* - * Define static partitions for flash device - */ -static struct mtd_partition partition_info32[] = { - { - .name = "TS-BOOTROM", - .offset = 0x00000000, - .size = 0x00004000, - }, { - .name = "Linux", - .offset = 0x00004000, - .size = 0x01d00000, - }, { - .name = "RedBoot", - .offset = 0x01d04000, - .size = 0x002fc000, - }, -}; - -/* - * Define static partitions for flash device - */ -static struct mtd_partition partition_info128[] = { - { - .name = "TS-BOOTROM", - .offset = 0x00000000, - .size = 0x00004000, - }, { - .name = "Linux", - .offset = 0x00004000, - .size = 0x07d00000, - }, { - .name = "RedBoot", - .offset = 0x07d04000, - .size = 0x002fc000, - }, -}; -#endif - - -/* - * hardware specific access to control-lines - * - * ctrl: - * NAND_NCE: bit 0 -> bit 2 - * NAND_CLE: bit 1 -> bit 1 - * NAND_ALE: bit 2 -> bit 0 - */ -static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) -{ - struct nand_chip *chip = mtd->priv; - - if (ctrl & NAND_CTRL_CHANGE) { - unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE; - unsigned char bits; - - bits = (ctrl & NAND_NCE) << 2; - bits |= ctrl & NAND_CLE; - bits |= (ctrl & NAND_ALE) >> 2; - - __raw_writeb((__raw_readb(addr) & ~0x7) | bits, addr); - } - - if (cmd != NAND_CMD_NONE) - writeb(cmd, chip->IO_ADDR_W); -} - -/* - * read device ready pin - */ -static int ts7250_device_ready(struct mtd_info *mtd) -{ - return __raw_readb(TS72XX_NAND_BUSY_VIRT_BASE) & 0x20; -} - -/* - * Main initialization routine - */ -static int __init ts7250_init(void) -{ - struct nand_chip *this; - const char *part_type = 0; - int mtd_parts_nb = 0; - struct mtd_partition *mtd_parts = 0; - - if (!machine_is_ts72xx() || board_is_ts7200()) - return -ENXIO; - - /* Allocate memory for MTD device structure and private data */ - ts7250_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); - if (!ts7250_mtd) { - printk("Unable to allocate TS7250 NAND MTD device structure.\n"); - return -ENOMEM; - } - - /* Get pointer to private data */ - this = (struct nand_chip *)(&ts7250_mtd[1]); - - /* Initialize structures */ - memset(ts7250_mtd, 0, sizeof(struct mtd_info)); - memset(this, 0, sizeof(struct nand_chip)); - - /* Link the private data with the MTD structure */ - ts7250_mtd->priv = this; - ts7250_mtd->owner = THIS_MODULE; - - /* insert callbacks */ - this->IO_ADDR_R = (void *)TS72XX_NAND_DATA_VIRT_BASE; - this->IO_ADDR_W = (void *)TS72XX_NAND_DATA_VIRT_BASE; - this->cmd_ctrl = ts7250_hwcontrol; - this->dev_ready = ts7250_device_ready; - this->chip_delay = 15; - this->ecc.mode = NAND_ECC_SOFT; - - printk("Searching for NAND flash...\n"); - /* Scan to find existence of the device */ - if (nand_scan(ts7250_mtd, 1)) { - kfree(ts7250_mtd); - return -ENXIO; - } -#ifdef CONFIG_MTD_PARTITIONS - ts7250_mtd->name = "ts7250-nand"; - mtd_parts_nb = parse_mtd_partitions(ts7250_mtd, part_probes, &mtd_parts, 0); - if (mtd_parts_nb > 0) - part_type = "command line"; - else - mtd_parts_nb = 0; -#endif - if (mtd_parts_nb == 0) { - 
mtd_parts = partition_info32; - if (ts7250_mtd->size >= (128 * 0x100000)) - mtd_parts = partition_info128; - mtd_parts_nb = NUM_PARTITIONS; - part_type = "static"; - } - - /* Register the partitions */ - printk(KERN_NOTICE "Using %s partition definition\n", part_type); - add_mtd_partitions(ts7250_mtd, mtd_parts, mtd_parts_nb); - - /* Return happy */ - return 0; -} - -module_init(ts7250_init); - -/* - * Clean up routine - */ -static void __exit ts7250_cleanup(void) -{ - /* Unregister the device */ - del_mtd_device(ts7250_mtd); - - /* Free the MTD device structure */ - kfree(ts7250_mtd); -} - -module_exit(ts7250_cleanup); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Jesse Off <joff@embeddedARM.com>"); -MODULE_DESCRIPTION("MTD map driver for Technologic Systems TS-7250 board"); diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c index 863513c3b69..054a41c0ef4 100644 --- a/drivers/mtd/nand/txx9ndfmc.c +++ b/drivers/mtd/nand/txx9ndfmc.c @@ -274,7 +274,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd) struct nand_chip *chip = mtd->priv; int ret; - ret = nand_scan_ident(mtd, 1); + ret = nand_scan_ident(mtd, 1, NULL); if (!ret) { if (mtd->writesize >= 512) { chip->ecc.size = mtd->writesize; |
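
The hardware ECC path added above in r852.c reads one 32-bit syndrome word from R852_DATALINE while R852_CTL_ECC_ACCESS is set; each 16-bit half describes one 256-byte half of the sector, with the low byte holding the failing byte offset and the upper byte holding the bit position together with the R852_ECC_* status flags defined in r852.h. Below is a minimal, hardware-free sketch of that decode, mirroring the loop in r852_ecc_correct(); the function name and the plain stdint types are editorial assumptions, not part of the driver.

/*
 * Sketch only: decode one R852 ECC syndrome word and patch the 512-byte
 * sector buffer.  Constants are copied from r852.h; the function name
 * and return convention are illustrative.
 */
#include <stdint.h>

#define R852_ECC_ERR_BIT_MSK	0x07	/* error bit location           */
#define R852_ECC_CORRECTABLE	0x20	/* correctable error exists     */
#define R852_ECC_FAIL		0x40	/* uncorrectable error detected */

/* Returns the number of corrected bits, or -1 if either half reports
 * an uncorrectable error. */
static int sketch_apply_syndrome(uint8_t *dat, uint32_t ecc_reg)
{
	int i, corrected = 0;

	for (i = 0; i <= 1; i++) {
		uint8_t err_byte   = ecc_reg & 0xFF;		/* byte offset in this half */
		uint8_t ecc_status = (ecc_reg >> 8) & 0xFF;	/* flags + bit position     */

		if (ecc_status & R852_ECC_FAIL)
			return -1;

		if (ecc_status & R852_ECC_CORRECTABLE) {
			/* single-bit error: flip it in place */
			dat[err_byte] ^= 1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
			corrected++;
		}

		dat += 256;	/* second 256-byte half of the sector  */
		ecc_reg >>= 16;	/* and its 16-bit syndrome             */
	}

	return corrected;
}

Because the loop shifts the syndrome right by 16 to reach the second half, the value has to be kept in a 32-bit quantity; if it were truncated to 16 bits, the second half's status would always read as zero.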