author    Srikant Patnaik  2015-01-11 12:28:04 +0530
committer Srikant Patnaik  2015-01-11 12:28:04 +0530
commit    871480933a1c28f8a9fed4c4d34d06c439a7a422 (patch)
tree      8718f573808810c2a1e8cb8fb6ac469093ca2784 /drivers/mtd/nand
parent    9d40ac5867b9aefe0722bc1f110b965ff294d30d (diff)
download  FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.tar.gz
          FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.tar.bz2
          FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.zip
Moved, renamed, and deleted files
The original directory structure was scattered and unorganized. The changes reorganize it to follow the standard kernel layout.
Diffstat (limited to 'drivers/mtd/nand')
-rw-r--r-- drivers/mtd/nand/Kconfig | 633
-rw-r--r-- drivers/mtd/nand/Makefile | 56
-rw-r--r-- drivers/mtd/nand/alauda.c | 723
-rw-r--r-- drivers/mtd/nand/ams-delta.c | 312
-rw-r--r-- drivers/mtd/nand/atmel_nand.c | 785
-rw-r--r-- drivers/mtd/nand/atmel_nand_ecc.h | 39
-rw-r--r-- drivers/mtd/nand/au1550nd.c | 567
-rw-r--r-- drivers/mtd/nand/autcpu12.c | 239
-rw-r--r-- drivers/mtd/nand/bcm_umi_bch.c | 213
-rw-r--r-- drivers/mtd/nand/bcm_umi_nand.c | 560
-rw-r--r-- drivers/mtd/nand/bf5xx_nand.c | 894
-rw-r--r-- drivers/mtd/nand/cafe_nand.c | 905
-rw-r--r-- drivers/mtd/nand/cmx270_nand.c | 264
-rw-r--r-- drivers/mtd/nand/cs553x_nand.c | 361
-rw-r--r-- drivers/mtd/nand/davinci_nand.c | 838
-rw-r--r-- drivers/mtd/nand/denali.c | 1718
-rw-r--r-- drivers/mtd/nand/denali.h | 499
-rw-r--r-- drivers/mtd/nand/diskonchip.c | 1775
-rw-r--r-- drivers/mtd/nand/docg4.c | 1377
-rw-r--r-- drivers/mtd/nand/fsl_elbc_nand.c | 998
-rw-r--r-- drivers/mtd/nand/fsl_ifc_nand.c | 1072
-rw-r--r-- drivers/mtd/nand/fsl_upm.c | 361
-rw-r--r-- drivers/mtd/nand/fsmc_nand.c | 1267
-rw-r--r-- drivers/mtd/nand/gpio.c | 476
-rw-r--r-- drivers/mtd/nand/gpmi-nand/Makefile | 3
-rw-r--r-- drivers/mtd/nand/gpmi-nand/bch-regs.h | 84
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-lib.c | 1076
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 1620
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-nand.h | 273
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-regs.h | 172
-rw-r--r-- drivers/mtd/nand/h1910.c | 168
-rw-r--r-- drivers/mtd/nand/jz4740_nand.c | 436
-rw-r--r-- drivers/mtd/nand/mpc5121_nfc.c | 886
-rw-r--r-- drivers/mtd/nand/mxc_nand.c | 1298
-rw-r--r-- drivers/mtd/nand/nand_base.c | 4332
-rw-r--r-- drivers/mtd/nand/nand_bbt.c | 2720
-rw-r--r-- drivers/mtd/nand/nand_bch.c | 243
-rw-r--r-- drivers/mtd/nand/nand_bcm_umi.c | 149
-rw-r--r-- drivers/mtd/nand/nand_bcm_umi.h | 337
-rw-r--r-- drivers/mtd/nand/nand_ecc.c | 534
-rw-r--r-- drivers/mtd/nand/nand_ids.c | 561
-rw-r--r-- drivers/mtd/nand/nandsim.c | 2427
-rw-r--r-- drivers/mtd/nand/ndfc.c | 302
-rw-r--r-- drivers/mtd/nand/nomadik_nand.c | 235
-rw-r--r-- drivers/mtd/nand/nuc900_nand.c | 372
-rw-r--r-- drivers/mtd/nand/omap2.c | 1153
-rw-r--r-- drivers/mtd/nand/orion_nand.c | 188
-rw-r--r-- drivers/mtd/nand/pasemi_nand.c | 237
-rw-r--r-- drivers/mtd/nand/plat_nand.c | 157
-rw-r--r-- drivers/mtd/nand/ppchameleonevb.c | 403
-rw-r--r-- drivers/mtd/nand/pxa3xx_nand.c | 1319
-rw-r--r-- drivers/mtd/nand/r852.c | 1122
-rw-r--r-- drivers/mtd/nand/r852.h | 161
-rw-r--r-- drivers/mtd/nand/rtc_from4.c | 624
-rw-r--r-- drivers/mtd/nand/s3c2410.c | 1154
-rw-r--r-- drivers/mtd/nand/sh_flctl.c | 963
-rw-r--r-- drivers/mtd/nand/sharpsl.c | 238
-rw-r--r-- drivers/mtd/nand/sm_common.c | 149
-rw-r--r-- drivers/mtd/nand/sm_common.h | 61
-rw-r--r-- drivers/mtd/nand/socrates_nand.c | 280
-rw-r--r-- drivers/mtd/nand/spia.c | 176
-rw-r--r-- drivers/mtd/nand/tmio_nand.c | 542
-rw-r--r-- drivers/mtd/nand/txx9ndfmc.c | 457
-rwxr-xr-x drivers/mtd/nand/wmt_nand.c | 8285
-rwxr-xr-x drivers/mtd/nand/wmt_nand.h | 365
65 files changed, 53194 insertions, 0 deletions
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
new file mode 100644
index 00000000..8a13c2f3
--- /dev/null
+++ b/drivers/mtd/nand/Kconfig
@@ -0,0 +1,633 @@
+config MTD_NAND_IDS
+ tristate "Include chip ids for known NAND devices."
+ depends on MTD
+ help
+ Useful for NAND drivers that do not use the NAND subsystem but
+ would still like to take advantage of the known chip information.
+
+config MTD_NAND_ECC
+ tristate
+
+config MTD_NAND_ECC_SMC
+ bool "NAND ECC Smart Media byte order"
+ depends on MTD_NAND_ECC
+ default n
+ help
+ Software ECC according to the Smart Media Specification.
+ The original Linux implementation had bytes 0 and 1 swapped.
+
+
+menuconfig MTD_NAND
+ tristate "NAND Device Support"
+ depends on MTD
+ select MTD_NAND_IDS
+ select MTD_NAND_ECC
+ help
+ This enables support for accessing all types of NAND flash
+ devices. For further information see
+ <http://www.linux-mtd.infradead.org/doc/nand.html>.
+
+if MTD_NAND
+
+config MTD_NAND_VERIFY_WRITE
+ bool "Verify NAND page writes"
+ help
+ This adds an extra check when data is written to the flash. The
+ NAND flash device internally checks only bits transitioning
+ from 1 to 0. There is a rare possibility that even though the
+ device thinks the write was successful, a bit could have been
+ flipped accidentally due to device wear or something else.
+
+config MTD_NAND_BCH
+ tristate
+ select BCH
+ depends on MTD_NAND_ECC_BCH
+ default MTD_NAND
+
+config MTD_NAND_ECC_BCH
+ bool "Support software BCH ECC"
+ default n
+ help
+ This enables support for software BCH error correction. Binary BCH
+ codes are more powerful and cpu intensive than traditional Hamming
+ ECC codes. They are used with NAND devices requiring more than 1 bit
+ of error correction.
+
+config MTD_SM_COMMON
+ tristate
+ default n
+
+config MTD_NAND_MUSEUM_IDS
+ bool "Enable chip ids for obsolete ancient NAND devices"
+ default n
+ help
+ Enable this option only when your board has first generation
+ NAND chips (page size 256 bytes, erase size 4-8 KiB). The IDs
+ of these chips were reused by later, larger chips.
+
+config MTD_NAND_AUTCPU12
+ tristate "SmartMediaCard on autronix autcpu12 board"
+ depends on ARCH_AUTCPU12
+ help
+ This enables the driver for the autronix autcpu12 board to
+ access the SmartMediaCard.
+
+config MTD_NAND_DENALI
+ depends on PCI
+ tristate "Support Denali NAND controller on Intel Moorestown"
+ help
+ Enable the driver for NAND flash on Intel Moorestown, using the
+ Denali NAND controller core.
+
+config MTD_NAND_DENALI_SCRATCH_REG_ADDR
+ hex "Denali NAND size scratch register address"
+ default "0xFF108018"
+ depends on MTD_NAND_DENALI
+ help
+ Some platforms place the NAND chip size in a scratch register
+ because (some versions of) the driver aren't able to automatically
+ determine the size of certain chips. Set the address of the
+ scratch register here to enable this feature. On Intel Moorestown
+ boards, the scratch register is at 0xFF108018.
+
+config MTD_NAND_H1900
+ tristate "iPAQ H1900 flash"
+ depends on ARCH_PXA && BROKEN
+ help
+ This enables the driver for the iPAQ h1900 flash.
+
+config MTD_NAND_GPIO
+ tristate "GPIO NAND Flash driver"
+ depends on GENERIC_GPIO && ARM
+ help
+ This enables a GPIO based NAND flash driver.
+
+config MTD_NAND_SPIA
+ tristate "NAND Flash device on SPIA board"
+ depends on ARCH_P720T
+ help
+ If you had to ask, you don't have one. Say 'N'.
+
+config MTD_NAND_AMS_DELTA
+ tristate "NAND Flash device on Amstrad E3"
+ depends on MACH_AMS_DELTA
+ default y
+ help
+ Support for NAND flash on Amstrad E3 (Delta).
+
+config MTD_NAND_OMAP2
+ tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4"
+ depends on ARCH_OMAP2PLUS
+ help
+ Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
+ platforms.
+
+config MTD_NAND_RICOH
+ tristate "Ricoh xD card reader"
+ default n
+ depends on PCI
+ select MTD_SM_COMMON
+ help
+ Enable support for the Ricoh R5C852 xD card reader.
+ You also need to enable either the
+ 'NAND SSFDC (SmartMedia) read only translation layer' or the new,
+ experimental, read-write
+ 'SmartMedia/xD new translation layer'.
+
+config MTD_NAND_AU1550
+ tristate "Au1550/1200 NAND support"
+ depends on MIPS_ALCHEMY
+ help
+ This enables the driver for the NAND flash controller on the
+ AMD/Alchemy 1550 SOC.
+
+config MTD_NAND_BF5XX
+ tristate "Blackfin on-chip NAND Flash Controller driver"
+ depends on BF54x || BF52x
+ help
+ This enables the Blackfin on-chip NAND flash controller
+
+ No board specific support is done by this driver, each board
+ must advertise a platform_device for the driver to attach.
+
+ This driver can also be built as a module. If so, the module
+ will be called bf5xx-nand.
+
+config MTD_NAND_BF5XX_HWECC
+ bool "BF5XX NAND Hardware ECC"
+ default y
+ depends on MTD_NAND_BF5XX
+ help
+ Enable the use of the BF5XX's internal ECC generator when
+ using NAND.
+
+config MTD_NAND_BF5XX_BOOTROM_ECC
+ bool "Use Blackfin BootROM ECC Layout"
+ default n
+ depends on MTD_NAND_BF5XX_HWECC
+ help
+ If you wish to modify NAND pages and allow the Blackfin on-chip
+ BootROM to boot from them, say Y here. This is only necessary
+ if you are booting U-Boot out of NAND and you wish to update
+ U-Boot from Linux' userspace. Otherwise, you should say N here.
+
+ If unsure, say N.
+
+config MTD_NAND_RTC_FROM4
+ tristate "Renesas Flash ROM 4-slot interface board (FROM_BOARD4)"
+ depends on SH_SOLUTION_ENGINE
+ select REED_SOLOMON
+ select REED_SOLOMON_DEC8
+ select BITREVERSE
+ help
+ This enables the driver for the Renesas Technology AG-AND
+ flash interface board (FROM_BOARD4)
+
+config MTD_NAND_PPCHAMELEONEVB
+ tristate "NAND Flash device on PPChameleonEVB board"
+ depends on PPCHAMELEONEVB && BROKEN
+ help
+ This enables the NAND flash driver on the PPChameleon EVB Board.
+
+config MTD_NAND_S3C2410
+ tristate "NAND Flash support for Samsung S3C SoCs"
+ depends on ARCH_S3C24XX || ARCH_S3C64XX
+ help
+ This enables the NAND flash controller on the S3C24xx and S3C64xx
+ SoCs
+
+ No board specific support is done by this driver, each board
+ must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_S3C2410_DEBUG
+ bool "Samsung S3C NAND driver debug"
+ depends on MTD_NAND_S3C2410
+ help
+ Enable debugging of the S3C NAND driver
+
+config MTD_NAND_S3C2410_HWECC
+ bool "Samsung S3C NAND Hardware ECC"
+ depends on MTD_NAND_S3C2410
+ help
+ Enable the use of the controller's internal ECC generator when
+ using NAND. Early versions of the chips have had problems with
+ incorrect ECC generation, and if using these, the default of
+ software ECC is preferable.
+
+config MTD_NAND_NDFC
+ tristate "NDFC NanD Flash Controller"
+ depends on 4xx
+ select MTD_NAND_ECC_SMC
+ help
+ NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
+
+config MTD_NAND_S3C2410_CLKSTOP
+ bool "Samsung S3C NAND IDLE clock stop"
+ depends on MTD_NAND_S3C2410
+ default n
+ help
+ Stop the clock to the NAND controller when there is no chip
+ selected to save power. This will mean there is a small delay
+ when the NAND chip is selected or released, but will save
+ approximately 5mA of power when there is nothing happening.
+
+config MTD_NAND_BCM_UMI
+ tristate "NAND Flash support for BCM Reference Boards"
+ depends on ARCH_BCMRING
+ help
+ This enables the NAND flash controller on the BCM UMI block.
+
+ No board specific support is done by this driver, each board
+ must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_BCM_UMI_HWCS
+ bool "BCM UMI NAND Hardware CS"
+ depends on MTD_NAND_BCM_UMI
+ help
+ Enable the use of the BCM UMI block's internal CS using NAND.
+ This should only be used if you know the external NAND CS can toggle.
+
+config MTD_NAND_DISKONCHIP
+ tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on HAS_IOMEM
+ select REED_SOLOMON
+ select REED_SOLOMON_DEC16
+ help
+ This is a reimplementation of M-Systems DiskOnChip 2000,
+ Millennium and Millennium Plus as a standard NAND device driver,
+ as opposed to the earlier self-contained MTD device drivers.
+ This should enable, among other things, proper JFFS2 operation on
+ these devices.
+
+config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ bool "Advanced detection options for DiskOnChip"
+ depends on MTD_NAND_DISKONCHIP
+ help
+ This option allows you to specify a nonstandard address at which to
+ probe for a DiskOnChip, or to change the detection options. You
+ are unlikely to need any of this unless you are using LinuxBIOS.
+ Say 'N'.
+
+config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+ hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ depends on MTD_NAND_DISKONCHIP
+ default "0"
+ ---help---
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option allows you to specify a single address at which to probe
+ for the device, which is useful if you have other devices in that
+ range which get upset when they are probed.
+
+ (Note that on PowerPC, the normal probe will only check at
+ 0xE4000000.)
+
+ Normally, you should leave this set to zero, to allow the probe at
+ the normal addresses.
+
+config MTD_NAND_DISKONCHIP_PROBE_HIGH
+ bool "Probe high addresses"
+ depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ help
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option changes the probe to look between 0xFFFC8000 and
+ 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
+ useful to you. Say 'N'.
+
+config MTD_NAND_DISKONCHIP_BBTWRITE
+ bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
+ depends on MTD_NAND_DISKONCHIP
+ help
+ On DiskOnChip devices shipped with the INFTL filesystem (Millennium
+ and 2000 TSOP/Alon), Linux reserves some space at the end of the
+ device for the Bad Block Table (BBT). If you have existing INFTL
+ data on your device (created by non-Linux tools such as M-Systems'
+ DOS drivers), your data might overlap the area Linux wants to use for
+ the BBT. If this is a concern for you, leave this option disabled and
+ Linux will not write BBT data into this area.
+ The downside of leaving this option disabled is that if bad blocks
+ are detected by Linux, they will not be recorded in the BBT, which
+ could cause future problems.
+ Once you enable this option, new filesystems (INFTL or others, created
+ in Linux or other operating systems) will not use the reserved area.
+ The only reason not to enable this option is to prevent damage to
+ preexisting filesystems.
+ Even if you leave this disabled, you can enable BBT writes at module
+ load time (assuming you build diskonchip as a module) with the module
+ parameter "inftl_bbt_write=1".
+
+config MTD_NAND_DOCG4
+ tristate "Support for DiskOnChip G4 (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ select BCH
+ select BITREVERSE
+ help
+ Support for diskonchip G4 nand flash, found in various smartphones and
+ PDAs, among them the Palm Treo680, HTC Prophet and Wizard, Toshiba
+ Portege G900, Asus P526, and O2 XDA Zinc.
+
+ With this driver you will be able to use UBI and create a ubifs on the
+ device, so you may wish to consider enabling UBI and UBIFS as well.
+
+ These devices ship with the M-Systems/SanDisk SAFTL formatting, for which
+ there is currently no mtd parser, so you may want to use command line
+ partitioning to segregate write-protected blocks. On the Treo680, the
+ first five erase blocks (256KiB each) are write-protected, followed
+ by the block containing the saftl partition table. This is probably
+ typical.
+
+config MTD_NAND_SHARPSL
+ tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
+ depends on ARCH_PXA
+
+config MTD_NAND_CAFE
+ tristate "NAND support for OLPC CAF? chip"
+ depends on PCI
+ select REED_SOLOMON
+ select REED_SOLOMON_DEC16
+ help
+ Use NAND flash attached to the CAFÉ chip designed for the OLPC
+ laptop.
+
+config MTD_NAND_CS553X
+ tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
+ depends on X86_32
+ help
+ The CS553x companion chips for the AMD Geode processor
+ include NAND flash controllers with built-in hardware ECC
+ capabilities; enabling this option will allow you to use
+ these. The driver will check the MSRs to verify that the
+ controller is enabled for NAND, and currently requires that
+ the controller be in MMIO mode.
+
+ If you say "m", the module will be called cs553x_nand.
+
+config MTD_NAND_ATMEL
+ tristate "Support for NAND Flash / SmartMedia on AT91 and AVR32"
+ depends on ARCH_AT91 || AVR32
+ help
+ Enables support for NAND Flash / Smart Media Card interface
+ on Atmel AT91 and AVR32 processors.
+
+choice
+ prompt "ECC management for NAND Flash / SmartMedia on AT91 / AVR32"
+ depends on MTD_NAND_ATMEL
+
+config MTD_NAND_ATMEL_ECC_HW
+ bool "Hardware ECC"
+ depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9260 || AVR32
+ help
+ Use hardware ECC instead of software ECC when the chip
+ supports it.
+
+ The hardware ECC controller is capable of single bit error
+ correction and 2-bit random detection per page.
+
+ NB : hardware and software ECC schemes are incompatible.
+ If you switch from one to another, you'll have to erase your
+ mtd partition.
+
+ If unsure, say Y
+
+config MTD_NAND_ATMEL_ECC_SOFT
+ bool "Software ECC"
+ help
+ Use software ECC.
+
+ NB : hardware and software ECC schemes are incompatible.
+ If you switch from one to another, you'll have to erase your
+ mtd partition.
+
+config MTD_NAND_ATMEL_ECC_NONE
+ bool "No ECC (testing only, DANGEROUS)"
+ depends on DEBUG_KERNEL
+ help
+ No ECC will be used.
+ It's not a good idea and it should be reserved for testing
+ purposes only.
+
+ If unsure, say N
+
+endchoice
+
+config MTD_NAND_PXA3xx
+ tristate "Support for NAND flash devices on PXA3xx"
+ depends on PXA3xx || ARCH_MMP
+ help
+ This enables the driver for the NAND flash device found on
+ PXA3xx processors
+
+config MTD_NAND_CM_X270
+ tristate "Support for NAND Flash on CM-X270 modules"
+ depends on MACH_ARMCORE
+
+config MTD_NAND_PASEMI
+ tristate "NAND support for PA Semi PWRficient"
+ depends on PPC_PASEMI
+ help
+ Enables support for NAND Flash interface on PA Semi PWRficient
+ based boards
+
+config MTD_NAND_TMIO
+ tristate "NAND Flash device on Toshiba Mobile IO Controller"
+ depends on MFD_TMIO
+ help
+ Support for NAND flash connected to a Toshiba Mobile IO
+ Controller in some PDAs, including the Sharp SL6000x.
+
+config MTD_NAND_NANDSIM
+ tristate "Support for NAND Flash Simulator"
+ help
+ The simulator may simulate various NAND flash chips for the
+ MTD nand layer.
+
+config MTD_NAND_GPMI_NAND
+ bool "GPMI NAND Flash Controller driver"
+ depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
+ help
+ Enables NAND Flash support for IMX23 or IMX28.
+ The GPMI controller is very powerful; with the help of the BCH
+ module it can do hardware ECC. The GPMI supports several
+ NAND flash chips at the same time. The GPMI may conflict with other
+ blocks, such as the SD card controller, so pay attention when you
+ enable the GPMI.
+
+config MTD_NAND_PLATFORM
+ tristate "Support for generic platform NAND driver"
+ depends on HAS_IOMEM
+ help
+ This implements a generic NAND driver for on-SOC platform
+ devices. You will need to provide platform-specific functions
+ via platform_data.
+
+config MTD_ALAUDA
+ tristate "MTD driver for Olympus MAUSB-10 and Fujifilm DPC-R1"
+ depends on USB
+ help
+ These two (and possibly other) Alauda-based cardreaders for
+ SmartMedia and xD allow raw flash access.
+
+config MTD_NAND_ORION
+ tristate "NAND Flash support for Marvell Orion SoC"
+ depends on PLAT_ORION
+ help
+ This enables the NAND flash controller on Orion machines.
+
+ No board specific support is done by this driver, each board
+ must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_FSL_ELBC
+ tristate "NAND support for Freescale eLBC controllers"
+ depends on PPC_OF
+ select FSL_LBC
+ help
+ Various Freescale chips, including the 8313, include a NAND Flash
+ Controller Module with built-in hardware ECC capabilities.
+ Enabling this option will enable you to use this to control
+ external NAND devices.
+
+config MTD_NAND_FSL_IFC
+ tristate "NAND support for Freescale IFC controller"
+ depends on MTD_NAND && FSL_SOC
+ select FSL_IFC
+ help
+ Various Freescale chips, e.g. the P1010, include a NAND Flash machine
+ with built-in hardware ECC capabilities.
+ Enabling this option will enable you to use this to control
+ external NAND devices.
+
+config MTD_NAND_FSL_UPM
+ tristate "Support for NAND on Freescale UPM"
+ depends on PPC_83xx || PPC_85xx
+ select FSL_LBC
+ help
+ Enables support for NAND Flash chips wired onto Freescale PowerPC
+ processor localbus with User-Programmable Machine support.
+
+config MTD_NAND_MPC5121_NFC
+ tristate "MPC5121 built-in NAND Flash Controller support"
+ depends on PPC_MPC512x
+ help
+ This enables the driver for the NAND flash controller on the
+ MPC5121 SoC.
+
+config MTD_NAND_MXC
+ tristate "MXC NAND support"
+ depends on IMX_HAVE_PLATFORM_MXC_NAND
+ help
+ This enables the driver for the NAND flash controller on the
+ MXC processors.
+
+config MTD_NAND_NOMADIK
+ tristate "ST Nomadik 8815 NAND support"
+ depends on ARCH_NOMADIK
+ help
+ Driver for the NAND flash controller on the Nomadik, with ECC.
+
+config MTD_NAND_SH_FLCTL
+ tristate "Support for NAND on Renesas SuperH FLCTL"
+ depends on SUPERH || ARCH_SHMOBILE
+ help
+ Several Renesas SuperH CPUs have an FLCTL. This option enables support
+ for NAND Flash using FLCTL.
+
+config MTD_NAND_DAVINCI
+ tristate "Support NAND on DaVinci SoC"
+ depends on ARCH_DAVINCI
+ help
+ Enable the driver for NAND flash chips on Texas Instruments
+ DaVinci processors.
+
+config MTD_NAND_TXX9NDFMC
+ tristate "NAND Flash support for TXx9 SoC"
+ depends on SOC_TX4938 || SOC_TX4939
+ help
+ This enables the NAND flash controller on the TXx9 SoCs.
+
+config MTD_NAND_SOCRATES
+ tristate "Support for NAND on Socrates board"
+ depends on SOCRATES
+ help
+ Enables support for NAND Flash chips wired onto Socrates board.
+
+config MTD_NAND_NUC900
+ tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
+ depends on ARCH_W90X900
+ help
+ This enables the driver for the NAND Flash on evaluation board based
+ on w90p910 / NUC9xx.
+
+config MTD_NAND_JZ4740
+ tristate "Support for JZ4740 SoC NAND controller"
+ depends on MACH_JZ4740
+ help
+ Enables support for NAND Flash on JZ4740 SoC based boards.
+
+config MTD_NAND_FSMC
+ tristate "Support for NAND on ST Micros FSMC"
+ depends on PLAT_SPEAR || PLAT_NOMADIK || MACH_U300
+ help
+ Enables support for NAND Flash chips on the ST Microelectronics
+ Flexible Static Memory Controller (FSMC)
+
+config MTD_NAND_WMT
+ tristate "NAND Flash support for WMT SoC"
+# depends on ARCH_VT8500
+ help
+ This enables the NAND flash controller on the WMT
+ SoCs
+
+ No board specific support is done by this driver, each board
+ must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_CHIP_NUM
+ int "NAND Flash numbers select verbosity (1 = CE0, 2 = CE0,CE1)"
+ depends on MTD_NAND_WMT
+ default "2"
+ help
+ Determines the verbosity numbers of nand chip supported by WMT.
+
+config MTD_NAND_WMT_HWECC
+ bool "WMT NAND Hardware ECC"
+ depends on MTD_NAND_WMT
+ default y
+ help
+ Enable the use of the WMT's internal ECC generator when
+ using NAND.
+
+config MTD_NAND_HM_ECC
+ int "WMT NAND Hardware ECC Algorithm select verbosity(Harming ECC: =1, BCH ECC: =2)"
+ depends on MTD_NAND_WMT
+ default "2"
+ help
+ Enable the use of the WMT's internal ECC generator when
+ using NAND.
+
+choice
+ prompt "WMT NAND Partition for System"
+ default MTD_NAND_WMT_ANDROID
+ depends on MTD_NAND_WMT
+ help
+ Partition NAND flash for Android, Ubuntu, or an Android/Ubuntu dual system
+
+config MTD_NAND_WMT_ANDROID
+ bool "Android"
+ depends on MTD_NAND_WMT
+ help
+ Partition NAND flash for the WMT Android system
+
+config MTD_NAND_WMT_UBUNTU
+ bool "Ubuntu"
+ depends on MTD_NAND_WMT
+ help
+ Partition NAND flash for the WMT Ubuntu system
+
+config MTD_NAND_WMT_ANDROID_UBUNTU_DUALOS
+ bool "Android + Ubuntu"
+ depends on MTD_NAND_WMT
+ help
+ Partition NAND flash for the WMT Android/Ubuntu dual system
+endchoice
+
+endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
new file mode 100644
index 00000000..41fb98f5
--- /dev/null
+++ b/drivers/mtd/nand/Makefile
@@ -0,0 +1,56 @@
+#
+# linux/drivers/mtd/nand/Makefile
+#
+
+obj-$(CONFIG_MTD_NAND) += nand.o
+obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
+obj-$(CONFIG_MTD_NAND_BCH) += nand_bch.o
+obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
+obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
+
+obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
+obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
+obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
+obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
+obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
+obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
+obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
+obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
+obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
+obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
+obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
+obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o
+obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
+obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
+obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
+obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
+obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
+obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
+obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
+obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
+obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
+obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
+obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
+obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
+obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
+obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
+obj-$(CONFIG_MTD_ALAUDA) += alauda.o
+obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
+obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
+obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
+obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
+obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
+obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
+obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
+obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
+obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
+obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
+obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
+obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
+obj-$(CONFIG_MTD_NAND_WMT) += wmt_nand.o
+
+nand-$(CONFIG_MTD_NAND) := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
new file mode 100644
index 00000000..4f20e1d8
--- /dev/null
+++ b/drivers/mtd/nand/alauda.c
@@ -0,0 +1,723 @@
+/*
+ * MTD driver for Alauda chips
+ *
+ * Copyright (C) 2007 Joern Engel <joern@logfs.org>
+ *
+ * Based on drivers/usb/usb-skeleton.c which is:
+ * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
+ * and on drivers/usb/storage/alauda.c, which is:
+ * (c) 2005 Daniel Drake <dsd@gentoo.org>
+ *
+ * Idea and initial work by Arnd Bergmann <arnd@arndb.de>
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/usb.h>
+#include <linux/mutex.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand_ecc.h>
+
+/* Control commands */
+#define ALAUDA_GET_XD_MEDIA_STATUS 0x08
+#define ALAUDA_ACK_XD_MEDIA_CHANGE 0x0a
+#define ALAUDA_GET_XD_MEDIA_SIG 0x86
+
+/* Common prefix */
+#define ALAUDA_BULK_CMD 0x40
+
+/* The two ports */
+#define ALAUDA_PORT_XD 0x00
+#define ALAUDA_PORT_SM 0x01
+
+/* Bulk commands */
+#define ALAUDA_BULK_READ_PAGE 0x84
+#define ALAUDA_BULK_READ_OOB 0x85 /* don't use, there's a chip bug */
+#define ALAUDA_BULK_READ_BLOCK 0x94
+#define ALAUDA_BULK_ERASE_BLOCK 0xa3
+#define ALAUDA_BULK_WRITE_PAGE 0xa4
+#define ALAUDA_BULK_WRITE_BLOCK 0xb4
+#define ALAUDA_BULK_RESET_MEDIA 0xe0
+
+/* Address shifting */
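+/* The physical block address (pba) is split across several command bytes
+ * of the bulk protocol; its low bits share a command byte with the page
+ * index within the block (see the read/write page helpers below). */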
+#define PBA_LO(pba) ((pba & 0xF) << 5)
+#define PBA_HI(pba) (pba >> 3)
+#define PBA_ZONE(pba) (pba >> 11)
+
+#define TIMEOUT HZ
+
+static const struct usb_device_id alauda_table[] = {
+ { USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */
+ { USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */
+ { }
+};
+MODULE_DEVICE_TABLE(usb, alauda_table);
+
+struct alauda_card {
+ u8 id; /* id byte */
+ u8 chipshift; /* 1<<chipshift total size */
+ u8 pageshift; /* 1<<pageshift page size */
+ u8 blockshift; /* 1<<blockshift block size */
+};
+
+struct alauda {
+ struct usb_device *dev;
+ struct usb_interface *interface;
+ struct mtd_info *mtd;
+ struct alauda_card *card;
+ struct mutex card_mutex;
+ u32 pagemask;
+ u32 bytemask;
+ u32 blockmask;
+ unsigned int write_out;
+ unsigned int bulk_in;
+ unsigned int bulk_out;
+ u8 port;
+ struct kref kref;
+};
+
+static struct alauda_card alauda_card_ids[] = {
+ /* NAND flash */
+ { 0x6e, 20, 8, 12}, /* 1 MB */
+ { 0xe8, 20, 8, 12}, /* 1 MB */
+ { 0xec, 20, 8, 12}, /* 1 MB */
+ { 0x64, 21, 8, 12}, /* 2 MB */
+ { 0xea, 21, 8, 12}, /* 2 MB */
+ { 0x6b, 22, 9, 13}, /* 4 MB */
+ { 0xe3, 22, 9, 13}, /* 4 MB */
+ { 0xe5, 22, 9, 13}, /* 4 MB */
+ { 0xe6, 23, 9, 13}, /* 8 MB */
+ { 0x73, 24, 9, 14}, /* 16 MB */
+ { 0x75, 25, 9, 14}, /* 32 MB */
+ { 0x76, 26, 9, 14}, /* 64 MB */
+ { 0x79, 27, 9, 14}, /* 128 MB */
+ { 0x71, 28, 9, 14}, /* 256 MB */
+
+ /* MASK ROM */
+ { 0x5d, 21, 9, 13}, /* 2 MB */
+ { 0xd5, 22, 9, 13}, /* 4 MB */
+ { 0xd6, 23, 9, 13}, /* 8 MB */
+ { 0x57, 24, 9, 13}, /* 16 MB */
+ { 0x58, 25, 9, 13}, /* 32 MB */
+ { }
+};
+
+static struct alauda_card *get_card(u8 id)
+{
+ struct alauda_card *card;
+
+ for (card = alauda_card_ids; card->id; card++)
+ if (card->id == id)
+ return card;
+ return NULL;
+}
+
+static void alauda_delete(struct kref *kref)
+{
+ struct alauda *al = container_of(kref, struct alauda, kref);
+
+ if (al->mtd) {
+ mtd_device_unregister(al->mtd);
+ kfree(al->mtd);
+ }
+ usb_put_dev(al->dev);
+ kfree(al);
+}
+
+static int alauda_get_media_status(struct alauda *al, void *buf)
+{
+ int ret;
+
+ mutex_lock(&al->card_mutex);
+ ret = usb_control_msg(al->dev, usb_rcvctrlpipe(al->dev, 0),
+ ALAUDA_GET_XD_MEDIA_STATUS, 0xc0, 0, 1, buf, 2, HZ);
+ mutex_unlock(&al->card_mutex);
+ return ret;
+}
+
+static int alauda_ack_media(struct alauda *al)
+{
+ int ret;
+
+ mutex_lock(&al->card_mutex);
+ ret = usb_control_msg(al->dev, usb_sndctrlpipe(al->dev, 0),
+ ALAUDA_ACK_XD_MEDIA_CHANGE, 0x40, 0, 1, NULL, 0, HZ);
+ mutex_unlock(&al->card_mutex);
+ return ret;
+}
+
+static int alauda_get_media_signatures(struct alauda *al, void *buf)
+{
+ int ret;
+
+ mutex_lock(&al->card_mutex);
+ ret = usb_control_msg(al->dev, usb_rcvctrlpipe(al->dev, 0),
+ ALAUDA_GET_XD_MEDIA_SIG, 0xc0, 0, 0, buf, 4, HZ);
+ mutex_unlock(&al->card_mutex);
+ return ret;
+}
+
+static void alauda_reset(struct alauda *al)
+{
+ u8 command[] = {
+ ALAUDA_BULK_CMD, ALAUDA_BULK_RESET_MEDIA, 0, 0,
+ 0, 0, 0, 0, al->port
+ };
+ mutex_lock(&al->card_mutex);
+ usb_bulk_msg(al->dev, al->bulk_out, command, 9, NULL, HZ);
+ mutex_unlock(&al->card_mutex);
+}
+
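+/*
+ * Run software ECC over one 256-byte half page: recompute the ECC for the
+ * data, compare it with the ECC bytes stored in the OOB area and count
+ * corrected and uncorrectable results for the caller.
+ */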
+static void correct_data(void *buf, void *read_ecc,
+ int *corrected, int *uncorrected)
+{
+ u8 calc_ecc[3];
+ int err;
+
+ nand_calculate_ecc(NULL, buf, calc_ecc);
+ err = nand_correct_data(NULL, buf, read_ecc, calc_ecc);
+ if (err) {
+ if (err > 0)
+ (*corrected)++;
+ else
+ (*uncorrected)++;
+ }
+}
+
+struct alauda_sg_request {
+ struct urb *urb[3];
+ struct completion comp;
+};
+
+static void alauda_complete(struct urb *urb)
+{
+ struct completion *comp = urb->context;
+
+ if (comp)
+ complete(comp);
+}
+
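+/*
+ * Read one page plus its 16-byte OOB area.  Three bulk URBs are queued in
+ * order: the 9-byte command, the page data and the OOB data; completion of
+ * the last URB signals the end of the whole transfer.
+ */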
+static int __alauda_read_page(struct mtd_info *mtd, loff_t from, void *buf,
+ void *oob)
+{
+ struct alauda_sg_request sg;
+ struct alauda *al = mtd->priv;
+ u32 pba = from >> al->card->blockshift;
+ u32 page = (from >> al->card->pageshift) & al->pagemask;
+ u8 command[] = {
+ ALAUDA_BULK_CMD, ALAUDA_BULK_READ_PAGE, PBA_HI(pba),
+ PBA_ZONE(pba), 0, PBA_LO(pba) + page, 1, 0, al->port
+ };
+ int i, err;
+
+ for (i=0; i<3; i++)
+ sg.urb[i] = NULL;
+
+ err = -ENOMEM;
+ for (i=0; i<3; i++) {
+ sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
+ if (!sg.urb[i])
+ goto out;
+ }
+ init_completion(&sg.comp);
+ usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
+ alauda_complete, NULL);
+ usb_fill_bulk_urb(sg.urb[1], al->dev, al->bulk_in, buf, mtd->writesize,
+ alauda_complete, NULL);
+ usb_fill_bulk_urb(sg.urb[2], al->dev, al->bulk_in, oob, 16,
+ alauda_complete, &sg.comp);
+
+ mutex_lock(&al->card_mutex);
+ for (i=0; i<3; i++) {
+ err = usb_submit_urb(sg.urb[i], GFP_NOIO);
+ if (err)
+ goto cancel;
+ }
+ if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
+ err = -ETIMEDOUT;
+cancel:
+ for (i=0; i<3; i++) {
+ usb_kill_urb(sg.urb[i]);
+ }
+ }
+ mutex_unlock(&al->card_mutex);
+
+out:
+ usb_free_urb(sg.urb[0]);
+ usb_free_urb(sg.urb[1]);
+ usb_free_urb(sg.urb[2]);
+ return err;
+}
+
+static int alauda_read_page(struct mtd_info *mtd, loff_t from,
+ void *buf, u8 *oob, int *corrected, int *uncorrected)
+{
+ int err;
+
+ err = __alauda_read_page(mtd, from, buf, oob);
+ if (err)
+ return err;
+ correct_data(buf, oob+13, corrected, uncorrected);
+ correct_data(buf+256, oob+8, corrected, uncorrected);
+ return 0;
+}
+
+static int alauda_write_page(struct mtd_info *mtd, loff_t to, void *buf,
+ void *oob)
+{
+ struct alauda_sg_request sg;
+ struct alauda *al = mtd->priv;
+ u32 pba = to >> al->card->blockshift;
+ u32 page = (to >> al->card->pageshift) & al->pagemask;
+ u8 command[] = {
+ ALAUDA_BULK_CMD, ALAUDA_BULK_WRITE_PAGE, PBA_HI(pba),
+ PBA_ZONE(pba), 0, PBA_LO(pba) + page, 32, 0, al->port
+ };
+ int i, err;
+
+ for (i=0; i<3; i++)
+ sg.urb[i] = NULL;
+
+ err = -ENOMEM;
+ for (i=0; i<3; i++) {
+ sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
+ if (!sg.urb[i])
+ goto out;
+ }
+ init_completion(&sg.comp);
+ usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
+ alauda_complete, NULL);
+ usb_fill_bulk_urb(sg.urb[1], al->dev, al->write_out, buf, mtd->writesize,
+ alauda_complete, NULL);
+ usb_fill_bulk_urb(sg.urb[2], al->dev, al->write_out, oob, 16,
+ alauda_complete, &sg.comp);
+
+ mutex_lock(&al->card_mutex);
+ for (i=0; i<3; i++) {
+ err = usb_submit_urb(sg.urb[i], GFP_NOIO);
+ if (err)
+ goto cancel;
+ }
+ if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
+ err = -ETIMEDOUT;
+cancel:
+ for (i=0; i<3; i++) {
+ usb_kill_urb(sg.urb[i]);
+ }
+ }
+ mutex_unlock(&al->card_mutex);
+
+out:
+ usb_free_urb(sg.urb[0]);
+ usb_free_urb(sg.urb[1]);
+ usb_free_urb(sg.urb[2]);
+ return err;
+}
+
+static int alauda_erase_block(struct mtd_info *mtd, loff_t ofs)
+{
+ struct alauda_sg_request sg;
+ struct alauda *al = mtd->priv;
+ u32 pba = ofs >> al->card->blockshift;
+ u8 command[] = {
+ ALAUDA_BULK_CMD, ALAUDA_BULK_ERASE_BLOCK, PBA_HI(pba),
+ PBA_ZONE(pba), 0, PBA_LO(pba), 0x02, 0, al->port
+ };
+ u8 buf[2];
+ int i, err;
+
+ for (i=0; i<2; i++)
+ sg.urb[i] = NULL;
+
+ err = -ENOMEM;
+ for (i=0; i<2; i++) {
+ sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
+ if (!sg.urb[i])
+ goto out;
+ }
+ init_completion(&sg.comp);
+ usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
+ alauda_complete, NULL);
+ usb_fill_bulk_urb(sg.urb[1], al->dev, al->bulk_in, buf, 2,
+ alauda_complete, &sg.comp);
+
+ mutex_lock(&al->card_mutex);
+ for (i=0; i<2; i++) {
+ err = usb_submit_urb(sg.urb[i], GFP_NOIO);
+ if (err)
+ goto cancel;
+ }
+ if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
+ err = -ETIMEDOUT;
+cancel:
+ for (i=0; i<2; i++) {
+ usb_kill_urb(sg.urb[i]);
+ }
+ }
+ mutex_unlock(&al->card_mutex);
+
+out:
+ usb_free_urb(sg.urb[0]);
+ usb_free_urb(sg.urb[1]);
+ return err;
+}
+
+static int alauda_read_oob(struct mtd_info *mtd, loff_t from, void *oob)
+{
+ static u8 ignore_buf[512]; /* write only */
+
+ return __alauda_read_page(mtd, from, ignore_buf, oob);
+}
+
+static int alauda_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ u8 oob[16];
+ int err;
+
+ err = alauda_read_oob(mtd, ofs, oob);
+ if (err)
+ return err;
+
+ /* A block is marked bad if two or more bits are zero */
+ return hweight8(oob[5]) >= 7 ? 0 : 1;
+}
+
+static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct alauda *al = mtd->priv;
+ void *bounce_buf;
+ int err, corrected=0, uncorrected=0;
+
+ bounce_buf = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!bounce_buf)
+ return -ENOMEM;
+
+ *retlen = len;
+ while (len) {
+ u8 oob[16];
+ size_t byte = from & al->bytemask;
+ size_t cplen = min(len, mtd->writesize - byte);
+
+ err = alauda_read_page(mtd, from, bounce_buf, oob,
+ &corrected, &uncorrected);
+ if (err)
+ goto out;
+
+ memcpy(buf, bounce_buf + byte, cplen);
+ buf += cplen;
+ from += cplen;
+ len -= cplen;
+ }
+ err = 0;
+ if (corrected)
+ err = -EUCLEAN;
+ if (uncorrected)
+ err = -EBADMSG;
+out:
+ kfree(bounce_buf);
+ return err;
+}
+
+static int alauda_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct alauda *al = mtd->priv;
+ int err, corrected=0, uncorrected=0;
+
+ if ((from & al->bytemask) || (len & al->bytemask))
+ return alauda_bounce_read(mtd, from, len, retlen, buf);
+
+ *retlen = len;
+ while (len) {
+ u8 oob[16];
+
+ err = alauda_read_page(mtd, from, buf, oob,
+ &corrected, &uncorrected);
+ if (err)
+ return err;
+
+ buf += mtd->writesize;
+ from += mtd->writesize;
+ len -= mtd->writesize;
+ }
+ err = 0;
+ if (corrected)
+ err = -EUCLEAN;
+ if (uncorrected)
+ err = -EBADMSG;
+ return err;
+}
+
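+/*
+ * Write data page by page, generating software ECC for both 256-byte
+ * halves into the OOB area; writes to blocks marked bad are refused.
+ */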
+static int alauda_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct alauda *al = mtd->priv;
+ int err;
+
+ if ((to & al->bytemask) || (len & al->bytemask))
+ return -EINVAL;
+
+ *retlen = len;
+ while (len) {
+ u32 page = (to >> al->card->pageshift) & al->pagemask;
+ u8 oob[16] = { 'h', 'e', 'l', 'l', 'o', 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ /* don't write to bad blocks */
+ if (page == 0) {
+ err = alauda_isbad(mtd, to);
+ if (err) {
+ return -EIO;
+ }
+ }
+ nand_calculate_ecc(mtd, buf, &oob[13]);
+ nand_calculate_ecc(mtd, buf+256, &oob[8]);
+
+ err = alauda_write_page(mtd, to, (void*)buf, oob);
+ if (err)
+ return err;
+
+ buf += mtd->writesize;
+ to += mtd->writesize;
+ len -= mtd->writesize;
+ }
+ return 0;
+}
+
+static int __alauda_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct alauda *al = mtd->priv;
+ u32 ofs = instr->addr;
+ u32 len = instr->len;
+ int err;
+
+ if ((ofs & al->blockmask) || (len & al->blockmask))
+ return -EINVAL;
+
+ while (len) {
+ /* don't erase bad blocks */
+ err = alauda_isbad(mtd, ofs);
+ if (err > 0)
+ err = -EIO;
+ if (err < 0)
+ return err;
+
+ err = alauda_erase_block(mtd, ofs);
+ if (err < 0)
+ return err;
+
+ ofs += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+ return 0;
+}
+
+static int alauda_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ int err;
+
+ err = __alauda_erase(mtd, instr);
+ instr->state = err ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ return err;
+}
+
+static int alauda_init_media(struct alauda *al)
+{
+ u8 buf[4], *b0=buf, *b1=buf+1;
+ struct alauda_card *card;
+ struct mtd_info *mtd;
+ int err;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return -ENOMEM;
+
+ for (;;) {
+ err = alauda_get_media_status(al, buf);
+ if (err < 0)
+ goto error;
+ if (*b0 & 0x10)
+ break;
+ msleep(20);
+ }
+
+ err = alauda_ack_media(al);
+ if (err)
+ goto error;
+
+ msleep(10);
+
+ err = alauda_get_media_status(al, buf);
+ if (err < 0)
+ goto error;
+
+ if (*b0 != 0x14) {
+ /* media not ready */
+ err = -EIO;
+ goto error;
+ }
+ err = alauda_get_media_signatures(al, buf);
+ if (err < 0)
+ goto error;
+
+ card = get_card(*b1);
+ if (!card) {
+ printk(KERN_ERR"Alauda: unknown card id %02x\n", *b1);
+ err = -EIO;
+ goto error;
+ }
+ printk(KERN_INFO"pagesize=%x\nerasesize=%x\nsize=%xMiB\n",
+ 1<<card->pageshift, 1<<card->blockshift,
+ 1<<(card->chipshift-20));
+ al->card = card;
+ al->pagemask = (1 << (card->blockshift - card->pageshift)) - 1;
+ al->bytemask = (1 << card->pageshift) - 1;
+ al->blockmask = (1 << card->blockshift) - 1;
+
+ mtd->name = "alauda";
+ mtd->size = 1<<card->chipshift;
+ mtd->erasesize = 1<<card->blockshift;
+ mtd->writesize = 1<<card->pageshift;
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->_read = alauda_read;
+ mtd->_write = alauda_write;
+ mtd->_erase = alauda_erase;
+ mtd->_block_isbad = alauda_isbad;
+ mtd->priv = al;
+ mtd->owner = THIS_MODULE;
+ mtd->ecc_strength = 1;
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err) {
+ err = -ENFILE;
+ goto error;
+ }
+
+ al->mtd = mtd;
+ alauda_reset(al); /* no clue whether this is necessary */
+ return 0;
+error:
+ kfree(mtd);
+ return err;
+}
+
+static int alauda_check_media(struct alauda *al)
+{
+ u8 buf[2], *b0 = buf, *b1 = buf+1;
+ int err;
+
+ err = alauda_get_media_status(al, buf);
+ if (err < 0)
+ return err;
+
+ if ((*b1 & 0x01) == 0) {
+ /* door open */
+ return -EIO;
+ }
+ if ((*b0 & 0x80) || ((*b0 & 0x1F) == 0x10)) {
+ /* no media ? */
+ return -EIO;
+ }
+ if (*b0 & 0x08) {
+ /* media change ? */
+ return alauda_init_media(al);
+ }
+ return 0;
+}
+
+static int alauda_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct alauda *al;
+ struct usb_host_interface *iface;
+ struct usb_endpoint_descriptor *ep,
+ *ep_in=NULL, *ep_out=NULL, *ep_wr=NULL;
+ int i, err = -ENOMEM;
+
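+ /* one state structure per media port (xD and SmartMedia) */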
+ al = kzalloc(2*sizeof(*al), GFP_KERNEL);
+ if (!al)
+ goto error;
+
+ kref_init(&al->kref);
+ usb_set_intfdata(interface, al);
+
+ al->dev = usb_get_dev(interface_to_usbdev(interface));
+ al->interface = interface;
+
+ iface = interface->cur_altsetting;
+ for (i = 0; i < iface->desc.bNumEndpoints; ++i) {
+ ep = &iface->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(ep)) {
+ ep_in = ep;
+ } else if (usb_endpoint_is_bulk_out(ep)) {
+ if (i==0)
+ ep_wr = ep;
+ else
+ ep_out = ep;
+ }
+ }
+ err = -EIO;
+ if (!ep_wr || !ep_in || !ep_out)
+ goto error;
+
+ al->write_out = usb_sndbulkpipe(al->dev,
+ usb_endpoint_num(ep_wr));
+ al->bulk_in = usb_rcvbulkpipe(al->dev,
+ usb_endpoint_num(ep_in));
+ al->bulk_out = usb_sndbulkpipe(al->dev,
+ usb_endpoint_num(ep_out));
+
+ /* second device is identical up to now */
+ memcpy(al+1, al, sizeof(*al));
+
+ mutex_init(&al[0].card_mutex);
+ mutex_init(&al[1].card_mutex);
+
+ al[0].port = ALAUDA_PORT_XD;
+ al[1].port = ALAUDA_PORT_SM;
+
+ dev_info(&interface->dev, "alauda probed\n");
+ alauda_check_media(al);
+ alauda_check_media(al+1);
+
+ return 0;
+
+error:
+ if (al)
+ kref_put(&al->kref, alauda_delete);
+ return err;
+}
+
+static void alauda_disconnect(struct usb_interface *interface)
+{
+ struct alauda *al;
+
+ al = usb_get_intfdata(interface);
+ usb_set_intfdata(interface, NULL);
+
+ /* FIXME: prevent more I/O from starting */
+
+ /* decrement our usage count */
+ if (al)
+ kref_put(&al->kref, alauda_delete);
+
+ dev_info(&interface->dev, "alauda gone");
+}
+
+static struct usb_driver alauda_driver = {
+ .name = "alauda",
+ .probe = alauda_probe,
+ .disconnect = alauda_disconnect,
+ .id_table = alauda_table,
+};
+
+module_usb_driver(alauda_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
new file mode 100644
index 00000000..861ca8f7
--- /dev/null
+++ b/drivers/mtd/nand/ams-delta.c
@@ -0,0 +1,312 @@
+/*
+ * drivers/mtd/nand/ams-delta.c
+ *
+ * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
+ *
+ * Derived from drivers/mtd/toto.c
+ * Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ * Partially stolen from drivers/mtd/nand/plat_nand.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * Amstrad E3 (Delta).
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <mach/hardware.h>
+#include <asm/sizes.h>
+#include <linux/gpio.h>
+#include <plat/board-ams-delta.h>
+
+/*
+ * MTD structure for E3 (Delta)
+ */
+static struct mtd_info *ams_delta_mtd = NULL;
+
+/*
+ * Define partitions for flash devices
+ */
+
+static struct mtd_partition partition_info[] = {
+ { .name = "Kernel",
+ .offset = 0,
+ .size = 3 * SZ_1M + SZ_512K },
+ { .name = "u-boot",
+ .offset = 3 * SZ_1M + SZ_512K,
+ .size = SZ_256K },
+ { .name = "u-boot params",
+ .offset = 3 * SZ_1M + SZ_512K + SZ_256K,
+ .size = SZ_256K },
+ { .name = "Amstrad LDR",
+ .offset = 4 * SZ_1M,
+ .size = SZ_256K },
+ { .name = "File system",
+ .offset = 4 * SZ_1M + 1 * SZ_256K,
+ .size = 27 * SZ_1M },
+ { .name = "PBL reserved",
+ .offset = 32 * SZ_1M - 3 * SZ_256K,
+ .size = 3 * SZ_256K },
+};
+
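+/*
+ * The NAND data bus is wired to the OMAP MPUIO port: the port direction is
+ * switched via OMAP_MPUIO_IO_CNTL and the NWE/NRE lines are pulsed through
+ * GPIOs around each byte transfer.
+ */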
+static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
+{
+ struct nand_chip *this = mtd->priv;
+ void __iomem *io_base = this->priv;
+
+ writew(0, io_base + OMAP_MPUIO_IO_CNTL);
+ writew(byte, this->IO_ADDR_W);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 0);
+ ndelay(40);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 1);
+}
+
+static u_char ams_delta_read_byte(struct mtd_info *mtd)
+{
+ u_char res;
+ struct nand_chip *this = mtd->priv;
+ void __iomem *io_base = this->priv;
+
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 0);
+ ndelay(40);
+ writew(~0, io_base + OMAP_MPUIO_IO_CNTL);
+ res = readw(this->IO_ADDR_R);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 1);
+
+ return res;
+}
+
+static void ams_delta_write_buf(struct mtd_info *mtd, const u_char *buf,
+ int len)
+{
+ int i;
+
+ for (i=0; i<len; i++)
+ ams_delta_write_byte(mtd, buf[i]);
+}
+
+static void ams_delta_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+
+ for (i=0; i<len; i++)
+ buf[i] = ams_delta_read_byte(mtd);
+}
+
+static int ams_delta_verify_buf(struct mtd_info *mtd, const u_char *buf,
+ int len)
+{
+ int i;
+
+ for (i=0; i<len; i++)
+ if (buf[i] != ams_delta_read_byte(mtd))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Command control function
+ *
+ * ctrl:
+ * NAND_NCE: bit 0 -> bit 2
+ * NAND_CLE: bit 1 -> bit 7
+ * NAND_ALE: bit 2 -> bit 6
+ */
+static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NCE,
+ (ctrl & NAND_NCE) == 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_CLE,
+ (ctrl & NAND_CLE) != 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_ALE,
+ (ctrl & NAND_ALE) != 0);
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ ams_delta_write_byte(mtd, cmd);
+}
+
+static int ams_delta_nand_ready(struct mtd_info *mtd)
+{
+ return gpio_get_value(AMS_DELTA_GPIO_PIN_NAND_RB);
+}
+
+static const struct gpio _mandatory_gpio[] = {
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NCE,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nce",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NRE,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nre",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NWP,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nwp",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NWE,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nwe",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_ALE,
+ .flags = GPIOF_OUT_INIT_LOW,
+ .label = "nand_ale",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_CLE,
+ .flags = GPIOF_OUT_INIT_LOW,
+ .label = "nand_cle",
+ },
+};
+
+/*
+ * Main initialization routine
+ */
+static int __devinit ams_delta_init(struct platform_device *pdev)
+{
+ struct nand_chip *this;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ void __iomem *io_base;
+ int err = 0;
+
+ if (!res)
+ return -ENXIO;
+
+ /* Allocate memory for MTD device structure and private data */
+ ams_delta_mtd = kmalloc(sizeof(struct mtd_info) +
+ sizeof(struct nand_chip), GFP_KERNEL);
+ if (!ams_delta_mtd) {
+ printk (KERN_WARNING "Unable to allocate E3 NAND MTD device structure.\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ams_delta_mtd->owner = THIS_MODULE;
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *) (&ams_delta_mtd[1]);
+
+ /* Initialize structures */
+ memset(ams_delta_mtd, 0, sizeof(struct mtd_info));
+ memset(this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ ams_delta_mtd->priv = this;
+
+ /*
+ * Don't try to request the memory region from here,
+ * it should have been already requested from the
+ * gpio-omap driver and requesting it again would fail.
+ */
+
+ io_base = ioremap(res->start, resource_size(res));
+ if (io_base == NULL) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto out_free;
+ }
+
+ this->priv = io_base;
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
+ this->IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
+ this->read_byte = ams_delta_read_byte;
+ this->write_buf = ams_delta_write_buf;
+ this->read_buf = ams_delta_read_buf;
+ this->verify_buf = ams_delta_verify_buf;
+ this->cmd_ctrl = ams_delta_hwcontrol;
+ if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) {
+ this->dev_ready = ams_delta_nand_ready;
+ } else {
+ this->dev_ready = NULL;
+ printk(KERN_NOTICE "Couldn't request gpio for Delta NAND ready.\n");
+ }
+ /* 30 us command delay time */
+ this->chip_delay = 30;
+ this->ecc.mode = NAND_ECC_SOFT;
+
+ platform_set_drvdata(pdev, io_base);
+
+ /* Request the control GPIOs, initialised to their inactive levels */
+ err = gpio_request_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+ if (err)
+ goto out_gpio;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(ams_delta_mtd, 1)) {
+ err = -ENXIO;
+ goto out_mtd;
+ }
+
+ /* Register the partitions */
+ mtd_device_register(ams_delta_mtd, partition_info,
+ ARRAY_SIZE(partition_info));
+
+ goto out;
+
+ out_mtd:
+ gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+out_gpio:
+ platform_set_drvdata(pdev, NULL);
+ gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
+ iounmap(io_base);
+out_free:
+ kfree(ams_delta_mtd);
+ out:
+ return err;
+}
+
+/*
+ * Clean up routine
+ */
+static int __devexit ams_delta_cleanup(struct platform_device *pdev)
+{
+ void __iomem *io_base = platform_get_drvdata(pdev);
+
+ /* Release resources, unregister device */
+ nand_release(ams_delta_mtd);
+
+ gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+ gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
+ iounmap(io_base);
+
+ /* Free the MTD device structure */
+ kfree(ams_delta_mtd);
+
+ return 0;
+}
+
+static struct platform_driver ams_delta_nand_driver = {
+ .probe = ams_delta_init,
+ .remove = __devexit_p(ams_delta_cleanup),
+ .driver = {
+ .name = "ams-delta-nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(ams_delta_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
+MODULE_DESCRIPTION("Glue layer for NAND flash on Amstrad E3 (Delta)");
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
new file mode 100644
index 00000000..2165576a
--- /dev/null
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -0,0 +1,785 @@
+/*
+ * Copyright (C) 2003 Rick Bronson
+ *
+ * Derived from drivers/mtd/nand/autcpu12.c
+ * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ * Derived from drivers/mtd/spia.c
+ * Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
+ *
+ *
+ * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
+ * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright (C) 2007
+ *
+ * Derived from Das U-Boot source code
+ * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
+ * (C) Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_mtd.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/dmaengine.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/platform_data/atmel.h>
+
+#include <mach/cpu.h>
+
+static int use_dma = 1;
+module_param(use_dma, int, 0);
+
+static int on_flash_bbt = 0;
+module_param(on_flash_bbt, int, 0);
+
+/* Register access macros */
+#define ecc_readl(add, reg) \
+ __raw_readl(add + ATMEL_ECC_##reg)
+#define ecc_writel(add, reg, value) \
+ __raw_writel((value), add + ATMEL_ECC_##reg)
+
+#include "atmel_nand_ecc.h" /* Hardware ECC registers */
+
+/* oob layout for large page size
+ * bad block info is on bytes 0 and 1
+ * the bytes have to be consecutive to avoid
+ * several NAND_CMD_RNDOUT during read
+ */
+static struct nand_ecclayout atmel_oobinfo_large = {
+ .eccbytes = 4,
+ .eccpos = {60, 61, 62, 63},
+ .oobfree = {
+ {2, 58}
+ },
+};
+
+/* oob layout for small page size
+ * bad block info is on bytes 4 and 5
+ * the bytes have to be consecutive to avoid
+ * several NAND_CMD_RNDOUT during read
+ */
+static struct nand_ecclayout atmel_oobinfo_small = {
+ .eccbytes = 4,
+ .eccpos = {0, 1, 2, 3},
+ .oobfree = {
+ {6, 10}
+ },
+};
+
+struct atmel_nand_host {
+ struct nand_chip nand_chip;
+ struct mtd_info mtd;
+ void __iomem *io_base;
+ dma_addr_t io_phys;
+ struct atmel_nand_data board;
+ struct device *dev;
+ void __iomem *ecc;
+
+ struct completion comp;
+ struct dma_chan *dma_chan;
+};
+
+static int cpu_has_dma(void)
+{
+ return cpu_is_at91sam9rl() || cpu_is_at91sam9g45();
+}
+
+/*
+ * Enable NAND.
+ */
+static void atmel_nand_enable(struct atmel_nand_host *host)
+{
+ if (gpio_is_valid(host->board.enable_pin))
+ gpio_set_value(host->board.enable_pin, 0);
+}
+
+/*
+ * Disable NAND.
+ */
+static void atmel_nand_disable(struct atmel_nand_host *host)
+{
+ if (gpio_is_valid(host->board.enable_pin))
+ gpio_set_value(host->board.enable_pin, 1);
+}
+
+/*
+ * Hardware specific access to control-lines
+ */
+static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ if (ctrl & NAND_NCE)
+ atmel_nand_enable(host);
+ else
+ atmel_nand_disable(host);
+ }
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, host->io_base + (1 << host->board.cle));
+ else
+ writeb(cmd, host->io_base + (1 << host->board.ale));
+}
+
+/*
+ * Read the Device Ready pin.
+ */
+static int atmel_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ return gpio_get_value(host->board.rdy_pin) ^
+ !!host->board.rdy_pin_active_low;
+}
+
+/*
+ * Minimal-overhead PIO for data access.
+ */
+static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+
+ __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
+}
+
+static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+
+ __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
+}
+
+static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+
+ __raw_writesb(nand_chip->IO_ADDR_W, buf, len);
+}
+
+static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+
+ __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
+}
+
+static void dma_complete_func(void *completion)
+{
+ complete(completion);
+}
+
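+/*
+ * Move one buffer between memory and the NAND data window using the DMA
+ * engine's memcpy operation.  A non-zero return tells the caller to fall
+ * back to CPU (PIO) access.
+ */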
+static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
+ int is_read)
+{
+ struct dma_device *dma_dev;
+ enum dma_ctrl_flags flags;
+ dma_addr_t dma_src_addr, dma_dst_addr, phys_addr;
+ struct dma_async_tx_descriptor *tx = NULL;
+ dma_cookie_t cookie;
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+ void *p = buf;
+ int err = -EIO;
+ enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ if (buf >= high_memory)
+ goto err_buf;
+
+ dma_dev = host->dma_chan->device;
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP;
+
+ phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
+ if (dma_mapping_error(dma_dev->dev, phys_addr)) {
+ dev_err(host->dev, "Failed to dma_map_single\n");
+ goto err_buf;
+ }
+
+ if (is_read) {
+ dma_src_addr = host->io_phys;
+ dma_dst_addr = phys_addr;
+ } else {
+ dma_src_addr = phys_addr;
+ dma_dst_addr = host->io_phys;
+ }
+
+ tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr,
+ dma_src_addr, len, flags);
+ if (!tx) {
+ dev_err(host->dev, "Failed to prepare DMA memcpy\n");
+ goto err_dma;
+ }
+
+ init_completion(&host->comp);
+ tx->callback = dma_complete_func;
+ tx->callback_param = &host->comp;
+
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(host->dev, "Failed to do DMA tx_submit\n");
+ goto err_dma;
+ }
+
+ dma_async_issue_pending(host->dma_chan);
+ wait_for_completion(&host->comp);
+
+ err = 0;
+
+err_dma:
+ dma_unmap_single(dma_dev->dev, phys_addr, len, dir);
+err_buf:
+ if (err != 0)
+ dev_warn(host->dev, "Fall back to CPU I/O\n");
+ return err;
+}
+
+static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+
+ if (use_dma && len > mtd->oobsize)
+ /* only use DMA for transfers larger than the OOB size: better performance */
+ if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
+ return;
+
+ if (host->board.bus_width_16)
+ atmel_read_buf16(mtd, buf, len);
+ else
+ atmel_read_buf8(mtd, buf, len);
+}
+
+static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+
+ if (use_dma && len > mtd->oobsize)
+ /* only use DMA for transfers larger than the OOB size: better performance */
+ if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
+ return;
+
+ if (host->board.bus_width_16)
+ atmel_write_buf16(mtd, buf, len);
+ else
+ atmel_write_buf8(mtd, buf, len);
+}
+
+/*
+ * Calculate HW ECC
+ *
+ * function called after a write
+ *
+ * mtd: MTD block structure
+ * dat: raw data (unused)
+ * ecc_code: buffer for ECC
+ */
+static int atmel_nand_calculate(struct mtd_info *mtd,
+ const u_char *dat, unsigned char *ecc_code)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ unsigned int ecc_value;
+
+ /* get the first 2 ECC bytes */
+ ecc_value = ecc_readl(host->ecc, PR);
+
+ ecc_code[0] = ecc_value & 0xFF;
+ ecc_code[1] = (ecc_value >> 8) & 0xFF;
+
+ /* get the last 2 ECC bytes */
+ ecc_value = ecc_readl(host->ecc, NPR) & ATMEL_ECC_NPARITY;
+
+ ecc_code[2] = ecc_value & 0xFF;
+ ecc_code[3] = (ecc_value >> 8) & 0xFF;
+
+ return 0;
+}
+
+/*
+ * HW ECC read page function
+ *
+ * mtd: mtd info structure
+ * chip: nand chip info structure
+ * buf: buffer to store read data
+ */
+static int atmel_nand_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int page)
+{
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+ uint8_t *ecc_pos;
+ int stat;
+
+ /*
+ * Errata: ALE is incorrectly wired up to the ECC controller
+ * on the AP7000, so it will include the address cycles in the
+ * ECC calculation.
+ *
+ * Workaround: Reset the parity registers before reading the
+ * actual data.
+ */
+ if (cpu_is_at32ap7000()) {
+ struct atmel_nand_host *host = chip->priv;
+ ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
+ }
+
+ /* read the page */
+ chip->read_buf(mtd, p, eccsize);
+
+ /* move to ECC position if needed */
+ if (eccpos[0] != 0) {
+ /* This only works on large pages
+ * because the ECC controller waits for
+ * NAND_CMD_RNDOUTSTART after the
+ * NAND_CMD_RNDOUT.
+ * For small pages, eccpos[0] == 0 anyway, so no move is needed.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + eccpos[0], -1);
+ }
+
+ /* the ECC controller needs to read the ECC just after the data */
+ ecc_pos = oob + eccpos[0];
+ chip->read_buf(mtd, ecc_pos, eccbytes);
+
+ /* check if there's an error */
+ stat = chip->ecc.correct(mtd, p, oob, NULL);
+
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+
+ /* get back to oob start (end of page) */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
+
+ /* read the oob */
+ chip->read_buf(mtd, oob, mtd->oobsize);
+
+ return 0;
+}
+
+/*
+ * HW ECC Correction
+ *
+ * function called after a read
+ *
+ * mtd: MTD block structure
+ * dat: raw data read from the chip
+ * read_ecc: ECC from the chip (unused)
+ * isnull: unused
+ *
+ * Detect and correct a 1 bit error for a page
+ */
+static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *isnull)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ unsigned int ecc_status;
+ unsigned int ecc_word, ecc_bit;
+
+ /* get the status from the Status Register */
+ ecc_status = ecc_readl(host->ecc, SR);
+
+ /* if there's no error */
+ if (likely(!(ecc_status & ATMEL_ECC_RECERR)))
+ return 0;
+
+ /* get error bit offset (4 bits) */
+ ecc_bit = ecc_readl(host->ecc, PR) & ATMEL_ECC_BITADDR;
+ /* get word address (12 bits) */
+ ecc_word = ecc_readl(host->ecc, PR) & ATMEL_ECC_WORDADDR;
+ ecc_word >>= 4;
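+ /*
+ * Example, using the PR field layout from atmel_nand_ecc.h: if PR
+ * reads 0x0235, then ecc_bit = 0x5 and ecc_word = 0x23, i.e. bit 5
+ * of word 0x23 in the page is flagged as flipped.
+ */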
+
+ /* if there are multiple errors */
+ if (ecc_status & ATMEL_ECC_MULERR) {
+ /* check if it is a freshly erased block
+ * (filled with 0xff) */
+ if ((ecc_bit == ATMEL_ECC_BITADDR)
+ && (ecc_word == (ATMEL_ECC_WORDADDR >> 4))) {
+ /* the block has just been erased, return OK */
+ return 0;
+ }
+ /* it doesn't seem to be a freshly
+ * erased block.
+ * We can't correct so many errors */
+ dev_dbg(host->dev, "atmel_nand : multiple errors detected."
+ " Unable to correct.\n");
+ return -EIO;
+ }
+
+ /* if there's a single bit error : we can correct it */
+ if (ecc_status & ATMEL_ECC_ECCERR) {
+ /* there's nothing much to do here.
+ * the bit error is on the ECC itself.
+ */
+ dev_dbg(host->dev, "atmel_nand : one bit error on ECC code."
+ " Nothing to correct\n");
+ return 0;
+ }
+
+ dev_dbg(host->dev, "atmel_nand : one bit error on data."
+ " (word offset in the page :"
+ " 0x%x bit offset : 0x%x)\n",
+ ecc_word, ecc_bit);
+ /* correct the error */
+ if (nand_chip->options & NAND_BUSWIDTH_16) {
+ /* 16 bits words */
+ ((unsigned short *) dat)[ecc_word] ^= (1 << ecc_bit);
+ } else {
+ /* 8 bits words */
+ dat[ecc_word] ^= (1 << ecc_bit);
+ }
+ dev_dbg(host->dev, "atmel_nand : error corrected\n");
+ return 1;
+}
+
+/*
+ * Enable HW ECC: unused on most chips
+ */
+static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
+{
+ if (cpu_is_at32ap7000()) {
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
+ }
+}
+
+#if defined(CONFIG_OF)
+static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
+ struct device_node *np)
+{
+ u32 val;
+ int ecc_mode;
+ struct atmel_nand_data *board = &host->board;
+ enum of_gpio_flags flags;
+
+ if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
+ if (val >= 32) {
+ dev_err(host->dev, "invalid addr-offset %u\n", val);
+ return -EINVAL;
+ }
+ board->ale = val;
+ }
+
+ if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) {
+ if (val >= 32) {
+ dev_err(host->dev, "invalid cmd-offset %u\n", val);
+ return -EINVAL;
+ }
+ board->cle = val;
+ }
+
+ ecc_mode = of_get_nand_ecc_mode(np);
+
+ board->ecc_mode = ecc_mode < 0 ? NAND_ECC_SOFT : ecc_mode;
+
+ board->on_flash_bbt = of_get_nand_on_flash_bbt(np);
+
+ if (of_get_nand_bus_width(np) == 16)
+ board->bus_width_16 = 1;
+
+ board->rdy_pin = of_get_gpio_flags(np, 0, &flags);
+ board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW);
+
+ board->enable_pin = of_get_gpio(np, 1);
+ board->det_pin = of_get_gpio(np, 2);
+
+ return 0;
+}
+#else
+static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
+ struct device_node *np)
+{
+ return -EINVAL;
+}
+#endif
+
+/*
+ * Probe for the NAND device.
+ */
+static int __init atmel_nand_probe(struct platform_device *pdev)
+{
+ struct atmel_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ struct resource *regs;
+ struct resource *mem;
+ struct mtd_part_parser_data ppdata = {};
+ int res;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ printk(KERN_ERR "atmel_nand: can't get I/O resource mem\n");
+ return -ENXIO;
+ }
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = kzalloc(sizeof(struct atmel_nand_host), GFP_KERNEL);
+ if (!host) {
+ printk(KERN_ERR "atmel_nand: failed to allocate device structure.\n");
+ return -ENOMEM;
+ }
+
+ host->io_phys = (dma_addr_t)mem->start;
+
+ host->io_base = ioremap(mem->start, resource_size(mem));
+ if (host->io_base == NULL) {
+ printk(KERN_ERR "atmel_nand: ioremap failed\n");
+ res = -EIO;
+ goto err_nand_ioremap;
+ }
+
+ mtd = &host->mtd;
+ nand_chip = &host->nand_chip;
+ host->dev = &pdev->dev;
+ if (pdev->dev.of_node) {
+ res = atmel_of_init_port(host, pdev->dev.of_node);
+ if (res)
+ goto err_nand_ioremap;
+ } else {
+ memcpy(&host->board, pdev->dev.platform_data,
+ sizeof(struct atmel_nand_data));
+ }
+
+ nand_chip->priv = host; /* link the private data structures */
+ mtd->priv = nand_chip;
+ mtd->owner = THIS_MODULE;
+
+ /* Set address of NAND IO lines */
+ nand_chip->IO_ADDR_R = host->io_base;
+ nand_chip->IO_ADDR_W = host->io_base;
+ nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
+
+ if (gpio_is_valid(host->board.rdy_pin))
+ nand_chip->dev_ready = atmel_nand_device_ready;
+
+ nand_chip->ecc.mode = host->board.ecc_mode;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!regs && nand_chip->ecc.mode == NAND_ECC_HW) {
+ printk(KERN_ERR "atmel_nand: can't get I/O resource "
+ "regs\nFalling back on software ECC\n");
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ if (nand_chip->ecc.mode == NAND_ECC_HW) {
+ host->ecc = ioremap(regs->start, resource_size(regs));
+ if (host->ecc == NULL) {
+ printk(KERN_ERR "atmel_nand: ioremap failed\n");
+ res = -EIO;
+ goto err_ecc_ioremap;
+ }
+ nand_chip->ecc.calculate = atmel_nand_calculate;
+ nand_chip->ecc.correct = atmel_nand_correct;
+ nand_chip->ecc.hwctl = atmel_nand_hwctl;
+ nand_chip->ecc.read_page = atmel_nand_read_page;
+ nand_chip->ecc.bytes = 4;
+ nand_chip->ecc.strength = 1;
+ }
+
+ nand_chip->chip_delay = 20; /* 20us command delay time */
+
+ if (host->board.bus_width_16) /* 16-bit bus width */
+ nand_chip->options |= NAND_BUSWIDTH_16;
+
+ nand_chip->read_buf = atmel_read_buf;
+ nand_chip->write_buf = atmel_write_buf;
+
+ platform_set_drvdata(pdev, host);
+ atmel_nand_enable(host);
+
+ if (gpio_is_valid(host->board.det_pin)) {
+ if (gpio_get_value(host->board.det_pin)) {
+ printk(KERN_INFO "No SmartMedia card inserted.\n");
+ res = -ENXIO;
+ goto err_no_card;
+ }
+ }
+
+ if (host->board.on_flash_bbt || on_flash_bbt) {
+ printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
+ nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
+ }
+
+ if (!cpu_has_dma())
+ use_dma = 0;
+
+ if (use_dma) {
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ host->dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (!host->dma_chan) {
+ dev_err(host->dev, "Failed to request DMA channel\n");
+ use_dma = 0;
+ }
+ }
+ if (use_dma)
+ dev_info(host->dev, "Using %s for DMA transfers.\n",
+ dma_chan_name(host->dma_chan));
+ else
+ dev_info(host->dev, "No DMA support for NAND access.\n");
+
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ res = -ENXIO;
+ goto err_scan_ident;
+ }
+
+ if (nand_chip->ecc.mode == NAND_ECC_HW) {
+ /* ECC is calculated for the whole page (1 step) */
+ nand_chip->ecc.size = mtd->writesize;
+
+ /* set ECC page size and oob layout */
+ switch (mtd->writesize) {
+ case 512:
+ nand_chip->ecc.layout = &atmel_oobinfo_small;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
+ break;
+ case 1024:
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
+ break;
+ case 2048:
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
+ break;
+ case 4096:
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
+ break;
+ default:
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ nand_chip->ecc.calculate = NULL;
+ nand_chip->ecc.correct = NULL;
+ nand_chip->ecc.hwctl = NULL;
+ nand_chip->ecc.read_page = NULL;
+ nand_chip->ecc.postpad = 0;
+ nand_chip->ecc.prepad = 0;
+ nand_chip->ecc.bytes = 0;
+ break;
+ }
+ }
+
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
+ res = -ENXIO;
+ goto err_scan_tail;
+ }
+
+ mtd->name = "atmel_nand";
+ ppdata.of_node = pdev->dev.of_node;
+ res = mtd_device_parse_register(mtd, NULL, &ppdata,
+ host->board.parts, host->board.num_parts);
+ if (!res)
+ return res;
+
+err_scan_tail:
+err_scan_ident:
+err_no_card:
+ atmel_nand_disable(host);
+ platform_set_drvdata(pdev, NULL);
+ if (host->dma_chan)
+ dma_release_channel(host->dma_chan);
+ if (host->ecc)
+ iounmap(host->ecc);
+err_ecc_ioremap:
+ iounmap(host->io_base);
+err_nand_ioremap:
+ kfree(host);
+ return res;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int __exit atmel_nand_remove(struct platform_device *pdev)
+{
+ struct atmel_nand_host *host = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = &host->mtd;
+
+ nand_release(mtd);
+
+ atmel_nand_disable(host);
+
+ if (host->ecc)
+ iounmap(host->ecc);
+
+ if (host->dma_chan)
+ dma_release_channel(host->dma_chan);
+
+ iounmap(host->io_base);
+ kfree(host);
+
+ return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_nand_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-nand" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
+#endif
+
+static struct platform_driver atmel_nand_driver = {
+ .remove = __exit_p(atmel_nand_remove),
+ .driver = {
+ .name = "atmel_nand",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_nand_dt_ids),
+ },
+};
+
+static int __init atmel_nand_init(void)
+{
+ return platform_driver_probe(&atmel_nand_driver, atmel_nand_probe);
+}
+
+
+static void __exit atmel_nand_exit(void)
+{
+ platform_driver_unregister(&atmel_nand_driver);
+}
+
+
+module_init(atmel_nand_init);
+module_exit(atmel_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rick Bronson");
+MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91 / AVR32");
+MODULE_ALIAS("platform:atmel_nand");
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
new file mode 100644
index 00000000..578c776e
--- /dev/null
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -0,0 +1,39 @@
+/*
+ * Error Corrected Code Controller (ECC) - System peripherals registers.
+ * Based on AT91SAM9260 datasheet revision B.
+ *
+ * Copyright (C) 2007 Andrew Victor
+ * Copyright (C) 2007 Atmel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ATMEL_NAND_ECC_H
+#define ATMEL_NAND_ECC_H
+
+#define ATMEL_ECC_CR 0x00 /* Control register */
+#define ATMEL_ECC_RST (1 << 0) /* Reset parity */
+
+#define ATMEL_ECC_MR 0x04 /* Mode register */
+#define ATMEL_ECC_PAGESIZE (3 << 0) /* Page Size */
+#define ATMEL_ECC_PAGESIZE_528 (0)
+#define ATMEL_ECC_PAGESIZE_1056 (1)
+#define ATMEL_ECC_PAGESIZE_2112 (2)
+#define ATMEL_ECC_PAGESIZE_4224 (3)
+
+#define ATMEL_ECC_SR 0x08 /* Status register */
+#define ATMEL_ECC_RECERR (1 << 0) /* Recoverable Error */
+#define ATMEL_ECC_ECCERR (1 << 1) /* ECC Single Bit Error */
+#define ATMEL_ECC_MULERR (1 << 2) /* Multiple Errors */
+
+#define ATMEL_ECC_PR 0x0c /* Parity register */
+#define ATMEL_ECC_BITADDR (0xf << 0) /* Bit Error Address */
+#define ATMEL_ECC_WORDADDR (0xfff << 4) /* Word Error Address */
+
+#define ATMEL_ECC_NPR 0x10 /* NParity register */
+#define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */
+
+#endif
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
new file mode 100644
index 00000000..73abbc3e
--- /dev/null
+++ b/drivers/mtd/nand/au1550nd.c
@@ -0,0 +1,567 @@
+/*
+ * drivers/mtd/nand/au1550nd.c
+ *
+ * Copyright (C) 2004 Embedded Edge, LLC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <asm/io.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1550nd.h>
+
+
+struct au1550nd_ctx {
+ struct mtd_info info;
+ struct nand_chip chip;
+
+ int cs;
+ void __iomem *base;
+ void (*write_byte)(struct mtd_info *, u_char);
+};
+
+/**
+ * au_read_byte - read one byte from the chip
+ * @mtd: MTD device structure
+ *
+ * read function for 8bit buswidth
+ */
+static u_char au_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ u_char ret = readb(this->IO_ADDR_R);
+ au_sync();
+ return ret;
+}
+
+/**
+ * au_write_byte - write one byte to the chip
+ * @mtd: MTD device structure
+ * @byte: data byte to write
+ *
+ * write function for 8bit buswidth
+ */
+static void au_write_byte(struct mtd_info *mtd, u_char byte)
+{
+ struct nand_chip *this = mtd->priv;
+ writeb(byte, this->IO_ADDR_W);
+ au_sync();
+}
+
+/**
+ * au_read_byte16 - read one byte endianness aware from the chip
+ * @mtd: MTD device structure
+ *
+ * read function for 16bit buswidth with endianness conversion
+ */
+static u_char au_read_byte16(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ u_char ret = (u_char) cpu_to_le16(readw(this->IO_ADDR_R));
+ au_sync();
+ return ret;
+}
+
+/**
+ * au_write_byte16 - write one byte endianness aware to the chip
+ * @mtd: MTD device structure
+ * @byte: data byte to write
+ *
+ * write function for 16bit buswidth with endianness conversion
+ */
+static void au_write_byte16(struct mtd_info *mtd, u_char byte)
+{
+ struct nand_chip *this = mtd->priv;
+ writew(le16_to_cpu((u16) byte), this->IO_ADDR_W);
+ au_sync();
+}
+
+/**
+ * au_read_word - read one word from the chip
+ * @mtd: MTD device structure
+ *
+ * read function for 16bit buswidth without endianness conversion
+ */
+static u16 au_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ u16 ret = readw(this->IO_ADDR_R);
+ au_sync();
+ return ret;
+}
+
+/**
+ * au_write_buf - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * write function for 8bit buswidth
+ */
+static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i = 0; i < len; i++) {
+ writeb(buf[i], this->IO_ADDR_W);
+ au_sync();
+ }
+}
+
+/**
+ * au_read_buf - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * read function for 8bit buswidth
+ */
+static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i = 0; i < len; i++) {
+ buf[i] = readb(this->IO_ADDR_R);
+ au_sync();
+ }
+}
+
+/**
+ * au_verify_buf - Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ *
+ * verify function for 8bit buswidth
+ */
+static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i = 0; i < len; i++) {
+ if (buf[i] != readb(this->IO_ADDR_R))
+ return -EFAULT;
+ au_sync();
+ }
+
+ return 0;
+}
+
+/**
+ * au_write_buf16 - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * write function for 16bit buswidth
+ */
+static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++) {
+ writew(p[i], this->IO_ADDR_W);
+ au_sync();
+ }
+
+}
+
+/**
+ * au_read_buf16 - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * read function for 16bit buswidth
+ */
+static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++) {
+ p[i] = readw(this->IO_ADDR_R);
+ au_sync();
+ }
+}
+
+/**
+ * au_verify_buf16 - Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ *
+ * verify function for 16bit buswidth
+ */
+static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++) {
+ if (p[i] != readw(this->IO_ADDR_R))
+ return -EFAULT;
+ au_sync();
+ }
+ return 0;
+}
+
+/* Select the chip by setting nCE to low */
+#define NAND_CTL_SETNCE 1
+/* Deselect the chip by setting nCE to high */
+#define NAND_CTL_CLRNCE 2
+/* Select the command latch by setting CLE to high */
+#define NAND_CTL_SETCLE 3
+/* Deselect the command latch by setting CLE to low */
+#define NAND_CTL_CLRCLE 4
+/* Select the address latch by setting ALE to high */
+#define NAND_CTL_SETALE 5
+/* Deselect the address latch by setting ALE to low */
+#define NAND_CTL_CLRALE 6
+
+static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+ struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info);
+ struct nand_chip *this = mtd->priv;
+
+ switch (cmd) {
+
+ case NAND_CTL_SETCLE:
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_CMD;
+ break;
+
+ case NAND_CTL_CLRCLE:
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
+ break;
+
+ case NAND_CTL_SETALE:
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_ADDR;
+ break;
+
+ case NAND_CTL_CLRALE:
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
+ /* FIXME: Nobody knows why this is necessary,
+ * but it works only that way */
+ udelay(1);
+ break;
+
+ case NAND_CTL_SETNCE:
+ /* assert (force assert) chip enable */
+ au_writel((1 << (4 + ctx->cs)), MEM_STNDCTL);
+ break;
+
+ case NAND_CTL_CLRNCE:
+ /* deassert chip enable */
+ au_writel(0, MEM_STNDCTL);
+ break;
+ }
+
+ this->IO_ADDR_R = this->IO_ADDR_W;
+
+ /* Drain the writebuffer */
+ au_sync();
+}
+
+int au1550_device_ready(struct mtd_info *mtd)
+{
+ int ret = (au_readl(MEM_STSTAT) & 0x1) ? 1 : 0;
+ au_sync();
+ return ret;
+}
+
+/**
+ * au1550_select_chip - control -CE line
+ * Forbid driving -CE manually, permitting the NAND controller to do this.
+ * Keeping -CE asserted during whole sector reads interferes with the
+ * NOR flash and PCMCIA drivers, as it causes contention on the static bus.
+ * We only have to hold -CE low for the NAND read commands, since the flash
+ * chip needs it asserted while it is not ready, but the NAND
+ * controller keeps it released otherwise.
+ *
+ * @mtd: MTD device structure
+ * @chip: chipnumber to select, -1 for deselect
+ */
+static void au1550_select_chip(struct mtd_info *mtd, int chip)
+{
+}
+
+/**
+ * au1550_command - Send command to NAND device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
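+ /*
+ * XOR with the active-low flag so this returns non-zero when the
+ * chip is ready, whatever the R/B pin polarity.
+ */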
+ */
+static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info);
+ struct nand_chip *this = mtd->priv;
+ int ce_override = 0, i;
+ unsigned long flags = 0;
+
+ /* Begin command latch cycle */
+ au1550_hwcontrol(mtd, NAND_CTL_SETCLE);
+ /*
+ * Write out the command to the device.
+ */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ ctx->write_byte(mtd, readcmd);
+ }
+ ctx->write_byte(mtd, command);
+
+ /* Set ALE and clear CLE to start address cycle */
+ au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
+
+ if (column != -1 || page_addr != -1) {
+ au1550_hwcontrol(mtd, NAND_CTL_SETALE);
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (this->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ ctx->write_byte(mtd, column);
+ }
+ if (page_addr != -1) {
+ ctx->write_byte(mtd, (u8)(page_addr & 0xff));
+
+ if (command == NAND_CMD_READ0 ||
+ command == NAND_CMD_READ1 ||
+ command == NAND_CMD_READOOB) {
+ /*
+ * NAND controller will release -CE after
+ * the last address byte is written, so we'll
+ * have to forcibly assert it. No interrupts
+ * are allowed while we do this as we don't
+ * want the NOR flash or PCMCIA drivers to
+ * steal our precious bytes of data...
+ */
+ ce_override = 1;
+ local_irq_save(flags);
+ au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
+ }
+
+ ctx->write_byte(mtd, (u8)(page_addr >> 8));
+
+ /* One more address cycle for devices > 32MiB */
+ if (this->chipsize > (32 << 20))
+ ctx->write_byte(mtd,
+ ((page_addr >> 16) & 0x0f));
+ }
+ /* Latch in address */
+ au1550_hwcontrol(mtd, NAND_CTL_CLRALE);
+ }
+
+ /*
+ * Program and erase have their own busy handlers.
+ * Status and sequential in need no delay.
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ break;
+
+ case NAND_CMD_READ0:
+ case NAND_CMD_READ1:
+ case NAND_CMD_READOOB:
+ /* Check if we're really driving -CE low (just in case) */
+ if (unlikely(!ce_override))
+ break;
+
+ /* Apply a short delay always to ensure that we do wait tWB. */
+ ndelay(100);
+ /* Wait for a chip to become ready... */
+ for (i = this->chip_delay; !this->dev_ready(mtd) && i > 0; --i)
+ udelay(1);
+
+ /* Release -CE and re-enable interrupts. */
+ au1550_hwcontrol(mtd, NAND_CTL_CLRNCE);
+ local_irq_restore(flags);
+ return;
+ }
+ /* Apply this short delay always to ensure that we do wait tWB. */
+ ndelay(100);
+
+ while (!this->dev_ready(mtd));
+}
+
+static int __devinit find_nand_cs(unsigned long nand_base)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
+ unsigned long addr, staddr, start, mask, end;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ addr = 0x1000 + (i * 0x10); /* CSx */
+ staddr = __raw_readl(base + addr + 0x08); /* STADDRx */
+ /* figure out the decoded range of this CS */
+ start = (staddr << 4) & 0xfffc0000;
+ mask = (staddr << 18) & 0xfffc0000;
+ end = (start | (start - 1)) & ~(start ^ mask);
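+ /*
+ * start and mask are decoded from STADDRx; end is the exclusive
+ * upper bound of the window this chip select responds to, used in
+ * the range check below.
+ */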
+ if ((nand_base >= start) && (nand_base < end))
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static int __devinit au1550nd_probe(struct platform_device *pdev)
+{
+ struct au1550nd_platdata *pd;
+ struct au1550nd_ctx *ctx;
+ struct nand_chip *this;
+ struct resource *r;
+ int ret, cs;
+
+ pd = pdev->dev.platform_data;
+ if (!pd) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ dev_err(&pdev->dev, "no memory for NAND context\n");
+ return -ENOMEM;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no NAND memory resource\n");
+ ret = -ENODEV;
+ goto out1;
+ }
+ if (!request_mem_region(r->start, resource_size(r), "au1550-nand")) {
+ dev_err(&pdev->dev, "cannot claim NAND memory area\n");
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ ctx->base = ioremap_nocache(r->start, 0x1000);
+ if (!ctx->base) {
+ dev_err(&pdev->dev, "cannot remap NAND memory area\n");
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ this = &ctx->chip;
+ ctx->info.priv = this;
+ ctx->info.owner = THIS_MODULE;
+
+ /* figure out which CS# r->start belongs to */
+ cs = find_nand_cs(r->start);
+ if (cs < 0) {
+ dev_err(&pdev->dev, "cannot detect NAND chipselect\n");
+ ret = -ENODEV;
+ goto out3;
+ }
+ ctx->cs = cs;
+
+ this->dev_ready = au1550_device_ready;
+ this->select_chip = au1550_select_chip;
+ this->cmdfunc = au1550_command;
+
+ /* 30 us command delay time */
+ this->chip_delay = 30;
+ this->ecc.mode = NAND_ECC_SOFT;
+
+ this->options = NAND_NO_AUTOINCR;
+
+ if (pd->devwidth)
+ this->options |= NAND_BUSWIDTH_16;
+
+ this->read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte;
+ ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte;
+ this->read_word = au_read_word;
+ this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
+ this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
+ this->verify_buf = (pd->devwidth) ? au_verify_buf16 : au_verify_buf;
+
+ ret = nand_scan(&ctx->info, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
+ goto out3;
+ }
+
+ mtd_device_register(&ctx->info, pd->parts, pd->num_parts);
+
+ return 0;
+
+out3:
+ iounmap(ctx->base);
+out2:
+ release_mem_region(r->start, resource_size(r));
+out1:
+ kfree(ctx);
+ return ret;
+}
+
+static int __devexit au1550nd_remove(struct platform_device *pdev)
+{
+ struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
+ struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ nand_release(&ctx->info);
+ iounmap(ctx->base);
+ release_mem_region(r->start, resource_size(r));
+ kfree(ctx);
+ return 0;
+}
+
+static struct platform_driver au1550nd_driver = {
+ .driver = {
+ .name = "au1550-nand",
+ .owner = THIS_MODULE,
+ },
+ .probe = au1550nd_probe,
+ .remove = __devexit_p(au1550nd_remove),
+};
+
+module_platform_driver(au1550nd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Embedded Edge, LLC");
+MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on Pb1550 board");
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
new file mode 100644
index 00000000..2e42ec2e
--- /dev/null
+++ b/drivers/mtd/nand/autcpu12.c
@@ -0,0 +1,239 @@
+/*
+ * drivers/mtd/autcpu12.c
+ *
+ * Copyright (c) 2002 Thomas Gleixner <tgxl@linutronix.de>
+ *
+ * Derived from drivers/mtd/spia.c
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * autronix autcpu12 board, which carries a SmartMedia card. It supports
+ * 16MiB, 32MiB, 64MiB and 128MiB cards.
+ *
+ *
+ * 02-12-2002 TG Cleanup of module params
+ *
+ * 02-20-2002 TG adjusted for different rd/wr address support
+ * added support for read device ready/busy line
+ * added page_cache
+ *
+ * 10-06-2002 TG 128K card support added
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <mach/hardware.h>
+#include <asm/sizes.h>
+#include <mach/autcpu12.h>
+
+/*
+ * MTD structure for AUTCPU12 board
+ */
+static struct mtd_info *autcpu12_mtd = NULL;
+static void __iomem *autcpu12_fio_base;
+
+/*
+ * Define partitions for flash devices
+ */
+static struct mtd_partition partition_info16k[] = {
+ { .name = "AUTCPU12 flash partition 1",
+ .offset = 0,
+ .size = 8 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 2",
+ .offset = 8 * SZ_1M,
+ .size = 8 * SZ_1M },
+};
+
+static struct mtd_partition partition_info32k[] = {
+ { .name = "AUTCPU12 flash partition 1",
+ .offset = 0,
+ .size = 8 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 2",
+ .offset = 8 * SZ_1M,
+ .size = 24 * SZ_1M },
+};
+
+static struct mtd_partition partition_info64k[] = {
+ { .name = "AUTCPU12 flash partition 1",
+ .offset = 0,
+ .size = 16 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 2",
+ .offset = 16 * SZ_1M,
+ .size = 48 * SZ_1M },
+};
+
+static struct mtd_partition partition_info128k[] = {
+ { .name = "AUTCPU12 flash partition 1",
+ .offset = 0,
+ .size = 16 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 2",
+ .offset = 16 * SZ_1M,
+ .size = 112 * SZ_1M },
+};
+
+#define NUM_PARTITIONS16K 2
+#define NUM_PARTITIONS32K 2
+#define NUM_PARTITIONS64K 2
+#define NUM_PARTITIONS128K 2
+/*
+ * hardware specific access to control-lines
+ *
+ * ALE bit 4 autcpu12_pedr
+ * CLE bit 5 autcpu12_pedr
+ * NCE bit 0 fio_ctrl
+ *
+ */
+static void autcpu12_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ void __iomem *addr;
+ unsigned char bits;
+
+ addr = CS89712_VIRT_BASE + AUTCPU12_SMC_PORT_OFFSET;
+ bits = (ctrl & NAND_CLE) << 4;
+ bits |= (ctrl & NAND_ALE) << 2;
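+ /*
+ * NAND_CLE (0x02) << 4 lands on bit 5 and NAND_ALE (0x04) << 2 on
+ * bit 4 of the port register, matching the pin mapping noted above.
+ */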
+ writeb((readb(addr) & ~0x30) | bits, addr);
+
+ addr = autcpu12_fio_base + AUTCPU12_SMC_SELECT_OFFSET;
+ writeb((readb(addr) & ~0x1) | (ctrl & NAND_NCE), addr);
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, chip->IO_ADDR_W);
+}
+
+/*
+ * read device ready pin
+ */
+int autcpu12_device_ready(struct mtd_info *mtd)
+{
+ void __iomem *addr = CS89712_VIRT_BASE + AUTCPU12_SMC_PORT_OFFSET;
+
+ return readb(addr) & AUTCPU12_SMC_RDY;
+}
+
+/*
+ * Main initialization routine
+ */
+static int __init autcpu12_init(void)
+{
+ struct nand_chip *this;
+ int err = 0;
+
+ /* Allocate memory for MTD device structure and private data */
+ autcpu12_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
+ GFP_KERNEL);
+ if (!autcpu12_mtd) {
+ printk("Unable to allocate AUTCPU12 NAND MTD device structure.\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* map physical address */
+ autcpu12_fio_base = ioremap(AUTCPU12_PHYS_SMC, SZ_1K);
+ if (!autcpu12_fio_base) {
+ printk("Ioremap autcpu12 SmartMedia Card failed\n");
+ err = -EIO;
+ goto out_mtd;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&autcpu12_mtd[1]);
+
+ /* Initialize structures */
+ memset(autcpu12_mtd, 0, sizeof(struct mtd_info));
+ memset(this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ autcpu12_mtd->priv = this;
+ autcpu12_mtd->owner = THIS_MODULE;
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_R = autcpu12_fio_base;
+ this->IO_ADDR_W = autcpu12_fio_base;
+ this->cmd_ctrl = autcpu12_hwcontrol;
+ this->dev_ready = autcpu12_device_ready;
+ /* 20 us command delay time */
+ this->chip_delay = 20;
+ this->ecc.mode = NAND_ECC_SOFT;
+
+ /* Use a flash based bad block table */
+ this->bbt_options = NAND_BBT_USE_FLASH;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(autcpu12_mtd, 1)) {
+ err = -ENXIO;
+ goto out_ior;
+ }
+
+ /* Register the partitions */
+ switch (autcpu12_mtd->size) {
+ case SZ_16M:
+ mtd_device_register(autcpu12_mtd, partition_info16k,
+ NUM_PARTITIONS16K);
+ break;
+ case SZ_32M:
+ mtd_device_register(autcpu12_mtd, partition_info32k,
+ NUM_PARTITIONS32K);
+ break;
+ case SZ_64M:
+ mtd_device_register(autcpu12_mtd, partition_info64k,
+ NUM_PARTITIONS64K);
+ break;
+ case SZ_128M:
+ mtd_device_register(autcpu12_mtd, partition_info128k,
+ NUM_PARTITIONS128K);
+ break;
+ default:
+ printk("Unsupported SmartMedia device\n");
+ err = -ENXIO;
+ goto out_ior;
+ }
+ goto out;
+
+ out_ior:
+ iounmap(autcpu12_fio_base);
+ out_mtd:
+ kfree(autcpu12_mtd);
+ out:
+ return err;
+}
+
+module_init(autcpu12_init);
+
+/*
+ * Clean up routine
+ */
+static void __exit autcpu12_cleanup(void)
+{
+ /* Release resources, unregister device */
+ nand_release(autcpu12_mtd);
+
+ /* unmap physical address */
+ iounmap(autcpu12_fio_base);
+
+ /* Free the MTD device structure */
+ kfree(autcpu12_mtd);
+}
+
+module_exit(autcpu12_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("Glue layer for SmartMediaCard on autronix autcpu12");
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c
new file mode 100644
index 00000000..a930666d
--- /dev/null
+++ b/drivers/mtd/nand/bcm_umi_bch.c
@@ -0,0 +1,213 @@
+/*****************************************************************************
+* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2, available at
+* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a
+* license other than the GPL, without Broadcom's express prior written
+* consent.
+*****************************************************************************/
+
+/* ---- Include Files ---------------------------------------------------- */
+#include "nand_bcm_umi.h"
+
+/* ---- External Variable Declarations ----------------------------------- */
+/* ---- External Function Prototypes ------------------------------------- */
+/* ---- Public Variables ------------------------------------------------- */
+/* ---- Private Constants and Types -------------------------------------- */
+
+/* ---- Private Function Prototypes -------------------------------------- */
+static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int page);
+static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf);
+
+/* ---- Private Variables ------------------------------------------------ */
+
+/*
+** nand_hw_eccoob
+** New oob placement block for use with hardware ecc generation.
+*/
+static struct nand_ecclayout nand_hw_eccoob_512 = {
+ /* Reserve 5 for BI indicator */
+ .oobfree = {
+#if (NAND_ECC_NUM_BYTES > 3)
+ {.offset = 0, .length = 2}
+#else
+ {.offset = 0, .length = 5},
+ {.offset = 6, .length = 7}
+#endif
+ }
+};
+
+/*
+** We treat the OOB for a 2K page as if it were 4 512 byte oobs,
+** except the BI is at byte 0.
+*/
+static struct nand_ecclayout nand_hw_eccoob_2048 = {
+ /* Reserve 0 as BI indicator */
+ .oobfree = {
+#if (NAND_ECC_NUM_BYTES > 10)
+ {.offset = 1, .length = 2},
+#elif (NAND_ECC_NUM_BYTES > 7)
+ {.offset = 1, .length = 5},
+ {.offset = 16, .length = 6},
+ {.offset = 32, .length = 6},
+ {.offset = 48, .length = 6}
+#else
+ {.offset = 1, .length = 8},
+ {.offset = 16, .length = 9},
+ {.offset = 32, .length = 9},
+ {.offset = 48, .length = 9}
+#endif
+ }
+};
+
+/* We treat the OOB for a 4K page as if it were 8 512 byte oobs,
+ * except the BI is at byte 0. */
+static struct nand_ecclayout nand_hw_eccoob_4096 = {
+ /* Reserve 0 as BI indicator */
+ .oobfree = {
+#if (NAND_ECC_NUM_BYTES > 10)
+ {.offset = 1, .length = 2},
+ {.offset = 16, .length = 3},
+ {.offset = 32, .length = 3},
+ {.offset = 48, .length = 3},
+ {.offset = 64, .length = 3},
+ {.offset = 80, .length = 3},
+ {.offset = 96, .length = 3},
+ {.offset = 112, .length = 3}
+#else
+ {.offset = 1, .length = 5},
+ {.offset = 16, .length = 6},
+ {.offset = 32, .length = 6},
+ {.offset = 48, .length = 6},
+ {.offset = 64, .length = 6},
+ {.offset = 80, .length = 6},
+ {.offset = 96, .length = 6},
+ {.offset = 112, .length = 6}
+#endif
+ }
+};
+
+/* ---- Private Functions ------------------------------------------------ */
+/* ==== Public Functions ================================================= */
+
+/****************************************************************************
+*
+* bcm_umi_bch_read_page_hwecc - hardware ecc based page read function
+* @mtd: mtd info structure
+* @chip: nand chip info structure
+* @buf: buffer to store read data
+*
+***************************************************************************/
+static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t * buf,
+ int page)
+{
+ int sectorIdx = 0;
+ int eccsize = chip->ecc.size;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *datap = buf;
+ uint8_t eccCalc[NAND_ECC_NUM_BYTES];
+ int sectorOobSize = mtd->oobsize / eccsteps;
+ int stat;
+
+ for (sectorIdx = 0; sectorIdx < eccsteps;
+ sectorIdx++, datap += eccsize) {
+ if (sectorIdx > 0) {
+ /* Seek to page location within sector */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, sectorIdx * eccsize,
+ -1);
+ }
+
+ /* Enable hardware ECC before reading the buf */
+ nand_bcm_umi_bch_enable_read_hwecc();
+
+ /* Read in data */
+ bcm_umi_nand_read_buf(mtd, datap, eccsize);
+
+ /* Pause hardware ECC after reading the buf */
+ nand_bcm_umi_bch_pause_read_ecc_calc();
+
+ /* Read the OOB ECC */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + sectorIdx * sectorOobSize, -1);
+ nand_bcm_umi_bch_read_oobEcc(mtd->writesize, eccCalc,
+ NAND_ECC_NUM_BYTES,
+ chip->oob_poi +
+ sectorIdx * sectorOobSize);
+
+ /* Correct any ECC detected errors */
+ stat =
+ nand_bcm_umi_bch_correct_page(datap, eccCalc,
+ NAND_ECC_NUM_BYTES);
+
+ /* Update Stats */
+ if (stat < 0) {
+#if defined(NAND_BCM_UMI_DEBUG)
+ printk(KERN_WARNING "%s uncorr_err sectorIdx=%d\n",
+ __func__, sectorIdx);
+ printk(KERN_WARNING
+ "%s data %02x %02x %02x %02x "
+ "%02x %02x %02x %02x\n",
+ __func__, datap[0], datap[1], datap[2], datap[3],
+ datap[4], datap[5], datap[6], datap[7]);
+ printk(KERN_WARNING
+ "%s ecc %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x\n",
+ __func__, eccCalc[0], eccCalc[1], eccCalc[2],
+ eccCalc[3], eccCalc[4], eccCalc[5], eccCalc[6],
+ eccCalc[7], eccCalc[8], eccCalc[9], eccCalc[10],
+ eccCalc[11], eccCalc[12]);
+ BUG();
+#endif
+ mtd->ecc_stats.failed++;
+ } else {
+#if defined(NAND_BCM_UMI_DEBUG)
+ if (stat > 0) {
+ printk(KERN_INFO
+ "%s %d correctable_errors detected\n",
+ __func__, stat);
+ }
+#endif
+ mtd->ecc_stats.corrected += stat;
+ }
+ }
+ return 0;
+}
+
+/****************************************************************************
+*
+* bcm_umi_bch_write_page_hwecc - hardware ecc based page write function
+* @mtd: mtd info structure
+* @chip: nand chip info structure
+* @buf: data buffer
+*
+***************************************************************************/
+static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf)
+{
+ int sectorIdx = 0;
+ int eccsize = chip->ecc.size;
+ int eccsteps = chip->ecc.steps;
+ const uint8_t *datap = buf;
+ uint8_t *oobp = chip->oob_poi;
+ int sectorOobSize = mtd->oobsize / eccsteps;
+
+ for (sectorIdx = 0; sectorIdx < eccsteps;
+ sectorIdx++, datap += eccsize, oobp += sectorOobSize) {
+ /* Enable hardware ECC before writing the buf */
+ nand_bcm_umi_bch_enable_write_hwecc();
+ bcm_umi_nand_write_buf(mtd, datap, eccsize);
+ nand_bcm_umi_bch_write_oobEcc(mtd->writesize, oobp,
+ NAND_ECC_NUM_BYTES);
+ }
+
+ bcm_umi_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
new file mode 100644
index 00000000..6908cdde
--- /dev/null
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -0,0 +1,560 @@
+/*****************************************************************************
+* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2, available at
+* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a
+* license other than the GPL, without Broadcom's express prior written
+* consent.
+*****************************************************************************/
+
+/* ---- Include Files ---------------------------------------------------- */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/mach-types.h>
+
+#include <mach/reg_nand.h>
+#include <mach/reg_umi.h>
+
+#include "nand_bcm_umi.h"
+
+#include <mach/memory_settings.h>
+
+#define USE_DMA 1
+#include <mach/dma.h>
+#include <linux/dma-mapping.h>
+#include <linux/completion.h>
+
+/* ---- External Variable Declarations ----------------------------------- */
+/* ---- External Function Prototypes ------------------------------------- */
+/* ---- Public Variables ------------------------------------------------- */
+/* ---- Private Constants and Types -------------------------------------- */
+static const __devinitconst char gBanner[] = KERN_INFO \
+ "BCM UMI MTD NAND Driver: 1.00\n";
+
+#if NAND_ECC_BCH
+static uint8_t scan_ff_pattern[] = { 0xff };
+
+static struct nand_bbt_descr largepage_bbt = {
+ .options = 0,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern
+};
+#endif
+
+/*
+** Preallocate a buffer to avoid having to do this every dma operation.
+** This is the size of the preallocated coherent DMA buffer.
+*/
+#if USE_DMA
+#define DMA_MIN_BUFLEN 512
+#define DMA_MAX_BUFLEN PAGE_SIZE
+#define USE_DIRECT_IO(len) (((len) < DMA_MIN_BUFLEN) || \
+ ((len) > DMA_MAX_BUFLEN))
+
+/*
+ * The current NAND data space goes from 0x80001900 to 0x80001FFF,
+ * which is only 0x700 = 1792 bytes long. This is too small for 2K, 4K page
+ * size NAND flash. Need to break the DMA down to multiple 1Ks.
+ *
+ * Need to make sure REG_NAND_DATA_PADDR + DMA_MAX_LEN < 0x80002000
+ */
+#define DMA_MAX_LEN 1024
+
+#else /* !USE_DMA */
+#define DMA_MIN_BUFLEN 0
+#define DMA_MAX_BUFLEN 0
+#define USE_DIRECT_IO(len) 1
+#endif
+/* ---- Private Function Prototypes -------------------------------------- */
+static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len);
+static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
+ int len);
+
+/* ---- Private Variables ------------------------------------------------ */
+static struct mtd_info *board_mtd;
+static void __iomem *bcm_umi_io_base;
+static void *virtPtr;
+static dma_addr_t physPtr;
+static struct completion nand_comp;
+
+/* ---- Private Functions ------------------------------------------------ */
+#if NAND_ECC_BCH
+#include "bcm_umi_bch.c"
+#else
+#include "bcm_umi_hamming.c"
+#endif
+
+#if USE_DMA
+
+/* Handler called when the DMA finishes. */
+static void nand_dma_handler(DMA_Device_t dev, int reason, void *userData)
+{
+ complete(&nand_comp);
+}
+
+static int nand_dma_init(void)
+{
+ int rc;
+
+ rc = dma_set_device_handler(DMA_DEVICE_NAND_MEM_TO_MEM,
+ nand_dma_handler, NULL);
+ if (rc != 0) {
+ printk(KERN_ERR "dma_set_device_handler failed: %d\n", rc);
+ return rc;
+ }
+
+ virtPtr =
+ dma_alloc_coherent(NULL, DMA_MAX_BUFLEN, &physPtr, GFP_KERNEL);
+ if (virtPtr == NULL) {
+ printk(KERN_ERR "NAND - Failed to allocate memory for DMA buffer\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void nand_dma_term(void)
+{
+ if (virtPtr != NULL)
+ dma_free_coherent(NULL, DMA_MAX_BUFLEN, virtPtr, physPtr);
+}
+
+static void nand_dma_read(void *buf, int len)
+{
+ int offset = 0;
+ int tmp_len = 0;
+ int len_left = len;
+ DMA_Handle_t hndl;
+
+ if (virtPtr == NULL)
+ panic("nand_dma_read: virtPtr == NULL\n");
+
+ if ((void *)physPtr == NULL)
+ panic("nand_dma_read: physPtr == NULL\n");
+
+ hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
+ if (hndl < 0) {
+ printk(KERN_ERR
+ "nand_dma_read: unable to allocate dma channel: %d\n",
+ (int)hndl);
+ panic("\n");
+ }
+
+ while (len_left > 0) {
+ if (len_left > DMA_MAX_LEN) {
+ tmp_len = DMA_MAX_LEN;
+ len_left -= DMA_MAX_LEN;
+ } else {
+ tmp_len = len_left;
+ len_left = 0;
+ }
+
+ init_completion(&nand_comp);
+ dma_transfer_mem_to_mem(hndl, REG_NAND_DATA_PADDR,
+ physPtr + offset, tmp_len);
+ wait_for_completion(&nand_comp);
+
+ offset += tmp_len;
+ }
+
+ dma_free_channel(hndl);
+
+ if (buf != NULL)
+ memcpy(buf, virtPtr, len);
+}
+
+static void nand_dma_write(const void *buf, int len)
+{
+ int offset = 0;
+ int tmp_len = 0;
+ int len_left = len;
+ DMA_Handle_t hndl;
+
+ if (buf == NULL)
+ panic("nand_dma_write: buf == NULL\n");
+
+ if (virtPtr == NULL)
+ panic("nand_dma_write: virtPtr == NULL\n");
+
+ if ((void *)physPtr == NULL)
+ panic("nand_dma_write: physPtr == NULL\n");
+
+ memcpy(virtPtr, buf, len);
+
+
+ hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
+ if (hndl < 0) {
+ printk(KERN_ERR
+ "nand_dma_write: unable to allocate dma channel: %d\n",
+ (int)hndl);
+ panic("\n");
+ }
+
+ while (len_left > 0) {
+ if (len_left > DMA_MAX_LEN) {
+ tmp_len = DMA_MAX_LEN;
+ len_left -= DMA_MAX_LEN;
+ } else {
+ tmp_len = len_left;
+ len_left = 0;
+ }
+
+ init_completion(&nand_comp);
+ dma_transfer_mem_to_mem(hndl, physPtr + offset,
+ REG_NAND_DATA_PADDR, tmp_len);
+ wait_for_completion(&nand_comp);
+
+ offset += tmp_len;
+ }
+
+ dma_free_channel(hndl);
+}
+
+#endif
+
+static int nand_dev_ready(struct mtd_info *mtd)
+{
+ return nand_bcm_umi_dev_ready();
+}
+
+/****************************************************************************
+*
+* bcm_umi_nand_inithw
+*
+* This routine does the necessary hardware (board-specific)
+* initializations. This includes setting up the timings, etc.
+*
+***************************************************************************/
+int bcm_umi_nand_inithw(void)
+{
+ /* Configure nand timing parameters */
+ REG_UMI_NAND_TCR &= ~0x7ffff;
+ REG_UMI_NAND_TCR |= HW_CFG_NAND_TCR;
+
+#if !defined(CONFIG_MTD_NAND_BCM_UMI_HWCS)
+ /* enable software control of CS */
+ REG_UMI_NAND_TCR |= REG_UMI_NAND_TCR_CS_SWCTRL;
+#endif
+
+ /* keep NAND chip select asserted */
+ REG_UMI_NAND_RCSR |= REG_UMI_NAND_RCSR_CS_ASSERTED;
+
+ REG_UMI_NAND_TCR &= ~REG_UMI_NAND_TCR_WORD16;
+ /* enable writes to flash */
+ REG_UMI_MMD_ICR |= REG_UMI_MMD_ICR_FLASH_WP;
+
+ writel(NAND_CMD_RESET, bcm_umi_io_base + REG_NAND_CMD_OFFSET);
+ nand_bcm_umi_wait_till_ready();
+
+#if NAND_ECC_BCH
+ nand_bcm_umi_bch_config_ecc(NAND_ECC_NUM_BYTES);
+#endif
+
+ return 0;
+}
+
+/* Used to latch the proper register for access. */
+static void bcm_umi_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ /* send command to hardware */
+ struct nand_chip *chip = mtd->priv;
+ if (ctrl & NAND_CTRL_CHANGE) {
+ if (ctrl & NAND_CLE) {
+ chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_CMD_OFFSET;
+ goto CMD;
+ }
+ if (ctrl & NAND_ALE) {
+ chip->IO_ADDR_W =
+ bcm_umi_io_base + REG_NAND_ADDR_OFFSET;
+ goto CMD;
+ }
+ chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
+ }
+
+CMD:
+ /* Send command to chip directly */
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, chip->IO_ADDR_W);
+}
+
+static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
+ int len)
+{
+ if (USE_DIRECT_IO(len)) {
+ /* Do it the old way if the buffer is small or too large.
+ * Probably quicker than starting and checking dma. */
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i = 0; i < len; i++)
+ writeb(buf[i], this->IO_ADDR_W);
+ }
+#if USE_DMA
+ else
+ nand_dma_write(buf, len);
+#endif
+}
+
+static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len)
+{
+ if (USE_DIRECT_IO(len)) {
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i = 0; i < len; i++)
+ buf[i] = readb(this->IO_ADDR_R);
+ }
+#if USE_DMA
+ else
+ nand_dma_read(buf, len);
+#endif
+}
+
+static uint8_t readbackbuf[NAND_MAX_PAGESIZE];
+static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
+ int len)
+{
+ /*
+ * Try to read back the page with ECC correction. This is necessary
+ * for MLC parts which may have permanently stuck bits.
+ */
+ struct nand_chip *chip = mtd->priv;
+ int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0);
+ if (ret < 0)
+ return -EFAULT;
+ else {
+ if (memcmp(readbackbuf, buf, len) == 0)
+ return 0;
+
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
+{
+ struct nand_chip *this;
+ struct resource *r;
+ int err = 0;
+
+ printk(gBanner);
+
+ /* Allocate memory for MTD device structure and private data */
+ board_mtd =
+ kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
+ GFP_KERNEL);
+ if (!board_mtd) {
+ printk(KERN_WARNING
+ "Unable to allocate NAND MTD device structure.\n");
+ return -ENOMEM;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!r) {
+ err = -ENXIO;
+ goto out_free;
+ }
+
+ /* map physical address */
+ bcm_umi_io_base = ioremap(r->start, resource_size(r));
+
+ if (!bcm_umi_io_base) {
+ printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n");
+ err = -EIO;
+ goto out_free;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&board_mtd[1]);
+
+ /* Initialize structures */
+ memset((char *)board_mtd, 0, sizeof(struct mtd_info));
+ memset((char *)this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ board_mtd->priv = this;
+
+ /* Initialize the NAND hardware. */
+ if (bcm_umi_nand_inithw() < 0) {
+ printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n");
+ err = -EIO;
+ goto out_unmap;
+ }
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
+ this->IO_ADDR_R = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
+
+ /* Set command delay time, see datasheet for correct value */
+ this->chip_delay = 0;
+ /* Assign the device ready function, if available */
+ this->dev_ready = nand_dev_ready;
+ this->options = 0;
+
+ this->write_buf = bcm_umi_nand_write_buf;
+ this->read_buf = bcm_umi_nand_read_buf;
+ this->verify_buf = bcm_umi_nand_verify_buf;
+
+ this->cmd_ctrl = bcm_umi_nand_hwcontrol;
+ this->ecc.mode = NAND_ECC_HW;
+ this->ecc.size = 512;
+ this->ecc.bytes = NAND_ECC_NUM_BYTES;
+#if NAND_ECC_BCH
+ this->ecc.read_page = bcm_umi_bch_read_page_hwecc;
+ this->ecc.write_page = bcm_umi_bch_write_page_hwecc;
+#else
+ this->ecc.correct = nand_correct_data512;
+ this->ecc.calculate = bcm_umi_hamming_get_hw_ecc;
+ this->ecc.hwctl = bcm_umi_hamming_enable_hwecc;
+#endif
+
+#if USE_DMA
+ err = nand_dma_init();
+ if (err != 0)
+ goto out_unmap;
+#endif
+
+ /* Figure out the size of the device that we have.
+ * We need to do this to figure out which ECC
+ * layout we'll be using.
+ */
+
+ err = nand_scan_ident(board_mtd, 1, NULL);
+ if (err) {
+ printk(KERN_ERR "nand_scan failed: %d\n", err);
+ goto out_unmap;
+ }
+
+ /* Now that we know the nand size, we can setup the ECC layout */
+
+ switch (board_mtd->writesize) { /* writesize is the pagesize */
+ case 4096:
+ this->ecc.layout = &nand_hw_eccoob_4096;
+ break;
+ case 2048:
+ this->ecc.layout = &nand_hw_eccoob_2048;
+ break;
+ case 512:
+ this->ecc.layout = &nand_hw_eccoob_512;
+ break;
+ default:
+ {
+ printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n",
+ board_mtd->writesize);
+ err = -EINVAL;
+ goto out_unmap;
+ }
+ }
+
+#if NAND_ECC_BCH
+ if (board_mtd->writesize > 512) {
+ if (this->bbt_options & NAND_BBT_USE_FLASH)
+ largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
+ this->badblock_pattern = &largepage_bbt;
+ }
+
+ /*
+ * FIXME: ecc strength value of 6 bits per 512 bytes of data is a
+ * conservative guess, given 13 ecc bytes and using bch alg.
+ * (Assume Galois field order m=15 to allow a margin of error.)
+ */
+ this->ecc.strength = 6;
+
+#endif
+
+ /* Now finish off the scan, now that ecc.layout has been initialized. */
+
+ err = nand_scan_tail(board_mtd);
+ if (err) {
+ printk(KERN_ERR "nand_scan failed: %d\n", err);
+ goto out_unmap;
+ }
+
+ /* Register the partitions */
+ board_mtd->name = "bcm_umi-nand";
+ mtd_device_parse_register(board_mtd, NULL, NULL, NULL, 0);
+
+ /* Return happy */
+ return 0;
+out_unmap:
+ iounmap(bcm_umi_io_base);
+out_free:
+ kfree(board_mtd);
+ return err;
+}
+
+static int bcm_umi_nand_remove(struct platform_device *pdev)
+{
+#if USE_DMA
+ nand_dma_term();
+#endif
+
+ /* Release resources, unregister device */
+ nand_release(board_mtd);
+
+ /* unmap physical address */
+ iounmap(bcm_umi_io_base);
+
+ /* Free the MTD device structure */
+ kfree(board_mtd);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bcm_umi_nand_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ printk(KERN_ERR "MTD NAND suspend is being called\n");
+ return 0;
+}
+
+static int bcm_umi_nand_resume(struct platform_device *pdev)
+{
+ printk(KERN_ERR "MTD NAND resume is being called\n");
+ return 0;
+}
+#else
+#define bcm_umi_nand_suspend NULL
+#define bcm_umi_nand_resume NULL
+#endif
+
+static struct platform_driver nand_driver = {
+ .driver = {
+ .name = "bcm-nand",
+ .owner = THIS_MODULE,
+ },
+ .probe = bcm_umi_nand_probe,
+ .remove = bcm_umi_nand_remove,
+ .suspend = bcm_umi_nand_suspend,
+ .resume = bcm_umi_nand_resume,
+};
+
+module_platform_driver(nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("BCM UMI MTD NAND driver");
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
new file mode 100644
index 00000000..d7b86b92
--- /dev/null
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -0,0 +1,894 @@
+/* linux/drivers/mtd/nand/bf5xx_nand.c
+ *
+ * Copyright 2006-2008 Analog Devices Inc.
+ * http://blackfin.uclinux.org/
+ * Bryan Wu <bryan.wu@analog.com>
+ *
+ * Blackfin BF5xx on-chip NAND flash controller driver
+ *
+ * Derived from drivers/mtd/nand/s3c2410.c
+ * Copyright (c) 2007 Ben Dooks <ben@simtec.co.uk>
+ *
+ * Derived from drivers/mtd/nand/cafe.c
+ * Copyright © 2006 Red Hat, Inc.
+ * Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
+ *
+ * Changelog:
+ * 12-Jun-2007 Bryan Wu: Initial version
+ * 18-Jul-2007 Bryan Wu:
+ * - ECC_HW and ECC_SW supported
+ * - DMA supported in ECC_HW
+ * - YAFFS tested as rootfs in both ECC_HW and ECC_SW
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/blackfin.h>
+#include <asm/dma.h>
+#include <asm/cacheflush.h>
+#include <asm/nand.h>
+#include <asm/portmux.h>
+
+#define DRV_NAME "bf5xx-nand"
+#define DRV_VERSION "1.2"
+#define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>"
+#define DRV_DESC	"BF5xx on-chip NAND Flash Controller Driver"
+
+/* NFC_STAT Masks */
+#define NBUSY 0x01 /* Not Busy */
+#define WB_FULL 0x02 /* Write Buffer Full */
+#define PG_WR_STAT 0x04 /* Page Write Pending */
+#define PG_RD_STAT 0x08 /* Page Read Pending */
+#define WB_EMPTY 0x10 /* Write Buffer Empty */
+
+/* NFC_IRQSTAT Masks */
+#define NBUSYIRQ 0x01 /* Not Busy IRQ */
+#define WB_OVF 0x02 /* Write Buffer Overflow */
+#define WB_EDGE 0x04 /* Write Buffer Edge Detect */
+#define RD_RDY 0x08 /* Read Data Ready */
+#define WR_DONE 0x10 /* Page Write Done */
+
+/* NFC_RST Masks */
+#define ECC_RST 0x01 /* ECC (and NFC counters) Reset */
+
+/* NFC_PGCTL Masks */
+#define PG_RD_START 0x01 /* Page Read Start */
+#define PG_WR_START 0x02 /* Page Write Start */
+
+#ifdef CONFIG_MTD_NAND_BF5XX_HWECC
+static int hardware_ecc = 1;
+#else
+static int hardware_ecc;
+#endif
+
+static const unsigned short bfin_nfc_pin_req[] =
+ {P_NAND_CE,
+ P_NAND_RB,
+ P_NAND_D0,
+ P_NAND_D1,
+ P_NAND_D2,
+ P_NAND_D3,
+ P_NAND_D4,
+ P_NAND_D5,
+ P_NAND_D6,
+ P_NAND_D7,
+ P_NAND_WE,
+ P_NAND_RE,
+ P_NAND_CLE,
+ P_NAND_ALE,
+ 0};
+
+#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
+static struct nand_ecclayout bootrom_ecclayout = {
+ .eccbytes = 24,
+ .eccpos = {
+ 0x8 * 0, 0x8 * 0 + 1, 0x8 * 0 + 2,
+ 0x8 * 1, 0x8 * 1 + 1, 0x8 * 1 + 2,
+ 0x8 * 2, 0x8 * 2 + 1, 0x8 * 2 + 2,
+ 0x8 * 3, 0x8 * 3 + 1, 0x8 * 3 + 2,
+ 0x8 * 4, 0x8 * 4 + 1, 0x8 * 4 + 2,
+ 0x8 * 5, 0x8 * 5 + 1, 0x8 * 5 + 2,
+ 0x8 * 6, 0x8 * 6 + 1, 0x8 * 6 + 2,
+ 0x8 * 7, 0x8 * 7 + 1, 0x8 * 7 + 2
+ },
+ .oobfree = {
+ { 0x8 * 0 + 3, 5 },
+ { 0x8 * 1 + 3, 5 },
+ { 0x8 * 2 + 3, 5 },
+ { 0x8 * 3 + 3, 5 },
+ { 0x8 * 4 + 3, 5 },
+ { 0x8 * 5 + 3, 5 },
+ { 0x8 * 6 + 3, 5 },
+ { 0x8 * 7 + 3, 5 },
+ }
+};
+#endif
+
+/*
+ * Data structures for bf5xx nand flash controller driver
+ */
+
+/* bf5xx nand info */
+struct bf5xx_nand_info {
+ /* mtd info */
+ struct nand_hw_control controller;
+ struct mtd_info mtd;
+ struct nand_chip chip;
+
+ /* platform info */
+ struct bf5xx_nand_platform *platform;
+
+ /* device info */
+ struct device *device;
+
+ /* DMA stuff */
+ struct completion dma_completion;
+};
+
+/*
+ * Conversion functions
+ */
+static struct bf5xx_nand_info *mtd_to_nand_info(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct bf5xx_nand_info, mtd);
+}
+
+static struct bf5xx_nand_info *to_nand_info(struct platform_device *pdev)
+{
+ return platform_get_drvdata(pdev);
+}
+
+static struct bf5xx_nand_platform *to_nand_plat(struct platform_device *pdev)
+{
+ return pdev->dev.platform_data;
+}
+
+/*
+ * struct nand_chip interface function pointers
+ */
+
+/*
+ * bf5xx_nand_hwcontrol
+ *
+ * Issue command and address cycles to the chip
+ */
+static void bf5xx_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ while (bfin_read_NFC_STAT() & WB_FULL)
+ cpu_relax();
+
+ if (ctrl & NAND_CLE)
+ bfin_write_NFC_CMD(cmd);
+ else if (ctrl & NAND_ALE)
+ bfin_write_NFC_ADDR(cmd);
+ SSYNC();
+}
+
+/*
+ * bf5xx_nand_devready()
+ *
+ * returns 0 if the nand is busy, 1 if it is ready
+ */
+static int bf5xx_nand_devready(struct mtd_info *mtd)
+{
+ unsigned short val = bfin_read_NFC_STAT();
+
+ if ((val & NBUSY) == NBUSY)
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * ECC functions
+ * These allow the bf5xx to use the controller's ECC
+ * generator block to ECC the data as it passes through
+ */
+
+/*
+ * ECC error correction function
+ */
+static int bf5xx_nand_correct_data_256(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+ u32 syndrome[5];
+ u32 calced, stored;
+ int i;
+ unsigned short failing_bit, failing_byte;
+ u_char data;
+
+ calced = calc_ecc[0] | (calc_ecc[1] << 8) | (calc_ecc[2] << 16);
+ stored = read_ecc[0] | (read_ecc[1] << 8) | (read_ecc[2] << 16);
+
+ syndrome[0] = (calced ^ stored);
+
+ /*
+ * syndrome 0: all zero
+ * No error in data
+ * No action
+ */
+ if (!syndrome[0] || !calced || !stored)
+ return 0;
+
+ /*
+	 * syndrome 0: only one bit is one
+ * ECC data was incorrect
+ * No action
+ */
+ if (hweight32(syndrome[0]) == 1) {
+ dev_err(info->device, "ECC data was incorrect!\n");
+ return 1;
+ }
+
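+	/*
+	 * A sketch of the scheme used below: syndrome[1] XORs the low 11-bit
+	 * parity words of the calculated and stored ECC; when exactly one bit
+	 * flipped it encodes the failing position (byte offset in the upper
+	 * bits, bit number in the low three bits, as used further down).
+	 * syndrome[2]/[3] XOR the two 11-bit halves of each ECC word, and
+	 * syndrome[4] == 0x7FF is the signature of a single correctable error.
+	 */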
+ syndrome[1] = (calced & 0x7FF) ^ (stored & 0x7FF);
+ syndrome[2] = (calced & 0x7FF) ^ ((calced >> 11) & 0x7FF);
+ syndrome[3] = (stored & 0x7FF) ^ ((stored >> 11) & 0x7FF);
+ syndrome[4] = syndrome[2] ^ syndrome[3];
+
+ for (i = 0; i < 5; i++)
+ dev_info(info->device, "syndrome[%d] 0x%08x\n", i, syndrome[i]);
+
+ dev_info(info->device,
+ "calced[0x%08x], stored[0x%08x]\n",
+ calced, stored);
+
+ /*
+	 * syndrome 0: exactly 11 bits are one, each parity
+ * and parity' pair is 1 & 0 or 0 & 1.
+ * 1-bit correctable error
+ * Correct the error
+ */
+ if (hweight32(syndrome[0]) == 11 && syndrome[4] == 0x7FF) {
+ dev_info(info->device,
+ "1-bit correctable error, correct it.\n");
+ dev_info(info->device,
+ "syndrome[1] 0x%08x\n", syndrome[1]);
+
+ failing_bit = syndrome[1] & 0x7;
+ failing_byte = syndrome[1] >> 0x3;
+ data = *(dat + failing_byte);
+ data = data ^ (0x1 << failing_bit);
+ *(dat + failing_byte) = data;
+
+ return 0;
+ }
+
+ /*
+	 * syndrome 0: random data
+ * More than 1-bit error, non-correctable error
+ * Discard data, mark bad block
+ */
+ dev_err(info->device,
+ "More than 1-bit error, non-correctable error.\n");
+ dev_err(info->device,
+ "Please discard data, mark bad block\n");
+
+ return 1;
+}
+
+static int bf5xx_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ ret = bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc);
+
+ /* If ecc size is 512, correct second 256 bytes */
+ if (chip->ecc.size == 512) {
+ dat += 256;
+ read_ecc += 3;
+ calc_ecc += 3;
+ ret |= bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc);
+ }
+
+ return ret;
+}
+
+static void bf5xx_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ return;
+}
+
+static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+ struct nand_chip *chip = mtd->priv;
+ u16 ecc0, ecc1;
+ u32 code[2];
+ u8 *p;
+
+ /* first 3 bytes ECC code for 256 page size */
+ ecc0 = bfin_read_NFC_ECC0();
+ ecc1 = bfin_read_NFC_ECC1();
+
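+	/*
+	 * Each NFC ECC register holds an 11-bit parity word; the pair covering
+	 * the first 256-byte chunk is packed into 22 bits here and its low
+	 * three bytes become the returned ECC code.
+	 */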
+ code[0] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11);
+
+ dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]);
+
+ p = (u8 *) code;
+ memcpy(ecc_code, p, 3);
+
+ /* second 3 bytes ECC code for 512 ecc size */
+ if (chip->ecc.size == 512) {
+ ecc0 = bfin_read_NFC_ECC2();
+ ecc1 = bfin_read_NFC_ECC3();
+ code[1] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11);
+
+ /* second 3 bytes in ecc_code for second 256
+ * bytes of 512 page size
+ */
+ p = (u8 *) (code + 1);
+ memcpy((ecc_code + 3), p, 3);
+ dev_dbg(info->device, "returning ecc 0x%08x\n", code[1]);
+ }
+
+ return 0;
+}
+
+/*
+ * PIO mode for buffer writing and reading
+ */
+static void bf5xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ unsigned short val;
+
+ /*
+ * Data reads are requested by first writing to NFC_DATA_RD
+ * and then reading back from NFC_READ.
+ */
+ for (i = 0; i < len; i++) {
+ while (bfin_read_NFC_STAT() & WB_FULL)
+ cpu_relax();
+
+ /* Contents do not matter */
+ bfin_write_NFC_DATA_RD(0x0000);
+ SSYNC();
+
+ while ((bfin_read_NFC_IRQSTAT() & RD_RDY) != RD_RDY)
+ cpu_relax();
+
+ buf[i] = bfin_read_NFC_READ();
+
+ val = bfin_read_NFC_IRQSTAT();
+ val |= RD_RDY;
+ bfin_write_NFC_IRQSTAT(val);
+ SSYNC();
+ }
+}
+
+static uint8_t bf5xx_nand_read_byte(struct mtd_info *mtd)
+{
+ uint8_t val;
+
+ bf5xx_nand_read_buf(mtd, &val, 1);
+
+ return val;
+}
+
+static void bf5xx_nand_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ while (bfin_read_NFC_STAT() & WB_FULL)
+ cpu_relax();
+
+ bfin_write_NFC_DATA_WR(buf[i]);
+ SSYNC();
+ }
+}
+
+static void bf5xx_nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ /*
+ * Data reads are requested by first writing to NFC_DATA_RD
+ * and then reading back from NFC_READ.
+ */
+ bfin_write_NFC_DATA_RD(0x5555);
+
+ SSYNC();
+
+ for (i = 0; i < len; i++)
+ p[i] = bfin_read_NFC_READ();
+}
+
+static void bf5xx_nand_write_buf16(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ int i;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ bfin_write_NFC_DATA_WR(p[i]);
+
+ SSYNC();
+}
+
+/*
+ * DMA functions for buffer writing and reading
+ */
+static irqreturn_t bf5xx_nand_dma_irq(int irq, void *dev_id)
+{
+ struct bf5xx_nand_info *info = dev_id;
+
+ clear_dma_irqstat(CH_NFC);
+ disable_dma(CH_NFC);
+ complete(&info->dma_completion);
+
+ return IRQ_HANDLED;
+}
+
+static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
+ uint8_t *buf, int is_read)
+{
+ struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned short val;
+
+ dev_dbg(info->device, " mtd->%p, buf->%p, is_read %d\n",
+ mtd, buf, is_read);
+
+ /*
+ * Before starting a dma transfer, be sure to invalidate/flush
+ * the cache over the address range of your DMA buffer to
+ * prevent cache coherency problems. Otherwise very subtle bugs
+ * can be introduced to your driver.
+ */
+ if (is_read)
+ invalidate_dcache_range((unsigned int)buf,
+ (unsigned int)(buf + chip->ecc.size));
+ else
+ flush_dcache_range((unsigned int)buf,
+ (unsigned int)(buf + chip->ecc.size));
+
+ /*
+ * This register must be written before each page is
+ * transferred to generate the correct ECC register
+ * values.
+ */
+ bfin_write_NFC_RST(ECC_RST);
+ SSYNC();
+ while (bfin_read_NFC_RST() & ECC_RST)
+ cpu_relax();
+
+ disable_dma(CH_NFC);
+ clear_dma_irqstat(CH_NFC);
+
+ /* setup DMA register with Blackfin DMA API */
+ set_dma_config(CH_NFC, 0x0);
+ set_dma_start_addr(CH_NFC, (unsigned long) buf);
+
+ /* The DMAs have different size on BF52x and BF54x */
+#ifdef CONFIG_BF52x
+ set_dma_x_count(CH_NFC, (chip->ecc.size >> 1));
+ set_dma_x_modify(CH_NFC, 2);
+ val = DI_EN | WDSIZE_16;
+#endif
+
+#ifdef CONFIG_BF54x
+ set_dma_x_count(CH_NFC, (chip->ecc.size >> 2));
+ set_dma_x_modify(CH_NFC, 4);
+ val = DI_EN | WDSIZE_32;
+#endif
+ /* setup write or read operation */
+ if (is_read)
+ val |= WNR;
+ set_dma_config(CH_NFC, val);
+ enable_dma(CH_NFC);
+
+ /* Start PAGE read/write operation */
+ if (is_read)
+ bfin_write_NFC_PGCTL(PG_RD_START);
+ else
+ bfin_write_NFC_PGCTL(PG_WR_START);
+ wait_for_completion(&info->dma_completion);
+}
+
+static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd,
+ uint8_t *buf, int len)
+{
+ struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+ struct nand_chip *chip = mtd->priv;
+
+	dev_dbg(info->device, "mtd->%p, buf->%p, len %d\n", mtd, buf, len);
+
+ if (len == chip->ecc.size)
+ bf5xx_nand_dma_rw(mtd, buf, 1);
+ else
+ bf5xx_nand_read_buf(mtd, buf, len);
+}
+
+static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+ struct nand_chip *chip = mtd->priv;
+
+ dev_dbg(info->device, "mtd->%p, buf->%p, len %d\n", mtd, buf, len);
+
+ if (len == chip->ecc.size)
+ bf5xx_nand_dma_rw(mtd, (uint8_t *)buf, 0);
+ else
+ bf5xx_nand_write_buf(mtd, buf, len);
+}
+
+static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
+ bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
+ bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
+
+/*
+ * System initialization functions
+ */
+static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info)
+{
+ int ret;
+
+ /* Do not use dma */
+ if (!hardware_ecc)
+ return 0;
+
+ init_completion(&info->dma_completion);
+
+ /* Request NFC DMA channel */
+ ret = request_dma(CH_NFC, "BF5XX NFC driver");
+ if (ret < 0) {
+ dev_err(info->device, " unable to get DMA channel\n");
+ return ret;
+ }
+
+#ifdef CONFIG_BF54x
+	/* Setup DMAC1 channel mux for NFC, which is shared with SDH */
+ bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() & ~1);
+ SSYNC();
+#endif
+
+ set_dma_callback(CH_NFC, bf5xx_nand_dma_irq, info);
+
+ /* Turn off the DMA channel first */
+ disable_dma(CH_NFC);
+ return 0;
+}
+
+static void bf5xx_nand_dma_remove(struct bf5xx_nand_info *info)
+{
+ /* Free NFC DMA channel */
+ if (hardware_ecc)
+ free_dma(CH_NFC);
+}
+
+/*
+ * BF5XX NFC hardware initialization
+ * - pin mux setup
+ * - clear interrupt status
+ */
+static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
+{
+ int err = 0;
+ unsigned short val;
+ struct bf5xx_nand_platform *plat = info->platform;
+
+ /* setup NFC_CTL register */
+ dev_info(info->device,
+ "data_width=%d, wr_dly=%d, rd_dly=%d\n",
+ (plat->data_width ? 16 : 8),
+ plat->wr_dly, plat->rd_dly);
+
+ val = (1 << NFC_PG_SIZE_OFFSET) |
+ (plat->data_width << NFC_NWIDTH_OFFSET) |
+ (plat->rd_dly << NFC_RDDLY_OFFSET) |
+ (plat->wr_dly << NFC_WRDLY_OFFSET);
+ dev_dbg(info->device, "NFC_CTL is 0x%04x\n", val);
+
+ bfin_write_NFC_CTL(val);
+ SSYNC();
+
+ /* clear interrupt status */
+ bfin_write_NFC_IRQMASK(0x0);
+ SSYNC();
+ val = bfin_read_NFC_IRQSTAT();
+ bfin_write_NFC_IRQSTAT(val);
+ SSYNC();
+
+ /* DMA initialization */
+ if (bf5xx_nand_dma_init(info))
+ err = -ENXIO;
+
+ return err;
+}
+
+/*
+ * Device management interface
+ */
+static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
+{
+ struct mtd_info *mtd = &info->mtd;
+ struct mtd_partition *parts = info->platform->partitions;
+ int nr = info->platform->nr_partitions;
+
+ return mtd_device_register(mtd, parts, nr);
+}
+
+static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
+{
+ struct bf5xx_nand_info *info = to_nand_info(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ /* first thing we need to do is release all our mtds
+ * and their partitions, then go through freeing the
+ * resources used
+ */
+ nand_release(&info->mtd);
+
+ peripheral_free_list(bfin_nfc_pin_req);
+ bf5xx_nand_dma_remove(info);
+
+ /* free the common resources */
+ kfree(info);
+
+ return 0;
+}
+
+static int bf5xx_nand_scan(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ return ret;
+
+ if (hardware_ecc) {
+ /*
+		 * For NAND with a page size > 512 bytes, treat the page as several 512-byte sections.
+ */
+ if (likely(mtd->writesize >= 512)) {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
+ chip->ecc.strength = 2;
+ } else {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET));
+ SSYNC();
+ }
+ }
+
+ return nand_scan_tail(mtd);
+}
+
+/*
+ * bf5xx_nand_probe
+ *
+ * called by device layer when it finds a device matching
+ * one that our driver can handle. This code checks to see if
+ * it can allocate all necessary resources, then calls the
+ * nand layer to look for devices.
+ */
+static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
+{
+ struct bf5xx_nand_platform *plat = to_nand_plat(pdev);
+ struct bf5xx_nand_info *info = NULL;
+ struct nand_chip *chip = NULL;
+ struct mtd_info *mtd = NULL;
+ int err = 0;
+
+ dev_dbg(&pdev->dev, "(%p)\n", pdev);
+
+ if (!plat) {
+ dev_err(&pdev->dev, "no platform specific information\n");
+ return -EINVAL;
+ }
+
+ if (peripheral_request_list(bfin_nfc_pin_req, DRV_NAME)) {
+ dev_err(&pdev->dev, "requesting Peripherals failed\n");
+ return -EFAULT;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ dev_err(&pdev->dev, "no memory for flash info\n");
+ err = -ENOMEM;
+ goto out_err_kzalloc;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ spin_lock_init(&info->controller.lock);
+ init_waitqueue_head(&info->controller.wq);
+
+ info->device = &pdev->dev;
+ info->platform = plat;
+
+ /* initialise chip data struct */
+ chip = &info->chip;
+
+ if (plat->data_width)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ chip->options |= NAND_CACHEPRG | NAND_SKIP_BBTSCAN;
+
+ chip->read_buf = (plat->data_width) ?
+ bf5xx_nand_read_buf16 : bf5xx_nand_read_buf;
+ chip->write_buf = (plat->data_width) ?
+ bf5xx_nand_write_buf16 : bf5xx_nand_write_buf;
+
+ chip->read_byte = bf5xx_nand_read_byte;
+
+ chip->cmd_ctrl = bf5xx_nand_hwcontrol;
+ chip->dev_ready = bf5xx_nand_devready;
+
+ chip->priv = &info->mtd;
+ chip->controller = &info->controller;
+
+ chip->IO_ADDR_R = (void __iomem *) NFC_READ;
+ chip->IO_ADDR_W = (void __iomem *) NFC_DATA_WR;
+
+ chip->chip_delay = 0;
+
+ /* initialise mtd info data struct */
+ mtd = &info->mtd;
+ mtd->priv = chip;
+ mtd->owner = THIS_MODULE;
+
+ /* initialise the hardware */
+ err = bf5xx_nand_hw_init(info);
+ if (err)
+ goto out_err_hw_init;
+
+ /* setup hardware ECC data struct */
+ if (hardware_ecc) {
+#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
+ chip->ecc.layout = &bootrom_ecclayout;
+#endif
+ chip->read_buf = bf5xx_nand_dma_read_buf;
+ chip->write_buf = bf5xx_nand_dma_write_buf;
+ chip->ecc.calculate = bf5xx_nand_calculate_ecc;
+ chip->ecc.correct = bf5xx_nand_correct_data;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.hwctl = bf5xx_nand_enable_hwecc;
+ chip->ecc.read_page_raw = bf5xx_nand_read_page_raw;
+ chip->ecc.write_page_raw = bf5xx_nand_write_page_raw;
+ } else {
+ chip->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ /* scan hardware nand chip and setup mtd info data struct */
+ if (bf5xx_nand_scan(mtd)) {
+ err = -ENXIO;
+ goto out_err_nand_scan;
+ }
+
+#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
+ chip->badblockpos = 63;
+#endif
+
+ /* add NAND partition */
+ bf5xx_nand_add_partition(info);
+
+ dev_dbg(&pdev->dev, "initialised ok\n");
+ return 0;
+
+out_err_nand_scan:
+ bf5xx_nand_dma_remove(info);
+out_err_hw_init:
+ platform_set_drvdata(pdev, NULL);
+ kfree(info);
+out_err_kzalloc:
+ peripheral_free_list(bfin_nfc_pin_req);
+
+ return err;
+}
+
+/* PM Support */
+#ifdef CONFIG_PM
+
+static int bf5xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
+{
+ struct bf5xx_nand_info *info = platform_get_drvdata(dev);
+
+ return 0;
+}
+
+static int bf5xx_nand_resume(struct platform_device *dev)
+{
+ struct bf5xx_nand_info *info = platform_get_drvdata(dev);
+
+ return 0;
+}
+
+#else
+#define bf5xx_nand_suspend NULL
+#define bf5xx_nand_resume NULL
+#endif
+
+/* driver device registration */
+static struct platform_driver bf5xx_nand_driver = {
+ .probe = bf5xx_nand_probe,
+ .remove = __devexit_p(bf5xx_nand_remove),
+ .suspend = bf5xx_nand_suspend,
+ .resume = bf5xx_nand_resume,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init bf5xx_nand_init(void)
+{
+ printk(KERN_INFO "%s, Version %s (c) 2007 Analog Devices, Inc.\n",
+ DRV_DESC, DRV_VERSION);
+
+ return platform_driver_register(&bf5xx_nand_driver);
+}
+
+static void __exit bf5xx_nand_exit(void)
+{
+ platform_driver_unregister(&bf5xx_nand_driver);
+}
+
+module_init(bf5xx_nand_init);
+module_exit(bf5xx_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
new file mode 100644
index 00000000..6d227559
--- /dev/null
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -0,0 +1,905 @@
+/*
+ * Driver for One Laptop Per Child ‘CAFÉ’ controller, aka Marvell 88ALP01
+ *
+ * The data sheet for this device can be found at:
+ * http://wiki.laptop.org/go/Datasheets
+ *
+ * Copyright © 2006 Red Hat, Inc.
+ * Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
+ */
+
+#define DEBUG
+
+#include <linux/device.h>
+#undef DEBUG
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/rslib.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+#define CAFE_NAND_CTRL1 0x00
+#define CAFE_NAND_CTRL2 0x04
+#define CAFE_NAND_CTRL3 0x08
+#define CAFE_NAND_STATUS 0x0c
+#define CAFE_NAND_IRQ 0x10
+#define CAFE_NAND_IRQ_MASK 0x14
+#define CAFE_NAND_DATA_LEN 0x18
+#define CAFE_NAND_ADDR1 0x1c
+#define CAFE_NAND_ADDR2 0x20
+#define CAFE_NAND_TIMING1 0x24
+#define CAFE_NAND_TIMING2 0x28
+#define CAFE_NAND_TIMING3 0x2c
+#define CAFE_NAND_NONMEM 0x30
+#define CAFE_NAND_ECC_RESULT 0x3C
+#define CAFE_NAND_DMA_CTRL 0x40
+#define CAFE_NAND_DMA_ADDR0 0x44
+#define CAFE_NAND_DMA_ADDR1 0x48
+#define CAFE_NAND_ECC_SYN01 0x50
+#define CAFE_NAND_ECC_SYN23 0x54
+#define CAFE_NAND_ECC_SYN45 0x58
+#define CAFE_NAND_ECC_SYN67 0x5c
+#define CAFE_NAND_READ_DATA 0x1000
+#define CAFE_NAND_WRITE_DATA 0x2000
+
+#define CAFE_GLOBAL_CTRL 0x3004
+#define CAFE_GLOBAL_IRQ 0x3008
+#define CAFE_GLOBAL_IRQ_MASK 0x300c
+#define CAFE_NAND_RESET 0x3034
+
+/* Missing from the datasheet: bit 19 of CTRL1 sets CE0 vs. CE1 */
+#define CTRL1_CHIPSELECT (1<<19)
+
+struct cafe_priv {
+ struct nand_chip nand;
+ struct pci_dev *pdev;
+ void __iomem *mmio;
+ struct rs_control *rs;
+ uint32_t ctl1;
+ uint32_t ctl2;
+ int datalen;
+ int nr_data;
+ int data_pos;
+ int page_addr;
+ dma_addr_t dmaaddr;
+ unsigned char *dmabuf;
+};
+
+static int usedma = 1;
+module_param(usedma, int, 0644);
+
+static int skipbbt = 0;
+module_param(skipbbt, int, 0644);
+
+static int debug = 0;
+module_param(debug, int, 0644);
+
+static int regdebug = 0;
+module_param(regdebug, int, 0644);
+
+static int checkecc = 1;
+module_param(checkecc, int, 0644);
+
+static unsigned int numtimings;
+static int timing[3];
+module_param_array(timing, int, &numtimings, 0644);
+
+static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+
+/* Hrm. Why isn't this already conditional on something in the struct device? */
+#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
+
+/* Make it easier to switch to PIO if we need to */
+#define cafe_readl(cafe, addr) readl((cafe)->mmio + CAFE_##addr)
+#define cafe_writel(cafe, datum, addr) writel(datum, (cafe)->mmio + CAFE_##addr)
+
+static int cafe_device_ready(struct mtd_info *mtd)
+{
+ struct cafe_priv *cafe = mtd->priv;
+ int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
+ uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
+
+ cafe_writel(cafe, irqs, NAND_IRQ);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "NAND device is%s ready, IRQ %x (%x) (%x,%x)\n",
+ result?"":" not", irqs, cafe_readl(cafe, NAND_IRQ),
+ cafe_readl(cafe, GLOBAL_IRQ), cafe_readl(cafe, GLOBAL_IRQ_MASK));
+
+ return result;
+}
+
+
+static void cafe_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct cafe_priv *cafe = mtd->priv;
+
+ if (usedma)
+ memcpy(cafe->dmabuf + cafe->datalen, buf, len);
+ else
+ memcpy_toio(cafe->mmio + CAFE_NAND_WRITE_DATA + cafe->datalen, buf, len);
+
+ cafe->datalen += len;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes to write buffer. datalen 0x%x\n",
+ len, cafe->datalen);
+}
+
+static void cafe_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct cafe_priv *cafe = mtd->priv;
+
+ if (usedma)
+ memcpy(buf, cafe->dmabuf + cafe->datalen, len);
+ else
+ memcpy_fromio(buf, cafe->mmio + CAFE_NAND_READ_DATA + cafe->datalen, len);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes from position 0x%x in read buffer.\n",
+ len, cafe->datalen);
+ cafe->datalen += len;
+}
+
+static uint8_t cafe_read_byte(struct mtd_info *mtd)
+{
+ struct cafe_priv *cafe = mtd->priv;
+ uint8_t d;
+
+ cafe_read_buf(mtd, &d, 1);
+ cafe_dev_dbg(&cafe->pdev->dev, "Read %02x\n", d);
+
+ return d;
+}
+
+static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
+{
+ struct cafe_priv *cafe = mtd->priv;
+ int adrbytes = 0;
+ uint32_t ctl1;
+ uint32_t doneint = 0x80000000;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "cmdfunc %02x, 0x%x, 0x%x\n",
+ command, column, page_addr);
+
+ if (command == NAND_CMD_ERASE2 || command == NAND_CMD_PAGEPROG) {
+ /* Second half of a command we already calculated */
+ cafe_writel(cafe, cafe->ctl2 | 0x100 | command, NAND_CTRL2);
+ ctl1 = cafe->ctl1;
+ cafe->ctl2 &= ~(1<<30);
+ cafe_dev_dbg(&cafe->pdev->dev, "Continue command, ctl1 %08x, #data %d\n",
+ cafe->ctl1, cafe->nr_data);
+ goto do_command;
+ }
+ /* Reset ECC engine */
+ cafe_writel(cafe, 0, NAND_CTRL2);
+
+ /* Emulate NAND_CMD_READOOB on large-page chips */
+ if (mtd->writesize > 512 &&
+ command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* FIXME: Do we need to send read command before sending data
+ for small-page chips, to position the buffer correctly? */
+
+ if (column != -1) {
+ cafe_writel(cafe, column, NAND_ADDR1);
+ adrbytes = 2;
+ if (page_addr != -1)
+ goto write_adr2;
+ } else if (page_addr != -1) {
+ cafe_writel(cafe, page_addr & 0xffff, NAND_ADDR1);
+ page_addr >>= 16;
+ write_adr2:
+ cafe_writel(cafe, page_addr, NAND_ADDR2);
+ adrbytes += 2;
+ if (mtd->size > mtd->writesize << 16)
+ adrbytes++;
+ }
+
+ cafe->data_pos = cafe->datalen = 0;
+
+ /* Set command valid bit, mask in the chip select bit */
+ ctl1 = 0x80000000 | command | (cafe->ctl1 & CTRL1_CHIPSELECT);
+
+ /* Set RD or WR bits as appropriate */
+ if (command == NAND_CMD_READID || command == NAND_CMD_STATUS) {
+ ctl1 |= (1<<26); /* rd */
+ /* Always 5 bytes, for now */
+ cafe->datalen = 4;
+ /* And one address cycle -- even for STATUS, since the controller doesn't work without */
+ adrbytes = 1;
+ } else if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
+ command == NAND_CMD_READOOB || command == NAND_CMD_RNDOUT) {
+ ctl1 |= 1<<26; /* rd */
+ /* For now, assume just read to end of page */
+ cafe->datalen = mtd->writesize + mtd->oobsize - column;
+ } else if (command == NAND_CMD_SEQIN)
+ ctl1 |= 1<<25; /* wr */
+
+ /* Set number of address bytes */
+ if (adrbytes)
+ ctl1 |= ((adrbytes-1)|8) << 27;
+
+ if (command == NAND_CMD_SEQIN || command == NAND_CMD_ERASE1) {
+ /* Ignore the first command of a pair; the hardware
+ deals with them both at once, later */
+ cafe->ctl1 = ctl1;
+ cafe_dev_dbg(&cafe->pdev->dev, "Setup for delayed command, ctl1 %08x, dlen %x\n",
+ cafe->ctl1, cafe->datalen);
+ return;
+ }
+ /* RNDOUT and READ0 commands need a following byte */
+ if (command == NAND_CMD_RNDOUT)
+ cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_RNDOUTSTART, NAND_CTRL2);
+ else if (command == NAND_CMD_READ0 && mtd->writesize > 512)
+ cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_READSTART, NAND_CTRL2);
+
+ do_command:
+ cafe_dev_dbg(&cafe->pdev->dev, "dlen %x, ctl1 %x, ctl2 %x\n",
+ cafe->datalen, ctl1, cafe_readl(cafe, NAND_CTRL2));
+
+ /* NB: The datasheet lies -- we really should be subtracting 1 here */
+ cafe_writel(cafe, cafe->datalen, NAND_DATA_LEN);
+ cafe_writel(cafe, 0x90000000, NAND_IRQ);
+ if (usedma && (ctl1 & (3<<25))) {
+ uint32_t dmactl = 0xc0000000 + cafe->datalen;
+ /* If WR or RD bits set, set up DMA */
+ if (ctl1 & (1<<26)) {
+ /* It's a read */
+ dmactl |= (1<<29);
+ /* ... so it's done when the DMA is done, not just
+ the command. */
+ doneint = 0x10000000;
+ }
+ cafe_writel(cafe, dmactl, NAND_DMA_CTRL);
+ }
+ cafe->datalen = 0;
+
+ if (unlikely(regdebug)) {
+ int i;
+ printk("About to write command %08x to register 0\n", ctl1);
+ for (i=4; i< 0x5c; i+=4)
+ printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
+ }
+
+ cafe_writel(cafe, ctl1, NAND_CTRL1);
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay(100);
+
+ if (1) {
+ int c;
+ uint32_t irqs;
+
+ for (c = 500000; c != 0; c--) {
+ irqs = cafe_readl(cafe, NAND_IRQ);
+ if (irqs & doneint)
+ break;
+ udelay(1);
+ if (!(c % 100000))
+ cafe_dev_dbg(&cafe->pdev->dev, "Wait for ready, IRQ %x\n", irqs);
+ cpu_relax();
+ }
+ cafe_writel(cafe, doneint, NAND_IRQ);
+ cafe_dev_dbg(&cafe->pdev->dev, "Command %x completed after %d usec, irqs %x (%x)\n",
+ command, 500000-c, irqs, cafe_readl(cafe, NAND_IRQ));
+ }
+
+ WARN_ON(cafe->ctl2 & (1<<30));
+
+ switch (command) {
+
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_DEPLETE1:
+ case NAND_CMD_RNDOUT:
+ case NAND_CMD_STATUS_ERROR:
+ case NAND_CMD_STATUS_ERROR0:
+ case NAND_CMD_STATUS_ERROR1:
+ case NAND_CMD_STATUS_ERROR2:
+ case NAND_CMD_STATUS_ERROR3:
+ cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
+ return;
+ }
+ nand_wait_ready(mtd);
+ cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
+}
+
+static void cafe_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct cafe_priv *cafe = mtd->priv;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr);
+
+ /* Mask the appropriate bit into the stored value of ctl1
+ which will be used by cafe_nand_cmdfunc() */
+ if (chipnr)
+ cafe->ctl1 |= CTRL1_CHIPSELECT;
+ else
+ cafe->ctl1 &= ~CTRL1_CHIPSELECT;
+}
+
+static irqreturn_t cafe_nand_interrupt(int irq, void *id)
+{
+ struct mtd_info *mtd = id;
+ struct cafe_priv *cafe = mtd->priv;
+ uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
+ cafe_writel(cafe, irqs & ~0x90000000, NAND_IRQ);
+ if (!irqs)
+ return IRQ_NONE;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "irq, bits %x (%x)\n", irqs, cafe_readl(cafe, NAND_IRQ));
+ return IRQ_HANDLED;
+}
+
+static void cafe_nand_bug(struct mtd_info *mtd)
+{
+ BUG();
+}
+
+static int cafe_nand_write_oob(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ int status = 0;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/* Don't use -- use nand_read_oob_std for now */
+static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 1;
+}
+/**
+ * cafe_nand_read_page - [REPLACEABLE] hardware ecc syndrome based page read
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore
+ * we need a special oob layout and handling.
+ */
+static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct cafe_priv *cafe = mtd->priv;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
+ cafe_readl(cafe, NAND_ECC_RESULT),
+ cafe_readl(cafe, NAND_ECC_SYN01));
+
+ chip->read_buf(mtd, buf, mtd->writesize);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
+ unsigned short syn[8], pat[4];
+ int pos[4];
+ u8 *oob = chip->oob_poi;
+ int i, n;
+
+ for (i=0; i<8; i+=2) {
+ uint32_t tmp = cafe_readl(cafe, NAND_ECC_SYN01 + (i*2));
+ syn[i] = cafe->rs->index_of[tmp & 0xfff];
+ syn[i+1] = cafe->rs->index_of[(tmp >> 16) & 0xfff];
+ }
+
+ n = decode_rs16(cafe->rs, NULL, NULL, 1367, syn, 0, pos, 0,
+ pat);
+
+ for (i = 0; i < n; i++) {
+ int p = pos[i];
+
+ /* The 12-bit symbols are mapped to bytes here */
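+			/* Roughly: two adjacent 12-bit symbols pack into three
+			   bytes, so symbol p starts around byte offset 3*p/2.
+			   Symbol 0 carries four padding bits, symbol 1365
+			   straddles the last data byte and the first OOB byte,
+			   and higher symbols fall entirely in the OOB area. */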
+
+ if (p > 1374) {
+ /* out of range */
+ n = -1374;
+ } else if (p == 0) {
+ /* high four bits do not correspond to data */
+ if (pat[i] > 0xff)
+ n = -2048;
+ else
+ buf[0] ^= pat[i];
+ } else if (p == 1365) {
+ buf[2047] ^= pat[i] >> 4;
+ oob[0] ^= pat[i] << 4;
+ } else if (p > 1365) {
+ if ((p & 1) == 1) {
+ oob[3*p/2 - 2048] ^= pat[i] >> 4;
+ oob[3*p/2 - 2047] ^= pat[i] << 4;
+ } else {
+ oob[3*p/2 - 2049] ^= pat[i] >> 8;
+ oob[3*p/2 - 2048] ^= pat[i];
+ }
+ } else if ((p & 1) == 1) {
+ buf[3*p/2] ^= pat[i] >> 4;
+ buf[3*p/2 + 1] ^= pat[i] << 4;
+ } else {
+ buf[3*p/2 - 1] ^= pat[i] >> 8;
+ buf[3*p/2] ^= pat[i];
+ }
+ }
+
+ if (n < 0) {
+ dev_dbg(&cafe->pdev->dev, "Failed to correct ECC at %08x\n",
+ cafe_readl(cafe, NAND_ADDR2) * 2048);
+ for (i = 0; i < 0x5c; i += 4)
+ printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
+ mtd->ecc_stats.failed++;
+ } else {
+ dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
+ mtd->ecc_stats.corrected += n;
+ }
+ }
+
+ return 0;
+}
+
+static struct nand_ecclayout cafe_oobinfo_2048 = {
+ .eccbytes = 14,
+ .eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13},
+ .oobfree = {{14, 50}}
+};
+
+/* Ick. The BBT code really ought to be able to work this bit out
+ for itself from the above, at least for the 2KiB case */
+static uint8_t cafe_bbt_pattern_2048[] = { 'B', 'b', 't', '0' };
+static uint8_t cafe_mirror_pattern_2048[] = { '1', 't', 'b', 'B' };
+
+static uint8_t cafe_bbt_pattern_512[] = { 0xBB };
+static uint8_t cafe_mirror_pattern_512[] = { 0xBC };
+
+
+static struct nand_bbt_descr cafe_bbt_main_descr_2048 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 4,
+ .veroffs = 18,
+ .maxblocks = 4,
+ .pattern = cafe_bbt_pattern_2048
+};
+
+static struct nand_bbt_descr cafe_bbt_mirror_descr_2048 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 4,
+ .veroffs = 18,
+ .maxblocks = 4,
+ .pattern = cafe_mirror_pattern_2048
+};
+
+static struct nand_ecclayout cafe_oobinfo_512 = {
+ .eccbytes = 14,
+ .eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13},
+ .oobfree = {{14, 2}}
+};
+
+static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 1,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = cafe_bbt_pattern_512
+};
+
+static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 1,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = cafe_mirror_pattern_512
+};
+
+
+static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf)
+{
+ struct cafe_priv *cafe = mtd->priv;
+
+ chip->write_buf(mtd, buf, mtd->writesize);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /* Set up ECC autogeneration */
+ cafe->ctl2 |= (1<<30);
+}
+
+static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int page, int cached, int raw)
+{
+ int status;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+
+ if (unlikely(raw))
+ chip->ecc.write_page_raw(mtd, chip, buf);
+ else
+ chip->ecc.write_page(mtd, chip, buf);
+
+ /*
+	 * Cached programming is disabled for now; not sure if it's worth the
+	 * trouble. The speed gain is not very impressive. (2.3->2.6MiB/s)
+ */
+ cached = 0;
+
+ if (!cached || !(chip->options & NAND_CACHEPRG)) {
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ /*
+ * See if operation failed and additional status checks are
+ * available
+ */
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+ status = chip->errstat(mtd, chip, FL_WRITING, status,
+ page);
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ }
+
+#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+ /* Send command to read back the data */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ if (chip->verify_buf(mtd, buf, mtd->writesize))
+ return -EIO;
+#endif
+ return 0;
+}
+
+static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ return 0;
+}
+
+/* F_2[X]/(X**6+X+1) */
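+/* Bit-serial shift-and-add multiply in GF(2^6): whenever the running
+   product overflows bit 6 it is reduced by XOR with 0x43, the bit
+   pattern of x^6 + x + 1. */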
+static unsigned short __devinit gf64_mul(u8 a, u8 b)
+{
+ u8 c;
+ unsigned int i;
+
+ c = 0;
+ for (i = 0; i < 6; i++) {
+ if (a & 1)
+ c ^= b;
+ a >>= 1;
+ b <<= 1;
+ if ((b & 0x40) != 0)
+ b ^= 0x43;
+ }
+
+ return c;
+}
+
+/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */
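+/* Karatsuba-style multiply of (ah*X + al)(bh*X + bl) over GF(64), using
+   X^2 = X + A^-1; the constant 0x21 is presumably the GF(64)
+   representation of A^-1. */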
+static u16 __devinit gf4096_mul(u16 a, u16 b)
+{
+ u8 ah, al, bh, bl, ch, cl;
+
+ ah = a >> 6;
+ al = a & 0x3f;
+ bh = b >> 6;
+ bl = b & 0x3f;
+
+ ch = gf64_mul(ah ^ al, bh ^ bl) ^ gf64_mul(al, bl);
+ cl = gf64_mul(gf64_mul(ah, bh), 0x21) ^ gf64_mul(al, bl);
+
+ return (ch << 6) ^ cl;
+}
+
+static int __devinit cafe_mul(int x)
+{
+ if (x == 0)
+ return 1;
+ return gf4096_mul(x, 0xe01);
+}
+
+static int __devinit cafe_nand_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct mtd_info *mtd;
+ struct cafe_priv *cafe;
+ uint32_t ctrl;
+ int err = 0;
+
+ /* Very old versions shared the same PCI ident for all three
+ functions on the chip. Verify the class too... */
+ if ((pdev->class >> 8) != PCI_CLASS_MEMORY_FLASH)
+ return -ENODEV;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_master(pdev);
+
+ mtd = kzalloc(sizeof(*mtd) + sizeof(struct cafe_priv), GFP_KERNEL);
+ if (!mtd) {
+ dev_warn(&pdev->dev, "failed to alloc mtd_info\n");
+ return -ENOMEM;
+ }
+ cafe = (void *)(&mtd[1]);
+
+ mtd->dev.parent = &pdev->dev;
+ mtd->priv = cafe;
+ mtd->owner = THIS_MODULE;
+
+ cafe->pdev = pdev;
+ cafe->mmio = pci_iomap(pdev, 0, 0);
+ if (!cafe->mmio) {
+ dev_warn(&pdev->dev, "failed to iomap\n");
+ err = -ENOMEM;
+ goto out_free_mtd;
+ }
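+	/* 2112 = 2048-byte page + 64-byte OOB, the largest page this driver
+	 * handles; the extra nand_buffers space backs the NAND_OWN_BUFFERS
+	 * option set below. */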
+ cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112 + sizeof(struct nand_buffers),
+ &cafe->dmaaddr, GFP_KERNEL);
+ if (!cafe->dmabuf) {
+ err = -ENOMEM;
+ goto out_ior;
+ }
+ cafe->nand.buffers = (void *)cafe->dmabuf + 2112;
+
+ cafe->rs = init_rs_non_canonical(12, &cafe_mul, 0, 1, 8);
+ if (!cafe->rs) {
+ err = -ENOMEM;
+ goto out_ior;
+ }
+
+ cafe->nand.cmdfunc = cafe_nand_cmdfunc;
+ cafe->nand.dev_ready = cafe_device_ready;
+ cafe->nand.read_byte = cafe_read_byte;
+ cafe->nand.read_buf = cafe_read_buf;
+ cafe->nand.write_buf = cafe_write_buf;
+ cafe->nand.select_chip = cafe_select_chip;
+
+ cafe->nand.chip_delay = 0;
+
+ /* Enable the following for a flash based bad block table */
+ cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
+ cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
+
+ if (skipbbt) {
+ cafe->nand.options |= NAND_SKIP_BBTSCAN;
+ cafe->nand.block_bad = cafe_nand_block_bad;
+ }
+
+ if (numtimings && numtimings != 3) {
+ dev_warn(&cafe->pdev->dev, "%d timing register values ignored; precisely three are required\n", numtimings);
+ }
+
+ if (numtimings == 3) {
+ cafe_dev_dbg(&cafe->pdev->dev, "Using provided timings (%08x %08x %08x)\n",
+ timing[0], timing[1], timing[2]);
+ } else {
+ timing[0] = cafe_readl(cafe, NAND_TIMING1);
+ timing[1] = cafe_readl(cafe, NAND_TIMING2);
+ timing[2] = cafe_readl(cafe, NAND_TIMING3);
+
+ if (timing[0] | timing[1] | timing[2]) {
+ cafe_dev_dbg(&cafe->pdev->dev, "Timing registers already set (%08x %08x %08x)\n",
+ timing[0], timing[1], timing[2]);
+ } else {
+ dev_warn(&cafe->pdev->dev, "Timing registers unset; using most conservative defaults\n");
+ timing[0] = timing[1] = timing[2] = 0xffffffff;
+ }
+ }
+
+ /* Start off by resetting the NAND controller completely */
+ cafe_writel(cafe, 1, NAND_RESET);
+ cafe_writel(cafe, 0, NAND_RESET);
+
+ cafe_writel(cafe, timing[0], NAND_TIMING1);
+ cafe_writel(cafe, timing[1], NAND_TIMING2);
+ cafe_writel(cafe, timing[2], NAND_TIMING3);
+
+ cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
+ err = request_irq(pdev->irq, &cafe_nand_interrupt, IRQF_SHARED,
+ "CAFE NAND", mtd);
+ if (err) {
+ dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
+ goto out_free_dma;
+ }
+
+ /* Disable master reset, enable NAND clock */
+ ctrl = cafe_readl(cafe, GLOBAL_CTRL);
+ ctrl &= 0xffffeff0;
+ ctrl |= 0x00007000;
+ cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
+ cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
+ cafe_writel(cafe, 0, NAND_DMA_CTRL);
+
+ cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
+ cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
+
+ /* Set up DMA address */
+ cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
+ if (sizeof(cafe->dmaaddr) > 4)
+ /* Shift in two parts to shut the compiler up */
+ cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
+ else
+ cafe_writel(cafe, 0, NAND_DMA_ADDR1);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
+ cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
+
+ /* Enable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
+ cafe_dev_dbg(&cafe->pdev->dev, "Control %x, IRQ mask %x\n",
+ cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK));
+
+ /* Scan to find existence of the device */
+ if (nand_scan_ident(mtd, 2, NULL)) {
+ err = -ENXIO;
+ goto out_irq;
+ }
+
+ cafe->ctl2 = 1<<27; /* Reed-Solomon ECC */
+ if (mtd->writesize == 2048)
+ cafe->ctl2 |= 1<<29; /* 2KiB page size */
+
+ /* Set up ECC according to the type of chip we found */
+ if (mtd->writesize == 2048) {
+ cafe->nand.ecc.layout = &cafe_oobinfo_2048;
+ cafe->nand.bbt_td = &cafe_bbt_main_descr_2048;
+ cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048;
+ } else if (mtd->writesize == 512) {
+ cafe->nand.ecc.layout = &cafe_oobinfo_512;
+ cafe->nand.bbt_td = &cafe_bbt_main_descr_512;
+ cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512;
+ } else {
+ printk(KERN_WARNING "Unexpected NAND flash writesize %d. Aborting\n",
+ mtd->writesize);
+ goto out_irq;
+ }
+ cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+ cafe->nand.ecc.size = mtd->writesize;
+ cafe->nand.ecc.bytes = 14;
+ cafe->nand.ecc.strength = 4;
+ cafe->nand.ecc.hwctl = (void *)cafe_nand_bug;
+ cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
+ cafe->nand.ecc.correct = (void *)cafe_nand_bug;
+ cafe->nand.write_page = cafe_nand_write_page;
+ cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
+ cafe->nand.ecc.write_oob = cafe_nand_write_oob;
+ cafe->nand.ecc.read_page = cafe_nand_read_page;
+ cafe->nand.ecc.read_oob = cafe_nand_read_oob;
+
+ err = nand_scan_tail(mtd);
+ if (err)
+ goto out_irq;
+
+ pci_set_drvdata(pdev, mtd);
+
+ mtd->name = "cafe_nand";
+ mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
+
+ goto out;
+
+ out_irq:
+ /* Disable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
+ free_irq(pdev->irq, mtd);
+ out_free_dma:
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
+ out_ior:
+ pci_iounmap(pdev, cafe->mmio);
+ out_free_mtd:
+ kfree(mtd);
+ out:
+ return err;
+}
+
+static void __devexit cafe_nand_remove(struct pci_dev *pdev)
+{
+ struct mtd_info *mtd = pci_get_drvdata(pdev);
+ struct cafe_priv *cafe = mtd->priv;
+
+ /* Disable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
+ free_irq(pdev->irq, mtd);
+ nand_release(mtd);
+ free_rs(cafe->rs);
+ pci_iounmap(pdev, cafe->mmio);
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
+ kfree(mtd);
+}
+
+static const struct pci_device_id cafe_nand_tbl[] = {
+ { PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, cafe_nand_tbl);
+
+static int cafe_nand_resume(struct pci_dev *pdev)
+{
+ uint32_t ctrl;
+ struct mtd_info *mtd = pci_get_drvdata(pdev);
+ struct cafe_priv *cafe = mtd->priv;
+
+ /* Start off by resetting the NAND controller completely */
+ cafe_writel(cafe, 1, NAND_RESET);
+ cafe_writel(cafe, 0, NAND_RESET);
+ cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
+
+ /* Restore timing configuration */
+ cafe_writel(cafe, timing[0], NAND_TIMING1);
+ cafe_writel(cafe, timing[1], NAND_TIMING2);
+ cafe_writel(cafe, timing[2], NAND_TIMING3);
+
+ /* Disable master reset, enable NAND clock */
+ ctrl = cafe_readl(cafe, GLOBAL_CTRL);
+ ctrl &= 0xffffeff0;
+ ctrl |= 0x00007000;
+ cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
+ cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
+ cafe_writel(cafe, 0, NAND_DMA_CTRL);
+ cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
+ cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
+
+ /* Set up DMA address */
+ cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
+ if (sizeof(cafe->dmaaddr) > 4)
+ /* Shift in two parts to shut the compiler up */
+ cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
+ else
+ cafe_writel(cafe, 0, NAND_DMA_ADDR1);
+
+ /* Enable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
+ return 0;
+}
+
+static struct pci_driver cafe_nand_pci_driver = {
+ .name = "CAFÉ NAND",
+ .id_table = cafe_nand_tbl,
+ .probe = cafe_nand_probe,
+ .remove = __devexit_p(cafe_nand_remove),
+ .resume = cafe_nand_resume,
+};
+
+static int __init cafe_nand_init(void)
+{
+ return pci_register_driver(&cafe_nand_pci_driver);
+}
+
+static void __exit cafe_nand_exit(void)
+{
+ pci_unregister_driver(&cafe_nand_pci_driver);
+}
+module_init(cafe_nand_init);
+module_exit(cafe_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("NAND flash driver for OLPC CAFÉ chip");
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
new file mode 100644
index 00000000..1024bfc0
--- /dev/null
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -0,0 +1,264 @@
+/*
+ * linux/drivers/mtd/nand/cmx270-nand.c
+ *
+ * Copyright (C) 2006 Compulab, Ltd.
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * Derived from drivers/mtd/nand/h1910.c
+ * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
+ * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * CM-X270 board.
+ */
+
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+
+#include <mach/pxa2xx-regs.h>
+
+#define GPIO_NAND_CS (11)
+#define GPIO_NAND_RB (89)
+
+/* MTD structure for CM-X270 board */
+static struct mtd_info *cmx270_nand_mtd;
+
+/* remapped IO address of the device */
+static void __iomem *cmx270_nand_io;
+
+/*
+ * Define static partitions for flash device
+ */
+static struct mtd_partition partition_info[] = {
+ [0] = {
+ .name = "cmx270-0",
+ .offset = 0,
+ .size = MTDPART_SIZ_FULL
+ }
+};
+#define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
+
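+/*
+ * On this board the 8-bit NAND data bus is apparently wired to bits 23:16
+ * of the 32-bit chip-select window, so every data access below shifts the
+ * value by 16.
+ */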
+static u_char cmx270_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+
+ return (readl(this->IO_ADDR_R) >> 16);
+}
+
+static void cmx270_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++)
+ writel((*buf++ << 16), this->IO_ADDR_W);
+}
+
+static void cmx270_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++)
+ *buf++ = readl(this->IO_ADDR_R) >> 16;
+}
+
+static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++)
+ if (buf[i] != (u_char)(readl(this->IO_ADDR_R) >> 16))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline void nand_cs_on(void)
+{
+ gpio_set_value(GPIO_NAND_CS, 0);
+}
+
+static void nand_cs_off(void)
+{
+ dsb();
+
+ gpio_set_value(GPIO_NAND_CS, 1);
+}
+
+/*
+ * hardware specific access to control-lines
+ */
+static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
+ unsigned int ctrl)
+{
+ struct nand_chip* this = mtd->priv;
+ unsigned int nandaddr = (unsigned int)this->IO_ADDR_W;
+
+ dsb();
+
+ if (ctrl & NAND_CTRL_CHANGE) {
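+		/* ALE and CLE are apparently driven by address lines A3 and A2
+		   of the NAND window, so they are folded into the I/O address
+		   used for the data write below. */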
+ if ( ctrl & NAND_ALE )
+ nandaddr |= (1 << 3);
+ else
+ nandaddr &= ~(1 << 3);
+ if ( ctrl & NAND_CLE )
+ nandaddr |= (1 << 2);
+ else
+ nandaddr &= ~(1 << 2);
+ if ( ctrl & NAND_NCE )
+ nand_cs_on();
+ else
+ nand_cs_off();
+ }
+
+ dsb();
+ this->IO_ADDR_W = (void __iomem*)nandaddr;
+ if (dat != NAND_CMD_NONE)
+ writel((dat << 16), this->IO_ADDR_W);
+
+ dsb();
+}
+
+/*
+ * read device ready pin
+ */
+static int cmx270_device_ready(struct mtd_info *mtd)
+{
+ dsb();
+
+ return (gpio_get_value(GPIO_NAND_RB));
+}
+
+/*
+ * Main initialization routine
+ */
+static int __init cmx270_init(void)
+{
+ struct nand_chip *this;
+ int ret;
+
+ if (!(machine_is_armcore() && cpu_is_pxa27x()))
+ return -ENODEV;
+
+ ret = gpio_request(GPIO_NAND_CS, "NAND CS");
+ if (ret) {
+ pr_warning("CM-X270: failed to request NAND CS gpio\n");
+ return ret;
+ }
+
+ gpio_direction_output(GPIO_NAND_CS, 1);
+
+ ret = gpio_request(GPIO_NAND_RB, "NAND R/B");
+ if (ret) {
+ pr_warning("CM-X270: failed to request NAND R/B gpio\n");
+ goto err_gpio_request;
+ }
+
+ gpio_direction_input(GPIO_NAND_RB);
+
+ /* Allocate memory for MTD device structure and private data */
+ cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) +
+ sizeof(struct nand_chip),
+ GFP_KERNEL);
+ if (!cmx270_nand_mtd) {
+ pr_debug("Unable to allocate CM-X270 NAND MTD device structure.\n");
+ ret = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12);
+ if (!cmx270_nand_io) {
+ pr_debug("Unable to ioremap NAND device\n");
+ ret = -EINVAL;
+ goto err_ioremap;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&cmx270_nand_mtd[1]);
+
+ /* Link the private data with the MTD structure */
+ cmx270_nand_mtd->owner = THIS_MODULE;
+ cmx270_nand_mtd->priv = this;
+
+ /* insert callbacks */
+ this->IO_ADDR_R = cmx270_nand_io;
+ this->IO_ADDR_W = cmx270_nand_io;
+ this->cmd_ctrl = cmx270_hwcontrol;
+ this->dev_ready = cmx270_device_ready;
+
+	/* 20 us command delay time */
+ this->chip_delay = 20;
+ this->ecc.mode = NAND_ECC_SOFT;
+
+ /* read/write functions */
+ this->read_byte = cmx270_read_byte;
+ this->read_buf = cmx270_read_buf;
+ this->write_buf = cmx270_write_buf;
+ this->verify_buf = cmx270_verify_buf;
+
+ /* Scan to find existence of the device */
+ if (nand_scan (cmx270_nand_mtd, 1)) {
+ pr_notice("No NAND device\n");
+ ret = -ENXIO;
+ goto err_scan;
+ }
+
+ /* Register the partitions */
+ ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL,
+ partition_info, NUM_PARTITIONS);
+ if (ret)
+ goto err_scan;
+
+ /* Return happy */
+ return 0;
+
+err_scan:
+ iounmap(cmx270_nand_io);
+err_ioremap:
+ kfree(cmx270_nand_mtd);
+err_kzalloc:
+ gpio_free(GPIO_NAND_RB);
+err_gpio_request:
+ gpio_free(GPIO_NAND_CS);
+
+ return ret;
+
+}
+module_init(cmx270_init);
+
+/*
+ * Clean up routine
+ */
+static void __exit cmx270_cleanup(void)
+{
+ /* Release resources, unregister device */
+ nand_release(cmx270_nand_mtd);
+
+ gpio_free(GPIO_NAND_RB);
+ gpio_free(GPIO_NAND_CS);
+
+ iounmap(cmx270_nand_io);
+
+ /* Free the MTD device structure */
+ kfree (cmx270_nand_mtd);
+}
+module_exit(cmx270_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
+MODULE_DESCRIPTION("NAND flash driver for Compulab CM-X270 Module");
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
new file mode 100644
index 00000000..821c34c6
--- /dev/null
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -0,0 +1,361 @@
+/*
+ * drivers/mtd/nand/cs553x_nand.c
+ *
+ * (C) 2005, 2006 Red Hat Inc.
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ * Tom Sylla <tom.sylla@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash controller found on
+ * the AMD CS5535/CS5536 companion chipsets for the Geode processor.
+ * mtd-id for command line partitioning is cs553x_nand_cs[0-3]
+ * where 0-3 reflects the chip select for NAND.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/msr.h>
+#include <asm/io.h>
+
+#define NR_CS553X_CONTROLLERS 4
+
+#define MSR_DIVIL_GLD_CAP	0x51400000	/* DIVIL capabilities */
+#define CAP_CS5535 0x2df000ULL
+#define CAP_CS5536 0x5df500ULL
+
+/* NAND Timing MSRs */
+#define MSR_NANDF_DATA 0x5140001b /* NAND Flash Data Timing MSR */
+#define MSR_NANDF_CTL 0x5140001c /* NAND Flash Control Timing */
+#define MSR_NANDF_RSVD 0x5140001d /* Reserved */
+
+/* NAND BAR MSRs */
+#define MSR_DIVIL_LBAR_FLSH0 0x51400010 /* Flash Chip Select 0 */
+#define MSR_DIVIL_LBAR_FLSH1 0x51400011 /* Flash Chip Select 1 */
+#define MSR_DIVIL_LBAR_FLSH2 0x51400012 /* Flash Chip Select 2 */
+#define MSR_DIVIL_LBAR_FLSH3 0x51400013 /* Flash Chip Select 3 */
+ /* Each made up of... */
+#define FLSH_LBAR_EN (1ULL<<32)
+#define FLSH_NOR_NAND (1ULL<<33) /* 1 for NAND */
+#define FLSH_MEM_IO (1ULL<<34) /* 1 for MMIO */
+ /* I/O BARs have BASE_ADDR in bits 15:4, IO_MASK in 47:36 */
+ /* MMIO BARs have BASE_ADDR in bits 31:12, MEM_MASK in 63:44 */
+
+/* Pin function selection MSR (IDE vs. flash on the IDE pins) */
+#define MSR_DIVIL_BALL_OPTS 0x51400015
+#define PIN_OPT_IDE (1<<0) /* 0 for flash, 1 for IDE */
+
+/* Registers within the NAND flash controller BAR -- memory mapped */
+#define MM_NAND_DATA 0x00 /* 0 to 0x7ff, in fact */
+#define MM_NAND_CTL 0x800 /* Any even address 0x800-0x80e */
+#define MM_NAND_IO 0x801 /* Any odd address 0x801-0x80f */
+#define MM_NAND_STS 0x810
+#define MM_NAND_ECC_LSB 0x811
+#define MM_NAND_ECC_MSB 0x812
+#define MM_NAND_ECC_COL 0x813
+#define MM_NAND_LAC 0x814
+#define MM_NAND_ECC_CTL 0x815
+
+/* Registers within the NAND flash controller BAR -- I/O mapped */
+#define IO_NAND_DATA 0x00 /* 0 to 3, in fact */
+#define IO_NAND_CTL 0x04
+#define IO_NAND_IO 0x05
+#define IO_NAND_STS 0x06
+#define IO_NAND_ECC_CTL 0x08
+#define IO_NAND_ECC_LSB 0x09
+#define IO_NAND_ECC_MSB 0x0a
+#define IO_NAND_ECC_COL 0x0b
+#define IO_NAND_LAC 0x0c
+
+#define CS_NAND_CTL_DIST_EN (1<<4) /* Enable NAND Distract interrupt */
+#define CS_NAND_CTL_RDY_INT_MASK (1<<3) /* Enable RDY/BUSY# interrupt */
+#define CS_NAND_CTL_ALE (1<<2)
+#define CS_NAND_CTL_CLE (1<<1)
+#define CS_NAND_CTL_CE (1<<0) /* Keep low; 1 to reset */
+
+#define CS_NAND_STS_FLASH_RDY (1<<3)
+#define CS_NAND_CTLR_BUSY (1<<2)
+#define CS_NAND_CMD_COMP (1<<1)
+#define CS_NAND_DIST_ST (1<<0)
+
+#define CS_NAND_ECC_PARITY (1<<2)
+#define CS_NAND_ECC_CLRECC (1<<1)
+#define CS_NAND_ECC_ENECC (1<<0)
+
+static void cs553x_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+
+ while (unlikely(len > 0x800)) {
+ memcpy_fromio(buf, this->IO_ADDR_R, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_fromio(buf, this->IO_ADDR_R, len);
+}
+
+static void cs553x_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+
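+ /* Note: IO_ADDR_R and IO_ADDR_W map the same 4KiB window on this
+ * controller (both are set in cs553x_init_one()), so writing through
+ * IO_ADDR_R here is intentional. */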
+ while (unlikely(len > 0x800)) {
+ memcpy_toio(this->IO_ADDR_R, buf, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_toio(this->IO_ADDR_R, buf, len);
+}
+
+static unsigned char cs553x_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ return readb(this->IO_ADDR_R);
+}
+
+static void cs553x_write_byte(struct mtd_info *mtd, u_char byte)
+{
+ struct nand_chip *this = mtd->priv;
+ int i = 100000;
+
+ while (i && readb(this->IO_ADDR_R + MM_NAND_STS) & CS_NAND_CTLR_BUSY) {
+ udelay(1);
+ i--;
+ }
+ writeb(byte, this->IO_ADDR_W + 0x801);
+}
+
+static void cs553x_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ void __iomem *mmio_base = this->IO_ADDR_R;
+ if (ctrl & NAND_CTRL_CHANGE) {
+ unsigned char ctl = (ctrl & ~NAND_CTRL_CHANGE) ^ 0x01;
+ writeb(ctl, mmio_base + MM_NAND_CTL);
+ }
+ if (cmd != NAND_CMD_NONE)
+ cs553x_write_byte(mtd, cmd);
+}
+
+static int cs553x_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ void __iomem *mmio_base = this->IO_ADDR_R;
+ unsigned char foo = readb(mmio_base + MM_NAND_STS);
+
+ return (foo & CS_NAND_STS_FLASH_RDY) && !(foo & CS_NAND_CTLR_BUSY);
+}
+
+static void cs_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct nand_chip *this = mtd->priv;
+ void __iomem *mmio_base = this->IO_ADDR_R;
+
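+ /* 0x07 == CS_NAND_ECC_CLRECC | CS_NAND_ECC_ENECC | CS_NAND_ECC_PARITY:
+ * clear any previous ECC result and (re)enable ECC generation. */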
+ writeb(0x07, mmio_base + MM_NAND_ECC_CTL);
+}
+
+static int cs_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+{
+ uint32_t ecc;
+ struct nand_chip *this = mtd->priv;
+ void __iomem *mmio_base = this->IO_ADDR_R;
+
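+ /* The driver relies on a single 32-bit read at MM_NAND_STS (0x810)
+ * also returning the adjacent ECC registers: ECC_LSB (0x811) lands in
+ * bits 15:8, ECC_MSB (0x812) in bits 23:16 and ECC_COL (0x813) in
+ * bits 31:24, hence the shifts below. */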
+ ecc = readl(mmio_base + MM_NAND_STS);
+
+ ecc_code[1] = ecc >> 8;
+ ecc_code[0] = ecc >> 16;
+ ecc_code[2] = ecc >> 24;
+ return 0;
+}
+
+static struct mtd_info *cs553x_mtd[4];
+
+static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
+{
+ int err = 0;
+ struct nand_chip *this;
+ struct mtd_info *new_mtd;
+
+ printk(KERN_NOTICE "Probing CS553x NAND controller CS#%d at %sIO 0x%08lx\n", cs, mmio?"MM":"P", adr);
+
+ if (!mmio) {
+ printk(KERN_NOTICE "PIO mode not yet implemented for CS553X NAND controller\n");
+ return -ENXIO;
+ }
+
+ /* Allocate memory for MTD device structure and private data */
+ new_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+ if (!new_mtd) {
+ printk(KERN_WARNING "Unable to allocate CS553X NAND MTD device structure.\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&new_mtd[1]);
+
+ /* Initialize structures */
+ memset(new_mtd, 0, sizeof(struct mtd_info));
+ memset(this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ new_mtd->priv = this;
+ new_mtd->owner = THIS_MODULE;
+
+ /* map physical address */
+ this->IO_ADDR_R = this->IO_ADDR_W = ioremap(adr, 4096);
+ if (!this->IO_ADDR_R) {
+ printk(KERN_WARNING "ioremap cs553x NAND @0x%08lx failed\n", adr);
+ err = -EIO;
+ goto out_mtd;
+ }
+
+ this->cmd_ctrl = cs553x_hwcontrol;
+ this->dev_ready = cs553x_device_ready;
+ this->read_byte = cs553x_read_byte;
+ this->read_buf = cs553x_read_buf;
+ this->write_buf = cs553x_write_buf;
+
+ this->chip_delay = 0;
+
+ this->ecc.mode = NAND_ECC_HW;
+ this->ecc.size = 256;
+ this->ecc.bytes = 3;
+ this->ecc.hwctl = cs_enable_hwecc;
+ this->ecc.calculate = cs_calculate_ecc;
+ this->ecc.correct = nand_correct_data;
+
+ /* Enable the following for a flash based bad block table */
+ this->bbt_options = NAND_BBT_USE_FLASH;
+ this->options = NAND_NO_AUTOINCR;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(new_mtd, 1)) {
+ err = -ENXIO;
+ goto out_ior;
+ }
+
+ this->ecc.strength = 1;
+
+ new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
+
+ cs553x_mtd[cs] = new_mtd;
+ goto out;
+
+out_ior:
+ iounmap(this->IO_ADDR_R);
+out_mtd:
+ kfree(new_mtd);
+out:
+ return err;
+}
+
+static int is_geode(void)
+{
+ /* These are the CPUs which will have a CS553[56] companion chip */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 5 &&
+ boot_cpu_data.x86_model == 10)
+ return 1; /* Geode LX */
+
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX) &&
+ boot_cpu_data.x86 == 5 &&
+ boot_cpu_data.x86_model == 5)
+ return 1; /* Geode GX (née GX2) */
+
+ return 0;
+}
+
+static int __init cs553x_init(void)
+{
+ int err = -ENXIO;
+ int i;
+ uint64_t val;
+
+ /* If the CPU isn't a Geode GX or LX, abort */
+ if (!is_geode())
+ return -ENXIO;
+
+ /* If it doesn't have the CS553[56], abort */
+ rdmsrl(MSR_DIVIL_GLD_CAP, val);
+ val &= ~0xFFULL;
+ if (val != CAP_CS5535 && val != CAP_CS5536)
+ return -ENXIO;
+
+ /* If it doesn't have the NAND controller enabled, abort */
+ rdmsrl(MSR_DIVIL_BALL_OPTS, val);
+ if (val & PIN_OPT_IDE) {
+ printk(KERN_INFO "CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+ rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val);
+
+ if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND))
+ err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF);
+ }
+
+ /* Register all devices together here. This means we can easily hack it to
+ do mtdconcat etc. if we want to. */
+ for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+ if (cs553x_mtd[i]) {
+ /* If any devices registered, return success. Else the last error. */
+ mtd_device_parse_register(cs553x_mtd[i], NULL, NULL,
+ NULL, 0);
+ err = 0;
+ }
+ }
+
+ return err;
+}
+
+module_init(cs553x_init);
+
+static void __exit cs553x_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+ struct mtd_info *mtd = cs553x_mtd[i];
+ struct nand_chip *this;
+ void __iomem *mmio_base;
+
+ if (!mtd)
+ continue;
+
+ this = cs553x_mtd[i]->priv;
+ mmio_base = this->IO_ADDR_R;
+
+ /* Release resources, unregister device */
+ nand_release(cs553x_mtd[i]);
+ kfree(cs553x_mtd[i]->name);
+ cs553x_mtd[i] = NULL;
+
+ /* unmap physical address */
+ iounmap(mmio_base);
+
+ /* Free the MTD device structure */
+ kfree(mtd);
+ }
+}
+
+module_exit(cs553x_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("NAND controller driver for AMD CS5535/CS5536 companion chip");
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
new file mode 100644
index 00000000..d94b03c2
--- /dev/null
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -0,0 +1,838 @@
+/*
+ * davinci_nand.c - NAND Flash Driver for DaVinci family chips
+ *
+ * Copyright © 2006 Texas Instruments.
+ *
+ * Port to 2.6.23 Copyright © 2008 by:
+ * Sander Huijsen <Shuijsen@optelecom-nkf.com>
+ * Troy Kisky <troy.kisky@boundarydevices.com>
+ * Dirk Behme <Dirk.Behme@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+
+#include <mach/nand.h>
+#include <mach/aemif.h>
+
+/*
+ * This is a device driver for the NAND flash controller found on the
+ * various DaVinci family chips. It handles up to four SoC chipselects,
+ * and some flavors of secondary chipselect (e.g. based on A12) as used
+ * with multichip packages.
+ *
+ * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
+ * available on chips like the DM355 and OMAP-L137 and needed with the
+ * more error-prone MLC NAND chips.
+ *
+ * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
+ * outputs in a "wire-AND" configuration, with no per-chip signals.
+ */
+struct davinci_nand_info {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct nand_ecclayout ecclayout;
+
+ struct device *dev;
+ struct clk *clk;
+
+ bool is_readmode;
+
+ void __iomem *base;
+ void __iomem *vaddr;
+
+ uint32_t ioaddr;
+ uint32_t current_cs;
+
+ uint32_t mask_chipsel;
+ uint32_t mask_ale;
+ uint32_t mask_cle;
+
+ uint32_t core_chipsel;
+
+ struct davinci_aemif_timing *timing;
+};
+
+static DEFINE_SPINLOCK(davinci_nand_lock);
+static bool ecc4_busy;
+
+#define to_davinci_nand(m) container_of(m, struct davinci_nand_info, mtd)
+
+
+static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
+ int offset)
+{
+ return __raw_readl(info->base + offset);
+}
+
+static inline void davinci_nand_writel(struct davinci_nand_info *info,
+ int offset, unsigned long value)
+{
+ __raw_writel(value, info->base + offset);
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Access to hardware control lines: ALE, CLE, secondary chipselect.
+ */
+
+static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ uint32_t addr = info->current_cs;
+ struct nand_chip *nand = mtd->priv;
+
+ /* Did the control lines change? */
+ if (ctrl & NAND_CTRL_CHANGE) {
+ if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
+ addr |= info->mask_cle;
+ else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
+ addr |= info->mask_ale;
+
+ nand->IO_ADDR_W = (void __iomem __force *)addr;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ iowrite8(cmd, nand->IO_ADDR_W);
+}
+
+static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ uint32_t addr = info->ioaddr;
+
+ /* maybe kick in a second chipselect */
+ if (chip > 0)
+ addr |= info->mask_chipsel;
+ info->current_cs = addr;
+
+ info->chip.IO_ADDR_W = (void __iomem __force *)addr;
+ info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * 1-bit hardware ECC ... context maintained for each core chipselect
+ */
+
+static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+
+ return davinci_nand_readl(info, NANDF1ECC_OFFSET
+ + 4 * info->core_chipsel);
+}
+
+static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
+{
+ struct davinci_nand_info *info;
+ uint32_t nandcfr;
+ unsigned long flags;
+
+ info = to_davinci_nand(mtd);
+
+ /* Reset ECC hardware */
+ nand_davinci_readecc_1bit(mtd);
+
+ spin_lock_irqsave(&davinci_nand_lock, flags);
+
+ /* Restart ECC hardware */
+ nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
+ nandcfr |= BIT(8 + info->core_chipsel);
+ davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);
+
+ spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/*
+ * Read hardware ECC value and pack into three bytes
+ */
+static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
+ unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);
+
+ /* invert so that erased block ecc is correct */
+ ecc24 = ~ecc24;
+ ecc_code[0] = (u_char)(ecc24);
+ ecc_code[1] = (u_char)(ecc24 >> 8);
+ ecc_code[2] = (u_char)(ecc24 >> 16);
+
+ return 0;
+}
+
+static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *chip = mtd->priv;
+ uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
+ (read_ecc[2] << 16);
+ uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
+ (calc_ecc[2] << 16);
+ uint32_t diff = eccCalc ^ eccNand;
+
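+ /* For a single-bit data error the two 12-bit halves of diff are exact
+ * bitwise complements; bits 23:15 of diff then give the faulty byte
+ * offset and bits 14:12 the bit position, which is what the
+ * correction below applies. */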
+ if (diff) {
+ if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
+ /* Correctable error */
+ if ((diff >> (12 + 3)) < chip->ecc.size) {
+ dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
+ return 1;
+ } else {
+ return -1;
+ }
+ } else if (!(diff & (diff - 1))) {
+ /* Single bit ECC error in the ECC itself,
+ * nothing to fix */
+ return 1;
+ } else {
+ /* Uncorrectable error */
+ return -1;
+ }
+
+ }
+ return 0;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * 4-bit hardware ECC ... context maintained over entire AEMIF
+ *
+ * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
+ * since that forces use of a problematic "infix OOB" layout.
+ * Among other things, it trashes manufacturer bad block markers.
+ * Also, and specific to this hardware, it ECC-protects the "prepad"
+ * in the OOB ... while having ECC protection for parts of OOB would
+ * seem useful, the current MTD stack sometimes wants to update the
+ * OOB without recomputing ECC.
+ */
+
+static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&davinci_nand_lock, flags);
+
+ /* Start 4-bit ECC calculation for read/write */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val &= ~(0x03 << 4);
+ val |= (info->core_chipsel << 4) | BIT(12);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ info->is_readmode = (mode == NAND_ECC_READ);
+
+ spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/* Read raw ECC code after writing to NAND. */
+static void
+nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
+{
+ const u32 mask = 0x03ff03ff;
+
+ code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
+ code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
+ code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
+ code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
+}
+
+/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
+static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ u32 raw_ecc[4], *p;
+ unsigned i;
+
+ /* After a read, terminate ECC calculation by a dummy read
+ * of some 4-bit ECC register. ECC covers everything that
+ * was read; correct() just uses the hardware state, so
+ * ecc_code is not needed.
+ */
+ if (info->is_readmode) {
+ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+ return 0;
+ }
+
+ /* Pack eight raw 10-bit ecc values into ten bytes, making
+ * two passes which each convert four values (in upper and
+ * lower halves of two 32-bit words) into five bytes. The
+ * ROM boot loader uses this same packing scheme.
+ */
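+ /* For illustration: with v0..v3 denoting the four 10-bit values (v0
+ * and v2 in the low halves, v1 and v3 in the upper halves of p[0] and
+ * p[1]), the five output bytes of each pass are:
+ * byte0 = v0[7:0]
+ * byte1 = v1[5:0]<<2 | v0[9:8]
+ * byte2 = v2[3:0]<<4 | v1[9:6]
+ * byte3 = v3[1:0]<<6 | v2[9:4]
+ * byte4 = v3[9:2]
+ */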
+ nand_davinci_readecc_4bit(info, raw_ecc);
+ for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
+ *ecc_code++ = p[0] & 0xff;
+ *ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
+ *ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
+ *ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
+ *ecc_code++ = (p[1] >> 18) & 0xff;
+ }
+
+ return 0;
+}
+
+/* Correct up to 4 bits in data we just read, using state left in the
+ * hardware plus the ecc_code computed when it was first written.
+ */
+static int nand_davinci_correct_4bit(struct mtd_info *mtd,
+ u_char *data, u_char *ecc_code, u_char *null)
+{
+ int i;
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned short ecc10[8];
+ unsigned short *ecc16;
+ u32 syndrome[4];
+ u32 ecc_state;
+ unsigned num_errors, corrected;
+ unsigned long timeo;
+
+ /* All bytes 0xff? It's an erased page; ignore its ECC. */
+ for (i = 0; i < 10; i++) {
+ if (ecc_code[i] != 0xff)
+ goto compare;
+ }
+ return 0;
+
+compare:
+ /* Unpack ten bytes into eight 10 bit values. We know we're
+ * little-endian, and use type punning for less shifting/masking.
+ */
+ if (WARN_ON(0x01 & (unsigned) ecc_code))
+ return -EINVAL;
+ ecc16 = (unsigned short *)ecc_code;
+
+ ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
+ ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
+ ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
+ ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
+ ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
+ ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
+ ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
+ ecc10[7] = (ecc16[4] >> 6) & 0x3ff;
+
+ /* Tell ECC controller about the expected ECC codes. */
+ for (i = 7; i >= 0; i--)
+ davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
+
+ /* Allow time for syndrome calculation ... then read it.
+ * A syndrome of all zeroes means no detected errors.
+ */
+ davinci_nand_readl(info, NANDFSR_OFFSET);
+ nand_davinci_readecc_4bit(info, syndrome);
+ if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
+ return 0;
+
+ /*
+ * Clear any previous address calculation by doing a dummy read of an
+ * error address register.
+ */
+ davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);
+
+ /* Start address calculation, and wait for it to complete.
+ * We _could_ start reading more data while this is working,
+ * to speed up the overall page read.
+ */
+ davinci_nand_writel(info, NANDFCR_OFFSET,
+ davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
+
+ /*
+ * ECC_STATE field reads 0x3 (Error correction complete) immediately
+ * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
+ * begin trying to poll for the state, you may fall right out of your
+ * loop without any of the correction calculations having taken place.
+ * The recommendation from the hardware team is to initially delay as
+ * long as ECC_STATE reads less than 4. After that, ECC HW has entered
+ * correction state.
+ */
+ timeo = jiffies + usecs_to_jiffies(100);
+ do {
+ ecc_state = (davinci_nand_readl(info,
+ NANDFSR_OFFSET) >> 8) & 0x0f;
+ cpu_relax();
+ } while ((ecc_state < 4) && time_before(jiffies, timeo));
+
+ for (;;) {
+ u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
+
+ switch ((fsr >> 8) & 0x0f) {
+ case 0: /* no error, should not happen */
+ davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
+ return 0;
+ case 1: /* five or more errors detected */
+ davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
+ return -EIO;
+ case 2: /* error addresses computed */
+ case 3:
+ num_errors = 1 + ((fsr >> 16) & 0x03);
+ goto correct;
+ default: /* still working on it */
+ cpu_relax();
+ continue;
+ }
+ }
+
+correct:
+ /* correct each error */
+ for (i = 0, corrected = 0; i < num_errors; i++) {
+ int error_address, error_value;
+
+ if (i > 1) {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD2_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL2_OFFSET);
+ } else {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD1_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL1_OFFSET);
+ }
+
+ if (i & 1) {
+ error_address >>= 16;
+ error_value >>= 16;
+ }
+ error_address &= 0x3ff;
+ error_address = (512 + 7) - error_address;
+
+ if (error_address < 512) {
+ data[error_address] ^= error_value;
+ corrected++;
+ }
+ }
+
+ return corrected;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
+ * how these chips are normally wired. This translates to both 8 and 16
+ * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
+ *
+ * For now we assume that configuration, or any other one which ignores
+ * the two LSBs for NAND access ... so we can issue 32-bit reads/writes
+ * and have that transparently morphed into multiple NAND operations.
+ */
+static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
+ ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
+ else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
+ ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
+ else
+ ioread8_rep(chip->IO_ADDR_R, buf, len);
+}
+
+static void nand_davinci_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
+ iowrite32_rep(chip->IO_ADDR_R, buf, len >> 2);
+ else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
+ iowrite16_rep(chip->IO_ADDR_R, buf, len >> 1);
+ else
+ iowrite8_rep(chip->IO_ADDR_R, buf, len);
+}
+
+/*
+ * Check hardware register for wait status. Returns 1 if device is ready,
+ * 0 if it is still busy.
+ */
+static int nand_davinci_dev_ready(struct mtd_info *mtd)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+
+ return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
+}
+
+/*----------------------------------------------------------------------*/
+
+/* An ECC layout for using 4-bit ECC with small-page flash, storing
+ * ten ECC bytes plus the manufacturer's bad block marker byte, and
+ * not overlapping the default BBT markers.
+ */
+static struct nand_ecclayout hwecc4_small __initconst = {
+ .eccbytes = 10,
+ .eccpos = { 0, 1, 2, 3, 4,
+ /* offset 5 holds the badblock marker */
+ 6, 7,
+ 13, 14, 15, },
+ .oobfree = {
+ {.offset = 8, .length = 5, },
+ {.offset = 16, },
+ },
+};
+
+/* An ECC layout for using 4-bit ECC with large-page (2048bytes) flash,
+ * storing ten ECC bytes plus the manufacturer's bad block marker byte,
+ * and not overlapping the default BBT markers.
+ */
+static struct nand_ecclayout hwecc4_2048 __initconst = {
+ .eccbytes = 40,
+ .eccpos = {
+ /* at the end of spare sector */
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ },
+ .oobfree = {
+ /* 2 bytes at offset 0 hold manufacturer badblock markers */
+ {.offset = 2, .length = 22, },
+ /* 5 bytes at offset 8 hold BBT markers */
+ /* 8 bytes at offset 16 hold JFFS2 clean markers */
+ },
+};
+
+static int __init nand_davinci_probe(struct platform_device *pdev)
+{
+ struct davinci_nand_pdata *pdata = pdev->dev.platform_data;
+ struct davinci_nand_info *info;
+ struct resource *res1;
+ struct resource *res2;
+ void __iomem *vaddr;
+ void __iomem *base;
+ int ret;
+ uint32_t val;
+ nand_ecc_modes_t ecc_mode;
+
+ /* insist on board-specific configuration */
+ if (!pdata)
+ return -ENODEV;
+
+ /* which external chipselect will we be managing? */
+ if (pdev->id < 0 || pdev->id > 3)
+ return -ENODEV;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "unable to allocate memory\n");
+ ret = -ENOMEM;
+ goto err_nomem;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res1 || !res2) {
+ dev_err(&pdev->dev, "resource missing\n");
+ ret = -EINVAL;
+ goto err_nomem;
+ }
+
+ vaddr = ioremap(res1->start, resource_size(res1));
+ base = ioremap(res2->start, resource_size(res2));
+ if (!vaddr || !base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -EINVAL;
+ goto err_ioremap;
+ }
+
+ info->dev = &pdev->dev;
+ info->base = base;
+ info->vaddr = vaddr;
+
+ info->mtd.priv = &info->chip;
+ info->mtd.name = dev_name(&pdev->dev);
+ info->mtd.owner = THIS_MODULE;
+
+ info->mtd.dev.parent = &pdev->dev;
+
+ info->chip.IO_ADDR_R = vaddr;
+ info->chip.IO_ADDR_W = vaddr;
+ info->chip.chip_delay = 0;
+ info->chip.select_chip = nand_davinci_select_chip;
+
+ /* options such as NAND_BBT_USE_FLASH */
+ info->chip.bbt_options = pdata->bbt_options;
+ /* options such as 16-bit widths */
+ info->chip.options = pdata->options;
+ info->chip.bbt_td = pdata->bbt_td;
+ info->chip.bbt_md = pdata->bbt_md;
+ info->timing = pdata->timing;
+
+ info->ioaddr = (uint32_t __force) vaddr;
+
+ info->current_cs = info->ioaddr;
+ info->core_chipsel = pdev->id;
+ info->mask_chipsel = pdata->mask_chipsel;
+
+ /* use nandboot-capable ALE/CLE masks by default */
+ info->mask_ale = pdata->mask_ale ? : MASK_ALE;
+ info->mask_cle = pdata->mask_cle ? : MASK_CLE;
+
+ /* Set address of hardware control function */
+ info->chip.cmd_ctrl = nand_davinci_hwcontrol;
+ info->chip.dev_ready = nand_davinci_dev_ready;
+
+ /* Speed up buffer I/O */
+ info->chip.read_buf = nand_davinci_read_buf;
+ info->chip.write_buf = nand_davinci_write_buf;
+
+ /* Use board-specific ECC config */
+ ecc_mode = pdata->ecc_mode;
+
+ ret = -EINVAL;
+ switch (ecc_mode) {
+ case NAND_ECC_NONE:
+ case NAND_ECC_SOFT:
+ pdata->ecc_bits = 0;
+ break;
+ case NAND_ECC_HW:
+ if (pdata->ecc_bits == 4) {
+ /* No sanity checks: CPUs must support this,
+ * and the chips may not use NAND_BUSWIDTH_16.
+ */
+
+ /* No sharing 4-bit hardware between chipselects yet */
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc4_busy)
+ ret = -EBUSY;
+ else
+ ecc4_busy = true;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ if (ret == -EBUSY)
+ goto err_ecc;
+
+ info->chip.ecc.calculate = nand_davinci_calculate_4bit;
+ info->chip.ecc.correct = nand_davinci_correct_4bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
+ info->chip.ecc.bytes = 10;
+ } else {
+ info->chip.ecc.calculate = nand_davinci_calculate_1bit;
+ info->chip.ecc.correct = nand_davinci_correct_1bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ info->chip.ecc.bytes = 3;
+ }
+ info->chip.ecc.size = 512;
+ info->chip.ecc.strength = pdata->ecc_bits;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_ecc;
+ }
+ info->chip.ecc.mode = ecc_mode;
+
+ info->clk = clk_get(&pdev->dev, "aemif");
+ if (IS_ERR(info->clk)) {
+ ret = PTR_ERR(info->clk);
+ dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
+ goto err_clk;
+ }
+
+ ret = clk_enable(info->clk);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
+ ret);
+ goto err_clk_enable;
+ }
+
+ /*
+ * Set up the Async configuration register in case we did not boot from
+ * NAND and the bootloader did not bother to set it up.
+ */
+ val = davinci_nand_readl(info, A1CR_OFFSET + info->core_chipsel * 4);
+
+ /* Extended Wait is not valid and Select Strobe mode is not used */
+ val &= ~(ACR_ASIZE_MASK | ACR_EW_MASK | ACR_SS_MASK);
+ if (info->chip.options & NAND_BUSWIDTH_16)
+ val |= 0x1;
+
+ davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val);
+
+ ret = 0;
+ if (info->timing)
+ ret = davinci_aemif_setup_timing(info->timing, info->base,
+ info->core_chipsel);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
+ goto err_timing;
+ }
+
+ spin_lock_irq(&davinci_nand_lock);
+
+ /* put CSxNAND into NAND mode */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val |= BIT(info->core_chipsel);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ spin_unlock_irq(&davinci_nand_lock);
+
+ /* Scan to find existence of the device(s) */
+ ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
+ goto err_scan;
+ }
+
+ /* Update ECC layout if needed ... for 1-bit HW ECC, the default
+ * is OK, but it allocates 6 bytes when only 3 are needed (for
+ * each 512 bytes). For the 4-bit HW ECC, that default is not
+ * usable: 10 bytes are needed, not 6.
+ */
+ if (pdata->ecc_bits == 4) {
+ int chunks = info->mtd.writesize / 512;
+
+ if (!chunks || info->mtd.oobsize < 16) {
+ dev_dbg(&pdev->dev, "too small\n");
+ ret = -EINVAL;
+ goto err_scan;
+ }
+
+ /* For small page chips, preserve the manufacturer's
+ * badblock marking data ... and make sure a flash BBT
+ * table marker fits in the free bytes.
+ */
+ if (chunks == 1) {
+ info->ecclayout = hwecc4_small;
+ info->ecclayout.oobfree[1].length =
+ info->mtd.oobsize - 16;
+ goto syndrome_done;
+ }
+ if (chunks == 4) {
+ info->ecclayout = hwecc4_2048;
+ info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
+ goto syndrome_done;
+ }
+
+ /* 4KiB page chips are not yet supported. The eccpos from
+ * nand_ecclayout cannot hold 80 bytes, and a change to eccpos[]
+ * would break the userspace ioctl interface with mtd-utils. Once we
+ * resolve this issue, NAND_ECC_HW_OOB_FIRST mode can be used
+ * for the 4KiB page chips.
+ *
+ * TODO: Note that nand_ecclayout has now been expanded and can
+ * hold plenty of OOB entries.
+ */
+ dev_warn(&pdev->dev, "no 4-bit ECC support yet "
+ "for 4KiB-page NAND\n");
+ ret = -EIO;
+ goto err_scan;
+
+syndrome_done:
+ info->chip.ecc.layout = &info->ecclayout;
+ }
+
+ ret = nand_scan_tail(&info->mtd);
+ if (ret < 0)
+ goto err_scan;
+
+ ret = mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
+ pdata->nr_parts);
+
+ if (ret < 0)
+ goto err_scan;
+
+ val = davinci_nand_readl(info, NRCSR_OFFSET);
+ dev_info(&pdev->dev, "controller rev. %d.%d\n",
+ (val >> 8) & 0xff, val & 0xff);
+
+ return 0;
+
+err_scan:
+err_timing:
+ clk_disable(info->clk);
+
+err_clk_enable:
+ clk_put(info->clk);
+
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc_mode == NAND_ECC_HW_SYNDROME)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
+err_ecc:
+err_clk:
+err_ioremap:
+ if (base)
+ iounmap(base);
+ if (vaddr)
+ iounmap(vaddr);
+
+err_nomem:
+ kfree(info);
+ return ret;
+}
+
+static int __exit nand_davinci_remove(struct platform_device *pdev)
+{
+ struct davinci_nand_info *info = platform_get_drvdata(pdev);
+
+ spin_lock_irq(&davinci_nand_lock);
+ if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ iounmap(info->base);
+ iounmap(info->vaddr);
+
+ nand_release(&info->mtd);
+
+ clk_disable(info->clk);
+ clk_put(info->clk);
+
+ kfree(info);
+
+ return 0;
+}
+
+static struct platform_driver nand_davinci_driver = {
+ .remove = __exit_p(nand_davinci_remove),
+ .driver = {
+ .name = "davinci_nand",
+ },
+};
+MODULE_ALIAS("platform:davinci_nand");
+
+static int __init nand_davinci_init(void)
+{
+ return platform_driver_probe(&nand_davinci_driver, nand_davinci_probe);
+}
+module_init(nand_davinci_init);
+
+static void __exit nand_davinci_exit(void)
+{
+ platform_driver_unregister(&nand_davinci_driver);
+}
+module_exit(nand_davinci_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("Davinci NAND flash driver");
+
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
new file mode 100644
index 00000000..a9e57d68
--- /dev/null
+++ b/drivers/mtd/nand/denali.c
@@ -0,0 +1,1718 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/mtd/mtd.h>
+#include <linux/module.h>
+
+#include "denali.h"
+
+MODULE_LICENSE("GPL");
+
+/* We define a module parameter that allows the user to override
+ * the timing mode reported by the hardware and decide which ONFI
+ * timing mode should be used.
+ */
+#define NAND_DEFAULT_TIMINGS -1
+
+static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
+module_param(onfi_timing_mode, int, S_IRUGO);
+MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
+ " -1 indicates use default timings");
+
+#define DENALI_NAND_NAME "denali-nand"
+
+/* We define a macro here that combines all interrupts this driver uses into
+ * a single constant value, for convenience. */
+#define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \
+ INTR_STATUS__ECC_TRANSACTION_DONE | \
+ INTR_STATUS__ECC_ERR | \
+ INTR_STATUS__PROGRAM_FAIL | \
+ INTR_STATUS__LOAD_COMP | \
+ INTR_STATUS__PROGRAM_COMP | \
+ INTR_STATUS__TIME_OUT | \
+ INTR_STATUS__ERASE_FAIL | \
+ INTR_STATUS__RST_COMP | \
+ INTR_STATUS__ERASE_COMP)
+
+/* indicates whether the internal value for the flash bank is valid */
+#define CHIP_SELECT_INVALID -1
+
+#define SUPPORT_8BITECC 1
+
+/* This macro divides two integers and rounds fractional values up
+ * to the nearest integer value. */
+#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
+
+/* this macro allows us to convert from an MTD structure to our own
+ * device context (denali) structure.
+ */
+#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)
+
+/* These constants are defined by the driver to enable common driver
+ * configuration options. */
+#define SPARE_ACCESS 0x41
+#define MAIN_ACCESS 0x42
+#define MAIN_SPARE_ACCESS 0x43
+
+#define DENALI_READ 0
+#define DENALI_WRITE 0x100
+
+/* types of device accesses. We can issue commands and get status */
+#define COMMAND_CYCLE 0
+#define ADDR_CYCLE 1
+#define STATUS_CYCLE 2
+
+/* this is a helper macro that allows us to
+ * format the bank into the proper bits for the controller */
+#define BANK(x) ((x) << 24)
+
+/* List of platforms this NAND controller has been integrated into */
+static const struct pci_device_id denali_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
+ { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
+ { /* end: all zeroes */ }
+};
+
+/* forward declarations */
+static void clear_interrupts(struct denali_nand_info *denali);
+static uint32_t wait_for_irq(struct denali_nand_info *denali,
+ uint32_t irq_mask);
+static void denali_irq_enable(struct denali_nand_info *denali,
+ uint32_t int_mask);
+static uint32_t read_interrupt_status(struct denali_nand_info *denali);
+
+/* Certain operations for the denali NAND controller use
+ * an indexed mode to read/write data. The operation is
+ * performed by writing the address value of the command
+ * to the device memory followed by the data. This function
+ * abstracts this common operation.
+*/
+static void index_addr(struct denali_nand_info *denali,
+ uint32_t address, uint32_t data)
+{
+ iowrite32(address, denali->flash_mem);
+ iowrite32(data, denali->flash_mem + 0x10);
+}
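+/* For example, find_valid_banks() below issues a READ ID sequence with
+ * index_addr(denali, MODE_11 | BANK(i) | 0, 0x90) for the command cycle
+ * and index_addr(denali, MODE_11 | BANK(i) | 1, 0) for the address cycle,
+ * then fetches the ID bytes with index_addr_read_data(). */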
+
+/* Perform an indexed read of the device */
+static void index_addr_read_data(struct denali_nand_info *denali,
+ uint32_t address, uint32_t *pdata)
+{
+ iowrite32(address, denali->flash_mem);
+ *pdata = ioread32(denali->flash_mem + 0x10);
+}
+
+/* We need to buffer some data for some of the NAND core routines.
+ * These helpers manage buffering of that data. */
+static void reset_buf(struct denali_nand_info *denali)
+{
+ denali->buf.head = denali->buf.tail = 0;
+}
+
+static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
+{
+ BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));
+ denali->buf.buf[denali->buf.tail++] = byte;
+}
+
+/* reads the status of the device */
+static void read_status(struct denali_nand_info *denali)
+{
+ uint32_t cmd = 0x0;
+
+ /* initialize the data buffer to store status */
+ reset_buf(denali);
+
+ cmd = ioread32(denali->flash_reg + WRITE_PROTECT);
+ if (cmd)
+ write_byte_to_buf(denali, NAND_STATUS_WP);
+ else
+ write_byte_to_buf(denali, 0);
+}
+
+/* resets a specific device connected to the core */
+static void reset_bank(struct denali_nand_info *denali)
+{
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS__RST_COMP |
+ INTR_STATUS__TIME_OUT;
+
+ clear_interrupts(denali);
+
+ iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
+
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status & INTR_STATUS__TIME_OUT)
+ dev_err(denali->dev, "reset bank failed.\n");
+}
+
+/* Reset the flash controller */
+static uint16_t denali_nand_reset(struct denali_nand_info *denali)
+{
+ uint32_t i;
+
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0 ; i < denali->max_banks; i++)
+ iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
+ denali->flash_reg + INTR_STATUS(i));
+
+ for (i = 0 ; i < denali->max_banks; i++) {
+ iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
+ while (!(ioread32(denali->flash_reg +
+ INTR_STATUS(i)) &
+ (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
+ cpu_relax();
+ if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
+ INTR_STATUS__TIME_OUT)
+ dev_dbg(denali->dev,
+ "NAND Reset operation timed out on bank %d\n", i);
+ }
+
+ for (i = 0; i < denali->max_banks; i++)
+ iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
+ denali->flash_reg + INTR_STATUS(i));
+
+ return PASS;
+}
+
+/* this routine calculates the ONFI timing values for a given mode and
+ * programs the clocking register accordingly. The mode is determined by
+ * the get_onfi_nand_para routine.
+ */
+static void nand_onfi_timing_set(struct denali_nand_info *denali,
+ uint16_t mode)
+{
+ uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
+ uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
+ uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
+ uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
+ uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
+ uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
+ uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
+ uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
+ uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
+ uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
+ uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
+ uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
+
+ uint16_t TclsRising = 1;
+ uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
+ uint16_t dv_window = 0;
+ uint16_t en_lo, en_hi;
+ uint16_t acc_clks;
+ uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
+
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ en_lo = CEIL_DIV(Trp[mode], CLK_X);
+ en_hi = CEIL_DIV(Treh[mode], CLK_X);
+#if ONFI_BLOOM_TIME
+ if ((en_hi * CLK_X) < (Treh[mode] + 2))
+ en_hi++;
+#endif
+
+ if ((en_lo + en_hi) * CLK_X < Trc[mode])
+ en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
+
+ if ((en_lo + en_hi) < CLK_MULTI)
+ en_lo += CLK_MULTI - en_lo - en_hi;
+
+ while (dv_window < 8) {
+ data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
+
+ data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
+
+ data_invalid =
+ data_invalid_rhoh <
+ data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
+
+ dv_window = data_invalid - Trea[mode];
+
+ if (dv_window < 8)
+ en_lo++;
+ }
+
+ acc_clks = CEIL_DIV(Trea[mode], CLK_X);
+
+ while (((acc_clks * CLK_X) - Trea[mode]) < 3)
+ acc_clks++;
+
+ if ((data_invalid - acc_clks * CLK_X) < 2)
+ dev_warn(denali->dev, "%s, Line %d: Warning!\n",
+ __FILE__, __LINE__);
+
+ addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
+ re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
+ re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
+ we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
+ cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
+ if (!TclsRising)
+ cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
+ if (cs_cnt == 0)
+ cs_cnt = 1;
+
+ if (Tcea[mode]) {
+ while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
+ cs_cnt++;
+ }
+
+#if MODE5_WORKAROUND
+ if (mode == 5)
+ acc_clks = 5;
+#endif
+
+ /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
+ if ((ioread32(denali->flash_reg + MANUFACTURER_ID) == 0) &&
+ (ioread32(denali->flash_reg + DEVICE_ID) == 0x88))
+ acc_clks = 6;
+
+ iowrite32(acc_clks, denali->flash_reg + ACC_CLKS);
+ iowrite32(re_2_we, denali->flash_reg + RE_2_WE);
+ iowrite32(re_2_re, denali->flash_reg + RE_2_RE);
+ iowrite32(we_2_re, denali->flash_reg + WE_2_RE);
+ iowrite32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
+ iowrite32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
+ iowrite32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
+ iowrite32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
+}
+
+/* queries the NAND device to see what ONFI modes it supports. */
+static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
+{
+ int i;
+ /* we need not do a reset here because the driver has already
+ * reset all the banks.
+ */
+ if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
+ ONFI_TIMING_MODE__VALUE))
+ return FAIL;
+
+ for (i = 5; i > 0; i--) {
+ if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
+ (0x01 << i))
+ break;
+ }
+
+ nand_onfi_timing_set(denali, i);
+
+ /* All the ONFI devices we know of support the page cache read/write
+ * feature, so the pipeline_rw_ahead feature could be enabled here: */
+ /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
+ /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */
+
+ return PASS;
+}
+
+static void get_samsung_nand_para(struct denali_nand_info *denali,
+ uint8_t device_id)
+{
+ if (device_id == 0xd3) { /* Samsung K9WAG08U1A */
+ /* Set timing register values according to datasheet */
+ iowrite32(5, denali->flash_reg + ACC_CLKS);
+ iowrite32(20, denali->flash_reg + RE_2_WE);
+ iowrite32(12, denali->flash_reg + WE_2_RE);
+ iowrite32(14, denali->flash_reg + ADDR_2_DATA);
+ iowrite32(3, denali->flash_reg + RDWR_EN_LO_CNT);
+ iowrite32(2, denali->flash_reg + RDWR_EN_HI_CNT);
+ iowrite32(2, denali->flash_reg + CS_SETUP_CNT);
+ }
+}
+
+static void get_toshiba_nand_para(struct denali_nand_info *denali)
+{
+ uint32_t tmp;
+
+ /* Workaround for a controller bug that reports a wrong spare area
+ * size for some kinds of Toshiba NAND devices */
+ if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
+ (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
+ iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
+ ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ iowrite32(tmp,
+ denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+#if SUPPORT_15BITECC
+ iowrite32(15, denali->flash_reg + ECC_CORRECTION);
+#elif SUPPORT_8BITECC
+ iowrite32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+ }
+}
+
+static void get_hynix_nand_para(struct denali_nand_info *denali,
+ uint8_t device_id)
+{
+ uint32_t main_size, spare_size;
+
+ switch (device_id) {
+ case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
+ case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
+ iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK);
+ iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
+ iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ main_size = 4096 *
+ ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ spare_size = 224 *
+ ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ iowrite32(main_size,
+ denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
+ iowrite32(spare_size,
+ denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+ iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
+#if SUPPORT_15BITECC
+ iowrite32(15, denali->flash_reg + ECC_CORRECTION);
+#elif SUPPORT_8BITECC
+ iowrite32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+ break;
+ default:
+ dev_warn(denali->dev,
+ "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
+ "Will use default parameter values instead.\n",
+ device_id);
+ }
+}
+
+/* determines how many NAND chips are connected to the controller. Note for
+ * Intel CE4100 devices we don't support more than one device.
+ */
+static void find_valid_banks(struct denali_nand_info *denali)
+{
+ uint32_t id[denali->max_banks];
+ int i;
+
+ denali->total_used_banks = 1;
+ for (i = 0; i < denali->max_banks; i++) {
+ index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
+ index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
+ index_addr_read_data(denali,
+ (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
+
+ dev_dbg(denali->dev,
+ "Return 1st ID for bank[%d]: %x\n", i, id[i]);
+
+ if (i == 0) {
+ if (!(id[i] & 0x0ff))
+ break; /* WTF? */
+ } else {
+ if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
+ denali->total_used_banks++;
+ else
+ break;
+ }
+ }
+
+ if (denali->platform == INTEL_CE4100) {
+ /* Platform limitations of the CE4100 device limit
+ * users to a single chip solution for NAND.
+ * Multichip support is not enabled.
+ */
+ if (denali->total_used_banks != 1) {
+ dev_err(denali->dev,
+ "Sorry, Intel CE4100 only supports "
+ "a single NAND device.\n");
+ BUG();
+ }
+ }
+ dev_dbg(denali->dev,
+ "denali->total_used_banks: %d\n", denali->total_used_banks);
+}
+
+/*
+ * Use the configuration feature register to determine the maximum number of
+ * banks that the hardware supports.
+ */
+static void detect_max_banks(struct denali_nand_info *denali)
+{
+ uint32_t features = ioread32(denali->flash_reg + FEATURES);
+
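+ /* e.g. an N_BANKS field value of 0 yields 2 banks, 1 yields 4 and
+ * 2 yields 8. */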
+ denali->max_banks = 2 << (features & FEATURES__N_BANKS);
+}
+
+static void detect_partition_feature(struct denali_nand_info *denali)
+{
+ /* On the MRST platform, denali->fwblks represents the number of
+ * blocks occupied by firmware. The firmware sits in a protected
+ * partition that the MTD driver has no permission to access, so
+ * let the driver know how many blocks it must not touch.
+ */
+ if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
+ if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
+ PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
+ denali->fwblks =
+ ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
+ MIN_MAX_BANK__MIN_VALUE) *
+ denali->blksperchip)
+ +
+ (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
+ MIN_BLK_ADDR__VALUE);
+ } else
+ denali->fwblks = SPECTRA_START_BLOCK;
+ } else
+ denali->fwblks = SPECTRA_START_BLOCK;
+}
+
+static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
+{
+ uint16_t status = PASS;
+ uint32_t id_bytes[5], addr;
+ uint8_t i, maf_id, device_id;
+
+ dev_dbg(denali->dev,
+ "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ /* Use read id method to get device ID and other
+ * params. For some NAND chips, controller can't
+ * report the correct device ID by reading from
+ * DEVICE_ID register
+ * */
+ addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
+ index_addr(denali, (uint32_t)addr | 0, 0x90);
+ index_addr(denali, (uint32_t)addr | 1, 0);
+ for (i = 0; i < 5; i++)
+ index_addr_read_data(denali, addr | 2, &id_bytes[i]);
+ maf_id = id_bytes[0];
+ device_id = id_bytes[1];
+
+ if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
+ ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
+ if (FAIL == get_onfi_nand_para(denali))
+ return FAIL;
+ } else if (maf_id == 0xEC) { /* Samsung NAND */
+ get_samsung_nand_para(denali, device_id);
+ } else if (maf_id == 0x98) { /* Toshiba NAND */
+ get_toshiba_nand_para(denali);
+ } else if (maf_id == 0xAD) { /* Hynix NAND */
+ get_hynix_nand_para(denali, device_id);
+ }
+
+ dev_info(denali->dev,
+ "Dump timing register values:"
+ "acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
+ "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
+ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
+ ioread32(denali->flash_reg + ACC_CLKS),
+ ioread32(denali->flash_reg + RE_2_WE),
+ ioread32(denali->flash_reg + RE_2_RE),
+ ioread32(denali->flash_reg + WE_2_RE),
+ ioread32(denali->flash_reg + ADDR_2_DATA),
+ ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
+ ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
+ ioread32(denali->flash_reg + CS_SETUP_CNT));
+
+ find_valid_banks(denali);
+
+ detect_partition_feature(denali);
+
+ /* If the user specified to override the default timings
+ * with a specific ONFI mode, we apply those changes here.
+ */
+ if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
+ nand_onfi_timing_set(denali, onfi_timing_mode);
+
+ return status;
+}
+
+static void denali_set_intr_modes(struct denali_nand_info *denali,
+ uint16_t INT_ENABLE)
+{
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (INT_ENABLE)
+ iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
+ else
+ iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
+}
+
+/* validation function to verify that the controlling software is making
+ * a valid request
+ */
+static inline bool is_flash_bank_valid(int flash_bank)
+{
+ return (flash_bank >= 0 && flash_bank < 4);
+}
+
+static void denali_irq_init(struct denali_nand_info *denali)
+{
+ uint32_t int_mask = 0;
+ int i;
+
+ /* Disable global interrupts */
+ denali_set_intr_modes(denali, false);
+
+ int_mask = DENALI_IRQ_ALL;
+
+ /* Clear all status bits */
+ for (i = 0; i < denali->max_banks; ++i)
+ iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
+
+ denali_irq_enable(denali, int_mask);
+}
+
+static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
+{
+ denali_set_intr_modes(denali, false);
+ free_irq(irqnum, denali);
+}
+
+static void denali_irq_enable(struct denali_nand_info *denali,
+ uint32_t int_mask)
+{
+ int i;
+
+ for (i = 0; i < denali->max_banks; ++i)
+ iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
+}
+
+/* This function only reports interrupts that this driver cares about.
+ * This is to reduce the overhead of servicing interrupts
+ */
+static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
+{
+ return read_interrupt_status(denali) & DENALI_IRQ_ALL;
+}
+
+/* Interrupts are cleared by writing a 1 to the appropriate status bit */
+static inline void clear_interrupt(struct denali_nand_info *denali,
+ uint32_t irq_mask)
+{
+ uint32_t intr_status_reg = 0;
+
+ intr_status_reg = INTR_STATUS(denali->flash_bank);
+
+ iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
+}
+
+static void clear_interrupts(struct denali_nand_info *denali)
+{
+ uint32_t status = 0x0;
+ spin_lock_irq(&denali->irq_lock);
+
+ status = read_interrupt_status(denali);
+ clear_interrupt(denali, status);
+
+ denali->irq_status = 0x0;
+ spin_unlock_irq(&denali->irq_lock);
+}
+
+static uint32_t read_interrupt_status(struct denali_nand_info *denali)
+{
+ uint32_t intr_status_reg = 0;
+
+ intr_status_reg = INTR_STATUS(denali->flash_bank);
+
+ return ioread32(denali->flash_reg + intr_status_reg);
+}
+
+/* This is the interrupt service routine. It handles all interrupts
+ * sent to this device. Note that on CE4100, this is a shared
+ * interrupt.
+ */
+static irqreturn_t denali_isr(int irq, void *dev_id)
+{
+ struct denali_nand_info *denali = dev_id;
+ uint32_t irq_status = 0x0;
+ irqreturn_t result = IRQ_NONE;
+
+ spin_lock(&denali->irq_lock);
+
+ /* check to see if a valid NAND chip has
+ * been selected.
+ */
+ if (is_flash_bank_valid(denali->flash_bank)) {
+ /* check to see if controller generated
+ * the interrupt, since this is a shared interrupt */
+ irq_status = denali_irq_detected(denali);
+ if (irq_status != 0) {
+ /* handle interrupt */
+ /* first acknowledge it */
+ clear_interrupt(denali, irq_status);
+ /* store the status in the device context for someone
+ to read */
+ denali->irq_status |= irq_status;
+ /* notify anyone who cares that it happened */
+ complete(&denali->complete);
+ /* tell the OS that we've handled this */
+ result = IRQ_HANDLED;
+ }
+ }
+ spin_unlock(&denali->irq_lock);
+ return result;
+}
+
+static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
+{
+ unsigned long comp_res = 0;
+ uint32_t intr_status = 0;
+ bool retry = false;
+ unsigned long timeout = msecs_to_jiffies(1000);
+
+ do {
+ comp_res =
+ wait_for_completion_timeout(&denali->complete, timeout);
+ spin_lock_irq(&denali->irq_lock);
+ intr_status = denali->irq_status;
+
+ if (intr_status & irq_mask) {
+ denali->irq_status &= ~irq_mask;
+ spin_unlock_irq(&denali->irq_lock);
+ /* our interrupt was detected */
+ break;
+ } else {
+ /* these are not the interrupts you are looking for -
+ * need to wait again */
+ spin_unlock_irq(&denali->irq_lock);
+ retry = true;
+ }
+ } while (comp_res != 0);
+
+ if (comp_res == 0) {
+ /* timeout */
+ printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
+ intr_status, irq_mask);
+
+ intr_status = 0;
+ }
+ return intr_status;
+}
+
+/* This helper function sets up the registers for ECC and for whether or not
+ * the spare area will be transferred. */
+static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
+ bool transfer_spare)
+{
+ int ecc_en_flag = 0, transfer_spare_flag = 0;
+
+ /* set ECC, transfer spare bits if needed */
+ ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
+ transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
+
+ /* Enable spare area/ECC per user's request. */
+ iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
+ iowrite32(transfer_spare_flag,
+ denali->flash_reg + TRANSFER_SPARE_REG);
+}
+
+/* sends a pipeline command operation to the controller. See the Denali NAND
+ * controller's user guide for more information (section 4.2.3.6).
+ */
+static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
+ bool ecc_en,
+ bool transfer_spare,
+ int access_type,
+ int op)
+{
+ int status = PASS;
+ uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
+ irq_mask = 0;
+
+ if (op == DENALI_READ)
+ irq_mask = INTR_STATUS__LOAD_COMP;
+ else if (op == DENALI_WRITE)
+ irq_mask = 0;
+ else
+ BUG();
+
+ setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
+
+ /* clear interrupts */
+ clear_interrupts(denali);
+
+ addr = BANK(denali->flash_bank) | denali->page;
+
+ if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
+ cmd = MODE_01 | addr;
+ iowrite32(cmd, denali->flash_mem);
+ } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
+ /* read spare area */
+ cmd = MODE_10 | addr;
+ index_addr(denali, (uint32_t)cmd, access_type);
+
+ cmd = MODE_01 | addr;
+ iowrite32(cmd, denali->flash_mem);
+ } else if (op == DENALI_READ) {
+ /* setup page read request for access type */
+ cmd = MODE_10 | addr;
+ index_addr(denali, (uint32_t)cmd, access_type);
+
+ /* page 33 of the NAND controller spec indicates we should not
+ use the pipeline commands in Spare area only mode. So we
+ don't.
+ */
+ if (access_type == SPARE_ACCESS) {
+ cmd = MODE_01 | addr;
+ iowrite32(cmd, denali->flash_mem);
+ } else {
+ index_addr(denali, (uint32_t)cmd,
+ 0x2000 | op | page_count);
+
+ /* wait for command to be accepted;
+ * we can always use the status0 bit as the
+ * mask is identical for each
+ * bank. */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0) {
+ dev_err(denali->dev,
+ "cmd, page, addr on timeout "
+ "(0x%x, 0x%x, 0x%x)\n",
+ cmd, denali->page, addr);
+ status = FAIL;
+ } else {
+ cmd = MODE_01 | addr;
+ iowrite32(cmd, denali->flash_mem);
+ }
+ }
+ }
+ return status;
+}
+
+/* helper function that simply writes a buffer to the flash */
+static int write_data_to_flash_mem(struct denali_nand_info *denali,
+ const uint8_t *buf,
+ int len)
+{
+ uint32_t i = 0, *buf32;
+
+ /* verify that the len is a multiple of 4. see comment in
+ * read_data_from_flash_mem() */
+ BUG_ON((len % 4) != 0);
+
+ /* write the data to the flash memory */
+ buf32 = (uint32_t *)buf;
+ for (i = 0; i < len / 4; i++)
+ iowrite32(*buf32++, denali->flash_mem + 0x10);
+ return i*4; /* intent is to return the number of bytes written */
+}
+
+/* helper function that simply reads a buffer from the flash */
+static int read_data_from_flash_mem(struct denali_nand_info *denali,
+ uint8_t *buf,
+ int len)
+{
+ uint32_t i = 0, *buf32;
+
+ /* we assume that len will be a multiple of 4, if not
+ * it would be nice to know about it ASAP rather than
+ * have random failures...
+ * This assumption is based on the fact that this
+ * function is designed to be used to read flash pages,
+ * which are typically multiples of 4...
+ */
+
+ BUG_ON((len % 4) != 0);
+
+ /* transfer the data from the flash */
+ buf32 = (uint32_t *)buf;
+ for (i = 0; i < len / 4; i++)
+ *buf32++ = ioread32(denali->flash_mem + 0x10);
+ return i*4; /* intent is to return the number of bytes read */
+}
+
+/* writes OOB data to the device */
+static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
+ INTR_STATUS__PROGRAM_FAIL;
+ int status = 0;
+
+ denali->page = page;
+
+ if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
+ DENALI_WRITE) == PASS) {
+ write_data_to_flash_mem(denali, buf, mtd->oobsize);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0) {
+ dev_err(denali->dev, "OOB write failed\n");
+ status = -EIO;
+ }
+ } else {
+ dev_err(denali->dev, "unable to send pipeline command\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+/* reads OOB data from the device */
+static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t irq_mask = INTR_STATUS__LOAD_COMP,
+ irq_status = 0, addr = 0x0, cmd = 0x0;
+
+ denali->page = page;
+
+ if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
+ DENALI_READ) == PASS) {
+ read_data_from_flash_mem(denali, buf, mtd->oobsize);
+
+ /* wait for the command to be accepted; the bank-0 status bits
+ * can always be used because the irq mask is identical for
+ * every bank. */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0)
+ dev_err(denali->dev, "page on OOB timeout %d\n",
+ denali->page);
+
+ /* We set the device back to MAIN_ACCESS here as I observed
+ * instability with the controller if you do a block erase
+ * and the last transaction was a SPARE_ACCESS. Block erase
+ * is reliable (according to the MTD test infrastructure)
+ * if you are in MAIN_ACCESS.
+ */
+ addr = BANK(denali->flash_bank) | denali->page;
+ cmd = MODE_10 | addr;
+ index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);
+ }
+}
+
+/* examines a buffer to see whether it contains only 0xff bytes,
+ * i.e. whether it belongs to an erased region of flash.
+ */
+bool is_erased(uint8_t *buf, int len)
+{
+ int i = 0;
+ for (i = 0; i < len; i++)
+ if (buf[i] != 0xFF)
+ return false;
+ return true;
+}
+#define ECC_SECTOR_SIZE 512
+
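+/* helpers to extract fields from the ECC_ERROR_ADDRESS and
+ * ERR_CORRECTION_INFO registers: the failing sector and byte offset, the
+ * correction value, whether the error is correctable, the device the error
+ * occurred on, and whether this is the last logged error.
+ */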
+#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
+#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
+#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
+#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE))
+#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
+#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
+
+static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
+ uint32_t irq_status)
+{
+ bool check_erased_page = false;
+
+ if (irq_status & INTR_STATUS__ECC_ERR) {
+ /* walk the controller's ECC error log and correct each reported error */
+ uint32_t err_address = 0, err_correction_info = 0;
+ uint32_t err_byte = 0, err_sector = 0, err_device = 0;
+ uint32_t err_correction_value = 0;
+ denali_set_intr_modes(denali, false);
+
+ do {
+ err_address = ioread32(denali->flash_reg +
+ ECC_ERROR_ADDRESS);
+ err_sector = ECC_SECTOR(err_address);
+ err_byte = ECC_BYTE(err_address);
+
+ err_correction_info = ioread32(denali->flash_reg +
+ ERR_CORRECTION_INFO);
+ err_correction_value =
+ ECC_CORRECTION_VALUE(err_correction_info);
+ err_device = ECC_ERR_DEVICE(err_correction_info);
+
+ if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
+ /* If err_byte is larger than ECC_SECTOR_SIZE, the
+ * error is in the OOB area and there is nothing
+ * for us to correct. err_device identifies which
+ * NAND device the error occurred on when more
+ * than one NAND is connected.
+ * */
+ if (err_byte < ECC_SECTOR_SIZE) {
+ int offset;
+ offset = (err_sector *
+ ECC_SECTOR_SIZE +
+ err_byte) *
+ denali->devnum +
+ err_device;
+ /* correct the ECC error */
+ buf[offset] ^= err_correction_value;
+ denali->mtd.ecc_stats.corrected++;
+ }
+ } else {
+ /* if the error is not correctable, we need to
+ * check whether the page is actually erased;
+ * if so, it is not a real ECC error.
+ * */
+ check_erased_page = true;
+ }
+ } while (!ECC_LAST_ERR(err_correction_info));
+ /* Once all ECC errors have been handled, the controller
+ * triggers an ECC_TRANSACTION_DONE interrupt, so busy-wait
+ * for that interrupt here.
+ * */
+ while (!(read_interrupt_status(denali) &
+ INTR_STATUS__ECC_TRANSACTION_DONE))
+ cpu_relax();
+ clear_interrupts(denali);
+ denali_set_intr_modes(denali, true);
+ }
+ return check_erased_page;
+}
+
+/* programs the controller to enable or disable DMA transfers */
+static void denali_enable_dma(struct denali_nand_info *denali, bool en)
+{
+ uint32_t reg_val = 0x0;
+
+ if (en)
+ reg_val = DMA_ENABLE__FLAG;
+
+ iowrite32(reg_val, denali->flash_reg + DMA_ENABLE);
+ ioread32(denali->flash_reg + DMA_ENABLE);
+}
+
+/* sets up the HW to perform the data DMA */
+static void denali_setup_dma(struct denali_nand_info *denali, int op)
+{
+ uint32_t mode = 0x0;
+ const int page_count = 1;
+ dma_addr_t addr = denali->buf.dma_buf;
+
+ mode = MODE_10 | BANK(denali->flash_bank);
+
+ /* DMA is a four step process */
+
+ /* 1. setup transfer type and # of pages */
+ index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
+
+ /* 2. set memory high address bits 23:8 */
+ index_addr(denali, mode | ((uint16_t)(addr >> 16) << 8), 0x2200);
+
+ /* 3. set memory low address bits 23:8 */
+ index_addr(denali, mode | ((uint16_t)addr << 8), 0x2300);
+
+ /* 4. interrupt when complete, burst len = 64 bytes*/
+ index_addr(denali, mode | 0x14000, 0x2400);
+}
+
+/* writes a page. The caller specifies the transfer type (raw or ECC) and
+ * this function handles the configuration details. */
+static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, bool raw_xfer)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
+ INTR_STATUS__PROGRAM_FAIL;
+
+ /* if it is a raw xfer, we want to disable ecc, and send
+ * the spare area.
+ * !raw_xfer - enable ecc
+ * raw_xfer - transfer spare
+ */
+ setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
+
+ /* copy buffer into DMA buffer */
+ memcpy(denali->buf.buf, buf, mtd->writesize);
+
+ if (raw_xfer) {
+ /* transfer the data to the spare area */
+ memcpy(denali->buf.buf + mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize);
+ }
+
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
+
+ clear_interrupts(denali);
+ denali_enable_dma(denali, true);
+
+ denali_setup_dma(denali, DENALI_WRITE);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0) {
+ dev_err(denali->dev,
+ "timeout on write_page (type = %d)\n",
+ raw_xfer);
+ denali->status =
+ (irq_status & INTR_STATUS__PROGRAM_FAIL) ?
+ NAND_STATUS_FAIL : PASS;
+ }
+
+ denali_enable_dma(denali, false);
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
+}
+
+/* NAND core entry points */
+
+/* this is the callback that the NAND core calls to write a page. Since
+ * writing a page with ECC or without is similar, all the work is done
+ * by write_page above.
+ * */
+static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ /* for regular page writes, we let HW handle all the ECC
+ * data written to the device. */
+ write_page(mtd, chip, buf, false);
+}
+
+/* This is the callback that the NAND core calls to write a page without ECC.
+ * raw access is similar to ECC page writes, so all the work is done in the
+ * write_page() function above.
+ */
+static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ /* for raw page writes, we want to disable ECC and simply write
+ whatever data is in the buffer. */
+ write_page(mtd, chip, buf, true);
+}
+
+static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return write_oob_data(mtd, chip->oob_poi, page);
+}
+
+static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ read_oob_data(mtd, chip->oob_poi, page);
+
+ return 0; /* notify NAND core to send command to
+ NAND device. */
+}
+
+static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
+ INTR_STATUS__ECC_ERR;
+ bool check_erased_page = false;
+
+ if (page != denali->page) {
+ dev_err(denali->dev, "IN %s: page %d is not"
+ " equal to denali->page %d, investigate!!",
+ __func__, page, denali->page);
+ BUG();
+ }
+
+ setup_ecc_for_xfer(denali, true, false);
+
+ denali_enable_dma(denali, true);
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
+
+ clear_interrupts(denali);
+ denali_setup_dma(denali, DENALI_READ);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
+
+ memcpy(buf, denali->buf.buf, mtd->writesize);
+
+ check_erased_page = handle_ecc(denali, buf, irq_status);
+ denali_enable_dma(denali, false);
+
+ if (check_erased_page) {
+ read_oob_data(&denali->mtd, chip->oob_poi, denali->page);
+
+ /* check ECC failures that may have occurred on erased pages */
+ if (!is_erased(buf, denali->mtd.writesize))
+ denali->mtd.ecc_stats.failed++;
+ if (!is_erased(buf, denali->mtd.oobsize))
+ denali->mtd.ecc_stats.failed++;
+ }
+ return 0;
+}
+
+static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
+
+ if (page != denali->page) {
+ dev_err(denali->dev, "IN %s: page %d is not"
+ " equal to denali->page %d, investigate!!",
+ __func__, page, denali->page);
+ BUG();
+ }
+
+ setup_ecc_for_xfer(denali, false, true);
+ denali_enable_dma(denali, true);
+
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
+
+ clear_interrupts(denali);
+ denali_setup_dma(denali, DENALI_READ);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
+
+ denali_enable_dma(denali, false);
+
+ memcpy(buf, denali->buf.buf, mtd->writesize);
+ memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
+
+ return 0;
+}
+
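+/* returns the next byte from the driver's internal read buffer, which is
+ * filled by denali_cmdfunc (e.g. for the READID/PARAM/STATUS commands) */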
+static uint8_t denali_read_byte(struct mtd_info *mtd)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint8_t result = 0xff;
+
+ if (denali->buf.head < denali->buf.tail)
+ result = denali->buf.buf[denali->buf.head++];
+
+ return result;
+}
+
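+/* records which bank (chip select) subsequent controller operations target */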
+static void denali_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ spin_lock_irq(&denali->irq_lock);
+ denali->flash_bank = chip;
+ spin_unlock_irq(&denali->irq_lock);
+}
+
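+/* returns, and clears, the status recorded by the last program/erase
+ * operation */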
+static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ int status = denali->status;
+ denali->status = 0;
+
+ return status;
+}
+
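+/* erases the block containing the given page; the result is reported to the
+ * NAND core later through denali_waitfunc() */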
+static void denali_erase(struct mtd_info *mtd, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ uint32_t cmd = 0x0, irq_status = 0;
+
+ /* clear interrupts */
+ clear_interrupts(denali);
+
+ /* setup page read request for access type */
+ cmd = MODE_10 | BANK(denali->flash_bank) | page;
+ index_addr(denali, (uint32_t)cmd, 0x1);
+
+ /* wait for erase to complete or failure to occur */
+ irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
+ INTR_STATUS__ERASE_FAIL);
+
+ denali->status = (irq_status & INTR_STATUS__ERASE_FAIL) ?
+ NAND_STATUS_FAIL : PASS;
+}
+
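+/* handles the low-level commands issued directly by the NAND core:
+ * status/ID reads, page address setup, reset, etc. */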
+static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
+ int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t addr, id;
+ int i;
+
+ switch (cmd) {
+ case NAND_CMD_PAGEPROG:
+ break;
+ case NAND_CMD_STATUS:
+ read_status(denali);
+ break;
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM:
+ reset_buf(denali);
+ /* The manufacturer ID read back from the register is sometimes
+ * wrong, e.g. on some Micron MT29F32G08QAA MLC NAND chips, so
+ * send a READID command to the NAND device instead.
+ * */
+ addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
+ index_addr(denali, (uint32_t)addr | 0, 0x90);
+ index_addr(denali, (uint32_t)addr | 1, 0);
+ for (i = 0; i < 5; i++) {
+ index_addr_read_data(denali,
+ (uint32_t)addr | 2,
+ &id);
+ write_byte_to_buf(denali, id);
+ }
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_SEQIN:
+ denali->page = page;
+ break;
+ case NAND_CMD_RESET:
+ reset_bank(denali);
+ break;
+ case NAND_CMD_READOOB:
+ /* TODO: Read OOB data */
+ break;
+ default:
+ printk(KERN_ERR ": unsupported command"
+ " received 0x%x\n", cmd);
+ break;
+ }
+}
+
+/* stubs for ECC functions not used by the NAND core */
+static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
+ uint8_t *ecc_code)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ dev_err(denali->dev,
+ "denali_ecc_calculate called unexpectedly\n");
+ BUG();
+ return -EIO;
+}
+
+static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ dev_err(denali->dev,
+ "denali_ecc_correct called unexpectedly\n");
+ BUG();
+ return -EIO;
+}
+
+static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ dev_err(denali->dev,
+ "denali_ecc_hwctl called unexpectedly\n");
+ BUG();
+}
+/* end NAND core entry points */
+
+/* Initialization code to bring the device up to a known good state */
+static void denali_hw_init(struct denali_nand_info *denali)
+{
+ /* Read back how many bytes the controller skips at the start
+ * of the OOB area before writing ECC code; firmware may have
+ * programmed this register already. If the value is 0, just
+ * leave it alone.
+ * */
+ denali->bbtskipbytes = ioread32(denali->flash_reg +
+ SPARE_AREA_SKIP_BYTES);
+ detect_max_banks(denali);
+ denali_nand_reset(denali);
+ iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
+ iowrite32(CHIP_EN_DONT_CARE__FLAG,
+ denali->flash_reg + CHIP_ENABLE_DONT_CARE);
+
+ iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
+
+ /* these registers need to be set at init time */
+ iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
+ iowrite32(1, denali->flash_reg + ECC_ENABLE);
+ denali_nand_timing_set(denali);
+ denali_irq_init(denali);
+}
+
+/* Although the controller spec says SLC ECC is forced to 4 bits,
+ * the Denali controller in MRST only supports 15-bit and 8-bit
+ * ECC correction.
+ * */
+#define ECC_8BITS 14
+static struct nand_ecclayout nand_8bit_oob = {
+ .eccbytes = 14,
+};
+
+#define ECC_15BITS 26
+static struct nand_ecclayout nand_15bit_oob = {
+ .eccbytes = 26,
+};
+
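+/* on-flash bad block table descriptors: the main and mirror tables are
+ * located by these signature patterns stored in the OOB area of the last
+ * blocks of each chip */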
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+/* initialize driver data structures */
+void denali_drv_init(struct denali_nand_info *denali)
+{
+ denali->idx = 0;
+
+ /* setup interrupt handler */
+ /* the completion object will be used to notify
+ * the waiting thread that the interrupt has been handled */
+ init_completion(&denali->complete);
+
+ /* the spinlock will be used to synchronize the ISR
+ * with any code that might access the shared
+ * data (the interrupt status) */
+ spin_lock_init(&denali->irq_lock);
+
+ /* indicate that MTD has not selected a valid bank yet */
+ denali->flash_bank = CHIP_SELECT_INVALID;
+
+ /* initialize our irq_status variable to indicate no interrupts */
+ denali->irq_status = 0;
+}
+
+/* driver entry point */
+static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int ret = -ENODEV;
+ resource_size_t csr_base, mem_base;
+ unsigned long csr_len, mem_len;
+ struct denali_nand_info *denali;
+
+ denali = kzalloc(sizeof(*denali), GFP_KERNEL);
+ if (!denali)
+ return -ENOMEM;
+
+ ret = pci_enable_device(dev);
+ if (ret) {
+ printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
+ goto failed_alloc_memory;
+ }
+
+ if (id->driver_data == INTEL_CE4100) {
+ /* Due to a silicon limitation, we can only support
+ * ONFI timing mode 1 and below.
+ */
+ if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
+ printk(KERN_ERR "Intel CE4100 only supports"
+ " ONFI timing mode 1 or below\n");
+ ret = -EINVAL;
+ goto failed_enable_dev;
+ }
+ denali->platform = INTEL_CE4100;
+ mem_base = pci_resource_start(dev, 0);
+ mem_len = pci_resource_len(dev, 1);
+ csr_base = pci_resource_start(dev, 1);
+ csr_len = pci_resource_len(dev, 1);
+ } else {
+ denali->platform = INTEL_MRST;
+ csr_base = pci_resource_start(dev, 0);
+ csr_len = pci_resource_len(dev, 0);
+ mem_base = pci_resource_start(dev, 1);
+ mem_len = pci_resource_len(dev, 1);
+ if (!mem_len) {
+ mem_base = csr_base + csr_len;
+ mem_len = csr_len;
+ }
+ }
+
+ /* Is 32-bit DMA supported? */
+ ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ printk(KERN_ERR "Spectra: no usable DMA configuration\n");
+ goto failed_enable_dev;
+ }
+ denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
+ DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
+ dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
+ goto failed_enable_dev;
+ }
+
+ pci_set_master(dev);
+ denali->dev = &dev->dev;
+ denali->mtd.dev.parent = &dev->dev;
+
+ ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ if (ret) {
+ printk(KERN_ERR "Spectra: Unable to request memory regions\n");
+ goto failed_dma_map;
+ }
+
+ denali->flash_reg = ioremap_nocache(csr_base, csr_len);
+ if (!denali->flash_reg) {
+ printk(KERN_ERR "Spectra: Unable to remap memory region\n");
+ ret = -ENOMEM;
+ goto failed_req_regions;
+ }
+
+ denali->flash_mem = ioremap_nocache(mem_base, mem_len);
+ if (!denali->flash_mem) {
+ printk(KERN_ERR "Spectra: ioremap_nocache failed!");
+ ret = -ENOMEM;
+ goto failed_remap_reg;
+ }
+
+ denali_hw_init(denali);
+ denali_drv_init(denali);
+
+ /* denali_isr is registered only after all the hardware
+ * initialization has finished */
+ if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
+ DENALI_NAND_NAME, denali)) {
+ printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
+ ret = -ENODEV;
+ goto failed_remap_mem;
+ }
+
+ /* now that our ISR is registered, we can enable interrupts */
+ denali_set_intr_modes(denali, true);
+
+ pci_set_drvdata(dev, denali);
+
+ denali->mtd.name = "denali-nand";
+ denali->mtd.owner = THIS_MODULE;
+ denali->mtd.priv = &denali->nand;
+
+ /* register the driver with the NAND core subsystem */
+ denali->nand.select_chip = denali_select_chip;
+ denali->nand.cmdfunc = denali_cmdfunc;
+ denali->nand.read_byte = denali_read_byte;
+ denali->nand.waitfunc = denali_waitfunc;
+
+ /* scan for NAND devices attached to the controller;
+ * this is the first stage of a two-step process of registering
+ * with the NAND subsystem */
+ if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
+ ret = -ENXIO;
+ goto failed_req_irq;
+ }
+
+ /* The page sizes MTD supports vary by kernel version; validate
+ * here that this kernel can handle the device.
+ */
+ if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
+ ret = -ENODEV;
+ printk(KERN_ERR "Spectra: device size not supported by this "
+ "version of MTD.");
+ goto failed_req_irq;
+ }
+
+ /* support for multiple NAND devices:
+ * MTD knows nothing about multi-NAND setups,
+ * so we have to tell it the real page size
+ * and everything else it needs
+ */
+ denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ denali->nand.chipsize <<= (denali->devnum - 1);
+ denali->nand.page_shift += (denali->devnum - 1);
+ denali->nand.pagemask = (denali->nand.chipsize >>
+ denali->nand.page_shift) - 1;
+ denali->nand.bbt_erase_shift += (denali->devnum - 1);
+ denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
+ denali->nand.chip_shift += (denali->devnum - 1);
+ denali->mtd.writesize <<= (denali->devnum - 1);
+ denali->mtd.oobsize <<= (denali->devnum - 1);
+ denali->mtd.erasesize <<= (denali->devnum - 1);
+ denali->mtd.size = denali->nand.numchips * denali->nand.chipsize;
+ denali->bbtskipbytes *= denali->devnum;
+
+ /* second stage of the NAND scan
+ * this stage requires information regarding ECC and
+ * bad block management. */
+
+ /* Bad block management */
+ denali->nand.bbt_td = &bbt_main_descr;
+ denali->nand.bbt_md = &bbt_mirror_descr;
+
+ /* skip the scan for now until we have OOB read and write support */
+ denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
+ denali->nand.options |= NAND_SKIP_BBTSCAN;
+ denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+
+ /* The Denali controller only supports 15-bit and 8-bit ECC in
+ * MRST, so let the controller do 15-bit ECC for MLC and 8-bit
+ * ECC for SLC where possible.
+ * */
+ if (denali->nand.cellinfo & 0xc &&
+ (denali->mtd.oobsize > (denali->bbtskipbytes +
+ ECC_15BITS * (denali->mtd.writesize /
+ ECC_SECTOR_SIZE)))) {
+ /* if MLC OOB size is large enough, use 15bit ECC*/
+ denali->nand.ecc.strength = 15;
+ denali->nand.ecc.layout = &nand_15bit_oob;
+ denali->nand.ecc.bytes = ECC_15BITS;
+ iowrite32(15, denali->flash_reg + ECC_CORRECTION);
+ } else if (denali->mtd.oobsize < (denali->bbtskipbytes +
+ ECC_8BITS * (denali->mtd.writesize /
+ ECC_SECTOR_SIZE))) {
+ printk(KERN_ERR "Your NAND chip OOB is not large enough to"
+ " contain 8bit ECC correction codes");
+ goto failed_req_irq;
+ } else {
+ denali->nand.ecc.strength = 8;
+ denali->nand.ecc.layout = &nand_8bit_oob;
+ denali->nand.ecc.bytes = ECC_8BITS;
+ iowrite32(8, denali->flash_reg + ECC_CORRECTION);
+ }
+
+ denali->nand.ecc.bytes *= denali->devnum;
+ denali->nand.ecc.strength *= denali->devnum;
+ denali->nand.ecc.layout->eccbytes *=
+ denali->mtd.writesize / ECC_SECTOR_SIZE;
+ denali->nand.ecc.layout->oobfree[0].offset =
+ denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes;
+ denali->nand.ecc.layout->oobfree[0].length =
+ denali->mtd.oobsize - denali->nand.ecc.layout->eccbytes -
+ denali->bbtskipbytes;
+
+ /* Record the total number of blocks and how many blocks
+ * each NAND chip contains. blksperchip helps the driver
+ * work out how many blocks are taken up by firmware.
+ * */
+ denali->totalblks = denali->mtd.size >>
+ denali->nand.phys_erase_shift;
+ denali->blksperchip = denali->totalblks / denali->nand.numchips;
+
+ /* These functions are required by the NAND core framework; otherwise
+ * the NAND core will assert. However, we don't need them, so we stub
+ * them out. */
+ denali->nand.ecc.calculate = denali_ecc_calculate;
+ denali->nand.ecc.correct = denali_ecc_correct;
+ denali->nand.ecc.hwctl = denali_ecc_hwctl;
+
+ /* override the default read operations */
+ denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
+ denali->nand.ecc.read_page = denali_read_page;
+ denali->nand.ecc.read_page_raw = denali_read_page_raw;
+ denali->nand.ecc.write_page = denali_write_page;
+ denali->nand.ecc.write_page_raw = denali_write_page_raw;
+ denali->nand.ecc.read_oob = denali_read_oob;
+ denali->nand.ecc.write_oob = denali_write_oob;
+ denali->nand.erase_cmd = denali_erase;
+
+ if (nand_scan_tail(&denali->mtd)) {
+ ret = -ENXIO;
+ goto failed_req_irq;
+ }
+
+ ret = mtd_device_register(&denali->mtd, NULL, 0);
+ if (ret) {
+ dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
+ ret);
+ goto failed_req_irq;
+ }
+ return 0;
+
+failed_req_irq:
+ denali_irq_cleanup(dev->irq, denali);
+failed_remap_mem:
+ iounmap(denali->flash_mem);
+failed_remap_reg:
+ iounmap(denali->flash_reg);
+failed_req_regions:
+ pci_release_regions(dev);
+failed_dma_map:
+ dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+failed_enable_dev:
+ pci_disable_device(dev);
+failed_alloc_memory:
+ kfree(denali);
+ return ret;
+}
+
+/* driver exit point */
+static void denali_pci_remove(struct pci_dev *dev)
+{
+ struct denali_nand_info *denali = pci_get_drvdata(dev);
+
+ nand_release(&denali->mtd);
+
+ denali_irq_cleanup(dev->irq, denali);
+
+ iounmap(denali->flash_reg);
+ iounmap(denali->flash_mem);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ pci_set_drvdata(dev, NULL);
+ kfree(denali);
+}
+
+MODULE_DEVICE_TABLE(pci, denali_pci_ids);
+
+static struct pci_driver denali_pci_driver = {
+ .name = DENALI_NAND_NAME,
+ .id_table = denali_pci_ids,
+ .probe = denali_pci_probe,
+ .remove = denali_pci_remove,
+};
+
+static int __init denali_init(void)
+{
+ printk(KERN_INFO "Spectra MTD driver\n");
+ return pci_register_driver(&denali_pci_driver);
+}
+
+/* module exit point */
+static void __exit denali_exit(void)
+{
+ pci_unregister_driver(&denali_pci_driver);
+}
+
+module_init(denali_init);
+module_exit(denali_exit);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
new file mode 100644
index 00000000..fabb9d56
--- /dev/null
+++ b/drivers/mtd/nand/denali.h
@@ -0,0 +1,499 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009 - 2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/mtd/nand.h>
+
+#define DEVICE_RESET 0x0
+#define DEVICE_RESET__BANK0 0x0001
+#define DEVICE_RESET__BANK1 0x0002
+#define DEVICE_RESET__BANK2 0x0004
+#define DEVICE_RESET__BANK3 0x0008
+
+#define TRANSFER_SPARE_REG 0x10
+#define TRANSFER_SPARE_REG__FLAG 0x0001
+
+#define LOAD_WAIT_CNT 0x20
+#define LOAD_WAIT_CNT__VALUE 0xffff
+
+#define PROGRAM_WAIT_CNT 0x30
+#define PROGRAM_WAIT_CNT__VALUE 0xffff
+
+#define ERASE_WAIT_CNT 0x40
+#define ERASE_WAIT_CNT__VALUE 0xffff
+
+#define INT_MON_CYCCNT 0x50
+#define INT_MON_CYCCNT__VALUE 0xffff
+
+#define RB_PIN_ENABLED 0x60
+#define RB_PIN_ENABLED__BANK0 0x0001
+#define RB_PIN_ENABLED__BANK1 0x0002
+#define RB_PIN_ENABLED__BANK2 0x0004
+#define RB_PIN_ENABLED__BANK3 0x0008
+
+#define MULTIPLANE_OPERATION 0x70
+#define MULTIPLANE_OPERATION__FLAG 0x0001
+
+#define MULTIPLANE_READ_ENABLE 0x80
+#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
+
+#define COPYBACK_DISABLE 0x90
+#define COPYBACK_DISABLE__FLAG 0x0001
+
+#define CACHE_WRITE_ENABLE 0xa0
+#define CACHE_WRITE_ENABLE__FLAG 0x0001
+
+#define CACHE_READ_ENABLE 0xb0
+#define CACHE_READ_ENABLE__FLAG 0x0001
+
+#define PREFETCH_MODE 0xc0
+#define PREFETCH_MODE__PREFETCH_EN 0x0001
+#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
+
+#define CHIP_ENABLE_DONT_CARE 0xd0
+#define CHIP_EN_DONT_CARE__FLAG 0x01
+
+#define ECC_ENABLE 0xe0
+#define ECC_ENABLE__FLAG 0x0001
+
+#define GLOBAL_INT_ENABLE 0xf0
+#define GLOBAL_INT_EN_FLAG 0x01
+
+#define WE_2_RE 0x100
+#define WE_2_RE__VALUE 0x003f
+
+#define ADDR_2_DATA 0x110
+#define ADDR_2_DATA__VALUE 0x003f
+
+#define RE_2_WE 0x120
+#define RE_2_WE__VALUE 0x003f
+
+#define ACC_CLKS 0x130
+#define ACC_CLKS__VALUE 0x000f
+
+#define NUMBER_OF_PLANES 0x140
+#define NUMBER_OF_PLANES__VALUE 0x0007
+
+#define PAGES_PER_BLOCK 0x150
+#define PAGES_PER_BLOCK__VALUE 0xffff
+
+#define DEVICE_WIDTH 0x160
+#define DEVICE_WIDTH__VALUE 0x0003
+
+#define DEVICE_MAIN_AREA_SIZE 0x170
+#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
+
+#define DEVICE_SPARE_AREA_SIZE 0x180
+#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
+
+#define TWO_ROW_ADDR_CYCLES 0x190
+#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
+
+#define MULTIPLANE_ADDR_RESTRICT 0x1a0
+#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
+
+#define ECC_CORRECTION 0x1b0
+#define ECC_CORRECTION__VALUE 0x001f
+
+#define READ_MODE 0x1c0
+#define READ_MODE__VALUE 0x000f
+
+#define WRITE_MODE 0x1d0
+#define WRITE_MODE__VALUE 0x000f
+
+#define COPYBACK_MODE 0x1e0
+#define COPYBACK_MODE__VALUE 0x000f
+
+#define RDWR_EN_LO_CNT 0x1f0
+#define RDWR_EN_LO_CNT__VALUE 0x001f
+
+#define RDWR_EN_HI_CNT 0x200
+#define RDWR_EN_HI_CNT__VALUE 0x001f
+
+#define MAX_RD_DELAY 0x210
+#define MAX_RD_DELAY__VALUE 0x000f
+
+#define CS_SETUP_CNT 0x220
+#define CS_SETUP_CNT__VALUE 0x001f
+
+#define SPARE_AREA_SKIP_BYTES 0x230
+#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
+
+#define SPARE_AREA_MARKER 0x240
+#define SPARE_AREA_MARKER__VALUE 0xffff
+
+#define DEVICES_CONNECTED 0x250
+#define DEVICES_CONNECTED__VALUE 0x0007
+
+#define DIE_MASK 0x260
+#define DIE_MASK__VALUE 0x00ff
+
+#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
+#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
+
+#define WRITE_PROTECT 0x280
+#define WRITE_PROTECT__FLAG 0x0001
+
+#define RE_2_RE 0x290
+#define RE_2_RE__VALUE 0x003f
+
+#define MANUFACTURER_ID 0x300
+#define MANUFACTURER_ID__VALUE 0x00ff
+
+#define DEVICE_ID 0x310
+#define DEVICE_ID__VALUE 0x00ff
+
+#define DEVICE_PARAM_0 0x320
+#define DEVICE_PARAM_0__VALUE 0x00ff
+
+#define DEVICE_PARAM_1 0x330
+#define DEVICE_PARAM_1__VALUE 0x00ff
+
+#define DEVICE_PARAM_2 0x340
+#define DEVICE_PARAM_2__VALUE 0x00ff
+
+#define LOGICAL_PAGE_DATA_SIZE 0x350
+#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
+
+#define LOGICAL_PAGE_SPARE_SIZE 0x360
+#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
+
+#define REVISION 0x370
+#define REVISION__VALUE 0xffff
+
+#define ONFI_DEVICE_FEATURES 0x380
+#define ONFI_DEVICE_FEATURES__VALUE 0x003f
+
+#define ONFI_OPTIONAL_COMMANDS 0x390
+#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
+
+#define ONFI_TIMING_MODE 0x3a0
+#define ONFI_TIMING_MODE__VALUE 0x003f
+
+#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
+#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
+
+#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
+#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
+#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
+
+#define FEATURES 0x3f0
+#define FEATURES__N_BANKS 0x0003
+#define FEATURES__ECC_MAX_ERR 0x003c
+#define FEATURES__DMA 0x0040
+#define FEATURES__CMD_DMA 0x0080
+#define FEATURES__PARTITION 0x0100
+#define FEATURES__XDMA_SIDEBAND 0x0200
+#define FEATURES__GPREG 0x0400
+#define FEATURES__INDEX_ADDR 0x0800
+
+#define TRANSFER_MODE 0x400
+#define TRANSFER_MODE__VALUE 0x0003
+
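+/* per-bank interrupt status/enable registers, one pair every 0x50 bytes */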
+#define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50))
+#define INTR_EN(__bank) (0x420 + ((__bank) * 0x50))
+
+#define INTR_STATUS__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS__ECC_ERR 0x0002
+#define INTR_STATUS__DMA_CMD_COMP 0x0004
+#define INTR_STATUS__TIME_OUT 0x0008
+#define INTR_STATUS__PROGRAM_FAIL 0x0010
+#define INTR_STATUS__ERASE_FAIL 0x0020
+#define INTR_STATUS__LOAD_COMP 0x0040
+#define INTR_STATUS__PROGRAM_COMP 0x0080
+#define INTR_STATUS__ERASE_COMP 0x0100
+#define INTR_STATUS__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS__LOCKED_BLK 0x0400
+#define INTR_STATUS__UNSUP_CMD 0x0800
+#define INTR_STATUS__INT_ACT 0x1000
+#define INTR_STATUS__RST_COMP 0x2000
+#define INTR_STATUS__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS__PAGE_XFER_INC 0x8000
+
+#define INTR_EN__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN__ECC_ERR 0x0002
+#define INTR_EN__DMA_CMD_COMP 0x0004
+#define INTR_EN__TIME_OUT 0x0008
+#define INTR_EN__PROGRAM_FAIL 0x0010
+#define INTR_EN__ERASE_FAIL 0x0020
+#define INTR_EN__LOAD_COMP 0x0040
+#define INTR_EN__PROGRAM_COMP 0x0080
+#define INTR_EN__ERASE_COMP 0x0100
+#define INTR_EN__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN__LOCKED_BLK 0x0400
+#define INTR_EN__UNSUP_CMD 0x0800
+#define INTR_EN__INT_ACT 0x1000
+#define INTR_EN__RST_COMP 0x2000
+#define INTR_EN__PIPE_CMD_ERR 0x4000
+#define INTR_EN__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50))
+#define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50))
+#define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50))
+
+#define DATA_INTR 0x550
+#define DATA_INTR__WRITE_SPACE_AV 0x0001
+#define DATA_INTR__READ_DATA_AV 0x0002
+
+#define DATA_INTR_EN 0x560
+#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
+#define DATA_INTR_EN__READ_DATA_AV 0x0002
+
+#define GPREG_0 0x570
+#define GPREG_0__VALUE 0xffff
+
+#define GPREG_1 0x580
+#define GPREG_1__VALUE 0xffff
+
+#define GPREG_2 0x590
+#define GPREG_2__VALUE 0xffff
+
+#define GPREG_3 0x5a0
+#define GPREG_3__VALUE 0xffff
+
+#define ECC_THRESHOLD 0x600
+#define ECC_THRESHOLD__VALUE 0x03ff
+
+#define ECC_ERROR_BLOCK_ADDRESS 0x610
+#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
+
+#define ECC_ERROR_PAGE_ADDRESS 0x620
+#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
+#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
+
+#define ECC_ERROR_ADDRESS 0x630
+#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
+#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
+
+#define ERR_CORRECTION_INFO 0x640
+#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
+#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
+#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
+#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
+
+#define DMA_ENABLE 0x700
+#define DMA_ENABLE__FLAG 0x0001
+
+#define IGNORE_ECC_DONE 0x710
+#define IGNORE_ECC_DONE__FLAG 0x0001
+
+#define DMA_INTR 0x720
+#define DMA_INTR__TARGET_ERROR 0x0001
+#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
+#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
+#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
+#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
+#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
+
+#define DMA_INTR_EN 0x730
+#define DMA_INTR_EN__TARGET_ERROR 0x0001
+#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
+#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
+#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
+#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
+#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
+
+#define TARGET_ERR_ADDR_LO 0x740
+#define TARGET_ERR_ADDR_LO__VALUE 0xffff
+
+#define TARGET_ERR_ADDR_HI 0x750
+#define TARGET_ERR_ADDR_HI__VALUE 0xffff
+
+#define CHNL_ACTIVE 0x760
+#define CHNL_ACTIVE__CHANNEL0 0x0001
+#define CHNL_ACTIVE__CHANNEL1 0x0002
+#define CHNL_ACTIVE__CHANNEL2 0x0004
+#define CHNL_ACTIVE__CHANNEL3 0x0008
+
+#define ACTIVE_SRC_ID 0x800
+#define ACTIVE_SRC_ID__VALUE 0x00ff
+
+#define PTN_INTR 0x810
+#define PTN_INTR__CONFIG_ERROR 0x0001
+#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
+#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
+#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
+#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
+#define PTN_INTR__REG_ACCESS_ERROR 0x0020
+
+#define PTN_INTR_EN 0x820
+#define PTN_INTR_EN__CONFIG_ERROR 0x0001
+#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
+#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
+#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
+#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
+#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
+
+#define PERM_SRC_ID(__bank) (0x830 + ((__bank) * 0x40))
+#define PERM_SRC_ID__SRCID 0x00ff
+#define PERM_SRC_ID__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID__READ_ACTIVE 0x4000
+#define PERM_SRC_ID__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR(__bank) (0x840 + ((__bank) * 0x40))
+#define MIN_BLK_ADDR__VALUE 0xffff
+
+#define MAX_BLK_ADDR(__bank) (0x850 + ((__bank) * 0x40))
+#define MAX_BLK_ADDR__VALUE 0xffff
+
+#define MIN_MAX_BANK(__bank) (0x860 + ((__bank) * 0x40))
+#define MIN_MAX_BANK__MIN_VALUE 0x0003
+#define MIN_MAX_BANK__MAX_VALUE 0x000c
+
+
+/* ffsdefs.h */
+#define CLEAR 0 /*use this to clear a field instead of "fail"*/
+#define SET 1 /*use this to set a field instead of "pass"*/
+#define FAIL 1 /*failed flag*/
+#define PASS 0 /*success flag*/
+#define ERR -1 /*error flag*/
+
+/* lld.h */
+#define GOOD_BLOCK 0
+#define DEFECTIVE_BLOCK 1
+#define READ_ERROR 2
+
+#define CLK_X 5
+#define CLK_MULTI 4
+
+/* spectraswconfig.h */
+#define CMD_DMA 0
+
+#define SPECTRA_PARTITION_ID 0
+/**** Block Table and Reserved Block Parameters *****/
+#define SPECTRA_START_BLOCK 3
+#define NUM_FREE_BLOCKS_GATE 30
+
+/* KBV - Updated to LNW scratch register address */
+#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR
+#define SCRATCH_REG_SIZE 64
+
+#define GLOB_HWCTL_DEFAULT_BLKS 2048
+
+#define SUPPORT_15BITECC 1
+#define SUPPORT_8BITECC 1
+
+#define CUSTOM_CONF_PARAMS 0
+
+#define ONFI_BLOOM_TIME 1
+#define MODE5_WORKAROUND 0
+
+/* lld_nand.h */
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef _LLD_NAND_
+#define _LLD_NAND_
+
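+/* access-mode selectors; the driver ORs one of these into the upper bits of
+ * an indexed address to choose how the controller interprets the access */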
+#define MODE_00 0x00000000
+#define MODE_01 0x04000000
+#define MODE_10 0x08000000
+#define MODE_11 0x0C000000
+
+
+#define DATA_TRANSFER_MODE 0
+#define PROTECTION_PER_BLOCK 1
+#define LOAD_WAIT_COUNT 2
+#define PROGRAM_WAIT_COUNT 3
+#define ERASE_WAIT_COUNT 4
+#define INT_MONITOR_CYCLE_COUNT 5
+#define READ_BUSY_PIN_ENABLED 6
+#define MULTIPLANE_OPERATION_SUPPORT 7
+#define PRE_FETCH_MODE 8
+#define CE_DONT_CARE_SUPPORT 9
+#define COPYBACK_SUPPORT 10
+#define CACHE_WRITE_SUPPORT 11
+#define CACHE_READ_SUPPORT 12
+#define NUM_PAGES_IN_BLOCK 13
+#define ECC_ENABLE_SELECT 14
+#define WRITE_ENABLE_2_READ_ENABLE 15
+#define ADDRESS_2_DATA 16
+#define READ_ENABLE_2_WRITE_ENABLE 17
+#define TWO_ROW_ADDRESS_CYCLES 18
+#define MULTIPLANE_ADDRESS_RESTRICT 19
+#define ACC_CLOCKS 20
+#define READ_WRITE_ENABLE_LOW_COUNT 21
+#define READ_WRITE_ENABLE_HIGH_COUNT 22
+
+#define ECC_SECTOR_SIZE 512
+
+#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
+
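+/* bounce buffer shared by byte-wise reads (consumed through head/tail in
+ * denali_read_byte) and the DMA page transfers (through dma_buf) */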
+struct nand_buf {
+ int head;
+ int tail;
+ uint8_t buf[DENALI_BUF_SIZE];
+ dma_addr_t dma_buf;
+};
+
+#define INTEL_CE4100 1
+#define INTEL_MRST 2
+
+struct denali_nand_info {
+ struct mtd_info mtd;
+ struct nand_chip nand;
+ int flash_bank; /* currently selected chip */
+ int status;
+ int platform;
+ struct nand_buf buf;
+ struct device *dev;
+ int total_used_banks;
+ uint32_t block; /* stored for future use */
+ uint16_t page;
+ void __iomem *flash_reg; /* Mapped register base address */
+ void __iomem *flash_mem; /* Mapped data memory base address */
+
+ /* elements used by ISR */
+ struct completion complete;
+ spinlock_t irq_lock;
+ uint32_t irq_status;
+ int irq_debug_array[32];
+ int idx;
+
+ uint32_t devnum; /* number of NAND devices connected */
+ uint32_t fwblks; /* number of blocks used by firmware */
+ uint32_t totalblks;
+ uint32_t blksperchip;
+ uint32_t bbtskipbytes;
+ uint32_t max_banks;
+};
+
+#endif /*_LLD_NAND_*/
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
new file mode 100644
index 00000000..e2ca0676
--- /dev/null
+++ b/drivers/mtd/nand/diskonchip.c
@@ -0,0 +1,1775 @@
+/*
+ * drivers/mtd/nand/diskonchip.c
+ *
+ * (C) 2003 Red Hat, Inc.
+ * (C) 2004 Dan Brown <dan_brown@ieee.org>
+ * (C) 2004 Kalev Lember <kalev@smartlink.ee>
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ * Additional Diskonchip 2000 and Millennium support by Dan Brown <dan_brown@ieee.org>
+ * Diskonchip Millennium Plus support by Kalev Lember <kalev@smartlink.ee>
+ *
+ * Error correction code lifted from the old docecc code
+ * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
+ * Copyright (C) 2000 Netgem S.A.
+ * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Interface to generic NAND code for M-Systems DiskOnChip devices
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/rslib.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/doc2000.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/inftl.h>
+#include <linux/module.h>
+
+/* Where to look for the devices? */
+#ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
+#endif
+
+static unsigned long __initdata doc_locations[] = {
+#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
+ 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
+ 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
+ 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
+ 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
+ 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
+#else /* CONFIG_MTD_DOCPROBE_HIGH */
+ 0xc8000, 0xca000, 0xcc000, 0xce000,
+ 0xd0000, 0xd2000, 0xd4000, 0xd6000,
+ 0xd8000, 0xda000, 0xdc000, 0xde000,
+ 0xe0000, 0xe2000, 0xe4000, 0xe6000,
+ 0xe8000, 0xea000, 0xec000, 0xee000,
+#endif /* CONFIG_MTD_DOCPROBE_HIGH */
+#else
+#warning Unknown architecture for DiskOnChip. No default probe locations defined
+#endif
+ 0xffffffff };
+
+static struct mtd_info *doclist = NULL;
+
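+/* per-device state for a probed DiskOnChip: the mapped window, the currently
+ * selected floor/chip, chip geometry, and the pages holding the media
+ * headers */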
+struct doc_priv {
+ void __iomem *virtadr;
+ unsigned long physadr;
+ u_char ChipID;
+ u_char CDSNControl;
+ int chips_per_floor; /* The number of chips detected on each floor */
+ int curfloor;
+ int curchip;
+ int mh0_page;
+ int mh1_page;
+ struct mtd_info *nextdoc;
+};
+
+/* This is the syndrome computed by the HW ecc generator upon reading an empty
+ page, one with all 0xff for data and stored ecc code. */
+static u_char empty_read_syndrome[6] = { 0x26, 0xff, 0x6d, 0x47, 0x73, 0x7a };
+
+/* This is the ecc value computed by the HW ecc generator upon writing an empty
+ page, one with all 0xff for data. */
+static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
+
+#define INFTL_BBT_RESERVED_BLOCKS 4
+
+#define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32)
+#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
+#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
+
+static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int bitmask);
+static void doc200x_select_chip(struct mtd_info *mtd, int chip);
+
+static int debug = 0;
+module_param(debug, int, 0);
+
+static int try_dword = 1;
+module_param(try_dword, int, 0);
+
+static int no_ecc_failures = 0;
+module_param(no_ecc_failures, int, 0);
+
+static int no_autopart = 0;
+module_param(no_autopart, int, 0);
+
+static int show_firmware_partition = 0;
+module_param(show_firmware_partition, int, 0);
+
+#ifdef CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE
+static int inftl_bbt_write = 1;
+#else
+static int inftl_bbt_write = 0;
+#endif
+module_param(inftl_bbt_write, int, 0);
+
+static unsigned long doc_config_location = CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS;
+module_param(doc_config_location, ulong, 0);
+MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
+
+/* Sector size for HW ECC */
+#define SECTOR_SIZE 512
+/* The sector bytes are packed into NB_DATA 10 bit words */
+#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10)
+/* Number of roots */
+#define NROOTS 4
+/* First consecutive root */
+#define FCR 510
+/* Number of symbols */
+#define NN 1023
+
+/* the Reed Solomon control structure */
+static struct rs_control *rs_decoder;
+
+/*
+ * The HW decoder in the DoC ASICs provides us with an error syndrome,
+ * which we must convert to a standard syndrome usable by the generic
+ * Reed-Solomon library code.
+ *
+ * Fabrice Bellard figured this out in the old docecc code. I added
+ * some comments, improved a minor bit and converted it to make use
+ * of the generic Reed-Solomon library. tglx
+ */
+static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
+{
+ int i, j, nerr, errpos[8];
+ uint8_t parity;
+ uint16_t ds[4], s[5], tmp, errval[8], syn[4];
+
+ memset(syn, 0, sizeof(syn));
+ /* Convert the ecc bytes into words */
+ ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8);
+ ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6);
+ ds[2] = ((ecc[2] & 0xf0) >> 4) | ((ecc[3] & 0x3f) << 4);
+ ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
+ parity = ecc[1];
+
+ /* Initialize the syndrome buffer */
+ for (i = 0; i < NROOTS; i++)
+ s[i] = ds[0];
+ /*
+ * Evaluate
+ * s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0]
+ * where x = alpha^(FCR + i)
+ */
+ for (j = 1; j < NROOTS; j++) {
+ if (ds[j] == 0)
+ continue;
+ tmp = rs->index_of[ds[j]];
+ for (i = 0; i < NROOTS; i++)
+ s[i] ^= rs->alpha_to[rs_modnn(rs, tmp + (FCR + i) * j)];
+ }
+
+ /* Calc syn[i] = s[i] / alpha^(v + i) */
+ for (i = 0; i < NROOTS; i++) {
+ if (s[i])
+ syn[i] = rs_modnn(rs, rs->index_of[s[i]] + (NN - FCR - i));
+ }
+ /* Call the decoder library */
+ nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval);
+
+ /* Uncorrectable errors? */
+ if (nerr < 0)
+ return nerr;
+
+ /*
+ * Correct the errors. The bitpositions are a bit of magic,
+ * but they are given by the design of the de/encoder circuit
+ * in the DoC ASIC's.
+ */
+ for (i = 0; i < nerr; i++) {
+ int index, bitpos, pos = 1015 - errpos[i];
+ uint8_t val;
+ if (pos >= NB_DATA && pos < 1019)
+ continue;
+ if (pos < NB_DATA) {
+ /* extract bit position (MSB first) */
+ pos = 10 * (NB_DATA - 1 - pos) - 6;
+ /* now correct the following 10 bits. At most two bytes
+ can be modified since pos is even */
+ index = (pos >> 3) ^ 1;
+ bitpos = pos & 7;
+ if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
+ val = (uint8_t) (errval[i] >> (2 + bitpos));
+ parity ^= val;
+ if (index < SECTOR_SIZE)
+ data[index] ^= val;
+ }
+ index = ((pos >> 3) + 1) ^ 1;
+ bitpos = (bitpos + 10) & 7;
+ if (bitpos == 0)
+ bitpos = 8;
+ if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
+ val = (uint8_t) (errval[i] << (8 - bitpos));
+ parity ^= val;
+ if (index < SECTOR_SIZE)
+ data[index] ^= val;
+ }
+ }
+ }
+ /* If the parity is wrong, no rescue possible */
+ return parity ? -EBADMSG : nerr;
+}
+
+static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
+{
+ volatile char dummy;
+ int i;
+
+ for (i = 0; i < cycles; i++) {
+ if (DoC_is_Millennium(doc))
+ dummy = ReadDOC(doc->virtadr, NOP);
+ else if (DoC_is_MillenniumPlus(doc))
+ dummy = ReadDOC(doc->virtadr, Mplus_NOP);
+ else
+ dummy = ReadDOC(doc->virtadr, DOCStatus);
+ }
+
+}
+
+#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
+
+/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
+static int _DoC_WaitReady(struct doc_priv *doc)
+{
+ void __iomem *docptr = doc->virtadr;
+ unsigned long timeo = jiffies + (HZ * 10);
+
+ if (debug)
+ printk("_DoC_WaitReady...\n");
+ /* Out-of-line routine to wait for chip response */
+ if (DoC_is_MillenniumPlus(doc)) {
+ while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
+ if (time_after(jiffies, timeo)) {
+ printk("_DoC_WaitReady timed out.\n");
+ return -EIO;
+ }
+ udelay(1);
+ cond_resched();
+ }
+ } else {
+ while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+ if (time_after(jiffies, timeo)) {
+ printk("_DoC_WaitReady timed out.\n");
+ return -EIO;
+ }
+ udelay(1);
+ cond_resched();
+ }
+ }
+
+ return 0;
+}
+
+static inline int DoC_WaitReady(struct doc_priv *doc)
+{
+ void __iomem *docptr = doc->virtadr;
+ int ret = 0;
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ DoC_Delay(doc, 4);
+
+ if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
+ /* Call the out-of-line routine to wait */
+ ret = _DoC_WaitReady(doc);
+ } else {
+ DoC_Delay(doc, 4);
+
+ if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
+ /* Call the out-of-line routine to wait */
+ ret = _DoC_WaitReady(doc);
+ DoC_Delay(doc, 2);
+ }
+
+ if (debug)
+ printk("DoC_WaitReady OK\n");
+ return ret;
+}
+
+static void doc2000_write_byte(struct mtd_info *mtd, u_char datum)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ if (debug)
+ printk("write_byte %02x\n", datum);
+ WriteDOC(datum, docptr, CDSNSlowIO);
+ WriteDOC(datum, docptr, 2k_CDSN_IO);
+}
+
+static u_char doc2000_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ u_char ret;
+
+ ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(doc, 2);
+ ret = ReadDOC(docptr, 2k_CDSN_IO);
+ if (debug)
+ printk("read_byte returns %02x\n", ret);
+ return ret;
+}
+
+static void doc2000_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+ if (debug)
+ printk("writebuf of %d bytes: ", len);
+ for (i = 0; i < len; i++) {
+ WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+ if (debug)
+ printk("\n");
+}
+
+static void doc2000_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("readbuf of %d bytes: ", len);
+
+ for (i = 0; i < len; i++) {
+ buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
+ }
+}
+
+static void doc2000_readbuf_dword(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("readbuf_dword of %d bytes: ", len);
+
+ if (unlikely((((unsigned long)buf) | len) & 3)) {
+ for (i = 0; i < len; i++) {
+ *(uint8_t *) (&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i);
+ }
+ } else {
+ for (i = 0; i < len; i += 4) {
+ *(uint32_t *) (&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i);
+ }
+ }
+}
+
+static int doc2000_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO))
+ return -EFAULT;
+ return 0;
+}
+
+static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ uint16_t ret;
+
+ doc200x_select_chip(mtd, nr);
+ doc200x_hwcontrol(mtd, NAND_CMD_READID,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /* We can't use dev_ready here, but at least we wait for the
+ * command to complete
+ */
+ udelay(50);
+
+ ret = this->read_byte(mtd) << 8;
+ ret |= this->read_byte(mtd);
+
+ if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
+ /* First chip probe. See if we get same results by 32-bit access */
+ union {
+ uint32_t dword;
+ uint8_t byte[4];
+ } ident;
+ void __iomem *docptr = doc->virtadr;
+
+ doc200x_hwcontrol(mtd, NAND_CMD_READID,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ udelay(50);
+
+ ident.dword = readl(docptr + DoC_2k_CDSN_IO);
+ if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
+ printk(KERN_INFO "DiskOnChip 2000 responds to DWORD access\n");
+ this->read_buf = &doc2000_readbuf_dword;
+ }
+ }
+
+ return ret;
+}
+
+static void __init doc2000_count_chips(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ uint16_t mfrid;
+ int i;
+
+ /* Max 4 chips per floor on DiskOnChip 2000 */
+ doc->chips_per_floor = 4;
+
+ /* Find out what the first chip is */
+ mfrid = doc200x_ident_chip(mtd, 0);
+
+ /* Find how many chips in each floor. */
+ for (i = 1; i < 4; i++) {
+ if (doc200x_ident_chip(mtd, i) != mfrid)
+ break;
+ }
+ doc->chips_per_floor = i;
+ printk(KERN_DEBUG "Detected %d chips per floor.\n", i);
+}
+
+static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
+{
+ struct doc_priv *doc = this->priv;
+
+ int status;
+
+ DoC_WaitReady(doc);
+ this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ DoC_WaitReady(doc);
+ status = (int)this->read_byte(mtd);
+
+ return status;
+}
+
+static void doc2001_write_byte(struct mtd_info *mtd, u_char datum)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ WriteDOC(datum, docptr, CDSNSlowIO);
+ WriteDOC(datum, docptr, Mil_CDSN_IO);
+ WriteDOC(datum, docptr, WritePipeTerm);
+}
+
+static u_char doc2001_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ //ReadDOC(docptr, CDSNSlowIO);
+ /* 11.4.5 -- delay twice to allow extended length cycle */
+ DoC_Delay(doc, 2);
+ ReadDOC(docptr, ReadPipeInit);
+ //return ReadDOC(docptr, Mil_CDSN_IO);
+ return ReadDOC(docptr, LastDataRead);
+}
+
+static void doc2001_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ for (i = 0; i < len; i++)
+ WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
+ /* Terminate write pipeline */
+ WriteDOC(0x00, docptr, WritePipeTerm);
+}
+
+static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ /* Start read pipeline */
+ ReadDOC(docptr, ReadPipeInit);
+
+ for (i = 0; i < len - 1; i++)
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
+
+ /* Terminate read pipeline */
+ buf[i] = ReadDOC(docptr, LastDataRead);
+}
+
+static int doc2001_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ /* Start read pipeline */
+ ReadDOC(docptr, ReadPipeInit);
+
+ for (i = 0; i < len - 1; i++)
+ if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
+ ReadDOC(docptr, LastDataRead);
+ return i;
+ }
+ if (buf[i] != ReadDOC(docptr, LastDataRead))
+ return i;
+ return 0;
+}
+
+static u_char doc2001plus_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ u_char ret;
+
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ret = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug)
+ printk("read_byte returns %02x\n", ret);
+ return ret;
+}
+
+static void doc2001plus_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("writebuf of %d bytes: ", len);
+ for (i = 0; i < len; i++) {
+ WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+ if (debug)
+ printk("\n");
+}
+
+static void doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("readbuf of %d bytes: ", len);
+
+ /* Start read pipeline */
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+
+ for (i = 0; i < len - 2; i++) {
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+
+ /* Terminate read pipeline */
+ buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug && i < 16)
+ printk("%02x ", buf[len - 2]);
+ buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug && i < 16)
+ printk("%02x ", buf[len - 1]);
+ if (debug)
+ printk("\n");
+}
+
+static int doc2001plus_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("verifybuf of %d bytes: ", len);
+
+ /* Start read pipeline */
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+
+ for (i = 0; i < len - 2; i++)
+ if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
+ ReadDOC(docptr, Mplus_LastDataRead);
+ ReadDOC(docptr, Mplus_LastDataRead);
+ return i;
+ }
+ if (buf[len - 2] != ReadDOC(docptr, Mplus_LastDataRead))
+ return len - 2;
+ if (buf[len - 1] != ReadDOC(docptr, Mplus_LastDataRead))
+ return len - 1;
+ return 0;
+}
+
+static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int floor = 0;
+
+ if (debug)
+ printk("select chip (%d)\n", chip);
+
+ if (chip == -1) {
+ /* Disable flash internally */
+ WriteDOC(0, docptr, Mplus_FlashSelect);
+ return;
+ }
+
+ floor = chip / doc->chips_per_floor;
+ chip -= (floor * doc->chips_per_floor);
+
+ /* Assert ChipEnable and deassert WriteProtect */
+ WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
+ this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ doc->curchip = chip;
+ doc->curfloor = floor;
+}
+
+static void doc200x_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int floor = 0;
+
+ if (debug)
+ printk("select chip (%d)\n", chip);
+
+ if (chip == -1)
+ return;
+
+ floor = chip / doc->chips_per_floor;
+ chip -= (floor * doc->chips_per_floor);
+
+ /* 11.4.4 -- deassert CE before changing chip */
+ doc200x_hwcontrol(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+
+ WriteDOC(floor, docptr, FloorSelect);
+ WriteDOC(chip, docptr, CDSNDeviceSelect);
+
+ doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ doc->curchip = chip;
+ doc->curfloor = floor;
+}
+
+#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE)
+
+static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ doc->CDSNControl &= ~CDSN_CTRL_MSK;
+ doc->CDSNControl |= ctrl & CDSN_CTRL_MSK;
+ if (debug)
+ printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
+ WriteDOC(doc->CDSNControl, docptr, CDSNControl);
+ /* 11.4.3 -- 4 NOPs after CSDNControl write */
+ DoC_Delay(doc, 4);
+ }
+ if (cmd != NAND_CMD_NONE) {
+ if (DoC_is_2000(doc))
+ doc2000_write_byte(mtd, cmd);
+ else
+ doc2001_write_byte(mtd, cmd);
+ }
+}
+
+static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ /*
+ * Must terminate write pipeline before sending any commands
+ * to the device.
+ */
+ if (command == NAND_CMD_PAGEPROG) {
+ WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+ }
+
+ /*
+ * Write out the command to the device.
+ */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ WriteDOC(readcmd, docptr, Mplus_FlashCmd);
+ }
+ WriteDOC(command, docptr, Mplus_FlashCmd);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+
+ if (column != -1 || page_addr != -1) {
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (this->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ WriteDOC(column, docptr, Mplus_FlashAddress);
+ }
+ if (page_addr != -1) {
+ WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
+ WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
+ /* One more address cycle for higher density devices */
+ if (this->chipsize & 0x0c000000) {
+ WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
+ printk("high density\n");
+ }
+ }
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ /* deassert ALE */
+ if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
+ command == NAND_CMD_READOOB || command == NAND_CMD_READID)
+ WriteDOC(0, docptr, Mplus_FlashControl);
+ }
+
+ /*
+ * program and erase have their own busy handlers
+ * status and sequential-in (SEQIN) need no delay
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ if (this->dev_ready)
+ break;
+ udelay(this->chip_delay);
+ WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ while (!(this->read_byte(mtd) & 0x40)) ;
+ return;
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!this->dev_ready) {
+ udelay(this->chip_delay);
+ return;
+ }
+ }
+
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay(100);
+ /* wait until command is processed */
+ while (!this->dev_ready(mtd)) ;
+}
+
+static int doc200x_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ /* 11.4.2 -- must NOP four times before checking FR/B# */
+ DoC_Delay(doc, 4);
+ if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
+ if (debug)
+ printk("not ready\n");
+ return 0;
+ }
+ if (debug)
+ printk("was ready\n");
+ return 1;
+ } else {
+ /* 11.4.2 -- must NOP four times before checking FR/B# */
+ DoC_Delay(doc, 4);
+ if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+ if (debug)
+ printk("not ready\n");
+ return 0;
+ }
+ /* 11.4.2 -- Must NOP twice if it's ready */
+ DoC_Delay(doc, 2);
+ if (debug)
+ printk("was ready\n");
+ return 1;
+ }
+}
+
+static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ /* This is our last resort if we couldn't find or create a BBT. Just
+ pretend all blocks are good. */
+ return 0;
+}
+
+static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ /* Prime the ECC engine */
+ switch (mode) {
+ case NAND_ECC_READ:
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, ECCConf);
+ break;
+ case NAND_ECC_WRITE:
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
+ break;
+ }
+}
+
+static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ /* Prime the ECC engine */
+ switch (mode) {
+ case NAND_ECC_READ:
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
+ break;
+ case NAND_ECC_WRITE:
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
+ break;
+ }
+}
+
+/* This code is only called on write */
+static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat, unsigned char *ecc_code)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+ int emptymatch = 1;
+
+ /* flush the pipeline */
+ if (DoC_is_2000(doc)) {
+ WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(doc->CDSNControl, docptr, CDSNControl);
+ } else if (DoC_is_MillenniumPlus(doc)) {
+ WriteDOC(0, docptr, Mplus_NOP);
+ WriteDOC(0, docptr, Mplus_NOP);
+ WriteDOC(0, docptr, Mplus_NOP);
+ } else {
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ }
+
+ for (i = 0; i < 6; i++) {
+ if (DoC_is_MillenniumPlus(doc))
+ ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
+ else
+ ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+ if (ecc_code[i] != empty_write_ecc[i])
+ emptymatch = 0;
+ }
+ if (DoC_is_MillenniumPlus(doc))
+ WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+ else
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+#if 0
+ /* If emptymatch=1, we might have an all-0xff data buffer. Check. */
+ if (emptymatch) {
+ /* Note: this somewhat expensive test should not be triggered
+ often. It could be optimized away by examining the data in
+ the writebuf routine, and remembering the result. */
+ for (i = 0; i < 512; i++) {
+ if (dat[i] == 0xff)
+ continue;
+ emptymatch = 0;
+ break;
+ }
+ }
+ /* If emptymatch still =1, we do have an all-0xff data buffer.
+ Return all-0xff ecc value instead of the computed one, so
+ it'll look just like a freshly-erased page. */
+ if (emptymatch)
+ memset(ecc_code, 0xff, 6);
+#endif
+ return 0;
+}
+
+static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *isnull)
+{
+ int i, ret = 0;
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint8_t calc_ecc[6];
+ volatile u_char dummy;
+ int emptymatch = 1;
+
+ /* flush the pipeline */
+ if (DoC_is_2000(doc)) {
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ } else if (DoC_is_MillenniumPlus(doc)) {
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ } else {
+ dummy = ReadDOC(docptr, ECCConf);
+ dummy = ReadDOC(docptr, ECCConf);
+ dummy = ReadDOC(docptr, ECCConf);
+ }
+
+ /* Error occurred ? */
+ if (dummy & 0x80) {
+ for (i = 0; i < 6; i++) {
+ if (DoC_is_MillenniumPlus(doc))
+ calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
+ else
+ calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+ if (calc_ecc[i] != empty_read_syndrome[i])
+ emptymatch = 0;
+ }
+ /* If emptymatch=1, the read syndrome is consistent with an
+ all-0xff data and stored ecc block. Check the stored ecc. */
+ if (emptymatch) {
+ for (i = 0; i < 6; i++) {
+ if (read_ecc[i] == 0xff)
+ continue;
+ emptymatch = 0;
+ break;
+ }
+ }
+ /* If emptymatch still =1, check the data block. */
+ if (emptymatch) {
+ /* Note: this somewhat expensive test should not be triggered
+ often. It could be optimized away by examining the data in
+ the readbuf routine, and remembering the result. */
+ for (i = 0; i < 512; i++) {
+ if (dat[i] == 0xff)
+ continue;
+ emptymatch = 0;
+ break;
+ }
+ }
+ /* If emptymatch still =1, this is almost certainly a freshly-
+ erased block, in which case the ECC will not come out right.
+ We'll suppress the error and tell the caller everything's
+ OK. Because it is. */
+ if (!emptymatch)
+ ret = doc_ecc_decode(rs_decoder, dat, calc_ecc);
+ if (ret > 0)
+ printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret);
+ }
+ if (DoC_is_MillenniumPlus(doc))
+ WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+ else
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+ if (no_ecc_failures && mtd_is_eccerr(ret)) {
+ printk(KERN_ERR "suppressing ECC failure\n");
+ ret = 0;
+ }
+ return ret;
+}
+
+//u_char mydatabuf[528];
+
+/* The strange out-of-order .oobfree list below is a (possibly unneeded)
+ * attempt to retain compatibility. It used to read:
+ * .oobfree = { {8, 8} }
+ * Since that leaves two bytes unusable, it was changed. But the following
+ * scheme might affect existing jffs2 installs by moving the cleanmarker:
+ * .oobfree = { {6, 10} }
+ * jffs2 seems to handle the above gracefully, but the current scheme seems
+ * safer. The only problem with it is that any code that parses oobfree must
+ * be able to handle out-of-order segments.
+ */
+static struct nand_ecclayout doc200x_oobinfo = {
+ .eccbytes = 6,
+ .eccpos = {0, 1, 2, 3, 4, 5},
+ .oobfree = {{8, 8}, {6, 2}}
+};
+
+/* Find the (I)NFTL Media Header, and optionally also the mirror media header.
+ On successful return, buf will contain a copy of the media header for
+ further processing. id is the string to scan for, and will presumably be
+ either "ANAND" or "BNAND". If findmirror=1, also look for the mirror media
+ header. The page #s of the found media headers are placed in mh0_page and
+ mh1_page in the DOC private structure. */
+static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const char *id, int findmirror)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ unsigned offs;
+ int ret;
+ size_t retlen;
+
+ for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
+ if (retlen != mtd->writesize)
+ continue;
+ if (ret) {
+ printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n", offs);
+ }
+ if (memcmp(buf, id, 6))
+ continue;
+ printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
+ if (doc->mh0_page == -1) {
+ doc->mh0_page = offs >> this->page_shift;
+ if (!findmirror)
+ return 1;
+ continue;
+ }
+ doc->mh1_page = offs >> this->page_shift;
+ return 2;
+ }
+ if (doc->mh0_page == -1) {
+ printk(KERN_WARNING "DiskOnChip %s Media Header not found.\n", id);
+ return 0;
+ }
+ /* Only one mediaheader was found. We want buf to contain a
+ mediaheader on return, so we'll have to re-read the one we found. */
+ offs = doc->mh0_page << this->page_shift;
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
+ if (retlen != mtd->writesize) {
+ /* Insanity. Give up. */
+ printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
+ return 0;
+ }
+ return 1;
+}
+
+static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ int ret = 0;
+ u_char *buf;
+ struct NFTLMediaHeader *mh;
+ const unsigned psize = 1 << this->page_shift;
+ int numparts = 0;
+ unsigned blocks, maxblocks;
+ int offs, numheaders;
+
+ buf = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
+ return 0;
+ }
+ if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
+ goto out;
+ mh = (struct NFTLMediaHeader *)buf;
+
+ le16_to_cpus(&mh->NumEraseUnits);
+ le16_to_cpus(&mh->FirstPhysicalEUN);
+ le32_to_cpus(&mh->FormattedSize);
+
+ printk(KERN_INFO " DataOrgID = %s\n"
+ " NumEraseUnits = %d\n"
+ " FirstPhysicalEUN = %d\n"
+ " FormattedSize = %d\n"
+ " UnitSizeFactor = %d\n",
+ mh->DataOrgID, mh->NumEraseUnits,
+ mh->FirstPhysicalEUN, mh->FormattedSize,
+ mh->UnitSizeFactor);
+
+ blocks = mtd->size >> this->phys_erase_shift;
+ maxblocks = min(32768U, mtd->erasesize - psize);
+
+ if (mh->UnitSizeFactor == 0x00) {
+ /* Auto-determine UnitSizeFactor. The constraints are:
+ - There can be at most 32768 virtual blocks.
+ - There can be at most (virtual block size - page size)
+ virtual blocks (because MediaHeader+BBT must fit in 1).
+ */
+ mh->UnitSizeFactor = 0xff;
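+ /* Each decrement below halves the number of virtual blocks (doubling the virtual block size). */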
+ while (blocks > maxblocks) {
+ blocks >>= 1;
+ maxblocks = min(32768U, (maxblocks << 1) + psize);
+ mh->UnitSizeFactor--;
+ }
+ printk(KERN_WARNING "UnitSizeFactor=0x00 detected. Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
+ }
+
+ /* NOTE: The lines below modify internal variables of the NAND and MTD
+ layers; variables which have already been configured by nand_scan.
+ Unfortunately, we didn't know before this point what these values
+ should be. Thus, this code is somewhat dependent on the exact
+ implementation of the NAND layer. */
+ if (mh->UnitSizeFactor != 0xff) {
+ this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
+ mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
+ printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize);
+ blocks = mtd->size >> this->bbt_erase_shift;
+ maxblocks = min(32768U, mtd->erasesize - psize);
+ }
+
+ if (blocks > maxblocks) {
+ printk(KERN_ERR "UnitSizeFactor of 0x%02x is inconsistent with device size. Aborting.\n", mh->UnitSizeFactor);
+ goto out;
+ }
+
+ /* Skip past the media headers. */
+ offs = max(doc->mh0_page, doc->mh1_page);
+ offs <<= this->page_shift;
+ offs += mtd->erasesize;
+
+ if (show_firmware_partition == 1) {
+ parts[0].name = " DiskOnChip Firmware / Media Header partition";
+ parts[0].offset = 0;
+ parts[0].size = offs;
+ numparts = 1;
+ }
+
+ parts[numparts].name = " DiskOnChip BDTL partition";
+ parts[numparts].offset = offs;
+ parts[numparts].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift;
+
+ offs += parts[numparts].size;
+ numparts++;
+
+ if (offs < mtd->size) {
+ parts[numparts].name = " DiskOnChip Remainder partition";
+ parts[numparts].offset = offs;
+ parts[numparts].size = mtd->size - offs;
+ numparts++;
+ }
+
+ ret = numparts;
+ out:
+ kfree(buf);
+ return ret;
+}
+
+/* This is a stripped-down copy of the code in inftlmount.c */
+static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ int ret = 0;
+ u_char *buf;
+ struct INFTLMediaHeader *mh;
+ struct INFTLPartition *ip;
+ int numparts = 0;
+ int blocks;
+ int vshift, lastvunit = 0;
+ int i;
+ int end = mtd->size;
+
+ if (inftl_bbt_write)
+ end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);
+
+ buf = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
+ return 0;
+ }
+
+ if (!find_media_headers(mtd, buf, "BNAND", 0))
+ goto out;
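+ /* INFTL keeps the mirror media header 4KiB after the primary, so compute its page directly. */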
+ doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
+ mh = (struct INFTLMediaHeader *)buf;
+
+ le32_to_cpus(&mh->NoOfBootImageBlocks);
+ le32_to_cpus(&mh->NoOfBinaryPartitions);
+ le32_to_cpus(&mh->NoOfBDTLPartitions);
+ le32_to_cpus(&mh->BlockMultiplierBits);
+ le32_to_cpus(&mh->FormatFlags);
+ le32_to_cpus(&mh->PercentUsed);
+
+ printk(KERN_INFO " bootRecordID = %s\n"
+ " NoOfBootImageBlocks = %d\n"
+ " NoOfBinaryPartitions = %d\n"
+ " NoOfBDTLPartitions = %d\n"
+ " BlockMultiplerBits = %d\n"
+ " FormatFlgs = %d\n"
+ " OsakVersion = %d.%d.%d.%d\n"
+ " PercentUsed = %d\n",
+ mh->bootRecordID, mh->NoOfBootImageBlocks,
+ mh->NoOfBinaryPartitions,
+ mh->NoOfBDTLPartitions,
+ mh->BlockMultiplierBits, mh->FormatFlags,
+ ((unsigned char *) &mh->OsakVersion)[0] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[1] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[2] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[3] & 0xf,
+ mh->PercentUsed);
+
+ vshift = this->phys_erase_shift + mh->BlockMultiplierBits;
+
+ blocks = mtd->size >> vshift;
+ if (blocks > 32768) {
+ printk(KERN_ERR "BlockMultiplierBits=%d is inconsistent with device size. Aborting.\n", mh->BlockMultiplierBits);
+ goto out;
+ }
+
+ blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
+ if (inftl_bbt_write && (blocks > mtd->erasesize)) {
+ printk(KERN_ERR "Writeable BBTs spanning more than one erase block are not yet supported. FIX ME!\n");
+ goto out;
+ }
+
+ /* Scan the partitions */
+ for (i = 0; (i < 4); i++) {
+ ip = &(mh->Partitions[i]);
+ le32_to_cpus(&ip->virtualUnits);
+ le32_to_cpus(&ip->firstUnit);
+ le32_to_cpus(&ip->lastUnit);
+ le32_to_cpus(&ip->flags);
+ le32_to_cpus(&ip->spareUnits);
+ le32_to_cpus(&ip->Reserved0);
+
+ printk(KERN_INFO " PARTITION[%d] ->\n"
+ " virtualUnits = %d\n"
+ " firstUnit = %d\n"
+ " lastUnit = %d\n"
+ " flags = 0x%x\n"
+ " spareUnits = %d\n",
+ i, ip->virtualUnits, ip->firstUnit,
+ ip->lastUnit, ip->flags,
+ ip->spareUnits);
+
+ if ((show_firmware_partition == 1) &&
+ (i == 0) && (ip->firstUnit > 0)) {
+ parts[0].name = " DiskOnChip IPL / Media Header partition";
+ parts[0].offset = 0;
+ parts[0].size = mtd->erasesize * ip->firstUnit;
+ numparts = 1;
+ }
+
+ if (ip->flags & INFTL_BINARY)
+ parts[numparts].name = " DiskOnChip BDK partition";
+ else
+ parts[numparts].name = " DiskOnChip BDTL partition";
+ parts[numparts].offset = ip->firstUnit << vshift;
+ parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
+ numparts++;
+ if (ip->lastUnit > lastvunit)
+ lastvunit = ip->lastUnit;
+ if (ip->flags & INFTL_LAST)
+ break;
+ }
+ lastvunit++;
+ if ((lastvunit << vshift) < end) {
+ parts[numparts].name = " DiskOnChip Remainder partition";
+ parts[numparts].offset = lastvunit << vshift;
+ parts[numparts].size = end - parts[numparts].offset;
+ numparts++;
+ }
+ ret = numparts;
+ out:
+ kfree(buf);
+ return ret;
+}
+
+static int __init nftl_scan_bbt(struct mtd_info *mtd)
+{
+ int ret, numparts;
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ struct mtd_partition parts[2];
+
+ memset((char *)parts, 0, sizeof(parts));
+ /* On NFTL, we have to find the media headers before we can read the
+ BBTs, since they're stored in the media header eraseblocks. */
+ numparts = nftl_partscan(mtd, parts);
+ if (!numparts)
+ return -EIO;
+ this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
+ NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
+ NAND_BBT_VERSION;
+ this->bbt_td->veroffs = 7;
+ this->bbt_td->pages[0] = doc->mh0_page + 1;
+ if (doc->mh1_page != -1) {
+ this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
+ NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
+ NAND_BBT_VERSION;
+ this->bbt_md->veroffs = 7;
+ this->bbt_md->pages[0] = doc->mh1_page + 1;
+ } else {
+ this->bbt_md = NULL;
+ }
+
+ /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
+ At least as nand_bbt.c is currently written. */
+ if ((ret = nand_scan_bbt(mtd, NULL)))
+ return ret;
+ mtd_device_register(mtd, NULL, 0);
+ if (!no_autopart)
+ mtd_device_register(mtd, parts, numparts);
+ return 0;
+}
+
+static int __init inftl_scan_bbt(struct mtd_info *mtd)
+{
+ int ret, numparts;
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ struct mtd_partition parts[5];
+
+ if (this->numchips > doc->chips_per_floor) {
+ printk(KERN_ERR "Multi-floor INFTL devices not yet supported.\n");
+ return -EIO;
+ }
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
+ if (inftl_bbt_write)
+ this->bbt_td->options |= NAND_BBT_WRITE;
+ this->bbt_td->pages[0] = 2;
+ this->bbt_md = NULL;
+ } else {
+ this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
+ if (inftl_bbt_write)
+ this->bbt_td->options |= NAND_BBT_WRITE;
+ this->bbt_td->offs = 8;
+ this->bbt_td->len = 8;
+ this->bbt_td->veroffs = 7;
+ this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+ this->bbt_td->reserved_block_code = 0x01;
+ this->bbt_td->pattern = "MSYS_BBT";
+
+ this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
+ if (inftl_bbt_write)
+ this->bbt_md->options |= NAND_BBT_WRITE;
+ this->bbt_md->offs = 8;
+ this->bbt_md->len = 8;
+ this->bbt_md->veroffs = 7;
+ this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+ this->bbt_md->reserved_block_code = 0x01;
+ this->bbt_md->pattern = "TBB_SYSM";
+ }
+
+ /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
+ At least as nand_bbt.c is currently written. */
+ if ((ret = nand_scan_bbt(mtd, NULL)))
+ return ret;
+ memset((char *)parts, 0, sizeof(parts));
+ numparts = inftl_partscan(mtd, parts);
+ /* At least for now, require the INFTL Media Header. We could probably
+ do without it for non-INFTL use, since all it gives us is
+ autopartitioning, but I want to give it more thought. */
+ if (!numparts)
+ return -EIO;
+ mtd_device_register(mtd, NULL, 0);
+ if (!no_autopart)
+ mtd_device_register(mtd, parts, numparts);
+ return 0;
+}
+
+static inline int __init doc2000_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+
+ this->read_byte = doc2000_read_byte;
+ this->write_buf = doc2000_writebuf;
+ this->read_buf = doc2000_readbuf;
+ this->verify_buf = doc2000_verifybuf;
+ this->scan_bbt = nftl_scan_bbt;
+
+ doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
+ doc2000_count_chips(mtd);
+ mtd->name = "DiskOnChip 2000 (NFTL Model)";
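+ /* have nand_scan probe up to 4 floors' worth of chips */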
+ return (4 * doc->chips_per_floor);
+}
+
+static inline int __init doc2001_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+
+ this->read_byte = doc2001_read_byte;
+ this->write_buf = doc2001_writebuf;
+ this->read_buf = doc2001_readbuf;
+ this->verify_buf = doc2001_verifybuf;
+
+ ReadDOC(doc->virtadr, ChipID);
+ ReadDOC(doc->virtadr, ChipID);
+ ReadDOC(doc->virtadr, ChipID);
+ if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) {
+ /* It's not a Millennium; it's one of the newer
+ DiskOnChip 2000 units with a similar ASIC.
+ Treat it like a Millennium, except that it
+ can have multiple chips. */
+ doc2000_count_chips(mtd);
+ mtd->name = "DiskOnChip 2000 (INFTL Model)";
+ this->scan_bbt = inftl_scan_bbt;
+ return (4 * doc->chips_per_floor);
+ } else {
+ /* Bog-standard Millennium */
+ doc->chips_per_floor = 1;
+ mtd->name = "DiskOnChip Millennium";
+ this->scan_bbt = nftl_scan_bbt;
+ return 1;
+ }
+}
+
+static inline int __init doc2001plus_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+
+ this->read_byte = doc2001plus_read_byte;
+ this->write_buf = doc2001plus_writebuf;
+ this->read_buf = doc2001plus_readbuf;
+ this->verify_buf = doc2001plus_verifybuf;
+ this->scan_bbt = inftl_scan_bbt;
+ this->cmd_ctrl = NULL;
+ this->select_chip = doc2001plus_select_chip;
+ this->cmdfunc = doc2001plus_command;
+ this->ecc.hwctl = doc2001plus_enable_hwecc;
+
+ doc->chips_per_floor = 1;
+ mtd->name = "DiskOnChip Millennium Plus";
+
+ return 1;
+}
+
+static int __init doc_probe(unsigned long physadr)
+{
+ unsigned char ChipID;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ struct doc_priv *doc;
+ void __iomem *virtadr;
+ unsigned char save_control;
+ unsigned char tmp, tmpb, tmpc;
+ int reg, len, numchips;
+ int ret = 0;
+
+ virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
+ if (!virtadr) {
+ printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
+ return -EIO;
+ }
+
+ /* It's not possible to cleanly detect the DiskOnChip - the
+ * bootup procedure will put the device into reset mode, and
+ * it's not possible to talk to it without actually writing
+ * to the DOCControl register. So we store the current contents
+ * of the DOCControl register's location, in case we later decide
+ * that it's not a DiskOnChip, and want to put it back how we
+ * found it.
+ */
+ save_control = ReadDOC(virtadr, DOCControl);
+
+ /* Reset the DiskOnChip ASIC */
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
+
+ /* Enable the DiskOnChip ASIC */
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
+
+ ChipID = ReadDOC(virtadr, ChipID);
+
+ switch (ChipID) {
+ case DOC_ChipID_Doc2k:
+ reg = DoC_2k_ECCStatus;
+ break;
+ case DOC_ChipID_DocMil:
+ reg = DoC_ECCConf;
+ break;
+ case DOC_ChipID_DocMilPlus16:
+ case DOC_ChipID_DocMilPlus32:
+ case 0:
+ /* Possible Millennium Plus, need to do more checks */
+ /* Possibly release from power down mode */
+ for (tmp = 0; (tmp < 4); tmp++)
+ ReadDOC(virtadr, Mplus_Power);
+
+ /* Reset the Millennium Plus ASIC */
+ tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
+ WriteDOC(tmp, virtadr, Mplus_DOCControl);
+ WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
+
+ mdelay(1);
+ /* Enable the Millennium Plus ASIC */
+ tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
+ WriteDOC(tmp, virtadr, Mplus_DOCControl);
+ WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
+ mdelay(1);
+
+ ChipID = ReadDOC(virtadr, ChipID);
+
+ switch (ChipID) {
+ case DOC_ChipID_DocMilPlus16:
+ reg = DoC_Mplus_Toggle;
+ break;
+ case DOC_ChipID_DocMilPlus32:
+ printk(KERN_ERR "DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
+ default:
+ ret = -ENODEV;
+ goto notfound;
+ }
+ break;
+
+ default:
+ ret = -ENODEV;
+ goto notfound;
+ }
+ /* Check the TOGGLE bit in the ECC register */
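+ /* The bit flips on each read, so consecutive reads must differ and alternate reads must match. */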
+ tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ if ((tmp == tmpb) || (tmp != tmpc)) {
+ printk(KERN_WARNING "Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr);
+ ret = -ENODEV;
+ goto notfound;
+ }
+
+ for (mtd = doclist; mtd; mtd = doc->nextdoc) {
+ unsigned char oldval;
+ unsigned char newval;
+ nand = mtd->priv;
+ doc = nand->priv;
+ /* Use the alias resolution register to determine if this is
+ in fact the same DOC aliased to a new address. If writes
+ to one chip's alias resolution register change the value on
+ the other chip, they're the same chip. */
+ if (ChipID == DOC_ChipID_DocMilPlus16) {
+ oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
+ newval = ReadDOC(virtadr, Mplus_AliasResolution);
+ } else {
+ oldval = ReadDOC(doc->virtadr, AliasResolution);
+ newval = ReadDOC(virtadr, AliasResolution);
+ }
+ if (oldval != newval)
+ continue;
+ if (ChipID == DOC_ChipID_DocMilPlus16) {
+ WriteDOC(~newval, virtadr, Mplus_AliasResolution);
+ oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
+ WriteDOC(newval, virtadr, Mplus_AliasResolution); // restore it
+ } else {
+ WriteDOC(~newval, virtadr, AliasResolution);
+ oldval = ReadDOC(doc->virtadr, AliasResolution);
+ WriteDOC(newval, virtadr, AliasResolution); // restore it
+ }
+ newval = ~newval;
+ if (oldval == newval) {
+ printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr);
+ goto notfound;
+ }
+ }
+
+ printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr);
+
+ len = sizeof(struct mtd_info) +
+ sizeof(struct nand_chip) + sizeof(struct doc_priv) + (2 * sizeof(struct nand_bbt_descr));
+ mtd = kzalloc(len, GFP_KERNEL);
+ if (!mtd) {
+ printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ nand = (struct nand_chip *) (mtd + 1);
+ doc = (struct doc_priv *) (nand + 1);
+ nand->bbt_td = (struct nand_bbt_descr *) (doc + 1);
+ nand->bbt_md = nand->bbt_td + 1;
+
+ mtd->priv = nand;
+ mtd->owner = THIS_MODULE;
+
+ nand->priv = doc;
+ nand->select_chip = doc200x_select_chip;
+ nand->cmd_ctrl = doc200x_hwcontrol;
+ nand->dev_ready = doc200x_dev_ready;
+ nand->waitfunc = doc200x_wait;
+ nand->block_bad = doc200x_block_bad;
+ nand->ecc.hwctl = doc200x_enable_hwecc;
+ nand->ecc.calculate = doc200x_calculate_ecc;
+ nand->ecc.correct = doc200x_correct_data;
+
+ nand->ecc.layout = &doc200x_oobinfo;
+ nand->ecc.mode = NAND_ECC_HW_SYNDROME;
+ nand->ecc.size = 512;
+ nand->ecc.bytes = 6;
+ nand->ecc.strength = 2;
+ nand->bbt_options = NAND_BBT_USE_FLASH;
+
+ doc->physadr = physadr;
+ doc->virtadr = virtadr;
+ doc->ChipID = ChipID;
+ doc->curfloor = -1;
+ doc->curchip = -1;
+ doc->mh0_page = -1;
+ doc->mh1_page = -1;
+ doc->nextdoc = doclist;
+
+ if (ChipID == DOC_ChipID_Doc2k)
+ numchips = doc2000_init(mtd);
+ else if (ChipID == DOC_ChipID_DocMilPlus16)
+ numchips = doc2001plus_init(mtd);
+ else
+ numchips = doc2001_init(mtd);
+
+ if ((ret = nand_scan(mtd, numchips))) {
+ /* DBB note: I believe nand_release is necessary here, as
+ buffers may have been allocated in nand_base. Check with
+ Thomas. FIX ME! */
+ /* nand_release will call mtd_device_unregister, but we
+ haven't yet added it. This is handled without incident by
+ mtd_device_unregister, as far as I can tell. */
+ nand_release(mtd);
+ kfree(mtd);
+ goto fail;
+ }
+
+ /* Success! */
+ doclist = mtd;
+ return 0;
+
+ notfound:
+ /* Put back the contents of the DOCControl register, in case it's not
+ actually a DiskOnChip. */
+ WriteDOC(save_control, virtadr, DOCControl);
+ fail:
+ iounmap(virtadr);
+ return ret;
+}
+
+static void release_nanddoc(void)
+{
+ struct mtd_info *mtd, *nextmtd;
+ struct nand_chip *nand;
+ struct doc_priv *doc;
+
+ for (mtd = doclist; mtd; mtd = nextmtd) {
+ nand = mtd->priv;
+ doc = nand->priv;
+
+ nextmtd = doc->nextdoc;
+ nand_release(mtd);
+ iounmap(doc->virtadr);
+ kfree(mtd);
+ }
+}
+
+static int __init init_nanddoc(void)
+{
+ int i, ret = 0;
+
+ /* We could create the decoder on demand, if memory is a concern.
+ * This way we have it handy, if an error happens
+ *
+ * Symbolsize is 10 (bits)
+ * Primitive polynomial is x^10+x^3+1
+ * first consecutive root is 510
+ * primitive element to generate roots = 1
+ * generator polynomial degree = 4
+ */
+ rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS);
+ if (!rs_decoder) {
+ printk(KERN_ERR "DiskOnChip: Could not create a RS decoder\n");
+ return -ENOMEM;
+ }
+
+ if (doc_config_location) {
+ printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
+ ret = doc_probe(doc_config_location);
+ if (ret < 0)
+ goto outerr;
+ } else {
+ for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+ doc_probe(doc_locations[i]);
+ }
+ }
+ /* No banner message any more. Print a message if no DiskOnChip
+ found, so the user knows we at least tried. */
+ if (!doclist) {
+ printk(KERN_INFO "No valid DiskOnChip devices found\n");
+ ret = -ENODEV;
+ goto outerr;
+ }
+ return 0;
+ outerr:
+ free_rs(rs_decoder);
+ return ret;
+}
+
+static void __exit cleanup_nanddoc(void)
+{
+ /* Cleanup the nand/DoC resources */
+ release_nanddoc();
+
+ /* Free the reed solomon resources */
+ if (rs_decoder) {
+ free_rs(rs_decoder);
+ }
+}
+
+module_init(init_nanddoc);
+module_exit(cleanup_nanddoc);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver");
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
new file mode 100644
index 00000000..b0820266
--- /dev/null
+++ b/drivers/mtd/nand/docg4.c
@@ -0,0 +1,1377 @@
+/*
+ * Copyright © 2012 Mike Dunn <mikedunn@newsguy.com>
+ *
+ * mtd nand driver for M-Systems DiskOnChip G4
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Tested on the Palm Treo 680. The G4 is also present on Toshiba Portege, Asus
+ * P526, some HTC smartphones (Wizard, Prophet, ...), O2 XDA Zinc, maybe others.
+ * Should work on these as well. Let me know!
+ *
+ * TODO:
+ *
+ * Mechanism for management of password-protected areas
+ *
+ * Hamming ecc when reading oob only
+ *
+ * According to the M-Sys documentation, this device is also available in a
+ * "dual-die" configuration having a 256MB capacity, but no mechanism for
+ * detecting this variant is documented. Currently this driver assumes 128MB
+ * capacity.
+ *
+ * Support for multiple cascaded devices ("floors"). Not sure which gadgets
+ * contain multiple G4s in a cascaded configuration, if any.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/bch.h>
+#include <linux/bitrev.h>
+
+/*
+ * You'll want to ignore badblocks if you're reading a partition that contains
+ * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
+ * it does not use mtd nand's method for marking bad blocks (using oob area).
+ * This will also skip the check of the "page written" flag.
+ */
+static bool ignore_badblocks;
+module_param(ignore_badblocks, bool, 0);
+MODULE_PARM_DESC(ignore_badblocks, "no badblock checking performed");
+
+struct docg4_priv {
+ struct mtd_info *mtd;
+ struct device *dev;
+ void __iomem *virtadr;
+ int status;
+ struct {
+ unsigned int command;
+ int column;
+ int page;
+ } last_command;
+ uint8_t oob_buf[16];
+ uint8_t ecc_buf[7];
+ int oob_page;
+ struct bch_control *bch;
+};
+
+/*
+ * Defines prefixed with DOCG4 are unique to the diskonchip G4. All others are
+ * shared with other diskonchip devices (P3, G3 at least).
+ *
+ * Functions with names prefixed with docg4_ are mtd / nand interface functions
+ * (though they may also be called internally). All others are internal.
+ */
+
+#define DOC_IOSPACE_DATA 0x0800
+
+/* register offsets */
+#define DOC_CHIPID 0x1000
+#define DOC_DEVICESELECT 0x100a
+#define DOC_ASICMODE 0x100c
+#define DOC_DATAEND 0x101e
+#define DOC_NOP 0x103e
+
+#define DOC_FLASHSEQUENCE 0x1032
+#define DOC_FLASHCOMMAND 0x1034
+#define DOC_FLASHADDRESS 0x1036
+#define DOC_FLASHCONTROL 0x1038
+#define DOC_ECCCONF0 0x1040
+#define DOC_ECCCONF1 0x1042
+#define DOC_HAMMINGPARITY 0x1046
+#define DOC_BCH_SYNDROM(idx) (0x1048 + idx)
+
+#define DOC_ASICMODECONFIRM 0x1072
+#define DOC_CHIPID_INV 0x1074
+#define DOC_POWERMODE 0x107c
+
+#define DOCG4_MYSTERY_REG 0x1050
+
+/* apparently used only to write oob bytes 6 and 7 */
+#define DOCG4_OOB_6_7 0x1052
+
+/* DOC_FLASHSEQUENCE register commands */
+#define DOC_SEQ_RESET 0x00
+#define DOCG4_SEQ_PAGE_READ 0x03
+#define DOCG4_SEQ_FLUSH 0x29
+#define DOCG4_SEQ_PAGEWRITE 0x16
+#define DOCG4_SEQ_PAGEPROG 0x1e
+#define DOCG4_SEQ_BLOCKERASE 0x24
+
+/* DOC_FLASHCOMMAND register commands */
+#define DOCG4_CMD_PAGE_READ 0x00
+#define DOC_CMD_ERASECYCLE2 0xd0
+#define DOCG4_CMD_FLUSH 0x70
+#define DOCG4_CMD_READ2 0x30
+#define DOC_CMD_PROG_BLOCK_ADDR 0x60
+#define DOCG4_CMD_PAGEWRITE 0x80
+#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOC_CMD_RESET 0xff
+
+/* DOC_POWERMODE register bits */
+#define DOC_POWERDOWN_READY 0x80
+
+/* DOC_FLASHCONTROL register bits */
+#define DOC_CTRL_CE 0x10
+#define DOC_CTRL_UNKNOWN 0x40
+#define DOC_CTRL_FLASHREADY 0x01
+
+/* DOC_ECCCONF0 register bits */
+#define DOC_ECCCONF0_READ_MODE 0x8000
+#define DOC_ECCCONF0_UNKNOWN 0x2000
+#define DOC_ECCCONF0_ECC_ENABLE 0x1000
+#define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff
+
+/* DOC_ECCCONF1 register bits */
+#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
+#define DOC_ECCCONF1_ECC_ENABLE 0x07
+#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
+
+/* DOC_ASICMODE register bits */
+#define DOC_ASICMODE_RESET 0x00
+#define DOC_ASICMODE_NORMAL 0x01
+#define DOC_ASICMODE_POWERDOWN 0x02
+#define DOC_ASICMODE_MDWREN 0x04
+#define DOC_ASICMODE_BDETCT_RESET 0x08
+#define DOC_ASICMODE_RSTIN_RESET 0x10
+#define DOC_ASICMODE_RAM_WE 0x20
+
+/* good status values read after read/write/erase operations */
+#define DOCG4_PROGSTATUS_GOOD 0x51
+#define DOCG4_PROGSTATUS_GOOD_2 0xe0
+
+/*
+ * On read operations (page and oob-only), the first byte read from I/O reg is a
+ * status. On error, it reads 0x73; otherwise, it reads either 0x71 (first read
+ * after reset only) or 0x51, so bit 1 is presumed to be an error indicator.
+ */
+#define DOCG4_READ_ERROR 0x02 /* bit 1 indicates read error */
+
+/* anatomy of the device */
+#define DOCG4_CHIP_SIZE 0x8000000
+#define DOCG4_PAGE_SIZE 0x200
+#define DOCG4_PAGES_PER_BLOCK 0x200
+#define DOCG4_BLOCK_SIZE (DOCG4_PAGES_PER_BLOCK * DOCG4_PAGE_SIZE)
+#define DOCG4_NUMBLOCKS (DOCG4_CHIP_SIZE / DOCG4_BLOCK_SIZE)
+#define DOCG4_OOB_SIZE 0x10
+#define DOCG4_CHIP_SHIFT 27 /* log_2(DOCG4_CHIP_SIZE) */
+#define DOCG4_PAGE_SHIFT 9 /* log_2(DOCG4_PAGE_SIZE) */
+#define DOCG4_ERASE_SHIFT 18 /* log_2(DOCG4_BLOCK_SIZE) */
+
+/* all but the last byte is included in ecc calculation */
+#define DOCG4_BCH_SIZE (DOCG4_PAGE_SIZE + DOCG4_OOB_SIZE - 1)
+
+#define DOCG4_USERDATA_LEN 520 /* 512 byte page plus 8 oob avail to user */
+
+/* expected values from the ID registers */
+#define DOCG4_IDREG1_VALUE 0x0400
+#define DOCG4_IDREG2_VALUE 0xfbff
+
+/* primitive polynomial used to build the Galois field used by hw ecc gen */
+#define DOCG4_PRIMITIVE_POLY 0x4443
+
+#define DOCG4_M 14 /* Galois field is of order 2^14 */
+#define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */
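+/* t * m = 4 * 14 = 56 ecc bits, i.e. the 7 hw-generated ecc bytes per DOCG4_BCH_SIZE block */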
+
+#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
+
+/*
+ * Oob bytes 0 - 6 are available to the user.
+ * Byte 7 is hamming ecc for first 7 bytes. Bytes 8 - 14 are hw-generated ecc.
+ * Byte 15 (the last) is used by the driver as a "page written" flag.
+ */
+static struct nand_ecclayout docg4_oobinfo = {
+ .eccbytes = 9,
+ .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
+ .oobavail = 7,
+ .oobfree = { {0, 7} }
+};
+
+/*
+ * The device has a nop register which M-Sys claims is for the purpose of
+ * inserting precise delays. But beware; at least some operations fail if the
+ * nop writes are replaced with a generic delay!
+ */
+static inline void write_nop(void __iomem *docptr)
+{
+ writew(0, docptr + DOC_NOP);
+}
+
+static void docg4_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *nand = mtd->priv;
+ uint16_t *p = (uint16_t *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ p[i] = readw(nand->IO_ADDR_R);
+}
+
+static void docg4_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *nand = mtd->priv;
+ uint16_t *p = (uint16_t *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ writew(p[i], nand->IO_ADDR_W);
+}
+
+static int poll_status(struct docg4_priv *doc)
+{
+ /*
+ * Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL
+ * register. Operations known to take a long time (e.g., block erase)
+ * should sleep for a while before calling this.
+ */
+
+ uint16_t flash_status;
+ unsigned int timeo;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ /* hardware quirk requires reading twice initially */
+ flash_status = readw(docptr + DOC_FLASHCONTROL);
+
+ timeo = 1000;
+ do {
+ cpu_relax();
+ flash_status = readb(docptr + DOC_FLASHCONTROL);
+ } while (!(flash_status & DOC_CTRL_FLASHREADY) && --timeo);
+
+
+ if (!timeo) {
+ dev_err(doc->dev, "%s: timed out!\n", __func__);
+ return NAND_STATUS_FAIL;
+ }
+
+ if (unlikely(timeo < 50))
+ dev_warn(doc->dev, "%s: nearly timed out; %d remaining\n",
+ __func__, timeo);
+
+ return 0;
+}
+
+
+static int docg4_wait(struct mtd_info *mtd, struct nand_chip *nand)
+{
+
+ struct docg4_priv *doc = nand->priv;
+ int status = NAND_STATUS_WP; /* inverse logic?? */
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ /* report any previously unreported error */
+ if (doc->status) {
+ status |= doc->status;
+ doc->status = 0;
+ return status;
+ }
+
+ status |= poll_status(doc);
+ return status;
+}
+
+static void docg4_select_chip(struct mtd_info *mtd, int chip)
+{
+ /*
+ * Select among multiple cascaded chips ("floors"). Multiple floors are
+ * not yet supported, so the only valid non-negative value is 0.
+ */
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev, "%s: chip %d\n", __func__, chip);
+
+ if (chip < 0)
+ return; /* deselected */
+
+ if (chip > 0)
+ dev_warn(doc->dev, "multiple floors currently unsupported\n");
+
+ writew(0, docptr + DOC_DEVICESELECT);
+}
+
+static void reset(struct mtd_info *mtd)
+{
+ /* full device reset */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ writew(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN,
+ docptr + DOC_ASICMODE);
+ writew(~(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN),
+ docptr + DOC_ASICMODECONFIRM);
+ write_nop(docptr);
+
+ writew(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN,
+ docptr + DOC_ASICMODE);
+ writew(~(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN),
+ docptr + DOC_ASICMODECONFIRM);
+
+ writew(DOC_ECCCONF1_ECC_ENABLE, docptr + DOC_ECCCONF1);
+
+ poll_status(doc);
+}
+
+static void read_hw_ecc(void __iomem *docptr, uint8_t *ecc_buf)
+{
+ /* read the 7 hw-generated ecc bytes */
+
+ int i;
+ for (i = 0; i < 7; i++) { /* hw quirk; read twice */
+ ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
+ ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
+ }
+}
+
+static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+ /*
+ * Called after a page read when hardware reports bitflips.
+ * Up to four bitflips can be corrected.
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i, numerrs, errpos[4];
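+ /* hw-generated ecc bytes observed when reading a blank (all-0xff) page */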
+ const uint8_t blank_read_hwecc[8] = {
+ 0xcf, 0x72, 0xfc, 0x1b, 0xa9, 0xc7, 0xb9, 0 };
+
+ read_hw_ecc(docptr, doc->ecc_buf); /* read 7 hw-generated ecc bytes */
+
+ /* check if read error is due to a blank page */
+ if (!memcmp(doc->ecc_buf, blank_read_hwecc, 7))
+ return 0; /* yes */
+
+ /* skip additional check of "written flag" if ignore_badblocks */
+ if (ignore_badblocks == false) {
+
+ /*
+ * If the hw ecc bytes are not those of a blank page, there's
+ * still a chance that the page is blank, but was read with
+ * errors. Check the "written flag" in last oob byte, which
+ * is set to zero when a page is written. If more than half
+ * the bits are set, assume a blank page. Unfortunately, the
+ * bit flip(s) are not reported in stats.
+ */
+
+ if (doc->oob_buf[15]) {
+ int bit, numsetbits = 0;
+ unsigned long written_flag = doc->oob_buf[15];
+ for_each_set_bit(bit, &written_flag, 8)
+ numsetbits++;
+ if (numsetbits > 4) { /* assume blank */
+ dev_warn(doc->dev,
+ "error(s) in blank page "
+ "at offset %08x\n",
+ page * DOCG4_PAGE_SIZE);
+ return 0;
+ }
+ }
+ }
+
+ /*
+ * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
+ * algorithm is used to decode this. However the hw operates on page
+ * data in a bit order that is the reverse of that of the bch alg,
+ * requiring that the bits be reversed on the result. Thanks to Ivan
+ * Djelic for his analysis!
+ */
+ for (i = 0; i < 7; i++)
+ doc->ecc_buf[i] = bitrev8(doc->ecc_buf[i]);
+
+ numerrs = decode_bch(doc->bch, NULL, DOCG4_USERDATA_LEN, NULL,
+ doc->ecc_buf, NULL, errpos);
+
+ if (numerrs == -EBADMSG) {
+ dev_warn(doc->dev, "uncorrectable errors at offset %08x\n",
+ page * DOCG4_PAGE_SIZE);
+ return -EBADMSG;
+ }
+
+ BUG_ON(numerrs < 0); /* -EINVAL, or anything other than -EBADMSG */
+
+ /* undo last step in BCH alg (modulo mirroring not needed) */
+ for (i = 0; i < numerrs; i++)
+ errpos[i] = (errpos[i] & ~7)|(7-(errpos[i] & 7));
+
+ /* fix the errors */
+ for (i = 0; i < numerrs; i++) {
+
+ /* ignore if error within oob ecc bytes */
+ if (errpos[i] > DOCG4_USERDATA_LEN * 8)
+ continue;
+
+ /* if error within oob area preceding ecc bytes... */
+ if (errpos[i] > DOCG4_PAGE_SIZE * 8)
+ change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8,
+ (unsigned long *)doc->oob_buf);
+
+ else /* error in page data */
+ change_bit(errpos[i], (unsigned long *)buf);
+ }
+
+ dev_notice(doc->dev, "%d error(s) corrected at offset %08x\n",
+ numerrs, page * DOCG4_PAGE_SIZE);
+
+ return numerrs;
+}
+
+static uint8_t docg4_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+
+ dev_dbg(doc->dev, "%s\n", __func__);
+
+ if (doc->last_command.command == NAND_CMD_STATUS) {
+ int status;
+
+ /*
+ * Previous nand command was status request, so nand
+ * infrastructure code expects to read the status here. If an
+ * error occurred in a previous operation, report it.
+ */
+ doc->last_command.command = 0;
+
+ if (doc->status) {
+ status = doc->status;
+ doc->status = 0;
+ }
+
+ /* why is NAND_STATUS_WP inverse logic?? */
+ else
+ status = NAND_STATUS_WP | NAND_STATUS_READY;
+
+ return status;
+ }
+
+ dev_warn(doc->dev, "unexpectd call to read_byte()\n");
+
+ return 0;
+}
+
+static void write_addr(struct docg4_priv *doc, uint32_t docg4_addr)
+{
+ /* write the four address bytes packed in docg4_addr to the device */
+
+ void __iomem *docptr = doc->virtadr;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+ docg4_addr >>= 8;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+ docg4_addr >>= 8;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+ docg4_addr >>= 8;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+}
+
+static int read_progstatus(struct docg4_priv *doc)
+{
+ /*
+ * This apparently checks the status of programming. Done after an
+ * erasure, and after page data is written. On error, the status is
+ * saved, to be later retrieved by the nand infrastructure code.
+ */
+ void __iomem *docptr = doc->virtadr;
+
+ /* status is read from the I/O reg */
+ uint16_t status1 = readw(docptr + DOC_IOSPACE_DATA);
+ uint16_t status2 = readw(docptr + DOC_IOSPACE_DATA);
+ uint16_t status3 = readw(docptr + DOCG4_MYSTERY_REG);
+
+ dev_dbg(doc->dev, "docg4: %s: %02x %02x %02x\n",
+ __func__, status1, status2, status3);
+
+ if (status1 != DOCG4_PROGSTATUS_GOOD
+ || status2 != DOCG4_PROGSTATUS_GOOD_2
+ || status3 != DOCG4_PROGSTATUS_GOOD_2) {
+ doc->status = NAND_STATUS_FAIL;
+ dev_warn(doc->dev, "read_progstatus failed: "
+ "%02x, %02x, %02x\n", status1, status2, status3);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int pageprog(struct mtd_info *mtd)
+{
+ /*
+ * Final step in writing a page. Writes the contents of its
+ * internal buffer out to the flash array, or some such.
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ int retval = 0;
+
+ dev_dbg(doc->dev, "docg4: %s\n", __func__);
+
+ writew(DOCG4_SEQ_PAGEPROG, docptr + DOC_FLASHSEQUENCE);
+ writew(DOC_CMD_PROG_CYCLE2, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* Just busy-wait; usleep_range() slows things down noticeably. */
+ poll_status(doc);
+
+ writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
+ writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ retval = read_progstatus(doc);
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+ poll_status(doc);
+ write_nop(docptr);
+
+ return retval;
+}
+
+static void sequence_reset(struct mtd_info *mtd)
+{
+ /* common starting sequence for all operations */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ writew(DOC_CTRL_UNKNOWN | DOC_CTRL_CE, docptr + DOC_FLASHCONTROL);
+ writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE);
+ writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+ poll_status(doc);
+ write_nop(docptr);
+}
+
+static void read_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
+{
+ /* first step in reading a page */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev,
+ "docg4: %s: g4 page %08x\n", __func__, docg4_addr);
+
+ sequence_reset(mtd);
+
+ writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+
+ write_addr(doc, docg4_addr);
+
+ write_nop(docptr);
+ writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ poll_status(doc);
+}
+
+static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
+{
+ /* first step in writing a page */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev,
+ "docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
+ sequence_reset(mtd);
+ writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_addr(doc, docg4_addr);
+ write_nop(docptr);
+ write_nop(docptr);
+ poll_status(doc);
+}
+
+static uint32_t mtd_to_docg4_address(int page, int column)
+{
+ /*
+ * Convert mtd address to format used by the device, 32 bit packed.
+ *
+ * Some notes on G4 addressing... The M-Sys documentation on this device
+ * claims that pages are 2K in length, and indeed, the format of the
+ * address used by the device reflects that. But within each page are
+ * four 512 byte "sub-pages", each with its own oob data that is
+ * read/written immediately after the 512 bytes of page data. This oob
+ * data contains the ecc bytes for the preceding 512 bytes.
+ *
+ * Rather than tell the mtd nand infrastructure that page size is 2k,
+ * with four sub-pages each, we engage in a little subterfuge and tell
+ * the infrastructure code that pages are 512 bytes in size. This is
+ * done because during the course of reverse-engineering the device, I
+ * never observed an instance where an entire 2K "page" was read or
+ * written as a unit. Each "sub-page" is always addressed individually,
+ * its data read/written, and ecc handled before the next "sub-page" is
+ * addressed.
+ *
+ * This requires us to convert addresses passed by the mtd nand
+ * infrastructure code to those used by the device.
+ *
+ * The address that is written to the device consists of four bytes: the
+ * first two are the 2k page number, and the second is the index into
+ * the page. The index is in terms of 16-bit half-words and includes
+ * the preceding oob data, so e.g., the index into the second
+ * "sub-page" is 0x108, and the full device address of the start of mtd
+ * page 0x201 is 0x00800108.
+ */
+ int g4_page = page / 4; /* device's 2K page */
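+ /* 0x108 halfwords per sub-page: 0x100 (512 data bytes) + 8 (16 oob bytes) */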
+ int g4_index = (page % 4) * 0x108 + column/2; /* offset into page */
+ return (g4_page << 16) | g4_index; /* pack */
+}
+
+static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
+ int page_addr)
+{
+ /* handle standard nand commands */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ uint32_t g4_addr = mtd_to_docg4_address(page_addr, column);
+
+ dev_dbg(doc->dev, "%s %x, page_addr=%x, column=%x\n",
+ __func__, command, page_addr, column);
+
+ /*
+ * Save the command and its arguments. This enables emulation of
+ * standard flash devices, and also some optimizations.
+ */
+ doc->last_command.command = command;
+ doc->last_command.column = column;
+ doc->last_command.page = page_addr;
+
+ switch (command) {
+
+ case NAND_CMD_RESET:
+ reset(mtd);
+ break;
+
+ case NAND_CMD_READ0:
+ read_page_prologue(mtd, g4_addr);
+ break;
+
+ case NAND_CMD_STATUS:
+ /* next call to read_byte() will expect a status */
+ break;
+
+ case NAND_CMD_SEQIN:
+ write_page_prologue(mtd, g4_addr);
+
+ /* hack for deferred write of oob bytes */
+ if (doc->oob_page == page_addr)
+ memcpy(nand->oob_poi, doc->oob_buf, 16);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ pageprog(mtd);
+ break;
+
+ /* we don't expect these, based on review of nand_base.c */
+ case NAND_CMD_READOOB:
+ case NAND_CMD_READID:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ dev_warn(doc->dev, "docg4_command: "
+ "unexpected nand command 0x%x\n", command);
+ break;
+
+ }
+}
+
+static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
+ uint8_t *buf, int page, bool use_ecc)
+{
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t status, edc_err, *buf16;
+
+ dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
+
+ writew(DOC_ECCCONF0_READ_MODE |
+ DOC_ECCCONF0_ECC_ENABLE |
+ DOC_ECCCONF0_UNKNOWN |
+ DOCG4_BCH_SIZE,
+ docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* the 1st byte from the I/O reg is a status; the rest is page data */
+ status = readw(docptr + DOC_IOSPACE_DATA);
+ if (status & DOCG4_READ_ERROR) {
+ dev_err(doc->dev,
+ "docg4_read_page: bad status: 0x%02x\n", status);
+ writew(0, docptr + DOC_DATAEND);
+ return -EIO;
+ }
+
+ dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
+
+ docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */
+
+ /*
+ * Diskonchips read oob immediately after a page read. Mtd
+ * infrastructure issues a separate command for reading oob after the
+ * page is read. So we save the oob bytes in a local buffer and just
+ * copy it if the next command reads oob from the same page.
+ */
+
+ /* first 14 oob bytes read from I/O reg */
+ docg4_read_buf(mtd, doc->oob_buf, 14);
+
+ /* last 2 read from another reg */
+ buf16 = (uint16_t *)(doc->oob_buf + 14);
+ *buf16 = readw(docptr + DOCG4_MYSTERY_REG);
+
+ write_nop(docptr);
+
+ if (likely(use_ecc == true)) {
+
+ /* read the register that tells us if bitflip(s) detected */
+ edc_err = readw(docptr + DOC_ECCCONF1);
+ edc_err = readw(docptr + DOC_ECCCONF1);
+ dev_dbg(doc->dev, "%s: edc_err = 0x%02x\n", __func__, edc_err);
+
+ /* If bitflips are reported, attempt to correct with ecc */
+ if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
+ int bits_corrected = correct_data(mtd, buf, page);
+ if (bits_corrected == -EBADMSG)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += bits_corrected;
+ }
+ }
+
+ writew(0, docptr + DOC_DATAEND);
+ return 0;
+}
+
+
+static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
+ uint8_t *buf, int page)
+{
+ return read_page(mtd, nand, buf, page, false);
+}
+
+static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
+ uint8_t *buf, int page)
+{
+ return read_page(mtd, nand, buf, page, true);
+}
+
+static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
+ int page, int sndcmd)
+{
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t status;
+
+ dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
+
+ /*
+ * Oob bytes are read as part of a normal page read. If the previous
+ * nand command was a read of the page whose oob is now being read, just
+ * copy the oob bytes that we saved in a local buffer and avoid a
+ * separate oob read.
+ */
+ if (doc->last_command.command == NAND_CMD_READ0 &&
+ doc->last_command.page == page) {
+ memcpy(nand->oob_poi, doc->oob_buf, 16);
+ return 0;
+ }
+
+ /*
+ * Separate read of oob data only.
+ */
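+ /* passing ecc.size (the page size) as column addresses the start of the oob */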
+ docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
+
+ writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* the 1st byte from the I/O reg is a status; the rest is oob data */
+ status = readw(docptr + DOC_IOSPACE_DATA);
+ if (status & DOCG4_READ_ERROR) {
+ dev_warn(doc->dev,
+ "docg4_read_oob failed: status = 0x%02x\n", status);
+ return -EIO;
+ }
+
+ dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
+
+ docg4_read_buf(mtd, nand->oob_poi, 16);
+
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+
+ return 0;
+}
+
+static void docg4_erase_block(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t g4_page;
+
+ dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
+
+ sequence_reset(mtd);
+
+ writew(DOCG4_SEQ_BLOCKERASE, docptr + DOC_FLASHSEQUENCE);
+ writew(DOC_CMD_PROG_BLOCK_ADDR, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+
+ /* only 2 bytes of address are written to specify erase block */
+ g4_page = (uint16_t)(page / 4); /* to g4's 2k page addressing */
+ writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
+ g4_page >>= 8;
+ writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
+ write_nop(docptr);
+
+ /* start the erasure */
+ writew(DOC_CMD_ERASECYCLE2, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ usleep_range(500, 1000); /* erasure is long; take a snooze */
+ poll_status(doc);
+ writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
+ writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ read_progstatus(doc);
+
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+ poll_status(doc);
+ write_nop(docptr);
+}
+
+static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
+ const uint8_t *buf, bool use_ecc)
+{
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint8_t ecc_buf[8];
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ writew(DOC_ECCCONF0_ECC_ENABLE |
+ DOC_ECCCONF0_UNKNOWN |
+ DOCG4_BCH_SIZE,
+ docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+
+ /* write the page data */
+ docg4_write_buf16(mtd, buf, DOCG4_PAGE_SIZE);
+
+ /* oob bytes 0 through 5 are written to I/O reg */
+ docg4_write_buf16(mtd, nand->oob_poi, 6);
+
+ /* oob byte 6 written to a separate reg */
+ writew(nand->oob_poi[6], docptr + DOCG4_OOB_6_7);
+
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* write hw-generated ecc bytes to oob */
+ if (likely(use_ecc == true)) {
+ /* oob byte 7 is hamming code */
+ uint8_t hamming = readb(docptr + DOC_HAMMINGPARITY);
+ hamming = readb(docptr + DOC_HAMMINGPARITY); /* 2nd read */
+ writew(hamming, docptr + DOCG4_OOB_6_7);
+ write_nop(docptr);
+
+ /* read the 7 bch bytes from ecc regs */
+ read_hw_ecc(docptr, ecc_buf);
+ ecc_buf[7] = 0; /* clear the "page written" flag */
+ }
+
+ /* write user-supplied bytes to oob */
+ else {
+ writew(nand->oob_poi[7], docptr + DOCG4_OOB_6_7);
+ write_nop(docptr);
+ memcpy(ecc_buf, &nand->oob_poi[8], 8);
+ }
+
+ docg4_write_buf16(mtd, ecc_buf, 8);
+ write_nop(docptr);
+ write_nop(docptr);
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+}
+
+static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
+ const uint8_t *buf)
+{
+ return write_page(mtd, nand, buf, false);
+}
+
+static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
+ const uint8_t *buf)
+{
+ return write_page(mtd, nand, buf, true);
+}
+
+static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
+ int page)
+{
+ /*
+ * Writing oob-only is not really supported, because MLC nand must write
+ * oob bytes at the same time as page data. Nonetheless, we save the
+ * oob buffer contents here, and then write it along with the page data
+ * if the same page is subsequently written. This allows user space
+ * utilities that write the oob data prior to the page data to work
+ * (e.g., nandwrite). The disadvantage is that, if the intention was to
+ * write oob only, the operation is quietly ignored. Also, oob can get
+ * corrupted if two concurrent processes are running nandwrite.
+ */
+
+ /* note that bytes 7..14 are hw generated hamming/ecc and overwritten */
+ struct docg4_priv *doc = nand->priv;
+ doc->oob_page = page;
+ memcpy(doc->oob_buf, nand->oob_poi, 16);
+ return 0;
+}
+
+static int __init read_factory_bbt(struct mtd_info *mtd)
+{
+ /*
+ * The device contains a read-only factory bad block table. Read it and
+ * update the memory-based bbt accordingly.
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
+ uint8_t *buf;
+ int i, block, status;
+
+ buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ read_page_prologue(mtd, g4_addr);
+ status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE);
+ if (status)
+ goto exit;
+
+ /*
+ * If no memory-based bbt was created, exit. This will happen if module
+ * parameter ignore_badblocks is set. Then why even call this function?
+ * For an unknown reason, block erase always fails if it's the first
+ * operation after device power-up. The above read ensures it never is.
+ * Ugly, I know.
+ */
+ if (nand->bbt == NULL) /* no memory-based bbt */
+ goto exit;
+
+ /*
+ * Parse factory bbt and update memory-based bbt. Factory bbt format is
+ * simple: one bit per block, block numbers increase left to right (msb
+ * to lsb). Bit clear means bad block.
+ */
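+ /*
+ * For example, buf[0] == 0xfe has only its lsb clear, so of the first
+ * eight blocks only block 0 + 7 - 0 = 7 is flagged bad below.
+ */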
+ for (i = block = 0; block < DOCG4_NUMBLOCKS; block += 8, i++) {
+ int bitnum;
+ unsigned long bits = ~buf[i];
+ for_each_set_bit(bitnum, &bits, 8) {
+ int badblock = block + 7 - bitnum;
+ nand->bbt[badblock / 4] |=
+ 0x03 << ((badblock % 4) * 2);
+ mtd->ecc_stats.badblocks++;
+ dev_notice(doc->dev, "factory-marked bad block: %d\n",
+ badblock);
+ }
+ }
+ exit:
+ kfree(buf);
+ return status;
+}
+
+static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ /*
+ * Mark a block as bad. Bad blocks are marked in the oob area of the
+ * first page of the block. The default scan_bbt() in the nand
+ * infrastructure code works fine for building the memory-based bbt
+ * during initialization, as does the nand infrastructure function that
+ * checks if a block is bad by reading the bbt. This function replaces
+ * the nand default because writes to oob-only are not supported.
+ */
+
+ int ret, i;
+ uint8_t *buf;
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ struct nand_bbt_descr *bbtd = nand->badblock_pattern;
+ int block = (int)(ofs >> nand->bbt_erase_shift);
+ int page = (int)(ofs >> nand->page_shift);
+ uint32_t g4_addr = mtd_to_docg4_address(page, 0);
+
+ dev_dbg(doc->dev, "%s: %08llx\n", __func__, ofs);
+
+ if (unlikely(ofs & (DOCG4_BLOCK_SIZE - 1)))
+ dev_warn(doc->dev, "%s: ofs %llx not start of block!\n",
+ __func__, ofs);
+
+ /* allocate blank buffer for page data */
+ buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ /* update bbt in memory */
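+ /* (two status bits per block; 0x01 marks the block bad) */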
+ nand->bbt[block / 4] |= 0x01 << ((block & 0x03) * 2);
+
+ /* write bit-wise negation of pattern to oob buffer */
+ memset(nand->oob_poi, 0xff, mtd->oobsize);
+ for (i = 0; i < bbtd->len; i++)
+ nand->oob_poi[bbtd->offs + i] = ~bbtd->pattern[i];
+
+ /* write first page of block */
+ write_page_prologue(mtd, g4_addr);
+ docg4_write_page(mtd, nand, buf);
+ ret = pageprog(mtd);
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ kfree(buf);
+
+ return ret;
+}
+
+static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ /* only called when module_param ignore_badblocks is set */
+ return 0;
+}
+
+static int docg4_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ /*
+ * Put the device into "deep power-down" mode. Note that CE# must be
+ * deasserted for this to take effect. The xscale, e.g., can be
+ * configured to float this signal when the processor enters power-down,
+ * and a suitable pull-up ensures its deassertion.
+ */
+
+ int i;
+ uint8_t pwr_down;
+ struct docg4_priv *doc = platform_get_drvdata(pdev);
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ /* poll the register that tells us we're ready to go to sleep */
+ for (i = 0; i < 10; i++) {
+ pwr_down = readb(docptr + DOC_POWERMODE);
+ if (pwr_down & DOC_POWERDOWN_READY)
+ break;
+ usleep_range(1000, 4000);
+ }
+
+ if (!(pwr_down & DOC_POWERDOWN_READY)) {
+ dev_err(doc->dev, "suspend failed; "
+ "timeout polling DOC_POWERDOWN_READY\n");
+ return -EIO;
+ }
+
+ writew(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN,
+ docptr + DOC_ASICMODE);
+ writew(~(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN),
+ docptr + DOC_ASICMODECONFIRM);
+
+ write_nop(docptr);
+
+ return 0;
+}
+
+static int docg4_resume(struct platform_device *pdev)
+{
+
+ /*
+ * Exit power-down. Twelve consecutive reads of the address below
+ * accomplishes this, assuming CE# has been asserted.
+ */
+
+ struct docg4_priv *doc = platform_get_drvdata(pdev);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ for (i = 0; i < 12; i++)
+ readb(docptr + 0x1fff);
+
+ return 0;
+}
+
+static void __init init_mtd_structs(struct mtd_info *mtd)
+{
+ /* initialize mtd and nand data structures */
+
+ /*
+ * Note that some of the following initializations are not usually
+ * required within a nand driver because they are performed by the nand
+ * infrastructure code as part of nand_scan(). In this case they need
+ * to be initialized here because we skip the call to nand_scan_ident() (the
+ * first half of nand_scan()). The call to nand_scan_ident() is skipped
+ * because for this device the chip id is not read in the manner of a
+ * standard nand device. Unfortunately, nand_scan_ident() does other
+ * things as well, such as call nand_set_defaults().
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+
+ mtd->size = DOCG4_CHIP_SIZE;
+ mtd->name = "Msys_Diskonchip_G4";
+ mtd->writesize = DOCG4_PAGE_SIZE;
+ mtd->erasesize = DOCG4_BLOCK_SIZE;
+ mtd->oobsize = DOCG4_OOB_SIZE;
+ nand->chipsize = DOCG4_CHIP_SIZE;
+ nand->chip_shift = DOCG4_CHIP_SHIFT;
+ nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT;
+ nand->chip_delay = 20;
+ nand->page_shift = DOCG4_PAGE_SHIFT;
+ nand->pagemask = 0x3ffff;
+ nand->badblockpos = NAND_LARGE_BADBLOCK_POS;
+ nand->badblockbits = 8;
+ nand->ecc.layout = &docg4_oobinfo;
+ nand->ecc.mode = NAND_ECC_HW_SYNDROME;
+ nand->ecc.size = DOCG4_PAGE_SIZE;
+ nand->ecc.prepad = 8;
+ nand->ecc.bytes = 8;
+ nand->ecc.strength = DOCG4_T;
+ nand->options =
+ NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR;
+ nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
+ nand->controller = &nand->hwcontrol;
+ spin_lock_init(&nand->controller->lock);
+ init_waitqueue_head(&nand->controller->wq);
+
+ /* methods */
+ nand->cmdfunc = docg4_command;
+ nand->waitfunc = docg4_wait;
+ nand->select_chip = docg4_select_chip;
+ nand->read_byte = docg4_read_byte;
+ nand->block_markbad = docg4_block_markbad;
+ nand->read_buf = docg4_read_buf;
+ nand->write_buf = docg4_write_buf16;
+ nand->scan_bbt = nand_default_bbt;
+ nand->erase_cmd = docg4_erase_block;
+ nand->ecc.read_page = docg4_read_page;
+ nand->ecc.write_page = docg4_write_page;
+ nand->ecc.read_page_raw = docg4_read_page_raw;
+ nand->ecc.write_page_raw = docg4_write_page_raw;
+ nand->ecc.read_oob = docg4_read_oob;
+ nand->ecc.write_oob = docg4_write_oob;
+
+ /*
+ * The way the nand infrastructure code is written, a memory-based bbt
+ * is not created if NAND_SKIP_BBTSCAN is set. With no memory bbt,
+ * nand->block_bad() is used. So when ignoring bad blocks, we skip the
+ * scan and define a dummy block_bad() which always returns 0.
+ */
+ if (ignore_badblocks) {
+ nand->options |= NAND_SKIP_BBTSCAN;
+ nand->block_bad = docg4_block_neverbad;
+ }
+
+}
+
+static int __init read_id_reg(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t id1, id2;
+
+ /* check for presence of g4 chip by reading id registers */
+ id1 = readw(docptr + DOC_CHIPID);
+ id1 = readw(docptr + DOCG4_MYSTERY_REG);
+ id2 = readw(docptr + DOC_CHIPID_INV);
+ id2 = readw(docptr + DOCG4_MYSTERY_REG);
+
+ if (id1 == DOCG4_IDREG1_VALUE && id2 == DOCG4_IDREG2_VALUE) {
+ dev_info(doc->dev,
+ "NAND device: 128MiB Diskonchip G4 detected\n");
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+
+static int __init probe_docg4(struct platform_device *pdev)
+{
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ void __iomem *virtadr;
+ struct docg4_priv *doc;
+ int len, retval;
+ struct resource *r;
+ struct device *dev = &pdev->dev;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(dev, "no io memory resource defined!\n");
+ return -ENODEV;
+ }
+
+ virtadr = ioremap(r->start, resource_size(r));
+ if (!virtadr) {
+ dev_err(dev, "Diskonchip ioremap failed: %pR\n", r);
+ return -EIO;
+ }
+
+ len = sizeof(struct mtd_info) + sizeof(struct nand_chip) +
+ sizeof(struct docg4_priv);
+ mtd = kzalloc(len, GFP_KERNEL);
+ if (mtd == NULL) {
+ retval = -ENOMEM;
+ goto fail;
+ }
+ nand = (struct nand_chip *) (mtd + 1);
+ doc = (struct docg4_priv *) (nand + 1);
+ mtd->priv = nand;
+ nand->priv = doc;
+ mtd->owner = THIS_MODULE;
+ doc->virtadr = virtadr;
+ doc->dev = dev;
+
+ init_mtd_structs(mtd);
+
+ /* initialize kernel bch algorithm */
+ doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
+ if (doc->bch == NULL) {
+ retval = -EINVAL;
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, doc);
+
+ reset(mtd);
+ retval = read_id_reg(mtd);
+ if (retval == -ENODEV) {
+ dev_warn(dev, "No diskonchip G4 device found.\n");
+ goto fail;
+ }
+
+ retval = nand_scan_tail(mtd);
+ if (retval)
+ goto fail;
+
+ retval = read_factory_bbt(mtd);
+ if (retval)
+ goto fail;
+
+ retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
+ if (retval)
+ goto fail;
+
+ doc->mtd = mtd;
+ return 0;
+
+ fail:
+ iounmap(virtadr);
+ if (mtd) {
+ /* re-declarations avoid compiler warning */
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ nand_release(mtd); /* deletes partitions and mtd devices */
+ platform_set_drvdata(pdev, NULL);
+ free_bch(doc->bch);
+ kfree(mtd);
+ }
+
+ return retval;
+}
+
+static int __exit cleanup_docg4(struct platform_device *pdev)
+{
+ struct docg4_priv *doc = platform_get_drvdata(pdev);
+ nand_release(doc->mtd);
+ platform_set_drvdata(pdev, NULL);
+ free_bch(doc->bch);
+ kfree(doc->mtd);
+ iounmap(doc->virtadr);
+ return 0;
+}
+
+static struct platform_driver docg4_driver = {
+ .driver = {
+ .name = "docg4",
+ .owner = THIS_MODULE,
+ },
+ .suspend = docg4_suspend,
+ .resume = docg4_resume,
+ .remove = __exit_p(cleanup_docg4),
+};
+
+static int __init docg4_init(void)
+{
+ return platform_driver_probe(&docg4_driver, probe_docg4);
+}
+
+static void __exit docg4_exit(void)
+{
+ platform_driver_unregister(&docg4_driver);
+}
+
+module_init(docg4_init);
+module_exit(docg4_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mike Dunn");
+MODULE_DESCRIPTION("M-Systems DiskOnChip G4 device driver");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
new file mode 100644
index 00000000..80b5264f
--- /dev/null
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -0,0 +1,998 @@
+/* Freescale Enhanced Local Bus Controller NAND driver
+ *
+ * Copyright © 2006-2007, 2010 Freescale Semiconductor
+ *
+ * Authors: Nick Spence <nick.spence@freescale.com>,
+ * Scott Wood <scottwood@freescale.com>
+ * Jack Lan <jack.lan@freescale.com>
+ * Roy Zang <tie-fei.zang@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/fsl_lbc.h>
+
+#define MAX_BANKS 8
+#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
+#define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */
+
+/* mtd information per set */
+
+struct fsl_elbc_mtd {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct fsl_lbc_ctrl *ctrl;
+
+ struct device *dev;
+ int bank; /* Chip select bank number */
+ u8 __iomem *vbase; /* Chip select base virtual address */
+ int page_size; /* NAND page size (0=512, 1=2048) */
+ unsigned int fmr; /* FCM Flash Mode Register value */
+};
+
+/* Freescale eLBC FCM controller information */
+
+struct fsl_elbc_fcm_ctrl {
+ struct nand_hw_control controller;
+ struct fsl_elbc_mtd *chips[MAX_BANKS];
+
+ u8 __iomem *addr; /* Address of assigned FCM buffer */
+ unsigned int page; /* Last page written to / read from */
+ unsigned int read_bytes; /* Number of bytes read during command */
+ unsigned int column; /* Saved column from SEQIN */
+ unsigned int index; /* Pointer to next byte to 'read' */
+ unsigned int status; /* status read from LTESR after last op */
+ unsigned int mdr; /* UPM/FCM Data Register value */
+ unsigned int use_mdr; /* Non zero if the MDR is to be set */
+ unsigned int oob; /* Non zero if operating on OOB data */
+ unsigned int counter; /* counter for the initializations */
+};
+
+/* These map to the positions used by the FCM hardware ECC generator */
+
+/* Small Page FLASH with FMR[ECCM] = 0 */
+static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = {
+ .eccbytes = 3,
+ .eccpos = {6, 7, 8},
+ .oobfree = { {0, 5}, {9, 7} },
+};
+
+/* Small Page FLASH with FMR[ECCM] = 1 */
+static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = {
+ .eccbytes = 3,
+ .eccpos = {8, 9, 10},
+ .oobfree = { {0, 5}, {6, 2}, {11, 5} },
+};
+
+/* Large Page FLASH with FMR[ECCM] = 0 */
+static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = {
+ .eccbytes = 12,
+ .eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56},
+ .oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} },
+};
+
+/* Large Page FLASH with FMR[ECCM] = 1 */
+static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
+ .eccbytes = 12,
+ .eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58},
+ .oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} },
+};
+
+/*
+ * fsl_elbc_oob_lp_eccm* specify that LP NAND's OOB free area starts at offset
+ * 1, so we have to adjust the bad block pattern. This pattern should be used
+ * for x8 chips only. So far the hardware does not support x16 chips anyway.
+ */
+static u8 scan_ff_pattern[] = { 0xff, };
+
+static struct nand_bbt_descr largepage_memorybased = {
+ .options = 0,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern,
+};
+
+/*
+ * ELBC may use HW ECC, so the OOB offsets that the NAND core uses for the bbt
+ * would interfere with the ECC positions; that's why we implement our own
+ * descriptors. OOB {11, 5} works for both SP and LP chips, with ECCM = 1 and
+ * ECCM = 0.
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 11,
+ .len = 4,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 11,
+ .len = 4,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+/*=================================*/
+
+/*
+ * Set up the FCM hardware block and page address fields, and the fcm
+ * structure addr field to point to the correct FCM buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ int buf_num;
+
+ elbc_fcm_ctrl->page = page_addr;
+
+ if (priv->page_size) {
+ /*
+ * large page size chip: FPAR[PI] saves the lowest 6 bits,
+ * FBAR[BLK] saves the other bits.
+ */
+ out_be32(&lbc->fbar, page_addr >> 6);
+ out_be32(&lbc->fpar,
+ ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
+ (oob ? FPAR_LP_MS : 0) | column);
+ buf_num = (page_addr & 1) << 2;
+ } else {
+ /*
+ * small page size chip: FPAR[PI] saves the lowest 5 bits,
+ * FBAR[BLK] saves the other bits.
+ */
+ out_be32(&lbc->fbar, page_addr >> 5);
+ out_be32(&lbc->fpar,
+ ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
+ (oob ? FPAR_SP_MS : 0) | column);
+ buf_num = page_addr & 7;
+ }
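+ /*
+ * Each FCM buffer is 1K. A large page plus its oob does not fit in a
+ * single buffer, so large-page accesses start only at buffer 0 or 4;
+ * a small page fits in any one of the eight buffers.
+ */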
+
+ elbc_fcm_ctrl->addr = priv->vbase + buf_num * 1024;
+ elbc_fcm_ctrl->index = column;
+
+ /* for OOB data point to the second half of the buffer */
+ if (oob)
+ elbc_fcm_ctrl->index += priv->page_size ? 2048 : 512;
+
+ dev_vdbg(priv->dev, "set_addr: bank=%d, "
+ "elbc_fcm_ctrl->addr=0x%p (0x%p), "
+ "index %x, pes %d ps %d\n",
+ buf_num, elbc_fcm_ctrl->addr, priv->vbase,
+ elbc_fcm_ctrl->index,
+ chip->phys_erase_shift, chip->page_shift);
+}
+
+/*
+ * execute FCM command and wait for it to complete
+ */
+static int fsl_elbc_run_command(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ /* Setup the FMR[OP] to execute without write protection */
+ out_be32(&lbc->fmr, priv->fmr | 3);
+ if (elbc_fcm_ctrl->use_mdr)
+ out_be32(&lbc->mdr, elbc_fcm_ctrl->mdr);
+
+ dev_vdbg(priv->dev,
+ "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
+ in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
+ dev_vdbg(priv->dev,
+ "fsl_elbc_run_command: fbar=%08x fpar=%08x "
+ "fbcr=%08x bank=%d\n",
+ in_be32(&lbc->fbar), in_be32(&lbc->fpar),
+ in_be32(&lbc->fbcr), priv->bank);
+
+ ctrl->irq_status = 0;
+ /* execute special operation */
+ out_be32(&lbc->lsor, priv->bank);
+
+ /* wait for FCM complete flag or timeout */
+ wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
+ FCM_TIMEOUT_MSECS * HZ/1000);
+ elbc_fcm_ctrl->status = ctrl->irq_status;
+ /* store mdr value in case it was needed */
+ if (elbc_fcm_ctrl->use_mdr)
+ elbc_fcm_ctrl->mdr = in_be32(&lbc->mdr);
+
+ elbc_fcm_ctrl->use_mdr = 0;
+
+ if (elbc_fcm_ctrl->status != LTESR_CC) {
+ dev_info(priv->dev,
+ "command failed: fir %x fcr %x status %x mdr %x\n",
+ in_be32(&lbc->fir), in_be32(&lbc->fcr),
+ elbc_fcm_ctrl->status, elbc_fcm_ctrl->mdr);
+ return -EIO;
+ }
+
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return 0;
+
+ if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
+ uint32_t lteccr = in_be32(&lbc->lteccr);
+ /*
+ * if command was a full page read and the ELBC
+ * has the LTECCR register, then bits 12-15 (ppc order) of
+ * LTECCR indicates which 512 byte sub-pages had fixed errors.
+ * bits 28-31 are uncorrectable errors, marked elsewhere.
+ * for small page nand only 1 bit is used.
+ * if the ELBC doesn't have the lteccr register it reads 0
+ */
+ if (lteccr & 0x000F000F)
+ out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
+ if (lteccr & 0x000F0000)
+ mtd->ecc_stats.corrected++;
+ }
+
+ return 0;
+}
+
+static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
+{
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ if (priv->page_size) {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_CM1 << FIR_OP3_SHIFT) |
+ (FIR_OP_RBW << FIR_OP4_SHIFT));
+
+ out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
+ } else {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_RBW << FIR_OP3_SHIFT));
+
+ if (oob)
+ out_be32(&lbc->fcr, NAND_CMD_READOOB << FCR_CMD0_SHIFT);
+ else
+ out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT);
+ }
+}
+
+/* cmdfunc send commands to the FCM */
+static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ elbc_fcm_ctrl->use_mdr = 0;
+
+ /* clear the read buffer */
+ elbc_fcm_ctrl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ elbc_fcm_ctrl->index = 0;
+
+ switch (command) {
+ /* READ0 and READ1 read the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ1:
+ column += 256;
+
+ /* fall-through */
+ case NAND_CMD_READ0:
+ dev_dbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
+ " 0x%x, column: 0x%x.\n", page_addr, column);
+
+
+ out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */
+ set_addr(mtd, 0, page_addr, 0);
+
+ elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ elbc_fcm_ctrl->index += column;
+
+ fsl_elbc_do_read(chip, 0);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:"
+ " 0x%x, column: 0x%x.\n", page_addr, column);
+
+ out_be32(&lbc->fbcr, mtd->oobsize - column);
+ set_addr(mtd, column, page_addr, 1);
+
+ elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+ fsl_elbc_do_read(chip, 1);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM:
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command);
+
+ out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_UA << FIR_OP1_SHIFT) |
+ (FIR_OP_RBW << FIR_OP2_SHIFT));
+ out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT);
+ /*
+ * although currently it's 8 bytes for READID, we always read
+ * the maximum 256 bytes (for PARAM)
+ */
+ out_be32(&lbc->fbcr, 256);
+ elbc_fcm_ctrl->read_bytes = 256;
+ elbc_fcm_ctrl->use_mdr = 1;
+ elbc_fcm_ctrl->mdr = column;
+ set_addr(mtd, 0, 0, 0);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_ERASE1, "
+ "page_addr: 0x%x.\n", page_addr);
+ set_addr(mtd, 0, page_addr, 0);
+ return;
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
+
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_PA << FIR_OP1_SHIFT) |
+ (FIR_OP_CM2 << FIR_OP2_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP3_SHIFT) |
+ (FIR_OP_RS << FIR_OP4_SHIFT));
+
+ out_be32(&lbc->fcr,
+ (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
+ (NAND_CMD_ERASE2 << FCR_CMD2_SHIFT));
+
+ out_be32(&lbc->fbcr, 0);
+ elbc_fcm_ctrl->read_bytes = 0;
+ elbc_fcm_ctrl->use_mdr = 1;
+
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN: {
+ __be32 fcr;
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
+ "page_addr: 0x%x, column: 0x%x.\n",
+ page_addr, column);
+
+ elbc_fcm_ctrl->column = column;
+ elbc_fcm_ctrl->use_mdr = 1;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ elbc_fcm_ctrl->oob = 1;
+ } else {
+ WARN_ON(column != 0);
+ elbc_fcm_ctrl->oob = 0;
+ }
+
+ fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
+ (NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
+
+ if (priv->page_size) {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM2 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_WB << FIR_OP3_SHIFT) |
+ (FIR_OP_CM3 << FIR_OP4_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP5_SHIFT) |
+ (FIR_OP_RS << FIR_OP6_SHIFT));
+ } else {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CM2 << FIR_OP1_SHIFT) |
+ (FIR_OP_CA << FIR_OP2_SHIFT) |
+ (FIR_OP_PA << FIR_OP3_SHIFT) |
+ (FIR_OP_WB << FIR_OP4_SHIFT) |
+ (FIR_OP_CM3 << FIR_OP5_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP6_SHIFT) |
+ (FIR_OP_RS << FIR_OP7_SHIFT));
+
+ if (elbc_fcm_ctrl->oob)
+ /* OOB area --> READOOB */
+ fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
+ else
+ /* First 256 bytes --> READ0 */
+ fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
+ }
+
+ out_be32(&lbc->fcr, fcr);
+ set_addr(mtd, column, page_addr, elbc_fcm_ctrl->oob);
+ return;
+ }
+
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG: {
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
+ "writing %d bytes.\n", elbc_fcm_ctrl->index);
+
+ /* if the write did not start at 0 or is not a full page
+ * then set the exact length, otherwise use a full page
+ * write so the HW generates the ECC.
+ */
+ if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
+ elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
+ out_be32(&lbc->fbcr,
+ elbc_fcm_ctrl->index - elbc_fcm_ctrl->column);
+ else
+ out_be32(&lbc->fbcr, 0);
+
+ fsl_elbc_run_command(mtd);
+ return;
+ }
+
+ /* CMD_STATUS must read the status byte while CEB is active */
+ /* Note - it does not wait for the ready line */
+ case NAND_CMD_STATUS:
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_RBW << FIR_OP1_SHIFT));
+ out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
+ out_be32(&lbc->fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ elbc_fcm_ctrl->read_bytes = 1;
+
+ fsl_elbc_run_command(mtd);
+
+ /* The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ setbits8(elbc_fcm_ctrl->addr, NAND_STATUS_WP);
+ return;
+
+ /* RESET without waiting for the ready line */
+ case NAND_CMD_RESET:
+ dev_dbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
+ out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT);
+ out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ default:
+ dev_err(priv->dev,
+ "fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n",
+ command);
+ }
+}
+
+static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip)
+{
+ /* The hardware does not seem to support multiple
+ * chips per bank.
+ */
+}
+
+/*
+ * Write buf to the FCM Controller Data Buffer
+ */
+static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+ unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+ if (len <= 0) {
+ dev_err(priv->dev, "write_buf of %d bytes\n", len);
+ elbc_fcm_ctrl->status = 0;
+ return;
+ }
+
+ if ((unsigned int)len > bufsize - elbc_fcm_ctrl->index) {
+ dev_err(priv->dev,
+ "write_buf beyond end of buffer "
+ "(%d requested, %u available)\n",
+ len, bufsize - elbc_fcm_ctrl->index);
+ len = bufsize - elbc_fcm_ctrl->index;
+ }
+
+ memcpy_toio(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], buf, len);
+ /*
+ * This is workaround for the weird elbc hangs during nand write,
+ * Scott Wood says: "...perhaps difference in how long it takes a
+ * write to make it through the localbus compared to a write to IMMR
+ * is causing problems, and sync isn't helping for some reason."
+ * Reading back the last byte helps though.
+ */
+ in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index] + len - 1);
+
+ elbc_fcm_ctrl->index += len;
+}
+
+/*
+ * Read a byte from the FCM hardware buffer if it has any data left;
+ * otherwise complain and return ERR_BYTE, since single-byte reads are
+ * always satisfied from the buffer filled by the previous command.
+ */
+static u8 fsl_elbc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+
+ /* If there are still bytes in the FCM, then use the next byte. */
+ if (elbc_fcm_ctrl->index < elbc_fcm_ctrl->read_bytes)
+ return in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index++]);
+
+ dev_err(priv->dev, "read_byte beyond end of buffer\n");
+ return ERR_BYTE;
+}
+
+/*
+ * Read from the FCM Controller Data Buffer
+ */
+static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+ int avail;
+
+ if (len < 0)
+ return;
+
+ avail = min((unsigned int)len,
+ elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
+ memcpy_fromio(buf, &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], avail);
+ elbc_fcm_ctrl->index += avail;
+
+ if (len > avail)
+ dev_err(priv->dev,
+ "read_buf beyond end of buffer "
+ "(%d requested, %d available)\n",
+ len, avail);
+}
+
+/*
+ * Verify buffer against the FCM Controller Data Buffer
+ */
+static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+ int i;
+
+ if (len < 0) {
+ dev_err(priv->dev, "verify_buf of %d bytes\n", len);
+ return -EINVAL;
+ }
+
+ if ((unsigned int)len >
+ elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index) {
+ dev_err(priv->dev,
+ "verify_buf beyond end of buffer "
+ "(%d requested, %u available)\n",
+ len, elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
+
+ elbc_fcm_ctrl->index = elbc_fcm_ctrl->read_bytes;
+ return -EINVAL;
+ }
+
+ for (i = 0; i < len; i++)
+ if (in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index + i])
+ != buf[i])
+ break;
+
+ elbc_fcm_ctrl->index += len;
+ return i == len && elbc_fcm_ctrl->status == LTESR_CC ? 0 : -EIO;
+}
+
+/* This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+
+ if (elbc_fcm_ctrl->status != LTESR_CC)
+ return NAND_STATUS_FAIL;
+
+ /* The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ return (elbc_fcm_ctrl->mdr & 0xff) | NAND_STATUS_WP;
+}
+
+static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ unsigned int al;
+
+ /* calculate FMR Address Length field */
+ al = 0;
+ if (chip->pagemask & 0xffff0000)
+ al++;
+ if (chip->pagemask & 0xff000000)
+ al++;
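+ /* al now counts how many page-number bytes are needed beyond the first two */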
+
+ priv->fmr |= al << FMR_AL_SHIFT;
+
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
+ chip->numchips);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
+ chip->chipsize);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
+ chip->pagemask);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_delay = %d\n",
+ chip->chip_delay);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
+ chip->badblockpos);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
+ chip->chip_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->page_shift = %d\n",
+ chip->page_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
+ chip->phys_erase_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecclayout = %p\n",
+ chip->ecclayout);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
+ chip->ecc.mode);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
+ chip->ecc.steps);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
+ chip->ecc.bytes);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
+ chip->ecc.total);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.layout = %p\n",
+ chip->ecc.layout);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
+ mtd->erasesize);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->writesize = %d\n",
+ mtd->writesize);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
+ mtd->oobsize);
+
+ /* adjust Option Register and ECC to match Flash page size */
+ if (mtd->writesize == 512) {
+ priv->page_size = 0;
+ clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+ } else if (mtd->writesize == 2048) {
+ priv->page_size = 1;
+ setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+ /* adjust ecc setup if needed */
+ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+ BR_DECC_CHK_GEN) {
+ chip->ecc.size = 512;
+ chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
+ &fsl_elbc_oob_lp_eccm1 :
+ &fsl_elbc_oob_lp_eccm0;
+ chip->badblock_pattern = &largepage_memorybased;
+ }
+ } else {
+ dev_err(priv->dev,
+ "fsl_elbc_init: page size %d is not supported\n",
+ mtd->writesize);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int fsl_elbc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf,
+ int page)
+{
+ fsl_elbc_read_buf(mtd, buf, mtd->writesize);
+ fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
+ mtd->ecc_stats.failed++;
+
+ return 0;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static void fsl_elbc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+ fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
+
+static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
+{
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ struct nand_chip *chip = &priv->chip;
+
+ dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank);
+
+ /* Fill in fsl_elbc_mtd structure */
+ priv->mtd.priv = chip;
+ priv->mtd.owner = THIS_MODULE;
+
+ /* set timeout to maximum */
+ priv->fmr = 15 << FMR_CWTO_SHIFT;
+ if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS)
+ priv->fmr |= FMR_ECCM;
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+ chip->read_byte = fsl_elbc_read_byte;
+ chip->write_buf = fsl_elbc_write_buf;
+ chip->read_buf = fsl_elbc_read_buf;
+ chip->verify_buf = fsl_elbc_verify_buf;
+ chip->select_chip = fsl_elbc_select_chip;
+ chip->cmdfunc = fsl_elbc_cmdfunc;
+ chip->waitfunc = fsl_elbc_wait;
+
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+
+ /* set up nand options */
+ chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+
+ chip->controller = &elbc_fcm_ctrl->controller;
+ chip->priv = priv;
+
+ chip->ecc.read_page = fsl_elbc_read_page;
+ chip->ecc.write_page = fsl_elbc_write_page;
+
+ /* If CS Base Register selects full hardware ECC then use it */
+ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+ BR_DECC_CHK_GEN) {
+ chip->ecc.mode = NAND_ECC_HW;
+ /* put in small page settings and adjust later if needed */
+ chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
+ &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ /*
+ * FIXME: can hardware ecc correct 4 bitflips if page size is
+ * 2k? Then does hardware report number of corrections for this
+ * case? If so, ecc_stats reporting needs to be fixed as well.
+ */
+ } else {
+ /* otherwise fall back to default software ECC */
+ chip->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ return 0;
+}
+
+static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
+{
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+ nand_release(&priv->mtd);
+
+ kfree(priv->mtd.name);
+
+ if (priv->vbase)
+ iounmap(priv->vbase);
+
+ elbc_fcm_ctrl->chips[priv->bank] = NULL;
+ kfree(priv);
+ return 0;
+}
+
+static DEFINE_MUTEX(fsl_elbc_nand_mutex);
+
+static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
+{
+ struct fsl_lbc_regs __iomem *lbc;
+ struct fsl_elbc_mtd *priv;
+ struct resource res;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
+ static const char *part_probe_types[]
+ = { "cmdlinepart", "RedBoot", "ofpart", NULL };
+ int ret;
+ int bank;
+ struct device *dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct mtd_part_parser_data ppdata;
+
+ ppdata.of_node = pdev->dev.of_node;
+ if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
+ return -ENODEV;
+ lbc = fsl_lbc_ctrl_dev->regs;
+ dev = fsl_lbc_ctrl_dev->dev;
+
+ /* get, allocate and map the memory resource */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to get resource\n");
+ return ret;
+ }
+
+ /* find which chip select it is connected to */
+ for (bank = 0; bank < MAX_BANKS; bank++)
+ if ((in_be32(&lbc->bank[bank].br) & BR_V) &&
+ (in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM &&
+ (in_be32(&lbc->bank[bank].br) &
+ in_be32(&lbc->bank[bank].or) & BR_BA)
+ == fsl_lbc_addr(res.start))
+ break;
+
+ if (bank >= MAX_BANKS) {
+ dev_err(dev, "address did not match any chip selects\n");
+ return -ENODEV;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_lock(&fsl_elbc_nand_mutex);
+ if (!fsl_lbc_ctrl_dev->nand) {
+ elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL);
+ if (!elbc_fcm_ctrl) {
+ dev_err(dev, "failed to allocate memory\n");
+ mutex_unlock(&fsl_elbc_nand_mutex);
+ ret = -ENOMEM;
+ goto err;
+ }
+ elbc_fcm_ctrl->counter++;
+
+ spin_lock_init(&elbc_fcm_ctrl->controller.lock);
+ init_waitqueue_head(&elbc_fcm_ctrl->controller.wq);
+ fsl_lbc_ctrl_dev->nand = elbc_fcm_ctrl;
+ } else {
+ elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
+ }
+ mutex_unlock(&fsl_elbc_nand_mutex);
+
+ elbc_fcm_ctrl->chips[bank] = priv;
+ priv->bank = bank;
+ priv->ctrl = fsl_lbc_ctrl_dev;
+ priv->dev = dev;
+
+ priv->vbase = ioremap(res.start, resource_size(&res));
+ if (!priv->vbase) {
+ dev_err(dev, "failed to map chip region\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
+ if (!priv->mtd.name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = fsl_elbc_chip_init(priv);
+ if (ret)
+ goto err;
+
+ ret = nand_scan_ident(&priv->mtd, 1, NULL);
+ if (ret)
+ goto err;
+
+ ret = fsl_elbc_chip_init_tail(&priv->mtd);
+ if (ret)
+ goto err;
+
+ ret = nand_scan_tail(&priv->mtd);
+ if (ret)
+ goto err;
+
+ /* First look for RedBoot table or partitions on the command
+ * line; these take precedence over device tree information */
+ mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata,
+ NULL, 0);
+
+ printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
+ (unsigned long long)res.start, priv->bank);
+ return 0;
+
+err:
+ fsl_elbc_chip_remove(priv);
+ return ret;
+}
+
+static int fsl_elbc_nand_remove(struct platform_device *pdev)
+{
+ int i;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
+ for (i = 0; i < MAX_BANKS; i++)
+ if (elbc_fcm_ctrl->chips[i])
+ fsl_elbc_chip_remove(elbc_fcm_ctrl->chips[i]);
+
+ mutex_lock(&fsl_elbc_nand_mutex);
+ elbc_fcm_ctrl->counter--;
+ if (!elbc_fcm_ctrl->counter) {
+ fsl_lbc_ctrl_dev->nand = NULL;
+ kfree(elbc_fcm_ctrl);
+ }
+ mutex_unlock(&fsl_elbc_nand_mutex);
+
+ return 0;
+
+}
+
+static const struct of_device_id fsl_elbc_nand_match[] = {
+ { .compatible = "fsl,elbc-fcm-nand", },
+ {}
+};
+
+static struct platform_driver fsl_elbc_nand_driver = {
+ .driver = {
+ .name = "fsl,elbc-fcm-nand",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_elbc_nand_match,
+ },
+ .probe = fsl_elbc_nand_probe,
+ .remove = fsl_elbc_nand_remove,
+};
+
+module_platform_driver(fsl_elbc_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale");
+MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
new file mode 100644
index 00000000..c30ac7b8
--- /dev/null
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -0,0 +1,1072 @@
+/*
+ * Freescale Integrated Flash Controller NAND driver
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc
+ *
+ * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/fsl_ifc.h>
+
+#define ERR_BYTE 0xFF /* Value returned for read
+ bytes when read failed */
+#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
+ for IFC NAND Machine */
+
+struct fsl_ifc_ctrl;
+
+/* mtd information per set */
+struct fsl_ifc_mtd {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct fsl_ifc_ctrl *ctrl;
+
+ struct device *dev;
+ int bank; /* Chip select bank number */
+ unsigned int bufnum_mask; /* bufnum = page & bufnum_mask */
+ u8 __iomem *vbase; /* Chip select base virtual address */
+};
+
+/* overview of the fsl ifc controller */
+struct fsl_ifc_nand_ctrl {
+ struct nand_hw_control controller;
+ struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
+
+ u8 __iomem *addr; /* Address of assigned IFC buffer */
+ unsigned int page; /* Last page written to / read from */
+ unsigned int read_bytes;/* Number of bytes read during command */
+ unsigned int column; /* Saved column from SEQIN */
+ unsigned int index; /* Pointer to next byte to 'read' */
+ unsigned int oob; /* Non zero if operating on OOB data */
+ unsigned int eccread; /* Non zero for a full-page ECC read */
+ unsigned int counter; /* counter for the initializations */
+};
+
+static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
+
+/* 512-byte page with 4-bit ECC, 8-bit */
+static struct nand_ecclayout oob_512_8bit_ecc4 = {
+ .eccbytes = 8,
+ .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
+ .oobfree = { {0, 5}, {6, 2} },
+};
+
+/* 512-byte page with 4-bit ECC, 16-bit */
+static struct nand_ecclayout oob_512_16bit_ecc4 = {
+ .eccbytes = 8,
+ .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
+ .oobfree = { {2, 6}, },
+};
+
+/* 2048-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_2048_ecc4 = {
+ .eccbytes = 32,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ },
+ .oobfree = { {2, 6}, {40, 24} },
+};
+
+/* 4096-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_4096_ecc4 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ },
+ .oobfree = { {2, 6}, {72, 56} },
+};
+
+/* 4096-byte page size with 8-bit ECC -- requires 218-byte OOB */
+static struct nand_ecclayout oob_4096_ecc8 = {
+ .eccbytes = 128,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ },
+ .oobfree = { {2, 6}, {136, 82} },
+};
+
+
+/*
+ * Generic flash bbt descriptors
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 2, /* 0 on 8-bit small page */
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 2, /* 0 on 8-bit small page */
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+/*
+ * Set up the IFC hardware block and page address fields, and the ifc nand
+ * structure addr field to point to the correct IFC buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ int buf_num;
+
+ ifc_nand_ctrl->page = page_addr;
+ /* Program ROW0/COL0 */
+ out_be32(&ifc->ifc_nand.row0, page_addr);
+ out_be32(&ifc->ifc_nand.col0, (oob ? IFC_NAND_COL_MS : 0) | column);
+
+ buf_num = page_addr & priv->bufnum_mask;
+
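+ /* buffers are laid out at a stride of twice the page size: data, then spare */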
+ ifc_nand_ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2);
+ ifc_nand_ctrl->index = column;
+
+ /* for OOB data point to the second half of the buffer */
+ if (oob)
+ ifc_nand_ctrl->index += mtd->writesize;
+}
+
+static int is_blank(struct mtd_info *mtd, unsigned int bufnum)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2);
+ u32 __iomem *mainarea = (u32 *)addr;
+ u8 __iomem *oob = addr + mtd->writesize;
+ int i;
+
+ for (i = 0; i < mtd->writesize / 4; i++) {
+ if (__raw_readl(&mainarea[i]) != 0xffffffff)
+ return 0;
+ }
+
+ for (i = 0; i < chip->ecc.layout->eccbytes; i++) {
+ int pos = chip->ecc.layout->eccpos[i];
+
+ if (__raw_readb(&oob[pos]) != 0xff)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* returns the ECC error count for the given buffer (15 means uncorrectable) */
+static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
+ u32 *eccstat, unsigned int bufnum)
+{
+ u32 reg = eccstat[bufnum / 4];
+ int errors;
+
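+ /* eccstat packs one byte per buffer, msb first; the low nibble is the error count */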
+ errors = (reg >> ((3 - bufnum % 4) * 8)) & 15;
+
+ return errors;
+}
+
+/*
+ * execute IFC NAND command and wait for it to complete
+ */
+static void fsl_ifc_run_command(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ u32 eccstat[4];
+ int i;
+
+ /* set the chip select for NAND Transaction */
+ out_be32(&ifc->ifc_nand.nand_csel, priv->bank << IFC_NAND_CSEL_SHIFT);
+
+ dev_vdbg(priv->dev,
+ "%s: fir0=%08x fcr0=%08x\n",
+ __func__,
+ in_be32(&ifc->ifc_nand.nand_fir0),
+ in_be32(&ifc->ifc_nand.nand_fcr0));
+
+ ctrl->nand_stat = 0;
+
+ /* start read/write seq */
+ out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+ IFC_TIMEOUT_MSECS * HZ/1000);
+
+ /* ctrl->nand_stat will be updated from IRQ context */
+ if (!ctrl->nand_stat)
+ dev_err(priv->dev, "Controller is not responding\n");
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_FTOER)
+ dev_err(priv->dev, "NAND Flash Timeout Error\n");
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
+ dev_err(priv->dev, "NAND Flash Write Protect Error\n");
+
+ if (nctrl->eccread) {
+ int errors;
+ int bufnum = nctrl->page & priv->bufnum_mask;
+ int sector = bufnum * chip->ecc.steps;
+ int sector_end = sector + chip->ecc.steps - 1;
+
+ for (i = sector / 4; i <= sector_end / 4; i++)
+ eccstat[i] = in_be32(&ifc->ifc_nand.nand_eccstat[i]);
+
+ for (i = sector; i <= sector_end; i++) {
+ errors = check_read_ecc(mtd, ctrl, eccstat, i);
+
+ if (errors == 15) {
+ /*
+ * Uncorrectable error.
+ * OK only if the whole page is blank.
+ *
+ * We disable ECCER reporting due to erratum
+ * IFC-A002770, so report it now if we see an
+ * uncorrectable error in ECCSTAT.
+ */
+ if (!is_blank(mtd, bufnum))
+ ctrl->nand_stat |=
+ IFC_NAND_EVTER_STAT_ECCER;
+ break;
+ }
+
+ mtd->ecc_stats.corrected += errors;
+ }
+
+ nctrl->eccread = 0;
+ }
+}
+
+static void fsl_ifc_do_read(struct nand_chip *chip,
+ int oob,
+ struct mtd_info *mtd)
+{
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+
+ /* Program FIR/IFC_NAND_FCR0 for Small/Large page */
+ if (mtd->writesize > 512) {
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
+
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ (NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT));
+ } else {
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
+
+ if (oob)
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT);
+ else
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT);
+ }
+}
+
+/* cmdfunc sends commands to the IFC NAND machine */
+static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr) {
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+
+ /* clear the read buffer */
+ ifc_nand_ctrl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ ifc_nand_ctrl->index = 0;
+
+ switch (command) {
+	/* READ0 reads the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ0:
+ out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ set_addr(mtd, 0, page_addr, 0);
+
+ ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ ifc_nand_ctrl->index += column;
+
+ if (chip->ecc.mode == NAND_ECC_HW)
+ ifc_nand_ctrl->eccread = 1;
+
+ fsl_ifc_do_read(chip, 0, mtd);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ out_be32(&ifc->ifc_nand.nand_fbcr, mtd->oobsize - column);
+ set_addr(mtd, column, page_addr, 1);
+
+ ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+ fsl_ifc_do_read(chip, 1, mtd);
+ fsl_ifc_run_command(mtd);
+
+ return;
+
+ /* READID must read all 8 possible bytes */
+ case NAND_CMD_READID:
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
+ /* 8 bytes for manuf, device and exts */
+ out_be32(&ifc->ifc_nand.nand_fbcr, 8);
+ ifc_nand_ctrl->read_bytes = 8;
+
+ set_addr(mtd, 0, 0, 0);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ set_addr(mtd, 0, page_addr, 0);
+ return;
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT));
+
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ (NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT));
+
+ out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ ifc_nand_ctrl->read_bytes = 0;
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN: {
+ u32 nand_fcr0;
+ ifc_nand_ctrl->column = column;
+ ifc_nand_ctrl->oob = 0;
+
+ if (mtd->writesize > 512) {
+ nand_fcr0 =
+ (NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT);
+
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT));
+ } else {
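+			/*
+			 * Small-page devices select the page region with a
+			 * pointer command (READ0 for the data area, READOOB
+			 * for the spare area) issued before SEQIN, hence the
+			 * extra command cycle in this sequence.
+			 */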
+ nand_fcr0 = ((NAND_CMD_PAGEPROG <<
+ IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN <<
+ IFC_NAND_FCR0_CMD2_SHIFT));
+
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fir1,
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT));
+
+ if (column >= mtd->writesize)
+ nand_fcr0 |=
+ NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT;
+ else
+ nand_fcr0 |=
+ NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT;
+ }
+
+ if (column >= mtd->writesize) {
+ /* OOB area --> READOOB */
+ column -= mtd->writesize;
+ ifc_nand_ctrl->oob = 1;
+ }
+ out_be32(&ifc->ifc_nand.nand_fcr0, nand_fcr0);
+ set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
+ return;
+ }
+
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG: {
+ if (ifc_nand_ctrl->oob) {
+ out_be32(&ifc->ifc_nand.nand_fbcr,
+ ifc_nand_ctrl->index - ifc_nand_ctrl->column);
+ } else {
+ out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ }
+
+ fsl_ifc_run_command(mtd);
+ return;
+ }
+
+ case NAND_CMD_STATUS:
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT);
+ out_be32(&ifc->ifc_nand.nand_fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ ifc_nand_ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
+ /*
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+ return;
+
+ case NAND_CMD_RESET:
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT);
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ default:
+ dev_err(priv->dev, "%s: error, unsupported command 0x%x.\n",
+ __func__, command);
+ }
+}
+
+static void fsl_ifc_select_chip(struct mtd_info *mtd, int chip)
+{
+	/*
+	 * The hardware does not seem to support multiple
+	 * chips per bank.
+	 */
+}
+
+/*
+ * Write buf to the IFC NAND Controller Data Buffer
+ */
+static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+ if (len <= 0) {
+ dev_err(priv->dev, "%s: len %d bytes", __func__, len);
+ return;
+ }
+
+ if ((unsigned int)len > bufsize - ifc_nand_ctrl->index) {
+ dev_err(priv->dev,
+ "%s: beyond end of buffer (%d requested, %u available)\n",
+ __func__, len, bufsize - ifc_nand_ctrl->index);
+ len = bufsize - ifc_nand_ctrl->index;
+ }
+
+ memcpy_toio(&ifc_nand_ctrl->addr[ifc_nand_ctrl->index], buf, len);
+ ifc_nand_ctrl->index += len;
+}
+
+/*
+ * Read a byte from the IFC hardware buffer;
+ * read function for 8-bit buswidth.
+ */
+static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+
+ /*
+ * If there are still bytes in the IFC buffer, then use the
+ * next byte.
+ */
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes)
+ return in_8(&ifc_nand_ctrl->addr[ifc_nand_ctrl->index++]);
+
+ dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+ return ERR_BYTE;
+}
+
+/*
+ * Read two bytes from the IFC hardware buffer
+ * read function for 16-bit buswidth
+ */
+static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ uint16_t data;
+
+ /*
+ * If there are still bytes in the IFC buffer, then use the
+ * next byte.
+ */
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+ data = in_be16((uint16_t *)&ifc_nand_ctrl->
+ addr[ifc_nand_ctrl->index]);
+ ifc_nand_ctrl->index += 2;
+ return (uint8_t) data;
+ }
+
+ dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+ return ERR_BYTE;
+}
+
+/*
+ * Read from the IFC Controller Data Buffer
+ */
+static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ int avail;
+
+ if (len < 0) {
+ dev_err(priv->dev, "%s: len %d bytes", __func__, len);
+ return;
+ }
+
+ avail = min((unsigned int)len,
+ ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
+ memcpy_fromio(buf, &ifc_nand_ctrl->addr[ifc_nand_ctrl->index], avail);
+ ifc_nand_ctrl->index += avail;
+
+ if (len > avail)
+ dev_err(priv->dev,
+ "%s: beyond end of buffer (%d requested, %d available)\n",
+ __func__, len, avail);
+}
+
+/*
+ * Verify buffer against the IFC Controller Data Buffer
+ */
+static int fsl_ifc_verify_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+ int i;
+
+ if (len < 0) {
+ dev_err(priv->dev, "%s: write_buf of %d bytes", __func__, len);
+ return -EINVAL;
+ }
+
+ if ((unsigned int)len > nctrl->read_bytes - nctrl->index) {
+ dev_err(priv->dev,
+ "%s: beyond end of buffer (%d requested, %u available)\n",
+ __func__, len, nctrl->read_bytes - nctrl->index);
+
+ nctrl->index = nctrl->read_bytes;
+ return -EINVAL;
+ }
+
+ for (i = 0; i < len; i++)
+ if (in_8(&nctrl->addr[nctrl->index + i]) != buf[i])
+ break;
+
+ nctrl->index += len;
+
+ if (i != len)
+ return -EIO;
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ u32 nand_fsr;
+
+ /* Use READ_STATUS command, but wait for the device to be ready */
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fcr0, NAND_CMD_STATUS <<
+ IFC_NAND_FCR0_CMD0_SHIFT);
+ out_be32(&ifc->ifc_nand.nand_fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ ifc_nand_ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
+ nand_fsr = in_be32(&ifc->ifc_nand.nand_fsr);
+
+ /*
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ return nand_fsr | NAND_STATUS_WP;
+}
+
+static int fsl_ifc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+
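+	/*
+	 * The READ0 cmdfunc has already run the full IFC sequence and the
+	 * ECC check, so this just copies data and OOB out of the controller
+	 * buffer and propagates any error status.
+	 */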
+ fsl_ifc_read_buf(mtd, buf, mtd->writesize);
+ fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
+ dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
+
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ mtd->ecc_stats.failed++;
+
+ return 0;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static void fsl_ifc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ fsl_ifc_write_buf(mtd, buf, mtd->writesize);
+ fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
+
+static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+
+ dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
+ chip->numchips);
+ dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
+ chip->chipsize);
+ dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
+ chip->pagemask);
+ dev_dbg(priv->dev, "%s: nand->chip_delay = %d\n", __func__,
+ chip->chip_delay);
+ dev_dbg(priv->dev, "%s: nand->badblockpos = %d\n", __func__,
+ chip->badblockpos);
+ dev_dbg(priv->dev, "%s: nand->chip_shift = %d\n", __func__,
+ chip->chip_shift);
+ dev_dbg(priv->dev, "%s: nand->page_shift = %d\n", __func__,
+ chip->page_shift);
+ dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
+ chip->phys_erase_shift);
+ dev_dbg(priv->dev, "%s: nand->ecclayout = %p\n", __func__,
+ chip->ecclayout);
+ dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,
+ chip->ecc.mode);
+ dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
+ chip->ecc.steps);
+ dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
+ chip->ecc.bytes);
+ dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
+ chip->ecc.total);
+ dev_dbg(priv->dev, "%s: nand->ecc.layout = %p\n", __func__,
+ chip->ecc.layout);
+ dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
+ dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
+ dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
+ mtd->erasesize);
+ dev_dbg(priv->dev, "%s: mtd->writesize = %d\n", __func__,
+ mtd->writesize);
+ dev_dbg(priv->dev, "%s: mtd->oobsize = %d\n", __func__,
+ mtd->oobsize);
+
+ return 0;
+}
+
+static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
+{
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ struct nand_chip *chip = &priv->chip;
+ struct nand_ecclayout *layout;
+ u32 csor;
+
+ /* Fill in fsl_ifc_mtd structure */
+ priv->mtd.priv = chip;
+ priv->mtd.owner = THIS_MODULE;
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+ if ((in_be32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+ chip->read_byte = fsl_ifc_read_byte16;
+ else
+ chip->read_byte = fsl_ifc_read_byte;
+
+ chip->write_buf = fsl_ifc_write_buf;
+ chip->read_buf = fsl_ifc_read_buf;
+ chip->verify_buf = fsl_ifc_verify_buf;
+ chip->select_chip = fsl_ifc_select_chip;
+ chip->cmdfunc = fsl_ifc_cmdfunc;
+ chip->waitfunc = fsl_ifc_wait;
+
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+
+ out_be32(&ifc->ifc_nand.ncfgr, 0x0);
+
+ /* set up nand options */
+ chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+
+ if (in_be32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+ chip->read_byte = fsl_ifc_read_byte16;
+ chip->options |= NAND_BUSWIDTH_16;
+ } else {
+ chip->read_byte = fsl_ifc_read_byte;
+ }
+
+ chip->controller = &ifc_nand_ctrl->controller;
+ chip->priv = priv;
+
+ chip->ecc.read_page = fsl_ifc_read_page;
+ chip->ecc.write_page = fsl_ifc_write_page;
+
+ csor = in_be32(&ifc->csor_cs[priv->bank].csor);
+
+ /* Hardware generates ECC per 512 Bytes */
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 8;
+
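+	/*
+	 * bufnum_mask selects which of the controller's internal page
+	 * buffers a given page lands in; the number of buffers depends on
+	 * the page size (16 for 512-byte pages, 4 for 2KiB, 2 for 4KiB).
+	 */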
+ switch (csor & CSOR_NAND_PGS_MASK) {
+ case CSOR_NAND_PGS_512:
+ if (chip->options & NAND_BUSWIDTH_16) {
+ layout = &oob_512_16bit_ecc4;
+ } else {
+ layout = &oob_512_8bit_ecc4;
+
+ /* Avoid conflict with bad block marker */
+ bbt_main_descr.offs = 0;
+ bbt_mirror_descr.offs = 0;
+ }
+
+ priv->bufnum_mask = 15;
+ break;
+
+ case CSOR_NAND_PGS_2K:
+ layout = &oob_2048_ecc4;
+ priv->bufnum_mask = 3;
+ break;
+
+ case CSOR_NAND_PGS_4K:
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
+ CSOR_NAND_ECC_MODE_4) {
+ layout = &oob_4096_ecc4;
+ } else {
+ layout = &oob_4096_ecc8;
+ chip->ecc.bytes = 16;
+ }
+
+ priv->bufnum_mask = 1;
+ break;
+
+ default:
+ dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
+ return -ENODEV;
+ }
+
+ /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
+ if (csor & CSOR_NAND_ECC_DEC_EN) {
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.layout = layout;
+ } else {
+ chip->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ return 0;
+}
+
+static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
+{
+ nand_release(&priv->mtd);
+
+ kfree(priv->mtd.name);
+
+ if (priv->vbase)
+ iounmap(priv->vbase);
+
+ ifc_nand_ctrl->chips[priv->bank] = NULL;
+ dev_set_drvdata(priv->dev, NULL);
+ kfree(priv);
+
+ return 0;
+}
+
+static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
+ phys_addr_t addr)
+{
+ u32 cspr = in_be32(&ifc->cspr_cs[bank].cspr);
+
+ if (!(cspr & CSPR_V))
+ return 0;
+ if ((cspr & CSPR_MSEL) != CSPR_MSEL_NAND)
+ return 0;
+
+ return (cspr & CSPR_BA) == convert_ifc_address(addr);
+}
+
+static DEFINE_MUTEX(fsl_ifc_nand_mutex);
+
+static int __devinit fsl_ifc_nand_probe(struct platform_device *dev)
+{
+ struct fsl_ifc_regs __iomem *ifc;
+ struct fsl_ifc_mtd *priv;
+ struct resource res;
+ static const char *part_probe_types[]
+ = { "cmdlinepart", "RedBoot", "ofpart", NULL };
+ int ret;
+ int bank;
+ struct device_node *node = dev->dev.of_node;
+ struct mtd_part_parser_data ppdata;
+
+ ppdata.of_node = dev->dev.of_node;
+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
+ return -ENODEV;
+ ifc = fsl_ifc_ctrl_dev->regs;
+
+ /* get, allocate and map the memory resource */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(&dev->dev, "%s: failed to get resource\n", __func__);
+ return ret;
+ }
+
+ /* find which chip select it is connected to */
+ for (bank = 0; bank < FSL_IFC_BANK_COUNT; bank++) {
+ if (match_bank(ifc, bank, res.start))
+ break;
+ }
+
+ if (bank >= FSL_IFC_BANK_COUNT) {
+ dev_err(&dev->dev, "%s: address did not match any chip selects\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
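+	/*
+	 * A single ifc_nand_ctrl structure is shared by all NAND banks on
+	 * the controller; the first bank to probe allocates it under the
+	 * mutex, later banks simply reuse it.
+	 */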
+ mutex_lock(&fsl_ifc_nand_mutex);
+ if (!fsl_ifc_ctrl_dev->nand) {
+ ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
+ if (!ifc_nand_ctrl) {
+ dev_err(&dev->dev, "failed to allocate memory\n");
+ mutex_unlock(&fsl_ifc_nand_mutex);
+ return -ENOMEM;
+ }
+
+ ifc_nand_ctrl->read_bytes = 0;
+ ifc_nand_ctrl->index = 0;
+ ifc_nand_ctrl->addr = NULL;
+ fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;
+
+ spin_lock_init(&ifc_nand_ctrl->controller.lock);
+ init_waitqueue_head(&ifc_nand_ctrl->controller.wq);
+ } else {
+ ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
+ }
+ mutex_unlock(&fsl_ifc_nand_mutex);
+
+ ifc_nand_ctrl->chips[bank] = priv;
+ priv->bank = bank;
+ priv->ctrl = fsl_ifc_ctrl_dev;
+ priv->dev = &dev->dev;
+
+ priv->vbase = ioremap(res.start, resource_size(&res));
+ if (!priv->vbase) {
+ dev_err(priv->dev, "%s: failed to map chip region\n", __func__);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev_set_drvdata(priv->dev, priv);
+
+ out_be32(&ifc->ifc_nand.nand_evter_en,
+ IFC_NAND_EVTER_EN_OPC_EN |
+ IFC_NAND_EVTER_EN_FTOER_EN |
+ IFC_NAND_EVTER_EN_WPER_EN);
+
+ /* enable NAND Machine Interrupts */
+ out_be32(&ifc->ifc_nand.nand_evter_intr_en,
+ IFC_NAND_EVTER_INTR_OPCIR_EN |
+ IFC_NAND_EVTER_INTR_FTOERIR_EN |
+ IFC_NAND_EVTER_INTR_WPERIR_EN);
+
+ priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
+ if (!priv->mtd.name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = fsl_ifc_chip_init(priv);
+ if (ret)
+ goto err;
+
+ ret = nand_scan_ident(&priv->mtd, 1, NULL);
+ if (ret)
+ goto err;
+
+ ret = fsl_ifc_chip_init_tail(&priv->mtd);
+ if (ret)
+ goto err;
+
+ ret = nand_scan_tail(&priv->mtd);
+ if (ret)
+ goto err;
+
+	/*
+	 * First look for a RedBoot table or partitions on the command line;
+	 * these take precedence over device tree information.
+	 */
+ mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata,
+ NULL, 0);
+
+ dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
+ (unsigned long long)res.start, priv->bank);
+ return 0;
+
+err:
+ fsl_ifc_chip_remove(priv);
+ return ret;
+}
+
+static int fsl_ifc_nand_remove(struct platform_device *dev)
+{
+ struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
+
+ fsl_ifc_chip_remove(priv);
+
+ mutex_lock(&fsl_ifc_nand_mutex);
+ ifc_nand_ctrl->counter--;
+ if (!ifc_nand_ctrl->counter) {
+ fsl_ifc_ctrl_dev->nand = NULL;
+ kfree(ifc_nand_ctrl);
+ }
+ mutex_unlock(&fsl_ifc_nand_mutex);
+
+ return 0;
+}
+
+static const struct of_device_id fsl_ifc_nand_match[] = {
+ {
+ .compatible = "fsl,ifc-nand",
+ },
+ {}
+};
+
+static struct platform_driver fsl_ifc_nand_driver = {
+ .driver = {
+ .name = "fsl,ifc-nand",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_ifc_nand_match,
+ },
+ .probe = fsl_ifc_nand_probe,
+ .remove = fsl_ifc_nand_remove,
+};
+
+static int __init fsl_ifc_nand_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&fsl_ifc_nand_driver);
+ if (ret)
+		printk(KERN_ERR "fsl-ifc: Failed to register platform "
+				"driver\n");
+
+ return ret;
+}
+
+static void __exit fsl_ifc_nand_exit(void)
+{
+ platform_driver_unregister(&fsl_ifc_nand_driver);
+}
+
+module_init(fsl_ifc_nand_init);
+module_exit(fsl_ifc_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale");
+MODULE_DESCRIPTION("Freescale Integrated Flash Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
new file mode 100644
index 00000000..45df542b
--- /dev/null
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -0,0 +1,361 @@
+/*
+ * Freescale UPM NAND driver.
+ *
+ * Copyright © 2007-2008 MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <asm/fsl_lbc.h>
+
+#define FSL_UPM_WAIT_RUN_PATTERN 0x1
+#define FSL_UPM_WAIT_WRITE_BYTE 0x2
+#define FSL_UPM_WAIT_WRITE_BUFFER 0x4
+
+struct fsl_upm_nand {
+ struct device *dev;
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ int last_ctrl;
+ struct mtd_partition *parts;
+ struct fsl_upm upm;
+ uint8_t upm_addr_offset;
+ uint8_t upm_cmd_offset;
+ void __iomem *io_base;
+ int rnb_gpio[NAND_MAX_CHIPS];
+ uint32_t mchip_offsets[NAND_MAX_CHIPS];
+ uint32_t mchip_count;
+ uint32_t mchip_number;
+ int chip_delay;
+ uint32_t wait_flags;
+};
+
+static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
+{
+ return container_of(mtdinfo, struct fsl_upm_nand, mtd);
+}
+
+static int fun_chip_ready(struct mtd_info *mtd)
+{
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+
+ if (gpio_get_value(fun->rnb_gpio[fun->mchip_number]))
+ return 1;
+
+ dev_vdbg(fun->dev, "busy\n");
+ return 0;
+}
+
+static void fun_wait_rnb(struct fsl_upm_nand *fun)
+{
+ if (fun->rnb_gpio[fun->mchip_number] >= 0) {
+ int cnt = 1000000;
+
+ while (--cnt && !fun_chip_ready(&fun->mtd))
+ cpu_relax();
+ if (!cnt)
+ dev_err(fun->dev, "tired waiting for RNB\n");
+ } else {
+ ndelay(100);
+ }
+}
+
+static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+ u32 mar;
+
+ if (!(ctrl & fun->last_ctrl)) {
+ fsl_upm_end_pattern(&fun->upm);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ fun->last_ctrl = ctrl & (NAND_ALE | NAND_CLE);
+ }
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ if (ctrl & NAND_ALE)
+ fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
+ else if (ctrl & NAND_CLE)
+ fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
+ }
+
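+	/*
+	 * The UPM MAR value carries the command/address byte left-justified
+	 * to the UPM data width, combined with the offset of the currently
+	 * selected chip.
+	 */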
+ mar = (cmd << (32 - fun->upm.width)) |
+ fun->mchip_offsets[fun->mchip_number];
+ fsl_upm_run_pattern(&fun->upm, chip->IO_ADDR_R, mar);
+
+ if (fun->wait_flags & FSL_UPM_WAIT_RUN_PATTERN)
+ fun_wait_rnb(fun);
+}
+
+static void fun_select_chip(struct mtd_info *mtd, int mchip_nr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+
+ if (mchip_nr == -1) {
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+ } else if (mchip_nr >= 0 && mchip_nr < NAND_MAX_CHIPS) {
+ fun->mchip_number = mchip_nr;
+ chip->IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr];
+ chip->IO_ADDR_W = chip->IO_ADDR_R;
+ } else {
+ BUG();
+ }
+}
+
+static uint8_t fun_read_byte(struct mtd_info *mtd)
+{
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+
+ return in_8(fun->chip.IO_ADDR_R);
+}
+
+static void fun_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = in_8(fun->chip.IO_ADDR_R);
+}
+
+static void fun_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+ int i;
+
+ for (i = 0; i < len; i++) {
+ out_8(fun->chip.IO_ADDR_W, buf[i]);
+ if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BYTE)
+ fun_wait_rnb(fun);
+ }
+ if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BUFFER)
+ fun_wait_rnb(fun);
+}
+
+static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
+ const struct device_node *upm_np,
+ const struct resource *io_res)
+{
+ int ret;
+ struct device_node *flash_np;
+ struct mtd_part_parser_data ppdata;
+
+ fun->chip.IO_ADDR_R = fun->io_base;
+ fun->chip.IO_ADDR_W = fun->io_base;
+ fun->chip.cmd_ctrl = fun_cmd_ctrl;
+ fun->chip.chip_delay = fun->chip_delay;
+ fun->chip.read_byte = fun_read_byte;
+ fun->chip.read_buf = fun_read_buf;
+ fun->chip.write_buf = fun_write_buf;
+ fun->chip.ecc.mode = NAND_ECC_SOFT;
+ if (fun->mchip_count > 1)
+ fun->chip.select_chip = fun_select_chip;
+
+ if (fun->rnb_gpio[0] >= 0)
+ fun->chip.dev_ready = fun_chip_ready;
+
+ fun->mtd.priv = &fun->chip;
+ fun->mtd.owner = THIS_MODULE;
+
+ flash_np = of_get_next_child(upm_np, NULL);
+ if (!flash_np)
+ return -ENODEV;
+
+ fun->mtd.name = kasprintf(GFP_KERNEL, "0x%llx.%s", (u64)io_res->start,
+ flash_np->name);
+ if (!fun->mtd.name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = nand_scan(&fun->mtd, fun->mchip_count);
+ if (ret)
+ goto err;
+
+ ppdata.of_node = flash_np;
+ ret = mtd_device_parse_register(&fun->mtd, NULL, &ppdata, NULL, 0);
+err:
+ of_node_put(flash_np);
+ if (ret)
+ kfree(fun->mtd.name);
+ return ret;
+}
+
+static int __devinit fun_probe(struct platform_device *ofdev)
+{
+ struct fsl_upm_nand *fun;
+ struct resource io_res;
+ const __be32 *prop;
+ int rnb_gpio;
+ int ret;
+ int size;
+ int i;
+
+ fun = kzalloc(sizeof(*fun), GFP_KERNEL);
+ if (!fun)
+ return -ENOMEM;
+
+ ret = of_address_to_resource(ofdev->dev.of_node, 0, &io_res);
+ if (ret) {
+ dev_err(&ofdev->dev, "can't get IO base\n");
+ goto err1;
+ }
+
+ ret = fsl_upm_find(io_res.start, &fun->upm);
+ if (ret) {
+ dev_err(&ofdev->dev, "can't find UPM\n");
+ goto err1;
+ }
+
+ prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-offset",
+ &size);
+ if (!prop || size != sizeof(uint32_t)) {
+ dev_err(&ofdev->dev, "can't get UPM address offset\n");
+ ret = -EINVAL;
+ goto err1;
+ }
+ fun->upm_addr_offset = *prop;
+
+ prop = of_get_property(ofdev->dev.of_node, "fsl,upm-cmd-offset", &size);
+ if (!prop || size != sizeof(uint32_t)) {
+ dev_err(&ofdev->dev, "can't get UPM command offset\n");
+ ret = -EINVAL;
+ goto err1;
+ }
+ fun->upm_cmd_offset = *prop;
+
+ prop = of_get_property(ofdev->dev.of_node,
+ "fsl,upm-addr-line-cs-offsets", &size);
+ if (prop && (size / sizeof(uint32_t)) > 0) {
+ fun->mchip_count = size / sizeof(uint32_t);
+ if (fun->mchip_count >= NAND_MAX_CHIPS) {
+			dev_err(&ofdev->dev, "too many chips\n");
+			ret = -EINVAL;
+			goto err1;
+ }
+ for (i = 0; i < fun->mchip_count; i++)
+ fun->mchip_offsets[i] = be32_to_cpu(prop[i]);
+ } else {
+ fun->mchip_count = 1;
+ }
+
+ for (i = 0; i < fun->mchip_count; i++) {
+ fun->rnb_gpio[i] = -1;
+ rnb_gpio = of_get_gpio(ofdev->dev.of_node, i);
+ if (rnb_gpio >= 0) {
+ ret = gpio_request(rnb_gpio, dev_name(&ofdev->dev));
+ if (ret) {
+ dev_err(&ofdev->dev,
+ "can't request RNB gpio #%d\n", i);
+ goto err2;
+ }
+ gpio_direction_input(rnb_gpio);
+ fun->rnb_gpio[i] = rnb_gpio;
+ } else if (rnb_gpio == -EINVAL) {
+			dev_err(&ofdev->dev, "RNB gpio #%d is invalid\n", i);
+			ret = -EINVAL;
+			goto err2;
+ }
+ }
+
+ prop = of_get_property(ofdev->dev.of_node, "chip-delay", NULL);
+ if (prop)
+ fun->chip_delay = be32_to_cpup(prop);
+ else
+ fun->chip_delay = 50;
+
+ prop = of_get_property(ofdev->dev.of_node, "fsl,upm-wait-flags", &size);
+ if (prop && size == sizeof(uint32_t))
+ fun->wait_flags = be32_to_cpup(prop);
+ else
+ fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN |
+ FSL_UPM_WAIT_WRITE_BYTE;
+
+ fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
+ resource_size(&io_res));
+ if (!fun->io_base) {
+ ret = -ENOMEM;
+ goto err2;
+ }
+
+ fun->dev = &ofdev->dev;
+ fun->last_ctrl = NAND_CLE;
+
+ ret = fun_chip_init(fun, ofdev->dev.of_node, &io_res);
+ if (ret)
+ goto err2;
+
+ dev_set_drvdata(&ofdev->dev, fun);
+
+ return 0;
+err2:
+ for (i = 0; i < fun->mchip_count; i++) {
+ if (fun->rnb_gpio[i] < 0)
+ break;
+ gpio_free(fun->rnb_gpio[i]);
+ }
+err1:
+ kfree(fun);
+
+ return ret;
+}
+
+static int __devexit fun_remove(struct platform_device *ofdev)
+{
+ struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
+ int i;
+
+ nand_release(&fun->mtd);
+ kfree(fun->mtd.name);
+
+ for (i = 0; i < fun->mchip_count; i++) {
+ if (fun->rnb_gpio[i] < 0)
+ break;
+ gpio_free(fun->rnb_gpio[i]);
+ }
+
+ kfree(fun);
+
+ return 0;
+}
+
+static const struct of_device_id of_fun_match[] = {
+ { .compatible = "fsl,upm-nand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_fun_match);
+
+static struct platform_driver of_fun_driver = {
+ .driver = {
+ .name = "fsl,upm-nand",
+ .owner = THIS_MODULE,
+ .of_match_table = of_fun_match,
+ },
+ .probe = fun_probe,
+ .remove = __devexit_p(fun_remove),
+};
+
+module_platform_driver(of_fun_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_DESCRIPTION("Driver for NAND chips working through Freescale "
+ "LocalBus User-Programmable Machine");
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
new file mode 100644
index 00000000..1b8330e1
--- /dev/null
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -0,0 +1,1267 @@
+/*
+ * drivers/mtd/nand/fsmc_nand.c
+ *
+ * ST Microelectronics
+ * Flexible Static Memory Controller (FSMC)
+ * Driver for NAND portions
+ *
+ * Copyright © 2010 ST Microelectronics
+ * Vipin Kumar <vipin.kumar@st.com>
+ * Ashish Priyadarshi
+ *
+ * Based on drivers/mtd/nand/nomadik_nand.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/resource.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mtd/fsmc.h>
+#include <linux/amba/bus.h>
+#include <mtd/mtd-abi.h>
+
+static struct nand_ecclayout fsmc_ecc1_128_layout = {
+ .eccbytes = 24,
+ .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52,
+ 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116},
+ .oobfree = {
+ {.offset = 8, .length = 8},
+ {.offset = 24, .length = 8},
+ {.offset = 40, .length = 8},
+ {.offset = 56, .length = 8},
+ {.offset = 72, .length = 8},
+ {.offset = 88, .length = 8},
+ {.offset = 104, .length = 8},
+ {.offset = 120, .length = 8}
+ }
+};
+
+static struct nand_ecclayout fsmc_ecc1_64_layout = {
+ .eccbytes = 12,
+ .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52},
+ .oobfree = {
+ {.offset = 8, .length = 8},
+ {.offset = 24, .length = 8},
+ {.offset = 40, .length = 8},
+ {.offset = 56, .length = 8},
+ }
+};
+
+static struct nand_ecclayout fsmc_ecc1_16_layout = {
+ .eccbytes = 3,
+ .eccpos = {2, 3, 4},
+ .oobfree = {
+ {.offset = 8, .length = 8},
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 8192 bytes & OOBsize 256 bytes. 13*16 bytes
+ * of OOB size are reserved for ECC, bytes 0 & 1 are reserved for bad block and 46
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_256_layout = {
+ .eccbytes = 208,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78,
+ 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94,
+ 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110,
+ 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126,
+ 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142,
+ 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158,
+ 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174,
+ 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190,
+ 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206,
+ 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222,
+ 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238,
+ 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 3},
+ {.offset = 79, .length = 3},
+ {.offset = 95, .length = 3},
+ {.offset = 111, .length = 3},
+ {.offset = 127, .length = 3},
+ {.offset = 143, .length = 3},
+ {.offset = 159, .length = 3},
+ {.offset = 175, .length = 3},
+ {.offset = 191, .length = 3},
+ {.offset = 207, .length = 3},
+ {.offset = 223, .length = 3},
+ {.offset = 239, .length = 3},
+ {.offset = 255, .length = 1}
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes. 13*8 bytes
+ * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 118
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_224_layout = {
+ .eccbytes = 104,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78,
+ 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94,
+ 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110,
+ 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 3},
+ {.offset = 79, .length = 3},
+ {.offset = 95, .length = 3},
+ {.offset = 111, .length = 3},
+ {.offset = 127, .length = 97}
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 128 bytes. 13*8 bytes
+ * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 22
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_128_layout = {
+ .eccbytes = 104,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78,
+ 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94,
+ 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110,
+ 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 3},
+ {.offset = 79, .length = 3},
+ {.offset = 95, .length = 3},
+ {.offset = 111, .length = 3},
+ {.offset = 127, .length = 1}
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 2048 bytes & OOBsize 64 bytes. 13*4 bytes of
+ * OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 10
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_64_layout = {
+ .eccbytes = 52,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 1},
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 512 bytes & OOBsize 16 bytes. 13 bytes of
+ * OOB size is reserved for ECC, Byte no. 4 & 5 reserved for bad block and One
+ * byte is free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_16_layout = {
+ .eccbytes = 13,
+ .eccpos = { 0, 1, 2, 3, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14
+ },
+ .oobfree = {
+ {.offset = 15, .length = 1},
+ }
+};
+
+/*
+ * ECC placement definitions in oobfree type format.
+ * There are 13 bytes of ECC for every 512-byte block, and they have to be read
+ * consecutively, immediately after the 512-byte data block, for the hardware to
+ * generate the error bit offsets within the 512 bytes of data.
+ * Managing the ECC bytes this way makes it easier for software to read the ECC
+ * bytes consecutively with the data bytes. The scheme is similar to the oobfree
+ * structure already maintained in the generic NAND driver.
+ */
+static struct fsmc_eccplace fsmc_ecc4_lp_place = {
+ .eccplace = {
+ {.offset = 2, .length = 13},
+ {.offset = 18, .length = 13},
+ {.offset = 34, .length = 13},
+ {.offset = 50, .length = 13},
+ {.offset = 66, .length = 13},
+ {.offset = 82, .length = 13},
+ {.offset = 98, .length = 13},
+ {.offset = 114, .length = 13}
+ }
+};
+
+static struct fsmc_eccplace fsmc_ecc4_sp_place = {
+ .eccplace = {
+ {.offset = 0, .length = 4},
+ {.offset = 6, .length = 9}
+ }
+};
+
+/**
+ * struct fsmc_nand_data - structure for FSMC NAND device state
+ *
+ * @pid: Part ID in the AMBA PrimeCell format
+ * @mtd: MTD info for a NAND flash.
+ * @nand: Chip related info for a NAND flash.
+ * @partitions: Partition info for a NAND Flash.
+ * @nr_partitions: Total number of partitions of a NAND flash.
+ *
+ * @ecc_place: ECC placing locations in oobfree type format.
+ * @bank: Bank number for probed device.
+ * @dev: Parent device.
+ * @mode: Access mode (word access or DMA access).
+ * @clk: Clock structure for FSMC.
+ *
+ * @read_dma_chan: DMA channel for read access
+ * @write_dma_chan: DMA channel for write access to NAND
+ * @dma_access_complete: Completion structure
+ *
+ * @dev_timings: NAND timings used to program the FSMC.
+ *
+ * @data_pa: NAND Physical port for Data.
+ * @data_va: NAND port for Data.
+ * @cmd_va: NAND port for Command.
+ * @addr_va: NAND port for Address.
+ * @regs_va: FSMC regs base address.
+ * @select_chip: Platform callback to select a chip/bank.
+ */
+struct fsmc_nand_data {
+ u32 pid;
+ struct mtd_info mtd;
+ struct nand_chip nand;
+ struct mtd_partition *partitions;
+ unsigned int nr_partitions;
+
+ struct fsmc_eccplace *ecc_place;
+ unsigned int bank;
+ struct device *dev;
+ enum access_mode mode;
+ struct clk *clk;
+
+ /* DMA related objects */
+ struct dma_chan *read_dma_chan;
+ struct dma_chan *write_dma_chan;
+ struct completion dma_access_complete;
+
+ struct fsmc_nand_timings *dev_timings;
+
+ dma_addr_t data_pa;
+ void __iomem *data_va;
+ void __iomem *cmd_va;
+ void __iomem *addr_va;
+ void __iomem *regs_va;
+
+ void (*select_chip)(uint32_t bank, uint32_t busw);
+};
+
+/* Assert CS signal based on chipnr */
+static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsmc_nand_data *host;
+
+ host = container_of(mtd, struct fsmc_nand_data, mtd);
+
+ switch (chipnr) {
+ case -1:
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+ break;
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ if (host->select_chip)
+ host->select_chip(chipnr,
+ chip->options & NAND_BUSWIDTH_16);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/*
+ * fsmc_cmd_ctrl - for facilitating hardware access
+ * This routine allows hardware-specific access to the control lines (ALE, CLE)
+ */
+static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+	void __iomem *regs = host->regs_va;
+ unsigned int bank = host->bank;
+
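+	/*
+	 * Command, address and data cycles go through separate address
+	 * windows (cmd_va, addr_va, data_va), so CLE/ALE are selected by
+	 * retargeting IO_ADDR_R/W rather than by toggling pins directly.
+	 */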
+ if (ctrl & NAND_CTRL_CHANGE) {
+ u32 pc;
+
+ if (ctrl & NAND_CLE) {
+ this->IO_ADDR_R = host->cmd_va;
+ this->IO_ADDR_W = host->cmd_va;
+ } else if (ctrl & NAND_ALE) {
+ this->IO_ADDR_R = host->addr_va;
+ this->IO_ADDR_W = host->addr_va;
+ } else {
+ this->IO_ADDR_R = host->data_va;
+ this->IO_ADDR_W = host->data_va;
+ }
+
+ pc = readl(FSMC_NAND_REG(regs, bank, PC));
+ if (ctrl & NAND_NCE)
+ pc |= FSMC_ENABLE;
+ else
+ pc &= ~FSMC_ENABLE;
+ writel(pc, FSMC_NAND_REG(regs, bank, PC));
+ }
+
+ mb();
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, this->IO_ADDR_W);
+}
+
+/*
+ * fsmc_nand_setup - FSMC (Flexible Static Memory Controller) init routine
+ *
+ * This routine initializes timing parameters related to NAND memory access in
+ * FSMC registers
+ */
+static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
+ uint32_t busw, struct fsmc_nand_timings *timings)
+{
+ uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
+ uint32_t tclr, tar, thiz, thold, twait, tset;
+ struct fsmc_nand_timings *tims;
+ struct fsmc_nand_timings default_timings = {
+ .tclr = FSMC_TCLR_1,
+ .tar = FSMC_TAR_1,
+ .thiz = FSMC_THIZ_1,
+ .thold = FSMC_THOLD_4,
+ .twait = FSMC_TWAIT_6,
+ .tset = FSMC_TSET_0,
+ };
+
+ if (timings)
+ tims = timings;
+ else
+ tims = &default_timings;
+
+ tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
+ tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
+ thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
+ thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
+ twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
+ tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
+
+ if (busw)
+ writel(value | FSMC_DEVWID_16, FSMC_NAND_REG(regs, bank, PC));
+ else
+ writel(value | FSMC_DEVWID_8, FSMC_NAND_REG(regs, bank, PC));
+
+ writel(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
+ FSMC_NAND_REG(regs, bank, PC));
+ writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, COMM));
+ writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, ATTRIB));
+}
+
+/*
+ * fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
+ */
+static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ void __iomem *regs = host->regs_va;
+ uint32_t bank = host->bank;
+
+ writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
+ FSMC_NAND_REG(regs, bank, PC));
+ writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
+ FSMC_NAND_REG(regs, bank, PC));
+ writel(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
+ FSMC_NAND_REG(regs, bank, PC));
+}
+
+/*
+ * fsmc_read_hwecc_ecc4 - Hardware ECC calculator for ecc4 option supported by
+ * FSMC. ECC is 13 bytes for 512 bytes of data (supports error correction up to
+ * max of 8-bits)
+ */
+static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
+ uint8_t *ecc)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ void __iomem *regs = host->regs_va;
+ uint32_t bank = host->bank;
+ uint32_t ecc_tmp;
+ unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
+
+ do {
+ if (readl(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
+ break;
+ else
+ cond_resched();
+ } while (!time_after_eq(jiffies, deadline));
+
+ if (time_after_eq(jiffies, deadline)) {
+ dev_err(host->dev, "calculate ecc timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc[0] = (uint8_t) (ecc_tmp >> 0);
+ ecc[1] = (uint8_t) (ecc_tmp >> 8);
+ ecc[2] = (uint8_t) (ecc_tmp >> 16);
+ ecc[3] = (uint8_t) (ecc_tmp >> 24);
+
+ ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC2));
+ ecc[4] = (uint8_t) (ecc_tmp >> 0);
+ ecc[5] = (uint8_t) (ecc_tmp >> 8);
+ ecc[6] = (uint8_t) (ecc_tmp >> 16);
+ ecc[7] = (uint8_t) (ecc_tmp >> 24);
+
+ ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC3));
+ ecc[8] = (uint8_t) (ecc_tmp >> 0);
+ ecc[9] = (uint8_t) (ecc_tmp >> 8);
+ ecc[10] = (uint8_t) (ecc_tmp >> 16);
+ ecc[11] = (uint8_t) (ecc_tmp >> 24);
+
+ ecc_tmp = readl(FSMC_NAND_REG(regs, bank, STS));
+ ecc[12] = (uint8_t) (ecc_tmp >> 16);
+
+ return 0;
+}
+
+/*
+ * fsmc_read_hwecc_ecc1 - Hardware ECC calculator for ecc1 option supported by
+ * FSMC. ECC is 3 bytes for 512 bytes of data (supports error correction up to
+ * max of 1-bit)
+ */
+static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
+ uint8_t *ecc)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ void __iomem *regs = host->regs_va;
+ uint32_t bank = host->bank;
+ uint32_t ecc_tmp;
+
+ ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc[0] = (uint8_t) (ecc_tmp >> 0);
+ ecc[1] = (uint8_t) (ecc_tmp >> 8);
+ ecc[2] = (uint8_t) (ecc_tmp >> 16);
+
+ return 0;
+}
+
+/* Count the number of 0 bits in buff, up to a maximum of max_bits */
+static int count_written_bits(uint8_t *buff, int size, int max_bits)
+{
+ int k, written_bits = 0;
+
+ for (k = 0; k < size; k++) {
+ written_bits += hweight8(~buff[k]);
+ if (written_bits > max_bits)
+ break;
+ }
+
+ return written_bits;
+}
+
+static void dma_complete(void *param)
+{
+ struct fsmc_nand_data *host = param;
+
+ complete(&host->dma_access_complete);
+}
+
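+/*
+ * dma_xfer - memcpy-style DMA transfer between a kernel buffer and the FSMC
+ * data port, used when the host is in DMA access mode. The read or write
+ * channel set up at probe time is used, and completion is awaited with a
+ * timeout.
+ */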
+static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
+ enum dma_data_direction direction)
+{
+ struct dma_chan *chan;
+ struct dma_device *dma_dev;
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t dma_dst, dma_src, dma_addr;
+ dma_cookie_t cookie;
+ unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int ret;
+
+ if (direction == DMA_TO_DEVICE)
+ chan = host->write_dma_chan;
+ else if (direction == DMA_FROM_DEVICE)
+ chan = host->read_dma_chan;
+ else
+ return -EINVAL;
+
+ dma_dev = chan->device;
+ dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
+
+ if (direction == DMA_TO_DEVICE) {
+ dma_src = dma_addr;
+ dma_dst = host->data_pa;
+ flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP;
+ } else {
+ dma_src = host->data_pa;
+ dma_dst = dma_addr;
+ flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP;
+ }
+
+ tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
+ len, flags);
+
+ if (!tx) {
+ dev_err(host->dev, "device_prep_dma_memcpy error\n");
+ dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
+ return -EIO;
+ }
+
+ tx->callback = dma_complete;
+ tx->callback_param = host;
+ cookie = tx->tx_submit(tx);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(host->dev, "dma_submit_error %d\n", cookie);
+ return ret;
+ }
+
+ dma_async_issue_pending(chan);
+
+	ret = wait_for_completion_interruptible_timeout(
+			&host->dma_access_complete, msecs_to_jiffies(3000));
+ if (ret <= 0) {
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dev_err(host->dev, "wait_for_completion_timeout\n");
+ return ret ? ret : -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * fsmc_write_buf - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+ IS_ALIGNED(len, sizeof(uint32_t))) {
+ uint32_t *p = (uint32_t *)buf;
+ len = len >> 2;
+ for (i = 0; i < len; i++)
+ writel(p[i], chip->IO_ADDR_W);
+ } else {
+ for (i = 0; i < len; i++)
+ writeb(buf[i], chip->IO_ADDR_W);
+ }
+}
+
+/*
+ * fsmc_read_buf - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+ IS_ALIGNED(len, sizeof(uint32_t))) {
+ uint32_t *p = (uint32_t *)buf;
+ len = len >> 2;
+ for (i = 0; i < len; i++)
+ p[i] = readl(chip->IO_ADDR_R);
+ } else {
+ for (i = 0; i < len; i++)
+ buf[i] = readb(chip->IO_ADDR_R);
+ }
+}
+
+/*
+ * fsmc_read_buf_dma - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct fsmc_nand_data *host;
+
+ host = container_of(mtd, struct fsmc_nand_data, mtd);
+ dma_xfer(host, buf, len, DMA_FROM_DEVICE);
+}
+
+/*
+ * fsmc_write_buf_dma - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct fsmc_nand_data *host;
+
+ host = container_of(mtd, struct fsmc_nand_data, mtd);
+ dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
+}
+
+/*
+ * fsmc_read_page_hwecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ *
+ * This routine is needed for FSMC version 8, as reading from the NAND chip has
+ * to be performed in a strict sequence:
+ * data (512 bytes) -> ecc (13 bytes)
+ * After this read, the FSMC hardware generates and reports the erroneous data
+ * bit offsets (up to a maximum of 8 bits).
+ */
+static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ struct fsmc_eccplace *ecc_place = host->ecc_place;
+ int i, j, s, stat, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ int off, len, group = 0;
+ /*
+ * ecc_oob is intentionally taken as uint16_t. In 16bit devices, we
+ * end up reading 14 bytes (7 words) from oob. The local array is
+ * to maintain word alignment
+ */
+ uint16_t ecc_oob[7];
+ uint8_t *oob = (uint8_t *)&ecc_oob[0];
+
+ for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
+ chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+
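+		/*
+		 * Gather the 13 ECC bytes for this sector from the spare
+		 * area; ecc_place scatters them, so this may take several
+		 * small OOB reads.
+		 */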
+ for (j = 0; j < eccbytes;) {
+ off = ecc_place->eccplace[group].offset;
+ len = ecc_place->eccplace[group].length;
+ group++;
+
+ /*
+ * length is intentionally kept a higher multiple of 2
+ * to read at least 13 bytes even in case of 16 bit NAND
+ * devices
+ */
+ if (chip->options & NAND_BUSWIDTH_16)
+ len = roundup(len, 2);
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
+ chip->read_buf(mtd, oob + j, len);
+ j += len;
+ }
+
+ memcpy(&ecc_code[i], oob, chip->ecc.bytes);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+
+ return 0;
+}
+
+/*
+ * fsmc_bch8_correct_data
+ * @mtd: mtd info structure
+ * @dat: buffer of read data
+ * @read_ecc: ecc read from device spare area
+ * @calc_ecc: ecc calculated from read data
+ *
+ * calc_ecc carries 104 bits of information: up to 8 error offsets of 13 bits
+ * each within the 512 bytes of read data.
+ */
+static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ struct nand_chip *chip = mtd->priv;
+ void __iomem *regs = host->regs_va;
+ unsigned int bank = host->bank;
+ uint32_t err_idx[8];
+ uint32_t num_err, i;
+ uint32_t ecc1, ecc2, ecc3, ecc4;
+
+ num_err = (readl(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
+
+ /* no bit flipping */
+ if (likely(num_err == 0))
+ return 0;
+
+ /* too many errors */
+ if (unlikely(num_err > 8)) {
+ /*
+ * This is a temporary erase check. A newly erased page read
+ * would result in an ecc error because the oob data is also
+ * erased to FF and the calculated ecc for an FF data is not
+ * FF..FF.
+ * This is a workaround to skip performing correction in case
+ * data is FF..FF
+ *
+ * Logic:
+ * For every page, each bit written as 0 is counted until these
+ * number of bits are greater than 8 (the maximum correction
+ * capability of FSMC for each 512 + 13 bytes)
+ */
+
+ int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
+ int bits_data = count_written_bits(dat, chip->ecc.size, 8);
+
+ if ((bits_ecc + bits_data) <= 8) {
+ if (bits_data)
+ memset(dat, 0xff, chip->ecc.size);
+ return bits_data;
+ }
+
+ return -EBADMSG;
+ }
+
+ /*
+ * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
+ * |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
+ *
+	 * calc_ecc carries 104 bits of information: up to 8 error offsets of
+	 * 13 bits each. The offsets are read back from the ECC registers and
+	 * populated in the err_idx array.
+ */
+ ecc1 = readl(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc2 = readl(FSMC_NAND_REG(regs, bank, ECC2));
+ ecc3 = readl(FSMC_NAND_REG(regs, bank, ECC3));
+ ecc4 = readl(FSMC_NAND_REG(regs, bank, STS));
+
+ err_idx[0] = (ecc1 >> 0) & 0x1FFF;
+ err_idx[1] = (ecc1 >> 13) & 0x1FFF;
+ err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
+ err_idx[3] = (ecc2 >> 7) & 0x1FFF;
+ err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
+ err_idx[5] = (ecc3 >> 1) & 0x1FFF;
+ err_idx[6] = (ecc3 >> 14) & 0x1FFF;
+ err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);
+
+ i = 0;
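+	/*
+	 * Each error index is a bit offset within the 512-byte sector plus
+	 * its 13 ECC bytes; offsets at or beyond chip->ecc.size * 8 fall in
+	 * the spare bytes and are not flipped in the data buffer.
+	 */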
+ while (num_err--) {
+ change_bit(0, (unsigned long *)&err_idx[i]);
+ change_bit(1, (unsigned long *)&err_idx[i]);
+
+ if (err_idx[i] < chip->ecc.size * 8) {
+ change_bit(err_idx[i], (unsigned long *)dat);
+ i++;
+ }
+ }
+ return i;
+}
+
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ chan->private = slave;
+ return true;
+}
+
+#ifdef CONFIG_OF
+static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ u32 val;
+
+ /* Set default NAND width to 8 bits */
+ pdata->width = 8;
+ if (!of_property_read_u32(np, "bank-width", &val)) {
+ if (val == 2) {
+ pdata->width = 16;
+ } else if (val != 1) {
+ dev_err(&pdev->dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
+ }
+ }
+ of_property_read_u32(np, "st,ale-off", &pdata->ale_off);
+ of_property_read_u32(np, "st,cle-off", &pdata->cle_off);
+ if (of_get_property(np, "nand-skip-bbtscan", NULL))
+ pdata->options = NAND_SKIP_BBTSCAN;
+
+ return 0;
+}
+#else
+static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ return -ENOSYS;
+}
+#endif
+
+/*
+ * fsmc_nand_probe - Probe function
+ * @pdev: platform device structure
+ */
+static int __init fsmc_nand_probe(struct platform_device *pdev)
+{
+ struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node __maybe_unused *np = pdev->dev.of_node;
+ struct mtd_part_parser_data ppdata = {};
+ struct fsmc_nand_data *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ struct resource *res;
+ dma_cap_mask_t mask;
+ int ret = 0;
+ u32 pid;
+ int i;
+
+ if (np) {
+		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+		if (!pdata)
+			return -ENOMEM;
+		pdev->dev.platform_data = pdata;
+ ret = fsmc_nand_probe_config_dt(pdev, np);
+ if (ret) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -ENODEV;
+ }
+ }
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform data is NULL\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ dev_err(&pdev->dev, "failed to allocate device structure\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
+ if (!res)
+ return -EINVAL;
+
+ if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ pdev->name)) {
+		dev_err(&pdev->dev, "Failed to get memory data resource\n");
+ return -ENOENT;
+ }
+
+ host->data_pa = (dma_addr_t)res->start;
+ host->data_va = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!host->data_va) {
+ dev_err(&pdev->dev, "data ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, res->start + pdata->ale_off,
+ resource_size(res), pdev->name)) {
+		dev_err(&pdev->dev, "Failed to get memory ale resource\n");
+ return -ENOENT;
+ }
+
+ host->addr_va = devm_ioremap(&pdev->dev, res->start + pdata->ale_off,
+ resource_size(res));
+ if (!host->addr_va) {
+ dev_err(&pdev->dev, "ale ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, res->start + pdata->cle_off,
+ resource_size(res), pdev->name)) {
+		dev_err(&pdev->dev, "Failed to get memory cle resource\n");
+ return -ENOENT;
+ }
+
+ host->cmd_va = devm_ioremap(&pdev->dev, res->start + pdata->cle_off,
+ resource_size(res));
+ if (!host->cmd_va) {
+		dev_err(&pdev->dev, "cle ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
+ if (!res)
+ return -EINVAL;
+
+ if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ pdev->name)) {
+		dev_err(&pdev->dev, "Failed to get memory regs resource\n");
+ return -ENOENT;
+ }
+
+ host->regs_va = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!host->regs_va) {
+ dev_err(&pdev->dev, "regs ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "failed to fetch block clock\n");
+ return PTR_ERR(host->clk);
+ }
+
+ ret = clk_enable(host->clk);
+ if (ret)
+ goto err_clk_enable;
+
+ /*
+ * This device ID is actually a common AMBA ID as used on the
+ * AMBA PrimeCell bus. However it is not a PrimeCell.
+ */
+ for (pid = 0, i = 0; i < 4; i++)
+ pid |= (readl(host->regs_va + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8);
+ host->pid = pid;
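+ /*
+ * The loop above assembles the four AMBA peripheral ID bytes, stored one
+ * byte per word starting 0x20 below the end of the register window, LSB
+ * first. Illustrative example with assumed ID bytes 0x40, 0x08, 0x1b,
+ * 0x00 (lowest address first): pid = 0x001b0840.
+ */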
+ dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, "
+ "revision %02x, config %02x\n",
+ AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
+ AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
+
+ host->bank = pdata->bank;
+ host->select_chip = pdata->select_bank;
+ host->partitions = pdata->partitions;
+ host->nr_partitions = pdata->nr_partitions;
+ host->dev = &pdev->dev;
+ host->dev_timings = pdata->nand_timings;
+ host->mode = pdata->mode;
+
+ if (host->mode == USE_DMA_ACCESS)
+ init_completion(&host->dma_access_complete);
+
+ /* Link all private pointers */
+ mtd = &host->mtd;
+ nand = &host->nand;
+ mtd->priv = nand;
+ nand->priv = host;
+
+ host->mtd.owner = THIS_MODULE;
+ nand->IO_ADDR_R = host->data_va;
+ nand->IO_ADDR_W = host->data_va;
+ nand->cmd_ctrl = fsmc_cmd_ctrl;
+ nand->chip_delay = 30;
+
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.hwctl = fsmc_enable_hwecc;
+ nand->ecc.size = 512;
+ nand->options = pdata->options;
+ nand->select_chip = fsmc_select_chip;
+ nand->badblockbits = 7;
+
+ if (pdata->width == FSMC_NAND_BW16)
+ nand->options |= NAND_BUSWIDTH_16;
+
+ switch (host->mode) {
+ case USE_DMA_ACCESS:
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ host->read_dma_chan = dma_request_channel(mask, filter,
+ pdata->read_dma_priv);
+ if (!host->read_dma_chan) {
+ dev_err(&pdev->dev, "Unable to get read dma channel\n");
+ goto err_req_read_chnl;
+ }
+ host->write_dma_chan = dma_request_channel(mask, filter,
+ pdata->write_dma_priv);
+ if (!host->write_dma_chan) {
+ dev_err(&pdev->dev, "Unable to get write dma channel\n");
+ goto err_req_write_chnl;
+ }
+ nand->read_buf = fsmc_read_buf_dma;
+ nand->write_buf = fsmc_write_buf_dma;
+ break;
+
+ default:
+ case USE_WORD_ACCESS:
+ nand->read_buf = fsmc_read_buf;
+ nand->write_buf = fsmc_write_buf;
+ break;
+ }
+
+ fsmc_nand_setup(host->regs_va, host->bank,
+ nand->options & NAND_BUSWIDTH_16,
+ host->dev_timings);
+
+ if (AMBA_REV_BITS(host->pid) >= 8) {
+ nand->ecc.read_page = fsmc_read_page_hwecc;
+ nand->ecc.calculate = fsmc_read_hwecc_ecc4;
+ nand->ecc.correct = fsmc_bch8_correct_data;
+ nand->ecc.bytes = 13;
+ nand->ecc.strength = 8;
+ } else {
+ nand->ecc.calculate = fsmc_read_hwecc_ecc1;
+ nand->ecc.correct = nand_correct_data;
+ nand->ecc.bytes = 3;
+ nand->ecc.strength = 1;
+ }
+
+ /*
+ * Scan to find existence of the device
+ */
+ if (nand_scan_ident(&host->mtd, 1, NULL)) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "No NAND Device found!\n");
+ goto err_scan_ident;
+ }
+
+ if (AMBA_REV_BITS(host->pid) >= 8) {
+ switch (host->mtd.oobsize) {
+ case 16:
+ nand->ecc.layout = &fsmc_ecc4_16_layout;
+ host->ecc_place = &fsmc_ecc4_sp_place;
+ break;
+ case 64:
+ nand->ecc.layout = &fsmc_ecc4_64_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ case 128:
+ nand->ecc.layout = &fsmc_ecc4_128_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ case 224:
+ nand->ecc.layout = &fsmc_ecc4_224_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ case 256:
+ nand->ecc.layout = &fsmc_ecc4_256_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ default:
+ printk(KERN_WARNING "No oob scheme defined for "
+ "oobsize %d\n", mtd->oobsize);
+ BUG();
+ }
+ } else {
+ switch (host->mtd.oobsize) {
+ case 16:
+ nand->ecc.layout = &fsmc_ecc1_16_layout;
+ break;
+ case 64:
+ nand->ecc.layout = &fsmc_ecc1_64_layout;
+ break;
+ case 128:
+ nand->ecc.layout = &fsmc_ecc1_128_layout;
+ break;
+ default:
+ printk(KERN_WARNING "No oob scheme defined for "
+ "oobsize %d\n", mtd->oobsize);
+ BUG();
+ }
+ }
+
+ /* Second stage of scan to fill MTD data-structures */
+ if (nand_scan_tail(&host->mtd)) {
+ ret = -ENXIO;
+ goto err_probe;
+ }
+
+ /*
+ * The partition information can be accessed in the following order of
+ * precedence:
+ *
+ * command line through the bootloader,
+ * platform data,
+ * default partition information present in the driver.
+ */
+ host->mtd.name = "nand";
+ ppdata.of_node = np;
+ ret = mtd_device_parse_register(&host->mtd, NULL, &ppdata,
+ host->partitions, host->nr_partitions);
+ if (ret)
+ goto err_probe;
+
+ platform_set_drvdata(pdev, host);
+ dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
+ return 0;
+
+err_probe:
+err_scan_ident:
+ if (host->mode == USE_DMA_ACCESS)
+ dma_release_channel(host->write_dma_chan);
+err_req_write_chnl:
+ if (host->mode == USE_DMA_ACCESS)
+ dma_release_channel(host->read_dma_chan);
+err_req_read_chnl:
+ clk_disable(host->clk);
+err_clk_enable:
+ clk_put(host->clk);
+ return ret;
+}
+
+/*
+ * Clean up routine
+ */
+static int fsmc_nand_remove(struct platform_device *pdev)
+{
+ struct fsmc_nand_data *host = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (host) {
+ nand_release(&host->mtd);
+
+ if (host->mode == USE_DMA_ACCESS) {
+ dma_release_channel(host->write_dma_chan);
+ dma_release_channel(host->read_dma_chan);
+ }
+ clk_disable(host->clk);
+ clk_put(host->clk);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int fsmc_nand_suspend(struct device *dev)
+{
+ struct fsmc_nand_data *host = dev_get_drvdata(dev);
+ if (host)
+ clk_disable(host->clk);
+ return 0;
+}
+
+static int fsmc_nand_resume(struct device *dev)
+{
+ struct fsmc_nand_data *host = dev_get_drvdata(dev);
+ if (host) {
+ clk_enable(host->clk);
+ fsmc_nand_setup(host->regs_va, host->bank,
+ host->nand.options & NAND_BUSWIDTH_16,
+ host->dev_timings);
+ }
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id fsmc_nand_id_table[] = {
+ { .compatible = "st,spear600-fsmc-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
+#endif
+
+static struct platform_driver fsmc_nand_driver = {
+ .remove = fsmc_nand_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "fsmc-nand",
+ .of_match_table = of_match_ptr(fsmc_nand_id_table),
+#ifdef CONFIG_PM
+ .pm = &fsmc_nand_pm_ops,
+#endif
+ },
+};
+
+static int __init fsmc_nand_init(void)
+{
+ return platform_driver_probe(&fsmc_nand_driver,
+ fsmc_nand_probe);
+}
+module_init(fsmc_nand_init);
+
+static void __exit fsmc_nand_exit(void)
+{
+ platform_driver_unregister(&fsmc_nand_driver);
+}
+module_exit(fsmc_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
+MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
new file mode 100644
index 00000000..27000a5f
--- /dev/null
+++ b/drivers/mtd/nand/gpio.c
@@ -0,0 +1,476 @@
+/*
+ * drivers/mtd/nand/gpio.c
+ *
+ * Updated, and converted to generic GPIO based driver by Russell King.
+ *
+ * Written by Ben Dooks <ben@simtec.co.uk>
+ * Based on 2.4 version by Mark Whittaker
+ *
+ * © 2004 Simtec Electronics
+ *
+ * Device driver for NAND connected via GPIO
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand-gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+
+struct gpiomtd {
+ void __iomem *io_sync;
+ struct mtd_info mtd_info;
+ struct nand_chip nand_chip;
+ struct gpio_nand_platdata plat;
+};
+
+#define gpio_nand_getpriv(x) container_of(x, struct gpiomtd, mtd_info)
+
+
+#ifdef CONFIG_ARM
+/* gpio_nand_dosync()
+ *
+ * Make sure the GPIO state changes occur in-order with writes to NAND
+ * memory region.
+ * Needed on PXA due to bus-reordering within the SoC itself (see the section
+ * on I/O ordering in the PXA manual, section 2.3, p35).
+ */
+static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
+{
+ unsigned long tmp;
+
+ if (gpiomtd->io_sync) {
+ /*
+ * Linux memory barriers don't cater for what's required here.
+ * What's required is what's here - a read from a separate
+ * region with a dependency on that read.
+ */
+ tmp = readl(gpiomtd->io_sync);
+ asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
+ }
+}
+#else
+static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
+#endif
+
+static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
+
+ gpio_nand_dosync(gpiomtd);
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ gpio_set_value(gpiomtd->plat.gpio_nce, !(ctrl & NAND_NCE));
+ gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
+ gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
+ gpio_nand_dosync(gpiomtd);
+ }
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ writeb(cmd, gpiomtd->nand_chip.IO_ADDR_W);
+ gpio_nand_dosync(gpiomtd);
+}
+
+static void gpio_nand_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+
+ writesb(this->IO_ADDR_W, buf, len);
+}
+
+static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+
+ readsb(this->IO_ADDR_R, buf, len);
+}
+
+static int gpio_nand_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned char read, *p = (unsigned char *) buf;
+ int i, err = 0;
+
+ for (i = 0; i < len; i++) {
+ read = readb(this->IO_ADDR_R);
+ if (read != p[i]) {
+ pr_debug("%s: err at %d (read %04x vs %04x)\n",
+ __func__, i, read, p[i]);
+ err = -EFAULT;
+ }
+ }
+ return err;
+}
+
+static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
+ int len)
+{
+ struct nand_chip *this = mtd->priv;
+
+ if (IS_ALIGNED((unsigned long)buf, 2)) {
+ writesw(this->IO_ADDR_W, buf, len>>1);
+ } else {
+ int i;
+ unsigned short *ptr = (unsigned short *)buf;
+
+ for (i = 0; i < len; i += 2, ptr++)
+ writew(*ptr, this->IO_ADDR_W);
+ }
+}
+
+static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+
+ if (IS_ALIGNED((unsigned long)buf, 2)) {
+ readsw(this->IO_ADDR_R, buf, len>>1);
+ } else {
+ int i;
+ unsigned short *ptr = (unsigned short *)buf;
+
+ for (i = 0; i < len; i += 2, ptr++)
+ *ptr = readw(this->IO_ADDR_R);
+ }
+}
+
+static int gpio_nand_verifybuf16(struct mtd_info *mtd, const u_char *buf,
+ int len)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned short read, *p = (unsigned short *) buf;
+ int i, err = 0;
+ len >>= 1;
+
+ for (i = 0; i < len; i++) {
+ read = readw(this->IO_ADDR_R);
+ if (read != p[i]) {
+ pr_debug("%s: err at %d (read %04x vs %04x)\n",
+ __func__, i, read, p[i]);
+ err = -EFAULT;
+ }
+ }
+ return err;
+}
+
+
+static int gpio_nand_devready(struct mtd_info *mtd)
+{
+ struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
+ return gpio_get_value(gpiomtd->plat.gpio_rdy);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_nand_id_table[] = {
+ { .compatible = "gpio-control-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
+
+static int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ u32 val;
+
+ if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
+ if (val == 2) {
+ plat->options |= NAND_BUSWIDTH_16;
+ } else if (val != 1) {
+ dev_err(dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
+ }
+ }
+
+ plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
+ plat->gpio_nce = of_get_gpio(dev->of_node, 1);
+ plat->gpio_ale = of_get_gpio(dev->of_node, 2);
+ plat->gpio_cle = of_get_gpio(dev->of_node, 3);
+ plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
+
+ if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
+ plat->chip_delay = val;
+
+ return 0;
+}
+
+static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ struct resource *r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+ u64 addr;
+
+ if (!r || of_property_read_u64(pdev->dev.of_node,
+ "gpio-control-nand,io-sync-reg", &addr))
+ return NULL;
+
+ r->start = addr;
+ r->end = r->start + 0x3;
+ r->flags = IORESOURCE_MEM;
+
+ return r;
+}
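+
+/*
+ * Illustrative device-tree fragment matching the parsing above. This is a
+ * sketch only; the GPIO specifiers, the io-sync register address and the
+ * chip delay are placeholders. The five gpios are taken in the order
+ * rdy, nce, ale, cle, nwp:
+ *
+ *   gpio-nand {
+ *           compatible = "gpio-control-nand";
+ *           bank-width = <1>;
+ *           gpios = <&gpio 1 0>, <&gpio 2 0>, <&gpio 3 0>,
+ *                   <&gpio 4 0>, <&gpio 5 0>;
+ *           chip-delay = <25>;
+ *           gpio-control-nand,io-sync-reg = <0x0 0x43fac000>;
+ *   };
+ */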
+#else /* CONFIG_OF */
+#define gpio_nand_id_table NULL
+static inline int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ return -ENOSYS;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static inline int gpio_nand_get_config(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ int ret = gpio_nand_get_config_of(dev, plat);
+
+ if (!ret)
+ return ret;
+
+ if (dev->platform_data) {
+ memcpy(plat, dev->platform_data, sizeof(*plat));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync(struct platform_device *pdev)
+{
+ struct resource *r = gpio_nand_get_io_sync_of(pdev);
+
+ if (r)
+ return r;
+
+ return platform_get_resource(pdev, IORESOURCE_MEM, 1);
+}
+
+static int __devexit gpio_nand_remove(struct platform_device *dev)
+{
+ struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
+ struct resource *res;
+
+ nand_release(&gpiomtd->mtd_info);
+
+ res = gpio_nand_get_io_sync(dev);
+ iounmap(gpiomtd->io_sync);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ iounmap(gpiomtd->nand_chip.IO_ADDR_R);
+ release_mem_region(res->start, resource_size(res));
+
+ if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+ gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+ gpio_set_value(gpiomtd->plat.gpio_nce, 1);
+
+ gpio_free(gpiomtd->plat.gpio_cle);
+ gpio_free(gpiomtd->plat.gpio_ale);
+ gpio_free(gpiomtd->plat.gpio_nce);
+ if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+ gpio_free(gpiomtd->plat.gpio_nwp);
+ gpio_free(gpiomtd->plat.gpio_rdy);
+
+ kfree(gpiomtd);
+
+ return 0;
+}
+
+static void __iomem *request_and_remap(struct resource *res, size_t size,
+ const char *name, int *err)
+{
+ void __iomem *ptr;
+
+ if (!request_mem_region(res->start, resource_size(res), name)) {
+ *err = -EBUSY;
+ return NULL;
+ }
+
+ ptr = ioremap(res->start, size);
+ if (!ptr) {
+ release_mem_region(res->start, resource_size(res));
+ *err = -ENOMEM;
+ }
+ return ptr;
+}
+
+static int __devinit gpio_nand_probe(struct platform_device *dev)
+{
+ struct gpiomtd *gpiomtd;
+ struct nand_chip *this;
+ struct resource *res0, *res1;
+ struct mtd_part_parser_data ppdata = {};
+ int ret = 0;
+
+ if (!dev->dev.of_node && !dev->dev.platform_data)
+ return -EINVAL;
+
+ res0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res0)
+ return -EINVAL;
+
+ gpiomtd = kzalloc(sizeof(*gpiomtd), GFP_KERNEL);
+ if (gpiomtd == NULL) {
+ dev_err(&dev->dev, "failed to create NAND MTD\n");
+ return -ENOMEM;
+ }
+
+ this = &gpiomtd->nand_chip;
+ this->IO_ADDR_R = request_and_remap(res0, 2, "NAND", &ret);
+ if (!this->IO_ADDR_R) {
+ dev_err(&dev->dev, "unable to map NAND\n");
+ goto err_map;
+ }
+
+ res1 = gpio_nand_get_io_sync(dev);
+ if (res1) {
+ gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret);
+ if (!gpiomtd->io_sync) {
+ dev_err(&dev->dev, "unable to map sync NAND\n");
+ goto err_sync;
+ }
+ }
+
+ ret = gpio_nand_get_config(&dev->dev, &gpiomtd->plat);
+ if (ret)
+ goto err_nce;
+
+ ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE");
+ if (ret)
+ goto err_nce;
+ gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
+ if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
+ ret = gpio_request(gpiomtd->plat.gpio_nwp, "NAND NWP");
+ if (ret)
+ goto err_nwp;
+ gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
+ }
+ ret = gpio_request(gpiomtd->plat.gpio_ale, "NAND ALE");
+ if (ret)
+ goto err_ale;
+ gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
+ ret = gpio_request(gpiomtd->plat.gpio_cle, "NAND CLE");
+ if (ret)
+ goto err_cle;
+ gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
+ ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
+ if (ret)
+ goto err_rdy;
+ gpio_direction_input(gpiomtd->plat.gpio_rdy);
+
+
+ this->IO_ADDR_W = this->IO_ADDR_R;
+ this->ecc.mode = NAND_ECC_SOFT;
+ this->options = gpiomtd->plat.options;
+ this->chip_delay = gpiomtd->plat.chip_delay;
+
+ /* install our routines */
+ this->cmd_ctrl = gpio_nand_cmd_ctrl;
+ this->dev_ready = gpio_nand_devready;
+
+ if (this->options & NAND_BUSWIDTH_16) {
+ this->read_buf = gpio_nand_readbuf16;
+ this->write_buf = gpio_nand_writebuf16;
+ this->verify_buf = gpio_nand_verifybuf16;
+ } else {
+ this->read_buf = gpio_nand_readbuf;
+ this->write_buf = gpio_nand_writebuf;
+ this->verify_buf = gpio_nand_verifybuf;
+ }
+
+ /* set the mtd private data for the nand driver */
+ gpiomtd->mtd_info.priv = this;
+ gpiomtd->mtd_info.owner = THIS_MODULE;
+
+ if (nand_scan(&gpiomtd->mtd_info, 1)) {
+ dev_err(&dev->dev, "no nand chips found?\n");
+ ret = -ENXIO;
+ goto err_wp;
+ }
+
+ if (gpiomtd->plat.adjust_parts)
+ gpiomtd->plat.adjust_parts(&gpiomtd->plat,
+ gpiomtd->mtd_info.size);
+
+ ppdata.of_node = dev->dev.of_node;
+ ret = mtd_device_parse_register(&gpiomtd->mtd_info, NULL, &ppdata,
+ gpiomtd->plat.parts,
+ gpiomtd->plat.num_parts);
+ if (ret)
+ goto err_wp;
+ platform_set_drvdata(dev, gpiomtd);
+
+ return 0;
+
+err_wp:
+ if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+ gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+ gpio_free(gpiomtd->plat.gpio_rdy);
+err_rdy:
+ gpio_free(gpiomtd->plat.gpio_cle);
+err_cle:
+ gpio_free(gpiomtd->plat.gpio_ale);
+err_ale:
+ if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+ gpio_free(gpiomtd->plat.gpio_nwp);
+err_nwp:
+ gpio_free(gpiomtd->plat.gpio_nce);
+err_nce:
+ iounmap(gpiomtd->io_sync);
+ if (res1)
+ release_mem_region(res1->start, resource_size(res1));
+err_sync:
+ iounmap(gpiomtd->nand_chip.IO_ADDR_R);
+ release_mem_region(res0->start, resource_size(res0));
+err_map:
+ kfree(gpiomtd);
+ return ret;
+}
+
+static struct platform_driver gpio_nand_driver = {
+ .probe = gpio_nand_probe,
+ .remove = gpio_nand_remove,
+ .driver = {
+ .name = "gpio-nand",
+ .of_match_table = gpio_nand_id_table,
+ },
+};
+
+static int __init gpio_nand_init(void)
+{
+ printk(KERN_INFO "GPIO NAND driver, © 2004 Simtec Electronics\n");
+
+ return platform_driver_register(&gpio_nand_driver);
+}
+
+static void __exit gpio_nand_exit(void)
+{
+ platform_driver_unregister(&gpio_nand_driver);
+}
+
+module_init(gpio_nand_init);
+module_exit(gpio_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("GPIO NAND Driver");
diff --git a/drivers/mtd/nand/gpmi-nand/Makefile b/drivers/mtd/nand/gpmi-nand/Makefile
new file mode 100644
index 00000000..3a462487
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
+gpmi_nand-objs += gpmi-nand.o
+gpmi_nand-objs += gpmi-lib.o
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
new file mode 100644
index 00000000..4effb8c5
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -0,0 +1,84 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __GPMI_NAND_BCH_REGS_H
+#define __GPMI_NAND_BCH_REGS_H
+
+#define HW_BCH_CTRL 0x00000000
+#define HW_BCH_CTRL_SET 0x00000004
+#define HW_BCH_CTRL_CLR 0x00000008
+#define HW_BCH_CTRL_TOG 0x0000000c
+
+#define BM_BCH_CTRL_COMPLETE_IRQ_EN (1 << 8)
+#define BM_BCH_CTRL_COMPLETE_IRQ (1 << 0)
+
+#define HW_BCH_STATUS0 0x00000010
+#define HW_BCH_MODE 0x00000020
+#define HW_BCH_ENCODEPTR 0x00000030
+#define HW_BCH_DATAPTR 0x00000040
+#define HW_BCH_METAPTR 0x00000050
+#define HW_BCH_LAYOUTSELECT 0x00000070
+
+#define HW_BCH_FLASH0LAYOUT0 0x00000080
+
+#define BP_BCH_FLASH0LAYOUT0_NBLOCKS 24
+#define BM_BCH_FLASH0LAYOUT0_NBLOCKS (0xff << BP_BCH_FLASH0LAYOUT0_NBLOCKS)
+#define BF_BCH_FLASH0LAYOUT0_NBLOCKS(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_NBLOCKS) & BM_BCH_FLASH0LAYOUT0_NBLOCKS)
+
+#define BP_BCH_FLASH0LAYOUT0_META_SIZE 16
+#define BM_BCH_FLASH0LAYOUT0_META_SIZE (0xff << BP_BCH_FLASH0LAYOUT0_META_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_META_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_META_SIZE)\
+ & BM_BCH_FLASH0LAYOUT0_META_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT0_ECC0 12
+#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
+#define BF_BCH_FLASH0LAYOUT0_ECC0(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0)
+
+#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
+#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
+ (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\
+ & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+
+#define HW_BCH_FLASH0LAYOUT1 0x00000090
+
+#define BP_BCH_FLASH0LAYOUT1_PAGE_SIZE 16
+#define BM_BCH_FLASH0LAYOUT1_PAGE_SIZE \
+ (0xffff << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) \
+ & BM_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT1_ECCN 12
+#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
+#define BF_BCH_FLASH0LAYOUT1_ECCN(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN)
+
+#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
+#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
+ (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+ & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
new file mode 100644
index 00000000..e8ea7107
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -0,0 +1,1076 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/mtd/gpmi-nand.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <mach/mxs.h>
+
+#include "gpmi-nand.h"
+#include "gpmi-regs.h"
+#include "bch-regs.h"
+
+struct timing_threshod timing_default_threshold = {
+ .max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >>
+ BP_GPMI_TIMING0_DATA_SETUP),
+ .internal_data_setup_in_ns = 0,
+ .max_sample_delay_factor = (BM_GPMI_CTRL1_RDN_DELAY >>
+ BP_GPMI_CTRL1_RDN_DELAY),
+ .max_dll_clock_period_in_ns = 32,
+ .max_dll_delay_in_ns = 16,
+};
+
+/*
+ * Clear the bit and poll it cleared. This is usually called with
+ * a reset address and mask being either SFTRST(bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+ int timeout = 0x400;
+
+ /* clear the bit */
+ __mxs_clrl(mask, addr);
+
+ /*
+ * SFTRST needs 3 GPMI clocks to settle; the reference manual
+ * recommends waiting 1us.
+ */
+ udelay(1);
+
+ /* poll the bit becoming clear */
+ while ((readl(addr) & mask) && --timeout)
+ /* nothing */;
+
+ return !timeout;
+}
+
+#define MODULE_CLKGATE (1 << 30)
+#define MODULE_SFTRST (1 << 31)
+/*
+ * The current mxs_reset_block() will do two things:
+ * [1] enable the module.
+ * [2] reset the module.
+ *
+ * In most cases this is fine.
+ * But on the MX23 there is a hardware bug in the BCH block (see erratum #2847):
+ * if you try to soft reset the BCH block, it becomes unusable until
+ * the next hard reset. This case occurs in NAND boot mode. When the board
+ * boots from NAND, the ROM of the chip initializes the BCH block itself,
+ * so if the driver resets the BCH again, the BCH stops working and you
+ * will see a DMA timeout. The bug has been fixed in later chips such as
+ * the MX28.
+ *
+ * To avoid this bug, add a new parameter `just_enable` to
+ * mxs_reset_block(), and rewrite it here.
+ */
+static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
+{
+ int ret;
+ int timeout = 0x400;
+
+ /* clear and poll SFTRST */
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear CLKGATE */
+ __mxs_clrl(MODULE_CLKGATE, reset_addr);
+
+ if (!just_enable) {
+ /* set SFTRST to reset the block */
+ __mxs_setl(MODULE_SFTRST, reset_addr);
+ udelay(1);
+
+ /* poll CLKGATE becoming set */
+ while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
+ /* nothing */;
+ if (unlikely(!timeout))
+ goto error;
+ }
+
+ /* clear and poll SFTRST */
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear and poll CLKGATE */
+ ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
+ if (unlikely(ret))
+ goto error;
+
+ return 0;
+
+error:
+ pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
+ return -ETIMEDOUT;
+}
+
+int gpmi_init(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ int ret;
+
+ ret = clk_prepare_enable(r->clock);
+ if (ret)
+ goto err_out;
+ ret = gpmi_reset_block(r->gpmi_regs, false);
+ if (ret)
+ goto err_out;
+
+ /* Choose NAND mode. */
+ writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+ /* Set the IRQ polarity. */
+ writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
+ r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Disable Write-Protection. */
+ writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Select BCH ECC. */
+ writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ clk_disable_unprepare(r->clock);
+ return 0;
+err_out:
+ return ret;
+}
+
+/* This function is called only when a bug occurs; it dumps debug info. */
+void gpmi_dump_info(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct bch_geometry *geo = &this->bch_geometry;
+ u32 reg;
+ int i;
+
+ pr_err("Show GPMI registers :\n");
+ for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
+ reg = readl(r->gpmi_regs + i * 0x10);
+ pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ }
+
+ /* start to print out the BCH info */
+ pr_err("BCH Geometry :\n");
+ pr_err("GF length : %u\n", geo->gf_len);
+ pr_err("ECC Strength : %u\n", geo->ecc_strength);
+ pr_err("Page Size in Bytes : %u\n", geo->page_size);
+ pr_err("Metadata Size in Bytes : %u\n", geo->metadata_size);
+ pr_err("ECC Chunk Size in Bytes: %u\n", geo->ecc_chunk_size);
+ pr_err("ECC Chunk Count : %u\n", geo->ecc_chunk_count);
+ pr_err("Payload Size in Bytes : %u\n", geo->payload_size);
+ pr_err("Auxiliary Size in Bytes: %u\n", geo->auxiliary_size);
+ pr_err("Auxiliary Status Offset: %u\n", geo->auxiliary_status_offset);
+ pr_err("Block Mark Byte Offset : %u\n", geo->block_mark_byte_offset);
+ pr_err("Block Mark Bit Offset : %u\n", geo->block_mark_bit_offset);
+}
+
+/* Configures the geometry for BCH. */
+int bch_set_geometry(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct bch_geometry *bch_geo = &this->bch_geometry;
+ unsigned int block_count;
+ unsigned int block_size;
+ unsigned int metadata_size;
+ unsigned int ecc_strength;
+ unsigned int page_size;
+ int ret;
+
+ if (common_nfc_set_geometry(this))
+ return !0;
+
+ block_count = bch_geo->ecc_chunk_count - 1;
+ block_size = bch_geo->ecc_chunk_size;
+ metadata_size = bch_geo->metadata_size;
+ ecc_strength = bch_geo->ecc_strength >> 1;
+ page_size = bch_geo->page_size;
+
+ ret = clk_prepare_enable(r->clock);
+ if (ret)
+ goto err_out;
+
+ /*
+ * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
+ * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
+ * On the other hand, the MX28 needs the reset, because one case has been
+ * seen where the BCH produced ECC errors constantly after 10000
+ * consecutive reboots. The latter case has not been seen on the MX23 yet,
+ * but we don't know whether it could happen there as well.
+ */
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ if (ret)
+ goto err_out;
+
+ /* Configure layout 0. */
+ writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
+ | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
+ | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength)
+ | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size),
+ r->bch_regs + HW_BCH_FLASH0LAYOUT0);
+
+ writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
+ | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength)
+ | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size),
+ r->bch_regs + HW_BCH_FLASH0LAYOUT1);
+
+ /* Set *all* chip selects to use layout 0. */
+ writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
+
+ /* Enable interrupts. */
+ writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
+ r->bch_regs + HW_BCH_CTRL_SET);
+
+ clk_disable_unprepare(r->clock);
+ return 0;
+err_out:
+ return ret;
+}
+
+/* Converts time in nanoseconds to cycles. */
+static unsigned int ns_to_cycles(unsigned int time,
+ unsigned int period, unsigned int min)
+{
+ unsigned int k;
+
+ k = (time + period - 1) / period;
+ return max(k, min);
+}
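+
+/*
+ * Illustrative example (assumed values): with a 10 ns clock period,
+ * ns_to_cycles(25, 10, 1) gives (25 + 10 - 1) / 10 = 3 cycles, i.e. a
+ * 25 ns requirement rounds up to 3 cycles, while ns_to_cycles(0, 10, 1)
+ * returns the minimum of 1 cycle.
+ */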
+
+/* Apply timing to current hardware conditions. */
+static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
+ struct gpmi_nfc_hardware_timing *hw)
+{
+ struct gpmi_nand_platform_data *pdata = this->pdata;
+ struct timing_threshod *nfc = &timing_default_threshold;
+ struct nand_chip *nand = &this->nand;
+ struct nand_timing target = this->timing;
+ bool improved_timing_is_available;
+ unsigned long clock_frequency_in_hz;
+ unsigned int clock_period_in_ns;
+ bool dll_use_half_periods;
+ unsigned int dll_delay_shift;
+ unsigned int max_sample_delay_in_ns;
+ unsigned int address_setup_in_cycles;
+ unsigned int data_setup_in_ns;
+ unsigned int data_setup_in_cycles;
+ unsigned int data_hold_in_cycles;
+ int ideal_sample_delay_in_ns;
+ unsigned int sample_delay_factor;
+ int tEYE;
+ unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns;
+ unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns;
+
+ /*
+ * If there are multiple chips, we need to relax the timings to allow
+ * for signal distortion due to higher capacitance.
+ */
+ if (nand->numchips > 2) {
+ target.data_setup_in_ns += 10;
+ target.data_hold_in_ns += 10;
+ target.address_setup_in_ns += 10;
+ } else if (nand->numchips > 1) {
+ target.data_setup_in_ns += 5;
+ target.data_hold_in_ns += 5;
+ target.address_setup_in_ns += 5;
+ }
+
+ /* Check if improved timing information is available. */
+ improved_timing_is_available =
+ (target.tREA_in_ns >= 0) &&
+ (target.tRLOH_in_ns >= 0) &&
+ (target.tRHOH_in_ns >= 0) ;
+
+ /* Inspect the clock. */
+ clock_frequency_in_hz = nfc->clock_frequency_in_hz;
+ clock_period_in_ns = 1000000000 / clock_frequency_in_hz;
+
+ /*
+ * The NFC quantizes setup and hold parameters in terms of clock cycles.
+ * Here, we quantize the setup and hold timing parameters to the
+ * next-highest clock period to make sure we apply at least the
+ * specified times.
+ *
+ * For data setup and data hold, the hardware interprets a value of zero
+ * as the largest possible delay. This is not what's intended by a zero
+ * in the input parameter, so we impose a minimum of one cycle.
+ */
+ data_setup_in_cycles = ns_to_cycles(target.data_setup_in_ns,
+ clock_period_in_ns, 1);
+ data_hold_in_cycles = ns_to_cycles(target.data_hold_in_ns,
+ clock_period_in_ns, 1);
+ address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
+ clock_period_in_ns, 0);
+
+ /*
+ * The clock's period affects the sample delay in a number of ways:
+ *
+ * (1) The NFC HAL tells us the maximum clock period the sample delay
+ * DLL can tolerate. If the clock period is greater than half that
+ * maximum, we must configure the DLL to be driven by half periods.
+ *
+ * (2) We need to convert from an ideal sample delay, in ns, to a
+ * "sample delay factor," which the NFC uses. This factor depends on
+ * whether we're driving the DLL with full or half periods.
+ * Paraphrasing the reference manual:
+ *
+ * AD = SDF x 0.125 x RP
+ *
+ * where:
+ *
+ * AD is the applied delay, in ns.
+ * SDF is the sample delay factor, which is dimensionless.
+ * RP is the reference period, in ns, which is a full clock period
+ * if the DLL is being driven by full periods, or half that if
+ * the DLL is being driven by half periods.
+ *
+ * Let's re-arrange this in a way that's more useful to us:
+ *
+ * 8
+ * SDF = AD x ----
+ * RP
+ *
+ * The reference period is either the clock period or half that, so this
+ * is:
+ *
+ * 8 AD x DDF
+ * SDF = AD x ----- = --------
+ * f x P P
+ *
+ * where:
+ *
+ * f is 1 or 1/2, depending on how we're driving the DLL.
+ * P is the clock period.
+ * DDF is the DLL Delay Factor, a dimensionless value that
+ * incorporates all the constants in the conversion.
+ *
+ * DDF will be either 8 or 16, both of which are powers of two. We can
+ * reduce the cost of this conversion by using bit shifts instead of
+ * multiplication or division. Thus:
+ *
+ * AD << DDS
+ * SDF = ---------
+ * P
+ *
+ * or
+ *
+ * AD = (SDF >> DDS) x P
+ *
+ * where:
+ *
+ * DDS is the DLL Delay Shift, the logarithm to base 2 of the DDF.
+ */
+ if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
+ dll_use_half_periods = true;
+ dll_delay_shift = 3 + 1;
+ } else {
+ dll_use_half_periods = false;
+ dll_delay_shift = 3;
+ }
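+
+ /*
+ * Illustrative example (assumed clock): with a 100 MHz GPMI clock the
+ * period is 10 ns, which does not exceed half of the 32 ns maximum DLL
+ * clock period, so full periods are used and DDS = 3. An assumed ideal
+ * sample delay of 6 ns then quantizes to
+ * SDF = ns_to_cycles(6 << 3, 10, 0) = 5, and the applied delay is
+ * roughly (5 * 10) >> 3 = 6 ns.
+ */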
+
+ /*
+ * Compute the maximum sample delay the NFC allows, under current
+ * conditions. If the clock is running too slowly, no sample delay is
+ * possible.
+ */
+ if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
+ max_sample_delay_in_ns = 0;
+ else {
+ /*
+ * Compute the delay implied by the largest sample delay factor
+ * the NFC allows.
+ */
+ max_sample_delay_in_ns =
+ (nfc->max_sample_delay_factor * clock_period_in_ns) >>
+ dll_delay_shift;
+
+ /*
+ * Check if the implied sample delay is larger than the NFC
+ * actually allows.
+ */
+ if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
+ max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
+ }
+
+ /*
+ * Check if improved timing information is available. If not, we have to
+ * use a less-sophisticated algorithm.
+ */
+ if (!improved_timing_is_available) {
+ /*
+ * Fold the read setup time required by the NFC into the ideal
+ * sample delay.
+ */
+ ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
+ nfc->internal_data_setup_in_ns;
+
+ /*
+ * The ideal sample delay may be greater than the maximum
+ * allowed by the NFC. If so, we can trade off sample delay time
+ * for more data setup time.
+ *
+ * In each iteration of the following loop, we add a cycle to
+ * the data setup time and subtract a corresponding amount from
+ * the sample delay until we've satisfied the constraints or
+ * can't do any better.
+ */
+ while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+
+ data_setup_in_cycles++;
+ ideal_sample_delay_in_ns -= clock_period_in_ns;
+
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+
+ }
+
+ /*
+ * Compute the sample delay factor that corresponds most closely
+ * to the ideal sample delay. If the result is too large for the
+ * NFC, use the maximum value.
+ *
+ * Notice that we use the ns_to_cycles function to compute the
+ * sample delay factor. We do this because the form of the
+ * computation is the same as that for calculating cycles.
+ */
+ sample_delay_factor =
+ ns_to_cycles(
+ ideal_sample_delay_in_ns << dll_delay_shift,
+ clock_period_in_ns, 0);
+
+ if (sample_delay_factor > nfc->max_sample_delay_factor)
+ sample_delay_factor = nfc->max_sample_delay_factor;
+
+ /* Skip to the part where we return our results. */
+ goto return_results;
+ }
+
+ /*
+ * If control arrives here, we have more detailed timing information,
+ * so we can use a better algorithm.
+ */
+
+ /*
+ * Fold the read setup time required by the NFC into the maximum
+ * propagation delay.
+ */
+ max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;
+
+ /*
+ * Earlier, we computed the number of clock cycles required to satisfy
+ * the data setup time. Now, we need to know the actual nanoseconds.
+ */
+ data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;
+
+ /*
+ * Compute tEYE, the width of the data eye when reading from the NAND
+ * Flash. The eye width is fundamentally determined by the data setup
+ * time, perturbed by propagation delays and some characteristics of the
+ * NAND Flash device.
+ *
+ * start of the eye = max_prop_delay + tREA
+ * end of the eye = min_prop_delay + tRHOH + data_setup
+ */
+ tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
+ (int)data_setup_in_ns;
+
+ tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;
+
+ /*
+ * The eye must be open. If it's not, we can try to open it by
+ * increasing its main forcer, the data setup time.
+ *
+ * In each iteration of the following loop, we increase the data setup
+ * time by a single clock cycle. We do this until either the eye is
+ * open or we run into NFC limits.
+ */
+ while ((tEYE <= 0) &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+ /* Give a cycle to data setup. */
+ data_setup_in_cycles++;
+ /* Synchronize the data setup time with the cycles. */
+ data_setup_in_ns += clock_period_in_ns;
+ /* Adjust tEYE accordingly. */
+ tEYE += clock_period_in_ns;
+ }
+
+ /*
+ * When control arrives here, the eye is open. The ideal time to sample
+ * the data is in the center of the eye:
+ *
+ * end of the eye + start of the eye
+ * --------------------------------- - data_setup
+ * 2
+ *
+ * After some algebra, this simplifies to the code immediately below.
+ */
+ ideal_sample_delay_in_ns =
+ ((int)max_prop_delay_in_ns +
+ (int)target.tREA_in_ns +
+ (int)min_prop_delay_in_ns +
+ (int)target.tRHOH_in_ns -
+ (int)data_setup_in_ns) >> 1;
+
+ /*
+ * The following figure illustrates some aspects of a NAND Flash read:
+ *
+ *
+ * __ _____________________________________
+ * RDN \_________________/
+ *
+ * <---- tEYE ----->
+ * /-----------------\
+ * Read Data ----------------------------< >---------
+ * \-----------------/
+ * ^ ^ ^ ^
+ * | | | |
+ * |<--Data Setup -->|<--Delay Time -->| |
+ * | | | |
+ * | | |
+ * | |<-- Quantized Delay Time -->|
+ * | | |
+ *
+ *
+ * We have some issues we must now address:
+ *
+ * (1) The *ideal* sample delay time must not be negative. If it is, we
+ * jam it to zero.
+ *
+ * (2) The *ideal* sample delay time must not be greater than that
+ * allowed by the NFC. If it is, we can increase the data setup
+ * time, which will reduce the delay between the end of the data
+ * setup and the center of the eye. It will also make the eye
+ * larger, which might help with the next issue...
+ *
+ * (3) The *quantized* sample delay time must not fall either before the
+ * eye opens or after it closes (the latter is the problem
+ * illustrated in the above figure).
+ */
+
+ /* Jam a negative ideal sample delay to zero. */
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+
+ /*
+ * Extend the data setup as needed to reduce the ideal sample delay
+ * below the maximum permitted by the NFC.
+ */
+ while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+
+ /* Give a cycle to data setup. */
+ data_setup_in_cycles++;
+ /* Synchronize the data setup time with the cycles. */
+ data_setup_in_ns += clock_period_in_ns;
+ /* Adjust tEYE accordingly. */
+ tEYE += clock_period_in_ns;
+
+ /*
+ * Decrease the ideal sample delay by one half cycle, to keep it
+ * in the middle of the eye.
+ */
+ ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
+
+ /* Jam a negative ideal sample delay to zero. */
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+ }
+
+ /*
+ * Compute the sample delay factor that corresponds to the ideal sample
+ * delay. If the result is too large, then use the maximum allowed
+ * value.
+ *
+ * Notice that we use the ns_to_cycles function to compute the sample
+ * delay factor. We do this because the form of the computation is the
+ * same as that for calculating cycles.
+ */
+ sample_delay_factor =
+ ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
+ clock_period_in_ns, 0);
+
+ if (sample_delay_factor > nfc->max_sample_delay_factor)
+ sample_delay_factor = nfc->max_sample_delay_factor;
+
+ /*
+ * These macros conveniently encapsulate a computation we'll use to
+ * continuously evaluate whether or not the data sample delay is inside
+ * the eye.
+ */
+ #define IDEAL_DELAY ((int) ideal_sample_delay_in_ns)
+
+ #define QUANTIZED_DELAY \
+ ((int) ((sample_delay_factor * clock_period_in_ns) >> \
+ dll_delay_shift))
+
+ #define DELAY_ERROR (abs(QUANTIZED_DELAY - IDEAL_DELAY))
+
+ #define SAMPLE_IS_NOT_WITHIN_THE_EYE (DELAY_ERROR > (tEYE >> 1))
+
+ /*
+ * While the quantized sample time falls outside the eye, reduce the
+ * sample delay or extend the data setup to move the sampling point back
+ * toward the eye. Do not allow the number of data setup cycles to
+ * exceed the maximum allowed by the NFC.
+ */
+ while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+ /*
+ * If control arrives here, the quantized sample delay falls
+ * outside the eye. Check if it's before the eye opens, or after
+ * the eye closes.
+ */
+ if (QUANTIZED_DELAY > IDEAL_DELAY) {
+ /*
+ * If control arrives here, the quantized sample delay
+ * falls after the eye closes. Decrease the quantized
+ * delay time and then go back to re-evaluate.
+ */
+ if (sample_delay_factor != 0)
+ sample_delay_factor--;
+ continue;
+ }
+
+ /*
+ * If control arrives here, the quantized sample delay falls
+ * before the eye opens. Shift the sample point by increasing
+ * data setup time. This will also make the eye larger.
+ */
+
+ /* Give a cycle to data setup. */
+ data_setup_in_cycles++;
+ /* Synchronize the data setup time with the cycles. */
+ data_setup_in_ns += clock_period_in_ns;
+ /* Adjust tEYE accordingly. */
+ tEYE += clock_period_in_ns;
+
+ /*
+ * Decrease the ideal sample delay by one half cycle, to keep it
+ * in the middle of the eye.
+ */
+ ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
+
+ /* ...and one less period for the delay time. */
+ ideal_sample_delay_in_ns -= clock_period_in_ns;
+
+ /* Jam a negative ideal sample delay to zero. */
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+
+ /*
+ * We have a new ideal sample delay, so re-compute the quantized
+ * delay.
+ */
+ sample_delay_factor =
+ ns_to_cycles(
+ ideal_sample_delay_in_ns << dll_delay_shift,
+ clock_period_in_ns, 0);
+
+ if (sample_delay_factor > nfc->max_sample_delay_factor)
+ sample_delay_factor = nfc->max_sample_delay_factor;
+ }
+
+ /* Control arrives here when we're ready to return our results. */
+return_results:
+ hw->data_setup_in_cycles = data_setup_in_cycles;
+ hw->data_hold_in_cycles = data_hold_in_cycles;
+ hw->address_setup_in_cycles = address_setup_in_cycles;
+ hw->use_half_periods = dll_use_half_periods;
+ hw->sample_delay_factor = sample_delay_factor;
+
+ /* Return success. */
+ return 0;
+}
+
+/* Begin the I/O */
+void gpmi_begin(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct timing_threshod *nfc = &timing_default_threshold;
+ unsigned char *gpmi_regs = r->gpmi_regs;
+ unsigned int clock_period_in_ns;
+ uint32_t reg;
+ unsigned int dll_wait_time_in_us;
+ struct gpmi_nfc_hardware_timing hw;
+ int ret;
+
+ /* Enable the clock. */
+ ret = clk_prepare_enable(r->clock);
+ if (ret) {
+ pr_err("We failed in enable the clk\n");
+ goto err_out;
+ }
+
+ /* set ready/busy timeout */
+ writel(0x500 << BP_GPMI_TIMING1_BUSY_TIMEOUT,
+ gpmi_regs + HW_GPMI_TIMING1);
+
+ /* Get the timing information we need. */
+ nfc->clock_frequency_in_hz = clk_get_rate(r->clock);
+ clock_period_in_ns = 1000000000 / nfc->clock_frequency_in_hz;
+
+ gpmi_nfc_compute_hardware_timing(this, &hw);
+
+ /* Set up all the simple timing parameters. */
+ reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
+ BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) |
+ BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles);
+
+ writel(reg, gpmi_regs + HW_GPMI_TIMING0);
+
+ /*
+ * DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD.
+ */
+ writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+ /* Clear out the DLL control fields. */
+ writel(BM_GPMI_CTRL1_RDN_DELAY, gpmi_regs + HW_GPMI_CTRL1_CLR);
+ writel(BM_GPMI_CTRL1_HALF_PERIOD, gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+ /* If no sample delay is called for, return immediately. */
+ if (!hw.sample_delay_factor)
+ return;
+
+ /* Configure the HALF_PERIOD flag. */
+ if (hw.use_half_periods)
+ writel(BM_GPMI_CTRL1_HALF_PERIOD,
+ gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Set the delay factor. */
+ writel(BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor),
+ gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Enable the DLL. */
+ writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /*
+ * After we enable the GPMI DLL, we have to wait 64 clock cycles before
+ * we can use the GPMI.
+ *
+ * Calculate the amount of time we need to wait, in microseconds.
+ */
+ dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;
+
+ if (!dll_wait_time_in_us)
+ dll_wait_time_in_us = 1;
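+
+ /*
+ * Illustrative example (assumed clock): at 100 MHz the period is 10 ns,
+ * so 64 cycles are 640 ns; the division above truncates that to 0 us,
+ * which is then bumped to the 1 us minimum before the udelay() below.
+ */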
+
+ /* Wait for the DLL to settle. */
+ udelay(dll_wait_time_in_us);
+
+err_out:
+ return;
+}
+
+void gpmi_end(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ clk_disable_unprepare(r->clock);
+}
+
+/* Clears a BCH interrupt. */
+void gpmi_clear_bch(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
+}
+
+/* Returns the Ready/Busy status of the given chip. */
+int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
+{
+ struct resources *r = &this->resources;
+ uint32_t mask = 0;
+ uint32_t reg = 0;
+
+ if (GPMI_IS_MX23(this)) {
+ mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
+ reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
+ } else if (GPMI_IS_MX28(this)) {
+ mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
+ reg = readl(r->gpmi_regs + HW_GPMI_STAT);
+ } else
+ pr_err("unknow arch.\n");
+ return reg & mask;
+}
+
+static inline void set_dma_type(struct gpmi_nand_data *this,
+ enum dma_ops_type type)
+{
+ this->last_dma_type = this->dma_type;
+ this->dma_type = type;
+}
+
+int gpmi_send_command(struct gpmi_nand_data *this)
+{
+ struct dma_chan *channel = get_dma_chan(this);
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sgl;
+ int chip = this->current_chip;
+ u32 pio[3];
+
+ /* [1] send out the PIO words */
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
+ | BM_GPMI_CTRL0_ADDRESS_INCREMENT
+ | BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
+ pio[1] = pio[2] = 0;
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
+ if (!desc) {
+ pr_err("step 1 error\n");
+ return -1;
+ }
+
+ /* [2] send out the COMMAND + ADDRESS string stored in @buffer */
+ sgl = &this->cmd_sgl;
+
+ sg_init_one(sgl, this->cmd_buffer, this->command_length);
+ dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
+ desc = dmaengine_prep_slave_sg(channel,
+ sgl, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!desc) {
+ pr_err("step 2 error\n");
+ return -1;
+ }
+
+ /* [3] submit the DMA */
+ set_dma_type(this, DMA_FOR_COMMAND);
+ return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_send_data(struct gpmi_nand_data *this)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ uint32_t command_mode;
+ uint32_t address;
+ u32 pio[2];
+
+ /* [1] PIO */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+ pio[1] = 0;
+ desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
+ if (!desc) {
+ pr_err("step 1 error\n");
+ return -1;
+ }
+
+ /* [2] send DMA request */
+ prepare_data_dma(this, DMA_TO_DEVICE);
+ desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
+ 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ pr_err("step 2 error\n");
+ return -1;
+ }
+ /* [3] submit the DMA */
+ set_dma_type(this, DMA_FOR_WRITE_DATA);
+ return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_read_data(struct gpmi_nand_data *this)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ u32 pio[2];
+
+ /* [1] : send PIO */
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+ | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+ pio[1] = 0;
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
+ if (!desc) {
+ pr_err("step 1 error\n");
+ return -1;
+ }
+
+ /* [2] : send DMA request */
+ prepare_data_dma(this, DMA_FROM_DEVICE);
+ desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
+ 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ pr_err("step 2 error\n");
+ return -1;
+ }
+
+ /* [3] : submit the DMA */
+ set_dma_type(this, DMA_FOR_READ_DATA);
+ return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_send_page(struct gpmi_nand_data *this,
+ dma_addr_t payload, dma_addr_t auxiliary)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ uint32_t command_mode;
+ uint32_t address;
+ uint32_t ecc_command;
+ uint32_t buffer_mask;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ u32 pio[6];
+
+ /* A DMA descriptor that does an ECC page read. */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+ ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
+ buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
+ BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(0);
+ pio[1] = 0;
+ pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+ | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
+ | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
+ pio[3] = geo->page_size;
+ pio[4] = payload;
+ pio[5] = auxiliary;
+
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE,
+ DMA_CTRL_ACK);
+ if (!desc) {
+ pr_err("step 2 error\n");
+ return -1;
+ }
+ set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
+ return start_dma_with_bch_irq(this, desc);
+}
+
+int gpmi_read_page(struct gpmi_nand_data *this,
+ dma_addr_t payload, dma_addr_t auxiliary)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ uint32_t command_mode;
+ uint32_t address;
+ uint32_t ecc_command;
+ uint32_t buffer_mask;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ u32 pio[6];
+
+ /* [1] Wait for the chip to report ready. */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(0);
+ pio[1] = 0;
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio, 2,
+ DMA_TRANS_NONE, 0);
+ if (!desc) {
+ pr_err("step 1 error\n");
+ return -1;
+ }
+
+ /* [2] Enable the BCH block and read. */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+ ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
+ buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
+ | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
+
+ pio[1] = 0;
+ pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+ | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
+ | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
+ pio[3] = geo->page_size;
+ pio[4] = payload;
+ pio[5] = auxiliary;
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ pr_err("step 2 error\n");
+ return -1;
+ }
+
+ /* [3] Disable the BCH block */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
+ pio[1] = 0;
+ pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio, 3,
+ DMA_TRANS_NONE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ pr_err("step 3 error\n");
+ return -1;
+ }
+
+ /* [4] submit the DMA */
+ set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
+ return start_dma_with_bch_irq(this, desc);
+}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
new file mode 100644
index 00000000..9ec51cec
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -0,0 +1,1620 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/gpmi-nand.h>
+#include <linux/mtd/partitions.h>
+#include "gpmi-nand.h"
+
+/* add our own bbt descriptor */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr gpmi_bbt_descr = {
+ .options = 0,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern
+};
+
+/* We will use the whole page + OOB area. */
+static struct nand_ecclayout gpmi_hw_ecclayout = {
+ .eccbytes = 0,
+ .eccpos = { 0, },
+ .oobfree = { {.offset = 0, .length = 0} }
+};
+
+static irqreturn_t bch_irq(int irq, void *cookie)
+{
+ struct gpmi_nand_data *this = cookie;
+
+ gpmi_clear_bch(this);
+ complete(&this->bch_done);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Calculate the ECC strength by hand:
+ * E : The ECC strength.
+ * G : the length of Galois Field.
+ * N : The ECC chunk count per page.
+ * O : the oobsize of the NAND chip.
+ * M : the metadata size per page.
+ *
+ * The formula is :
+ * E * G * N
+ * ------------ <= (O - M)
+ * 8
+ *
+ * So, we get E by:
+ * (O - M) * 8
+ * E <= -------------
+ * G * N
+ */
+static inline int get_ecc_strength(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ int ecc_strength;
+
+ ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
+ / (geo->gf_len * geo->ecc_chunk_count);
+
+ /* Round down to the nearest even number. */
+ return round_down(ecc_strength, 2);
+}
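+
+/*
+ * A worked example (hypothetical 2KiB-page chip, not from any datasheet):
+ * with O = 64 OOB bytes, M = 10 metadata bytes, G = 13 and N = 4 chunks,
+ * E <= (64 - 10) * 8 / (13 * 4) = 432 / 52 = 8.3..., which round_down()
+ * turns into an ECC strength of 8 bits per 512-byte chunk.
+ */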
+
+int common_nfc_set_geometry(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ unsigned int metadata_size;
+ unsigned int status_size;
+ unsigned int block_mark_bit_offset;
+
+ /*
+ * The size of the metadata can be changed, though we set it to 10
+ * bytes now. But it can't be too large, because we have to save
+ * enough space for BCH.
+ */
+ geo->metadata_size = 10;
+
+ /* The default for the length of Galois Field. */
+ geo->gf_len = 13;
+
+ /* The default chunk size. There is no oobsize greater than 512. */
+ geo->ecc_chunk_size = 512;
+ while (geo->ecc_chunk_size < mtd->oobsize)
+ geo->ecc_chunk_size *= 2; /* keep C >= O */
+
+ geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
+
+ /* We use the same ECC strength for all chunks. */
+ geo->ecc_strength = get_ecc_strength(this);
+ if (!geo->ecc_strength) {
+ pr_err("We get a wrong ECC strength.\n");
+ return -EINVAL;
+ }
+
+ geo->page_size = mtd->writesize + mtd->oobsize;
+ geo->payload_size = mtd->writesize;
+
+ /*
+ * The auxiliary buffer contains the metadata and the ECC status. The
+ * metadata is padded to the nearest 32-bit boundary. The ECC status
+ * contains one byte for every ECC chunk, and is also padded to the
+ * nearest 32-bit boundary.
+ */
+ metadata_size = ALIGN(geo->metadata_size, 4);
+ status_size = ALIGN(geo->ecc_chunk_count, 4);
+
+ geo->auxiliary_size = metadata_size + status_size;
+ geo->auxiliary_status_offset = metadata_size;
+
+ if (!this->swap_block_mark)
+ return 0;
+
+ /*
+ * We need to compute the byte and bit offsets of
+ * the physical block mark within the ECC-based view of the page.
+ *
+ * A NAND chip with a 2K page is shown below:
+ * (Block Mark)
+ * | |
+ * | D |
+ * |<---->|
+ * V V
+ * +---+----------+-+----------+-+----------+-+----------+-+
+ * | M | data |E| data |E| data |E| data |E|
+ * +---+----------+-+----------+-+----------+-+----------+-+
+ *
+ * The position of the block mark moves forward in the ECC-based view
+ * of the page, and the delta is:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M)
+ * 8
+ *
+ * With the formula to compute the ECC strength, and the condition
+ * : C >= O (C is the ecc chunk size)
+ *
+ * It's easy to deduce the following result:
+ *
+ * E * G (O - M) C - M C - M
+ * ----------- <= ------- <= -------- < ---------
+ * 8 N N (N - 1)
+ *
+ * So, we get:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M) < C
+ * 8
+ *
+ * The above inequality means the position of the block mark
+ * within the ECC-based view of the page is still in the data chunk,
+ * and NOT in the ECC bits of the chunk.
+ *
+ * Use the following to compute the bit position of the
+ * physical block mark within the ECC-based view of the page:
+ * (page_size - D) * 8
+ *
+ * --Huang Shijie
+ */
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ + geo->metadata_size * 8);
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+ return 0;
+}
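+
+/*
+ * Worked example of the geometry above (illustrative values only):
+ * a 2048+64 page with M = 10, G = 13, N = 4 and E = 8 gives
+ *	auxiliary_size = ALIGN(10, 4) + ALIGN(4, 4) = 12 + 4 = 16 bytes,
+ *	auxiliary_status_offset = 12,
+ *	block_mark_bit_offset = 2048 * 8 - (8 * 13 * 3 + 10 * 8)
+ *	                      = 16384 - 392 = 15992,
+ * i.e. the physical block mark lands at byte 1999, bit 0 of the
+ * ECC-based view, inside the last data chunk.
+ */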
+
+struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
+{
+ int chipnr = this->current_chip;
+
+ return this->dma_chans[chipnr];
+}
+
+/* Can we use the upper layer's buffer directly for DMA? */
+void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
+{
+ struct scatterlist *sgl = &this->data_sgl;
+ int ret;
+
+ this->direct_dma_map_ok = true;
+
+ /* first try to map the upper buffer directly */
+ sg_init_one(sgl, this->upper_buf, this->upper_len);
+ ret = dma_map_sg(this->dev, sgl, 1, dr);
+ if (ret == 0) {
+ /* We have to use our own DMA buffer. */
+ sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE);
+
+ if (dr == DMA_TO_DEVICE)
+ memcpy(this->data_buffer_dma, this->upper_buf,
+ this->upper_len);
+
+ ret = dma_map_sg(this->dev, sgl, 1, dr);
+ if (ret == 0)
+ pr_err("map failed.\n");
+
+ this->direct_dma_map_ok = false;
+ }
+}
+
+/* This will be called after the DMA operation is finished. */
+static void dma_irq_callback(void *param)
+{
+ struct gpmi_nand_data *this = param;
+ struct completion *dma_c = &this->dma_done;
+
+ complete(dma_c);
+
+ switch (this->dma_type) {
+ case DMA_FOR_COMMAND:
+ dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
+ break;
+
+ case DMA_FOR_READ_DATA:
+ dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
+ if (this->direct_dma_map_ok == false)
+ memcpy(this->upper_buf, this->data_buffer_dma,
+ this->upper_len);
+ break;
+
+ case DMA_FOR_WRITE_DATA:
+ dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
+ break;
+
+ case DMA_FOR_READ_ECC_PAGE:
+ case DMA_FOR_WRITE_ECC_PAGE:
+ /* We have to wait for the BCH interrupt to finish. */
+ break;
+
+ default:
+ pr_err("in wrong DMA operation.\n");
+ }
+}
+
+int start_dma_without_bch_irq(struct gpmi_nand_data *this,
+ struct dma_async_tx_descriptor *desc)
+{
+ struct completion *dma_c = &this->dma_done;
+ int err;
+
+ init_completion(dma_c);
+
+ desc->callback = dma_irq_callback;
+ desc->callback_param = this;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(get_dma_chan(this));
+
+ /* Wait for the interrupt from the DMA block. */
+ err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
+ if (!err) {
+ pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type);
+ gpmi_dump_info(this);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/*
+ * This function is used for BCH page reads and writes.
+ * It waits at most ONE second for the BCH interrupt.
+ * In fact, we must wait for two interrupts:
+ *	[1] first the DMA interrupt, and
+ *	[2] then the BCH interrupt.
+ */
+int start_dma_with_bch_irq(struct gpmi_nand_data *this,
+ struct dma_async_tx_descriptor *desc)
+{
+ struct completion *bch_c = &this->bch_done;
+ int err;
+
+ /* Prepare to receive an interrupt from the BCH block. */
+ init_completion(bch_c);
+
+ /* start the DMA */
+ start_dma_without_bch_irq(this, desc);
+
+ /* Wait for the interrupt from the BCH block. */
+ err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
+ if (!err) {
+ pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type);
+ gpmi_dump_info(this);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static int __devinit
+acquire_register_block(struct gpmi_nand_data *this, const char *res_name)
+{
+ struct platform_device *pdev = this->pdev;
+ struct resources *res = &this->resources;
+ struct resource *r;
+ void *p;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
+ if (!r) {
+ pr_err("Can't get resource for %s\n", res_name);
+ return -ENXIO;
+ }
+
+ p = ioremap(r->start, resource_size(r));
+ if (!p) {
+ pr_err("Can't remap %s\n", res_name);
+ return -ENOMEM;
+ }
+
+ if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
+ res->gpmi_regs = p;
+ else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
+ res->bch_regs = p;
+ else
+ pr_err("unknown resource name : %s\n", res_name);
+
+ return 0;
+}
+
+static void release_register_block(struct gpmi_nand_data *this)
+{
+ struct resources *res = &this->resources;
+ if (res->gpmi_regs)
+ iounmap(res->gpmi_regs);
+ if (res->bch_regs)
+ iounmap(res->bch_regs);
+ res->gpmi_regs = NULL;
+ res->bch_regs = NULL;
+}
+
+static int __devinit
+acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
+{
+ struct platform_device *pdev = this->pdev;
+ struct resources *res = &this->resources;
+ const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
+ struct resource *r;
+ int err;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
+ if (!r) {
+ pr_err("Can't get resource for %s\n", res_name);
+ return -ENXIO;
+ }
+
+ err = request_irq(r->start, irq_h, 0, res_name, this);
+ if (err) {
+ pr_err("Can't own %s\n", res_name);
+ return err;
+ }
+
+ res->bch_low_interrupt = r->start;
+ res->bch_high_interrupt = r->end;
+ return 0;
+}
+
+static void release_bch_irq(struct gpmi_nand_data *this)
+{
+ struct resources *res = &this->resources;
+ int i = res->bch_low_interrupt;
+
+ for (; i <= res->bch_high_interrupt; i++)
+ free_irq(i, this);
+}
+
+static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct gpmi_nand_data *this = param;
+ struct resource *r = this->private;
+
+ if (!mxs_dma_is_apbh(chan))
+ return false;
+ /*
+ * only catch the GPMI dma channels :
+ * for mx23 : MX23_DMA_GPMI0 ~ MX23_DMA_GPMI3
+ * (These four channels share the same IRQ!)
+ *
+ * for mx28 : MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
+ * (These eight channels share the same IRQ!)
+ */
+ if (r->start <= chan->chan_id && chan->chan_id <= r->end) {
+ chan->private = &this->dma_data;
+ return true;
+ }
+ return false;
+}
+
+static void release_dma_channels(struct gpmi_nand_data *this)
+{
+ unsigned int i;
+ for (i = 0; i < DMA_CHANS; i++)
+ if (this->dma_chans[i]) {
+ dma_release_channel(this->dma_chans[i]);
+ this->dma_chans[i] = NULL;
+ }
+}
+
+static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
+{
+ struct platform_device *pdev = this->pdev;
+ struct gpmi_nand_platform_data *pdata = this->pdata;
+ struct resources *res = &this->resources;
+ struct resource *r, *r_dma;
+ unsigned int i;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ GPMI_NAND_DMA_CHANNELS_RES_NAME);
+ r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ GPMI_NAND_DMA_INTERRUPT_RES_NAME);
+ if (!r || !r_dma) {
+ pr_err("Can't get resource for DMA\n");
+ return -ENXIO;
+ }
+
+ /* used in gpmi_dma_filter() */
+ this->private = r;
+
+ for (i = r->start; i <= r->end; i++) {
+ struct dma_chan *dma_chan;
+ dma_cap_mask_t mask;
+
+ if (i - r->start >= pdata->max_chip_count)
+ break;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* get the DMA interrupt */
+ if (r_dma->start == r_dma->end) {
+ /* only register the first. */
+ if (i == r->start)
+ this->dma_data.chan_irq = r_dma->start;
+ else
+ this->dma_data.chan_irq = NO_IRQ;
+ } else
+ this->dma_data.chan_irq = r_dma->start + (i - r->start);
+
+ dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
+ if (!dma_chan)
+ goto acquire_err;
+
+ /* fill the first empty item */
+ this->dma_chans[i - r->start] = dma_chan;
+ }
+
+ res->dma_low_channel = r->start;
+ res->dma_high_channel = i;
+ return 0;
+
+acquire_err:
+ pr_err("Can't acquire DMA channel %u\n", i);
+ release_dma_channels(this);
+ return -EINVAL;
+}
+
+static int __devinit acquire_resources(struct gpmi_nand_data *this)
+{
+ struct resources *res = &this->resources;
+ int ret;
+
+ ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_bch_irq(this, bch_irq);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_dma_channels(this);
+ if (ret)
+ goto exit_dma_channels;
+
+ res->clock = clk_get(&this->pdev->dev, NULL);
+ if (IS_ERR(res->clock)) {
+ pr_err("can not get the clock\n");
+ ret = -ENOENT;
+ goto exit_clock;
+ }
+ return 0;
+
+exit_clock:
+ release_dma_channels(this);
+exit_dma_channels:
+ release_bch_irq(this);
+exit_regs:
+ release_register_block(this);
+ return ret;
+}
+
+static void release_resources(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+
+ clk_put(r->clock);
+ release_register_block(this);
+ release_bch_irq(this);
+ release_dma_channels(this);
+}
+
+static int __devinit init_hardware(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ /*
+ * This structure contains the "safe" GPMI timing that should succeed
+ * with any NAND Flash device
+ * (although with less-than-optimal performance).
+ */
+ struct nand_timing safe_timing = {
+ .data_setup_in_ns = 80,
+ .data_hold_in_ns = 60,
+ .address_setup_in_ns = 25,
+ .gpmi_sample_delay_in_ns = 6,
+ .tREA_in_ns = -1,
+ .tRLOH_in_ns = -1,
+ .tRHOH_in_ns = -1,
+ };
+
+ /* Initialize the hardware. */
+ ret = gpmi_init(this);
+ if (ret)
+ return ret;
+
+ this->timing = safe_timing;
+ return 0;
+}
+
+static int read_page_prepare(struct gpmi_nand_data *this,
+ void *destination, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ void **use_virt, dma_addr_t *use_phys)
+{
+ struct device *dev = this->dev;
+
+ if (virt_addr_valid(destination)) {
+ dma_addr_t dest_phys;
+
+ dest_phys = dma_map_single(dev, destination,
+ length, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dest_phys)) {
+ if (alt_size < length) {
+ pr_err("Alternate buffer is too small\n");
+ return -ENOMEM;
+ }
+ goto map_failed;
+ }
+ *use_virt = destination;
+ *use_phys = dest_phys;
+ this->direct_dma_map_ok = true;
+ return 0;
+ }
+
+map_failed:
+ *use_virt = alt_virt;
+ *use_phys = alt_phys;
+ this->direct_dma_map_ok = false;
+ return 0;
+}
+
+static inline void read_page_end(struct gpmi_nand_data *this,
+ void *destination, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ void *used_virt, dma_addr_t used_phys)
+{
+ if (this->direct_dma_map_ok)
+ dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
+}
+
+static inline void read_page_swap_end(struct gpmi_nand_data *this,
+ void *destination, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ void *used_virt, dma_addr_t used_phys)
+{
+ if (!this->direct_dma_map_ok)
+ memcpy(destination, alt_virt, length);
+}
+
+static int send_page_prepare(struct gpmi_nand_data *this,
+ const void *source, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ const void **use_virt, dma_addr_t *use_phys)
+{
+ struct device *dev = this->dev;
+
+ if (virt_addr_valid(source)) {
+ dma_addr_t source_phys;
+
+ source_phys = dma_map_single(dev, (void *)source, length,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, source_phys)) {
+ if (alt_size < length) {
+ pr_err("Alternate buffer is too small\n");
+ return -ENOMEM;
+ }
+ goto map_failed;
+ }
+ *use_virt = source;
+ *use_phys = source_phys;
+ return 0;
+ }
+map_failed:
+ /*
+ * Copy the content of the source buffer into the alternate
+ * buffer and set up the return values accordingly.
+ */
+ memcpy(alt_virt, source, length);
+
+ *use_virt = alt_virt;
+ *use_phys = alt_phys;
+ return 0;
+}
+
+static void send_page_end(struct gpmi_nand_data *this,
+ const void *source, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ const void *used_virt, dma_addr_t used_phys)
+{
+ struct device *dev = this->dev;
+ if (used_virt == source)
+ dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
+}
+
+static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+
+ if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
+ dma_free_coherent(dev, this->page_buffer_size,
+ this->page_buffer_virt,
+ this->page_buffer_phys);
+ kfree(this->cmd_buffer);
+ kfree(this->data_buffer_dma);
+
+ this->cmd_buffer = NULL;
+ this->data_buffer_dma = NULL;
+ this->page_buffer_virt = NULL;
+ this->page_buffer_size = 0;
+}
+
+/* Allocate the DMA buffers */
+static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct device *dev = this->dev;
+
+ /* [1] Allocate a command buffer. PAGE_SIZE is enough. */
+ this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA);
+ if (this->cmd_buffer == NULL)
+ goto error_alloc;
+
+ /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */
+ this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA);
+ if (this->data_buffer_dma == NULL)
+ goto error_alloc;
+
+ /*
+ * [3] Allocate the page buffer.
+ *
+ * Both the payload buffer and the auxiliary buffer must appear on
+ * 32-bit boundaries. We presume the size of the payload buffer is a
+ * power of two and is much larger than four, which guarantees the
+ * auxiliary buffer will appear on a 32-bit boundary.
+ */
+ this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
+ this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
+ &this->page_buffer_phys, GFP_DMA);
+ if (!this->page_buffer_virt)
+ goto error_alloc;
+
+
+ /* Slice up the page buffer. */
+ this->payload_virt = this->page_buffer_virt;
+ this->payload_phys = this->page_buffer_phys;
+ this->auxiliary_virt = this->payload_virt + geo->payload_size;
+ this->auxiliary_phys = this->payload_phys + geo->payload_size;
+ return 0;
+
+error_alloc:
+ gpmi_free_dma_buffer(this);
+ pr_err("allocate DMA buffer ret!!\n");
+ return -ENOMEM;
+}
+
+static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+ int ret;
+
+ /*
+ * Every operation begins with a command byte and a series of zero or
+ * more address bytes. These are distinguished by either the Address
+ * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
+ * asserted. When MTD is ready to execute the command, it will deassert
+ * both latch enables.
+ *
+ * Rather than run a separate DMA operation for every single byte, we
+ * queue them up and run a single DMA operation for the entire series
+ * of command and data bytes. NAND_CMD_NONE means the END of the queue.
+ */
+ if ((ctrl & (NAND_ALE | NAND_CLE))) {
+ if (data != NAND_CMD_NONE)
+ this->cmd_buffer[this->command_length++] = data;
+ return;
+ }
+
+ if (!this->command_length)
+ return;
+
+ ret = gpmi_send_command(this);
+ if (ret)
+ pr_err("Chip: %u, Error %d\n", this->current_chip, ret);
+
+ this->command_length = 0;
+}
+
+static int gpmi_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ return gpmi_is_ready(this, this->current_chip);
+}
+
+static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ if ((this->current_chip < 0) && (chipnr >= 0))
+ gpmi_begin(this);
+ else if ((this->current_chip >= 0) && (chipnr < 0))
+ gpmi_end(this);
+
+ this->current_chip = chipnr;
+}
+
+static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ pr_debug("len is %d\n", len);
+ this->upper_buf = buf;
+ this->upper_len = len;
+
+ gpmi_read_data(this);
+}
+
+static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ pr_debug("len is %d\n", len);
+ this->upper_buf = (uint8_t *)buf;
+ this->upper_len = len;
+
+ gpmi_send_data(this);
+}
+
+static uint8_t gpmi_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+ uint8_t *buf = this->data_buffer_dma;
+
+ gpmi_read_buf(mtd, buf, 1);
+ return buf[0];
+}
+
+/*
+ * Handles block mark swapping.
+ * It can be called both to swap the block mark and to swap it back,
+ * because the two operations are the same.
+ */
+static void block_mark_swapping(struct gpmi_nand_data *this,
+ void *payload, void *auxiliary)
+{
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ unsigned char *p;
+ unsigned char *a;
+ unsigned int bit;
+ unsigned char mask;
+ unsigned char from_data;
+ unsigned char from_oob;
+
+ if (!this->swap_block_mark)
+ return;
+
+ /*
+ * If control arrives here, we're swapping. Make some convenience
+ * variables.
+ */
+ bit = nfc_geo->block_mark_bit_offset;
+ p = payload + nfc_geo->block_mark_byte_offset;
+ a = auxiliary;
+
+ /*
+ * Get the byte from the data area that overlays the block mark. Since
+ * the ECC engine applies its own view to the bits in the page, the
+ * physical block mark won't (in general) appear on a byte boundary in
+ * the data.
+ */
+ from_data = (p[0] >> bit) | (p[1] << (8 - bit));
+
+ /* Get the byte from the OOB. */
+ from_oob = a[0];
+
+ /* Swap them. */
+ a[0] = from_data;
+
+ mask = (0x1 << bit) - 1;
+ p[0] = (p[0] & mask) | (from_oob << bit);
+
+ mask = ~0 << bit;
+ p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
+}
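+
+/*
+ * Illustration (values are hypothetical): with block_mark_bit_offset = 5,
+ * from_data is built from the top 3 bits of p[0] and the low 5 bits of
+ * p[1], i.e. the data byte that physically straddles the block mark
+ * position; that byte and the first auxiliary (OOB) byte then trade places.
+ */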
+
+static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ void *payload_virt;
+ dma_addr_t payload_phys;
+ void *auxiliary_virt;
+ dma_addr_t auxiliary_phys;
+ unsigned int i;
+ unsigned char *status;
+ unsigned int failed;
+ unsigned int corrected;
+ int ret;
+
+ pr_debug("page number is : %d\n", page);
+ ret = read_page_prepare(this, buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ &payload_virt, &payload_phys);
+ if (ret) {
+ pr_err("Inadequate DMA buffer\n");
+ ret = -ENOMEM;
+ return ret;
+ }
+ auxiliary_virt = this->auxiliary_virt;
+ auxiliary_phys = this->auxiliary_phys;
+
+ /* go! */
+ ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
+ read_page_end(this, buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+ if (ret) {
+ pr_err("Error in ECC-based read: %d\n", ret);
+ goto exit_nfc;
+ }
+
+ /* handle the block mark swapping */
+ block_mark_swapping(this, payload_virt, auxiliary_virt);
+
+ /* Loop over status bytes, accumulating ECC status. */
+ failed = 0;
+ corrected = 0;
+ status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+
+ for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
+ if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
+ continue;
+
+ if (*status == STATUS_UNCORRECTABLE) {
+ failed++;
+ continue;
+ }
+ corrected += *status;
+ }
+
+ /*
+ * Propagate ECC status to the owning MTD only when a chunk failed or
+ * the number of corrected bit flips nearly reaches our ECC correction
+ * threshold.
+ */
+ if (failed || corrected >= (nfc_geo->ecc_strength - 1)) {
+ mtd->ecc_stats.failed += failed;
+ mtd->ecc_stats.corrected += corrected;
+ }
+
+ /*
+ * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for
+ * details about our policy for delivering the OOB.
+ *
+ * We fill the caller's buffer with set bits, and then copy the block
+ * mark to the caller's buffer. Note that, if block mark swapping was
+ * necessary, it has already been done, so we can rely on the first
+ * byte of the auxiliary buffer to contain the block mark.
+ */
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+ chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+
+ read_page_swap_end(this, buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+exit_nfc:
+ return ret;
+}
+
+static void gpmi_ecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ const void *payload_virt;
+ dma_addr_t payload_phys;
+ const void *auxiliary_virt;
+ dma_addr_t auxiliary_phys;
+ int ret;
+
+ pr_debug("ecc write page.\n");
+ if (this->swap_block_mark) {
+ /*
+ * If control arrives here, we're doing block mark swapping.
+ * Since we can't modify the caller's buffers, we must copy them
+ * into our own.
+ */
+ memcpy(this->payload_virt, buf, mtd->writesize);
+ payload_virt = this->payload_virt;
+ payload_phys = this->payload_phys;
+
+ memcpy(this->auxiliary_virt, chip->oob_poi,
+ nfc_geo->auxiliary_size);
+ auxiliary_virt = this->auxiliary_virt;
+ auxiliary_phys = this->auxiliary_phys;
+
+ /* Handle block mark swapping. */
+ block_mark_swapping(this,
+ (void *) payload_virt, (void *) auxiliary_virt);
+ } else {
+ /*
+ * If control arrives here, we're not doing block mark swapping,
+ * so we can try to use the caller's buffers.
+ */
+ ret = send_page_prepare(this,
+ buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ &payload_virt, &payload_phys);
+ if (ret) {
+ pr_err("Inadequate payload DMA buffer\n");
+ return;
+ }
+
+ ret = send_page_prepare(this,
+ chip->oob_poi, mtd->oobsize,
+ this->auxiliary_virt, this->auxiliary_phys,
+ nfc_geo->auxiliary_size,
+ &auxiliary_virt, &auxiliary_phys);
+ if (ret) {
+ pr_err("Inadequate auxiliary DMA buffer\n");
+ goto exit_auxiliary;
+ }
+ }
+
+ /* Ask the NFC. */
+ ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
+ if (ret)
+ pr_err("Error in ECC-based write: %d\n", ret);
+
+ if (!this->swap_block_mark) {
+ send_page_end(this, chip->oob_poi, mtd->oobsize,
+ this->auxiliary_virt, this->auxiliary_phys,
+ nfc_geo->auxiliary_size,
+ auxiliary_virt, auxiliary_phys);
+exit_auxiliary:
+ send_page_end(this, buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+ }
+}
+
+/*
+ * There are several places in this driver where we have to handle the OOB and
+ * block marks. This is the function where things are the most complicated, so
+ * this is where we try to explain it all. All the other places refer back to
+ * here.
+ *
+ * These are the rules, in order of decreasing importance:
+ *
+ * 1) Nothing the caller does can be allowed to imperil the block mark.
+ *
+ * 2) In read operations, the first byte of the OOB we return must reflect the
+ * true state of the block mark, no matter where that block mark appears in
+ * the physical page.
+ *
+ * 3) ECC-based read operations return an OOB full of set bits (since we never
+ * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
+ * return).
+ *
+ * 4) "Raw" read operations return a direct view of the physical bytes in the
+ * page, using the conventional definition of which bytes are data and which
+ * are OOB. This gives the caller a way to see the actual, physical bytes
+ * in the page, without the distortions applied by our ECC engine.
+ *
+ *
+ * What we do for this specific read operation depends on two questions:
+ *
+ * 1) Are we doing a "raw" read, or an ECC-based read?
+ *
+ * 2) Are we using block mark swapping or transcription?
+ *
+ * There are four cases, illustrated by the following Karnaugh map:
+ *
+ * | Raw | ECC-based |
+ * -------------+-------------------------+-------------------------+
+ * | Read the conventional | |
+ * | OOB at the end of the | |
+ * Swapping | page and return it. It | |
+ * | contains exactly what | |
+ * | we want. | Read the block mark and |
+ * -------------+-------------------------+ return it in a buffer |
+ * | Read the conventional | full of set bits. |
+ * | OOB at the end of the | |
+ * | page and also the block | |
+ * Transcribing | mark in the metadata. | |
+ * | Copy the block mark | |
+ * | into the first byte of | |
+ * | the OOB. | |
+ * -------------+-------------------------+-------------------------+
+ *
+ * Note that we break rule #4 in the Transcribing/Raw case because we're not
+ * giving an accurate view of the actual, physical bytes in the page (we're
+ * overwriting the block mark). That's OK because it's more important to follow
+ * rule #2.
+ *
+ * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
+ * easy. When reading a page, for example, the NAND Flash MTD code calls our
+ * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
+ * ECC-based or raw view of the page is implicit in which function it calls
+ * (there is a similar pair of ECC-based/raw functions for writing).
+ *
+ * Since MTD assumes the OOB is not covered by ECC, there is no pair of
+ * ECC-based/raw functions for reading or writing the OOB. The fact that the
+ * caller wants an ECC-based or raw view of the page is not propagated down to
+ * this driver.
+ */
+static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ struct gpmi_nand_data *this = chip->priv;
+
+ pr_debug("page number is %d\n", page);
+ /* clear the OOB buffer */
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+
+ /* Read out the conventional OOB. */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /*
+ * Now, we want to make sure the block mark is correct. In the
+ * Swapping/Raw case, we already have it. Otherwise, we need to
+ * explicitly read it.
+ */
+ if (!this->swap_block_mark) {
+ /* Read the block mark into the first byte of the OOB buffer. */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ chip->oob_poi[0] = chip->read_byte(mtd);
+ }
+
+ /*
+ * Return true, indicating that the next call to this function must send
+ * a command.
+ */
+ return true;
+}
+
+static int
+gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ /*
+ * The BCH will use all of the (page + OOB).
+ * Our gpmi_hw_ecclayout can only prevent JFFS2 from writing the OOB.
+ * But it cannot stop some ioctls, such as MEMWRITEOOB, which use
+ * MTD_OPS_PLACE_OOB. So we have to implement this function to prohibit
+ * those ioctls too.
+ */
+ return -EPERM;
+}
+
+static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+ int block, ret = 0;
+ uint8_t *block_mark;
+ int column, page, status, chipnr;
+
+ /* Get block number */
+ block = (int)(ofs >> chip->bbt_erase_shift);
+ if (chip->bbt)
+ chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+
+ /* Do we have a flash based bad block table ? */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ ret = nand_update_bbt(mtd, ofs);
+ else {
+ chipnr = (int)(ofs >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ column = this->swap_block_mark ? mtd->writesize : 0;
+
+ /* Write the block mark. */
+ block_mark = this->data_buffer_dma;
+ block_mark[0] = 0; /* bad block marker */
+
+ /* Shift to get page */
+ page = (int)(ofs >> chip->page_shift);
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
+ chip->write_buf(mtd, block_mark, 1);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ ret = -EIO;
+
+ chip->select_chip(mtd, -1);
+ }
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ return ret;
+}
+
+static int nand_boot_set_geometry(struct gpmi_nand_data *this)
+{
+ struct boot_rom_geometry *geometry = &this->rom_geometry;
+
+ /*
+ * Set the boot block stride size.
+ *
+ * In principle, we should be reading this from the OTP bits, since
+ * that's where the ROM is going to get it. In fact, we don't have any
+ * way to read the OTP bits, so we go with the default and hope for the
+ * best.
+ */
+ geometry->stride_size_in_pages = 64;
+
+ /*
+ * Set the search area stride exponent.
+ *
+ * In principle, we should be reading this from the OTP bits, since
+ * that's where the ROM is going to get it. In fact, we don't have any
+ * way to read the OTP bits, so we go with the default and hope for the
+ * best.
+ */
+ geometry->search_area_stride_exponent = 2;
+ return 0;
+}
+
+static const char *fingerprint = "STMP";
+static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
+{
+ struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+ struct device *dev = this->dev;
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = &this->nand;
+ unsigned int search_area_size_in_strides;
+ unsigned int stride;
+ unsigned int page;
+ loff_t byte;
+ uint8_t *buffer = chip->buffers->databuf;
+ int saved_chip_number;
+ int found_an_ncb_fingerprint = false;
+
+ /* Compute the number of strides in a search area. */
+ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+
+ saved_chip_number = this->current_chip;
+ chip->select_chip(mtd, 0);
+
+ /*
+ * Loop through the first search area, looking for the NCB fingerprint.
+ */
+ dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
+
+ for (stride = 0; stride < search_area_size_in_strides; stride++) {
+ /* Compute the page and byte addresses. */
+ page = stride * rom_geo->stride_size_in_pages;
+ byte = page * mtd->writesize;
+
+ dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
+
+ /*
+ * Read the NCB fingerprint. The fingerprint is four bytes long
+ * and starts in the 12th byte of the page.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
+ chip->read_buf(mtd, buffer, strlen(fingerprint));
+
+ /* Look for the fingerprint. */
+ if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
+ found_an_ncb_fingerprint = true;
+ break;
+ }
+
+ }
+
+ chip->select_chip(mtd, saved_chip_number);
+
+ if (found_an_ncb_fingerprint)
+ dev_dbg(dev, "\tFound a fingerprint\n");
+ else
+ dev_dbg(dev, "\tNo fingerprint found\n");
+ return found_an_ncb_fingerprint;
+}
+
+/* Writes a transcription stamp. */
+static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+ struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = &this->nand;
+ unsigned int block_size_in_pages;
+ unsigned int search_area_size_in_strides;
+ unsigned int search_area_size_in_pages;
+ unsigned int search_area_size_in_blocks;
+ unsigned int block;
+ unsigned int stride;
+ unsigned int page;
+ loff_t byte;
+ uint8_t *buffer = chip->buffers->databuf;
+ int saved_chip_number;
+ int status;
+
+ /* Compute the search area geometry. */
+ block_size_in_pages = mtd->erasesize / mtd->writesize;
+ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+ search_area_size_in_pages = search_area_size_in_strides *
+ rom_geo->stride_size_in_pages;
+ search_area_size_in_blocks =
+ (search_area_size_in_pages + (block_size_in_pages - 1)) /
+ block_size_in_pages;
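+
+ /*
+ * For example (hypothetical chip): a 2KiB-page, 128KiB-block NAND with
+ * the default exponent of 2 and a stride of 64 pages gives 4 strides =
+ * 256 pages = 256 / 64 = 4 blocks to erase and stamp.
+ */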
+
+ dev_dbg(dev, "Search Area Geometry :\n");
+ dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
+ dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
+ dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);
+
+ /* Select chip 0. */
+ saved_chip_number = this->current_chip;
+ chip->select_chip(mtd, 0);
+
+ /* Loop over blocks in the first search area, erasing them. */
+ dev_dbg(dev, "Erasing the search area...\n");
+
+ for (block = 0; block < search_area_size_in_blocks; block++) {
+ /* Compute the page address. */
+ page = block * block_size_in_pages;
+
+ /* Erase this block. */
+ dev_dbg(dev, "\tErasing block 0x%x\n", block);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+ /* Wait for the erase to finish. */
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ dev_err(dev, "[%s] Erase failed.\n", __func__);
+ }
+
+ /* Write the NCB fingerprint into the page buffer. */
+ memset(buffer, ~0, mtd->writesize);
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+ memcpy(buffer + 12, fingerprint, strlen(fingerprint));
+
+ /* Loop through the first search area, writing NCB fingerprints. */
+ dev_dbg(dev, "Writing NCB fingerprints...\n");
+ for (stride = 0; stride < search_area_size_in_strides; stride++) {
+ /* Compute the page and byte addresses. */
+ page = stride * rom_geo->stride_size_in_pages;
+ byte = page * mtd->writesize;
+
+ /* Write the first page of the current stride. */
+ dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+ chip->ecc.write_page_raw(mtd, chip, buffer);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ /* Wait for the write to finish. */
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ dev_err(dev, "[%s] Write failed.\n", __func__);
+ }
+
+ /* Deselect chip 0. */
+ chip->select_chip(mtd, saved_chip_number);
+ return 0;
+}
+
+static int mx23_boot_init(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = &this->mtd;
+ unsigned int block_count;
+ unsigned int block;
+ int chipnr;
+ int page;
+ loff_t byte;
+ uint8_t block_mark;
+ int ret = 0;
+
+ /*
+ * If control arrives here, we can't use block mark swapping, which
+ * means we're forced to use transcription. First, scan for the
+ * transcription stamp. If we find it, then we don't have to do
+ * anything -- the block marks are already transcribed.
+ */
+ if (mx23_check_transcription_stamp(this))
+ return 0;
+
+ /*
+ * If control arrives here, we couldn't find a transcription stamp,
+ * so we presume the block marks are in the conventional location.
+ */
+ dev_dbg(dev, "Transcribing bad block marks...\n");
+
+ /* Compute the number of blocks in the entire medium. */
+ block_count = chip->chipsize >> chip->phys_erase_shift;
+
+ /*
+ * Loop over all the blocks in the medium, transcribing block marks as
+ * we go.
+ */
+ for (block = 0; block < block_count; block++) {
+ /*
+ * Compute the chip, page and byte addresses for this block's
+ * conventional mark.
+ */
+ chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
+ page = block << (chip->phys_erase_shift - chip->page_shift);
+ byte = block << chip->phys_erase_shift;
+
+ /* Send the command to read the conventional block mark. */
+ chip->select_chip(mtd, chipnr);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ block_mark = chip->read_byte(mtd);
+ chip->select_chip(mtd, -1);
+
+ /*
+ * Check if the block is marked bad. If so, we need to mark it
+ * again, but this time the result will be a mark in the
+ * location where we transcribe block marks.
+ */
+ if (block_mark != 0xff) {
+ dev_dbg(dev, "Transcribing mark in block %u\n", block);
+ ret = chip->block_markbad(mtd, byte);
+ if (ret)
+ dev_err(dev, "Failed to mark block bad with "
+ "ret %d\n", ret);
+ }
+ }
+
+ /* Write the stamp that indicates we've transcribed the block marks. */
+ mx23_write_transcription_stamp(this);
+ return 0;
+}
+
+static int nand_boot_init(struct gpmi_nand_data *this)
+{
+ nand_boot_set_geometry(this);
+
+ /* This is arch-specific ROM initialization before the BBT scanning. */
+ if (GPMI_IS_MX23(this))
+ return mx23_boot_init(this);
+ return 0;
+}
+
+static int gpmi_set_geometry(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ /* Free the temporary DMA memory for reading ID. */
+ gpmi_free_dma_buffer(this);
+
+ /* Set up the NFC geometry which is used by BCH. */
+ ret = bch_set_geometry(this);
+ if (ret) {
+ pr_err("set geometry ret : %d\n", ret);
+ return ret;
+ }
+
+ /* Alloc the new DMA buffers according to the pagesize and oobsize */
+ return gpmi_alloc_dma_buffer(this);
+}
+
+static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
+ if (GPMI_IS_MX23(this))
+ this->swap_block_mark = false;
+ else
+ this->swap_block_mark = true;
+
+ /* Set up the medium geometry */
+ ret = gpmi_set_geometry(this);
+ if (ret)
+ return ret;
+
+ /* NAND boot init, depends on the gpmi_set_geometry(). */
+ return nand_boot_init(this);
+}
+
+static int gpmi_scan_bbt(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+ int ret;
+
+ /* Prepare for the BBT scan. */
+ ret = gpmi_pre_bbt_scan(this);
+ if (ret)
+ return ret;
+
+ /* use the default BBT implementation */
+ return nand_default_bbt(mtd);
+}
+
+void gpmi_nfc_exit(struct gpmi_nand_data *this)
+{
+ nand_release(&this->mtd);
+ gpmi_free_dma_buffer(this);
+}
+
+static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
+{
+ struct gpmi_nand_platform_data *pdata = this->pdata;
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = &this->nand;
+ int ret;
+
+ /* init current chip */
+ this->current_chip = -1;
+
+ /* init the MTD data structures */
+ mtd->priv = chip;
+ mtd->name = "gpmi-nand";
+ mtd->owner = THIS_MODULE;
+
+ /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
+ chip->priv = this;
+ chip->select_chip = gpmi_select_chip;
+ chip->cmd_ctrl = gpmi_cmd_ctrl;
+ chip->dev_ready = gpmi_dev_ready;
+ chip->read_byte = gpmi_read_byte;
+ chip->read_buf = gpmi_read_buf;
+ chip->write_buf = gpmi_write_buf;
+ chip->ecc.read_page = gpmi_ecc_read_page;
+ chip->ecc.write_page = gpmi_ecc_write_page;
+ chip->ecc.read_oob = gpmi_ecc_read_oob;
+ chip->ecc.write_oob = gpmi_ecc_write_oob;
+ chip->scan_bbt = gpmi_scan_bbt;
+ chip->badblock_pattern = &gpmi_bbt_descr;
+ chip->block_markbad = gpmi_block_markbad;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 1;
+ chip->ecc.layout = &gpmi_hw_ecclayout;
+
+ /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
+ this->bch_geometry.payload_size = 1024;
+ this->bch_geometry.auxiliary_size = 128;
+ ret = gpmi_alloc_dma_buffer(this);
+ if (ret)
+ goto err_out;
+
+ ret = nand_scan(mtd, pdata->max_chip_count);
+ if (ret) {
+ pr_err("Chip scan failed\n");
+ goto err_out;
+ }
+
+ ret = mtd_device_parse_register(mtd, NULL, NULL,
+ pdata->partitions, pdata->partition_count);
+ if (ret)
+ goto err_out;
+ return 0;
+
+err_out:
+ gpmi_nfc_exit(this);
+ return ret;
+}
+
+static int __devinit gpmi_nand_probe(struct platform_device *pdev)
+{
+ struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct gpmi_nand_data *this;
+ int ret;
+
+ this = kzalloc(sizeof(*this), GFP_KERNEL);
+ if (!this) {
+ pr_err("Failed to allocate per-device memory\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, this);
+ this->pdev = pdev;
+ this->dev = &pdev->dev;
+ this->pdata = pdata;
+
+ if (pdata->platform_init) {
+ ret = pdata->platform_init();
+ if (ret)
+ goto platform_init_error;
+ }
+
+ ret = acquire_resources(this);
+ if (ret)
+ goto exit_acquire_resources;
+
+ ret = init_hardware(this);
+ if (ret)
+ goto exit_nfc_init;
+
+ ret = gpmi_nfc_init(this);
+ if (ret)
+ goto exit_nfc_init;
+
+ return 0;
+
+exit_nfc_init:
+ release_resources(this);
+platform_init_error:
+exit_acquire_resources:
+ platform_set_drvdata(pdev, NULL);
+ kfree(this);
+ return ret;
+}
+
+static int __exit gpmi_nand_remove(struct platform_device *pdev)
+{
+ struct gpmi_nand_data *this = platform_get_drvdata(pdev);
+
+ gpmi_nfc_exit(this);
+ release_resources(this);
+ platform_set_drvdata(pdev, NULL);
+ kfree(this);
+ return 0;
+}
+
+static const struct platform_device_id gpmi_ids[] = {
+ {
+ .name = "imx23-gpmi-nand",
+ .driver_data = IS_MX23,
+ }, {
+ .name = "imx28-gpmi-nand",
+ .driver_data = IS_MX28,
+ }, {},
+};
+
+static struct platform_driver gpmi_nand_driver = {
+ .driver = {
+ .name = "gpmi-nand",
+ },
+ .probe = gpmi_nand_probe,
+ .remove = __exit_p(gpmi_nand_remove),
+ .id_table = gpmi_ids,
+};
+
+static int __init gpmi_nand_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&gpmi_nand_driver);
+ if (err == 0)
+ printk(KERN_INFO "GPMI NAND driver registered. (IMX)\n");
+ else
+ pr_err("i.MX GPMI NAND driver registration failed\n");
+ return err;
+}
+
+static void __exit gpmi_nand_exit(void)
+{
+ platform_driver_unregister(&gpmi_nand_driver);
+}
+
+module_init(gpmi_nand_init);
+module_exit(gpmi_nand_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
new file mode 100644
index 00000000..ec6180d4
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -0,0 +1,273 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
+#define __DRIVERS_MTD_NAND_GPMI_NAND_H
+
+#include <linux/mtd/nand.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/fsl/mxs-dma.h>
+
+struct resources {
+ void *gpmi_regs;
+ void *bch_regs;
+ unsigned int bch_low_interrupt;
+ unsigned int bch_high_interrupt;
+ unsigned int dma_low_channel;
+ unsigned int dma_high_channel;
+ struct clk *clock;
+};
+
+/**
+ * struct bch_geometry - BCH geometry description.
+ * @gf_len: The length of Galois Field. (e.g., 13 or 14)
+ * @ecc_strength: A number that describes the strength of the ECC
+ * algorithm.
+ * @page_size: The size, in bytes, of a physical page, including
+ * both data and OOB.
+ * @metadata_size: The size, in bytes, of the metadata.
+ * @ecc_chunk_size: The size, in bytes, of a single ECC chunk. Note
+ * the first chunk in the page includes both data and
+ * metadata, so it's a bit larger than this value.
+ * @ecc_chunk_count: The number of ECC chunks in the page.
+ * @payload_size: The size, in bytes, of the payload buffer.
+ * @auxiliary_size: The size, in bytes, of the auxiliary buffer.
+ * @auxiliary_status_offset: The offset into the auxiliary buffer at which
+ * the ECC status appears.
+ * @block_mark_byte_offset: The byte offset in the ECC-based page view at
+ * which the underlying physical block mark appears.
+ * @block_mark_bit_offset: The bit offset into the ECC-based page view at
+ * which the underlying physical block mark appears.
+ */
+struct bch_geometry {
+ unsigned int gf_len;
+ unsigned int ecc_strength;
+ unsigned int page_size;
+ unsigned int metadata_size;
+ unsigned int ecc_chunk_size;
+ unsigned int ecc_chunk_count;
+ unsigned int payload_size;
+ unsigned int auxiliary_size;
+ unsigned int auxiliary_status_offset;
+ unsigned int block_mark_byte_offset;
+ unsigned int block_mark_bit_offset;
+};
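+
+/*
+ * Example values (hypothetical 2048+64 page, matching the worked example
+ * in gpmi-nand.c): gf_len = 13, ecc_strength = 8, page_size = 2112,
+ * metadata_size = 10, ecc_chunk_size = 512, ecc_chunk_count = 4,
+ * payload_size = 2048, auxiliary_size = 16, auxiliary_status_offset = 12.
+ */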
+
+/**
+ * struct boot_rom_geometry - Boot ROM geometry description.
+ * @stride_size_in_pages: The size of a boot block stride, in pages.
+ * @search_area_stride_exponent: The logarithm to base 2 of the size of a
+ * search area in boot block strides.
+ */
+struct boot_rom_geometry {
+ unsigned int stride_size_in_pages;
+ unsigned int search_area_stride_exponent;
+};
+
+/* DMA operations types */
+enum dma_ops_type {
+ DMA_FOR_COMMAND = 1,
+ DMA_FOR_READ_DATA,
+ DMA_FOR_WRITE_DATA,
+ DMA_FOR_READ_ECC_PAGE,
+ DMA_FOR_WRITE_ECC_PAGE
+};
+
+/**
+ * struct nand_timing - Fundamental timing attributes for NAND.
+ * @data_setup_in_ns: The data setup time, in nanoseconds. Usually the
+ * maximum of tDS and tWP. A negative value
+ * indicates this characteristic isn't known.
+ * @data_hold_in_ns: The data hold time, in nanoseconds. Usually the
+ * maximum of tDH, tWH and tREH. A negative value
+ * indicates this characteristic isn't known.
+ * @address_setup_in_ns: The address setup time, in nanoseconds. Usually
+ * the maximum of tCLS, tCS and tALS. A negative
+ * value indicates this characteristic isn't known.
+ * @gpmi_sample_delay_in_ns: A GPMI-specific timing parameter. A negative value
+ * indicates this characteristic isn't known.
+ * @tREA_in_ns: tREA, in nanoseconds, from the data sheet. A
+ * negative value indicates this characteristic isn't
+ * known.
+ * @tRLOH_in_ns: tRLOH, in nanoseconds, from the data sheet. A
+ * negative value indicates this characteristic isn't
+ * known.
+ * @tRHOH_in_ns: tRHOH, in nanoseconds, from the data sheet. A
+ * negative value indicates this characteristic isn't
+ * known.
+ */
+struct nand_timing {
+ int8_t data_setup_in_ns;
+ int8_t data_hold_in_ns;
+ int8_t address_setup_in_ns;
+ int8_t gpmi_sample_delay_in_ns;
+ int8_t tREA_in_ns;
+ int8_t tRLOH_in_ns;
+ int8_t tRHOH_in_ns;
+};
+
+struct gpmi_nand_data {
+ /* System Interface */
+ struct device *dev;
+ struct platform_device *pdev;
+ struct gpmi_nand_platform_data *pdata;
+
+ /* Resources */
+ struct resources resources;
+
+ /* Flash Hardware */
+ struct nand_timing timing;
+
+ /* BCH */
+ struct bch_geometry bch_geometry;
+ struct completion bch_done;
+
+ /* NAND Boot issue */
+ bool swap_block_mark;
+ struct boot_rom_geometry rom_geometry;
+
+ /* MTD / NAND */
+ struct nand_chip nand;
+ struct mtd_info mtd;
+
+ /* General-use Variables */
+ int current_chip;
+ unsigned int command_length;
+
+ /* passed from upper layer */
+ uint8_t *upper_buf;
+ int upper_len;
+
+ /* for DMA operations */
+ bool direct_dma_map_ok;
+
+ struct scatterlist cmd_sgl;
+ char *cmd_buffer;
+
+ struct scatterlist data_sgl;
+ char *data_buffer_dma;
+
+ void *page_buffer_virt;
+ dma_addr_t page_buffer_phys;
+ unsigned int page_buffer_size;
+
+ void *payload_virt;
+ dma_addr_t payload_phys;
+
+ void *auxiliary_virt;
+ dma_addr_t auxiliary_phys;
+
+ /* DMA channels */
+#define DMA_CHANS 8
+ struct dma_chan *dma_chans[DMA_CHANS];
+ struct mxs_dma_data dma_data;
+ enum dma_ops_type last_dma_type;
+ enum dma_ops_type dma_type;
+ struct completion dma_done;
+
+ /* private */
+ void *private;
+};
+
+/**
+ * struct gpmi_nfc_hardware_timing - GPMI hardware timing parameters.
+ * @data_setup_in_cycles: The data setup time, in cycles.
+ * @data_hold_in_cycles: The data hold time, in cycles.
+ * @address_setup_in_cycles: The address setup time, in cycles.
+ * @use_half_periods: Indicates the clock is running slowly, so the
+ * NFC DLL should use half-periods.
+ * @sample_delay_factor: The sample delay factor.
+ */
+struct gpmi_nfc_hardware_timing {
+ uint8_t data_setup_in_cycles;
+ uint8_t data_hold_in_cycles;
+ uint8_t address_setup_in_cycles;
+ bool use_half_periods;
+ uint8_t sample_delay_factor;
+};
+
+/**
+ * struct timing_threshod - Timing threshold
+ * @max_data_setup_cycles: The maximum number of data setup cycles that
+ * can be expressed in the hardware.
+ * @internal_data_setup_in_ns: The time, in ns, that the NFC hardware requires
+ * for data read internal setup. In the Reference
+ * Manual, see the chapter "High-Speed NAND
+ * Timing" for more details.
+ * @max_sample_delay_factor: The maximum sample delay factor that can be
+ * expressed in the hardware.
+ * @max_dll_clock_period_in_ns: The maximum period of the GPMI clock that the
+ * sample delay DLL hardware can possibly work
+ * with (the DLL is unusable with longer periods).
+ * If the full-cycle period is greater than HALF
+ * this value, the DLL must be configured to use
+ * half-periods.
+ * @max_dll_delay_in_ns: The maximum amount of delay, in ns, that the
+ * DLL can implement.
+ * @clock_frequency_in_hz: The clock frequency, in Hz, during the current
+ * I/O transaction. If no I/O transaction is in
+ * progress, this is the clock frequency during
+ * the most recent I/O transaction.
+ */
+struct timing_threshod {
+ const unsigned int max_chip_count;
+ const unsigned int max_data_setup_cycles;
+ const unsigned int internal_data_setup_in_ns;
+ const unsigned int max_sample_delay_factor;
+ const unsigned int max_dll_clock_period_in_ns;
+ const unsigned int max_dll_delay_in_ns;
+ unsigned long clock_frequency_in_hz;
+
+};
+
+/* Common Services */
+extern int common_nfc_set_geometry(struct gpmi_nand_data *);
+extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
+extern void prepare_data_dma(struct gpmi_nand_data *,
+ enum dma_data_direction dr);
+extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
+extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
+
+/* GPMI-NAND helper function library */
+extern int gpmi_init(struct gpmi_nand_data *);
+extern void gpmi_clear_bch(struct gpmi_nand_data *);
+extern void gpmi_dump_info(struct gpmi_nand_data *);
+extern int bch_set_geometry(struct gpmi_nand_data *);
+extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
+extern int gpmi_send_command(struct gpmi_nand_data *);
+extern void gpmi_begin(struct gpmi_nand_data *);
+extern void gpmi_end(struct gpmi_nand_data *);
+extern int gpmi_read_data(struct gpmi_nand_data *);
+extern int gpmi_send_data(struct gpmi_nand_data *);
+extern int gpmi_send_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
+extern int gpmi_read_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
+
+/* BCH : Status Block Completion Codes */
+#define STATUS_GOOD 0x00
+#define STATUS_ERASED 0xff
+#define STATUS_UNCORRECTABLE 0xfe
+
+/* Use the platform_id to distinguish different Archs. */
+#define IS_MX23 0x1
+#define IS_MX28 0x2
+#define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23)
+#define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28)
+#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
new file mode 100644
index 00000000..83431240
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
@@ -0,0 +1,172 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __GPMI_NAND_GPMI_REGS_H
+#define __GPMI_NAND_GPMI_REGS_H
+
+#define HW_GPMI_CTRL0 0x00000000
+#define HW_GPMI_CTRL0_SET 0x00000004
+#define HW_GPMI_CTRL0_CLR 0x00000008
+#define HW_GPMI_CTRL0_TOG 0x0000000c
+
+#define BP_GPMI_CTRL0_COMMAND_MODE 24
+#define BM_GPMI_CTRL0_COMMAND_MODE (3 << BP_GPMI_CTRL0_COMMAND_MODE)
+#define BF_GPMI_CTRL0_COMMAND_MODE(v) \
+ (((v) << BP_GPMI_CTRL0_COMMAND_MODE) & BM_GPMI_CTRL0_COMMAND_MODE)
+#define BV_GPMI_CTRL0_COMMAND_MODE__WRITE 0x0
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ 0x1
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ_AND_COMPARE 0x2
+#define BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY 0x3
+
+#define BM_GPMI_CTRL0_WORD_LENGTH (1 << 23)
+#define BV_GPMI_CTRL0_WORD_LENGTH__16_BIT 0x0
+#define BV_GPMI_CTRL0_WORD_LENGTH__8_BIT 0x1
+
+/*
+ * Difference in LOCK_CS between imx23 and imx28:
+ * This bit may impact the _POWER_ consumption, so some chips
+ * do not set it.
+ */
+#define MX23_BP_GPMI_CTRL0_LOCK_CS 22
+#define MX28_BP_GPMI_CTRL0_LOCK_CS 27
+#define LOCK_CS_ENABLE 0x1
+#define BF_GPMI_CTRL0_LOCK_CS(v, x) 0x0
+
+/* Difference in CS between imx23 and imx28 */
+#define BP_GPMI_CTRL0_CS 20
+#define MX23_BM_GPMI_CTRL0_CS (3 << BP_GPMI_CTRL0_CS)
+#define MX28_BM_GPMI_CTRL0_CS (7 << BP_GPMI_CTRL0_CS)
+#define BF_GPMI_CTRL0_CS(v, x) (((v) << BP_GPMI_CTRL0_CS) & \
+ (GPMI_IS_MX23((x)) \
+ ? MX23_BM_GPMI_CTRL0_CS \
+ : MX28_BM_GPMI_CTRL0_CS))
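+
+/*
+ * Example (illustrative): BF_GPMI_CTRL0_CS(1, this) evaluates to 1 << 20 on
+ * both families; the mask only differs because mx23 has a 2-bit CS field
+ * and mx28 a 3-bit one, so chip-select values are effectively masked to
+ * 2 bits on mx23 and 3 bits on mx28.
+ */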
+
+#define BP_GPMI_CTRL0_ADDRESS 17
+#define BM_GPMI_CTRL0_ADDRESS (3 << BP_GPMI_CTRL0_ADDRESS)
+#define BF_GPMI_CTRL0_ADDRESS(v) \
+ (((v) << BP_GPMI_CTRL0_ADDRESS) & BM_GPMI_CTRL0_ADDRESS)
+#define BV_GPMI_CTRL0_ADDRESS__NAND_DATA 0x0
+#define BV_GPMI_CTRL0_ADDRESS__NAND_CLE 0x1
+#define BV_GPMI_CTRL0_ADDRESS__NAND_ALE 0x2
+
+#define BM_GPMI_CTRL0_ADDRESS_INCREMENT (1 << 16)
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__DISABLED 0x0
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__ENABLED 0x1
+
+#define BP_GPMI_CTRL0_XFER_COUNT 0
+#define BM_GPMI_CTRL0_XFER_COUNT (0xffff << BP_GPMI_CTRL0_XFER_COUNT)
+#define BF_GPMI_CTRL0_XFER_COUNT(v) \
+ (((v) << BP_GPMI_CTRL0_XFER_COUNT) & BM_GPMI_CTRL0_XFER_COUNT)
+
+#define HW_GPMI_COMPARE 0x00000010
+
+#define HW_GPMI_ECCCTRL 0x00000020
+#define HW_GPMI_ECCCTRL_SET 0x00000024
+#define HW_GPMI_ECCCTRL_CLR 0x00000028
+#define HW_GPMI_ECCCTRL_TOG 0x0000002c
+
+#define BP_GPMI_ECCCTRL_ECC_CMD 13
+#define BM_GPMI_ECCCTRL_ECC_CMD (3 << BP_GPMI_ECCCTRL_ECC_CMD)
+#define BF_GPMI_ECCCTRL_ECC_CMD(v) \
+ (((v) << BP_GPMI_ECCCTRL_ECC_CMD) & BM_GPMI_ECCCTRL_ECC_CMD)
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE 0x0
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE 0x1
+
+#define BM_GPMI_ECCCTRL_ENABLE_ECC (1 << 12)
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__ENABLE 0x1
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__DISABLE 0x0
+
+#define BP_GPMI_ECCCTRL_BUFFER_MASK 0
+#define BM_GPMI_ECCCTRL_BUFFER_MASK (0x1ff << BP_GPMI_ECCCTRL_BUFFER_MASK)
+#define BF_GPMI_ECCCTRL_BUFFER_MASK(v) \
+ (((v) << BP_GPMI_ECCCTRL_BUFFER_MASK) & BM_GPMI_ECCCTRL_BUFFER_MASK)
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY 0x100
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE 0x1FF
+
+#define HW_GPMI_ECCCOUNT 0x00000030
+#define HW_GPMI_PAYLOAD 0x00000040
+#define HW_GPMI_AUXILIARY 0x00000050
+#define HW_GPMI_CTRL1 0x00000060
+#define HW_GPMI_CTRL1_SET 0x00000064
+#define HW_GPMI_CTRL1_CLR 0x00000068
+#define HW_GPMI_CTRL1_TOG 0x0000006c
+
+#define BM_GPMI_CTRL1_BCH_MODE (1 << 18)
+
+#define BP_GPMI_CTRL1_DLL_ENABLE 17
+#define BM_GPMI_CTRL1_DLL_ENABLE (1 << BP_GPMI_CTRL1_DLL_ENABLE)
+
+#define BP_GPMI_CTRL1_HALF_PERIOD 16
+#define BM_GPMI_CTRL1_HALF_PERIOD (1 << BP_GPMI_CTRL1_HALF_PERIOD)
+
+#define BP_GPMI_CTRL1_RDN_DELAY 12
+#define BM_GPMI_CTRL1_RDN_DELAY (0xf << BP_GPMI_CTRL1_RDN_DELAY)
+#define BF_GPMI_CTRL1_RDN_DELAY(v) \
+ (((v) << BP_GPMI_CTRL1_RDN_DELAY) & BM_GPMI_CTRL1_RDN_DELAY)
+
+#define BM_GPMI_CTRL1_DEV_RESET (1 << 3)
+#define BV_GPMI_CTRL1_DEV_RESET__ENABLED 0x0
+#define BV_GPMI_CTRL1_DEV_RESET__DISABLED 0x1
+
+#define BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY (1 << 2)
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVELOW 0x0
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVEHIGH 0x1
+
+#define BM_GPMI_CTRL1_CAMERA_MODE (1 << 1)
+#define BV_GPMI_CTRL1_GPMI_MODE__NAND 0x0
+#define BV_GPMI_CTRL1_GPMI_MODE__ATA 0x1
+
+#define BM_GPMI_CTRL1_GPMI_MODE (1 << 0)
+
+#define HW_GPMI_TIMING0 0x00000070
+
+#define BP_GPMI_TIMING0_ADDRESS_SETUP 16
+#define BM_GPMI_TIMING0_ADDRESS_SETUP (0xff << BP_GPMI_TIMING0_ADDRESS_SETUP)
+#define BF_GPMI_TIMING0_ADDRESS_SETUP(v) \
+ (((v) << BP_GPMI_TIMING0_ADDRESS_SETUP) & BM_GPMI_TIMING0_ADDRESS_SETUP)
+
+#define BP_GPMI_TIMING0_DATA_HOLD 8
+#define BM_GPMI_TIMING0_DATA_HOLD (0xff << BP_GPMI_TIMING0_DATA_HOLD)
+#define BF_GPMI_TIMING0_DATA_HOLD(v) \
+ (((v) << BP_GPMI_TIMING0_DATA_HOLD) & BM_GPMI_TIMING0_DATA_HOLD)
+
+#define BP_GPMI_TIMING0_DATA_SETUP 0
+#define BM_GPMI_TIMING0_DATA_SETUP (0xff << BP_GPMI_TIMING0_DATA_SETUP)
+#define BF_GPMI_TIMING0_DATA_SETUP(v) \
+ (((v) << BP_GPMI_TIMING0_DATA_SETUP) & BM_GPMI_TIMING0_DATA_SETUP)
+
+#define HW_GPMI_TIMING1 0x00000080
+#define BP_GPMI_TIMING1_BUSY_TIMEOUT 16
+
+#define HW_GPMI_TIMING2 0x00000090
+#define HW_GPMI_DATA 0x000000a0
+
+/* MX28 uses this to detect READY. */
+#define HW_GPMI_STAT 0x000000b0
+#define MX28_BP_GPMI_STAT_READY_BUSY 24
+#define MX28_BM_GPMI_STAT_READY_BUSY (0xff << MX28_BP_GPMI_STAT_READY_BUSY)
+#define MX28_BF_GPMI_STAT_READY_BUSY(v) \
+ (((v) << MX28_BP_GPMI_STAT_READY_BUSY) & MX28_BM_GPMI_STAT_READY_BUSY)
+
+/* MX23 uses this to detect READY. */
+#define HW_GPMI_DEBUG 0x000000c0
+#define MX23_BP_GPMI_DEBUG_READY0 28
+#define MX23_BM_GPMI_DEBUG_READY0 (1 << MX23_BP_GPMI_DEBUG_READY0)
+#endif
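
The register header above follows the usual Freescale naming scheme: BP_* gives a field's bit position, BM_* its mask, BF_*(v) shifts and masks a value into the field, and BV_* names the valid field values. As a minimal illustrative sketch only (not part of this patch; the command mode, address type and transfer count are example values), a CTRL0 word for an 8-bit data write of 'count' bytes could be composed like this:

	#include <linux/types.h>
	#include "gpmi-regs.h"

	/* Build an example CTRL0 value: WRITE command mode, 8-bit bus width,
	 * plain data cycles, 'count' bytes transferred. */
	static u32 example_gpmi_ctrl0(u16 count)
	{
		return BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE) |
		       BM_GPMI_CTRL0_WORD_LENGTH |	/* value 1 selects the 8-bit bus */
		       BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) |
		       BF_GPMI_CTRL0_XFER_COUNT(count);
	}

The same BP_/BM_/BF_ pattern repeats for ECCCTRL, TIMING0 and the other multi-field registers defined above.
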
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
new file mode 100644
index 00000000..11e48781
--- /dev/null
+++ b/drivers/mtd/nand/h1910.c
@@ -0,0 +1,168 @@
+/*
+ * drivers/mtd/nand/h1910.c
+ *
+ * Copyright (C) 2003 Joshua Wise (joshua@joshuawise.com)
+ *
+ * Derived from drivers/mtd/nand/edb7312.c
+ * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
+ * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * iPAQ h1910 board which utilizes the Samsung K9F2808 part. This is
+ * a 128Mibit (16MiB x 8 bits) NAND flash device.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <mach/hardware.h> /* for CLPS7111_VIRT_BASE */
+#include <asm/sizes.h>
+#include <mach/h1900-gpio.h>
+#include <mach/ipaq.h>
+
+/*
+ * MTD structure for the iPAQ h1910 board
+ */
+static struct mtd_info *h1910_nand_mtd = NULL;
+
+/*
+ * Module stuff
+ */
+
+/*
+ * Define static partitions for flash device
+ */
+static struct mtd_partition partition_info[] = {
+	{.name = "h1910 NAND Flash",
+	 .offset = 0,
+	 .size = 16 * 1024 * 1024}
+};
+
+#define NUM_PARTITIONS 1
+
+/*
+ * hardware specific access to control-lines
+ *
+ * NAND_NCE: bit 0 - don't care
+ * NAND_CLE: bit 1 - address bit 2
+ * NAND_ALE: bit 2 - address bit 3
+ */
+static void h1910_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (cmd != NAND_CMD_NONE)
+		/* CLE -> base + 0x4, ALE -> base + 0x8 */
+		writeb(cmd, chip->IO_ADDR_W + ((ctrl & 0x6) << 1));
+}
+
+/*
+ * read device ready pin
+ */
+#if 0
+static int h1910_device_ready(struct mtd_info *mtd)
+{
+ return (GPLR(55) & GPIO_bit(55));
+}
+#endif
+
+/*
+ * Main initialization routine
+ */
+static int __init h1910_init(void)
+{
+ struct nand_chip *this;
+ void __iomem *nandaddr;
+
+ if (!machine_is_h1900())
+ return -ENODEV;
+
+ nandaddr = ioremap(0x08000000, 0x1000);
+ if (!nandaddr) {
+ printk("Failed to ioremap nand flash.\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate memory for MTD device structure and private data */
+ h1910_nand_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+ if (!h1910_nand_mtd) {
+ printk("Unable to allocate h1910 NAND MTD device structure.\n");
+ iounmap((void *)nandaddr);
+ return -ENOMEM;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&h1910_nand_mtd[1]);
+
+ /* Initialize structures */
+ memset(h1910_nand_mtd, 0, sizeof(struct mtd_info));
+ memset(this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ h1910_nand_mtd->priv = this;
+ h1910_nand_mtd->owner = THIS_MODULE;
+
+ /*
+ * Enable VPEN
+ */
+ GPSR(37) = GPIO_bit(37);
+
+ /* insert callbacks */
+ this->IO_ADDR_R = nandaddr;
+ this->IO_ADDR_W = nandaddr;
+ this->cmd_ctrl = h1910_hwcontrol;
+	this->dev_ready = NULL;	/* unknown whether this is correct, so leave it unset */
+	/* 50 us command delay time */
+	this->chip_delay = 50;
+ this->ecc.mode = NAND_ECC_SOFT;
+ this->options = NAND_NO_AUTOINCR;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(h1910_nand_mtd, 1)) {
+ printk(KERN_NOTICE "No NAND device - returning -ENXIO\n");
+ kfree(h1910_nand_mtd);
+ iounmap((void *)nandaddr);
+ return -ENXIO;
+ }
+
+ /* Register the partitions */
+ mtd_device_parse_register(h1910_nand_mtd, NULL, NULL, partition_info,
+ NUM_PARTITIONS);
+
+ /* Return happy */
+ return 0;
+}
+
+module_init(h1910_init);
+
+/*
+ * Clean up routine
+ */
+static void __exit h1910_cleanup(void)
+{
+ struct nand_chip *this = (struct nand_chip *)&h1910_nand_mtd[1];
+
+ /* Release resources, unregister device */
+ nand_release(h1910_nand_mtd);
+
+ /* Release io resource */
+ iounmap((void *)this->IO_ADDR_W);
+
+ /* Free the MTD device structure */
+ kfree(h1910_nand_mtd);
+}
+
+module_exit(h1910_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joshua Wise <joshua at joshuawise dot com>");
+MODULE_DESCRIPTION("NAND flash driver for iPAQ h1910");
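
The h1910_hwcontrol() hook above relies on the control-line mapping documented in its comment: NAND_CLE (0x02 in the MTD core) lands on address bit 2 and NAND_ALE (0x04) on address bit 3, so shifting the masked ctrl bits left by one turns them into byte offsets 0x4 and 0x8 from the NAND base address. A standalone sketch of that arithmetic, for illustration only (the NAND_CLE/NAND_ALE values are taken from include/linux/mtd/nand.h; nothing below is part of the patch):

	#include <stdio.h>

	#define NAND_CLE 0x02	/* command latch enable, as in linux/mtd/nand.h */
	#define NAND_ALE 0x04	/* address latch enable */

	int main(void)
	{
		/* CLE: (0x02 & 0x6) << 1 == 0x4 -> command byte written at base + 0x4 */
		printf("CLE offset: 0x%x\n", (NAND_CLE & 0x6) << 1);
		/* ALE: (0x04 & 0x6) << 1 == 0x8 -> address byte written at base + 0x8 */
		printf("ALE offset: 0x%x\n", (NAND_ALE & 0x6) << 1);
		return 0;
	}
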
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
new file mode 100644
index 00000000..e4147e8a
--- /dev/null
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * JZ4740 SoC NAND controller driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/gpio.h>
+
+#include <asm/mach-jz4740/jz4740_nand.h>
+
+#define JZ_REG_NAND_CTRL 0x50
+#define JZ_REG_NAND_ECC_CTRL 0x100
+#define JZ_REG_NAND_DATA 0x104
+#define JZ_REG_NAND_PAR0 0x108
+#define JZ_REG_NAND_PAR1 0x10C
+#define JZ_REG_NAND_PAR2 0x110
+#define JZ_REG_NAND_IRQ_STAT 0x114
+#define JZ_REG_NAND_IRQ_CTRL 0x118
+#define JZ_REG_NAND_ERR(x) (0x11C + ((x) << 2))
+
+#define JZ_NAND_ECC_CTRL_PAR_READY BIT(4)
+#define JZ_NAND_ECC_CTRL_ENCODING BIT(3)
+#define JZ_NAND_ECC_CTRL_RS BIT(2)
+#define JZ_NAND_ECC_CTRL_RESET BIT(1)
+#define JZ_NAND_ECC_CTRL_ENABLE BIT(0)
+
+#define JZ_NAND_STATUS_ERR_COUNT (BIT(31) | BIT(30) | BIT(29))
+#define JZ_NAND_STATUS_PAD_FINISH BIT(4)
+#define JZ_NAND_STATUS_DEC_FINISH BIT(3)
+#define JZ_NAND_STATUS_ENC_FINISH BIT(2)
+#define JZ_NAND_STATUS_UNCOR_ERROR BIT(1)
+#define JZ_NAND_STATUS_ERROR BIT(0)
+
+#define JZ_NAND_CTRL_ENABLE_CHIP(x) BIT((x) << 1)
+#define JZ_NAND_CTRL_ASSERT_CHIP(x) BIT(((x) << 1) + 1)
+
+#define JZ_NAND_MEM_ADDR_OFFSET 0x10000
+#define JZ_NAND_MEM_CMD_OFFSET 0x08000
+
+struct jz_nand {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ void __iomem *base;
+ struct resource *mem;
+
+ void __iomem *bank_base;
+ struct resource *bank_mem;
+
+ struct jz_nand_platform_data *pdata;
+ bool is_reading;
+};
+
+static inline struct jz_nand *mtd_to_jz_nand(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct jz_nand, mtd);
+}
+
+static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ struct nand_chip *chip = mtd->priv;
+ uint32_t reg;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ BUG_ON((ctrl & NAND_ALE) && (ctrl & NAND_CLE));
+ if (ctrl & NAND_ALE)
+ chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_ADDR_OFFSET;
+ else if (ctrl & NAND_CLE)
+ chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_CMD_OFFSET;
+ else
+ chip->IO_ADDR_W = nand->bank_base;
+
+ reg = readl(nand->base + JZ_REG_NAND_CTRL);
+ if (ctrl & NAND_NCE)
+ reg |= JZ_NAND_CTRL_ASSERT_CHIP(0);
+ else
+ reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(0);
+ writel(reg, nand->base + JZ_REG_NAND_CTRL);
+ }
+ if (dat != NAND_CMD_NONE)
+ writeb(dat, chip->IO_ADDR_W);
+}
+
+static int jz_nand_dev_ready(struct mtd_info *mtd)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ return gpio_get_value_cansleep(nand->pdata->busy_gpio);
+}
+
+static void jz_nand_hwctl(struct mtd_info *mtd, int mode)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ uint32_t reg;
+
+ writel(0, nand->base + JZ_REG_NAND_IRQ_STAT);
+ reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+
+ reg |= JZ_NAND_ECC_CTRL_RESET;
+ reg |= JZ_NAND_ECC_CTRL_ENABLE;
+ reg |= JZ_NAND_ECC_CTRL_RS;
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ reg &= ~JZ_NAND_ECC_CTRL_ENCODING;
+ nand->is_reading = true;
+ break;
+ case NAND_ECC_WRITE:
+ reg |= JZ_NAND_ECC_CTRL_ENCODING;
+ nand->is_reading = false;
+ break;
+ default:
+ break;
+ }
+
+ writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+}
+
+static int jz_nand_calculate_ecc_rs(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ uint32_t reg, status;
+ int i;
+ unsigned int timeout = 1000;
+ static uint8_t empty_block_ecc[] = {0xcd, 0x9d, 0x90, 0x58, 0xf4,
+ 0x8b, 0xff, 0xb7, 0x6f};
+
+ if (nand->is_reading)
+ return 0;
+
+ do {
+ status = readl(nand->base + JZ_REG_NAND_IRQ_STAT);
+ } while (!(status & JZ_NAND_STATUS_ENC_FINISH) && --timeout);
+
+ if (timeout == 0)
+ return -1;
+
+ reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+
+ for (i = 0; i < 9; ++i)
+ ecc_code[i] = readb(nand->base + JZ_REG_NAND_PAR0 + i);
+
+	/* If the written data is completely 0xff, we also want to write 0xff as
+	 * ECC, otherwise we will get in trouble when doing subpage writes. */
+ if (memcmp(ecc_code, empty_block_ecc, 9) == 0)
+ memset(ecc_code, 0xff, 9);
+
+ return 0;
+}
+
+static void jz_nand_correct_data(uint8_t *dat, int index, int mask)
+{
+ int offset = index & 0x7;
+ uint16_t data;
+
+ index += (index >> 3);
+
+ data = dat[index];
+ data |= dat[index+1] << 8;
+
+ mask ^= (data >> offset) & 0x1ff;
+ data &= ~(0x1ff << offset);
+ data |= (mask << offset);
+
+ dat[index] = data & 0xff;
+ dat[index+1] = (data >> 8) & 0xff;
+}
+
+static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ int i, error_count, index;
+ uint32_t reg, status, error;
+ uint32_t t;
+ unsigned int timeout = 1000;
+
+ t = read_ecc[0];
+
+ if (t == 0xff) {
+ for (i = 1; i < 9; ++i)
+ t &= read_ecc[i];
+
+ t &= dat[0];
+ t &= dat[nand->chip.ecc.size / 2];
+ t &= dat[nand->chip.ecc.size - 1];
+
+ if (t == 0xff) {
+ for (i = 1; i < nand->chip.ecc.size - 1; ++i)
+ t &= dat[i];
+ if (t == 0xff)
+ return 0;
+ }
+ }
+
+ for (i = 0; i < 9; ++i)
+ writeb(read_ecc[i], nand->base + JZ_REG_NAND_PAR0 + i);
+
+ reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+ reg |= JZ_NAND_ECC_CTRL_PAR_READY;
+ writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+
+ do {
+ status = readl(nand->base + JZ_REG_NAND_IRQ_STAT);
+ } while (!(status & JZ_NAND_STATUS_DEC_FINISH) && --timeout);
+
+ if (timeout == 0)
+ return -1;
+
+ reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+
+ if (status & JZ_NAND_STATUS_ERROR) {
+ if (status & JZ_NAND_STATUS_UNCOR_ERROR)
+ return -1;
+
+ error_count = (status & JZ_NAND_STATUS_ERR_COUNT) >> 29;
+
+ for (i = 0; i < error_count; ++i) {
+ error = readl(nand->base + JZ_REG_NAND_ERR(i));
+ index = ((error >> 16) & 0x1ff) - 1;
+ if (index >= 0 && index < 512)
+ jz_nand_correct_data(dat, index, error & 0x1ff);
+ }
+
+ return error_count;
+ }
+
+ return 0;
+}
+
+static int jz_nand_ioremap_resource(struct platform_device *pdev,
+ const char *name, struct resource **res, void __iomem **base)
+{
+ int ret;
+
+ *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!*res) {
+ dev_err(&pdev->dev, "Failed to get platform %s memory\n", name);
+ ret = -ENXIO;
+ goto err;
+ }
+
+ *res = request_mem_region((*res)->start, resource_size(*res),
+ pdev->name);
+ if (!*res) {
+ dev_err(&pdev->dev, "Failed to request %s memory region\n", name);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ *base = ioremap((*res)->start, resource_size(*res));
+ if (!*base) {
+ dev_err(&pdev->dev, "Failed to ioremap %s memory region\n", name);
+ ret = -EBUSY;
+ goto err_release_mem;
+ }
+
+ return 0;
+
+err_release_mem:
+ release_mem_region((*res)->start, resource_size(*res));
+err:
+ *res = NULL;
+ *base = NULL;
+ return ret;
+}
+
+static int __devinit jz_nand_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct jz_nand *nand;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
+
+ nand = kzalloc(sizeof(*nand), GFP_KERNEL);
+ if (!nand) {
+ dev_err(&pdev->dev, "Failed to allocate device structure.\n");
+ return -ENOMEM;
+ }
+
+ ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base);
+ if (ret)
+ goto err_free;
+ ret = jz_nand_ioremap_resource(pdev, "bank", &nand->bank_mem,
+ &nand->bank_base);
+ if (ret)
+ goto err_iounmap_mmio;
+
+ if (pdata && gpio_is_valid(pdata->busy_gpio)) {
+ ret = gpio_request(pdata->busy_gpio, "NAND busy pin");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to request busy gpio %d: %d\n",
+ pdata->busy_gpio, ret);
+ goto err_iounmap_mem;
+ }
+ }
+
+ mtd = &nand->mtd;
+ chip = &nand->chip;
+ mtd->priv = chip;
+ mtd->owner = THIS_MODULE;
+ mtd->name = "jz4740-nand";
+
+ chip->ecc.hwctl = jz_nand_hwctl;
+ chip->ecc.calculate = jz_nand_calculate_ecc_rs;
+ chip->ecc.correct = jz_nand_correct_ecc_rs;
+ chip->ecc.mode = NAND_ECC_HW_OOB_FIRST;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 9;
+ chip->ecc.strength = 2;
+ /*
+ * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a
+ * conservative guess, given 9 ecc bytes and reed-solomon alg.
+ */
+
+ if (pdata)
+ chip->ecc.layout = pdata->ecc_layout;
+
+ chip->chip_delay = 50;
+ chip->cmd_ctrl = jz_nand_cmd_ctrl;
+
+ if (pdata && gpio_is_valid(pdata->busy_gpio))
+ chip->dev_ready = jz_nand_dev_ready;
+
+ chip->IO_ADDR_R = nand->bank_base;
+ chip->IO_ADDR_W = nand->bank_base;
+
+ nand->pdata = pdata;
+ platform_set_drvdata(pdev, nand);
+
+ writel(JZ_NAND_CTRL_ENABLE_CHIP(0), nand->base + JZ_REG_NAND_CTRL);
+
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to scan nand\n");
+ goto err_gpio_free;
+ }
+
+ if (pdata && pdata->ident_callback) {
+ pdata->ident_callback(pdev, chip, &pdata->partitions,
+ &pdata->num_partitions);
+ }
+
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to scan nand\n");
+ goto err_gpio_free;
+ }
+
+ ret = mtd_device_parse_register(mtd, NULL, NULL,
+ pdata ? pdata->partitions : NULL,
+ pdata ? pdata->num_partitions : 0);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add mtd device\n");
+ goto err_nand_release;
+ }
+
+ dev_info(&pdev->dev, "Successfully registered JZ4740 NAND driver\n");
+
+ return 0;
+
+err_nand_release:
+ nand_release(&nand->mtd);
+err_gpio_free:
+ platform_set_drvdata(pdev, NULL);
+ gpio_free(pdata->busy_gpio);
+err_iounmap_mem:
+ iounmap(nand->bank_base);
+err_iounmap_mmio:
+ iounmap(nand->base);
+err_free:
+ kfree(nand);
+ return ret;
+}
+
+static int __devexit jz_nand_remove(struct platform_device *pdev)
+{
+ struct jz_nand *nand = platform_get_drvdata(pdev);
+
+ nand_release(&nand->mtd);
+
+ /* Deassert and disable all chips */
+ writel(0, nand->base + JZ_REG_NAND_CTRL);
+
+ iounmap(nand->bank_base);
+ release_mem_region(nand->bank_mem->start, resource_size(nand->bank_mem));
+ iounmap(nand->base);
+ release_mem_region(nand->mem->start, resource_size(nand->mem));
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(nand);
+
+ return 0;
+}
+
+static struct platform_driver jz_nand_driver = {
+ .probe = jz_nand_probe,
+ .remove = __devexit_p(jz_nand_remove),
+ .driver = {
+ .name = "jz4740-nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(jz_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("NAND controller driver for JZ4740 SoC");
+MODULE_ALIAS("platform:jz4740-nand");
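
The Reed-Solomon correction path above works on 9-bit symbols packed back to back into the data buffer: symbol i starts at bit 9*i, which is byte i + i/8 at bit offset i % 8, and that is exactly what jz_nand_correct_data() computes with index += index >> 3 and offset = index & 0x7. Each JZ_REG_NAND_ERR word supplies a 1-based symbol index in bits 24:16 and a 9-bit XOR mask in bits 8:0. A self-contained sketch of the same arithmetic, for illustration only (the error word below is a made-up example, not hardware output):

	#include <stdint.h>
	#include <stdio.h>

	/* Apply a 9-bit XOR correction to symbol 'index' of 'dat',
	 * mirroring jz_nand_correct_data(). */
	static void correct_symbol(uint8_t *dat, int index, int mask)
	{
		int offset = index & 0x7;	/* bit offset inside the first byte */
		uint16_t data;

		index += index >> 3;		/* 9-bit symbol index -> byte index */
		data = dat[index] | (dat[index + 1] << 8);

		mask ^= (data >> offset) & 0x1ff;	/* corrected symbol value */
		data &= ~(0x1ff << offset);
		data |= mask << offset;

		dat[index] = data & 0xff;
		dat[index + 1] = data >> 8;
	}

	int main(void)
	{
		uint8_t dat[512] = { 0 };
		uint32_t error = (3 << 16) | 0x005;	/* example: symbol 3, flip bits 0 and 2 */

		correct_symbol(dat, ((error >> 16) & 0x1ff) - 1, error & 0x1ff);
		printf("dat[2] = 0x%02x, dat[3] = 0x%02x\n", dat[2], dat[3]);
		return 0;
	}
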
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
new file mode 100644
index 00000000..c240cf1a
--- /dev/null
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -0,0 +1,886 @@
+/*
+ * Copyright 2004-2008 Freescale Semiconductor, Inc.
+ * Copyright 2009 Semihalf.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * Based on original driver from Freescale Semiconductor
+ * written by John Rigby <jrigby@freescale.com> on the basis
+ * of drivers/mtd/nand/mxc_nand.c. Reworked and extended by
+ * Piotr Ziecik <kosmo@semihalf.com>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#include <asm/mpc5121.h>
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n) ((n) * 0x200)
+
+/* Addresses for NFC SPARE BUFFER areas */
+#define NFC_SPARE_BUFFERS 8
+#define NFC_SPARE_LEN 0x40
+#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN))
+
+/* MPC5121 NFC registers */
+#define NFC_BUF_ADDR 0x1E04
+#define NFC_FLASH_ADDR 0x1E06
+#define NFC_FLASH_CMD 0x1E08
+#define NFC_CONFIG 0x1E0A
+#define NFC_ECC_STATUS1 0x1E0C
+#define NFC_ECC_STATUS2 0x1E0E
+#define NFC_SPAS 0x1E10
+#define NFC_WRPROT 0x1E12
+#define NFC_NF_WRPRST 0x1E18
+#define NFC_CONFIG1 0x1E1A
+#define NFC_CONFIG2 0x1E1C
+#define NFC_UNLOCKSTART_BLK0 0x1E20
+#define NFC_UNLOCKEND_BLK0 0x1E22
+#define NFC_UNLOCKSTART_BLK1 0x1E24
+#define NFC_UNLOCKEND_BLK1 0x1E26
+#define NFC_UNLOCKSTART_BLK2 0x1E28
+#define NFC_UNLOCKEND_BLK2 0x1E2A
+#define NFC_UNLOCKSTART_BLK3 0x1E2C
+#define NFC_UNLOCKEND_BLK3 0x1E2E
+
+/* Bit Definitions: NFC_BUF_ADDR */
+#define NFC_RBA_MASK (7 << 0)
+#define NFC_ACTIVE_CS_SHIFT 5
+#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT)
+
+/* Bit Definitions: NFC_CONFIG */
+#define NFC_BLS_UNLOCKED (1 << 1)
+
+/* Bit Definitions: NFC_CONFIG1 */
+#define NFC_ECC_4BIT (1 << 0)
+#define NFC_FULL_PAGE_DMA (1 << 1)
+#define NFC_SPARE_ONLY (1 << 2)
+#define NFC_ECC_ENABLE (1 << 3)
+#define NFC_INT_MASK (1 << 4)
+#define NFC_BIG_ENDIAN (1 << 5)
+#define NFC_RESET (1 << 6)
+#define NFC_CE (1 << 7)
+#define NFC_ONE_CYCLE (1 << 8)
+#define NFC_PPB_32 (0 << 9)
+#define NFC_PPB_64 (1 << 9)
+#define NFC_PPB_128 (2 << 9)
+#define NFC_PPB_256 (3 << 9)
+#define NFC_PPB_MASK (3 << 9)
+#define NFC_FULL_PAGE_INT (1 << 11)
+
+/* Bit Definitions: NFC_CONFIG2 */
+#define NFC_COMMAND (1 << 0)
+#define NFC_ADDRESS (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+#define NFC_CMD_FAIL (1 << 15)
+#define NFC_INT (1 << 15)
+
+/* Bit Definitions: NFC_WRPROT */
+#define NFC_WPC_LOCK_TIGHT (1 << 0)
+#define NFC_WPC_LOCK (1 << 1)
+#define NFC_WPC_UNLOCK (1 << 2)
+
+#define DRV_NAME "mpc5121_nfc"
+
+/* Timeouts */
+#define NFC_RESET_TIMEOUT 1000 /* 1 ms */
+#define NFC_TIMEOUT (HZ / 10) /* 1/10 s */
+
+struct mpc5121_nfc_prv {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ int irq;
+ void __iomem *regs;
+ struct clk *clk;
+ wait_queue_head_t irq_waitq;
+ uint column;
+ int spareonly;
+ void __iomem *csreg;
+ struct device *dev;
+};
+
+static void mpc5121_nfc_done(struct mtd_info *mtd);
+
+/* Read NFC register */
+static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ return in_be16(prv->regs + reg);
+}
+
+/* Write NFC register */
+static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ out_be16(prv->regs + reg, val);
+}
+
+/* Set bits in NFC register */
+static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
+}
+
+/* Clear bits in NFC register */
+static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
+}
+
+/* Invoke address cycle */
+static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
+{
+ nfc_write(mtd, NFC_FLASH_ADDR, addr);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Invoke command cycle */
+static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
+{
+ nfc_write(mtd, NFC_FLASH_CMD, cmd);
+ nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Send data from NFC buffers to NAND flash */
+static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive data from NAND flash */
+static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive ID from NAND flash */
+static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ID);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive status from NAND flash */
+static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* NFC interrupt handler */
+static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
+{
+ struct mtd_info *mtd = data;
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ wake_up(&prv->irq_waitq);
+
+ return IRQ_HANDLED;
+}
+
+/* Wait for operation complete */
+static void mpc5121_nfc_done(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ int rv;
+
+ if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ rv = wait_event_timeout(prv->irq_waitq,
+ (nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);
+
+ if (!rv)
+ dev_warn(prv->dev,
+ "Timeout while waiting for interrupt.\n");
+ }
+
+ nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
+}
+
+/* Do address cycle(s) */
+static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ u32 pagemask = chip->pagemask;
+
+ if (column != -1) {
+ mpc5121_nfc_send_addr(mtd, column);
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_addr(mtd, column >> 8);
+ }
+
+ if (page != -1) {
+ do {
+ mpc5121_nfc_send_addr(mtd, page & 0xFF);
+ page >>= 8;
+ pagemask >>= 8;
+ } while (pagemask);
+ }
+}
+
+/* Control chip select signals */
+static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ if (chip < 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
+ return;
+ }
+
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
+ NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_CONFIG1, NFC_CE);
+}
+
+/* Init external chip select logic on ADS5121 board */
+static int ads5121_chipselect_init(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ struct device_node *dn;
+
+ dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld");
+ if (dn) {
+ prv->csreg = of_iomap(dn, 0);
+ of_node_put(dn);
+ if (!prv->csreg)
+ return -ENOMEM;
+
+ /* CPLD Register 9 controls NAND /CE Lines */
+ prv->csreg += 9;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Control chip select signal on ADS5121 board */
+static void ads5121_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct mpc5121_nfc_prv *prv = nand->priv;
+ u8 v;
+
+ v = in_8(prv->csreg);
+ v |= 0x0F;
+
+ if (chip >= 0) {
+ mpc5121_nfc_select_chip(mtd, 0);
+ v &= ~(1 << chip);
+ } else
+ mpc5121_nfc_select_chip(mtd, -1);
+
+ out_8(prv->csreg, v);
+}
+
+/* Read NAND Ready/Busy signal */
+static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
+{
+ /*
+ * NFC handles ready/busy signal internally. Therefore, this function
+ * always returns status as ready.
+ */
+ return 1;
+}
+
+/* Write command to NAND flash */
+static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
+ int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ prv->column = (column >= 0) ? column : 0;
+ prv->spareonly = 0;
+
+ switch (command) {
+ case NAND_CMD_PAGEPROG:
+ mpc5121_nfc_send_prog_page(mtd);
+ break;
+ /*
+ * NFC does not support sub-page reads and writes,
+ * so emulate them using full page transfers.
+ */
+ case NAND_CMD_READ0:
+ column = 0;
+ break;
+
+ case NAND_CMD_READ1:
+ prv->column += 256;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_READOOB:
+ prv->spareonly = 1;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_SEQIN:
+ mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
+ column = 0;
+ break;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_READID:
+ case NAND_CMD_STATUS:
+ break;
+
+ default:
+ return;
+ }
+
+ mpc5121_nfc_send_cmd(mtd, command);
+ mpc5121_nfc_addr_cycle(mtd, column, page);
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
+ mpc5121_nfc_send_read_page(mtd);
+ break;
+
+ case NAND_CMD_READID:
+ mpc5121_nfc_send_read_id(mtd);
+ break;
+
+ case NAND_CMD_STATUS:
+ mpc5121_nfc_send_read_status(mtd);
+ if (chip->options & NAND_BUSWIDTH_16)
+ prv->column = 1;
+ else
+ prv->column = 0;
+ break;
+ }
+}
+
+/* Copy data from/to NFC spare buffers. */
+static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
+ u8 *buffer, uint size, int wr)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct mpc5121_nfc_prv *prv = nand->priv;
+ uint o, s, sbsize, blksize;
+
+ /*
+ * NAND spare area is available through NFC spare buffers.
+ * The NFC divides spare area into (page_size / 512) chunks.
+ * Each chunk is placed into separate spare memory area, using
+ * first (spare_size / num_of_chunks) bytes of the buffer.
+ *
+ * For NAND devices whose spare area size is not evenly divisible
+ * by the number of chunks, the number of used bytes in each spare
+ * buffer is rounded down to the nearest even number of bytes,
+ * and all remaining bytes are added to the last used spare area.
+ *
+ * For more information read section 26.6.10 of MPC5121e
+ * Microcontroller Reference Manual, Rev. 3.
+ */
+
+ /* Calculate number of valid bytes in each spare buffer */
+ sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
+
+ while (size) {
+ /* Calculate spare buffer number */
+ s = offset / sbsize;
+ if (s > NFC_SPARE_BUFFERS - 1)
+ s = NFC_SPARE_BUFFERS - 1;
+
+ /*
+ * Calculate offset to requested data block in selected spare
+ * buffer and its size.
+ */
+ o = offset - (s * sbsize);
+ blksize = min(sbsize - o, size);
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
+ buffer, blksize);
+ else
+ memcpy_fromio(buffer,
+ prv->regs + NFC_SPARE_AREA(s) + o, blksize);
+
+ buffer += blksize;
+ offset += blksize;
+ size -= blksize;
+	}
+}
+
+/* Copy data from/to NFC main and spare buffers */
+static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
+ int wr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ uint c = prv->column;
+ uint l;
+
+ /* Handle spare area access */
+ if (prv->spareonly || c >= mtd->writesize) {
+ /* Calculate offset from beginning of spare area */
+ if (c >= mtd->writesize)
+ c -= mtd->writesize;
+
+ prv->column += len;
+ mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
+ return;
+ }
+
+ /*
+ * Handle main area access - limit copy length to prevent
+ * crossing main/spare boundary.
+ */
+ l = min((uint)len, mtd->writesize - c);
+ prv->column += l;
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
+ else
+ memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
+
+ /* Handle crossing main/spare boundary */
+ if (l != len) {
+ buf += l;
+ len -= l;
+ mpc5121_nfc_buf_copy(mtd, buf, len, wr);
+ }
+}
+
+/* Read data from NFC buffers */
+static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, buf, len, 0);
+}
+
+/* Write data to NFC buffers */
+static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
+}
+
+/* Compare buffer with NAND flash */
+static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ u_char tmp[256];
+ uint bsize;
+
+ while (len) {
+ bsize = min(len, 256);
+ mpc5121_nfc_read_buf(mtd, tmp, bsize);
+
+ if (memcmp(buf, tmp, bsize))
+ return 1;
+
+ buf += bsize;
+ len -= bsize;
+ }
+
+ return 0;
+}
+
+/* Read byte from NFC buffers */
+static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
+{
+ u8 tmp;
+
+ mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/* Read word from NFC buffers */
+static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
+{
+ u16 tmp;
+
+ mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/*
+ * Read NFC configuration from Reset Config Word
+ *
+ * NFC is configured during reset on the basis of information stored
+ * in Reset Config Word. There is no other way to set NAND block
+ * size, spare size and bus width.
+ */
+static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ struct mpc512x_reset_module *rm;
+ struct device_node *rmnode;
+ uint rcw_pagesize = 0;
+ uint rcw_sparesize = 0;
+ uint rcw_width;
+ uint rcwh;
+ uint romloc, ps;
+ int ret = 0;
+
+ rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
+ if (!rmnode) {
+ dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' "
+ "node in device tree!\n");
+ return -ENODEV;
+ }
+
+ rm = of_iomap(rmnode, 0);
+ if (!rm) {
+ dev_err(prv->dev, "Error mapping reset module node!\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ rcwh = in_be32(&rm->rcwhr);
+
+ /* Bit 6: NFC bus width */
+ rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
+
+ /* Bit 7: NFC Page/Spare size */
+ ps = (rcwh >> 7) & 0x1;
+
+ /* Bits [22:21]: ROM Location */
+ romloc = (rcwh >> 21) & 0x3;
+
+ /* Decode RCW bits */
+ switch ((ps << 2) | romloc) {
+ case 0x00:
+ case 0x01:
+ rcw_pagesize = 512;
+ rcw_sparesize = 16;
+ break;
+ case 0x02:
+ case 0x03:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 128;
+ break;
+ case 0x04:
+ case 0x05:
+ rcw_pagesize = 2048;
+ rcw_sparesize = 64;
+ break;
+ case 0x06:
+ case 0x07:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 218;
+ break;
+ }
+
+ mtd->writesize = rcw_pagesize;
+ mtd->oobsize = rcw_sparesize;
+ if (rcw_width == 2)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ dev_notice(prv->dev, "Configured for "
+ "%u-bit NAND, page size %u "
+ "with %u spare.\n",
+ rcw_width * 8, rcw_pagesize,
+ rcw_sparesize);
+ iounmap(rm);
+out:
+ of_node_put(rmnode);
+ return ret;
+}
+
+/* Free driver resources */
+static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ if (prv->clk) {
+ clk_disable(prv->clk);
+ clk_put(prv->clk);
+ }
+
+ if (prv->csreg)
+ iounmap(prv->csreg);
+}
+
+static int __devinit mpc5121_nfc_probe(struct platform_device *op)
+{
+ struct device_node *rootnode, *dn = op->dev.of_node;
+ struct device *dev = &op->dev;
+ struct mpc5121_nfc_prv *prv;
+ struct resource res;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ unsigned long regs_paddr, regs_size;
+ const __be32 *chips_no;
+ int resettime = 0;
+ int retval = 0;
+ int rev, len;
+ struct mtd_part_parser_data ppdata;
+
+ /*
+ * Check SoC revision. This driver supports only NFC
+ * in MPC5121 revision 2 and MPC5123 revision 3.
+ */
+ rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
+ if ((rev != 2) && (rev != 3)) {
+ dev_err(dev, "SoC revision %u is not supported!\n", rev);
+ return -ENXIO;
+ }
+
+ prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
+ if (!prv) {
+ dev_err(dev, "Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ mtd = &prv->mtd;
+ chip = &prv->chip;
+
+ mtd->priv = chip;
+ chip->priv = prv;
+ prv->dev = dev;
+
+ /* Read NFC configuration from Reset Config Word */
+ retval = mpc5121_nfc_read_hw_config(mtd);
+ if (retval) {
+ dev_err(dev, "Unable to read NFC config!\n");
+ return retval;
+ }
+
+ prv->irq = irq_of_parse_and_map(dn, 0);
+ if (prv->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ return -EINVAL;
+ }
+
+ retval = of_address_to_resource(dn, 0, &res);
+ if (retval) {
+ dev_err(dev, "Error parsing memory region!\n");
+ return retval;
+ }
+
+ chips_no = of_get_property(dn, "chips", &len);
+ if (!chips_no || len != sizeof(*chips_no)) {
+ dev_err(dev, "Invalid/missing 'chips' property!\n");
+ return -EINVAL;
+ }
+
+ regs_paddr = res.start;
+ regs_size = resource_size(&res);
+
+ if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
+ dev_err(dev, "Error requesting memory region!\n");
+ return -EBUSY;
+ }
+
+ prv->regs = devm_ioremap(dev, regs_paddr, regs_size);
+ if (!prv->regs) {
+ dev_err(dev, "Error mapping memory region!\n");
+ return -ENOMEM;
+ }
+
+ mtd->name = "MPC5121 NAND";
+ ppdata.of_node = dn;
+ chip->dev_ready = mpc5121_nfc_dev_ready;
+ chip->cmdfunc = mpc5121_nfc_command;
+ chip->read_byte = mpc5121_nfc_read_byte;
+ chip->read_word = mpc5121_nfc_read_word;
+ chip->read_buf = mpc5121_nfc_read_buf;
+ chip->write_buf = mpc5121_nfc_write_buf;
+ chip->verify_buf = mpc5121_nfc_verify_buf;
+ chip->select_chip = mpc5121_nfc_select_chip;
+ chip->options = NAND_NO_AUTOINCR;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ /* Support external chip-select logic on ADS5121 board */
+ rootnode = of_find_node_by_path("/");
+ if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) {
+ retval = ads5121_chipselect_init(mtd);
+ if (retval) {
+ dev_err(dev, "Chipselect init error!\n");
+ of_node_put(rootnode);
+ return retval;
+ }
+
+ chip->select_chip = ads5121_select_chip;
+ }
+ of_node_put(rootnode);
+
+ /* Enable NFC clock */
+ prv->clk = clk_get(dev, "nfc_clk");
+ if (IS_ERR(prv->clk)) {
+ dev_err(dev, "Unable to acquire NFC clock!\n");
+ retval = PTR_ERR(prv->clk);
+ goto error;
+ }
+
+ clk_enable(prv->clk);
+
+ /* Reset NAND Flash controller */
+ nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
+ while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
+ if (resettime++ >= NFC_RESET_TIMEOUT) {
+ dev_err(dev, "Timeout while resetting NFC!\n");
+ retval = -EINVAL;
+ goto error;
+ }
+
+ udelay(1);
+ }
+
+ /* Enable write to NFC memory */
+ nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
+
+ /* Enable write to all NAND pages */
+ nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
+ nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
+ nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
+
+ /*
+ * Setup NFC:
+ * - Big Endian transfers,
+ * - Interrupt after full page read/write.
+ */
+ nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
+ NFC_FULL_PAGE_INT);
+
+ /* Set spare area size */
+ nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
+
+ init_waitqueue_head(&prv->irq_waitq);
+ retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME,
+ mtd);
+ if (retval) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ goto error;
+ }
+
+ /* Detect NAND chips */
+ if (nand_scan(mtd, be32_to_cpup(chips_no))) {
+ dev_err(dev, "NAND Flash not found !\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ retval = -ENXIO;
+ goto error;
+ }
+
+ /* Set erase block size */
+ switch (mtd->erasesize / mtd->writesize) {
+ case 32:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
+ break;
+
+ case 64:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
+ break;
+
+ case 128:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
+ break;
+
+ case 256:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported NAND flash!\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ retval = -ENXIO;
+ goto error;
+ }
+
+ dev_set_drvdata(dev, mtd);
+
+ /* Register device in MTD */
+ retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+ if (retval) {
+ dev_err(dev, "Error adding MTD device!\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ goto error;
+ }
+
+ return 0;
+error:
+ mpc5121_nfc_free(dev, mtd);
+ return retval;
+}
+
+static int __devexit mpc5121_nfc_remove(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ nand_release(mtd);
+ devm_free_irq(dev, prv->irq, mtd);
+ mpc5121_nfc_free(dev, mtd);
+
+ return 0;
+}
+
+static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
+ { .compatible = "fsl,mpc5121-nfc", },
+ {},
+};
+
+static struct platform_driver mpc5121_nfc_driver = {
+ .probe = mpc5121_nfc_probe,
+ .remove = __devexit_p(mpc5121_nfc_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mpc5121_nfc_match,
+ },
+};
+
+module_platform_driver(mpc5121_nfc_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
+MODULE_LICENSE("GPL");
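
mpc5121_nfc_copy_spare() above spreads the OOB area across one hardware spare buffer per 512-byte chunk of the page, keeping an even number of bytes in each: sbsize = (oobsize / (writesize / 512)) & ~1. A small standalone sketch of that arithmetic for the page geometries listed in the RCW decode table, for illustration only (not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		static const struct { unsigned int writesize, oobsize; } geom[] = {
			{ 512, 16 }, { 2048, 64 }, { 4096, 128 }, { 4096, 218 },
		};
		unsigned int i;

		for (i = 0; i < sizeof(geom) / sizeof(geom[0]); i++) {
			/* even number of OOB bytes kept in each chunk's spare buffer */
			unsigned int sbsize =
				(geom[i].oobsize / (geom[i].writesize / 512)) & ~1u;
			printf("%u+%u page: %u spare bytes per buffer\n",
			       geom[i].writesize, geom[i].oobsize, sbsize);
		}
		return 0;
	}

For the 4096+218 geometry the division leaves a remainder, which is why the driver clamps the spare-buffer index and lets the last buffer absorb the leftover bytes, as described in the comment inside mpc5121_nfc_copy_spare().
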
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
new file mode 100644
index 00000000..6f87c746
--- /dev/null
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -0,0 +1,1298 @@
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/completion.h>
+
+#include <asm/mach/flash.h>
+#include <mach/mxc_nand.h>
+#include <mach/hardware.h>
+
+#define DRIVER_NAME "mxc_nand"
+
+#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
+#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
+#define nfc_is_v3_2() (cpu_is_mx51() || cpu_is_mx53())
+#define nfc_is_v3() nfc_is_v3_2()
+
+/* Addresses for NFC registers */
+#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00)
+#define NFC_V1_V2_BUF_ADDR (host->regs + 0x04)
+#define NFC_V1_V2_FLASH_ADDR (host->regs + 0x06)
+#define NFC_V1_V2_FLASH_CMD (host->regs + 0x08)
+#define NFC_V1_V2_CONFIG (host->regs + 0x0a)
+#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
+#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
+#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10)
+#define NFC_V1_V2_WRPROT (host->regs + 0x12)
+#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
+#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
+#define NFC_V21_UNLOCKSTART_BLKADDR0 (host->regs + 0x20)
+#define NFC_V21_UNLOCKSTART_BLKADDR1 (host->regs + 0x24)
+#define NFC_V21_UNLOCKSTART_BLKADDR2 (host->regs + 0x28)
+#define NFC_V21_UNLOCKSTART_BLKADDR3 (host->regs + 0x2c)
+#define NFC_V21_UNLOCKEND_BLKADDR0 (host->regs + 0x22)
+#define NFC_V21_UNLOCKEND_BLKADDR1 (host->regs + 0x26)
+#define NFC_V21_UNLOCKEND_BLKADDR2 (host->regs + 0x2a)
+#define NFC_V21_UNLOCKEND_BLKADDR3 (host->regs + 0x2e)
+#define NFC_V1_V2_NF_WRPRST (host->regs + 0x18)
+#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a)
+#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c)
+
+#define NFC_V2_CONFIG1_ECC_MODE_4 (1 << 0)
+#define NFC_V1_V2_CONFIG1_SP_EN (1 << 2)
+#define NFC_V1_V2_CONFIG1_ECC_EN (1 << 3)
+#define NFC_V1_V2_CONFIG1_INT_MSK (1 << 4)
+#define NFC_V1_V2_CONFIG1_BIG (1 << 5)
+#define NFC_V1_V2_CONFIG1_RST (1 << 6)
+#define NFC_V1_V2_CONFIG1_CE (1 << 7)
+#define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8)
+#define NFC_V2_CONFIG1_PPB(x) (((x) & 0x3) << 9)
+#define NFC_V2_CONFIG1_FP_INT (1 << 11)
+
+#define NFC_V1_V2_CONFIG2_INT (1 << 15)
+
+/*
+ * Operation modes for the NFC. Valid for v1, v2 and v3
+ * type controllers.
+ */
+#define NFC_CMD (1 << 0)
+#define NFC_ADDR (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+
+#define NFC_V3_FLASH_CMD (host->regs_axi + 0x00)
+#define NFC_V3_FLASH_ADDR0 (host->regs_axi + 0x04)
+
+#define NFC_V3_CONFIG1 (host->regs_axi + 0x34)
+#define NFC_V3_CONFIG1_SP_EN (1 << 0)
+#define NFC_V3_CONFIG1_RBA(x) (((x) & 0x7 ) << 4)
+
+#define NFC_V3_ECC_STATUS_RESULT (host->regs_axi + 0x38)
+
+#define NFC_V3_LAUNCH (host->regs_axi + 0x40)
+
+#define NFC_V3_WRPROT (host->regs_ip + 0x0)
+#define NFC_V3_WRPROT_LOCK_TIGHT (1 << 0)
+#define NFC_V3_WRPROT_LOCK (1 << 1)
+#define NFC_V3_WRPROT_UNLOCK (1 << 2)
+#define NFC_V3_WRPROT_BLS_UNLOCK (2 << 6)
+
+#define NFC_V3_WRPROT_UNLOCK_BLK_ADD0 (host->regs_ip + 0x04)
+
+#define NFC_V3_CONFIG2 (host->regs_ip + 0x24)
+#define NFC_V3_CONFIG2_PS_512 (0 << 0)
+#define NFC_V3_CONFIG2_PS_2048 (1 << 0)
+#define NFC_V3_CONFIG2_PS_4096 (2 << 0)
+#define NFC_V3_CONFIG2_ONE_CYCLE (1 << 2)
+#define NFC_V3_CONFIG2_ECC_EN (1 << 3)
+#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4)
+#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5)
+#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6)
+#define NFC_V3_CONFIG2_PPB(x) (((x) & 0x3) << 7)
+#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12)
+#define NFC_V3_CONFIG2_INT_MSK (1 << 15)
+#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24)
+#define NFC_V3_CONFIG2_SPAS(x) (((x) & 0xff) << 16)
+
+#define NFC_V3_CONFIG3 (host->regs_ip + 0x28)
+#define NFC_V3_CONFIG3_ADD_OP(x) (((x) & 0x3) << 0)
+#define NFC_V3_CONFIG3_FW8 (1 << 3)
+#define NFC_V3_CONFIG3_SBB(x) (((x) & 0x7) << 8)
+#define NFC_V3_CONFIG3_NUM_OF_DEVICES(x) (((x) & 0x7) << 12)
+#define NFC_V3_CONFIG3_RBB_MODE (1 << 15)
+#define NFC_V3_CONFIG3_NO_SDMA (1 << 20)
+
+#define NFC_V3_IPC (host->regs_ip + 0x2C)
+#define NFC_V3_IPC_CREQ (1 << 0)
+#define NFC_V3_IPC_INT (1 << 31)
+
+#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34)
+
+struct mxc_nand_host {
+ struct mtd_info mtd;
+ struct nand_chip nand;
+ struct device *dev;
+
+ void *spare0;
+ void *main_area0;
+
+ void __iomem *base;
+ void __iomem *regs;
+ void __iomem *regs_axi;
+ void __iomem *regs_ip;
+ int status_request;
+ struct clk *clk;
+ int clk_act;
+ int irq;
+ int eccsize;
+ int active_cs;
+
+ struct completion op_completion;
+
+ uint8_t *data_buf;
+ unsigned int buf_start;
+ int spare_len;
+
+ void (*preset)(struct mtd_info *);
+ void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_page)(struct mtd_info *, unsigned int);
+ void (*send_read_id)(struct mxc_nand_host *);
+ uint16_t (*get_dev_status)(struct mxc_nand_host *);
+ int (*check_int)(struct mxc_nand_host *);
+ void (*irq_control)(struct mxc_nand_host *, int);
+};
+
+/* OOB placement block for use with hardware ecc generation */
+static struct nand_ecclayout nandv1_hw_eccoob_smallpage = {
+ .eccbytes = 5,
+ .eccpos = {6, 7, 8, 9, 10},
+ .oobfree = {{0, 5}, {12, 4}, }
+};
+
+static struct nand_ecclayout nandv1_hw_eccoob_largepage = {
+ .eccbytes = 20,
+ .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26,
+ 38, 39, 40, 41, 42, 54, 55, 56, 57, 58},
+ .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, }
+};
+
+/* OOB description for 512 byte pages with 16 byte OOB */
+static struct nand_ecclayout nandv2_hw_eccoob_smallpage = {
+ .eccbytes = 1 * 9,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15
+ },
+ .oobfree = {
+ {.offset = 0, .length = 5}
+ }
+};
+
+/* OOB description for 2048 byte pages with 64 byte OOB */
+static struct nand_ecclayout nandv2_hw_eccoob_largepage = {
+ .eccbytes = 4 * 9,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63
+ },
+ .oobfree = {
+ {.offset = 2, .length = 4},
+ {.offset = 16, .length = 7},
+ {.offset = 32, .length = 7},
+ {.offset = 48, .length = 7}
+ }
+};
+
+/* OOB description for 4096 byte pages with 128 byte OOB */
+static struct nand_ecclayout nandv2_hw_eccoob_4k = {
+ .eccbytes = 8 * 9,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ },
+ .oobfree = {
+ {.offset = 2, .length = 4},
+ {.offset = 16, .length = 7},
+ {.offset = 32, .length = 7},
+ {.offset = 48, .length = 7},
+ {.offset = 64, .length = 7},
+ {.offset = 80, .length = 7},
+ {.offset = 96, .length = 7},
+ {.offset = 112, .length = 7},
+ }
+};
+
+static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
+
+static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+{
+ struct mxc_nand_host *host = dev_id;
+
+ if (!host->check_int(host))
+ return IRQ_NONE;
+
+ host->irq_control(host, 0);
+
+ complete(&host->op_completion);
+
+ return IRQ_HANDLED;
+}
+
+static int check_int_v3(struct mxc_nand_host *host)
+{
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_IPC);
+ if (!(tmp & NFC_V3_IPC_INT))
+ return 0;
+
+ tmp &= ~NFC_V3_IPC_INT;
+ writel(tmp, NFC_V3_IPC);
+
+ return 1;
+}
+
+static int check_int_v1_v2(struct mxc_nand_host *host)
+{
+ uint32_t tmp;
+
+ tmp = readw(NFC_V1_V2_CONFIG2);
+ if (!(tmp & NFC_V1_V2_CONFIG2_INT))
+ return 0;
+
+ if (!cpu_is_mx21())
+ writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
+
+ return 1;
+}
+
+/*
+ * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit
+ * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the
+ * driver can enable/disable the irq line rather than simply masking the
+ * interrupts.
+ */
+static void irq_control_mx21(struct mxc_nand_host *host, int activate)
+{
+ if (activate)
+ enable_irq(host->irq);
+ else
+ disable_irq_nosync(host->irq);
+}
+
+static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
+{
+ uint16_t tmp;
+
+ tmp = readw(NFC_V1_V2_CONFIG1);
+
+ if (activate)
+ tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK;
+ else
+ tmp |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ writew(tmp, NFC_V1_V2_CONFIG1);
+}
+
+static void irq_control_v3(struct mxc_nand_host *host, int activate)
+{
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_CONFIG2);
+
+ if (activate)
+ tmp &= ~NFC_V3_CONFIG2_INT_MSK;
+ else
+ tmp |= NFC_V3_CONFIG2_INT_MSK;
+
+ writel(tmp, NFC_V3_CONFIG2);
+}
+
+/* This function polls the NANDFC to wait for the basic operation to
+ * complete by checking the INT bit of config2 register.
+ */
+static void wait_op_done(struct mxc_nand_host *host, int useirq)
+{
+ int max_retries = 8000;
+
+ if (useirq) {
+ if (!host->check_int(host)) {
+ INIT_COMPLETION(host->op_completion);
+ host->irq_control(host, 1);
+ wait_for_completion(&host->op_completion);
+ }
+ } else {
+ while (max_retries-- > 0) {
+ if (host->check_int(host))
+ break;
+
+ udelay(1);
+ }
+ if (max_retries < 0)
+ pr_debug("%s: INT not set\n", __func__);
+ }
+}
+
+static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
+{
+ /* fill command */
+ writel(cmd, NFC_V3_FLASH_CMD);
+
+ /* send out command */
+ writel(NFC_CMD, NFC_V3_LAUNCH);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, useirq);
+}
+
+/* This function issues the specified command to the NAND device and
+ * waits for completion. */
+static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
+{
+ pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+
+ writew(cmd, NFC_V1_V2_FLASH_CMD);
+ writew(NFC_CMD, NFC_V1_V2_CONFIG2);
+
+ if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
+ int max_retries = 100;
+ /* Reset completion is indicated by NFC_CONFIG2 */
+ /* being set to 0 */
+ while (max_retries-- > 0) {
+ if (readw(NFC_V1_V2_CONFIG2) == 0) {
+ break;
+ }
+ udelay(1);
+ }
+ if (max_retries < 0)
+ pr_debug("%s: RESET failed\n", __func__);
+ } else {
+ /* Wait for operation to complete */
+ wait_op_done(host, useirq);
+ }
+}
+
+static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
+{
+ /* fill address */
+ writel(addr, NFC_V3_FLASH_ADDR0);
+
+ /* send out address */
+ writel(NFC_ADDR, NFC_V3_LAUNCH);
+
+ wait_op_done(host, 0);
+}
+
+/* This function sends an address (or partial address) to the
+ * NAND device. The address is used to select the source/destination for
+ * a NAND command. */
+static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
+{
+ pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
+
+ writew(addr, NFC_V1_V2_FLASH_ADDR);
+ writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, islast);
+}
+
+static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_CONFIG1);
+ tmp &= ~(7 << 4);
+ writel(tmp, NFC_V3_CONFIG1);
+
+ /* transfer data from NFC ram to nand */
+ writel(ops, NFC_V3_LAUNCH);
+
+ wait_op_done(host, false);
+}
+
+static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ int bufs, i;
+
+ if (nfc_is_v1() && mtd->writesize > 512)
+ bufs = 4;
+ else
+ bufs = 1;
+
+ for (i = 0; i < bufs; i++) {
+
+ /* NANDFC buffer 0 is used for page read/write */
+ writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
+
+ writew(ops, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+ }
+}
+
+static void send_read_id_v3(struct mxc_nand_host *host)
+{
+ /* Read ID into main buffer */
+ writel(NFC_ID, NFC_V3_LAUNCH);
+
+ wait_op_done(host, true);
+
+ memcpy(host->data_buf, host->main_area0, 16);
+}
+
+/* Request the NANDFC to perform a read of the NAND device ID. */
+static void send_read_id_v1_v2(struct mxc_nand_host *host)
+{
+ struct nand_chip *this = &host->nand;
+
+ /* NANDFC buffer 0 is used for device ID output */
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+ writew(NFC_ID, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+
+ memcpy(host->data_buf, host->main_area0, 16);
+
+ if (this->options & NAND_BUSWIDTH_16) {
+ /* compress the ID info */
+ host->data_buf[1] = host->data_buf[2];
+ host->data_buf[2] = host->data_buf[4];
+ host->data_buf[3] = host->data_buf[6];
+ host->data_buf[4] = host->data_buf[8];
+ host->data_buf[5] = host->data_buf[10];
+ }
+}
+
+static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
+{
+ writew(NFC_STATUS, NFC_V3_LAUNCH);
+ wait_op_done(host, true);
+
+ return readl(NFC_V3_CONFIG1) >> 16;
+}
+
+/* This function requests the NANDFC to perform a read of the
+ * NAND device status and returns the current status. */
+static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host)
+{
+ void __iomem *main_buf = host->main_area0;
+ uint32_t store;
+ uint16_t ret;
+
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+ /*
+ * The device status is stored in main_area0. To
+ * prevent corruption of the buffer save the value
+ * and restore it afterwards.
+ */
+ store = readl(main_buf);
+
+ writew(NFC_STATUS, NFC_V1_V2_CONFIG2);
+ wait_op_done(host, true);
+
+ ret = readw(main_buf);
+
+ writel(store, main_buf);
+
+ return ret;
+}
+
+/* This function is used by the upper layer to check if the device is ready */
+static int mxc_nand_dev_ready(struct mtd_info *mtd)
+{
+ /*
+ * NFC handles R/B internally. Therefore, this function
+ * always returns status as ready.
+ */
+ return 1;
+}
+
+static void mxc_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ /*
+ * If HW ECC is enabled, we turn it on during init. There is
+ * no need to enable again here.
+ */
+}
+
+static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ /*
+ * 1-Bit errors are automatically corrected in HW. No need for
+ * additional correction. 2-Bit errors cannot be corrected by
+ * HW ECC, so we need to return failure
+ */
+ uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT);
+
+ if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
+ pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ u32 ecc_stat, err;
+ int no_subpages = 1;
+ int ret = 0;
+ u8 ecc_bit_mask, err_limit;
+
+ ecc_bit_mask = (host->eccsize == 4) ? 0x7 : 0xf;
+ err_limit = (host->eccsize == 4) ? 0x4 : 0x8;
+
+ no_subpages = mtd->writesize >> 9;
+
+ if (nfc_is_v21())
+ ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT);
+ else
+ ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT);
+
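+ /*
+ * The ECC status register holds one 4-bit field per 512-byte subpage
+ * with the number of corrected symbols; anything above err_limit
+ * (4 or 8 depending on the ECC mode) means the subpage is uncorrectable.
+ */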
+ do {
+ err = ecc_stat & ecc_bit_mask;
+ if (err > err_limit) {
+ printk(KERN_WARNING "UnCorrectable RS-ECC Error\n");
+ return -1;
+ } else {
+ ret += err;
+ }
+ ecc_stat >>= 4;
+ } while (--no_subpages);
+
+ mtd->ecc_stats.corrected += ret;
+ pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
+
+ return ret;
+}
+
+static int mxc_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ return 0;
+}
+
+static u_char mxc_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint8_t ret;
+
+ /* Check for status request */
+ if (host->status_request)
+ return host->get_dev_status(host) & 0xFF;
+
+ ret = *(uint8_t *)(host->data_buf + host->buf_start);
+ host->buf_start++;
+
+ return ret;
+}
+
+static uint16_t mxc_nand_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t ret;
+
+ ret = *(uint16_t *)(host->data_buf + host->buf_start);
+ host->buf_start += 2;
+
+ return ret;
+}
+
+/* Write data of length len from buffer buf. The data to be
+ * written to the NAND flash is first copied to the RAM buffer. After the
+ * data input operation by the NFC, the data is written to the NAND flash. */
+static void mxc_nand_write_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ u16 col = host->buf_start;
+ int n = mtd->oobsize + mtd->writesize - col;
+
+ n = min(n, len);
+
+ memcpy(host->data_buf + col, buf, n);
+
+ host->buf_start += n;
+}
+
+/* Read the data buffer from the NAND flash. The NFC first initiates a data
+ * output cycle, which copies the data to the RAM buffer. This data of length
+ * len is then copied to buffer buf.
+ */
+static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ u16 col = host->buf_start;
+ int n = mtd->oobsize + mtd->writesize - col;
+
+ n = min(n, len);
+
+ memcpy(buf, host->data_buf + col, n);
+
+ host->buf_start += n;
+}
+
+/* Used by the upper layer to verify the data in the NAND flash against
+ * the data in buf. Not supported by this driver; a mismatch is always
+ * reported. */
+static int mxc_nand_verify_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ return -EFAULT;
+}
+
+/* This function is used by the upper layer to select and
+ * deselect the NAND chip */
+static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ if (chip == -1) {
+ /* Disable the NFC clock */
+ if (host->clk_act) {
+ clk_disable(host->clk);
+ host->clk_act = 0;
+ }
+ return;
+ }
+
+ if (!host->clk_act) {
+ /* Enable the NFC clock */
+ clk_enable(host->clk);
+ host->clk_act = 1;
+ }
+
+ if (nfc_is_v21()) {
+ host->active_cs = chip;
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+ }
+}
+
+/*
+ * Function to transfer data to/from spare area.
+ */
+static void copy_spare(struct mtd_info *mtd, bool bfrom)
+{
+ struct nand_chip *this = mtd->priv;
+ struct mxc_nand_host *host = this->priv;
+ u16 i, j;
+ u16 n = mtd->writesize >> 9;
+ u8 *d = host->data_buf + mtd->writesize;
+ u8 *s = host->spare0;
+ u16 t = host->spare_len;
+
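+ /*
+ * n is the number of 512-byte sections per page, t the stride between
+ * the controller's spare buffers, and j the number of OOB bytes per
+ * section in the packed data_buf layout, rounded down to an even value.
+ * bfrom selects the direction: true copies from the spare buffers into
+ * data_buf, false copies back.
+ */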
+ j = (mtd->oobsize / n >> 1) << 1;
+
+ if (bfrom) {
+ for (i = 0; i < n - 1; i++)
+ memcpy(d + i * j, s + i * t, j);
+
+ /* the last section */
+ memcpy(d + i * j, s + i * t, mtd->oobsize - i * j);
+ } else {
+ for (i = 0; i < n - 1; i++)
+ memcpy(&s[i * t], &d[i * j], j);
+
+ /* the last section */
+ memcpy(&s[i * t], &d[i * j], mtd->oobsize - i * j);
+ }
+}
+
+static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ /* Write out column address, if necessary */
+ if (column != -1) {
+ /*
+ * MXC NANDFC can only perform full page+spare or
+ * spare-only read/write. When the upper layers
+ * perform a read/write buf operation, the saved column
+ * address is used to index into the full page.
+ */
+ host->send_addr(host, 0, page_addr == -1);
+ if (mtd->writesize > 512)
+ /* another col addr cycle for 2k page */
+ host->send_addr(host, 0, false);
+ }
+
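+ /*
+ * Devices larger than 256 MiB with 2k pages, or larger than 64 MiB
+ * with 512-byte pages, need a third row-address cycle below.
+ */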
+ /* Write out page address, if necessary */
+ if (page_addr != -1) {
+ /* paddr_0 - p_addr_7 */
+ host->send_addr(host, (page_addr & 0xff), false);
+
+ if (mtd->writesize > 512) {
+ if (mtd->size >= 0x10000000) {
+ /* paddr_8 - paddr_15 */
+ host->send_addr(host, (page_addr >> 8) & 0xff, false);
+ host->send_addr(host, (page_addr >> 16) & 0xff, true);
+ } else
+ /* paddr_8 - paddr_15 */
+ host->send_addr(host, (page_addr >> 8) & 0xff, true);
+ } else {
+ /* One more address cycle for higher density devices */
+ if (mtd->size >= 0x4000000) {
+ /* paddr_8 - paddr_15 */
+ host->send_addr(host, (page_addr >> 8) & 0xff, false);
+ host->send_addr(host, (page_addr >> 16) & 0xff, true);
+ } else
+ /* paddr_8 - paddr_15 */
+ host->send_addr(host, (page_addr >> 8) & 0xff, true);
+ }
+ }
+}
+
+/*
+ * v2 and v3 type controllers can do 4bit or 8bit ecc depending
+ * on how much oob the nand chip has. For 8bit ecc we need at least
+ * 26 bytes of oob data per 512 byte block.
+ */
+static int get_eccsize(struct mtd_info *mtd)
+{
+ int oobbytes_per_512 = 0;
+
+ oobbytes_per_512 = mtd->oobsize * 512 / mtd->writesize;
+
+ if (oobbytes_per_512 < 26)
+ return 4;
+ else
+ return 8;
+}
+
+static void preset_v1_v2(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t config1 = 0;
+
+ if (nand_chip->ecc.mode == NAND_ECC_HW)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+ if (nfc_is_v21())
+ config1 |= NFC_V2_CONFIG1_FP_INT;
+
+ if (!cpu_is_mx21())
+ config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ if (nfc_is_v21() && mtd->writesize) {
+ uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
+
+ host->eccsize = get_eccsize(mtd);
+ if (host->eccsize == 4)
+ config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
+
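+ /*
+ * Pages-per-block is encoded as ffs(ppb) - 6, so 32/64/128/256
+ * pages per block map to field values 0..3.
+ */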
+ config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
+ } else {
+ host->eccsize = 1;
+ }
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+ /* preset operation */
+
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, NFC_V1_V2_CONFIG);
+
+ /* Blocks to be unlocked */
+ if (nfc_is_v21()) {
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
+ } else if (nfc_is_v1()) {
+ writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
+ writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
+ } else
+ BUG();
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static void preset_v3(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mxc_nand_host *host = chip->priv;
+ uint32_t config2, config3;
+ int i, addr_phases;
+
+ writel(NFC_V3_CONFIG1_RBA(0), NFC_V3_CONFIG1);
+ writel(NFC_V3_IPC_CREQ, NFC_V3_IPC);
+
+ /* Unlock the internal RAM Buffer */
+ writel(NFC_V3_WRPROT_BLS_UNLOCK | NFC_V3_WRPROT_UNLOCK,
+ NFC_V3_WRPROT);
+
+ /* Blocks to be unlocked */
+ for (i = 0; i < NAND_MAX_CHIPS; i++)
+ writel(0x0 | (0xffff << 16),
+ NFC_V3_WRPROT_UNLOCK_BLK_ADD0 + (i << 2));
+
+ writel(0, NFC_V3_IPC);
+
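+ /* SPAS is given in 16-bit words, hence the oobsize >> 1 below */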
+ config2 = NFC_V3_CONFIG2_ONE_CYCLE |
+ NFC_V3_CONFIG2_2CMD_PHASES |
+ NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) |
+ NFC_V3_CONFIG2_ST_CMD(0x70) |
+ NFC_V3_CONFIG2_INT_MSK |
+ NFC_V3_CONFIG2_NUM_ADDR_PHASE0;
+
+ if (chip->ecc.mode == NAND_ECC_HW)
+ config2 |= NFC_V3_CONFIG2_ECC_EN;
+
+ addr_phases = fls(chip->pagemask) >> 3;
+
+ if (mtd->writesize == 2048) {
+ config2 |= NFC_V3_CONFIG2_PS_2048;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
+ } else if (mtd->writesize == 4096) {
+ config2 |= NFC_V3_CONFIG2_PS_4096;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
+ } else {
+ config2 |= NFC_V3_CONFIG2_PS_512;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases - 1);
+ }
+
+ if (mtd->writesize) {
+ config2 |= NFC_V3_CONFIG2_PPB(ffs(mtd->erasesize / mtd->writesize) - 6);
+ host->eccsize = get_eccsize(mtd);
+ if (host->eccsize == 8)
+ config2 |= NFC_V3_CONFIG2_ECC_MODE_8;
+ }
+
+ writel(config2, NFC_V3_CONFIG2);
+
+ config3 = NFC_V3_CONFIG3_NUM_OF_DEVICES(0) |
+ NFC_V3_CONFIG3_NO_SDMA |
+ NFC_V3_CONFIG3_RBB_MODE |
+ NFC_V3_CONFIG3_SBB(6) | /* Reset default */
+ NFC_V3_CONFIG3_ADD_OP(0);
+
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ config3 |= NFC_V3_CONFIG3_FW8;
+
+ writel(config3, NFC_V3_CONFIG3);
+
+ writel(0, NFC_V3_DELAY_LINE);
+}
+
+/* Used by the upper layer to send a command to the NAND flash for the
+ * different operations to be carried out on the NAND flash */
+static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+ command, column, page_addr);
+
+ /* Reset command state information */
+ host->status_request = false;
+
+ /* Command pre-processing step */
+ switch (command) {
+ case NAND_CMD_RESET:
+ host->preset(mtd);
+ host->send_cmd(host, command, false);
+ break;
+
+ case NAND_CMD_STATUS:
+ host->buf_start = 0;
+ host->status_request = true;
+
+ host->send_cmd(host, command, true);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ break;
+
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ if (command == NAND_CMD_READ0)
+ host->buf_start = column;
+ else
+ host->buf_start = column + mtd->writesize;
+
+ command = NAND_CMD_READ0; /* only READ0 is valid */
+
+ host->send_cmd(host, command, false);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+
+ if (mtd->writesize > 512)
+ host->send_cmd(host, NAND_CMD_READSTART, true);
+
+ host->send_page(mtd, NFC_OUTPUT);
+
+ memcpy(host->data_buf, host->main_area0, mtd->writesize);
+ copy_spare(mtd, true);
+ break;
+
+ case NAND_CMD_SEQIN:
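+ /*
+ * The NFC always transfers a full page plus spare, so an OOB-only
+ * write (column beyond the page) first reads the page back into
+ * data_buf and then rewrites it together with the new OOB data.
+ */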
+ if (column >= mtd->writesize)
+ /* call ourself to read a page */
+ mxc_nand_command(mtd, NAND_CMD_READ0, 0, page_addr);
+
+ host->buf_start = column;
+
+ host->send_cmd(host, command, false);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ memcpy(host->main_area0, host->data_buf, mtd->writesize);
+ copy_spare(mtd, false);
+ host->send_page(mtd, NFC_INPUT);
+ host->send_cmd(host, command, true);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ break;
+
+ case NAND_CMD_READID:
+ host->send_cmd(host, command, true);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ host->send_read_id(host);
+ host->buf_start = column;
+ break;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ host->send_cmd(host, command, false);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+
+ break;
+ }
+}
+
+/*
+ * The generic flash bbt descriptors overlap with our ECC
+ * hardware, so define some i.MX specific ones.
+ */
+static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+static int __init mxcnd_probe(struct platform_device *pdev)
+{
+ struct nand_chip *this;
+ struct mtd_info *mtd;
+ struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct mxc_nand_host *host;
+ struct resource *res;
+ int err = 0;
+ struct nand_ecclayout *oob_smallpage, *oob_largepage;
+
+ /* Allocate memory for MTD device structure and private data */
+ host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
+ NAND_MAX_OOBSIZE, GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->data_buf = (uint8_t *)(host + 1);
+
+ host->dev = &pdev->dev;
+ /* structures must be linked */
+ this = &host->nand;
+ mtd = &host->mtd;
+ mtd->priv = this;
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
+ mtd->name = DRIVER_NAME;
+
+ /* 5 us command delay time */
+ this->chip_delay = 5;
+
+ this->priv = host;
+ this->dev_ready = mxc_nand_dev_ready;
+ this->cmdfunc = mxc_nand_command;
+ this->select_chip = mxc_nand_select_chip;
+ this->read_byte = mxc_nand_read_byte;
+ this->read_word = mxc_nand_read_word;
+ this->write_buf = mxc_nand_write_buf;
+ this->read_buf = mxc_nand_read_buf;
+ this->verify_buf = mxc_nand_verify_buf;
+
+ host->clk = clk_get(&pdev->dev, "nfc");
+ if (IS_ERR(host->clk)) {
+ err = PTR_ERR(host->clk);
+ goto eclk;
+ }
+
+ clk_enable(host->clk);
+ host->clk_act = 1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -ENODEV;
+ goto eres;
+ }
+
+ host->base = ioremap(res->start, resource_size(res));
+ if (!host->base) {
+ err = -ENOMEM;
+ goto eres;
+ }
+
+ host->main_area0 = host->base;
+
+ if (nfc_is_v1() || nfc_is_v21()) {
+ host->preset = preset_v1_v2;
+ host->send_cmd = send_cmd_v1_v2;
+ host->send_addr = send_addr_v1_v2;
+ host->send_page = send_page_v1_v2;
+ host->send_read_id = send_read_id_v1_v2;
+ host->get_dev_status = get_dev_status_v1_v2;
+ host->check_int = check_int_v1_v2;
+ if (cpu_is_mx21())
+ host->irq_control = irq_control_mx21;
+ else
+ host->irq_control = irq_control_v1_v2;
+ }
+
+ if (nfc_is_v21()) {
+ host->regs = host->base + 0x1e00;
+ host->spare0 = host->base + 0x1000;
+ host->spare_len = 64;
+ oob_smallpage = &nandv2_hw_eccoob_smallpage;
+ oob_largepage = &nandv2_hw_eccoob_largepage;
+ this->ecc.bytes = 9;
+ } else if (nfc_is_v1()) {
+ host->regs = host->base + 0xe00;
+ host->spare0 = host->base + 0x800;
+ host->spare_len = 16;
+ oob_smallpage = &nandv1_hw_eccoob_smallpage;
+ oob_largepage = &nandv1_hw_eccoob_largepage;
+ this->ecc.bytes = 3;
+ host->eccsize = 1;
+ } else if (nfc_is_v3_2()) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ err = -ENODEV;
+ goto eirq;
+ }
+ host->regs_ip = ioremap(res->start, resource_size(res));
+ if (!host->regs_ip) {
+ err = -ENOMEM;
+ goto eirq;
+ }
+ host->regs_axi = host->base + 0x1e00;
+ host->spare0 = host->base + 0x1000;
+ host->spare_len = 64;
+ host->preset = preset_v3;
+ host->send_cmd = send_cmd_v3;
+ host->send_addr = send_addr_v3;
+ host->send_page = send_page_v3;
+ host->send_read_id = send_read_id_v3;
+ host->check_int = check_int_v3;
+ host->get_dev_status = get_dev_status_v3;
+ host->irq_control = irq_control_v3;
+ oob_smallpage = &nandv2_hw_eccoob_smallpage;
+ oob_largepage = &nandv2_hw_eccoob_largepage;
+ } else
+ BUG();
+
+ this->ecc.size = 512;
+ this->ecc.layout = oob_smallpage;
+
+ if (pdata->hw_ecc) {
+ this->ecc.calculate = mxc_nand_calculate_ecc;
+ this->ecc.hwctl = mxc_nand_enable_hwecc;
+ if (nfc_is_v1())
+ this->ecc.correct = mxc_nand_correct_data_v1;
+ else
+ this->ecc.correct = mxc_nand_correct_data_v2_v3;
+ this->ecc.mode = NAND_ECC_HW;
+ } else {
+ this->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ /* NAND bus width determines the access functions used by the upper layer */
+ if (pdata->width == 2)
+ this->options |= NAND_BUSWIDTH_16;
+
+ if (pdata->flash_bbt) {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ /* update flash based bbt */
+ this->bbt_options |= NAND_BBT_USE_FLASH;
+ }
+
+ init_completion(&host->op_completion);
+
+ host->irq = platform_get_irq(pdev, 0);
+
+ /*
+ * Mask the interrupt. For i.MX21 explicitly call
+ * irq_control_v1_v2 to use the mask bit. We can't call
+ * disable_irq_nosync() for an interrupt we do not own yet.
+ */
+ if (cpu_is_mx21())
+ irq_control_v1_v2(host, 0);
+ else
+ host->irq_control(host, 0);
+
+ err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
+ if (err)
+ goto eirq;
+
+ host->irq_control(host, 0);
+
+ /*
+ * Now that the interrupt is disabled make sure the interrupt
+ * mask bit is cleared on i.MX21. Otherwise we can't read
+ * the interrupt status bit on this machine.
+ */
+ if (cpu_is_mx21())
+ irq_control_v1_v2(host, 1);
+
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
+ err = -ENXIO;
+ goto escan;
+ }
+
+ /* Call preset again, with correct writesize this time */
+ host->preset(mtd);
+
+ if (mtd->writesize == 2048)
+ this->ecc.layout = oob_largepage;
+ if (nfc_is_v21() && mtd->writesize == 4096)
+ this->ecc.layout = &nandv2_hw_eccoob_4k;
+
+ if (this->ecc.mode == NAND_ECC_HW) {
+ if (nfc_is_v1())
+ this->ecc.strength = 1;
+ else
+ this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
+ }
+
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
+ err = -ENXIO;
+ goto escan;
+ }
+
+ /* Register the partitions */
+ mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts,
+ pdata->nr_parts);
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+
+escan:
+ free_irq(host->irq, host);
+eirq:
+ if (host->regs_ip)
+ iounmap(host->regs_ip);
+ iounmap(host->base);
+eres:
+ clk_put(host->clk);
+eclk:
+ kfree(host);
+
+ return err;
+}
+
+static int __devexit mxcnd_remove(struct platform_device *pdev)
+{
+ struct mxc_nand_host *host = platform_get_drvdata(pdev);
+
+ clk_put(host->clk);
+
+ platform_set_drvdata(pdev, NULL);
+
+ nand_release(&host->mtd);
+ free_irq(host->irq, host);
+ if (host->regs_ip)
+ iounmap(host->regs_ip);
+ iounmap(host->base);
+ kfree(host);
+
+ return 0;
+}
+
+static struct platform_driver mxcnd_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .remove = __devexit_p(mxcnd_remove),
+};
+
+static int __init mxc_nd_init(void)
+{
+ return platform_driver_probe(&mxcnd_driver, mxcnd_probe);
+}
+
+static void __exit mxc_nd_cleanup(void)
+{
+ /* Unregister the device structure */
+ platform_driver_unregister(&mxcnd_driver);
+}
+
+module_init(mxc_nd_init);
+module_exit(mxc_nd_cleanup);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MXC NAND MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
new file mode 100644
index 00000000..00be451a
--- /dev/null
+++ b/drivers/mtd/nand/nand_base.c
@@ -0,0 +1,4332 @@
+/*
+ * drivers/mtd/nand.c
+ *
+ * Overview:
+ * This is the generic MTD driver for NAND flash devices. It should be
+ * capable of working with almost all NAND chips currently available.
+ * Basic support for AG-AND chips is provided.
+ *
+ * Additional technical information is available on
+ * http://www.linux-mtd.infradead.org/doc/nand.html
+ *
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * TODO:
+ * Enable cached programming for 2k page size chips
+ * Check, if mtd->ecctype should be set to MTD_ECC_HW
+ * if we have HW ECC support.
+ * The AG-AND chips have nice features for speed improvement,
+ * which are not supported yet. Read / program 4 pages in one go.
+ * BBT table is not serialized, has to be fixed
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/leds.h>
+#include <linux/io.h>
+#include <mach/hardware.h>
+#include <linux/mtd/partitions.h>
+#include "../../../arch/arm/mach-wmt/wmt_clk.h"
+
+#include "wmt_nand.h"
+#define myDEBUG
+//#undef myDEBUG
+#ifdef myDEBUG
+#define DPRINTK(fmt, args...) printk("%s: " fmt, __FUNCTION__ , ## args)
+#else
+#define DPRINTK(fmt, args...)
+#endif
+
+//#define DBG_60BIT_ECC
+
+#ifdef NAND_BBT_BCH_ECC
+
+#if(CONFIG_MTD_NAND_PAGE_SIZE == 2048)
+static struct nand_ecclayout wmt_oobinfo_2048_backup = {
+ /* nand flash new structure and BCH ECC oob info */
+ .eccbytes = 40,
+ .eccpos = { 0, 1, 2, 3, 4, 5, 6, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 61, 62, 63},
+ .oobavail = 16,
+ .oobfree = {{9, 4},{25, 4},{41, 4},{57, 4}}
+};
+
+static struct nand_ecclayout wmt_hm_oobinfo_2048_backup = {
+ /* nand flash old structure and Hamming ECC oob info */
+ .eccbytes = 14,
+ .eccpos = { 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45, 46, 48, 49},
+ .oobavail = 32,
+ .oobfree = {{0, 32}}
+};
+#else
+
+static struct nand_ecclayout wmt_hm_oobinfo_4096_backup = {
+ /* nand flash old structure and Hamming ECC oob info */
+ .eccbytes = 27,
+ .eccpos = { 64, 65, 66, 68, 69, 70, 72, 73, 74, 76, 77, 78,
+ 80, 81, 82, 84, 85, 86, 88, 89, 90, 92, 93, 94,
+ 96, 97, 98},
+ .oobavail = 64,
+ .oobfree = {{0, 32}}
+};
+
+static struct nand_ecclayout wmt_oobinfo_4096_backup = {
+ /* nand flash new structure and BCH ECC oob info */
+ .eccbytes = 80,
+ .eccpos = { 0, 1, 2, 3, 4, 5, 6, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 61, 62, 63},
+ // 64, 65, 66, 67, 68, 69, 70, 77, 78, 79,
+ // 80, 81, 82, 83, 84, 85, 86, 93, 94, 95,
+ // 96, 97, 98, 99, 100,101,102,109,110,111,
+ // 112,113,114,115,116,117,118,125,126,127},
+ .oobavail = 16,
+ .oobfree = {{9, 4},{25, 4},{41, 4},{57, 4}}
+ // .oobfree = {{9, 4},{25, 4},{41, 4},{57, 4},{73,4},{89,4},{105,4},{121,4}}
+};
+#endif
+
+#endif
+extern struct nand_bbt_descr largepage_flashbased;
+extern int second_chip;
+/* Define default oob placement schemes for large and small page devices */
+static struct nand_ecclayout nand_oob_8 = {
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2},
+ .oobfree = {
+ {.offset = 3,
+ .length = 2},
+ {.offset = 6,
+ .length = 2} }
+};
+
+static struct nand_ecclayout nand_oob_16 = {
+ .eccbytes = 6,
+ .eccpos = {0, 1, 2, 3, 6, 7},
+ .oobfree = {
+ {.offset = 8,
+ .length = 8} }
+};
+
+static struct nand_ecclayout nand_oob_64 = {
+ .eccbytes = 24,
+ .eccpos = {
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = {
+ {.offset = 2,
+ .length = 38} }
+};
+
+static struct nand_ecclayout nand_oob_128 = {
+ .eccbytes = 48,
+ .eccpos = {
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127},
+ .oobfree = {
+ {.offset = 2,
+ .length = 78} }
+};
+
+static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd,
+ int new_state);
+
+static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops);
+
+/*
+ * For devices which display every fart in the system on a separate LED. Is
+ * compiled away when LED support is disabled.
+ */
+//DEFINE_LED_TRIGGER(nand_led_trigger);
+
+static int check_offs_len(struct mtd_info *mtd,
+ loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = 0;
+
+ /* Start address must align on block boundary */
+ if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
+ pr_debug("%s: unaligned address\n", __func__);
+ ret = -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (len & ((1 << chip->phys_erase_shift) - 1)) {
+ pr_debug("%s: length not block aligned\n", __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_release_device - [GENERIC] release chip
+ * @mtd: MTD device structure
+ *
+ * Deselect, release chip lock and wake up anyone waiting on the device.
+ */
+static void nand_release_device(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ /* De-select the NAND device */
+ chip->select_chip(mtd, -1);
+
+ /* Release the controller and the chip */
+ spin_lock(&chip->controller->lock);
+ chip->controller->active = NULL;
+ chip->state = FL_READY;
+ wake_up(&chip->controller->wq);
+ spin_unlock(&chip->controller->lock);
+ auto_pll_divisor(DEV_NAND, CLK_DISABLE, 0, 0);
+}
+
+/**
+ * nand_read_byte - [DEFAULT] read one byte from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 8bit buswidth
+ */
+static uint8_t nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ return readb(chip->IO_ADDR_R);
+}
+
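+/*
+ * Reboot notifier: for Hynix chips that use read-retry, restore the
+ * default read-retry parameters before the system goes down.
+ */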
+int wmt_recovery_call(struct notifier_block *nb, unsigned long code, void *_cmd)
+{
+ struct mtd_info *mtd = NULL;
+ struct nand_chip *chip = NULL;
+ mtd = container_of(nb, struct mtd_info, reboot_notifier);
+ chip = (struct nand_chip *)mtd->priv;
+
+ if(chip->cur_chip && (((mtd->id >>24)&0xff) == NAND_MFR_HYNIX)) {
+ nand_get_device(chip, mtd, FL_WRITING);
+ #ifdef RETRY_DEBUG
+ printk("current try times: %d\n", chip->cur_chip->cur_try_times);
+ #endif
+ chip->select_chip(mtd, 0);
+ chip->cur_chip->set_parameter(mtd, READ_RETRY_MODE, DEFAULT_VALUE);
+ //chip->cur_chip->get_parameter(mtd,READ_RETRY_MODE);
+ chip->select_chip(mtd, -1);
+ nand_release_device(mtd);
+ }
+ return NOTIFY_DONE;
+}
+EXPORT_SYMBOL(wmt_recovery_call);
+
+/**
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth with endianness conversion.
+ *
+ */
+static uint8_t nand_read_byte16(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
+}
+
+/**
+ * nand_read_word - [DEFAULT] read one word from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth without endianness conversion.
+ */
+static u16 nand_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ return readw(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_select_chip - [DEFAULT] control CE line
+ * @mtd: MTD device structure
+ * @chipnr: chipnumber to select, -1 for deselect
+ *
+ * Default select function for 1 chip devices.
+ */
+static void nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ switch (chipnr) {
+ case -1:
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+ break;
+ case 0:
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/**
+ * nand_write_buf - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 8bit buswidth.
+ */
+static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ for (i = 0; i < len; i++)
+ writeb(buf[i], chip->IO_ADDR_W);
+}
+
+/**
+ * nand_read_buf - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 8bit buswidth.
+ */
+static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ for (i = 0; i < len; i++)
+ buf[i] = readb(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_verify_buf - [DEFAULT] Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ *
+ * Default verify function for 8bit buswidth.
+ */
+static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ for (i = 0; i < len; i++)
+ if (buf[i] != readb(chip->IO_ADDR_R))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * nand_write_buf16 - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 16bit buswidth.
+ */
+static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ writew(p[i], chip->IO_ADDR_W);
+
+}
+
+/**
+ * nand_read_buf16 - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 16bit buswidth.
+ */
+static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ p[i] = readw(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ *
+ * Default verify function for 16bit buswidth.
+ */
+static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ if (p[i] != readw(chip->IO_ADDR_R))
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * nand_block_bad - [DEFAULT] Read bad block marker from the chip
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
+ *
+ * Check, if the block is bad.
+ */
+static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ int page, chipnr, res = 0, i = 0;
+ struct nand_chip *chip = mtd->priv;
+ u16 bad;
+ int page1 = 0, pagecnt = mtd->pagecnt;
+
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+ ofs += mtd->erasesize - mtd->writesize;
+
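+ /*
+ * On multi-plane devices a logical block spans two physical blocks,
+ * so the bad block marker is checked in both planes (page and
+ * page + pagecnt).
+ */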
+ if (mtd->planenum > 1) {//dan_multi
+ page = ((int)(ofs >> chip->page_shift) * mtd->planenum);
+ page1 = page + pagecnt;
+ page &= chip->pagemask;
+ page1 &= chip->pagemask;
+ } else
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+ if (getchip) {
+ //chipnr = (int)(ofs >> chip->chip_shift);
+ chipnr = ((int)(ofs >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
+
+ nand_get_device(chip, mtd, FL_READING);
+
+ /* Select the NAND device */
+ chip->select_chip(mtd, chipnr);
+ }
+
+ do {
+ if (chip->options & NAND_BUSWIDTH_16) {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB,
+ chip->badblockpos & 0xFE, page);
+ bad = cpu_to_le16(chip->read_word(mtd));
+ if (chip->badblockpos & 0x1)
+ bad >>= 8;
+ /*else
+ bad &= 0xFF;*/ //masked dan_multi
+ if ((bad & 0xFF) != 0xff)//dan_multi
+ res = 1;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
+ //bad = chip->read_byte(mtd);
+ if (chip->read_byte(mtd) != 0xff)
+ res = 1;
+ if (mtd->planenum > 1) {
+ //printk("\n multiplane block bad check! \n");
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page1);
+ if (chip->read_byte(mtd) != 0xff)
+ res = 1;
+ }
+ }
+
+ /*if (likely(chip->badblockbits == 8))
+ res = bad != 0xFF;
+ else
+ res = hweight8(bad) < chip->badblockbits;*/ //masked dan_multi
+ ofs += mtd->writesize;
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+ i++;
+ } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
+
+ if (getchip)
+ nand_release_device(mtd);
+
+ return res;
+}
+
+/**
+ * nand_default_block_markbad - [DEFAULT] mark a block bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This is the default implementation, which can be overridden by a hardware
+ * specific driver. We try operations in the following order, according to our
+ * bbt_options (NAND_BBT_NO_OOB_BBM and NAND_BBT_USE_FLASH):
+ * (1) erase the affected block, to allow OOB marker to be written cleanly
+ * (2) update in-memory BBT
+ * (3) write bad block marker to OOB area of affected block
+ * (4) update flash-based BBT
+ * Note that we retain the first error encountered in (3) or (4), finish the
+ * procedures, and dump the error in the end.
+*/
+static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs, int type)
+{
+ struct nand_chip *chip = mtd->priv;
+ uint8_t buf[2] = { 0, 0 };
+ int block, res = 0, ret = 0, i = 0, bits;
+ int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM);
+
+ if (write_oob) {
+ struct erase_info einfo;
+
+ /* Attempt erase before marking OOB */
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = ofs;
+ //einfo.len = 1 << chip->phys_erase_shift;
+ einfo.len = mtd->erasesize;
+ nand_erase_nand(mtd, &einfo, 0);
+ }
+
+ /* Get block number */
+ //block = (int)(ofs >> chip->bbt_erase_shift);
+ block = (((int)(ofs >> 10))/mtd->pageSizek) >> chip->pagecnt_shift;
+ /* Mark block bad in memory-based BBT */
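+ /*
+ * The in-memory BBT here packs two status bits per block, or four
+ * bits (two per plane) on multi-plane devices; 'type' selects which
+ * of the two non-good codes is recorded.
+ */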
+ if (chip->bbt) {
+ if (chip->realplanenum) {
+ if (block == (chip->status_plane[0]/mtd->pagecnt && (chip->status_plane[1]&7))) {
+ if ((0xFF&(mtd->id>>24)) == NAND_MFR_TOSHIBA)
+ bits = ((chip->status_plane[1]&2) ? 1 : 0) + ((chip->status_plane[1]&4) ? 4 : 0);//toshiba
+ else
+ bits = ((chip->status_plane[1]&1) ? 1 : 0) + ((chip->status_plane[1]&2) ? 4 : 0);//others
+ chip->bbt[block >> 1] &= (~(0xF << ((block & 0x01) << 2))); /* clear old bits so an earlier read-fail mark is not merged with a worn-out mark */
+ chip->bbt[block >> 1] |= bits << ((block & 0x01) << 2);
+ } else {
+ //printk("markbad block=%d diff last err block=%d\n", block, (chip->status_plane[0]/mtd->pagecnt));
+ bits = 5;
+ if (type == 1)
+ bits = 0xa;
+ chip->bbt[block >> 1] |= bits << ((block & 0x01) << 2);
+ }
+ } else {
+ bits = 1;
+ if (type == 1)
+ bits = 0x2;
+ chip->bbt[block >> 2] &= (~(3 << ((block & 0x03) << 1))); /* clear old bits so an earlier read-fail mark is not merged with a worn-out mark */
+ chip->bbt[block >> 2] |= bits << ((block & 0x03) << 1);
+ }
+ }
+
+ /* Write bad block marker to OOB */
+ if (write_oob) {
+ struct mtd_oob_ops ops;
+ loff_t wr_ofs = ofs;
+
+ nand_get_device(chip, mtd, FL_WRITING);
+
+ ops.datbuf = NULL;
+ ops.oobbuf = buf;
+ ops.ooboffs = chip->badblockpos;
+ if (chip->options & NAND_BUSWIDTH_16) {
+ ops.ooboffs &= ~0x01;
+ ops.len = ops.ooblen = 2;
+ } else {
+ ops.len = ops.ooblen = 1;
+ }
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ /* Write to first/last page(s) if necessary */
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+ wr_ofs += mtd->erasesize - mtd->writesize;
+ do {
+ res = nand_do_write_oob(mtd, wr_ofs, &ops);
+ if (!ret)
+ ret = res;
+
+ i++;
+ wr_ofs += mtd->writesize;
+ } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
+
+ nand_release_device(mtd);
+ }
+
+ /* Update flash-based bad block table */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ res = nand_update_bbt(mtd, ofs);
+ if (!ret)
+ ret = res;
+ }
+//printk("markbad blk fin res=%d\n",res);
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ return ret;
+}
+
+/**
+ * nand_check_wp - [GENERIC] check if the chip is write protected
+ * @mtd: MTD device structure
+ *
+ * Check, if the device is write protected. The function expects, that the
+ * device is already selected.
+ */
+static int nand_check_wp(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ /* Broken xD cards report WP despite being writable */
+ if (chip->options & NAND_BROKEN_XD)
+ return 0;
+
+ /* Check the WP bit */
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
+}
+
+/**
+ * nand_block_checkbad - [GENERIC] Check if a block is marked bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
+ * @allowbbt: 1, if it is allowed to access the bbt area
+ *
+ * Check, if the block is bad. Either by reading the bad block table or
+ * calling of the scan function.
+ */
+static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
+ int allowbbt, int allow_readfail)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (!chip->bbt)
+ return chip->block_bad(mtd, ofs, getchip);
+
+ /* Return info from the table */
+ if (chip->realplanenum)
+ return nand_isbad_bbt_multi(mtd, ofs, allowbbt, allow_readfail);
+ else
+ return nand_isbad_bbt(mtd, ofs, allowbbt, allow_readfail);
+}
+
+/**
+ * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @mtd: MTD device structure
+ * @timeo: Timeout
+ *
+ * Helper function for nand_wait_ready used when needing to wait in interrupt
+ * context.
+ */
+static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
+{
+ struct nand_chip *chip = mtd->priv;
+ int i;
+
+ /* Wait for the device to get ready */
+ for (i = 0; i < timeo; i++) {
+ if (chip->dev_ready(mtd))
+ break;
+ touch_softlockup_watchdog();
+ mdelay(1);
+ }
+}
+
+/* Wait for the ready pin, after a command. The timeout is caught later. */
+void nand_wait_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ unsigned long timeo = jiffies + 2;
+
+ /* 400ms timeout */
+ if (in_interrupt() || oops_in_progress)
+ return panic_nand_wait_ready(mtd, 400);
+
+// led_trigger_event(nand_led_trigger, LED_FULL);
+ /* Wait until command is processed or timeout occurs */
+ do {
+ if (chip->dev_ready(mtd))
+ break;
+// touch_softlockup_watchdog();
+ } while (time_before(jiffies, timeo));
+// led_trigger_event(nand_led_trigger, LED_OFF);
+}
+EXPORT_SYMBOL_GPL(nand_wait_ready);
+
+/**
+ * nand_command - [DEFAULT] Send command to NAND device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This function is used for small page devices
+ * (256/512 Bytes per page).
+ */
+static void nand_command(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ register struct nand_chip *chip = mtd->priv;
+ int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
+
+ /* Write out the command to the device */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ chip->cmd_ctrl(mtd, readcmd, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ chip->cmd_ctrl(mtd, command, ctrl);
+
+ /* Address cycle, when necessary */
+ ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ chip->cmd_ctrl(mtd, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ if (page_addr != -1) {
+ chip->cmd_ctrl(mtd, page_addr, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
+ /* One more address cycle for devices > 32MiB */
+ if (chip->chipsize > (32 << 20))
+ chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
+ }
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Program and erase have their own busy handlers; status and sequential
+ * in need no delay
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->dev_ready)
+ break;
+ udelay(chip->chip_delay);
+ chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd,
+ NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+ while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
+ ;
+ return;
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!chip->dev_ready) {
+ udelay(chip->chip_delay);
+ return;
+ }
+ }
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(100);
+
+ nand_wait_ready(mtd);
+}
+
+/**
+ * nand_command_lp - [DEFAULT] Send command to NAND large page device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This is the version for the new large page
+ * devices. We don't have the separate regions as we have in the small page
+ * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
+ */
+static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ register struct nand_chip *chip = mtd->priv;
+
+ /* Emulate NAND_CMD_READOOB */
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* Command latch cycle */
+ chip->cmd_ctrl(mtd, command & 0xff,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+
+ if (column != -1 || page_addr != -1) {
+ int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ chip->cmd_ctrl(mtd, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ chip->cmd_ctrl(mtd, column >> 8, ctrl);
+ }
+ if (page_addr != -1) {
+ chip->cmd_ctrl(mtd, page_addr, ctrl);
+ chip->cmd_ctrl(mtd, page_addr >> 8,
+ NAND_NCE | NAND_ALE);
+ /* One more address cycle for devices > 128MiB */
+ if (chip->chipsize > (128 << 20))
+ chip->cmd_ctrl(mtd, page_addr >> 16,
+ NAND_NCE | NAND_ALE);
+ }
+ }
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Program and erase have their own busy handlers status, sequential
+ * in, and deplete1 need no delay.
+ */
+ switch (command) {
+
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_DEPLETE1:
+ return;
+
+ case NAND_CMD_STATUS_ERROR:
+ case NAND_CMD_STATUS_ERROR0:
+ case NAND_CMD_STATUS_ERROR1:
+ case NAND_CMD_STATUS_ERROR2:
+ case NAND_CMD_STATUS_ERROR3:
+ /* Read error status commands require only a short delay */
+ udelay(chip->chip_delay);
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->dev_ready)
+ break;
+ udelay(chip->chip_delay);
+ chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
+ ;
+ return;
+
+ case NAND_CMD_RNDOUT:
+ /* No ready / busy check necessary */
+ chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ return;
+
+ case NAND_CMD_READ0:
+ chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay.
+ */
+ if (!chip->dev_ready) {
+ udelay(chip->chip_delay);
+ return;
+ }
+ }
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(100);
+
+ nand_wait_ready(mtd);
+}
+
+/**
+ * panic_nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: the nand chip descriptor
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Used when in panic, no locks are taken.
+ */
+static void panic_nand_get_device(struct nand_chip *chip,
+ struct mtd_info *mtd, int new_state)
+{
+ /* Hardware controller shared among independent devices */
+ chip->controller->active = chip;
+ chip->state = new_state;
+}
+
+/**
+ * nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: the nand chip descriptor
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static int
+nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
+{
+ spinlock_t *lock = &chip->controller->lock;
+ wait_queue_head_t *wq = &chip->controller->wq;
+ DECLARE_WAITQUEUE(wait, current);
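+ /*
+ * Enable the NAND controller clock for the duration of this exclusive
+ * access; nand_release_device() disables it again.
+ */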
+ auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+retry:
+ spin_lock(lock);
+
+ /* Hardware controller shared among independent devices */
+ if (!chip->controller->active)
+ chip->controller->active = chip;
+
+ if (chip->controller->active == chip && chip->state == FL_READY) {
+ chip->state = new_state;
+ spin_unlock(lock);
+ return 0;
+ }
+ if (new_state == FL_PM_SUSPENDED) {
+ if (chip->controller->active->state == FL_PM_SUSPENDED) {
+ chip->state = FL_PM_SUSPENDED;
+ spin_unlock(lock);
+ return 0;
+ }
+ }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(wq, &wait);
+ spin_unlock(lock);
+ schedule();
+ remove_wait_queue(wq, &wait);
+ goto retry;
+}
+
+/**
+ * panic_nand_wait - [GENERIC] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @timeo: timeout
+ *
+ * Wait for command done. This is a helper function for nand_wait used when
+ * we are in interrupt context. May happen when in panic and trying to write
+ * an oops through mtdoops.
+ */
+static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
+ unsigned long timeo)
+{
+ int i;
+ for (i = 0; i < timeo; i++) {
+ if (chip->dev_ready) {
+ if (chip->dev_ready(mtd))
+ break;
+ } else {
+ if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ break;
+ }
+ mdelay(1);
+ }
+}
+
+/**
+ * nand_wait - [DEFAULT] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ *
+ * Wait for command done. This applies to erase and program only. Erase can
+ * take up to 400ms and program up to 20ms according to general NAND and
+ * SmartMedia specs.
+ */
+static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+
+ unsigned long timeo = jiffies;
+ int status, state = chip->state;
+
+ if (state == FL_ERASING)
+ timeo += (HZ * 400) / 1000;
+ else
+ timeo += (HZ * 20) / 1000;
+
+// led_trigger_event(nand_led_trigger, LED_FULL);
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in any
+ * case on any machine.
+ */
+ ndelay(100);
+
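+ /*
+ * For multi-plane (or AND-type) program/erase the vendor multi-plane
+ * status command is used, except for Hynix, Micron and Intel parts,
+ * which are polled with the ordinary READ STATUS command.
+ */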
+ if ((state == FL_ERASING || state == FL_WRITING) &&
+ ((chip->options & NAND_IS_AND) || chip->realplanenum)) {
+ /*if (state == FL_ERASING)
+ printk("read status multi erase\n");
+ if (state == FL_WRITING)
+ printk("read status multi write\n");*/
+ //printk("read status multi write id=0x%x\n", 0xFF&(mtd->id>>24));
+ if ((0xFF&(mtd->id>>24)) == NAND_MFR_HYNIX || (0xFF&(mtd->id>>24)) == NAND_MFR_MICRON || (0xFF&(mtd->id>>24)) == NAND_MFR_INTEL) {
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ } else
+ chip->cmdfunc(mtd, NAND_CMD_STATUS_MULTI, -1, -1);
+ } else
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+
+ if (in_interrupt() || oops_in_progress)
+ panic_nand_wait(mtd, chip, timeo);
+ else {
+ while (time_before(jiffies, timeo)) {
+ if (chip->dev_ready) {
+ if (chip->dev_ready(mtd))
+ break;
+ } else {
+ if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ break;
+ }
+ cond_resched();
+ }
+ }
+// led_trigger_event(nand_led_trigger, LED_OFF);
+
+ status = (int)chip->read_byte(mtd);
+ /*if ((0xFF&(mtd->id>>24)) == 0xAD && chip->realplanenum)
+ while (status&0x1 || !(status&0x40)) {
+ chip->cmdfunc(mtd, 0x75, -1, -1);
+ status = (int)chip->read_byte(mtd);
+ printk("read status 75 multi=%x\n", status);
+ if (status&0x40)
+ break;
+ }*/
+ return status;
+}
+
+/**
+ * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ * @invert: when = 0, unlock the range of blocks within the lower and
+ * upper boundary address
+ * when = 1, unlock the range of blocks outside the boundaries
+ * of the lower and upper boundary address
+ *
+ * Returns unlock status.
+ */
+static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len, int invert)
+{
+ int ret = 0;
+ int status, page;
+ struct nand_chip *chip = mtd->priv;
+
+ /* Submit address of first page to unlock */
+ page = ofs >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
+
+ /* Submit address of last page to unlock */
+ page = (ofs + len) >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
+ (page | invert) & chip->pagemask);
+
+ /* Call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ /* See if device thinks it succeeded */
+ if (status & 0x01) {
+ pr_debug("%s: error status = 0x%08x\n",
+ __func__, status);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ *
+ * Returns unlock status.
+ */
+int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+ int chipnr;
+ struct nand_chip *chip = mtd->priv;
+
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)ofs, len);
+
+ if (check_offs_len(mtd, ofs, len))
+ ret = -EINVAL;
+
+ /* Align to last block address if size addresses end of the device */
+ if (ofs + len == mtd->size)
+ len -= mtd->erasesize;
+
+ nand_get_device(chip, mtd, FL_UNLOCKING);
+
+ /* Shift to get chip number */
+ //chipnr = ofs >> chip->chip_shift;
+ chipnr = ((int)(ofs >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ pr_debug("%s: device is write protected!\n",
+ __func__);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = __nand_unlock(mtd, ofs, len, 0);
+
+out:
+ nand_release_device(mtd);
+
+ return ret;
+}
+EXPORT_SYMBOL(nand_unlock);
+
+/**
+ * nand_lock - [REPLACEABLE] locks all blocks present in the device
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ *
+ * This feature is not supported in many NAND parts. 'Micron' NAND parts do
+ * have this feature, but it only allows locking all blocks, not a specified
+ * range of blocks. For now the 'lock' feature is implemented by making use
+ * of 'unlock'.
+ *
+ * Returns lock status.
+ */
+int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+ int chipnr, status, page;
+ struct nand_chip *chip = mtd->priv;
+
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)ofs, len);
+
+ if (check_offs_len(mtd, ofs, len))
+ ret = -EINVAL;
+
+ nand_get_device(chip, mtd, FL_LOCKING);
+
+ /* Shift to get chip number */
+ //chipnr = ofs >> chip->chip_shift;
+ chipnr = ((int)(ofs >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ pr_debug("%s: device is write protected!\n",
+ __func__);
+ status = MTD_ERASE_FAILED;
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Submit address of first page to lock */
+ page = ofs >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
+
+ /* Call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ /* See if device thinks it succeeded */
+ if (status & 0x01) {
+ pr_debug("%s: error status = 0x%08x\n",
+ __func__, status);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = __nand_unlock(mtd, ofs, len, 0x1);
+
+out:
+ nand_release_device(mtd);
+
+ return ret;
+}
+EXPORT_SYMBOL(nand_lock);
+
+/**
+ * nand_read_page_raw - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ chip->read_buf(mtd, buf, mtd->writesize);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+/**
+ * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ *
+ * We need a special oob layout and handling even when OOB isn't used.
+ */
+static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ chip->read_buf(mtd, buf, eccsize);
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ chip->read_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->read_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->read_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size)
+ chip->read_buf(mtd, oob, size);
+
+ return 0;
+}
+
+/**
+ * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ */
+static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ chip->ecc.read_page_raw(mtd, chip, buf, page);
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+ return 0;
+}
+
+/**
+ * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @data_offs: offset of requested data within the page
+ * @readlen: data length
+ * @bufpoi: buffer to store read data
+ */
+static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
+{
+ int start_step, end_step, num_steps;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *p;
+ int data_col_addr, i, gaps = 0;
+ int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
+ int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ int index = 0;
+printk(KERN_NOTICE "r nand_read_subpage -------------------------\n");
+ /* Column address within the page aligned to ECC size (256bytes) */
+ start_step = data_offs / chip->ecc.size;
+ end_step = (data_offs + readlen - 1) / chip->ecc.size;
+ num_steps = end_step - start_step + 1;
+
+ /* Data size aligned to ECC ecc.size */
+ datafrag_len = num_steps * chip->ecc.size;
+ eccfrag_len = num_steps * chip->ecc.bytes;
+
+ data_col_addr = start_step * chip->ecc.size;
+ /* If we read not a page aligned data */
+ if (data_col_addr != 0)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
+
+ p = bufpoi + data_col_addr;
+ chip->read_buf(mtd, p, datafrag_len);
+
+ /* Calculate ECC */
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
+ chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
+
+ /*
+ * The performance is faster if we position offsets according to
+ * ecc.pos. Let's make sure that there are no gaps in ECC positions.
+ */
+ for (i = 0; i < eccfrag_len - 1; i++) {
+ if (eccpos[i + start_step * chip->ecc.bytes] + 1 !=
+ eccpos[i + start_step * chip->ecc.bytes + 1]) {
+ gaps = 1;
+ break;
+ }
+ }
+ if (gaps) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ } else {
+ /*
+ * Send the command to read the particular ECC bytes; take care
+ * of buswidth alignment in read_buf.
+ */
+ index = start_step * chip->ecc.bytes;
+
+ aligned_pos = eccpos[index] & ~(busw - 1);
+ aligned_len = eccfrag_len;
+ if (eccpos[index] & (busw - 1))
+ aligned_len++;
+ if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
+ aligned_len++;
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + aligned_pos, -1);
+ chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
+ }
+
+ for (i = 0; i < eccfrag_len; i++)
+ chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
+
+ p = bufpoi + data_col_addr;
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p,
+ &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+ return 0;
+}
+
+/**
+ * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers which need a special oob layout.
+ */
+static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ }
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+ return 0;
+}
+
+/**
+ * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ *
+ * Hardware ECC for large page chips which require the OOB to be read first.
+ * For this ECC mode, the write_page method is re-used from ECC_HW. These
+ * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME
+ * support with multiple ECC steps, which follows the "infix ECC" scheme and
+ * reads/writes ECC from the data area, overwriting the NAND manufacturer's
+ * bad block markings.
+ */
+static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+
+ /* Read the OOB area first */
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+ return 0;
+}
+
+/**
+ * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+
+ if (chip->ecc.prepad) {
+ chip->read_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
+ chip->read_buf(mtd, oob, eccbytes);
+ stat = chip->ecc.correct(mtd, p, oob, NULL);
+
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->read_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i)
+ chip->read_buf(mtd, oob, i);
+
+ return 0;
+}
+
+/**
+ * nand_transfer_oob - [INTERN] Transfer oob to client buffer
+ * @chip: nand chip structure
+ * @oob: oob destination address
+ * @ops: oob ops structure
+ * @len: size of oob to transfer
+ */
+static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
+ struct mtd_oob_ops *ops, size_t len)
+{
+ switch (ops->mode) {
+
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ memcpy(oob, chip->oob_poi + ops->ooboffs, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB: {
+ struct nand_oobfree *free = chip->ecc.layout->oobfree;
+ uint32_t boffs = 0, roffs = ops->ooboffs;
+ size_t bytes = 0;
+
+ for (; free->length && len; free++, len -= bytes) {
+ /* Read request not from offset 0? */
+ if (unlikely(roffs)) {
+ if (roffs >= free->length) {
+ roffs -= free->length;
+ continue;
+ }
+ boffs = free->offset + roffs;
+ bytes = min_t(size_t, len,
+ (free->length - roffs));
+ roffs = 0;
+ } else {
+ bytes = min_t(size_t, len, free->length);
+ boffs = free->offset;
+ }
+ memcpy(oob, chip->oob_poi + boffs, bytes);
+ oob += bytes;
+ }
+ return oob;
+ }
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+/**
+ * nand_do_read_ops - [INTERN] Read data with ECC
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob ops structure
+ *
+ * Internal function. Called with chip held.
+ */
+static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, page, realpage, col, bytes, aligned;
+ struct nand_chip *chip = mtd->priv;
+ struct mtd_ecc_stats stats;
+ //int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+	int blkcheck = mtd->pagecnt - 1;
+ int sndcmd = 1;
+ int ret = 0, nocache = 1;
+ uint32_t readlen = ops->len;
+ uint32_t oobreadlen = ops->ooblen;
+ uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
+ mtd->oobavail : mtd->oobsize;
+
+ uint8_t *bufpoi, *oob, *buf;
+
+ stats = mtd->ecc_stats;
+ mtd->ecc_err_cnt = 0;
+
+ //chipnr = (int)(from >> chip->chip_shift);
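+	/*
+	 * Offsets are handled in 1KiB units (>> 10) so that page sizes which
+	 * are not a power of two can be supported; pageSizek is the page size
+	 * in KiB and blkcnt is presumably the number of blocks per chip.
+	 */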
+ chipnr = ((int)(from >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
+ chip->select_chip(mtd, chipnr);
+ if(chipnr > 0) {
+ second_chip = 1;
+ } else {
+ second_chip = 0;
+ }
+ //realpage = (int)(from >> chip->page_shift);
+ realpage = ((int)(from >> 10))/mtd->pageSizek;
+ page = realpage & chip->pagemask;
+
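+	/*
+	 * If the page size in KiB is a power of two the column is simply the
+	 * low bits of the offset; otherwise it is derived from the offset in
+	 * KiB modulo the page size in KiB.
+	 */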
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) == 1) {
+ col = (int)(from & (mtd->writesize - 1));
+ } else {
+ col = ((int)(from>>10)) % mtd->pageSizek;
+ col = col << 10;
+ }
+ //printk("chip=%d realpage=0x%x page=0x%x mask=0x%x col=0x%x \n",chipnr, realpage, page, chip->pagemask, col);
+
+ buf = ops->datbuf;
+ oob = ops->oobbuf;
+
+ while (1) {
+ nocache = 1;
+ bytes = min(mtd->writesize - col, readlen);
+ aligned = (bytes == mtd->writesize);
+ //if (!aligned || col)
+//printk("readlen=%d byte=%d align=%d col=%d\n", readlen, bytes, aligned, col);
+ /* Is the current page in the buffer? */
+ if (realpage != chip->pagebuf || oob) {
+ bufpoi = aligned ? buf : chip->buffers->databuf;
+
+ if (likely(sndcmd)) {
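+				/*
+				 * On single-plane devices first try to satisfy
+				 * an aligned full-page read from cache_read_data();
+				 * READ0 is only issued on a cache miss
+				 * (nocache != 0).
+				 */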
+ if (!chip->realplanenum) {//dan_multi
+ /*page = (page / pagecnt) * pagecnt + page;//dan_multi 65->129, 129->257
+ else*/
+ if (aligned)
+ nocache = cache_read_data(mtd, chip, page, buf);
+ if (nocache)
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ }
+ sndcmd = 0;
+ }
+
+ /* Now read the page into the buffer */
+ /*if (unlikely(ops->mode == MTD_OPS_RAW))
+ ret = chip->ecc.read_page_raw(mtd, chip,
+ bufpoi, page);
+ else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
+ ret = chip->ecc.read_subpage(mtd, chip,
+ col, bytes, bufpoi);
+ else*/
+ /* dannier comment: copy data + oob to bufpoi */
+ if (!chip->realplanenum && nocache == 0)
+ ret = 0;
+ else
+ ret = chip->ecc.read_page(mtd, chip, bufpoi,
+ page);
+ if (ret < 0) {
+ if (!aligned)
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+ break;
+ }
+
+ /* Transfer not aligned data */
+ if (!aligned) {
+ if (!NAND_SUBPAGE_READ(chip) && !oob &&
+ !(mtd->ecc_stats.failed - stats.failed) &&
+ (ops->mode != MTD_OPS_RAW))
+ chip->pagebuf = realpage;
+ else
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+ memcpy(buf, chip->buffers->databuf + col, bytes);
+ }
+
+ buf += bytes;
+
+ if (unlikely(oob)) {
+
+ int toread = min(oobreadlen, max_oobsize);
+
+ if (toread) {
+ oob = nand_transfer_oob(chip,
+ oob, ops, toread);
+ oobreadlen -= toread;
+ }
+ }
+
+ if (!(chip->options & NAND_NO_READRDY)) {
+ /*
+ * Apply delay or wait for ready/busy pin. Do
+ * this before the AUTOINCR check, so no
+ * problems arise if a chip which does auto
+ * increment is marked as NOAUTOINCR by the
+ * board driver.
+ */
+ if (!chip->dev_ready)
+ udelay(chip->chip_delay);
+ else
+ nand_wait_ready(mtd);
+ }
+ } else {
+ memcpy(buf, chip->buffers->databuf + col, bytes);
+ buf += bytes;
+ }
+
+ readlen -= bytes;
+
+ if (!readlen)
+ break;
+
+ /* For subsequent reads align to page boundary */
+ col = 0;
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+
+ /*
+ * Check, if the chip supports auto page increment or if we
+ * have hit a block boundary.
+ */
+ if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
+ sndcmd = 1;
+ }
+
+ ops->retlen = ops->len - (size_t) readlen;
+ if (oob)
+ ops->oobretlen = ops->ooblen - oobreadlen;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
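+	/*
+	 * Driver-specific: if the number of corrected bit errors in this read
+	 * exceeded the configured threshold, tell the caller that the data
+	 * should be moved to a fresh block.
+	 */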
+ if (mtd->ecc_err_cnt > mtd->ecc_err_level) {
+ return -NEED_REPLACEMENT;
+ }
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+static int nand_block_markbad_wmt(struct mtd_info *mtd, loff_t ofs, int type);
+
+
+/**
+ * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put data
+ *
+ * Get hold of the chip and call nand_do_read.
+ */
+static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops;
+ int ret;
+
+ nand_get_device(chip, mtd, FL_READING);
+ ops.len = len;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ops.mode = 0;
+ ret = nand_do_read_ops(mtd, from, &ops);
+ *retlen = ops.retlen;
+ nand_release_device(mtd);
+
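+	/* Driver-specific: mark the block bad after an uncorrectable read error. */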
+ if (ret == -EBADMSG) {
+ nand_block_markbad_wmt(mtd, from, 1);
+ }
+
+ return ret;
+}
+
+/**
+ * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ * @sndcmd: flag whether to issue read command or not
+ */
+static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ if (sndcmd) {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ sndcmd = 0;
+ }
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return sndcmd;
+}
+
+/**
+ * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
+ * with syndromes
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ * @sndcmd: flag whether to issue read command or not
+ */
+static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ uint8_t *buf = chip->oob_poi;
+ int length = mtd->oobsize;
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size;
+ uint8_t *bufpoi = buf;
+ int i, toread, sndrnd = 0, pos;
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ if (sndrnd) {
+ pos = eccsize + i * (eccsize + chunk);
+ if (mtd->writesize > 512)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
+ else
+ chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
+ } else
+ sndrnd = 1;
+ toread = min_t(int, length, chunk);
+ chip->read_buf(mtd, bufpoi, toread);
+ bufpoi += toread;
+ length -= toread;
+ }
+ if (length > 0)
+ chip->read_buf(mtd, bufpoi, length);
+
+ return 1;
+}
+
+/**
+ * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ int status = 0;
+ const uint8_t *buf = chip->oob_poi;
+ int length = mtd->oobsize;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ chip->write_buf(mtd, buf, length);
+ /* Send command to program the OOB data */
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/**
+ * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
+ * with syndrome - only for large page flash
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int nand_write_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size, length = mtd->oobsize;
+ int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
+ const uint8_t *bufpoi = chip->oob_poi;
+
+ /*
+ * data-ecc-data-ecc ... ecc-oob
+ * or
+ * data-pad-ecc-pad-data-pad .... ecc-pad-oob
+ */
+ if (!chip->ecc.prepad && !chip->ecc.postpad) {
+ pos = steps * (eccsize + chunk);
+ steps = 0;
+ } else
+ pos = eccsize;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+ for (i = 0; i < steps; i++) {
+ if (sndcmd) {
+ if (mtd->writesize <= 512) {
+ uint32_t fill = 0xFFFFFFFF;
+
+ len = eccsize;
+ while (len > 0) {
+ int num = min_t(int, len, 4);
+ chip->write_buf(mtd, (uint8_t *)&fill,
+ num);
+ len -= num;
+ }
+ } else {
+ pos = eccsize + i * (eccsize + chunk);
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
+ }
+ } else
+ sndcmd = 1;
+ len = min_t(int, length, chunk);
+ chip->write_buf(mtd, bufpoi, len);
+ bufpoi += len;
+ length -= len;
+ }
+ if (length > 0)
+ chip->write_buf(mtd, bufpoi, length);
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+
+/**
+ * nand_do_read_bb_oob - [Intern] NAND read out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operations description structure
+ *
+ * NAND read out-of-band data from the spare area
+ */
+static int nand_do_read_bb_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int page, realpage, chipnr, sndcmd = 1;
+ struct nand_chip *chip = mtd->priv;
+ struct mtd_ecc_stats stats;
+ int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+ int readlen = ops->ooblen;
+ int len;
+ uint8_t *buf = ops->oobbuf;
+
+ pr_debug("%s: from = 0x%08Lx, len = %i\n",
+ __func__, (unsigned long long)from, readlen);
+
+ stats = mtd->ecc_stats;
+ len = mtd->oobsize;
+
+ if (unlikely(ops->ooboffs >= len)) {
+ pr_debug("%s: attempt to start read outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Do not allow reads past end of device */
+ if (unlikely(from >= mtd->size ||
+ ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
+ (from >> chip->page_shift)) * len)) {
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ //chipnr = (int)(from >> chip->chip_shift);
+ chipnr = ((int)(from >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
+ chip->select_chip(mtd, chipnr);
+
+ /* Shift to get page */
+ //realpage = (int)(from >> chip->page_shift);
+ realpage = ((int)(from >> 10))/mtd->pageSizek;
+ page = realpage & chip->pagemask;
+
+ while(1) {
+ sndcmd = chip->ecc.read_bb_oob(mtd, chip, page, sndcmd);
+
+ len = min(len, readlen);
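+		/*
+		 * Manufacturer ID 0x45 (presumably SanDisk): copy out 1KiB of
+		 * the raw data area that precedes oob_poi instead of the usual
+		 * OOB transfer.
+		 */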
+ if (((mtd->id>>24)&0xff) == 0x45) {
+ memcpy(buf, chip->oob_poi - mtd->writesize, 1024);
+ len = min((int)mtd->oobsize, readlen);
+ } else
+ buf = nand_transfer_oob(chip, buf, ops, len);
+
+ if (!(chip->options & NAND_NO_READRDY)) {
+ /*
+ * Apply delay or wait for ready/busy pin. Do this
+ * before the AUTOINCR check, so no problems arise if a
+ * chip which does auto increment is marked as
+ * NOAUTOINCR by the board driver.
+ */
+ if (!chip->dev_ready)
+ udelay(chip->chip_delay);
+ else
+ nand_wait_ready(mtd);
+ }
+
+ readlen -= len;
+ if (!readlen)
+ break;
+
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+
+ /* Check, if the chip supports auto page increment
+ * or if we have hit a block boundary.
+ */
+ if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
+ sndcmd = 1;
+ }
+
+ ops->oobretlen = ops->ooblen;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+/**
+ * nand_do_read_oob - [INTERN] NAND read out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operations description structure
+ *
+ * NAND read out-of-band data from the spare area.
+ */
+static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int page, realpage, chipnr, sndcmd = 1;
+ struct nand_chip *chip = mtd->priv;
+ struct mtd_ecc_stats stats;
+ int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+ int readlen = ops->ooblen;
+ int len;
+ uint8_t *buf = ops->oobbuf, *buf1;
+
+ mtd->ecc_err_cnt = 0;
+
+ pr_debug("%s: from = 0x%08Lx, len = %i\n",
+ __func__, (unsigned long long)from, readlen);
+
+ stats = mtd->ecc_stats;
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ len = chip->ecc.layout->oobavail;
+ else
+ len = mtd->oobsize;
+
+ if (unlikely(ops->ooboffs >= len)) {
+ pr_debug("%s: attempt to start read outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Do not allow reads past end of device */
+ if (unlikely(from >= mtd->size ||
+ ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
+ (from >> chip->page_shift)) * len)) {
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ //chipnr = (int)(from >> chip->chip_shift);
+ chipnr = ((int)(from >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
+ chip->select_chip(mtd, chipnr);
+
+ /* Shift to get page */
+ //realpage = (int)(from >> chip->page_shift);
+ realpage = ((int)(from >> 10))/mtd->pageSizek;
+ page = realpage & chip->pagemask;
+ if(chipnr > 0) {
+ second_chip = 1;
+ } else {
+ second_chip = 0;
+ }
+ buf1 = buf;
+ while (1) {
+ if (ops->mode == MTD_OPS_RAW)
+ sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd);
+ else
+ sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
+
+ len = min(len, readlen);
+ buf = nand_transfer_oob(chip, buf, ops, len);
+
+ if (!(chip->options & NAND_NO_READRDY)) {
+ /*
+ * Apply delay or wait for ready/busy pin. Do this
+ * before the AUTOINCR check, so no problems arise if a
+ * chip which does auto increment is marked as
+ * NOAUTOINCR by the board driver.
+ */
+ if (!chip->dev_ready)
+ udelay(chip->chip_delay);
+ else
+ nand_wait_ready(mtd);
+ }
+
+ readlen -= len;
+ if (!readlen)
+ break;
+
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+
+ /*
+ * Check, if the chip supports auto page increment or if we
+ * have hit a block boundary.
+ */
+ if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
+ sndcmd = 1;
+ }
+
+ ops->oobretlen = ops->ooblen;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ if (mtd->ecc_err_cnt > mtd->ecc_err_level) {
+ return -NEED_REPLACEMENT;
+ }
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+/**
+ * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * NAND read data and/or out-of-band data.
+ */
+static int nand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = -ENOTSUPP;
+
+ ops->retlen = 0;
+
+ /* Do not allow reads past end of device */
+ if (ops->datbuf && (from + ops->len) > mtd->size) {
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ nand_get_device(chip, mtd, FL_READING);
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf) {
+ /* DannierChen20101022 : Patch for avoiding yaffs2 read checkpoint signature from a bad block*/
+ if (chip->bbt && nand_block_checkbad(mtd, from, 1, 0xFF, 1)) {
+ memset(ops->oobbuf, 0xff, ops->ooblen);
+ //printk("nand_do_read_oob: memset ops->ooblen=%d Byte\n", ops->ooblen);
+ /* DannierChen20101022 : Patch end */
+ } else {
+ ret = nand_do_read_oob(mtd, from, ops);
+ if (ret == -EBADMSG) {
+ nand_release_device(mtd);
+ nand_block_markbad_wmt(mtd, from, 1);
+ return ret;
+ }
+ }
+ } else {
+ //printk("In nand_read_oob() call nand_do_read_ops():and ops->len is %d\n", ops->len);
+ ret = nand_do_read_ops(mtd, from, ops);
+ if (ret == -EBADMSG) {
+ nand_release_device(mtd);
+ nand_block_markbad_wmt(mtd, from, 1);
+ return ret;
+ }
+ }
+
+ out:
+ nand_release_device(mtd);
+ return ret;
+}
+
+
+/**
+ * nand_read_bbt_facmk - [MTD Interface] read factory-marked bad block info
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * NAND read factory-marked bad block information
+ */
+static int nand_read_bbt_facmk(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = -ENOTSUPP;
+ //printk("enter nand_read_bbt_facmk\n");
+ ops->retlen = 0;
+
+ /* Do not allow reads past end of device */
+ if (ops->datbuf && (from + ops->len) > mtd->size) {
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ nand_get_device(chip, mtd, FL_READING);
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf) {
+ ret = nand_do_read_bb_oob(mtd, from, ops);
+ //printk("enter nand_read_bbt_facmk nand_do_read_bb_oob yes\n");
+ } else {
+ //printk("enter nand_read_bbt_facmk nand_do_read_ops no\n");
+ ret = nand_do_read_ops(mtd, from, ops);
+ }
+
+out:
+ nand_release_device(mtd);
+ return ret;
+}
+
+
+/**
+ * nand_write_page_raw - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ chip->write_buf(mtd, buf, mtd->writesize);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
+
+/**
+ * nand_write_page_raw_syndrome - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ *
+ * We need a special oob layout and handling even when ECC isn't checked.
+ */
+static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ chip->write_buf(mtd, buf, eccsize);
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ chip->write_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+		chip->write_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->write_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size)
+ chip->write_buf(mtd, oob, size);
+}
+
+/**
+ * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ */
+static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ const uint8_t *p = buf;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ /* Software ECC calculation */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ chip->ecc.write_page_raw(mtd, chip, buf);
+}
+
+/**
+ * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ */
+static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ const uint8_t *p = buf;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->write_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ }
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
+
+/**
+ * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static void nand_write_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ const uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->write_buf(mtd, p, eccsize);
+
+ if (chip->ecc.prepad) {
+ chip->write_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.calculate(mtd, p, oob);
+ chip->write_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->write_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i)
+ chip->write_buf(mtd, oob, i);
+}
+
+/**
+ * nand_write_page - [REPLACEABLE] write one page
+ * @mtd: MTD device structure
+ * @chip: NAND chip descriptor
+ * @buf: the data to write
+ * @page: page number to write
+ * @cached: cached programming
+ * @raw: use _raw version of write_page
+ */
+static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int page, int cached, int raw)
+{
+ int status;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+
+ if (unlikely(raw))
+ chip->ecc.write_page_raw(mtd, chip, buf);
+ else
+ chip->ecc.write_page(mtd, chip, buf);
+
+ /*
+	 * Cached programming disabled for now. Not sure if it's worth the
+ * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s).
+ */
+ cached = 0;
+
+ if (!cached || !(chip->options & NAND_CACHEPRG)) {
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ /*
+ * See if operation failed and additional status checks are
+ * available.
+ */
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+ status = chip->errstat(mtd, chip, FL_WRITING, status,
+ page);
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ }
+
+#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+ /* Send command to read back the data */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ if (chip->verify_buf(mtd, buf, mtd->writesize))
+ return -EIO;
+#endif
+ return 0;
+}
+
+/**
+ * nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @mtd: MTD device structure
+ * @oob: oob data buffer
+ * @len: oob data write length
+ * @ops: oob ops structure
+ */
+static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ /*
+ * Initialise to all 0xFF, to avoid the possibility of left over OOB
+ * data from a previous OOB read.
+ */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+
+ switch (ops->mode) {
+
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ memcpy(chip->oob_poi + ops->ooboffs, oob, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB: {
+ struct nand_oobfree *free = chip->ecc.layout->oobfree;
+ uint32_t boffs = 0, woffs = ops->ooboffs;
+ size_t bytes = 0;
+
+ for (; free->length && len; free++, len -= bytes) {
+ /* Write request not from offset 0? */
+ if (unlikely(woffs)) {
+ if (woffs >= free->length) {
+ woffs -= free->length;
+ continue;
+ }
+ boffs = free->offset + woffs;
+ bytes = min_t(size_t, len,
+ (free->length - woffs));
+ woffs = 0;
+ } else {
+ bytes = min_t(size_t, len, free->length);
+ boffs = free->offset;
+ }
+ memcpy(chip->oob_poi + boffs, oob, bytes);
+ oob += bytes;
+ }
+ return oob;
+ }
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
+
+/**
+ * nand_do_write_ops - [INTERN] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operations description structure
+ *
+ * NAND write with ECC.
+ */
+static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, realpage, page, blockmask, column;
+ struct nand_chip *chip = mtd->priv;
+ uint32_t writelen = ops->len;
+
+ uint32_t oobwritelen = ops->ooblen;
+ uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
+ mtd->oobavail : mtd->oobsize;
+
+ uint8_t *oob = ops->oobbuf;
+ uint8_t *buf = ops->datbuf;
+ int ret, subpage;
+
+ ops->retlen = 0;
+ if (!writelen)
+ return 0;
+
+ /* Reject writes, which are not page aligned */
+ if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
+ pr_notice("%s: attempt to write non page aligned data\n",
+ __func__);
+ return -EINVAL;
+ }
+
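+	/*
+	 * As in the read path, offsets are handled in 1KiB units so that page
+	 * sizes which are not a power of two can be supported.
+	 */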
+ //column = to & (mtd->writesize - 1);
+ column = ((int)(to>>10)) % mtd->pageSizek;
+ column = column << 10;
+ //subpage = column || (writelen & (mtd->writesize - 1));
+ subpage = column || (writelen < mtd->writesize);
+//printk("column=%d subpage=%d writelen=%d\n", column, subpage, writelen);
+ if (subpage && oob)
+ return -EINVAL;
+
+ //chipnr = (int)(to >> chip->chip_shift);
+ chipnr = ((int)(to >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd))
+ return -EIO;
+
+ //realpage = (int)(to >> chip->page_shift);
+ realpage = ((int)(to >> 10))/mtd->pageSizek;
+ page = realpage & chip->pagemask;
+ //blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+ blockmask = (1 << (chip->pagecnt_shift)) - 1;
+
+ if(chipnr > 0) {
+ second_chip = 1;
+ } else {
+ second_chip = 0;
+ }
+ /* Invalidate the page cache, when we write to the cached page */
+ if (to <= (chip->pagebuf << chip->page_shift) &&
+ (chip->pagebuf << chip->page_shift) < (to + ops->len))
+ chip->pagebuf = -1;
+
+ /* Don't allow multipage oob writes with offset */
+ if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
+ return -EINVAL;
+
+ while (1) {
+ int bytes = mtd->writesize;
+ int cached = writelen > bytes && page != blockmask;
+ uint8_t *wbuf = buf;
+
+ /* Partial page write? */
+ if (unlikely(column || writelen < (mtd->writesize - 1))) {
+ cached = 0;
+ bytes = min_t(int, bytes - column, (int) writelen);
+ chip->pagebuf = -1;
+ memset(chip->buffers->databuf, 0xff, mtd->writesize);
+ memcpy(&chip->buffers->databuf[column], buf, bytes);
+ wbuf = chip->buffers->databuf;
+ }
+
+ if (unlikely(oob)) {
+ size_t len = min(oobwritelen, oobmaxlen);
+ memset(chip->oob_poi, 0xff, mtd->oobsize); /* edward wan add 20080606 */
+ oob = nand_fill_oob(mtd, oob, len, ops);
+ oobwritelen -= len;
+ } else {
+ /* We still need to erase leftover OOB data */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+ }
+
+ // ret = chip->write_page(mtd, chip, wbuf, page, cached,
+ // (ops->mode == MTD_OOB_RAW));
+ ret = chip->write_page(mtd, chip, wbuf, page, cached, ops->mode);
+ if (ret)
+ break;
+
+ writelen -= bytes;
+ if (!writelen)
+ break;
+
+ column = 0;
+ buf += bytes;
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+ }
+
+ ops->retlen = ops->len - writelen;
+ if (unlikely(oob))
+ ops->oobretlen = ops->ooblen;
+ return ret;
+}
+
+/**
+ * panic_nand_write - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * NAND write with ECC. Used when performing writes in interrupt context, this
+ * may for example be called by mtdoops when writing an oops while in panic.
+ */
+static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const uint8_t *buf)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops;
+ int ret;
+
+ /* Wait for the device to get ready */
+ panic_nand_wait(mtd, chip, 400);
+
+ /* Grab the device */
+ panic_nand_get_device(chip, mtd, FL_WRITING);
+
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ops.oobbuf = NULL;
+ ops.mode = 0;
+
+ ret = nand_do_write_ops(mtd, to, &ops);
+
+ *retlen = ops.retlen;
+ return ret;
+}
+
+/**
+ * nand_write - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * NAND write with ECC.
+ */
+static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const uint8_t *buf)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops;
+ int ret;
+
+ nand_get_device(chip, mtd, FL_WRITING);
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ops.oobbuf = NULL;
+ ops.mode = 0;
+ ret = nand_do_write_ops(mtd, to, &ops);
+ *retlen = ops.retlen;
+ nand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * nand_do_write_oob - [MTD Interface] NAND write out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ *
+ * NAND write out-of-band.
+ */
+static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, page, status, len;
+ struct nand_chip *chip = mtd->priv;
+
+ pr_debug("%s: to = 0x%08x, len = %i\n",
+ __func__, (unsigned int)to, (int)ops->ooblen);
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ len = chip->ecc.layout->oobavail;
+ else
+ len = mtd->oobsize;
+
+ /* Do not allow write past end of page */
+ if ((ops->ooboffs + ops->ooblen) > len) {
+ pr_debug("%s: attempt to write past end of page\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->ooboffs >= len)) {
+ pr_debug("%s: attempt to start write outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Do not allow write past end of device */
+ if (unlikely(to >= mtd->size ||
+ ops->ooboffs + ops->ooblen >
+ ((mtd->size >> chip->page_shift) -
+ (to >> chip->page_shift)) * len)) {
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ //chipnr = (int)(to >> chip->chip_shift);
+ chipnr = ((int)(to >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
+ chip->select_chip(mtd, chipnr);
+
+ /* Shift to get page */
+ page = (int)(to >> chip->page_shift);
+
+ /*
+ * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
+ * of my DiskOnChip 2000 test units) will clear the whole data page too
+ * if we don't do this. I have no clue why, but I seem to have 'fixed'
+ * it in the doc2000 driver in August 1999. dwmw2.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd))
+ return -EROFS;
+
+ /* Invalidate the page cache, if we write to the cached page */
+ if (page == chip->pagebuf)
+ chip->pagebuf = -1;
+
+ nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
+
+ if (ops->mode == MTD_OPS_RAW)
+ status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
+ else
+ status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
+
+ if (status)
+ return status;
+
+ ops->oobretlen = ops->ooblen;
+
+ return 0;
+}
+
+/**
+ * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ */
+static int nand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = -ENOTSUPP;
+
+ ops->retlen = 0;
+
+ /* Do not allow writes past end of device */
+ if (ops->datbuf && (to + ops->len) > mtd->size) {
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ nand_get_device(chip, mtd, FL_WRITING);
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf)
+ ret = nand_do_write_oob(mtd, to, ops);
+ else
+ ret = nand_do_write_ops(mtd, to, ops);
+
+out:
+ nand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * get_para - [MTD Interface] get NAND read-retry and eSLC parameter information
+ * @mtd: MTD device structure
+ * @chipnr: number of the chip to query
+ */
+static int get_para(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = -ENOTSUPP;
+
+ nand_get_device(chip, mtd, FL_READING);
+
+
+ chip->select_chip(mtd, chipnr);
+
+ chip->get_para(mtd, chip);
+
+ chip->select_chip(mtd, -1);
+
+
+ nand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * single_erase_cmd - [GENERIC] NAND standard block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
+ *
+ * Standard erase command for NAND chips.
+ */
+extern unsigned int par4_ofs;
+extern unsigned int prob_end;
+static void single_erase_cmd(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+}
+
+/**
+ * multi_erase_cmd - [GENERIC] AND specific block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
+ *
+ * AND multi block erase command function. Erase 4 consecutive blocks.
+ */
+static void multi_erase_cmd(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ /* Send commands to erase a block */
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+}
+
+/**
+ * nand_erase - [MTD Interface] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ *
+ * Erase one or more blocks.
+ */
+static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return nand_erase_nand(mtd, instr, 0);
+}
+
+#define BBT_PAGE_MASK 0xffffff3f
+/**
+ * nand_erase_nand - [INTERN] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @allowbbt: allow erasing the bbt area
+ *
+ * Erase one or more blocks.
+ */
+int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
+ int allowbbt)
+{
+ int page, status, pages_per_block, ret, chipnr;
+ struct nand_chip *chip = mtd->priv;
+ loff_t rewrite_bbt[NAND_MAX_CHIPS] = {0};
+ unsigned int bbt_masked_page = 0xffffffff;
+ loff_t len;
+
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)instr->addr,
+ (unsigned long long)instr->len);
+
+ if (check_offs_len(mtd, instr->addr, instr->len))
+ return -EINVAL;
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(chip, mtd, FL_ERASING);
+
+ /* Shift to get first page */
+ //page = (int)(instr->addr >> chip->page_shift);
+ page = ((int)(instr->addr >> 10))/mtd->pageSizek;
+ //chipnr = (int)(instr->addr >> chip->chip_shift);
+ chipnr = ((int)(instr->addr >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
+
+ if(chipnr > 0)
+ second_chip = 1;
+ else
+ second_chip = 0;
+
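+	/*
+	 * Driver-specific: on Hynix parts, erases below par4_ofs on the first
+	 * chip are silently skipped (the area is apparently reserved).
+	 */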
+ if (chip->cur_chip && (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX && prob_end == 1) {
+ if (page < par4_ofs && second_chip == 0) {
+ //printk("SKIP Multi erase page 0x%x, par4_ofs 0x%x\n", page, par4_ofs);
+ instr->state = MTD_ERASE_DONE;
+ ret = 0;
+ nand_release_device(mtd);
+ return ret;
+ }
+ }
+
+ /* Calculate pages in each block */
+ //pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+ pages_per_block = 1 << chip->pagecnt_shift;
+
+ /* Select the NAND device */
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ pr_debug("%s: device is write protected!\n",
+ __func__);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ /*
+ * If BBT requires refresh, set the BBT page mask to see if the BBT
+ * should be rewritten. Otherwise the mask is set to 0xffffffff which
+ * can not be matched. This is also done when the bbt is actually
+ * erased to avoid recursive updates.
+ */
+ if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
+ bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
+
+ /* Loop through the pages */
+ len = instr->len;
+
+ instr->state = MTD_ERASING;
+
+ while (len) {
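+		/*
+		 * allowbbt == 0xFF is a driver-specific "force" mode: the bad
+		 * block check is skipped and erase failures do not abort the
+		 * loop.
+		 */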
+ /* Check if we have a bad block, we do not erase bad blocks! */
+ if(allowbbt != 0xFF) { /* normal flow */
+ //if (nand_block_checkbad(mtd, ((loff_t) page) << chip->page_shift, 0, allowbbt)) {
+ if (nand_block_checkbad(mtd, ((loff_t) (page*mtd->pageSizek)) << 10, 0, allowbbt, 1)) {
+ pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
+ __func__, page);
+ printk("nand_erase: attempt to erase a "
+ "bad block at page 0x%08x\n", page);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+ }
+
+ /*
+ * Invalidate the page cache, if we erase the block which
+ * contains the current cached page.
+ */
+ if (page <= chip->pagebuf && chip->pagebuf <
+ (page + pages_per_block))
+ chip->pagebuf = -1;
+
+ chip->erase_cmd(mtd, page & chip->pagemask);
+
+ status = chip->waitfunc(mtd, chip);
+
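+		/*
+		 * On multi-plane devices record the failing page and status
+		 * (status_plane) for later per-plane error handling.
+		 */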
+ if (chip->realplanenum && (status & NAND_STATUS_FAIL)) {
+ /*if (abv != 13479) {
+ status = 0xe3;//0xe5;
+ abv = 13479;
+ printk("erase page=%x error abv=%d\n", page, abv);
+ }*/
+ chip->status_plane[0] = page;
+ chip->status_plane[1] = status;
+ printk("erase blk=%x error status=0x%x\n", page/mtd->pagecnt, status);
+ //while(1);
+ }
+
+ /*
+ * See if operation failed and additional status checks are
+ * available
+ */
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+ status = chip->errstat(mtd, chip, FL_ERASING,
+ status, page);
+
+ /* See if block erase succeeded */
+ if (status & NAND_STATUS_FAIL) {
+ pr_debug("%s: failed erase, page 0x%08x\n",
+ __func__, page);
+ printk( "nand_erase: "
+ "Failed erase, page 0x%08x ", page);
+ if(allowbbt == 0xFF) {
+ //len -= (1 << chip->phys_erase_shift);
+ len -= mtd->erasesize;
+ page += pages_per_block;
+ printk( "continue next\n");
+ continue;
+ } else
+ printk( "\n");
+
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr =
+ //((loff_t)page << chip->page_shift);
+ ((loff_t)(page*mtd->pageSizek)) << 10;
+ printk("nand_erase: goto erase_exit\n");
+ goto erase_exit;
+ }
+
+ /*
+ * If BBT requires refresh, set the BBT rewrite flag to the
+ * page being erased.
+ */
+ if (bbt_masked_page != 0xffffffff &&
+ (page & BBT_PAGE_MASK) == bbt_masked_page)
+ rewrite_bbt[chipnr] =
+ //((loff_t)page << chip->page_shift);
+ ((loff_t)(page*mtd->pageSizek)) << 10;
+
+ /* Increment page address and decrement length */
+ //len -= (1 << chip->phys_erase_shift);
+ len -= mtd->erasesize;
+ page += pages_per_block;
+		if (len)
+			pr_debug("%s: erased page %d (blk %d), remaining %llu bytes\n",
+				 __func__, page, page / 256, (unsigned long long)len);
+ /* Check, if we cross a chip boundary */
+ if (len && !(page & chip->pagemask)) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+
+ /*
+ * If BBT requires refresh and BBT-PERCHIP, set the BBT
+ * page mask to see if this BBT should be rewritten.
+ */
+ if (bbt_masked_page != 0xffffffff &&
+ (chip->bbt_td->options & NAND_BBT_PERCHIP))
+ bbt_masked_page = chip->bbt_td->pages[chipnr] &
+ BBT_PAGE_MASK;
+ }
+ }
+ instr->state = MTD_ERASE_DONE;
+
+erase_exit:
+
+ ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+
+ /* Deselect and wake up anyone waiting on the device */
+ nand_release_device(mtd);
+
+ /* Do call back function */
+ if (!ret)
+ mtd_erase_callback(instr);
+
+ /*
+ * If BBT requires refresh and erase was successful, rewrite any
+ * selected bad block tables.
+ */
+ if (bbt_masked_page == 0xffffffff || ret)
+ return ret;
+
+ for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
+ if (!rewrite_bbt[chipnr])
+ continue;
+ /* Update the BBT for chip */
+ pr_debug("%s: nand_update_bbt (%d:0x%0llx 0x%0x)\n",
+ __func__, chipnr, rewrite_bbt[chipnr],
+ chip->bbt_td->pages[chipnr]);
+ printk( "nand_erase_nand: nand_update_bbt "
+ "(%d:0x%0llx 0x%0x) page=%x\n", chipnr, rewrite_bbt[chipnr],
+ chip->bbt_td->pages[chipnr], page);
+ nand_update_bbt(mtd, rewrite_bbt[chipnr]);
+ }
+
+ /* Return more or less happy */
+ return ret;
+}
+
+/**
+ * nand_sync - [MTD Interface] sync
+ * @mtd: MTD device structure
+ *
+ * Sync is actually a wait for chip ready function.
+ */
+static void nand_sync(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ pr_debug("%s: called\n", __func__);
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(chip, mtd, FL_SYNCING);
+ /* Release it and go back */
+ nand_release_device(mtd);
+}
+
+/**
+ * nand_block_isbad - [MTD Interface] Check if block at offset is bad
+ * @mtd: MTD device structure
+ * @offs: offset relative to mtd start
+ */
+static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ return nand_block_checkbad(mtd, offs, 1, 0, 1);
+}
+
+static int nand_block_isbad_wmt(struct mtd_info *mtd, loff_t offs)
+{
+ return nand_block_checkbad(mtd, offs, 1, 0, 0);
+}
+
+/**
+ * nand_block_markbad_wmt - [MTD Interface] Mark block at the given offset as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ * @type: worn out or reserved (an unrecoverable error occurred).
+ */
+static int nand_block_markbad_wmt(struct mtd_info *mtd, loff_t ofs, int type)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ ret = nand_block_isbad_wmt(mtd, ofs);
+ if (ret) {
+ /* If it was bad already, return success and do nothing */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ return chip->block_markbad(mtd, ofs, type);
+}
+
+
+/**
+ * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ */
+static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ ret = nand_block_isbad(mtd, ofs);
+ if (ret) {
+ /* If it was bad already, return success and do nothing */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ return chip->block_markbad(mtd, ofs, 0);
+}
+
+/**
+ * nand_suspend - [MTD Interface] Suspend the NAND flash
+ * @mtd: MTD device structure
+ */
+static int nand_suspend(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ return nand_get_device(chip, mtd, FL_PM_SUSPENDED);
+}
+
+/**
+ * nand_resume - [MTD Interface] Resume the NAND flash
+ * @mtd: MTD device structure
+ */
+static void nand_resume(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (chip->state == FL_PM_SUSPENDED)
+ nand_release_device(mtd);
+ else
+ pr_err("called for a chip which is not in suspended state\n");
+}
+
+/* Set default functions */
+static void nand_set_defaults(struct nand_chip *chip, int busw)
+{
+ /* check for proper chip_delay setup, set 20us if not */
+ if (!chip->chip_delay)
+ chip->chip_delay = 20;
+
+ /* check, if a user supplied command function given */
+ if (chip->cmdfunc == NULL)
+ chip->cmdfunc = nand_command;
+
+ /* check, if a user supplied wait function given */
+ if (chip->waitfunc == NULL)
+ chip->waitfunc = nand_wait;
+
+ if (!chip->select_chip)
+ chip->select_chip = nand_select_chip;
+ if (!chip->read_byte)
+ chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
+ if (!chip->read_word)
+ chip->read_word = nand_read_word;
+ if (!chip->block_bad)
+ chip->block_bad = nand_block_bad;
+ if (!chip->block_markbad)
+ chip->block_markbad = nand_default_block_markbad;
+ if (!chip->write_buf)
+ chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
+ if (!chip->read_buf)
+ chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
+ if (!chip->verify_buf)
+ chip->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf;
+ if (!chip->scan_bbt)
+ chip->scan_bbt = nand_default_bbt;
+
+ if (!chip->controller) {
+ chip->controller = &chip->hwcontrol;
+ spin_lock_init(&chip->controller->lock);
+ init_waitqueue_head(&chip->controller->wq);
+ }
+
+}
+#if 0
+
+/* Sanitize ONFI strings so we can safely print them */
+static void sanitize_string(uint8_t *s, size_t len)
+{
+ ssize_t i;
+
+ /* Null terminate */
+ s[len - 1] = 0;
+
+ /* Remove non printable chars */
+ for (i = 0; i < len - 1; i++) {
+ if (s[i] < ' ' || s[i] > 127)
+ s[i] = '?';
+ }
+
+ /* Remove trailing spaces */
+ strim(s);
+}
+
+static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++ << 8;
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
+ }
+
+ return crc;
+}
+#endif
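+/*
+ * shift_bit - return the number of trailing zero bits in @value
+ * (i.e. log2(value) when value is a power of two).
+ */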
+static int shift_bit(uint64_t value)
+{
+ int i = 0;
+ while (!(value & 1)) {
+ value >>= 1;
+ i++;
+ if (i == 63)
+ break;
+ }
+	/* return the number of trailing zero bits */
+ return i;
+}
+#if 0
+/*
+ * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
+ */
+static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
+ int *busw)
+{
+ struct nand_onfi_params *p = &chip->onfi_params;
+ int i;
+ int val;
+
+ /* Try ONFI for unknown chip or LP */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
+ if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
+ chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
+ return 0;
+
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+ for (i = 0; i < 3; i++) {
+ chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
+ if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
+ le16_to_cpu(p->crc)) {
+ pr_info("ONFI param page %d valid\n", i);
+ break;
+ }
+ }
+
+ if (i == 3)
+ return 0;
+
+ /* Check version */
+ val = le16_to_cpu(p->revision);
+ if (val & (1 << 5))
+ chip->onfi_version = 23;
+ else if (val & (1 << 4))
+ chip->onfi_version = 22;
+ else if (val & (1 << 3))
+ chip->onfi_version = 21;
+ else if (val & (1 << 2))
+ chip->onfi_version = 20;
+ else if (val & (1 << 1))
+ chip->onfi_version = 10;
+ else
+ chip->onfi_version = 0;
+
+ if (!chip->onfi_version) {
+ pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
+ return 0;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ if (!mtd->name)
+ mtd->name = p->model;
+ mtd->writesize = le32_to_cpu(p->byte_per_page);
+ mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
+ mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ chip->chipsize = le32_to_cpu(p->blocks_per_lun);
+ chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+ *busw = 0;
+ if (le16_to_cpu(p->features) & 1)
+ *busw = NAND_BUSWIDTH_16;
+
+ chip->options &= ~NAND_CHIPOPTIONS_MSK;
+ chip->options |= (NAND_NO_READRDY |
+ NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
+
+ pr_info("ONFI flash detected\n");
+ return 1;
+}
+
+/*
+ * Get the flash and manufacturer id and lookup if the type is supported.
+ */
+static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int busw,
+ int *maf_id, int *dev_id,
+ struct nand_flash_dev *type)
+{
+ int i, maf_idx;
+ u8 id_data[8];
+ int ret;
+
+ /* Select the device */
+ chip->select_chip(mtd, 0);
+
+ /*
+ * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
+ * after power-up.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ /* Send the command for reading device ID */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+ /* Read manufacturer and device IDs */
+ *maf_id = chip->read_byte(mtd);
+ *dev_id = chip->read_byte(mtd);
+
+ /*
+	 * Try again to make sure, as on some systems bus-hold or other
+ * interface concerns can cause random data which looks like a
+ * possibly credible NAND flash to appear. If the two results do
+ * not match, ignore the device completely.
+ */
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+ for (i = 0; i < 2; i++)
+ id_data[i] = chip->read_byte(mtd);
+
+ if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
+ pr_info("%s: second ID read did not match "
+ "%02x,%02x against %02x,%02x\n", __func__,
+ *maf_id, *dev_id, id_data[0], id_data[1]);
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!type)
+ type = nand_flash_ids;
+
+ for (; type->name != NULL; type++)
+ if (*dev_id == type->id)
+ break;
+
+ chip->onfi_version = 0;
+ if (!type->name || !type->pagesize) {
+		/* Check if chip is ONFI compliant */
+ ret = nand_flash_detect_onfi(mtd, chip, &busw);
+ if (ret)
+ goto ident_done;
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+ /* Read entire ID string */
+
+ for (i = 0; i < 8; i++)
+ id_data[i] = chip->read_byte(mtd);
+
+ if (!type->name)
+ return ERR_PTR(-ENODEV);
+
+ if (!mtd->name)
+ mtd->name = type->name;
+
+ chip->chipsize = (uint64_t)type->chipsize << 20;
+
+ if (!type->pagesize && chip->init_size) {
+ /* Set the pagesize, oobsize, erasesize by the driver */
+ busw = chip->init_size(mtd, chip, id_data);
+ } else if (!type->pagesize) {
+ int extid;
+ /* The 3rd id byte holds MLC / multichip data */
+ chip->cellinfo = id_data[2];
+ /* The 4th id byte is the important one */
+ extid = id_data[3];
+
+ /*
+ * Field definitions are in the following datasheets:
+ * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
+ * New style (6 byte ID): Samsung K9GBG08U0M (p.40)
+ *
+ * Check for wraparound + Samsung ID + nonzero 6th byte
+ * to decide what to do.
+ */
+ if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
+ id_data[0] == NAND_MFR_SAMSUNG &&
+ (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ id_data[5] != 0x00) {
+ /* Calc pagesize */
+ mtd->writesize = 2048 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ switch (extid & 0x03) {
+ case 1:
+ mtd->oobsize = 128;
+ break;
+ case 2:
+ mtd->oobsize = 218;
+ break;
+ case 3:
+ mtd->oobsize = 400;
+ break;
+ default:
+ mtd->oobsize = 436;
+ break;
+ }
+ extid >>= 2;
+ /* Calc blocksize */
+ mtd->erasesize = (128 * 1024) <<
+ (((extid >> 1) & 0x04) | (extid & 0x03));
+ busw = 0;
+ } else {
+ /* Calc pagesize */
+ mtd->writesize = 1024 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ mtd->oobsize = (8 << (extid & 0x01)) *
+ (mtd->writesize >> 9);
+ extid >>= 2;
+ /* Calc blocksize. Blocksize is multiples of 64KiB */
+ mtd->erasesize = (64 * 1024) << (extid & 0x03);
+ extid >>= 2;
+ /* Get buswidth information */
+ busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+ }
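+		/*
+		 * Worked example (editor's illustration): in the old-style
+		 * branch above, a 4th ID byte of 0x15 decodes to writesize =
+		 * 1024 << 1 = 2048, oobsize = (8 << 1) * (2048 >> 9) = 64,
+		 * erasesize = 64KiB << 1 = 128KiB and an 8-bit bus.
+		 */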
+ } else {
+ /*
+ * Old devices have chip data hardcoded in the device id table.
+ */
+ mtd->erasesize = type->erasesize;
+ mtd->writesize = type->pagesize;
+ mtd->oobsize = mtd->writesize / 32;
+ busw = type->options & NAND_BUSWIDTH_16;
+
+ /*
+ * Check for Spansion/AMD ID + repeating 5th, 6th byte since
+ * some Spansion chips have erasesize that conflicts with size
+ * listed in nand_ids table.
+ * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
+ */
+ if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 &&
+ id_data[5] == 0x00 && id_data[6] == 0x00 &&
+ id_data[7] == 0x00 && mtd->writesize == 512) {
+ mtd->erasesize = 128 * 1024;
+ mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
+ }
+ }
+ /* Get chip options, preserve non chip based options */
+ chip->options &= ~NAND_CHIPOPTIONS_MSK;
+ chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
+
+ /*
+ * Check if chip is not a Samsung device. Do not clear the
+ * options for chips which do not have an extended id.
+ */
+ if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
+ chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
+ident_done:
+
+ /*
+ * Set chip as a default. Board drivers can override it, if necessary.
+ */
+ chip->options |= NAND_NO_AUTOINCR;
+
+ /* Try to identify manufacturer */
+ for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
+ if (nand_manuf_ids[maf_idx].id == *maf_id)
+ break;
+ }
+
+	/*
+	 * Check if the buswidth is correct. Hardware drivers should have
+	 * set up the chip correctly!
+	 */
+ if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ pr_info("NAND device: Manufacturer ID:"
+ " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
+ *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
+ pr_warn("NAND bus width %d instead %d bit\n",
+ (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
+ busw ? 16 : 8);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Calculate the address shift from the page size */
+ chip->page_shift = ffs(mtd->writesize) - 1;
+ /* Convert chipsize to number of pages per chip -1 */
+ chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+
+ chip->bbt_erase_shift = chip->phys_erase_shift =
+ ffs(mtd->erasesize) - 1;
+ if (chip->chipsize & 0xffffffff)
+ chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
+ else {
+ chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
+ chip->chip_shift += 32 - 1;
+ }
+
+ chip->badblockbits = 8;
+
+ /* Set the bad block position */
+ if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
+ chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
+ else
+ chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
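+	/*
+	 * Editor's note: NAND_LARGE_BADBLOCK_POS and NAND_SMALL_BADBLOCK_POS
+	 * are the OOB byte offsets of the factory bad block marker: byte 0
+	 * for large-page or 16-bit devices, byte 5 for small-page x8 parts.
+	 */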
+
+ /*
+ * Bad block marker is stored in the last page of each block
+ * on Samsung and Hynix MLC devices; stored in first two pages
+ * of each block on Micron devices with 2KiB pages and on
+ * SLC Samsung, Hynix, Toshiba, AMD/Spansion, and Macronix.
+ * All others scan only the first page.
+ */
+ if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ (*maf_id == NAND_MFR_SAMSUNG ||
+ *maf_id == NAND_MFR_HYNIX))
+ chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
+ else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ (*maf_id == NAND_MFR_SAMSUNG ||
+ *maf_id == NAND_MFR_HYNIX ||
+ *maf_id == NAND_MFR_TOSHIBA ||
+ *maf_id == NAND_MFR_AMD ||
+ *maf_id == NAND_MFR_MACRONIX)) ||
+ (mtd->writesize == 2048 &&
+ *maf_id == NAND_MFR_MICRON))
+ chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+
+ /* Check for AND chips with 4 page planes */
+ if (chip->options & NAND_4PAGE_ARRAY)
+ chip->erase_cmd = multi_erase_cmd;
+ else
+ chip->erase_cmd = single_erase_cmd;
+
+ /* Do not replace user supplied command function! */
+ if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+ chip->cmdfunc = nand_command_lp;
+
+ pr_info("NAND device: Manufacturer ID:"
+ " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
+ nand_manuf_ids[maf_idx].name,
+ chip->onfi_version ? chip->onfi_params.model : type->name);
+
+ return type;
+}
+#endif
+/*
+ * Get the flash and manufacturer id and look up whether the type is supported.
+ */
+static struct WMT_nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int busw, int *maf_id)
+{
+ struct WMT_nand_flash_dev *type = NULL, type_env;
+ int i, dev_id, maf_idx, ret = 0, varlen = 10;
+ unsigned int id = 0, id_5th = 0, id1, flash_bank;
+ char varval[10];
+
+ /* Select the device */
+ chip->select_chip(mtd, 0);
+
+	/* Reset the chip before reading the ID (edwardwan, debug, 2007-12-29) */
+	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ /* Send the command for reading device ID */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+ /* Read manufacturer and device IDs */
+ *maf_id = chip->read_byte(mtd);
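+	/*
+	 * ID bytes 2-4 are packed big-endian into 'id' and bytes 5-8 into
+	 * 'id_5th'; together with the manufacturer byte they are matched
+	 * against dwFlashID/dwFlashID2 in WMT_nand_flash_ids below.
+	 */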
+	for (i = 0; i < 3; i++) {
+		dev_id = chip->read_byte(mtd);
+		id += ((unsigned char)dev_id) << ((2 - i) * 8);
+	}
+	for (i = 0; i < 4; i++) {
+		dev_id = chip->read_byte(mtd);
+		id_5th += ((unsigned char)dev_id) << ((3 - i) * 8);
+	}
+	printk(KERN_INFO "nand chip device id = 0x%x 0x%x\n", id, id_5th);
+ #ifdef NAND_DEBUG
+ printk("nand chip device maf_id is %x, and dev_id is %x\n",*maf_id,dev_id);
+ #endif
+ id1 = (unsigned int)id + ((*maf_id)<<24);
+
+
+ /* Lookup the flash id */
+ /*for (i = 0; nand_flash_ids[i].name != NULL; i++) {
+ if (dev_id == nand_flash_ids[i].id) {*/
+ for (i = 0; WMT_nand_flash_ids[i].dwFlashID != 0; i++) {
+ if (((unsigned int)id + ((*maf_id)<<24)) == WMT_nand_flash_ids[i].dwFlashID) {
+ if (WMT_nand_flash_ids[i].dwFlashID == 0x98D79432)
+ if (id_5th != WMT_nand_flash_ids[i].dwFlashID2)
+ continue;
+ if (WMT_nand_flash_ids[i].dwFlashID == 0x98DE8493)
+ if (id_5th != WMT_nand_flash_ids[i].dwFlashID2)
+ continue;
+ type = &WMT_nand_flash_ids[i];
+ //printk("find nand chip device id\n");
+ break;
+ }
+ }
+ #ifdef CONFIG_MTD_NAND_WMT
+ ret = get_flash_info_from_env(id1, id_5th, &type_env);
+
+ if (!ret) {
+ if (type)
+ printk(KERN_WARNING "Both table and env have flash id info, use env info first\n");
+ type = &type_env;
+ }
+ #endif
+
+ if (!type) {
+ return ERR_PTR(-ENODEV);
+ }
+ if (!mtd->name)
+ /*mtd->name = type->name;*/
+ mtd->name = "WMT.nand";
+
+ if (wmt_getsyspara("wmt.nand.ecc", varval, &varlen) == 0) {
+ varlen = simple_strtoul(varval, NULL, 10);
+ #ifdef DBG_60BIT_ECC
+ printk("wmt_nand_ecc=%s len=%d\n", varval, varlen);
+ printk("val=%s len=%d\n", varval, varlen);
+ #endif
+ flash_bank = type->dwPageSize >> 10;
+ if ((type->dwFlashID == 0x2C64444B && type->dwFlashID2 == 0xA9000000)
+ || (type->dwFlashID == 0xADDE94EB && type->dwFlashID2 == 0x74440000)) {
+ if (varlen > type->dwECCBitNum) {
+ type->dwPageSize = type->dwPageSize - 2048;
+ type->dwBlockSize = (type->dwBlockSize/flash_bank)*(flash_bank-2);
+ type->dwECCBitNum = varlen;
+ }
+ }
+ #ifdef DBG_60BIT_ECC
+ printk("blksize=0x%x pagesize=0x%x ecc=%d\n", type->dwBlockSize, type->dwPageSize, type->dwECCBitNum);
+ #endif
+ }
+
+ /*chip->chipsize = type->chipsize << 20;*/
+ chip->chipsize = (uint64_t)type->dwBlockCount * (uint64_t)type->dwBlockSize;
+	if (((PLANE2_READ|PLANE2_PROG|PLANE2_ERASE) & type->dwSpeedUpCmd)
+	    == (PLANE2_READ|PLANE2_PROG|PLANE2_ERASE)) {
+		chip->realplanenum = 1;
+		printk(KERN_INFO "realplanenum is %d\n", chip->realplanenum);
+	} else
+		chip->realplanenum = 0;
+
+ /* get all information from table */
+ mtd->blkcnt = type->dwBlockCount;
+ chip->cellinfo = type->dwNandType << 2;
+ mtd->realwritesize = mtd->writesize = type->dwPageSize;
+ mtd->realoobsize = mtd->oobsize = type->dwSpareSize;
+ mtd->realerasesize = mtd->erasesize = type->dwBlockSize;
+	if (chip->realplanenum) { /* dan_multi: two-plane mode doubles the logical page/block */
+ mtd->planenum = 2;
+ mtd->writesize *= 2;
+ mtd->erasesize *= 2;
+ mtd->oobsize *= 2;
+ mtd->blkcnt >>= 1;
+ } else
+ mtd->planenum = 1;
+ mtd->dwECCBitNum = type->dwECCBitNum;
+ mtd->ecc_err_level = 20;
+ if (mtd->dwECCBitNum >= 40)
+ mtd->ecc_err_level = mtd->dwECCBitNum - 10;
+
+ mtd->dwRetry = type->dwRetry;
+ mtd->dwRdmz = type->dwRdmz;
+ mtd->id = type->dwFlashID;
+ mtd->id2 = type->dwFlashID2;
+ if (((mtd->id>>24)&0xFF) == NAND_MFR_TOSHIBA && type->dwDDR == 2)
+ mtd->dwDDR = type->dwDDR;
+ else
+ mtd->dwDDR = 0;
+ mtd->pageSizek = mtd->writesize >> 10;
+ mtd->pagecnt = mtd->erasesize/mtd->writesize;
+ mtd->spec_clk = type->dwRWTimming;
+ mtd->spec_tadl = type->dwTadl;
+
+ busw = type->dwDataWidth ? NAND_BUSWIDTH_16 : 0;
+ chip->page_offset[0] = type->dwBI0Position;
+ chip->page_offset[1] = type->dwBI1Position;
+
+ /* Try to identify manufacturer */
+ for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
+ if (nand_manuf_ids[maf_idx].id == *maf_id)
+ break;
+ }
+
+	/*
+	 * Check if the buswidth is correct. Hardware drivers should have
+	 * set up the chip correctly!
+	 */
+ */
+ if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ printk(KERN_INFO "NAND device: Manufacturer ID:"
+ " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
+ /*dev_id, nand_manuf_ids[maf_idx].name, mtd->name);*/
+ id, nand_manuf_ids[maf_idx].name, mtd->name);
+ printk(KERN_WARNING "NAND bus width %d instead %d bit\n",
+ (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
+ busw ? 16 : 8);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Calculate the address shift from the page size */
+ chip->page_shift = ffs(mtd->writesize) - 1;
+ chip->pagecnt_shift = ffs(mtd->pagecnt) - 1;
+ //printk("------------------page_shift=%d pgcnt_shift=%d\n", chip->page_shift, chip->pagecnt_shift);
+ /* Convert chipsize to number of pages per chip -1. */
+ //chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+ chip->pagemask = (mtd->blkcnt*mtd->pagecnt) - 1;
+
+ chip->bbt_erase_shift = chip->phys_erase_shift =
+ ffs(mtd->erasesize) - 1;
+ if (chip->chipsize > 0x80000000)
+ chip->chip_shift = shift_bit(chip->chipsize);
+ else
+ chip->chip_shift = ffs(chip->chipsize) - 1;
+ //chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
+
+ chip->badblockbits = 8;
+ /* Set the bad block position */
+ chip->badblockpos = mtd->writesize > 512 ?
+ NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
+
+ /* Get chip options, preserve non chip based options */
+ chip->options &= ~NAND_CHIPOPTIONS_MSK;
+ chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
+
+ /*
+ * Set chip as a default. Board drivers can override it, if necessary
+ */
+ chip->options |= NAND_NO_AUTOINCR;
+
+	/* Check if the chip is not a Samsung device. Do not clear the
+	 * options for chips which do not have an extended id.
+	 */
+ /*if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)*//* Dannier:to support new table*/
+ if (*maf_id != NAND_MFR_SAMSUNG && type->dwPageSize > 512)
+ chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
+
+ chip->options |= NAND_BBT_SCAN2NDPAGE;
+ /* Check for AND chips with 4 page planes */
+ if (!chip->realplanenum) {//dan_multi
+ if (chip->options & NAND_4PAGE_ARRAY)
+ chip->erase_cmd = multi_erase_cmd;
+ else
+ chip->erase_cmd = single_erase_cmd;
+ }
+
+	/* Do not replace user supplied command function! */
+ if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+ chip->cmdfunc = nand_command_lp;
+
+ printk(KERN_INFO "NAND device: Manufacturer ID:"
+ " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, id,
+ nand_manuf_ids[maf_idx].name, type->ProductName);
+
+#ifdef CONFIG_MTD_NAND_WMT
+ set_partition_size(mtd);
+ wmt_init_nfc(mtd, mtd->spec_clk, mtd->spec_tadl, busw);
+ set_ecc_info(mtd);
+ ret = alloc_write_cache(mtd);
+ if (ret)
+ return 0;
+ ret = alloc_rdmz_buffer(mtd);
+ if (ret)
+ return 0;
+#endif
+
+ return type;
+}
+
+/**
+ * nand_scan_ident - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
+ * @table: alternative NAND ID table
+ *
+ * This is the first phase of the normal nand_scan() function. It reads the
+ * flash ID and sets up MTD fields accordingly.
+ *
+ * The mtd->owner field must be set to the module of the caller.
+ */
+int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+ struct nand_flash_dev *table)
+{
+ int i = 1, busw, nand_maf_id/*, nand_dev_id*/;
+ struct nand_chip *chip = mtd->priv;
+ struct WMT_nand_flash_dev *type;
+
+ /* Get buswidth to select the correct functions */
+ busw = chip->options & NAND_BUSWIDTH_16;
+ /* Set the default functions */
+ nand_set_defaults(chip, busw);
+
+ /* Read the flash type */
+ type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id);
+ //type = nand_get_flash_type(mtd, chip, busw,
+ //&nand_maf_id, &nand_dev_id, table);
+
+ if (IS_ERR(type)) {
+ if (!(chip->options & NAND_SCAN_SILENT_NODEV))
+ pr_warn("No NAND device found\n");
+ chip->select_chip(mtd, -1);
+ return PTR_ERR(type);
+ }
+
+ /* Check for a chip array */
+ for (i = 1; i < maxchips; i++) {
+ chip->select_chip(mtd, i);
+ /* See comment in nand_get_flash_type for reset */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ /* Send the command for reading device ID */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ /* Read manufacturer and device IDs */
+ if (nand_maf_id != chip->read_byte(mtd) ||
+ /*nand_dev_id != chip->read_byte(mtd))*/
+ ((type->dwFlashID>>16)&0xFF) != chip->read_byte(mtd))
+ break;
+ }
+ if (i > 1)
+ pr_info("%d NAND chips detected\n", i);
+
+ /* Store the number of chips and calc total size for mtd */
+ chip->numchips = i;
+ mtd->size = i * chip->chipsize;
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_scan_ident);
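+/*
+ * Editor's sketch of the usual two-phase scan (not part of this driver):
+ * board code calls nand_scan_ident(), fills in any chip->ecc fields the
+ * flash table did not cover, then calls nand_scan_tail(). The partition
+ * variables below are illustrative only.
+ *
+ *	if (nand_scan_ident(mtd, 1, NULL) ||
+ *	    nand_scan_tail(mtd))
+ *		return -ENODEV;
+ *	return mtd_device_register(mtd, parts, nr_parts);
+ */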
+
+
+/**
+ * nand_scan_tail - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ *
+ * This is the second phase of the normal nand_scan() function. It fills out
+ * all the uninitialized function pointers with the defaults and scans for a
+ * bad block table if appropriate.
+ */
+int nand_scan_tail(struct mtd_info *mtd)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ /* New bad blocks should be marked in OOB, flash-based BBT, or both */
+ BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
+ !(chip->bbt_options & NAND_BBT_USE_FLASH));
+
+ if (!(chip->options & NAND_OWN_BUFFERS))
+ chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL);
+ if (!chip->buffers)
+ return -ENOMEM;
+
+ /* Set the internal oob buffer location, just after the page data */
+ chip->oob_poi = chip->buffers->databuf + mtd->writesize;
+
+ /*
+ * If no default placement scheme is given, select an appropriate one.
+ */
+ if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) {
+ switch (mtd->oobsize) {
+ case 8:
+ chip->ecc.layout = &nand_oob_8;
+ break;
+ case 16:
+ chip->ecc.layout = &nand_oob_16;
+ break;
+ case 64:
+ chip->ecc.layout = &nand_oob_64;
+ break;
+ case 128:
+ chip->ecc.layout = &nand_oob_128;
+ break;
+ default:
+ pr_warn("No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ BUG();
+ }
+ }
+
+ if (!chip->write_page)
+ chip->write_page = nand_write_page;
+
+	/*
+	 * Check the ECC mode: if 3 byte/512 byte hardware ECC is selected and
+	 * we have a 256 byte pagesize, fall back to software ECC.
+	 */
+
+ switch (chip->ecc.mode) {
+ case NAND_ECC_HW_OOB_FIRST:
+		/* Similar to NAND_ECC_HW, but a separate read_page handler */
+ if (!chip->ecc.calculate || !chip->ecc.correct ||
+ !chip->ecc.hwctl) {
+ pr_warn("No ECC functions supplied; "
+ "hardware ECC not possible\n");
+ BUG();
+ }
+ if (!chip->ecc.read_page)
+ chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+
+ case NAND_ECC_HW:
+ /* Use standard hwecc read page function? */
+ if (!chip->ecc.read_page)
+ chip->ecc.read_page = nand_read_page_hwecc;
+ if (!chip->ecc.write_page)
+ chip->ecc.write_page = nand_write_page_hwecc;
+ if (!chip->ecc.read_page_raw)
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ if (!chip->ecc.write_page_raw)
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ if (!chip->ecc.read_oob)
+ chip->ecc.read_oob = nand_read_oob_std;
+ if (!chip->ecc.write_oob)
+ chip->ecc.write_oob = nand_write_oob_std;
+
+ case NAND_ECC_HW_SYNDROME:
+ if ((!chip->ecc.calculate || !chip->ecc.correct ||
+ !chip->ecc.hwctl) &&
+ (!chip->ecc.read_page ||
+ chip->ecc.read_page == nand_read_page_hwecc ||
+ !chip->ecc.write_page ||
+ chip->ecc.write_page == nand_write_page_hwecc)) {
+ pr_warn("No ECC functions supplied; "
+ "hardware ECC not possible\n");
+ BUG();
+ }
+ /* Use standard syndrome read/write page function? */
+ if (!chip->ecc.read_page)
+ chip->ecc.read_page = nand_read_page_syndrome;
+ if (!chip->ecc.write_page)
+ chip->ecc.write_page = nand_write_page_syndrome;
+ if (!chip->ecc.read_page_raw)
+ chip->ecc.read_page_raw = nand_read_page_raw_syndrome;
+ if (!chip->ecc.write_page_raw)
+ chip->ecc.write_page_raw = nand_write_page_raw_syndrome;
+ if (!chip->ecc.read_oob)
+ chip->ecc.read_oob = nand_read_oob_syndrome;
+ if (!chip->ecc.write_oob)
+ chip->ecc.write_oob = nand_write_oob_syndrome;
+
+ if (mtd->writesize >= chip->ecc.size)
+ break;
+ pr_warn("%d byte HW ECC not possible on "
+ "%d byte page size, fallback to SW ECC\n",
+ chip->ecc.size, mtd->writesize);
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ case NAND_ECC_SOFT:
+ chip->ecc.calculate = nand_calculate_ecc;
+ chip->ecc.correct = nand_correct_data;
+ chip->ecc.read_page = nand_read_page_swecc;
+ chip->ecc.read_subpage = nand_read_subpage;
+ chip->ecc.write_page = nand_write_page_swecc;
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ chip->ecc.read_oob = nand_read_oob_std;
+ chip->ecc.write_oob = nand_write_oob_std;
+ if (!chip->ecc.size)
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ break;
+
+ case NAND_ECC_SOFT_BCH:
+ if (!mtd_nand_has_bch()) {
+ pr_warn("CONFIG_MTD_ECC_BCH not enabled\n");
+ BUG();
+ }
+ chip->ecc.calculate = nand_bch_calculate_ecc;
+ chip->ecc.correct = nand_bch_correct_data;
+ chip->ecc.read_page = nand_read_page_swecc;
+ chip->ecc.read_subpage = nand_read_subpage;
+ chip->ecc.write_page = nand_write_page_swecc;
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ chip->ecc.read_oob = nand_read_oob_std;
+ chip->ecc.write_oob = nand_write_oob_std;
+ /*
+ * Board driver should supply ecc.size and ecc.bytes values to
+ * select how many bits are correctable; see nand_bch_init()
+ * for details. Otherwise, default to 4 bits for large page
+ * devices.
+ */
+ if (!chip->ecc.size && (mtd->oobsize >= 64)) {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 7;
+ }
+ chip->ecc.priv = nand_bch_init(mtd,
+ chip->ecc.size,
+ chip->ecc.bytes,
+ &chip->ecc.layout);
+ if (!chip->ecc.priv) {
+ pr_warn("BCH ECC initialization failed!\n");
+ BUG();
+ }
+ chip->ecc.strength =
+ chip->ecc.bytes*8 / fls(8*chip->ecc.size);
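+		/*
+		 * E.g. with the defaults above (ecc.size = 512, ecc.bytes = 7)
+		 * this gives 7 * 8 / fls(4096) = 56 / 13 = 4 correctable bits.
+		 */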
+ break;
+
+ case NAND_ECC_NONE:
+ pr_warn("NAND_ECC_NONE selected by board driver. "
+ "This is not recommended!\n");
+ chip->ecc.read_page = nand_read_page_raw;
+ chip->ecc.write_page = nand_write_page_raw;
+ chip->ecc.read_oob = nand_read_oob_std;
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ chip->ecc.write_oob = nand_write_oob_std;
+ chip->ecc.size = mtd->writesize;
+ chip->ecc.bytes = 0;
+ chip->ecc.strength = 0;
+ break;
+
+ default:
+ pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode);
+ BUG();
+ }
+
+ /* For many systems, the standard OOB write also works for raw */
+ if (!chip->ecc.read_oob_raw)
+ chip->ecc.read_oob_raw = chip->ecc.read_oob;
+ if (!chip->ecc.write_oob_raw)
+ chip->ecc.write_oob_raw = chip->ecc.write_oob;
+
+ /*
+ * The number of bytes available for a client to place data into
+ * the out of band area.
+ */
+ chip->ecc.layout->oobavail = 0;
+	for (i = 0; i < ARRAY_SIZE(chip->ecc.layout->oobfree) &&
+			chip->ecc.layout->oobfree[i].length; i++)
+ chip->ecc.layout->oobavail +=
+ chip->ecc.layout->oobfree[i].length;
+ mtd->oobavail = chip->ecc.layout->oobavail;
+
+ /*
+ * Set the number of read / write steps for one page depending on ECC
+ * mode.
+ */
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
+ pr_warn("Invalid ECC parameters\n");
+ BUG();
+ }
+ chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
+
+ /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
+ !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
+ switch (chip->ecc.steps) {
+ case 2:
+ mtd->subpage_sft = 1;
+ break;
+ case 4:
+ case 8:
+ case 16:
+ mtd->subpage_sft = 2;
+ break;
+ }
+ }
+ //chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
+ if (mtd->dwECCBitNum >= 24)
+ chip->subpagesize = 1024;
+ else
+ chip->subpagesize = 512;
+
+ /* Initialize state */
+ chip->state = FL_READY;
+
+ /* De-select the device */
+ chip->select_chip(mtd, -1);
+
+ /* Invalidate the pagebuffer reference */
+ chip->pagebuf = -1;
+
+ /* Fill in remaining MTD driver data */
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
+ MTD_CAP_NANDFLASH;
+ mtd->_erase = nand_erase;
+ mtd->_point = NULL;
+ mtd->_unpoint = NULL;
+ mtd->_read = nand_read;
+ mtd->_write = nand_write;
+ mtd->_panic_write = panic_nand_write;
+ mtd->_read_oob = nand_read_oob;
+ mtd->_write_oob = nand_write_oob;
+ mtd->_sync = nand_sync;
+ mtd->_lock = NULL;
+ mtd->_unlock = NULL;
+ mtd->_suspend = nand_suspend;
+ mtd->_resume = nand_resume;
+ mtd->_block_isbad = nand_block_isbad;
+ mtd->_block_markbad = nand_block_markbad;
+ mtd->writebufsize = mtd->writesize;
+ mtd->get_para = get_para;
+
+ /* propagate ecc info to mtd_info */
+ mtd->ecclayout = chip->ecc.layout;
+ mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps;
+
+	/* edwardwan: add support for 4-bit BCH ECC */
+ mtd->read_bbinfo_facmk = nand_read_bbt_facmk;
+ /* Check, if we should skip the bad block table scan */
+ if (chip->options & NAND_SKIP_BBTSCAN)
+ return 0;
+
+ /* Build bad block table */
+ return chip->scan_bbt(mtd);
+}
+EXPORT_SYMBOL(nand_scan_tail);
+
+/*
+ * is_module_text_address() isn't exported, and it's mostly a pointless
+ * test if this is a module _anyway_ -- they'd have to try _really_ hard
+ * to call us from in-kernel code if the core NAND support is modular.
+ */
+#ifdef MODULE
+#define caller_is_module() (1)
+#else
+#define caller_is_module() \
+ is_module_text_address((unsigned long)__builtin_return_address(0))
+#endif
+
+/**
+ * nand_scan - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
+ *
+ * This fills out all the uninitialized function pointers with the defaults.
+ * The flash ID is read and the mtd/chip structures are filled with the
+ * appropriate values. The mtd->owner field must be set to the module of the
+ * caller.
+ */
+int nand_scan(struct mtd_info *mtd, int maxchips)
+{
+ int ret;
+ unsigned int ret1;
+
+ /* Many callers got this wrong, so check for it for a while... */
+ /*if (!mtd->owner && caller_is_module()) {
+ pr_crit("%s called with NULL mtd->owner!\n", __func__);
+ BUG();
+ }*/
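+	/*
+	 * Editor's note: the PMCEU register read below appears to report the
+	 * NAND controller clock gate (bit 16), which auto_pll_divisor() then
+	 * enables for the scan and disables afterwards; this is inferred from
+	 * the surrounding calls, not from WMT documentation.
+	 */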
+ ret1 = *(volatile unsigned long *)PMCEU_ADDR;
+ if (!(ret1&0x0010000))
+ printk(KERN_NOTICE "1 pmc_nand: 0x%x\n", ret1);
+ auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+ ret1 = *(volatile unsigned long *)PMCEU_ADDR;
+ if (!(ret1&0x0010000))
+ printk(KERN_NOTICE "2 pmc_nand: 0x%x\n", ret1);
+ ret = nand_scan_ident(mtd, maxchips, NULL);
+ if (!ret)
+ ret = nand_scan_tail(mtd);
+ auto_pll_divisor(DEV_NAND, CLK_DISABLE, 0, 0);
+ ret1 = *(volatile unsigned long *)PMCEU_ADDR;
+ if (ret1&0x0010000)
+ printk(KERN_NOTICE "3 pmc_nand: 0x%x\n", ret1);
+ return ret;
+}
+EXPORT_SYMBOL(nand_scan);
+
+/**
+ * nand_release - [NAND Interface] Free resources held by the NAND device
+ * @mtd: MTD device structure
+ */
+void nand_release(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
+ nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
+
+ mtd_device_unregister(mtd);
+
+ /* Free bad block table memory */
+ kfree(chip->bbt);
+ if (!(chip->options & NAND_OWN_BUFFERS))
+ kfree(chip->buffers);
+
+ /* Free bad block descriptor memory */
+ if (chip->badblock_pattern && chip->badblock_pattern->options
+ & NAND_BBT_DYNAMICSTRUCT)
+ kfree(chip->badblock_pattern);
+}
+EXPORT_SYMBOL_GPL(nand_release);
+
+static int __init nand_base_init(void)
+{
+// led_trigger_register_simple("nand-disk", &nand_led_trigger);
+ return 0;
+}
+
+static void __exit nand_base_exit(void)
+{
+// led_trigger_unregister_simple(nand_led_trigger);
+}
+
+module_init(nand_base_init);
+module_exit(nand_base_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("Generic NAND flash driver code");
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
new file mode 100644
index 00000000..a6a9e661
--- /dev/null
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -0,0 +1,2720 @@
+/*
+ * drivers/mtd/nand_bbt.c
+ *
+ * Overview:
+ * Bad block table support for the NAND driver
+ *
+ * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Description:
+ *
+ * When nand_scan_bbt is called, then it tries to find the bad block table
+ * depending on the options in the BBT descriptor(s). If no flash based BBT
+ * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory
+ * marked good / bad blocks. This information is used to create a memory BBT.
+ * Once a new bad block is discovered then the "factory" information is updated
+ * on the device.
+ * If a flash based BBT is specified then the function first tries to find the
+ * BBT on flash. If a BBT is found then the contents are read and the memory
+ * based BBT is created. If a mirrored BBT is selected then the mirror is
+ * searched too and the versions are compared. If the mirror has a greater
+ * version number, then the mirror BBT is used to build the memory based BBT.
+ * If the tables are not versioned, then we "or" the bad block information.
+ * If one of the BBTs is out of date or does not exist it is (re)created.
+ * If no BBT exists at all then the device is scanned for factory marked
+ * good / bad blocks and the bad block tables are created.
+ *
+ * For manufacturer created BBTs like the one found on M-SYS DOC devices
+ * the BBT is searched and read but never created
+ *
+ * The auto generated bad block table is located in the last good blocks
+ * of the device. The table is mirrored, so it can be updated eventually.
+ * The table is marked in the OOB area with an ident pattern and a version
+ * number which indicates which of both tables is more up to date. If the NAND
+ * controller needs the complete OOB area for the ECC information then the
+ * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
+ * course): it moves the ident pattern and the version byte into the data area
+ * and the OOB area will remain untouched.
+ *
+ * The table uses 2 bits per block
+ * 11b: block is good
+ * 00b: block is factory marked bad
+ * 01b, 10b: block is marked bad due to wear
+ *
+ * The memory bad block table uses the following scheme:
+ * 00b: block is good
+ * 01b: block is marked bad due to wear
+ * 10b: block is reserved (to protect the bbt area)
+ * 11b: block is factory marked bad
+ *
+ * Multichip devices like DOC store the bad block info per floor.
+ *
+ * Following assumptions are made:
+ * - bbts start at a page boundary, if autolocated on a block boundary
+ * - the space necessary for a bbt in FLASH does not exceed a block boundary
+ *
+ */
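+/*
+ * Editor's illustration of the in-memory table packing used below: with two
+ * bits per block, one byte covers four blocks, so the state of block n is
+ * (bbt[n >> 2] >> ((n & 0x3) << 1)) & 0x3. The multi-plane variant in this
+ * driver uses four bits per block and so halves that density.
+ */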
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+#include <mach/hardware.h>
+//#define RETRY_DEBUG
+static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
+{
+ int ret;
+
+ ret = memcmp(buf, td->pattern, td->len);
+ if (!ret)
+ return ret;
+ return -1;
+}
+
+/**
+ * check_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers. If the SCAN_EMPTY option is set then check, if
+ * all bytes except the pattern area contain 0xff.
+ */
+static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
+{
+ int i, end = 0;
+ uint8_t *p = buf;
+
+ if (td->options & NAND_BBT_NO_OOB)
+ return check_pattern_no_oob(buf, td);
+
+ end = paglen + td->offs;
+ if (td->options & NAND_BBT_SCANEMPTY) {
+ for (i = 0; i < end; i++) {
+ if (p[i] != 0xff)
+ return -1;
+ }
+ }
+ p += end;
+
+ /* Compare the pattern */
+ if (memcmp(p, td->pattern, td->len))
+ return -1;
+
+ if (td->options & NAND_BBT_SCANEMPTY) {
+ p += td->len;
+ end += td->len;
+ for (i = end; i < len; i++) {
+ if (*p++ != 0xff)
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @td: search pattern descriptor
+ * @ano_bytes: number of additional marker bytes to check (used for SanDisk parts)
+ *
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers. Same as check_pattern, but no optional empty
+ * check.
+ */
+static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td, int ano_bytes)
+{
+ int i;
+ uint8_t *p = buf;
+
+ /* Compare the pattern */
+ for (i = 0; i < td->len; i++) {
+ if (p[td->offs + i] != td->pattern[i])
+ return -1;
+ }
+ if (ano_bytes) {
+ //printk("sandisk flash");
+ for (i = 0; i < ano_bytes; i++) {
+ //printk("of=0x%x da=0x%x len=%x\n", td->offs + i, p[td->offs + i], td->len);
+ if (p[i] != td->pattern[0]) {
+ printk("p[%d]=0x%x of=0x%x da=0x%x len=%x\n", i, p[i], td->offs + i, p[td->offs + i], td->len);
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * add_marker_len - compute the length of the marker in data area
+ * @td: BBT descriptor used for computation
+ *
+ * The length will be 0 if the marker is located in OOB area.
+ */
+static u32 add_marker_len(struct nand_bbt_descr *td)
+{
+ u32 len;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ return 0;
+
+ len = td->len;
+ if (td->options & NAND_BBT_VERSION)
+ len++;
+ return len;
+}
+
+/**
+ * read_bbt - [GENERIC] Read the bad block table starting from page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td: the bbt description table
+ * @offs: offset in the memory table
+ *
+ * Read the bad block table starting from page.
+ */
+static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
+ struct nand_bbt_descr *td, int offs)
+{
+ int res, ret = 0, i, j, act = 0;
+ struct nand_chip *this = mtd->priv;
+ size_t retlen, len, totlen;
+ loff_t from;
+ int bits = td->options & NAND_BBT_NRBITS_MSK;
+ uint8_t msk = (uint8_t)((1 << bits) - 1);
+ u32 marker_len;
+ int reserved_block_code = td->reserved_block_code;
+
+ totlen = (num * bits) >> 3;
+ marker_len = add_marker_len(td);
+ //from = ((loff_t)page) << this->page_shift;
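+	/*
+	 * pageSizek is the page size in KiB, so (page * pageSizek) << 10 is
+	 * the byte offset of 'page'; it replaces the page_shift form above.
+	 */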
+ from = ((loff_t)page*mtd->pageSizek) << 10;
+
+ while (totlen) {
+ //len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
+ len = min(totlen, (size_t)(mtd->erasesize));
+ if (marker_len) {
+ /*
+ * In case the BBT marker is not in the OOB area it
+ * will be just in the first page.
+ */
+ len -= marker_len;
+ from += marker_len;
+ marker_len = 0;
+ }
+ res = mtd_read(mtd, from, len, &retlen, buf);
+ if (res < 0) {
+ if (mtd_is_eccerr(res)) {
+ pr_info("nand_bbt: ECC error in BBT at "
+ "0x%012llx\n", from & ~mtd->writesize);
+ return res;
+ } else if (mtd_is_bitflip(res)) {
+ pr_info("nand_bbt: corrected error in BBT at "
+ "0x%012llx\n", from & ~mtd->writesize);
+ ret = res;
+ } else {
+ pr_info("nand_bbt: error reading BBT\n");
+ return res;
+ }
+ }
+
+ /* Analyse data */
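+		/*
+		 * Each entry is 'bits' wide: 11b means good (skipped), 00b is
+		 * factory bad (stored as 0x3 in memory), a reserved-block code
+		 * is stored as 0x2, and anything else is treated as worn and
+		 * stored as 0x1 -- matching the scheme described at the top of
+		 * this file.
+		 */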
+ for (i = 0; i < len; i++) {
+ uint8_t dat = buf[i];
+ for (j = 0; j < 8; j += bits, act += 2) {
+ uint8_t tmp = (dat >> j) & msk;
+ if (tmp == msk)
+ continue;
+ if (reserved_block_code && (tmp == reserved_block_code)) {
+ pr_info("nand_read_bbt: (read fail)reserved block at 0x%012llx\n",
+ //(loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+ (loff_t)(((offs << 2) + (act >> 1))*mtd->pageSizek) << (10+this->pagecnt_shift));
+ this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
+ mtd->ecc_stats.bbtblocks++;
+ continue;
+ }
+ /*
+ * Leave it for now, if it's matured we can
+ * move this message to pr_debug.
+ */
+ pr_info("nand_read_bbt: bad block at 0x%012llx (block%d)\n",
+ //(loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift,
+ (loff_t)(((offs << 2) + (act >> 1))*mtd->pageSizek) << (10+this->pagecnt_shift),
+ (offs << 2) + (act >> 1));
+ /* Factory marked bad or worn out? */
+ if (tmp == 0)
+ this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
+ else
+ this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06);
+ mtd->ecc_stats.badblocks++;
+ }
+ }
+ totlen -= len;
+ from += len;
+ }
+ return ret;
+}
+extern void print_nand_buffer(char *value, unsigned int length);
+/**
+ * read_bbt_multi - [GENERIC] Read the bad block table starting from page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td: the bbt description table
+ * @offs: offset in the memory table
+ *
+ * Read the bad block table starting from page.
+ */
+static int read_bbt_multi(struct mtd_info *mtd, uint8_t *buf, int page, int num,
+ struct nand_bbt_descr *td, int offs)
+{
+ int res, ret = 0, i, j, act = 0;
+ struct nand_chip *this = mtd->priv;
+ size_t retlen, len, totlen;
+ loff_t from;
+ int bits = (td->options & NAND_BBT_NRBITS_MSK)<<1;
+ uint8_t msk = (uint8_t)((1 << bits) - 1);
+ u32 marker_len;
+ int reserved_block_code = td->reserved_block_code;//=0
+//printk("--------bit=%d, msk=%d, code=%d\n",bits, msk, reserved_block_code);
+ totlen = (num * bits) >> 3;
+ marker_len = add_marker_len(td);
+ //from = ((loff_t)page) << this->page_shift;
+ from = ((loff_t)page*mtd->pageSizek) << 10;
+//printk("----totlen=%d, marker_len=%d, page=%d\n", totlen, marker_len, page);
+ while (totlen) {
+ //len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
+ len = min(totlen, (size_t)(mtd->erasesize));
+ if (marker_len) {
+ /*
+ * In case the BBT marker is not in the OOB area it
+ * will be just in the first page.
+ */
+ len -= marker_len;
+ from += marker_len;
+ marker_len = 0;
+ }
+ res = mtd_read(mtd, from, len, &retlen, buf);
+ if (res < 0) {
+ if (mtd_is_eccerr(res)) {
+ pr_info("nand_bbt: ECC error in BBT at "
+ "0x%012llx\n", from & ~mtd->writesize);
+ return res;
+ } else if (mtd_is_bitflip(res)) {
+ pr_info("nand_bbt: corrected error in BBT at "
+ "0x%012llx\n", from & ~mtd->writesize);
+ ret = res;
+ } else {
+ pr_info("nand_bbt: error reading BBT\n");
+ return res;
+ }
+ }
+//printk("+++++++++++++++++len=%d, offs=%d\n", len, offs);
+//print_nand_buffer(buf, 8192+64);
+//print_nand_buffer(buf+8192, 8192+64);
+ /* Analyse data */
+ for (i = 0; i < len; i++) {
+ uint8_t dat = buf[i];
+ if (this->bbt_plane[0] == page || this->bbt_plane[1] == page)
+ dat = buf[i+mtd->realwritesize];
+ for (j = 0; j < 8; j += bits, act += 4) {
+ uint8_t tmp = (dat >> j) & msk;
+ if (tmp == msk)
+ continue;
+ if (reserved_block_code && (tmp == reserved_block_code)) {
+ pr_info("nand_read_bbt: (read fail)reserved block at 0x%012llx\n",
+ //(loff_t)((offs << 1) + (act >> 2)) << this->bbt_erase_shift);
+ (loff_t)(((offs << 1) + (act >> 2))*mtd->pageSizek) << (10+this->pagecnt_shift));
+ this->bbt[offs + (act >> 3)] |= 0xa << (act & 0x04);
+ mtd->ecc_stats.bbtblocks++;
+ continue;
+ }
+ /*
+ * Leave it for now, if it's matured we can
+ * move this message to pr_debug.
+ */
+ pr_info("nand_read_bbt: bad block at 0x%012llx (block%d)\n",
+ //(loff_t)((offs << 1) + (act >> 2)) << this->bbt_erase_shift,
+ ((loff_t)(((offs << 1) + (act >> 2))*mtd->pageSizek)) << (10+this->pagecnt_shift),
+ (offs << 1) + (act >> 2));
+
+ /* Factory marked bad or worn out? */
+ if (tmp == 0) {
+ this->bbt[offs + (act >> 3)] |= 0xf << (act & 0x04);
+ //printk("bbt[%d]=0x%x", offs + (act >> 3), this->bbt[offs + (act >> 3)] |= 0xf << (act & 0x04));
+ } else if (tmp == 0x3) {
+ this->bbt[offs + (act >> 3)] |= 0xc << (act & 0x04);
+ //printk("bbt[%d]=0x%x", offs + (act >> 3), this->bbt[offs + (act >> 3)] |= 0xc << (act & 0x04));
+ } else if (tmp == 0xc) {
+ this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x04);
+ //printk("bbt[%d]=0x%x", offs + (act >> 3), this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x04));
+ } else
+ this->bbt[offs + (act >> 3)] |= 0x5 << (act & 0x04);
+ mtd->ecc_stats.badblocks++;
+ }
+ }
+ totlen -= len;
+ from += len;
+ }
+ return ret;
+}
+
+/**
+ * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 read all chips; applies only if
+ * NAND_BBT_PERCHIP option is set
+ *
+ * Read the bad block table for all chips starting at a given page. We assume
+ * that the bbt bits are in consecutive order.
+ */
+static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ int res = 0, i;
+
+ if (td->options & NAND_BBT_PERCHIP) {
+ int offs = 0;
+ for (i = 0; i < this->numchips; i++) {
+ if (chip == -1 || chip == i) {
+ if (this->realplanenum) {
+ /* multi plane mode use 4-bit as an block instead of 2-bit */
+ res = read_bbt_multi(mtd, buf, td->pages[i],
+ //this->chipsize >> this->bbt_erase_shift,
+ (int)(this->chipsize >> (10+this->pagecnt_shift))/mtd->pageSizek,
+ td, offs);
+ } else {
+ res = read_bbt(mtd, buf, td->pages[i],
+ //this->chipsize >> this->bbt_erase_shift,
+ (int)(this->chipsize >> (10+this->pagecnt_shift))/mtd->pageSizek,
+ td, offs);
+ }
+ }
+ if (res)
+ return res;
+ if (this->realplanenum) {
+ //offs += this->chipsize >> (this->bbt_erase_shift + 1);
+ offs += ((int)(this->chipsize >> (10+this->pagecnt_shift+1))/mtd->pageSizek);
+ } else {
+ //offs += this->chipsize >> (this->bbt_erase_shift + 2);
+ offs += ((int)(this->chipsize >> (10+this->pagecnt_shift+2))/mtd->pageSizek);
+ }
+ }
+ } else {
+ if (this->realplanenum) {
+ /* multi plane mode use 4-bit as an block instead of 2-bit */
+ res = read_bbt_multi(mtd, buf, td->pages[0],
+ //mtd->size >> this->bbt_erase_shift, td, 0);
+ (int)(mtd->size >> (10+this->pagecnt_shift))/mtd->pageSizek, td, 0);
+ } else {
+ res = read_bbt(mtd, buf, td->pages[0],
+ //mtd->size >> this->bbt_erase_shift, td, 0);
+ (int)(mtd->size >> (10+this->pagecnt_shift))/mtd->pageSizek, td, 0);
+ if (res)
+ return res;
+ }
+ }
+ return 0;
+}
+
+
+/**
+ * read_retry_table - [GENERIC] Read the retry table starting from page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @chip: the chip number the table belongs to
+ *
+ * Read the read retry table starting from page.
+ *
+ */
+static int read_retry_table(struct mtd_info *mtd, uint8_t *buf, int page, int chip)
+{
+ int res;
+ struct nand_chip *this = mtd->priv;
+ struct nand_read_retry_param *rdtry;
+ size_t retlen;
+ loff_t from;
+
+ //from = ((loff_t) page) << this->page_shift;
+ from = ((loff_t) (page*mtd->pageSizek)) << 10;
+
+ res = mtd->_read(mtd, from, mtd->writesize, &retlen, buf);
+ if (res < 0) {
+ if (retlen != mtd->writesize) {
+ printk(KERN_INFO "nand_bbt: Error reading retry table\n");
+ return res;
+ }
+ printk(KERN_WARNING "nand_bbt: ECC error while reading retry table\n");
+ }
+
+ /* Analyse data */
+ rdtry = (struct nand_read_retry_param *)buf;
+ #ifdef RETRY_DEBUG
+ print_nand_buffer((uint8_t *)this->cur_chip, sizeof(chip_table[0]));
+ #endif
+ if (strcmp("readretry", rdtry->magic) /*|| info->data_ecc_uncor_err == 2*/) {
+		printk(KERN_WARNING "nand_bbt: retry table magic number wrong: %s\n", rdtry->magic);
+ return -1;
+ }
+ #ifdef RETRY_DEBUG
+ printk(KERN_WARNING "nand_bbt: copy from buf\n");
+ #endif
+ memcpy(/*(uint8_t *)*/this->cur_chip, buf, sizeof(chip_table[0])-16);
+ this->cur_chip->retry_def_value[this->cur_chip->retry_reg_num] = 0xff;
+ this->cur_chip->retry_def_value[this->cur_chip->retry_reg_num+1] = 0xff;
+ #ifdef RETRY_DEBUG
+ print_nand_buffer((uint8_t *)this->cur_chip, sizeof(chip_table[0]));
+ #endif
+
+ /*if (rdtry->eslc_reg_num) {
+ if (rdtry->eslc_reg_num > 5)
+ printk(KERN_WARNING "nand_bbt: eslc reg size=%d is too big\n", rdtry->eslc_reg_num);
+ this->eslc_reg_num = rdtry->eslc_reg_num;
+ this->eslc_cmd = kzalloc(this->eslc_reg_num, GFP_KERNEL);
+ if (!this->eslc_cmd) {
+ printk(KERN_ERR "nand_scan_bbt: create eslc_cmd Out of memory\n");
+ return -ENOMEM;
+ }
+ }
+ memcpy(this->eslc_cmd, ((uint8_t *)&rdtry->retry_reg_num)+4, this->eslc_reg_num);
+ print_nand_buffer(this->eslc_cmd, this->eslc_reg_num);
+
+ if (rdtry->total_retry_cnt && rdtry->retry_reg_num) {
+ if ((rdtry->total_retry_cnt * rdtry->retry_reg_num) > 64)
+ printk(KERN_WARNING "nand_bbt: eslc reg size=%d is too big\n",
+ (rdtry->total_retry_cnt * rdtry->retry_reg_num));
+ this->total_retry_cnt = rdtry->total_retry_cnt;
+ this->retry_reg_num = rdtry->retry_reg_num;
+ this->retry_cmd = kzalloc((this->retry_reg_num*this->total_retry_cnt), GFP_KERNEL);
+ if (!this->retry_cmd) {
+ printk(KERN_ERR "nand_scan_bbt: create retry_cmd Out of memory\n");
+ return -ENOMEM;
+ }
+ }
+ memcpy(this->retry_cmd, ((uint8_t *)&rdtry->retry_reg_num)+4+this->eslc_reg_num,
+ (this->retry_reg_num*this->total_retry_cnt));
+
+
+ for (i = 0; i < this->total_retry_cnt; i++) {
+ print_nand_buffer(&this->retry_cmd[i*this->retry_reg_num], this->retry_reg_num);
+ }*/
+
+ return 0;
+}
+
+/**
+ * read_abs_retry_table - [GENERIC] Read the retry table starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 read all chips.
+ *
+ * Read the retry table for all chips starting at a given page
+*/
+static int read_abs_retry_table(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
+{
+ //struct nand_chip *this = mtd->priv;
+ int res = 0, i, chips;
+
+ //chips = this->numchips
+ chips = 1;
+
+ for (i = 0; i < chips; i++) {
+ if (chip == -1 || chip == i)
+ res = read_retry_table(mtd, buf, td->pages[i], chip);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
+/* BBT marker is in the first page, no OOB */
+static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ struct nand_bbt_descr *td)
+{
+ size_t retlen;
+ size_t len;
+
+ len = td->len;
+ if (td->options & NAND_BBT_VERSION)
+ len++;
+
+ return mtd_read(mtd, offs, len, &retlen, buf);
+}
+
+/* Scan read raw data from flash */
+static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ size_t len)
+{
+ struct mtd_oob_ops ops;
+ int res;
+
+ ops.mode = MTD_OPS_RAW;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+
+ while (len > 0) {
+ ops.datbuf = buf;
+ ops.len = min(len, (size_t)mtd->writesize);
+ ops.oobbuf = buf + ops.len;
+
+ res = mtd_read_oob(mtd, offs, &ops);
+
+ if (res)
+ return res;
+
+ buf += mtd->oobsize + mtd->writesize;
+ len -= mtd->writesize;
+ offs += mtd->writesize;
+ }
+ return 0;
+}
+
+static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ size_t len, struct nand_bbt_descr *td)
+{
+ if (td->options & NAND_BBT_NO_OOB)
+ return scan_read_raw_data(mtd, buf, offs, td);
+ else
+ return scan_read_raw_oob(mtd, buf, offs, len);
+}
+
+/* Scan write data with oob to flash */
+static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
+ uint8_t *buf, uint8_t *oob)
+{
+ struct mtd_oob_ops ops;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.datbuf = buf;
+ ops.oobbuf = oob;
+ ops.len = len;
+
+ return mtd_write_oob(mtd, offs, &ops); /* call mtd->_write_oob*/
+}
+
+static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+ u32 ver_offs = td->veroffs;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ ver_offs += mtd->writesize;
+ return ver_offs;
+}
+
+/**
+ * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Read the bad block table(s) for all chips starting at a given page. We
+ * assume that the bbt bits are in consecutive order.
+ */
+static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+{
+ //struct nand_chip *this = mtd->priv;
+
+ /* Read the primary version, if available */
+ if (td->options & NAND_BBT_VERSION) {
+ //scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
+ scan_read_raw(mtd, buf, (loff_t)(td->pages[0]*mtd->pageSizek) << 10,
+ mtd->writesize, td);
+ td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ td->pages[0], td->version[0]);
+ }
+
+ /* Read the mirror version, if available */
+ if (md && (md->options & NAND_BBT_VERSION)) {
+ //scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
+ scan_read_raw(mtd, buf, (loff_t)(md->pages[0]*mtd->pageSizek) << 10,
+ mtd->writesize, td);
+ md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ md->pages[0], md->version[0]);
+ }
+ return 1;
+}
+
+/* Scan a given block full */
+static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
+ loff_t offs, uint8_t *buf, size_t readlen,
+ int scanlen, int len)
+{
+ int ret, j;
+
+ ret = scan_read_raw_oob(mtd, buf, offs, readlen);
+ /* Ignore ECC errors when checking for BBM */
+ if (ret && !mtd_is_bitflip_or_eccerr(ret))
+ return ret;
+
+ for (j = 0; j < len; j++, buf += scanlen) {
+ if (check_pattern(buf, scanlen, mtd->writesize, bd))
+ return 1;
+ }
+ return 0;
+}
+
+/* Scan a given block partially */
+static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
+ loff_t offs, uint8_t *buf, int len)
+{
+ struct mtd_oob_ops ops;
+ int j, ret, more_bytes = 0, flag = 0;
+
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = buf;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ if ((mtd->id>>24) == 0x45) {
+ more_bytes = 6;
+ }
+ for (j = 0; j < len; j++) {
+ /*
+ * Read the full oob until read_oob is fixed to handle single
+ * byte reads for 16 bit buswidth.
+ */
+		/*
+		 * Dannier Chen patch 2010.04.20: which pages of a block carry
+		 * the bad block marker depends on the flash spec; e.g. on
+		 * HY27UT088G2M-T(P) it is pages 125 and 127 of each block,
+		 * NOT always pages 0 and 1.
+		 */
+ //printk("scan_block_fast: j=%d len=%d bd->page_offset[0]=%d offset[1]=%d\n ", j, len, bd->page_offset[0], bd->page_offset[1]);
+#ifdef CONFIG_MTD_NAND_WMT_HWECC
+ ret = mtd->read_bbinfo_facmk(mtd, offs + bd->page_offset[j]*mtd->writesize, &ops);
+#else
+ ret = mtd->read_oob(mtd, offs + bd->page_offset[j]*mtd->writesize, &ops);
+#endif
+ /* Ignore ECC errors when checking for BBM */
+ if (ret && !mtd_is_bitflip_or_eccerr(ret))
+ return ret;
+
+ if (check_short_pattern(buf, bd, more_bytes))
+ flag |= 1;//return 1;
+
+ if ((flag&1) == 0)
+ if (mtd->id == 0xECDED57E && mtd->id2 == 0x68440000)
+ if (check_short_pattern(buf+1, bd, 0))
+ flag |= 1;//return 1;
+
+ if (mtd->planenum > 1) {
+ if (check_short_pattern(buf+more_bytes, bd, more_bytes))
+ flag |= 2;//return 1;
+
+ if (check_short_pattern(buf+32, bd, 0))
+ flag |= 2;//return 1;
+
+ if ((flag&2) == 0)
+ if (mtd->id == 0xECDED57E && mtd->id2 == 0x68440000)
+ if (check_short_pattern(buf+33, bd, 0))
+ flag |= 2;//return 1;
+ }
+ if (flag)
+ return flag;
+ /*offs += mtd->writesize;*/
+ /* Dannier Chen patch 2010.04.20: end */
+ }
+ return 0;
+}
+
+/**
+ * create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips; applies only
+ * if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device for the given good/bad block
+ * identify pattern.
+ */
+static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *bd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, numblocks, len, scanlen;
+ int startblock;
+ loff_t from;
+ size_t readlen;
+
+ pr_info("Scanning device for bad blocks\n");
+
+ if (bd->options & NAND_BBT_SCANALLPAGES)
+ //len = 1 << (this->bbt_erase_shift - this->page_shift);
+ len = 1 << (this->pagecnt_shift);
+ else if (bd->options & NAND_BBT_SCAN2NDPAGE)
+ len = 2;
+ else
+ len = 1;
+
+ if (!(bd->options & NAND_BBT_SCANEMPTY)) {
+ /* We need only read few bytes from the OOB area */
+ scanlen = 0;
+ readlen = bd->len;
+ } else {
+ /* Full page content should be read */
+ scanlen = mtd->writesize + mtd->oobsize;
+ readlen = len * mtd->writesize;
+ }
+
+ if (chip == -1) {
+ /*
+ * Note that numblocks is 2 * (real numblocks) here, see i+=2
+ * below as it makes shifting and masking less painful
+ */
+ //numblocks = mtd->size >> (this->bbt_erase_shift - 1);
+ numblocks = ((int)(mtd->size >> (10+this->pagecnt_shift-1)))/mtd->pageSizek;
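+		/*
+		 * Equivalent to 2 * (mtd->size / mtd->erasesize), since
+		 * erasesize = (pageSizek << 10) << pagecnt_shift.
+		 */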
+ startblock = 0;
+ from = 0;
+ } else {
+ if (chip >= this->numchips) {
+ pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
+ chip + 1, this->numchips);
+ return -EINVAL;
+ }
+ //numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
+ numblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift-1)))/mtd->pageSizek;
+ startblock = chip * numblocks;
+ numblocks += startblock;
+ //from = (loff_t)startblock << (this->bbt_erase_shift - 1);
+ from = (loff_t)(startblock*mtd->pageSizek) << (10+this->pagecnt_shift-1);
+ }
+
+ if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
+ from += mtd->erasesize - (mtd->writesize * len);
+
+ for (i = startblock; i < numblocks;) {
+ int ret;
+
+ if (((mtd->id>>24)&0xFF) == 0x45) {
+			/* dannierchen 2012-12-17: erase all blocks on SanDisk parts before checking for bad blocks */
+ /*printk(KERN_INFO "create_bbt: erase all blocks for sandisk\n");*/
+ struct erase_info einfo;
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = from;
+ //einfo.len = 1 << this->bbt_erase_shift;
+ einfo.len = mtd->erasesize;
+ /*printk("einfo.addr is %llx einfo.len is %llx\n", einfo.addr, einfo.len);*/
+ nand_erase_nand(mtd, &einfo, 0xFF);
+ } /* end of dannierchen erase 20121217 */
+
+ BUG_ON(bd->options & NAND_BBT_NO_OOB);
+
+ if (bd->options & NAND_BBT_SCANALLPAGES)
+ ret = scan_block_full(mtd, bd, from, buf, readlen,
+ scanlen, len);
+ else
+ ret = scan_block_fast(mtd, bd, from, buf, len);
+
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ this->bbt[i >> 3] |= 0x03 << (i & 0x6);
+ pr_warn("Bad eraseblock %d at 0x%012llx\n",
+ i >> 1, (unsigned long long)from);
+ mtd->ecc_stats.badblocks++;
+
+ /* edwardwan add for debug 20071229 start */
+#if 0
+ if(mtd->ecc_stats.badblocks > 10){
+ printk("\rnand flash bad block number is greater than 10\n");
+ return 0;
+ }
+ /* edwardwan add for debug 20071229 end */
+#endif
+		} else if (((mtd->id>>24)&0xFF) != 0x45) { /* dannierchen 2009-10-14: erase good blocks when first creating the table */
+ /*printk(KERN_INFO "create_bbt: erase good blocks\n");*/
+ struct erase_info einfo;
+ int res = 0;
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = from;
+ //einfo.len = 1 << this->bbt_erase_shift;
+ einfo.len = mtd->erasesize;
+ /*printk("einfo.addr is %llx\n",einfo.addr);
+ printk("einfo.len is %llx\n",einfo.len);*/
+ res = nand_erase_nand(mtd, &einfo, 0xFF);
+ if (res < 0)
+				printk(KERN_ERR "nand_erase_nand addr 0x%llx result is %x\n", einfo.addr, res);
+ } /* end of dannierchen erase 20091014 */
+
+ i += 2;
+ //from += (1 << this->bbt_erase_shift);
+ from += (mtd->erasesize);
+ }
+ return 0;
+}
+
+/**
+ * create_bbt_multi - [GENERIC] Create a bad block table by scanning the device
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips; applies only
+ * if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device for the given good/bad block
+ * identify pattern.
+ */
+static int create_bbt_multi(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *bd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, numblocks, len, scanlen;
+ int startblock;
+ loff_t from;
+ size_t readlen;
+
+ pr_info("Scanning device for bad blocks\n");
+
+ if (bd->options & NAND_BBT_SCANALLPAGES)
+ //len = 1 << (this->bbt_erase_shift - this->page_shift);
+ len = 1 << this->pagecnt_shift;
+ else if (bd->options & NAND_BBT_SCAN2NDPAGE)
+ len = 2;
+ else
+ len = 1;
+
+ if (!(bd->options & NAND_BBT_SCANEMPTY)) {
+ /* We need only read few bytes from the OOB area */
+ scanlen = 0;
+ readlen = bd->len;
+ } else {
+ /* Full page content should be read */
+ scanlen = mtd->writesize + mtd->oobsize;
+ readlen = len * mtd->writesize;
+ }
+
+ if (chip == -1) {
+ /*
+ * Note that numblocks is 2 * (real numblocks) here, see i+=2
+ * below as it makes shifting and masking less painful
+ */
+ //numblocks = mtd->size >> (this->bbt_erase_shift - 2);
+ numblocks = ((int)(mtd->size >> (10+this->pagecnt_shift-2)))/mtd->pageSizek;
+ startblock = 0;
+ from = 0;
+ } else {
+ if (chip >= this->numchips) {
+ pr_warn("create_bbt_multi(): chipnr (%d) > available chips (%d)\n",
+ chip + 1, this->numchips);
+ return -EINVAL;
+ }
+ //numblocks = this->chipsize >> (this->bbt_erase_shift - 2);
+ numblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift-2)))/mtd->pageSizek;
+ startblock = chip * numblocks;
+ numblocks += startblock;
+ //from = (loff_t)startblock << (this->bbt_erase_shift - 2);
+ from = (loff_t)(startblock*mtd->pageSizek) << (10+this->pagecnt_shift-2);
+ }
+
+ if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
+ from += mtd->erasesize - (mtd->writesize * len);
+
+ for (i = startblock; i < numblocks;) {
+ int ret;
+
+ if ((mtd->id>>24) == 0x45) {
+			/* dannierchen 2012-12-17: erase all blocks on SanDisk parts before checking for bad blocks */
+ /*printk(KERN_INFO "create_bbt_multi: erase all blocks for sandisk\n");*/
+ struct erase_info einfo;
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = from;
+ //einfo.len = 1 << this->bbt_erase_shift;
+ einfo.len = mtd->erasesize;
+ /*printk("einfo.addr is %llx einfo.len is %llx\n", einfo.addr, einfo.len);*/
+ nand_erase_nand(mtd, &einfo, 0xFF);
+ } /* end of dannierchen erase 20121217 */
+
+ BUG_ON(bd->options & NAND_BBT_NO_OOB);
+
+ if (bd->options & NAND_BBT_SCANALLPAGES)
+ ret = scan_block_full(mtd, bd, from, buf, readlen,
+ scanlen, len);
+ else
+ ret = scan_block_fast(mtd, bd, from, buf, len);
+
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ this->bbt[i >> 3] |= 0x0F << (i & 0x4);
+ pr_warn("Bad eraseblock %d at 0x%012llx\n",
+ i >> 2, (unsigned long long)from);
+ mtd->ecc_stats.badblocks++;
+
+ /* edwardwan add for debug 20071229 start */
+#if 0
+ if(mtd->ecc_stats.badblocks > 10){
+ printk("\rnand flash bad block number is greater than 10\n");
+ return 0;
+ }
+ /* edwardwan add for debug 20071229 end */
+#endif
+		} else if ((mtd->id>>24) != 0x45) { /* dannierchen 2009-10-14: erase good blocks when first creating the table */
+ /*printk(KERN_INFO "create_bbt_multi: erase good blocks\n");*/
+ struct erase_info einfo;
+ int res = 0;
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = from;
+ //einfo.len = 1 << this->bbt_erase_shift;
+ einfo.len = mtd->erasesize;
+ /*printk("einfo.addr is %llx\n",einfo.addr);
+ printk("einfo.len is %llx\n",einfo.len);*/
+ res = nand_erase_nand(mtd, &einfo, 0xFF);
+ if (res < 0)
+			printk(KERN_ERR "nand_erase_nand addr 0x%llx result is %x\n", einfo.addr, res);
+ } /* end of dannierchen erase 20091014 */
+
+ i += 4;
+ //from += (1 << this->bbt_erase_shift);
+ from += (mtd->erasesize);
+ }
+ return 0;
+}
+
+int create_hynix_table(struct mtd_info *mtd, int chip)
+{
+ int res;
+ res = mtd->get_para(mtd, chip);
+
+ return res;
+}
+
+static int check_retry_pattern(uint8_t *buf, int paglen, struct nand_bbt_descr *td)
+{
+ int i;
+ uint8_t *p = buf+paglen;
+
+ for (i = 0; i < 10; i++) {
+ if (p[i] != td->pattern[i])
+ return -1;
+ }
+ return 0;
+}
+/*
+ * Read the OOB area to search for the read-retry table.
+ */
+
+static int search_hynix_retry_table(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, chips;
+ int startblock, block, dir;
+ int bbtblocks;
+ //int blocktopage = this->bbt_erase_shift - this->page_shift;
+ int blocktopage = this->pagecnt_shift;
+
+ /* Search direction top -> down ? */
+ //if (td->options & NAND_BBT_LASTBLOCK) {
+ //startblock = (mtd->size >> this->bbt_erase_shift) - 1;
+ startblock = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek - 1;
+ dir = -1;
+ /*} else {
+ startblock = 0;
+ dir = 1;
+ }*/
+
+	/* So far, only the first chip's parameters are used for read retry on 2-die chips */
+ //chips = this->numchips;
+ chips = 1;
+
+ //bbtblocks = this->chipsize >> this->bbt_erase_shift;
+ bbtblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ startblock &= bbtblocks - 5;
+
+ for (i = 0; i < chips; i++) {
+ td->pages[i] = -1;
+ /* Scan the maximum number of blocks */
+ for (block = 0; block < td->maxblocks; block++) {
+
+ int actblock = startblock + dir * block;
+ //loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
+ loff_t offs = (loff_t)(actblock*mtd->pageSizek) << (10+this->pagecnt_shift);
+
+ /* Read first page */
+ scan_read_raw(mtd, buf, offs, mtd->writesize, td);
+
+ if (!check_retry_pattern(buf, mtd->writesize, this->retry_pattern)) {
+ td->pages[i] = actblock << blocktopage;
+ break;
+ }
+ }
+ //startblock += this->chipsize >> this->bbt_erase_shift;
+ startblock += ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ }
+ /* Check, if we found a bbt for each requested chip */
+ for (i = 0; i < chips; i++) {
+ if (td->pages[i] == -1)
+ printk(KERN_WARNING "Retry block table not found for chip %d\n", i);
+ else
+ printk(KERN_WARNING "Retry block table is found for chip %d\n", i);
+ }
+ return 0;
+}
+
+
+/**
+ * search_bbt - [GENERIC] scan the device for a specific bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ *
+ * Read the bad block table by searching for a given ident pattern. Search is
+ * performed either from the beginning up or from the end of the device
+ * downwards. The search always starts at the start of a block. If the option
+ * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
+ * the bad block information of this chip. This is necessary to provide support
+ * for certain DOC devices.
+ *
+ * The bbt ident pattern resides in the oob area of the first page in a block.
+ */
+static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, chips;
+ int bits, startblock, block, dir;
+ int scanlen = mtd->writesize + mtd->oobsize;
+ int bbtblocks;
+ //int blocktopage = this->bbt_erase_shift - this->page_shift;
+ int blocktopage = this->pagecnt_shift;
+
+ /* Search direction top -> down? */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ //startblock = (mtd->size >> this->bbt_erase_shift) - 1;
+ startblock = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek - 1;
+ dir = -1;
+ } else {
+ startblock = 0;
+ dir = 1;
+ }
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = this->numchips;
+ //bbtblocks = this->chipsize >> this->bbt_erase_shift;
+ bbtblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ startblock &= bbtblocks - 1;
+ } else {
+ chips = 1;
+ //bbtblocks = mtd->size >> this->bbt_erase_shift;
+ bbtblocks = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ }
+
+ /* Number of bits for each erase block in the bbt */
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+ if (this->realplanenum)
+ bits<<=1;
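+	/* Dual-plane devices keep two 2-bit entries per block, so double the width */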
+
+ for (i = 0; i < chips; i++) {
+ /* Reset version information */
+ td->version[i] = 0;
+ td->pages[i] = -1;
+ /* Scan the maximum number of blocks */
+ for (block = 0; block < td->maxblocks; block++) {
+
+ int actblock = startblock + dir * block;
+ //loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
+ loff_t offs = (loff_t)(actblock*mtd->pageSizek) << (10+this->pagecnt_shift);
+
+ /* Read first page */
+ scan_read_raw(mtd, buf, offs, mtd->writesize, td);
+ //print_nand_buffer(buf+mtd->writesize, 64);
+ if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
+ td->pages[i] = actblock << blocktopage;
+ printk("get bbt0 from %x\n",td->pages[i]);
+ this->bbt_plane[i] = 0;
+ if (td->options & NAND_BBT_VERSION) {
+ u32 offs_ver;
+ offs_ver = bbt_get_ver_offs(mtd, td);
+ td->version[i] = buf[offs_ver];
+ }
+ break;
+ }
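+			/*
+			 * On dual-plane parts the ident pattern may instead sit
+			 * 20 bytes further into the OOB area; the page of such a
+			 * table is remembered in bbt_plane[].
+			 */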
+ if (this->realplanenum)
+ if (!check_pattern(buf, scanlen, mtd->writesize+20, td)) {
+ td->pages[i] = actblock << blocktopage;
+ //printk("get bbt1 from %x\n",td->pages[i]);
+ this->bbt_plane[i] = td->pages[i];
+ //printk("get bbt plane[%d] from %x\n",i, this->bbt_plane[i]);
+ if (td->options & NAND_BBT_VERSION) {
+ u32 offs_ver;
+ offs_ver = bbt_get_ver_offs(mtd, td);
+ td->version[i] = buf[20+offs_ver];
+ }
+ break;
+ }
+ }
+ //startblock += this->chipsize >> this->bbt_erase_shift;
+ startblock += ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ }
+ /* Check, if we found a bbt for each requested chip */
+ for (i = 0; i < chips; i++) {
+ if (td->pages[i] == -1)
+ pr_warn("Bad block table not found for chip %d\n", i);
+ else
+ pr_info("Bad block table found at page %d, version "
+ "0x%02X\n", td->pages[i], td->version[i]);
+ }
+ return 0;
+}
+
+extern int reset_nfc(struct mtd_info *mtd, unsigned int *buf, int step);
+extern void nfc_hw_rdmz(struct mtd_info *mtd, int on);
+extern void print_nand_register(struct mtd_info *mtd);
+/**
+ * search_read_bbts - [GENERIC] scan the device for bad block table(s)
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Search and read the bad block table(s).
+ */
+static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+{
+ /* Search the primary table */
+ search_bbt(mtd, buf, td);
+
+ /* Search the mirror table */
+ if (md)
+ search_bbt(mtd, buf, md);
+
+ /* Force result check */
+ return 1;
+}
+
+/**
+ * write_bbt - [GENERIC] (Re)write the bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
+ *
+ * (Re)write the bad block table.
+ */
+static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md,
+ int chipsel)
+{
+ struct nand_chip *this = mtd->priv;
+ struct erase_info einfo;
+ int i, j, res, chip = 0;
+ int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
+ int nrchips, bbtoffs, pageoffs, ooboffs;
+ uint8_t msk[4];
+ uint8_t rcode = td->reserved_block_code;
+ size_t retlen, len = 0;
+ loff_t to;
+ struct mtd_oob_ops ops;
+
+ ops.ooblen = mtd->oobsize;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ if (!rcode)
+ rcode = 0xff;
+ /* Write bad block table per chip rather than per device? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ //numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ numblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ /* Full device write or specific chip? */
+ if (chipsel == -1) {
+ nrchips = this->numchips;
+ } else {
+ nrchips = chipsel + 1;
+ chip = chipsel;
+ }
+ } else {
+ //numblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ numblocks = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ nrchips = 1;
+ }
+
+ /* Loop through the chips */
+ for (; chip < nrchips; chip++) {
+ /*
+		 * There was already a version of the table, reuse the page.
+		 * This applies for absolute placement too, as we have the
+		 * page nr. in td->pages.
+ */
+ if (td->pages[chip] != -1) {
+ page = td->pages[chip];
+ goto write;
+ }
+
+ /*
+ * Automatic placement of the bad block table. Search direction
+ * top -> down?
+ */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = numblocks * (chip + 1) - 1;
+ dir = -1;
+ } else {
+ startblock = chip * numblocks;
+ dir = 1;
+ }
+
+ for (i = 0; i < td->maxblocks; i++) {
+ int block = startblock + dir * i;
+ /* Check, if the block is bad */
+ switch ((this->bbt[block >> 2] >>
+ (2 * (block & 0x03))) & 0x03) {
+ case 0x01:
+ case 0x03:
+ continue;
+ }
+ page = block <<
+ //(this->bbt_erase_shift - this->page_shift);
+ this->pagecnt_shift;
+ /* Check, if the block is used by the mirror table */
+ if (!md || md->pages[chip] != page)
+ goto write;
+ }
+ pr_err("No space left to write bad block table\n");
+ return -ENOSPC;
+ write:
+
+ /* Set up shift count and masks for the flash table */
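+		/*
+		 * sft turns a block index into a byte index in the flash
+		 * table, sftmsk picks the bit position within that byte, and
+		 * msk[] maps the 2-bit in-memory code (good / worn bad /
+		 * reserved / factory bad) onto the bits cleared on flash.
+		 */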
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+ msk[2] = 2;//~rcode;
+ switch (bits) {
+ case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x01;
+ break;
+ case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x03;
+ break;
+ case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C;
+ msk[3] = 0x0f;
+ break;
+ case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
+ msk[3] = 0xff;
+ break;
+ default: return -EINVAL;
+ }
+
+ bbtoffs = chip * (numblocks >> 2);
+ if (this->realplanenum)
+ bbtoffs = chip * (numblocks >> 1);
+
+ //to = ((loff_t)page) << this->page_shift;
+ to = ((loff_t) (page*mtd->pageSizek)) << 10;
+
+ /* Must we save the block contents? */
+ if (td->options & NAND_BBT_SAVECONTENT) {
+			printk("illegal: NAND_BBT_SAVECONTENT path should not be taken\n");
+ /* Make it block aligned */
+ //to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
+ to &= ~((loff_t)((mtd->erasesize) - 1));//danbbg
+ //len = 1 << this->bbt_erase_shift;
+ len = mtd->erasesize;
+ res = mtd_read(mtd, to, len, &retlen, buf);
+ if (res < 0) {
+ if (retlen != len) {
+ pr_info("nand_bbt: error reading block "
+ "for writing the bad block table\n");
+ return res;
+ }
+ pr_warn("nand_bbt: ECC error while reading "
+ "block for writing bad block table\n");
+ }
+ /* Read oob data */
+ //ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
+ ops.ooblen = (mtd->pagecnt) * mtd->oobsize;
+ ops.oobbuf = &buf[len];
+ res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
+ if (res < 0 || ops.oobretlen != ops.ooblen)
+ goto outerr;
+
+ /* Calc the byte offset in the buffer */
+ //pageoffs = page - (int)(to >> this->page_shift);
+ pageoffs = page - ((int)(to >> 10))/mtd->pageSizek;
+ //offs = pageoffs << this->page_shift;
+ offs = (pageoffs*mtd->pageSizek) << 10;
+ /* Preset the bbt area with 0xff */
+ memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
+ ooboffs = len + (pageoffs * mtd->oobsize);
+
+ } else if (td->options & NAND_BBT_NO_OOB) {
+ ooboffs = 0;
+ offs = td->len;
+ /* The version byte */
+ if (td->options & NAND_BBT_VERSION)
+ offs++;
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ len += offs;
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len);
+ /* Pattern is located at the begin of first page */
+ memcpy(buf, td->pattern, td->len);
+ } else {
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ if (len < mtd->writesize)
+ len = mtd->writesize;
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len +
+ //(len >> this->page_shift)* mtd->oobsize);
+ mtd->pagecnt* mtd->oobsize);
+ offs = 0;
+ ooboffs = len;
+ /* Pattern is located in oob area of first page */
+ memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
+ }
+
+ if (td->options & NAND_BBT_VERSION)
+ buf[ooboffs + td->veroffs] = td->version[chip];
+
+ /* Walk through the memory table */
+ for (i = 0; i < numblocks;) {
+ uint8_t dat;
+ dat = this->bbt[bbtoffs + (i >> 2)];
+ for (j = 0; j < 4; j++, i++) {
+ int sftcnt = (i << (3 - sft)) & sftmsk;
+ /* Do not store the reserved bbt blocks! */
+
+				/*
+				 * dannier 2014/03/01: only the retry and bbt blocks themselves are
+				 * skipped here; blocks on which the read-retry command failed are
+				 * marked reserved and still need to be stored to flash.
+				 */
+ if (i >= (numblocks - td->maxblocks - 4) && (dat&0x3) == 0x2) {
+ //buf[offs + (i >> sft)] &= ~(msk[dat & 0x03] << sftcnt);
+ //printk("offs + (i >> sft)=%d data=0x%x, dat=0x%x sft=%d\n",offs + (i >> sft), ~(msk[dat & 0x03] << sftcnt), dat, sft);
+ } else
+ buf[offs + (i >> sft)] &= ~(msk[dat & 0x03] << sftcnt);
+ dat >>= 2;
+ }
+ }
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = to;
+ //einfo.len = 1 << this->bbt_erase_shift;
+ einfo.len = mtd->erasesize;
+ res = nand_erase_nand(mtd, &einfo, 1);
+ if (res < 0)
+ goto outerr;
+
+ res = scan_write_bbt(mtd, to, len, buf,
+ td->options & NAND_BBT_NO_OOB ? NULL :
+ &buf[len]);
+ if (res < 0)
+ goto outerr;
+
+ pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
+ (unsigned long long)to, td->version[chip]);
+//while(1);
+ /* Mark it as used */
+ td->pages[chip] = page;
+ }
+ return 0;
+
+ outerr:
+ pr_warn("nand_bbt: error while writing bad block table %d\n", res);
+ return res;
+}
+
+/**
+ * write_bbt_multi - [GENERIC] (Re)write the bad block table on dual-plane devices
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
+ *
+ * (Re)write the bad block table.
+ */
+static int write_bbt_multi(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md,
+ int chipsel)
+{
+ struct nand_chip *this = mtd->priv;
+ struct erase_info einfo;
+ int i, j, res, chip = 0;
+ int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
+ int nrchips, bbtoffs, pageoffs, ooboffs;
+ uint8_t msk[16];
+ uint8_t rcode = td->reserved_block_code;
+ size_t retlen, len = 0;
+ loff_t to;
+ struct mtd_oob_ops ops;
+
+ ops.ooblen = mtd->oobsize;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ if (!rcode)
+ rcode = 0xff;
+ /* Write bad block table per chip rather than per device? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ //numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ numblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ /* Full device write or specific chip? */
+ if (chipsel == -1) {
+ nrchips = this->numchips;
+ } else {
+ nrchips = chipsel + 1;
+ chip = chipsel;
+ }
+ } else {
+ //numblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ numblocks = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ nrchips = 1;
+ }
+
+ /* Loop through the chips */
+ for (; chip < nrchips; chip++) {
+ /*
+		 * There was already a version of the table, reuse the page.
+		 * This applies for absolute placement too, as we have the
+		 * page nr. in td->pages.
+ */
+ if (td->pages[chip] != -1) {
+ page = td->pages[chip];
+ goto write;
+ }
+
+ /*
+ * Automatic placement of the bad block table. Search direction
+ * top -> down?
+ */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = numblocks * (chip + 1) - 1;
+ dir = -1;
+ } else {
+ startblock = chip * numblocks;
+ dir = 1;
+ }
+
+ for (i = 0; i < td->maxblocks; i++) {
+ int block = startblock + dir * i;
+ //printk("blockstatus=%x\n",((this->bbt[block >> 1] >> (2 * (block & 0x01))) & 0x0F));
+ //printk("block=%x, sht=%d\n",block,(4 * (block & 0x01)) & 0x0F);
+ /* Check, if the block is bad */
+ switch ((this->bbt[block >> 1] >>
+ (4 * (block & 0x01))) & 0x0F) {
+ case 0x01:
+ case 0x04:
+			case 0x05://case 0x07: case 0x0D: do not occur (factory-bad + worn-bad)
+			case 0x03://case 0x0B: case 0x0E: do not occur (factory-bad + reserved)
+			case 0x0C://case 0x02: case 0x08: do not occur (good + reserved)
+			case 0x0F://case 0x06: case 0x09: do not occur (worn-bad + reserved)
+ continue;
+ //case 0x00: case 0x0A: only good or reserved is used (so far no reserved)
+ }
+ page = block <<
+ //(this->bbt_erase_shift - this->page_shift);
+ this->pagecnt_shift;
+ /* Check, if the block is used by the mirror table */
+ if (!md || md->pages[chip] != page)
+ goto write;
+ }
+ pr_err("No space left to write bad block table\n");
+ return -ENOSPC;
+ write:
+
+ /* Set up shift count and masks for the flash table */
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+ if (this->realplanenum)
+ bits<<=1;
+ msk[2] = ~rcode;
+ switch (bits) {
+ case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x01;
+ break;
+ case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x03;
+ break;
+ case 4: sft = 1; sftmsk = 0x04;
+ msk[0] = 0x00; msk[1] = 0x01; msk[2] = 0x2; msk[3] = 0x03;
+ msk[4] = 0x04; msk[5] = 0x05; msk[6] = 0x06; msk[7] = 0x07;
+ msk[8] = 0x08; msk[9] = 0x09; msk[10] = /*~rcode*/0x0a; msk[11] = 0x0b;
+ msk[12] = 0x0c; msk[13] = 0x0d; msk[14] = 0x0e; msk[15] = 0x0f;
+ break;
+ case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
+ msk[3] = 0xff;
+ break;
+ default: return -EINVAL;
+ }
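+		/*
+		 * With 4 bits per block (two 2-bit plane entries) msk[] has 16
+		 * entries and, apart from the reserved bbt/retry blocks skipped
+		 * in the walk below, carries each in-memory value straight
+		 * through to the bits cleared on flash.
+		 */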
+
+ bbtoffs = chip * (numblocks >> 2);
+ if (this->realplanenum)
+ bbtoffs = chip * (numblocks >> 1);
+
+ //to = ((loff_t)page) << this->page_shift;
+ to = ((loff_t) (page*mtd->pageSizek)) << 10;
+
+ /* Must we save the block contents? */
+ if (td->options & NAND_BBT_SAVECONTENT) {
+			printk("illegal: write_bbt_multi NAND_BBT_SAVECONTENT path should not be taken\n");
+			/* Make it block aligned */
+ //to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
+ to &= ~((loff_t)((mtd->erasesize) - 1));//danbbg
+ //len = 1 << this->bbt_erase_shift;
+ len = mtd->erasesize;
+ res = mtd_read(mtd, to, len, &retlen, buf);
+ if (res < 0) {
+ if (retlen != len) {
+ pr_info("nand_bbt: error reading block "
+ "for writing the bad block table\n");
+ return res;
+ }
+ pr_warn("nand_bbt: ECC error while reading "
+ "block for writing bad block table\n");
+ }
+ /* Read oob data */
+ //ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
+ ops.ooblen = (mtd->pagecnt) * mtd->oobsize;
+ ops.oobbuf = &buf[len];
+ res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
+ if (res < 0 || ops.oobretlen != ops.ooblen)
+ goto outerr;
+
+ /* Calc the byte offset in the buffer */
+ //pageoffs = page - (int)(to >> this->page_shift);
+ pageoffs = page - ((int)(to >> 10))/mtd->pageSizek;
+ //offs = pageoffs << this->page_shift;
+ offs = (pageoffs*mtd->pageSizek) << 10;
+ /* Preset the bbt area with 0xff */
+ memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
+ ooboffs = len + (pageoffs * mtd->oobsize);
+
+ } else if (td->options & NAND_BBT_NO_OOB) {
+ ooboffs = 0;
+ offs = td->len;
+ /* The version byte */
+ if (td->options & NAND_BBT_VERSION)
+ offs++;
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ len += offs;
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len);
+ /* Pattern is located at the begin of first page */
+ memcpy(buf, td->pattern, td->len);
+ } else {
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ if (len < mtd->writesize)
+ len = mtd->writesize;
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len +
+ //(len >> this->page_shift)* mtd->oobsize);
+ mtd->pagecnt* mtd->oobsize);
+ offs = 0;
+ ooboffs = len;
+ /* Pattern is located in oob area of first page */
+ memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
+ //printk("td->len=%d ooboffs=%d td->offs=%d\n", td->len, ooboffs, td->offs);
+ }
+
+ if (td->options & NAND_BBT_VERSION)
+ buf[ooboffs + td->veroffs] = td->version[chip];
+
+ /* Walk through the memory table */
+ for (i = 0; i < numblocks;) {
+ uint8_t dat;
+ dat = this->bbt[bbtoffs + (i >> 1)];
+ for (j = 0; j < 2; j++, i++) {
+ int sftcnt = (i << (3 - sft)) & sftmsk;
+ /* Do not store the reserved bbt blocks! */
+				/*
+				 * dannier 2014/03/01: only the retry and bbt blocks themselves are
+				 * skipped here; blocks on which the read-retry command failed are
+				 * marked reserved and still need to be stored to flash.
+				 */
+ if (i >= (numblocks - td->maxblocks - 4) && (dat&0xF)==0xa) {
+ //buf[offs + (i >> sft)] &= ~(msk[dat & 0x0F] << sftcnt);
+ //printk("offs + (i >> sft)=%d data=0x%x, dat=0x%x sft=%d\n",offs + (i >> sft), ~(msk[dat & 0x0F] << sftcnt), dat, sft);
+ } else
+ buf[offs + (i >> sft)] &= ~(msk[dat & 0x0F] << sftcnt);
+
+ dat >>= 4;
+ }
+ }
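+		/*
+		 * Duplicate the table at the second plane's page offset
+		 * (realwritesize), presumably so both planes' pages carry a
+		 * copy of it.
+		 */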
+ memcpy(&buf[mtd->realwritesize], buf, (numblocks>>1));
+//printk("print bbt write info ");print_nand_buffer(buf, 1536);
+/*printk("Bad block table written to 0x%012llx, version 0x%02X\n",
+ (unsigned long long)to, td->version[chip]);dump_stack();while(1);*/
+//printk("erase blk=%d, page=0x%x len=%d copy=%d\n", (unsigned int)(to>>this->bbt_erase_shift), page, len, (numblocks>>1));
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = to;
+ //einfo.len = 1 << this->bbt_erase_shift;
+ einfo.len = mtd->erasesize;
+ res = nand_erase_nand(mtd, &einfo, 1);
+ //printk("erase ret=%d\n",res);
+ if (res < 0)
+ goto outerr;
+
+ res = scan_write_bbt(mtd, to, len, buf,
+ td->options & NAND_BBT_NO_OOB ? NULL :
+ &buf[len]);
+ if (res < 0)
+ goto outerr;
+
+ pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
+ (unsigned long long)to, td->version[chip]);
+
+ /* Mark it as used */
+ td->pages[chip] = page;
+ }
+ return 0;
+
+ outerr:
+ pr_warn("nand_bbt: multi error while writing bad block table %d\n", res);
+ return res;
+}
+
+void copy_retry_info_to_buf(struct mtd_info *mtd, uint8_t *buf)
+{
+ /*uint8_t *bf;
+ struct nand_read_retry_param *rdtry = (struct nand_read_retry_param *)buf;*/
+ struct nand_chip *this = mtd->priv;
+
+ memcpy(buf, /*(uint8_t *)*/this->cur_chip, sizeof(chip_table[0])-16);
+ #ifdef RETRY_DEBUG
+ print_nand_buffer((uint8_t *)this->cur_chip, sizeof(chip_table[0]));
+ #endif
+
+ /*
+ memcpy(buf, "ANDROID!", 8);
+ rdtry->nand_id = FlashId;//this->nand_id;
+ //rdtry->nand_id_5th = this->nand_id_5th;
+ rdtry->eslc_reg_num = this->eslc_reg_num;
+ rdtry->total_retry_cnt = this->total_retry_cnt;
+ rdtry->retry_reg_num = this->retry_reg_num;
+ bf = buf + 28;
+ if (this->eslc_reg_num)
+ memcpy(bf, this->eslc_cmd, this->eslc_reg_num);
+ bf = buf + this->eslc_reg_num;
+ if (this->retry_reg_num)
+ memcpy(bf, this->retry_cmd, this->retry_reg_num * this->total_retry_cnt);
+ else
+ printk("no retry param is writen to retry table block\n");
+
+ printk("save rdtry to block\n");
+ print_nand_buffer(buf, 128);
+ */
+}
+
+/**
+ * write_hynix_table - [GENERIC] (Re)write the Hynix read-retry table
+ *
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the retry table block
+ * @chipsel: selector for a specific chip, -1 for all
+ *
+ * (Re)write the read-retry parameter table.
+ */
+static int write_hynix_table(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, int chipsel)
+{
+ struct nand_chip *this = mtd->priv;
+ struct erase_info einfo;
+ int i, res, chip = 0;
+ int startblock, dir, page, numblocks, nrchips;
+ uint8_t rcode = td->reserved_block_code;
+ size_t len = 0;
+ loff_t to;
+ struct mtd_oob_ops ops;
+
+ ops.ooblen = mtd->oobsize;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ if (!rcode)
+ rcode = 0xff;
+
+ //numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ numblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ nrchips = chipsel + 1;
+ chip = chipsel;
+
+ /* Loop through the chips */
+ for (; chip < nrchips; chip++) {
+
+ /* Search direction top -> down ? */
+ startblock = numblocks * (chip + 1) - 5;
+ dir = -1;
+
+ for (i = 0; i < td->maxblocks; i++) {
+ int block = startblock + dir * i;
+ /* Check, if the block is bad */
+ if (this->realplanenum) {
+ switch ((this->bbt[block >> 1] >>
+ (4 * (block & 0x01))) & 0x0F) {
+ case 0x01:
+ case 0x04:
+				case 0x05://case 0x07: case 0x0D: do not occur (factory-bad + worn-bad)
+				case 0x03://case 0x0B: case 0x0E: do not occur (factory-bad + reserved)
+				case 0x0C://case 0x02: case 0x08: do not occur (good + reserved)
+				case 0x0F://case 0x06: case 0x09: do not occur (worn-bad + reserved)
+ continue;
+ //case 0x00: case 0x0A: only good or reserved is used (so far no reserved)
+ }
+ } else {
+ switch ((this->bbt[block >> 2] >>
+ (2 * (block & 0x03))) & 0x03) {
+ case 0x01:
+ case 0x03:
+ continue;
+ }
+ }
+ //page = block << (this->bbt_erase_shift - this->page_shift);
+ page = block << this->pagecnt_shift;
+ goto write;
+ }
+ printk(KERN_ERR "No space left to write read retry table\n");
+ return -ENOSPC;
+ write:
+
+ //to = ((loff_t) page) << this->page_shift;
+ to = ((loff_t) (page*mtd->pageSizek)) << 10;
+ len = mtd->writesize;
+ /* Preset the buffer with 0xff */
+ //memset(buf, 0xff, len + (len >> this->page_shift)* mtd->oobsize);
+ memset(buf, 0xff, len + mtd->pagecnt* mtd->oobsize);
+ /* Pattern is located in oob area of first page */
+ memcpy(&buf[len], td->pattern, 10);
+
+ //------write signature into buf retry into--/
+ #ifdef RETRY_DEBUG
+ printk("save rdtry to page=0x%x\n", page);
+ #endif
+ copy_retry_info_to_buf(mtd, buf);
+
+ //------erase block-----------/
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = to;
+ //einfo.len = 1 << this->bbt_erase_shift;
+ einfo.len = mtd->erasesize;
+ res = nand_erase_nand(mtd, &einfo, 1);
+ if (res < 0)
+ goto outerr;
+		printk("writing rdtry to nand flash, page addr is 0x%x, len=0x%zx\n", page, len);
+ res = scan_write_bbt(mtd, to, len, buf, &buf[len]);
+ if (res < 0)
+ goto outerr;
+
+ /* Mark it as used */
+ td->pages[chip] = page;
+ }
+ return 0;
+
+ outerr:
+ printk(KERN_WARNING
+ "nand_bbt: Error while writing read retry table %d\n", res);
+ return res;
+}
+
+/**
+ * nand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function creates a memory based bbt by scanning the device for
+ * manufacturer / software marked good / bad blocks.
+ */
+static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd->priv;
+
+ bd->options &= ~NAND_BBT_SCANEMPTY;
+ if (this->realplanenum)
+ return create_bbt_multi(mtd, this->buffers->databuf, bd, -1);
+ else
+ return create_bbt(mtd, this->buffers->databuf, bd, -1);
+}
+
+/**
+ * check_create - [GENERIC] create and write bbt(s) if necessary
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks the results of the previous call to read_bbt and creates
+ * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
+ * for the chip/device. Update is necessary if one of the tables is missing or
+ * the version nr. of one table is less than the other.
+ */
+static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
+{
+ int i, chips, writeops, create, chipsel, res, res2;
+ struct nand_chip *this = mtd->priv;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+ struct nand_bbt_descr *rd, *rd2;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP)
+ chips = this->numchips;
+ else
+ chips = 1;
+
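+	/*
+	 * writeops is a bitmask: bit 0 means the main table must be
+	 * (re)written, bit 1 the mirror table.
+	 */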
+ for (i = 0; i < chips; i++) {
+ writeops = 0;
+ create = 0;
+ rd = NULL;
+ rd2 = NULL;
+ res = res2 = 0;
+ /* Per chip or per device? */
+ chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
+ /* Mirrored table available? */
+ if (md) {
+ if (td->pages[i] == -1 && md->pages[i] == -1) {
+ create = 1;
+ writeops = 0x03;
+ } else if (td->pages[i] == -1) {
+ rd = md;
+ writeops = 0x01;
+ } else if (md->pages[i] == -1) {
+ rd = td;
+ writeops = 0x02;
+ } else if (td->version[i] == md->version[i]) {
+ rd = td;
+ if (!(td->options & NAND_BBT_VERSION))
+ rd2 = md;
+ } else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
+ rd = td;
+ writeops = 0x02;
+ } else {
+ rd = md;
+ writeops = 0x01;
+ }
+ } else {
+ if (td->pages[i] == -1) {
+ create = 1;
+ writeops = 0x01;
+ } else {
+ rd = td;
+ }
+ }
+
+ if (create) {
+ /* Create the bad block table by scanning the device? */
+ if (!(td->options & NAND_BBT_CREATE))
+ continue;
+
+ /* Create the table in memory by scanning the chip(s) */
+ if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY)) {
+ //print_nand_register(mtd);
+ if (mtd->dwRdmz)
+ reset_nfc(mtd, NULL, 3);
+ //print_nand_register(mtd);
+
+ if (this->realplanenum)
+ create_bbt_multi(mtd, buf, bd, chipsel);
+ else
+ create_bbt(mtd, buf, bd, chipsel);
+
+
+ }
+
+ td->version[i] = 1;
+ if (md)
+ md->version[i] = 1;
+ }
+
+ /* Read back first? */
+ if (rd) {
+ res = read_abs_bbt(mtd, buf, rd, chipsel);
+ if (mtd_is_eccerr(res)) {
+ /* Mark table as invalid */
+ rd->pages[i] = -1;
+ rd->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+ /* If they weren't versioned, read both */
+ if (rd2) {
+ res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
+ if (mtd_is_eccerr(res2)) {
+ /* Mark table as invalid */
+ rd2->pages[i] = -1;
+ rd2->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+
+ /* Scrub the flash table(s)? */
+ if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
+ writeops = 0x03;
+
+ /* Update version numbers before writing */
+ if (md) {
+ td->version[i] = max(td->version[i], md->version[i]);
+ md->version[i] = td->version[i];
+ }
+
+ /* Write the bad block table to the device? */
+ if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+ if (this->realplanenum)
+ res = write_bbt_multi(mtd, buf, td, md, chipsel);
+ else
+ res = write_bbt(mtd, buf, td, md, chipsel);
+ if (res < 0)
+ return res;
+ }
+
+ /* Write the mirror bad block table to the device? */
+ if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+ if (this->realplanenum)
+ res = write_bbt_multi(mtd, buf, md, td, chipsel);
+ else
+ res = write_bbt(mtd, buf, md, td, chipsel);
+ if (res < 0)
+ return res;
+ }
+ }
+ return 0;
+}
+
+static int check_retry_table(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *rd)
+{
+ int i, chips, chipsel, res = 0, need_save = 0;
+ struct nand_chip *this = mtd->priv;
+
+ /* Do we have a retry table per chip ? */
+ /* so far, use chip 0 retry param on chip 0 and chip 1 */
+ //chips = this->numchips;
+ chips = 1;
+ for (i = 0; i < chips; i++) {
+ /* Per chip */
+ chipsel = i;
+ if (rd->pages[i] == -1) {
+ goto create;
+ }
+ #ifdef RETRY_DEBUG
+ printk("read_abs_retry_table\n");
+ #endif
+ /* Read the retry table starting at a given page */
+ res = read_abs_retry_table(mtd, buf, rd, chipsel);
+ if (res == 0) {
+ if(this->cur_chip != NULL) {
+ this->select_chip(mtd, 0);
+ this->cur_chip->get_parameter(mtd, READ_RETRY_MODE);
+ this->select_chip(mtd, -1);
+ }
+ break;
+ }
+
+ create:
+ #ifdef RETRY_DEBUG
+ printk("create_hynix_table\n");
+ #endif
+ /* Create the table in memory by get feature or get otp cmd */
+ create_hynix_table(mtd, chipsel);
+
+ need_save = 1;
+
+ //printk("dannier write_hynix_table\n");
+ /* Write the retry block table to the device ? => leave it saved after bbt searched*/
+ /*res = write_hynix_table(mtd, buf, rd, chipsel);
+ if (res < 0)
+ return res;*/
+ }
+
+ return need_save;
+}
+/**
+ * mark_bbt_region - [GENERIC] mark the bad block table regions
+ * @mtd: MTD device structure
+ * @td: bad block table descriptor
+ *
+ * The bad block table regions are marked as "bad" to prevent accidental
+ * erasures / writes. The regions are identified by the mark 0x02.
+ */
+static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, j, chips, block, nrblocks, update;
+ uint8_t oldval, newval;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = this->numchips;
+ //nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ nrblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ } else {
+ chips = 1;
+ //nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ nrblocks = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ }
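+	/*
+	 * The in-memory table holds 2 bits per block; "block" below is the
+	 * bit offset (block number * 2) and 0x2 is the "reserved" mark.
+	 */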
+
+ for (i = 0; i < chips; i++) {
+ if ((td->options & NAND_BBT_ABSPAGE) ||
+ !(td->options & NAND_BBT_WRITE)) {
+ if (td->pages[i] == -1)
+ continue;
+ //block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
+ block = td->pages[i] >> (this->pagecnt_shift);
+ block <<= 1;
+ oldval = this->bbt[(block >> 3)];
+ newval = oldval | (0x2 << (block & 0x06));
+ this->bbt[(block >> 3)] = newval;
+ if ((oldval != newval) && 0/*td->reserved_block_code*/)
+ //nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1));
+ nand_update_bbt(mtd, (loff_t)(block*mtd->pageSizek) << (10+this->pagecnt_shift-1));
+ continue;
+ }
+ update = 0;
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ block = ((i + 1) * nrblocks) - td->maxblocks;
+ if (td->pattern[0] == 'r' && td->pattern[1] == 'e') {
+ block = ((i + 1) * nrblocks) - td->maxblocks - 4;
+ //printk("mark_bbt_region set blocks =%d ~ %d\n", block, block+3);
+ }
+ } else
+ block = i * nrblocks;
+ block <<= 1;
+ for (j = 0; j < td->maxblocks; j++) {
+ oldval = this->bbt[(block >> 3)];
+ newval = oldval | (0x2 << (block & 0x06));
+ this->bbt[(block >> 3)] = newval;
+ if (oldval != newval)
+ update = 1;
+ block += 2;
+ }
+ /*
+ * If we want reserved blocks to be recorded to flash, and some
+ * new ones have been marked, then we need to update the stored
+ * bbts. This should only happen once.
+ */
+ if (update && 0/*td->reserved_block_code*/)
+ //nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
+ nand_update_bbt(mtd, (loff_t)((block - 2)*mtd->pageSizek) << (10+this->pagecnt_shift-1));
+ }
+}
+
+/**
+ * mark_bbt_region_multi - [GENERIC] mark the bad block table regions
+ * @mtd: MTD device structure
+ * @td: bad block table descriptor
+ *
+ * The bad block table regions are marked as "bad" to prevent accidental
+ * erasures / writes. The regions are identified by the mark 0x0A (reserved on both planes).
+ */
+static void mark_bbt_region_multi(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, j, chips, block, nrblocks, update;
+ uint8_t oldval, newval;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = this->numchips;
+ //nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ nrblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ } else {
+ chips = 1;
+ //nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ nrblocks = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ }
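+	/*
+	 * Dual-plane variant: 4 bits per block, so "block" below is the bit
+	 * offset (block number * 4) and 0xA marks both plane entries as
+	 * reserved.
+	 */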
+
+ for (i = 0; i < chips; i++) {
+ if ((td->options & NAND_BBT_ABSPAGE) ||
+ !(td->options & NAND_BBT_WRITE)) {
+ if (td->pages[i] == -1)
+ continue;
+ //block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
+ block = td->pages[i] >> (this->pagecnt_shift);
+ block <<= 2;
+ oldval = this->bbt[(block >> 3)];
+ newval = oldval | (0xA << (block & 0x04));
+ this->bbt[(block >> 3)] = newval;
+ if ((oldval != newval) && 0/*td->reserved_block_code*/)
+ //nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 2));
+ nand_update_bbt(mtd, (loff_t)(block*mtd->pageSizek) << (10+this->pagecnt_shift-2));
+ continue;
+ }
+ update = 0;
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ block = ((i + 1) * nrblocks) - td->maxblocks;
+ if (td->pattern[0] == 'r' && td->pattern[1] == 'e') {
+ block = ((i + 1) * nrblocks) - td->maxblocks - 4;
+ //printk("mark_bbt_region set blocks =%d ~ %d\n", block, block+3);
+ }
+ } else
+ block = i * nrblocks;
+ block <<= 2;
+ for (j = 0; j < td->maxblocks; j++) {
+ oldval = this->bbt[(block >> 3)];
+ newval = oldval | (0xA << (block & 0x04));
+ this->bbt[(block >> 3)] = newval;
+ if (oldval != newval)
+ update = 1;
+ block += 4;
+ }
+ /*
+ * If we want reserved blocks to be recorded to flash, and some
+ * new ones have been marked, then we need to update the stored
+ * bbts. This should only happen once.
+ */
+ if (update && 0/*td->reserved_block_code*/)
+ //nand_update_bbt(mtd, (loff_t)(block - 4) << (this->bbt_erase_shift - 2));
+ nand_update_bbt(mtd, (loff_t)((block - 4)*mtd->pageSizek) << (10+this->pagecnt_shift-2));
+ }//print_nand_buffer(this->bbt, 2048);
+}
+
+/**
+ * verify_bbt_descr - verify the bad block description
+ * @mtd: MTD device structure
+ * @bd: the table to verify
+ *
+ * This function performs a few sanity checks on the bad block description
+ * table.
+ */
+static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd->priv;
+ u32 pattern_len;
+ u32 bits;
+ u32 table_size;
+
+ if (!bd)
+ return;
+
+ pattern_len = bd->len;
+ bits = bd->options & NAND_BBT_NRBITS_MSK;
+ if (this->realplanenum)
+ bits<<=1;
+
+ BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
+ !(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!bits);
+
+ if (bd->options & NAND_BBT_VERSION)
+ pattern_len++;
+
+ if (bd->options & NAND_BBT_NO_OOB) {
+ BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
+ BUG_ON(bd->offs);
+ if (bd->options & NAND_BBT_VERSION)
+ BUG_ON(bd->veroffs != bd->len);
+ BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
+ }
+
+ if (bd->options & NAND_BBT_PERCHIP)
+ //table_size = this->chipsize >> this->bbt_erase_shift;
+ table_size = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ else
+ //table_size = mtd->size >> this->bbt_erase_shift;
+ table_size = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ table_size >>= 3;
+ table_size *= bits;
+ if (bd->options & NAND_BBT_NO_OOB)
+ table_size += pattern_len;
+ //BUG_ON(table_size > (1 << this->bbt_erase_shift));
+ BUG_ON(table_size > mtd->erasesize);
+}
+
+/**
+ * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks whether a bad block table is already available. If
+ * not, it scans the device for manufacturer marked good / bad blocks and
+ * writes the bad block table(s) to the selected place.
+ *
+ * The bad block table memory is allocated here. It must be freed by calling
+ * the nand_free_bbt function.
+ */
+extern struct nand_read_retry_param chip_table[];
+int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd->priv;
+ int len, res = 0, i, need_save = 0;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ this->bbt_plane[1] = this->bbt_plane[0] = 0;
+
+ //len = mtd->size >> (this->bbt_erase_shift + 2);
+ len = ((int)(mtd->size >> (10+this->pagecnt_shift+2)))/mtd->pageSizek;
+ if (this->realplanenum)
+ len <<=1;
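+	/* 2 bits per block: one byte covers four blocks; dual-plane parts use 4 bits per block, hence the doubling */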
+ /*
+ * Allocate memory (2bit per block) and clear the memory bad block
+ * table.
+ */
+ this->bbt = kzalloc(len, GFP_KERNEL);
+ if (!this->bbt)
+ return -ENOMEM;
+
+ /* Clear the memory bad block table */
+ memset (this->bbt, 0x00, len);
+ /*
+	 * If no primary table descriptor is given, scan the device to build a
+ * memory based bad block table.
+ */
+ if (!td) {
+ if ((res = nand_memory_bbt(mtd, bd))) {
+ pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
+ kfree(this->bbt);
+ this->bbt = NULL;
+ }
+ return res;
+ }
+ verify_bbt_descr(mtd, td);
+ verify_bbt_descr(mtd, md);
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ //len = (1 << this->bbt_erase_shift);
+ len = mtd->erasesize;
+ //len += (len >> this->page_shift) * mtd->oobsize;
+ len += (mtd->pagecnt) * mtd->oobsize;
+ buf = vmalloc(len);
+ if (!buf) {
+ kfree(this->bbt);
+ this->bbt = NULL;
+ return -ENOMEM;
+ }
+
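+	/*
+	 * Chips that need read-retry (e.g. certain Hynix parts) are matched
+	 * by ID against chip_table[]; the retry parameter table is then
+	 * either read back from flash or re-created from the chip (get_para)
+	 * and, if newly created, written out later once the BBT blocks are
+	 * known.
+	 */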
+ if (mtd->dwRetry /*&& (mtd->id>>24) == NAND_MFR_HYNIX*/) {
+ for (i = 0; /*i < READ_RETRY_CHIP_NUM*/; i++) {
+ if (chip_table[i].nand_id == 0 && chip_table[i].nand_id_5th == 0)
+ break;
+ if (mtd->id == chip_table[i].nand_id && mtd->id2 == chip_table[i].nand_id_5th) {
+ #ifdef RETRY_DEBUG
+ printk("get retry table id 0x%x, 0x%x\n", chip_table[i].nand_id, chip_table[i].nand_id_5th);
+ #endif
+ this->cur_chip = &chip_table[i];
+ break;
+ }
+ }
+ if(this->cur_chip != NULL && chip_table[i].nand_id != 0) {
+ #ifdef RETRY_DEBUG
+ printk("search_hynix_retry_table\n");
+ #endif
+ search_hynix_retry_table(mtd, buf, this->retry_pattern);
+ #ifdef RETRY_DEBUG
+ printk("check_retry_table\n");
+ #endif
+ need_save = check_retry_table(mtd, buf, this->retry_pattern);
+ }
+ }
+
+ /* Is the bbt at a given page? */
+ if (td->options & NAND_BBT_ABSPAGE) {
+ res = read_abs_bbts(mtd, buf, td, md);
+ } else {
+ /* Search the bad block table using a pattern in oob */
+ res = search_read_bbts(mtd, buf, td, md);
+ }
+
+ if (res)
+ res = check_create(mtd, buf, bd);
+
+
+ if (mtd->dwRetry && this->cur_chip != NULL && need_save) {
+ //printk("dannier write_hynix_table\n");
+ /* Write the retry block table to the device ? */
+ res = write_hynix_table(mtd, buf, this->retry_pattern, 0);
+
+ //testing
+ //this->cur_chip->cur_try_times = 5;
+ }
+
+ /* Prevent the rdtry block regions from erasing / writing */
+ if (this->realplanenum)
+ mark_bbt_region_multi(mtd, this->retry_pattern);
+ else
+ mark_bbt_region(mtd, this->retry_pattern);
+
+ /* Prevent the bbt regions from erasing / writing */
+ if (this->realplanenum)
+ mark_bbt_region_multi(mtd, td);
+ else
+ mark_bbt_region(mtd, td);
+ if (md) {
+ if (this->realplanenum)
+ mark_bbt_region_multi(mtd, md);
+ else
+ mark_bbt_region(mtd, md);
+ }
+
+
+ vfree(buf);
+ return res;
+}
+
+/**
+ * nand_update_bbt - [NAND Interface] update bad block table(s)
+ * @mtd: MTD device structure
+ * @offs: the offset of the newly marked block
+ *
+ * The function updates the bad block table(s).
+ */
+int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *this = mtd->priv;
+ int len, res = 0;
+ int chip, chipsel;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ if (!this->bbt || !td)
+ return -EINVAL;
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ //len = (1 << this->bbt_erase_shift);
+ len = mtd->erasesize;
+ //len += (len >> this->page_shift) * mtd->oobsize;
+ len += (mtd->pagecnt) * mtd->oobsize;
+ //buf = kmalloc(len, GFP_KERNEL);
+ buf = vmalloc(len);
+ if (!buf) {
+ printk(KERN_ERR "nand_update_bbt: Out of memory\n");
+ return -ENOMEM;
+ }
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ //chip = (int)(offs >> this->chip_shift);
+ chip = ((int)(offs >> (10+this->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
+ chipsel = chip;
+ } else {
+ chip = 0;
+ chipsel = -1;
+ }
+
+ td->version[chip]++;
+ if (md)
+ md->version[chip]++;
+
+ /* Write the bad block table to the device? */
+ if (td->options & NAND_BBT_WRITE) {
+ if (this->realplanenum)
+ res = write_bbt_multi(mtd, buf, td, md, chipsel);
+ else
+ res = write_bbt(mtd, buf, td, md, chipsel);
+ if (res < 0)
+ goto out;
+ }
+ /* Write the mirror bad block table to the device? */
+ if (md && (md->options & NAND_BBT_WRITE)) {
+ if (this->realplanenum)
+ res = write_bbt_multi(mtd, buf, md, td, chipsel);
+ else
+ res = write_bbt(mtd, buf, md, td, chipsel);
+ }
+
+ out:
+ vfree(buf);
+ //printk("nand_update_bbt free mem res=%d\n", res);
+ return res;
+}
+
+/*
+ * Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
+
+static struct nand_bbt_descr agand_flashbased = {
+ .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+ .offs = 0x20,
+ .len = 6,
+ .pattern = scan_agand_pattern
+};
+
+/* Generic flash bbt descriptors */
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+static struct nand_bbt_descr bbt_main_no_bbt_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
+/**
+ * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
+ * @this: NAND chip to create descriptor for
+ *
+ * This function allocates and initializes a nand_bbt_descr for BBM detection
+ * based on the properties of @this. The new descriptor is stored in
+ * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
+ * passed to this function.
+ */
+static int nand_create_badblock_pattern(struct nand_chip *this)
+{
+ struct nand_bbt_descr *bd;
+ //struct mtd_info *mtd = this->priv;
+ if (this->badblock_pattern) {
+ pr_warn("Bad block pattern already allocated; not replacing\n");
+ return -EINVAL;
+ }
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ return -ENOMEM;
+ bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
+ bd->offs = this->badblockpos;
+ bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ bd->pattern = scan_ff_pattern;
+ bd->options |= NAND_BBT_DYNAMICSTRUCT;
+ //if ((0xFF&(mtd->id>>24)) == 0x45 || (0xFF&(mtd->id>>24)) == NAND_MFR_HYNIX)
+	bd->options |= NAND_BBT_SCAN2NDPAGE; /* All flash types need to scan 2 pages per block */
+ bd->page_offset[0] = this->page_offset[0];
+ bd->page_offset[1] = this->page_offset[1];
+ this->badblock_pattern = bd;
+ return 0;
+}
+
+/**
+ * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
+ * @mtd: MTD device structure
+ *
+ * This function selects the default bad block table support for the device and
+ * calls the nand_scan_bbt function.
+ */
+int nand_default_bbt(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+
+ /*
+ * Default for AG-AND. We must use a flash based bad block table as the
+ * devices have factory marked _good_ blocks. Erasing those blocks
+ * leads to loss of the good / bad information, so we _must_ store this
+ * information in a good / bad table during startup.
+ */
+ if (this->options & NAND_IS_AND) {
+ /* Use the default pattern descriptors */
+ if (!this->bbt_td) {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ }
+ this->bbt_options |= NAND_BBT_USE_FLASH;
+ return nand_scan_bbt(mtd, &agand_flashbased);
+ }
+
+ /* Is a flash based bad block table requested? */
+ if (this->bbt_options & NAND_BBT_USE_FLASH) {
+ /* Use the default pattern descriptors */
+ if (!this->bbt_td) {
+ if (this->bbt_options & NAND_BBT_NO_OOB) {
+ this->bbt_td = &bbt_main_no_bbt_descr;
+ this->bbt_md = &bbt_mirror_no_bbt_descr;
+ } else {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ }
+ }
+ } else {
+ this->bbt_td = NULL;
+ this->bbt_md = NULL;
+ }
+
+	if (this->bbt_td && this->bbt_td->reserved_block_code && this->realplanenum) {
+ this->bbt_td->reserved_block_code = 0x5;
+ this->bbt_md->reserved_block_code = 0x5;
+ }
+
+ if (!this->badblock_pattern)
+ nand_create_badblock_pattern(this);
+
+ return nand_scan_bbt(mtd, this->badblock_pattern);
+}
+
+/**
+ * nand_isbad_bbt - [NAND Interface] Check if a block is bad
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ */
+int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt, int allow_readfail)
+{
+ struct nand_chip *this = mtd->priv;
+ int block;
+ uint8_t res;
+
+ /* Get block number * 2 */
+ //block = (int)(offs >> (this->bbt_erase_shift - 1));
+ block = ((int)(offs >> (10+this->pagecnt_shift - 1)))/mtd->pageSizek;
+ res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
+
+ pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08llx: "
+ "(block %d) 0x%02x\n",
+ offs, block >> 1, res);
+
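+	/* 0x00 good, 0x01 worn bad, 0x02 reserved (bbt/retry blocks), anything else factory bad */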
+ switch ((int)res) {
+ case 0x00:
+ return 0;
+ case 0x01:
+ #if 0
+ if (allowbbt != 0xFF && this->state == FL_READY) {
+ struct erase_info einfo;
+ int res1 = 0;
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = offs;
+ //einfo.len = (1 << this->bbt_erase_shift);
+ einfo.len = mtd->erasesize;
+ printk("einfo.addr is 0x%llx\n",einfo.addr);
+ printk("einfo.len is 0x%llx\n", einfo.len);
+ res1 = nand_erase_nand(mtd, &einfo, 0xFF);
+ if (res1 < 0)
+ printk("nand_erase_nand addr 0x%llx result is %d\n", einfo.addr, res1);
+ }
+ #endif
+ return 1;
+ case 0x02:
+ if ((block>>1) < (mtd->blkcnt - 8)) {
+ if (allow_readfail)
+ return 0;
+ else
+ return 1;
+ } else
+ return allowbbt ? 0 : 1;
+ }
+ return 1;
+}
+
+/**
+ * nand_isbad_bbt_multi - [NAND Interface] Check if a block is bad
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ */
+int nand_isbad_bbt_multi(struct mtd_info *mtd, loff_t offs, int allowbbt, int allow_readfail)
+{
+ struct nand_chip *this = mtd->priv;
+ int block;
+ uint8_t res;
+
+ /* Get block number * 4 */
+ //block = (int)(offs >> (this->bbt_erase_shift - 2));
+ block = ((int)(offs >> (10+this->pagecnt_shift - 2)))/mtd->pageSizek;
+ res = (this->bbt[block >> 3] >> (block & 0x4)) & 0x0F;
+
+	pr_debug("nand_isbad_bbt_multi(): bbt info for offs 0x%08llx: "
+			"(block %d) 0x%02x\n",
+			offs, block >> 2, res);
+ /*printk("nand_isbad_bbt(): bbt info for offs 0x%08llx: "
+ "(block %d) 0x%02x\n",
+ offs, block >> 2, res);*/
+
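+	/* 0x00 both planes good, 0x01/0x04/0x05 worn bad on one or both planes, 0x0A reserved, anything else bad */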
+ switch ((int)res) {
+ case 0x00:
+ return 0;
+ case 0x01:
+ case 0x04:
+	case 0x05: /* one or both plane blocks are worn out */
+ #if 0
+ if (allowbbt != 0xFF && this->state == FL_READY) {
+ struct erase_info einfo;
+ int res1 = 0;
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = offs;
+ //einfo.len = (1 << this->bbt_erase_shift);
+ einfo.len = mtd->erasesize;
+ printk("einfo.addr is 0x%llx\n",einfo.addr);
+ printk("einfo.len is 0x%llx\n", einfo.len);
+ res1 = nand_erase_nand(mtd, &einfo, 0xFF);
+ if (res1 < 0)
+ printk("nand_erase_nand addr 0x%llx result is %d\n", einfo.addr, res1);
+ }
+ #endif
+ return 1;
+	case 0x0A: /* usually both plane blocks are reserved */
+ if ((block>>2) < (mtd->blkcnt - 8)) {
+ if (allow_readfail)
+ return 0;
+ else
+ return 1;
+ } else
+ return allowbbt ? 0 : 1;
+ }
+ return 1;
+}
+
+
+EXPORT_SYMBOL(nand_scan_bbt);
+EXPORT_SYMBOL(nand_default_bbt);
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
new file mode 100644
index 00000000..3803e0bb
--- /dev/null
+++ b/drivers/mtd/nand/nand_bch.c
@@ -0,0 +1,243 @@
+/*
+ * This file provides ECC correction for more than 1 bit per block of data,
+ * using binary BCH codes. It relies on the generic BCH library lib/bch.c.
+ *
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this file; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/bch.h>
+
+/**
+ * struct nand_bch_control - private NAND BCH control structure
+ * @bch: BCH control structure
+ * @ecclayout: private ecc layout for this BCH configuration
+ * @errloc: error location array
+ * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
+ */
+struct nand_bch_control {
+ struct bch_control *bch;
+ struct nand_ecclayout ecclayout;
+ unsigned int *errloc;
+ unsigned char *eccmask;
+};
+
+/**
+ * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block
+ * @mtd: MTD block structure
+ * @buf: input buffer with raw data
+ * @code: output buffer with ECC
+ */
+int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+ unsigned char *code)
+{
+ const struct nand_chip *chip = mtd->priv;
+ struct nand_bch_control *nbc = chip->ecc.priv;
+ unsigned int i;
+
+ memset(code, 0, chip->ecc.bytes);
+ encode_bch(nbc->bch, buf, chip->ecc.size, code);
+
+ /* apply mask so that an erased page is a valid codeword */
+ for (i = 0; i < chip->ecc.bytes; i++)
+ code[i] ^= nbc->eccmask[i];
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_bch_calculate_ecc);
+
+/**
+ * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @mtd: MTD block structure
+ * @buf: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ *
+ * Detect and correct bit errors for a data byte block
+ */
+int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ const struct nand_chip *chip = mtd->priv;
+ struct nand_bch_control *nbc = chip->ecc.priv;
+ unsigned int *errloc = nbc->errloc;
+ int i, count;
+
+ count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
+ NULL, errloc);
+ if (count > 0) {
+ for (i = 0; i < count; i++) {
+ if (errloc[i] < (chip->ecc.size*8))
+ /* error is located in data, correct it */
+ buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
+ /* else error in ecc, no action needed */
+
+ pr_debug("%s: corrected bitflip %u\n", __func__,
+ errloc[i]);
+ }
+ } else if (count < 0) {
+ printk(KERN_ERR "ecc unrecoverable error\n");
+ count = -1;
+ }
+ return count;
+}
+EXPORT_SYMBOL(nand_bch_correct_data);
+
+/**
+ * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
+ * @mtd: MTD block structure
+ * @eccsize: ecc block size in bytes
+ * @eccbytes: ecc length in bytes
+ * @ecclayout: output default layout
+ *
+ * Returns:
+ * a pointer to a new NAND BCH control structure, or NULL upon failure
+ *
+ * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes
+ * are used to compute BCH parameters m (Galois field order) and t (error
+ * correction capability). @eccbytes should be equal to the number of bytes
+ * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8.
+ *
+ * Example: to configure 4 bit correction per 512 bytes, you should pass
+ * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
+ * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
+ */
+struct nand_bch_control *
+nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
+ struct nand_ecclayout **ecclayout)
+{
+ unsigned int m, t, eccsteps, i;
+ struct nand_ecclayout *layout;
+ struct nand_bch_control *nbc = NULL;
+ unsigned char *erased_page;
+
+ if (!eccsize || !eccbytes) {
+ printk(KERN_WARNING "ecc parameters not supplied\n");
+ goto fail;
+ }
+
+ m = fls(1+8*eccsize);
+ t = (eccbytes*8)/m;
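+	/*
+	 * E.g. eccsize = 512, eccbytes = 7: m = fls(4097) = 13 and
+	 * t = 56 / 13 = 4, i.e. 4-bit correction per 512-byte block.
+	 */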
+
+ nbc = kzalloc(sizeof(*nbc), GFP_KERNEL);
+ if (!nbc)
+ goto fail;
+
+ nbc->bch = init_bch(m, t, 0);
+ if (!nbc->bch)
+ goto fail;
+
+ /* verify that eccbytes has the expected value */
+ if (nbc->bch->ecc_bytes != eccbytes) {
+ printk(KERN_WARNING "invalid eccbytes %u, should be %u\n",
+ eccbytes, nbc->bch->ecc_bytes);
+ goto fail;
+ }
+
+ eccsteps = mtd->writesize/eccsize;
+
+ /* if no ecc placement scheme was provided, build one */
+ if (!*ecclayout) {
+
+ /* handle large page devices only */
+ if (mtd->oobsize < 64) {
+ printk(KERN_WARNING "must provide an oob scheme for "
+ "oobsize %d\n", mtd->oobsize);
+ goto fail;
+ }
+
+ layout = &nbc->ecclayout;
+ layout->eccbytes = eccsteps*eccbytes;
+
+ /* reserve 2 bytes for bad block marker */
+ if (layout->eccbytes+2 > mtd->oobsize) {
+ printk(KERN_WARNING "no suitable oob scheme available "
+ "for oobsize %d eccbytes %u\n", mtd->oobsize,
+ eccbytes);
+ goto fail;
+ }
+ /* put ecc bytes at oob tail */
+ for (i = 0; i < layout->eccbytes; i++)
+ layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
+
+ layout->oobfree[0].offset = 2;
+ layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
+
+ *ecclayout = layout;
+ }
+
+ /* sanity checks */
+ if (8*(eccsize+eccbytes) >= (1 << m)) {
+ printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
+ goto fail;
+ }
+ if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) {
+ printk(KERN_WARNING "invalid ecc layout\n");
+ goto fail;
+ }
+
+ nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL);
+ nbc->errloc = kmalloc(t*sizeof(*nbc->errloc), GFP_KERNEL);
+ if (!nbc->eccmask || !nbc->errloc)
+ goto fail;
+ /*
+ * compute and store the inverted ecc of an erased ecc block
+ */
+ erased_page = kmalloc(eccsize, GFP_KERNEL);
+ if (!erased_page)
+ goto fail;
+
+ memset(erased_page, 0xff, eccsize);
+ memset(nbc->eccmask, 0, eccbytes);
+ encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
+ kfree(erased_page);
+
+ for (i = 0; i < eccbytes; i++)
+ nbc->eccmask[i] ^= 0xff;
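+ /*
+ * eccmask is now the bitwise complement of the ECC of an all-0xff
+ * block; XOR-ing it into computed ECC bytes makes a fully erased page
+ * (data and ECC all 0xff) check out as a valid codeword.
+ */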
+
+ return nbc;
+fail:
+ nand_bch_free(nbc);
+ return NULL;
+}
+EXPORT_SYMBOL(nand_bch_init);
+
+/**
+ * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources
+ * @nbc: NAND BCH control structure
+ */
+void nand_bch_free(struct nand_bch_control *nbc)
+{
+ if (nbc) {
+ free_bch(nbc->bch);
+ kfree(nbc->errloc);
+ kfree(nbc->eccmask);
+ kfree(nbc);
+ }
+}
+EXPORT_SYMBOL(nand_bch_free);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
+MODULE_DESCRIPTION("NAND software BCH ECC support");
diff --git a/drivers/mtd/nand/nand_bcm_umi.c b/drivers/mtd/nand/nand_bcm_umi.c
new file mode 100644
index 00000000..46a6bc9c
--- /dev/null
+++ b/drivers/mtd/nand/nand_bcm_umi.c
@@ -0,0 +1,149 @@
+/*****************************************************************************
+* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2, available at
+* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a
+* license other than the GPL, without Broadcom's express prior written
+* consent.
+*****************************************************************************/
+
+/* ---- Include Files ---------------------------------------------------- */
+#include <mach/reg_umi.h>
+#include "nand_bcm_umi.h"
+#ifdef BOOT0_BUILD
+#include <uart.h>
+#endif
+
+/* ---- External Variable Declarations ----------------------------------- */
+/* ---- External Function Prototypes ------------------------------------- */
+/* ---- Public Variables ------------------------------------------------- */
+/* ---- Private Constants and Types -------------------------------------- */
+/* ---- Private Function Prototypes -------------------------------------- */
+/* ---- Private Variables ------------------------------------------------ */
+/* ---- Private Functions ------------------------------------------------ */
+
+#if NAND_ECC_BCH
+/****************************************************************************
+* nand_bcm_umi_bch_ecc_flip_bit - Routine to flip an errored bit
+*
+* PURPOSE:
+* This is a helper routine that flips the bit (0 -> 1 or 1 -> 0) of the
+* errored bit specified
+*
+* PARAMETERS:
+* datap - Container that holds the 512 byte data
+* errorLocation - Location of the bit that needs to be flipped
+*
+* RETURNS:
+* None
+****************************************************************************/
+static void nand_bcm_umi_bch_ecc_flip_bit(uint8_t *datap, int errorLocation)
+{
+ int locWithinAByte = (errorLocation & REG_UMI_BCH_ERR_LOC_BYTE) >> 0;
+ int locWithinAWord = (errorLocation & REG_UMI_BCH_ERR_LOC_WORD) >> 3;
+ int locWithinAPage = (errorLocation & REG_UMI_BCH_ERR_LOC_PAGE) >> 5;
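+
+ /*
+ * errorLocation encodes a bit address within the 512-byte sector: the
+ * low bits select the bit within a byte, the next field the byte
+ * within a 32-bit word and the remaining bits the word within the
+ * sector (the exact field widths are given by the
+ * REG_UMI_BCH_ERR_LOC_* masks).
+ */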
+
+ uint8_t errorByte = 0;
+ uint8_t byteMask = 1 << locWithinAByte;
+
+ /* BCH uses big endian, need to change the location
+ * bits to little endian */
+ locWithinAWord = 3 - locWithinAWord;
+
+ errorByte = datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord];
+
+#ifdef BOOT0_BUILD
+ puthexs("\nECC Correct Offset: ",
+ locWithinAPage * sizeof(uint32_t) + locWithinAWord);
+ puthexs(" errorByte:", errorByte);
+ puthex8(" Bit: ", locWithinAByte);
+#endif
+
+ if (errorByte & byteMask) {
+ /* bit needs to be cleared */
+ errorByte &= ~byteMask;
+ } else {
+ /* bit needs to be set */
+ errorByte |= byteMask;
+ }
+
+ /* write back the value with the fixed bit */
+ datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord] = errorByte;
+}
+
+/****************************************************************************
+* nand_bcm_umi_bch_correct_page - Routine to correct bit errors when reading NAND
+*
+* PURPOSE:
+* This routine reads the BCH registers to determine if there are any bit
+* errors during the read of the last 512 bytes of data + ECC bytes. If
+* errors exists, the routine fixes it.
+*
+* PARAMETERS:
+* datap - Container that holds the 512 byte data
+*
+* RETURNS:
+* 0 or greater = Number of errors corrected
+* (No errors are found or errors have been fixed)
+* -1 = Error(s) cannot be fixed
+****************************************************************************/
+int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
+ int numEccBytes)
+{
+ int numErrors;
+ int errorLocation;
+ int idx;
+ uint32_t regValue;
+
+ /* wait for read ECC to be valid */
+ regValue = nand_bcm_umi_bch_poll_read_ecc_calc();
+
+ /*
+ * read the control/status register to determine whether there
+ * are any errored bits and whether they are correctable
+ */
+ if ((regValue & REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR) > 0) {
+ int i;
+
+ for (i = 0; i < numEccBytes; i++) {
+ if (readEccData[i] != 0xff) {
+ /* errors cannot be fixed, return -1 */
+ return -1;
+ }
+ }
+ /* All ECC bytes read back as 0xff: the ECC area is
+ * unprogrammed (erased), so assume the data is OK */
+ return 0;
+ }
+
+ if ((regValue & REG_UMI_BCH_CTRL_STATUS_CORR_ERR) == 0) {
+ /* no errors */
+ return 0;
+ }
+
+ /*
+ * Fix errored bits by doing the following:
+ * 1. Read the number of errors in the control and status register
+ * 2. Read the error location registers that corresponds to the number
+ * of errors reported
+ * 3. Invert the bit in the data
+ */
+ numErrors = (regValue & REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR) >> 20;
+
+ for (idx = 0; idx < numErrors; idx++) {
+ errorLocation =
+ REG_UMI_BCH_ERR_LOC_ADDR(idx) & REG_UMI_BCH_ERR_LOC_MASK;
+
+ /* Flip bit */
+ nand_bcm_umi_bch_ecc_flip_bit(datap, errorLocation);
+ }
+ /* Errors corrected */
+ return numErrors;
+}
+#endif
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
new file mode 100644
index 00000000..198b304d
--- /dev/null
+++ b/drivers/mtd/nand/nand_bcm_umi.h
@@ -0,0 +1,337 @@
+/*****************************************************************************
+* Copyright 2003 - 2009 Broadcom Corporation. All rights reserved.
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2, available at
+* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a
+* license other than the GPL, without Broadcom's express prior written
+* consent.
+*****************************************************************************/
+#ifndef NAND_BCM_UMI_H
+#define NAND_BCM_UMI_H
+
+/* ---- Include Files ---------------------------------------------------- */
+#include <mach/reg_umi.h>
+#include <mach/reg_nand.h>
+#include <cfg_global.h>
+
+/* ---- Constants and Types ---------------------------------------------- */
+#if (CFG_GLOBAL_CHIP_FAMILY == CFG_GLOBAL_CHIP_FAMILY_BCMRING)
+#define NAND_ECC_BCH (CFG_GLOBAL_CHIP_REV > 0xA0)
+#else
+#define NAND_ECC_BCH 0
+#endif
+
+#define CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES 13
+
+#if NAND_ECC_BCH
+#ifdef BOOT0_BUILD
+#define NAND_ECC_NUM_BYTES 13
+#else
+#define NAND_ECC_NUM_BYTES CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES
+#endif
+#else
+#define NAND_ECC_NUM_BYTES 3
+#endif
+
+#define NAND_DATA_ACCESS_SIZE 512
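+
+/*
+ * Each 512-byte data access is protected either by 13 BCH ECC bytes
+ * (104 bits, giving t = 8 correctable bits at 13 ECC bits per
+ * correctable bit) or, in Hamming mode, by the classic 3 ECC bytes.
+ */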
+
+/* ---- Variable Externs ------------------------------------------ */
+/* ---- Function Prototypes --------------------------------------- */
+int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
+ int numEccBytes);
+
+/* Check if the device is ready */
+static inline int nand_bcm_umi_dev_ready(void)
+{
+ return REG_UMI_NAND_RCSR & REG_UMI_NAND_RCSR_RDY;
+}
+
+/* Wait until device is ready */
+static inline void nand_bcm_umi_wait_till_ready(void)
+{
+ while (nand_bcm_umi_dev_ready() == 0)
+ ;
+}
+
+/* Enable Hamming ECC */
+static inline void nand_bcm_umi_hamming_enable_hwecc(void)
+{
+ /* disable and reset ECC, 512 byte page */
+ REG_UMI_NAND_ECC_CSR &= ~(REG_UMI_NAND_ECC_CSR_ECC_ENABLE |
+ REG_UMI_NAND_ECC_CSR_256BYTE);
+ /* enable ECC */
+ REG_UMI_NAND_ECC_CSR |= REG_UMI_NAND_ECC_CSR_ECC_ENABLE;
+}
+
+#if NAND_ECC_BCH
+/* BCH ECC specifics */
+#define ECC_BITS_PER_CORRECTABLE_BIT 13
+
+/* Enable BCH Read ECC */
+static inline void nand_bcm_umi_bch_enable_read_hwecc(void)
+{
+ /* disable and reset ECC */
+ REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
+ /* Turn on ECC */
+ REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
+}
+
+/* Enable BCH Write ECC */
+static inline void nand_bcm_umi_bch_enable_write_hwecc(void)
+{
+ /* disable and reset ECC */
+ REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID;
+ /* Turn on ECC */
+ REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN;
+}
+
+/* Config number of BCH ECC bytes */
+static inline void nand_bcm_umi_bch_config_ecc(uint8_t numEccBytes)
+{
+ uint32_t nValue;
+ uint32_t tValue;
+ uint32_t kValue;
+ uint32_t numBits = numEccBytes * 8;
+
+ /* disable and reset ECC */
+ REG_UMI_BCH_CTRL_STATUS =
+ REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID |
+ REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
+
+ /* Every correctable bit requires 13 ECC bits */
+ tValue = (uint32_t) (numBits / ECC_BITS_PER_CORRECTABLE_BIT);
+
+ /* Total data in number of bits for generating and computing BCH ECC */
+ nValue = (NAND_DATA_ACCESS_SIZE + numEccBytes) * 8;
+
+ /* K parameter is used internally. K = N - (T * 13) */
+ kValue = nValue - (tValue * ECC_BITS_PER_CORRECTABLE_BIT);
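+
+ /*
+ * Worked example for numEccBytes = 13: numBits = 104,
+ * t = 104/13 = 8 correctable bits, n = (512 + 13) * 8 = 4200 bits,
+ * k = 4200 - 104 = 4096 bits.
+ */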
+
+ /* Write the settings */
+ REG_UMI_BCH_N = nValue;
+ REG_UMI_BCH_T = tValue;
+ REG_UMI_BCH_K = kValue;
+}
+
+/* Pause during ECC read calculation to skip bytes in OOB */
+static inline void nand_bcm_umi_bch_pause_read_ecc_calc(void)
+{
+ REG_UMI_BCH_CTRL_STATUS =
+ REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN |
+ REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC;
+}
+
+/* Resume during ECC read calculation after skipping bytes in OOB */
+static inline void nand_bcm_umi_bch_resume_read_ecc_calc(void)
+{
+ REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
+}
+
+/* Poll read ECC calc to check when hardware completes */
+static inline uint32_t nand_bcm_umi_bch_poll_read_ecc_calc(void)
+{
+ uint32_t regVal;
+
+ do {
+ /* wait for ECC to be valid */
+ regVal = REG_UMI_BCH_CTRL_STATUS;
+ } while ((regVal & REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID) == 0);
+
+ return regVal;
+}
+
+/* Poll write ECC calc to check when hardware completes */
+static inline void nand_bcm_umi_bch_poll_write_ecc_calc(void)
+{
+ /* wait for ECC to be valid */
+ while ((REG_UMI_BCH_CTRL_STATUS & REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID)
+ == 0)
+ ;
+}
+
+/* Read the OOB and ECC; in kernel builds the OOB bytes are also copied to a buffer */
+#if defined(__KERNEL__) && !defined(STANDALONE)
+static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
+ uint8_t *eccCalc, int numEccBytes, uint8_t *oobp)
+#else
+static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
+ uint8_t *eccCalc, int numEccBytes)
+#endif
+{
+ int eccPos = 0;
+ int numToRead = 16; /* There are 16 bytes per sector in the OOB */
+
+ /* ECC is already paused when this function is called */
+ if (pageSize != NAND_DATA_ACCESS_SIZE) {
+ /* skip BI */
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp++ = REG_NAND_DATA8;
+#else
+ REG_NAND_DATA8;
+#endif
+ numToRead--;
+ }
+
+ while (numToRead > numEccBytes) {
+ /* skip free oob region */
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp++ = REG_NAND_DATA8;
+#else
+ REG_NAND_DATA8;
+#endif
+ numToRead--;
+ }
+
+ if (pageSize == NAND_DATA_ACCESS_SIZE) {
+ /* read ECC bytes before BI */
+ nand_bcm_umi_bch_resume_read_ecc_calc();
+
+ while (numToRead > 11) {
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp = REG_NAND_DATA8;
+ eccCalc[eccPos++] = *oobp;
+ oobp++;
+#else
+ eccCalc[eccPos++] = REG_NAND_DATA8;
+#endif
+ numToRead--;
+ }
+
+ nand_bcm_umi_bch_pause_read_ecc_calc();
+
+ if (numToRead == 11) {
+ /* read BI */
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp++ = REG_NAND_DATA8;
+#else
+ REG_NAND_DATA8;
+#endif
+ numToRead--;
+ }
+
+ }
+ /* read ECC bytes */
+ nand_bcm_umi_bch_resume_read_ecc_calc();
+ while (numToRead) {
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp = REG_NAND_DATA8;
+ eccCalc[eccPos++] = *oobp;
+ oobp++;
+#else
+ eccCalc[eccPos++] = REG_NAND_DATA8;
+#endif
+ numToRead--;
+ }
+}
+
+/* Helper function to write ECC */
+static inline void NAND_BCM_UMI_ECC_WRITE(int numEccBytes, int eccBytePos,
+ uint8_t *oobp, uint8_t eccVal)
+{
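+ /*
+ * ECC byte positions count down from 15: positions 15 and 14 are the
+ * optional CM bytes, 13..1 are ECC12..ECC0. A byte is stored only when
+ * its position fits within the configured number of ECC bytes, so with
+ * 13 ECC bytes the CM positions are skipped.
+ */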
+ if (eccBytePos <= numEccBytes)
+ *oobp = eccVal;
+}
+
+/* Write OOB with ECC */
+static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
+ uint8_t *oobp, int numEccBytes)
+{
+ uint32_t eccVal = 0xffffffff;
+
+ /* wait for write ECC to be valid */
+ nand_bcm_umi_bch_poll_write_ecc_calc();
+
+ /*
+ ** Get the hardware ecc from the 32-bit result registers.
+ ** Read after 512 byte accesses. Format B3B2B1B0
+ ** where B3 = ecc3, etc.
+ */
+
+ if (pageSize == NAND_DATA_ACCESS_SIZE) {
+ /* Now fill in the ECC bytes */
+ if (numEccBytes >= 13)
+ eccVal = REG_UMI_BCH_WR_ECC_3;
+
+ /* Usually we skip CM in oob[0,1] */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[0],
+ (eccVal >> 16) & 0xff);
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[1],
+ (eccVal >> 8) & 0xff);
+
+ /* Write ECC in oob[2,3,4] */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[2],
+ eccVal & 0xff); /* ECC 12 */
+
+ if (numEccBytes >= 9)
+ eccVal = REG_UMI_BCH_WR_ECC_2;
+
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[3],
+ (eccVal >> 24) & 0xff); /* ECC11 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[4],
+ (eccVal >> 16) & 0xff); /* ECC10 */
+
+ /* Always Skip BI in oob[5] */
+ } else {
+ /* Always Skip BI in oob[0] */
+
+ /* Now fill in the ECC bytes */
+ if (numEccBytes >= 13)
+ eccVal = REG_UMI_BCH_WR_ECC_3;
+
+ /* Usually skip CM in oob[1,2] */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[1],
+ (eccVal >> 16) & 0xff);
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[2],
+ (eccVal >> 8) & 0xff);
+
+ /* Write ECC in oob[3-15] */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[3],
+ eccVal & 0xff); /* ECC12 */
+
+ if (numEccBytes >= 9)
+ eccVal = REG_UMI_BCH_WR_ECC_2;
+
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[4],
+ (eccVal >> 24) & 0xff); /* ECC11 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[5],
+ (eccVal >> 16) & 0xff); /* ECC10 */
+ }
+
+ /* Fill in the remainder of ECC locations */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 10, &oobp[6],
+ (eccVal >> 8) & 0xff); /* ECC9 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 9, &oobp[7],
+ eccVal & 0xff); /* ECC8 */
+
+ if (numEccBytes >= 5)
+ eccVal = REG_UMI_BCH_WR_ECC_1;
+
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 8, &oobp[8],
+ (eccVal >> 24) & 0xff); /* ECC7 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 7, &oobp[9],
+ (eccVal >> 16) & 0xff); /* ECC6 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 6, &oobp[10],
+ (eccVal >> 8) & 0xff); /* ECC5 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 5, &oobp[11],
+ eccVal & 0xff); /* ECC4 */
+
+ if (numEccBytes >= 1)
+ eccVal = REG_UMI_BCH_WR_ECC_0;
+
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 4, &oobp[12],
+ (eccVal >> 24) & 0xff); /* ECC3 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 3, &oobp[13],
+ (eccVal >> 16) & 0xff); /* ECC2 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 2, &oobp[14],
+ (eccVal >> 8) & 0xff); /* ECC1 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 1, &oobp[15],
+ eccVal & 0xff); /* ECC0 */
+}
+#endif
+
+#endif /* NAND_BCM_UMI_H */
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
new file mode 100644
index 00000000..b7cfe0d3
--- /dev/null
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -0,0 +1,534 @@
+/*
+ * This file contains an ECC algorithm that detects and corrects 1 bit
+ * errors in a 256 byte block of data.
+ *
+ * drivers/mtd/nand/nand_ecc.c
+ *
+ * Copyright © 2008 Koninklijke Philips Electronics NV.
+ * Author: Frans Meulenbroeks
+ *
+ * Completely replaces the previous ECC implementation which was written by:
+ * Steven J. Hill (sjhill@realitydiluted.com)
+ * Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Information on how this algorithm works and how it was developed
+ * can be found in Documentation/mtd/nand_ecc.txt
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this file; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+/*
+ * The STANDALONE macro is useful when running the code outside the kernel
+ * e.g. when running the code in a testbed or a benchmark program.
+ * When STANDALONE is used, the module related macros are commented out
+ * as well as the linux include files.
+ * Instead a private definition of mtd_info is given to satisfy the compiler
+ * (the code does not use mtd_info, so the code does not care)
+ */
+#ifndef STANDALONE
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/byteorder.h>
+#else
+#include <stdint.h>
+struct mtd_info;
+#define EXPORT_SYMBOL(x) /* x */
+
+#define MODULE_LICENSE(x) /* x */
+#define MODULE_AUTHOR(x) /* x */
+#define MODULE_DESCRIPTION(x) /* x */
+
+#define printk printf
+#define KERN_ERR ""
+#endif
+
+/*
+ * invparity is a 256 byte table that contains the odd parity
+ * for each byte. So if the number of bits in a byte is even,
+ * the array element is 1, and when the number of bits is odd
+ * the array element is 0.
+ */
+static const char invparity[256] = {
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
+};
+
+/*
+ * bitsperbyte contains the number of set bits for each byte value
+ * this is only used for testing and repairing parity
+ * (a precalculated table slightly improves performance)
+ */
+static const char bitsperbyte[256] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
+};
+
+/*
+ * addressbits is a lookup table to filter out the bits from the xor-ed
+ * ECC data that identify the faulty location.
+ * this is only used for repairing parity
+ * see the comments in nand_correct_data for more details
+ */
+static const char addressbits[256] = {
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
+};
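+
+/*
+ * In other words, addressbits[b] packs bits 1, 3, 5 and 7 of b into bits
+ * 0..3 of the result, e.g. addressbits[0x02] = 0x01, addressbits[0x08] =
+ * 0x02 and addressbits[0x80] = 0x08. For a single bit error each bit pair
+ * in the xor-ed ECC bytes holds complementary values, so the odd-numbered
+ * bits alone carry the faulty byte/bit address.
+ */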
+
+/**
+ * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ * block
+ * @buf: input buffer with raw data
+ * @eccsize: data bytes per ECC step (256 or 512)
+ * @code: output buffer with ECC
+ */
+void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
+ unsigned char *code)
+{
+ int i;
+ const uint32_t *bp = (uint32_t *)buf;
+ /* 256 or 512 bytes/ecc */
+ const uint32_t eccsize_mult = eccsize >> 8;
+ uint32_t cur; /* current value in buffer */
+ /* rp0..rp15..rp17 are the various accumulated parities (per byte) */
+ uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
+ uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
+ uint32_t uninitialized_var(rp17); /* to make compiler happy */
+ uint32_t par; /* the cumulative parity for all data */
+ uint32_t tmppar; /* the cumulative parity for this iteration;
+ for rp12, rp14 and rp16 at the end of the
+ loop */
+
+ par = 0;
+ rp4 = 0;
+ rp6 = 0;
+ rp8 = 0;
+ rp10 = 0;
+ rp12 = 0;
+ rp14 = 0;
+ rp16 = 0;
+
+ /*
+ * The loop is unrolled a number of times;
+ * This avoids if statements to decide on which rp value to update
+ * Also we process the data by longwords.
+ * Note: passing unaligned data might give a performance penalty.
+ * It is assumed that the buffers are aligned.
+ * tmppar is the cumulative sum of this iteration.
+ * needed for calculating rp12, rp14, rp16 and par
+ * also used as a performance improvement for rp6, rp8 and rp10
+ */
+ for (i = 0; i < eccsize_mult << 2; i++) {
+ cur = *bp++;
+ tmppar = cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= tmppar;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp8 ^= tmppar;
+
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp10 ^= tmppar;
+
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp8 ^= cur;
+
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+
+ par ^= tmppar;
+ if ((i & 0x1) == 0)
+ rp12 ^= tmppar;
+ if ((i & 0x2) == 0)
+ rp14 ^= tmppar;
+ if (eccsize_mult == 2 && (i & 0x4) == 0)
+ rp16 ^= tmppar;
+ }
+
+ /*
+ * handle the fact that we use longword operations
+ * we'll bring rp4..rp14..rp16 back to single byte entities by
+ * shifting and xoring first fold the upper and lower 16 bits,
+ * then the upper and lower 8 bits.
+ */
+ rp4 ^= (rp4 >> 16);
+ rp4 ^= (rp4 >> 8);
+ rp4 &= 0xff;
+ rp6 ^= (rp6 >> 16);
+ rp6 ^= (rp6 >> 8);
+ rp6 &= 0xff;
+ rp8 ^= (rp8 >> 16);
+ rp8 ^= (rp8 >> 8);
+ rp8 &= 0xff;
+ rp10 ^= (rp10 >> 16);
+ rp10 ^= (rp10 >> 8);
+ rp10 &= 0xff;
+ rp12 ^= (rp12 >> 16);
+ rp12 ^= (rp12 >> 8);
+ rp12 &= 0xff;
+ rp14 ^= (rp14 >> 16);
+ rp14 ^= (rp14 >> 8);
+ rp14 &= 0xff;
+ if (eccsize_mult == 2) {
+ rp16 ^= (rp16 >> 16);
+ rp16 ^= (rp16 >> 8);
+ rp16 &= 0xff;
+ }
+
+ /*
+ * we also need to calculate the row parity for rp0..rp3
+ * This is present in par, because par is now
+ * rp3 rp3 rp2 rp2 in little endian and
+ * rp2 rp2 rp3 rp3 in big endian
+ * as well as
+ * rp1 rp0 rp1 rp0 in little endian and
+ * rp0 rp1 rp0 rp1 in big endian
+ * First calculate rp2 and rp3
+ */
+#ifdef __BIG_ENDIAN
+ rp2 = (par >> 16);
+ rp2 ^= (rp2 >> 8);
+ rp2 &= 0xff;
+ rp3 = par & 0xffff;
+ rp3 ^= (rp3 >> 8);
+ rp3 &= 0xff;
+#else
+ rp3 = (par >> 16);
+ rp3 ^= (rp3 >> 8);
+ rp3 &= 0xff;
+ rp2 = par & 0xffff;
+ rp2 ^= (rp2 >> 8);
+ rp2 &= 0xff;
+#endif
+
+ /* reduce par to 16 bits then calculate rp1 and rp0 */
+ par ^= (par >> 16);
+#ifdef __BIG_ENDIAN
+ rp0 = (par >> 8) & 0xff;
+ rp1 = (par & 0xff);
+#else
+ rp1 = (par >> 8) & 0xff;
+ rp0 = (par & 0xff);
+#endif
+
+ /* finally reduce par to 8 bits */
+ par ^= (par >> 8);
+ par &= 0xff;
+
+ /*
+ * and calculate rp5..rp15..rp17
+ * note that par = rp4 ^ rp5 and due to the commutative property
+ * of the ^ operator we can say:
+ * rp5 = (par ^ rp4);
+ * The & 0xff seems superfluous, but benchmarking showed that
+ * leaving it out gives slightly worse results, probably because of
+ * the way the Pentium pipeline is organized.
+ */
+ rp5 = (par ^ rp4) & 0xff;
+ rp7 = (par ^ rp6) & 0xff;
+ rp9 = (par ^ rp8) & 0xff;
+ rp11 = (par ^ rp10) & 0xff;
+ rp13 = (par ^ rp12) & 0xff;
+ rp15 = (par ^ rp14) & 0xff;
+ if (eccsize_mult == 2)
+ rp17 = (par ^ rp16) & 0xff;
+
+ /*
+ * Finally calculate the ECC bits.
+ * Again it might seem that there are performance optimisations
+ * possible, but benchmarks showed that on the system this was
+ * developed on, the code below is the fastest
+ */
+#ifdef CONFIG_MTD_NAND_ECC_SMC
+ code[0] =
+ (invparity[rp7] << 7) |
+ (invparity[rp6] << 6) |
+ (invparity[rp5] << 5) |
+ (invparity[rp4] << 4) |
+ (invparity[rp3] << 3) |
+ (invparity[rp2] << 2) |
+ (invparity[rp1] << 1) |
+ (invparity[rp0]);
+ code[1] =
+ (invparity[rp15] << 7) |
+ (invparity[rp14] << 6) |
+ (invparity[rp13] << 5) |
+ (invparity[rp12] << 4) |
+ (invparity[rp11] << 3) |
+ (invparity[rp10] << 2) |
+ (invparity[rp9] << 1) |
+ (invparity[rp8]);
+#else
+ code[1] =
+ (invparity[rp7] << 7) |
+ (invparity[rp6] << 6) |
+ (invparity[rp5] << 5) |
+ (invparity[rp4] << 4) |
+ (invparity[rp3] << 3) |
+ (invparity[rp2] << 2) |
+ (invparity[rp1] << 1) |
+ (invparity[rp0]);
+ code[0] =
+ (invparity[rp15] << 7) |
+ (invparity[rp14] << 6) |
+ (invparity[rp13] << 5) |
+ (invparity[rp12] << 4) |
+ (invparity[rp11] << 3) |
+ (invparity[rp10] << 2) |
+ (invparity[rp9] << 1) |
+ (invparity[rp8]);
+#endif
+ if (eccsize_mult == 1)
+ code[2] =
+ (invparity[par & 0xf0] << 7) |
+ (invparity[par & 0x0f] << 6) |
+ (invparity[par & 0xcc] << 5) |
+ (invparity[par & 0x33] << 4) |
+ (invparity[par & 0xaa] << 3) |
+ (invparity[par & 0x55] << 2) |
+ 3;
+ else
+ code[2] =
+ (invparity[par & 0xf0] << 7) |
+ (invparity[par & 0x0f] << 6) |
+ (invparity[par & 0xcc] << 5) |
+ (invparity[par & 0x33] << 4) |
+ (invparity[par & 0xaa] << 3) |
+ (invparity[par & 0x55] << 2) |
+ (invparity[rp17] << 1) |
+ (invparity[rp16] << 0);
+}
+EXPORT_SYMBOL(__nand_calculate_ecc);
+
+/**
+ * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ * block
+ * @mtd: MTD block structure
+ * @buf: input buffer with raw data
+ * @code: output buffer with ECC
+ */
+int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+ unsigned char *code)
+{
+ __nand_calculate_ecc(buf,
+ ((struct nand_chip *)mtd->priv)->ecc.size, code);
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_calculate_ecc);
+
+/**
+ * __nand_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @buf: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ * @eccsize: data bytes per ECC step (256 or 512)
+ *
+ * Detect and correct a 1 bit error for eccsize byte block
+ */
+int __nand_correct_data(unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc,
+ unsigned int eccsize)
+{
+ unsigned char b0, b1, b2, bit_addr;
+ unsigned int byte_addr;
+ /* 256 or 512 bytes/ecc */
+ const uint32_t eccsize_mult = eccsize >> 8;
+
+ /*
+ * b0 to b2 indicate which bit is faulty (if any)
+ * we might need the xor result more than once,
+ * so keep them in a local var
+ */
+#ifdef CONFIG_MTD_NAND_ECC_SMC
+ b0 = read_ecc[0] ^ calc_ecc[0];
+ b1 = read_ecc[1] ^ calc_ecc[1];
+#else
+ b0 = read_ecc[1] ^ calc_ecc[1];
+ b1 = read_ecc[0] ^ calc_ecc[0];
+#endif
+ b2 = read_ecc[2] ^ calc_ecc[2];
+
+ /* check if there are any bitfaults */
+
+ /* repeated if statements are slightly more efficient than switch ... */
+ /* ordered in order of likelihood */
+
+ if ((b0 | b1 | b2) == 0)
+ return 0; /* no error */
+
+ if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
+ (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
+ ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
+ (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
+ /* single bit error */
+ /*
+ * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
+ * byte, cp 5/3/1 indicate the faulty bit.
+ * A lookup table (called addressbits) is used to filter
+ * the bits from the byte they are in.
+ * A marginal optimisation is possible by having three
+ * different lookup tables.
+ * One as we have now (for b0), one for b2
+ * (that would avoid the >> 1), and one for b1 (with all values
+ * << 4). However it was felt that introducing two more tables
+ * hardly justifies the gain.
+ *
+ * The b2 shift is there to get rid of the lowest two bits.
+ * We could also do addressbits[b2] >> 1 but for the
+ * performance it does not make any difference
+ */
+ if (eccsize_mult == 1)
+ byte_addr = (addressbits[b1] << 4) + addressbits[b0];
+ else
+ byte_addr = (addressbits[b2 & 0x3] << 8) +
+ (addressbits[b1] << 4) + addressbits[b0];
+ bit_addr = addressbits[b2 >> 2];
+ /* flip the bit */
+ buf[byte_addr] ^= (1 << bit_addr);
+ return 1;
+
+ }
+ /* count nr of bits; use table lookup, faster than calculating it */
+ if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
+ return 1; /* error in ECC data; no action needed */
+
+ printk(KERN_ERR "uncorrectable error : ");
+ return -1;
+}
+EXPORT_SYMBOL(__nand_correct_data);
+
+/**
+ * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @mtd: MTD block structure
+ * @buf: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ *
+ * Detect and correct a 1 bit error for 256/512 byte block
+ */
+int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ return __nand_correct_data(buf, read_ecc, calc_ecc,
+ ((struct nand_chip *)mtd->priv)->ecc.size);
+}
+EXPORT_SYMBOL(nand_correct_data);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
+MODULE_DESCRIPTION("Generic NAND ECC support");
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
new file mode 100644
index 00000000..723379af
--- /dev/null
+++ b/drivers/mtd/nand/nand_ids.c
@@ -0,0 +1,561 @@
+/*
+ * drivers/mtd/nand/nand_ids.c
+ *
+ * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+/*
+* Chip ID list
+*
+* Name. ID code, pagesize, chipsize in MegaByte, eraseblock size,
+* options
+*
+* Pagesize: 0, 256, 512
+* 0 get this information from the extended chip ID
+* 256 256 Byte page size
+* 512 512 Byte page size
+*/
+struct nand_flash_dev nand_flash_ids[] = {
+
+#ifdef CONFIG_MTD_NAND_MUSEUM_IDS
+ {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0},
+ {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0},
+ {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0},
+ {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0},
+ {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0},
+ {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0},
+ {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0},
+ {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0},
+ {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0},
+ {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0},
+
+ {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0},
+ {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0},
+ {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16},
+ {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16},
+#endif
+
+ {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0},
+ {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0},
+ {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},
+
+ {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0},
+ {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0},
+ {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16},
+
+ {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0},
+ {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0},
+ {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16},
+
+ {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0},
+ {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0},
+ {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0},
+ {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+
+ {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0},
+
+ /*
+ * These are the new chips with large page size. The pagesize and the
+ * erasesize is determined from the extended id bytes
+ */
+#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
+#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
+
+ /* 512 Megabit */
+ {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS},
+ {"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS},
+ {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS},
+ {"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS},
+ {"NAND 64MiB 3,3V 8-bit", 0xF0, 0, 64, 0, LP_OPTIONS},
+ {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16},
+ {"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16},
+ {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16},
+ {"NAND 64MiB 3,3V 16-bit", 0xC0, 0, 64, 0, LP_OPTIONS16},
+
+ /* 1 Gigabit */
+ {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS},
+ {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS},
+ {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS},
+ {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16},
+ {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16},
+ {"NAND 128MiB 1,8V 16-bit", 0xAD, 0, 128, 0, LP_OPTIONS16},
+
+ /* 2 Gigabit */
+ {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, LP_OPTIONS},
+ {"NAND 256MiB 3,3V 8-bit", 0xDA, 0, 256, 0, LP_OPTIONS},
+ {"NAND 256MiB 1,8V 16-bit", 0xBA, 0, 256, 0, LP_OPTIONS16},
+ {"NAND 256MiB 3,3V 16-bit", 0xCA, 0, 256, 0, LP_OPTIONS16},
+
+ /* 4 Gigabit */
+ {"NAND 512MiB 1,8V 8-bit", 0xAC, 0, 512, 0, LP_OPTIONS},
+ {"NAND 512MiB 3,3V 8-bit", 0xDC, 0, 512, 0, LP_OPTIONS},
+ {"NAND 512MiB 1,8V 16-bit", 0xBC, 0, 512, 0, LP_OPTIONS16},
+ {"NAND 512MiB 3,3V 16-bit", 0xCC, 0, 512, 0, LP_OPTIONS16},
+
+ /* 8 Gigabit */
+ {"NAND 1GiB 1,8V 8-bit", 0xA3, 0, 1024, 0, LP_OPTIONS},
+ {"NAND 1GiB 3,3V 8-bit", 0xD3, 0, 1024, 0, LP_OPTIONS},
+ {"NAND 1GiB 1,8V 16-bit", 0xB3, 0, 1024, 0, LP_OPTIONS16},
+ {"NAND 1GiB 3,3V 16-bit", 0xC3, 0, 1024, 0, LP_OPTIONS16},
+
+ /* 16 Gigabit */
+ {"NAND 2GiB 1,8V 8-bit", 0xA5, 0, 2048, 0, LP_OPTIONS},
+ {"NAND 2GiB 3,3V 8-bit", 0xD5, 0, 2048, 0, LP_OPTIONS},
+ {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, LP_OPTIONS16},
+ {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16},
+
+ /* 32 Gigabit */
+ {"NAND 4GiB 1,8V 8-bit", 0xA7, 0, 4096, 0, LP_OPTIONS},
+ {"NAND 4GiB 3,3V 8-bit", 0xD7, 0, 4096, 0, LP_OPTIONS},
+ {"NAND 4GiB 1,8V 16-bit", 0xB7, 0, 4096, 0, LP_OPTIONS16},
+ {"NAND 4GiB 3,3V 16-bit", 0xC7, 0, 4096, 0, LP_OPTIONS16},
+
+ /* 64 Gigabit */
+ {"NAND 8GiB 1,8V 8-bit", 0xAE, 0, 8192, 0, LP_OPTIONS},
+ {"NAND 8GiB 3,3V 8-bit", 0xDE, 0, 8192, 0, LP_OPTIONS},
+ {"NAND 8GiB 1,8V 16-bit", 0xBE, 0, 8192, 0, LP_OPTIONS16},
+ {"NAND 8GiB 3,3V 16-bit", 0xCE, 0, 8192, 0, LP_OPTIONS16},
+
+ /* 128 Gigabit */
+ {"NAND 16GiB 1,8V 8-bit", 0x1A, 0, 16384, 0, LP_OPTIONS},
+ {"NAND 16GiB 3,3V 8-bit", 0x3A, 0, 16384, 0, LP_OPTIONS},
+ {"NAND 16GiB 1,8V 16-bit", 0x2A, 0, 16384, 0, LP_OPTIONS16},
+ {"NAND 16GiB 3,3V 16-bit", 0x4A, 0, 16384, 0, LP_OPTIONS16},
+
+ /* 256 Gigabit */
+ {"NAND 32GiB 1,8V 8-bit", 0x1C, 0, 32768, 0, LP_OPTIONS},
+ {"NAND 32GiB 3,3V 8-bit", 0x3C, 0, 32768, 0, LP_OPTIONS},
+ {"NAND 32GiB 1,8V 16-bit", 0x2C, 0, 32768, 0, LP_OPTIONS16},
+ {"NAND 32GiB 3,3V 16-bit", 0x4C, 0, 32768, 0, LP_OPTIONS16},
+
+ /* 512 Gigabit */
+ {"NAND 64GiB 1,8V 8-bit", 0x1E, 0, 65536, 0, LP_OPTIONS},
+ {"NAND 64GiB 3,3V 8-bit", 0x3E, 0, 65536, 0, LP_OPTIONS},
+ {"NAND 64GiB 1,8V 16-bit", 0x2E, 0, 65536, 0, LP_OPTIONS16},
+ {"NAND 64GiB 3,3V 16-bit", 0x4E, 0, 65536, 0, LP_OPTIONS16},
+
+ /*
+ * Renesas AND 1 Gigabit. These chips do not support the extended id
+ * and have an unusual page/block layout. The chosen minimum erasesize
+ * is 4 * 2 * 2048 = 16384 bytes: the chips have an array of 4 page
+ * planes and 1 block = 2 pages, but due to the plane arrangement
+ * blocks 0-3 consist of pages 0+4, 1+5, 2+6 and 3+7. JFFS2 would
+ * increase the eraseblock size anyway, so we chose a combined one
+ * that can be erased in one go. Further speed improvements for reads
+ * and writes are possible, but are not implemented yet.
+ */
+ {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000,
+ NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY |
+ BBT_AUTO_REFRESH
+ },
+
+ {NULL,}
+};
+
+#define MLC 1
+#define SLC 0
+#define WD8 0
+struct WMT_nand_flash_dev WMT_nand_flash_ids[] = {
+
+
+ {0xADD314A5, 4096, 2048, 64, 0x40000, 5, 125, 127, 0, WD8, 1, 0, 1, MLC, 4, 0x140A0C12, 0x64780046, 0, 0, 0, 0x00000000, 0x00000000, "HY27UT088G2M-T(P)", LP_OPTIONS},
+ {0xADF1801D, 1024, 2048, 64, 0x20000, 4, 0, 1, 0, WD8, 4, 0, 1, SLC, 4, 0x140A0F12, 0x64780064, 0, 0, 0, 0x00000000, 0x00000000, "HY27UF081G2A", LP_OPTIONS},
+ {0xADF1001D, 1024, 2048, 64, 0x20000, 4, 0, 1, 0, WD8, 4, 0, 1, SLC, 4, 0x140A0C12, 0x64780046, 0, 0, 0, 0x00000000, 0x00000000, "H27U1G8F2BFR", LP_OPTIONS},
+ {0xADD59425, 4096, 4096, 218, 0x80000, 5, 125, 127, 0, WD8, 1, 0, 1, MLC, 12, 0x140A0F12, 0x64780064, 0, 0, 0, 0x00000000, 0x00000000, "HY27UAG8T2A", LP_OPTIONS},
+ {0xADD7949A, 2048, 8192, 448,0x200000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 24, 0x140A0C12, 0x64500064, 0, 0, 0, 0x74420000, 0x00000000, "H27UBG8T2ATR", LP_OPTIONS},
+ {0xADD5949A, 1024, 8192, 448,0x200000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 24, 0x140A0C12, 0x64640064, 0, 0, 0, 0x74420000, 0x00000000, "H27UAG8T2BTR-BC", LP_OPTIONS},
+ {0xADD794DA, 2048, 8192, 640,0x200000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 24, 0x10080A12, 0x645000C8, 0, 1, 1, 0x74C30000, 0x00000000, "H27UBG8T2BTR", LP_OPTIONS},
+ {0xADD79491, 2048, 8192, 640,0x200000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10060812, 0x647800C8, 0, 1, 1, 0x00000000, 0x00003FFF, "H27UBG8T2CTR-F20", LP_OPTIONS},
+ {0xADDE94DA, 4096, 8192, 640,0x200000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10060812, 0x647800C8, 1, 1, 1, 0x00000000, 0x00003FFF, "H27UCG8T2ATR-F20", LP_OPTIONS},
+ {0xADDE94EB, 2048,16384,1280,0x400000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10060812, 0x647800C8, 0, 1, 1, 0x74440000, 0x00003FFF, "H27UCG8T2BTR-F20", LP_OPTIONS},
+
+ {0xECD314A5, 4096, 2048, 64, 0x40000, 5, 127, 0, 0, WD8, 1, 0, 1, MLC, 4, 0x140A0C12, 0x64400064, 0, 0, 0, 0x00000000, 0x00000000, "K9G8G08X0A", LP_OPTIONS},
+ {0xECD59429, 4096, 4096, 218, 0x80000, 5, 127, 0, 0, WD8, 1, 0, 1, MLC, 12, 0x140A0F12, 0x64400064, 0, 0, 0, 0x00000000, 0x00000000, "K9GAG08UXD", LP_OPTIONS},
+ {0xECF10095, 1024, 2048, 64, 0x20000, 4, 0, 1, 0, WD8, 4, 0, 1, SLC, 4, 0x140a1412, 0x64400064, 0, 0, 0, 0x00000000, 0x00000000, "K9F1G08U0B", LP_OPTIONS},
+ {0xECD514B6, 4096, 4096, 128, 0x80000, 5, 127, 0, 0, WD8, 1, 0, 1, MLC, 4, 0x140A0C12, 0x64400064, 0, 0, 0, 0x00000000, 0x00000000, "K9GAG08U0M", LP_OPTIONS},
+ {0xECD755B6, 8192, 4096, 128, 0x80000, 5, 127, 0, 0, WD8, 1, 0, 1, MLC, 4, 0x140A0C12, 0x64400064, 0, 0, 0, 0x00000000, 0x00000000, "K9LBG08U0M", LP_OPTIONS},
+ {0xECD58472, 2048, 8192, 436,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 24, 0x140A0F12, 0x6440012C, 0, 0, 0, 0x00000000, 0x00000000, "K9GAG08U0E", LP_OPTIONS},
+ {0xECD7947A, 4096, 8192, 448,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 24, 0x140A0F12, 0x6478012C, 0, 0, 1, 0x54430000, 0x00003FFF, "K9GBG08U0A", LP_OPTIONS},
+ {0xECD59476, 2048, 8192, 448,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 24, 0x140A0C12, 0x6478012C, 0, 0, 0, 0x00000000, 0x00000000, "K9GAG08U0F", LP_OPTIONS},
+ {0xECD7947E, 4096, 8192,1024,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 40, 0x140B0B12, 0x6478012C, 0, 1, 1, 0x64440000, 0x00000000, "K9GBG08U0B", LP_OPTIONS},
+ {0xECDED57A, 8192, 8192, 640,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 24, 0x140A0C12, 0x6478012C, 0, 0, 1, 0x58430000, 0x00000000, "K9LCG08U0A", LP_OPTIONS},
+ {0xECDED57E, 8192, 8192,1024,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 40, 0x140B0C12, 0x6478012C, 0, 1, 1, 0x68440000, 0x00000000, "K9LCG08U0B", LP_OPTIONS},
+
+ {0x98D594BA, 4096, 4096, 218, 0x80000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 12, 0x190F0F12, 0x64b40070, 0, 0, 0, 0x00000000, 0x00000000, "TC58NVG4D1DTG0", LP_OPTIONS},
+ {0x98D19015, 1024, 2048, 64, 0x20000, 4, 0, 1, 0, WD8, 4, 0, 1, SLC, 4, 0x140A0C12, 0x64B40011, 0, 0, 0, 0x00000000, 0x00000000, "TC58NVG0S3ETA00", LP_OPTIONS},
+ {0x98D59432, 2048, 8192, 448,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 24, 0x100A0C12, 0x64B40084, 0, 0, 0, 0x00000000, 0x00000000, "TC58NVG4D2FTA00", LP_OPTIONS},
+ {0x98D58432, 2048, 8192, 640,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 40, 0x100A0F12, 0x64B4012C, 0, 1, 1, 0x72560000, 0x00000000, "TC58NVG4D2HTA00", LP_OPTIONS},
+ {0x98DE8493, 2048,16384,1280,0x400000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x64B4012C, 0, 1, 1, 0x72570000, 0x00000000, "TC58NVG6DCJTA00", LP_OPTIONS},
+ {0x98DE8493, 2048,16384,1280,0x400000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x64B4012C, 2, 1, 1, 0x72D70000, 0x00000000, "TC58TEG6DCJTA00-DDR", LP_OPTIONS},
+ {0x98D79432, 4096, 8192, 448,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 24, 0x100A0C12, 0x64FF0078, 0, 0, 0, 0x76550000, 0x00000000, "TC58NVG5D2FTAI0", LP_OPTIONS},
+ {0x98D79432, 4096, 8192, 640,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x64B4012C, 0, 1, 1, 0x76560000, 0x00000000, "TC58NVG5D2HTA00", LP_OPTIONS},
+ {0x98D78493, 1024,16384,1280,0x400000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x6478012C, 1, 1, 1, 0x72570000, 0x00000000, "TC58TEG5DCJTA00", LP_OPTIONS},
+ {0x98DE9493, 2048,16384,1280,0x400000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x6478012C, 1, 1, 1, 0x76570000, 0x00000000, "TC58TEG6DDKTA00", LP_OPTIONS},
+ {0x98D78493, 1024,16384,1280,0x400000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x6478012C, 1, 1, 1, 0x72500000, 0x00000000, "TC58TEG5DCKTA00", LP_OPTIONS},
+
+ {0x2C88044B, 4096, 8192, 448,0x200000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 24, 0x100B1210/*0x321E32C8*/, 0x647800C8, 0, 0, 0, 0x00000000, 0x00000000, "MT29F64G08CBAAA", LP_OPTIONS},
+ {0x2C88044B, 4096, 8192, 448,0x200000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 24, 0x100B1210/*0x321E32C8*/, 0x647800C8, 0, 0, 0, 0x00000000, 0x00000000, "MT29F128G08CFAAA", LP_OPTIONS},
+ {0x2C48044A, 2048, 4096, 224,0x100000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 12, 0x100B1210/*0x321E32C8*/, 0x647800C8, 0, 0, 0, 0xA5000000, 0x00003FFF, "MT29F16G08CBACA", LP_OPTIONS},
+ {0x2C68044A, 4096, 4096, 224,0x100000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 12, 0x100B1210/*0x321E32C8*/, 0x647800C8, 0, 0, 0, 0xA9000000, 0x00003FFF, "MT29F32G08CBACA", LP_OPTIONS},
+ {0x2C64444B, 4096, 8192, 744,0x200000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 40, 0x100B1210/*0x321E32C8*/, 0x647800C8, 0, 1, 0, 0xA9000000, 0x00000000, "MT29F64G08CBABA", LP_OPTIONS},
+ {0x2C44444B, 2048, 8192, 744,0x200000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 40, 0x100B1210/*0x321E32C8*/, 0x647800C8, 0, 1, 0, 0xA9000000, 0x00000000, "MT29F32G08CBADA", LP_OPTIONS},
+
+ {0x45DE9493, 2048,16384,1280,0x400000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x64B40140, 1, 1, 1, 0x76570000, 0x00003FFF, "SDTNQGAMA-008G", LP_OPTIONS},
+ {0x45D78493, 1024,16384,1280,0x400000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x64B40140, 1, 1, 1, 0x72570000, 0x00000000, "SDTNQFAMA-004G", LP_OPTIONS},
+
+ {0x8968044A, 4096, 4096, 224,0x100000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 12, 0x100B1210, 0x647800C8, 0, 0, 0, 0xA9000000, 0x00003FFF, "JS29F32G08AAME1", LP_OPTIONS},
+ {0x8988244B, 4096, 8192, 448,0x200000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 24, 0x10070A12, 0x64400046, 0, 0, 0, 0xA9000000, 0x00000000, "JS29F64G08AAME1", LP_OPTIONS},
+ {0x8988244B, 4096, 8192, 744,0x200000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x64B40046, 0, 0, 0, 0xA9840000, 0x00000000, "JS29F64G08AAMF1", LP_OPTIONS},
+
+
+ {0xC2F1801D, 1024, 2048, 64, 0x20000, 4, 0, 1, 0, WD8, 4, 0, 1, SLC, 4, 0x140A0F12, 0x64400064, 0, 0, 0, 0x00000000, 0x00000000, "MX30LF1G08AA", LP_OPTIONS},
+
+ {0x92F18095, 1024, 2048, 64, 0x20000, 4, 0, 1, 0, WD8, 4, 0, 1, SLC, 4, 0x140A0C12, 0x64400064, 0, 0, 0, 0x40000000, 0x00000000, "PSU1GA(3/4)0HT", LP_OPTIONS},
+ {0,}
+ /*add new product item here.*/
+};
+
+struct nand_read_retry_param chip_table[] = {
+#ifdef CONFIG_MTD_NAND_WMT
+ //Hynix
+ {
+ .magic = "readretry",
+ .nand_id = 0xADD794DA,
+ .nand_id_5th = 0x74C30000,
+ .eslc_reg_num = 5,
+ .eslc_offset = {0xa0, 0xa1, 0xb0, 0xb1, 0xc9},
+ .eslc_set_value = {0x26, 0x26, 0x26, 0x26, 0x1},
+ .retry_reg_num = 4,
+ .retry_offset = {0xa7, 0xad, 0xae, 0xaf},
+ .retry_value = {0, 0x6, 0xa, 0x6, 0x0, 0x3, 0x7, 0x8, 0, 0x6, 0xd, 0xf, 0x0, 0x9, 0x14, 0x17, 0x0, 0x0, 0x1a, 0x1e, 0x0, 0x0, 0x20, 0x25},
+ .total_try_times = 6,
+ .cur_try_times = -1,
+ .set_parameter = hynix_set_parameter,
+ .get_parameter = hynix_get_parameter,
+ .get_otp_table = NULL,
+ .retry = 0
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0xADDE94DA,
+ .nand_id_5th = 0,
+ .eslc_reg_num = 4,
+ .eslc_offset = {0xb0, 0xb1, 0xa0, 0xa1},
+ .eslc_set_value = {0xa, 0xa, 0xa, 0xa},
+ .retry_reg_num = 8,
+ .retry_offset = {0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf},
+ .otp_len = 2,
+ .otp_offset = {0xff, 0xcc},
+ .otp_data = {0x40, 0x4d},
+ .total_try_times = 7,
+ .cur_try_times = -1,
+ .set_parameter = hynix_set_parameter,
+ .get_parameter = hynix_get_parameter,
+ .get_otp_table = hynix_get_otp,
+ .retry = 0
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0xADDE94EB,
+ .nand_id_5th = 0x74440000,
+ .eslc_reg_num = 4,
+ .eslc_offset = {0xa0, 0xa1, 0xa7, 0xa8},
+ .eslc_set_value = {0xa, 0xa, 0xa, 0xa},
+ .retry_reg_num = 8,
+ .retry_offset = {0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7},
+ .otp_len = 2,
+ .otp_offset = {0xae, 0xb0},
+ .otp_data = {0x00, 0x4d},
+ .total_try_times = 7,
+ .cur_try_times = -1,
+ .set_parameter = hynix_set_parameter,
+ .get_parameter = hynix_get_parameter,
+ .get_otp_table = hynix_get_otp,
+ .retry = 0
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0xADD79491,
+ .nand_id_5th = 0x0,
+ .eslc_reg_num = 4,
+ .eslc_offset = {0xa0, 0xa1, 0xa7, 0xa8},
+ .eslc_set_value = {0xa, 0xa, 0xa, 0xa},
+ .retry_reg_num = 8,
+ .retry_offset = {0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7},
+ .otp_len = 2,
+ .otp_offset = {0xae, 0xb0},
+ .otp_data = {0x00, 0x4d},
+ .total_try_times = 7,
+ .cur_try_times = -1,
+ .set_parameter = hynix_set_parameter,
+ .get_parameter = hynix_get_parameter,
+ .get_otp_table = hynix_get_otp,
+ .retry = 0
+ },
+ //Toshiba
+ {
+ .magic = "readretry",
+ .nand_id = 0x98D58432,
+ .nand_id_5th = 0x72560000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x98DE8493,
+ .nand_id_5th = 0x72570000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x98DE8493,
+ .nand_id_5th = 0x72D70000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x98DE9482,
+ .nand_id_5th = 0x72570000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x98D79432,
+ .nand_id_5th = 0x76560000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x98D78493,
+ .nand_id_5th = 0x72570000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x98D78493,
+ .nand_id_5th = 0x72500000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x98DE9493,
+ .nand_id_5th = 0x76570000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+
+ //samsung
+ {
+ .magic = "readretry",
+ .nand_id = 0xECD7947E,
+ .nand_id_5th = 0x64440000,
+ .retry_reg_num = 4,
+ .retry_offset = {0xA7, 0xA4, 0xA5, 0xA6},
+ .retry_def_value = {0, 0, 0, 0},
+ .retry_value = {5, 0xA, 0, 0, 0x28, 0, 0xEC, 0xD8, 0xED, 0xF5, 0xED, 0xE6, 0xA, 0xF, 5, 0,
+ 0xF, 0xA, 0xFB, 0xEC, 0xE8, 0xEF, 0xE8, 0xDC, 0xF1, 0xFB, 0xFE, 0xF0, 0xA, 0x0, 0xFB, 0xEC,
+ 0xD0, 0xE2, 0xD0, 0xC2, 0x14, 0xF, 0xFB, 0xEC, 0xE8, 0xFB, 0xE8, 0xDC, 0x1E, 0x14, 0xFB, 0xEC,
+ 0xFB, 0xFF, 0xFB, 0xF8, 0x7, 0xC, 0x2, 0},
+ .total_try_times = 14,
+ .cur_try_times = 0,
+ .set_parameter = samsung_set_parameter,
+ .get_parameter = samsung_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0xECDED57E,
+ .nand_id_5th = 0x68440000,
+ .retry_reg_num = 4,
+ .retry_offset = {0xA7, 0xA4, 0xA5, 0xA6},
+ .retry_def_value = {0, 0, 0, 0},
+ .retry_value = {5, 0xA, 0, 0, 0x28, 0, 0xEC, 0xD8, 0xED, 0xF5, 0xED, 0xE6, 0xA, 0xF, 5, 0,
+ 0xF, 0xA, 0xFB, 0xEC, 0xE8, 0xEF, 0xE8, 0xDC, 0xF1, 0xFB, 0xFE, 0xF0, 0xA, 0x0, 0xFB, 0xEC,
+ 0xD0, 0xE2, 0xD0, 0xC2, 0x14, 0xF, 0xFB, 0xEC, 0xE8, 0xFB, 0xE8, 0xDC, 0x1E, 0x14, 0xFB, 0xEC,
+ 0xFB, 0xFF, 0xFB, 0xF8, 0x7, 0xC, 0x2, 0},
+ .total_try_times = 14,
+ .cur_try_times = 0,
+ .set_parameter = samsung_set_parameter,
+ .get_parameter = samsung_get_parameter,
+ .retry = 0,
+ },
+ //Sandisk
+ {
+ .magic = "readretry",
+ .nand_id = 0x45DE9493,
+ .nand_id_5th = 0x76570000,
+ .retry_reg_num = 3,
+ .retry_offset = {4, 5, 7},
+ .retry_def_value = {0, 0, 0, 0xFF, 0xFF},
+ .retry_value = {0xF0, 0, 0xF0, 0xE0, 0, 0xE0, 0xD0, 0, 0xD0, 0x10, 0, 0x10, 0x20, 0, 0x20, 0x30, 0, 0x30,
+ 0xC0, 0, 0xD0, 0x00, 0, 0x10, 0x00, 0, 0x20, 0x10, 0, 0x20, 0xB0, 0, 0xD0, 0xA0, 0, 0xD0,
+ 0x90, 0, 0xD0, 0xB0, 0, 0xC0, 0xA0, 0, 0xC0, 0x90, 0, 0xC0,//lower page retry parameter
+ 0x00, 0xF0, 0, 0x0F, 0xE0, 0, 0x0F, 0xD0, 0, 0x0E, 0xE0, 0, 0x0E, 0xD0, 0, 0x0D, 0xF0, 0,
+ 0x0D, 0xE0, 0, 0x0D, 0xD0, 0, 0x01, 0x10, 0, 0x02, 0x20, 0, 0x02, 0x10, 0, 0x03, 0x20, 0,
+ 0x0F, 0x00, 0, 0x0E, 0xF0, 0, 0x0D, 0xC0, 0, 0x0F, 0xF0, 0, 0x01, 0x00, 0, 0x02, 0x00, 0,
+ 0x0D, 0xB0, 0, 0x0C, 0xA0, 0},//upper page retry parameter
+ .otp_len = 9,
+ .otp_offset = {0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC},
+ .otp_data = {0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .total_try_times = 0x1410,//bit15~8 for upper page, bit7~0 for lower page
+ .cur_try_times = -1,
+ .set_parameter = sandisk_set_parameter,
+ .get_parameter = sandisk_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x45D78493,
+ .nand_id_5th = 0x72570000,
+ .retry_reg_num = 3,
+ .retry_offset = {4, 5, 7},
+ .retry_def_value = {0, 0, 0, 0xFF, 0xFF},
+ .retry_value = {0xF0, 0, 0xF0, 0xE0, 0, 0xE0, 0xD0, 0, 0xD0, 0x10, 0, 0x10, 0x20, 0, 0x20, 0x30, 0, 0x30,
+ 0xC0, 0, 0xD0, 0x00, 0, 0x10, 0x00, 0, 0x20, 0x10, 0, 0x20, 0xB0, 0, 0xD0, 0xA0, 0, 0xD0,
+ 0x90, 0, 0xD0, 0xB0, 0, 0xC0, 0xA0, 0, 0xC0, 0x90, 0, 0xC0,//lower page retry parameter
+ 0x00, 0xF0, 0, 0x0F, 0xE0, 0, 0x0F, 0xD0, 0, 0x0E, 0xE0, 0, 0x0E, 0xD0, 0, 0x0D, 0xF0, 0,
+ 0x0D, 0xE0, 0, 0x0D, 0xD0, 0, 0x01, 0x10, 0, 0x02, 0x20, 0, 0x02, 0x10, 0, 0x03, 0x20, 0,
+ 0x0F, 0x00, 0, 0x0E, 0xF0, 0, 0x0D, 0xC0, 0, 0x0F, 0xF0, 0, 0x01, 0x00, 0, 0x02, 0x00, 0,
+ 0x0D, 0xB0, 0, 0x0C, 0xA0, 0},//upper page retry parameter
+ .otp_len = 9,
+ .otp_offset = {0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC},
+ .otp_data = {0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .total_try_times = 0x1410,//bit15~8 for upper page, bit7~0 for lower page
+ .cur_try_times = -1,
+ .set_parameter = sandisk_set_parameter,
+ .get_parameter = sandisk_get_parameter,
+ .retry = 0,
+ },
+
+ //Micron
+ {
+ .magic = "readretry",
+ .nand_id = 0x2C64444B,
+ .nand_id_5th = 0xA9000000,
+ .retry_reg_num = 1,
+ .retry_offset = {0x89},
+ .retry_def_value = {0},
+ .retry_value = {1, 2, 3, 4, 5, 6, 7},
+ .total_try_times = 7,
+ .cur_try_times = 0,
+ .set_parameter = micron_set_parameter,
+ .get_parameter = micron_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x2C44444B,
+ .nand_id_5th = 0xA9000000,
+ .retry_reg_num = 1,
+ .retry_offset = {0x89},
+ .retry_def_value = {0},
+ .retry_value = {1, 2, 3, 4, 5, 6, 7},
+ .total_try_times = 7,
+ .cur_try_times = 0,
+ .set_parameter = micron_set_parameter,
+ .get_parameter = micron_get_parameter,
+ .retry = 0,
+ },
+#endif
+ {
+ .nand_id = 0,
+ .nand_id_5th = 0,
+ }
+};
+
+
+
+/*
+* Manufacturer ID list
+*/
+struct nand_manufacturers nand_manuf_ids[] = {
+ {NAND_MFR_TOSHIBA, "Toshiba"},
+ {NAND_MFR_SAMSUNG, "Samsung"},
+ {NAND_MFR_FUJITSU, "Fujitsu"},
+ {NAND_MFR_NATIONAL, "National"},
+ {NAND_MFR_RENESAS, "Renesas"},
+ {NAND_MFR_STMICRO, "ST Micro"},
+ {NAND_MFR_HYNIX, "Hynix"},
+ {NAND_MFR_MICRON, "Micron"},
+ {NAND_MFR_SANDISK, "Sandisk"},
+ {NAND_MFR_AMD, "AMD"},
+ {NAND_MFR_INTEL, "Intel"},
+ {NAND_MFR_MACRONIX, "Macronix"},
+ {NAND_MFR_MXIC, "Mxic"},
+ {NAND_MFR_MIRA, "Mira"},
+ {0x0, "Unknown"}
+};
+
+EXPORT_SYMBOL(nand_manuf_ids);
+EXPORT_SYMBOL(nand_flash_ids);
+EXPORT_SYMBOL(WMT_nand_flash_ids);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("Nand device & manufacturer IDs");
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
new file mode 100644
index 00000000..261f478f
--- /dev/null
+++ b/drivers/mtd/nand/nandsim.c
@@ -0,0 +1,2427 @@
+/*
+ * NAND flash simulator.
+ *
+ * Author: Artem B. Bityuckiy <dedekind@oktetlabs.ru>, <dedekind@infradead.org>
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ *
+ * Note: NS means "NAND Simulator".
+ * Note: Input means input TO flash chip, output means output FROM chip.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+ * Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <asm/div64.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/mtd/partitions.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+
+/* Default simulator parameter values */
+#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
+ !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
+ !defined(CONFIG_NANDSIM_THIRD_ID_BYTE) || \
+ !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
+#define CONFIG_NANDSIM_FIRST_ID_BYTE 0x98
+#define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
+#define CONFIG_NANDSIM_THIRD_ID_BYTE 0xFF /* No byte */
+#define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF /* No byte */
+#endif
+
+#ifndef CONFIG_NANDSIM_ACCESS_DELAY
+#define CONFIG_NANDSIM_ACCESS_DELAY 25
+#endif
+#ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
+#define CONFIG_NANDSIM_PROGRAMM_DELAY 200
+#endif
+#ifndef CONFIG_NANDSIM_ERASE_DELAY
+#define CONFIG_NANDSIM_ERASE_DELAY 2
+#endif
+#ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
+#define CONFIG_NANDSIM_OUTPUT_CYCLE 40
+#endif
+#ifndef CONFIG_NANDSIM_INPUT_CYCLE
+#define CONFIG_NANDSIM_INPUT_CYCLE 50
+#endif
+#ifndef CONFIG_NANDSIM_BUS_WIDTH
+#define CONFIG_NANDSIM_BUS_WIDTH 8
+#endif
+#ifndef CONFIG_NANDSIM_DO_DELAYS
+#define CONFIG_NANDSIM_DO_DELAYS 0
+#endif
+#ifndef CONFIG_NANDSIM_LOG
+#define CONFIG_NANDSIM_LOG 0
+#endif
+#ifndef CONFIG_NANDSIM_DBG
+#define CONFIG_NANDSIM_DBG 0
+#endif
+#ifndef CONFIG_NANDSIM_MAX_PARTS
+#define CONFIG_NANDSIM_MAX_PARTS 32
+#endif
+
+static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
+static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
+static uint third_id_byte = CONFIG_NANDSIM_THIRD_ID_BYTE;
+static uint fourth_id_byte = CONFIG_NANDSIM_FOURTH_ID_BYTE;
+static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY;
+static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
+static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY;
+static uint output_cycle = CONFIG_NANDSIM_OUTPUT_CYCLE;
+static uint input_cycle = CONFIG_NANDSIM_INPUT_CYCLE;
+static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
+static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
+static uint log = CONFIG_NANDSIM_LOG;
+static uint dbg = CONFIG_NANDSIM_DBG;
+static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
+static unsigned int parts_num;
+static char *badblocks = NULL;
+static char *weakblocks = NULL;
+static char *weakpages = NULL;
+static unsigned int bitflips = 0;
+static char *gravepages = NULL;
+static unsigned int rptwear = 0;
+static unsigned int overridesize = 0;
+static char *cache_file = NULL;
+static unsigned int bbt;
+static unsigned int bch;
+
+module_param(first_id_byte, uint, 0400);
+module_param(second_id_byte, uint, 0400);
+module_param(third_id_byte, uint, 0400);
+module_param(fourth_id_byte, uint, 0400);
+module_param(access_delay, uint, 0400);
+module_param(programm_delay, uint, 0400);
+module_param(erase_delay, uint, 0400);
+module_param(output_cycle, uint, 0400);
+module_param(input_cycle, uint, 0400);
+module_param(bus_width, uint, 0400);
+module_param(do_delays, uint, 0400);
+module_param(log, uint, 0400);
+module_param(dbg, uint, 0400);
+module_param_array(parts, ulong, &parts_num, 0400);
+module_param(badblocks, charp, 0400);
+module_param(weakblocks, charp, 0400);
+module_param(weakpages, charp, 0400);
+module_param(bitflips, uint, 0400);
+module_param(gravepages, charp, 0400);
+module_param(rptwear, uint, 0400);
+module_param(overridesize, uint, 0400);
+module_param(cache_file, charp, 0400);
+module_param(bbt, uint, 0400);
+module_param(bch, uint, 0400);
+
+MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
+MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
+MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command");
+MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
+MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
+MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
+MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
+MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
+MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
+MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
+MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
+MODULE_PARM_DESC(log, "Perform logging if not zero");
+MODULE_PARM_DESC(dbg, "Output debug information if not zero");
+MODULE_PARM_DESC(parts, "Partition sizes (in erase blocks) separated by commas");
+/* Page and erase block positions for the following parameters are independent of any partitions */
+MODULE_PARM_DESC(badblocks, "Erase blocks that are initially marked bad, separated by commas");
+MODULE_PARM_DESC(weakblocks, "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
+ " separated by commas e.g. 113:2 means eb 113"
+ " can be erased only twice before failing");
+MODULE_PARM_DESC(weakpages, "Weak pages [: maximum writes (defaults to 3)]"
+ " separated by commas e.g. 1401:2 means page 1401"
+ " can be written only twice before failing");
+MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (zero by default)");
+MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
+ " separated by commas e.g. 1401:2 means page 1401"
+ " can be read only twice before failing");
+MODULE_PARM_DESC(rptwear, "Number of erases between reporting wear, if not zero");
+MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
+ "The size is specified in erase blocks and as the exponent of a power of two"
+ " e.g. 5 means a size of 32 erase blocks");
+MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
+MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
+MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
+ "be correctable in 512-byte blocks");
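+
+/*
+ * Typical invocation (all values below are illustrative only):
+ *
+ *   modprobe nandsim parts=4,8 badblocks=10,20 weakblocks=113:2 bitflips=1
+ */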
+
+/* The largest possible page size */
+#define NS_LARGEST_PAGE_SIZE 4096
+
+/* The prefix for simulator output */
+#define NS_OUTPUT_PREFIX "[nandsim]"
+
+/* Simulator's output macros (logging, debugging, warning, error) */
+#define NS_LOG(args...) \
+ do { if (log) printk(KERN_DEBUG NS_OUTPUT_PREFIX " log: " args); } while(0)
+#define NS_DBG(args...) \
+ do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0)
+#define NS_WARN(args...) \
+ do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
+#define NS_ERR(args...) \
+ do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
+#define NS_INFO(args...) \
+ do { printk(KERN_INFO NS_OUTPUT_PREFIX " " args); } while(0)
+
+/* Busy-wait delay macros (microseconds, milliseconds) */
+#define NS_UDELAY(us) \
+ do { if (do_delays) udelay(us); } while(0)
+#define NS_MDELAY(us) \
+ do { if (do_delays) mdelay(us); } while(0)
+
+/* Is the nandsim structure initialized? */
+#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
+
+/* Good operation completion status */
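+/* (NAND_STATUS_WP means "not write-protected", so it is reported only while
+ * the simulated WP line, ns->lines.wp, is de-asserted.) */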
+#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
+
+/* Operation failed completion status */
+#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
+
+/* Calculate the page offset in flash RAM image by (row, column) address */
+#define NS_RAW_OFFSET(ns) \
+ (((ns)->regs.row << (ns)->geom.pgshift) + ((ns)->regs.row * (ns)->geom.oobsz) + (ns)->regs.column)
+
+/* Calculate the OOB offset in flash RAM image by (row, column) address */
+#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
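+/*
+ * For example, on a 2048+64 byte page device, page (row) 3 at column 10 maps
+ * to raw offset 3 * 2112 + 10 = 6346, and the OOB of that page starts at
+ * 3 * 2112 + 2048.
+ */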
+
+/* After a command is input, the simulator goes to one of the following states */
+#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
+#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
+#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
+#define STATE_CMD_PAGEPROG 0x00000004 /* start page program */
+#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
+#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
+#define STATE_CMD_STATUS 0x00000007 /* read status */
+#define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */
+#define STATE_CMD_SEQIN 0x00000009 /* sequential data input */
+#define STATE_CMD_READID 0x0000000A /* read ID */
+#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
+#define STATE_CMD_RESET 0x0000000C /* reset */
+#define STATE_CMD_RNDOUT 0x0000000D /* random output command */
+#define STATE_CMD_RNDOUTSTART 0x0000000E /* random output start command */
+#define STATE_CMD_MASK 0x0000000F /* command states mask */
+
+/* After an address is input, the simulator goes to one of these states */
+#define STATE_ADDR_PAGE 0x00000010 /* full (row, column) address is accepted */
+#define STATE_ADDR_SEC 0x00000020 /* sector address was accepted */
+#define STATE_ADDR_COLUMN 0x00000030 /* column address was accepted */
+#define STATE_ADDR_ZERO 0x00000040 /* one byte zero address was accepted */
+#define STATE_ADDR_MASK 0x00000070 /* address states mask */
+
+/* During data input/output the simulator is in these states */
+#define STATE_DATAIN 0x00000100 /* waiting for data input */
+#define STATE_DATAIN_MASK 0x00000100 /* data input states mask */
+
+#define STATE_DATAOUT 0x00001000 /* waiting for page data output */
+#define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */
+#define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */
+#define STATE_DATAOUT_STATUS_M 0x00004000 /* waiting for multi-plane status output */
+#define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */
+
+/* Previous operation is done, ready to accept new requests */
+#define STATE_READY 0x00000000
+
+/* This state is used to mark that the next state isn't known yet */
+#define STATE_UNKNOWN 0x10000000
+
+/* Simulator's actions bit masks */
+#define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */
+#define ACTION_PRGPAGE 0x00200000 /* program the internal buffer to flash */
+#define ACTION_SECERASE 0x00300000 /* erase sector */
+#define ACTION_ZEROOFF 0x00400000 /* don't add any offset to address */
+#define ACTION_HALFOFF   0x00500000	/* add half of the page size to the address */
+#define ACTION_OOBOFF    0x00600000	/* add the OOB offset to the address */
+#define ACTION_MASK 0x00700000 /* action mask */
+
+#define NS_OPER_NUM 13 /* Number of operations supported by the simulator */
+#define NS_OPER_STATES 6 /* Maximum number of states in operation */
+
+#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
+#define OPT_PAGE256 0x00000001 /* 256-byte page chips */
+#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
+#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
+#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
+#define OPT_AUTOINCR     0x00000020 /* page number auto-increment is possible */
+#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
+#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
+#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
+#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
+
+/* Remove action bits from state */
+#define NS_STATE(x) ((x) & ~ACTION_MASK)
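+/*
+ * A state word in the ops[] table below may combine one STATE_* value with
+ * one ACTION_* value; NS_STATE() strips the action part before states are
+ * compared.
+ */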
+
+/*
+ * Maximum previous states which need to be saved. Currently saving is
+ * only needed for the page program operation preceded by a read command
+ * (which is only valid for 512-byte pages).
+ */
+#define NS_MAX_PREVSTATES 1
+
+/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
+#define NS_MAX_HELD_PAGES 16
+
+/*
+ * A union to represent flash memory contents and flash buffer.
+ */
+union ns_mem {
+ u_char *byte; /* for byte access */
+ uint16_t *word; /* for 16-bit word access */
+};
+
+/*
+ * The structure which describes all the internal simulator data.
+ */
+struct nandsim {
+ struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
+ unsigned int nbparts;
+
+ uint busw; /* flash chip bus width (8 or 16) */
+ u_char ids[4]; /* chip's ID bytes */
+ uint32_t options; /* chip's characteristic bits */
+ uint32_t state; /* current chip state */
+ uint32_t nxstate; /* next expected state */
+
+	uint32_t *op;	/* current operation, NULL if the operation isn't known yet */
+ uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */
+ uint16_t npstates; /* number of previous states saved */
+ uint16_t stateidx; /* current state index */
+
+ /* The simulated NAND flash pages array */
+ union ns_mem *pages;
+
+ /* Slab allocator for nand pages */
+ struct kmem_cache *nand_pages_slab;
+
+ /* Internal buffer of page + OOB size bytes */
+ union ns_mem buf;
+
+ /* NAND flash "geometry" */
+ struct {
+ uint64_t totsz; /* total flash size, bytes */
+ uint32_t secsz; /* flash sector (erase block) size, bytes */
+ uint pgsz; /* NAND flash page size, bytes */
+ uint oobsz; /* page OOB area size, bytes */
+ uint64_t totszoob; /* total flash size including OOB, bytes */
+		uint pgszoob;       /* page size including OOB, bytes */
+ uint secszoob; /* sector size including OOB, bytes */
+ uint pgnum; /* total number of pages */
+ uint pgsec; /* number of pages per sector */
+		uint secshift;      /* number of bits in the sector size */
+		uint pgshift;       /* number of bits in the page size */
+		uint oobshift;      /* number of bits in the OOB size */
+ uint pgaddrbytes; /* bytes per page address */
+ uint secaddrbytes; /* bytes per sector address */
+		uint idbytes;       /* the number of ID bytes that this chip outputs */
+ } geom;
+
+ /* NAND flash internal registers */
+ struct {
+ unsigned command; /* the command register */
+ u_char status; /* the status register */
+ uint row; /* the page number */
+ uint column; /* the offset within page */
+ uint count; /* internal counter */
+ uint num; /* number of bytes which must be processed */
+ uint off; /* fixed page offset */
+ } regs;
+
+ /* NAND flash lines state */
+ struct {
+		int ce;  /* Chip Enable */
+		int cle; /* Command Latch Enable */
+		int ale; /* Address Latch Enable */
+		int wp;  /* Write Protect */
+ } lines;
+
+ /* Fields needed when using a cache file */
+ struct file *cfile; /* Open file */
+ unsigned char *pages_written; /* Which pages have been written */
+ void *file_buf;
+ struct page *held_pages[NS_MAX_HELD_PAGES];
+ int held_cnt;
+};
+
+/*
+ * Operations array. To perform any operation the simulator must pass
+ * through the corresponding chain of states.
+ */
+static struct nandsim_operations {
+ uint32_t reqopts; /* options which are required to perform the operation */
+ uint32_t states[NS_OPER_STATES]; /* operation's states */
+} ops[NS_OPER_NUM] = {
+ /* Read page + OOB from the beginning */
+ {OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Read page + OOB from the second half */
+ {OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Read OOB */
+ {OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Program page starting from the beginning */
+ {OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
+ STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Program page starting from the beginning */
+ {OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Program page starting from the second half */
+ {OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Program OOB */
+ {OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Erase sector */
+ {OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
+ /* Read status */
+ {OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
+ /* Read multi-plane status */
+ {OPT_SMARTMEDIA, {STATE_CMD_STATUS_M, STATE_DATAOUT_STATUS_M, STATE_READY}},
+ /* Read ID */
+ {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
+ /* Large page devices read page */
+ {OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Large page devices random page read */
+ {OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+};
+
+struct weak_block {
+ struct list_head list;
+ unsigned int erase_block_no;
+ unsigned int max_erases;
+ unsigned int erases_done;
+};
+
+static LIST_HEAD(weak_blocks);
+
+struct weak_page {
+ struct list_head list;
+ unsigned int page_no;
+ unsigned int max_writes;
+ unsigned int writes_done;
+};
+
+static LIST_HEAD(weak_pages);
+
+struct grave_page {
+ struct list_head list;
+ unsigned int page_no;
+ unsigned int max_reads;
+ unsigned int reads_done;
+};
+
+static LIST_HEAD(grave_pages);
+
+static unsigned long *erase_block_wear = NULL;
+static unsigned int wear_eb_count = 0;
+static unsigned long total_wear = 0;
+static unsigned int rptwear_cnt = 0;
+
+/* MTD structure for NAND controller */
+static struct mtd_info *nsmtd;
+
+static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
+
+/*
+ * Allocate the array of page pointers, create a slab cache for the page
+ * contents, and initialize the array with NULL pointers.
+ *
+ * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
+ */
+static int alloc_device(struct nandsim *ns)
+{
+ struct file *cfile;
+ int i, err;
+
+ if (cache_file) {
+ cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
+ if (IS_ERR(cfile))
+ return PTR_ERR(cfile);
+ if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
+ NS_ERR("alloc_device: cache file not readable\n");
+ err = -EINVAL;
+ goto err_close;
+ }
+ if (!cfile->f_op->write && !cfile->f_op->aio_write) {
+ NS_ERR("alloc_device: cache file not writeable\n");
+ err = -EINVAL;
+ goto err_close;
+ }
+ ns->pages_written = vzalloc(ns->geom.pgnum);
+ if (!ns->pages_written) {
+ NS_ERR("alloc_device: unable to allocate pages written array\n");
+ err = -ENOMEM;
+ goto err_close;
+ }
+ ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+ if (!ns->file_buf) {
+ NS_ERR("alloc_device: unable to allocate file buf\n");
+ err = -ENOMEM;
+ goto err_free;
+ }
+ ns->cfile = cfile;
+ return 0;
+ }
+
+ ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
+ if (!ns->pages) {
+ NS_ERR("alloc_device: unable to allocate page array\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < ns->geom.pgnum; i++) {
+ ns->pages[i].byte = NULL;
+ }
+ ns->nand_pages_slab = kmem_cache_create("nandsim",
+ ns->geom.pgszoob, 0, 0, NULL);
+ if (!ns->nand_pages_slab) {
+ NS_ERR("cache_create: unable to create kmem_cache\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+
+err_free:
+ vfree(ns->pages_written);
+err_close:
+ filp_close(cfile, NULL);
+ return err;
+}
+
+/*
+ * Free any allocated pages, and free the array of page pointers.
+ */
+static void free_device(struct nandsim *ns)
+{
+ int i;
+
+ if (ns->cfile) {
+ kfree(ns->file_buf);
+ vfree(ns->pages_written);
+ filp_close(ns->cfile, NULL);
+ return;
+ }
+
+ if (ns->pages) {
+ for (i = 0; i < ns->geom.pgnum; i++) {
+ if (ns->pages[i].byte)
+ kmem_cache_free(ns->nand_pages_slab,
+ ns->pages[i].byte);
+ }
+ kmem_cache_destroy(ns->nand_pages_slab);
+ vfree(ns->pages);
+ }
+}
+
+static char *get_partition_name(int i)
+{
+ char buf[64];
+ sprintf(buf, "NAND simulator partition %d", i);
+ return kstrdup(buf, GFP_KERNEL);
+}
+
+static uint64_t divide(uint64_t n, uint32_t d)
+{
+ do_div(n, d);
+ return n;
+}
+
+/*
+ * Initialize the nandsim structure.
+ *
+ * RETURNS: 0 if success, -ERRNO if failure.
+ */
+static int init_nandsim(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct nandsim *ns = chip->priv;
+ int i, ret = 0;
+ uint64_t remains;
+ uint64_t next_offset;
+
+ if (NS_IS_INITIALIZED(ns)) {
+ NS_ERR("init_nandsim: nandsim is already initialized\n");
+ return -EIO;
+ }
+
+ /* Force mtd to not do delays */
+ chip->chip_delay = 0;
+
+ /* Initialize the NAND flash parameters */
+ ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
+ ns->geom.totsz = mtd->size;
+ ns->geom.pgsz = mtd->writesize;
+ ns->geom.oobsz = mtd->oobsize;
+ ns->geom.secsz = mtd->erasesize;
+ ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
+ ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
+ ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
+ ns->geom.secshift = ffs(ns->geom.secsz) - 1;
+ ns->geom.pgshift = chip->page_shift;
+ ns->geom.oobshift = ffs(ns->geom.oobsz) - 1;
+ ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
+ ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
+ ns->options = 0;
+
+ if (ns->geom.pgsz == 256) {
+ ns->options |= OPT_PAGE256;
+ }
+ else if (ns->geom.pgsz == 512) {
+ ns->options |= (OPT_PAGE512 | OPT_AUTOINCR);
+ if (ns->busw == 8)
+ ns->options |= OPT_PAGE512_8BIT;
+ } else if (ns->geom.pgsz == 2048) {
+ ns->options |= OPT_PAGE2048;
+ } else if (ns->geom.pgsz == 4096) {
+ ns->options |= OPT_PAGE4096;
+ } else {
+ NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
+ return -EIO;
+ }
+
+ if (ns->options & OPT_SMALLPAGE) {
+ if (ns->geom.totsz <= (32 << 20)) {
+ ns->geom.pgaddrbytes = 3;
+ ns->geom.secaddrbytes = 2;
+ } else {
+ ns->geom.pgaddrbytes = 4;
+ ns->geom.secaddrbytes = 3;
+ }
+ } else {
+ if (ns->geom.totsz <= (128 << 20)) {
+ ns->geom.pgaddrbytes = 4;
+ ns->geom.secaddrbytes = 2;
+ } else {
+ ns->geom.pgaddrbytes = 5;
+ ns->geom.secaddrbytes = 3;
+ }
+ }
+
+ /* Fill the partition_info structure */
+ if (parts_num > ARRAY_SIZE(ns->partitions)) {
+ NS_ERR("too many partitions.\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ remains = ns->geom.totsz;
+ next_offset = 0;
+ for (i = 0; i < parts_num; ++i) {
+ uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
+
+ if (!part_sz || part_sz > remains) {
+ NS_ERR("bad partition size.\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ ns->partitions[i].name = get_partition_name(i);
+ ns->partitions[i].offset = next_offset;
+ ns->partitions[i].size = part_sz;
+ next_offset += ns->partitions[i].size;
+ remains -= ns->partitions[i].size;
+ }
+ ns->nbparts = parts_num;
+ if (remains) {
+ if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
+ NS_ERR("too many partitions.\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ ns->partitions[i].name = get_partition_name(i);
+ ns->partitions[i].offset = next_offset;
+ ns->partitions[i].size = remains;
+ ns->nbparts += 1;
+ }
+
+ /* Detect how many ID bytes the NAND chip outputs */
+ for (i = 0; nand_flash_ids[i].name != NULL; i++) {
+ if (second_id_byte != nand_flash_ids[i].id)
+ continue;
+ if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
+ ns->options |= OPT_AUTOINCR;
+ }
+
+ if (ns->busw == 16)
+ NS_WARN("16-bit flashes support wasn't tested\n");
+
+ printk("flash size: %llu MiB\n",
+ (unsigned long long)ns->geom.totsz >> 20);
+ printk("page size: %u bytes\n", ns->geom.pgsz);
+ printk("OOB area size: %u bytes\n", ns->geom.oobsz);
+ printk("sector size: %u KiB\n", ns->geom.secsz >> 10);
+ printk("pages number: %u\n", ns->geom.pgnum);
+ printk("pages per sector: %u\n", ns->geom.pgsec);
+ printk("bus width: %u\n", ns->busw);
+ printk("bits in sector size: %u\n", ns->geom.secshift);
+ printk("bits in page size: %u\n", ns->geom.pgshift);
+ printk("bits in OOB size: %u\n", ns->geom.oobshift);
+ printk("flash size with OOB: %llu KiB\n",
+ (unsigned long long)ns->geom.totszoob >> 10);
+ printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
+ printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
+ printk("options: %#x\n", ns->options);
+
+ if ((ret = alloc_device(ns)) != 0)
+ goto error;
+
+ /* Allocate / initialize the internal buffer */
+ ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+ if (!ns->buf.byte) {
+ NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
+ ns->geom.pgszoob);
+ ret = -ENOMEM;
+ goto error;
+ }
+ memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
+
+ return 0;
+
+error:
+ free_device(ns);
+
+ return ret;
+}
+
+/*
+ * Free the nandsim structure.
+ */
+static void free_nandsim(struct nandsim *ns)
+{
+ kfree(ns->buf.byte);
+ free_device(ns);
+
+ return;
+}
+
+static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
+{
+ char *w;
+ int zero_ok;
+ unsigned int erase_block_no;
+ loff_t offset;
+
+ if (!badblocks)
+ return 0;
+ w = badblocks;
+ do {
+ zero_ok = (*w == '0' ? 1 : 0);
+ erase_block_no = simple_strtoul(w, &w, 0);
+ if (!zero_ok && !erase_block_no) {
+ NS_ERR("invalid badblocks.\n");
+ return -EINVAL;
+ }
+ offset = erase_block_no * ns->geom.secsz;
+ if (mtd_block_markbad(mtd, offset)) {
+ NS_ERR("invalid badblocks.\n");
+ return -EINVAL;
+ }
+ if (*w == ',')
+ w += 1;
+ } while (*w);
+ return 0;
+}
+
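+/*
+ * Parse the "weakblocks" module parameter: a comma-separated list of
+ * "eb[:max_erases]" entries, e.g. "113:2,120" makes erase block 113 fail
+ * after two erases and block 120 after the default of three.
+ */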
+static int parse_weakblocks(void)
+{
+ char *w;
+ int zero_ok;
+ unsigned int erase_block_no;
+ unsigned int max_erases;
+ struct weak_block *wb;
+
+ if (!weakblocks)
+ return 0;
+ w = weakblocks;
+ do {
+ zero_ok = (*w == '0' ? 1 : 0);
+ erase_block_no = simple_strtoul(w, &w, 0);
+ if (!zero_ok && !erase_block_no) {
+ NS_ERR("invalid weakblocks.\n");
+ return -EINVAL;
+ }
+ max_erases = 3;
+ if (*w == ':') {
+ w += 1;
+ max_erases = simple_strtoul(w, &w, 0);
+ }
+ if (*w == ',')
+ w += 1;
+ wb = kzalloc(sizeof(*wb), GFP_KERNEL);
+ if (!wb) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ wb->erase_block_no = erase_block_no;
+ wb->max_erases = max_erases;
+ list_add(&wb->list, &weak_blocks);
+ } while (*w);
+ return 0;
+}
+
+static int erase_error(unsigned int erase_block_no)
+{
+ struct weak_block *wb;
+
+ list_for_each_entry(wb, &weak_blocks, list)
+ if (wb->erase_block_no == erase_block_no) {
+ if (wb->erases_done >= wb->max_erases)
+ return 1;
+ wb->erases_done += 1;
+ return 0;
+ }
+ return 0;
+}
+
+static int parse_weakpages(void)
+{
+ char *w;
+ int zero_ok;
+ unsigned int page_no;
+ unsigned int max_writes;
+ struct weak_page *wp;
+
+ if (!weakpages)
+ return 0;
+ w = weakpages;
+ do {
+ zero_ok = (*w == '0' ? 1 : 0);
+ page_no = simple_strtoul(w, &w, 0);
+ if (!zero_ok && !page_no) {
+			NS_ERR("invalid weakpages.\n");
+ return -EINVAL;
+ }
+ max_writes = 3;
+ if (*w == ':') {
+ w += 1;
+ max_writes = simple_strtoul(w, &w, 0);
+ }
+ if (*w == ',')
+ w += 1;
+ wp = kzalloc(sizeof(*wp), GFP_KERNEL);
+ if (!wp) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ wp->page_no = page_no;
+ wp->max_writes = max_writes;
+ list_add(&wp->list, &weak_pages);
+ } while (*w);
+ return 0;
+}
+
+static int write_error(unsigned int page_no)
+{
+ struct weak_page *wp;
+
+ list_for_each_entry(wp, &weak_pages, list)
+ if (wp->page_no == page_no) {
+ if (wp->writes_done >= wp->max_writes)
+ return 1;
+ wp->writes_done += 1;
+ return 0;
+ }
+ return 0;
+}
+
+static int parse_gravepages(void)
+{
+ char *g;
+ int zero_ok;
+ unsigned int page_no;
+ unsigned int max_reads;
+ struct grave_page *gp;
+
+ if (!gravepages)
+ return 0;
+ g = gravepages;
+ do {
+ zero_ok = (*g == '0' ? 1 : 0);
+ page_no = simple_strtoul(g, &g, 0);
+ if (!zero_ok && !page_no) {
+			NS_ERR("invalid gravepages.\n");
+ return -EINVAL;
+ }
+ max_reads = 3;
+ if (*g == ':') {
+ g += 1;
+ max_reads = simple_strtoul(g, &g, 0);
+ }
+ if (*g == ',')
+ g += 1;
+ gp = kzalloc(sizeof(*gp), GFP_KERNEL);
+ if (!gp) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ gp->page_no = page_no;
+ gp->max_reads = max_reads;
+ list_add(&gp->list, &grave_pages);
+ } while (*g);
+ return 0;
+}
+
+static int read_error(unsigned int page_no)
+{
+ struct grave_page *gp;
+
+ list_for_each_entry(gp, &grave_pages, list)
+ if (gp->page_no == page_no) {
+ if (gp->reads_done >= gp->max_reads)
+ return 1;
+ gp->reads_done += 1;
+ return 0;
+ }
+ return 0;
+}
+
+static void free_lists(void)
+{
+ struct list_head *pos, *n;
+ list_for_each_safe(pos, n, &weak_blocks) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_block, list));
+ }
+ list_for_each_safe(pos, n, &weak_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_page, list));
+ }
+ list_for_each_safe(pos, n, &grave_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct grave_page, list));
+ }
+ kfree(erase_block_wear);
+}
+
+static int setup_wear_reporting(struct mtd_info *mtd)
+{
+ size_t mem;
+
+ if (!rptwear)
+ return 0;
+ wear_eb_count = divide(mtd->size, mtd->erasesize);
+ mem = wear_eb_count * sizeof(unsigned long);
+ if (mem / sizeof(unsigned long) != wear_eb_count) {
+ NS_ERR("Too many erase blocks for wear reporting\n");
+ return -ENOMEM;
+ }
+ erase_block_wear = kzalloc(mem, GFP_KERNEL);
+ if (!erase_block_wear) {
+ NS_ERR("Too many erase blocks for wear reporting\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void update_wear(unsigned int erase_block_no)
+{
+ unsigned long wmin = -1, wmax = 0, avg;
+ unsigned long deciles[10], decile_max[10], tot = 0;
+ unsigned int i;
+
+ if (!erase_block_wear)
+ return;
+ total_wear += 1;
+ if (total_wear == 0)
+ NS_ERR("Erase counter total overflow\n");
+ erase_block_wear[erase_block_no] += 1;
+ if (erase_block_wear[erase_block_no] == 0)
+ NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
+ rptwear_cnt += 1;
+ if (rptwear_cnt < rptwear)
+ return;
+ rptwear_cnt = 0;
+ /* Calc wear stats */
+ for (i = 0; i < wear_eb_count; ++i) {
+ unsigned long wear = erase_block_wear[i];
+ if (wear < wmin)
+ wmin = wear;
+ if (wear > wmax)
+ wmax = wear;
+ tot += wear;
+ }
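+	/*
+	 * Bucket the erase counters into ten ranges (deciles of the maximum
+	 * wear) for the report below.
+	 */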
+ for (i = 0; i < 9; ++i) {
+ deciles[i] = 0;
+ decile_max[i] = (wmax * (i + 1) + 5) / 10;
+ }
+ deciles[9] = 0;
+ decile_max[9] = wmax;
+ for (i = 0; i < wear_eb_count; ++i) {
+ int d;
+ unsigned long wear = erase_block_wear[i];
+ for (d = 0; d < 10; ++d)
+ if (wear <= decile_max[d]) {
+ deciles[d] += 1;
+ break;
+ }
+ }
+ avg = tot / wear_eb_count;
+ /* Output wear report */
+ NS_INFO("*** Wear Report ***\n");
+ NS_INFO("Total numbers of erases: %lu\n", tot);
+ NS_INFO("Number of erase blocks: %u\n", wear_eb_count);
+ NS_INFO("Average number of erases: %lu\n", avg);
+ NS_INFO("Maximum number of erases: %lu\n", wmax);
+ NS_INFO("Minimum number of erases: %lu\n", wmin);
+ for (i = 0; i < 10; ++i) {
+ unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
+ if (from > decile_max[i])
+ continue;
+ NS_INFO("Number of ebs with erase counts from %lu to %lu : %lu\n",
+ from,
+ decile_max[i],
+ deciles[i]);
+ }
+ NS_INFO("*** End of Wear Report ***\n");
+}
+
+/*
+ * Returns the string representation of the given state.
+ */
+static char *get_state_name(uint32_t state)
+{
+ switch (NS_STATE(state)) {
+ case STATE_CMD_READ0:
+ return "STATE_CMD_READ0";
+ case STATE_CMD_READ1:
+ return "STATE_CMD_READ1";
+ case STATE_CMD_PAGEPROG:
+ return "STATE_CMD_PAGEPROG";
+ case STATE_CMD_READOOB:
+ return "STATE_CMD_READOOB";
+ case STATE_CMD_READSTART:
+ return "STATE_CMD_READSTART";
+ case STATE_CMD_ERASE1:
+ return "STATE_CMD_ERASE1";
+ case STATE_CMD_STATUS:
+ return "STATE_CMD_STATUS";
+ case STATE_CMD_STATUS_M:
+ return "STATE_CMD_STATUS_M";
+ case STATE_CMD_SEQIN:
+ return "STATE_CMD_SEQIN";
+ case STATE_CMD_READID:
+ return "STATE_CMD_READID";
+ case STATE_CMD_ERASE2:
+ return "STATE_CMD_ERASE2";
+ case STATE_CMD_RESET:
+ return "STATE_CMD_RESET";
+ case STATE_CMD_RNDOUT:
+ return "STATE_CMD_RNDOUT";
+ case STATE_CMD_RNDOUTSTART:
+ return "STATE_CMD_RNDOUTSTART";
+ case STATE_ADDR_PAGE:
+ return "STATE_ADDR_PAGE";
+ case STATE_ADDR_SEC:
+ return "STATE_ADDR_SEC";
+ case STATE_ADDR_ZERO:
+ return "STATE_ADDR_ZERO";
+ case STATE_ADDR_COLUMN:
+ return "STATE_ADDR_COLUMN";
+ case STATE_DATAIN:
+ return "STATE_DATAIN";
+ case STATE_DATAOUT:
+ return "STATE_DATAOUT";
+ case STATE_DATAOUT_ID:
+ return "STATE_DATAOUT_ID";
+ case STATE_DATAOUT_STATUS:
+ return "STATE_DATAOUT_STATUS";
+ case STATE_DATAOUT_STATUS_M:
+ return "STATE_DATAOUT_STATUS_M";
+ case STATE_READY:
+ return "STATE_READY";
+ case STATE_UNKNOWN:
+ return "STATE_UNKNOWN";
+ }
+
+ NS_ERR("get_state_name: unknown state, BUG\n");
+ return NULL;
+}
+
+/*
+ * Check if command is valid.
+ *
+ * RETURNS: 1 if wrong command, 0 if right.
+ */
+static int check_command(int cmd)
+{
+ switch (cmd) {
+
+ case NAND_CMD_READ0:
+ case NAND_CMD_READ1:
+ case NAND_CMD_READSTART:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_READOOB:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_READID:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_RESET:
+ case NAND_CMD_RNDOUT:
+ case NAND_CMD_RNDOUTSTART:
+ return 0;
+
+ case NAND_CMD_STATUS_MULTI:
+ default:
+ return 1;
+ }
+}
+
+/*
+ * Returns the state the simulator enters after the given command is accepted.
+ */
+static uint32_t get_state_by_command(unsigned command)
+{
+ switch (command) {
+ case NAND_CMD_READ0:
+ return STATE_CMD_READ0;
+ case NAND_CMD_READ1:
+ return STATE_CMD_READ1;
+ case NAND_CMD_PAGEPROG:
+ return STATE_CMD_PAGEPROG;
+ case NAND_CMD_READSTART:
+ return STATE_CMD_READSTART;
+ case NAND_CMD_READOOB:
+ return STATE_CMD_READOOB;
+ case NAND_CMD_ERASE1:
+ return STATE_CMD_ERASE1;
+ case NAND_CMD_STATUS:
+ return STATE_CMD_STATUS;
+ case NAND_CMD_STATUS_MULTI:
+ return STATE_CMD_STATUS_M;
+ case NAND_CMD_SEQIN:
+ return STATE_CMD_SEQIN;
+ case NAND_CMD_READID:
+ return STATE_CMD_READID;
+ case NAND_CMD_ERASE2:
+ return STATE_CMD_ERASE2;
+ case NAND_CMD_RESET:
+ return STATE_CMD_RESET;
+ case NAND_CMD_RNDOUT:
+ return STATE_CMD_RNDOUT;
+ case NAND_CMD_RNDOUTSTART:
+ return STATE_CMD_RNDOUTSTART;
+ }
+
+ NS_ERR("get_state_by_command: unknown command, BUG\n");
+ return 0;
+}
+
+/*
+ * Move an address byte to the corresponding internal register.
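+ * The first (pgaddrbytes - secaddrbytes) address bytes accumulate into the
+ * column register and the remaining bytes into the row register, least
+ * significant byte first.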
+ */
+static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
+{
+ uint byte = (uint)bt;
+
+ if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
+ ns->regs.column |= (byte << 8 * ns->regs.count);
+ else {
+ ns->regs.row |= (byte << 8 * (ns->regs.count -
+ ns->geom.pgaddrbytes +
+ ns->geom.secaddrbytes));
+ }
+
+ return;
+}
+
+/*
+ * Switch to STATE_READY state.
+ */
+static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
+{
+ NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));
+
+ ns->state = STATE_READY;
+ ns->nxstate = STATE_UNKNOWN;
+ ns->op = NULL;
+ ns->npstates = 0;
+ ns->stateidx = 0;
+ ns->regs.num = 0;
+ ns->regs.count = 0;
+ ns->regs.off = 0;
+ ns->regs.row = 0;
+ ns->regs.column = 0;
+ ns->regs.status = status;
+}
+
+/*
+ * If the operation isn't known yet, try to find it in the global array
+ * of supported operations.
+ *
+ * Operation can be unknown because of the following.
+ * 1. A new command was accepted and this is the first call to find the
+ *    corresponding chain of states. In this case ns->npstates = 0;
+ * 2. There are several operations which begin with the same command(s)
+ * (for example program from the second half and read from the
+ * second half operations both begin with the READ1 command). In this
+ * case the ns->pstates[] array contains previous states.
+ *
+ * Thus, the function tries to find operation containing the following
+ * states (if the 'flag' parameter is 0):
+ * ns->pstates[0], ... ns->pstates[ns->npstates], ns->state
+ *
+ * If exactly one matching operation is found, it is accepted
+ * (ns->op, ns->state and ns->nxstate are initialized, ns->npstates is
+ * zeroed).
+ *
+ * If there are several matches, the current state is pushed to the
+ * ns->pstates.
+ *
+ * The operation can be unknown only while commands are input to the chip.
+ * As soon as an address byte is accepted, the operation must be known.
+ * In such situation the function is called with 'flag' != 0, and the
+ * operation is searched using the following pattern:
+ * ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
+ *
+ * It is supposed that this pattern must either match one operation or
+ * none. There can't be ambiguity in that case.
+ *
+ * If no matches are found, the function does the following:
+ * 1. if there are saved states present, try to ignore them and search
+ * again only using the last command. If nothing was found, switch
+ * to the STATE_READY state.
+ * 2. if there are no saved states, switch to the STATE_READY state.
+ *
+ * RETURNS: -2 - no matched operations found.
+ * -1 - several matches.
+ * 0 - operation is found.
+ */
+static int find_operation(struct nandsim *ns, uint32_t flag)
+{
+ int opsfound = 0;
+ int i, j, idx = 0;
+
+ for (i = 0; i < NS_OPER_NUM; i++) {
+
+ int found = 1;
+
+ if (!(ns->options & ops[i].reqopts))
+ /* Ignore operations we can't perform */
+ continue;
+
+ if (flag) {
+ if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
+ continue;
+ } else {
+ if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
+ continue;
+ }
+
+ for (j = 0; j < ns->npstates; j++)
+ if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
+ && (ns->options & ops[idx].reqopts)) {
+ found = 0;
+ break;
+ }
+
+ if (found) {
+ idx = i;
+ opsfound += 1;
+ }
+ }
+
+ if (opsfound == 1) {
+ /* Exact match */
+ ns->op = &ops[idx].states[0];
+ if (flag) {
+ /*
+			 * In this case the find_operation function was
+			 * called when the address input had just begun, but the
+			 * address isn't fully input yet.  The current state must
+			 * therefore not be one of STATE_ADDR_*; instead, the
+			 * STATE_ADDR_* state must be the next state (ns->nxstate).
+ */
+ ns->stateidx = ns->npstates - 1;
+ } else {
+ ns->stateidx = ns->npstates;
+ }
+ ns->npstates = 0;
+ ns->state = ns->op[ns->stateidx];
+ ns->nxstate = ns->op[ns->stateidx + 1];
+ NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
+ idx, get_state_name(ns->state), get_state_name(ns->nxstate));
+ return 0;
+ }
+
+ if (opsfound == 0) {
+ /* Nothing was found. Try to ignore previous commands (if any) and search again */
+ if (ns->npstates != 0) {
+ NS_DBG("find_operation: no operation found, try again with state %s\n",
+ get_state_name(ns->state));
+ ns->npstates = 0;
+ return find_operation(ns, 0);
+
+ }
+ NS_DBG("find_operation: no operations found\n");
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return -2;
+ }
+
+ if (flag) {
+ /* This shouldn't happen */
+ NS_DBG("find_operation: BUG, operation must be known if address is input\n");
+ return -2;
+ }
+
+ NS_DBG("find_operation: there is still ambiguity\n");
+
+ ns->pstates[ns->npstates++] = ns->state;
+
+ return -1;
+}
+
+static void put_pages(struct nandsim *ns)
+{
+ int i;
+
+ for (i = 0; i < ns->held_cnt; i++)
+ page_cache_release(ns->held_pages[i]);
+}
+
+/* Get page cache pages in advance to provide NOFS memory allocation */
+static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
+{
+ pgoff_t index, start_index, end_index;
+ struct page *page;
+ struct address_space *mapping = file->f_mapping;
+
+ start_index = pos >> PAGE_CACHE_SHIFT;
+ end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+ if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
+ return -EINVAL;
+ ns->held_cnt = 0;
+ for (index = start_index; index <= end_index; index++) {
+ page = find_get_page(mapping, index);
+ if (page == NULL) {
+ page = find_or_create_page(mapping, index, GFP_NOFS);
+ if (page == NULL) {
+ write_inode_now(mapping->host, 1);
+ page = find_or_create_page(mapping, index, GFP_NOFS);
+ }
+ if (page == NULL) {
+ put_pages(ns);
+ return -ENOMEM;
+ }
+ unlock_page(page);
+ }
+ ns->held_pages[ns->held_cnt++] = page;
+ }
+ return 0;
+}
+
+static int set_memalloc(void)
+{
+ if (current->flags & PF_MEMALLOC)
+ return 0;
+ current->flags |= PF_MEMALLOC;
+ return 1;
+}
+
+static void clear_memalloc(int memalloc)
+{
+ if (memalloc)
+ current->flags &= ~PF_MEMALLOC;
+}
+
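+/*
+ * read_file()/write_file() transfer data between the cache file and a kernel
+ * buffer.  The temporary set_fs(get_ds()) widens the address limit so that
+ * vfs_read()/vfs_write() accept a kernel-space buffer, and PF_MEMALLOC lets
+ * allocations made during the I/O use the memory reserves, which avoids a
+ * deadlock when nandsim backs a filesystem that is under memory pressure.
+ */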
+static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+{
+ mm_segment_t old_fs;
+ ssize_t tx;
+ int err, memalloc;
+
+ err = get_pages(ns, file, count, *pos);
+ if (err)
+ return err;
+ old_fs = get_fs();
+ set_fs(get_ds());
+ memalloc = set_memalloc();
+ tx = vfs_read(file, (char __user *)buf, count, pos);
+ clear_memalloc(memalloc);
+ set_fs(old_fs);
+ put_pages(ns);
+ return tx;
+}
+
+static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+{
+ mm_segment_t old_fs;
+ ssize_t tx;
+ int err, memalloc;
+
+ err = get_pages(ns, file, count, *pos);
+ if (err)
+ return err;
+ old_fs = get_fs();
+ set_fs(get_ds());
+ memalloc = set_memalloc();
+ tx = vfs_write(file, (char __user *)buf, count, pos);
+ clear_memalloc(memalloc);
+ set_fs(old_fs);
+ put_pages(ns);
+ return tx;
+}
+
+/*
+ * Returns a pointer to the current page.
+ */
+static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
+{
+ return &(ns->pages[ns->regs.row]);
+}
+
+/*
+ * Returns a pointer to the current byte within the current page.
+ */
+static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
+{
+ return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
+}
+
+int do_read_error(struct nandsim *ns, int num)
+{
+ unsigned int page_no = ns->regs.row;
+
+ if (read_error(page_no)) {
+ int i;
+ memset(ns->buf.byte, 0xFF, num);
+ for (i = 0; i < num; ++i)
+ ns->buf.byte[i] = random32();
+ NS_WARN("simulating read error in page %u\n", page_no);
+ return 1;
+ }
+ return 0;
+}
+
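+/*
+ * Randomly flip bits in the read buffer.  random32() returns a uniform
+ * 32-bit value, so the (1 << 22) threshold triggers a flip event on roughly
+ * 1 out of every 1024 reads; when it does, between 1 and 'bitflips' bits
+ * are flipped.
+ */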
+void do_bit_flips(struct nandsim *ns, int num)
+{
+ if (bitflips && random32() < (1 << 22)) {
+ int flips = 1;
+ if (bitflips > 1)
+ flips = (random32() % (int) bitflips) + 1;
+ while (flips--) {
+ int pos = random32() % (num * 8);
+ ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
+ NS_WARN("read_page: flipping bit %d in page %d "
+ "reading from %d ecc: corrected=%u failed=%u\n",
+ pos, ns->regs.row, ns->regs.column + ns->regs.off,
+ nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
+ }
+ }
+}
+
+/*
+ * Fill the NAND buffer with data read from the specified page.
+ */
+static void read_page(struct nandsim *ns, int num)
+{
+ union ns_mem *mypage;
+
+ if (ns->cfile) {
+ if (!ns->pages_written[ns->regs.row]) {
+ NS_DBG("read_page: page %d not written\n", ns->regs.row);
+ memset(ns->buf.byte, 0xFF, num);
+ } else {
+ loff_t pos;
+ ssize_t tx;
+
+ NS_DBG("read_page: page %d written, reading from %d\n",
+ ns->regs.row, ns->regs.column + ns->regs.off);
+ if (do_read_error(ns, num))
+ return;
+ pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
+ tx = read_file(ns, ns->cfile, ns->buf.byte, num, &pos);
+ if (tx != num) {
+ NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return;
+ }
+ do_bit_flips(ns, num);
+ }
+ return;
+ }
+
+ mypage = NS_GET_PAGE(ns);
+ if (mypage->byte == NULL) {
+ NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
+ memset(ns->buf.byte, 0xFF, num);
+ } else {
+ NS_DBG("read_page: page %d allocated, reading from %d\n",
+ ns->regs.row, ns->regs.column + ns->regs.off);
+ if (do_read_error(ns, num))
+ return;
+ memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
+ do_bit_flips(ns, num);
+ }
+}
+
+/*
+ * Erase all pages in the specified sector.
+ */
+static void erase_sector(struct nandsim *ns)
+{
+ union ns_mem *mypage;
+ int i;
+
+ if (ns->cfile) {
+ for (i = 0; i < ns->geom.pgsec; i++)
+ if (ns->pages_written[ns->regs.row + i]) {
+ NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
+ ns->pages_written[ns->regs.row + i] = 0;
+ }
+ return;
+ }
+
+ mypage = NS_GET_PAGE(ns);
+ for (i = 0; i < ns->geom.pgsec; i++) {
+ if (mypage->byte != NULL) {
+ NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
+ kmem_cache_free(ns->nand_pages_slab, mypage->byte);
+ mypage->byte = NULL;
+ }
+ mypage++;
+ }
+}
+
+/*
+ * Program the specified page with the contents from the NAND buffer.
+ */
+static int prog_page(struct nandsim *ns, int num)
+{
+ int i;
+ union ns_mem *mypage;
+ u_char *pg_off;
+
+ if (ns->cfile) {
+ loff_t off, pos;
+ ssize_t tx;
+ int all;
+
+ NS_DBG("prog_page: writing page %d\n", ns->regs.row);
+ pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
+ off = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
+ if (!ns->pages_written[ns->regs.row]) {
+ all = 1;
+ memset(ns->file_buf, 0xff, ns->geom.pgszoob);
+ } else {
+ all = 0;
+ pos = off;
+ tx = read_file(ns, ns->cfile, pg_off, num, &pos);
+ if (tx != num) {
+ NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ }
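+		/*
+		 * NAND programming can only clear bits (1 -> 0), so AND the
+		 * new data into the existing page contents.
+		 */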
+ for (i = 0; i < num; i++)
+ pg_off[i] &= ns->buf.byte[i];
+ if (all) {
+ pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
+ tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, &pos);
+ if (tx != ns->geom.pgszoob) {
+ NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ ns->pages_written[ns->regs.row] = 1;
+ } else {
+ pos = off;
+ tx = write_file(ns, ns->cfile, pg_off, num, &pos);
+ if (tx != num) {
+ NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ }
+ return 0;
+ }
+
+ mypage = NS_GET_PAGE(ns);
+ if (mypage->byte == NULL) {
+ NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
+ /*
+ * We allocate memory with GFP_NOFS because a flash FS may
+ * utilize this. If it is holding an FS lock, then gets here,
+ * then kernel memory alloc runs writeback which goes to the FS
+ * again and deadlocks. This was seen in practice.
+ */
+ mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
+ if (mypage->byte == NULL) {
+ NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
+ return -1;
+ }
+ memset(mypage->byte, 0xFF, ns->geom.pgszoob);
+ }
+
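+	/* As above, programming can only clear bits: AND the new data in. */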
+ pg_off = NS_PAGE_BYTE_OFF(ns);
+ for (i = 0; i < num; i++)
+ pg_off[i] &= ns->buf.byte[i];
+
+ return 0;
+}
+
+/*
+ * If state has any action bit, perform this action.
+ *
+ * RETURNS: 0 if success, -1 if error.
+ */
+static int do_state_action(struct nandsim *ns, uint32_t action)
+{
+ int num;
+ int busdiv = ns->busw == 8 ? 1 : 2;
+ unsigned int erase_block_no, page_no;
+
+ action &= ACTION_MASK;
+
+ /* Check that page address input is correct */
+ if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
+ NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
+ return -1;
+ }
+
+ switch (action) {
+
+ case ACTION_CPY:
+ /*
+ * Copy page data to the internal buffer.
+ */
+
+		/* The column must lie within the page + OOB area */
+ if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
+ NS_ERR("do_state_action: column number is too large\n");
+ break;
+ }
+ num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ read_page(ns, num);
+
+ NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
+ num, NS_RAW_OFFSET(ns) + ns->regs.off);
+
+ if (ns->regs.off == 0)
+ NS_LOG("read page %d\n", ns->regs.row);
+ else if (ns->regs.off < ns->geom.pgsz)
+ NS_LOG("read page %d (second half)\n", ns->regs.row);
+ else
+ NS_LOG("read OOB of page %d\n", ns->regs.row);
+
+ NS_UDELAY(access_delay);
+ NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
+
+ break;
+
+ case ACTION_SECERASE:
+ /*
+ * Erase sector.
+ */
+
+ if (ns->lines.wp) {
+ NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
+ return -1;
+ }
+
+ if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
+ || (ns->regs.row & ~(ns->geom.secsz - 1))) {
+ NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
+ return -1;
+ }
+
+ ns->regs.row = (ns->regs.row <<
+ 8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
+ ns->regs.column = 0;
+
+ erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
+
+ NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
+ ns->regs.row, NS_RAW_OFFSET(ns));
+ NS_LOG("erase sector %u\n", erase_block_no);
+
+ erase_sector(ns);
+
+ NS_MDELAY(erase_delay);
+
+ if (erase_block_wear)
+ update_wear(erase_block_no);
+
+ if (erase_error(erase_block_no)) {
+ NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
+ return -1;
+ }
+
+ break;
+
+ case ACTION_PRGPAGE:
+ /*
+ * Program page - move internal buffer data to the page.
+ */
+
+ if (ns->lines.wp) {
+			NS_WARN("do_state_action: device is write-protected, ignore page program\n");
+ return -1;
+ }
+
+ num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ if (num != ns->regs.count) {
+ NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
+ ns->regs.count, num);
+ return -1;
+ }
+
+ if (prog_page(ns, num) == -1)
+ return -1;
+
+ page_no = ns->regs.row;
+
+ NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
+ num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
+		NS_LOG("program page %d\n", ns->regs.row);
+
+ NS_UDELAY(programm_delay);
+ NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
+
+ if (write_error(page_no)) {
+ NS_WARN("simulating write failure in page %u\n", page_no);
+ return -1;
+ }
+
+ break;
+
+ case ACTION_ZEROOFF:
+ NS_DBG("do_state_action: set internal offset to 0\n");
+ ns->regs.off = 0;
+ break;
+
+ case ACTION_HALFOFF:
+ if (!(ns->options & OPT_PAGE512_8BIT)) {
+			NS_ERR("do_state_action: BUG! can't skip half of page for non-512-byte"
+				" page size, 8-bit chips\n");
+ return -1;
+ }
+ NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
+ ns->regs.off = ns->geom.pgsz/2;
+ break;
+
+ case ACTION_OOBOFF:
+ NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
+ ns->regs.off = ns->geom.pgsz;
+ break;
+
+ default:
+ NS_DBG("do_state_action: BUG! unknown action\n");
+ }
+
+ return 0;
+}
+
+/*
+ * Switch simulator's state.
+ */
+static void switch_state(struct nandsim *ns)
+{
+ if (ns->op) {
+ /*
+		 * The current operation has already been identified.
+ * Just follow the states chain.
+ */
+
+ ns->stateidx += 1;
+ ns->state = ns->nxstate;
+ ns->nxstate = ns->op[ns->stateidx + 1];
+
+ NS_DBG("switch_state: operation is known, switch to the next state, "
+ "state: %s, nxstate: %s\n",
+ get_state_name(ns->state), get_state_name(ns->nxstate));
+
+		/* See whether we need to do some action */
+ if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ } else {
+ /*
+ * We don't yet know which operation we perform.
+ * Try to identify it.
+ */
+
+ /*
+ * The only event causing the switch_state function to
+		 * be called with a yet unknown operation is a new command.
+ */
+ ns->state = get_state_by_command(ns->regs.command);
+
+ NS_DBG("switch_state: operation is unknown, try to find it\n");
+
+ if (find_operation(ns, 0) != 0)
+ return;
+
+ if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+ }
+
+	/* For 16-bit devices the column is the page offset in words */
+ if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
+ NS_DBG("switch_state: double the column number for 16x device\n");
+ ns->regs.column <<= 1;
+ }
+
+ if (NS_STATE(ns->nxstate) == STATE_READY) {
+ /*
+ * The current state is the last. Return to STATE_READY
+ */
+
+ u_char status = NS_STATUS_OK(ns);
+
+ /* In case of data states, see if all bytes were input/output */
+ if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
+ && ns->regs.count != ns->regs.num) {
+ NS_WARN("switch_state: not all bytes were processed, %d left\n",
+ ns->regs.num - ns->regs.count);
+ status = NS_STATUS_FAILED(ns);
+ }
+
+ NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
+
+ switch_to_ready_state(ns, status);
+
+ return;
+ } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
+ /*
+ * If the next state is data input/output, switch to it now
+ */
+
+ ns->state = ns->nxstate;
+ ns->nxstate = ns->op[++ns->stateidx + 1];
+ ns->regs.num = ns->regs.count = 0;
+
+ NS_DBG("switch_state: the next state is data I/O, switch, "
+ "state: %s, nxstate: %s\n",
+ get_state_name(ns->state), get_state_name(ns->nxstate));
+
+ /*
+ * Set the internal register to the count of bytes which
+ * are expected to be input or output
+ */
+ switch (NS_STATE(ns->state)) {
+ case STATE_DATAIN:
+ case STATE_DATAOUT:
+ ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ break;
+
+ case STATE_DATAOUT_ID:
+ ns->regs.num = ns->geom.idbytes;
+ break;
+
+ case STATE_DATAOUT_STATUS:
+ case STATE_DATAOUT_STATUS_M:
+ ns->regs.count = ns->regs.num = 0;
+ break;
+
+ default:
+ NS_ERR("switch_state: BUG! unknown data state\n");
+ }
+
+ } else if (ns->nxstate & STATE_ADDR_MASK) {
+ /*
+ * If the next state is address input, set the internal
+ * register to the number of expected address bytes
+ */
+
+ ns->regs.count = 0;
+
+ switch (NS_STATE(ns->nxstate)) {
+ case STATE_ADDR_PAGE:
+ ns->regs.num = ns->geom.pgaddrbytes;
+
+ break;
+ case STATE_ADDR_SEC:
+ ns->regs.num = ns->geom.secaddrbytes;
+ break;
+
+ case STATE_ADDR_ZERO:
+ ns->regs.num = 1;
+ break;
+
+ case STATE_ADDR_COLUMN:
+ /* Column address is always 2 bytes */
+ ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
+ break;
+
+ default:
+ NS_ERR("switch_state: BUG! unknown address state\n");
+ }
+ } else {
+ /*
+ * Just reset internal counters.
+ */
+
+ ns->regs.num = 0;
+ ns->regs.count = 0;
+ }
+}
+
+static u_char ns_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
+ u_char outb = 0x00;
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
+ return outb;
+ }
+ if (ns->lines.ale || ns->lines.cle) {
+ NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
+ return outb;
+ }
+ if (!(ns->state & STATE_DATAOUT_MASK)) {
+ NS_WARN("read_byte: unexpected data output cycle, state is %s "
+ "return %#x\n", get_state_name(ns->state), (uint)outb);
+ return outb;
+ }
+
+ /* Status register may be read as many times as it is wanted */
+ if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
+ NS_DBG("read_byte: return %#x status\n", ns->regs.status);
+ return ns->regs.status;
+ }
+
+ /* Check if there is any data in the internal buffer which may be read */
+ if (ns->regs.count == ns->regs.num) {
+ NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
+ return outb;
+ }
+
+ switch (NS_STATE(ns->state)) {
+ case STATE_DATAOUT:
+ if (ns->busw == 8) {
+ outb = ns->buf.byte[ns->regs.count];
+ ns->regs.count += 1;
+ } else {
+ outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
+ ns->regs.count += 2;
+ }
+ break;
+ case STATE_DATAOUT_ID:
+ NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
+ outb = ns->ids[ns->regs.count];
+ ns->regs.count += 1;
+ break;
+ default:
+ BUG();
+ }
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("read_byte: all bytes were read\n");
+
+		/*
+		 * OPT_AUTOINCR allows reading the next consecutive pages
+		 * without issuing a new read operation cycle.
+		 */
+ if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
+ ns->regs.count = 0;
+ if (ns->regs.row + 1 < ns->geom.pgnum)
+ ns->regs.row += 1;
+ NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
+ do_state_action(ns, ACTION_CPY);
+ }
+ else if (NS_STATE(ns->nxstate) == STATE_READY)
+ switch_state(ns);
+
+ }
+
+ return outb;
+}
+
+static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
+{
+ struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("write_byte: chip is disabled, ignore write\n");
+ return;
+ }
+ if (ns->lines.ale && ns->lines.cle) {
+ NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
+ return;
+ }
+
+ if (ns->lines.cle == 1) {
+ /*
+ * The byte written is a command.
+ */
+
+ if (byte == NAND_CMD_RESET) {
+ NS_LOG("reset chip\n");
+ switch_to_ready_state(ns, NS_STATUS_OK(ns));
+ return;
+ }
+
+ /* Check that the command byte is correct */
+ if (check_command(byte)) {
+ NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
+ return;
+ }
+
+ if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
+ || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M
+ || NS_STATE(ns->state) == STATE_DATAOUT) {
+ int row = ns->regs.row;
+
+ switch_state(ns);
+ if (byte == NAND_CMD_RNDOUT)
+ ns->regs.row = row;
+ }
+
+ /* Check if chip is expecting command */
+ if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
+ /* Do not warn if only 2 id bytes are read */
+ if (!(ns->regs.command == NAND_CMD_READID &&
+ NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
+				/*
+				 * We are in a situation where something other than a
+				 * command was expected, but a command was input. In this
+				 * case, ignore the previous command(s)/state(s) and
+				 * accept the last one.
+				 */
+ NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
+ "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
+ }
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ }
+
+ NS_DBG("command byte corresponding to %s state accepted\n",
+ get_state_name(get_state_by_command(byte)));
+ ns->regs.command = byte;
+ switch_state(ns);
+
+ } else if (ns->lines.ale == 1) {
+ /*
+ * The byte written is an address.
+ */
+
+ if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
+
+ NS_DBG("write_byte: operation isn't known yet, identify it\n");
+
+ if (find_operation(ns, 1) < 0)
+ return;
+
+ if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ ns->regs.count = 0;
+ switch (NS_STATE(ns->nxstate)) {
+ case STATE_ADDR_PAGE:
+ ns->regs.num = ns->geom.pgaddrbytes;
+ break;
+ case STATE_ADDR_SEC:
+ ns->regs.num = ns->geom.secaddrbytes;
+ break;
+ case STATE_ADDR_ZERO:
+ ns->regs.num = 1;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ /* Check that chip is expecting address */
+ if (!(ns->nxstate & STATE_ADDR_MASK)) {
+ NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
+ "switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+		/* Check if this byte is expected */
+ if (ns->regs.count == ns->regs.num) {
+ NS_ERR("write_byte: no more address bytes expected\n");
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ accept_addr_byte(ns, byte);
+
+ ns->regs.count += 1;
+
+ NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
+ (uint)byte, ns->regs.count, ns->regs.num);
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
+ switch_state(ns);
+ }
+
+ } else {
+ /*
+		 * The byte written is input data.
+ */
+
+ /* Check that chip is expecting data input */
+ if (!(ns->state & STATE_DATAIN_MASK)) {
+ NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
+ "switch to %s\n", (uint)byte,
+ get_state_name(ns->state), get_state_name(STATE_READY));
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+		/* Check if this byte is expected */
+ if (ns->regs.count == ns->regs.num) {
+			NS_WARN("write_byte: %u input bytes have already been accepted, ignore write\n",
+ ns->regs.num);
+ return;
+ }
+
+ if (ns->busw == 8) {
+ ns->buf.byte[ns->regs.count] = byte;
+ ns->regs.count += 1;
+ } else {
+ ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
+ ns->regs.count += 2;
+ }
+ }
+
+ return;
+}
+
+static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
+{
+ struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
+
+ ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
+ ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
+ ns->lines.ce = bitmask & NAND_NCE ? 1 : 0;
+
+ if (cmd != NAND_CMD_NONE)
+ ns_nand_write_byte(mtd, cmd);
+}
+
+static int ns_device_ready(struct mtd_info *mtd)
+{
+ NS_DBG("device_ready\n");
+ return 1;
+}
+
+static uint16_t ns_nand_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+
+ NS_DBG("read_word\n");
+
+ return chip->read_byte(mtd) | (chip->read_byte(mtd) << 8);
+}
+
+static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
+
+ /* Check that chip is expecting data input */
+ if (!(ns->state & STATE_DATAIN_MASK)) {
+ NS_ERR("write_buf: data input isn't expected, state is %s, "
+ "switch to STATE_READY\n", get_state_name(ns->state));
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ /* Check if these are expected bytes */
+ if (ns->regs.count + len > ns->regs.num) {
+ NS_ERR("write_buf: too many input bytes\n");
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ memcpy(ns->buf.byte + ns->regs.count, buf, len);
+ ns->regs.count += len;
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
+ }
+}
+
+static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("read_buf: chip is disabled\n");
+ return;
+ }
+ if (ns->lines.ale || ns->lines.cle) {
+ NS_ERR("read_buf: ALE or CLE pin is high\n");
+ return;
+ }
+ if (!(ns->state & STATE_DATAOUT_MASK)) {
+ NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
+ get_state_name(ns->state));
+ return;
+ }
+
+ if (NS_STATE(ns->state) != STATE_DATAOUT) {
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = ((struct nand_chip *)mtd->priv)->read_byte(mtd);
+
+ return;
+ }
+
+ /* Check if these are expected bytes */
+ if (ns->regs.count + len > ns->regs.num) {
+ NS_ERR("read_buf: too many bytes to read\n");
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ memcpy(buf, ns->buf.byte + ns->regs.count, len);
+ ns->regs.count += len;
+
+ if (ns->regs.count == ns->regs.num) {
+ if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
+ ns->regs.count = 0;
+ if (ns->regs.row + 1 < ns->geom.pgnum)
+ ns->regs.row += 1;
+ NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
+ do_state_action(ns, ACTION_CPY);
+ }
+ else if (NS_STATE(ns->nxstate) == STATE_READY)
+ switch_state(ns);
+ }
+
+ return;
+}
+
+static int ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ ns_nand_read_buf(mtd, (u_char *)&ns_verify_buf[0], len);
+
+ if (!memcmp(buf, &ns_verify_buf[0], len)) {
+ NS_DBG("verify_buf: the buffer is OK\n");
+ return 0;
+ } else {
+ NS_DBG("verify_buf: the buffer is wrong\n");
+ return -EFAULT;
+ }
+}
+
+/*
+ * Module initialization function
+ */
+static int __init ns_init_module(void)
+{
+ struct nand_chip *chip;
+ struct nandsim *nand;
+ int retval = -ENOMEM, i;
+
+ if (bus_width != 8 && bus_width != 16) {
+ NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
+ return -EINVAL;
+ }
+
+ /* Allocate and initialize mtd_info, nand_chip and nandsim structures */
+ nsmtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip)
+ + sizeof(struct nandsim), GFP_KERNEL);
+ if (!nsmtd) {
+ NS_ERR("unable to allocate core structures.\n");
+ return -ENOMEM;
+ }
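+	/*
+	 * The three structures live back-to-back in the allocation above:
+	 * mtd_info first, then nand_chip, then nandsim.
+	 */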
+ chip = (struct nand_chip *)(nsmtd + 1);
+ nsmtd->priv = (void *)chip;
+ nand = (struct nandsim *)(chip + 1);
+ chip->priv = (void *)nand;
+
+ /*
+ * Register simulator's callbacks.
+ */
+ chip->cmd_ctrl = ns_hwcontrol;
+ chip->read_byte = ns_nand_read_byte;
+ chip->dev_ready = ns_device_ready;
+ chip->write_buf = ns_nand_write_buf;
+ chip->read_buf = ns_nand_read_buf;
+ chip->verify_buf = ns_nand_verify_buf;
+ chip->read_word = ns_nand_read_word;
+ chip->ecc.mode = NAND_ECC_SOFT;
+ /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
+ /* and 'badblocks' parameters to work */
+ chip->options |= NAND_SKIP_BBTSCAN;
+
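+	/*
+	 * bbt module parameter: 0 - RAM-based bad block table (default),
+	 * 1 - flash-based BBT, 2 - flash-based BBT stored without OOB
+	 * markers (NAND_BBT_NO_OOB). The cases below fall through
+	 * intentionally, so 2 implies 1.
+	 */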
+ switch (bbt) {
+ case 2:
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ case 1:
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ case 0:
+ break;
+ default:
+ NS_ERR("bbt has to be 0..2\n");
+ retval = -EINVAL;
+ goto error;
+ }
+ /*
+ * Perform minimum nandsim structure initialization to handle
+ * the initial ID read command correctly
+ */
+ if (third_id_byte != 0xFF || fourth_id_byte != 0xFF)
+ nand->geom.idbytes = 4;
+ else
+ nand->geom.idbytes = 2;
+ nand->regs.status = NS_STATUS_OK(nand);
+ nand->nxstate = STATE_UNKNOWN;
+ nand->options |= OPT_PAGE256; /* temporary value */
+ nand->ids[0] = first_id_byte;
+ nand->ids[1] = second_id_byte;
+ nand->ids[2] = third_id_byte;
+ nand->ids[3] = fourth_id_byte;
+ if (bus_width == 16) {
+ nand->busw = 16;
+ chip->options |= NAND_BUSWIDTH_16;
+ }
+
+ nsmtd->owner = THIS_MODULE;
+
+ if ((retval = parse_weakblocks()) != 0)
+ goto error;
+
+ if ((retval = parse_weakpages()) != 0)
+ goto error;
+
+ if ((retval = parse_gravepages()) != 0)
+ goto error;
+
+ retval = nand_scan_ident(nsmtd, 1, NULL);
+ if (retval) {
+ NS_ERR("cannot scan NAND Simulator device\n");
+ if (retval > 0)
+ retval = -ENXIO;
+ goto error;
+ }
+
+ if (bch) {
+ unsigned int eccsteps, eccbytes;
+ if (!mtd_nand_has_bch()) {
+ NS_ERR("BCH ECC support is disabled\n");
+ retval = -EINVAL;
+ goto error;
+ }
+ /* use 512-byte ecc blocks */
+ eccsteps = nsmtd->writesize/512;
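+		/*
+		 * Each correctable bit costs 13 ECC bits for a 512-byte code
+		 * word (BCH over GF(2^13)); round up to whole bytes.
+		 */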
+ eccbytes = (bch*13+7)/8;
+ /* do not bother supporting small page devices */
+ if ((nsmtd->oobsize < 64) || !eccsteps) {
+ NS_ERR("bch not available on small page devices\n");
+ retval = -EINVAL;
+ goto error;
+ }
+ if ((eccbytes*eccsteps+2) > nsmtd->oobsize) {
+ NS_ERR("invalid bch value %u\n", bch);
+ retval = -EINVAL;
+ goto error;
+ }
+ chip->ecc.mode = NAND_ECC_SOFT_BCH;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = eccbytes;
+ NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
+ }
+
+ retval = nand_scan_tail(nsmtd);
+ if (retval) {
+ NS_ERR("can't register NAND Simulator\n");
+ if (retval > 0)
+ retval = -ENXIO;
+ goto error;
+ }
+
+ if (overridesize) {
+ uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
+ if (new_size >> overridesize != nsmtd->erasesize) {
+ NS_ERR("overridesize is too big\n");
+ goto err_exit;
+ }
+ /* N.B. This relies on nand_scan not doing anything with the size before we change it */
+ nsmtd->size = new_size;
+ chip->chipsize = new_size;
+ chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
+ chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+ }
+
+ if ((retval = setup_wear_reporting(nsmtd)) != 0)
+ goto err_exit;
+
+ if ((retval = init_nandsim(nsmtd)) != 0)
+ goto err_exit;
+
+ if ((retval = nand_default_bbt(nsmtd)) != 0)
+ goto err_exit;
+
+ if ((retval = parse_badblocks(nand, nsmtd)) != 0)
+ goto err_exit;
+
+ /* Register NAND partitions */
+ retval = mtd_device_register(nsmtd, &nand->partitions[0],
+ nand->nbparts);
+ if (retval != 0)
+ goto err_exit;
+
+ return 0;
+
+err_exit:
+ free_nandsim(nand);
+ nand_release(nsmtd);
+	for (i = 0; i < ARRAY_SIZE(nand->partitions); ++i)
+ kfree(nand->partitions[i].name);
+error:
+ kfree(nsmtd);
+ free_lists();
+
+ return retval;
+}
+
+module_init(ns_init_module);
+
+/*
+ * Module clean-up function
+ */
+static void __exit ns_cleanup_module(void)
+{
+ struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
+ int i;
+
+ free_nandsim(ns); /* Free nandsim private resources */
+ nand_release(nsmtd); /* Unregister driver */
+	for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
+ kfree(ns->partitions[i].name);
+ kfree(nsmtd); /* Free other structures */
+ free_lists();
+}
+
+module_exit(ns_cleanup_module);
+
+MODULE_LICENSE ("GPL");
+MODULE_AUTHOR ("Artem B. Bityuckiy");
+MODULE_DESCRIPTION ("The NAND flash simulator");
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
new file mode 100644
index 00000000..2b6f632c
--- /dev/null
+++ b/drivers/mtd/nand/ndfc.c
@@ -0,0 +1,302 @@
+/*
+ * drivers/mtd/ndfc.c
+ *
+ * Overview:
+ * Platform independent driver for NDFC (NanD Flash Controller)
+ * integrated into EP440 cores
+ *
+ * Ported to an OF platform driver by Sean MacLennan
+ *
+ * The NDFC supports multiple chips, but this driver only supports a
+ * single chip since I do not have access to any boards with
+ * multiple chips.
+ *
+ * Author: Thomas Gleixner
+ *
+ * Copyright 2006 IBM
+ * Copyright 2008 PIKA Technologies
+ * Sean MacLennan <smaclennan@pikatech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/ndfc.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of_platform.h>
+#include <asm/io.h>
+
+#define NDFC_MAX_CS 4
+
+struct ndfc_controller {
+ struct platform_device *ofdev;
+ void __iomem *ndfcbase;
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ int chip_select;
+ struct nand_hw_control ndfc_control;
+};
+
+static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
+
+static void ndfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ uint32_t ccr;
+ struct nand_chip *nchip = mtd->priv;
+ struct ndfc_controller *ndfc = nchip->priv;
+
+ ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
+ if (chip >= 0) {
+ ccr &= ~NDFC_CCR_BS_MASK;
+ ccr |= NDFC_CCR_BS(chip + ndfc->chip_select);
+ } else
+ ccr |= NDFC_CCR_RESET_CE;
+ out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+}
+
+static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_CMD);
+ else
+ writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_ALE);
+}
+
+static int ndfc_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
+
+ return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
+}
+
+static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ uint32_t ccr;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
+
+ ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
+ ccr |= NDFC_CCR_RESET_ECC;
+ out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+ wmb();
+}
+
+static int ndfc_calculate_ecc(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
+ uint32_t ecc;
+ uint8_t *p = (uint8_t *)&ecc;
+
+ wmb();
+ ecc = in_be32(ndfc->ndfcbase + NDFC_ECC);
+	/* The NDFC uses Smart Media (SMC) byte order */
+ ecc_code[0] = p[1];
+ ecc_code[1] = p[2];
+ ecc_code[2] = p[3];
+
+ return 0;
+}
+
+/*
+ * Speedups for buffer read/write/verify
+ *
+ * NDFC allows 32-bit read/write of data, so we can speed up the buffer
+ * functions. No further checking, as nand_base will always read/write
+ * page aligned.
+ */
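+/* Consequently, len is assumed to be a multiple of 4 and buf 32-bit aligned. */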
+static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
+ uint32_t *p = (uint32_t *) buf;
+
+	for (; len > 0; len -= 4)
+ *p++ = in_be32(ndfc->ndfcbase + NDFC_DATA);
+}
+
+static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
+ uint32_t *p = (uint32_t *) buf;
+
+	for (; len > 0; len -= 4)
+ out_be32(ndfc->ndfcbase + NDFC_DATA, *p++);
+}
+
+static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
+ uint32_t *p = (uint32_t *) buf;
+
+	for (; len > 0; len -= 4)
+ if (*p++ != in_be32(ndfc->ndfcbase + NDFC_DATA))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * Initialize chip structure
+ */
+static int ndfc_chip_init(struct ndfc_controller *ndfc,
+ struct device_node *node)
+{
+ struct device_node *flash_np;
+ struct nand_chip *chip = &ndfc->chip;
+ struct mtd_part_parser_data ppdata;
+ int ret;
+
+ chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
+ chip->IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
+ chip->cmd_ctrl = ndfc_hwcontrol;
+ chip->dev_ready = ndfc_ready;
+ chip->select_chip = ndfc_select_chip;
+ chip->chip_delay = 50;
+ chip->controller = &ndfc->ndfc_control;
+ chip->read_buf = ndfc_read_buf;
+ chip->write_buf = ndfc_write_buf;
+ chip->verify_buf = ndfc_verify_buf;
+ chip->ecc.correct = nand_correct_data;
+ chip->ecc.hwctl = ndfc_enable_hwecc;
+ chip->ecc.calculate = ndfc_calculate_ecc;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ chip->priv = ndfc;
+
+ ndfc->mtd.priv = chip;
+ ndfc->mtd.owner = THIS_MODULE;
+
+ flash_np = of_get_next_child(node, NULL);
+ if (!flash_np)
+ return -ENODEV;
+
+ ppdata.of_node = flash_np;
+ ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
+ dev_name(&ndfc->ofdev->dev), flash_np->name);
+ if (!ndfc->mtd.name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = nand_scan(&ndfc->mtd, 1);
+ if (ret)
+ goto err;
+
+ ret = mtd_device_parse_register(&ndfc->mtd, NULL, &ppdata, NULL, 0);
+
+err:
+ of_node_put(flash_np);
+ if (ret)
+ kfree(ndfc->mtd.name);
+ return ret;
+}
+
+static int __devinit ndfc_probe(struct platform_device *ofdev)
+{
+ struct ndfc_controller *ndfc;
+ const __be32 *reg;
+ u32 ccr;
+ int err, len, cs;
+
+ /* Read the reg property to get the chip select */
+ reg = of_get_property(ofdev->dev.of_node, "reg", &len);
+ if (reg == NULL || len != 12) {
+		dev_err(&ofdev->dev, "unable to read reg property (%d)\n", len);
+ return -ENOENT;
+ }
+
+ cs = be32_to_cpu(reg[0]);
+ if (cs >= NDFC_MAX_CS) {
+ dev_err(&ofdev->dev, "invalid CS number (%d)\n", cs);
+ return -EINVAL;
+ }
+
+ ndfc = &ndfc_ctrl[cs];
+ ndfc->chip_select = cs;
+
+ spin_lock_init(&ndfc->ndfc_control.lock);
+ init_waitqueue_head(&ndfc->ndfc_control.wq);
+ ndfc->ofdev = ofdev;
+ dev_set_drvdata(&ofdev->dev, ndfc);
+
+ ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
+ if (!ndfc->ndfcbase) {
+ dev_err(&ofdev->dev, "failed to get memory\n");
+ return -EIO;
+ }
+
+ ccr = NDFC_CCR_BS(ndfc->chip_select);
+
+ /* It is ok if ccr does not exist - just default to 0 */
+ reg = of_get_property(ofdev->dev.of_node, "ccr", NULL);
+ if (reg)
+ ccr |= be32_to_cpup(reg);
+
+ out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+
+ /* Set the bank settings if given */
+ reg = of_get_property(ofdev->dev.of_node, "bank-settings", NULL);
+ if (reg) {
+ int offset = NDFC_BCFG0 + (ndfc->chip_select << 2);
+ out_be32(ndfc->ndfcbase + offset, be32_to_cpup(reg));
+ }
+
+ err = ndfc_chip_init(ndfc, ofdev->dev.of_node);
+ if (err) {
+ iounmap(ndfc->ndfcbase);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __devexit ndfc_remove(struct platform_device *ofdev)
+{
+ struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
+
+ nand_release(&ndfc->mtd);
+ kfree(ndfc->mtd.name);
+
+ return 0;
+}
+
+static const struct of_device_id ndfc_match[] = {
+ { .compatible = "ibm,ndfc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ndfc_match);
+
+static struct platform_driver ndfc_driver = {
+ .driver = {
+ .name = "ndfc",
+ .owner = THIS_MODULE,
+ .of_match_table = ndfc_match,
+ },
+ .probe = ndfc_probe,
+ .remove = __devexit_p(ndfc_remove),
+};
+
+module_platform_driver(ndfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("OF Platform driver for NDFC");
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
new file mode 100644
index 00000000..a86aa812
--- /dev/null
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -0,0 +1,235 @@
+/*
+ * drivers/mtd/nand/nomadik_nand.c
+ *
+ * Overview:
+ * Driver for on-board NAND flash on Nomadik Platforms
+ *
+ * Copyright © 2007 STMicroelectronics Pvt. Ltd.
+ * Author: Sachin Verma <sachin.verma@st.com>
+ *
+ * Copyright © 2009 Alessandro Rubini
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <mach/nand.h>
+#include <mach/fsmc.h>
+
+#include <mtd/mtd-abi.h>
+
+struct nomadik_nand_host {
+ struct mtd_info mtd;
+ struct nand_chip nand;
+ void __iomem *data_va;
+ void __iomem *cmd_va;
+ void __iomem *addr_va;
+ struct nand_bbt_descr *bbt_desc;
+};
+
+static struct nand_ecclayout nomadik_ecc_layout = {
+ .eccbytes = 3 * 4,
+ .eccpos = { /* each subpage has 16 bytes: pos 2,3,4 hosts ECC */
+ 0x02, 0x03, 0x04,
+ 0x12, 0x13, 0x14,
+ 0x22, 0x23, 0x24,
+ 0x32, 0x33, 0x34},
+ /* let's keep bytes 5,6,7 for us, just in case we change ECC algo */
+ .oobfree = { {0x08, 0x08}, {0x18, 0x08}, {0x28, 0x08}, {0x38, 0x08} },
+};
+
+static void nomadik_ecc_control(struct mtd_info *mtd, int mode)
+{
+ /* No need to enable hw ecc, it's on by default */
+}
+
+static void nomadik_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct nomadik_nand_host *host = nand->priv;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, host->cmd_va);
+ else
+ writeb(cmd, host->addr_va);
+}
+
+static int nomadik_nand_probe(struct platform_device *pdev)
+{
+ struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct nomadik_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ struct resource *res;
+ int ret = 0;
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = kzalloc(sizeof(struct nomadik_nand_host), GFP_KERNEL);
+ if (!host) {
+ dev_err(&pdev->dev, "Failed to allocate device structure.\n");
+ return -ENOMEM;
+ }
+
+ /* Call the client's init function, if any */
+ if (pdata->init)
+ ret = pdata->init();
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Init function failed\n");
+ goto err;
+ }
+
+ /* ioremap three regions */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
+ if (!res) {
+ ret = -EIO;
+ goto err_unmap;
+ }
+ host->addr_va = ioremap(res->start, resource_size(res));
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
+ if (!res) {
+ ret = -EIO;
+ goto err_unmap;
+ }
+ host->data_va = ioremap(res->start, resource_size(res));
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
+ if (!res) {
+ ret = -EIO;
+ goto err_unmap;
+ }
+ host->cmd_va = ioremap(res->start, resource_size(res));
+
+ if (!host->addr_va || !host->data_va || !host->cmd_va) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ /* Link all private pointers */
+ mtd = &host->mtd;
+ nand = &host->nand;
+ mtd->priv = nand;
+ nand->priv = host;
+
+ host->mtd.owner = THIS_MODULE;
+ nand->IO_ADDR_R = host->data_va;
+ nand->IO_ADDR_W = host->data_va;
+ nand->cmd_ctrl = nomadik_cmd_ctrl;
+
+ /*
+ * This stanza declares ECC_HW but uses soft routines. It's because
+ * HW claims to make the calculation but not the correction. However,
+	 * I haven't managed to get the desired data out of it so far.
+ */
+ nand->ecc.mode = NAND_ECC_SOFT;
+ nand->ecc.layout = &nomadik_ecc_layout;
+ nand->ecc.hwctl = nomadik_ecc_control;
+ nand->ecc.size = 512;
+ nand->ecc.bytes = 3;
+
+ nand->options = pdata->options;
+
+ /*
+ * Scan to find existence of the device
+ */
+ if (nand_scan(&host->mtd, 1)) {
+ ret = -ENXIO;
+ goto err_unmap;
+ }
+
+ mtd_device_register(&host->mtd, pdata->parts, pdata->nparts);
+
+ platform_set_drvdata(pdev, host);
+ return 0;
+
+ err_unmap:
+ if (host->cmd_va)
+ iounmap(host->cmd_va);
+ if (host->data_va)
+ iounmap(host->data_va);
+ if (host->addr_va)
+ iounmap(host->addr_va);
+ err:
+ kfree(host);
+ return ret;
+}
+
+/*
+ * Clean up routine
+ */
+static int nomadik_nand_remove(struct platform_device *pdev)
+{
+ struct nomadik_nand_host *host = platform_get_drvdata(pdev);
+ struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
+
+ if (pdata->exit)
+ pdata->exit();
+
+ if (host) {
+ nand_release(&host->mtd);
+ iounmap(host->cmd_va);
+ iounmap(host->data_va);
+ iounmap(host->addr_va);
+ kfree(host);
+ }
+ return 0;
+}
+
+static int nomadik_nand_suspend(struct device *dev)
+{
+ struct nomadik_nand_host *host = dev_get_drvdata(dev);
+ int ret = 0;
+ if (host)
+ ret = mtd_suspend(&host->mtd);
+ return ret;
+}
+
+static int nomadik_nand_resume(struct device *dev)
+{
+ struct nomadik_nand_host *host = dev_get_drvdata(dev);
+ if (host)
+ mtd_resume(&host->mtd);
+ return 0;
+}
+
+static const struct dev_pm_ops nomadik_nand_pm_ops = {
+ .suspend = nomadik_nand_suspend,
+ .resume = nomadik_nand_resume,
+};
+
+static struct platform_driver nomadik_nand_driver = {
+ .probe = nomadik_nand_probe,
+ .remove = nomadik_nand_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "nomadik_nand",
+ .pm = &nomadik_nand_pm_ops,
+ },
+};
+
+module_platform_driver(nomadik_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)");
+MODULE_DESCRIPTION("NAND driver for Nomadik Platform");
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
new file mode 100644
index 00000000..8febe46e
--- /dev/null
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright © 2009 Nuvoton technology corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#define REG_FMICSR 0x00
+#define REG_SMCSR 0xa0
+#define REG_SMISR 0xac
+#define REG_SMCMD 0xb0
+#define REG_SMADDR 0xb4
+#define REG_SMDATA 0xb8
+
+#define RESET_FMI 0x01
+#define NAND_EN 0x08
+#define READYBUSY (0x01 << 18)
+
+#define SWRST 0x01
+#define PSIZE (0x01 << 3)
+#define DMARWEN (0x03 << 1)
+#define BUSWID (0x01 << 4)
+#define ECC4EN (0x01 << 5)
+#define WP (0x01 << 24)
+#define NANDCS (0x01 << 25)
+#define ENDADDR (0x01 << 31)
+
+#define read_data_reg(dev) \
+ __raw_readl((dev)->reg + REG_SMDATA)
+
+#define write_data_reg(dev, val) \
+ __raw_writel((val), (dev)->reg + REG_SMDATA)
+
+#define write_cmd_reg(dev, val) \
+ __raw_writel((val), (dev)->reg + REG_SMCMD)
+
+#define write_addr_reg(dev, val) \
+ __raw_writel((val), (dev)->reg + REG_SMADDR)
+
+struct nuc900_nand {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ void __iomem *reg;
+ struct clk *clk;
+ spinlock_t lock;
+};
+
+static const struct mtd_partition partitions[] = {
+ {
+ .name = "NAND FS 0",
+ .offset = 0,
+ .size = 8 * 1024 * 1024
+ },
+ {
+ .name = "NAND FS 1",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL
+ }
+};
+
+static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd)
+{
+ unsigned char ret;
+ struct nuc900_nand *nand;
+
+ nand = container_of(mtd, struct nuc900_nand, mtd);
+
+ ret = (unsigned char)read_data_reg(nand);
+
+ return ret;
+}
+
+static void nuc900_nand_read_buf(struct mtd_info *mtd,
+ unsigned char *buf, int len)
+{
+ int i;
+ struct nuc900_nand *nand;
+
+ nand = container_of(mtd, struct nuc900_nand, mtd);
+
+ for (i = 0; i < len; i++)
+ buf[i] = (unsigned char)read_data_reg(nand);
+}
+
+static void nuc900_nand_write_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
+{
+ int i;
+ struct nuc900_nand *nand;
+
+ nand = container_of(mtd, struct nuc900_nand, mtd);
+
+ for (i = 0; i < len; i++)
+ write_data_reg(nand, buf[i]);
+}
+
+static int nuc900_verify_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
+{
+ int i;
+ struct nuc900_nand *nand;
+
+ nand = container_of(mtd, struct nuc900_nand, mtd);
+
+ for (i = 0; i < len; i++) {
+ if (buf[i] != (unsigned char)read_data_reg(nand))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int nuc900_check_rb(struct nuc900_nand *nand)
+{
+ unsigned int val;
+ spin_lock(&nand->lock);
+	val = __raw_readl(nand->reg + REG_SMISR);
+ val &= READYBUSY;
+ spin_unlock(&nand->lock);
+
+ return val;
+}
+
+static int nuc900_nand_devready(struct mtd_info *mtd)
+{
+ struct nuc900_nand *nand;
+ int ready;
+
+ nand = container_of(mtd, struct nuc900_nand, mtd);
+
+ ready = (nuc900_check_rb(nand)) ? 1 : 0;
+ return ready;
+}
+
+static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ register struct nand_chip *chip = mtd->priv;
+ struct nuc900_nand *nand;
+
+ nand = container_of(mtd, struct nuc900_nand, mtd);
+
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ write_cmd_reg(nand, command & 0xff);
+
+ if (column != -1 || page_addr != -1) {
+
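+		/*
+		 * ENDADDR (bit 31) is ORed into the last address cycle below;
+		 * it appears to tell the controller that the address phase is
+		 * complete.
+		 */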
+ if (column != -1) {
+ if (chip->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ write_addr_reg(nand, column);
+ write_addr_reg(nand, column >> 8 | ENDADDR);
+ }
+ if (page_addr != -1) {
+ write_addr_reg(nand, page_addr);
+
+ if (chip->chipsize > (128 << 20)) {
+ write_addr_reg(nand, page_addr >> 8);
+ write_addr_reg(nand, page_addr >> 16 | ENDADDR);
+ } else {
+ write_addr_reg(nand, page_addr >> 8 | ENDADDR);
+ }
+ }
+ }
+
+ switch (command) {
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_DEPLETE1:
+ return;
+
+ case NAND_CMD_STATUS_ERROR:
+ case NAND_CMD_STATUS_ERROR0:
+ case NAND_CMD_STATUS_ERROR1:
+ case NAND_CMD_STATUS_ERROR2:
+ case NAND_CMD_STATUS_ERROR3:
+ udelay(chip->chip_delay);
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->dev_ready)
+ break;
+ udelay(chip->chip_delay);
+
+ write_cmd_reg(nand, NAND_CMD_STATUS);
+ write_cmd_reg(nand, command);
+
+ while (!nuc900_check_rb(nand))
+ ;
+
+ return;
+
+ case NAND_CMD_RNDOUT:
+ write_cmd_reg(nand, NAND_CMD_RNDOUTSTART);
+ return;
+
+ case NAND_CMD_READ0:
+
+ write_cmd_reg(nand, NAND_CMD_READSTART);
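+		/* fall through - wait for the device to become ready below */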
+ default:
+
+ if (!chip->dev_ready) {
+ udelay(chip->chip_delay);
+ return;
+ }
+ }
+
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay(100);
+
+ while (!chip->dev_ready(mtd))
+ ;
+}
+
+
+static void nuc900_nand_enable(struct nuc900_nand *nand)
+{
+ unsigned int val;
+ spin_lock(&nand->lock);
+ __raw_writel(RESET_FMI, (nand->reg + REG_FMICSR));
+
+ val = __raw_readl(nand->reg + REG_FMICSR);
+
+ if (!(val & NAND_EN))
+		__raw_writel(val | NAND_EN, nand->reg + REG_FMICSR);
+
+ val = __raw_readl(nand->reg + REG_SMCSR);
+
+ val &= ~(SWRST|PSIZE|DMARWEN|BUSWID|ECC4EN|NANDCS);
+ val |= WP;
+
+ __raw_writel(val, nand->reg + REG_SMCSR);
+
+ spin_unlock(&nand->lock);
+}
+
+static int __devinit nuc900_nand_probe(struct platform_device *pdev)
+{
+ struct nuc900_nand *nuc900_nand;
+ struct nand_chip *chip;
+ int retval;
+ struct resource *res;
+
+ retval = 0;
+
+ nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL);
+ if (!nuc900_nand)
+ return -ENOMEM;
+ chip = &(nuc900_nand->chip);
+
+ nuc900_nand->mtd.priv = chip;
+ nuc900_nand->mtd.owner = THIS_MODULE;
+ spin_lock_init(&nuc900_nand->lock);
+
+ nuc900_nand->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nuc900_nand->clk)) {
+ retval = -ENOENT;
+ goto fail1;
+ }
+ clk_enable(nuc900_nand->clk);
+
+ chip->cmdfunc = nuc900_nand_command_lp;
+ chip->dev_ready = nuc900_nand_devready;
+ chip->read_byte = nuc900_nand_read_byte;
+ chip->write_buf = nuc900_nand_write_buf;
+ chip->read_buf = nuc900_nand_read_buf;
+ chip->verify_buf = nuc900_verify_buf;
+ chip->chip_delay = 50;
+ chip->options = 0;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ retval = -ENXIO;
+ goto fail1;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ retval = -EBUSY;
+ goto fail1;
+ }
+
+ nuc900_nand->reg = ioremap(res->start, resource_size(res));
+ if (!nuc900_nand->reg) {
+ retval = -ENOMEM;
+ goto fail2;
+ }
+
+ nuc900_nand_enable(nuc900_nand);
+
+ if (nand_scan(&(nuc900_nand->mtd), 1)) {
+ retval = -ENXIO;
+ goto fail3;
+ }
+
+ mtd_device_register(&(nuc900_nand->mtd), partitions,
+ ARRAY_SIZE(partitions));
+
+ platform_set_drvdata(pdev, nuc900_nand);
+
+ return retval;
+
+fail3: iounmap(nuc900_nand->reg);
+fail2: release_mem_region(res->start, resource_size(res));
+fail1: kfree(nuc900_nand);
+ return retval;
+}
+
+static int __devexit nuc900_nand_remove(struct platform_device *pdev)
+{
+ struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ nand_release(&nuc900_nand->mtd);
+ iounmap(nuc900_nand->reg);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ clk_disable(nuc900_nand->clk);
+ clk_put(nuc900_nand->clk);
+
+ kfree(nuc900_nand);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver nuc900_nand_driver = {
+ .probe = nuc900_nand_probe,
+ .remove = __devexit_p(nuc900_nand_remove),
+ .driver = {
+ .name = "nuc900-fmi",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(nuc900_nand_driver);
+
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nuc900-fmi");
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
new file mode 100644
index 00000000..c2b0bba9
--- /dev/null
+++ b/drivers/mtd/nand/omap2.c
@@ -0,0 +1,1153 @@
+/*
+ * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
+ * Copyright © 2004 Micron Technology Inc.
+ * Copyright © 2004 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <plat/dma.h>
+#include <plat/gpmc.h>
+#include <plat/nand.h>
+
+#define DRIVER_NAME "omap2-nand"
+#define OMAP_NAND_TIMEOUT_MS 5000
+
+#define NAND_Ecc_P1e (1 << 0)
+#define NAND_Ecc_P2e (1 << 1)
+#define NAND_Ecc_P4e (1 << 2)
+#define NAND_Ecc_P8e (1 << 3)
+#define NAND_Ecc_P16e (1 << 4)
+#define NAND_Ecc_P32e (1 << 5)
+#define NAND_Ecc_P64e (1 << 6)
+#define NAND_Ecc_P128e (1 << 7)
+#define NAND_Ecc_P256e (1 << 8)
+#define NAND_Ecc_P512e (1 << 9)
+#define NAND_Ecc_P1024e (1 << 10)
+#define NAND_Ecc_P2048e (1 << 11)
+
+#define NAND_Ecc_P1o (1 << 16)
+#define NAND_Ecc_P2o (1 << 17)
+#define NAND_Ecc_P4o (1 << 18)
+#define NAND_Ecc_P8o (1 << 19)
+#define NAND_Ecc_P16o (1 << 20)
+#define NAND_Ecc_P32o (1 << 21)
+#define NAND_Ecc_P64o (1 << 22)
+#define NAND_Ecc_P128o (1 << 23)
+#define NAND_Ecc_P256o (1 << 24)
+#define NAND_Ecc_P512o (1 << 25)
+#define NAND_Ecc_P1024o (1 << 26)
+#define NAND_Ecc_P2048o (1 << 27)
+
+#define TF(value) (value ? 1 : 0)
+
+#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
+#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
+#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
+#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
+#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
+#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
+#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
+#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
+
+#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
+#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
+#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
+#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
+
+#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
+#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
+#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
+#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
+#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
+#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
+#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
+#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
+
+#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
+#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
+#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
+#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
+
+#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
+#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
+
+/* oob info generated runtime depending on ecc algorithm and layout selected */
+static struct nand_ecclayout omap_oobinfo;
+/* Define some generic bad / good block scan patterns which are used
+ * while scanning a device for factory marked good / bad blocks
+ */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr bb_descrip_flashbased = {
+ .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern,
+};
+
+
+struct omap_nand_info {
+ struct nand_hw_control controller;
+ struct omap_nand_platform_data *pdata;
+ struct mtd_info mtd;
+ struct nand_chip nand;
+ struct platform_device *pdev;
+
+ int gpmc_cs;
+ unsigned long phys_base;
+ struct completion comp;
+ int dma_ch;
+ int gpmc_irq;
+ enum {
+ OMAP_NAND_IO_READ = 0, /* read */
+ OMAP_NAND_IO_WRITE, /* write */
+ } iomode;
+ u_char *buf;
+ int buf_len;
+};
+
+/**
+ * omap_hwcontrol - hardware specific access to control-lines
+ * @mtd: MTD device structure
+ * @cmd: command to device
+ * @ctrl:
+ * NAND_NCE: bit 0 -> don't care
+ * NAND_CLE: bit 1 -> Command Latch
+ * NAND_ALE: bit 2 -> Address Latch
+ *
+ * NOTE: boards may use different bits for these!!
+ */
+static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+
+ if (cmd != NAND_CMD_NONE) {
+ if (ctrl & NAND_CLE)
+ gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
+
+ else if (ctrl & NAND_ALE)
+ gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
+
+ else /* NAND_NCE */
+ gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
+ }
+}
+
+/**
+ * omap_read_buf8 - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand = mtd->priv;
+
+ ioread8_rep(nand->IO_ADDR_R, buf, len);
+}
+
+/**
+ * omap_write_buf8 - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ u_char *p = (u_char *)buf;
+ u32 status = 0;
+
+ while (len--) {
+ iowrite8(*p++, info->nand.IO_ADDR_W);
+ /* wait until buffer is available for write */
+ do {
+ status = gpmc_read_status(GPMC_STATUS_BUFFER);
+ } while (!status);
+ }
+}
+
+/**
+ * omap_read_buf16 - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand = mtd->priv;
+
+ ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
+}
+
+/**
+ * omap_write_buf16 - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ u16 *p = (u16 *) buf;
+ u32 status = 0;
+ /* FIXME try bursts of writesw() or DMA ... */
+ len >>= 1;
+
+ while (len--) {
+ iowrite16(*p++, info->nand.IO_ADDR_W);
+ /* wait until buffer is available for write */
+ do {
+ status = gpmc_read_status(GPMC_STATUS_BUFFER);
+ } while (!status);
+ }
+}
+
+/**
+ * omap_read_buf_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ uint32_t r_count = 0;
+ int ret = 0;
+ u32 *p = (u32 *)buf;
+
+ /* take care of subpage reads */
+ if (len % 4) {
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len % 4);
+ else
+ omap_read_buf8(mtd, buf, len % 4);
+ p = (u32 *) (buf + len % 4);
+ len -= len % 4;
+ }
+
+ /* configure and start prefetch transfer */
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
+ if (ret) {
+ /* PFPW engine is busy, use cpu copy method */
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, (u_char *)p, len);
+ else
+ omap_read_buf8(mtd, (u_char *)p, len);
+ } else {
+ do {
+ r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ r_count = r_count >> 2;
+ ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
+ p += r_count;
+ len -= r_count << 2;
+ } while (len);
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset(info->gpmc_cs);
+ }
+}
+
+/**
+ * omap_write_buf_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_pref(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ uint32_t w_count = 0;
+ int i = 0, ret = 0;
+ u16 *p = (u16 *)buf;
+ unsigned long tim, limit;
+
+ /* take care of subpage writes */
+ if (len % 2 != 0) {
+ writeb(*buf, info->nand.IO_ADDR_W);
+ p = (u16 *)(buf + 1);
+ len--;
+ }
+
+ /* configure and start prefetch transfer */
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
+ if (ret) {
+ /* PFPW engine is busy, use cpu copy method */
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_write_buf16(mtd, (u_char *)p, len);
+ else
+ omap_write_buf8(mtd, (u_char *)p, len);
+ } else {
+ while (len) {
+ w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ w_count = w_count >> 1;
+ for (i = 0; (i < w_count) && len; i++, len -= 2)
+ iowrite16(*p++, info->nand.IO_ADDR_W);
+ }
+		/* wait for the data to be flushed out before resetting the prefetch */
+ tim = 0;
+ limit = (loops_per_jiffy *
+ msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
+
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset(info->gpmc_cs);
+ }
+}
+
+/*
+ * omap_nand_dma_cb: callback on the completion of dma transfer
+ * @lch: logical channel
+ * @ch_status: channel status
+ * @data: pointer to completion data structure
+ */
+static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
+{
+ complete((struct completion *) data);
+}
+
+/*
+ * omap_nand_dma_transfer: configure and start dma transfer
+ * @mtd: MTD device structure
+ * @addr: virtual address in RAM of source/destination
+ * @len: number of data bytes to be transferred
+ * @is_write: flag for read/write operation
+ */
+static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
+ unsigned int len, int is_write)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE;
+ dma_addr_t dma_addr;
+ int ret;
+ unsigned long tim, limit;
+
+ /* The fifo depth is 64 bytes max.
+	 * But configure the FIFO-threshold to 32 to get a sync at each frame
+ * and frame length is 32 bytes.
+ */
+ int buf_len = len >> 6;
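+	/*
+	 * buf_len is used as the DMA frame count below; each frame is
+	 * programmed as 16 (0x10) 32-bit elements in
+	 * omap_set_dma_transfer_params().
+	 */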
+
+ if (addr >= high_memory) {
+ struct page *p1;
+
+ if (((size_t)addr & PAGE_MASK) !=
+ ((size_t)(addr + len - 1) & PAGE_MASK))
+ goto out_copy;
+ p1 = vmalloc_to_page(addr);
+ if (!p1)
+ goto out_copy;
+ addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
+ }
+
+ dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
+ if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
+ dev_err(&info->pdev->dev,
+ "Couldn't DMA map a %d byte buffer\n", len);
+ goto out_copy;
+ }
+
+ if (is_write) {
+ omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
+ info->phys_base, 0, 0);
+ omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_addr, 0, 0);
+ omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
+ 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
+ OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
+ } else {
+ omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
+ info->phys_base, 0, 0);
+ omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_addr, 0, 0);
+ omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
+ 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
+ OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
+ }
+ /* configure and start prefetch transfer */
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ init_completion(&info->comp);
+
+ omap_start_dma(info->dma_ch);
+
+ /* setup and start DMA using dma_addr */
+ wait_for_completion(&info->comp);
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
+
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset(info->gpmc_cs);
+
+ dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+ return 0;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
+ : omap_write_buf16(mtd, (u_char *) addr, len);
+ else
+ is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
+ : omap_write_buf8(mtd, (u_char *) addr, len);
+ return 0;
+}
+
+/**
+ * omap_read_buf_dma_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store date
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+ if (len <= mtd->oobsize)
+ omap_read_buf_pref(mtd, buf, len);
+ else
+ /* start transfer in DMA mode */
+ omap_nand_dma_transfer(mtd, buf, len, 0x0);
+}
+
+/**
+ * omap_write_buf_dma_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_dma_pref(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ if (len <= mtd->oobsize)
+ omap_write_buf_pref(mtd, buf, len);
+ else
+ /* start transfer in DMA mode */
+ omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
+}
+
+/*
+ * omap_nand_irq - GPMC irq handler
+ * @this_irq: gpmc irq number
+ * @dev: omap_nand_info structure pointer is passed here
+ */
+static irqreturn_t omap_nand_irq(int this_irq, void *dev)
+{
+ struct omap_nand_info *info = (struct omap_nand_info *) dev;
+ u32 bytes;
+ u32 irq_stat;
+
+ irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+ bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
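+	/*
+	 * Bit 1 of the IRQ status is the terminal-count event
+	 * (GPMC_IRQ_COUNT_EVENT, enabled by the callers); it signals that
+	 * the whole transfer has completed.
+	 */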
+ if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
+ if (irq_stat & 0x2)
+ goto done;
+
+ if (info->buf_len && (info->buf_len < bytes))
+ bytes = info->buf_len;
+ else if (!info->buf_len)
+ bytes = 0;
+ iowrite32_rep(info->nand.IO_ADDR_W,
+ (u32 *)info->buf, bytes >> 2);
+ info->buf = info->buf + bytes;
+ info->buf_len -= bytes;
+
+ } else {
+ ioread32_rep(info->nand.IO_ADDR_R,
+ (u32 *)info->buf, bytes >> 2);
+ info->buf = info->buf + bytes;
+
+ if (irq_stat & 0x2)
+ goto done;
+ }
+ gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+ return IRQ_HANDLED;
+
+done:
+ complete(&info->comp);
+ /* disable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
+
+ /* clear status */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * omap_read_buf_irq_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ int ret = 0;
+
+ if (len <= mtd->oobsize) {
+ omap_read_buf_pref(mtd, buf, len);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_READ;
+ info->buf = buf;
+ init_completion(&info->comp);
+
+ /* configure and start prefetch transfer */
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ info->buf_len = len;
+ /* enable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+ (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ /* waiting for read to complete */
+ wait_for_completion(&info->comp);
+
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset(info->gpmc_cs);
+ return;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len);
+ else
+ omap_read_buf8(mtd, buf, len);
+}
+
+/*
+ * omap_write_buf_irq_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_irq_pref(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ int ret = 0;
+ unsigned long tim, limit;
+
+ if (len <= mtd->oobsize) {
+ omap_write_buf_pref(mtd, buf, len);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_WRITE;
+ info->buf = (u_char *) buf;
+ init_completion(&info->comp);
+
+ /* configure and start prefetch transfer : size=24 */
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ info->buf_len = len;
+ /* enable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+ (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ /* waiting for write to complete */
+ wait_for_completion(&info->comp);
+	/* wait for the data to be flushed out before resetting the prefetch */
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
+
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset(info->gpmc_cs);
+ return;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_write_buf16(mtd, buf, len);
+ else
+ omap_write_buf8(mtd, buf, len);
+}
+
+/**
+ * omap_verify_buf - Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ */
+static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ u16 *p = (u16 *) buf;
+
+ len >>= 1;
+ while (len--) {
+ if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * gen_true_ecc - This function will generate true ECC value
+ * @ecc_buf: buffer to store ecc code
+ *
+ * This generated true ECC value can be used when correcting
+ * data read from NAND flash memory core
+ */
+static void gen_true_ecc(u8 *ecc_buf)
+{
+ u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
+ ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
+
+ ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
+ P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
+ ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
+ P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
+ ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
+ P1e(tmp) | P2048o(tmp) | P2048e(tmp));
+}
+
+/**
+ * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
+ * @ecc_data1: ecc code from nand spare area
+ * @ecc_data2: ecc code from hardware register obtained from hardware ecc
+ * @page_data: page data
+ *
+ * This function compares two ECC's and indicates if there is an error.
+ * If the error can be corrected it will be corrected to the buffer.
+ * If there is no error, %0 is returned. If there is an error but it
+ * was corrected, %1 is returned. Otherwise, %-1 is returned.
+ */
+static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
+ u8 *ecc_data2, /* read from register */
+ u8 *page_data)
+{
+ uint i;
+ u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
+ u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
+ u8 ecc_bit[24];
+ u8 ecc_sum = 0;
+ u8 find_bit = 0;
+ uint find_byte = 0;
+ int isEccFF;
+
+ isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
+
+ gen_true_ecc(ecc_data1);
+ gen_true_ecc(ecc_data2);
+
+ for (i = 0; i <= 2; i++) {
+ *(ecc_data1 + i) = ~(*(ecc_data1 + i));
+ *(ecc_data2 + i) = ~(*(ecc_data2 + i));
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp0_bit[i] = *ecc_data1 % 2;
+ *ecc_data1 = *ecc_data1 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp1_bit[i] = *(ecc_data1 + 1) % 2;
+ *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp2_bit[i] = *(ecc_data1 + 2) % 2;
+ *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp0_bit[i] = *ecc_data2 % 2;
+ *ecc_data2 = *ecc_data2 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp1_bit[i] = *(ecc_data2 + 1) % 2;
+ *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp2_bit[i] = *(ecc_data2 + 2) % 2;
+ *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
+ }
+
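+	/*
+	 * XOR the stored and calculated ECC bit by bit. For a single-bit
+	 * data error exactly half of the 24 result bits differ (ecc_sum ==
+	 * 12 below) and they encode the byte and bit position of the error.
+	 */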
+ for (i = 0; i < 6; i++)
+ ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
+
+ ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
+ ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
+
+ for (i = 0; i < 24; i++)
+ ecc_sum += ecc_bit[i];
+
+ switch (ecc_sum) {
+ case 0:
+ /* Not reached because this function is not called if
+ * ECC values are equal
+ */
+ return 0;
+
+ case 1:
+ /* Uncorrectable error */
+ pr_debug("ECC UNCORRECTED_ERROR 1\n");
+ return -1;
+
+ case 11:
+		/* Uncorrectable error */
+ pr_debug("ECC UNCORRECTED_ERROR B\n");
+ return -1;
+
+ case 12:
+ /* Correctable error */
+ find_byte = (ecc_bit[23] << 8) +
+ (ecc_bit[21] << 7) +
+ (ecc_bit[19] << 6) +
+ (ecc_bit[17] << 5) +
+ (ecc_bit[15] << 4) +
+ (ecc_bit[13] << 3) +
+ (ecc_bit[11] << 2) +
+ (ecc_bit[9] << 1) +
+ ecc_bit[7];
+
+ find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
+
+ pr_debug("Correcting single bit ECC error at offset: "
+ "%d, bit: %d\n", find_byte, find_bit);
+
+ page_data[find_byte] ^= (1 << find_bit);
+
+ return 1;
+ default:
+ if (isEccFF) {
+ if (ecc_data2[0] == 0 &&
+ ecc_data2[1] == 0 &&
+ ecc_data2[2] == 0)
+ return 0;
+ }
+ pr_debug("UNCORRECTED_ERROR default\n");
+ return -1;
+ }
+}
+
+/**
+ * omap_correct_data - Compares the ECC read with HW generated ECC
+ * @mtd: MTD device structure
+ * @dat: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * Compares the ECC read from the NAND spare area with the ECC register
+ * values and, if they mismatch, calls 'omap_compare_ecc' for error
+ * detection and correction. If there are no errors, %0 is returned. If
+ * there were errors and all of the errors were corrected, the number of
+ * corrected errors is returned. If uncorrectable errors exist, %-1 is
+ * returned.
+ */
+static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ int blockCnt = 0, i = 0, ret = 0;
+ int stat = 0;
+
+	/* e.g. hardware ECC on a 2048 byte page: four 512 byte blocks */
+ if ((info->nand.ecc.mode == NAND_ECC_HW) &&
+ (info->nand.ecc.size == 2048))
+ blockCnt = 4;
+ else
+ blockCnt = 1;
+
+ for (i = 0; i < blockCnt; i++) {
+ if (memcmp(read_ecc, calc_ecc, 3) != 0) {
+ ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
+ if (ret < 0)
+ return ret;
+ /* keep track of the number of corrected errors */
+ stat += ret;
+ }
+ read_ecc += 3;
+ calc_ecc += 3;
+ dat += 512;
+ }
+ return stat;
+}
+
+/**
+ * omap_calculate_ecc - Generate non-inverted ECC bytes.
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Using non-inverted ECC can be considered ugly since writing a blank
+ * page (i.e. padding) will clear the ECC bytes. This is no problem as long
+ * as nobody is trying to write data on the seemingly unused page. Reading
+ * an erased page will produce an ECC mismatch between generated and read
+ * ECC bytes that has to be dealt with separately.
+ */
+static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
+}
+
+/**
+ * omap_enable_hwecc - This function enables the hardware ecc functionality
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+
+ gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
+}
+
+/**
+ * omap_wait - wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND Chip structure
+ *
+ * The wait function is called during program and erase operations. Given
+ * the way it is called from the MTD layer, we should wait until the NAND
+ * chip is ready after the program/erase operation has completed.
+ *
+ * Erase can take up to 400ms and program up to 20ms according to
+ * general NAND and SmartMedia specs
+ */
+static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long timeo = jiffies;
+ int status = NAND_STATUS_FAIL, state = this->state;
+
+ if (state == FL_ERASING)
+ timeo += (HZ * 400) / 1000;
+ else
+ timeo += (HZ * 20) / 1000;
+
+ gpmc_nand_write(info->gpmc_cs,
+ GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
+ while (time_before(jiffies, timeo)) {
+ status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
+ if (status & NAND_STATUS_READY)
+ break;
+ cond_resched();
+ }
+ return status;
+}
+
+/**
+ * omap_dev_ready - calls the platform specific dev_ready function
+ * @mtd: MTD device structure
+ */
+static int omap_dev_ready(struct mtd_info *mtd)
+{
+ unsigned int val = 0;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+
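+	/* check the ready status bit (0x100) in the GPMC IRQ status and clear it */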
+ val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+ if ((val & 0x100) == 0x100) {
+ /* Clear IRQ Interrupt */
+ val |= 0x100;
+ val &= ~(0x0);
+ gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
+ } else {
+ unsigned int cnt = 0;
+ while (cnt++ < 0x1FF) {
+ if ((val & 0x100) == 0x100)
+ return 0;
+ val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+ }
+ }
+
+ return 1;
+}
+
+static int __devinit omap_nand_probe(struct platform_device *pdev)
+{
+ struct omap_nand_info *info;
+ struct omap_nand_platform_data *pdata;
+ int err;
+ int i, offset;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data missing\n");
+ return -ENODEV;
+ }
+
+ info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, info);
+
+ spin_lock_init(&info->controller.lock);
+ init_waitqueue_head(&info->controller.wq);
+
+ info->pdev = pdev;
+
+ info->gpmc_cs = pdata->cs;
+ info->phys_base = pdata->phys_base;
+
+ info->mtd.priv = &info->nand;
+ info->mtd.name = dev_name(&pdev->dev);
+ info->mtd.owner = THIS_MODULE;
+
+ info->nand.options = pdata->devsize;
+ info->nand.options |= NAND_SKIP_BBTSCAN;
+
+ /* NAND write protect off */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
+
+ if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
+ pdev->dev.driver->name)) {
+ err = -EBUSY;
+ goto out_free_info;
+ }
+
+ info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
+ if (!info->nand.IO_ADDR_R) {
+ err = -ENOMEM;
+ goto out_release_mem_region;
+ }
+
+ info->nand.controller = &info->controller;
+
+ info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
+ info->nand.cmd_ctrl = omap_hwcontrol;
+
+ /*
+	 * If the RDY/BSY line is connected to OMAP, then use the omap ready
+	 * function and the generic nand_wait function, which reads the status
+	 * register after monitoring the RDY/BSY line. Otherwise use a standard
+	 * chip delay, which is slightly more than tR (AC Timing) of the NAND
+	 * device, and read the status register until you get a failure or success
+ */
+ if (pdata->dev_ready) {
+ info->nand.dev_ready = omap_dev_ready;
+ info->nand.chip_delay = 0;
+ } else {
+ info->nand.waitfunc = omap_wait;
+ info->nand.chip_delay = 50;
+ }
+
+ switch (pdata->xfer_type) {
+ case NAND_OMAP_PREFETCH_POLLED:
+ info->nand.read_buf = omap_read_buf_pref;
+ info->nand.write_buf = omap_write_buf_pref;
+ break;
+
+ case NAND_OMAP_POLLED:
+ if (info->nand.options & NAND_BUSWIDTH_16) {
+ info->nand.read_buf = omap_read_buf16;
+ info->nand.write_buf = omap_write_buf16;
+ } else {
+ info->nand.read_buf = omap_read_buf8;
+ info->nand.write_buf = omap_write_buf8;
+ }
+ break;
+
+ case NAND_OMAP_PREFETCH_DMA:
+ err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
+ omap_nand_dma_cb, &info->comp, &info->dma_ch);
+ if (err < 0) {
+ info->dma_ch = -1;
+ dev_err(&pdev->dev, "DMA request failed!\n");
+ goto out_release_mem_region;
+ } else {
+ omap_set_dma_dest_burst_mode(info->dma_ch,
+ OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_src_burst_mode(info->dma_ch,
+ OMAP_DMA_DATA_BURST_16);
+
+ info->nand.read_buf = omap_read_buf_dma_pref;
+ info->nand.write_buf = omap_write_buf_dma_pref;
+ }
+ break;
+
+ case NAND_OMAP_PREFETCH_IRQ:
+ err = request_irq(pdata->gpmc_irq,
+ omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+ if (err) {
+ dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+ pdata->gpmc_irq, err);
+ goto out_release_mem_region;
+ } else {
+ info->gpmc_irq = pdata->gpmc_irq;
+ info->nand.read_buf = omap_read_buf_irq_pref;
+ info->nand.write_buf = omap_write_buf_irq_pref;
+ }
+ break;
+
+ default:
+ dev_err(&pdev->dev,
+ "xfer_type(%d) not supported!\n", pdata->xfer_type);
+ err = -EINVAL;
+ goto out_release_mem_region;
+ }
+
+ info->nand.verify_buf = omap_verify_buf;
+
+	/* select the ECC type */
+ if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
+ info->nand.ecc.mode = NAND_ECC_SOFT;
+ else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
+ (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
+ info->nand.ecc.bytes = 3;
+ info->nand.ecc.size = 512;
+ info->nand.ecc.strength = 1;
+ info->nand.ecc.calculate = omap_calculate_ecc;
+ info->nand.ecc.hwctl = omap_enable_hwecc;
+ info->nand.ecc.correct = omap_correct_data;
+ info->nand.ecc.mode = NAND_ECC_HW;
+ }
+
+ /* DIP switches on some boards change between 8 and 16 bit
+ * bus widths for flash. Try the other width if the first try fails.
+ */
+ if (nand_scan_ident(&info->mtd, 1, NULL)) {
+ info->nand.options ^= NAND_BUSWIDTH_16;
+ if (nand_scan_ident(&info->mtd, 1, NULL)) {
+ err = -ENXIO;
+ goto out_release_mem_region;
+ }
+ }
+
+ /* rom code layout */
+ if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
+
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ offset = 2;
+ else {
+ offset = 1;
+ info->nand.badblock_pattern = &bb_descrip_flashbased;
+ }
+ omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
+ for (i = 0; i < omap_oobinfo.eccbytes; i++)
+ omap_oobinfo.eccpos[i] = i+offset;
+
+ omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
+ omap_oobinfo.oobfree->length = info->mtd.oobsize -
+ (offset + omap_oobinfo.eccbytes);
+
+ info->nand.ecc.layout = &omap_oobinfo;
+ }
+
+ /* second phase scan */
+ if (nand_scan_tail(&info->mtd)) {
+ err = -ENXIO;
+ goto out_release_mem_region;
+ }
+
+ mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
+ pdata->nr_parts);
+
+ platform_set_drvdata(pdev, &info->mtd);
+
+ return 0;
+
+out_release_mem_region:
+ release_mem_region(info->phys_base, NAND_IO_SIZE);
+out_free_info:
+ kfree(info);
+
+ return err;
+}
+
+static int omap_nand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+
+ platform_set_drvdata(pdev, NULL);
+ if (info->dma_ch != -1)
+ omap_free_dma(info->dma_ch);
+
+ if (info->gpmc_irq)
+ free_irq(info->gpmc_irq, info);
+
+ /* Release NAND device, its internal structures and partitions */
+ nand_release(&info->mtd);
+ iounmap(info->nand.IO_ADDR_R);
+	kfree(info);
+ return 0;
+}
+
+static struct platform_driver omap_nand_driver = {
+ .probe = omap_nand_probe,
+ .remove = omap_nand_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(omap_nand_driver);
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
new file mode 100644
index 00000000..1d3bfb26
--- /dev/null
+++ b/drivers/mtd/nand/orion_nand.c
@@ -0,0 +1,188 @@
+/*
+ * drivers/mtd/nand/orion_nand.c
+ *
+ * NAND support for Marvell Orion SoC platforms
+ *
+ * Tzachi Perelstein <tzachi@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <asm/sizes.h>
+#include <mach/hardware.h>
+#include <plat/orion_nand.h>
+
+static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *nc = mtd->priv;
+ struct orion_nand_data *board = nc->priv;
+ u32 offs;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
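+	/* CLE and ALE are driven by dedicated address bits of the write access */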
+ if (ctrl & NAND_CLE)
+ offs = (1 << board->cle);
+ else if (ctrl & NAND_ALE)
+ offs = (1 << board->ale);
+ else
+ return;
+
+ if (nc->options & NAND_BUSWIDTH_16)
+ offs <<= 1;
+
+ writeb(cmd, nc->IO_ADDR_W + offs);
+}
+
+static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ void __iomem *io_base = chip->IO_ADDR_R;
+ uint64_t *buf64;
+ int i = 0;
+
+ while (len && (unsigned long)buf & 7) {
+ *buf++ = readb(io_base);
+ len--;
+ }
+ buf64 = (uint64_t *)buf;
+ while (i < len/8) {
+ /*
+ * Since GCC has no proper constraint (PR 43518)
+ * force x variable to r2/r3 registers as ldrd instruction
+ * requires first register to be even.
+ */
+ register uint64_t x asm ("r2");
+
+ asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
+ buf64[i++] = x;
+ }
+ i *= 8;
+ while (i < len)
+ buf[i++] = readb(io_base);
+}
+
+static int __init orion_nand_probe(struct platform_device *pdev)
+{
+ struct mtd_info *mtd;
+ struct nand_chip *nc;
+ struct orion_nand_data *board;
+ struct resource *res;
+ void __iomem *io_base;
+ int ret = 0;
+
+ nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
+ if (!nc) {
+ printk(KERN_ERR "orion_nand: failed to allocate device structure.\n");
+ ret = -ENOMEM;
+ goto no_res;
+ }
+ mtd = (struct mtd_info *)(nc + 1);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto no_res;
+ }
+
+ io_base = ioremap(res->start, resource_size(res));
+ if (!io_base) {
+ printk(KERN_ERR "orion_nand: ioremap failed\n");
+ ret = -EIO;
+ goto no_res;
+ }
+
+ board = pdev->dev.platform_data;
+
+ mtd->priv = nc;
+ mtd->owner = THIS_MODULE;
+
+ nc->priv = board;
+ nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
+ nc->cmd_ctrl = orion_nand_cmd_ctrl;
+ nc->read_buf = orion_nand_read_buf;
+ nc->ecc.mode = NAND_ECC_SOFT;
+
+ if (board->chip_delay)
+ nc->chip_delay = board->chip_delay;
+
+ if (board->width == 16)
+ nc->options |= NAND_BUSWIDTH_16;
+
+ if (board->dev_ready)
+ nc->dev_ready = board->dev_ready;
+
+ platform_set_drvdata(pdev, mtd);
+
+ if (nand_scan(mtd, 1)) {
+ ret = -ENXIO;
+ goto no_dev;
+ }
+
+ mtd->name = "orion_nand";
+ ret = mtd_device_parse_register(mtd, NULL, NULL, board->parts,
+ board->nr_parts);
+ if (ret) {
+ nand_release(mtd);
+ goto no_dev;
+ }
+
+ return 0;
+
+no_dev:
+ platform_set_drvdata(pdev, NULL);
+ iounmap(io_base);
+no_res:
+ kfree(nc);
+
+ return ret;
+}
+
+static int __devexit orion_nand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct nand_chip *nc = mtd->priv;
+
+ nand_release(mtd);
+
+ iounmap(nc->IO_ADDR_W);
+
+ kfree(nc);
+
+ return 0;
+}
+
+static struct platform_driver orion_nand_driver = {
+ .remove = __devexit_p(orion_nand_remove),
+ .driver = {
+ .name = "orion_nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init orion_nand_init(void)
+{
+ return platform_driver_probe(&orion_nand_driver, orion_nand_probe);
+}
+
+static void __exit orion_nand_exit(void)
+{
+ platform_driver_unregister(&orion_nand_driver);
+}
+
+module_init(orion_nand_init);
+module_exit(orion_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tzachi Perelstein");
+MODULE_DESCRIPTION("NAND glue for Orion platforms");
+MODULE_ALIAS("platform:orion_nand");
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
new file mode 100644
index 00000000..974dbf82
--- /dev/null
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Author: Egor Martovetsky <egor@pasemi.com>
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient onchip NAND flash interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#undef DEBUG
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
+#include <asm/io.h>
+
+#define LBICTRL_LPCCTL_NR 0x00004000
+#define CLE_PIN_CTL 15
+#define ALE_PIN_CTL 14
+
+static unsigned int lpcctl;
+static struct mtd_info *pasemi_nand_mtd;
+static const char driver_name[] = "pasemi-nand";
+
+static void pasemi_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+
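+	/* transfer at most 0x800 bytes per memcpy_fromio() call */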
+ while (len > 0x800) {
+ memcpy_fromio(buf, chip->IO_ADDR_R, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_fromio(buf, chip->IO_ADDR_R, len);
+}
+
+static void pasemi_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ while (len > 0x800) {
+ memcpy_toio(chip->IO_ADDR_R, buf, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_toio(chip->IO_ADDR_R, buf, len);
+}
+
+static void pasemi_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
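+	/* CLE and ALE are selected via address bits 15 and 14 of the write */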
+ if (ctrl & NAND_CLE)
+ out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
+ else
+ out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
+
+ /* Push out posted writes */
+ eieio();
+ inl(lpcctl);
+}
+
+int pasemi_device_ready(struct mtd_info *mtd)
+{
+ return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
+}
+
+static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
+{
+ struct pci_dev *pdev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct resource res;
+ struct nand_chip *chip;
+ int err = 0;
+
+ err = of_address_to_resource(np, 0, &res);
+
+ if (err)
+ return -EINVAL;
+
+ /* We only support one device at the moment */
+ if (pasemi_nand_mtd)
+ return -ENODEV;
+
+ pr_debug("pasemi_nand at %pR\n", &res);
+
+ /* Allocate memory for MTD device structure and private data */
+ pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
+ sizeof(struct nand_chip), GFP_KERNEL);
+ if (!pasemi_nand_mtd) {
+ printk(KERN_WARNING
+ "Unable to allocate PASEMI NAND MTD device structure\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Get pointer to private data */
+ chip = (struct nand_chip *)&pasemi_nand_mtd[1];
+
+ /* Link the private data with the MTD structure */
+ pasemi_nand_mtd->priv = chip;
+ pasemi_nand_mtd->owner = THIS_MODULE;
+
+ chip->IO_ADDR_R = of_iomap(np, 0);
+ chip->IO_ADDR_W = chip->IO_ADDR_R;
+
+ if (!chip->IO_ADDR_R) {
+ err = -EIO;
+ goto out_mtd;
+ }
+
+ pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
+ if (!pdev) {
+ err = -ENODEV;
+ goto out_ior;
+ }
+
+ lpcctl = pci_resource_start(pdev, 0);
+ pci_dev_put(pdev);
+
+ if (!request_region(lpcctl, 4, driver_name)) {
+ err = -EBUSY;
+ goto out_ior;
+ }
+
+ chip->cmd_ctrl = pasemi_hwcontrol;
+ chip->dev_ready = pasemi_device_ready;
+ chip->read_buf = pasemi_read_buf;
+ chip->write_buf = pasemi_write_buf;
+ chip->chip_delay = 0;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ /* Enable the following for a flash based bad block table */
+ chip->options = NAND_NO_AUTOINCR;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(pasemi_nand_mtd, 1)) {
+ err = -ENXIO;
+ goto out_lpc;
+ }
+
+ if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
+ printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
+ err = -ENODEV;
+ goto out_lpc;
+ }
+
+ printk(KERN_INFO "PA Semi NAND flash at %08llx, control at I/O %x\n",
+ res.start, lpcctl);
+
+ return 0;
+
+ out_lpc:
+ release_region(lpcctl, 4);
+ out_ior:
+ iounmap(chip->IO_ADDR_R);
+ out_mtd:
+ kfree(pasemi_nand_mtd);
+ out:
+ return err;
+}
+
+static int __devexit pasemi_nand_remove(struct platform_device *ofdev)
+{
+ struct nand_chip *chip;
+
+ if (!pasemi_nand_mtd)
+ return 0;
+
+ chip = pasemi_nand_mtd->priv;
+
+ /* Release resources, unregister device */
+ nand_release(pasemi_nand_mtd);
+
+ release_region(lpcctl, 4);
+
+ iounmap(chip->IO_ADDR_R);
+
+ /* Free the MTD device structure */
+ kfree(pasemi_nand_mtd);
+
+ pasemi_nand_mtd = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id pasemi_nand_match[] =
+{
+ {
+ .compatible = "pasemi,localbus-nand",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, pasemi_nand_match);
+
+static struct platform_driver pasemi_nand_driver =
+{
+ .driver = {
+ .name = (char*)driver_name,
+ .owner = THIS_MODULE,
+ .of_match_table = pasemi_nand_match,
+ },
+ .probe = pasemi_nand_probe,
+ .remove = pasemi_nand_remove,
+};
+
+module_platform_driver(pasemi_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
new file mode 100644
index 00000000..6404e6e8
--- /dev/null
+++ b/drivers/mtd/nand/plat_nand.c
@@ -0,0 +1,157 @@
+/*
+ * Generic NAND driver
+ *
+ * Author: Vitaly Wool <vitalywool@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+struct plat_nand_data {
+ struct nand_chip chip;
+ struct mtd_info mtd;
+ void __iomem *io_base;
+};
+
+/*
+ * Probe for the NAND device.
+ */
+static int __devinit plat_nand_probe(struct platform_device *pdev)
+{
+ struct platform_nand_data *pdata = pdev->dev.platform_data;
+ struct plat_nand_data *data;
+ struct resource *res;
+ int err = 0;
+
+ if (pdata->chip.nr_chips < 1) {
+ dev_err(&pdev->dev, "invalid number of chips specified\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+
+ /* Allocate memory for the device structure (and zero it) */
+ data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev, "failed to allocate device structure.\n");
+ return -ENOMEM;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ err = -EBUSY;
+ goto out_free;
+ }
+
+ data->io_base = ioremap(res->start, resource_size(res));
+ if (data->io_base == NULL) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto out_release_io;
+ }
+
+	data->chip.priv = data;
+ data->mtd.priv = &data->chip;
+ data->mtd.owner = THIS_MODULE;
+ data->mtd.name = dev_name(&pdev->dev);
+
+ data->chip.IO_ADDR_R = data->io_base;
+ data->chip.IO_ADDR_W = data->io_base;
+ data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
+ data->chip.dev_ready = pdata->ctrl.dev_ready;
+ data->chip.select_chip = pdata->ctrl.select_chip;
+ data->chip.write_buf = pdata->ctrl.write_buf;
+ data->chip.read_buf = pdata->ctrl.read_buf;
+ data->chip.chip_delay = pdata->chip.chip_delay;
+ data->chip.options |= pdata->chip.options;
+ data->chip.bbt_options |= pdata->chip.bbt_options;
+
+ data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
+ data->chip.ecc.layout = pdata->chip.ecclayout;
+ data->chip.ecc.mode = NAND_ECC_SOFT;
+
+ platform_set_drvdata(pdev, data);
+
+ /* Handle any platform specific setup */
+ if (pdata->ctrl.probe) {
+ err = pdata->ctrl.probe(pdev);
+ if (err)
+ goto out;
+ }
+
+ /* Scan to find existence of the device */
+ if (nand_scan(&data->mtd, pdata->chip.nr_chips)) {
+ err = -ENXIO;
+ goto out;
+ }
+
+ err = mtd_device_parse_register(&data->mtd,
+ pdata->chip.part_probe_types, NULL,
+ pdata->chip.partitions,
+ pdata->chip.nr_partitions);
+
+ if (!err)
+ return err;
+
+ nand_release(&data->mtd);
+out:
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
+ platform_set_drvdata(pdev, NULL);
+ iounmap(data->io_base);
+out_release_io:
+ release_mem_region(res->start, resource_size(res));
+out_free:
+ kfree(data);
+ return err;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int __devexit plat_nand_remove(struct platform_device *pdev)
+{
+ struct plat_nand_data *data = platform_get_drvdata(pdev);
+ struct platform_nand_data *pdata = pdev->dev.platform_data;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ nand_release(&data->mtd);
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
+ iounmap(data->io_base);
+ release_mem_region(res->start, resource_size(res));
+ kfree(data);
+
+ return 0;
+}
+
+static struct platform_driver plat_nand_driver = {
+ .probe = plat_nand_probe,
+ .remove = __devexit_p(plat_nand_remove),
+ .driver = {
+ .name = "gen_nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(plat_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vitaly Wool");
+MODULE_DESCRIPTION("Simple generic NAND driver");
+MODULE_ALIAS("platform:gen_nand");
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
new file mode 100644
index 00000000..0ddd90e5
--- /dev/null
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -0,0 +1,403 @@
+/*
+ * drivers/mtd/nand/ppchameleonevb.c
+ *
+ * Copyright (C) 2003 DAVE Srl (info@wawnet.biz)
+ *
+ * Derived from drivers/mtd/nand/edb7312.c
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash devices found on the
+ * PPChameleon/PPChameleonEVB system.
+ * PPChameleon options (autodetected):
+ * - BA model: no NAND
+ * - ME model: 32MB (Samsung K9F5608U0B)
+ * - HI model: 128MB (Samsung K9F1G08UOM)
+ * PPChameleonEVB options:
+ * - 32MB (Samsung K9F5608U0B)
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <platforms/PPChameleonEVB.h>
+
+#undef USE_READY_BUSY_PIN
+#define USE_READY_BUSY_PIN
+/* see datasheets (tR) */
+#define NAND_BIG_DELAY_US 25
+#define NAND_SMALL_DELAY_US 10
+
+/* handy sizes */
+#define SZ_4M 0x00400000
+#define NAND_SMALL_SIZE 0x02000000
+#define NAND_MTD_NAME "ppchameleon-nand"
+#define NAND_EVB_MTD_NAME "ppchameleonevb-nand"
+
+/* GPIO pins used to drive NAND chip mounted on processor module */
+#define NAND_nCE_GPIO_PIN (0x80000000 >> 1)
+#define NAND_CLE_GPIO_PIN (0x80000000 >> 2)
+#define NAND_ALE_GPIO_PIN (0x80000000 >> 3)
+#define NAND_RB_GPIO_PIN (0x80000000 >> 4)
+/* GPIO pins used to drive NAND chip mounted on EVB */
+#define NAND_EVB_nCE_GPIO_PIN (0x80000000 >> 14)
+#define NAND_EVB_CLE_GPIO_PIN (0x80000000 >> 15)
+#define NAND_EVB_ALE_GPIO_PIN (0x80000000 >> 16)
+#define NAND_EVB_RB_GPIO_PIN (0x80000000 >> 31)
+
+/*
+ * MTD structure for PPChameleonEVB board
+ */
+static struct mtd_info *ppchameleon_mtd = NULL;
+static struct mtd_info *ppchameleonevb_mtd = NULL;
+
+/*
+ * Module stuff
+ */
+static unsigned long ppchameleon_fio_pbase = CFG_NAND0_PADDR;
+static unsigned long ppchameleonevb_fio_pbase = CFG_NAND1_PADDR;
+
+#ifdef MODULE
+module_param(ppchameleon_fio_pbase, ulong, 0);
+module_param(ppchameleonevb_fio_pbase, ulong, 0);
+#else
+__setup("ppchameleon_fio_pbase=", ppchameleon_fio_pbase);
+__setup("ppchameleonevb_fio_pbase=", ppchameleonevb_fio_pbase);
+#endif
+
+/*
+ * Define static partitions for flash devices
+ */
+static struct mtd_partition partition_info_hi[] = {
+ { .name = "PPChameleon HI Nand Flash",
+ .offset = 0,
+ .size = 128 * 1024 * 1024
+ }
+};
+
+static struct mtd_partition partition_info_me[] = {
+ { .name = "PPChameleon ME Nand Flash",
+ .offset = 0,
+ .size = 32 * 1024 * 1024
+ }
+};
+
+static struct mtd_partition partition_info_evb[] = {
+ { .name = "PPChameleonEVB Nand Flash",
+ .offset = 0,
+ .size = 32 * 1024 * 1024
+ }
+};
+
+#define NUM_PARTITIONS 1
+
+/*
+ * hardware specific access to control-lines
+ */
+static void ppchameleon_hwcontrol(struct mtd_info *mtdinfo, int cmd,
+ unsigned int ctrl)
+{
+	struct nand_chip *chip = mtdinfo->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+#error Missing headerfiles. No way to fix this. -tglx
+ switch (cmd) {
+ case NAND_CTL_SETCLE:
+ MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND0_PADDR);
+ break;
+ case NAND_CTL_CLRCLE:
+ MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND0_PADDR);
+ break;
+ case NAND_CTL_SETALE:
+ MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND0_PADDR);
+ break;
+ case NAND_CTL_CLRALE:
+ MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND0_PADDR);
+ break;
+ case NAND_CTL_SETNCE:
+ MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND0_PADDR);
+ break;
+ case NAND_CTL_CLRNCE:
+ MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND0_PADDR);
+ break;
+ }
+ }
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, chip->IO_ADDR_W);
+}
+
+static void ppchameleonevb_hwcontrol(struct mtd_info *mtdinfo, int cmd,
+ unsigned int ctrl)
+{
+	struct nand_chip *chip = mtdinfo->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+#error Missing headerfiles. No way to fix this. -tglx
+ switch (cmd) {
+ case NAND_CTL_SETCLE:
+ MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND1_PADDR);
+ break;
+ case NAND_CTL_CLRCLE:
+ MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND1_PADDR);
+ break;
+ case NAND_CTL_SETALE:
+ MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND1_PADDR);
+ break;
+ case NAND_CTL_CLRALE:
+ MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND1_PADDR);
+ break;
+ case NAND_CTL_SETNCE:
+ MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND1_PADDR);
+ break;
+ case NAND_CTL_CLRNCE:
+ MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND1_PADDR);
+ break;
+ }
+ }
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, chip->IO_ADDR_W);
+}
+
+#ifdef USE_READY_BUSY_PIN
+/*
+ * read device ready pin
+ */
+static int ppchameleon_device_ready(struct mtd_info *minfo)
+{
+ if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_RB_GPIO_PIN)
+ return 1;
+ return 0;
+}
+
+static int ppchameleonevb_device_ready(struct mtd_info *minfo)
+{
+ if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_EVB_RB_GPIO_PIN)
+ return 1;
+ return 0;
+}
+#endif
+
+/*
+ * Main initialization routine
+ */
+static int __init ppchameleonevb_init(void)
+{
+ struct nand_chip *this;
+ void __iomem *ppchameleon_fio_base;
+ void __iomem *ppchameleonevb_fio_base;
+
+ /*********************************
+ * Processor module NAND (if any) *
+ *********************************/
+ /* Allocate memory for MTD device structure and private data */
+ ppchameleon_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+ if (!ppchameleon_mtd) {
+ printk("Unable to allocate PPChameleon NAND MTD device structure.\n");
+ return -ENOMEM;
+ }
+
+ /* map physical address */
+ ppchameleon_fio_base = ioremap(ppchameleon_fio_pbase, SZ_4M);
+ if (!ppchameleon_fio_base) {
+ printk("ioremap PPChameleon NAND flash failed\n");
+ kfree(ppchameleon_mtd);
+ return -EIO;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&ppchameleon_mtd[1]);
+
+ /* Initialize structures */
+ memset(ppchameleon_mtd, 0, sizeof(struct mtd_info));
+ memset(this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ ppchameleon_mtd->priv = this;
+ ppchameleon_mtd->owner = THIS_MODULE;
+
+ /* Initialize GPIOs */
+ /* Pin mapping for NAND chip */
+ /*
+ CE GPIO_01
+ CLE GPIO_02
+ ALE GPIO_03
+ R/B GPIO_04
+ */
+ /* output select */
+ out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xC0FFFFFF);
+ /* three-state select */
+ out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xC0FFFFFF);
+ /* enable output driver */
+ out_be32((volatile unsigned *)GPIO0_TCR,
+ in_be32((volatile unsigned *)GPIO0_TCR) | NAND_nCE_GPIO_PIN | NAND_CLE_GPIO_PIN | NAND_ALE_GPIO_PIN);
+#ifdef USE_READY_BUSY_PIN
+ /* three-state select */
+ out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFF3FFFFF);
+	/* high-impedance */
+ out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & (~NAND_RB_GPIO_PIN));
+ /* input select */
+ out_be32((volatile unsigned *)GPIO0_ISR1H,
+ (in_be32((volatile unsigned *)GPIO0_ISR1H) & 0xFF3FFFFF) | 0x00400000);
+#endif
+
+ /* insert callbacks */
+ this->IO_ADDR_R = ppchameleon_fio_base;
+ this->IO_ADDR_W = ppchameleon_fio_base;
+ this->cmd_ctrl = ppchameleon_hwcontrol;
+#ifdef USE_READY_BUSY_PIN
+ this->dev_ready = ppchameleon_device_ready;
+#endif
+ this->chip_delay = NAND_BIG_DELAY_US;
+ /* ECC mode */
+ this->ecc.mode = NAND_ECC_SOFT;
+
+	/* Scan to find existence of the device (it may not be mounted) */
+ if (nand_scan(ppchameleon_mtd, 1)) {
+ iounmap((void *)ppchameleon_fio_base);
+ ppchameleon_fio_base = NULL;
+ kfree(ppchameleon_mtd);
+ goto nand_evb_init;
+ }
+#ifndef USE_READY_BUSY_PIN
+ /* Adjust delay if necessary */
+ if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
+ this->chip_delay = NAND_SMALL_DELAY_US;
+#endif
+
+ ppchameleon_mtd->name = "ppchameleon-nand";
+
+ /* Register the partitions */
+ mtd_device_parse_register(ppchameleon_mtd, NULL, NULL,
+ ppchameleon_mtd->size == NAND_SMALL_SIZE ?
+ partition_info_me : partition_info_hi,
+ NUM_PARTITIONS);
+
+ nand_evb_init:
+ /****************************
+ * EVB NAND (always present) *
+ ****************************/
+ /* Allocate memory for MTD device structure and private data */
+ ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+ if (!ppchameleonevb_mtd) {
+ printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n");
+ if (ppchameleon_fio_base)
+ iounmap(ppchameleon_fio_base);
+ return -ENOMEM;
+ }
+
+ /* map physical address */
+ ppchameleonevb_fio_base = ioremap(ppchameleonevb_fio_pbase, SZ_4M);
+ if (!ppchameleonevb_fio_base) {
+ printk("ioremap PPChameleonEVB NAND flash failed\n");
+ kfree(ppchameleonevb_mtd);
+ if (ppchameleon_fio_base)
+ iounmap(ppchameleon_fio_base);
+ return -EIO;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&ppchameleonevb_mtd[1]);
+
+ /* Initialize structures */
+ memset(ppchameleonevb_mtd, 0, sizeof(struct mtd_info));
+ memset(this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ ppchameleonevb_mtd->priv = this;
+
+ /* Initialize GPIOs */
+ /* Pin mapping for NAND chip */
+ /*
+ CE GPIO_14
+ CLE GPIO_15
+ ALE GPIO_16
+ R/B GPIO_31
+ */
+ /* output select */
+ out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xFFFFFFF0);
+ out_be32((volatile unsigned *)GPIO0_OSRL, in_be32((volatile unsigned *)GPIO0_OSRL) & 0x3FFFFFFF);
+ /* three-state select */
+ out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFFFFFFF0);
+ out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0x3FFFFFFF);
+ /* enable output driver */
+ out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) | NAND_EVB_nCE_GPIO_PIN |
+ NAND_EVB_CLE_GPIO_PIN | NAND_EVB_ALE_GPIO_PIN);
+#ifdef USE_READY_BUSY_PIN
+ /* three-state select */
+ out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0xFFFFFFFC);
+	/* high-impedance */
+ out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & (~NAND_EVB_RB_GPIO_PIN));
+ /* input select */
+ out_be32((volatile unsigned *)GPIO0_ISR1L,
+ (in_be32((volatile unsigned *)GPIO0_ISR1L) & 0xFFFFFFFC) | 0x00000001);
+#endif
+
+ /* insert callbacks */
+ this->IO_ADDR_R = ppchameleonevb_fio_base;
+ this->IO_ADDR_W = ppchameleonevb_fio_base;
+ this->cmd_ctrl = ppchameleonevb_hwcontrol;
+#ifdef USE_READY_BUSY_PIN
+ this->dev_ready = ppchameleonevb_device_ready;
+#endif
+ this->chip_delay = NAND_SMALL_DELAY_US;
+
+ /* ECC mode */
+ this->ecc.mode = NAND_ECC_SOFT;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(ppchameleonevb_mtd, 1)) {
+ iounmap((void *)ppchameleonevb_fio_base);
+ kfree(ppchameleonevb_mtd);
+ if (ppchameleon_fio_base)
+ iounmap(ppchameleon_fio_base);
+ return -ENXIO;
+ }
+
+ ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
+
+ /* Register the partitions */
+ mtd_device_parse_register(ppchameleonevb_mtd, NULL, NULL,
+ ppchameleon_mtd->size == NAND_SMALL_SIZE ?
+ partition_info_me : partition_info_hi,
+ NUM_PARTITIONS);
+
+ /* Return happy */
+ return 0;
+}
+
+module_init(ppchameleonevb_init);
+
+/*
+ * Clean up routine
+ */
+static void __exit ppchameleonevb_cleanup(void)
+{
+ struct nand_chip *this;
+
+ /* Release resources, unregister device(s) */
+ nand_release(ppchameleon_mtd);
+ nand_release(ppchameleonevb_mtd);
+
+ /* Release iomaps */
+ this = (struct nand_chip *) &ppchameleon_mtd[1];
+ iounmap((void *) this->IO_ADDR_R);
+ this = (struct nand_chip *) &ppchameleonevb_mtd[1];
+ iounmap((void *) this->IO_ADDR_R);
+
+ /* Free the MTD device structure */
+ kfree (ppchameleon_mtd);
+ kfree (ppchameleonevb_mtd);
+}
+module_exit(ppchameleonevb_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("DAVE Srl <support-ppchameleon@dave-tech.it>");
+MODULE_DESCRIPTION("MTD map driver for DAVE Srl PPChameleonEVB board");
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
new file mode 100644
index 00000000..def50caa
--- /dev/null
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -0,0 +1,1319 @@
+/*
+ * drivers/mtd/nand/pxa3xx_nand.c
+ *
+ * Copyright © 2005 Intel Corporation
+ * Copyright © 2006 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+
+#include <mach/dma.h>
+#include <plat/pxa3xx_nand.h>
+
+#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
+#define NAND_STOP_DELAY (2 * HZ/50)
+#define PAGE_CHUNK_SIZE (2048)
+
+/* registers and bit definitions */
+#define NDCR (0x00) /* Control register */
+#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
+#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
+#define NDSR (0x14) /* Status Register */
+#define NDPCR (0x18) /* Page Count Register */
+#define NDBDR0 (0x1C) /* Bad Block Register 0 */
+#define NDBDR1 (0x20) /* Bad Block Register 1 */
+#define NDDB (0x40) /* Data Buffer */
+#define NDCB0 (0x48) /* Command Buffer0 */
+#define NDCB1 (0x4C) /* Command Buffer1 */
+#define NDCB2 (0x50) /* Command Buffer2 */
+
+#define NDCR_SPARE_EN (0x1 << 31)
+#define NDCR_ECC_EN (0x1 << 30)
+#define NDCR_DMA_EN (0x1 << 29)
+#define NDCR_ND_RUN (0x1 << 28)
+#define NDCR_DWIDTH_C (0x1 << 27)
+#define NDCR_DWIDTH_M (0x1 << 26)
+#define NDCR_PAGE_SZ (0x1 << 24)
+#define NDCR_NCSX (0x1 << 23)
+#define NDCR_ND_MODE (0x3 << 21)
+#define NDCR_NAND_MODE (0x0)
+#define NDCR_CLR_PG_CNT (0x1 << 20)
+#define NDCR_STOP_ON_UNCOR (0x1 << 19)
+#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
+#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
+
+#define NDCR_RA_START (0x1 << 15)
+#define NDCR_PG_PER_BLK (0x1 << 14)
+#define NDCR_ND_ARB_EN (0x1 << 12)
+#define NDCR_INT_MASK (0xFFF)
+
+#define NDSR_MASK (0xfff)
+#define NDSR_RDY (0x1 << 12)
+#define NDSR_FLASH_RDY (0x1 << 11)
+#define NDSR_CS0_PAGED (0x1 << 10)
+#define NDSR_CS1_PAGED (0x1 << 9)
+#define NDSR_CS0_CMDD (0x1 << 8)
+#define NDSR_CS1_CMDD (0x1 << 7)
+#define NDSR_CS0_BBD (0x1 << 6)
+#define NDSR_CS1_BBD (0x1 << 5)
+#define NDSR_DBERR (0x1 << 4)
+#define NDSR_SBERR (0x1 << 3)
+#define NDSR_WRDREQ (0x1 << 2)
+#define NDSR_RDDREQ (0x1 << 1)
+#define NDSR_WRCMDREQ (0x1)
+
+#define NDCB0_ST_ROW_EN (0x1 << 26)
+#define NDCB0_AUTO_RS (0x1 << 25)
+#define NDCB0_CSEL (0x1 << 24)
+#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
+#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
+#define NDCB0_NC (0x1 << 20)
+#define NDCB0_DBC (0x1 << 19)
+#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
+#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
+#define NDCB0_CMD2_MASK (0xff << 8)
+#define NDCB0_CMD1_MASK (0xff)
+#define NDCB0_ADDR_CYC_SHIFT (16)
+
+/* macros for registers read/write */
+#define nand_writel(info, off, val) \
+ __raw_writel((val), (info)->mmio_base + (off))
+
+#define nand_readl(info, off) \
+ __raw_readl((info)->mmio_base + (off))
+
+/* error code and state */
+enum {
+ ERR_NONE = 0,
+ ERR_DMABUSERR = -1,
+ ERR_SENDCMD = -2,
+ ERR_DBERR = -3,
+ ERR_BBERR = -4,
+ ERR_SBERR = -5,
+};
+
+enum {
+ STATE_IDLE = 0,
+ STATE_PREPARED,
+ STATE_CMD_HANDLE,
+ STATE_DMA_READING,
+ STATE_DMA_WRITING,
+ STATE_DMA_DONE,
+ STATE_PIO_READING,
+ STATE_PIO_WRITING,
+ STATE_CMD_DONE,
+ STATE_READY,
+};
+
+struct pxa3xx_nand_host {
+ struct nand_chip chip;
+ struct pxa3xx_nand_cmdset *cmdset;
+ struct mtd_info *mtd;
+ void *info_data;
+
+ /* page size of attached chip */
+ unsigned int page_size;
+ int use_ecc;
+ int cs;
+
+ /* calculated from pxa3xx_nand_flash data */
+ unsigned int col_addr_cycles;
+ unsigned int row_addr_cycles;
+ size_t read_id_bytes;
+
+ /* cached register value */
+ uint32_t reg_ndcr;
+ uint32_t ndtr0cs0;
+ uint32_t ndtr1cs0;
+};
+
+struct pxa3xx_nand_info {
+ struct nand_hw_control controller;
+ struct platform_device *pdev;
+
+ struct clk *clk;
+ void __iomem *mmio_base;
+ unsigned long mmio_phys;
+ struct completion cmd_complete;
+
+ unsigned int buf_start;
+ unsigned int buf_count;
+
+ /* DMA information */
+ int drcmr_dat;
+ int drcmr_cmd;
+
+ unsigned char *data_buff;
+ unsigned char *oob_buff;
+ dma_addr_t data_buff_phys;
+ int data_dma_ch;
+ struct pxa_dma_desc *data_desc;
+ dma_addr_t data_desc_addr;
+
+ struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
+ unsigned int state;
+
+ int cs;
+ int use_ecc; /* use HW ECC ? */
+ int use_dma; /* use DMA ? */
+ int is_ready;
+
+ unsigned int page_size; /* page size of attached chip */
+ unsigned int data_size; /* data size in FIFO */
+ unsigned int oob_size;
+ int retcode;
+
+ /* generated NDCBx register values */
+ uint32_t ndcb0;
+ uint32_t ndcb1;
+ uint32_t ndcb2;
+};
+
+static bool use_dma = 1;
+module_param(use_dma, bool, 0444);
+MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
+
+/*
+ * Default NAND flash controller configuration setup by the
+ * bootloader. This configuration is used only when pdata->keep_config is set
+ */
+static struct pxa3xx_nand_cmdset default_cmdset = {
+ .read1 = 0x3000,
+ .read2 = 0x0050,
+ .program = 0x1080,
+ .read_status = 0x0070,
+ .read_id = 0x0090,
+ .erase = 0xD060,
+ .reset = 0x00FF,
+ .lock = 0x002A,
+ .unlock = 0x2423,
+ .lock_status = 0x007A,
+};
+
+static struct pxa3xx_nand_timing timing[] = {
+ { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
+ { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
+ { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
+ { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
+};
+
+static struct pxa3xx_nand_flash builtin_flash_types[] = {
+{ "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
+{ "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
+{ "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
+{ "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
+{ "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
+{ "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
+{ "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
+{ "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
+{ "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
+};
+
+/* Default flash type setting, used only for flash detection */
+#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
+
+const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL};
+
+#define NDTR0_tCH(c) (min((c), 7) << 19)
+#define NDTR0_tCS(c) (min((c), 7) << 16)
+#define NDTR0_tWH(c) (min((c), 7) << 11)
+#define NDTR0_tWP(c) (min((c), 7) << 8)
+#define NDTR0_tRH(c) (min((c), 7) << 3)
+#define NDTR0_tRP(c) (min((c), 7) << 0)
+
+#define NDTR1_tR(c) (min((c), 65535) << 16)
+#define NDTR1_tWHR(c) (min((c), 15) << 4)
+#define NDTR1_tAR(c) (min((c), 15) << 0)
+
+/* convert nano-seconds to nand flash controller clock cycles */
+#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
+
+static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
+ const struct pxa3xx_nand_timing *t)
+{
+ struct pxa3xx_nand_info *info = host->info_data;
+ unsigned long nand_clk = clk_get_rate(info->clk);
+ uint32_t ndtr0, ndtr1;
+
+ ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
+ NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
+ NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
+ NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
+ NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
+ NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
+
+ ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
+ NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
+ NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
+
+ host->ndtr0cs0 = ndtr0;
+ host->ndtr1cs0 = ndtr1;
+ nand_writel(info, NDTR0CS0, ndtr0);
+ nand_writel(info, NDTR1CS0, ndtr1);
+}
+
+static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
+{
+ struct pxa3xx_nand_host *host = info->host[info->cs];
+ int oob_enable = host->reg_ndcr & NDCR_SPARE_EN;
+
+ info->data_size = host->page_size;
+ if (!oob_enable) {
+ info->oob_size = 0;
+ return;
+ }
+
+ switch (host->page_size) {
+ case 2048:
+ info->oob_size = (info->use_ecc) ? 40 : 64;
+ break;
+ case 512:
+ info->oob_size = (info->use_ecc) ? 8 : 16;
+ break;
+ }
+}
+
+/*
+ * NOTE: ND_RUN must be set first and the command buffer written
+ * afterwards, otherwise the operation does not start.
+ * We enable all the interrupts at the same time and let
+ * pxa3xx_nand_irq handle all the logic.
+ */
+static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
+{
+ struct pxa3xx_nand_host *host = info->host[info->cs];
+ uint32_t ndcr;
+
+ ndcr = host->reg_ndcr;
+ ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
+ ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
+ ndcr |= NDCR_ND_RUN;
+
+ /* clear status bits and run */
+ nand_writel(info, NDCR, 0);
+ nand_writel(info, NDSR, NDSR_MASK);
+ nand_writel(info, NDCR, ndcr);
+}
+
+static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
+{
+ uint32_t ndcr;
+ int timeout = NAND_STOP_DELAY;
+
+	/* wait for the RUN bit in NDCR to become 0 */
+ ndcr = nand_readl(info, NDCR);
+ while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
+ ndcr = nand_readl(info, NDCR);
+ udelay(1);
+ }
+
+ if (timeout <= 0) {
+ ndcr &= ~NDCR_ND_RUN;
+ nand_writel(info, NDCR, ndcr);
+ }
+ /* clear status bits */
+ nand_writel(info, NDSR, NDSR_MASK);
+}
+
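+/* the NDCR interrupt bits act as masks: clearing a bit enables the interrupt */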
+static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
+{
+ uint32_t ndcr;
+
+ ndcr = nand_readl(info, NDCR);
+ nand_writel(info, NDCR, ndcr & ~int_mask);
+}
+
+static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
+{
+ uint32_t ndcr;
+
+ ndcr = nand_readl(info, NDCR);
+ nand_writel(info, NDCR, ndcr | int_mask);
+}
+
+static void handle_data_pio(struct pxa3xx_nand_info *info)
+{
+ switch (info->state) {
+ case STATE_PIO_WRITING:
+ __raw_writesl(info->mmio_base + NDDB, info->data_buff,
+ DIV_ROUND_UP(info->data_size, 4));
+ if (info->oob_size > 0)
+ __raw_writesl(info->mmio_base + NDDB, info->oob_buff,
+ DIV_ROUND_UP(info->oob_size, 4));
+ break;
+ case STATE_PIO_READING:
+ __raw_readsl(info->mmio_base + NDDB, info->data_buff,
+ DIV_ROUND_UP(info->data_size, 4));
+ if (info->oob_size > 0)
+ __raw_readsl(info->mmio_base + NDDB, info->oob_buff,
+ DIV_ROUND_UP(info->oob_size, 4));
+ break;
+ default:
+ dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
+ info->state);
+ BUG();
+ }
+}
+
+static void start_data_dma(struct pxa3xx_nand_info *info)
+{
+ struct pxa_dma_desc *desc = info->data_desc;
+ int dma_len = ALIGN(info->data_size + info->oob_size, 32);
+
+ desc->ddadr = DDADR_STOP;
+ desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
+
+ switch (info->state) {
+ case STATE_DMA_WRITING:
+ desc->dsadr = info->data_buff_phys;
+ desc->dtadr = info->mmio_phys + NDDB;
+ desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
+ break;
+ case STATE_DMA_READING:
+ desc->dtadr = info->data_buff_phys;
+ desc->dsadr = info->mmio_phys + NDDB;
+ desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
+ break;
+ default:
+ dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
+ info->state);
+ BUG();
+ }
+
+ DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
+ DDADR(info->data_dma_ch) = info->data_desc_addr;
+ DCSR(info->data_dma_ch) |= DCSR_RUN;
+}
+
+static void pxa3xx_nand_data_dma_irq(int channel, void *data)
+{
+ struct pxa3xx_nand_info *info = data;
+ uint32_t dcsr;
+
+ dcsr = DCSR(channel);
+ DCSR(channel) = dcsr;
+
+ if (dcsr & DCSR_BUSERR) {
+ info->retcode = ERR_DMABUSERR;
+ }
+
+ info->state = STATE_DMA_DONE;
+ enable_int(info, NDCR_INT_MASK);
+ nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
+}
+
+static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
+{
+ struct pxa3xx_nand_info *info = devid;
+ unsigned int status, is_completed = 0;
+ unsigned int ready, cmd_done;
+
+ if (info->cs == 0) {
+ ready = NDSR_FLASH_RDY;
+ cmd_done = NDSR_CS0_CMDD;
+ } else {
+ ready = NDSR_RDY;
+ cmd_done = NDSR_CS1_CMDD;
+ }
+
+ status = nand_readl(info, NDSR);
+
+ if (status & NDSR_DBERR)
+ info->retcode = ERR_DBERR;
+ if (status & NDSR_SBERR)
+ info->retcode = ERR_SBERR;
+ if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
+		/* whether to use DMA to transfer the data */
+ if (info->use_dma) {
+ disable_int(info, NDCR_INT_MASK);
+ info->state = (status & NDSR_RDDREQ) ?
+ STATE_DMA_READING : STATE_DMA_WRITING;
+ start_data_dma(info);
+ goto NORMAL_IRQ_EXIT;
+ } else {
+ info->state = (status & NDSR_RDDREQ) ?
+ STATE_PIO_READING : STATE_PIO_WRITING;
+ handle_data_pio(info);
+ }
+ }
+ if (status & cmd_done) {
+ info->state = STATE_CMD_DONE;
+ is_completed = 1;
+ }
+ if (status & ready) {
+ info->is_ready = 1;
+ info->state = STATE_READY;
+ }
+
+ if (status & NDSR_WRCMDREQ) {
+ nand_writel(info, NDSR, NDSR_WRCMDREQ);
+ status &= ~NDSR_WRCMDREQ;
+ info->state = STATE_CMD_HANDLE;
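+		/* NDCB1 and NDCB2 are loaded by consecutive writes to NDCB0 */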
+ nand_writel(info, NDCB0, info->ndcb0);
+ nand_writel(info, NDCB0, info->ndcb1);
+ nand_writel(info, NDCB0, info->ndcb2);
+ }
+
+ /* clear NDSR to let the controller exit the IRQ */
+ nand_writel(info, NDSR, status);
+ if (is_completed)
+ complete(&info->cmd_complete);
+NORMAL_IRQ_EXIT:
+ return IRQ_HANDLED;
+}
+
+static inline int is_buf_blank(uint8_t *buf, size_t len)
+{
+ for (; len > 0; len--)
+ if (*buf++ != 0xff)
+ return 0;
+ return 1;
+}
+
+static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
+ uint16_t column, int page_addr)
+{
+ uint16_t cmd;
+ int addr_cycle, exec_cmd;
+ struct pxa3xx_nand_host *host;
+ struct mtd_info *mtd;
+
+ host = info->host[info->cs];
+ mtd = host->mtd;
+ addr_cycle = 0;
+ exec_cmd = 1;
+
+	/* reset the data and oob column pointers before handling data */
+ info->buf_start = 0;
+ info->buf_count = 0;
+ info->oob_size = 0;
+ info->use_ecc = 0;
+ info->is_ready = 0;
+ info->retcode = ERR_NONE;
+ if (info->cs != 0)
+ info->ndcb0 = NDCB0_CSEL;
+ else
+ info->ndcb0 = 0;
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_PAGEPROG:
+ info->use_ecc = 1;
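+		/* fall through */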
+ case NAND_CMD_READOOB:
+ pxa3xx_set_datasize(info);
+ break;
+ case NAND_CMD_SEQIN:
+ exec_cmd = 0;
+ break;
+ default:
+ info->ndcb1 = 0;
+ info->ndcb2 = 0;
+ break;
+ }
+
+ addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
+ + host->col_addr_cycles);
+
+ switch (command) {
+ case NAND_CMD_READOOB:
+ case NAND_CMD_READ0:
+ cmd = host->cmdset->read1;
+ if (command == NAND_CMD_READOOB)
+ info->buf_start = mtd->writesize + column;
+ else
+ info->buf_start = column;
+
+ if (unlikely(host->page_size < PAGE_CHUNK_SIZE))
+ info->ndcb0 |= NDCB0_CMD_TYPE(0)
+ | addr_cycle
+ | (cmd & NDCB0_CMD1_MASK);
+ else
+ info->ndcb0 |= NDCB0_CMD_TYPE(0)
+ | NDCB0_DBC
+ | addr_cycle
+ | cmd;
+
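+		/* fall through: the address setup below is shared with SEQIN */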
+ case NAND_CMD_SEQIN:
+ /* small page addr setting */
+ if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) {
+ info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
+ | (column & 0xFF);
+
+ info->ndcb2 = 0;
+ } else {
+ info->ndcb1 = ((page_addr & 0xFFFF) << 16)
+ | (column & 0xFFFF);
+
+ if (page_addr & 0xFF0000)
+ info->ndcb2 = (page_addr & 0xFF0000) >> 16;
+ else
+ info->ndcb2 = 0;
+ }
+
+ info->buf_count = mtd->writesize + mtd->oobsize;
+ memset(info->data_buff, 0xFF, info->buf_count);
+
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ if (is_buf_blank(info->data_buff,
+ (mtd->writesize + mtd->oobsize))) {
+ exec_cmd = 0;
+ break;
+ }
+
+ cmd = host->cmdset->program;
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_AUTO_RS
+ | NDCB0_ST_ROW_EN
+ | NDCB0_DBC
+ | cmd
+ | addr_cycle;
+ break;
+
+ case NAND_CMD_READID:
+ cmd = host->cmdset->read_id;
+ info->buf_count = host->read_id_bytes;
+ info->ndcb0 |= NDCB0_CMD_TYPE(3)
+ | NDCB0_ADDR_CYC(1)
+ | cmd;
+
+ info->data_size = 8;
+ break;
+ case NAND_CMD_STATUS:
+ cmd = host->cmdset->read_status;
+ info->buf_count = 1;
+ info->ndcb0 |= NDCB0_CMD_TYPE(4)
+ | NDCB0_ADDR_CYC(1)
+ | cmd;
+
+ info->data_size = 8;
+ break;
+
+ case NAND_CMD_ERASE1:
+ cmd = host->cmdset->erase;
+ info->ndcb0 |= NDCB0_CMD_TYPE(2)
+ | NDCB0_AUTO_RS
+ | NDCB0_ADDR_CYC(3)
+ | NDCB0_DBC
+ | cmd;
+ info->ndcb1 = page_addr;
+ info->ndcb2 = 0;
+
+ break;
+ case NAND_CMD_RESET:
+ cmd = host->cmdset->reset;
+ info->ndcb0 |= NDCB0_CMD_TYPE(5)
+ | cmd;
+
+ break;
+
+ case NAND_CMD_ERASE2:
+ exec_cmd = 0;
+ break;
+
+ default:
+ exec_cmd = 0;
+ dev_err(&info->pdev->dev, "non-supported command %x\n",
+ command);
+ break;
+ }
+
+ return exec_cmd;
+}
+
+static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ int ret, exec_cmd;
+
+ /*
+	 * if this is a x16 device, then convert the input
+ * "byte" address into a "word" address appropriate
+ * for indexing a word-oriented device
+ */
+ if (host->reg_ndcr & NDCR_DWIDTH_M)
+ column /= 2;
+
+ /*
+	 * Different NAND chips may be hooked to different chip
+	 * selects, so check whether the chip select has changed;
+	 * if so, reprogram the timing registers.
+ */
+ if (info->cs != host->cs) {
+ info->cs = host->cs;
+ nand_writel(info, NDTR0CS0, host->ndtr0cs0);
+ nand_writel(info, NDTR1CS0, host->ndtr1cs0);
+ }
+
+ info->state = STATE_PREPARED;
+ exec_cmd = prepare_command_pool(info, command, column, page_addr);
+ if (exec_cmd) {
+ init_completion(&info->cmd_complete);
+ pxa3xx_nand_start(info);
+
+ ret = wait_for_completion_timeout(&info->cmd_complete,
+ CHIP_DELAY_TIMEOUT);
+ if (!ret) {
+ dev_err(&info->pdev->dev, "Wait time out!!!\n");
+ /* Stop State Machine for next command cycle */
+ pxa3xx_nand_stop(info);
+ }
+ }
+ info->state = STATE_IDLE;
+}
+
+static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf)
+{
+ chip->write_buf(mtd, buf, mtd->writesize);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
+
+static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int page)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+
+ chip->read_buf(mtd, buf, mtd->writesize);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (info->retcode == ERR_SBERR) {
+ switch (info->use_ecc) {
+ case 1:
+ mtd->ecc_stats.corrected++;
+ break;
+ case 0:
+ default:
+ break;
+ }
+ } else if (info->retcode == ERR_DBERR) {
+ /*
+		 * for a blank page (all 0xff), the HW calculates its ECC as
+		 * 0, which differs from the ECC information stored in the
+		 * OOB, so ignore such double bit errors
+ */
+ if (is_buf_blank(buf, mtd->writesize))
+ info->retcode = ERR_NONE;
+ else
+ mtd->ecc_stats.failed++;
+ }
+
+ return 0;
+}
+
+static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ char retval = 0xFF;
+
+ if (info->buf_start < info->buf_count)
+		/* Has a new command just been sent? */
+ retval = info->data_buff[info->buf_start++];
+
+ return retval;
+}
+
+static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ u16 retval = 0xFFFF;
+
+ if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
+ retval = *((u16 *)(info->data_buff+info->buf_start));
+ info->buf_start += 2;
+ }
+ return retval;
+}
+
+static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
+
+ memcpy(buf, info->data_buff + info->buf_start, real_len);
+ info->buf_start += real_len;
+}
+
+static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
+
+ memcpy(info->data_buff + info->buf_start, buf, real_len);
+ info->buf_start += real_len;
+}
+
+static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ return 0;
+}
+
+static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ return;
+}
+
+static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+
+	/* the cmdfunc path has already waited for the command to complete */
+ if (this->state == FL_WRITING || this->state == FL_ERASING) {
+ if (info->retcode == ERR_NONE)
+ return 0;
+ else {
+			/*
+			 * Any error makes it return 0x01, which tells
+			 * the caller that the erase or write failed.
+			 */
+ return 0x01;
+ }
+ }
+
+ return 0;
+}
+
+static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
+ const struct pxa3xx_nand_flash *f)
+{
+ struct platform_device *pdev = info->pdev;
+ struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct pxa3xx_nand_host *host = info->host[info->cs];
+ uint32_t ndcr = 0x0; /* enable all interrupts */
+
+ if (f->page_size != 2048 && f->page_size != 512) {
+		dev_err(&pdev->dev, "Only 2048 and 512 byte page sizes are supported\n");
+ return -EINVAL;
+ }
+
+ if (f->flash_width != 16 && f->flash_width != 8) {
+		dev_err(&pdev->dev, "Only 8 bit and 16 bit bus widths are supported\n");
+ return -EINVAL;
+ }
+
+ /* calculate flash information */
+ host->cmdset = &default_cmdset;
+ host->page_size = f->page_size;
+ host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
+
+ /* calculate addressing information */
+ host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
+
+ if (f->num_blocks * f->page_per_block > 65536)
+ host->row_addr_cycles = 3;
+ else
+ host->row_addr_cycles = 2;
+
+ ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
+ ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
+ ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
+ ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
+ ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
+ ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
+
+ ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
+ ndcr |= NDCR_SPARE_EN; /* enable spare by default */
+
+ host->reg_ndcr = ndcr;
+
+ pxa3xx_nand_set_timing(host, f->timing);
+ return 0;
+}
+
+static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
+{
+	/*
+	 * We hard-code chip select 0 here, because keep_config is not
+	 * supported when more than one chip is attached to the controller.
+	 */
+ struct pxa3xx_nand_host *host = info->host[0];
+ uint32_t ndcr = nand_readl(info, NDCR);
+
+ if (ndcr & NDCR_PAGE_SZ) {
+ host->page_size = 2048;
+ host->read_id_bytes = 4;
+ } else {
+ host->page_size = 512;
+ host->read_id_bytes = 2;
+ }
+
+ host->reg_ndcr = ndcr & ~NDCR_INT_MASK;
+ host->cmdset = &default_cmdset;
+
+ host->ndtr0cs0 = nand_readl(info, NDTR0CS0);
+ host->ndtr1cs0 = nand_readl(info, NDTR1CS0);
+
+ return 0;
+}
+
+/* The maximum possible buffer size for a large page with OOB data
+ * is 2048 + 64 = 2112 bytes; allocate a page here for both the
+ * data buffer and the DMA descriptor.
+ */
+#define MAX_BUFF_SIZE PAGE_SIZE
+
+static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
+{
+ struct platform_device *pdev = info->pdev;
+ int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);
+
+ if (use_dma == 0) {
+ info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+ if (info->data_buff == NULL)
+ return -ENOMEM;
+ return 0;
+ }
+
+ info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
+ &info->data_buff_phys, GFP_KERNEL);
+ if (info->data_buff == NULL) {
+ dev_err(&pdev->dev, "failed to allocate dma buffer\n");
+ return -ENOMEM;
+ }
+
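+	/* the DMA descriptor is carved out of the tail of the same coherent
+	 * page as the data buffer (see data_desc_offset above) */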
+ info->data_desc = (void *)info->data_buff + data_desc_offset;
+ info->data_desc_addr = info->data_buff_phys + data_desc_offset;
+
+ info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
+ pxa3xx_nand_data_dma_irq, info);
+ if (info->data_dma_ch < 0) {
+ dev_err(&pdev->dev, "failed to request data dma\n");
+ dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
+ info->data_buff, info->data_buff_phys);
+ return info->data_dma_ch;
+ }
+
+ return 0;
+}
+
+static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
+{
+ struct mtd_info *mtd;
+ int ret;
+ mtd = info->host[info->cs]->mtd;
+	/* try probing with the common timing first */
+ ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
+ if (ret)
+ return ret;
+
+ pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
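+	/* info->is_ready should have been set from the interrupt path if the
+	 * chip answered the reset */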
+ if (info->is_ready)
+ return 0;
+
+ return -ENODEV;
+}
+
+static int pxa3xx_nand_scan(struct mtd_info *mtd)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ struct platform_device *pdev = info->pdev;
+ struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
+ const struct pxa3xx_nand_flash *f = NULL;
+ struct nand_chip *chip = mtd->priv;
+ uint32_t id = -1;
+ uint64_t chipsize;
+ int i, ret, num;
+
+ if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
+ goto KEEP_CONFIG;
+
+ ret = pxa3xx_nand_sensing(info);
+ if (ret) {
+ dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
+ info->cs);
+
+ return ret;
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
+ id = *((uint16_t *)(info->data_buff));
+ if (id != 0)
+		dev_info(&info->pdev->dev, "Detected a flash id %x\n", id);
+ else {
+ dev_warn(&info->pdev->dev,
+			"Read out ID 0, the timing setup may be wrong\n");
+
+ return -EINVAL;
+ }
+
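+	/*
+	 * Look the detected id up in the platform-supplied flash table first,
+	 * then in the builtin table; builtin entry 0 was only used for the
+	 * sensing step above, so it is skipped here.
+	 */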
+ num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
+ for (i = 0; i < num; i++) {
+ if (i < pdata->num_flash)
+ f = pdata->flash + i;
+ else
+ f = &builtin_flash_types[i - pdata->num_flash + 1];
+
+ /* find the chip in default list */
+ if (f->chip_id == id)
+ break;
+ }
+
+ if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
+		dev_err(&info->pdev->dev, "ERROR: flash not defined\n");
+
+ return -EINVAL;
+ }
+
+ ret = pxa3xx_nand_config_flash(info, f);
+ if (ret) {
+		dev_err(&info->pdev->dev, "ERROR: flash configuration failed\n");
+ return ret;
+ }
+
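+	/* build a one-entry flash id table from the matched parameters so
+	 * that nand_scan_ident() below identifies exactly this chip */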
+ pxa3xx_flash_ids[0].name = f->name;
+ pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff;
+ pxa3xx_flash_ids[0].pagesize = f->page_size;
+ chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
+ pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
+ pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
+ if (f->flash_width == 16)
+ pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
+ pxa3xx_flash_ids[1].name = NULL;
+ def = pxa3xx_flash_ids;
+KEEP_CONFIG:
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = host->page_size;
+ chip->ecc.strength = 1;
+
+ chip->options = NAND_NO_AUTOINCR;
+ chip->options |= NAND_NO_READRDY;
+ if (host->reg_ndcr & NDCR_DWIDTH_M)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ if (nand_scan_ident(mtd, 1, def))
+ return -ENODEV;
+ /* calculate addressing information */
+ if (mtd->writesize >= 2048)
+ host->col_addr_cycles = 2;
+ else
+ host->col_addr_cycles = 1;
+
+ info->oob_buff = info->data_buff + mtd->writesize;
+ if ((mtd->size >> chip->page_shift) > 65536)
+ host->row_addr_cycles = 3;
+ else
+ host->row_addr_cycles = 2;
+
+ mtd->name = mtd_names[0];
+ return nand_scan_tail(mtd);
+}
+
+static int alloc_nand_resource(struct platform_device *pdev)
+{
+ struct pxa3xx_nand_platform_data *pdata;
+ struct pxa3xx_nand_info *info;
+ struct pxa3xx_nand_host *host;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ struct resource *r;
+ int ret, irq, cs;
+
+ pdata = pdev->dev.platform_data;
+ info = kzalloc(sizeof(*info) + (sizeof(*mtd) +
+ sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ info->pdev = pdev;
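+	/*
+	 * The allocation above lays out the info structure followed by one
+	 * (mtd_info + host) pair per chip select; nand_chip is the first
+	 * member of the host structure, which is why the chip and host
+	 * pointers below share the same address.
+	 */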
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ mtd = (struct mtd_info *)((unsigned int)&info[1] +
+ (sizeof(*mtd) + sizeof(*host)) * cs);
+ chip = (struct nand_chip *)(&mtd[1]);
+ host = (struct pxa3xx_nand_host *)chip;
+ info->host[cs] = host;
+ host->mtd = mtd;
+ host->cs = cs;
+ host->info_data = info;
+ mtd->priv = host;
+ mtd->owner = THIS_MODULE;
+
+ chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
+ chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
+ chip->controller = &info->controller;
+ chip->waitfunc = pxa3xx_nand_waitfunc;
+ chip->select_chip = pxa3xx_nand_select_chip;
+ chip->cmdfunc = pxa3xx_nand_cmdfunc;
+ chip->read_word = pxa3xx_nand_read_word;
+ chip->read_byte = pxa3xx_nand_read_byte;
+ chip->read_buf = pxa3xx_nand_read_buf;
+ chip->write_buf = pxa3xx_nand_write_buf;
+ chip->verify_buf = pxa3xx_nand_verify_buf;
+ }
+
+ spin_lock_init(&chip->controller->lock);
+ init_waitqueue_head(&chip->controller->wq);
+ info->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "failed to get nand clock\n");
+ ret = PTR_ERR(info->clk);
+ goto fail_free_mtd;
+ }
+ clk_enable(info->clk);
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no resource defined for data DMA\n");
+ ret = -ENXIO;
+ goto fail_put_clk;
+ }
+ info->drcmr_dat = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no resource defined for command DMA\n");
+ ret = -ENXIO;
+ goto fail_put_clk;
+ }
+ info->drcmr_cmd = r->start;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no IRQ resource defined\n");
+ ret = -ENXIO;
+ goto fail_put_clk;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no IO memory resource defined\n");
+ ret = -ENODEV;
+ goto fail_put_clk;
+ }
+
+ r = request_mem_region(r->start, resource_size(r), pdev->name);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "failed to request memory resource\n");
+ ret = -EBUSY;
+ goto fail_put_clk;
+ }
+
+ info->mmio_base = ioremap(r->start, resource_size(r));
+ if (info->mmio_base == NULL) {
+ dev_err(&pdev->dev, "ioremap() failed\n");
+ ret = -ENODEV;
+ goto fail_free_res;
+ }
+ info->mmio_phys = r->start;
+
+ ret = pxa3xx_nand_init_buff(info);
+ if (ret)
+ goto fail_free_io;
+
+ /* initialize all interrupts to be disabled */
+ disable_int(info, NDSR_MASK);
+
+ ret = request_irq(irq, pxa3xx_nand_irq, IRQF_DISABLED,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ goto fail_free_buf;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ return 0;
+
+fail_free_buf:
+ free_irq(irq, info);
+ if (use_dma) {
+ pxa_free_dma(info->data_dma_ch);
+ dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
+ info->data_buff, info->data_buff_phys);
+ } else
+ kfree(info->data_buff);
+fail_free_io:
+ iounmap(info->mmio_base);
+fail_free_res:
+ release_mem_region(r->start, resource_size(r));
+fail_put_clk:
+ clk_disable(info->clk);
+ clk_put(info->clk);
+fail_free_mtd:
+ kfree(info);
+ return ret;
+}
+
+static int pxa3xx_nand_remove(struct platform_device *pdev)
+{
+ struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
+ struct pxa3xx_nand_platform_data *pdata;
+ struct resource *r;
+ int irq, cs;
+
+ if (!info)
+ return 0;
+
+ pdata = pdev->dev.platform_data;
+ platform_set_drvdata(pdev, NULL);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0)
+ free_irq(irq, info);
+ if (use_dma) {
+ pxa_free_dma(info->data_dma_ch);
+ dma_free_writecombine(&pdev->dev, MAX_BUFF_SIZE,
+ info->data_buff, info->data_buff_phys);
+ } else
+ kfree(info->data_buff);
+
+ iounmap(info->mmio_base);
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(r->start, resource_size(r));
+
+ clk_disable(info->clk);
+ clk_put(info->clk);
+
+ for (cs = 0; cs < pdata->num_cs; cs++)
+ nand_release(info->host[cs]->mtd);
+ kfree(info);
+ return 0;
+}
+
+static int pxa3xx_nand_probe(struct platform_device *pdev)
+{
+ struct pxa3xx_nand_platform_data *pdata;
+ struct pxa3xx_nand_info *info;
+ int ret, cs, probe_success;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data defined\n");
+ return -ENODEV;
+ }
+
+ ret = alloc_nand_resource(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "alloc nand resource failed\n");
+ return ret;
+ }
+
+ info = platform_get_drvdata(pdev);
+ probe_success = 0;
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ info->cs = cs;
+ ret = pxa3xx_nand_scan(info->host[cs]->mtd);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
+ cs);
+ continue;
+ }
+
+ ret = mtd_device_parse_register(info->host[cs]->mtd, NULL,
+ NULL, pdata->parts[cs],
+ pdata->nr_parts[cs]);
+ if (!ret)
+ probe_success = 1;
+ }
+
+ if (!probe_success) {
+ pxa3xx_nand_remove(pdev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
+ struct pxa3xx_nand_platform_data *pdata;
+ struct mtd_info *mtd;
+ int cs;
+
+ pdata = pdev->dev.platform_data;
+ if (info->state) {
+ dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
+ return -EAGAIN;
+ }
+
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ mtd = info->host[cs]->mtd;
+ mtd_suspend(mtd);
+ }
+
+ return 0;
+}
+
+static int pxa3xx_nand_resume(struct platform_device *pdev)
+{
+ struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
+ struct pxa3xx_nand_platform_data *pdata;
+ struct mtd_info *mtd;
+ int cs;
+
+ pdata = pdev->dev.platform_data;
+	/* We don't want to handle any interrupt before the mtd resume routines run */
+ disable_int(info, NDCR_INT_MASK);
+
+	/*
+	 * Directly set the chip select to an invalid value,
+	 * so that the driver resets the timing according to
+	 * the current chip select at the beginning of cmdfunc.
+	 */
+ info->cs = 0xff;
+
+	/*
+	 * As the spec says, NDSR is updated to 0x1800 when the
+	 * nand_clk is disabled/enabled.
+	 * To prevent this from corrupting the driver's state machine,
+	 * clear all status bits before resuming.
+	 */
+ nand_writel(info, NDSR, NDSR_MASK);
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ mtd = info->host[cs]->mtd;
+ mtd_resume(mtd);
+ }
+
+ return 0;
+}
+#else
+#define pxa3xx_nand_suspend NULL
+#define pxa3xx_nand_resume NULL
+#endif
+
+static struct platform_driver pxa3xx_nand_driver = {
+ .driver = {
+ .name = "pxa3xx-nand",
+ },
+ .probe = pxa3xx_nand_probe,
+ .remove = pxa3xx_nand_remove,
+ .suspend = pxa3xx_nand_suspend,
+ .resume = pxa3xx_nand_resume,
+};
+
+module_platform_driver(pxa3xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PXA3xx NAND controller driver");
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
new file mode 100644
index 00000000..c2040187
--- /dev/null
+++ b/drivers/mtd/nand/r852.c
@@ -0,0 +1,1122 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <linux/sched.h>
+#include "sm_common.h"
+#include "r852.h"
+
+
+static bool r852_enable_dma = 1;
+module_param(r852_enable_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+/* read register */
+static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
+{
+ uint8_t reg = readb(dev->mmio + address);
+ return reg;
+}
+
+/* write register */
+static inline void r852_write_reg(struct r852_device *dev,
+ int address, uint8_t value)
+{
+ writeb(value, dev->mmio + address);
+ mmiowb();
+}
+
+
+/* read dword sized register */
+static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
+{
+ uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
+ return reg;
+}
+
+/* write dword sized register */
+static inline void r852_write_reg_dword(struct r852_device *dev,
+ int address, uint32_t value)
+{
+ writel(cpu_to_le32(value), dev->mmio + address);
+ mmiowb();
+}
+
+/* returns pointer to our private structure */
+static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ return chip->priv;
+}
+
+
+/* check if controller supports dma */
+static void r852_dma_test(struct r852_device *dev)
+{
+ dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
+ (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
+
+ if (!dev->dma_usable)
+ message("Non dma capable device detected, dma disabled");
+
+ if (!r852_enable_dma) {
+ message("disabling dma on user request");
+ dev->dma_usable = 0;
+ }
+}
+
+/*
+ * Enable DMA. Enables either the first or the second stage of the DMA;
+ * expects dev->dma_dir and dev->dma_state to be set.
+ */
+static void r852_dma_enable(struct r852_device *dev)
+{
+ uint8_t dma_reg, dma_irq_reg;
+
+ /* Set up dma settings */
+ dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
+ dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
+
+ if (dev->dma_dir)
+ dma_reg |= R852_DMA_READ;
+
+ if (dev->dma_state == DMA_INTERNAL) {
+ dma_reg |= R852_DMA_INTERNAL;
+ /* Precaution to make sure HW doesn't write */
+ /* to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ } else {
+ dma_reg |= R852_DMA_MEMORY;
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_dma_addr));
+ }
+
+ /* Precaution: make sure write reached the device */
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
+
+ /* Set dma irq */
+ dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ dma_irq_reg |
+ R852_DMA_IRQ_INTERNAL |
+ R852_DMA_IRQ_ERROR |
+ R852_DMA_IRQ_MEMORY);
+}
+
+/*
+ * Disable dma, called from the interrupt handler, which specifies
+ * success of the operation via 'error' argument
+ */
+static void r852_dma_done(struct r852_device *dev, int error)
+{
+ WARN_ON(dev->dma_stage == 0);
+
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
+ r852_read_reg_dword(dev, R852_DMA_IRQ_STA));
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);
+
+ /* Precaution to make sure HW doesn't write to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ dev->dma_error = error;
+ dev->dma_stage = 0;
+
+ if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
+ pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
+ dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+}
+
+/*
+ * Wait until DMA is done; this covers both phases of the transfer.
+ */
+static int r852_dma_wait(struct r852_device *dev)
+{
+ long timeout = wait_for_completion_timeout(&dev->dma_done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ dbg("timeout waiting for DMA interrupt");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Read/write one page (512 bytes) using DMA. Only whole pages can be
+ * transferred this way.
+ */
+static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
+{
+ int bounce = 0;
+ unsigned long flags;
+ int error;
+
+ dev->dma_error = 0;
+
+ /* Set dma direction */
+ dev->dma_dir = do_read;
+ dev->dma_stage = 1;
+ INIT_COMPLETION(dev->dma_done);
+
+ dbg_verbose("doing dma %s ", do_read ? "read" : "write");
+
+	/* Set the initial DMA state: for reads, first fill the on-board
+	   buffer from the device; for writes, first fill it from memory */
+ dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;
+
+	/* use the bounce buffer if the incoming buffer is not 512-byte aligned */
+ if ((unsigned long)buf & (R852_DMA_LEN-1))
+ bounce = 1;
+
+ if (!bounce) {
+ dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf,
+ R852_DMA_LEN,
+ (do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+
+ if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr))
+ bounce = 1;
+ }
+
+ if (bounce) {
+ dbg_verbose("dma: using bounce buffer");
+ dev->phys_dma_addr = dev->phys_bounce_buffer;
+ if (!do_read)
+ memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
+ }
+
+ /* Enable DMA */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ r852_dma_enable(dev);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ /* Wait till complete */
+ error = r852_dma_wait(dev);
+
+ if (error) {
+ r852_dma_done(dev, error);
+ return;
+ }
+
+ if (do_read && bounce)
+ memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
+}
+
+/*
+ * Program data lines of the nand chip to send data to it
+ */
+void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ uint32_t reg;
+
+ /* Don't allow any access to hardware if we suspect card removal */
+ if (dev->card_unstable)
+ return;
+
+	/* Special case for a whole sector write */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, (uint8_t *)buf, 0);
+ return;
+ }
+
+	/* write DWORD chunks - faster */
+	while (len >= 4) {
+ reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
+ r852_write_reg_dword(dev, R852_DATALINE, reg);
+ buf += 4;
+ len -= 4;
+
+ }
+
+ /* write rest */
+ while (len)
+ r852_write_reg(dev, R852_DATALINE, *buf++);
+}
+
+/*
+ * Read data lines of the nand chip to retrieve data
+ */
+void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ uint32_t reg;
+
+ if (dev->card_unstable) {
+		/* since we can't signal an error here, at least return
+		   a predictable buffer */
+ memset(buf, 0, len);
+ return;
+ }
+
+ /* special case for whole sector read */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, buf, 1);
+ return;
+ }
+
+ /* read in dword sized chunks */
+ while (len >= 4) {
+
+ reg = r852_read_reg_dword(dev, R852_DATALINE);
+ *buf++ = reg & 0xFF;
+ *buf++ = (reg >> 8) & 0xFF;
+ *buf++ = (reg >> 16) & 0xFF;
+ *buf++ = (reg >> 24) & 0xFF;
+ len -= 4;
+ }
+
+	/* read the rest byte by byte */
+ while (len--)
+ *buf++ = r852_read_reg(dev, R852_DATALINE);
+}
+
+/*
+ * Read one byte from nand chip
+ */
+static uint8_t r852_read_byte(struct mtd_info *mtd)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ /* Same problem as in r852_read_buf.... */
+ if (dev->card_unstable)
+ return 0;
+
+ return r852_read_reg(dev, R852_DATALINE);
+}
+
+
+/*
+ * Readback the buffer to verify it
+ */
+int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ /* We can't be sure about anything here... */
+ if (dev->card_unstable)
+ return -1;
+
+	/* This will never happen, unless you wired up a NAND chip
+	   with a page size larger than 512 bytes to the reader */
+ if (len > SM_SECTOR_SIZE)
+ return 0;
+
+ r852_read_buf(mtd, dev->tmp_buffer, len);
+ return memcmp(buf, dev->tmp_buffer, len);
+}
+
+/*
+ * Control several chip lines & send commands
+ */
+void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+
+ dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
+ R852_CTL_ON | R852_CTL_CARDENABLE);
+
+ if (ctrl & NAND_ALE)
+ dev->ctlreg |= R852_CTL_DATA;
+
+ if (ctrl & NAND_CLE)
+ dev->ctlreg |= R852_CTL_COMMAND;
+
+ if (ctrl & NAND_NCE)
+ dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
+ else
+ dev->ctlreg &= ~R852_CTL_WRITE;
+
+		/* when a write is started, enable write access */
+ if (dat == NAND_CMD_ERASE1)
+ dev->ctlreg |= R852_CTL_WRITE;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need
+ to set write mode */
+ if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
+ dev->ctlreg |= R852_CTL_WRITE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ if (dat != NAND_CMD_NONE)
+ r852_write_reg(dev, R852_DATALINE, dat);
+}
+
+/*
+ * Wait until the card is ready.
+ * Based on nand_wait, but reports a failure status on DMA errors.
+ */
+int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct r852_device *dev = chip->priv;
+
+ unsigned long timeout;
+ int status;
+
+ timeout = jiffies + (chip->state == FL_ERASING ?
+ msecs_to_jiffies(400) : msecs_to_jiffies(20));
+
+ while (time_before(jiffies, timeout))
+ if (chip->dev_ready(mtd))
+ break;
+
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ status = (int)chip->read_byte(mtd);
+
+	/* Unfortunately, there is no way to report a detailed error status... */
+ if (dev->dma_error) {
+ status |= NAND_STATUS_FAIL;
+ dev->dma_error = 0;
+ }
+ return status;
+}
+
+/*
+ * Check if card is ready
+ */
+
+int r852_ready(struct mtd_info *mtd)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
+}
+
+
+/*
+ * Set ECC engine mode
+*/
+
+void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return;
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ /* enable ecc generation/check*/
+ dev->ctlreg |= R852_CTL_ECC_ENABLE;
+
+ /* flush ecc buffer */
+ r852_write_reg(dev, R852_CTL,
+ dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return;
+
+ case NAND_ECC_READSYN:
+ /* disable ecc generation */
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+}
+
+/*
+ * Calculate ECC, only used for writes
+ */
+
+int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ struct sm_oob *oob = (struct sm_oob *)ecc_code;
+ uint32_t ecc1, ecc2;
+
+ if (dev->card_unstable)
+ return 0;
+
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
+ ecc2 = r852_read_reg_dword(dev, R852_DATALINE);
+
+ oob->ecc1[0] = (ecc1) & 0xFF;
+ oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
+ oob->ecc1[2] = (ecc1 >> 16) & 0xFF;
+
+ oob->ecc2[0] = (ecc2) & 0xFF;
+ oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
+ oob->ecc2[2] = (ecc2 >> 16) & 0xFF;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return 0;
+}
+
+/*
+ * Correct the data using ECC, hw did almost everything for us
+ */
+
+int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ uint16_t ecc_reg;
+ uint8_t ecc_status, err_byte;
+ int i, error = 0;
+
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return 0;
+
+ if (dev->dma_error) {
+ dev->dma_error = 0;
+ return -1;
+ }
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+ ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+
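+	/*
+	 * The 32-bit ECC register holds one 16-bit syndrome per 256-byte
+	 * half of the sector: the low byte is the failing byte offset and
+	 * the high byte carries the status flags plus the bit index.
+	 */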
+ for (i = 0 ; i <= 1 ; i++) {
+
+ ecc_status = (ecc_reg >> 8) & 0xFF;
+
+ /* ecc uncorrectable error */
+ if (ecc_status & R852_ECC_FAIL) {
+ dbg("ecc: unrecoverable error, in half %d", i);
+ error = -1;
+ goto exit;
+ }
+
+ /* correctable error */
+ if (ecc_status & R852_ECC_CORRECTABLE) {
+
+ err_byte = ecc_reg & 0xFF;
+ dbg("ecc: recoverable error, "
+ "in half %d, byte %d, bit %d", i,
+ err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);
+
+ dat[err_byte] ^=
+ 1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
+ error++;
+ }
+
+ dat += 256;
+ ecc_reg >>= 16;
+ }
+exit:
+ return error;
+}
+
+/*
+ * This is copy of nand_read_oob_std
+ * nand_read_oob_syndrome assumes we can send column address - we can't
+ */
+static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ if (sndcmd) {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ sndcmd = 0;
+ }
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return sndcmd;
+}
+
+/*
+ * Start the nand engine
+ */
+
+void r852_engine_enable(struct r852_device *dev)
+{
+ if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ } else {
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ }
+ msleep(300);
+ r852_write_reg(dev, R852_CTL, 0);
+}
+
+
+/*
+ * Stop the nand engine
+ */
+
+void r852_engine_disable(struct r852_device *dev)
+{
+ r852_write_reg_dword(dev, R852_HW, 0);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
+}
+
+/*
+ * Test if card is present
+ */
+
+void r852_card_update_present(struct r852_device *dev)
+{
+ unsigned long flags;
+ uint8_t reg;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ reg = r852_read_reg(dev, R852_CARD_STA);
+ dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Update card detection IRQ state according to current card state
+ * which is read in r852_card_update_present
+ */
+void r852_update_card_detect(struct r852_device *dev)
+{
+ int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ dev->card_unstable = 0;
+
+ card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
+ card_detect_reg |= R852_CARD_IRQ_GENABLE;
+
+ card_detect_reg |= dev->card_detected ?
+ R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
+
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
+}
+
+ssize_t r852_media_type_show(struct device *sys_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
+ struct r852_device *dev = r852_get_dev(mtd);
+ char *data = dev->sm ? "smartmedia" : "xd";
+
+ strcpy(buf, data);
+ return strlen(data);
+}
+
+DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
+
+
+/* Detect properties of card in slot */
+void r852_update_media_status(struct r852_device *dev)
+{
+ uint8_t reg;
+ unsigned long flags;
+ int readonly;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (!dev->card_detected) {
+ message("card removed");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ;
+ }
+
+ readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
+ reg = r852_read_reg(dev, R852_DMA_CAP);
+ dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);
+
+ message("detected %s %s card in slot",
+ dev->sm ? "SmartMedia" : "xD",
+ readonly ? "readonly" : "writeable");
+
+ dev->readonly = readonly;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Register the nand device
+ * Called when the card is detected
+ */
+int r852_register_nand_device(struct r852_device *dev)
+{
+ dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+
+ if (!dev->mtd)
+ goto error1;
+
+ WARN_ON(dev->card_registred);
+
+ dev->mtd->owner = THIS_MODULE;
+ dev->mtd->priv = dev->chip;
+ dev->mtd->dev.parent = &dev->pci_dev->dev;
+
+ if (dev->readonly)
+ dev->chip->options |= NAND_ROM;
+
+ r852_engine_enable(dev);
+
+ if (sm_register_device(dev->mtd, dev->sm))
+ goto error2;
+
+ if (device_create_file(&dev->mtd->dev, &dev_attr_media_type))
+ message("can't create media type sysfs attribute");
+
+ dev->card_registred = 1;
+ return 0;
+error2:
+ kfree(dev->mtd);
+error1:
+ /* Force card redetect */
+ dev->card_detected = 0;
+ return -1;
+}
+
+/*
+ * Unregister the card
+ */
+
+void r852_unregister_nand_device(struct r852_device *dev)
+{
+ if (!dev->card_registred)
+ return;
+
+ device_remove_file(&dev->mtd->dev, &dev_attr_media_type);
+ nand_release(dev->mtd);
+ r852_engine_disable(dev);
+ dev->card_registred = 0;
+ kfree(dev->mtd);
+ dev->mtd = NULL;
+}
+
+/* Card state updater */
+void r852_card_detect_work(struct work_struct *work)
+{
+ struct r852_device *dev =
+ container_of(work, struct r852_device, card_detect_work.work);
+
+ r852_card_update_present(dev);
+ r852_update_card_detect(dev);
+ dev->card_unstable = 0;
+
+ /* False alarm */
+ if (dev->card_detected == dev->card_registred)
+ goto exit;
+
+ /* Read media properties */
+ r852_update_media_status(dev);
+
+ /* Register the card */
+ if (dev->card_detected)
+ r852_register_nand_device(dev);
+ else
+ r852_unregister_nand_device(dev);
+exit:
+ r852_update_card_detect(dev);
+}
+
+/* Ack + disable IRQ generation */
+static void r852_disable_irqs(struct r852_device *dev)
+{
+ uint8_t reg;
+ reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);
+
+ reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ reg & ~R852_DMA_IRQ_MASK);
+
+ r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
+}
+
+/* Interrupt handler */
+static irqreturn_t r852_irq(int irq, void *data)
+{
+ struct r852_device *dev = (struct r852_device *)data;
+
+ uint8_t card_status, dma_status;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+
+ /* handle card detection interrupts first */
+ card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
+ r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
+
+ if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {
+
+ ret = IRQ_HANDLED;
+ dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);
+
+		/* we shouldn't receive any interrupts while we wait for the
+		   card to settle */
+ WARN_ON(dev->card_unstable);
+
+ /* disable irqs while card is unstable */
+		/* this will time out an active DMA, but that is better than garbage */
+ r852_disable_irqs(dev);
+
+ if (dev->card_unstable)
+ goto out;
+
+		/* let the card state settle a bit, and then do the work */
+ dev->card_unstable = 1;
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, msecs_to_jiffies(100));
+ goto out;
+ }
+
+
+ /* Handle dma interrupts */
+ dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);
+
+ if (dma_status & R852_DMA_IRQ_MASK) {
+
+ ret = IRQ_HANDLED;
+
+ if (dma_status & R852_DMA_IRQ_ERROR) {
+ dbg("received dma error IRQ");
+ r852_dma_done(dev, -EIO);
+ complete(&dev->dma_done);
+ goto out;
+ }
+
+ /* received DMA interrupt out of nowhere? */
+ WARN_ON_ONCE(dev->dma_stage == 0);
+
+ if (dev->dma_stage == 0)
+ goto out;
+
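+		/*
+		 * Each transfer runs in two stages: card <-> internal buffer
+		 * and internal buffer <-> memory.  When one half finishes,
+		 * flip dma_state and bump dma_stage; at stage 2 the second
+		 * half is started, and at stage 3 the whole transfer is done
+		 * and the waiter is woken.
+		 */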
+ /* done device access */
+ if (dev->dma_state == DMA_INTERNAL &&
+ (dma_status & R852_DMA_IRQ_INTERNAL)) {
+
+ dev->dma_state = DMA_MEMORY;
+ dev->dma_stage++;
+ }
+
+ /* done memory DMA */
+ if (dev->dma_state == DMA_MEMORY &&
+ (dma_status & R852_DMA_IRQ_MEMORY)) {
+ dev->dma_state = DMA_INTERNAL;
+ dev->dma_stage++;
+ }
+
+ /* Enable 2nd half of dma dance */
+ if (dev->dma_stage == 2)
+ r852_dma_enable(dev);
+
+ /* Operation done */
+ if (dev->dma_stage == 3) {
+ r852_dma_done(dev, 0);
+ complete(&dev->dma_done);
+ }
+ goto out;
+ }
+
+ /* Handle unknown interrupts */
+ if (dma_status)
+ dbg("bad dma IRQ status = %x", dma_status);
+
+ if (card_status & ~R852_CARD_STA_CD)
+ dbg("strange card status = %x", card_status);
+
+out:
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ret;
+}
+
+int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+ int error;
+ struct nand_chip *chip;
+ struct r852_device *dev;
+
+ /* pci initialization */
+ error = pci_enable_device(pci_dev);
+
+ if (error)
+ goto error1;
+
+ pci_set_master(pci_dev);
+
+ error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (error)
+ goto error2;
+
+ error = pci_request_regions(pci_dev, DRV_NAME);
+
+ if (error)
+ goto error3;
+
+ error = -ENOMEM;
+
+ /* init nand chip, but register it only on card insert */
+ chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+
+ if (!chip)
+ goto error4;
+
+ /* commands */
+ chip->cmd_ctrl = r852_cmdctl;
+ chip->waitfunc = r852_wait;
+ chip->dev_ready = r852_ready;
+
+ /* I/O */
+ chip->read_byte = r852_read_byte;
+ chip->read_buf = r852_read_buf;
+ chip->write_buf = r852_write_buf;
+ chip->verify_buf = r852_verify_buf;
+
+ /* ecc */
+ chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+ chip->ecc.size = R852_DMA_LEN;
+ chip->ecc.bytes = SM_OOB_SIZE;
+ chip->ecc.strength = 2;
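+	/* the hardware ECC works on whole 512-byte sectors and can correct
+	 * one bit in each 256-byte half, hence strength 2 per step */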
+ chip->ecc.hwctl = r852_ecc_hwctl;
+ chip->ecc.calculate = r852_ecc_calculate;
+ chip->ecc.correct = r852_ecc_correct;
+
+ /* TODO: hack */
+ chip->ecc.read_oob = r852_read_oob;
+
+ /* init our device structure */
+ dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
+
+ if (!dev)
+ goto error5;
+
+ chip->priv = dev;
+ dev->chip = chip;
+ dev->pci_dev = pci_dev;
+ pci_set_drvdata(pci_dev, dev);
+
+ dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
+ &dev->phys_bounce_buffer);
+
+ if (!dev->bounce_buffer)
+ goto error6;
+
+
+ error = -ENODEV;
+ dev->mmio = pci_ioremap_bar(pci_dev, 0);
+
+ if (!dev->mmio)
+ goto error7;
+
+ error = -ENOMEM;
+ dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+
+ if (!dev->tmp_buffer)
+ goto error8;
+
+ init_completion(&dev->dma_done);
+
+ dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
+
+ if (!dev->card_workqueue)
+ goto error9;
+
+ INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
+
+	/* shut down everything - precaution */
+ r852_engine_disable(dev);
+ r852_disable_irqs(dev);
+
+ r852_dma_test(dev);
+
+ dev->irq = pci_dev->irq;
+ spin_lock_init(&dev->irqlock);
+
+ dev->card_detected = 0;
+ r852_card_update_present(dev);
+
+	/* register the irq handler */
+ error = -ENODEV;
+ if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
+ DRV_NAME, dev))
+ goto error10;
+
+ /* kick initial present test */
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, 0);
+
+
+ printk(KERN_NOTICE DRV_NAME ": driver loaded successfully\n");
+ return 0;
+
+error10:
+ destroy_workqueue(dev->card_workqueue);
+error9:
+ kfree(dev->tmp_buffer);
+error8:
+ pci_iounmap(pci_dev, dev->mmio);
+error7:
+ pci_free_consistent(pci_dev, R852_DMA_LEN,
+ dev->bounce_buffer, dev->phys_bounce_buffer);
+error6:
+ kfree(dev);
+error5:
+ kfree(chip);
+error4:
+ pci_release_regions(pci_dev);
+error3:
+error2:
+ pci_disable_device(pci_dev);
+error1:
+ return error;
+}
+
+void r852_remove(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+	/* Stop the detect workqueue -
+	   we are going to unregister the device anyway */
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ destroy_workqueue(dev->card_workqueue);
+
+ /* Unregister the device, this might make more IO */
+ r852_unregister_nand_device(dev);
+
+ /* Stop interrupts */
+ r852_disable_irqs(dev);
+ synchronize_irq(dev->irq);
+ free_irq(dev->irq, dev);
+
+ /* Cleanup */
+ kfree(dev->tmp_buffer);
+ pci_iounmap(pci_dev, dev->mmio);
+ pci_free_consistent(pci_dev, R852_DMA_LEN,
+ dev->bounce_buffer, dev->phys_bounce_buffer);
+
+ kfree(dev->chip);
+ kfree(dev);
+
+ /* Shutdown the PCI device */
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+}
+
+void r852_shutdown(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ r852_disable_irqs(dev);
+ synchronize_irq(dev->irq);
+ pci_disable_device(pci_dev);
+}
+
+#ifdef CONFIG_PM
+static int r852_suspend(struct device *device)
+{
+ struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
+
+ if (dev->ctlreg & R852_CTL_CARDENABLE)
+ return -EBUSY;
+
+ /* First make sure the detect work is gone */
+ cancel_delayed_work_sync(&dev->card_detect_work);
+
+ /* Turn off the interrupts and stop the device */
+ r852_disable_irqs(dev);
+ r852_engine_disable(dev);
+
+	/* If the card was pulled out just during the suspend, which is very
+	   unlikely, we will remove it on resume; it is too late now
+	   anyway... */
+ dev->card_unstable = 0;
+ return 0;
+}
+
+static int r852_resume(struct device *device)
+{
+ struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
+
+ r852_disable_irqs(dev);
+ r852_card_update_present(dev);
+ r852_engine_disable(dev);
+
+
+ /* If card status changed, just do the work */
+ if (dev->card_detected != dev->card_registred) {
+ dbg("card was %s during low power state",
+ dev->card_detected ? "added" : "removed");
+
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, msecs_to_jiffies(1000));
+ return 0;
+ }
+
+ /* Otherwise, initialize the card */
+ if (dev->card_registred) {
+ r852_engine_enable(dev);
+ dev->chip->select_chip(dev->mtd, 0);
+ dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1);
+ dev->chip->select_chip(dev->mtd, -1);
+ }
+
+ /* Program card detection IRQ */
+ r852_update_card_detect(dev);
+ return 0;
+}
+#else
+#define r852_suspend NULL
+#define r852_resume NULL
+#endif
+
+static const struct pci_device_id r852_pci_id_tbl[] = {
+
+ { PCI_VDEVICE(RICOH, 0x0852), },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
+
+static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
+
+static struct pci_driver r852_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = r852_pci_id_tbl,
+ .probe = r852_probe,
+ .remove = r852_remove,
+ .shutdown = r852_shutdown,
+ .driver.pm = &r852_pm_ops,
+};
+
+static __init int r852_module_init(void)
+{
+ return pci_register_driver(&r852_pci_driver);
+}
+
+static void __exit r852_module_exit(void)
+{
+ pci_unregister_driver(&r852_pci_driver);
+}
+
+module_init(r852_module_init);
+module_exit(r852_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
diff --git a/drivers/mtd/nand/r852.h b/drivers/mtd/nand/r852.h
new file mode 100644
index 00000000..e6a21d9d
--- /dev/null
+++ b/drivers/mtd/nand/r852.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mtd/nand.h>
+#include <linux/spinlock.h>
+
+
+/* NAND interface + ECC
+   A byte write/read does one cycle on the NAND data lines;
+   a dword write/read does 4 cycles.
+   If R852_CTL_ECC_ACCESS is set in R852_CTL, a dword read returns the
+   results of the ECC correction, provided a DMA read was done before.
+   If a write was done, two dword reads return the generated ECC checksums.
+*/
+#define R852_DATALINE 0x00
+
+/* control register */
+#define R852_CTL 0x04
+#define R852_CTL_COMMAND 0x01 /* send command (#CLE)*/
+#define R852_CTL_DATA 0x02 /* read/write data (#ALE)*/
+#define R852_CTL_ON		0x04  /* only seems to control the hd led, */
+ /* but has to be set on start...*/
+#define R852_CTL_RESET 0x08 /* unknown, set only on start once*/
+#define R852_CTL_CARDENABLE 0x10 /* probably (#CE) - always set*/
+#define R852_CTL_ECC_ENABLE 0x20 /* enable ecc engine */
+#define R852_CTL_ECC_ACCESS 0x40 /* read/write ecc via reg #0*/
+#define R852_CTL_WRITE 0x80 /* set when performing writes (#WP) */
+
+/* card detection status */
+#define R852_CARD_STA 0x05
+
+#define R852_CARD_STA_CD 0x01 /* state of #CD line, same as 0x04 */
+#define R852_CARD_STA_RO 0x02 /* card is readonly */
+#define R852_CARD_STA_PRESENT 0x04 /* card is present (#CD) */
+#define R852_CARD_STA_ABSENT 0x08 /* card is absent */
+#define R852_CARD_STA_BUSY 0x80 /* card is busy - (#R/B) */
+
+/* card detection irq status & enable*/
+#define R852_CARD_IRQ_STA 0x06 /* IRQ status */
+#define R852_CARD_IRQ_ENABLE 0x07 /* IRQ enable */
+
+#define R852_CARD_IRQ_CD 0x01 /* fire when #CD lights, same as 0x04*/
+#define R852_CARD_IRQ_REMOVE 0x04 /* detect card removal */
+#define R852_CARD_IRQ_INSERT 0x08 /* detect card insert */
+#define R852_CARD_IRQ_UNK1 0x10 /* unknown */
+#define R852_CARD_IRQ_GENABLE 0x80 /* general enable */
+#define R852_CARD_IRQ_MASK 0x1D
+
+
+
+/* hardware enable */
+#define R852_HW 0x08
+#define R852_HW_ENABLED 0x01 /* hw enabled */
+#define R852_HW_UNKNOWN 0x80
+
+
+/* dma capabilities */
+#define R852_DMA_CAP 0x09
+#define R852_SMBIT 0x20 /* if set with bit #6 or bit #7, then */
+ /* hw is smartmedia */
+#define R852_DMA1 0x40 /* if set w/bit #7, dma is supported */
+#define R852_DMA2 0x80 /* if set w/bit #6, dma is supported */
+
+
+/* physical DMA address - 32 bit value*/
+#define R852_DMA_ADDR 0x0C
+
+
+/* dma settings */
+#define R852_DMA_SETTINGS 0x10
+#define R852_DMA_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_READ 0x02 /* 0 = write, 1 = read */
+#define R852_DMA_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+
+/* dma IRQ status */
+#define R852_DMA_IRQ_STA 0x14
+
+/* dma IRQ enable */
+#define R852_DMA_IRQ_ENABLE 0x18
+
+#define R852_DMA_IRQ_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_IRQ_ERROR 0x02 /* error did happen */
+#define R852_DMA_IRQ_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+#define R852_DMA_IRQ_MASK 0x07 /* mask of all IRQ bits */
+
+
+/* ECC syndrome format - a read from reg #0 returns two copies of these,
+   one for each half of the page.
+   The first byte is the error byte location; the second holds the bit
+   location plus flags */
+#define R852_ECC_ERR_BIT_MSK 0x07 /* error bit location */
+#define R852_ECC_CORRECT 0x10 /* no errors - (guessed) */
+#define R852_ECC_CORRECTABLE 0x20 /* correctable error exist */
+#define R852_ECC_FAIL 0x40 /* non correctable error detected */
+
+#define R852_DMA_LEN 512
+
+#define DMA_INTERNAL 0
+#define DMA_MEMORY 1
+
+struct r852_device {
+ void __iomem *mmio; /* mmio */
+ struct mtd_info *mtd; /* mtd backpointer */
+ struct nand_chip *chip; /* nand chip backpointer */
+ struct pci_dev *pci_dev; /* pci backpointer */
+
+ /* dma area */
+ dma_addr_t phys_dma_addr; /* bus address of buffer*/
+ struct completion dma_done; /* data transfer done */
+
+ dma_addr_t phys_bounce_buffer; /* bus address of bounce buffer */
+ uint8_t *bounce_buffer; /* virtual address of bounce buffer */
+
+ int dma_dir; /* 1 = read, 0 = write */
+ int dma_stage; /* 0 - idle, 1 - first step,
+ 2 - second step */
+
+ int dma_state; /* 0 = internal, 1 = memory */
+ int dma_error; /* dma errors */
+ int dma_usable; /* is it possible to use dma */
+
+ /* card status area */
+ struct delayed_work card_detect_work;
+ struct workqueue_struct *card_workqueue;
+ int card_registred; /* card registered with mtd */
+ int card_detected; /* card detected in slot */
+	int card_unstable;		/* the card was just inserted or
+					   removed; its state isn't known yet */
+ int readonly; /* card is readonly */
+ int sm; /* Is card smartmedia */
+
+ /* interrupt handling */
+ spinlock_t irqlock; /* IRQ protecting lock */
+ int irq; /* irq num */
+ /* misc */
+ void *tmp_buffer; /* temporary buffer */
+ uint8_t ctlreg; /* cached contents of control reg */
+};
+
+#define DRV_NAME "r852"
+
+
+#define dbg(format, ...) \
+ if (debug) \
+ printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+#define dbg_verbose(format, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+
+#define message(format, ...) \
+ printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
new file mode 100644
index 00000000..e55b5cfb
--- /dev/null
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -0,0 +1,624 @@
+/*
+ * drivers/mtd/nand/rtc_from4.c
+ *
+ * Copyright (C) 2004 Red Hat, Inc.
+ *
+ * Derived from drivers/mtd/nand/spia.c
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the AG-AND flash device found on the
+ * Renesas Technology Corp. Flash ROM 4-slot interface board (FROM_BOARD4),
+ * which utilizes the Renesas HN29V1G91T-30 part.
+ * This chip is a 1 Gibibit (128 MiB x 8 bits) AG-AND flash device.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/rslib.h>
+#include <linux/bitrev.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+
+/*
+ * MTD structure for Renesas board
+ */
+static struct mtd_info *rtc_from4_mtd = NULL;
+
+#define RTC_FROM4_MAX_CHIPS 2
+
+/* HS77x9 processor register defines */
+#define SH77X9_BCR1 ((volatile unsigned short *)(0xFFFFFF60))
+#define SH77X9_BCR2 ((volatile unsigned short *)(0xFFFFFF62))
+#define SH77X9_WCR1 ((volatile unsigned short *)(0xFFFFFF64))
+#define SH77X9_WCR2 ((volatile unsigned short *)(0xFFFFFF66))
+#define SH77X9_MCR ((volatile unsigned short *)(0xFFFFFF68))
+#define SH77X9_PCR ((volatile unsigned short *)(0xFFFFFF6C))
+#define SH77X9_FRQCR ((volatile unsigned short *)(0xFFFFFF80))
+
+/*
+ * Values specific to the Renesas Technology Corp. FROM_BOARD4 (used with HS77x9 processor)
+ */
+/* Address where flash is mapped */
+#define RTC_FROM4_FIO_BASE 0x14000000
+
+/* CLE and ALE are tied to address lines 5 & 4, respectively */
+#define RTC_FROM4_CLE (1 << 5)
+#define RTC_FROM4_ALE (1 << 4)
+
+/* address lines A24-A22 used for chip selection */
+#define RTC_FROM4_NAND_ADDR_SLOT3 (0x00800000)
+#define RTC_FROM4_NAND_ADDR_SLOT4 (0x00C00000)
+#define RTC_FROM4_NAND_ADDR_FPGA (0x01000000)
+/* mask address lines A24-A22 used for chip selection */
+#define RTC_FROM4_NAND_ADDR_MASK (RTC_FROM4_NAND_ADDR_SLOT3 | RTC_FROM4_NAND_ADDR_SLOT4 | RTC_FROM4_NAND_ADDR_FPGA)
+
+/* FPGA status register for checking device ready (bit zero) */
+#define RTC_FROM4_FPGA_SR (RTC_FROM4_NAND_ADDR_FPGA | 0x00000002)
+#define RTC_FROM4_DEVICE_READY 0x0001
+
+/* FPGA Reed-Solomon ECC Control register */
+
+#define RTC_FROM4_RS_ECC_CTL (RTC_FROM4_NAND_ADDR_FPGA | 0x00000050)
+#define RTC_FROM4_RS_ECC_CTL_CLR (1 << 7)
+#define RTC_FROM4_RS_ECC_CTL_GEN (1 << 6)
+#define RTC_FROM4_RS_ECC_CTL_FD_E (1 << 5)
+
+/* FPGA Reed-Solomon ECC code base */
+#define RTC_FROM4_RS_ECC (RTC_FROM4_NAND_ADDR_FPGA | 0x00000060)
+#define RTC_FROM4_RS_ECCN (RTC_FROM4_NAND_ADDR_FPGA | 0x00000080)
+
+/* FPGA Reed-Solomon ECC check register */
+#define RTC_FROM4_RS_ECC_CHK (RTC_FROM4_NAND_ADDR_FPGA | 0x00000070)
+#define RTC_FROM4_RS_ECC_CHK_ERROR (1 << 7)
+
+#define ERR_STAT_ECC_AVAILABLE 0x20
+
+/* Undefine for software ECC */
+#define RTC_FROM4_HWECC 1
+
+/* Define as 1 for no virtual erase blocks (in JFFS2) */
+#define RTC_FROM4_NO_VIRTBLOCKS 0
+
+/*
+ * Module stuff
+ */
+static void __iomem *rtc_from4_fio_base = (void *)P2SEGADDR(RTC_FROM4_FIO_BASE);
+
+static const struct mtd_partition partition_info[] = {
+ {
+ .name = "Renesas flash partition 1",
+ .offset = 0,
+ .size = MTDPART_SIZ_FULL},
+};
+
+#define NUM_PARTITIONS 1
+
+/*
+ * hardware specific flash bbt descriptors
+ * Note: this is to allow debugging by disabling
+ * NAND_BBT_CREATE and/or NAND_BBT_WRITE
+ *
+ */
+static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr rtc_from4_bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 40,
+ .len = 4,
+ .veroffs = 44,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr rtc_from4_bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 40,
+ .len = 4,
+ .veroffs = 44,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+#ifdef RTC_FROM4_HWECC
+
+/* the Reed Solomon control structure */
+static struct rs_control *rs_decoder;
+
+/*
+ * hardware specific Out Of Band information
+ */
+static struct nand_ecclayout rtc_from4_nand_oobinfo = {
+ .eccbytes = 32,
+ .eccpos = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31},
+ .oobfree = {{32, 32}}
+};
+
+#endif
+
+/*
+ * rtc_from4_hwcontrol - hardware specific access to control-lines
+ * @mtd: MTD device structure
+ * @cmd: hardware control command
+ *
+ * Address lines (A5 and A4) are used to control Command and Address Latch
+ * Enable on this board, so set the read/write address appropriately.
+ *
+ * Chip Enable is also controlled by the Chip Select (CS5) and
+ * Address lines (A24-A22), so no action is required here.
+ *
+ */
+static void rtc_from4_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *chip = (mtd->priv);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_CLE);
+ else
+ writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_ALE);
+}
+
+/*
+ * rtc_from4_nand_select_chip - hardware specific chip select
+ * @mtd: MTD device structure
+ * @chip: Chip to select (0 == slot 3, 1 == slot 4)
+ *
+ * The chip select is based on address lines A24-A22.
+ * This driver uses flash slots 3 and 4 (A23-A22).
+ *
+ */
+static void rtc_from4_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+
+ this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R & ~RTC_FROM4_NAND_ADDR_MASK);
+ this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_NAND_ADDR_MASK);
+
+ switch (chip) {
+
+ case 0: /* select slot 3 chip */
+ this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT3);
+ this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT3);
+ break;
+ case 1: /* select slot 4 chip */
+ this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT4);
+ this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT4);
+ break;
+
+ }
+}
+
+/*
+ * rtc_from4_nand_device_ready - hardware specific ready/busy check
+ * @mtd: MTD device structure
+ *
+ * This board provides the Ready/Busy state in the status register
+ * of the FPGA. Bit zero indicates the RDY(1)/BSY(0) signal.
+ *
+ */
+static int rtc_from4_nand_device_ready(struct mtd_info *mtd)
+{
+ unsigned short status;
+
+ status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_FPGA_SR));
+
+ return (status & RTC_FROM4_DEVICE_READY);
+
+}
+
+/*
+ * deplete - code to perform device recovery in case there was a power loss
+ * @mtd: MTD device structure
+ * @chip: Chip to select (0 == slot 3, 1 == slot 4)
+ *
+ * If there was a sudden loss of power during an erase operation, a
+ * "device recovery" operation must be performed when power is restored
+ * to ensure correct operation. This routine performs the required steps
+ * for the requested chip.
+ *
+ * See page 86 of the data sheet for details.
+ *
+ */
+static void deplete(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+
+ /* wait until device is ready */
+ while (!this->dev_ready(mtd)) ;
+
+ this->select_chip(mtd, chip);
+
+ /* Send the commands for device recovery, phase 1 */
+ this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0000);
+ this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1);
+
+ /* Send the commands for device recovery, phase 2 */
+ this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0004);
+ this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1);
+
+}
+
+#ifdef RTC_FROM4_HWECC
+/*
+ * rtc_from4_enable_hwecc - hardware specific hardware ECC enable function
+ * @mtd: MTD device structure
+ * @mode: I/O mode; read or write
+ *
+ * enable hardware ECC for data read or write
+ *
+ */
+static void rtc_from4_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ volatile unsigned short *rs_ecc_ctl = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CTL);
+ unsigned short status;
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_FD_E;
+
+ *rs_ecc_ctl = status;
+ break;
+
+ case NAND_ECC_READSYN:
+ status = 0x00;
+
+ *rs_ecc_ctl = status;
+ break;
+
+ case NAND_ECC_WRITE:
+ status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_GEN | RTC_FROM4_RS_ECC_CTL_FD_E;
+
+ *rs_ecc_ctl = status;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+}
+
+/*
+ * rtc_from4_calculate_ecc - hardware specific code to read ECC code
+ * @mtd: MTD device structure
+ * @dat: buffer containing the data to generate ECC codes
+ * @ecc_code: ECC codes calculated
+ *
+ * The ECC code is calculated by the FPGA. All we have to do is read the values
+ * from the FPGA registers.
+ *
+ * Note: We read from the inverted registers, since data is inverted before
+ * the code is calculated. So all 0xff data (blank page) results in all 0xff rs code
+ *
+ */
+static void rtc_from4_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+{
+ volatile unsigned short *rs_eccn = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECCN);
+ unsigned short value;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ value = *rs_eccn;
+ ecc_code[i] = (unsigned char)value;
+ rs_eccn++;
+ }
+ ecc_code[7] |= 0x0f; /* set the last four bits (not used) */
+}
+
+/*
+ * rtc_from4_correct_data - hardware specific code to correct data using ECC code
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to generate ECC codes
+ * @ecc1: ECC codes read
+ * @ecc2: ECC codes calculated
+ *
+ * The FPGA quickly tells us whether there is an error. If not, we return
+ * straight away; otherwise we read the ECC results from the FPGA and call
+ * the RS library to decode and, hopefully, correct the error.
+ *
+ */
+static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_char *ecc1, u_char *ecc2)
+{
+ int i, j, res;
+ unsigned short status;
+ uint16_t par[6], syn[6];
+ uint8_t ecc[8];
+ volatile unsigned short *rs_ecc;
+
+ status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CHK));
+
+ if (!(status & RTC_FROM4_RS_ECC_CHK_ERROR)) {
+ return 0;
+ }
+
+ /* Read the syndrome pattern from the FPGA and correct the bitorder */
+ rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC);
+ for (i = 0; i < 8; i++) {
+ ecc[i] = bitrev8(*rs_ecc);
+ rs_ecc++;
+ }
+
+ /* convert into 6 10bit syndrome fields */
+ par[5] = rs_decoder->index_of[(((uint16_t) ecc[0] >> 0) & 0x0ff) | (((uint16_t) ecc[1] << 8) & 0x300)];
+ par[4] = rs_decoder->index_of[(((uint16_t) ecc[1] >> 2) & 0x03f) | (((uint16_t) ecc[2] << 6) & 0x3c0)];
+ par[3] = rs_decoder->index_of[(((uint16_t) ecc[2] >> 4) & 0x00f) | (((uint16_t) ecc[3] << 4) & 0x3f0)];
+ par[2] = rs_decoder->index_of[(((uint16_t) ecc[3] >> 6) & 0x003) | (((uint16_t) ecc[4] << 2) & 0x3fc)];
+ par[1] = rs_decoder->index_of[(((uint16_t) ecc[5] >> 0) & 0x0ff) | (((uint16_t) ecc[6] << 8) & 0x300)];
+ par[0] = (((uint16_t) ecc[6] >> 2) & 0x03f) | (((uint16_t) ecc[7] << 6) & 0x3c0);
+
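+ /* Decode-path note (descriptive only): par[1]..par[5] were stored above
+ * in index (logarithm) form via index_of[], while par[0] is kept in
+ * polynomial form; par[j] == rs_decoder->nn marks a zero symbol and is
+ * skipped. The loop below evaluates the parity polynomial at alpha^i,
+ * where multiplying a symbol known by its logarithm par[j] by alpha^(i*j)
+ * reduces to the exponent addition par[j] + i*j, wrapped by rs_modnn().
+ */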
+ /* Convert to computable syndrome */
+ for (i = 0; i < 6; i++) {
+ syn[i] = par[0];
+ for (j = 1; j < 6; j++)
+ if (par[j] != rs_decoder->nn)
+ syn[i] ^= rs_decoder->alpha_to[rs_modnn(rs_decoder, par[j] + i * j)];
+
+ /* Convert to index form */
+ syn[i] = rs_decoder->index_of[syn[i]];
+ }
+
+ /* Let the library code do its magic. */
+ res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL);
+ if (res > 0) {
+ pr_debug("rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res);
+ }
+ return res;
+}
+
+/**
+ * rtc_from4_errstat - perform additional error status checks
+ * @mtd: MTD device structure
+ * @this: NAND chip structure
+ * @state: state of the operation
+ * @status: status code returned from read status
+ * @page: startpage inside the chip, must be called with (page & this->pagemask)
+ *
+ * Perform additional error status checks on erase and write failures
+ * to determine if errors are correctable. For this device, correctable
+ * 1-bit errors on erase and write are considered acceptable.
+ *
+ * note: see pages 34..37 of data sheet for details.
+ *
+ */
+static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this,
+ int state, int status, int page)
+{
+ int er_stat = 0;
+ int rtn, retlen;
+ size_t len;
+ uint8_t *buf;
+ int i;
+
+ this->cmdfunc(mtd, NAND_CMD_STATUS_CLEAR, -1, -1);
+
+ if (state == FL_ERASING) {
+
+ for (i = 0; i < 4; i++) {
+ if (!(status & 1 << (i + 1)))
+ continue;
+ this->cmdfunc(mtd, (NAND_CMD_STATUS_ERROR + i + 1),
+ -1, -1);
+ rtn = this->read_byte(mtd);
+ this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
+
+ /* err_ecc_not_avail */
+ if (!(rtn & ERR_STAT_ECC_AVAILABLE))
+ er_stat |= 1 << (i + 1);
+ }
+
+ } else if (state == FL_WRITING) {
+
+ unsigned long corrected = mtd->ecc_stats.corrected;
+
+ /* single bank write logic */
+ this->cmdfunc(mtd, NAND_CMD_STATUS_ERROR, -1, -1);
+ rtn = this->read_byte(mtd);
+ this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
+
+ if (!(rtn & ERR_STAT_ECC_AVAILABLE)) {
+ /* err_ecc_not_avail */
+ er_stat |= 1 << 1;
+ goto out;
+ }
+
+ len = mtd->writesize;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf) {
+ er_stat = 1;
+ goto out;
+ }
+
+ /* recovery read */
+ rtn = nand_do_read(mtd, page, len, &retlen, buf);
+
+ /* if read failed or > 1-bit error corrected */
+ if (rtn || (mtd->ecc_stats.corrected - corrected) > 1)
+ er_stat |= 1 << 1;
+ kfree(buf);
+ }
+out:
+ rtn = status;
+ if (er_stat == 0) { /* if ECC is available */
+ rtn = (status & ~NAND_STATUS_FAIL); /* clear the error bit */
+ }
+
+ return rtn;
+}
+#endif
+
+/*
+ * Main initialization routine
+ */
+static int __init rtc_from4_init(void)
+{
+ struct nand_chip *this;
+ unsigned short bcr1, bcr2, wcr2;
+ int i;
+ int ret;
+
+ /* Allocate memory for MTD device structure and private data */
+ rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+ if (!rtc_from4_mtd) {
+ printk("Unable to allocate Renesas NAND MTD device structure.\n");
+ return -ENOMEM;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&rtc_from4_mtd[1]);
+
+ /* Initialize structures */
+ memset(rtc_from4_mtd, 0, sizeof(struct mtd_info));
+ memset(this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ rtc_from4_mtd->priv = this;
+ rtc_from4_mtd->owner = THIS_MODULE;
+
+ /* set area 5 to PCMCIA mode to meet the tDH spec (data hold time; 9ns min) */
+ bcr1 = *SH77X9_BCR1 & ~0x0002;
+ bcr1 |= 0x0002;
+ *SH77X9_BCR1 = bcr1;
+
+ /* set */
+ bcr2 = *SH77X9_BCR2 & ~0x0c00;
+ bcr2 |= 0x0800;
+ *SH77X9_BCR2 = bcr2;
+
+ /* set area 5 wait states */
+ wcr2 = *SH77X9_WCR2 & ~0x1c00;
+ wcr2 |= 0x1c00;
+ *SH77X9_WCR2 = wcr2;
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_R = rtc_from4_fio_base;
+ this->IO_ADDR_W = rtc_from4_fio_base;
+ /* Set address of hardware control function */
+ this->cmd_ctrl = rtc_from4_hwcontrol;
+ /* Set address of chip select function */
+ this->select_chip = rtc_from4_nand_select_chip;
+ /* command delay time (in us) */
+ this->chip_delay = 100;
+ /* return the status of the Ready/Busy line */
+ this->dev_ready = rtc_from4_nand_device_ready;
+
+#ifdef RTC_FROM4_HWECC
+ printk(KERN_INFO "rtc_from4_init: using hardware ECC detection.\n");
+
+ this->ecc.mode = NAND_ECC_HW_SYNDROME;
+ this->ecc.size = 512;
+ this->ecc.bytes = 8;
+ this->ecc.strength = 3;
+ /* return the status of extra status and ECC checks */
+ this->errstat = rtc_from4_errstat;
+ /* set the nand_oobinfo to support FPGA H/W error detection */
+ this->ecc.layout = &rtc_from4_nand_oobinfo;
+ this->ecc.hwctl = rtc_from4_enable_hwecc;
+ this->ecc.calculate = rtc_from4_calculate_ecc;
+ this->ecc.correct = rtc_from4_correct_data;
+
+ /* We could create the decoder on demand, if memory is a concern.
+ * This way we have it handy, if an error happens
+ *
+ * Symbol size is 10 (bits)
+ * Primitive polynomial is x^10+x^3+1
+ * first consecutive root is 0
+ * primitive element to generate roots = 1
+ * generator polynomial degree = 6
+ */
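+ /* (0x409 is binary 100 0000 1001, i.e. x^10 + x^3 + 1 as noted above.) */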
+ rs_decoder = init_rs(10, 0x409, 0, 1, 6);
+ if (!rs_decoder) {
+ printk(KERN_ERR "Could not create a RS decoder\n");
+ ret = -ENOMEM;
+ goto err_1;
+ }
+#else
+ printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n");
+
+ this->ecc.mode = NAND_ECC_SOFT;
+#endif
+
+ /* set the bad block tables to support debugging */
+ this->bbt_td = &rtc_from4_bbt_main_descr;
+ this->bbt_md = &rtc_from4_bbt_mirror_descr;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(rtc_from4_mtd, RTC_FROM4_MAX_CHIPS)) {
+ ret = -ENXIO;
+ goto err_2;
+ }
+
+ /* Perform 'device recovery' for each chip in case there was a power loss. */
+ for (i = 0; i < this->numchips; i++) {
+ deplete(rtc_from4_mtd, i);
+ }
+
+#if RTC_FROM4_NO_VIRTBLOCKS
+ /* use a smaller erase block to minimize wasted space when a block is bad */
+ /* note: this uses eight times as much RAM as using the default and makes */
+ /* mounts take four times as long. */
+ rtc_from4_mtd->flags |= MTD_NO_VIRTBLOCKS;
+#endif
+
+ /* Register the partitions */
+ ret = mtd_device_register(rtc_from4_mtd, partition_info,
+ NUM_PARTITIONS);
+ if (ret)
+ goto err_3;
+
+ /* Return happy */
+ return 0;
+err_3:
+ nand_release(rtc_from4_mtd);
+err_2:
+ free_rs(rs_decoder);
+err_1:
+ kfree(rtc_from4_mtd);
+ return ret;
+}
+
+module_init(rtc_from4_init);
+
+/*
+ * Clean up routine
+ */
+static void __exit rtc_from4_cleanup(void)
+{
+ /* Release resource, unregister partitions */
+ nand_release(rtc_from4_mtd);
+
+ /* Free the MTD device structure */
+ kfree(rtc_from4_mtd);
+
+#ifdef RTC_FROM4_HWECC
+ /* Free the reed solomon resources */
+ if (rs_decoder) {
+ free_rs(rs_decoder);
+ }
+#endif
+}
+
+module_exit(rtc_from4_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("d.marlin <dmarlin@redhat.com");
+MODULE_DESCRIPTION("Board-specific glue layer for AG-AND flash on Renesas FROM_BOARD4");
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
new file mode 100644
index 00000000..91121f33
--- /dev/null
+++ b/drivers/mtd/nand/s3c2410.c
@@ -0,0 +1,1154 @@
+/* linux/drivers/mtd/nand/s3c2410.c
+ *
+ * Copyright © 2004-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Samsung S3C2410/S3C2440/S3C2412 NAND driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+
+#include <plat/regs-nand.h>
+#include <plat/nand.h>
+
+#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
+static int hardware_ecc = 1;
+#else
+static int hardware_ecc = 0;
+#endif
+
+#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
+static const int clock_stop = 1;
+#else
+static const int clock_stop = 0;
+#endif
+
+
+/* new oob placement block for use with hardware ecc generation
+ */
+
+static struct nand_ecclayout nand_hw_eccoob = {
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2},
+ .oobfree = {{8, 8}}
+};
+
+/* controller and mtd information */
+
+struct s3c2410_nand_info;
+
+/**
+ * struct s3c2410_nand_mtd - driver MTD structure
+ * @mtd: The MTD instance to pass to the MTD layer.
+ * @chip: The NAND chip information.
+ * @set: The platform information supplied for this set of NAND chips.
+ * @info: Link back to the hardware information.
+ * @scan_res: The result from calling nand_scan_ident().
+*/
+struct s3c2410_nand_mtd {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct s3c2410_nand_set *set;
+ struct s3c2410_nand_info *info;
+ int scan_res;
+};
+
+enum s3c_cpu_type {
+ TYPE_S3C2410,
+ TYPE_S3C2412,
+ TYPE_S3C2440,
+};
+
+enum s3c_nand_clk_state {
+ CLOCK_DISABLE = 0,
+ CLOCK_ENABLE,
+ CLOCK_SUSPEND,
+};
+
+/* overview of the s3c2410 nand state */
+
+/**
+ * struct s3c2410_nand_info - NAND controller state.
+ * @mtds: An array of MTD instances on this controller.
+ * @platform: The platform data for this board.
+ * @device: The platform device we bound to.
+ * @area: The IO area resource that came from request_mem_region().
+ * @clk: The clock resource for this controller.
+ * @regs: The area mapped for the hardware registers described by @area.
+ * @sel_reg: Pointer to the register controlling the NAND selection.
+ * @sel_bit: The bit in @sel_reg to select the NAND chip.
+ * @mtd_count: The number of MTDs created from this controller.
+ * @save_sel: The contents of @sel_reg to be saved over suspend.
+ * @clk_rate: The clock rate from @clk.
+ * @clk_state: The current clock state.
+ * @cpu_type: The exact type of this controller.
+ */
+struct s3c2410_nand_info {
+ /* mtd info */
+ struct nand_hw_control controller;
+ struct s3c2410_nand_mtd *mtds;
+ struct s3c2410_platform_nand *platform;
+
+ /* device info */
+ struct device *device;
+ struct resource *area;
+ struct clk *clk;
+ void __iomem *regs;
+ void __iomem *sel_reg;
+ int sel_bit;
+ int mtd_count;
+ unsigned long save_sel;
+ unsigned long clk_rate;
+ enum s3c_nand_clk_state clk_state;
+
+ enum s3c_cpu_type cpu_type;
+
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
+#endif
+};
+
+/* conversion functions */
+
+static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct s3c2410_nand_mtd, mtd);
+}
+
+static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd)
+{
+ return s3c2410_nand_mtd_toours(mtd)->info;
+}
+
+static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev)
+{
+ return platform_get_drvdata(dev);
+}
+
+static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
+{
+ return dev->dev.platform_data;
+}
+
+static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
+{
+ return clock_stop;
+}
+
+/**
+ * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock.
+ * @info: The controller instance.
+ * @new_state: State to which clock should be set.
+ */
+static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
+ enum s3c_nand_clk_state new_state)
+{
+ if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND)
+ return;
+
+ if (info->clk_state == CLOCK_ENABLE) {
+ if (new_state != CLOCK_ENABLE)
+ clk_disable(info->clk);
+ } else {
+ if (new_state == CLOCK_ENABLE)
+ clk_enable(info->clk);
+ }
+
+ info->clk_state = new_state;
+}
+
+/* timing calculations */
+
+#define NS_IN_KHZ 1000000
+
+/**
+ * s3c_nand_calc_rate - calculate timing data.
+ * @wanted: The cycle time in nanoseconds.
+ * @clk: The clock rate in kHz.
+ * @max: The maximum divider value.
+ *
+ * Calculate the timing value from the given parameters.
+ */
+static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
+{
+ int result;
+
+ result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
+
+ pr_debug("result %d from %ld, %d\n", result, clk, wanted);
+
+ if (result > max) {
+ printk("%d ns is too big for current clock rate %ld\n", wanted, clk);
+ return -1;
+ }
+
+ if (result < 1)
+ result = 1;
+
+ return result;
+}
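+
+/* Worked example (illustrative only): with HCLK at 100 MHz (clk = 100000 kHz)
+ * and a requested cycle time of 30 ns, the result is
+ * DIV_ROUND_UP(30 * 100000, 1000000) = 3 ticks.
+ */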
+
+#define to_ns(ticks,clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
+
+/* controller setup */
+
+/**
+ * s3c2410_nand_setrate - setup controller timing information.
+ * @info: The controller instance.
+ *
+ * Given the information supplied by the platform, calculate and set
+ * the necessary timing registers in the hardware to generate the
+ * necessary timing cycles to the hardware.
+ */
+static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
+{
+ struct s3c2410_platform_nand *plat = info->platform;
+ int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
+ int tacls, twrph0, twrph1;
+ unsigned long clkrate = clk_get_rate(info->clk);
+ unsigned long uninitialized_var(set), cfg, uninitialized_var(mask);
+ unsigned long flags;
+
+ /* calculate the timing information for the controller */
+
+ info->clk_rate = clkrate;
+ clkrate /= 1000; /* turn clock into kHz for ease of use */
+
+ if (plat != NULL) {
+ tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max);
+ twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8);
+ twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8);
+ } else {
+ /* default timings */
+ tacls = tacls_max;
+ twrph0 = 8;
+ twrph1 = 8;
+ }
+
+ if (tacls < 0 || twrph0 < 0 || twrph1 < 0) {
+ dev_err(info->device, "cannot get suitable timings\n");
+ return -EINVAL;
+ }
+
+ dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
+ tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate));
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ mask = (S3C2410_NFCONF_TACLS(3) |
+ S3C2410_NFCONF_TWRPH0(7) |
+ S3C2410_NFCONF_TWRPH1(7));
+ set = S3C2410_NFCONF_EN;
+ set |= S3C2410_NFCONF_TACLS(tacls - 1);
+ set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
+ set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
+ break;
+
+ case TYPE_S3C2440:
+ case TYPE_S3C2412:
+ mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
+ S3C2440_NFCONF_TWRPH0(7) |
+ S3C2440_NFCONF_TWRPH1(7));
+
+ set = S3C2440_NFCONF_TACLS(tacls - 1);
+ set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
+ set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
+ break;
+
+ default:
+ BUG();
+ }
+
+ local_irq_save(flags);
+
+ cfg = readl(info->regs + S3C2410_NFCONF);
+ cfg &= ~mask;
+ cfg |= set;
+ writel(cfg, info->regs + S3C2410_NFCONF);
+
+ local_irq_restore(flags);
+
+ dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
+
+ return 0;
+}
+
+/**
+ * s3c2410_nand_inithw - basic hardware initialisation
+ * @info: The hardware state.
+ *
+ * Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
+ * to setup the hardware access speeds and set the controller to be enabled.
+*/
+static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
+{
+ int ret;
+
+ ret = s3c2410_nand_setrate(info);
+ if (ret < 0)
+ return ret;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ default:
+ break;
+
+ case TYPE_S3C2440:
+ case TYPE_S3C2412:
+ /* enable the controller and de-assert nFCE */
+
+ writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
+ }
+
+ return 0;
+}
+
+/**
+ * s3c2410_nand_select_chip - select the given nand chip
+ * @mtd: The MTD instance for this chip.
+ * @chip: The chip number.
+ *
+ * This is called by the MTD layer to either select a given chip for the
+ * @mtd instance, or to indicate that the access has finished and the
+ * chip can be de-selected.
+ *
+ * The routine ensures that the nFCE line is correctly setup, and any
+ * platform specific selection code is called to route nFCE to the specific
+ * chip.
+ */
+static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct s3c2410_nand_info *info;
+ struct s3c2410_nand_mtd *nmtd;
+ struct nand_chip *this = mtd->priv;
+ unsigned long cur;
+
+ nmtd = this->priv;
+ info = nmtd->info;
+
+ if (chip != -1)
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+
+ cur = readl(info->sel_reg);
+
+ if (chip == -1) {
+ cur |= info->sel_bit;
+ } else {
+ if (nmtd->set != NULL && chip > nmtd->set->nr_chips) {
+ dev_err(info->device, "invalid chip %d\n", chip);
+ return;
+ }
+
+ if (info->platform != NULL) {
+ if (info->platform->select_chip != NULL)
+ (info->platform->select_chip) (nmtd->set, chip);
+ }
+
+ cur &= ~info->sel_bit;
+ }
+
+ writel(cur, info->sel_reg);
+
+ if (chip == -1)
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+}
+
+/* s3c2410_nand_hwcontrol
+ *
+ * Issue command and address cycles to the chip
+*/
+
+static void s3c2410_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, info->regs + S3C2410_NFCMD);
+ else
+ writeb(cmd, info->regs + S3C2410_NFADDR);
+}
+
+/* command and control functions */
+
+static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, info->regs + S3C2440_NFCMD);
+ else
+ writeb(cmd, info->regs + S3C2440_NFADDR);
+}
+
+/* s3c2410_nand_devready()
+ *
+ * returns 0 if the nand is busy, 1 if it is ready
+*/
+
+static int s3c2410_nand_devready(struct mtd_info *mtd)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
+}
+
+static int s3c2440_nand_devready(struct mtd_info *mtd)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
+}
+
+static int s3c2412_nand_devready(struct mtd_info *mtd)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY;
+}
+
+/* ECC handling functions */
+
+static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned int diff0, diff1, diff2;
+ unsigned int bit, byte;
+
+ pr_debug("%s(%p,%p,%p,%p)\n", __func__, mtd, dat, read_ecc, calc_ecc);
+
+ diff0 = read_ecc[0] ^ calc_ecc[0];
+ diff1 = read_ecc[1] ^ calc_ecc[1];
+ diff2 = read_ecc[2] ^ calc_ecc[2];
+
+ pr_debug("%s: rd %02x%02x%02x calc %02x%02x%02x diff %02x%02x%02x\n",
+ __func__,
+ read_ecc[0], read_ecc[1], read_ecc[2],
+ calc_ecc[0], calc_ecc[1], calc_ecc[2],
+ diff0, diff1, diff2);
+
+ if (diff0 == 0 && diff1 == 0 && diff2 == 0)
+ return 0; /* ECC is ok */
+
+ /* sometimes people do not think about using the ECC, so check
+ * to see if we have a 0xff,0xff,0xff read ECC and then ignore
+ * the error, on the assumption that this is an un-ECCed page.
+ */
+ if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff
+ && info->platform->ignore_unset_ecc)
+ return 0;
+
+ /* Can we correct this ECC (i.e. one row and one column change)?
+ * Note, this is similar to the 256-byte ECC code used on SmartMedia */
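+ /* Background (generic Hamming-style NAND ECC, not chip specific): every
+ * parity bit is stored as a pair, one computed over each half of the data
+ * it covers, so a single-bit data error flips exactly one bit of every
+ * pair.  diffN ^ (diffN >> 1) then has the low bit of each pair set,
+ * which is the 0x55 pattern tested below. */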
+
+ if (((diff0 ^ (diff0 >> 1)) & 0x55) == 0x55 &&
+ ((diff1 ^ (diff1 >> 1)) & 0x55) == 0x55 &&
+ ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
+ /* calculate the bit position of the error */
+
+ bit = ((diff2 >> 3) & 1) |
+ ((diff2 >> 4) & 2) |
+ ((diff2 >> 5) & 4);
+
+ /* calculate the byte position of the error */
+
+ byte = ((diff2 << 7) & 0x100) |
+ ((diff1 << 0) & 0x80) |
+ ((diff1 << 1) & 0x40) |
+ ((diff1 << 2) & 0x20) |
+ ((diff1 << 3) & 0x10) |
+ ((diff0 >> 4) & 0x08) |
+ ((diff0 >> 3) & 0x04) |
+ ((diff0 >> 2) & 0x02) |
+ ((diff0 >> 1) & 0x01);
+
+ dev_dbg(info->device, "correcting error bit %d, byte %d\n",
+ bit, byte);
+
+ dat[byte] ^= (1 << bit);
+ return 1;
+ }
+
+ /* if there is only one bit difference in the ECC, then only one
+ * row or column parity bit has changed, which means the error is
+ * most probably in the ECC itself */
+
+ diff0 |= (diff1 << 8);
+ diff0 |= (diff2 << 16);
+
+ if ((diff0 & ~(1<<fls(diff0))) == 0)
+ return 1;
+
+ return -1;
+}
+
+/* ECC functions
+ *
+ * These allow the s3c2410 and s3c2440 to use the controller's ECC
+ * generator block to ECC the data as it passes through
+*/
+
+static void s3c2410_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ctrl;
+
+ ctrl = readl(info->regs + S3C2410_NFCONF);
+ ctrl |= S3C2410_NFCONF_INITECC;
+ writel(ctrl, info->regs + S3C2410_NFCONF);
+}
+
+static void s3c2412_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ctrl;
+
+ ctrl = readl(info->regs + S3C2440_NFCONT);
+ writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC, info->regs + S3C2440_NFCONT);
+}
+
+static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ctrl;
+
+ ctrl = readl(info->regs + S3C2440_NFCONT);
+ writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
+}
+
+static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0);
+ ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
+ ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
+
+ pr_debug("%s: returning ecc %02x%02x%02x\n", __func__,
+ ecc_code[0], ecc_code[1], ecc_code[2]);
+
+ return 0;
+}
+
+static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
+
+ ecc_code[0] = ecc;
+ ecc_code[1] = ecc >> 8;
+ ecc_code[2] = ecc >> 16;
+
+ pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n", ecc_code[0], ecc_code[1], ecc_code[2]);
+
+ return 0;
+}
+
+static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
+
+ ecc_code[0] = ecc;
+ ecc_code[1] = ecc >> 8;
+ ecc_code[2] = ecc >> 16;
+
+ pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff);
+
+ return 0;
+}
+
+/* over-ride the standard functions for a little more speed. We can
+ * use read/write block to move the data buffers to/from the controller
+*/
+
+static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ readsb(this->IO_ADDR_R, buf, len);
+}
+
+static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup if we've got less than a word to do */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--)
+ *buf++ = readb(info->regs + S3C2440_NFDATA);
+ }
+}
+
+static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ writesb(this->IO_ADDR_W, buf, len);
+}
+
+static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup any fractional write */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--, buf++)
+ writeb(*buf, info->regs + S3C2440_NFDATA);
+ }
+}
+
+/* cpufreq driver support */
+
+#ifdef CONFIG_CPU_FREQ
+
+static int s3c2410_nand_cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct s3c2410_nand_info *info;
+ unsigned long newclk;
+
+ info = container_of(nb, struct s3c2410_nand_info, freq_transition);
+ newclk = clk_get_rate(info->clk);
+
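+ /* The intent here is to re-evaluate the NAND timings around the clock
+ * change: before the clock goes up (PRECHANGE) and after it has come
+ * down (POSTCHANGE), so the programmed cycle times are never too short
+ * for the rate actually in use. */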
+ if ((val == CPUFREQ_POSTCHANGE && newclk < info->clk_rate) ||
+ (val == CPUFREQ_PRECHANGE && newclk > info->clk_rate)) {
+ s3c2410_nand_setrate(info);
+ }
+
+ return 0;
+}
+
+static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
+{
+ info->freq_transition.notifier_call = s3c2410_nand_cpufreq_transition;
+
+ return cpufreq_register_notifier(&info->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+{
+ cpufreq_unregister_notifier(&info->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+#else
+static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
+{
+ return 0;
+}
+
+static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+{
+}
+#endif
+
+/* device management functions */
+
+static int s3c24xx_nand_remove(struct platform_device *pdev)
+{
+ struct s3c2410_nand_info *info = to_nand_info(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (info == NULL)
+ return 0;
+
+ s3c2410_nand_cpufreq_deregister(info);
+
+ /* Release all our mtds and their partitions, then go through
+ * freeing the resources used
+ */
+
+ if (info->mtds != NULL) {
+ struct s3c2410_nand_mtd *ptr = info->mtds;
+ int mtdno;
+
+ for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
+ pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
+ nand_release(&ptr->mtd);
+ }
+
+ kfree(info->mtds);
+ }
+
+ /* free the common resources */
+
+ if (!IS_ERR(info->clk)) {
+ s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
+ clk_put(info->clk);
+ }
+
+ if (info->regs != NULL) {
+ iounmap(info->regs);
+ info->regs = NULL;
+ }
+
+ if (info->area != NULL) {
+ release_resource(info->area);
+ kfree(info->area);
+ info->area = NULL;
+ }
+
+ kfree(info);
+
+ return 0;
+}
+
+static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *mtd,
+ struct s3c2410_nand_set *set)
+{
+ if (set)
+ mtd->mtd.name = set->name;
+
+ return mtd_device_parse_register(&mtd->mtd, NULL, NULL,
+ set->partitions, set->nr_partitions);
+}
+
+/**
+ * s3c2410_nand_init_chip - initialise a single instance of a chip
+ * @info: The base NAND controller the chip is on.
+ * @nmtd: The new controller MTD instance to fill in.
+ * @set: The information passed from the board specific platform data.
+ *
+ * Initialise the given @nmtd from the information in @info and @set. This
+ * readies the structure for use with the MTD layer functions by ensuring
+ * all pointers are setup and the necessary control routines selected.
+ */
+static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *nmtd,
+ struct s3c2410_nand_set *set)
+{
+ struct nand_chip *chip = &nmtd->chip;
+ void __iomem *regs = info->regs;
+
+ chip->write_buf = s3c2410_nand_write_buf;
+ chip->read_buf = s3c2410_nand_read_buf;
+ chip->select_chip = s3c2410_nand_select_chip;
+ chip->chip_delay = 50;
+ chip->priv = nmtd;
+ chip->options = set->options;
+ chip->controller = &info->controller;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->IO_ADDR_W = regs + S3C2410_NFDATA;
+ info->sel_reg = regs + S3C2410_NFCONF;
+ info->sel_bit = S3C2410_NFCONF_nFCE;
+ chip->cmd_ctrl = s3c2410_nand_hwcontrol;
+ chip->dev_ready = s3c2410_nand_devready;
+ break;
+
+ case TYPE_S3C2440:
+ chip->IO_ADDR_W = regs + S3C2440_NFDATA;
+ info->sel_reg = regs + S3C2440_NFCONT;
+ info->sel_bit = S3C2440_NFCONT_nFCE;
+ chip->cmd_ctrl = s3c2440_nand_hwcontrol;
+ chip->dev_ready = s3c2440_nand_devready;
+ chip->read_buf = s3c2440_nand_read_buf;
+ chip->write_buf = s3c2440_nand_write_buf;
+ break;
+
+ case TYPE_S3C2412:
+ chip->IO_ADDR_W = regs + S3C2440_NFDATA;
+ info->sel_reg = regs + S3C2440_NFCONT;
+ info->sel_bit = S3C2412_NFCONT_nFCE0;
+ chip->cmd_ctrl = s3c2440_nand_hwcontrol;
+ chip->dev_ready = s3c2412_nand_devready;
+
+ if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT)
+ dev_info(info->device, "System booted from NAND\n");
+
+ break;
+ }
+
+ chip->IO_ADDR_R = chip->IO_ADDR_W;
+
+ nmtd->info = info;
+ nmtd->mtd.priv = chip;
+ nmtd->mtd.owner = THIS_MODULE;
+ nmtd->set = set;
+
+ if (hardware_ecc) {
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ chip->ecc.correct = s3c2410_nand_correct_data;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.strength = 1;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2412:
+ chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2412_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2440:
+ chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+ break;
+
+ }
+ } else {
+ chip->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ if (set->ecc_layout != NULL)
+ chip->ecc.layout = set->ecc_layout;
+
+ if (set->disable_ecc)
+ chip->ecc.mode = NAND_ECC_NONE;
+
+ switch (chip->ecc.mode) {
+ case NAND_ECC_NONE:
+ dev_info(info->device, "NAND ECC disabled\n");
+ break;
+ case NAND_ECC_SOFT:
+ dev_info(info->device, "NAND soft ECC\n");
+ break;
+ case NAND_ECC_HW:
+ dev_info(info->device, "NAND hardware ECC\n");
+ break;
+ default:
+ dev_info(info->device, "NAND ECC UNKNOWN\n");
+ break;
+ }
+
+ /* If you use u-boot BBT creation code, specifying this flag will
+ * let the kernel fish out the BBT from the NAND, and also skip the
+ * full NAND scan that can take 1/2s or so. Little things... */
+ if (set->flash_bbt) {
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->options |= NAND_SKIP_BBTSCAN;
+ }
+}
+
+/**
+ * s3c2410_nand_update_chip - post probe update
+ * @info: The controller instance.
+ * @nmtd: The driver version of the MTD instance.
+ *
+ * This routine is called after the chip probe has successfully completed
+ * and the relevant per-chip information updated. This call ensures that
+ * we update the internal state accordingly.
+ *
+ * The internal state is currently limited to the ECC state information.
+*/
+static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *nmtd)
+{
+ struct nand_chip *chip = &nmtd->chip;
+
+ dev_dbg(info->device, "chip %p => page shift %d\n",
+ chip, chip->page_shift);
+
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return;
+
+ /* change the behaviour depending on whether we are using
+ * the large or small page nand device */
+
+ if (chip->page_shift > 10) {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ } else {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.layout = &nand_hw_eccoob;
+ }
+}
+
+/* s3c24xx_nand_probe
+ *
+ * called by device layer when it finds a device matching
+ * one our driver can handle. This code checks to see if
+ * it can allocate all necessary resources then calls the
+ * nand layer to look for devices
+*/
+static int s3c24xx_nand_probe(struct platform_device *pdev)
+{
+ struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
+ enum s3c_cpu_type cpu_type;
+ struct s3c2410_nand_info *info;
+ struct s3c2410_nand_mtd *nmtd;
+ struct s3c2410_nand_set *sets;
+ struct resource *res;
+ int err = 0;
+ int size;
+ int nr_sets;
+ int setno;
+
+ cpu_type = platform_get_device_id(pdev)->driver_data;
+
+ pr_debug("s3c2410_nand_probe(%p)\n", pdev);
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ dev_err(&pdev->dev, "no memory for flash info\n");
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ spin_lock_init(&info->controller.lock);
+ init_waitqueue_head(&info->controller.wq);
+
+ /* get the clock source and enable it */
+
+ info->clk = clk_get(&pdev->dev, "nand");
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ err = -ENOENT;
+ goto exit_error;
+ }
+
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+
+ /* allocate and map the resource */
+
+ /* currently we assume we have the one resource */
+ res = pdev->resource;
+ size = resource_size(res);
+
+ info->area = request_mem_region(res->start, size, pdev->name);
+
+ if (info->area == NULL) {
+ dev_err(&pdev->dev, "cannot reserve register region\n");
+ err = -ENOENT;
+ goto exit_error;
+ }
+
+ info->device = &pdev->dev;
+ info->platform = plat;
+ info->regs = ioremap(res->start, size);
+ info->cpu_type = cpu_type;
+
+ if (info->regs == NULL) {
+ dev_err(&pdev->dev, "cannot reserve register region\n");
+ err = -EIO;
+ goto exit_error;
+ }
+
+ dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
+
+ /* initialise the hardware */
+
+ err = s3c2410_nand_inithw(info);
+ if (err != 0)
+ goto exit_error;
+
+ sets = (plat != NULL) ? plat->sets : NULL;
+ nr_sets = (plat != NULL) ? plat->nr_sets : 1;
+
+ info->mtd_count = nr_sets;
+
+ /* allocate our information */
+
+ size = nr_sets * sizeof(*info->mtds);
+ info->mtds = kzalloc(size, GFP_KERNEL);
+ if (info->mtds == NULL) {
+ dev_err(&pdev->dev, "failed to allocate mtd storage\n");
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ /* initialise all possible chips */
+
+ nmtd = info->mtds;
+
+ for (setno = 0; setno < nr_sets; setno++, nmtd++) {
+ pr_debug("initialising set %d (%p, info %p)\n", setno, nmtd, info);
+
+ s3c2410_nand_init_chip(info, nmtd, sets);
+
+ nmtd->scan_res = nand_scan_ident(&nmtd->mtd,
+ (sets) ? sets->nr_chips : 1,
+ NULL);
+
+ if (nmtd->scan_res == 0) {
+ s3c2410_nand_update_chip(info, nmtd);
+ nand_scan_tail(&nmtd->mtd);
+ s3c2410_nand_add_partition(info, nmtd, sets);
+ }
+
+ if (sets != NULL)
+ sets++;
+ }
+
+ err = s3c2410_nand_cpufreq_register(info);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to init cpufreq support\n");
+ goto exit_error;
+ }
+
+ if (allow_clk_suspend(info)) {
+ dev_info(&pdev->dev, "clock idle support enabled\n");
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+ }
+
+ pr_debug("initialised ok\n");
+ return 0;
+
+ exit_error:
+ s3c24xx_nand_remove(pdev);
+
+ if (err == 0)
+ err = -EINVAL;
+ return err;
+}
+
+/* PM Support */
+#ifdef CONFIG_PM
+
+static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
+{
+ struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+
+ if (info) {
+ info->save_sel = readl(info->sel_reg);
+
+ /* For the moment, we must ensure nFCE is high during
+ * the time we are suspended. This really should be
+ * handled by suspending the MTDs we are using, but
+ * that is currently not the case. */
+
+ writel(info->save_sel | info->sel_bit, info->sel_reg);
+
+ s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
+ }
+
+ return 0;
+}
+
+static int s3c24xx_nand_resume(struct platform_device *dev)
+{
+ struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+ unsigned long sel;
+
+ if (info) {
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+ s3c2410_nand_inithw(info);
+
+ /* Restore the state of the nFCE line. */
+
+ sel = readl(info->sel_reg);
+ sel &= ~info->sel_bit;
+ sel |= info->save_sel & info->sel_bit;
+ writel(sel, info->sel_reg);
+
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+ }
+
+ return 0;
+}
+
+#else
+#define s3c24xx_nand_suspend NULL
+#define s3c24xx_nand_resume NULL
+#endif
+
+/* driver device registration */
+
+static struct platform_device_id s3c24xx_driver_ids[] = {
+ {
+ .name = "s3c2410-nand",
+ .driver_data = TYPE_S3C2410,
+ }, {
+ .name = "s3c2440-nand",
+ .driver_data = TYPE_S3C2440,
+ }, {
+ .name = "s3c2412-nand",
+ .driver_data = TYPE_S3C2412,
+ }, {
+ .name = "s3c6400-nand",
+ .driver_data = TYPE_S3C2412, /* compatible with 2412 */
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
+
+static struct platform_driver s3c24xx_nand_driver = {
+ .probe = s3c24xx_nand_probe,
+ .remove = s3c24xx_nand_remove,
+ .suspend = s3c24xx_nand_suspend,
+ .resume = s3c24xx_nand_resume,
+ .id_table = s3c24xx_driver_ids,
+ .driver = {
+ .name = "s3c24xx-nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init s3c2410_nand_init(void)
+{
+ printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
+
+ return platform_driver_register(&s3c24xx_nand_driver);
+}
+
+static void __exit s3c2410_nand_exit(void)
+{
+ platform_driver_unregister(&s3c24xx_nand_driver);
+}
+
+module_init(s3c2410_nand_init);
+module_exit(s3c2410_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
new file mode 100644
index 00000000..e9b2b260
--- /dev/null
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -0,0 +1,963 @@
+/*
+ * SuperH FLCTL nand controller
+ *
+ * Copyright (c) 2008 Renesas Solutions Corp.
+ * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
+ *
+ * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/sh_flctl.h>
+
+static struct nand_ecclayout flctl_4secc_oob_16 = {
+ .eccbytes = 10,
+ .eccpos = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+ .oobfree = {
+ {.offset = 12,
+ .length = 4} },
+};
+
+static struct nand_ecclayout flctl_4secc_oob_64 = {
+ .eccbytes = 10,
+ .eccpos = {48, 49, 50, 51, 52, 53, 54, 55, 56, 57},
+ .oobfree = {
+ {.offset = 60,
+ .length = 4} },
+};
+
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr flctl_4secc_smallpage = {
+ .options = NAND_BBT_SCAN2NDPAGE,
+ .offs = 11,
+ .len = 1,
+ .pattern = scan_ff_pattern,
+};
+
+static struct nand_bbt_descr flctl_4secc_largepage = {
+ .options = NAND_BBT_SCAN2NDPAGE,
+ .offs = 58,
+ .len = 2,
+ .pattern = scan_ff_pattern,
+};
+
+static void empty_fifo(struct sh_flctl *flctl)
+{
+ writel(0x000c0000, FLINTDMACR(flctl)); /* FIFO Clear */
+ writel(0x00000000, FLINTDMACR(flctl)); /* Clear Error flags */
+}
+
+static void start_translation(struct sh_flctl *flctl)
+{
+ writeb(TRSTRT, FLTRCR(flctl));
+}
+
+static void timeout_error(struct sh_flctl *flctl, const char *str)
+{
+ dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
+}
+
+static void wait_completion(struct sh_flctl *flctl)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+
+ while (timeout--) {
+ if (readb(FLTRCR(flctl)) & TREND) {
+ writeb(0x0, FLTRCR(flctl));
+ return;
+ }
+ udelay(1);
+ }
+
+ timeout_error(flctl, __func__);
+ writeb(0x0, FLTRCR(flctl));
+}
+
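+/*
+ * set_addr - pack the column/page address into FLADR (and FLADR2).
+ *
+ * Descriptive summary of the layout used below: on large-page devices the
+ * 12-bit column goes in bits 0-11 and the page number in bits 16-31, with
+ * a third page byte written to FLADR2 for devices larger than 128MB; on
+ * small-page devices the column goes in bits 0-7 and the page number in
+ * bits 8-31.  For ERASE1 (column == -1) only the page address is written.
+ */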
+static void set_addr(struct mtd_info *mtd, int column, int page_addr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint32_t addr = 0;
+
+ if (column == -1) {
+ addr = page_addr; /* ERASE1 */
+ } else if (page_addr != -1) {
+ /* SEQIN, READ0, etc.. */
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ if (flctl->page_size) {
+ addr = column & 0x0FFF;
+ addr |= (page_addr & 0xff) << 16;
+ addr |= ((page_addr >> 8) & 0xff) << 24;
+ /* bigger than 128MB */
+ if (flctl->rw_ADRCNT == ADRCNT2_E) {
+ uint32_t addr2;
+ addr2 = (page_addr >> 16) & 0xff;
+ writel(addr2, FLADR2(flctl));
+ }
+ } else {
+ addr = column;
+ addr |= (page_addr & 0xff) << 8;
+ addr |= ((page_addr >> 8) & 0xff) << 16;
+ addr |= ((page_addr >> 16) & 0xff) << 24;
+ }
+ }
+ writel(addr, FLADR(flctl));
+}
+
+static void wait_rfifo_ready(struct sh_flctl *flctl)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+
+ while (timeout--) {
+ uint32_t val;
+ /* check FIFO */
+ val = readl(FLDTCNTR(flctl)) >> 16;
+ if (val & 0xFF)
+ return;
+ udelay(1);
+ }
+ timeout_error(flctl, __func__);
+}
+
+static void wait_wfifo_ready(struct sh_flctl *flctl)
+{
+ uint32_t len, timeout = LOOP_TIMEOUT_MAX;
+
+ while (timeout--) {
+ /* check FIFO */
+ len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
+ if (len >= 4)
+ return;
+ udelay(1);
+ }
+ timeout_error(flctl, __func__);
+}
+
+static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+ int checked[4];
+ void __iomem *ecc_reg[4];
+ int i;
+ uint32_t data, size;
+
+ memset(checked, 0, sizeof(checked));
+
+ while (timeout--) {
+ size = readl(FLDTCNTR(flctl)) >> 24;
+ if (size & 0xFF)
+ return 0; /* success */
+
+ if (readl(FL4ECCCR(flctl)) & _4ECCFA)
+ return 1; /* can't correct */
+
+ udelay(1);
+ if (!(readl(FL4ECCCR(flctl)) & _4ECCEND))
+ continue;
+
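+ /* Each FL4ECCRESULTn register reports one correctable error as
+ * (byte offset within the sector << 16) | xor mask; the loop below
+ * applies that mask to the matching byte in done_buff (offset by
+ * 512 * sector on large-page devices). */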
+ /* start error correction */
+ ecc_reg[0] = FL4ECCRESULT0(flctl);
+ ecc_reg[1] = FL4ECCRESULT1(flctl);
+ ecc_reg[2] = FL4ECCRESULT2(flctl);
+ ecc_reg[3] = FL4ECCRESULT3(flctl);
+
+ for (i = 0; i < 3; i++) {
+ data = readl(ecc_reg[i]);
+ if (data != INIT_FL4ECCRESULT_VAL && !checked[i]) {
+ uint8_t org;
+ int index;
+
+ if (flctl->page_size)
+ index = (512 * sector_number) +
+ (data >> 16);
+ else
+ index = data >> 16;
+
+ org = flctl->done_buff[index];
+ flctl->done_buff[index] = org ^ (data & 0xFF);
+ checked[i] = 1;
+ }
+ }
+
+ writel(0, FL4ECCCR(flctl));
+ }
+
+ timeout_error(flctl, __func__);
+ return 1; /* timeout */
+}
+
+static void wait_wecfifo_ready(struct sh_flctl *flctl)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+ uint32_t len;
+
+ while (timeout--) {
+ /* check FLECFIFO */
+ len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
+ if (len >= 4)
+ return;
+ udelay(1);
+ }
+ timeout_error(flctl, __func__);
+}
+
+static void read_datareg(struct sh_flctl *flctl, int offset)
+{
+ unsigned long data;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+ wait_completion(flctl);
+
+ data = readl(FLDATAR(flctl));
+ *buf = le32_to_cpu(data);
+}
+
+static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+{
+ int i, len_4align;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+ void *fifo_addr = (void *)FLDTFIFO(flctl);
+
+ len_4align = (rlen + 3) / 4;
+
+ for (i = 0; i < len_4align; i++) {
+ wait_rfifo_ready(flctl);
+ buf[i] = readl(fifo_addr);
+ buf[i] = be32_to_cpu(buf[i]);
+ }
+}
+
+static int read_ecfiforeg(struct sh_flctl *flctl, uint8_t *buff, int sector)
+{
+ int i;
+ unsigned long *ecc_buf = (unsigned long *)buff;
+ void *fifo_addr = (void *)FLECFIFO(flctl);
+
+ for (i = 0; i < 4; i++) {
+ if (wait_recfifo_ready(flctl, sector))
+ return 1;
+ ecc_buf[i] = readl(fifo_addr);
+ ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
+ }
+
+ return 0;
+}
+
+static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+{
+ int i, len_4align;
+ unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
+ void *fifo_addr = (void *)FLDTFIFO(flctl);
+
+ len_4align = (rlen + 3) / 4;
+ for (i = 0; i < len_4align; i++) {
+ wait_wfifo_ready(flctl);
+ writel(cpu_to_be32(data[i]), fifo_addr);
+ }
+}
+
+static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
+ uint32_t flcmdcr_val, addr_len_bytes = 0;
+
+ /* Set SNAND bit if page size is 2048byte */
+ if (flctl->page_size)
+ flcmncr_val |= SNAND_E;
+ else
+ flcmncr_val &= ~SNAND_E;
+
+ /* default FLCMDCR val */
+ flcmdcr_val = DOCMD1_E | DOADR_E;
+
+ /* Set for FLCMDCR */
+ switch (cmd) {
+ case NAND_CMD_ERASE1:
+ addr_len_bytes = flctl->erase_ADRCNT;
+ flcmdcr_val |= DOCMD2_E;
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ case NAND_CMD_RNDOUT:
+ addr_len_bytes = flctl->rw_ADRCNT;
+ flcmdcr_val |= CDSRC_E;
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ flcmncr_val |= SEL_16BIT;
+ break;
+ case NAND_CMD_SEQIN:
+ /* In this case cmd is READ0, READ1 or READ00 */
+ flcmdcr_val &= ~DOADR_E; /* ONLY execute 1st cmd */
+ break;
+ case NAND_CMD_PAGEPROG:
+ addr_len_bytes = flctl->rw_ADRCNT;
+ flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ flcmncr_val |= SEL_16BIT;
+ break;
+ case NAND_CMD_READID:
+ flcmncr_val &= ~SNAND_E;
+ flcmdcr_val |= CDSRC_E;
+ addr_len_bytes = ADRCNT_1;
+ break;
+ case NAND_CMD_STATUS:
+ case NAND_CMD_RESET:
+ flcmncr_val &= ~SNAND_E;
+ flcmdcr_val &= ~(DOADR_E | DOSR_E);
+ break;
+ default:
+ break;
+ }
+
+ /* Set address bytes parameter */
+ flcmdcr_val |= addr_len_bytes;
+
+ /* Now actually write */
+ writel(flcmncr_val, FLCMNCR(flctl));
+ writel(flcmdcr_val, FLCMDCR(flctl));
+ writel(flcmcdr_val, FLCMCDR(flctl));
+}
+
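+/*
+ * Note: the 4-symbol ECC correction itself happens while the sectors are
+ * read in execmd_read_page_sector(); this helper only copies the already
+ * corrected data out of the internal done_buff buffer and accounts for
+ * sectors that were flagged as uncorrectable.
+ */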
+static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->read_buf(mtd, p, eccsize);
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ if (flctl->hwecc_cant_correct[i])
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += 0;
+ }
+
+ return 0;
+}
+
+static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ const uint8_t *p = buf;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->write_buf(mtd, p, eccsize);
+}
+
+static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int sector, page_sectors;
+
+ if (flctl->page_size)
+ page_sectors = 4;
+ else
+ page_sectors = 1;
+
+ writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
+ FLCMNCR(flctl));
+
+ set_cmd_regs(mtd, NAND_CMD_READ0,
+ (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
+
+ for (sector = 0; sector < page_sectors; sector++) {
+ int ret;
+
+ empty_fifo(flctl);
+ writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
+ writel(page_addr << 2 | sector, FLADR(flctl));
+
+ start_translation(flctl);
+ read_fiforeg(flctl, 512, 512 * sector);
+
+ ret = read_ecfiforeg(flctl,
+ &flctl->done_buff[mtd->writesize + 16 * sector],
+ sector);
+
+ if (ret)
+ flctl->hwecc_cant_correct[sector] = 1;
+
+ writel(0x0, FL4ECCCR(flctl));
+ wait_completion(flctl);
+ }
+ writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
+ FLCMNCR(flctl));
+}
+
+static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+
+ set_cmd_regs(mtd, NAND_CMD_READ0,
+ (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
+
+ empty_fifo(flctl);
+ if (flctl->page_size) {
+ int i;
+ /* In case that the page size is 2k */
+ for (i = 0; i < 16 * 3; i++)
+ flctl->done_buff[i] = 0xFF;
+
+ set_addr(mtd, 3 * 528 + 512, page_addr);
+ writel(16, FLDTCNTR(flctl));
+
+ start_translation(flctl);
+ read_fiforeg(flctl, 16, 16 * 3);
+ wait_completion(flctl);
+ } else {
+ /* In case that the page size is 512b */
+ set_addr(mtd, 512, page_addr);
+ writel(16, FLDTCNTR(flctl));
+
+ start_translation(flctl);
+ read_fiforeg(flctl, 16, 0);
+ wait_completion(flctl);
+ }
+}
+
+static void execmd_write_page_sector(struct mtd_info *mtd)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int i, page_addr = flctl->seqin_page_addr;
+ int sector, page_sectors;
+
+ if (flctl->page_size)
+ page_sectors = 4;
+ else
+ page_sectors = 1;
+
+ writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
+
+ set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
+ (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
+
+ for (sector = 0; sector < page_sectors; sector++) {
+ empty_fifo(flctl);
+ writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
+ writel(page_addr << 2 | sector, FLADR(flctl));
+
+ start_translation(flctl);
+ write_fiforeg(flctl, 512, 512 * sector);
+
+ for (i = 0; i < 4; i++) {
+ wait_wecfifo_ready(flctl); /* wait for write ready */
+ writel(0xFFFFFFFF, FLECFIFO(flctl));
+ }
+ wait_completion(flctl);
+ }
+
+ writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
+}
+
+static void execmd_write_oob(struct mtd_info *mtd)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int page_addr = flctl->seqin_page_addr;
+ int sector, page_sectors;
+
+ if (flctl->page_size) {
+ sector = 3;
+ page_sectors = 4;
+ } else {
+ sector = 0;
+ page_sectors = 1;
+ }
+
+ set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
+ (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
+
+ for (; sector < page_sectors; sector++) {
+ empty_fifo(flctl);
+ set_addr(mtd, sector * 528 + 512, page_addr);
+ writel(16, FLDTCNTR(flctl)); /* set read size */
+
+ start_translation(flctl);
+ write_fiforeg(flctl, 16, 16 * sector);
+ wait_completion(flctl);
+ }
+}
+
+static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint32_t read_cmd = 0;
+
+ pm_runtime_get_sync(&flctl->pdev->dev);
+
+ flctl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ flctl->index = 0;
+
+ switch (command) {
+ case NAND_CMD_READ1:
+ case NAND_CMD_READ0:
+ if (flctl->hwecc) {
+ /* read page with hwecc */
+ execmd_read_page_sector(mtd, page_addr);
+ break;
+ }
+ if (flctl->page_size)
+ set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
+ | command);
+ else
+ set_cmd_regs(mtd, command, command);
+
+ set_addr(mtd, 0, page_addr);
+
+ flctl->read_bytes = mtd->writesize + mtd->oobsize;
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ flctl->index += column;
+ goto read_normal_exit;
+
+ case NAND_CMD_READOOB:
+ if (flctl->hwecc) {
+ /* read page with hwecc */
+ execmd_read_oob(mtd, page_addr);
+ break;
+ }
+
+ if (flctl->page_size) {
+ set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
+ | NAND_CMD_READ0);
+ set_addr(mtd, mtd->writesize, page_addr);
+ } else {
+ set_cmd_regs(mtd, command, command);
+ set_addr(mtd, 0, page_addr);
+ }
+ flctl->read_bytes = mtd->oobsize;
+ goto read_normal_exit;
+
+ case NAND_CMD_RNDOUT:
+ if (flctl->hwecc)
+ break;
+
+ if (flctl->page_size)
+ set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
+ | command);
+ else
+ set_cmd_regs(mtd, command, command);
+
+ set_addr(mtd, column, 0);
+
+ flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
+ goto read_normal_exit;
+
+ case NAND_CMD_READID:
+ set_cmd_regs(mtd, command, command);
+
+ /* READID is always performed using an 8-bit bus */
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ column <<= 1;
+ set_addr(mtd, column, 0);
+
+ flctl->read_bytes = 8;
+ writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ empty_fifo(flctl);
+ start_translation(flctl);
+ read_fiforeg(flctl, flctl->read_bytes, 0);
+ wait_completion(flctl);
+ break;
+
+ case NAND_CMD_ERASE1:
+ flctl->erase1_page_addr = page_addr;
+ break;
+
+ case NAND_CMD_ERASE2:
+ set_cmd_regs(mtd, NAND_CMD_ERASE1,
+ (command << 8) | NAND_CMD_ERASE1);
+ set_addr(mtd, -1, flctl->erase1_page_addr);
+ start_translation(flctl);
+ wait_completion(flctl);
+ break;
+
+ case NAND_CMD_SEQIN:
+ if (!flctl->page_size) {
+ /* output read command */
+ if (column >= mtd->writesize) {
+ column -= mtd->writesize;
+ read_cmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ read_cmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ read_cmd = NAND_CMD_READ1;
+ }
+ }
+ flctl->seqin_column = column;
+ flctl->seqin_page_addr = page_addr;
+ flctl->seqin_read_cmd = read_cmd;
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ empty_fifo(flctl);
+ if (!flctl->page_size) {
+ set_cmd_regs(mtd, NAND_CMD_SEQIN,
+ flctl->seqin_read_cmd);
+ set_addr(mtd, -1, -1);
+ writel(0, FLDTCNTR(flctl)); /* set 0 size */
+ start_translation(flctl);
+ wait_completion(flctl);
+ }
+ if (flctl->hwecc) {
+ /* write page with hwecc */
+ if (flctl->seqin_column == mtd->writesize)
+ execmd_write_oob(mtd);
+ else if (!flctl->seqin_column)
+ execmd_write_page_sector(mtd);
+ else
+ printk(KERN_ERR "Invalid address !?\n");
+ break;
+ }
+ set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
+ set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
+ writel(flctl->index, FLDTCNTR(flctl)); /* set write size */
+ start_translation(flctl);
+ write_fiforeg(flctl, flctl->index, 0);
+ wait_completion(flctl);
+ break;
+
+ case NAND_CMD_STATUS:
+ set_cmd_regs(mtd, command, command);
+ set_addr(mtd, -1, -1);
+
+ flctl->read_bytes = 1;
+ writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ start_translation(flctl);
+ read_datareg(flctl, 0); /* read and end */
+ break;
+
+ case NAND_CMD_RESET:
+ set_cmd_regs(mtd, command, command);
+ set_addr(mtd, -1, -1);
+
+ writel(0, FLDTCNTR(flctl)); /* set 0 size */
+ start_translation(flctl);
+ wait_completion(flctl);
+ break;
+
+ default:
+ break;
+ }
+ goto runtime_exit;
+
+read_normal_exit:
+ writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ empty_fifo(flctl);
+ start_translation(flctl);
+ read_fiforeg(flctl, flctl->read_bytes, 0);
+ wait_completion(flctl);
+runtime_exit:
+ pm_runtime_put_sync(&flctl->pdev->dev);
+ return;
+}
+
+static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int ret;
+
+ switch (chipnr) {
+ case -1:
+ flctl->flcmncr_base &= ~CE0_ENABLE;
+
+ pm_runtime_get_sync(&flctl->pdev->dev);
+ writel(flctl->flcmncr_base, FLCMNCR(flctl));
+
+ if (flctl->qos_request) {
+ dev_pm_qos_remove_request(&flctl->pm_qos);
+ flctl->qos_request = 0;
+ }
+
+ pm_runtime_put_sync(&flctl->pdev->dev);
+ break;
+ case 0:
+ flctl->flcmncr_base |= CE0_ENABLE;
+
+ if (!flctl->qos_request) {
+ ret = dev_pm_qos_add_request(&flctl->pdev->dev,
+ &flctl->pm_qos, 100);
+ if (ret < 0)
+ dev_err(&flctl->pdev->dev,
+ "PM QoS request failed: %d\n", ret);
+ flctl->qos_request = 1;
+ }
+
+ if (flctl->holden) {
+ pm_runtime_get_sync(&flctl->pdev->dev);
+ writel(HOLDEN, FLHOLDCR(flctl));
+ pm_runtime_put_sync(&flctl->pdev->dev);
+ }
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int i, index = flctl->index;
+
+ for (i = 0; i < len; i++)
+ flctl->done_buff[index + i] = buf[i];
+ flctl->index += len;
+}
+
+static uint8_t flctl_read_byte(struct mtd_info *mtd)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int index = flctl->index;
+ uint8_t data;
+
+ data = flctl->done_buff[index];
+ flctl->index++;
+ return data;
+}
+
+static uint16_t flctl_read_word(struct mtd_info *mtd)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int index = flctl->index;
+ uint16_t data;
+ uint16_t *buf = (uint16_t *)&flctl->done_buff[index];
+
+ data = *buf;
+ flctl->index += 2;
+ return data;
+}
+
+static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = flctl_read_byte(mtd);
+}
+
+static int flctl_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (buf[i] != flctl_read_byte(mtd))
+ return -EFAULT;
+ return 0;
+}
+
+static int flctl_chip_init_tail(struct mtd_info *mtd)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ struct nand_chip *chip = &flctl->chip;
+
+ if (mtd->writesize == 512) {
+ flctl->page_size = 0;
+ if (chip->chipsize > (32 << 20)) {
+			/* bigger than 32MB */
+ flctl->rw_ADRCNT = ADRCNT_4;
+ flctl->erase_ADRCNT = ADRCNT_3;
+ } else if (chip->chipsize > (2 << 16)) {
+			/* bigger than 128KB */
+ flctl->rw_ADRCNT = ADRCNT_3;
+ flctl->erase_ADRCNT = ADRCNT_2;
+ } else {
+ flctl->rw_ADRCNT = ADRCNT_2;
+ flctl->erase_ADRCNT = ADRCNT_1;
+ }
+ } else {
+ flctl->page_size = 1;
+ if (chip->chipsize > (128 << 20)) {
+			/* bigger than 128MB */
+ flctl->rw_ADRCNT = ADRCNT2_E;
+ flctl->erase_ADRCNT = ADRCNT_3;
+ } else if (chip->chipsize > (8 << 16)) {
+			/* bigger than 512KB */
+ flctl->rw_ADRCNT = ADRCNT_4;
+ flctl->erase_ADRCNT = ADRCNT_2;
+ } else {
+ flctl->rw_ADRCNT = ADRCNT_3;
+ flctl->erase_ADRCNT = ADRCNT_1;
+ }
+ }
+
+ if (flctl->hwecc) {
+ if (mtd->writesize == 512) {
+ chip->ecc.layout = &flctl_4secc_oob_16;
+ chip->badblock_pattern = &flctl_4secc_smallpage;
+ } else {
+ chip->ecc.layout = &flctl_4secc_oob_64;
+ chip->badblock_pattern = &flctl_4secc_largepage;
+ }
+
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 10;
+ chip->ecc.strength = 4;
+ chip->ecc.read_page = flctl_read_page_hwecc;
+ chip->ecc.write_page = flctl_write_page_hwecc;
+ chip->ecc.mode = NAND_ECC_HW;
+
+ /* 4 symbols ECC enabled */
+ flctl->flcmncr_base |= _4ECCEN | ECCPOS2 | ECCPOS_02;
+ } else {
+ chip->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ return 0;
+}
+
+static int __devinit flctl_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct sh_flctl *flctl;
+ struct mtd_info *flctl_mtd;
+ struct nand_chip *nand;
+ struct sh_flctl_platform_data *pdata;
+ int ret = -ENXIO;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "no platform data defined\n");
+ return -EINVAL;
+ }
+
+ flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
+ if (!flctl) {
+ dev_err(&pdev->dev, "failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get I/O memory\n");
+ goto err_iomap;
+ }
+
+ flctl->reg = ioremap(res->start, resource_size(res));
+ if (flctl->reg == NULL) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ goto err_iomap;
+ }
+
+ platform_set_drvdata(pdev, flctl);
+ flctl_mtd = &flctl->mtd;
+ nand = &flctl->chip;
+ flctl_mtd->priv = nand;
+ flctl->pdev = pdev;
+ flctl->flcmncr_base = pdata->flcmncr_val;
+ flctl->hwecc = pdata->has_hwecc;
+ flctl->holden = pdata->use_holden;
+
+ nand->options = NAND_NO_AUTOINCR;
+
+ /* Set address of hardware control function */
+ /* 20 us command delay time */
+ nand->chip_delay = 20;
+
+ nand->read_byte = flctl_read_byte;
+ nand->write_buf = flctl_write_buf;
+ nand->read_buf = flctl_read_buf;
+ nand->verify_buf = flctl_verify_buf;
+ nand->select_chip = flctl_select_chip;
+ nand->cmdfunc = flctl_cmdfunc;
+
+ if (pdata->flcmncr_val & SEL_16BIT) {
+ nand->options |= NAND_BUSWIDTH_16;
+ nand->read_word = flctl_read_word;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ ret = nand_scan_ident(flctl_mtd, 1, NULL);
+ if (ret)
+ goto err_chip;
+
+ ret = flctl_chip_init_tail(flctl_mtd);
+ if (ret)
+ goto err_chip;
+
+ ret = nand_scan_tail(flctl_mtd);
+ if (ret)
+ goto err_chip;
+
+ mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
+
+ return 0;
+
+err_chip:
+ pm_runtime_disable(&pdev->dev);
+err_iomap:
+ kfree(flctl);
+ return ret;
+}
+
+static int __devexit flctl_remove(struct platform_device *pdev)
+{
+ struct sh_flctl *flctl = platform_get_drvdata(pdev);
+
+ nand_release(&flctl->mtd);
+ pm_runtime_disable(&pdev->dev);
+ kfree(flctl);
+
+ return 0;
+}
+
+static struct platform_driver flctl_driver = {
+ .remove = flctl_remove,
+ .driver = {
+ .name = "sh_flctl",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init flctl_nand_init(void)
+{
+ return platform_driver_probe(&flctl_driver, flctl_probe);
+}
+
+static void __exit flctl_nand_cleanup(void)
+{
+ platform_driver_unregister(&flctl_driver);
+}
+
+module_init(flctl_nand_init);
+module_exit(flctl_nand_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_DESCRIPTION("SuperH FLCTL driver");
+MODULE_ALIAS("platform:sh_flctl");
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
new file mode 100644
index 00000000..3421e376
--- /dev/null
+++ b/drivers/mtd/nand/sharpsl.c
@@ -0,0 +1,238 @@
+/*
+ * drivers/mtd/nand/sharpsl.c
+ *
+ * Copyright (C) 2004 Richard Purdie
+ * Copyright (C) 2008 Dmitry Baryshkov
+ *
+ * Based on Sharp's NAND driver sharp_sl.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/sharpsl.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+
+struct sharpsl_nand {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+
+ void __iomem *io;
+};
+
+#define mtd_to_sharpsl(_mtd) container_of(_mtd, struct sharpsl_nand, mtd)
+
+/* register offset */
+#define ECCLPLB 0x00 /* line parity 7 - 0 bit */
+#define ECCLPUB 0x04 /* line parity 15 - 8 bit */
+#define ECCCP 0x08 /* column parity 5 - 0 bit */
+#define ECCCNTR 0x0C /* ECC byte counter */
+#define ECCCLRR		0x10	/* clear ECC */
+#define FLASHIO 0x14 /* Flash I/O */
+#define FLASHCTL 0x18 /* Flash Control */
+
+/* Flash control bit */
+#define FLRYBY (1 << 5)
+#define FLCE1 (1 << 4)
+#define FLWP (1 << 3)
+#define FLALE (1 << 2)
+#define FLCLE (1 << 1)
+#define FLCE0 (1 << 0)
+
+/*
+ * hardware specific access to control-lines
+ * ctrl:
+ *	NAND_NCE: bit 0 -> ! bit 0 & 4
+ * NAND_CLE: bit 1 -> bit 1
+ * NAND_ALE: bit 2 -> bit 2
+ *
+ */
+static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+ struct nand_chip *chip = mtd->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ unsigned char bits = ctrl & 0x07;
+
+ bits |= (ctrl & 0x01) << 4;
+
+ bits ^= 0x11;
+
+ writeb((readb(sharpsl->io + FLASHCTL) & ~0x17) | bits, sharpsl->io + FLASHCTL);
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, chip->IO_ADDR_W);
+}
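+
+/*
+ * Worked example, derived from the mapping above and the generic
+ * NAND_NCE/NAND_CLE/NAND_ALE values (0x01/0x02/0x04), illustration only:
+ * for ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_CLE (0x83),
+ * bits = 0x03, then |= (0x01 << 4) -> 0x13, then ^= 0x11 -> 0x02,
+ * so FLCLE is raised while FLCE0/FLCE1 are driven low (both chip
+ * enables are active low and follow the inverted NCE bit).
+ */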
+
+static int sharpsl_nand_dev_ready(struct mtd_info *mtd)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+ return !((readb(sharpsl->io + FLASHCTL) & FLRYBY) == 0);
+}
+
+static void sharpsl_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+ writeb(0, sharpsl->io + ECCCLRR);
+}
+
+static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, u_char * ecc_code)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+ ecc_code[0] = ~readb(sharpsl->io + ECCLPUB);
+ ecc_code[1] = ~readb(sharpsl->io + ECCLPLB);
+ ecc_code[2] = (~readb(sharpsl->io + ECCCP) << 2) | 0x03;
+ return readb(sharpsl->io + ECCCNTR) != 0;
+}
+
+/*
+ * Main initialization routine
+ */
+static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
+{
+ struct nand_chip *this;
+ struct resource *r;
+ int err = 0;
+ struct sharpsl_nand *sharpsl;
+ struct sharpsl_nand_platform_data *data = pdev->dev.platform_data;
+
+ if (!data) {
+ dev_err(&pdev->dev, "no platform data!\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for MTD device structure and private data */
+ sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
+ if (!sharpsl) {
+ printk("Unable to allocate SharpSL NAND MTD device structure.\n");
+ return -ENOMEM;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no io memory resource defined!\n");
+ err = -ENODEV;
+ goto err_get_res;
+ }
+
+ /* map physical address */
+ sharpsl->io = ioremap(r->start, resource_size(r));
+ if (!sharpsl->io) {
+ printk("ioremap to access Sharp SL NAND chip failed\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&sharpsl->chip);
+
+ /* Link the private data with the MTD structure */
+ sharpsl->mtd.priv = this;
+ sharpsl->mtd.owner = THIS_MODULE;
+
+ platform_set_drvdata(pdev, sharpsl);
+
+ /*
+ * PXA initialize
+ */
+ writeb(readb(sharpsl->io + FLASHCTL) | FLWP, sharpsl->io + FLASHCTL);
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_R = sharpsl->io + FLASHIO;
+ this->IO_ADDR_W = sharpsl->io + FLASHIO;
+ /* Set address of hardware control function */
+ this->cmd_ctrl = sharpsl_nand_hwcontrol;
+ this->dev_ready = sharpsl_nand_dev_ready;
+ /* 15 us command delay time */
+ this->chip_delay = 15;
+ /* set eccmode using hardware ECC */
+ this->ecc.mode = NAND_ECC_HW;
+ this->ecc.size = 256;
+ this->ecc.bytes = 3;
+ this->ecc.strength = 1;
+ this->badblock_pattern = data->badblock_pattern;
+ this->ecc.layout = data->ecc_layout;
+ this->ecc.hwctl = sharpsl_nand_enable_hwecc;
+ this->ecc.calculate = sharpsl_nand_calculate_ecc;
+ this->ecc.correct = nand_correct_data;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(&sharpsl->mtd, 1);
+ if (err)
+ goto err_scan;
+
+ /* Register the partitions */
+ sharpsl->mtd.name = "sharpsl-nand";
+
+ err = mtd_device_parse_register(&sharpsl->mtd, NULL, NULL,
+ data->partitions, data->nr_partitions);
+ if (err)
+ goto err_add;
+
+ /* Return happy */
+ return 0;
+
+err_add:
+ nand_release(&sharpsl->mtd);
+
+err_scan:
+ platform_set_drvdata(pdev, NULL);
+ iounmap(sharpsl->io);
+err_ioremap:
+err_get_res:
+ kfree(sharpsl);
+ return err;
+}
+
+/*
+ * Clean up routine
+ */
+static int __devexit sharpsl_nand_remove(struct platform_device *pdev)
+{
+ struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
+
+ /* Release resources, unregister device */
+ nand_release(&sharpsl->mtd);
+
+ platform_set_drvdata(pdev, NULL);
+
+ iounmap(sharpsl->io);
+
+ /* Free the MTD device structure */
+ kfree(sharpsl);
+
+ return 0;
+}
+
+static struct platform_driver sharpsl_nand_driver = {
+ .driver = {
+ .name = "sharpsl-nand",
+ .owner = THIS_MODULE,
+ },
+ .probe = sharpsl_nand_probe,
+ .remove = __devexit_p(sharpsl_nand_remove),
+};
+
+module_platform_driver(sharpsl_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
+MODULE_DESCRIPTION("Device specific logic for NAND flash on Sharp SL-C7xx Series");
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
new file mode 100644
index 00000000..774c3c26
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for xD format
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/mtd/nand.h>
+#include <linux/module.h>
+#include "sm_common.h"
+
+static struct nand_ecclayout nand_oob_sm = {
+ .eccbytes = 6,
+ .eccpos = {8, 9, 10, 13, 14, 15},
+ .oobfree = {
+ {.offset = 0 , .length = 4}, /* reserved */
+ {.offset = 6 , .length = 2}, /* LBA1 */
+ {.offset = 11, .length = 2} /* LBA2 */
+ }
+};
+
+/* NOTE: This layout is not compatible with SmartMedia, */
+/* because the 256 byte devices have a page dependent oob layout */
+/* However it does preserve the bad block markers */
+/* If you use smftl, it will bypass this and work correctly */
+/* If you do not, then you break SmartMedia compliance anyway */
+
+static struct nand_ecclayout nand_oob_sm_small = {
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2},
+ .oobfree = {
+ {.offset = 3 , .length = 2}, /* reserved */
+ {.offset = 6 , .length = 2}, /* LBA1 */
+ }
+};
+
+
+static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_oob_ops ops;
+ struct sm_oob oob;
+ int ret, error = 0;
+
+ memset(&oob, -1, SM_OOB_SIZE);
+ oob.block_status = 0x0F;
+
+ /* As long as this function is called on erase block boundaries
+ it will work correctly for 256 byte nand */
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = (void *)&oob;
+ ops.datbuf = NULL;
+
+
+ ret = mtd_write_oob(mtd, ofs, &ops);
+ if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
+ printk(KERN_NOTICE
+ "sm_common: can't mark sector at %i as bad\n",
+ (int)ofs);
+ error = -EIO;
+ } else
+ mtd->ecc_stats.badblocks++;
+
+ return error;
+}
+
+
+static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
+ {"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0},
+ {"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0},
+ {"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0},
+ {"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0},
+ {"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0},
+ {"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM},
+ {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM},
+ {"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0},
+ {"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM},
+ {"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
+ {"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM},
+ {"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
+ {"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM},
+ {"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
+ {"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM},
+ {"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
+ {"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM},
+ {"SmartMedia 256MiB 3,3V", 0x71, 512, 256, 0x4000 },
+ {"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM},
+ {NULL,}
+};
+
+#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
+static struct nand_flash_dev nand_xd_flash_ids[] = {
+
+ {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
+ {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
+ {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
+ {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
+ {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM},
+ {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM},
+ {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM},
+ {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM},
+ {NULL,}
+};
+
+int sm_register_device(struct mtd_info *mtd, int smartmedia)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ /* Scan for card properties */
+ ret = nand_scan_ident(mtd, 1, smartmedia ?
+ nand_smartmedia_flash_ids : nand_xd_flash_ids);
+
+ if (ret)
+ return ret;
+
+ /* Bad block marker position */
+ chip->badblockpos = 0x05;
+ chip->badblockbits = 7;
+ chip->block_markbad = sm_block_markbad;
+
+ /* ECC layout */
+ if (mtd->writesize == SM_SECTOR_SIZE)
+ chip->ecc.layout = &nand_oob_sm;
+ else if (mtd->writesize == SM_SMALL_PAGE)
+ chip->ecc.layout = &nand_oob_sm_small;
+ else
+ return -ENODEV;
+
+ ret = nand_scan_tail(mtd);
+
+ if (ret)
+ return ret;
+
+ return mtd_device_register(mtd, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(sm_register_device);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Common SmartMedia/xD functions");
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
new file mode 100644
index 00000000..00f4a833
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for SmartMedia/xD format
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+
+/* Full oob structure as written on the flash */
+struct sm_oob {
+ uint32_t reserved;
+ uint8_t data_status;
+ uint8_t block_status;
+ uint8_t lba_copy1[2];
+ uint8_t ecc2[3];
+ uint8_t lba_copy2[2];
+ uint8_t ecc1[3];
+} __attribute__((packed));
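+
+/*
+ * Byte offsets within the 16 byte OOB area implied by the packed struct
+ * above: reserved 0-3, data_status 4, block_status 5, lba_copy1 6-7,
+ * ecc2 8-10, lba_copy2 11-12, ecc1 13-15.  This lines up with the
+ * nand_oob_sm layout in sm_common.c (eccpos 8-10 and 13-15, free bytes
+ * at 0-3, 6-7 and 11-12).
+ */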
+
+
+/* one sector is always 512 bytes, but it can consist of two nand pages */
+#define SM_SECTOR_SIZE 512
+
+/* oob area is also 16 bytes, but might be from two pages */
+#define SM_OOB_SIZE 16
+
+/* This is the maximum zone size, and all devices that have more than one zone
+   have this size */
+#define SM_MAX_ZONE_SIZE 1024
+
+/* support for small page nand */
+#define SM_SMALL_PAGE 256
+#define SM_SMALL_OOB_SIZE 8
+
+
+extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
+
+
+static inline int sm_sector_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->data_status) >= 5;
+}
+
+static inline int sm_block_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->block_status) >= 7;
+}
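+
+/*
+ * Example, derived from the helpers above: a good block carries a
+ * block_status of 0xFF (hweight 8 >= 7, valid), while sm_block_markbad()
+ * in sm_common.c writes 0x0F (hweight 4 < 7), so the block stays bad
+ * even if a marker bit flips back to 1.
+ */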
+
+static inline int sm_block_erased(struct sm_oob *oob)
+{
+ static const uint32_t erased_pattern[4] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ /* First test for erased block */
+ if (!memcmp(oob, erased_pattern, sizeof(*oob)))
+ return 1;
+ return 0;
+}
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
new file mode 100644
index 00000000..e02b08bc
--- /dev/null
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -0,0 +1,280 @@
+/*
+ * drivers/mtd/nand/socrates_nand.c
+ *
+ * Copyright © 2008 Ilya Yanok, Emcraft Systems
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#define FPGA_NAND_CMD_MASK (0x7 << 28)
+#define FPGA_NAND_CMD_COMMAND (0x0 << 28)
+#define FPGA_NAND_CMD_ADDR (0x1 << 28)
+#define FPGA_NAND_CMD_READ (0x2 << 28)
+#define FPGA_NAND_CMD_WRITE (0x3 << 28)
+#define FPGA_NAND_BUSY (0x1 << 15)
+#define FPGA_NAND_ENABLE (0x1 << 31)
+#define FPGA_NAND_DATA_SHIFT 16
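+
+/*
+ * Register word layout implied by the definitions above (a reading of
+ * this driver, not separate FPGA documentation): bit 31 enable, bits
+ * 30-28 command, bits 23-16 data, bit 15 busy on reads.  Writing the
+ * data byte 0xA5, for example, issues
+ * out_be32(io_base, FPGA_NAND_ENABLE | FPGA_NAND_CMD_WRITE |
+ * (0xA5 << FPGA_NAND_DATA_SHIFT)), i.e. 0xB0A50000.
+ */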
+
+struct socrates_nand_host {
+ struct nand_chip nand_chip;
+ struct mtd_info mtd;
+ void __iomem *io_base;
+ struct device *dev;
+};
+
+/**
+ * socrates_nand_write_buf - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void socrates_nand_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ struct socrates_nand_host *host = this->priv;
+
+ for (i = 0; i < len; i++) {
+ out_be32(host->io_base, FPGA_NAND_ENABLE |
+ FPGA_NAND_CMD_WRITE |
+ (buf[i] << FPGA_NAND_DATA_SHIFT));
+ }
+}
+
+/**
+ * socrates_nand_read_buf - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf:	buffer to store data
+ * @len: number of bytes to read
+ */
+static void socrates_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ struct socrates_nand_host *host = this->priv;
+ uint32_t val;
+
+ val = FPGA_NAND_ENABLE | FPGA_NAND_CMD_READ;
+
+ out_be32(host->io_base, val);
+ for (i = 0; i < len; i++) {
+ buf[i] = (in_be32(host->io_base) >>
+ FPGA_NAND_DATA_SHIFT) & 0xff;
+ }
+}
+
+/**
+ * socrates_nand_read_byte - read one byte from the chip
+ * @mtd: MTD device structure
+ */
+static uint8_t socrates_nand_read_byte(struct mtd_info *mtd)
+{
+ uint8_t byte;
+ socrates_nand_read_buf(mtd, &byte, sizeof(byte));
+ return byte;
+}
+
+/**
+ * socrates_nand_read_word - read one word from the chip
+ * @mtd: MTD device structure
+ */
+static uint16_t socrates_nand_read_word(struct mtd_info *mtd)
+{
+ uint16_t word;
+ socrates_nand_read_buf(mtd, (uint8_t *)&word, sizeof(word));
+ return word;
+}
+
+/**
+ * socrates_nand_verify_buf - Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ */
+static int socrates_nand_verify_buf(struct mtd_info *mtd, const u8 *buf,
+ int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (buf[i] != socrates_nand_read_byte(mtd))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/*
+ * Hardware specific access to control-lines
+ */
+static void socrates_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct socrates_nand_host *host = nand_chip->priv;
+ uint32_t val;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ val = FPGA_NAND_CMD_COMMAND;
+ else
+ val = FPGA_NAND_CMD_ADDR;
+
+ if (ctrl & NAND_NCE)
+ val |= FPGA_NAND_ENABLE;
+
+ val |= (cmd & 0xff) << FPGA_NAND_DATA_SHIFT;
+
+ out_be32(host->io_base, val);
+}
+
+/*
+ * Read the Device Ready pin.
+ */
+static int socrates_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct socrates_nand_host *host = nand_chip->priv;
+
+ if (in_be32(host->io_base) & FPGA_NAND_BUSY)
+ return 0; /* busy */
+ return 1;
+}
+
+/*
+ * Probe for the NAND device.
+ */
+static int __devinit socrates_nand_probe(struct platform_device *ofdev)
+{
+ struct socrates_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ int res;
+ struct mtd_part_parser_data ppdata;
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
+ if (!host) {
+ printk(KERN_ERR
+ "socrates_nand: failed to allocate device structure.\n");
+ return -ENOMEM;
+ }
+
+ host->io_base = of_iomap(ofdev->dev.of_node, 0);
+ if (host->io_base == NULL) {
+ printk(KERN_ERR "socrates_nand: ioremap failed\n");
+ kfree(host);
+ return -EIO;
+ }
+
+ mtd = &host->mtd;
+ nand_chip = &host->nand_chip;
+ host->dev = &ofdev->dev;
+
+ nand_chip->priv = host; /* link the private data structures */
+ mtd->priv = nand_chip;
+ mtd->name = "socrates_nand";
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &ofdev->dev;
+ ppdata.of_node = ofdev->dev.of_node;
+
+	/* should never be accessed directly */
+ nand_chip->IO_ADDR_R = (void *)0xdeadbeef;
+ nand_chip->IO_ADDR_W = (void *)0xdeadbeef;
+
+ nand_chip->cmd_ctrl = socrates_nand_cmd_ctrl;
+ nand_chip->read_byte = socrates_nand_read_byte;
+ nand_chip->read_word = socrates_nand_read_word;
+ nand_chip->write_buf = socrates_nand_write_buf;
+ nand_chip->read_buf = socrates_nand_read_buf;
+ nand_chip->verify_buf = socrates_nand_verify_buf;
+ nand_chip->dev_ready = socrates_nand_device_ready;
+
+ nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */
+
+ /* TODO: I have no idea what real delay is. */
+ nand_chip->chip_delay = 20; /* 20us command delay time */
+
+ dev_set_drvdata(&ofdev->dev, host);
+
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ res = -ENXIO;
+ goto out;
+ }
+
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
+ res = -ENXIO;
+ goto out;
+ }
+
+ res = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+ if (!res)
+ return res;
+
+ nand_release(mtd);
+
+out:
+ dev_set_drvdata(&ofdev->dev, NULL);
+ iounmap(host->io_base);
+ kfree(host);
+ return res;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int __devexit socrates_nand_remove(struct platform_device *ofdev)
+{
+ struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
+ struct mtd_info *mtd = &host->mtd;
+
+ nand_release(mtd);
+
+ dev_set_drvdata(&ofdev->dev, NULL);
+ iounmap(host->io_base);
+ kfree(host);
+
+ return 0;
+}
+
+static const struct of_device_id socrates_nand_match[] =
+{
+ {
+ .compatible = "abb,socrates-nand",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, socrates_nand_match);
+
+static struct platform_driver socrates_nand_driver = {
+ .driver = {
+ .name = "socrates_nand",
+ .owner = THIS_MODULE,
+ .of_match_table = socrates_nand_match,
+ },
+ .probe = socrates_nand_probe,
+ .remove = __devexit_p(socrates_nand_remove),
+};
+
+module_platform_driver(socrates_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ilya Yanok");
+MODULE_DESCRIPTION("NAND driver for Socrates board");
diff --git a/drivers/mtd/nand/spia.c b/drivers/mtd/nand/spia.c
new file mode 100644
index 00000000..bef76cd7
--- /dev/null
+++ b/drivers/mtd/nand/spia.c
@@ -0,0 +1,176 @@
+/*
+ * drivers/mtd/nand/spia.c
+ *
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ *
+ *
+ *  10-29-2001 TG	change to support hardware specific access
+ *			to control lines (due to change in nand.c)
+ * page_cache added
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * SPIA board which utilizes the Toshiba TC58V64AFT part. This is
+ * a 64Mibit (8MiB x 8 bits) NAND flash device.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+
+/*
+ * MTD structure for SPIA board
+ */
+static struct mtd_info *spia_mtd = NULL;
+
+/*
+ * Values specific to the SPIA board (used with EP7212 processor)
+ */
+#define SPIA_IO_BASE 0xd0000000 /* Start of EP7212 IO address space */
+#define SPIA_FIO_BASE 0xf0000000 /* Address where flash is mapped */
+#define SPIA_PEDR 0x0080 /*
+ * IO offset to Port E data register
+ * where the CLE, ALE and NCE pins
+ * are wired to.
+ */
+#define SPIA_PEDDR 0x00c0 /*
+ * IO offset to Port E data direction
+ * register so we can control the IO
+ * lines.
+ */
+
+/*
+ * Module stuff
+ */
+
+static int spia_io_base = SPIA_IO_BASE;
+static int spia_fio_base = SPIA_FIO_BASE;
+static int spia_pedr = SPIA_PEDR;
+static int spia_peddr = SPIA_PEDDR;
+
+module_param(spia_io_base, int, 0);
+module_param(spia_fio_base, int, 0);
+module_param(spia_pedr, int, 0);
+module_param(spia_peddr, int, 0);
+
+/*
+ * Define partitions for flash device
+ */
+static const struct mtd_partition partition_info[] = {
+ {
+ .name = "SPIA flash partition 1",
+ .offset = 0,
+ .size = 2 * 1024 * 1024},
+ {
+ .name = "SPIA flash partition 2",
+ .offset = 2 * 1024 * 1024,
+ .size = 6 * 1024 * 1024}
+};
+
+#define NUM_PARTITIONS 2
+
+/*
+ * hardware specific access to control-lines
+ *
+ * ctrl:
+ * NAND_CNE: bit 0 -> bit 2
+ * NAND_CLE: bit 1 -> bit 0
+ * NAND_ALE: bit 2 -> bit 1
+ */
+static void spia_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ void __iomem *addr = spia_io_base + spia_pedr;
+ unsigned char bits;
+
+ bits = (ctrl & NAND_CNE) << 2;
+ bits |= (ctrl & NAND_CLE | NAND_ALE) >> 1;
+ writeb((readb(addr) & ~0x7) | bits, addr);
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, chip->IO_ADDR_W);
+}
+
+/*
+ * Main initialization routine
+ */
+static int __init spia_init(void)
+{
+ struct nand_chip *this;
+
+ /* Allocate memory for MTD device structure and private data */
+ spia_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+ if (!spia_mtd) {
+ printk("Unable to allocate SPIA NAND MTD device structure.\n");
+ return -ENOMEM;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&spia_mtd[1]);
+
+ /* Initialize structures */
+ memset(spia_mtd, 0, sizeof(struct mtd_info));
+ memset(this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ spia_mtd->priv = this;
+ spia_mtd->owner = THIS_MODULE;
+
+ /*
+ * Set GPIO Port E control register so that the pins are configured
+ * to be outputs for controlling the NAND flash.
+ */
+ (*(volatile unsigned char *)(spia_io_base + spia_peddr)) = 0x07;
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_R = (void __iomem *)spia_fio_base;
+ this->IO_ADDR_W = (void __iomem *)spia_fio_base;
+ /* Set address of hardware control function */
+ this->cmd_ctrl = spia_hwcontrol;
+ /* 15 us command delay time */
+ this->chip_delay = 15;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(spia_mtd, 1)) {
+ kfree(spia_mtd);
+ return -ENXIO;
+ }
+
+ /* Register the partitions */
+ mtd_device_register(spia_mtd, partition_info, NUM_PARTITIONS);
+
+ /* Return happy */
+ return 0;
+}
+
+module_init(spia_init);
+
+/*
+ * Clean up routine
+ */
+static void __exit spia_cleanup(void)
+{
+ /* Release resources, unregister device */
+ nand_release(spia_mtd);
+
+ /* Free the MTD device structure */
+ kfree(spia_mtd);
+}
+
+module_exit(spia_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
+MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on SPIA board");
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
new file mode 100644
index 00000000..5aa51808
--- /dev/null
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -0,0 +1,542 @@
+/*
+ * Toshiba TMIO NAND flash controller driver
+ *
+ * Slightly murky pre-git history of the driver:
+ *
+ * Copyright (c) Ian Molton 2004, 2005, 2008
+ *	Original work, independent of Sharp's code. Included hardware ECC support.
+ * Hard ECC did not work for writes in the early revisions.
+ * Copyright (c) Dirk Opfer 2005.
+ *	Modifications developed from Sharp's code but
+ *	NOT containing any, ported onto Ian's base.
+ * Copyright (c) Chris Humbert 2005
+ * Copyright (c) Dmitry Baryshkov 2008
+ * Minor fixes
+ *
+ * Parts copyright Sebastian Carlier
+ *
+ * This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tmio.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+
+/*--------------------------------------------------------------------------*/
+
+/*
+ * NAND Flash Host Controller Configuration Register
+ */
+#define CCR_COMMAND 0x04 /* w Command */
+#define CCR_BASE 0x10 /* l NAND Flash Control Reg Base Addr */
+#define CCR_INTP 0x3d /* b Interrupt Pin */
+#define CCR_INTE 0x48 /* b Interrupt Enable */
+#define CCR_EC 0x4a /* b Event Control */
+#define CCR_ICC 0x4c /* b Internal Clock Control */
+#define CCR_ECCC 0x5b /* b ECC Control */
+#define CCR_NFTC 0x60 /* b NAND Flash Transaction Control */
+#define CCR_NFM 0x61 /* b NAND Flash Monitor */
+#define CCR_NFPSC 0x62 /* b NAND Flash Power Supply Control */
+#define CCR_NFDC 0x63 /* b NAND Flash Detect Control */
+
+/*
+ * NAND Flash Control Register
+ */
+#define FCR_DATA 0x00 /* bwl Data Register */
+#define FCR_MODE 0x04 /* b Mode Register */
+#define FCR_STATUS 0x05 /* b Status Register */
+#define FCR_ISR 0x06 /* b Interrupt Status Register */
+#define FCR_IMR 0x07 /* b Interrupt Mask Register */
+
+/* FCR_MODE Register Command List */
+#define FCR_MODE_DATA 0x94 /* Data Data_Mode */
+#define FCR_MODE_COMMAND 0x95 /* Data Command_Mode */
+#define FCR_MODE_ADDRESS 0x96 /* Data Address_Mode */
+
+#define FCR_MODE_HWECC_CALC 0xB4 /* HW-ECC Data */
+#define FCR_MODE_HWECC_RESULT 0xD4 /* HW-ECC Calc result Read_Mode */
+#define FCR_MODE_HWECC_RESET 0xF4 /* HW-ECC Reset */
+
+#define FCR_MODE_POWER_ON 0x0C /* Power Supply ON to SSFDC card */
+#define FCR_MODE_POWER_OFF 0x08 /* Power Supply OFF to SSFDC card */
+
+#define FCR_MODE_LED_OFF 0x00 /* LED OFF */
+#define FCR_MODE_LED_ON 0x04 /* LED ON */
+
+#define FCR_MODE_EJECT_ON 0x68 /* Ejection events active */
+#define FCR_MODE_EJECT_OFF 0x08 /* Ejection events ignored */
+
+#define FCR_MODE_LOCK 0x6C /* Lock_Mode. Eject Switch Invalid */
+#define FCR_MODE_UNLOCK 0x0C /* UnLock_Mode. Eject Switch is valid */
+
+#define FCR_MODE_CONTROLLER_ID 0x40 /* Controller ID Read */
+#define FCR_MODE_STANDBY 0x00 /* SSFDC card Changes Standby State */
+
+#define FCR_MODE_WE 0x80
+#define FCR_MODE_ECC1 0x40
+#define FCR_MODE_ECC0 0x20
+#define FCR_MODE_CE 0x10
+#define FCR_MODE_PCNT1 0x08
+#define FCR_MODE_PCNT0 0x04
+#define FCR_MODE_ALE 0x02
+#define FCR_MODE_CLE 0x01
+
+#define FCR_STATUS_BUSY 0x80
+
+/*--------------------------------------------------------------------------*/
+
+struct tmio_nand {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+
+ struct platform_device *dev;
+
+ void __iomem *ccr;
+ void __iomem *fcr;
+ unsigned long fcr_base;
+
+ unsigned int irq;
+
+ /* for tmio_nand_read_byte */
+ u8 read;
+ unsigned read_good:1;
+};
+
+#define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd)
+
+
+/*--------------------------------------------------------------------------*/
+
+static void tmio_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ struct nand_chip *chip = mtd->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ u8 mode;
+
+ if (ctrl & NAND_NCE) {
+ mode = FCR_MODE_DATA;
+
+ if (ctrl & NAND_CLE)
+ mode |= FCR_MODE_CLE;
+ else
+ mode &= ~FCR_MODE_CLE;
+
+ if (ctrl & NAND_ALE)
+ mode |= FCR_MODE_ALE;
+ else
+ mode &= ~FCR_MODE_ALE;
+ } else {
+ mode = FCR_MODE_STANDBY;
+ }
+
+ tmio_iowrite8(mode, tmio->fcr + FCR_MODE);
+ tmio->read_good = 0;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ tmio_iowrite8(cmd, chip->IO_ADDR_W);
+}
+
+static int tmio_nand_dev_ready(struct mtd_info *mtd)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+
+ return !(tmio_ioread8(tmio->fcr + FCR_STATUS) & FCR_STATUS_BUSY);
+}
+
+static irqreturn_t tmio_irq(int irq, void *__tmio)
+{
+ struct tmio_nand *tmio = __tmio;
+ struct nand_chip *nand_chip = &tmio->chip;
+
+ /* disable RDYREQ interrupt */
+ tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
+
+ if (unlikely(!waitqueue_active(&nand_chip->controller->wq)))
+ dev_warn(&tmio->dev->dev, "spurious interrupt\n");
+
+ wake_up(&nand_chip->controller->wq);
+ return IRQ_HANDLED;
+}
+
+/*
+ *The TMIO core has a RDYREQ interrupt on the posedge of #SMRB.
+ *This interrupt is normally disabled, but for long operations like
+ *erase and write, we enable it to wake us up. The irq handler
+ *disables the interrupt.
+ */
+static int
+tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ long timeout;
+
+ /* enable RDYREQ interrupt */
+ tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
+ tmio_iowrite8(0x81, tmio->fcr + FCR_IMR);
+
+ timeout = wait_event_timeout(nand_chip->controller->wq,
+ tmio_nand_dev_ready(mtd),
+ msecs_to_jiffies(nand_chip->state == FL_ERASING ? 400 : 20));
+
+ if (unlikely(!tmio_nand_dev_ready(mtd))) {
+ tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
+ dev_warn(&tmio->dev->dev, "still busy with %s after %d ms\n",
+ nand_chip->state == FL_ERASING ? "erase" : "program",
+ nand_chip->state == FL_ERASING ? 400 : 20);
+
+ } else if (unlikely(!timeout)) {
+ tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
+ dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
+ }
+
+ nand_chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ return nand_chip->read_byte(mtd);
+}
+
+/*
+ *The TMIO controller combines two 8-bit data bytes into one 16-bit
+ *word. This function separates them so nand_base.c works as expected,
+ *especially its NAND_CMD_READID routines.
+ *
+ *To prevent stale data from being read, tmio_nand_hwcontrol() clears
+ *tmio->read_good.
+ */
+static u_char tmio_nand_read_byte(struct mtd_info *mtd)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ unsigned int data;
+
+ if (tmio->read_good--)
+ return tmio->read;
+
+ data = tmio_ioread16(tmio->fcr + FCR_DATA);
+ tmio->read = data >> 8;
+ return data;
+}
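+
+/*
+ * Example of the above (a reading of the code, not TMIO documentation):
+ * after tmio_nand_hwcontrol() clears read_good, the first call reads one
+ * 16-bit word from FCR_DATA, returns its low byte and caches the high
+ * byte; read_good (a one-bit field) wraps to 1 on the post-decrement, so
+ * the second call returns the cached byte without touching the data
+ * register again.
+ */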
+
+/*
+ *The TMIO controller converts an 8-bit NAND interface to a 16-bit
+ *bus interface, so all data reads and writes must be 16-bit wide.
+ *Thus, we implement 16-bit versions of the read, write, and verify
+ *buffer functions.
+ */
+static void
+tmio_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+
+ tmio_iowrite16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
+}
+
+static void tmio_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+
+ tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
+}
+
+static int
+tmio_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ u16 *p = (u16 *) buf;
+
+ for (len >>= 1; len; len--)
+ if (*(p++) != tmio_ioread16(tmio->fcr + FCR_DATA))
+ return -EFAULT;
+ return 0;
+}
+
+static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+
+ tmio_iowrite8(FCR_MODE_HWECC_RESET, tmio->fcr + FCR_MODE);
+ tmio_ioread8(tmio->fcr + FCR_DATA); /* dummy read */
+ tmio_iowrite8(FCR_MODE_HWECC_CALC, tmio->fcr + FCR_MODE);
+}
+
+static int tmio_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ unsigned int ecc;
+
+ tmio_iowrite8(FCR_MODE_HWECC_RESULT, tmio->fcr + FCR_MODE);
+
+ ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
+ ecc_code[1] = ecc; /* 000-255 LP7-0 */
+ ecc_code[0] = ecc >> 8; /* 000-255 LP15-8 */
+ ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
+ ecc_code[2] = ecc; /* 000-255 CP5-0,11b */
+ ecc_code[4] = ecc >> 8; /* 256-511 LP7-0 */
+ ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
+ ecc_code[3] = ecc; /* 256-511 LP15-8 */
+ ecc_code[5] = ecc >> 8; /* 256-511 CP5-0,11b */
+
+ tmio_iowrite8(FCR_MODE_DATA, tmio->fcr + FCR_MODE);
+ return 0;
+}
+
+static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ int r0, r1;
+
+ /* assume ecc.size = 512 and ecc.bytes = 6 */
+ r0 = __nand_correct_data(buf, read_ecc, calc_ecc, 256);
+ if (r0 < 0)
+ return r0;
+ r1 = __nand_correct_data(buf + 256, read_ecc + 3, calc_ecc + 3, 256);
+ if (r1 < 0)
+ return r1;
+ return r0 + r1;
+}
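+
+/*
+ * A note on the helper above (a reading of the driver, not TMIO
+ * documentation): the six ECC bytes from tmio_nand_calculate_ecc() are
+ * consumed as two independent 3-byte Hamming codes, one per 256-byte
+ * half of the 512-byte ECC block, so one correctable bit error in each
+ * half is still repaired and the function returns 2.
+ */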
+
+static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
+{
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+ int ret;
+
+ if (cell->enable) {
+ ret = cell->enable(dev);
+ if (ret)
+ return ret;
+ }
+
+ /* (4Ch) CLKRUN Enable 1st spcrunc */
+ tmio_iowrite8(0x81, tmio->ccr + CCR_ICC);
+
+ /* (10h)BaseAddress 0x1000 spba.spba2 */
+ tmio_iowrite16(tmio->fcr_base, tmio->ccr + CCR_BASE);
+ tmio_iowrite16(tmio->fcr_base >> 16, tmio->ccr + CCR_BASE + 2);
+
+ /* (04h)Command Register I/O spcmd */
+ tmio_iowrite8(0x02, tmio->ccr + CCR_COMMAND);
+
+ /* (62h) Power Supply Control ssmpwc */
+ /* HardPowerOFF - SuspendOFF - PowerSupplyWait_4MS */
+ tmio_iowrite8(0x02, tmio->ccr + CCR_NFPSC);
+
+ /* (63h) Detect Control ssmdtc */
+ tmio_iowrite8(0x02, tmio->ccr + CCR_NFDC);
+
+ /* Interrupt status register clear sintst */
+ tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
+
+ /* After power supply, Media are reset smode */
+ tmio_iowrite8(FCR_MODE_POWER_ON, tmio->fcr + FCR_MODE);
+ tmio_iowrite8(FCR_MODE_COMMAND, tmio->fcr + FCR_MODE);
+ tmio_iowrite8(NAND_CMD_RESET, tmio->fcr + FCR_DATA);
+
+ /* Standby Mode smode */
+ tmio_iowrite8(FCR_MODE_STANDBY, tmio->fcr + FCR_MODE);
+
+ mdelay(5);
+
+ return 0;
+}
+
+static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
+{
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+
+ tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
+ if (cell->disable)
+ cell->disable(dev);
+}
+
+static int tmio_probe(struct platform_device *dev)
+{
+ struct tmio_nand_data *data = dev->dev.platform_data;
+ struct resource *fcr = platform_get_resource(dev,
+ IORESOURCE_MEM, 0);
+ struct resource *ccr = platform_get_resource(dev,
+ IORESOURCE_MEM, 1);
+ int irq = platform_get_irq(dev, 0);
+ struct tmio_nand *tmio;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ int retval;
+
+ if (data == NULL)
+ dev_warn(&dev->dev, "NULL platform data!\n");
+
+ tmio = kzalloc(sizeof *tmio, GFP_KERNEL);
+ if (!tmio) {
+ retval = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ tmio->dev = dev;
+
+ platform_set_drvdata(dev, tmio);
+ mtd = &tmio->mtd;
+ nand_chip = &tmio->chip;
+ mtd->priv = nand_chip;
+ mtd->name = "tmio-nand";
+
+ tmio->ccr = ioremap(ccr->start, resource_size(ccr));
+ if (!tmio->ccr) {
+ retval = -EIO;
+ goto err_iomap_ccr;
+ }
+
+ tmio->fcr_base = fcr->start & 0xfffff;
+ tmio->fcr = ioremap(fcr->start, resource_size(fcr));
+ if (!tmio->fcr) {
+ retval = -EIO;
+ goto err_iomap_fcr;
+ }
+
+ retval = tmio_hw_init(dev, tmio);
+ if (retval)
+ goto err_hwinit;
+
+ /* Set address of NAND IO lines */
+ nand_chip->IO_ADDR_R = tmio->fcr;
+ nand_chip->IO_ADDR_W = tmio->fcr;
+
+ /* Set address of hardware control function */
+ nand_chip->cmd_ctrl = tmio_nand_hwcontrol;
+ nand_chip->dev_ready = tmio_nand_dev_ready;
+ nand_chip->read_byte = tmio_nand_read_byte;
+ nand_chip->write_buf = tmio_nand_write_buf;
+ nand_chip->read_buf = tmio_nand_read_buf;
+ nand_chip->verify_buf = tmio_nand_verify_buf;
+
+ /* set eccmode using hardware ECC */
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.bytes = 6;
+ nand_chip->ecc.strength = 2;
+ nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
+ nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
+ nand_chip->ecc.correct = tmio_nand_correct_data;
+
+ if (data)
+ nand_chip->badblock_pattern = data->badblock_pattern;
+
+ /* 15 us command delay time */
+ nand_chip->chip_delay = 15;
+
+ retval = request_irq(irq, &tmio_irq,
+ IRQF_DISABLED, dev_name(&dev->dev), tmio);
+ if (retval) {
+ dev_err(&dev->dev, "request_irq error %d\n", retval);
+ goto err_irq;
+ }
+
+ tmio->irq = irq;
+ nand_chip->waitfunc = tmio_nand_wait;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(mtd, 1)) {
+ retval = -ENODEV;
+ goto err_scan;
+ }
+ /* Register the partitions */
+ retval = mtd_device_parse_register(mtd, NULL, NULL,
+ data ? data->partition : NULL,
+ data ? data->num_partitions : 0);
+ if (!retval)
+ return retval;
+
+ nand_release(mtd);
+
+err_scan:
+ if (tmio->irq)
+ free_irq(tmio->irq, tmio);
+err_irq:
+ tmio_hw_stop(dev, tmio);
+err_hwinit:
+ iounmap(tmio->fcr);
+err_iomap_fcr:
+ iounmap(tmio->ccr);
+err_iomap_ccr:
+ kfree(tmio);
+err_kzalloc:
+ return retval;
+}
+
+static int tmio_remove(struct platform_device *dev)
+{
+ struct tmio_nand *tmio = platform_get_drvdata(dev);
+
+ nand_release(&tmio->mtd);
+ if (tmio->irq)
+ free_irq(tmio->irq, tmio);
+ tmio_hw_stop(dev, tmio);
+ iounmap(tmio->fcr);
+ iounmap(tmio->ccr);
+ kfree(tmio);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tmio_suspend(struct platform_device *dev, pm_message_t state)
+{
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+
+ if (cell->suspend)
+ cell->suspend(dev);
+
+ tmio_hw_stop(dev, platform_get_drvdata(dev));
+ return 0;
+}
+
+static int tmio_resume(struct platform_device *dev)
+{
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+
+ /* FIXME - is this required or merely another attack of the broken
+ * SHARP platform? Looks suspicious.
+ */
+ tmio_hw_init(dev, platform_get_drvdata(dev));
+
+ if (cell->resume)
+ cell->resume(dev);
+
+ return 0;
+}
+#else
+#define tmio_suspend NULL
+#define tmio_resume NULL
+#endif
+
+static struct platform_driver tmio_driver = {
+ .driver.name = "tmio-nand",
+ .driver.owner = THIS_MODULE,
+ .probe = tmio_probe,
+ .remove = tmio_remove,
+ .suspend = tmio_suspend,
+ .resume = tmio_resume,
+};
+
+module_platform_driver(tmio_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
+MODULE_DESCRIPTION("NAND flash driver on Toshiba Mobile IO controller");
+MODULE_ALIAS("platform:tmio-nand");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
new file mode 100644
index 00000000..26398dcf
--- /dev/null
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -0,0 +1,457 @@
+/*
+ * TXx9 NAND flash memory controller driver
+ * Based on RBTX49xx patch from CELF patch archive.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * (C) Copyright TOSHIBA CORPORATION 2004-2007
+ * All Rights Reserved.
+ */
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <asm/txx9/ndfmc.h>
+
+/* TXX9 NDFMC Registers */
+#define TXX9_NDFDTR 0x00
+#define TXX9_NDFMCR 0x04
+#define TXX9_NDFSR 0x08
+#define TXX9_NDFISR 0x0c
+#define TXX9_NDFIMR 0x10
+#define TXX9_NDFSPR 0x14
+#define TXX9_NDFRSTR 0x18 /* not TX4939 */
+
+/* NDFMCR : NDFMC Mode Control */
+#define TXX9_NDFMCR_WE 0x80
+#define TXX9_NDFMCR_ECC_ALL 0x60
+#define TXX9_NDFMCR_ECC_RESET 0x60
+#define TXX9_NDFMCR_ECC_READ 0x40
+#define TXX9_NDFMCR_ECC_ON 0x20
+#define TXX9_NDFMCR_ECC_OFF 0x00
+#define TXX9_NDFMCR_CE 0x10
+#define TXX9_NDFMCR_BSPRT 0x04 /* TX4925/TX4926 only */
+#define TXX9_NDFMCR_ALE 0x02
+#define TXX9_NDFMCR_CLE 0x01
+/* TX4939 only */
+#define TXX9_NDFMCR_X16 0x0400
+#define TXX9_NDFMCR_DMAREQ_MASK 0x0300
+#define TXX9_NDFMCR_DMAREQ_NODMA 0x0000
+#define TXX9_NDFMCR_DMAREQ_128 0x0100
+#define TXX9_NDFMCR_DMAREQ_256 0x0200
+#define TXX9_NDFMCR_DMAREQ_512 0x0300
+#define TXX9_NDFMCR_CS_MASK 0x0c
+#define TXX9_NDFMCR_CS(ch) ((ch) << 2)
+
+/* NDFMCR : NDFMC Status */
+#define TXX9_NDFSR_BUSY 0x80
+/* TX4939 only */
+#define TXX9_NDFSR_DMARUN 0x40
+
+/* NDFMCR : NDFMC Reset */
+#define TXX9_NDFRSTR_RST 0x01
+
+struct txx9ndfmc_priv {
+ struct platform_device *dev;
+ struct nand_chip chip;
+ struct mtd_info mtd;
+ int cs;
+ const char *mtdname;
+};
+
+#define MAX_TXX9NDFMC_DEV 4
+struct txx9ndfmc_drvdata {
+ struct mtd_info *mtds[MAX_TXX9NDFMC_DEV];
+ void __iomem *base;
+ unsigned char hold; /* in gbusclock */
+ unsigned char spw; /* in gbusclock */
+ struct nand_hw_control hw_control;
+};
+
+static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct txx9ndfmc_priv *txx9_priv = chip->priv;
+ return txx9_priv->dev;
+}
+
+static void __iomem *ndregaddr(struct platform_device *dev, unsigned int reg)
+{
+ struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+ struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+
+ return drvdata->base + (reg << plat->shift);
+}
+
+static u32 txx9ndfmc_read(struct platform_device *dev, unsigned int reg)
+{
+ return __raw_readl(ndregaddr(dev, reg));
+}
+
+static void txx9ndfmc_write(struct platform_device *dev,
+ u32 val, unsigned int reg)
+{
+ __raw_writel(val, ndregaddr(dev, reg));
+}
+
+static uint8_t txx9ndfmc_read_byte(struct mtd_info *mtd)
+{
+ struct platform_device *dev = mtd_to_platdev(mtd);
+
+ return txx9ndfmc_read(dev, TXX9_NDFDTR);
+}
+
+static void txx9ndfmc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct platform_device *dev = mtd_to_platdev(mtd);
+ void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_WE, TXX9_NDFMCR);
+ while (len--)
+ __raw_writel(*buf++, ndfdtr);
+ txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
+}
+
+static void txx9ndfmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct platform_device *dev = mtd_to_platdev(mtd);
+ void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
+
+ while (len--)
+ *buf++ = __raw_readl(ndfdtr);
+}
+
+static int txx9ndfmc_verify_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct platform_device *dev = mtd_to_platdev(mtd);
+ void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
+
+ while (len--)
+ if (*buf++ != (uint8_t)__raw_readl(ndfdtr))
+ return -EFAULT;
+ return 0;
+}
+
+static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct txx9ndfmc_priv *txx9_priv = chip->priv;
+ struct platform_device *dev = txx9_priv->dev;
+ struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ mcr &= ~(TXX9_NDFMCR_CLE | TXX9_NDFMCR_ALE | TXX9_NDFMCR_CE);
+ mcr |= ctrl & NAND_CLE ? TXX9_NDFMCR_CLE : 0;
+ mcr |= ctrl & NAND_ALE ? TXX9_NDFMCR_ALE : 0;
+ /* TXX9_NDFMCR_CE bit is 0:high 1:low */
+ mcr |= ctrl & NAND_NCE ? TXX9_NDFMCR_CE : 0;
+ if (txx9_priv->cs >= 0 && (ctrl & NAND_NCE)) {
+ mcr &= ~TXX9_NDFMCR_CS_MASK;
+ mcr |= TXX9_NDFMCR_CS(txx9_priv->cs);
+ }
+ txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
+ }
+ if (cmd != NAND_CMD_NONE)
+ txx9ndfmc_write(dev, cmd & 0xff, TXX9_NDFDTR);
+ if (plat->flags & NDFMC_PLAT_FLAG_DUMMYWRITE) {
+ /* dummy write to update external latch */
+ if ((ctrl & NAND_CTRL_CHANGE) && cmd == NAND_CMD_NONE)
+ txx9ndfmc_write(dev, 0, TXX9_NDFDTR);
+ }
+ mmiowb();
+}
+
+static int txx9ndfmc_dev_ready(struct mtd_info *mtd)
+{
+ struct platform_device *dev = mtd_to_platdev(mtd);
+
+ return !(txx9ndfmc_read(dev, TXX9_NDFSR) & TXX9_NDFSR_BUSY);
+}
+
+static int txx9ndfmc_calculate_ecc(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct platform_device *dev = mtd_to_platdev(mtd);
+ struct nand_chip *chip = mtd->priv;
+ int eccbytes;
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ mcr &= ~TXX9_NDFMCR_ECC_ALL;
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_READ, TXX9_NDFMCR);
+ for (eccbytes = chip->ecc.bytes; eccbytes > 0; eccbytes -= 3) {
+ ecc_code[1] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code[0] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code[2] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code += 3;
+ }
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+ return 0;
+}
+
+static int txx9ndfmc_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ struct nand_chip *chip = mtd->priv;
+ int eccsize;
+ int corrected = 0;
+ int stat;
+
+ for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
+ stat = __nand_correct_data(buf, read_ecc, calc_ecc, 256);
+ if (stat < 0)
+ return stat;
+ corrected += stat;
+ buf += 256;
+ read_ecc += 3;
+ calc_ecc += 3;
+ }
+ return corrected;
+}
+
+static void txx9ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct platform_device *dev = mtd_to_platdev(mtd);
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ mcr &= ~TXX9_NDFMCR_ECC_ALL;
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_RESET, TXX9_NDFMCR);
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_ON, TXX9_NDFMCR);
+}
+
+static void txx9ndfmc_initialize(struct platform_device *dev)
+{
+ struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+ struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+ int tmout = 100;
+
+ if (plat->flags & NDFMC_PLAT_FLAG_NO_RSTR)
+ ; /* no NDFRSTR. Write to NDFSPR resets the NDFMC. */
+ else {
+ /* reset NDFMC */
+ txx9ndfmc_write(dev,
+ txx9ndfmc_read(dev, TXX9_NDFRSTR) |
+ TXX9_NDFRSTR_RST,
+ TXX9_NDFRSTR);
+ while (txx9ndfmc_read(dev, TXX9_NDFRSTR) & TXX9_NDFRSTR_RST) {
+ if (--tmout == 0) {
+ dev_err(&dev->dev, "reset failed.\n");
+ break;
+ }
+ udelay(1);
+ }
+ }
+ /* setup Hold Time, Strobe Pulse Width */
+ txx9ndfmc_write(dev, (drvdata->hold << 4) | drvdata->spw, TXX9_NDFSPR);
+ txx9ndfmc_write(dev,
+ (plat->flags & NDFMC_PLAT_FLAG_USE_BSPRT) ?
+ TXX9_NDFMCR_BSPRT : 0, TXX9_NDFMCR);
+}
+
+#define TXX9NDFMC_NS_TO_CYC(gbusclk, ns) \
+ DIV_ROUND_UP((ns) * DIV_ROUND_UP(gbusclk, 1000), 1000000)
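+
+/*
+ * Worked example with a hypothetical 200 MHz GBUS clock (illustration
+ * only): TXX9NDFMC_NS_TO_CYC(200000000, 90)
+ *	= DIV_ROUND_UP(90 * DIV_ROUND_UP(200000000, 1000), 1000000)
+ *	= DIV_ROUND_UP(90 * 200000, 1000000) = 18 cycles,
+ * which txx9ndfmc_probe() then clamps into the 1..15 register range.
+ */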
+
+static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (!ret) {
+ if (mtd->writesize >= 512) {
+ /* Hardware ECC 6 byte ECC per 512 Byte data */
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
+ }
+ ret = nand_scan_tail(mtd);
+ }
+ return ret;
+}
+
+static int __init txx9ndfmc_probe(struct platform_device *dev)
+{
+ struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+ int hold, spw;
+ int i;
+ struct txx9ndfmc_drvdata *drvdata;
+ unsigned long gbusclk = plat->gbus_clock;
+ struct resource *res;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+ drvdata->base = devm_request_and_ioremap(&dev->dev, res);
+ if (!drvdata->base)
+ return -EBUSY;
+
+ hold = plat->hold ?: 20; /* tDH */
+ spw = plat->spw ?: 90; /* max(tREADID, tWP, tRP) */
+
+ hold = TXX9NDFMC_NS_TO_CYC(gbusclk, hold);
+ spw = TXX9NDFMC_NS_TO_CYC(gbusclk, spw);
+ if (plat->flags & NDFMC_PLAT_FLAG_HOLDADD)
+ hold -= 2; /* actual hold time : (HOLD + 2) BUSCLK */
+ spw -= 1; /* actual wait time : (SPW + 1) BUSCLK */
+ hold = clamp(hold, 1, 15);
+ drvdata->hold = hold;
+ spw = clamp(spw, 1, 15);
+ drvdata->spw = spw;
+ dev_info(&dev->dev, "CLK:%ldMHz HOLD:%d SPW:%d\n",
+ (gbusclk + 500000) / 1000000, hold, spw);
+
+ spin_lock_init(&drvdata->hw_control.lock);
+ init_waitqueue_head(&drvdata->hw_control.wq);
+
+ platform_set_drvdata(dev, drvdata);
+ txx9ndfmc_initialize(dev);
+
+ for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
+ struct txx9ndfmc_priv *txx9_priv;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+
+ if (!(plat->ch_mask & (1 << i)))
+ continue;
+ txx9_priv = kzalloc(sizeof(struct txx9ndfmc_priv),
+ GFP_KERNEL);
+ if (!txx9_priv) {
+ dev_err(&dev->dev, "Unable to allocate "
+ "TXx9 NDFMC MTD device structure.\n");
+ continue;
+ }
+ chip = &txx9_priv->chip;
+ mtd = &txx9_priv->mtd;
+ mtd->owner = THIS_MODULE;
+
+ mtd->priv = chip;
+
+ chip->read_byte = txx9ndfmc_read_byte;
+ chip->read_buf = txx9ndfmc_read_buf;
+ chip->write_buf = txx9ndfmc_write_buf;
+ chip->verify_buf = txx9ndfmc_verify_buf;
+ chip->cmd_ctrl = txx9ndfmc_cmd_ctrl;
+ chip->dev_ready = txx9ndfmc_dev_ready;
+ chip->ecc.calculate = txx9ndfmc_calculate_ecc;
+ chip->ecc.correct = txx9ndfmc_correct_data;
+ chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
+ chip->ecc.mode = NAND_ECC_HW;
+ /* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ chip->chip_delay = 100;
+ chip->controller = &drvdata->hw_control;
+
+ chip->priv = txx9_priv;
+ txx9_priv->dev = dev;
+
+ if (plat->ch_mask != 1) {
+ txx9_priv->cs = i;
+ txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
+ dev_name(&dev->dev), i);
+ } else {
+ txx9_priv->cs = -1;
+ txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
+ GFP_KERNEL);
+ }
+ if (!txx9_priv->mtdname) {
+ kfree(txx9_priv);
+ dev_err(&dev->dev, "Unable to allocate MTD name.\n");
+ continue;
+ }
+ if (plat->wide_mask & (1 << i))
+ chip->options |= NAND_BUSWIDTH_16;
+
+ if (txx9ndfmc_nand_scan(mtd)) {
+ kfree(txx9_priv->mtdname);
+ kfree(txx9_priv);
+ continue;
+ }
+ mtd->name = txx9_priv->mtdname;
+
+ mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+ drvdata->mtds[i] = mtd;
+ }
+
+ return 0;
+}
+
+static int __exit txx9ndfmc_remove(struct platform_device *dev)
+{
+ struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+ int i;
+
+ platform_set_drvdata(dev, NULL);
+ if (!drvdata)
+ return 0;
+ for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
+ struct mtd_info *mtd = drvdata->mtds[i];
+ struct nand_chip *chip;
+ struct txx9ndfmc_priv *txx9_priv;
+
+ if (!mtd)
+ continue;
+ chip = mtd->priv;
+ txx9_priv = chip->priv;
+
+ nand_release(mtd);
+ kfree(txx9_priv->mtdname);
+ kfree(txx9_priv);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int txx9ndfmc_resume(struct platform_device *dev)
+{
+ if (platform_get_drvdata(dev))
+ txx9ndfmc_initialize(dev);
+ return 0;
+}
+#else
+#define txx9ndfmc_resume NULL
+#endif
+
+static struct platform_driver txx9ndfmc_driver = {
+ .remove = __exit_p(txx9ndfmc_remove),
+ .resume = txx9ndfmc_resume,
+ .driver = {
+ .name = "txx9ndfmc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init txx9ndfmc_init(void)
+{
+ return platform_driver_probe(&txx9ndfmc_driver, txx9ndfmc_probe);
+}
+
+static void __exit txx9ndfmc_exit(void)
+{
+ platform_driver_unregister(&txx9ndfmc_driver);
+}
+
+module_init(txx9ndfmc_init);
+module_exit(txx9ndfmc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
+MODULE_ALIAS("platform:txx9ndfmc");
diff --git a/drivers/mtd/nand/wmt_nand.c b/drivers/mtd/nand/wmt_nand.c
new file mode 100755
index 00000000..0fa2bde0
--- /dev/null
+++ b/drivers/mtd/nand/wmt_nand.c
@@ -0,0 +1,8285 @@
+/*++
+linux/drivers/mtd/nand/wmt_nand.c
+
+Copyright (c) 2008 WonderMedia Technologies, Inc.
+
+This program is free software: you can redistribute it and/or modify it under the
+terms of the GNU General Public License as published by the Free Software Foundation,
+either version 2 of the License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details.
+You should have received a copy of the GNU General Public License along with
+this program. If not, see <http://www.gnu.org/licenses/>.
+
+WonderMedia Technologies, Inc.
+10F, 529, Chung-Cheng Road, Hsin-Tien, Taipei 231, R.O.C.
+--*/
+
+//#include <linux/config.h>
+#include <linux/module.h>
+/*#include <linux/types.h>*/
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+/*#include <linux/platform_device.h>*/
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+/*#include <linux/clk.h>*/
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+/*#include <linux/mtd/partitions.h>*/
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/reboot.h> //Lch
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/sizes.h>
+#include <mach/irqs.h>
+#include <mach/hardware.h>
+#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
+#include "wmt_nand.h"
+
+
+#ifndef memzero
+#define memzero(s, n) memset ((s), 0, (n))
+#endif
+//#define RETRY_DEBUG
+#define WMT_HW_RDMZ
+//#ifdef WMT_SW_RDMZ
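+/*
+ * Pre-computed randomizer seed table (BYTE_SEED 32-bit words); presumably
+ * used to (de)scramble page data when the randomizer is handled in software.
+ */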
+unsigned int rdmz[BYTE_SEED]= {
+ 0xC5F7B49E, 0x85AD42B6, 0x1888A48B, 0xFBA90A42, 0xE20E7129, 0x37E8086E, 0x6F1C1918, 0x31510E20,
+ 0x382771CB, 0x6107F49D, 0x901B6D0B, 0x3CD489E1, 0xA9B9CE07, 0x6B41AC61, 0x749F181D, 0xA7DDA658,
+ 0x405276C0, 0xB67EFB43, 0xC5EE35A6, 0xF8406534, 0x73D8093A, 0xD98028A3, 0x084CE1AF, 0xB4744210,
+ 0x951E02A0, 0xF657482D, 0x6C64F9D0, 0x68DB8651, 0xD1E64A45, 0x3A0FCB39, 0x9C9BB663, 0x05322DAE,
+ 0xA4F40077, 0x801BA102, 0xB73BE0DD, 0xA2E34B6A, 0x5A50E576, 0x83CD0C99, 0x63C1440B, 0x2F82661D,
+ 0x6846C973, 0xA74C29E6, 0x880E86A2, 0xB1D7E000, 0xF9B6F2B5, 0x71E5F26C, 0xE707DE1E, 0x439D5A63,
+ 0x1F697817, 0x23DFB560, 0xE87F6BD0, 0xBD1BBCC3, 0xB1D3A074, 0x6C1B7C0A, 0xE2823FDB, 0x17F45131,
+ 0x9082625D, 0xDFD364FD, 0x88DF4E2B, 0xB6FE752D, 0x5B04FF38, 0xB27648A9, 0x8C4EF297, 0x1C595F00,
+ 0x9E7B4520, 0x826ADDFF, 0xF83FE0EE, 0xF981B0B0, 0x1F9233D7, 0xA2C148CB, 0xF73C908E, 0x18F36125,
+ 0xE45D3D77, 0xB77BA7EA, 0x6D962E25, 0xFF4BF3B8, 0x7C06714F, 0x812DFFDA, 0xE499B45A, 0x73498684,
+ 0x11DCD8C1, 0x0FE5FAEC, 0x882C8503, 0x1CBB95F8, 0x62889F09, 0xF6798B10, 0x7FFE1FE9, 0x464DBD35,
+ 0x476EA249, 0xD7D7428D, 0xD885740A, 0xA034FA2C, 0xB37FD49C, 0x9AC07AD5, 0xAEFA9F54, 0x80B1AC25,
+ 0xAFE642C5, 0x55249024, 0xC3BD79F8, 0x78D3CAB0, 0x71523E07, 0x179AD53B, 0x4C6DE12B, 0x545E4957,
+ 0xE19CDBF1, 0xB9CA4748, 0xD401EF16, 0x0C7FD0DC, 0x2D55D75B, 0x8169F899, 0xBE415FAA, 0x45355DFD,
+ 0x1EE42A38, 0x3E167903, 0x838D4BAE, 0xACB42144, 0x8A9970D3, 0x978DB4A5, 0x45A09237, 0x431554E5,
+ 0xAAD8AFF7, 0x4F260392, 0xF60E8E22, 0xDEFCBB1D, 0xA6903D2E, 0x0C041572, 0x32A1E06E, 0xD41C2E5A,
+ 0xE43F79E1, 0xD562B75D, 0x53B35557, 0x871CF712, 0x06130B69, 0x4FE6CACB, 0xA79121F3, 0x31D1804E,
+ 0xA6CDBB55, 0x2B31D900, 0x6F8D96A6, 0xF90DFE42, 0x3F8E6A88, 0x5D5F338E, 0x19BEFA53, 0xA80B5EC5,
+ 0x33A4BCC7, 0x7C6435D9, 0xE334EF6D, 0xDABCCF28, 0x0B1E822E, 0x6BC9A2E7, 0xC12ECFFD, 0xCB2410AA,
+ 0x5E239332, 0xD599FC9D, 0xD2ADA8FC, 0x985F0C4C, 0xA3FBD68F, 0x1A6857C8, 0x7CF1FA13, 0xBEC591B0,
+ 0x4E7219DC, 0xC7B5CA12, 0x31730D81, 0x954B0433, 0xFA399921, 0x17871477, 0xA42D4816, 0xAC692951,
+ 0x3346763F, 0x8097EFF0, 0x9727B982, 0x5D7D302F, 0xB4D28FAB, 0x33353379, 0xB438C5BB, 0xE49DF42E,
+ 0xE6E4083B, 0x82BB1576, 0xFF1675C3, 0x5B33BD3D, 0xDC018912, 0xC9886442, 0xA8F895ED, 0x99E15C12,
+ 0x45E855E8, 0xA73B2CD4, 0x290C2256, 0x510A601B, 0xB2DC458E, 0x9493508F, 0xEB9E844E, 0x0796D9AE,
+ 0x79741BD6, 0xEAAC9AE2, 0xC1990396, 0x3BB91B8F, 0x51D3287A, 0x9EAECDDD, 0x10EEC66D, 0xC9EA20D4,
+ 0xCAE1855A, 0xA7C42760, 0x3DBF5142, 0xDD2E56F2, 0xE7C71747, 0x1202F5B2, 0xF0444344, 0x2382331B,
+ 0xCF4AA7A2, 0xE037CA0B, 0x9CC2706C, 0xB7AA6F63, 0x6ABFBB08, 0x5DF9FE35, 0xBF95CB8A, 0xEA64D353,
+ 0xBB5DB139, 0xF25BBBB3, 0xB069B05E, 0x1FA571D2, 0xCCB68970, 0xB2FA065B, 0xAC52ABC8, 0xE3C72445,
+ 0x70F92FFD, 0x3292E21F, 0x2FC6615E, 0x329E2283, 0x9130F29F, 0x8736745B, 0x802463EF, 0xF2173C18,
+ 0xC1EA46D0, 0x0F1631C4, 0x226965D6, 0x2537F5C9, 0x26875CB0, 0x05C9666E, 0x25EAFDDC, 0x9F585A5C,
+ 0x12D33D3B, 0xF76DD669, 0x81303E96, 0x0CD91D67, 0x8B7EE682, 0xC306750F, 0x36B85254, 0xCB0AD397,
+ 0x4DB9750B, 0xFB0FC7F9, 0x442540F0, 0x758785F8, 0xE7E514E6, 0xBF6E804A, 0x6B7A2EF8, 0xA41E4A67,
+ 0x57B36655, 0xE5E72D5D, 0xC4C5AA32, 0x43A2988E, 0x5A45A4D2, 0x40D6B8DA, 0xBD39BF62, 0x1CBFD58C,
+ 0xF72511B6, 0x651E46A7, 0x8F0D90C6, 0x9552850B, 0x87D4BEA3, 0x7CD7B9C6, 0x86046AF7, 0x462BB9D7,
+ 0xB0DA3C41, 0x7A95F448, 0x5021FF8F, 0x093EB834, 0xBD0EFD67, 0x72C81437, 0xB2E38763, 0xD1BF8C4A,
+ 0x889789F4, 0x52D00D1C, 0xD8D07299, 0xAC5A2B20, 0xC89C393B, 0x5636B492, 0xD375FC40, 0x89F81123,
+ 0xB3EA1B56, 0xC7310408, 0x3A3449A0, 0x4C1AE419, 0xF55CEDA3, 0x01415BEA, 0xF2A0F073, 0x31774DF5,
+ 0x00E68A8C, 0x695E5496, 0xE7749B58, 0x77327028, 0x6CD335BB, 0x98468D74, 0xDE16F10D, 0x7138FA79,
+ 0x5ED8D8F2, 0x54870136, 0xCDEE53A2, 0x3DB7D1AA, 0xF6754B8C, 0xC1088C28, 0xF3E5EBED, 0x567A3339,
+ 0xA2F60ACE, 0x994B5135, 0x5D35F7F0, 0x50FCF79A, 0xB0E1BED8, 0xAA14A632, 0xA04F3F82, 0xAC8BE3A9,
+ 0xCFB5AC16, 0xF484B91F, 0x10E64685, 0xE2B13DAA, 0xEC2E1E35, 0x4623393F, 0x9B81213F, 0x5C5A6F27,
+ 0xB1C6E1D0, 0xAF00C849, 0x3C7AC4B2, 0x24C9E2A0, 0x0FE1BA98, 0x1D810BBC, 0x8FDC584F, 0x927B1026,
+ 0x2566B32E, 0xBF440303, 0xED4D467E, 0x19EFBCB4, 0x31C80176, 0xDB209CD7, 0x406174B1, 0x4DA4B447,
+ 0x134F6EC4, 0xBC1220F6, 0xA75D2836, 0xDEB8BC5E, 0xFC48D6DE, 0x3A78CE0B, 0x3D991297, 0xE5EFADB5,
+ 0xEF9EB74C, 0x656D03E1, 0xBBA2BA8D, 0xE6E8C8A7, 0x3C4D86B7, 0x4ABE231B, 0x4A272C4D, 0xA920C151,
+ 0x8846417D, 0x55F99831, 0x7A627F14, 0x6FC991E5, 0xA3D515B2, 0x09F2B1F1, 0x5267C177, 0x284D79BC,
+ 0xA3AA9068, 0x83AB087B, 0x9475DA03, 0x82C0D0D8, 0xE0E242F6, 0x0E466BFE, 0x867FAF59, 0x59DF8EE2,
+ 0xE5AFEA82, 0x20EBD203, 0xC076152F, 0x4469C75B, 0x04047376, 0xF75654F0, 0x51B16CEC, 0xFCB7DD6A,
+ 0x2ECBBD1F, 0xB1BD247E, 0xB0F4FF7C, 0x690F1271, 0x7EB7C4EB, 0x9FB65038, 0x50D674D3, 0x36D6D65E,
+ 0x17E550E1, 0xC63458A1, 0x924C5223, 0x4B117295, 0xFA8295D6, 0x59EC8C93, 0x1E75A586, 0xF64A8961,
+ 0x842450ED, 0x90ECE657, 0x033CE78B, 0x03526381, 0xDFBDE0F7, 0x5430CD5D, 0x3D735887, 0x32476AE2,
+ 0xBD427ACC, 0x034BE2B9, 0xA250C775, 0x3F6060EC, 0x1F5A7A66, 0xD805FA64, 0x3EDE30B2, 0xF949F901,
+ 0x65568178, 0x6B23E8F7, 0x168608AA, 0x99F8DD2A, 0x3805726A, 0xCC6B8165, 0x0B2500B7, 0xBB48F09D,
+ 0x31400FF0, 0x6E914B37, 0x2C98C243, 0x53D551B5, 0x70A8691A, 0xAB51BDAC, 0xC742414E, 0x0E9B63EB,
+ 0x3FA0A9B5, 0x4EC5D5B7, 0x3728C137, 0x3E83B6C9, 0xDE7C3573, 0x387AF7B0, 0x463238EF, 0xCD371BC3,
+ 0x11C559F9, 0x7208DD6E, 0xE37C28B2, 0x3E92B719, 0x88CA0F8F, 0x75E5C16E, 0x85FC0451, 0x814BFB38,
+ 0x132D2A52, 0xDE0B3041, 0x99785344, 0xA6EFB8F4, 0x865DACF8, 0xF4B3FB1A, 0x7E91873E, 0xA777AB7F,
+ 0x588FD4D8, 0x41B9200D, 0x5C03A928, 0x035EA31D, 0x614B7336, 0xE1989B85, 0x2C67C9F7, 0x476622A1,
+ 0xFC8C5FF3, 0xFE4AEF65, 0x41D3E473, 0x1541A4E1, 0x1BB44300, 0xF8FB69C3, 0x3DB391DE, 0x63D8C533,
+ 0x526F419F, 0x031664C2, 0x85650B07, 0x624C1624, 0x324BAA7E, 0x03B4E90D, 0xB6E3B461, 0xB3445605,
+ 0x4A4128AF, 0x5E945F59, 0x2504F7B8, 0xDD5D13B4, 0xD3683D0C, 0x61B8B81E, 0x4BDD7B50, 0x15EBA9C6,
+ 0x0369E118, 0x0F3CB28D, 0xA45E0D50, 0x98C6031A, 0x40FC3B93, 0x3B0ED7E4, 0xA14E235F, 0x915E7695,
+ 0x5BD9F72D, 0x0BA94E45, 0x9B54A9C2, 0xCEDE74B5, 0x801321EA, 0x9C60FDA3, 0x842CD005, 0xBBB7FB29,
+ 0x25F37CE4, 0xE2B57DDE, 0x7983908A, 0xD544F488, 0x6B72AE10, 0x8F455719, 0x717CFD3A, 0x04003302,
+ 0x62FBDA4F, 0xC2D6A15B, 0x0C445245, 0xFDD48521, 0x71073894, 0x1BF40437, 0x378E0C8C, 0x98A88710,
+ 0x9C13B8E5, 0xB083FA4E, 0xC80DB685, 0x9E6A44F0, 0xD4DCE703, 0xB5A0D630, 0x3A4F8C0E, 0x53EED32C,
+ 0xA0293B60, 0x5B3F7DA1, 0x62F71AD3, 0x7C20329A, 0xB9EC049D, 0xECC01451, 0x042670D7, 0x5A3A2108,
+ 0xCA8F0150, 0x7B2BA416, 0xB6327CE8, 0xB46DC328, 0xE8F32522, 0x9D07E59C, 0x4E4DDB31, 0x829916D7,
+ 0x527A003B, 0xC00DD081, 0x5B9DF06E, 0x5171A5B5, 0xAD2872BB, 0xC1E6864C, 0xB1E0A205, 0x97C1330E,
+ 0x342364B9, 0x53A614F3, 0x44074351, 0xD8EBF000, 0x7CDB795A, 0x38F2F936, 0xF383EF0F, 0xA1CEAD31,
+ 0x0FB4BC0B, 0x11EFDAB0, 0xF43FB5E8, 0x5E8DDE61, 0x58E9D03A, 0xB60DBE05, 0xF1411FED, 0x8BFA2898,
+ 0xC841312E, 0xEFE9B27E, 0xC46FA715, 0x5B7F3A96, 0xAD827F9C, 0xD93B2454, 0x4627794B, 0x0E2CAF80,
+ 0xCF3DA290, 0x41356EFF, 0x7C1FF077, 0xFCC0D858, 0x8FC919EB, 0x5160A465, 0xFB9E4847, 0x8C79B092,
+ 0x722E9EBB, 0xDBBDD3F5, 0x36CB1712, 0xFFA5F9DC, 0x3E0338A7, 0x4096FFED, 0x724CDA2D, 0xB9A4C342,
+ 0x08EE6C60, 0x87F2FD76, 0x44164281, 0x8E5DCAFC, 0x31444F84, 0xFB3CC588, 0xBFFF0FF4, 0xA326DE9A,
+ 0xA3B75124, 0x6BEBA146, 0x6C42BA05, 0x501A7D16, 0xD9BFEA4E, 0x4D603D6A, 0xD77D4FAA, 0xC058D612,
+ 0x57F32162, 0x2A924812, 0x61DEBCFC, 0xBC69E558, 0xB8A91F03, 0x8BCD6A9D, 0xA636F095, 0xAA2F24AB,
+ 0x70CE6DF8, 0x5CE523A4, 0x6A00F78B, 0x863FE86E, 0x96AAEBAD, 0x40B4FC4C, 0xDF20AFD5, 0x229AAEFE,
+ 0x8F72151C, 0x1F0B3C81, 0x41C6A5D7, 0xD65A10A2, 0xC54CB869, 0xCBC6DA52, 0xA2D0491B, 0xA18AAA72,
+ 0x556C57FB, 0x279301C9, 0xFB074711, 0x6F7E5D8E, 0x53481E97, 0x06020AB9, 0x1950F037, 0xEA0E172D,
+ 0xF21FBCF0, 0xEAB15BAE, 0x29D9AAAB, 0xC38E7B89, 0x830985B4, 0xA7F36565, 0x53C890F9, 0x98E8C027,
+ 0x5366DDAA, 0x1598EC80, 0x37C6CB53, 0x7C86FF21, 0x1FC73544, 0xAEAF99C7, 0x8CDF7D29, 0xD405AF62,
+ 0x99D25E63, 0xBE321AEC, 0x719A77B6, 0x6D5E6794, 0x858F4117, 0xB5E4D173, 0x609767FE, 0x65920855,
+ 0xAF11C999, 0x6ACCFE4E, 0x6956D47E, 0xCC2F8626, 0x51FDEB47, 0x8D342BE4, 0x3E78FD09, 0x5F62C8D8,
+ 0x27390CEE, 0xE3DAE509, 0x98B986C0, 0xCAA58219, 0xFD1CCC90, 0x0BC38A3B, 0xD216A40B, 0xD63494A8,
+ 0x19A33B1F, 0x404BF7F8, 0xCB93DCC1, 0xAEBE9817, 0xDA6947D5, 0x999A99BC, 0x5A1C62DD, 0xF24EFA17,
+ 0x7372041D, 0xC15D8ABB, 0xFF8B3AE1, 0x2D99DE9E, 0x6E00C489, 0xE4C43221, 0x547C4AF6, 0x4CF0AE09,
+ 0x22F42AF4, 0x539D966A, 0x9486112B, 0x2885300D, 0xD96E22C7, 0x4A49A847, 0x75CF4227, 0x03CB6CD7,
+ 0x3CBA0DEB, 0x75564D71, 0xE0CC81CB, 0x1DDC8DC7, 0xA8E9943D, 0xCF5766EE, 0x08776336, 0x64F5106A,
+ 0x6570C2AD, 0x53E213B0, 0x1EDFA8A1, 0xEE972B79, 0x73E38BA3, 0x09017AD9, 0xF82221A2, 0x11C1198D,
+ 0xE7A553D1, 0x701BE505, 0xCE613836, 0x5BD537B1, 0xB55FDD84, 0x2EFCFF1A, 0xDFCAE5C5, 0xF53269A9,
+ 0xDDAED89C, 0x792DDDD9, 0x5834D82F, 0x0FD2B8E9, 0xE65B44B8, 0x597D032D, 0xD62955E4, 0xF1E39222,
+ 0xB87C97FE, 0x1949710F, 0x97E330AF, 0x994F1141, 0xC898794F, 0xC39B3A2D, 0x401231F7, 0x790B9E0C,
+ 0x60F52368, 0x078B18E2, 0x9134B2EB, 0x129BFAE4, 0x1343AE58, 0x02E4B337, 0x12F57EEE, 0xCFAC2D2E,
+ 0x89699E9D, 0x7BB6EB34, 0xC0981F4B, 0x066C8EB3, 0xC5BF7341, 0x61833A87, 0x9B5C292A, 0xE58569CB,
+ 0xA6DCBA85, 0x7D87E3FC, 0x2212A078, 0x3AC3C2FC, 0x73F28A73, 0x5FB74025, 0xB5BD177C, 0xD20F2533,
+ 0xABD9B32A, 0x72F396AE, 0x6262D519, 0x21D14C47, 0x2D22D269, 0x206B5C6D, 0x5E9CDFB1, 0x0E5FEAC6,
+ 0xFB9288DB, 0x328F2353, 0xC786C863, 0xCAA94285, 0x43EA5F51, 0xBE6BDCE3, 0xC302357B, 0xA315DCEB,
+ 0x586D1E20, 0xBD4AFA24, 0x2810FFC7, 0x849F5C1A, 0xDE877EB3, 0xB9640A1B, 0x5971C3B1, 0x68DFC625,
+ 0x444BC4FA, 0xA968068E, 0x6C68394C, 0xD62D1590, 0x644E1C9D, 0x2B1B5A49, 0xE9BAFE20, 0x44FC0891,
+ 0x59F50DAB, 0x63988204, 0x9D1A24D0, 0xA60D720C, 0x7AAE76D1, 0x80A0ADF5, 0xF9507839, 0x18BBA6FA,
+ 0x00734546, 0x34AF2A4B, 0x73BA4DAC, 0xBB993814, 0x36699ADD, 0xCC2346BA, 0xEF0B7886, 0x389C7D3C,
+ 0x2F6C6C79, 0x2A43809B, 0x66F729D1, 0x1EDBE8D5, 0x7B3AA5C6, 0xE0844614, 0xF9F2F5F6, 0x2B3D199C,
+ 0xD17B0567, 0x4CA5A89A, 0x2E9AFBF8, 0x287E7BCD, 0x5870DF6C, 0x550A5319, 0xD0279FC1, 0x5645F1D4,
+ 0xE7DAD60B, 0xFA425C8F, 0x08732342, 0xF1589ED5, 0xF6170F1A, 0xA3119C9F, 0xCDC0909F, 0x2E2D3793,
+ 0xD8E370E8, 0x57806424, 0x1E3D6259, 0x1264F150, 0x07F0DD4C, 0x8EC085DE, 0x47EE2C27, 0x493D8813,
+ 0x92B35997, 0x5FA20181, 0x76A6A33F, 0x0CF7DE5A, 0x98E400BB, 0xED904E6B, 0xA030BA58, 0x26D25A23,
+ 0x09A7B762, 0x5E09107B, 0x53AE941B, 0x6F5C5E2F, 0xFE246B6F, 0x9D3C6705, 0x9ECC894B, 0x72F7D6DA,
+ 0xF7CF5BA6, 0xB2B681F0, 0xDDD15D46, 0xF3746453, 0x9E26C35B, 0xA55F118D, 0xA5139626, 0xD49060A8,
+ 0xC42320BE, 0x2AFCCC18, 0xBD313F8A, 0x37E4C8F2, 0xD1EA8AD9, 0x84F958F8, 0x2933E0BB, 0x1426BCDE,
+ 0xD1D54834, 0xC1D5843D, 0x4A3AED01, 0x4160686C, 0x7071217B, 0x872335FF, 0x433FD7AC, 0x2CEFC771,
+ 0xF2D7F541, 0x9075E901, 0xE03B0A97, 0x2234E3AD, 0x020239BB, 0x7BAB2A78, 0x28D8B676, 0xFE5BEEB5,
+ 0x1765DE8F, 0x58DE923F, 0xD87A7FBE, 0xB4878938, 0x3F5BE275, 0xCFDB281C, 0x286B3A69, 0x9B6B6B2F,
+ 0x8BF2A870, 0xE31A2C50, 0xC9262911, 0x2588B94A, 0xFD414AEB, 0x2CF64649, 0x8F3AD2C3, 0xFB2544B0,
+ 0xC2122876, 0xC876732B, 0x819E73C5, 0x81A931C0, 0xEFDEF07B, 0xAA1866AE, 0x1EB9AC43, 0x1923B571,
+ 0xDEA13D66, 0x81A5F15C, 0x512863BA, 0x1FB03076, 0x0FAD3D33, 0x6C02FD32, 0x9F6F1859, 0x7CA4FC80,
+ 0xB2AB40BC, 0x3591F47B, 0x0B430455, 0x4CFC6E95, 0x9C02B935, 0xE635C0B2, 0x8592805B, 0x5DA4784E,
+ 0x98A007F8, 0xB748A59B, 0x964C6121, 0x29EAA8DA, 0x3854348D, 0x55A8DED6, 0xE3A120A7, 0x874DB1F5,
+ 0x9FD054DA, 0xA762EADB, 0x9B94609B, 0x9F41DB64, 0x6F3E1AB9, 0x9C3D7BD8, 0xA3191C77, 0xE69B8DE1,
+ 0x08E2ACFC, 0x39046EB7, 0xF1BE1459, 0x9F495B8C, 0x446507C7, 0xBAF2E0B7, 0x42FE0228, 0x40A5FD9C,
+ 0x89969529, 0x6F059820, 0x4CBC29A2, 0x5377DC7A, 0x432ED67C, 0x7A59FD8D, 0xBF48C39F, 0x53BBD5BF,
+ 0xAC47EA6C, 0x20DC9006, 0xAE01D494, 0x01AF518E, 0xB0A5B99B, 0xF0CC4DC2, 0x9633E4FB, 0xA3B31150,
+ 0xFE462FF9, 0xFF2577B2, 0xA0E9F239, 0x0AA0D270, 0x8DDA2180, 0x7C7DB4E1, 0x9ED9C8EF, 0xB1EC6299,
+ 0x2937A0CF, 0x818B3261, 0x42B28583, 0x31260B12, 0x9925D53F, 0x81DA7486, 0xDB71DA30, 0xD9A22B02,
+ 0xA5209457, 0x2F4A2FAC, 0x12827BDC, 0x6EAE89DA, 0x69B41E86, 0x30DC5C0F, 0x25EEBDA8, 0x0AF5D4E3,
+ 0x81B4F08C, 0x079E5946, 0x522F06A8, 0xCC63018D, 0x207E1DC9, 0x9D876BF2, 0xD0A711AF, 0xC8AF3B4A,
+ 0xADECFB96, 0x05D4A722, 0xCDAA54E1, 0x676F3A5A, 0xC00990F5, 0xCE307ED1, 0xC2166802, 0x5DDBFD94,
+ 0x12F9BE72, 0x715ABEEF, 0x3CC1C845, 0x6AA27A44, 0xB5B95708, 0x47A2AB8C, 0x38BE7E9D, 0x82001981,
+ 0xB17DED27, 0xE16B50AD, 0x86222922, 0x7EEA4290, 0xB8839C4A, 0x0DFA021B, 0x1BC70646, 0xCC544388,
+ 0x4E09DC72, 0xD841FD27, 0x6406DB42, 0xCF352278, 0x6A6E7381, 0x5AD06B18, 0x1D27C607, 0x29F76996,
+ 0xD0149DB0, 0xAD9FBED0, 0x317B8D69, 0xBE10194D, 0xDCF6024E, 0xF6600A28, 0x0213386B, 0x2D1D1084,
+ 0x654780A8, 0x3D95D20B, 0x5B193E74, 0x5A36E194, 0x74799291, 0xCE83F2CE, 0xA726ED98, 0xC14C8B6B,
+ 0xA93D001D, 0x6006E840, 0xADCEF837, 0xA8B8D2DA, 0x5694395D, 0xE0F34326, 0x58F05102, 0xCBE09987,
+ 0x9A11B25C, 0xA9D30A79, 0x2203A1A8, 0x6C75F800, 0x3E6DBCAD, 0x9C797C9B, 0xF9C1F787, 0xD0E75698,
+ 0x07DA5E05, 0x08F7ED58, 0xFA1FDAF4, 0x2F46EF30, 0xAC74E81D, 0xDB06DF02, 0x78A08FF6, 0x45FD144C,
+ 0x64209897, 0xF7F4D93F, 0x6237D38A, 0x2DBF9D4B, 0x56C13FCE, 0xEC9D922A, 0x2313BCA5, 0x071657C0,
+ 0xE79ED148, 0xA09AB77F, 0x3E0FF83B, 0xFE606C2C, 0xC7E48CF5, 0xA8B05232, 0x7DCF2423, 0xC63CD849,
+ 0xB9174F5D, 0x6DDEE9FA, 0x1B658B89, 0xFFD2FCEE, 0x9F019C53, 0xA04B7FF6, 0x39266D16, 0x5CD261A1,
+ 0x04773630, 0xC3F97EBB, 0x220B2140, 0x472EE57E, 0x18A227C2, 0x7D9E62C4, 0x5FFF87FA, 0x51936F4D,
+ 0x51DBA892, 0xB5F5D0A3, 0x36215D02, 0x280D3E8B, 0x6CDFF527, 0x26B01EB5, 0x6BBEA7D5, 0x602C6B09,
+ 0x2BF990B1, 0x15492409, 0x30EF5E7E, 0xDE34F2AC, 0xDC548F81, 0xC5E6B54E, 0xD31B784A, 0x55179255,
+ 0x386736FC, 0xAE7291D2, 0x35007BC5, 0xC31FF437, 0x4B5575D6, 0xA05A7E26, 0x6F9057EA, 0x114D577F,
+ 0xC7B90A8E, 0x8F859E40, 0x20E352EB, 0xEB2D0851, 0x62A65C34, 0xE5E36D29, 0x5168248D, 0xD0C55539,
+ 0xAAB62BFD, 0x93C980E4, 0x7D83A388, 0xB7BF2EC7, 0xA9A40F4B, 0x8301055C, 0x8CA8781B, 0x75070B96,
+ 0x790FDE78, 0xF558ADD7, 0x94ECD555, 0x61C73DC4, 0xC184C2DA, 0xD3F9B2B2, 0xA9E4487C, 0x4C746013,
+ 0x29B36ED5, 0x8ACC7640, 0x9BE365A9, 0x3E437F90, 0x8FE39AA2, 0xD757CCE3, 0x466FBE94, 0xEA02D7B1,
+ 0x4CE92F31, 0x5F190D76, 0x38CD3BDB, 0xB6AF33CA, 0xC2C7A08B, 0x5AF268B9, 0xB04BB3FF, 0xB2C9042A,
+ 0x5788E4CC, 0x35667F27, 0x34AB6A3F, 0xE617C313, 0x28FEF5A3, 0xC69A15F2, 0x1F3C7E84, 0x2FB1646C,
+ 0x939C8677, 0x71ED7284, 0xCC5CC360, 0x6552C10C, 0xFE8E6648, 0x85E1C51D, 0x690B5205, 0xEB1A4A54,
+ 0x0CD19D8F, 0xA025FBFC, 0xE5C9EE60, 0xD75F4C0B, 0x6D34A3EA, 0xCCCD4CDE, 0xAD0E316E, 0xF9277D0B,
+ 0xB9B9020E, 0xE0AEC55D, 0x7FC59D70, 0x96CCEF4F, 0xB7006244, 0x72621910, 0xAA3E257B, 0x26785704,
+ 0x117A157A, 0xA9CECB35, 0xCA430895, 0x94429806, 0xECB71163, 0xA524D423, 0xBAE7A113, 0x81E5B66B,
+ 0x9E5D06F5, 0xBAAB26B8, 0xF06640E5, 0x8EEE46E3, 0x5474CA1E, 0x67ABB377, 0x043BB19B, 0xB27A8835,
+ 0x32B86156, 0xA9F109D8, 0x8F6FD450, 0xF74B95BC, 0xB9F1C5D1, 0x0480BD6C, 0xFC1110D1, 0x88E08CC6,
+ 0xF3D2A9E8, 0x380DF282, 0xE7309C1B, 0x2DEA9BD8, 0x5AAFEEC2, 0x977E7F8D, 0xEFE572E2, 0x7A9934D4,
+ 0xEED76C4E, 0xBC96EEEC, 0xAC1A6C17, 0x07E95C74, 0xF32DA25C, 0x2CBE8196, 0x6B14AAF2, 0x78F1C911,
+ 0xDC3E4BFF, 0x8CA4B887, 0xCBF19857, 0xCCA788A0, 0xE44C3CA7, 0xE1CD9D16, 0x200918FB, 0x3C85CF06,
+ 0x307A91B4, 0x83C58C71, 0x489A5975, 0x094DFD72, 0x89A1D72C, 0x0172599B, 0x097ABF77, 0xE7D61697,
+ 0x44B4CF4E, 0xBDDB759A, 0xE04C0FA5, 0x83364759, 0xE2DFB9A0, 0x30C19D43, 0xCDAE1495, 0xF2C2B4E5,
+ 0x536E5D42, 0x3EC3F1FE, 0x1109503C, 0x9D61E17E, 0xB9F94539, 0x2FDBA012, 0xDADE8BBE, 0x69079299,
+ 0x55ECD995, 0xB979CB57, 0xB1316A8C, 0x90E8A623, 0x96916934, 0x9035AE36, 0x2F4E6FD8, 0x872FF563,
+ 0xFDC9446D, 0x994791A9, 0xE3C36431, 0xE554A142, 0xA1F52FA8, 0xDF35EE71, 0xE1811ABD, 0x518AEE75,
+ 0x2C368F10, 0xDEA57D12, 0x14087FE3, 0xC24FAE0D, 0xEF43BF59, 0xDCB2050D, 0xACB8E1D8, 0x346FE312,
+ 0x2225E27D, 0x54B40347, 0x36341CA6, 0xEB168AC8, 0xB2270E4E, 0x158DAD24, 0xF4DD7F10, 0xA27E0448,
+ 0x2CFA86D5, 0x31CC4102, 0x4E8D1268, 0xD306B906, 0xBD573B68, 0xC05056FA, 0x7CA83C1C, 0x0C5DD37D,
+ 0x8039A2A3, 0x1A579525, 0x39DD26D6, 0xDDCC9C0A, 0x1B34CD6E, 0x6611A35D, 0x7785BC43, 0x9C4E3E9E,
+ 0x97B6363C, 0x9521C04D, 0xB37B94E8, 0x0F6DF46A, 0x3D9D52E3, 0x7042230A, 0x7CF97AFB, 0x959E8CCE,
+ 0x68BD82B3, 0x2652D44D, 0x974D7DFC, 0x143F3DE6, 0xAC386FB6, 0xAA85298C, 0x6813CFE0, 0xAB22F8EA,
+ 0xF3ED6B05, 0x7D212E47, 0x843991A1, 0x78AC4F6A, 0xFB0B878D, 0xD188CE4F, 0xE6E0484F, 0x17169BC9,
+ 0x6C71B874, 0xABC03212, 0x0F1EB12C, 0x093278A8, 0x03F86EA6, 0xC76042EF, 0xA3F71613, 0xA49EC409,
+ 0xC959ACCB, 0xAFD100C0, 0x3B53519F, 0x867BEF2D, 0xCC72005D, 0x76C82735, 0xD0185D2C, 0x13692D11,
+ 0x84D3DBB1, 0xAF04883D, 0xA9D74A0D, 0xB7AE2F17, 0xFF1235B7, 0xCE9E3382, 0x4F6644A5, 0x397BEB6D,
+ 0x7BE7ADD3, 0x595B40F8, 0xEEE8AEA3, 0xF9BA3229, 0xCF1361AD, 0x52AF88C6, 0x5289CB13, 0x6A483054,
+ 0x6211905F, 0x157E660C, 0x5E989FC5, 0x9BF26479, 0x68F5456C, 0xC27CAC7C, 0x1499F05D, 0x0A135E6F,
+ 0xE8EAA41A, 0xE0EAC21E, 0x251D7680, 0xA0B03436, 0xB83890BD, 0x43919AFF, 0xA19FEBD6, 0x9677E3B8,
+ 0xF96BFAA0, 0xC83AF480, 0xF01D854B, 0x911A71D6, 0x01011CDD, 0x3DD5953C, 0x946C5B3B, 0xFF2DF75A,
+ 0x8BB2EF47, 0x2C6F491F, 0x6C3D3FDF, 0xDA43C49C, 0x1FADF13A, 0xE7ED940E, 0x94359D34, 0x4DB5B597,
+ 0x45F95438, 0xF18D1628, 0x64931488, 0x92C45CA5, 0xFEA0A575, 0x967B2324, 0x479D6961, 0x7D92A258,
+ 0xE109143B, 0xE43B3995, 0x40CF39E2, 0xC0D498E0, 0x77EF783D, 0xD50C3357, 0x8F5CD621, 0x0C91DAB8,
+ 0x6F509EB3, 0x40D2F8AE, 0x289431DD, 0x8FD8183B, 0x07D69E99, 0xB6017E99, 0x4FB78C2C, 0x3E527E40,
+ 0xD955A05E, 0x9AC8FA3D, 0x85A1822A, 0xA67E374A, 0x4E015C9A, 0xF31AE059, 0x42C9402D, 0x2ED23C27,
+ 0xCC5003FC, 0xDBA452CD, 0x4B263090, 0x94F5546D, 0x1C2A1A46, 0xAAD46F6B, 0xF1D09053, 0x43A6D8FA,
+ 0xCFE82A6D, 0xD3B1756D, 0x4DCA304D, 0xCFA0EDB2, 0x379F0D5C, 0xCE1EBDEC, 0xD18C8E3B, 0x734DC6F0,
+ 0x8471567E, 0x9C82375B, 0x78DF0A2C, 0xCFA4ADC6, 0xA23283E3, 0x5D79705B, 0x217F0114, 0xA052FECE,
+ 0x44CB4A94, 0x3782CC10, 0x265E14D1, 0x29BBEE3D, 0xA1976B3E, 0xBD2CFEC6, 0xDFA461CF, 0x29DDEADF,
+ 0x5623F536, 0x106E4803, 0x5700EA4A, 0x80D7A8C7, 0x5852DCCD, 0xF86626E1, 0x4B19F27D, 0xD1D988A8,
+ 0x7F2317FC, 0xFF92BBD9, 0x5074F91C, 0x05506938, 0xC6ED10C0, 0xBE3EDA70, 0xCF6CE477, 0xD8F6314C,
+ 0x949BD067, 0xC0C59930, 0x215942C1, 0x98930589, 0x4C92EA9F, 0x40ED3A43, 0x6DB8ED18, 0xECD11581,
+ 0x52904A2B, 0x17A517D6, 0x09413DEE, 0x375744ED, 0xB4DA0F43, 0x186E2E07, 0x92F75ED4, 0x057AEA71,
+ 0x40DA7846, 0x03CF2CA3, 0xA9178354, 0xE63180C6, 0x103F0EE4, 0xCEC3B5F9, 0x685388D7, 0x64579DA5,
+ 0x56F67DCB, 0x82EA5391, 0x66D52A70, 0xB3B79D2D, 0xE004C87A, 0x67183F68, 0x610B3401, 0x2EEDFECA,
+ 0x897CDF39, 0xB8AD5F77, 0x1E60E422, 0x35513D22, 0x5ADCAB84, 0xA3D155C6, 0x9C5F3F4E, 0xC1000CC0,
+ 0xD8BEF693, 0x70B5A856, 0x43111491, 0x3F752148, 0xDC41CE25, 0x06FD010D, 0x0DE38323, 0x662A21C4,
+ 0xA704EE39, 0x6C20FE93, 0x32036DA1, 0xE79A913C, 0x353739C0, 0xAD68358C, 0x0E93E303, 0x14FBB4CB,
+ 0x680A4ED8, 0xD6CFDF68, 0x98BDC6B4, 0x5F080CA6, 0x6E7B0127, 0xFB300514, 0x01099C35, 0x168E8842,
+ 0xB2A3C054, 0x1ECAE905, 0x2D8C9F3A, 0xAD1B70CA, 0x3A3CC948, 0x6741F967, 0xD39376CC, 0xE0A645B5,
+ 0x549E800E, 0xB0037420, 0x56E77C1B, 0xD45C696D, 0x2B4A1CAE, 0x7079A193, 0xAC782881, 0x65F04CC3,
+ 0xCD08D92E, 0x54E9853C, 0x1101D0D4, 0xB63AFC00, 0x9F36DE56, 0xCE3CBE4D, 0x7CE0FBC3, 0xE873AB4C,
+ 0x03ED2F02, 0x047BF6AC, 0x7D0FED7A, 0x97A37798, 0x563A740E, 0x6D836F81, 0x3C5047FB, 0xA2FE8A26,
+ 0xB2104C4B, 0x7BFA6C9F, 0xB11BE9C5, 0x16DFCEA5, 0x2B609FE7, 0xF64EC915, 0x1189DE52, 0x038B2BE0,
+ 0xF3CF68A4, 0xD04D5BBF, 0x1F07FC1D, 0xFF303616, 0x63F2467A, 0xD4582919, 0xBEE79211, 0xE31E6C24,
+ 0x5C8BA7AE, 0xB6EF74FD, 0x0DB2C5C4, 0xFFE97E77, 0x4F80CE29, 0x5025BFFB, 0x9C93368B, 0x2E6930D0,
+ 0x823B9B18, 0x61FCBF5D, 0x110590A0, 0x239772BF, 0x0C5113E1, 0x3ECF3162, 0xAFFFC3FD, 0x28C9B7A6,
+ 0xA8EDD449, 0x5AFAE851, 0x9B10AE81, 0x94069F45, 0xB66FFA93, 0x93580F5A, 0xB5DF53EA, 0xB0163584,
+ 0x95FCC858, 0x0AA49204, 0x1877AF3F, 0xEF1A7956, 0x6E2A47C0, 0x62F35AA7, 0xE98DBC25, 0x2A8BC92A,
+ 0x1C339B7E, 0xD73948E9, 0x9A803DE2, 0x618FFA1B, 0x25AABAEB, 0x502D3F13, 0xB7C82BF5, 0x08A6ABBF,
+ 0x63DC8547, 0xC7C2CF20, 0x9071A975, 0x75968428, 0xB1532E1A, 0xF2F1B694, 0xA8B41246, 0xE862AA9C,
+ 0x555B15FE, 0x49E4C072, 0xBEC1D1C4, 0xDBDF9763, 0x54D207A5, 0xC18082AE, 0x46543C0D, 0x3A8385CB,
+ 0xBC87EF3C, 0xFAAC56EB, 0x4A766AAA, 0x30E39EE2, 0x60C2616D, 0x69FCD959, 0xD4F2243E, 0xA63A3009,
+ 0x14D9B76A, 0xC5663B20, 0x4DF1B2D4, 0x1F21BFC8, 0xC7F1CD51, 0x6BABE671, 0xA337DF4A, 0xF5016BD8,
+ 0x26749798, 0xAF8C86BB, 0x1C669DED, 0xDB5799E5, 0xE163D045, 0xAD79345C, 0x5825D9FF, 0x59648215,
+ 0xABC47266, 0x9AB33F93, 0x9A55B51F, 0xF30BE189, 0x147F7AD1, 0x634D0AF9, 0x0F9E3F42, 0x97D8B236,
+ 0x49CE433B, 0x38F6B942, 0x662E61B0, 0x32A96086, 0xFF473324, 0xC2F0E28E, 0x3485A902, 0xF58D252A,
+ 0x0668CEC7, 0x5012FDFE, 0xF2E4F730, 0x6BAFA605, 0x369A51F5, 0x6666A66F, 0xD68718B7, 0x7C93BE85,
+ 0xDCDC8107, 0x705762AE, 0xBFE2CEB8, 0x4B6677A7, 0x5B803122, 0xB9310C88, 0x551F12BD, 0x133C2B82,
+ 0x88BD0ABD, 0xD4E7659A, 0x6521844A, 0xCA214C03, 0xF65B88B1, 0xD2926A11, 0xDD73D089, 0xC0F2DB35,
+ 0x4F2E837A, 0xDD55935C, 0xF8332072, 0x47772371, 0xAA3A650F, 0xB3D5D9BB, 0x821DD8CD, 0x593D441A,
+ 0x195C30AB, 0x54F884EC, 0x47B7EA28, 0xFBA5CADE, 0x5CF8E2E8, 0x82405EB6, 0x7E088868, 0x44704663,
+ 0x79E954F4, 0x9C06F941, 0x73984E0D, 0x16F54DEC, 0xAD57F761, 0x4BBF3FC6, 0x77F2B971, 0x3D4C9A6A,
+ 0x776BB627, 0xDE4B7776, 0x560D360B, 0x03F4AE3A, 0x7996D12E, 0x165F40CB, 0xB58A5579, 0xBC78E488,
+ 0xEE1F25FF, 0xC6525C43, 0x65F8CC2B, 0xE653C450, 0x72261E53, 0xF0E6CE8B, 0x10048C7D, 0x1E42E783,
+ 0x983D48DA, 0xC1E2C638, 0x244D2CBA, 0x04A6FEB9, 0xC4D0EB96, 0x80B92CCD, 0x84BD5FBB, 0x73EB0B4B,
+ 0x225A67A7, 0xDEEDBACD, 0xF02607D2, 0x419B23AC, 0xF16FDCD0, 0x9860CEA1, 0xE6D70A4A, 0x79615A72,
+ 0x29B72EA1, 0x1F61F8FF, 0x0884A81E, 0xCEB0F0BF, 0x5CFCA29C, 0x17EDD009, 0xED6F45DF, 0xB483C94C,
+ 0xAAF66CCA, 0x5CBCE5AB, 0xD898B546, 0x48745311, 0x4B48B49A, 0x481AD71B, 0x97A737EC, 0xC397FAB1,
+ 0xFEE4A236, 0xCCA3C8D4, 0x71E1B218, 0x72AA50A1, 0xD0FA97D4, 0xEF9AF738, 0xF0C08D5E, 0x28C5773A,
+ 0x161B4788, 0xEF52BE89, 0x8A043FF1, 0xE127D706, 0xF7A1DFAC, 0x6E590286, 0x565C70EC, 0x9A37F189,
+ 0x9112F13E, 0x2A5A01A3, 0x1B1A0E53, 0x758B4564, 0x59138727, 0x0AC6D692, 0x7A6EBF88, 0xD13F0224,
+ 0x167D436A, 0x18E62081, 0x27468934, 0x69835C83, 0x5EAB9DB4, 0x60282B7D, 0xBE541E0E, 0x862EE9BE,
+ 0xC01CD151, 0x0D2BCA92, 0x1CEE936B, 0x6EE64E05, 0x8D9A66B7, 0xB308D1AE, 0x3BC2DE21, 0x4E271F4F,
+ 0xCBDB1B1E, 0x4A90E026, 0x59BDCA74, 0x87B6FA35, 0x1ECEA971, 0xB8211185, 0x3E7CBD7D, 0xCACF4667,
+ 0xB45EC159, 0x13296A26, 0x4BA6BEFE, 0x0A1F9EF3, 0x561C37DB, 0x554294C6, 0x3409E7F0, 0xD5917C75,
+ 0xF9F6B582, 0xBE909723, 0x421CC8D0, 0xBC5627B5, 0xFD85C3C6, 0xE8C46727, 0xF3702427, 0x0B8B4DE4,
+ 0x3638DC3A, 0x55E01909, 0x078F5896, 0x04993C54, 0x81FC3753, 0xE3B02177, 0xD1FB8B09, 0xD24F6204,
+ 0x64ACD665, 0xD7E88060, 0x9DA9A8CF, 0xC33DF796, 0xE639002E, 0x3B64139A, 0xE80C2E96, 0x89B49688,
+ 0xC269EDD8, 0xD782441E, 0xD4EBA506, 0xDBD7178B, 0x7F891ADB, 0xE74F19C1, 0xA7B32252, 0x9CBDF5B6,
+ 0x3DF3D6E9, 0xACADA07C, 0xF7745751, 0xFCDD1914, 0x6789B0D6, 0xA957C463, 0x2944E589, 0xB524182A,
+ 0x3108C82F, 0x8ABF3306, 0xAF4C4FE2, 0x4DF9323C, 0x347AA2B6, 0xE13E563E, 0x8A4CF82E, 0x0509AF37,
+ 0x7475520D, 0x7075610F, 0x128EBB40, 0xD0581A1B, 0xDC1C485E, 0x21C8CD7F, 0x50CFF5EB, 0x4B3BF1DC,
+ 0x7CB5FD50, 0xE41D7A40, 0x780EC2A5, 0xC88D38EB, 0x00808E6E, 0x9EEACA9E, 0x4A362D9D, 0xFF96FBAD,
+ 0xC5D977A3, 0x9637A48F, 0x361E9FEF, 0x6D21E24E, 0x0FD6F89D, 0x73F6CA07, 0xCA1ACE9A, 0x26DADACB,
+ 0x22FCAA1C, 0x78C68B14, 0xB2498A44, 0xC9622E52, 0x7F5052BA, 0xCB3D9192, 0x23CEB4B0, 0xBEC9512C,
+ 0xF0848A1D, 0x721D9CCA, 0x20679CF1, 0xE06A4C70, 0xBBF7BC1E, 0xEA8619AB, 0x47AE6B10, 0x8648ED5C,
+ 0x37A84F59, 0xA0697C57, 0x944A18EE, 0xC7EC0C1D, 0x83EB4F4C, 0x5B00BF4C, 0x27DBC616, 0x1F293F20,
+ 0xECAAD02F, 0x4D647D1E, 0x42D0C115, 0x533F1BA5, 0xA700AE4D, 0xF98D702C, 0xA164A016, 0x17691E13,
+ 0xE62801FE, 0x6DD22966, 0xA5931848, 0x4A7AAA36, 0x8E150D23, 0xD56A37B5, 0x78E84829, 0xA1D36C7D,
+ 0xE7F41536, 0xE9D8BAB6, 0x26E51826, 0x67D076D9, 0x1BCF86AE, 0xE70F5EF6, 0x68C6471D, 0x39A6E378,
+ 0xC238AB3F, 0x4E411BAD, 0x3C6F8516, 0xE7D256E3, 0xD11941F1, 0x2EBCB82D, 0x10BF808A, 0x50297F67,
+ 0x2265A54A, 0x9BC16608, 0x932F0A68, 0x14DDF71E, 0x50CBB59F, 0xDE967F63, 0xEFD230E7, 0x14EEF56F,
+ 0xAB11FA9B, 0x08372401, 0xAB807525, 0xC06BD463, 0xAC296E66, 0xFC331370, 0x258CF93E, 0x68ECC454,
+ 0xBF918BFE, 0x7FC95DEC, 0x283A7C8E, 0x02A8349C, 0x63768860, 0xDF1F6D38, 0x67B6723B, 0xEC7B18A6,
+ 0x4A4DE833, 0xE062CC98, 0x90ACA160, 0xCC4982C4, 0xA649754F, 0x20769D21, 0xB6DC768C, 0xF6688AC0,
+ 0x29482515, 0x0BD28BEB, 0x84A09EF7, 0x9BABA276, 0xDA6D07A1, 0x0C371703, 0xC97BAF6A, 0x02BD7538,
+ 0xA06D3C23, 0x01E79651, 0x548BC1AA, 0x7318C063, 0x881F8772, 0xE761DAFC, 0xB429C46B, 0xB22BCED2,
+ 0xAB7B3EE5, 0x417529C8, 0xB36A9538, 0x59DBCE96, 0x7002643D, 0xB38C1FB4, 0x30859A00, 0x9776FF65,
+ 0xC4BE6F9C, 0x5C56AFBB, 0x0F307211, 0x1AA89E91, 0x2D6E55C2, 0x51E8AAE3, 0x4E2F9FA7, 0xE0800660,
+ 0x6C5F7B49, 0xB85AD42B, 0x21888A48, 0x9FBA90A4, 0xEE20E712, 0x837E8086, 0x06F1C191, 0xB31510E2,
+ 0xD382771C, 0xB6107F49, 0x1901B6D0, 0x73CD489E, 0x1A9B9CE0, 0xD6B41AC6, 0x8749F181, 0x0A7DDA65,
+ 0x3405276C, 0x6B67EFB4, 0x4C5EE35A, 0xAF840653, 0x373D8093, 0xFD98028A, 0x0084CE1A, 0x0B474421,
+ 0xD951E02A, 0x0F657482, 0x16C64F9D, 0x568DB865, 0x9D1E64A4, 0x33A0FCB3, 0xE9C9BB66, 0x705322DA,
+ 0x2A4F4007, 0xD801BA10, 0xAB73BE0D, 0x6A2E34B6, 0x95A50E57, 0xB83CD0C9, 0xD63C1440, 0x32F82661,
+ 0x66846C97, 0x2A74C29E, 0x0880E86A, 0x5B1D7E00, 0xCF9B6F2B, 0xE71E5F26, 0x3E707DE1, 0x7439D5A6,
+ 0x01F69781, 0x023DFB56, 0x3E87F6BD, 0x4BD1BBCC, 0xAB1D3A07, 0xB6C1B7C0, 0x1E2823FD, 0xD17F4513,
+ 0xD9082625, 0xBDFD364F, 0xD88DF4E2, 0x8B6FE752, 0x95B04FF3, 0x7B27648A, 0x08C4EF29, 0x01C595F0,
+};
+//#endif
+
+#define WR_BUF_CNT 16
+#define NANDINFO "nandinfo"
+static struct proc_dir_entry *nandinfo_proc = NULL;
+static struct mtd_info *mtd_nandinfo = NULL;
+uint8_t *buf_rdmz, *wr_cache;
+/*#define NAND_DEBUG*/
+unsigned int wmt_version;
+uint32_t par1_ofs, par2_ofs, par3_ofs, par4_ofs, eslc_write, prob_end;
+#include <linux/mtd/partitions.h>
+#define NUM_NAND_PARTITIONS ARRAY_SIZE(nand_partitions)
+
+#ifndef CONFIG_MTD_NAND_WMT_UBUNTU
+
+struct mtd_partition nand_partitions[] = {
+ {
+ .name = "logo",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0x1000000,
+ },
+ {
+ .name = "boot",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0x1000000,
+ },
+ {
+ .name = "recovery",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0x1000000,
+ },
+ {
+ .name = "misc",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0x1000000,
+ },
+ {
+ .name = "keydata",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0x4000000,
+ },
+ {
+ .name = "system",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0x40000000,
+ },
+ {
+ .name = "cache",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0x20000000,
+ },
+ {
+ .name = "swap",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0x10000000,
+ },
+#ifndef CONFIG_MTD_NAND_WMT_ANDROID_UBUNTU_DUALOS
+ {
+ .name = "data",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ }
+#else // #ifdef CONFIG_MTD_NAND_WMT_ANDROID_UBUNTU_DUALOS
+ {
+ .name = "data",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0x88000000,
+ },
+ {
+ .name = "ubuntu-boot",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0x1000000,
+ },
+ {
+ .name = "ubuntu-rootfs",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ }
+#endif
+};
+
+#else // #ifdef CONFIG_MTD_NAND_WMT_UBUNTU
+
+struct mtd_partition nand_partitions[] = {
+ {
+ .name = "ubuntu-logo",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0x1000000,
+ },
+ {
+ .name = "ubuntu-boot",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0x1000000,
+ },
+ {
+ .name = "ubuntu-rootfs",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ }
+};
+
+#endif
+
+EXPORT_SYMBOL(nand_partitions);
+
+int second_chip = 0;
+EXPORT_SYMBOL(second_chip);
+
+#ifdef CONFIG_MTD_NAND_WMT_HWECC
+ static int MAX_CHIP = CONFIG_MTD_NAND_CHIP_NUM;
+ static int hardware_ecc = 1;
+#else
+ #define MAX_CHIP 1
+ static int hardware_ecc = 0;
+#endif
+
+#define HW_ENCODE_OOB
+//#define SW_ENCODE_OOB
+
+#ifdef SW_ENCODE_OOB
+static unsigned char parity[MAX_PARITY_SIZE];
+#endif
+static unsigned int bch_err_pos[MAX_ECC_BIT_ERROR];
+
+/* used for software de-randomizer of read id and read status command */
+unsigned char rdmz_tb[128] = {
+ 0x84, 0x4a, 0x37, 0xbe, 0xd7, 0xd2, 0x39, 0x03, 0x8e, 0x77, 0xb9, 0x41, 0x99, 0xa7, 0x78, 0x62,
+ 0x53, 0x88, 0x12, 0xf4, 0x75, 0x21, 0xf0, 0x27, 0xc2, 0x0f, 0x04, 0x80, 0xd7, 0x5a, 0xce, 0x37,
+ 0x56, 0xb1, 0x1c, 0xdc, 0x61, 0x9a, 0x86, 0x10, 0xae, 0xec, 0x73, 0x54, 0xa1, 0x5a, 0x56, 0xdc,
+ 0x2b, 0x45, 0x5e, 0x09, 0x99, 0xb7, 0x64, 0x2b, 0x7f, 0x0c, 0x62, 0x91, 0xa0, 0xfe, 0x35, 0x84,
+ 0xdf, 0x7a, 0xa0, 0x21, 0xa7, 0x42, 0x30, 0x38, 0x80, 0x05, 0x6e, 0x6b, 0xda, 0x23, 0x3f, 0xf3,
+ 0x8e, 0x5d, 0xf7, 0x63, 0xbd, 0x34, 0x92, 0x19, 0x7d, 0x84, 0xcf, 0x66, 0xe9, 0x0d, 0x23, 0x32,
+ 0x55, 0xed, 0x5f, 0xc0, 0xcd, 0x76, 0xaf, 0x87, 0x9e, 0x83, 0x96, 0xa3, 0xf8, 0xb5, 0x09, 0x46,
+ 0x25, 0xa2, 0xc4, 0x3d, 0x2c, 0x46, 0x58, 0x89, 0x14, 0x2e, 0x3b, 0x29, 0x9a, 0x96, 0x0c, 0xe7
+};
+
+/*
+ * Check whether the page is erased or not.
+ * Each row holds oob bytes 16~23 for randomizer seed (page) 0 ~ 15.
+ */
+unsigned char rdmz_FF[18][24] = {
+/*{0xac,0x77,0xed,0x0b,0x8a,0xde,0x0f,0xd8},
+{0xd6,0xbb,0xf6,0x85,0x45,0xef,0x07,0x6c},
+{0xeb,0xdd,0xfb,0x42,0x22,0x77,0x03,0xb6},
+{0x75,0xee,0xfd,0xa1,0x11,0xbb,0x81,0x5b},
+{0x3a,0x77,0x7e,0xd0,0x88,0x5d,0x40,0xad},
+{0x1d,0xbb,0xbf,0x68,0x44,0xae,0x20,0x56},
+{0x8e,0xdd,0x5f,0xb4,0xa2,0xd7,0x90,0x2b},
+{0x47,0xee,0xaf,0x5a,0xd1,0x6b,0xc8,0x95},
+{0xa3,0x77,0x57,0xad,0x68,0xb5,0xe4,0x4a},
+{0x51,0xbb,0xab,0xd6,0x34,0x5a,0xf2,0x25},
+{0x28,0xdd,0x55,0xeb,0x9a,0xad,0xf9,0x12},
+{0x94,0xee,0xaa,0x75,0x4d,0xd6,0xfc,0x09},
+{0xca,0x77,0xd5,0xba,0xa6,0xeb,0xfe,0x84},
+{0x65,0xbb,0x6a,0x5d,0x53,0xf5,0x7f,0xc2},
+{0xb2,0xdd,0xb5,0x2e,0x29,0x7a,0x3f,0x61},
+{0x59,0xee,0xda,0x17,0x14,0xbd,0x1f,0xb0}*//* byte 24 ~ byte 48 */
+{0x3d,0xf0,0xfb,0x7f,0x28,0xa5,0x31,0xc8,0xa9,0x4e,0xe3,0x23,0x9e,0x65,0x79,0xef,0x51,0x13,0x8c,0xab,0x5e,0xa5,0xa9,0x23},
+{0x1e,0xf8,0xfd,0xbf,0x94,0x52,0x18,0x64,0x54,0x27,0xf1,0x91,0xcf,0x32,0x3c,0xf7,0xa8,0x09,0xc6,0x55,0xaf,0xd2,0xd4,0x91},
+{0x8f,0xfc,0xfe,0xdf,0xca,0xa9,0x8c,0x32,0x2a,0x93,0xf8,0x48,0xe7,0x99,0x9e,0xfb,0x54,0x84,0x63,0x2a,0x57,0xe9,0xea,0x48},
+{0xc7,0x7e,0x7f,0xef,0x65,0xd4,0x46,0x99,0x95,0xc9,0x7c,0x24,0xf3,0xcc,0x4f,0xfd,0xaa,0x42,0x31,0x95,0xab,0x74,0x75,0x24},
+{0xe3,0x3f,0x3f,0xf7,0xb2,0xea,0x23,0x4c,0x4a,0x64,0x3e,0x12,0x79,0x66,0xa7,0xfe,0xd5,0xa1,0x18,0xca,0xd5,0xba,0xba,0x12},
+{0x71,0x9f,0x1f,0xfb,0xd9,0x75,0x11,0xa6,0xa5,0x32,0x9f,0x09,0x3c,0x33,0xd3,0x7f,0x6a,0x50,0x0c,0xe5,0x6a,0x5d,0x5d,0x89},
+{0xb8,0x4f,0x8f,0xfd,0x6c,0xba,0x08,0x53,0x52,0x19,0x4f,0x84,0x1e,0x99,0xe9,0x3f,0x35,0x28,0x06,0xf2,0x35,0x2e,0x2e,0x44},
+{0xdc,0xa7,0x47,0xfe,0x36,0xdd,0x04,0xa9,0x29,0x0c,0x27,0x42,0x0f,0x4c,0xf4,0x9f,0x9a,0x94,0x03,0x79,0x1a,0x97,0x97,0x22},
+{0x6e,0x53,0x23,0x7f,0x1b,0xee,0x02,0x54,0x94,0x06,0x93,0x21,0x87,0xa6,0x7a,0x4f,0xcd,0xca,0x81,0x3c,0x8d,0xcb,0x4b,0x91},
+{0xb7,0x29,0x91,0xbf,0x0d,0xf7,0x81,0x2a,0xca,0x03,0xc9,0x90,0x43,0xd3,0xbd,0xa7,0xe6,0xe5,0x40,0x9e,0x46,0xe5,0xa5,0xc8},
+{0x5b,0x14,0xc8,0xdf,0x06,0x7b,0x40,0x15,0x65,0x81,0xe4,0xc8,0xa1,0x69,0x5e,0xd3,0x73,0xf2,0x20,0xcf,0xa3,0x72,0x52,0xe4},
+{0xad,0x0a,0x64,0xef,0x03,0xbd,0x20,0x0a,0x32,0xc0,0x72,0x64,0xd0,0xb4,0x2f,0xe9,0x39,0x79,0x90,0x67,0xd1,0xb9,0x29,0x72},
+{0xd6,0x05,0xb2,0xf7,0x81,0x5e,0x10,0x85,0x99,0xe0,0x39,0x32,0xe8,0x5a,0x97,0xf4,0x1c,0x3c,0xc8,0xb3,0xe8,0x5c,0x94,0x39},
+{0xeb,0x82,0xd9,0xfb,0x40,0x2f,0x88,0x42,0x4c,0x70,0x1c,0x19,0xf4,0x2d,0xcb,0x7a,0x8e,0x9e,0x64,0x59,0xf4,0x2e,0x4a,0x1c},
+{0xf5,0xc1,0xec,0xfd,0xa0,0x97,0xc4,0x21,0xa6,0x38,0x8e,0x8c,0x7a,0x96,0xe5,0xbd,0x47,0x4f,0x32,0xac,0x7a,0x97,0xa5,0x8e},
+{0x7a,0xe0,0xf6,0xfe,0x50,0x4b,0x62,0x90,0x53,0x9c,0xc7,0x46,0x3d,0xcb,0xf2,0xde,0xa3,0x27,0x19,0x56,0xbd,0x4b,0x52,0x47}
+};
+unsigned int rdmz_badblk[2][6] = {
+{0x80040fc2,0x37ce5ad7,0xdc1cb156,0x10869a61,0x5473ecae,0xdc565aa1},
+{0x400207e1,0x9be7ad6b,0x6e0ed8ab,0x8c3cd30,0xaa39f657,0x6e2b2d50}
+};
+
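+/*
+ * eSLC page map: beyond pages 0-3 only every other page pair is listed
+ * (0x4/0x5, 0x8/0x9, ... are skipped), presumably selecting the pages that
+ * may be programmed in enhanced-SLC mode.
+ */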
+unsigned char eslc_map_table[128] = {
+ 0x0, 0x1,
+ 0x2, 0x3,
+ 0x6, 0x7,
+ 0xa, 0xb,
+ 0xe, 0xf,
+ 0x12, 0x13,
+ 0x16, 0x17,
+ 0x1a, 0x1b,
+ 0x1e, 0x1f,
+ 0x22, 0x23,
+ 0x26, 0x27,
+ 0x2a, 0x2b,
+ 0x2e, 0x2f,
+ 0x32, 0x33,
+ 0x36, 0x37,
+ 0x3a, 0x3b,
+ 0x3e, 0x3f,
+ 0x42, 0x43,
+ 0x46, 0x47,
+ 0x4a, 0x4b,
+ 0x4e, 0x4f,
+ 0x52, 0x53,
+ 0x56, 0x57,
+ 0x5a, 0x5b,
+ 0x5e, 0x5f,
+ 0x62, 0x63,
+ 0x66, 0x67,
+ 0x6a, 0x6b,
+ 0x6e, 0x6f,
+ 0x72, 0x73,
+ 0x76, 0x77,
+ 0x7a, 0x7b,
+ 0x7e, 0x7f,
+ 0x82, 0x83,
+ 0x86, 0x87,
+ 0x8a, 0x8b,
+ 0x8e, 0x8f,
+ 0x92, 0x93,
+ 0x96, 0x97,
+ 0x9a, 0x9b,
+ 0x9e, 0x9f,
+ 0xa2, 0xa3,
+ 0xa6, 0xa7,
+ 0xaa, 0xab,
+ 0xae, 0xaf,
+ 0xb2, 0xb3,
+ 0xb6, 0xb7,
+ 0xba, 0xbb,
+ 0xbe, 0xbf,
+ 0xc2, 0xc3,
+ 0xc6, 0xc7,
+ 0xca, 0xcb,
+ 0xce, 0xcf,
+ 0xd2, 0xd3,
+ 0xd6, 0xd7,
+ 0xda, 0xdb,
+ 0xde, 0xdf,
+ 0xe2, 0xe3,
+ 0xe6, 0xe7,
+ 0xea, 0xeb,
+ 0xee, 0xef,
+ 0xf2, 0xf3,
+ 0xf6, 0xf7,
+ 0xfa, 0xfb
+};
+
+/*
+ * hardware specific Out Of Band information
+ */
+
+/*
+ * new oob placement block for use with hardware ecc generation
+ */
+/*
+static struct nand_ecclayout wmt_oobinfo_2048 = {
+ .eccbytes = 7,
+ .eccpos = { 24, 25, 26, 27, 28, 29, 30},
+ .oobavail = 24,
+ .oobfree = {{0, 24} }
+};
+
+
+static struct nand_ecclayout wmt_12bit_oobinfo_4096 = {
+ .eccbytes = 20,
+ .eccpos = { 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43},
+ .oobavail = 24,
+ .oobfree = {{0, 24} }
+};
+
+static struct nand_ecclayout wmt_oobinfo_8192 = {
+ .eccbytes = 42,
+ .eccpos = { 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65},
+ .oobavail = 24,
+ .oobfree = {{0, 24} }
+};
+*/
+static struct nand_ecclayout wmt_oobinfo_16k = {
+ .eccbytes = 70,
+ .eccpos = { 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93},
+ .oobavail = 24,
+ .oobfree = {{0, 24} }
+};
+
+
+/* Ick. The BBT code really ought to be able to work this bit out
+ for itself from the above, at least for the 2KiB case
+*/
+static uint8_t wmt_bbt_pattern_2048[] = { 'B', 'b', 't', '0' };
+static uint8_t wmt_mirror_pattern_2048[] = { '1', 't', 'b', 'B' };
+
+static uint8_t wmt_rdmz[] = { 'z', 'm', 'd', 'r' };
+static uint8_t retry_table[] = {'r','e','t','r','y','t','a','b','l','e'};
+
+static struct nand_bbt_descr wmt_rdtry_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 10,
+ .veroffs = 0,
+ .maxblocks = 4,
+ .pattern = retry_table,
+ .reserved_block_code = 1
+};
+
+static struct nand_bbt_descr wmt_bbt_main_descr_2048 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 0,
+ .maxblocks = 4,
+ .pattern = wmt_bbt_pattern_2048,
+ .reserved_block_code = 1
+};
+
+static struct nand_bbt_descr wmt_bbt_mirror_descr_2048 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 0,
+ .maxblocks = 4,
+ .pattern = wmt_mirror_pattern_2048,
+};
+
+
+/* controller and mtd information */
+extern unsigned int wmt_read_oscr(void);
+/*static*/ void print_nand_register(struct mtd_info *mtd);
+void print_nand_buffer(char *value, unsigned int length);
+#ifdef NAND_DEBUG
+static void print_nand_buffer_int(unsigned int *value, unsigned int length);
+#endif
+
+struct wmt_nand_set {
+ int nr_chips;
+ int nr_partitions;
+ char *name;
+ int *nr_map;
+ struct mtd_partition *partitions;
+};
+
+struct wmt_nand_platform_data {
+ const char * name;
+ int id;
+ struct device dev;
+ u32 num_resources;
+ struct resource * resource;
+
+ const struct platform_device_id *id_entry;
+
+ /* MFD cell pointer */
+ struct mfd_cell *mfd_cell;
+
+ /* arch specific additions */
+ struct pdev_archdata archdata;
+ struct mtd_partition *partitions;
+};
+
+#if 0
+ struct wmt_platform_nand {
+ /* timing information for controller, all times in nanoseconds */
+
+ int tacls; /* time for active CLE/ALE to nWE/nOE */
+ int twrph0; /* active time for nWE/nOE */
+ int twrph1; /* time for release CLE/ALE from nWE/nOE inactive */
+
+ int nr_sets;
+ struct wmt_nand_set *sets;
+ void (*select_chip)(struct s3c2410_nand_set *, int chip);
+ }
+#endif
+
+struct wmt_nand_info;
+
+struct wmt_nand_mtd {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ /*struct wmt_nand_set* set;*/
+ struct wmt_nand_info *info;
+ int scan_res;
+};
+
+/* overview of the wmt nand state */
+
+struct wmt_nand_info {
+ /* mtd info */
+ struct nand_hw_control controller;
+ struct wmt_nand_mtd *mtds;
+ struct wmt_platform_nand *platform;
+ int oper_step;
+ /* device info */
+ struct device *device;
+ struct resource *area;
+ void __iomem *reg;
+ int cpu_type;
+ int datalen;
+ int nr_data;
+ int data_pos;
+ int page_addr;
+ dma_addr_t dmaaddr;
+ dma_addr_t last_bank_dmaaddr;
+ int dma_finish;
+ int phase;
+ void *done_data; /* completion data */
+ unsigned int isr_state;
+ unsigned int isr_cmd;
+ unsigned int cur_lpage;
+ unsigned int cur_page;
+ unsigned int last_bank_col;
+ unsigned int oob_col;
+ //void (*done)(void *data);/* completion function */
+ unsigned char *dmabuf;
+
+#ifdef CONFIG_MTD_NAND_DIRECT_WRITE
+
+ int vmalloc_flag;
+ int sglen;
+ struct scatterlist *sglist;
+ dma_addr_t data_address0;
+ dma_addr_t data_address1;
+ dma_addr_t tempaddr;
+ unsigned char *tempbuf;
+
+#endif
+
+ int ECC_bytes;
+ int oob_ECC_bytes;
+ int oob_ecc_error;
+ int data_ecc_uncor_err; /* use read retry for data area has uncorrectable error*/
+ int unc_bank;
+ int unc_allFF;
+ int bank_size;
+ int ECC_mode;
+ int oob_ECC_mode;
+ unsigned int lst_wpage;
+ int wr_page[WR_BUF_CNT];
+ char banks;
+ char oob_max_bit_error;
+};
+
+/* conversion functions */
+
+static struct wmt_nand_mtd *wmt_nand_mtd_toours(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct wmt_nand_mtd, mtd);
+}
+
+static struct wmt_nand_info *wmt_nand_mtd_toinfo(struct mtd_info *mtd)
+{
+ return wmt_nand_mtd_toours(mtd)->info;
+}
+
+/*
+static struct wmt_nand_info *to_nand_info(struct platform_device *dev)
+{
+ return platform_get_drvdata(dev);
+}
+*/
+/*
+static struct platform_device *to_platform(struct device *dev)
+{
+ return container_of(dev, struct platform_device, dev);
+}
+*/
+#if 0
+static struct wmt_platform_nand *to_nand_plat(struct platform_device *dev)
+{
+ return dev->dev.platform_data;
+}
+#endif
+
+void copy_filename (char *dst, char *src, int size)
+{
+ if (*src && (*src == '"')) {
+ ++src;
+ --size;
+ }
+
+ while ((--size > 0) && *src && (*src != '"')) {
+ *dst++ = *src++;
+ }
+ *dst = '\0';
+}
+
+int set_ECC_mode(struct mtd_info *mtd)
+{
+ unsigned int ECCbit = mtd->dwECCBitNum;
+ unsigned int ECC_mode;
+ switch (ECCbit) {
+ case 1:
+ ECC_mode = ECC1bit;
+ break;
+ case 4:
+ ECC_mode = ECC4bit;
+ break;
+ case 8:
+ ECC_mode = ECC8bit;
+ break;
+ case 12:
+ ECC_mode = ECC12bit;
+ break;
+ case 16:
+ ECC_mode = ECC16bit;
+ break;
+ case 24:
+ ECC_mode = ECC24bitPer1K;
+ break;
+ case 40:
+ ECC_mode = ECC40bitPer1K;
+ break;
+ case 60:
+ ECC_mode = ECC60bitPer1K;
+ break;
+ default:
+ printk("ecc mode input not support ECCbit=%d\n", ECCbit);
+ return -1;
+ }
+ return ECC_mode;
+}
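+/*
+ * Derive the per-bank ECC geometry for the selected engine.  Illustrative
+ * numbers (a sketch; the exact parity sizes come from the *_byte_count
+ * constants in wmt_nand.h): an 8KiB page with a 24-bit/1KiB engine gives
+ * banks = 8 and bank_size = 1024, and 'unprotect' is the spare space left
+ * after the per-bank parity, the OOB parity field and 24 reserved OOB bytes.
+ */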
+void calculate_ECC_info(struct mtd_info *mtd, struct ECC_size_info *ECC_size)
+{
+ switch (ECC_size->ecc_engine) {
+ case ECC4bit:
+ ECC_size->oob_ecc_bits_count = ECC_size->ecc_bits_count = ECC4bit_bit_count;
+ ECC_size->oob_max_bit_error = ECC_size->max_bit_error = 4;
+ ECC_size->banks = mtd->realwritesize/512;
+ ECC_size->bank_size = 512;
+ ECC_size->bank_offset = mtd->realwritesize/ECC_size->banks + ECC4bit_byte_count;
+ ECC_size->oob_ECC_bytes = ECC_size->ECC_bytes = ECC4bit_byte_count;
+ ECC_size->oob_ECC_mode = ECC4bit;
+ ECC_size->unprotect = mtd->realoobsize - ECC4bit_byte_count*(ECC_size->banks+1) - 24;
+ break;
+ case ECC8bit:
+ ECC_size->oob_ecc_bits_count = ECC_size->ecc_bits_count = ECC8bit_bit_count;
+ ECC_size->oob_max_bit_error = ECC_size->max_bit_error = 8;
+ ECC_size->banks = mtd->realwritesize/512;
+ ECC_size->bank_size = 512;
+ ECC_size->bank_offset = mtd->realwritesize/ECC_size->banks + ECC8bit_byte_count;
+ ECC_size->oob_ECC_bytes = ECC_size->ECC_bytes = ECC8bit_byte_count;
+ ECC_size->oob_ECC_mode = ECC8bit;
+ ECC_size->unprotect = mtd->realoobsize - ECC8bit_byte_count*(ECC_size->banks+1) - 24;
+ break;
+ case ECC12bit:
+ ECC_size->oob_ecc_bits_count = ECC_size->ecc_bits_count = ECC12bit_bit_count;
+ ECC_size->oob_max_bit_error = ECC_size->max_bit_error = 12;
+ ECC_size->banks = mtd->realwritesize/512;
+ ECC_size->bank_size = 512;
+ ECC_size->bank_offset = mtd->realwritesize/ECC_size->banks + ECC12bit_byte_count;
+ ECC_size->oob_ECC_bytes = ECC_size->ECC_bytes = ECC12bit_byte_count;
+ ECC_size->oob_ECC_mode = ECC12bit;
+ ECC_size->unprotect = mtd->realoobsize - ECC12bit_byte_count*(ECC_size->banks+1) - 24;
+ break;
+ case ECC16bit:
+ ECC_size->oob_ecc_bits_count = ECC_size->ecc_bits_count = ECC16bit_bit_count;
+ ECC_size->oob_max_bit_error = ECC_size->max_bit_error = 16;
+ ECC_size->banks = mtd->realwritesize/512;
+ ECC_size->bank_size = 512;
+ ECC_size->bank_offset = mtd->realwritesize/ECC_size->banks + ECC16bit_byte_count;
+ ECC_size->oob_ECC_bytes = ECC_size->ECC_bytes = ECC16bit_byte_count;
+ ECC_size->oob_ECC_mode = ECC16bit;
+ ECC_size->unprotect = mtd->realoobsize - ECC16bit_byte_count*(ECC_size->banks+1) - 24;
+ break;
+ case ECC24bitPer1K:
+ ECC_size->oob_ecc_bits_count = ECC_size->ecc_bits_count = ECC24bitPer1K_bit_count;
+ ECC_size->oob_max_bit_error = ECC_size->max_bit_error = 24;
+ ECC_size->banks = mtd->realwritesize/1024;
+ ECC_size->bank_size = 1024;
+ ECC_size->bank_offset = mtd->realwritesize/ECC_size->banks + ECC24bitPer1K_byte_count;
+ ECC_size->oob_ECC_bytes = ECC_size->ECC_bytes = ECC24bitPer1K_byte_count;
+ ECC_size->oob_ECC_mode = ECC24bitPer1K;
+ ECC_size->unprotect = mtd->realoobsize - ECC24bitPer1K_byte_count*(ECC_size->banks+1) - 24;
+ break;
+ case ECC40bitPer1K:
+ ECC_size->ecc_bits_count = ECC40bitPer1K_bit_count;
+ ECC_size->oob_ecc_bits_count = ECC24bitPer1K_bit_count;
+ ECC_size->max_bit_error = 40;
+ ECC_size->oob_max_bit_error = 24;
+ ECC_size->banks = mtd->realwritesize/1024;
+ ECC_size->bank_size = 1024;
+ ECC_size->bank_offset = mtd->realwritesize/ECC_size->banks + ECC40bitPer1K_byte_count;
+ ECC_size->ECC_bytes = ECC40bitPer1K_byte_count;
+ ECC_size->oob_ECC_bytes = ECC24bitPer1K_byte_count;
+ ECC_size->oob_ECC_mode = ECC24bitPer1K;
+ ECC_size->unprotect = mtd->realoobsize - ECC40bitPer1K_byte_count*ECC_size->banks - ECC24bitPer1K_byte_count - 24;
+ break;
+ case ECC60bitPer1K:
+ ECC_size->ecc_bits_count = ECC60bitPer1K_bit_count;
+ ECC_size->oob_ecc_bits_count = ECC24bitPer1K_bit_count;
+ ECC_size->max_bit_error = 60;
+ ECC_size->oob_max_bit_error = 24;
+ ECC_size->banks = mtd->realwritesize/1024;
+ ECC_size->bank_size = 1024;
+ ECC_size->bank_offset = mtd->realwritesize/ECC_size->banks + ECC60bitPer1K_byte_count;
+ ECC_size->ECC_bytes = ECC60bitPer1K_byte_count;
+ ECC_size->oob_ECC_bytes = ECC24bitPer1K_byte_count;
+ ECC_size->oob_ECC_mode = ECC24bitPer1K;
+ ECC_size->unprotect = mtd->realoobsize - ECC60bitPer1K_byte_count*ECC_size->banks - ECC24bitPer1K_byte_count - 24;
+ break;
+ default:
+ printk("%d-bit ECC engine is not support:\r\n", ECC_size->ecc_engine);
+ break;;
+ }
+ return;
+}
+
+int get_partition_name(const char *src, char** endpp, char* buffer)
+{
+ int i = 0;
+ if(NULL == src || NULL == buffer)
+ {
+ return -1;
+ }
+
+ while(*src != ':')
+ {
+ *buffer++ = *src++;
+ i++;
+ }
+ *endpp = (char *)src;
+ *buffer = '\0';
+ return i;
+}
+
+int search_mtd_table(char *string, int *ret)
+{
+ int i, err = 0;
+ for (i = 0; i < NUM_NAND_PARTITIONS; i++) {
+ // printk(KERN_DEBUG "MTD dev%d size: %8.8llx \"%s\"\n",
+ //i, nand_partitions[i].size, nand_partitions[i].name);
+ if (strcmp(string, nand_partitions[i].name) == 0) {
+ *ret = i;
+ break;
+ }
+ }
+ return err;
+}
+
+/*
+ * Get the flash and manufacturer id and look up whether the type is supported
+ */
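+/*
+ * The "wmt.io.nand" environment parameter is parsed as a sequence of hex
+ * fields in exactly the order read below (flash id, block count, page size,
+ * ...), each followed by a single separator character, with the product
+ * name string as the final field.
+ */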
+int get_flash_info_from_env(unsigned int id, unsigned int id2, struct WMT_nand_flash_dev *type)
+{
+ int ret, sf_boot_nand_en = 0x4000;
+ char varval[200], *s = NULL, *tmp, varname[] = "wmt.io.nand";
+ unsigned int varlen = 200, value;
+
+ value = STRAP_STATUS_VAL;
+ if ((value&0x4008) != sf_boot_nand_en)
+ return 1;
+ ret = wmt_getsyspara(varname, varval, &varlen);
+ if (!ret) {
+ s = varval;
+ value = simple_strtoul(s, &tmp, 16); type->dwFlashID = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwBlockCount = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwPageSize = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwSpareSize = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwBlockSize = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwAddressCycle = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwBI0Position = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwBI1Position = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwBIOffset = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwDataWidth = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwPageProgramLimit = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwSeqRowReadSupport = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwSeqPageProgram = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwNandType = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwECCBitNum = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwRWTimming = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwTadl = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwDDR = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwRetry = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwRdmz = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwFlashID2 = value; s = tmp+1;
+ copy_filename(type->ProductName, s, MAX_PRODUCT_NAME_LENGTH);
+
+ if (type->dwBlockCount < 1024 || type->dwBlockCount > 16384) {
+ printk(KERN_INFO "dwBlockCount = 0x%x is abnormal\n", type->dwBlockCount);
+ return 2;
+ }
+ if (type->dwPageSize < 512 || type->dwPageSize > 16384) {
+ printk(KERN_INFO "dwPageSize = 0x%x is abnormal\n", type->dwPageSize);
+ return 2;
+ }
+ if (type->dwPageSize > 512)
+ type->options = NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR;
+ if (type->dwBlockSize < (1024*64) || type->dwBlockSize > (16384*256)) {
+ printk(KERN_INFO "dwBlockSize = 0x%x is abnormal\n", type->dwBlockSize);
+ return 2;
+ }
+ if (type->dwAddressCycle < 3 || type->dwAddressCycle > 5) {
+ printk(KERN_INFO "dwAddressCycle = 0x%x is abnoraml\n", type->dwAddressCycle);
+ return 2;
+ }
+ if (type->dwBI0Position != 0 &&
+ type->dwBI0Position > ((type->dwBlockSize/type->dwPageSize)-1)) {
+ printk(KERN_INFO "dwBI0Position = 0x%x is abnoraml\n", type->dwBI0Position);
+ return 2;
+ }
+ if (type->dwBI1Position != 0 &&
+ type->dwBI1Position > ((type->dwBlockSize/type->dwPageSize)-1)) {
+ printk(KERN_INFO "dwBI1Position = 0x%x is abnoraml\n", type->dwBI1Position);
+ return 2;
+ }
+ if (type->dwBIOffset != 0 && type->dwBIOffset != 5) {
+ printk(KERN_INFO "dwBIOffset = 0x%x is abnoraml\n", type->dwBIOffset);
+ return 2;
+ }
+ if (type->dwDataWidth != 0/* && type->dwDataWidth != 1*/) {
+ printk(KERN_INFO "dwDataWidth = 0x%x is abnoraml\n", type->dwDataWidth);
+ return 2;
+ }
+ printk(KERN_DEBUG "dwFlashID = 0x%x\n", type->dwFlashID);
+ printk(KERN_DEBUG "dwBlockCount = 0x%x\n", type->dwBlockCount);
+ printk(KERN_DEBUG "dwPageSize = 0x%x\n", type->dwPageSize);
+ printk(KERN_DEBUG "dwSpareSize = 0x%x\n", type->dwSpareSize);
+ printk(KERN_DEBUG "dwBlockSize = 0x%x\n", type->dwBlockSize);
+ printk(KERN_DEBUG "dwAddressCycle = 0x%x\n", type->dwAddressCycle);
+ printk(KERN_DEBUG "dwBI0Position = 0x%x\n", type->dwBI0Position);
+ printk(KERN_DEBUG "dwBI1Position = 0x%x\n", type->dwBI1Position);
+ printk(KERN_DEBUG "dwBIOffset = 0x%x\n", type->dwBIOffset);
+ printk(KERN_DEBUG "dwDataWidth = 0x%x\n", type->dwDataWidth);
+ printk(KERN_DEBUG "dwPageProgramLimit = 0x%x\n", type->dwPageProgramLimit);
+ printk(KERN_DEBUG "dwSeqRowReadSupport = 0x%x\n", type->dwSeqRowReadSupport);
+ printk(KERN_DEBUG "dwSeqPageProgram = 0x%x\n", type->dwSeqPageProgram);
+ printk(KERN_DEBUG "dwNandType = 0x%x\n", type->dwNandType);
+ printk(KERN_DEBUG "dwECCBitNum = 0x%x\n", type->dwECCBitNum);
+ printk(KERN_DEBUG "dwRWTimming = 0x%x\n", type->dwRWTimming);
+ printk(KERN_DEBUG "dwTadl = 0x%x\n", type->dwTadl);
+ printk(KERN_DEBUG "dwDDR = 0x%x\n", type->dwDDR);
+ printk(KERN_DEBUG "dwRetry = 0x%x\n", type->dwRetry);
+ printk(KERN_DEBUG "dwRdmz = 0x%x\n", type->dwRdmz);
+ printk(KERN_DEBUG "dwFlashID2 = 0x%x\n", type->dwFlashID2);
+ printk(KERN_DEBUG "cProductName = %s\n", type->ProductName);
+ if (id != type->dwFlashID || id2 != type->dwFlashID2) {
+ printk(KERN_ERR "env flash id is different from real id = 0x%x 0x%x\n",
+ type->dwFlashID, type->dwFlashID2);
+ return 3;
+ }
+ }
+ return ret;
+}
+
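+/*
+ * Rough sketch of the timing calculation below (units inferred from the
+ * code): clk1 is the NFC clock period in 0.1 ns steps for divider i, and
+ * the first divider whose two-cycle read window (2*clk1) covers tREA plus
+ * the extra margin is kept; T_TADL/T_TWHR/T_TWB are then the spec times
+ * divided by the resulting period, rounded up.
+ */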
+static int wmt_calc_clock(struct mtd_info *mtd, unsigned int spec_clk, unsigned int spec_tadl, struct NFC_RW_T *nfc_rw)
+{
+ unsigned int i, div1=0, clk1, clk = 0, PLLB;
+ unsigned int tREA, tREH, tADL, tWP, divisor = 11, tWH, tWB, tWHR, margin;
+
+ /*print_nand_register(mtd);*/
+ PLLB = *(volatile unsigned int *)PMPMB_ADDR;
+ PLLB = (2*(((PLLB>>16)&0x7F)+1))/((((PLLB>>8)&0x1F)+1)*(1<<(PLLB&3)));
+ printk(KERN_DEBUG "PLLB=0x%x, spec_clk=0x%x\n", PLLB, spec_clk);
+ tREA = (spec_clk>>24)&0xFF;
+ tREH = (spec_clk>>16)&0xFF;
+ tWP = (spec_clk>>8)&0xFF;
+ tWH = spec_clk&0xFF;
+ tWB = (spec_tadl>>24)&0xFF;
+ tWHR = (spec_tadl>>16)&0xFF;
+ tADL = spec_tadl&0xFFFF;
+ for (i = 1; i < 16; i++) {
+ if (MAX_SPEED_MHZ >= (PLLB*SOURCE_CLOCK)/i) {
+ div1 = i;
+ break;
+ }
+ }
+
+ margin = (tREA+10)*10+15;
+ if (mtd->id == 0x98D78493 && mtd->id2 == 0x72570000)
+ margin = (tREA+6)*10;
+ else if (mtd->id == 0x45D78493 && mtd->id2 == 0x72570000)
+ margin = (tREA+6)*10;
+
+ for (i = div1; i < 32; i++) {
+ clk1 = (10000 * i)/(PLLB*SOURCE_CLOCK);
+ if ((2*clk1) >= margin) {
+ divisor = i;
+ clk = clk1/10;
+ //printk("div=%d tREA=%d 2*clk=%d\n", i, (tREA+10)*10+15, clk*2);
+ break;
+ }
+ }
+ nfc_rw->T_R_hold = 1;
+ nfc_rw->T_R_setup = 1;
+ nfc_rw->divisor = divisor;
+ nfc_rw->T_W_hold = 1;
+ nfc_rw->T_W_setup = 1;
+
+ i = 0;
+ while ((i*clk) < tADL && i < 50)
+ i++;
+ nfc_rw->T_TADL = i;
+ i = 0;
+ while ((i*clk) < tWHR && i < 50)
+ i++;
+ nfc_rw->T_TWHR = i;
+ i = 0;
+ while ((i*clk) < tWB && i < 50)
+ i++;
+ nfc_rw->T_TWB = i;
+
+ nfc_rw->T_RHC_THC =
+ ((nfc_rw->T_R_hold&0xFF) << 12) +
+ (((nfc_rw->T_R_setup&0xFF) + (nfc_rw->T_R_hold&0xFF)) << 8) +
+ ((nfc_rw->T_W_setup&0xF) << 4) +
+ ((nfc_rw->T_W_setup + nfc_rw->T_W_hold)&0xF);
+
+ if ((MAX_SPEED_MHZ < (PLLB*SOURCE_CLOCK)/(divisor)) || clk == 0 || clk > 45)
+ return 1;
+
+ return 0;
+}
+#if 0
+static int old_wmt_calc_clock(struct mtd_info *mtd, unsigned int spec_clk, unsigned int spec_tadl, struct NFC_RW_T *nfc_rw)
+{
+ unsigned int i, div1=0, div2, clk1, clk2=0, comp, T_setup, T1=0, T2=0, clk, PLLB;
+ unsigned int tREA, tREH, Thold, Thold2, Ttmp, tADL, tWP, divisor, tWH, tWB, tWHR;
+
+ /*print_nand_register(mtd);*/
+ PLLB = *(volatile unsigned int *)PMPMB_ADDR;
+ PLLB = (2*(((PLLB>>16)&0x7F)+1))/((((PLLB>>8)&0x1F)+1)*(1<<(PLLB&3)));
+ printk(KERN_DEBUG "PLLB=0x%x, spec_clk=0x%x\n", PLLB, spec_clk);
+ tREA = (spec_clk>>24)&0xFF;
+ tREH = (spec_clk>>16)&0xFF;
+ tWP = (spec_clk>>8)&0xFF;
+ tWH = spec_clk&0xFF;
+ tWB = (spec_tadl>>24)&0xFF;
+ tWHR = (spec_tadl>>16)&0xFF;
+ tADL = spec_tadl&0xFFFF;
+ for (i = 1; i < 16; i++) {
+ if (MAX_SPEED_MHZ >= (PLLB*SOURCE_CLOCK)/i) {
+ div1 = i;
+ break;
+ }
+ }
+
+ clk1 = (1000 * div1)/(PLLB*SOURCE_CLOCK);
+ //printk("clk1=%d, div1=%d, spec_clk=%d\n", clk1, div1, spec_clk);
+ for (T1 = 1; T1 < 10; T1++) {
+ if ((T1*clk1) >= (tREA + MAX_READ_DELAY))
+ break;
+ }
+ i = 1;
+ while (i*clk1 <= tREH) {
+ i++;
+ }
+ Thold = i;
+ printk(KERN_DEBUG "T1=%d, clk1=%d, div1=%d, Thold=%d, tREA=%d+delay(%d)\n", T1, clk1, div1, Thold, tREA, MAX_READ_DELAY);
+ Ttmp = T_setup = T1;
+ clk = clk1;
+ divisor = div1;
+ div2 = div1;
+ while (Ttmp > 1 && clk != 0) {
+ div2++;
+ clk2 = (1000 * div2)/(PLLB*SOURCE_CLOCK);
+ comp = 0;
+ for (T2 = 1; T2 < Ttmp; T2++) {
+ if ((T2*clk2) >= (tREA + MAX_READ_DELAY)) {
+ Ttmp = T2;
+ comp = 1;
+ i = 1;
+ while (i*clk2 <= tREH) {
+ i++;
+ }
+ Thold2 = i;
+ printk(KERN_DEBUG "T2=%d, clk2=%d, div2=%d, Thold2=%d, comp=1\n", T2, clk2, div2, Thold2);
+ break;
+ }
+ }
+ if (comp == 1) {
+ clk1 = clk * (T_setup+Thold) * mtd->realwritesize;
+ div1 = clk2 * (T2+Thold2) * mtd->realwritesize;
+ printk(KERN_DEBUG "Tim1=%d , Tim2=%d\n", clk1, div1);
+ if ((clk * (T_setup+Thold) * mtd->realwritesize) > (clk2 * (T2+Thold2) * mtd->realwritesize)) {
+ T_setup = T2;
+ clk = clk2;
+ divisor = div2;
+ Thold = Thold2;
+ } else {
+ printk(KERN_DEBUG "T2 is greater and not use\n");
+ }
+ }
+ } /* end of while */
+ nfc_rw->T_R_hold = Thold;
+ nfc_rw->T_R_setup = T_setup;
+ nfc_rw->divisor = divisor;
+
+ i = 1;
+ nfc_rw->T_W_setup = 0x1; /* set write setup/hold time */
+ while ((i*clk) <= (tWP+MAX_WRITE_DELAY)) {
+ nfc_rw->T_W_setup += 1;
+ i++;
+ }
+ nfc_rw->T_W_hold = 1;
+
+ if ((nfc_rw->T_W_hold * 2) == 2)
+ Thold = 4;
+ else if ((nfc_rw->T_W_hold * 2) == 4)
+ Thold = 6;
+ i = 0;
+ while (((i/*+Thold*/)*clk) < tADL && i < 50)
+ i++;
+ nfc_rw->T_TADL = i;
+ //printk("Tad i=%d\n", i);
+ i = 0;
+ while ((i*clk) < tWHR && i < 50)
+ i++;
+ nfc_rw->T_TWHR = i;
+ i = 0;
+ while ((i*clk) < tWB && i < 50)
+ i++;
+ nfc_rw->T_TWB = i;
+
+ nfc_rw->T_RHC_THC =
+ ((nfc_rw->T_R_hold&0xFF) << 12) +
+ (((nfc_rw->T_R_setup&0xFF) + (nfc_rw->T_R_hold&0xFF)) << 8) +
+ ((nfc_rw->T_W_setup&0xF) << 4) +
+ //((nfc_rw->T_W_hold&0xF) << 4) +
+ ((nfc_rw->T_W_setup + nfc_rw->T_W_hold)&0xF);
+
+ if ((MAX_SPEED_MHZ < (PLLB*SOURCE_CLOCK)/(divisor)) || clk == 0 || T_setup == 0 || clk > 45)
+ return 1;
+
+ return 0;
+}
+#endif
+
+static void wmt_nfc_init(struct wmt_nand_info *info, struct mtd_info *mtd)
+{
+ writeb((PAGE_2K|WP_DISABLE|DIRECT_MAP), info->reg + NFCR12_NAND_TYPE_SEL);
+
+ writel(0x2424, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+ writeb(B2R, info->reg + NFCRb_NFC_INT_STAT);
+ writeb(0x0, info->reg + NFCRd_OOB_CTRL);
+
+}
+
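+/*
+ * Program the NFC for the detected flash: page-size field, bus width,
+ * calculated (or fallback) read/write timing and DDR mode if reported.
+ * The PMNAND divisor write is presumably gated on the PM controller being
+ * idle, hence the PMCS busy-wait loops below.
+ */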
+void wmt_init_nfc(struct mtd_info *mtd, unsigned int spec_clk, unsigned int spec_tadl, int busw)
+{
+ unsigned int status = 0, page_size, divisor, NFC_RWTimming;
+ struct nand_chip *chip = mtd->priv;
+ struct NFC_RW_T nfc_rw;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned short cfg = 0;
+
+ writeb(B2R, info->reg + NFCRb_NFC_INT_STAT);
+ writel(0x0, info->reg + NFCRd_OOB_CTRL);
+
+ if (mtd->realwritesize == 2048) {
+ page_size = PAGE_2K;
+ } else if (mtd->realwritesize == 4096) {
+ page_size = PAGE_4K;
+ } else if (mtd->realwritesize == 6144) {
+ page_size = PAGE_8K;
+ } else if (mtd->realwritesize == 8192) {
+ page_size = PAGE_8K;
+ } else if (mtd->realwritesize == 16384 || mtd->realwritesize == 15360) {
+ page_size = PAGE_16K;
+ } else
+ page_size = PAGE_32K;
+
+ cfg = WP_DISABLE|DIRECT_MAP|page_size;
+ if (prob_end == 1 && !mtd->dwDDR)
+ cfg |= RD_DLY;
+
+ if (busw) {
+ cfg |= WIDTH_16;
+ printk(KERN_WARNING "nand flash use 16-bit witdth mode\n");
+ }
+ writeb(cfg, info->reg + NFCR12_NAND_TYPE_SEL);
+
+ status = wmt_calc_clock(mtd, spec_clk, spec_tadl, &nfc_rw);
+ if (status) {
+ printk(KERN_ERR "timming calculate fail\n");
+ nfc_rw.T_RHC_THC = 0x2424;
+ nfc_rw.T_TADL = 0x3c;
+ nfc_rw.T_TWHR = 0x12;
+ nfc_rw.T_TWB = 0xa;
+ nfc_rw.divisor = 10;
+ }
+ NFC_RWTimming = nfc_rw.T_RHC_THC;
+ divisor = nfc_rw.divisor;
+ if (prob_end == 0 && mtd->dwDDR)
+ divisor = divisor + 5;
+
+ switch(mtd->id)
+ {
+ case 0x2C88044B:
+ case 0x2C68044A:
+ case 0x2C64444B:
+ case 0x2C44444B:
+ case 0x2C48044A:
+ case 0x8968044A:
+ //NFC_RWTimming = 0x2424;
+ //divisor = 9;
+ //nand_get_feature(mtd, 1);
+ nand_set_feature(mtd, NAND_SET_FEATURE, 01, 05);
+ nand_get_feature(mtd, 1);
+ break;
+ }
+ //chip->select_chip(mtd, -1);
+ if (!status) {
+ while ((*(volatile unsigned long *)(PMCS_ADDR+0x18))&0x7F0038)
+ ;
+ *(volatile unsigned long *)PMNAND_ADDR = divisor;
+ while ((*(volatile unsigned long *)(PMCS_ADDR+0x18))&0x7F0038)
+ ;
+ }
+ divisor = *(volatile unsigned long *)PMNAND_ADDR;
+ if (((mtd->id>>24)&0xFF) == NAND_MFR_HYNIX) {
+ if (prob_end == 1)
+ NFC_RWTimming = 0x1312;//0x2424;
+ else
+ NFC_RWTimming = 0x2424;
+ }
+
+ if (prob_end == 1)
+ NFC_RWTimming = 0x1212;
+ else
+ NFC_RWTimming = 0x2424;
+
+ printk(KERN_NOTICE "TWB=%dT, tWHR=%dT, tadl=%dT, div=0x%x, (RH/RC/WH/WC)=0x%x\n",
+ nfc_rw.T_TWB, nfc_rw.T_TWHR, nfc_rw.T_TADL, divisor, NFC_RWTimming);
+ writel((nfc_rw.T_TWB<<16) + (nfc_rw.T_TWHR<<8) + nfc_rw.T_TADL, info->reg + NFCRe_CALC_TADL);
+ writel(NFC_RWTimming, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+
+ if (mtd->dwDDR) {
+ if (mtd->dwDDR == 1) {
+ if (mtd->dwRdmz)
+ reset_nfc(mtd, NULL, 3);
+ nand_get_feature(mtd, 0x80);
+ nand_set_feature(mtd, NAND_SET_FEATURE, 0x80, 0);
+ nand_get_feature(mtd, 0x80);
+ }
+ writel(0x0101, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+ writeb(0x7F, info->reg + NFCR7_DLYCOMP);
+ writeb(readb(info->reg + NFCR12_NAND_TYPE_SEL)|0x80, info->reg + NFCR12_NAND_TYPE_SEL);
+ }
+ printk("DDR=%d\n", mtd->dwDDR);
+ /*print_nand_register(mtd);*/
+ chip->select_chip(mtd, -1);
+}
+
+#if 0
+static void disable_redunt_out_bch_ctrl(struct wmt_nand_info *info, int flag)
+{
+ if (flag == 1)
+ writeb(readb(info->reg + NFCRd_OOB_CTRL)|RED_DIS, info->reg + NFCRd_OOB_CTRL);
+ else
+ writeb(readb(info->reg + NFCRd_OOB_CTRL)&(~RED_DIS), info->reg + NFCRd_OOB_CTRL);
+}
+static void redunt_read_hm_ecc_ctrl(struct wmt_nand_info *info, int flag)
+{
+ if (flag == 1)
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) | OOB_READ, info->reg + NFCRd_OOB_CTRL);
+ else
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & (~OOB_READ), info->reg + NFCRd_OOB_CTRL);
+}
+#endif
+
+static void set_ecc_engine(struct wmt_nand_info *info, int type)
+{
+ /*struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);*/
+ writel((readl(info->reg + NFCR9_ECC_BCH_CTRL) & (~ECC_MODE)) | type,
+ info->reg + NFCR9_ECC_BCH_CTRL);
+
+ if (type > ECC1bit) { /* enable BCH ecc interrupt */
+ writel(readl(info->reg + NFCR9_ECC_BCH_CTRL) | BCH_INT_EN, info->reg + NFCR9_ECC_BCH_CTRL);
+ } else
+ writel(readl(info->reg + NFCR9_ECC_BCH_CTRL) & (~BCH_INT_EN), info->reg + NFCR9_ECC_BCH_CTRL);
+
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+}
+
+
+static int wmt_nand_ready(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ //unsigned int b2r_stat;
+ int i = 0;
+
+ while (1) {
+ if (readb(info->reg + NFCRb_NFC_INT_STAT) & B2R)
+ break;
+ if ((++i>>20)) {
+ printk(KERN_ERR "nand flash is not ready\n");
+ /*print_nand_register(mtd);*/
+ /* while (1);*/
+ return -1;
+ }
+ }
+ //b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R, info->reg + NFCRb_NFC_INT_STAT);
+ wmb();
+ if (readb(info->reg + NFCRb_NFC_INT_STAT) & B2R) {
+ printk(KERN_ERR "NFC err : B2R status not clean\n");
+ return -2;
+ }
+ return 0;
+}
+
+
+static int wmt_nfc_transfer_ready(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i = 0;
+
+ while (1) {
+ if (!(readb(info->reg + NFCRa_NFC_STAT) & NFC_BUSY))
+ break;
+
+ if (++i>>20)
+ return -3;
+ }
+ return 0;
+}
+/* Vincent 2008.11.3*/
+static int wmt_wait_chip_ready(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i = 0;
+
+ while (1) {
+ if ((readb(info->reg + NFCRa_NFC_STAT) & FLASH_RDY))
+ break;
+ if (++i>>20)
+ return -3;
+ }
+ return 0;
+}
+static int wmt_wait_cmd_ready(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i = 0;
+
+ while (1) {
+ if (!(readb(info->reg + NFCRa_NFC_STAT) & NFC_CMD_RDY))
+ break;
+ if (++i>>20)
+ return -3;
+ }
+ return 0;
+}
+
+/* #if (NAND_PAGE_SIZE == 512) Vincent 2008.11.4
+static int wmt_wait_dma_ready(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i = 0;
+
+ while (1) {
+ if (!(readb(info->reg + NFC_IDLE) & 0x02))
+ break;
+ if (++i>>20) {
+ printk(KERN_ERR"\r DMA NOT Ready!\n");
+ print_nand_register(mtd);
+ return -3;
+ }
+ }
+ return 0;
+}
+#endif Vincent 2008.11.4*/
+
+static void wmt_wait_nfc_ready(struct wmt_nand_info *info)
+{
+ unsigned int bank_stat1, i = 0;
+ while (1) {
+ bank_stat1 = readw(info->reg + NFCRb_NFC_INT_STAT);
+ if (!(readb(info->reg + NFCRa_NFC_STAT) & NFC_BUSY))
+ break;
+ else if ((bank_stat1 & (ERR_CORRECT | BCH_ERR)) == (ERR_CORRECT | BCH_ERR))
+ break;
+
+ if (i>>20)
+ return;
+ i++;
+ }
+}
+
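+/* Flip the bit at position 'pos' in *c (equivalent to c[0] ^= 1 << pos). */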
+static void bit_correct(uint8_t *c, uint8_t pos)
+{
+ c[0] = (((c[0] ^ (0x01<<pos)) & (0x01<<pos)) | (c[0] & (~(0x01<<pos))));
+ #if 0
+ temp = info->dmabuf[bch_err_idx[0] >> 3];
+ temp >>= ((bch_err_idx[0] & 0x07) - 1);
+ #endif
+}
+
+/*
+ * flag = 0: check BCH ECC
+ * flag = 1: don't check ECC
+ * flag = 2: check Hamming ECC
+ */
+
+static int NFC_WAIT_IDLE(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i = 0;
+
+ while (1) {
+ if (readb(info->reg + NFCR15_IDLE_STAT) & NFC_IDLE)
+ break;
+ if (i>>20) {
+ printk(KERN_NOTICE "nfc_wait_idle() time out\n");
+ print_nand_register(mtd);
+ //while(i);
+ return -1;
+ }
+ i++;
+ }
+ return 0;
+
+}
+
+static int wmt_nfc_wait_idle(struct mtd_info *mtd, unsigned int flag, int command,
+int column, unsigned int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i = 0;
+
+ while (1) {
+ if (readb(info->reg + NFCR15_IDLE_STAT) & NFC_IDLE)
+ break;
+ if (i>>20) {
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ printk(KERN_NOTICE "nfc_wait_idle time out\n");
+ print_nand_register(mtd);
+ //while(i);
+ return -1;
+ }
+ i++;
+ }
+ /* continue read next bank and calc BCH ECC */
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+
+ return 0;
+}
+
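+/*
+ * Helpers for the randomizer/erased-page checks: check_rdmz_mark() compares
+ * four bytes against the wmt_rdmz marker and returns 0 when two or more
+ * bytes match, while check_all_FF() decides whether an uncorrectable bank is
+ * really an erased (all-0xFF, possibly randomized) page by majority vote
+ * over 'size' words.
+ */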
+int check_rdmz_mark(unsigned int *buf, int size, int oob, struct mtd_info *mtd)
+{
+ /*struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);*/
+ int i = 0, k = 0;
+ uint8_t *bm = (uint8_t *) &buf[1];
+ for (i = 0; i < 4; i++) {
+ if (bm[i] == wmt_rdmz[i])
+ k++;
+ }
+ if (k > 0 && k < 4) {
+ printk("buf=0x%x 0x%x mark=0x%x\n", buf[0], *(unsigned int *)(buf-1), *(unsigned int *)wmt_rdmz);
+ //printk("nfcrf=%x oob=%d page=0x%x\n", readl(info->reg + NFCRf_CALC_RDMZ), oob, info->cur_page);
+ }
+ if (k >= 2)
+ return 0;
+ else
+ return 1;
+}
+void set_FIFO_FF(unsigned int *buf, int size)
+{
+ int i;
+ for (i = 0; i < size; i++)
+ buf[i] = 0xFFFFFFFF;
+}
+int check_all_FF(unsigned int *buf, int size, struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i = 0, j = 0, k = 0;
+ unsigned int *bf = (unsigned int *)&rdmz_FF[info->cur_page%16][0];
+ //unsigned int *bf = (unsigned int *)&info->dmabuf[24];
+ unsigned int *bf1 = &rdmz_badblk[info->cur_page%2][0];
+
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0) {
+ for (i = 0; i < size; i++) {
+ if (buf[i] != bf[i] && buf[i] != bf1[i]) {
+ k++;
+ /*if (info->cur_page < ((mtd->blkcnt - 4) * mtd->pagecnt))
+ printk("need retry %d=[%x][%x] \n",i, buf[i],bf[i]);*/
+ } else
+ j++;
+ }
+ if (j > (size/2))
+ return 1;
+ } else {
+ if (info->ECC_mode <= 3)
+ size--;
+ for (i = 0; i < size; i++) {
+ if (buf[i] != 0xFFFFFFFF && buf[i] != 0) {
+ k++;
+ /*printk("unc %d=[%x]\n",i, buf[i]);*/
+ } else
+ j++;
+ }
+ if (j > (size/2))
+ return 1;
+ }
+ /*if (info->cur_lpage < ((mtd->blkcnt - 4) * mtd->pagecnt)) {
+ print_nand_register(mtd);
+ printk("cur page 0x%x\n",info->cur_page);
+ }*/
+ return 0;
+}
+
+#if 1
+int check_all_FF_sw(unsigned int *buf, int size, struct mtd_info *mtd)
+{
+ /*struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);*/
+ int i = 0, j = 0, k = 0;
+ for (i = 0; i < size; i++) {
+ if (buf[i] != 0xFFFFFFFF) {
+ k++;//return -i;
+ /*if (oob)
+ printk("%d=[%x] \n",i, buf[i]);*/
+ } else
+ j++;
+ }
+ //if (k && oob) {
+ //printk("k=%d total%d, oob=%d\n", k, size, oob);
+ /*print_nand_register(mtd);
+ rdmzier_oob((uint8_t *)(info->reg+ECC_FIFO_0), (uint8_t *)(info->reg+ECC_FIFO_0), 6, info->cur_page, mtd->realwritesize/4);
+ print_nand_register(mtd);
+ rdmzier_oob((uint8_t *)(info->reg+ECC_FIFO_0), (uint8_t *)(info->reg+ECC_FIFO_0), 6, info->cur_page, mtd->realwritesize/4);
+ while(k);*/
+ //}
+ /*if (k && !oob)
+ printk("k=%d j%d, total=%d\n", k, j, size);*/
+ if (j > (size/2))
+ return 1;
+ else
+ return 0;
+}
+#endif
+
+void clear_ecc_resume_dma(struct wmt_nand_info *info)
+{
+ writeb((ERR_CORRECT | BCH_ERR), info->reg + NFCRb_NFC_INT_STAT);
+ wmb();
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ wmb();
+}
+
+
+
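+/*
+ * BCH correction for the data area of an aligned read: if the error count
+ * saturates (uncorrectable), decide between "erased page", "retry allowed"
+ * and "hard failure"; otherwise read the error-position registers and flip
+ * the affected bits in the DMA buffer or the redundant-area FIFO.
+ */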
+void bch_data_ecc_correct(struct mtd_info *mtd)
+{
+ int i, all_FF = 0;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_chip *this = mtd->priv;
+ unsigned int bank_stat2, bch_ecc_idx, bank;
+ unsigned int bank_size;
+
+ /* BCH ECC err process */
+ bank_stat2 = readw(info->reg + NFCR17_ECC_BCH_ERR_STAT);
+ bch_ecc_idx = bank_stat2 & BCH_ERR_CNT;
+ bank = (bank_stat2 & BANK_NUM) >> 8;
+ /* for data area */
+ /*if (bch_ecc_idx > 15)
+ printk(KERN_NOTICE "pg=0x%x bk%d=%d\n",info->cur_page, bank, bch_ecc_idx);*/
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "Read data \n");
+ #endif
+ bank_size = info->bank_size;
+ /*if (this->cur_chip && (info->cur_page%4) == 0)
+ if ((info->cur_lpage < 0x7FB00) && this->cur_chip->cur_try_times < 5 && this->cur_chip != 0 && info->isr_cmd == 0x0) {
+ printk("----------------------------------set unc error by dannier info->cur_page0x%x\n", info->cur_page);
+ bch_ecc_idx = BCH_ERR_CNT;
+ }*/
+ if (bch_ecc_idx >= BCH_ERR_CNT) {
+ //unsigned int oob_parity_size = readb(info->reg + NFCR10_OOB_ECC_SIZE+1);
+ if (bank == 0)
+ info->unc_bank = 1;
+ else
+ info->unc_bank |= (1<<bank);
+ /*if (oob_parity_size >= 40)
+ oob_parity_size = 40;*/
+
+ clear_ecc_resume_dma(info);
+
+ if (bank >= (info->banks-1)) {
+ //all_FF = check_rdmz_mark((uint32_t *)(info->reg+ECC_FIFO_4), 1, 0, mtd);
+ all_FF = check_all_FF((uint32_t *)(info->reg+ECC_FIFO_6), 6, mtd);
+
+ if (all_FF) {
+ info->data_ecc_uncor_err = 0;
+ info->unc_allFF = 1;
+ /*set_FIFO_FF((uint32_t *)(info->reg+ECC_FIFO_0), 5);
+ set_FIFO_FF((uint32_t *)info->dmabuf, mtd->realwritesize/4);*/
+ return;
+ } /*else
+ printk("**********lost check all FF case *********af%x, bk%x\n",
+ info->unc_bank,((1<<info->banks)-1));*/
+ }
+
+ if (info->isr_cmd == 0x0 && mtd->dwRetry && this->cur_chip) {
+ info->data_ecc_uncor_err = 1;
+ if ((info->cur_lpage >= ((mtd->blkcnt-8)*mtd->pagecnt) &&
+ info->cur_lpage < ((mtd->blkcnt-4)*mtd->pagecnt)) &&
+ ((this->cur_chip->nand_id>>24)&0xFF) == NAND_MFR_HYNIX) {
+ /* read retry table not allowed to use read retry */
+ info->data_ecc_uncor_err = 2;
+ if (bank >= (info->banks-1))
+ printk(KERN_ERR "data area bank %d uncor err page=0x%x no retry\n", bank, info->cur_page);
+ }
+ #ifdef RETRY_DEBUG
+ else {
+ if (bank >= (info->banks-1))
+ printk(KERN_ERR "data area bank %d uncor err page=0x%x use retry\n", bank, info->cur_page);
+ }
+ #endif
+
+ return;
+ } else {
+ if (bank >= (info->banks-1)) {
+ printk("reda lpage=%x bbt_sw_rdmz=%d hold=%x blkcnt=%d\n", info->cur_lpage, mtd->bbt_sw_rdmz, ((mtd->blkcnt - 8)*mtd->pagecnt), mtd->blkcnt);
+ printk(KERN_ERR "data area uncor err page=0x%x,blk=%d no retry\n", info->cur_page, info->cur_page/mtd->pagecnt);
+ /*print_nand_buffer(info->dmabuf, 32);printk("isrcmd 0x=%x\n", info->isr_cmd);
+ print_nand_buffer((uint8_t *)(info->reg+ECC_FIFO_0), 48);
+ print_nand_register(mtd);
+ while(1);*/
+ } else
+ return;
+ }
+ printk(KERN_ERR "data area unc++ page=0x%x no retry\n", info->cur_page);
+ mtd->ecc_stats.failed++;
+ return; /* uncorrected err */
+ }
+ if (mtd->ecc_err_cnt < bch_ecc_idx)
+ mtd->ecc_err_cnt = bch_ecc_idx;
+ /* mtd->ecc_stats.corrected += (bank_stat2 & BCH_ERR_CNT);*/
+ /* BCH ECC correct */
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "data area %d bit corrected err on bank %d \n", bch_ecc_idx, bank);
+ #endif
+ /*if (bank >= (info->banks-1)) {
+ print_nand_register(mtd);
+ }*/
+
+ for (i = 0; i < bch_ecc_idx; i++)
+ bch_err_pos[i] = (readw(info->reg + NFCR18_ECC_BCH_ERR_POS + 2*i) & BCH_ERRPOS0);
+
+ /* continue read next bank and calc BCH ECC */
+ clear_ecc_resume_dma(info);
+
+ for (i = 0; i < bch_ecc_idx; i++) {
+ //bch_err_pos[i] = (readw(info->reg + NFCR18_ECC_BCH_ERR_POS + 2*i) & BCH_ERRPOS0);
+ //if (bank >= (info->banks-1))
+ //printk(KERN_NOTICE "data area byte=%d corrected err on bank %d bs=%d, banks=%d\n", bch_err_pos[i]>>3, bank, bank_size,info->banks);
+ //printk(KERN_NOTICE "data page=0x%x byte=%d corrected err on bank %d bs=%d, banks=%d\n",
+ //info->cur_page, bch_err_pos[i]>>3, bank, bank_size,info->banks);
+ if((bch_err_pos[i] >> 3) < bank_size) {
+ //if (bank >= (info->banks-1))
+ //printk(KERN_NOTICE "bank%d area value=%x ", bank, info->dmabuf[bank_size* bank + (bch_err_pos[i] >> 3)]);
+ bit_correct(&info->dmabuf[bank_size* bank + (bch_err_pos[i] >> 3)], bch_err_pos[i] & 0x07);
+ //if (bank >= (info->banks-1))
+ //printk(KERN_NOTICE "bank%d area c-value=%x \n", bank, info->dmabuf[bank_size* bank + (bch_err_pos[i] >> 3)]);
+ } else if ((bch_err_pos[i] >> 3) < (bank_size + 24) && bank >= (info->banks-1)) {//oob area
+ //if (bank >= (info->banks-1))
+ //printk(KERN_NOTICE "red area value=%x ", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+ bit_correct((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size, (bch_err_pos[i] & 0x07));
+ //if (bank >= (info->banks-1))
+ //printk(KERN_NOTICE "red area c-value=%x \n", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+ }
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "data area %xth ecc error position is byte%d bit%d\n",
+ i, bank_size * bank + (bch_err_pos[i] >> 3), (bch_err_pos[i] & 0x07));
+ #endif
+ }
+}
+
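+/* Same correction flow as above, but for a redundant-area (OOB, command 0x50) read. */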
+void bch_redunt_ecc_correct(struct mtd_info *mtd)
+{
+ int i, all_FF = 1;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_chip *this = mtd->priv;
+ unsigned int bank_stat2, bch_ecc_idx, bank;
+ unsigned int bank_size;
+
+ /* BCH ECC err process */
+ bank_stat2 = readw(info->reg + NFCR17_ECC_BCH_ERR_STAT);
+ bch_ecc_idx = bank_stat2 & BCH_ERR_CNT;
+ bank = (bank_stat2 & BANK_NUM) >> 8;
+ /* for data area */
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "Read oob \n");
+ #endif
+
+ if (info->isr_cmd != 0x50) {
+ printk("bch_redunt_ecc_correct cmd not read oob \n");
+ print_nand_register(mtd);
+ while(1)
+ ;
+ }
+ /*oob_parity_size = readb(info->reg + NFCR10_OOB_ECC_SIZE+1);
+ if (oob_parity_size >= 40)
+ oob_parity_size = 40;*/
+ if (bch_ecc_idx >= BCH_ERR_CNT) {
+ info->unc_bank = 1;
+ all_FF = check_all_FF((uint32_t *)(info->reg+ECC_FIFO_6), 6, mtd);
+
+ clear_ecc_resume_dma(info);
+
+ if (all_FF > 0) {
+ info->unc_allFF = 1;
+ return;
+ }
+ /*printk("red unc err\n");
+ print_nand_register(mtd);
+ rdmzier_oob((uint8_t *)(info->reg+ECC_FIFO_0), (uint8_t *)(info->reg+ECC_FIFO_0), 6, info->cur_page, mtd->realwritesize/4);
+ print_nand_register(mtd);
+ rdmzier_oob((uint8_t *)(info->reg+ECC_FIFO_0), (uint8_t *)(info->reg+ECC_FIFO_0), 6, info->cur_page, mtd->realwritesize/4);
+ while(1);*/
+ if (mtd->dwRetry && this->cur_chip) {
+ info->data_ecc_uncor_err = 1;
+ info->oob_ecc_error = 0x50;
+ if ((info->cur_lpage >= ((mtd->blkcnt-8)*mtd->pagecnt) &&
+ info->cur_lpage < ((mtd->blkcnt-4)*mtd->pagecnt)) &&
+ ((this->cur_chip->nand_id>>24)&0xFF) == NAND_MFR_HYNIX) {
+ /* read retry table not allowed to use read retry */
+ info->data_ecc_uncor_err = 2;
+ printk(KERN_ERR "red area bank %d uncor err page=0x%x no retry\n", bank, info->cur_page);
+ }
+ #ifdef RETRY_DEBUG
+ else
+ printk(KERN_ERR "red area bank %d uncor err page=0x%x use retry\n", bank, info->cur_page);
+ #endif
+
+ return;
+ } else {
+ printk(KERN_ERR "red area uncor err page=0x%x no retry\n", info->cur_page);
+ }
+ mtd->ecc_stats.failed++;
+ printk(KERN_ERR "red area unc++ page=0x%x no retry\n", info->cur_page);
+ return; /* uncorrected err */
+ }
+ bank_size = info->bank_size;
+ /* mtd->ecc_stats.corrected += (bank_stat2 & BCH_ERR_CNT);*/
+ /* BCH ECC correct */
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "redunt area %d bit corrected err on bank %d \n", bch_ecc_idx, bank);
+ #endif
+ for (i = 0; i < bch_ecc_idx; i++) {
+ bch_err_pos[i] = (readw(info->reg + NFCR18_ECC_BCH_ERR_POS + 2*i) & BCH_ERRPOS0);
+ //printk(KERN_NOTICE "data area byte=%d corrected err on bank %d bs=%d, banks=%d\n", bch_err_pos[i]>>3, bank, bank_size,info->banks);
+ if((bch_err_pos[i] >> 3) < 24) {
+ //printk(KERN_NOTICE "area value=%d ", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3)));
+ bit_correct((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3), (bch_err_pos[i] & 0x07));
+ }
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "redunt area %xth ecc error position is byte%d bit%d\n",
+ i, bank_size * bank + (bch_err_pos[i] >> 3), (bch_err_pos[i] & 0x07));
+ #endif
+ }
+ /* continue read next bank and calc BCH ECC */
+ clear_ecc_resume_dma(info);
+}
+
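+/* Correction for the last bank of a data read, where the OOB bytes sit in the ECC FIFO. */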
+void bch_data_last_bk_ecc_correct(struct mtd_info *mtd)
+{
+ int i, all_FF;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_chip *this = mtd->priv;
+ unsigned int bank_stat2, bch_ecc_idx, bank, bank_size;
+
+ /* BCH ECC err process */
+ bank_stat2 = readw(info->reg + NFCR17_ECC_BCH_ERR_STAT);
+ bch_ecc_idx = bank_stat2 & BCH_ERR_CNT;
+ bank = (bank_stat2 & BANK_NUM) >> 8;
+ /* mtd->ecc_stats.corrected += (bank_stat2 & BCH_ERR_CNT);*/
+ /* BCH ECC correct */
+ bank_size = info->bank_size;
+ if (bch_ecc_idx >= BCH_ERR_CNT) {
+ info->unc_bank = 1;
+ //unsigned int oob_parity_size = readb(info->reg + NFCR10_OOB_ECC_SIZE+1);
+ all_FF = check_all_FF((uint32_t *)(info->reg+ECC_FIFO_6), 6/*oob_parity_size/4*/, mtd);
+ clear_ecc_resume_dma(info);
+ if (all_FF > 0) {
+ info->unc_allFF = 1;
+ return;
+ }
+ if (mtd->dwRetry && this->cur_chip) {
+ info->data_ecc_uncor_err = 1;
+ printk(KERN_ERR
+ "last bank data area uncorrected err cur_page=%d use retry\n",info->cur_page);
+ return;
+ } else
+ printk(KERN_ERR
+ "last bank data area uncorrected err cur_page=%d no retry\n",info->cur_page);
+ mtd->ecc_stats.failed++;
+ printk(KERN_ERR "lst area unc++ page=0x%x no retry\n", info->cur_page);
+ //while(bank_stat1);
+ return;
+ }
+ if (mtd->ecc_err_cnt < bch_ecc_idx)
+ mtd->ecc_err_cnt = bch_ecc_idx;
+ /* mtd->ecc_stats.corrected += (bank_stat2 & BCH_ERR_CNT);*/
+ /* BCH ECC correct */
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "last bank %d bit corrected error\n", bch_ecc_idx);
+ #endif
+ for (i = 0; i < bch_ecc_idx; i++) {
+ bch_err_pos[i] = (readw(info->reg + NFCR18_ECC_BCH_ERR_POS + 2*i) & BCH_ERRPOS0);
+ //printk(KERN_NOTICE "data area byte=%d corrected err on bank %d bs=%d, banks=%d\n", bch_err_pos[i]>>3, bank, bank_size,info->banks);
+ if((bch_err_pos[i] >> 3) < bank_size) {
+ bit_correct(&info->dmabuf[bank_size * (info->banks-1) + (bch_err_pos[i] >> 3)], bch_err_pos[i] & 0x07);
+ } else if ((bch_err_pos[i] >> 3) < (bank_size + 24)) {//oob area of last bank
+ //printk(KERN_NOTICE "redundant area value=%d ", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+ bit_correct((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size, (bch_err_pos[i] & 0x07));
+ //printk(KERN_NOTICE "redundant area value=%d \n", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+ }
+
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "data area last bank %xth ecc error position is byte%d bit%d\n",
+ i, bank_size * bank + (bch_err_pos[i] >> 3), (bch_err_pos[i] & 0x07));
+ #endif
+ }
+ /* continue read next bank and calc BCH ECC */
+ clear_ecc_resume_dma(info);
+}
+
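+/*
+ * Variant of bch_data_ecc_correct() for non-aligned page layouts: one extra
+ * DMA bank holds the OOB bytes, so the all-FF check and the last-bank test
+ * use (info->banks + 1) banks and the OOB copy in the DMA buffer.
+ */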
+void bch_data_ecc_correct_noalign(struct mtd_info *mtd)
+{
+ int i, all_FF = 0;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_chip *this = mtd->priv;
+ unsigned int bank_stat2, bch_ecc_idx, bank, dmabank = info->banks;
+ unsigned int bank_size;
+
+ dmabank = info->banks + 1;
+
+ /* BCH ECC err process */
+ bank_stat2 = readw(info->reg + NFCR17_ECC_BCH_ERR_STAT);
+ bch_ecc_idx = bank_stat2 & BCH_ERR_CNT;
+ bank = (bank_stat2 & BANK_NUM) >> 8;
+ bank_size = info->bank_size;
+ /* for data area */
+ /*if (bch_ecc_idx >= 50)
+ printk(KERN_NOTICE "pg=0x%x=blk%d bk%d=%d\n",info->cur_page, info->cur_page/mtd->pagecnt, bank, bch_ecc_idx);*/
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "Read data \n");//print_nand_register(mtd);
+ #endif
+
+ if (bch_ecc_idx >= BCH_ERR_CNT) {
+ /*if (bank >= (dmabank-1))
+ print_nand_buffer((uint8_t *)info->dmabuf+bank_size * (dmabank-1), 32);*/
+ //unsigned int oob_parity_size = readb(info->reg + NFCR10_OOB_ECC_SIZE+1);
+ if (bank == 0)
+ info->unc_bank = 1;
+ else
+ info->unc_bank |= (1<<bank);
+
+ clear_ecc_resume_dma(info);
+
+ if (bank >= (dmabank-1)) {
+ if (dmabank == (info->banks + 1))
+ all_FF = check_all_FF((uint32_t *)(info->dmabuf+mtd->realwritesize+24), 6, mtd);
+ else
+ all_FF = check_all_FF((uint32_t *)(info->reg+ECC_FIFO_6), 6, mtd);
+ if (all_FF) {
+ info->data_ecc_uncor_err = 0;
+ info->unc_allFF = 1;
+ return;
+ } /*else
+ printk("**********lost check all FF case *********af%x, bk%x\n",
+ info->unc_bank,((1<<dmabank)-1));*/
+ }
+
+ if (info->isr_cmd == 0x0 && mtd->dwRetry && this->cur_chip) {
+ info->data_ecc_uncor_err = 1;
+ if ((info->cur_lpage >= ((mtd->blkcnt-8)*mtd->pagecnt) &&
+ info->cur_lpage < ((mtd->blkcnt-4)*mtd->pagecnt)) &&
+ ((this->cur_chip->nand_id>>24)&0xFF) == NAND_MFR_HYNIX) {
+ /* read retry table not allowed to use read retry */
+ info->data_ecc_uncor_err = 2;
+ if (bank >= (dmabank-1))
+ printk(KERN_ERR "data area bank %d uncor err at eslc page=0x%x no retry\n", bank, info->cur_page);
+ }
+ #ifdef RETRY_DEBUG
+ else {
+ if (bank >= (dmabank-1))
+ printk(KERN_ERR "data area bank %d uncor err page=0x%x use retry\n", bank, info->cur_page);
+ }
+ #endif
+
+ return;
+ } else {
+ if (bank >= (dmabank-1)) {
+ printk("reda lpage=%x bbt_sw_rdmz=%d hold=%x blkcnt=%d\n", info->cur_lpage, mtd->bbt_sw_rdmz, ((mtd->blkcnt - 8)*mtd->pagecnt), mtd->blkcnt);
+ printk(KERN_ERR "data area uncor err page=0x%x,blk=%d no retry\n", info->cur_page, info->cur_page/mtd->pagecnt);
+ /*print_nand_buffer(info->dmabuf, 32);printk("isrcmd 0x=%x\n", info->isr_cmd);
+ print_nand_buffer((uint8_t *)(info->reg+ECC_FIFO_0), 48);
+ print_nand_register(mtd);
+ while(1);*/
+ } else
+ return;
+ }
+ printk(KERN_ERR "data area unc++ page=0x%x no retry\n", info->cur_page);
+ mtd->ecc_stats.failed++;
+ return; /* uncorrected err */
+ }
+ if (mtd->ecc_err_cnt < bch_ecc_idx)
+ mtd->ecc_err_cnt = bch_ecc_idx;
+ /* mtd->ecc_stats.corrected += (bank_stat2 & BCH_ERR_CNT);*/
+ /* BCH ECC correct */
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "data area %d bit corrected err on bank %d \n", bch_ecc_idx, bank);
+ #endif
+ /*if (bank >= (dmabank-1)) {
+ print_nand_register(mtd);
+ }*/
+
+ for (i = 0; i < bch_ecc_idx; i++)
+ bch_err_pos[i] = (readw(info->reg + NFCR18_ECC_BCH_ERR_POS + 2*i) & BCH_ERRPOS0);
+
+ /* continue read next bank and calc BCH ECC */
+ clear_ecc_resume_dma(info);
+
+ for (i = 0; i < bch_ecc_idx; i++) {
+ //if (bank >= (dmabank-1))
+ //printk(KERN_NOTICE "data area byte=%d corrected err on bank %d bs=%d, banks=%d\n", bch_err_pos[i]>>3, bank, bank_size,dmabank);
+ //printk(KERN_NOTICE "data page=0x%x byte=%d corrected err on bank %d bs=%d, banks=%d\n",
+ //info->cur_page, bch_err_pos[i]>>3, bank, bank_size,dmabank);
+ if((bch_err_pos[i] >> 3) < bank_size) {
+ //printk(KERN_NOTICE "bank%d area value=%x ", bank, info->dmabuf[bank_size* bank + (bch_err_pos[i] >> 3)]);
+ bit_correct(&info->dmabuf[bank_size* bank + (bch_err_pos[i] >> 3)], bch_err_pos[i] & 0x07);
+ //printk(KERN_NOTICE "bank%d area c-value=%x \n", bank, info->dmabuf[bank_size* bank + (bch_err_pos[i] >> 3)]);
+ } else if ((bch_err_pos[i] >> 3) < (bank_size + 24) && bank >= (dmabank-1)) {//oob area
+ //printk(KERN_NOTICE "red area value=%x ", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+ bit_correct((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size, (bch_err_pos[i] & 0x07));
+ //printk(KERN_NOTICE "red area c-value=%x \n", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+ }
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "data area %xth ecc error position is byte%d bit%d\n",
+ i, bank_size * bank + (bch_err_pos[i] >> 3), (bch_err_pos[i] & 0x07));
+ #endif
+ }
+}
+
+void bch_data_last_bk_ecc_correct_noalign(struct mtd_info *mtd)
+{
+ int i, all_FF;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_chip *this = mtd->priv;
+ unsigned int bank_stat2, bch_ecc_idx, bank, bank_size, bank_oob = info->banks;
+
+ bank_stat2 = readw(info->reg + NFCR17_ECC_BCH_ERR_STAT);
+ bch_ecc_idx = bank_stat2 & BCH_ERR_CNT;
+ bank = (bank_stat2 & BANK_NUM) >> 8;
+ /* mtd->ecc_stats.corrected += (bank_stat2 & BCH_ERR_CNT);*/
+ /* BCH ECC correct */
+
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "Read lst bk data \n");
+ #endif
+
+ bank_size = info->bank_size;
+ if (bch_ecc_idx >= BCH_ERR_CNT) {
+ //print_nand_buffer((uint8_t *)info->dmabuf+bank_size * bank_oob, 32);
+ //unsigned int oob_parity_size = readb(info->reg + NFCR10_OOB_ECC_SIZE+1);
+ all_FF = check_all_FF((uint32_t *)(info->dmabuf+bank_size * bank_oob + 24), 6, mtd);//print_nand_buffer(info->dmabuf+bank_size * bank_oob + 24, 24);
+ clear_ecc_resume_dma(info);
+ //printk(KERN_ERR "lstbk err cur_page=0x%x %d all_FF=%d\n",info->cur_page, info->cur_page, all_FF);
+ if (all_FF > 0) {
+ info->unc_allFF = 1;
+ return;
+ }
+ if (mtd->dwRetry && this->cur_chip) {
+ info->data_ecc_uncor_err = 1;
+ printk(KERN_ERR
+ "last bank data area uncorrected err cur_page=%d use retry\n",info->cur_page);
+ //print_nand_buffer(info->dmabuf+bank_size * bank_oob/* + 24*/, 48);
+ return;
+ } else
+ printk(KERN_ERR
+ "last bank data area uncorrected err cur_page=%d no retry\n",info->cur_page);
+ mtd->ecc_stats.failed++;
+ //while(bank_stat1);
+ return;
+ }
+ if (mtd->ecc_err_cnt < bch_ecc_idx)
+ mtd->ecc_err_cnt = bch_ecc_idx;
+ /* mtd->ecc_stats.corrected += (bank_stat2 & BCH_ERR_CNT);*/
+ /* BCH ECC correct */
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "last bank %d bit corrected error\n", bch_ecc_idx);
+ #endif
+ for (i = 0; i < bch_ecc_idx; i++) {
+ bch_err_pos[i] = (readw(info->reg + NFCR18_ECC_BCH_ERR_POS + 2*i) & BCH_ERRPOS0);
+ //printk(KERN_NOTICE "data area byte=%d corrected err on bank %d bs=%d, banks=%d\n", bch_err_pos[i]>>3, bank, bank_size,bank_oob+1);
+ if((bch_err_pos[i] >> 3) < bank_size) {
+ bit_correct(&info->dmabuf[bank_size * bank_oob + (bch_err_pos[i] >> 3)], bch_err_pos[i] & 0x07);
+ } /*else if ((bch_err_pos[i] >> 3) < (bank_size + 24)) {//oob area of last bank
+ //printk(KERN_NOTICE "redundant area value=%d ", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+ bit_correct((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size, (bch_err_pos[i] & 0x07));
+ //printk(KERN_NOTICE "redundant area value=%d \n", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+ }*/
+
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "data area last bank %xth ecc error position is byte%d bit%d\n",
+ i, bank_size * bank + (bch_err_pos[i] >> 3), (bch_err_pos[i] & 0x07));
+ #endif
+ }
+ /* continue read next bank and calc BCH ECC */
+ clear_ecc_resume_dma(info);
+}
+
+/*
+* [Routine Description]
+*	read NAND status
+* [Arguments]
+*	cmd : NAND read status command
+* [Return]
+*	the result of the command
+*/
+static int wmt_read_nand_status(struct mtd_info *mtd, unsigned char cmd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int cfg = 0, status = -1;
+ unsigned int b2r_stat;
+
+ #ifdef WMT_HW_RDMZ
+ unsigned int rdmz;
+ rdmz = readb(info->reg + NFCRf_CALC_RDMZ+2);
+ if (mtd->dwRdmz && rdmz) {
+ //dump_stack();
+ nfc_hw_rdmz(mtd, 1);
+ writeb(0, info->reg + NFCR4_COMPORT3_4);
+ }
+ #endif
+
+ writeb(cmd, info->reg + NFCR2_COMPORT0);
+ cfg = TWHR|DPAHSE_DISABLE|(1<<1);
+
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+//print_nand_register(mtd);
+ writew(cfg|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_wait_cmd_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "NFC command transfer1 is not ready\n");
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ return status;
+ }
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ cfg = SING_RW|NAND2NFC;
+ writew(cfg|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+//print_nand_register(mtd);
+ status = wmt_wait_cmd_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "NFC command transfer2 is not ready\n");
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ return status;
+ }
+ status = wmt_nfc_transfer_ready(mtd);
+ /* status = wmt_nand_wait_idle(mtd);*/
+ if (status) {
+ printk(KERN_ERR "NFC IO transfer is not ready\n");
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ /*print_nand_register(mtd);*/
+ return status;
+ }
+ info->datalen = 0;
+ info->dmabuf[0] = readb(info->reg + NFCR0_DATAPORT) & 0xff;
+ #ifdef WMT_HW_RDMZ
+ if (mtd->dwRdmz && rdmz) {
+ //printk(KERN_ERR "sts=%x\n", info->dmabuf[0]);
+ info->dmabuf[0] = info->dmabuf[0]^rdmz_tb[0];
+ if ((info->dmabuf[0]&0xFF) != 0xe0) {
+ printk(KERN_ERR "de-rdmz sts=%x page=%x\n", info->dmabuf[0],info->cur_page);
+ //if (info->cur_page != 0x7ff00) {
+ print_nand_register(mtd);
+ dump_stack();
+ //while(1);
+ //}
+ }
+ }
+ #endif
+
+ status = info->dmabuf[0];
+ //printk( "read status=0x%x\n", status);
+ return status;
+}
+
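+/*
+ * Build a chain of short PDMA descriptors for 'len' bytes of 'buf', split
+ * into bank_size chunks; the final descriptor is flagged as the end of the
+ * chain.
+ */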
+void fill_desc(unsigned int *Desc, unsigned int len, unsigned char *buf, unsigned int bank_size)
+{
+ unsigned int CurDes_off = 0, i;
+ unsigned char *desc = (unsigned char *)Desc;
+
+ for (i = 0; i < (len/bank_size); i++) {
+ nand_init_short_desc((unsigned int *)(desc+CurDes_off), bank_size,
+ (unsigned int *)(buf+i*bank_size),
+ ((i == ((len/bank_size)-1)) && (!(len%bank_size))) ? 1 : 0);
+ CurDes_off += sizeof(struct _NAND_PDMA_DESC_S);
+ }
+ if (len%bank_size)
+ nand_init_short_desc((unsigned int *)(desc+CurDes_off),
+ (len%bank_size), (unsigned int *)(buf+i*bank_size), 1);
+}
+
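+/*
+ * Prepare a PDMA transfer of 'len' bytes from (wr=0) or to (wr=1) the flash:
+ * program the transfer count, reset the PDMA engine, build the descriptor
+ * chain and start it. Nbank == 2 appears to select the second plane's half
+ * of the buffer for multi-plane writes.
+ */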
+/* data_flag = 0: set data ecc fifo */
+static int wmt_nfc_dma_cfg(struct mtd_info *mtd, unsigned int len, unsigned int wr,
+int data_flag, int Nbank)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int status;
+ unsigned int *ReadDesc, *WriteDesc, ofs;
+ ofs = mtd->writesize + mtd->oobsize + 0x1000 - (mtd->oobsize%0x1000);
+ ReadDesc = (unsigned int *)(info->dmabuf + ofs + 0x100);
+ WriteDesc = (unsigned int *)(info->dmabuf + ofs + 0x200);
+ /*
+ printk(KERN_ERR "info->dmabuf = 0x%x\r\n", (unsigned int) info->dmabuf);
+ printk(KERN_ERR "info->dmaaddr = 0x%x\r\n", (unsigned int) info->dmaaddr);
+ printk(KERN_ERR "ReadDesc addr = 0x%x\r\n", (unsigned int) ReadDesc);
+ printk(KERN_ERR "WriteDesc addr = 0x%x\r\n", (unsigned int) WriteDesc);
+ */
+ if (len == 0) {
+ printk(KERN_ERR "DMA transfer length = 0\r\n");
+ return 1;
+ }
+ if (len > 1024 && readb(info->reg + NFCR9_ECC_BCH_CTRL)&DIS_BCH_ECC) {
+ len = 512;
+ if (mtd->realwritesize > 8192)
+ len = 1024;
+ }
+
+ if (data_flag == 0) {
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & 0xF7, info->reg + NFCRd_OOB_CTRL);
+ }
+ writew(len - 1, info->reg + NFCR8_DMA_CNT);
+ status = nand_init_pdma(mtd);
+ if (status)
+ printk(KERN_ERR "nand_init_pdma fail status = 0x%x", status);
+
+ if (readl(info->reg + NFC_DMA_ISR) & NAND_PDMA_IER_INT_STS)
+ writel(NAND_PDMA_IER_INT_STS, info->reg + NFC_DMA_ISR);
+
+ if (readl(info->reg + NFC_DMA_ISR) & NAND_PDMA_IER_INT_STS) {
+ printk(KERN_ERR "PDMA interrupt status can't be clear ");
+ printk(KERN_ERR "NFC_DMA_ISR = 0x%8.8x \n", (unsigned int)readl(info->reg + NFC_DMA_ISR));
+ }
+
+ nand_alloc_desc_pool((wr) ? WriteDesc : ReadDesc);
+ /*nand_init_short_desc((wr)?WriteDesc : ReadDesc, len, (unsigned long *)buf);*/
+ if (info->oob_ecc_error == 0x50 && len != 1 && len != 3) {
+ fill_desc((wr)?WriteDesc : ReadDesc, len, (unsigned char *)info->last_bank_dmaaddr, 1024);
+ if (len != 1024 && len != 512)
+ printk("oob_ecc_error len!=1024, len=%d \n", len);
+ } else if (Nbank == 2) {//for multi-plane 2nd plane wr dma cfg
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ fill_desc((wr)?WriteDesc : ReadDesc, len, (unsigned char *)info->dmaaddr, 1024);
+ else
+ fill_desc((wr)?WriteDesc : ReadDesc, len, (unsigned char *)info->dmaaddr + mtd->realwritesize, 1024);
+ } else
+ fill_desc((wr)?WriteDesc : ReadDesc, len, (unsigned char *)info->dmaaddr, 1024);
+ /*printk(KERN_ERR "dma wr=%d, len=0x%x\n", wr, len);*/
+
+ nand_config_pdma(mtd,
+ (wr) ? (unsigned long *)(info->dmaaddr + ofs + 0x200)
+ : (unsigned long *)(info->dmaaddr + ofs + 0x100), wr);
+
+ return 0;
+}
+
+int nand_init_pdma(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+ writel(NAND_PDMA_GCR_SOFTRESET, info->reg + NFC_DMA_GCR);
+ writel(NAND_PDMA_GCR_DMA_EN, info->reg + NFC_DMA_GCR);
+ if (readl(info->reg + NFC_DMA_GCR) & NAND_PDMA_GCR_DMA_EN)
+ return 0;
+ else
+ return 1;
+}
+
+
+int nand_free_pdma(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ writel(0, info->reg + NFC_DMA_DESPR);
+ writel(0, info->reg + NFC_DMA_GCR);
+ return 0;
+}
+
+
+int nand_alloc_desc_pool(unsigned int *DescAddr)
+{
+ memset(DescAddr, 0x00, 0x80);
+ return 0;
+}
+
+int nand_init_short_desc(unsigned int *DescAddr, unsigned int ReqCount, unsigned int *BufferAddr, int End)
+{
+ struct _NAND_PDMA_DESC_S *CurDes_S;
+ CurDes_S = (struct _NAND_PDMA_DESC_S *) DescAddr;
+ CurDes_S->ReqCount = ReqCount;
+ CurDes_S->i = End;
+ CurDes_S->end = End;
+ CurDes_S->format = 0;
+ CurDes_S->DataBufferAddr = (unsigned long)BufferAddr;
+ return 0;
+}
+
+int nand_init_long_desc(unsigned long *DescAddr, unsigned int ReqCount, unsigned long *BufferAddr,
+unsigned long *BranchAddr, int End)
+{
+ struct _NAND_PDMA_DESC_L *CurDes_L;
+ CurDes_L = (struct _NAND_PDMA_DESC_L *) DescAddr;
+ CurDes_L->ReqCount = ReqCount;
+ CurDes_L->i = 0;
+ CurDes_L->format = 1;
+ CurDes_L->DataBufferAddr = (unsigned long)BufferAddr;
+ CurDes_L->BranchAddr = (unsigned long)BranchAddr;
+ if (End) {
+ CurDes_L->end = 1;
+ CurDes_L->i = 1;
+ }
+
+ return 0;
+}
+/*
+int nand_config_desc(unsigned long *DescAddr, unsigned long *BufferAddr, int Blk_Cnt)
+{
+ int i = 0 ;
+ unsigned long *CurDes = DescAddr;
+
+ nand_alloc_desc_pool(CurDes);
+
+
+ for (i = 0 ; i < 3 ; i++) {
+ nand_init_short_desc(CurDes, 0x80, BufferAddr);
+ BufferAddr += (i * 0x80);
+ CurDes += (i * sizeof(NAND_PDMA_DESC_S));
+ }
+ if (Blk_Cnt > 1) {
+ nand_init_long_desc(CurDes, 0x80, BufferAddr, CurDes + sizeof(NAND_PDMA_DESC_L), 0);
+ BufferAddr += (i * 0x80);
+ CurDes += (i * sizeof(NAND_PDMA_DESC_L));
+
+ nand_init_long_desc(CurDes, (Blk_Cnt - 1) * 512, BufferAddr,
+ CurDes + sizeof(NAND_PDMA_DESC_L), 1);
+ } else {
+ nand_init_long_desc(CurDes, 0x80, BufferAddr, CurDes + sizeof(NAND_PDMA_DESC_L), 1);
+ }
+
+ return 0;
+}
+*/
+
+int nand_config_pdma(struct mtd_info *mtd, unsigned long *DescAddr, unsigned int dir)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ if (info->isr_cmd != NAND_SET_FEATURE && info->isr_cmd != 0x37 && info->isr_cmd != 0x36)
+ writel(NAND_PDMA_IER_INT_EN, info->reg + NFC_DMA_IER);
+ writel((unsigned long)DescAddr, info->reg + NFC_DMA_DESPR);
+ if (dir == NAND_PDMA_READ)
+ writel(readl(info->reg + NFC_DMA_CCR)|NAND_PDMA_CCR_peripheral_to_IF,
+ info->reg + NFC_DMA_CCR);
+ else
+ writel(readl(info->reg + NFC_DMA_CCR)&(~NAND_PDMA_CCR_peripheral_to_IF),
+ info->reg + NFC_DMA_CCR);
+ wmb();
+ /*mask_interrupt(IRQ_NFC_DMA);*/
+ writel(readl(info->reg + NFC_DMA_CCR)|NAND_PDMA_CCR_RUN, info->reg + NFC_DMA_CCR);
+ /*printk(KERN_ERR "NFC_DMA_CCR = 0x%8.8x\r\n", readl(info->reg + NFC_DMA_CCR));*/
+ /*print_nand_register(mtd);*/
+ wmb();
+ return 0;
+}
+
+int nand_pdma_handler(struct mtd_info *mtd)
+{
+ unsigned long status = 0;
+ unsigned long count = 0;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+ count = 0x100000;
+#if 0
+ /* polling CSR TC status */
+ if (!(readl(info->reg + NFC_DMA_CCR)|NAND_PDMA_CCR_peripheral_to_IF)) {
+ do {
+ count--;
+ if (readl(info->reg + NFC_DMA_ISR) & NAND_PDMA_IER_INT_STS) {
+ status = readl(info->reg + NFC_DMA_CCR) & NAND_PDMA_CCR_EvtCode;
+ writel(readl(info->reg + NFC_DMA_ISR)&NAND_PDMA_IER_INT_STS, info->reg + NFC_DMA_ISR);
+ printk(KERN_ERR "NFC_DMA_ISR = 0x%8.8x\r\n",
+ (unsigned int)readl(info->reg + NFC_DMA_ISR));
+ break;
+ }
+ if (count == 0) {
+ printk(KERN_ERR "PDMA Time Out!\n");
+ printk(KERN_ERR "NFC_DMA_CCR = 0x%8.8x\r\n",
+ (unsigned int)readl(info->reg + NFC_DMA_CCR));
+ /*print_nand_register(mtd);*/
+ count = 0x100000;
+ /*break;*/
+ }
+ } while (1);
+} else
+#endif
+ status = readl(info->reg + NFC_DMA_CCR) & NAND_PDMA_CCR_EvtCode;
+ writel(readl(info->reg + NFC_DMA_ISR)&NAND_PDMA_IER_INT_STS, info->reg + NFC_DMA_ISR);
+ if (status == NAND_PDMA_CCR_Evt_ff_underrun)
+ printk(KERN_ERR "PDMA Buffer under run!\n");
+
+ if (status == NAND_PDMA_CCR_Evt_ff_overrun)
+ printk(KERN_ERR "PDMA Buffer over run!\n");
+
+ if (status == NAND_PDMA_CCR_Evt_desp_read)
+ printk(KERN_ERR "PDMA read Descriptor error!\n");
+
+ if (status == NAND_PDMA_CCR_Evt_data_rw)
+ printk(KERN_ERR "PDMA read/write memory descriptor error!\n");
+
+ if (status == NAND_PDMA_CCR_Evt_early_end)
+ printk(KERN_ERR "PDMA read early end!\n");
+
+ if (count == 0) {
+ printk(KERN_ERR "PDMA TimeOut!\n");
+ while (1)
+ ;
+ }
+ return 0;
+}
+
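+/*
+ * Issue GET FEATURES (0xEE) at 'addr' and read the four returned parameter
+ * bytes one at a time through the data port into info->dmabuf.
+ */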
+int nand_get_feature(struct mtd_info *mtd, int addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int cfg = 0, i = 0;
+ int status = -1;
+
+ writeb(0xEE, info->reg + NFCR2_COMPORT0);
+ writeb(addr, info->reg + NFCR3_COMPORT1_2);
+ cfg = DPAHSE_DISABLE|(0x02<<1);
+ writew(cfg|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+ status = wmt_wait_cmd_ready(mtd);
+
+ if (status) {
+ printk(KERN_ERR "nand_get_feature(): wait cmd is not ready\n");
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ return status;
+ }
+ status = wmt_wait_chip_ready(mtd);
+ if (status)
+ printk(KERN_ERR "flash is not ready\n");
+
+ status = wmt_nand_ready(mtd);
+ if (status)
+ printk(KERN_ERR "get feature wait B2R fail\n");
+
+ cfg = NAND2NFC|SING_RW;
+ for (i = 0; i < 4; i++) {
+ writew(cfg|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_wait_cmd_ready(mtd);
+ if (status)
+ return status;
+ status = wmt_nfc_transfer_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "in nand_get_feature(): wait transfer cmd is not ready\n");
+ return status;
+ }
+ info->dmabuf[i] = readb(info->reg + NFCR0_DATAPORT) & 0xff;
+ }
+ //#ifdef NAND_DEBUG
+ printk(KERN_NOTICE "nand get feature %x %x %x %x\n",
+ info->dmabuf[0], info->dmabuf[1], info->dmabuf[2], info->dmabuf[3]);
+ //#endif
+ info->datalen = 0;
+ return 0;
+}
+
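+/*
+ * Issue SET FEATURES at 'addrss' with 'value' as the first of the four
+ * parameter bytes, transferred by DMA with BCH ECC temporarily disabled.
+ */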
+int nand_set_feature(struct mtd_info *mtd, int cmd, int addrss, int value)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int cfg = 0, len = 4;
+ int status = -1;
+ DECLARE_COMPLETION(complete);
+ //unsigned char id[4] = {value, 0, 0, 0};
+ info->dmabuf[0] = value;
+ info->dmabuf[1] = 0;
+ info->dmabuf[2] = 0;
+ info->dmabuf[3] = 0;
+ info->isr_cmd = cmd;
+ info->done_data = &complete;
+ writel(readl(info->reg + NFCR9_ECC_BCH_CTRL) | DIS_BCH_ECC, info->reg + NFCR9_ECC_BCH_CTRL);
+ //printk("set feature cycle1\n");
+
+ writeb(0x1F, info->reg + NFCR13_INT_MASK);
+ writel(B2R, info->reg + NFCRb_NFC_INT_STAT);
+ if (readb(info->reg + NFCRb_NFC_INT_STAT) & B2R)
+ printk("nand get feature B2R can't clear\n");
+ writeb(0x1B, info->reg + NFCR13_INT_MASK);
+
+ //printk("set feature cycle2\n");
+
+ wmt_nfc_dma_cfg(mtd, len, 1, 0, -1);
+ //print_nand_register(nfc);
+
+ writeb(cmd, info->reg + NFCR2_COMPORT0);
+ writeb(addrss, info->reg + NFCR3_COMPORT1_2);
+ cfg = (0x02<<1);
+ //print_nand_register(mtd);
+ //printk("set feature cycle trigg = 0x%x\n", cfg|NFC_TRIGGER|OLD_CMD);
+ writew(cfg|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ //print_nand_register(mtd);
+ //printk("set feature cycle3\n");
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ status = NFC_WAIT_IDLE(mtd);
+ if (status) {
+ printk("get feature nand flash idle time out\n");
+ return status;
+ }
+
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ //printk("set feature cycle5\n");
+ status = wmt_nfc_transfer_ready(mtd);
+ /* status = wmt_nand_wait_idle(mtd);*/
+ if (status) {
+ printk(KERN_ERR "NFC IO transfer is not ready\n");
+ /*print_nand_register(mtd);*/
+ return status;
+ }
+
+ status = NFC_WAIT_IDLE(mtd);
+ if (status) {
+ printk("set feature nand flash idle time out\n");
+ return status;
+ }
+
+ status = nand_pdma_handler(mtd);
+ nand_free_pdma(mtd);
+ if (status)
+ printk(KERN_ERR "check write pdma handler status= %x \n", status);
+ writel(readl(info->reg + NFCR9_ECC_BCH_CTRL) & ~DIS_BCH_ECC, info->reg + NFCR9_ECC_BCH_CTRL);
+ printk(KERN_DEBUG " MICRON flash set feature timing mode %d\n", value);
+ return status;
+}
+
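+/*
+ * Read 'size' retry-related registers, one byte per 0x37/address cycle,
+ * through the high FIFO (used by the Hynix read-retry support below).
+ */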
+int get_parameter(struct mtd_info *mtd, uint8_t *buf, uint8_t *addr, int size)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int cfg = 0, len = 1;
+ int i, status = -1, regc = size;
+ unsigned char *FIFO = (unsigned char *) (info->reg+ECC_FIFO_c);
+
+ //print_nand_register(mtd);
+
+ for (i = 0; i < regc;i++) {
+ //DECLARE_COMPLETION(complete);
+ info->isr_cmd = 0x37;
+ //info->done_data = &complete;
+ //printk("hynix retry get c1\n");
+ //nfc->reg->NFCR13 = 0x0F;
+ writeb(0x1F, info->reg + NFCR13_INT_MASK);
+ //nfc->reg->NFCRb |= B2R; /* write to clear */
+ writel(B2R, info->reg + NFCRb_NFC_INT_STAT);
+ if (readb(info->reg + NFCRb_NFC_INT_STAT) & B2R)
+ printk("B2R can't clear\n");
+
+ //printk("hynix retry get c2\n");
+ wmt_nfc_dma_cfg(mtd, len, 0, 0, -1);
+ //print_nand_register(nfc);
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) | HIGH64FIFO, info->reg + NFCRd_OOB_CTRL);
+ if (i == 0) {
+ FIFO[0] = 0x37;
+ FIFO[1] = addr[0];
+ //nfc->reg->NFCRc = 0x00020001;
+ writel(0x00020001, info->reg + NFCRc_CMD_ADDR);
+ cfg = (0x02<<1);
+ } else {
+ FIFO[0] = addr[i];
+ // set address latch ALE(high) and CLE(lower)
+ //nfc->reg->NFCRc = 0x00010000;
+ writel(0x00010000, info->reg + NFCRc_CMD_ADDR);
+ cfg = (0x01<<1);
+ }
+ //print_nand_register(mtd);
+ //printk("hynix get retry param trigg = 0x%x\n", NAND2NFC|cfg|NFC_TRIGGER);
+ //nfc->reg->NFCR1 = NAND2NFC|cfg|NFC_TRIGGER; /* cfg & start*/
+ writew(NAND2NFC|cfg|NFC_TRIGGER, info->reg + NFCR1_COMCTRL);
+ //print_nand_register(mtd);
+ //wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ //j = 0;
+		while (!(readl(info->reg + NFC_DMA_ISR) & NAND_PDMA_IER_INT_STS))
+			;
+ status = NFC_WAIT_IDLE(mtd);
+ if (status) {
+ printk("get feature nand flash idle time out\n");
+ return status;
+ }
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ //printk("set feature cycle5\n");
+ status = wmt_nfc_transfer_ready(mtd);
+ /* status = wmt_nand_wait_idle(mtd);*/
+ if (status) {
+ printk(KERN_ERR "NFC IO transfer is not ready\n");
+ /*print_nand_register(mtd);*/
+ return status;
+ }
+
+ status = NFC_WAIT_IDLE(mtd);
+ if (status) {
+ printk("set feature nand flash idle time out\n");
+ return status;
+ }
+
+ status = nand_pdma_handler(mtd);
+ nand_free_pdma(mtd);
+ if (status)
+ printk(KERN_ERR "check write pdma handler status= %x \n", status);
+
+ buf[i] = info->dmabuf[0];
+ }
+
+ #ifdef RETRY_DEBUG
+ printk("retry param buf =");
+ for (i = 0; i < regc;i++)
+ printk(" 0x%x", buf[i]);
+ printk("\n");
+ #endif
+
+ //writel(readl(info->reg + NFCR9_ECC_BCH_CTRL) & ~DIS_BCH_ECC, info->reg + NFCR9_ECC_BCH_CTRL);
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & ~HIGH64FIFO, info->reg + NFCRd_OOB_CTRL);
+ return status;
+}
+
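+/*
+ * Snapshot the Hynix ESLC or read-retry registers. On the first call the
+ * values read back appear to be cached as defaults (and, for ESLC, added
+ * onto the preset deltas); on later calls the current values are only
+ * matched against the retry table to recover the current try index.
+ */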
+int hynix_get_parameter(struct mtd_info *mtd, int mode)
+{
+ struct nand_chip *this = mtd->priv;
+ struct nand_read_retry_param *cur_chip = this->cur_chip;
+ unsigned char buf[16] = {0};
+ unsigned char *offset = NULL;
+ unsigned char *set_value = NULL;
+ unsigned char *def_value = NULL;
+ unsigned int reg_num;
+ int i = 0, j = 0;
+ int rc = -1;
+
+ if (mode == ESLC_MODE) {
+ reg_num = cur_chip->eslc_reg_num;
+ offset = cur_chip->eslc_offset;
+ def_value = cur_chip->eslc_def_value;
+ set_value = cur_chip->eslc_set_value;
+ } else if (mode == READ_RETRY_MODE) {
+ reg_num = cur_chip->retry_reg_num;
+ offset = cur_chip->retry_offset;
+ def_value = cur_chip->retry_def_value;
+ } else {
+ printk("Not support this mode %d\n", mode);
+ return rc;
+ }
+ if (mtd->dwRdmz)
+ reset_nfc(mtd, NULL, 3);
+ rc = get_parameter(mtd, buf, offset, reg_num);
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);//enable rdmz
+ if (rc != 0)
+ return rc;
+
+ if (mode == ESLC_MODE) {
+ if((def_value[reg_num] != 0xff) && (def_value[reg_num + 1] != 0xff)) {
+ for(i = 0; i < reg_num; i++) {
+ def_value[i] = buf[i];
+ set_value[i] += buf[i];
+ }
+ def_value[reg_num] = 0xff;
+ def_value[reg_num + 1] = 0xff;
+ //printk("ESLC: ");
+ print_nand_buffer(buf, reg_num);
+ } else {
+ //printk("ESLC Current: ");
+ //print_nand_buffer(buf, reg_num);
+ }
+ } else if (mode == READ_RETRY_MODE) {
+ if ((def_value[reg_num] != 0xff) && (def_value[reg_num + 1] != 0xff)) {
+ for (i = 0; i < reg_num; i++)
+ def_value[i] = buf[i];
+ def_value[reg_num] = 0xff;
+ def_value[reg_num + 1] = 0xff;
+ //printk("Retry : ");
+ //print_nand_buffer(buf, reg_num);
+ } else {
+ //printk("Retry Current: ");
+ //print_nand_buffer(buf, reg_num);
+ //printk("\n");
+ for(j = 0; j < cur_chip->total_try_times; j++) {
+ for(i = 0; i < reg_num; i++) {
+ if(buf[i] != cur_chip->retry_value[j*reg_num+i])
+ break;
+ }
+ if(i == reg_num) {
+ cur_chip->cur_try_times = j;
+ printk("Get current try times %d from current register.\n", j);
+ break;
+ }
+ }
+
+ }
+ }
+ return rc;
+}
+
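+/*
+ * Generic raw sequence helper: send up to cmd_cnt command bytes and
+ * addr_cnt address bytes through the high FIFO, optionally followed by
+ * data_cnt data bytes via DMA. Used by the vendor-specific retry paths.
+ */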
+int write_bytes_cmd(struct mtd_info *mtd, int cmd_cnt, int addr_cnt, int data_cnt, uint8_t *cmd, uint8_t *addr, uint8_t *data)
+{
+ int i, status = 0;
+ unsigned int cmd_addr_cycle = 0, cfg = 0, cfg_bit8 = 0, counter = 10000;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned char *FIFO = (unsigned char *) (info->reg+ECC_FIFO_c);
+
+
+ writeb(0x1F, info->reg + NFCR13_INT_MASK);
+
+ status = NFC_WAIT_IDLE(mtd);
+ if (status) {
+ printk("nand flash idle time out\n");
+ return status;
+ }
+
+ if (data_cnt > 0) {
+ info->isr_cmd = 0x36;
+ memcpy(info->dmabuf, data, data_cnt);
+ wmt_nfc_dma_cfg(mtd, data_cnt, 1, 0, -1);
+ }
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) | HIGH64FIFO, info->reg + NFCRd_OOB_CTRL);
+ for (i = 0; i < cmd_cnt; i++) {
+ FIFO[i] = cmd[i];
+ cmd_addr_cycle |= (1<<i);
+ }
+ for (i = cmd_cnt; i < (addr_cnt+cmd_cnt); i++) {
+ FIFO[i] = addr[i-cmd_cnt];
+ cmd_addr_cycle |= (1<<(i+16));
+ }
+ writel(cmd_addr_cycle, info->reg + NFCRc_CMD_ADDR);
+
+ #ifdef RETRY_DEBUG
+ //printk("NFCRc=0x%x ", cmd_addr_cycle);
+ printk("FIFO = ");
+ for (i = 0; i < (addr_cnt+cmd_cnt); i++)
+ printk("0x%x ", FIFO[i]);
+ if (data_cnt > 0) {
+ printk("data = ");
+ for (i = 0; i < data_cnt; i++) {
+ printk("0x%x ", data[i]);
+ }
+ printk("\n");
+ } else
+ printk("\n");
+ #endif
+
+ cfg = ((cmd_cnt + addr_cnt)&0x7)<<1;
+ cfg_bit8 = (((cmd_cnt + addr_cnt)&0x18)>>3)<<8;
+
+ if (data_cnt == 0)
+ cfg |= DPAHSE_DISABLE;
+
+ writew(cfg_bit8|cfg|NFC_TRIGGER, info->reg + NFCR1_COMCTRL);
+
+//print_nand_register(mtd);
+ status = wmt_nfc_transfer_ready(mtd);
+ if (status) {
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & ~HIGH64FIFO, info->reg + NFCRd_OOB_CTRL);
+ printk(KERN_ERR "NFC IO transfer is not ready\n");
+ /*print_nand_register(mtd);*/
+ goto go_fail;
+ }
+ status = NFC_WAIT_IDLE(mtd);
+ if (status) {
+ printk("retry c1 wait idle time out\n");
+ goto go_fail;
+ }
+ if (cmd_cnt > 0 && cmd)
+ if (cmd[0] == NAND_CMD_RESET) {
+ status = wmt_nand_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "Reset err, nand device is not ready\n");
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ }
+ }
+ if (data_cnt > 0)
+		while (!(readl(info->reg + NFC_DMA_ISR) & NAND_PDMA_IER_INT_STS)) {
+ if (counter <= 0) {
+ break;
+ }
+ counter--;
+ }
+ if (data_cnt > 0) {
+ status = nand_pdma_handler(mtd);
+ nand_free_pdma(mtd);
+ if (status) {
+ printk(KERN_ERR "check write pdma handler status= %x \n", status);
+ goto go_fail;
+ }
+ }
+
+go_fail:
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & ~HIGH64FIFO, info->reg + NFCRd_OOB_CTRL);
+
+ return status;
+}
+
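+/*
+ * Hynix-style register update: 0x36 plus the first offset/value pair, then
+ * the remaining offset/value pairs, terminated by 0x16.
+ */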
+int set_parameter(struct mtd_info *mtd, unsigned char *buf, unsigned char *offset, int regn)
+{
+ int i, status = -1, regc = regn;
+ unsigned char cmd[2] = {0x36, 0x16};
+//print_nand_register(mtd);
+ status = write_bytes_cmd(mtd, 1, 1, 1, (uint8_t *)&cmd[0], offset, buf);
+ if (status)
+ printk("hynix_set read retry reg: phase 0 fail");
+ for (i = 1; i < regc; i++) {
+ status = write_bytes_cmd(mtd, 0, 1, 1, NULL, &offset[i], &buf[i]);
+ if (status)
+ printk("hynix_set read retry reg: phase %d fail", i);
+ }
+ status = write_bytes_cmd(mtd, 1, 0, 0, (uint8_t *)&cmd[1], NULL, NULL);
+ if (status)
+ printk("load_hynix_opt_reg: phase 3 fail");
+
+ return status;
+}
+
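+/* Issue a throw-away 0x00/0x30 page read, presumably so newly written parameters take effect. */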
+void dummy_read(struct mtd_info *mtd)
+{
+ int status = -1;
+ uint8_t cmd[2] = {0x00, 0x30}, addr[5] = {0, 0, 0, 0, 0};
+
+ status = write_bytes_cmd(mtd, 1, 5, 0, &cmd[0], addr, NULL);
+ if (status)
+ printk("dummy read cmd(00) + addr fail\n");
+ status = write_bytes_cmd(mtd, 1, 0, 0, &cmd[1], NULL, NULL);
+ if (status)
+ printk("dummy read cmd(0x30) fail\n");
+/*print_nand_register(mtd);
+dump_stack();*/
+ /* check busy to ready status*/
+ status = wmt_nand_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "NFC check B2R time out\n");
+ }
+}
+
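+/*
+ * Apply the next Hynix retry (or ESLC) register set. On ECC error the try
+ * counter advances through the retry table and wraps back to the defaults;
+ * restoring ESLC defaults is followed by a dummy read.
+ */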
+int hynix_set_parameter(struct mtd_info *mtd, int mode, int def_value)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_chip *this = mtd->priv;
+ struct nand_read_retry_param *cur_chip = this->cur_chip;
+ unsigned char *offset = NULL;
+ unsigned char *set_value = NULL;
+ unsigned int reg_num;
+ int rc = -1;
+
+
+ if (mode == ESLC_MODE) {
+ reg_num = cur_chip->eslc_reg_num;
+ offset = cur_chip->eslc_offset;
+ if (def_value == ECC_ERROR_VALUE) {
+ set_value = cur_chip->eslc_set_value;
+ } else {
+ set_value = cur_chip->eslc_def_value;
+ }
+ } else {
+ reg_num = cur_chip->retry_reg_num;
+ offset = cur_chip->retry_offset;
+ if (def_value == ECC_ERROR_VALUE) {
+ cur_chip->cur_try_times++;
+ if (cur_chip->cur_try_times >= cur_chip->total_try_times)
+ cur_chip->cur_try_times = -1;
+ if ((cur_chip->cur_try_times >= 0) && (cur_chip->cur_try_times < cur_chip->total_try_times))
+ set_value = cur_chip->retry_value + cur_chip->cur_try_times* cur_chip->retry_reg_num;
+ else
+ set_value = cur_chip->retry_def_value;
+
+ } else {
+ set_value = cur_chip->retry_def_value;
+ cur_chip->cur_try_times = -1;
+ }
+ }
+#ifdef RETRY_DEBUG
+ printk("hynix set value: cur_try_times=%d\n", cur_chip->cur_try_times);
+ for(rc = 0; rc < reg_num; rc++)
+ printk(" 0x%x:0x%x ", offset[rc], set_value[rc]);
+ printk("reg_num = %d\n", reg_num);
+#endif
+
+ if (mtd->dwRdmz)
+ reset_nfc(mtd, NULL, 3);
+
+ rc = set_parameter(mtd, set_value, offset, reg_num);
+
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);//enable rdmz
+
+ if(rc) {
+ printk("set_parameter fail.\n");
+ return rc;
+ }
+
+ if (def_value == DEFAULT_VALUE && mode == ESLC_MODE) {
+ printk("dummy read: rpage=%x wpage%x\n", info->cur_page, info->lst_wpage);
+ dummy_read(mtd);
+ }
+
+ return rc;
+}
+
+int toshiba_pre_condition(struct mtd_info *mtd)
+{
+ int status = 0;
+ unsigned char cmd1[2] = {0x5c, 0xc5};
+
+ status = write_bytes_cmd(mtd, 2, 0, 0, cmd1, NULL, NULL);
+ if(status)
+ printk("toshiba pre condition cmd1 time out.\n");
+ else
+ printk("toshiba pre condition OK.\n");
+
+ return status;
+}
+
+int toshiba_get_parameter(struct mtd_info *mtd, int mode)
+{
+ return 0;
+}
+
+int toshiba_set_parameter(struct mtd_info *mtd, int mode, int def_mode)
+{
+ int i, status = -1;
+ struct nand_chip *this = mtd->priv;
+ struct nand_read_retry_param *cur_chip = this->cur_chip;
+ unsigned char cmd2[1] = {0x55};
+ unsigned char cmd3[2] = {0x26, 0x5d};
+ unsigned char *set_value = NULL;
+ unsigned char *offset = NULL;
+
+
+ if (mtd->dwRdmz)
+ reset_nfc(mtd, NULL, 3);
+
+
+ if (cur_chip->cur_try_times >= cur_chip->total_try_times)
+ cur_chip->cur_try_times = 0;
+ set_value = cur_chip->retry_value + cur_chip->cur_try_times*cur_chip->retry_reg_num;
+ offset = cur_chip->retry_offset;
+
+ cur_chip->cur_try_times++;
+ #ifdef RETRY_DEBUG
+ printk("toshiba set cur_try_times=%d\n", cur_chip->cur_try_times);
+ #endif
+ for (i = 0; i < 4; i++) {
+ status = write_bytes_cmd(mtd, 1, 1, 1, cmd2, &offset[i], &set_value[i]);
+ if (status)
+ printk("toshiba set read retry reg: phase %d fail", i);
+ }
+
+ status = write_bytes_cmd(mtd, 2, 0, 0, cmd3, NULL, NULL);
+ if (status) {
+ printk("pre condition cmd2 time out\n");
+ }
+
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);//enable rdmz
+
+ return status;
+}
+
+int samsung_get_parameter(struct mtd_info *mtd, int mode)
+{
+ return 0;
+}
+
+int samsung_set_parameter(struct mtd_info *mtd, int mode, int def_mode)
+{
+ struct nand_chip *this = mtd->priv;
+ struct nand_read_retry_param *cur_chip = this->cur_chip;
+ unsigned char *offset = NULL;
+ unsigned char *set_value = NULL;
+ unsigned int reg_num;
+ int rc = -1, i;
+ uint8_t cmd[1] = {0xA1};
+ uint8_t data[3] = {0, 0, 0};
+
+ if (mtd->dwRdmz)
+ reset_nfc(mtd, NULL, 3);
+
+ reg_num = cur_chip->retry_reg_num;
+ offset = cur_chip->retry_offset;
+ if (def_mode == ECC_ERROR_VALUE) {
+ set_value = cur_chip->retry_value + cur_chip->cur_try_times * reg_num;
+ cur_chip->cur_try_times++;
+ } else {
+ set_value = cur_chip->retry_def_value;
+ cur_chip->cur_try_times = 0;
+ }
+
+ #ifdef RETRY_DEBUG
+ printk("samsung set value: cur_try_times=%d\n", cur_chip->cur_try_times);
+ for(i = 0; i < reg_num; i++)
+ printk(" 0x%x:0x%x ", offset[i], set_value[i]);
+ printk("reg_num = %d\n", reg_num);
+ #endif
+
+ for (i = 0; i < reg_num; i++) {
+ data[1] = offset[i];
+ data[2] = set_value[i];
+ rc = write_bytes_cmd(mtd, 1, 0, 3, cmd, NULL, data);
+ if (rc)
+ printk("samsung read retry reg: phase %d fail\n", i);
+ }
+
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);//enable rdmz
+
+ return rc;
+}
+
+int sandisk_get_parameter(struct mtd_info *mtd, int mode)
+{
+ return 0;
+}
+
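+/*
+ * SanDisk dynamic read retry.  total_try_times packs the lower-page retry
+ * count in its low byte and the upper-page count in the next byte; the
+ * caller passes the count that matches the page being read, which also
+ * selects the upper-page half of retry_value[].  Registers are programmed
+ * with the 0x3B/0xB9 prefix followed by one 0x53 <offset> <value> cycle per
+ * register.
+ */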
+int sandisk_set_parameter(struct mtd_info *mtd, int total_try_times, int def_value)
+{
+ struct nand_chip *this = mtd->priv;
+ struct nand_read_retry_param *cur_chip = this->cur_chip;
+ unsigned char *offset = NULL;
+ unsigned char *set_value = NULL;
+ unsigned int reg_num, upper_page = 0;
+ int i, rc = -1;
+ uint8_t cmd[4] = {0x3B, 0xB9, 0x53, 0x54};
+
+ if (total_try_times != (cur_chip->total_try_times&0xFF))
+ upper_page = 1;
+
+ if (mtd->dwRdmz)
+ reset_nfc(mtd, NULL, 3);
+
+ reg_num = cur_chip->retry_reg_num;
+ offset = cur_chip->retry_offset;
+ if (def_value == ECC_ERROR_VALUE) {
+ cur_chip->cur_try_times++;
+ if (cur_chip->cur_try_times >= total_try_times)
+ cur_chip->cur_try_times = -1;
+ if ((cur_chip->cur_try_times >= 0) && (cur_chip->cur_try_times < total_try_times)) {
+ if (upper_page)
+ set_value = cur_chip->retry_value +
+ (cur_chip->cur_try_times + (cur_chip->total_try_times&0xFF))* reg_num;
+ else
+ set_value = cur_chip->retry_value + cur_chip->cur_try_times * reg_num;
+ } else
+ set_value = cur_chip->retry_def_value;
+
+ } else {
+ set_value = cur_chip->retry_def_value;
+ cur_chip->cur_try_times = -1;
+ }
+#ifdef RETRY_DEBUG
+ printk("sandisk set value: upper_page=%d, cur_try_times=%d\n", upper_page, cur_chip->cur_try_times);
+ for(i = 0; i < reg_num; i++)
+ printk(" 0x%x:0x%x ", offset[i], set_value[i]);
+ printk("reg_num = %d\n", reg_num);
+#endif
+ rc = write_bytes_cmd(mtd, 2, 0, 0, cmd, NULL, NULL);
+ if (rc)
+ printk("sandisk read retry reg: set cmd fail\n");
+ for (i = 0; i < reg_num; i++) {
+ rc = write_bytes_cmd(mtd, 1, 1, 1, &cmd[2], &offset[i], &set_value[i]);
+ if (rc)
+ printk("sandisk set retry reg: phase %d fail\n", i);
+ }
+
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);//enable rdmz
+
+ return rc;
+}
+
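+/*
+ * One-time SanDisk setup: program the otp_offset/otp_data register pairs
+ * (otp_len of them) using the same 0x3B/0xB9 + 0x53 sequence as the retry
+ * tables above.
+ */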
+int sandisk_init_retry_register(struct mtd_info *mtd, struct nand_read_retry_param *cur_chip)
+{
+ int i,status = -1;
+ unsigned char cmd[4] = {0x3B, 0xB9, 0x53, 0x54};
+ unsigned char *offset = cur_chip->otp_offset;
+ unsigned char *data = cur_chip->otp_data;
+ unsigned int regc = cur_chip->otp_len;
+
+ if (mtd->dwRdmz)
+ reset_nfc(mtd, NULL, 3);
+
+ #ifdef RETRY_DEBUG
+ printk("set sandisk init retry register offset addr: 0x%x, 0x%x\n", offset[0], offset[1]);
+ #endif
+ status = write_bytes_cmd(mtd, 2, 0, 0, cmd, NULL, NULL);
+ if (status) {
+ printk("send sandisk_init_retry_register cmd fail\n");
+ }
+ for (i = 0; i < regc; i++) {
+ status = write_bytes_cmd(mtd, 1, 1, 1, &cmd[2], &offset[i], &data[i]);
+ if (status)
+ printk("sandisk_init_retry_register : phase %d fail", i);
+ }
+
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);//enable rdmz
+
+ return status;
+}
+
+int micron_get_parameter(struct mtd_info *mtd, int mode)
+{
+ return 0;
+}
+
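+/*
+ * Micron read retry uses the ONFI SET FEATURES command (NAND_SET_FEATURE) to
+ * write the retry register(s); the retry level simply increments on each ECC
+ * error and drops back to the default values (level 0) when requested.
+ */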
+int micron_set_parameter(struct mtd_info *mtd, int mode, int def_mode)
+{
+ struct nand_chip *this = mtd->priv;
+ struct nand_read_retry_param *cur_chip = this->cur_chip;
+ unsigned char *offset = NULL;
+ unsigned char *set_value = NULL;
+ unsigned int reg_num;
+ int rc = -1, i;
+ uint8_t cmd[1] = {NAND_SET_FEATURE};
+
+ if (mtd->dwRdmz)
+ reset_nfc(mtd, NULL, 3);
+
+ reg_num = cur_chip->retry_reg_num;
+ offset = cur_chip->retry_offset;
+ if (def_mode == ECC_ERROR_VALUE) {
+ set_value = cur_chip->retry_value + cur_chip->cur_try_times * reg_num;
+ cur_chip->cur_try_times++;
+ } else {
+ set_value = cur_chip->retry_def_value;
+ cur_chip->cur_try_times = 0;
+ }
+
+ #ifdef RETRY_DEBUG
+ printk("micron set value: cur_try_times=%d\n", cur_chip->cur_try_times);
+ for(i = 0; i < reg_num; i++)
+ printk(" 0x%x:0x%x ", offset[i], set_value[i]);
+ printk("reg_num = %d\n", reg_num);
+ #endif
+
+ for (i = 0; i < reg_num; i++) {
+ rc = write_bytes_cmd(mtd, 1, 1, 1, cmd, offset, set_value);
+ if (rc)
+ printk("micron read retry reg: phase %d fail\n", i);
+ }
+
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);//enable rdmz
+
+ return rc;
+}
+
+static int wmt_nand_read_raw_page(struct mtd_info *mtd, struct nand_chip *chip, int page);
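+/*
+ * Hynix keeps its read-retry table in an OTP area: enter OTP access mode
+ * (0x36 plus two register writes, then 0x16/0x17/0x04/0x19), raw-read page
+ * 0x200, and leave OTP mode with a reset followed by the retry-end command.
+ * The buffer holds the table together with its bitwise inverse; a copy whose
+ * data and inverse XOR to 0xFF is accepted and split into retry_def_value[]
+ * (first row) and retry_value[] (the remaining rows).
+ */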
+int hynix_get_otp(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_read_retry_param *cur_chip = chip->cur_chip;
+ int i, j, status = -1;
+ //unsigned char data[2] = {0x00, 0x4D};
+ unsigned char cmd[5] = {0x36, 0x16, 0x17, 0x04, 0x19};
+ //unsigned char addr[2] = {0xAE , 0xB0};
+ unsigned int page = 0x200;
+ unsigned char *buff, reset = NAND_CMD_RESET, retry_end = NAND_CMD_HYNIX_RETRY_END;
+ unsigned char *offset = cur_chip->otp_offset;
+ unsigned char *data = cur_chip->otp_data;
+ unsigned int retry_times, retry_regs, chk = 0;
+ unsigned char *bf, *bf2;
+
+ if (mtd->dwRdmz)
+ reset_nfc(mtd, NULL, 3);
+
+ printk("get otp offset addr: 0x%x, 0x%x\n", offset[0], offset[1]);
+ //chip->cmdfunc(mtd, NAND_CMD_RESET_NO_STATUS_READ, -1, -1);
+
+ status = write_bytes_cmd(mtd, 1, 0, 0, (uint8_t *)&reset, NULL, NULL);
+ if (status) {
+ printk("hynix_get_otp: reset fail\n");
+ }
+ status = write_bytes_cmd(mtd, 1, 1, 1, (uint8_t *)&cmd[0], (uint8_t *)&offset[0], (uint8_t *)&data[0]);
+ if (status)
+ printk("hynix_get_otp: phase 1 fail\n");
+ status = write_bytes_cmd(mtd, 0, 1, 1, NULL, (uint8_t *)&offset[1], (uint8_t *)&data[1]);
+ if (status)
+ printk("hynix_get_otp: phase 2 fail\n");
+ status = write_bytes_cmd(mtd, 4, 0, 0, (uint8_t *)&cmd[1], NULL, NULL);
+ if (status)
+ printk("hynix_get_otp: phase 3 fail\n");
+ //status = HY_nand_read(0, page, buf, 1026, ecc_code, nfc, 0);
+ wmt_nand_read_raw_page(mtd, chip, page);
+ /*if (status != 0) {
+ printk("load_hynix_opt_reg: phase 3 fail status = %d\n", status);
+ //return -1;
+ }*/
+ status = write_bytes_cmd(mtd, 1, 0, 0, (uint8_t *)&reset, NULL, NULL);
+ if (status) {
+ printk("hynix_get_otp: reset fail\n");
+ }
+ status = write_bytes_cmd(mtd, 1, 0, 0, (uint8_t *)&retry_end, NULL, NULL);
+ if (status) {
+ printk("hynix_get_otp: OTP end 0x38 fail\n");
+ }
+
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);//enable rdmz
+
+ print_nand_buffer((uint8_t *)info->dmabuf, 1040);
+ buff = info->dmabuf;
+ if (buff[0] > 8 || buff[1] > 8) {
+ printk("retry_cmd buff is not big enough for size %d\n", buff[0]*buff[1]);
+ return -1;
+ }
+
+ retry_times = buff[0];
+ retry_regs = buff[1];
+
+ cur_chip->total_try_times = buff[0] - 1;
+ cur_chip->retry_reg_num = buff[1];
+ for (i = 0; i < 16; i+=2) {
+ bf = &buff[i * retry_times * retry_regs + 2];
+ bf2 = &buff[(i+1) * retry_times * retry_regs + 2];
+ for (j = 0; j < (retry_times*retry_regs); j++) {
+ if ((bf[j] ^ bf2[j]) != 0xFF) {
+ printk("inverse check fail %x %x\n", bf[j], bf2[j]);
+ break;
+ }
+ }
+ if (j >= (retry_times*retry_regs)) {
+ chk = 1;
+ break;
+ }
+ }
+
+ if (chk == 0) {
+ printk("hynix: no valid OTP retry data found (inverse check failed)\n");
+ }
+
+ for (j = 0; j < retry_regs; j++)
+ cur_chip->retry_def_value[j] = bf[j];
+
+ print_nand_buffer(cur_chip->retry_def_value, retry_regs);
+
+ for (i = 0; i < (retry_times-1); i++) {
+ for (j = 0; j < retry_regs; j++) {
+ cur_chip->retry_value[i*retry_regs + j] = bf[(i+1)*retry_regs + j];
+ }
+ print_nand_buffer(&cur_chip->retry_value[i*retry_regs], retry_regs);
+ }
+ cur_chip->retry_def_value[buff[1]] = 0xff;
+ cur_chip->retry_def_value[buff[1]+1] = 0xff;
+
+
+ return 0;
+}
+
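+/*
+ * Load the per-chip retry parameters at probe time: prefer the chip's OTP
+ * table when a get_otp_table hook exists, otherwise fall back to
+ * get_parameter(), and also fetch the eSLC registers when the chip defines
+ * them.  On failure cur_chip is cleared so read retry is never attempted.
+ */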
+int nand_get_para(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ int ret = 0;
+ struct nand_read_retry_param *cur_chip = chip->cur_chip;
+
+ if (cur_chip->get_otp_table) {
+ ret = cur_chip->get_otp_table(mtd, chip);
+ if (ret) {
+ printk("get otp para error\n");
+ chip->cur_chip = NULL;
+ return ret;
+ } else
+ printk("get otp retry para end\n");
+ } else if (cur_chip->get_parameter) {
+ ret = cur_chip->get_parameter(mtd, READ_RETRY_MODE);
+ if (ret) {
+ printk("get default retry para error\n");
+ chip->cur_chip = NULL;
+ return ret;
+ } else
+ printk("get default retry para end\n");
+ }
+
+ if (cur_chip->eslc_reg_num) {
+ ret = cur_chip->get_parameter(mtd, ESLC_MODE);
+ if (ret) {
+ printk("get default eslc error\n");
+ chip->cur_chip = NULL;
+ } else
+ printk("get eslc param end\n");
+ }
+
+ print_nand_buffer((uint8_t *)cur_chip, sizeof(chip_table[0]));
+
+ return ret;
+}
+
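+/*
+ * Issue READID (0x90, address 0x00) through the legacy command port and latch
+ * six ID bytes, one single-byte read at a time, into info->dmabuf.
+ */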
+static int wmt_nand_readID(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int cfg = 0, i = 0;
+ int status = -1;
+
+ writeb(NAND_CMD_READID, info->reg + NFCR2_COMPORT0);
+ writeb(0x00, info->reg + NFCR3_COMPORT1_2);
+ cfg = DPAHSE_DISABLE|(0x02<<1);
+ writew(cfg|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+ status = wmt_wait_cmd_ready(mtd);
+ /* status = wmt_nfc_ready(mtd);*/
+
+ if (status) {
+ printk(KERN_ERR "in wmt_nand_readID(): wait cmd is not ready\n");
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ return status;
+ }
+ cfg = NAND2NFC|SING_RW;
+ for (i = 0; i < 6; i++) {
+ writew(cfg|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_wait_cmd_ready(mtd);
+ /* status = wmt_nfc_ready(mtd);*/
+ if (status)
+ return status;
+ status = wmt_nfc_transfer_ready(mtd);
+ /* status = wmt_nand_wait_idle(mtd);*/
+ if (status) {
+ printk(KERN_ERR "in wmt_nand_readID(): wait transfer cmd is not ready\n");
+ return status;
+ }
+ info->dmabuf[i] = readb(info->reg + NFCR0_DATAPORT) & 0xff;
+
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "readID is %x\n", readb(info->reg + NFCR0_DATAPORT));
+ #endif
+ }
+ info->datalen = 0;
+ return 0;
+}
+
+/* check whether the flash busy pin reports ready: returns 1 if ready, 0 otherwise */
+static int wmt_device_ready(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ return readb(info->reg + NFCRa_NFC_STAT) & 0x01;
+}
+
+
+static void wmt_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ if (mode == hardware_ecc)
+ writeb(readb(info->reg + NFCR9_ECC_BCH_CTRL) & (~DIS_BCH_ECC), info->reg + NFCR9_ECC_BCH_CTRL);
+ else
+ writeb(readb(info->reg + NFCR9_ECC_BCH_CTRL) | DIS_BCH_ECC, info->reg + NFCR9_ECC_BCH_CTRL);
+}
+
+/*static*/ void print_nand_register(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int j;
+
+ for (j = 0; j < 0x200; j += 16)
+ printk(KERN_NOTICE "NFCR%x ~ NFCR%x = 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\r\n",
+ j/4, (j+12)/4,
+ readl(info->reg + j + 0),
+ readl(info->reg + j + 4),
+ readl(info->reg + j + 8),
+ readl(info->reg + j + 12));
+}
+
+void print_nand_buffer(char *value, unsigned int length)
+{
+ int j;
+ for (j = 0; j < length; j += 16)
+ printk(KERN_NOTICE "Row%3.3x:%2.2x-%2.2x-%2.2x-%2.2x-%2.2x-%2.2x-%2.2x-%2.2x-%2.2x"
+ "-%2.2x-%2.2x-%2.2x-%2.2x-%2.2x-%2.2x-%2.2x\n",
+ j, value[j+0], value[j+1], value[j+2], value[j+3], value[j+4],
+ value[j+5], value[j+6], value[j+7], value[j+8], value[j+9],
+ value[j+10], value[j+11], value[j+12], value[j+13], value[j+14], value[j+15]);
+}
+void print_nand_buffer_int(unsigned int *value, unsigned int length)
+{
+ int j;
+ for (j = 0; j < length; j += 8)
+ printk(KERN_NOTICE"Row%3.3x:%8.2x-%8.2x-%8.2x-%8.2x-%8.2x-%8.2x-%8.2x-%8.2x\n",
+ j, value[j+0], value[j+1], value[j+2], value[j+3], value[j+4], value[j+5], value[j+6], value[j+7]);
+}
+
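+/*
+ * Program the column/row address bytes into the NFC COMPORT registers and
+ * report, via *address_cycle, how many address cycles were queued.  512-byte
+ * page devices use a single column byte, larger pages use two, and one extra
+ * row cycle is added once the chip size exceeds what two row bytes can
+ * address for that page size (the "> 128MiB" style checks below).
+ */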
+static void set_read_addr(struct mtd_info *mtd, unsigned int *address_cycle, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned int addr_cycle = 0;
+
+ if (column != -1) {
+ writeb(column, info->reg + NFCR3_COMPORT1_2);
+ addr_cycle++;
+ if (mtd->realwritesize != 512) {
+ writeb(column >> 8, (unsigned char *)(info->reg + NFCR3_COMPORT1_2) + 1);
+ addr_cycle++;
+ }
+ if (page_addr != -1) {
+ if (mtd->realwritesize != 512) {
+ writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+ page_addr >>= 8;
+ writeb(page_addr, (unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+ addr_cycle += 2;
+ } else {
+ writeb(page_addr, (unsigned char *)(info->reg + NFCR3_COMPORT1_2) + 1);
+ page_addr >>= 8;
+ writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+ addr_cycle += 2;
+ }
+
+ if (mtd->realwritesize == 2048) {
+ /* One more address cycle for devices > 128MiB */
+ if (chip->chipsize > (128 << 20)) {
+ page_addr >>= 8;
+ if (mtd->realwritesize != 512)
+ writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+ else
+ writeb(page_addr,
+ (unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+ addr_cycle++;
+ }
+ } else if (mtd->realwritesize == 4096) {
+ /* One more address cycle for devices > 256MiB */
+ if (chip->chipsize > (256 << 20)) {
+ page_addr >>= 8;
+ if (mtd->realwritesize != 512)
+ writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+ else
+ writeb(page_addr,
+ (unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+ addr_cycle++;
+ }
+ } else if (mtd->realwritesize == 8192) {
+ /* One more address cycle for devices > 512MiB */
+ if (chip->chipsize > (512 << 20)) {
+ page_addr >>= 8;
+ if (mtd->realwritesize != 512)
+ writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+ else
+ writeb(page_addr,
+ (unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+ addr_cycle++;
+ }
+ } else if (mtd->realwritesize == 16384) {
+ /* One more address cycle for devices > 1024MiB */
+ if (chip->chipsize > (1024 << 20)) {
+ page_addr >>= 8;
+ writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+ addr_cycle++;
+ }
+ } else {/*page size 512*/
+ /* One more address cycle for devices > 32MiB */
+ if (chip->chipsize > (32 << 20)) {
+ page_addr >>= 8;
+ if (mtd->realwritesize != 512)
+ writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+ else
+ writeb(page_addr,
+ (unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+ addr_cycle++;
+ }
+ }
+ }
+ /* } else if (page_addr != -1) {*/
+ } else if ((page_addr != -1) && (column == -1)) {
+ writeb(page_addr & 0xff, info->reg + NFCR3_COMPORT1_2);
+ page_addr >>= 8;
+ writeb(page_addr & 0xff, (unsigned char *)(info->reg + NFCR3_COMPORT1_2) + 1);
+ addr_cycle += 2;
+
+ if (mtd->realwritesize == 2048) {
+ /* One more address cycle for devices > 128MiB */
+ if (chip->chipsize > (128 << 20)) {
+ page_addr >>= 8;
+ writeb(page_addr & 0xff,
+ info->reg + NFCR4_COMPORT3_4);
+ addr_cycle++;
+ }
+ } else if (mtd->realwritesize == 4096) {
+ /* One more address cycle for devices > 256MiB */
+ if (chip->chipsize > (256 << 20)) {
+ page_addr >>= 8;
+ writeb(page_addr & 0xff,
+ info->reg + NFCR4_COMPORT3_4);
+ addr_cycle++;
+ }
+ } else if (mtd->realwritesize == 8192) {
+ /* One more address cycle for devices > 512MiB */
+ if (chip->chipsize > (512 << 20)) {
+ page_addr >>= 8;
+ writeb(page_addr & 0xff,
+ info->reg + NFCR4_COMPORT3_4);
+ addr_cycle++;
+ }
+ } else if (mtd->realwritesize == 16384) {
+ /* One more address cycle for devices > 1024MiB */
+ if (chip->chipsize > (1024 << 20)) {
+ page_addr >>= 8;
+ writeb(page_addr & 0xff,
+ info->reg + NFCR4_COMPORT3_4);
+ addr_cycle++;
+ }
+ } else {/*page size = 512 bytes */
+ /* One more address cycle for devices > 32MiB */
+ if (chip->chipsize > (32 << 20)) {
+
+ /* One more address cycle for devices > 128MiB */
+ /* if (chip->chipsize > (128 << 20)) {*/
+ page_addr >>= 8;
+ /* writeb(page_addr,
+ info->reg + NFCR4_COMPORT3_4 + 1); */
+ /* before, may be a little error */
+ writeb(page_addr & 0xff,
+ info->reg + NFCR4_COMPORT3_4);
+ addr_cycle++;
+ }
+ }
+ }
+ *address_cycle = addr_cycle;
+}
+
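+/*
+ * Multi-plane read start, Micron/Intel style: queue 0x00 <addr> 0x32 for the
+ * first plane, wait via wmt_nand_ready(), then issue 0x00 with the address of
+ * page + pagecnt and trigger 0x30 with the B2R interrupt unmasked so the ISR
+ * completes us.
+ */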
+static int wmt_multi_page_start_micron(struct mtd_info *mtd, unsigned command, int column, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ //struct nand_chip *chip = mtd->priv;
+ unsigned int pagecnt = mtd->pagecnt;
+ unsigned int b2r_stat;
+ int status = 0, i;
+ DECLARE_COMPLETION(complete);
+
+ uint8_t cmd[2] = {0x00, 0x32}, addr[5] = {0, 0, 0, 0, 0};
+
+ for (i = 0; i < 3; i++) {
+ addr[2+i] = 0xFF&(page>>(8*i));
+ }
+
+ status = write_bytes_cmd(mtd, 1, 5, 0, &cmd[0], addr, NULL);
+ if (status)
+ printk("micron multi read cmd(00) + addr fail\n");
+ status = write_bytes_cmd(mtd, 1, 0, 0, &cmd[1], NULL, NULL);
+ if (status)
+ printk("micron multi read cmd(32) + addr fail\n");
+
+ /* check busy to ready status*/
+ status = wmt_nand_ready(mtd);
+
+ for (i = 0; i < 3; i++) {
+ addr[2+i] = 0xFF&((page + pagecnt)>>(8*i));
+ }
+
+ status = write_bytes_cmd(mtd, 1, 5, 0, &cmd[0], addr, NULL);
+ if (status)
+ printk("micron multi read cmd(00) + addr fail\n");
+
+ writeb(0x30, info->reg + NFCR2_COMPORT0);
+
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ if (B2R&b2r_stat) {
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ status = wmt_wait_chip_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"The chip is not ready\n");
+ }
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ writeb(0x1B, info->reg + NFCR13_INT_MASK);
+
+ info->done_data = &complete;
+ info->isr_cmd = 0x60;
+
+ writew(DPAHSE_DISABLE|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ info->datalen = 0;
+
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ //writeb(0x80, info->reg + NFCR13_INT_MASK);
+
+ status = wmt_nfc_wait_idle(mtd, 1, 1, -1, -1); /* multi-plane read start, don't check ecc */
+ //b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ //writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ status = wmt_wait_cmd_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "Multi_read_start err: nfc command is not ready\n");
+ }
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ return 0;
+}
+
+static int wmt_multi_page_start(struct mtd_info *mtd, unsigned command, int column, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned int pagecnt = mtd->pagecnt;
+ unsigned int b2r_stat;
+ int status = 0;
+ DECLARE_COMPLETION(complete);
+
+ chip->cmdfunc(mtd, 0x60, -1, page);
+ chip->cmdfunc(mtd, 0x60, -1, page + pagecnt);
+
+ writeb(0x30, info->reg + NFCR2_COMPORT0);
+
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ if (B2R&b2r_stat) {
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ status = wmt_wait_chip_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"The chip is not ready\n");
+ }
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ writeb(0x1B, info->reg + NFCR13_INT_MASK);
+
+ info->done_data = &complete;
+ info->isr_cmd = 0x60;
+
+ writew(DPAHSE_DISABLE|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ info->datalen = 0;
+
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ //writeb(0x80, info->reg + NFCR13_INT_MASK);
+
+ status = wmt_nfc_wait_idle(mtd, 1, 1, -1, -1); /* multi-plane read start, don't check ecc */
+ //b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ //writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ status = wmt_wait_cmd_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "Multi_read_start err: nfc command is not ready\n");
+ }
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ return 0;
+}
+//unsigned int r1,r2,r3,r4,r5,r6,r7,r8,r9,r10;
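+/*
+ * Second phase of a multi-plane read: re-issue 0x00 with the full address,
+ * then a 0x05 (RNDOUT) column address and a 0xE0 (RNDOUTSTART) pushed through
+ * the command FIFO, and DMA one page (plus an extra 1KiB for
+ * non-power-of-two page sizes) out of the controller.
+ */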
+static int wmt_multi_page_read(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ //struct nand_chip *chip = mtd->priv;
+ DECLARE_COMPLETION(complete);
+ unsigned int addr_cycle = 0 /*b2r_stat, bank_stat1, bank_stat2=0*/;
+ int status = -1;
+ unsigned char *FIFO = (unsigned char *) (info->reg+ECC_FIFO_c);
+
+ info->isr_cmd = command;
+ info->data_ecc_uncor_err = 0;
+ info->dma_finish = 0;
+ info->done_data = &complete;
+
+ set_read_addr(mtd, &addr_cycle, column, page_addr);
+
+ writeb(NAND_CMD_READ0, info->reg + NFCR2_COMPORT0);
+ //printk("multi read page=%x blk=%d, addr_cycle=%d trig=%x\n",page_addr, page_addr/128, addr_cycle, DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD);
+ //print_nand_register(mtd);
+ writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ wmb();
+ status = wmt_wait_cmd_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "Multi_read_s2 err: nfc command is not ready\n");
+ }
+
+ addr_cycle = 0;
+ if (column != -1) {
+ writeb(column, info->reg + NFCR3_COMPORT1_2);
+ writeb(column, info->reg + NFCR3_COMPORT1_2 + 1);
+ addr_cycle += 2;
+ }
+
+ writeb(NAND_CMD_RNDOUT, info->reg + NFCR2_COMPORT0);
+ writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ wmb();
+ status = wmt_wait_cmd_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "Multi_read_s2 err: nfc command is not ready\n");
+ }
+
+ writeb(0x1C, info->reg + NFCR13_INT_MASK);
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) | HIGH64FIFO, info->reg + NFCRd_OOB_CTRL);
+ FIFO[0] = NAND_CMD_RNDOUTSTART;
+ FIFO[3] = 0xFF&page_addr;
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & ~HIGH64FIFO, info->reg + NFCRd_OOB_CTRL);
+ writel(0x80001, info->reg + NFCRc_CMD_ADDR);
+
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize + 1024, 0, -1, -1);
+ else
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize, 0, -1, -1);//r3 = wmt_read_oscr();
+
+ info->datalen = 0;
+
+ //printk("2page=%x blk=%d, addr_cycle=%d trig=%x\n",page_addr, page_addr/256, addr_cycle, NAND2NFC|MUL_CMDS|((addr_cycle + 2)<<1)|NFC_TRIGGER|OLD_CMD);
+//print_nand_register(mtd);
+ //writew(NAND2NFC|MUL_CMDS|((addr_cycle + 2)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ //writew(NAND2NFC|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ writew(NAND2NFC|(1<<1)|NFC_TRIGGER, info->reg + NFCR1_COMCTRL);
+ wmb();
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ if (info->dma_finish != 1) {
+ printk("read page wait dma time out info->dma_finish=%d\n",info->dma_finish);
+ print_nand_register(mtd);
+ dump_stack();
+ while(info->dma_finish == 0) {
+ if (readl(info->reg + NFC_DMA_ISR)&1) {
+ writel(0, info->reg + NFC_DMA_IER);
+ info->dma_finish++;
+ if (info->done_data != NULL) {
+ //complete(info->done_data);
+ info->done_data = NULL;
+ }
+ }
+ }
+ }
+
+ status = nand_pdma_handler(mtd);
+ nand_free_pdma(mtd);
+ if (status)
+ printk(KERN_ERR "dma transfer data time out: %x\n",
+ readb(info->reg + NFCRa_NFC_STAT));
+
+ wmt_nfc_transfer_ready(mtd);
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ status = wmt_nfc_wait_idle(mtd, 0, command, column, page_addr);
+ if (status) {
+ printk(KERN_NOTICE"multi-read page wait idle status =%d\n", status);
+ }
+ return 0;
+}
+
+static int wmt_dma_ready(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i = 0;
+
+ while (1) {
+ if ((readb(info->reg + NFC_DMA_ISR) & NAND_PDMA_IER_INT_STS))
+ break;
+
+ if (++i>>20)
+ return -3;
+ }
+ return 0;
+}
+
+//#define RE_PORFO
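+/*
+ * Normal page read with hardware BCH and DMA.  On an uncorrectable ECC error,
+ * and when the chip supports read retry (cur_chip != NULL), the page is
+ * re-read in a loop that steps the vendor-specific retry registers each pass;
+ * once a pass succeeds the default registers are restored (the first word of
+ * the DMA buffer is preserved across that restore).  If every retry level
+ * fails, ecc_stats.failed is bumped and the error is returned.
+ */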
+static int wmt_nand_page_read(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_chip *chip = mtd->priv;
+ struct nand_read_retry_param *cur_chip = chip->cur_chip;
+ unsigned int addr_cycle = 0, b2r_stat;
+ int status = -1;
+ unsigned int bank_stat, id = 0, pageInBlk = 0;
+ int i, total_times = 1, total_try_times = 0, tmp = 0;
+ unsigned char reset = NAND_CMD_RESET, retry_enable =0xB6, retry_disable = 0xD6;
+ DECLARE_COMPLETION(complete);
+
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "read data cmd: 0x%x col:0x%x, page:0x%x\n", command, column, page_addr);
+ #endif
+ /*info->phase = 0;
+ if (readl(info->reg + NFCR9_ECC_BCH_CTRL) & DIS_BCH_ECC)
+ info->phase = 2;*/
+
+ if (cur_chip != NULL) {
+ total_times = cur_chip->total_try_times + 1;
+ id = (cur_chip->nand_id>>24)&0xFF;
+ if (id == NAND_MFR_SANDISK) {
+ pageInBlk = page_addr%mtd->pagecnt;
+ if (((pageInBlk%2) == 1 || pageInBlk == 0) && pageInBlk != (mtd->pagecnt - 1))
+ total_try_times = cur_chip->total_try_times&0xFF;//Lower page
+ else
+ total_try_times = (cur_chip->total_try_times>>8)&0xFF;//Upper page
+ } else
+ total_try_times = cur_chip->total_try_times&0xFF;
+ //printk("read page--cur_times = %d, totoal_times = %d \n", cur_chip->cur_try_times, total_times);
+ }
+ //cur_chip->cur_try_times = 4;
+ for (i = 0; i < total_times; i++) {
+ info->unc_bank = 0;
+ info->unc_allFF = 0;
+ if (i > 0)
+ info->isr_cmd = command;
+
+ info->data_ecc_uncor_err = 0;
+ info->dma_finish = 0;
+ writeb(0x1C, info->reg + NFCR13_INT_MASK);
+ info->done_data = &complete;
+ /* 1: read, 0:data, -1: */
+ if (info->phase == 2) {//disable bch read
+ tmp = (mtd->realoobsize > 512) ? mtd->realoobsize : 512;
+ wmt_nfc_dma_cfg(mtd, tmp, 0, -1, -1);
+ } else {
+ if (info->oob_ecc_error == 0x50) {//read last bank for oob in DDR mode
+ wmt_nfc_dma_cfg(mtd, chip->ecc.size, 0, -1, -1);
+ } else {//read whole page
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize + 1024, 0, -1, -1);
+ else
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize, 0, -1, -1);
+ }
+ }
+ /*print_nand_register(mtd);*/
+ wmb();
+ info->datalen = 0;
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ /* printk(KERN_NOTICE "RB is %d\n", b2r_stat & 0x02);*/
+
+ set_read_addr(mtd, &addr_cycle, column, page_addr);
+
+ bank_stat = readw(info->reg + NFCRb_NFC_INT_STAT);
+ writew(bank_stat|0x101, info->reg + NFCRb_NFC_INT_STAT);
+
+ status = wmt_wait_chip_ready(mtd); /*Vincent 2008.11.3*/
+ if (status)
+ printk(KERN_ERR "The chip is not ready\n");
+ writeb(NAND_CMD_READ0, info->reg + NFCR2_COMPORT0);
+ if (addr_cycle == 4)
+ writeb(NAND_CMD_READSTART, info->reg + NFCR5_COMPORT5_6);
+ else if (addr_cycle == 5)
+ writeb(NAND_CMD_READSTART, (unsigned char *)(info->reg + NFCR5_COMPORT5_6) + 1);
+ wmb();
+ writew(NAND2NFC|MUL_CMDS|((addr_cycle + 2)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ wmb();
+ //printk("read page wait for completion\n");
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ if (info->dma_finish != 1) {
+ printk("read page wait dma time out info->dma_finish=%d\n",info->dma_finish);
+ print_nand_register(mtd);
+ dump_stack();
+ while(info->dma_finish == 0) {
+ if (readl(info->reg + NFC_DMA_ISR)&1) {
+ writel(0, info->reg + NFC_DMA_IER);
+ info->dma_finish++;
+ if (info->done_data != NULL) {
+ //complete(info->done_data);
+ info->done_data = NULL;
+ }
+ }
+ }
+ }
+ status = nand_pdma_handler(mtd);
+ //printk(KERN_ERR "check status pdma handler status= %x \n", status);
+ nand_free_pdma(mtd);
+ if (status)
+ printk(KERN_ERR "dma transfer data time out: %x\n",
+ readb(info->reg + NFCRa_NFC_STAT));
+//printk("read page 3\n");
+ wmt_nfc_transfer_ready(mtd);
+ /*status = wmt_nand_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"B2R not clear status=0x%x\n", status);*/
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+//printk("read page 4\n");
+ status = wmt_nfc_wait_idle(mtd, 0, command, column, page_addr);
+//printk("read page 5\n");
+ if (status) {
+ printk(KERN_NOTICE"read page wait idle status =%d\n", status);
+ /*print_nand_register(mtd);*/
+ /*while(1);*/
+ }
+ if (info->unc_allFF == 0 && info->unc_bank && mtd->dwRetry == 0) {
+ mtd->ecc_stats.failed++;
+ printk("no retry flash occur uncoverable ecc error uncor_err=%d\n", info->data_ecc_uncor_err);
+ }
+
+ if(info->data_ecc_uncor_err == 1) {
+ if((cur_chip != NULL)) {
+ mtd->ecc_err_cnt = 0;
+ if (prob_end == 1 && page_addr < ((mtd->blkcnt - 8) * mtd->pagecnt)){
+ if((id != NAND_MFR_HYNIX) ||((id == NAND_MFR_HYNIX) && (cur_chip->cur_try_times >=5)))
+ printk("Unc_Err %d_th pg=0x%x cur_retry=%d\n", i, page_addr, cur_chip->cur_try_times);
+ }
+
+ if (id == NAND_MFR_HYNIX) {
+ //printk("set retry mode cur_try_times=%d\n", cur_chip->cur_try_times);
+ cur_chip->set_parameter(mtd, READ_RETRY_MODE, ECC_ERROR_VALUE);
+ cur_chip->retry = 1;
+
+ if (i == total_try_times) {
+ cur_chip->retry = 0;
+ /* read retry many times still ecc uncorrectable error */
+ cur_chip->set_parameter(mtd, READ_RETRY_MODE, DEFAULT_VALUE);
+ if (prob_end == 1 && page_addr < ((mtd->blkcnt - 8) * mtd->pagecnt))
+ printk("read page after retry still uncor err\n");
+ mtd->ecc_stats.failed++;
+ //dump_stack();
+ //while(cur_chip);
+ return status;
+ }
+ } else if (id == NAND_MFR_TOSHIBA) {
+ if (cur_chip->cur_try_times >= total_try_times) {
+ /* send reset cmd after read retry finish(fail) for toshiba */
+ write_bytes_cmd(mtd, 1, 0, 0, (uint8_t *)&reset, NULL, NULL);
+ cur_chip->cur_try_times = 0;
+ cur_chip->retry = 0;
+ if (prob_end == 1 && page_addr < ((mtd->blkcnt - 8) * mtd->pagecnt))
+ printk("read page after retry still uncor err\n");
+ mtd->ecc_stats.failed++;
+ //while(cur_chip);
+ return status;
+ }
+ if (cur_chip->cur_try_times == 0 && cur_chip->retry != 1)
+ toshiba_pre_condition(mtd);
+ cur_chip->set_parameter(mtd, 0, 0);
+ cur_chip->retry = 1;
+ } else if (id == NAND_MFR_SAMSUNG || id == NAND_MFR_MICRON) {
+ if (cur_chip->cur_try_times >= total_try_times) {
+ /* send default cmd after read retry finish(fail) for samsung */
+ cur_chip->set_parameter(mtd, READ_RETRY_MODE, DEFAULT_VALUE);
+ cur_chip->cur_try_times = 0;
+ cur_chip->retry = 0;
+ if (prob_end == 1 && page_addr < ((mtd->blkcnt - 8) * mtd->pagecnt))
+ printk("read page after retry still uncor err\n");
+ mtd->ecc_stats.failed++;
+ //while(cur_chip);
+ return status;
+ }
+ cur_chip->set_parameter(mtd, READ_RETRY_MODE, ECC_ERROR_VALUE);
+ cur_chip->retry = 1;
+ } else if (id == NAND_MFR_SANDISK) {
+ //printk("set retry mode cur_try_times=%d\n", cur_chip->cur_try_times);
+ cur_chip->set_parameter(mtd, total_try_times, ECC_ERROR_VALUE);
+ if (i == 0 && cur_chip->retry != 1)
+ write_bytes_cmd(mtd, 1, 0, 0, &retry_enable, NULL, NULL);
+ cur_chip->retry = 1;
+
+ if (i == total_try_times) {
+ write_bytes_cmd(mtd, 1, 0, 0, &retry_disable, NULL, NULL);
+ cur_chip->retry = 0;
+ /* read retry many times still ecc uncorrectable error */
+ if (prob_end == 1 && page_addr < ((mtd->blkcnt - 8) * mtd->pagecnt))
+ printk("read page after retry still uncor err\n");
+ mtd->ecc_stats.failed++;
+ //while(cur_chip);
+ return status;
+ }
+ }
+ } else {
+ printk("read page uncor err but cur_chip = NULL!\n");
+ break;
+ }
+ } else {
+ if (cur_chip) {
+ unsigned int backup;
+ if (cur_chip->retry == 1) {
+ if((id != NAND_MFR_HYNIX) || ((id == NAND_MFR_HYNIX)&&(cur_chip->cur_try_times >= 5)))
+ printk("read retry PASS cur_try_times=%d\n", cur_chip->cur_try_times);
+ /* preserve the first data word across the register restore below */
+ backup = *(uint32_t *)info->dmabuf;
+ } else
+ break;
+ /* send reset cmd after read retry finish(pass) for toshiba */
+ if (id == NAND_MFR_TOSHIBA) {
+ write_bytes_cmd(mtd, 1, 0, 0, (uint8_t *)&reset, NULL, NULL);
+ printk("reset cmd to finish retry\n");
+ cur_chip->cur_try_times = 0;
+ } else if (id == NAND_MFR_SAMSUNG || id == NAND_MFR_MICRON) {
+ cur_chip->set_parameter(mtd, READ_RETRY_MODE, DEFAULT_VALUE);
+ cur_chip->cur_try_times = 0;
+ } else if (id == NAND_MFR_SANDISK) {
+ write_bytes_cmd(mtd, 1, 0, 0, &retry_disable, NULL, NULL);
+ /* retry default values must be restored before the next page program */
+ cur_chip->set_parameter(mtd, total_try_times, DEFAULT_VALUE);
+ //should we reset cur_try_times to zero?
+ cur_chip->cur_try_times = -1;
+ } else if (id == NAND_MFR_HYNIX) {
+ cur_chip->set_parameter(mtd, READ_RETRY_MODE, DEFAULT_VALUE);
+ cur_chip->cur_try_times = -1;
+ }
+ cur_chip->retry = 0;
+ *(uint32_t *)info->dmabuf = backup;
+ }
+ break;
+ }
+ } //end of retry for loop
+
+ return 0;
+}
+#if 0
+static int wmt_multi_copy_start(struct mtd_info *mtd, unsigned command, int column, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_chip *chip = mtd->priv;
+
+ unsigned int div = mtd->erasesize / mtd->writesize;
+
+ unsigned int b2r_stat;
+
+ int status = 0;
+
+ chip->cmdfunc(mtd, 0x60, -1, page);
+
+ chip->cmdfunc(mtd, 0x60, -1, page + div);
+
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writeb(0x35, info->reg + NFCR2_COMPORT0);
+
+ writew(NAND2NFC|DPAHSE_DISABLE|1<<1|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);// cost lots of time
+
+ status = wmt_wait_cmd_ready(mtd);
+
+ if (status) {
+
+ printk(KERN_ERR "Multi_read err: nfc command is not ready\n");
+ }
+
+ return 0;
+}
+static int wmt_multi_copy_read(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ //struct nand_chip *chip = mtd->priv;
+
+ unsigned int addr_cycle = 0;//, b2r_stat, bank_stat1, bank_stat2=0;
+ int status = -1;
+ //unsigned int bank_stat, id = 0, pageInBlk = 0;
+
+ set_read_addr(mtd, &addr_cycle, column, page_addr);
+ // bank_stat = readw(info->reg + NFCRb_NFC_INT_STAT);
+ // writew(bank_stat|0x101, info->reg + NFCRb_NFC_INT_STAT);
+
+ //status = wmt_wait_chip_ready(mtd); /*Vincent 2008.11.3*/ //problem
+
+ // if (status)
+ // printk(KERN_ERR "The chip is not ready\n");
+ writeb(NAND_CMD_READ0, info->reg + NFCR2_COMPORT0);
+
+ writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_wait_cmd_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "Multi_read err: nfc command is not ready\n");
+ }
+
+ addr_cycle = 0;
+ if (column != -1) {
+ writeb(column, info->reg + NFCR3_COMPORT1_2);
+ writeb(column, info->reg + NFCR3_COMPORT1_2 + 1);
+ addr_cycle += 2;
+ }
+
+ // writeb(0x07, info->reg + WMT_NFC_REDUNT_ECC_STAT);
+ // writel(0xffffffff, info->reg + WMT_NFC_BANK18_ECC_STAT);
+ // b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ // writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writeb(NAND_CMD_RNDOUT, info->reg + NFCR2_COMPORT0);
+
+ writeb(NAND_CMD_RNDOUTSTART, info->reg + NFCR4_COMPORT3_4);
+
+ writew(DPAHSE_DISABLE|MUL_CMDS|((addr_cycle + 2)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_nfc_wait_idle(mtd, 0, command, column, page_addr);
+ if(status) {
+ printk(KERN_NOTICE"WaitIdle is not ready=%d\n", status);
+ }
+ return status;
+}
+
+static int wmt_multi_copy_write(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int div = mtd->erasesize / mtd->writesize;
+ unsigned int addr_cycle = 0;
+ int status = -1;
+ int b2r_stat = 0;
+
+ set_read_addr(mtd, &addr_cycle, column, page_addr);
+ writeb(0x85, info->reg + NFCR2_COMPORT0);
+ writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_wait_cmd_ready(mtd);
+ if (status)
+ printk(KERN_ERR "erase command is not ready\n");
+
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writeb(0x11, info->reg + NFCR2_COMPORT0);
+ writew(DPAHSE_DISABLE|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+ status = wmt_nand_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"B2R not clear status=0x%x\n", status);
+ status = wmt_nfc_wait_idle(mtd, 0, command, column, page_addr);
+
+ if (status) {
+ printk(KERN_NOTICE"read page wait idle status =%d\n", status);
+ }
+ set_read_addr(mtd, &addr_cycle, column, page_addr+div);
+ writeb(0x81, info->reg + NFCR2_COMPORT0);
+ writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_wait_cmd_ready(mtd);
+ if (status)
+ printk(KERN_ERR "command is not ready\n");
+
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writeb(0x10, info->reg + NFCR2_COMPORT0);
+ writew(DPAHSE_DISABLE|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+ status = wmt_nand_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"B2R not clear status=0x%x\n", status);
+ status = wmt_nfc_wait_idle(mtd, 0, command, column, page_addr);
+
+ if (status) {
+ printk(KERN_NOTICE"read page wait idle status =%d\n", status);
+ }
+ //printk("\n wmt_copy_back_write is OK!");
+ return status;
+}
+
+static int wmt_copy_back_read(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int addr_cycle = 0, b2r_stat;
+ int status = -1;
+
+ set_read_addr(mtd, &addr_cycle, column, page_addr);
+
+ writeb(NAND_CMD_READ0, info->reg + NFCR2_COMPORT0);
+
+ writew(NAND2NFC|DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD,info->reg + NFCR1_COMCTRL);
+
+ status = wmt_wait_cmd_ready(mtd);
+ if (status)
+ printk(KERN_ERR "Read 0x00 cmd is not ready\n");
+
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writeb(0x35, info->reg + NFCR2_COMPORT0);
+
+ writew(DPAHSE_DISABLE|1<<1|NFC_TRIGGER|OLD_CMD,info->reg + NFCR1_COMCTRL);
+
+ status = wmt_nand_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"B2R not clear status=0x%x\n", status);
+ status = wmt_nfc_wait_idle(mtd, 0, command, column, page_addr);
+
+ if (status) {
+ printk(KERN_NOTICE"read page wait idle status =%d\n", status);
+ }
+ //printk("\n wmt_copy_back_read is OK! ");
+ return status;
+}
+
+static int wmt_copy_back_write(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int addr_cycle = 0;
+ int status = -1;
+ int b2r_stat = 0;
+ set_read_addr(mtd, &addr_cycle, column, page_addr);
+ writeb(0x85, info->reg + NFCR2_COMPORT0);
+ writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_wait_cmd_ready(mtd);
+ if (status)
+ printk(KERN_ERR "erase command is not ready\n");
+
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writeb(0x10, info->reg + NFCR2_COMPORT0);
+ writew(DPAHSE_DISABLE|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+ status = wmt_nand_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"B2R not clear status=0x%x\n", status);
+ status = wmt_nfc_wait_idle(mtd, 0, command, column, page_addr);
+
+ if (status) {
+ printk(KERN_NOTICE"read page wait idle status =%d\n", status);
+ }
+ //printk("\n wmt_copy_back_write is OK!");
+ return status;
+}
+#endif
+
+
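+/*
+ * Read only the spare area: same 0x00/0x30 sequence as a full page read, but
+ * without a DMA transfer; the routine just waits for the B2R completion, the
+ * transfer-ready flag and NFC idle before returning.
+ */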
+static void wmt_nand_oob_read(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ //struct nand_chip *chip = mtd->priv;
+ unsigned int addr_cycle = 0, b2r_stat;
+ int status = -1;
+ unsigned int bank_stat;
+ int mycolumn = column, mypage_addr = page_addr;
+ DECLARE_COMPLETION(complete);
+
+ info->data_ecc_uncor_err = 0;
+
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "wmt_nand_oob_read: readoob col=0x%x, page=0x%x\n", column, page_addr);
+ #endif
+
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ writeb(0x18, info->reg + NFCR13_INT_MASK);
+ info->done_data = &complete;
+
+ info->datalen = 0;
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ /* printk(KERN_NOTICE "RB is %d\n", b2r_stat & 0x02);*/
+
+ set_read_addr(mtd, &addr_cycle, column, page_addr);
+
+ bank_stat = readw(info->reg + NFCRb_NFC_INT_STAT);
+ if (bank_stat)
+ writew(B2R|(ERR_CORRECT | BCH_ERR), info->reg + NFCRb_NFC_INT_STAT);
+
+ status = wmt_wait_chip_ready(mtd); /*Vincent 2008.11.3*/
+ if (status)
+ printk(KERN_ERR "The chip is not ready\n");
+ writeb(NAND_CMD_READ0, info->reg + NFCR2_COMPORT0);
+ if (addr_cycle == 4)
+ writeb(NAND_CMD_READSTART, info->reg + NFCR5_COMPORT5_6);
+ else if (addr_cycle == 5)
+ writeb(NAND_CMD_READSTART, (unsigned char *)(info->reg + NFCR5_COMPORT5_6) + 1);
+
+ writew(NAND2NFC|MUL_CMDS|((addr_cycle + 2)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+
+ /* read oob has no dma but assert B2R status */
+ //printk("read oob wait for completion");
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ status = wmt_nfc_transfer_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"oob read wait NFC_BUSY time out\n");
+ //wmt_nand_ready(mtd);
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+
+ status = wmt_nfc_wait_idle(mtd, 0, command, mycolumn, mypage_addr);
+
+ if (status) {
+ if (status == -4)
+ return;
+ printk(KERN_ERR "wmt_nfc_wait_idle status =%d\n", status);
+ printk(KERN_ERR "command =0x%x\n", command);
+ printk(KERN_ERR "Read ERR ,NFC is not idle\n");
+ /*print_nand_register(mtd);*/
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ /*while(1);*/
+ }
+//printk(KERN_NOTICE "rbe|");
+ return;
+}
+
+/**
+ * wmt_isbad_bbt - [NAND Interface] Check if a block is bad
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @block: block number to check against the bad block table
+ */
+int wmt_isbad_bbt(struct mtd_info *mtd, struct nand_chip *chip, int block)
+{
+ uint8_t res;
+
+ if (!mtd || !chip) {
+ printk(KERN_ERR "nand not init, check bad block fail.\n");
+ return 1;
+ }
+ if (!chip->bbt) {
+ printk(KERN_ERR "nand bbt not init, check bad block fail.\n");
+ return 1;
+ }
+ /* dannier test nandwrite tool */
+ #if 0
+ if (block == 339 || block == 342 || block == 344) {
+ //if (block == 338 || block == 340 || block == 341 || block == 343) {
+ printk("blk%d --->bad\n", block);
+ return 1;
+ }
+ #endif
+
+ /* Get block number * 2 */
+ block <<= 1;
+ res = (chip->bbt[block >> 3] >> (block & 0x06)) & 0x03;
+
+ switch ((int)res) {
+ case 0x00:
+ return 0;
+ case 0x01:
+ return 1;
+ case 0x02:
+ return 1;
+ }
+ return 1;
+}
+
+/**
+ * wmt_isbad_bbt_multi - [NAND Interface] Check if a block is bad (multi-plane,
+ * 4 BBT bits per block)
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @block: block number to check against the bad block table
+ */
+int wmt_isbad_bbt_multi(struct mtd_info *mtd, struct nand_chip *chip, int block)
+{
+ uint8_t res;
+
+ if (!mtd || !chip) {
+ printk(KERN_ERR "nand not init, check bad block fail.\n");
+ return 1;
+ }
+ if (!chip->bbt) {
+ printk(KERN_ERR "nand bbt not init, check bad block fail.\n");
+ return 1;
+ }
+ /* dannier test nandwrite tool */
+ #if 0
+ if (block == 339 || block == 342 || block == 344) {
+ //if (block == 338 || block == 340 || block == 341 || block == 343) {
+ printk("blk%d --->bad\n", block);
+ return 1;
+ }
+ #endif
+
+ /* Get block number * 4 */
+ block <<= 2;
+ res = (chip->bbt[block >> 3] >> (block & 0x4)) & 0x0F;
+
+ switch ((int)res) {
+ case 0x00:
+ return 0;
+ case 0x01:
+ case 0x04:
+ case 0x05:
+ return 1;
+ }
+ return 1;
+}
+
+//#define ESLC_DEBUG
+#define ESLC_READ_WRITE
+#ifdef ESLC_READ_WRITE
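+/*
+ * Map a logical page onto its eSLC location inside the Hynix partitions
+ * (bounded by the par1..par4 offsets): in eSLC mode a block carries only half
+ * the usual pages, selected through eslc_map_table, so two good physical
+ * blocks are consumed per logical block.  Bad blocks are skipped using the
+ * BBT.  Returns the physical page number, or -1 if the partition has no good
+ * block left.
+ */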
+static int hynix_eslc_page_address_calculate(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ int status = -1, page_in_blk, par_page_start = 0, par_page_end, block;
+ int good_blk = 0, bad_blk = 0, par_blk_start, par_blk_end, i, j, blk_page_shift;
+ unsigned int par_blk_ofs = 0, real_need_blk, real_page;
+
+ blk_page_shift = chip->phys_erase_shift - chip->page_shift;
+ block = page >> blk_page_shift;
+ page_in_blk = page%mtd->pagecnt;
+
+ if (page < par1_ofs/4) {
+ par_page_start = 0;
+ par_page_end = par1_ofs/4;
+ } else if (page < par1_ofs) {
+ par_page_start = par1_ofs/4;
+ par_page_end = par1_ofs;
+ } else if (page < par2_ofs) {
+ par_page_start = par1_ofs;
+ par_page_end = par2_ofs;
+ } else if (page < par3_ofs) {
+ par_page_start = par2_ofs;
+ par_page_end = par3_ofs;
+ } else {
+ par_page_start = par3_ofs;
+ par_page_end = par4_ofs;
+ }
+ par_blk_start = par_page_start >> blk_page_shift;
+ par_blk_end = par_page_end >> blk_page_shift;
+ par_blk_ofs = block - par_blk_start;
+
+ for (j = par_blk_start; j < block; j++) {
+ if (chip->realplanenum)
+ status = wmt_isbad_bbt_multi(mtd, chip, j);
+ else
+ status = wmt_isbad_bbt(mtd, chip, j);
+ if (status) {
+ #ifdef ESLC_DEBUG
+ if (page_in_blk == 0 || page_in_blk == (mtd->pagecnt/2))
+ printk("skip blk%d bad\n", j);
+ #endif
+ bad_blk++;
+ }
+ }
+ par_blk_ofs = par_blk_ofs - bad_blk;
+ real_need_blk = par_blk_ofs*2 + ((page_in_blk >= (mtd->pagecnt/2)) ? 1 : 0);
+
+ for (i = par_blk_start; i < par_blk_end; i++) {
+ //printk("i=%d, par_blk_start=0x%x, par_blk_end=0x%x real_need_blk=0x%x\n", i, par_blk_start, par_blk_end, real_need_blk);
+ if (chip->realplanenum)
+ status = wmt_isbad_bbt_multi(mtd, chip, i);
+ else
+ status = wmt_isbad_bbt(mtd, chip, i);
+ if (status == 0) {
+ #ifdef ESLC_DEBUG
+ if (page_in_blk == 0 || page_in_blk == (mtd->pagecnt/2))
+ printk("blk%d good\n",i);
+ #endif
+ good_blk++;
+ }
+ if (good_blk >= (real_need_blk + 1)) {
+ #ifdef ESLC_DEBUG
+ if (page_in_blk == 0 || page_in_blk == (mtd->pagecnt/2))
+ printk("wr blk%d \n",i);
+ #endif
+ break;
+ }
+ }
+ if (i >= par_blk_end) {
+ if (page_in_blk == 0 || page_in_blk == (mtd->pagecnt/2))
+ printk(KERN_ERR "eslc addr is out of partition size, skip page=0x%x"
+ ", par_page_end=0x%x, end_blk=%d\n", page, par_page_end, i);
+ return -1;
+ }
+ real_page = (i << blk_page_shift) + eslc_map_table[(page_in_blk%(mtd->pagecnt/2))];
+ if (page_in_blk == 0 || page_in_blk == (mtd->pagecnt/2))
+ printk(KERN_NOTICE "page = 0x%x ======> eslc page = 0x%x\n", page, real_page);
+
+ return real_page;
+}
+#endif
+
+/*
+ * wmt_nand_cmdfunc - Send command to NAND large page device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This is the version for the new large page
+ * devices. We don't have the separate regions that the small page devices
+ * have, so NAND_CMD_READOOB must be emulated to keep the code compatible.
+ */
+static void wmt_nand_cmdfunc(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned int addr_cycle = 0, b2r_stat, pmc_nand, chip_en, tmp;
+ int status = -1, i;
+ int mycolumn, mypage_addr;
+ DECLARE_COMPLETION(complete);
+
+ if (!chip->realplanenum && (command == NAND_CMD_READ0)) {
+ info->cur_lpage = page_addr;
+ if (page_addr >= ((mtd->blkcnt - 8)*mtd->pagecnt))
+ mtd->bbt_sw_rdmz = 1;
+ else
+ mtd->bbt_sw_rdmz = 0;
+ }
+//printk(KERN_DEBUG "cmd %x col:%x, page:0x%x hold=0x%x \n", command, column, page_addr, ((mtd->blkcnt - 8)*mtd->pagecnt));
+ if (mtd->id == 0xECDED57A) {
+ if (page_addr >= (4096*128)) {
+ page_addr = page_addr + 0x80000;
+ //printk(KERN_NOTICE "cmd %x col:%x, page:0x%x\n", command, column, page_addr);
+ }
+ } else if (command == NAND_CMD_READ0 && chip->cur_chip && prob_end == 1 &&
+ (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX) {
+ #ifdef ESLC_READ_WRITE
+ /* command is already NAND_CMD_READ0 here, so only the plane/partition checks remain */
+ if (!chip->realplanenum && page_addr < par4_ofs && second_chip == 0) {
+ #ifdef ESLC_DEBUG
+ if (page_addr%mtd->pagecnt == 0 || page_addr%mtd->pagecnt == (mtd->pagecnt/2))
+ printk("\ncmdfunc: \n");
+ #endif
+ page_addr = hynix_eslc_page_address_calculate(mtd, chip, page_addr);
+ if (page_addr < 0)
+ return;
+ }
+ #endif
+ }
+ mycolumn = column;
+ mypage_addr = page_addr;
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "enter in wmt_nand_cmdfunc() command: %x column:%x, page_addr:%x\n",
+ command, column, page_addr);
+ //if (command == 0x70)
+ //dump_stack();
+ #endif
+ info->isr_cmd = command;
+ if (page_addr != 0xFFFFFFFF && page_addr != -1)
+ info->cur_page = page_addr;
+ info->phase = 0;
+ if (readl(info->reg + NFCR9_ECC_BCH_CTRL) & DIS_BCH_ECC)
+ info->phase = 2;
+ pmc_nand = *(volatile unsigned long *)PMCEU_ADDR;// |= (0x0010000);//add by vincent
+ if (!(pmc_nand&0x0010000))
+ printk(KERN_NOTICE "pmc_nand=0x%x\n", pmc_nand);
+
+ chip_en = readb(info->reg + NFCR12_NAND_TYPE_SEL+1);
+ if ((chip_en&7) == 7) {
+ printk(KERN_NOTICE "chip 0, or 1, is not select chip_sel=%x\n", chip_en);
+ writeb(0xfe, info->reg + NFCR12_NAND_TYPE_SEL+1);
+ }
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ #ifdef WMT_HW_RDMZ
+ tmp = DIS_BCH_ECC & readb(info->reg + NFCR9_ECC_BCH_CTRL);
+ if (mtd->dwRdmz) {
+ if (mtd->bbt_sw_rdmz || tmp) {
+ if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+ reset_nfc(mtd, NULL, 3);
+ } else
+ nfc_hw_rdmz(mtd, 1);
+ }
+ #endif
+ wmt_nand_page_read(mtd, command, column, page_addr);
+ /*#ifdef WMT_HW_RDMZ
+ if (mtd->dwRdmz)
+ nfc_hw_rdmz(mtd, 1);
+ #endif*/
+ return;
+ case NAND_CMD_READOOB:
+ #ifdef WMT_HW_RDMZ
+ if (mtd->dwRdmz) {
+ if (mtd->bbt_sw_rdmz) {
+ if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+ reset_nfc(mtd, NULL, 3);
+ } else
+ nfc_hw_rdmz(mtd, 1);
+ }
+ #endif
+ //printk("oobRe=%x mtd->bbt_sw_rdmz=%d dwRdmz=%d\n", page_addr, mtd->bbt_sw_rdmz, mtd->dwRdmz);
+ wmt_nand_oob_read(mtd, command, column, page_addr);
+ /*#ifdef WMT_HW_RDMZ
+ if (mtd->dwRdmz)
+ nfc_hw_rdmz(mtd, 1);
+ #endif*/
+ return;
+
+ case MULTI_READ_1CYCLE:
+ if ((0xFF&(mtd->id>>24)) == NAND_MFR_MICRON || (0xFF&(mtd->id>>24)) == NAND_MFR_INTEL)
+ wmt_multi_page_start_micron(mtd, command, column, page_addr);
+ else
+ wmt_multi_page_start(mtd, command, column, page_addr);
+ return;
+ case MULTI_READ_2CYCLE:
+ info->isr_cmd = 0x00;
+ command = 0x00;
+ wmt_multi_page_read(mtd, command, column, page_addr);
+ return;
+ /*case MULTI_COPY_1CYCLE:
+ info->isr_cmd = 0x60;
+ command = 0x60;
+ wmt_multi_copy_start(mtd, command, column, page_addr);
+ return;
+ case MULTI_COPY_2CYCLE:
+ info->isr_cmd = 0x00;
+ command = 0x00;
+ wmt_multi_copy_read(mtd, command, column, page_addr);
+ return;
+ case MULTI_COPY_3CYCLE:
+ info->isr_cmd = 0x85;
+ command = 0x85;
+ wmt_multi_copy_write(mtd, command, column, page_addr);
+ return;
+ case COPY_BACK_1CYCLE:
+ info->isr_cmd = 0x00;
+ command = 0x00;
+ wmt_copy_back_read(mtd, command, column, page_addr);
+ return;
+ case COPY_BACK_2CYCLE:
+ info->isr_cmd = 0x85;
+ command = 0x85;
+ wmt_copy_back_write(mtd, command, column, page_addr);
+ return;*/
+
+ case 0x81:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_ERASE1:
+ /* printk(KERN_NOTICE "command is %x\n", command);*/
+ if (column != -1) {
+ writeb(column, info->reg + NFCR3_COMPORT1_2);
+ addr_cycle++;
+ /*#ifndef PAGE_ADDR*/
+ if (mtd->realwritesize != 512) {
+ writeb(column >> 8, (unsigned char *)(info->reg + NFCR3_COMPORT1_2) + 1);
+ addr_cycle++;
+ }/*#endif*/
+ if (page_addr != -1) {
+ /*#ifndef PAGE_ADDR*/
+ if (mtd->realwritesize != 512) {
+ writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+ page_addr >>= 8;
+ writeb(page_addr, (unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+ addr_cycle += 2;
+ /*#else*/
+ } else {
+ writeb(page_addr, (unsigned char *)(info->reg + NFCR3_COMPORT1_2) + 1);
+ page_addr >>= 8;
+ writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+ addr_cycle += 2;
+ } /*#endif*/
+
+ if (mtd->realwritesize == 2048) {
+ /* One more address cycle for devices > 128MiB */
+ if (chip->chipsize > (128 << 20)) {
+ page_addr >>= 8;
+ /*#ifndef PAGE_ADDR*/
+ if (mtd->realwritesize != 512)
+ writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+ else /*#else*/
+ writeb(page_addr, (unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+ /*#endif*/
+ addr_cycle++;
+ }
+ } else if (mtd->realwritesize == 4096) {
+ /* One more address cycle for devices > 256MiB */
+ if (chip->chipsize > (256 << 20)) {
+ page_addr >>= 8;
+ /*#ifndef PAGE_ADDR*/
+ if (mtd->realwritesize != 512)
+ writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+ else /*#else*/
+ writeb(page_addr, (unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+ /*#endif*/
+ addr_cycle++;
+ }
+ } else if (mtd->realwritesize == 8192) {
+ /* One more address cycle for devices > 512MiB */
+ if (chip->chipsize > (512 << 20)) {
+ page_addr >>= 8;
+ if (mtd->realwritesize != 512)
+ writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+ addr_cycle++;
+ }
+ } else if (mtd->realwritesize == 16384) {
+ /* One more address cycle for devices > 1024MiB */
+ if (chip->chipsize > (1024 << 20)) {
+ page_addr >>= 8;
+ writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+ addr_cycle++;
+ }
+ } else {
+ /* One more address cycle for devices > 32MiB */
+ if (chip->chipsize > (32 << 20)) {
+ page_addr >>= 8;
+ /*#ifndef PAGE_ADDR*/
+ if (mtd->realwritesize != 512)
+ writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+ else /*#else*/
+ writeb(page_addr, (unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+ /*#endif*/
+ addr_cycle++;
+ }
+ }
+ }
+ /*} else if (page_addr != -1) {*/
+ } else if ((page_addr != -1) && (column == -1)) {
+ writeb(page_addr & 0xff, info->reg + NFCR3_COMPORT1_2);
+ page_addr >>= 8;
+ writeb(page_addr & 0xff, (unsigned char *)(info->reg + NFCR3_COMPORT1_2) + 1);
+ addr_cycle += 2;
+
+ if (mtd->realwritesize == 2048) {
+ /* One more address cycle for devices > 128MiB */
+ if (chip->chipsize > (128 << 20)) {
+ page_addr >>= 8;
+ writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+ addr_cycle++;
+ }
+ } else if (mtd->realwritesize == 4096) {
+ /* One more address cycle for devices > 256MiB */
+ if (chip->chipsize > (256 << 20)) {
+ page_addr >>= 8;
+ writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+ addr_cycle++;
+ }
+ } else if (mtd->realwritesize == 8192) {
+ /* One more address cycle for devices > 512MiB */
+ if (chip->chipsize > (512 << 20)) {
+ page_addr >>= 8;
+ writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+ addr_cycle++;
+ }
+ } else if (mtd->realwritesize == 16384) {
+ /* One more address cycle for devices > 1024MiB */
+ if (chip->chipsize > (1024 << 20)) {
+ page_addr >>= 8;
+ writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+ addr_cycle++;
+ }
+ } else {
+ /* One more address cycle for devices > 32MiB */
+ if (chip->chipsize > (32 << 20)) {
+ page_addr >>= 8;
+ writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+ addr_cycle++;
+ }
+ }
+ }
+
+ /* set command 1 cycle */
+ writeb(command, info->reg + NFCR2_COMPORT0);
+ if (command == NAND_CMD_SEQIN || command == 0x81) {
+ wmb();
+ info->done_data = &complete;
+ writew(((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ } else {
+ /* writeb(read(info->reg + NFCR12_NAND_TYPE_SEL) | WP_DISABLE ,
+ info->reg + NFCR12_NAND_TYPE_SEL);*/
+ writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD,
+ info->reg + NFCR1_COMCTRL);
+ }
+ wmb();
+
+ if (command == NAND_CMD_ERASE1) {//printk("erpg=0x%x\n", page_addr);
+ status = wmt_wait_cmd_ready(mtd);
+ /* status = wmt_nfc_ready(mtd); */
+ if (status)
+ printk(KERN_ERR "command is not ready\n");
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ } else {
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ status = wmt_nfc_transfer_ready(mtd);
+ /*status = wmt_wait_dma_ready(mtd);*/ /*dannier mask*/
+ wmt_wait_nfc_ready(info);
+ if (status) {
+ printk(KERN_ERR "dma transfer data is not ready: %x\n",
+ readb(info->reg + NFCRa_NFC_STAT));
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ /*printk(KERN_NOTICE "\rwait transfer data is not ready: %x\n",
+ readb(info->reg + NFCRa_NFC_STAT));*/
+ /*print_nand_register(mtd);*/
+ /* while (1);*/
+ /* return;*/
+ }
+ }
+ return;
+
+
+ case 0x11:
+ //printk("\n0x11 is here \n");
+ writeb(command, info->reg + NFCR2_COMPORT0);
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ //writeb(0x1B, info->reg + NFCR13_INT_MASK);
+ info->done_data = &complete;
+ writew(DPAHSE_DISABLE|(1<<1)|NFC_TRIGGER|0x400, info->reg + NFCR1_COMCTRL);
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ //print_nand_register(mtd);
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ status = wmt_wait_chip_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"The chip is not ready\n");
+ status = wmt_nfc_wait_idle(mtd, 1, 1, -1, -1); /* write page, don't check ecc */
+ if (status < 0)
+ printk(KERN_ERR "page multi plane err, nand controller is not idle\n");
+ return;
+
+
+ case NAND_CMD_PAGEPROG:
+ /* case NAND_CMD_READSTART:*/
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_ERASE3:
+ /*printk(KERN_NOTICE "command is %x\n", command);*/
+ writeb(command, info->reg + NFCR2_COMPORT0);
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ if (B2R&b2r_stat) {
+ printk(KERN_NOTICE"flash B2R status assert command=0x%x statu%x\n",command, b2r_stat);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ status = wmt_wait_chip_ready(mtd); /*Vincent 2008.11.3*/
+ if (status)
+ printk(KERN_NOTICE"The chip is not ready\n");
+ }
+
+ if (NAND_CMD_ERASE2 == command || NAND_CMD_ERASE3 == command) {
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ writeb(0x1B, info->reg + NFCR13_INT_MASK);
+ }
+ info->done_data = &complete;
+ writew(DPAHSE_DISABLE|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+ info->datalen = 0;
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ #if 0 /* for debug */
+ if (command == NAND_CMD_ERASE2 || NAND_CMD_ERASE3 == command) {
+ wmt_read_nand_status(mtd, NAND_CMD_STATUS);
+ if ((readb(info->reg + NFCR0_DATAPORT) & 0xff) == 0xc0) {
+ printk(KERN_NOTICE "wmt_func: erase block OK\n");
+ printk(KERN_NOTICE "read nand status is %x\n",
+ readb(info->reg + NFCR0_DATAPORT) & 0xff);
+ } else
+ printk(KERN_NOTICE "wmt_func: erase block failed\n");
+ }
+ #endif
+
+ status = wmt_nfc_wait_idle(mtd, 1, 1, -1, -1); /* write page, don't check ecc */
+ if (status < 0) {
+ printk(KERN_ERR "page program or erase err, nand controller is not idle\n");
+ /*print_nand_register(mtd);*/
+ /* while (1);*/
+ #if 0
+ status = wmt_read_nand_status(mtd, NAND_CMD_STATUS);
+ if (status < 0)
+ printk(KERN_NOTICE "\rNFC or NAND is not ready\n");
+ else if (status & NAND_STATUS_FAIL)
+ printk(KERN_NOTICE "\r status : fail\n");
+ else if (!(status & NAND_STATUS_READY))
+ printk(KERN_NOTICE "\r status : busy\n");
+ else if (!(status & NAND_STATUS_WP))
+ printk(KERN_NOTICE "\r status : protect\n");
+ #endif
+ return;
+ }
+
+ return;
+
+ case NAND_CMD_RESET_NO_STATUS_READ:
+ case NAND_CMD_HYNIX_RETRY_END:
+
+ if (!chip->dev_ready)
+ break;
+ udelay(chip->chip_delay);
+ writeb(command, info->reg + NFCR2_COMPORT0);
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writew(DPAHSE_DISABLE|(0x01<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_nand_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "Reset err, nand device is not ready\n");
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ }
+
+ return;
+
+ case NAND_CMD_RESET:
+
+ if (!chip->dev_ready)
+ break;
+ udelay(chip->chip_delay);
+ writeb(command, info->reg + NFCR2_COMPORT0);
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writew(DPAHSE_DISABLE|(0x01<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_nand_ready(mtd);
+ if (status) {
+ b2r_stat = readb(info->reg + NFCR12_NAND_TYPE_SEL+1);
+ printk(KERN_ERR "Reset err, nand device chip %d is not ready\n", ((~b2r_stat)&0xFF)>>1);
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ }
+
+ wmt_read_nand_status(mtd, NAND_CMD_STATUS);
+ /* while (!(chip->read_byte(mtd) & NAND_STATUS_READY));*/
+ i = 0;
+ while (!((readb(info->reg + NFCR0_DATAPORT) & 0xff) & NAND_STATUS_READY)) {
+ if (i>>12) {
+ printk("reset flash chip %d timed out\n", ~readb(info->reg + NFCR12_NAND_TYPE_SEL+1));
+ break;
+ }
+ i++;
+ }
+
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "Reset status is ok\n");
+ #endif
+ return;
+
+ case NAND_CMD_READID:
+
+ status = wmt_nand_readID(mtd);
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "readID status is %d\n", status);
+ #endif
+ return;
+
+ case NAND_GET_FEATURE:
+ if (mtd->dwRdmz)
+ reset_nfc(mtd, NULL, 3);
+ status = nand_get_feature(mtd, 0x1);
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);//enable rdmz
+ return;
+
+ case NAND_CMD_STATUS:
+
+ wmt_read_nand_status(mtd, command);
+ return;
+
+ case NAND_CMD_STATUS_MULTI:
+
+ wmt_read_nand_status(mtd, command);
+ return;
+
+ case NAND_CMD_RNDIN:
+ if (column != -1) {
+ writeb(column, info->reg + NFCR3_COMPORT1_2);
+ addr_cycle++;
+ if (mtd->realwritesize != 512) {
+ writeb(column >> 8, (unsigned char *)(info->reg + NFCR3_COMPORT1_2) + 1);
+ addr_cycle++;
+ }
+ }
+ info->done_data = &complete;
+ /* set command 1 cycle */
+ writeb(command, info->reg + NFCR2_COMPORT0);
+
+ writew(((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ status = wmt_nfc_wait_idle(mtd, 1, -1, -1, -1); /* don't check ecc, wait nfc idle */
+ /* status = wmt_wait_cmd_ready(mtd);*/
+ /* status = wmt_nfc_ready(mtd);*/
+ if (status)
+ printk(KERN_ERR "Random input err: nfc is not idle\n");
+
+ return;
+
+ case NAND_CMD_RNDOUT:
+
+ if (column != -1) {
+ writeb(column, info->reg + NFCR3_COMPORT1_2);
+ writeb(column, info->reg + NFCR3_COMPORT1_2 + 1);
+ addr_cycle += 2;
+ }
+
+ /* CLEAR ECC BIT */
+ //writeb(0x1B, info->reg + NFCR13_INT_MASK);
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ /* set command 1 cycle */
+ writeb(command, info->reg + NFCR2_COMPORT0);
+
+ writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+ status = wmt_wait_cmd_ready(mtd);
+ /* status = wmt_nfc_ready(mtd);*/
+ if (status) {
+ printk(KERN_ERR "Random output err: nfc command is not ready\n");
+ /* return;*/
+ }
+
+ writeb(NAND_CMD_RNDOUTSTART, info->reg + NFCR2_COMPORT0);
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writew(NAND2NFC|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+ status = wmt_wait_cmd_ready(mtd);
+ /* status = wmt_nand_ready(mtd);*/
+ if (status) {
+ printk(KERN_ERR "Random output err: nfc io transfer is not finished\n");
+ /* return;*/
+ }
+ /* redundant area: check ecc, wait for nfc idle */
+ status = wmt_nfc_wait_idle(mtd, 0, -1, -1, -1);
+ /* status = wmt_nand_wait_idle(mtd);*/
+ if (status)
+ printk(KERN_ERR "Random output err: nfc is not idle\n");
+ return;
+
+
+ case NAND_CMD_STATUS_ERROR:
+ case NAND_CMD_STATUS_ERROR0:
+ udelay(chip->chip_delay);
+ return;
+
+
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+
+ /* trigger command and address cycle */
+
+ if (!chip->dev_ready) {
+ udelay(chip->chip_delay);
+ return;
+ }
+ }
+ /* Apply this short delay always to ensure that we do wait tWB in */
+ /* any case on any machine.*/
+ /* ndelay(100);*/
+ wmt_device_ready(mtd);
+}
+
+
+static void wmt_nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int b2r_stat;
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "\r enter in wmt_nand_select_chip()\n");
+ #endif
+ if (!((*(volatile unsigned long *)PMCEU_ADDR)&0x0010000))
+ auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+ if (chipnr > 1)
+ printk(KERN_WARNING "Only two chip selects are supported\n");
+
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ if (chipnr == 1)
+ chipnr++;
+ else if (chipnr == 2)
+ chipnr--;
+
+ if (chipnr >= 0 && chipnr < 4)
+ writeb(~(1<<chipnr), info->reg + NFCR12_NAND_TYPE_SEL+1);
+ else if (chipnr < 0)
+ writeb(~0, info->reg + NFCR12_NAND_TYPE_SEL+1);
+ else
+ printk(KERN_WARNING "Only two chip selects are supported, chipnr = %d\n", chipnr);
+}
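+
+/*
+ * Chip-select mapping used above (assuming NFCR12_NAND_TYPE_SEL+1 takes an
+ * active-low, one-hot chip-enable mask; this note is illustrative only):
+ *   chipnr 0        -> writeb(0xFE)  CE0 asserted
+ *   chipnr 1 (-> 2) -> writeb(0xFB)  CE2 asserted, after the swap above
+ *   chipnr < 0      -> writeb(0xFF)  all chip enables deasserted
+ */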
+
+void rdmzier(uint8_t *buf, int size, int page)
+{
+ int i, j;
+ unsigned int *bi = (unsigned int *)buf;
+ j = page%256;
+
+ for (i = 0; i < size; i++) {
+ bi[i] = rdmz[j] ^ bi[i];
+ j++;
+ if (j >= BYTE_SEED)
+ j = 0;
+ }
+}
+void rdmzier_oob(uint8_t *buf, uint8_t *src, int size, int page, int ofs)
+{
+ int i, j;
+ unsigned int *bi = (unsigned int *)buf;
+ unsigned int *bs = (unsigned int *)src;
+ j = page%256;
+ j = (j+ofs)%BYTE_SEED;
+
+ for (i = 0; i < size; i++) {
+ bi[i] = rdmz[j] ^ bs[i];
+ j++;
+ if (j >= BYTE_SEED)
+ j = 0;
+ }
+}
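+
+/*
+ * Both helpers above scramble a buffer by XORing it, word by word, with the
+ * rdmz[] seed table starting at an offset derived from the page number and
+ * wrapping at BYTE_SEED.  Since the operation is a plain XOR, running
+ * rdmzier() twice with the same page number restores the original data.
+ * A minimal round-trip sketch (hypothetical helper, kept out of the build):
+ */
+#if 0
+static void rdmz_roundtrip_check(uint8_t *buf, int words, int page)
+{
+ rdmzier(buf, words, page); /* scramble */
+ rdmzier(buf, words, page); /* descramble: buf is back to the original */
+}
+#endif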
+
+
+static void wmt_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "enter in wmt_nand_write_buf()\n");
+ #endif
+ //printk("info->dmabuf=%x datalen=%x \n", (unsigned int)info->dmabuf, info->datalen);
+ memcpy(info->dmabuf + info->datalen, buf, len);
+//print_nand_buffer((uint8_t *)info->dmabuf, mtd->writesize);
+ info->datalen += len;
+}
+
+static void wmt_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "enter in wmt_nand_read_buf() len: %x infoDatalen :%x\n", len, info->datalen);
+ #endif
+
+ memcpy(buf, info->dmabuf + info->datalen, len);
+ info->datalen += len;
+}
+
+static uint8_t wmt_read_byte(struct mtd_info *mtd)
+{
+ /* struct wmt_nand_mtd *nmtd = mtd->priv;*/
+ /* struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);*/
+ uint8_t d;
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "enter in wmt_read_byte()\n");
+ #endif
+
+ /* d = readb(info->reg + NFCR0_DATAPORT) & 0xff;*/
+ wmt_nand_read_buf(mtd, &d, 1);
+ /* via_dev_dbg(&nmtd->info->platform->dev, "Read %02x\n", d);*/
+ /* via_dev_dbg(info->platform->dev, "Read %02x\n", d);*/
+
+ return d;
+}
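+
+/*
+ * Note on the buffer model used by the three helpers above: write_buf() and
+ * read_buf() never touch the flash directly, they only copy between the
+ * caller's buffer and the DMA bounce buffer info->dmabuf, with info->datalen
+ * acting as the running cursor.  wmt_read_byte() is simply a one-byte
+ * read_buf() that advances that cursor, so bytes staged in the bounce buffer
+ * by a previous command are consumed one at a time.
+ */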
+
+static int wmt_nand_read_oob_noalign(struct mtd_info *mtd, struct nand_chip *chip, int page, int sndcmd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ uint8_t *buf = chip->oob_poi;
+ uint8_t *bufpoi = buf;
+
+ info->unc_bank = 0;
+ info->unc_allFF = 0;
+
+ // read redundant area cmd
+ //printk(KERN_NOTICE "scan oob page=0x%x\n", page);dannier
+ info->oob_ecc_error = 0x0;
+ #if 0
+ if (!mtd->dwDDR) {
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) | OOB_READ,
+ info->reg + NFCRd_OOB_CTRL);
+ //writeb((info->oob_ECC_bytes+1), info->reg + NFCR10_OOB_ECC_SIZE+1);
+ if (info->ECC_mode != info->oob_ECC_mode)
+ set_ecc_engine(info, info->oob_ECC_mode);
+ //pos = info->oob_col/*+ i * (eccsize + chunk);*/
+ //print_nand_register(mtd);
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, info->oob_col, page);
+ if (info->ECC_mode != info->oob_ECC_mode)
+ set_ecc_engine(info, info->ECC_mode);
+ //writeb(info->oob_ECC_bytes, info->reg + NFCR10_OOB_ECC_SIZE+1);
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & (~OOB_READ),
+ info->reg + NFCRd_OOB_CTRL);
+ } else
+ #endif
+ {
+ info->data_ecc_uncor_err = 0;
+ info->oob_ecc_error = 0x50;
+ }
+
+ if (mtd->dwRdmz) {
+ if (mtd->bbt_sw_rdmz) {
+ if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+ reset_nfc(mtd, NULL, 3);
+ } else
+ nfc_hw_rdmz(mtd, 1);
+ }
+
+ if (info->data_ecc_uncor_err == 1 || info->oob_ecc_error == 0x50) {
+ if (info->data_ecc_uncor_err == 1)
+ printk(KERN_WARNING "page 0x%x: read oob uncorrectable error, falling back to page read\n", page);
+ info->isr_cmd = 0;
+ wmt_nand_page_read(mtd, 0, info->last_bank_col, page);
+ info->oob_ecc_error = 0;
+ }
+
+ if (info->unc_allFF) {
+ set_FIFO_FF((uint32_t *)(chip->oob_poi), 6);//set_FIFO_FF((uint32_t *)(info->reg+ECC_FIFO_0), 4);
+ /*printk("oobRe=%x \n", page);
+ print_nand_buffer((char *)(info->reg+ECC_FIFO_0), 32);
+ print_nand_buffer((char *)(chip->oob_poi), 32);
+ printk("\n");*/
+ } else {
+ memcpy(bufpoi, info->dmabuf + mtd->realwritesize, 24);
+ //print_nand_buffer((char *)(chip->oob_poi), 32);
+ //print_nand_buffer((char *)(info->dmabuf + mtd->realwritesize), 32);
+ /*if (!(*(uint32_t *)(info->reg+ECC_FIFO_0) == 0xFFFFFFFF && *(uint32_t *)(info->reg+ECC_FIFO_1) == 0xFFFFFFFF
+ && *(uint32_t *)(info->reg+ECC_FIFO_2) == 0xFFFFFFFF && *(uint32_t *)(info->reg+ECC_FIFO_3) == 0xFFFFFFFF
+ && *(uint32_t *)(info->reg+ECC_FIFO_4) == 0xFFFFFFFF && *(uint32_t *)(info->reg+ECC_FIFO_5) == 0xFFFFFFFF)) {
+ printk("fail to derdmz oob roob page= 0x%x e\n", page);
+ print_nand_buffer((char *)(info->reg+ECC_FIFO_0), 32);
+ //rdmzier_oob((uint8_t *)(info->reg+ECC_FIFO_0), (uint8_t *)(info->reg+ECC_FIFO_0), 5, page, mtd->realwritesize/4);
+ //print_nand_buffer((char *)(info->reg+ECC_FIFO_0), 32);
+ //while(1);
+ }*/
+ }
+
+ return 1;
+}
+
+static int wmt_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page, int sndcmd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ uint8_t *buf = chip->oob_poi;
+ /* int length = mtd->realoobsize; */ /* prepad = chip->ecc.prepad, bytes = chip->ecc.bytes;*/
+ /* int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;*/
+ /* int eccsize = chip->ecc.size;*/
+ uint8_t *bufpoi = buf;
+ /* struct nand_oobfree *free = chip->ecc.layout->oobfree;*/
+ /* uint32_t boffs;*/
+ /* int pos; */ /* toread, sndrnd = 1;*/
+ #ifdef WMT_SW_RDMZ
+ unsigned int rdmz_mark = 0;
+ #endif
+
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "\r enter in wmt_nand_read_oob() page =0x%x cur_page=0x%x\n", page, info->cur_page);
+ #endif
+
+ info->unc_bank = 0;
+ info->unc_allFF = 0;
+
+ // read redundant area cmd
+ //printk(KERN_NOTICE "scan oob page=%d\n", page);
+ info->oob_ecc_error = 0x0;
+ #if 1
+ if (!mtd->dwDDR) {
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) | OOB_READ,
+ info->reg + NFCRd_OOB_CTRL);
+ //writeb((info->oob_ECC_bytes+1), info->reg + NFCR10_OOB_ECC_SIZE+1);
+ if (info->ECC_mode != info->oob_ECC_mode)
+ set_ecc_engine(info, info->oob_ECC_mode);
+ //pos = info->oob_col/*+ i * (eccsize + chunk);*/
+ //print_nand_register(mtd);
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, info->oob_col, page);
+ if (info->ECC_mode != info->oob_ECC_mode)
+ set_ecc_engine(info, info->ECC_mode);
+ //writeb(info->oob_ECC_bytes, info->reg + NFCR10_OOB_ECC_SIZE+1);
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & (~OOB_READ),
+ info->reg + NFCRd_OOB_CTRL);
+ } else
+ #endif
+ {
+ info->data_ecc_uncor_err = 0;
+ info->oob_ecc_error = 0x50;
+ }
+
+ if (mtd->dwRdmz) {
+ if (mtd->bbt_sw_rdmz) {
+ if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+ reset_nfc(mtd, NULL, 3);
+ } else
+ nfc_hw_rdmz(mtd, 1);
+ }
+
+ if (info->data_ecc_uncor_err == 1 || info->oob_ecc_error == 0x50) {
+ if (info->data_ecc_uncor_err == 1)
+ printk(KERN_WARNING "page 0x%x: read oob uncorrectable error, falling back to page read\n", page);
+ info->isr_cmd = 0;
+ writeb(readb(info->reg + NFCR9_ECC_BCH_CTRL) | 0x10, info->reg + NFCR9_ECC_BCH_CTRL);
+ wmt_nand_page_read(mtd, 0, info->last_bank_col, page);
+ writeb(readb(info->reg + NFCR9_ECC_BCH_CTRL) & 0xEF, info->reg + NFCR9_ECC_BCH_CTRL);
+ info->oob_ecc_error = 0;
+ }
+//print_nand_buffer((char *)(info->reg+ECC_FIFO_0), 16);
+ #ifdef WMT_SW_RDMZ
+ rdmzier_oob((uint8_t *)&rdmz_mark, (uint8_t *)(info->reg+ECC_FIFO_5), 1, page, (mtd->realwritesize+20)/4);
+ //printk("re oob page=0x%x rdmz_mark=0x%x wmt_rdmz=0x%x fifo5=0x%x\n",page , rdmz_mark, *(unsigned int *)wmt_rdmz, *(unsigned int *)(info->reg+ECC_FIFO_5));
+ if (mtd->dwRdmz == 1 && rdmz_mark == *(unsigned int *)wmt_rdmz) {
+ rdmzier_oob(bufpoi, (uint8_t *)(info->reg+ECC_FIFO_0), 5, page, mtd->realwritesize/4);
+ //print_nand_buffer(info->reg+ECC_FIFO_0, 24);
+ } else
+ #endif
+ if (info->unc_allFF) {
+ set_FIFO_FF((uint32_t *)(chip->oob_poi), 5);//set_FIFO_FF((uint32_t *)(info->reg+ECC_FIFO_0), 4);
+ /*printk("oobRe=%x \n", page);
+ print_nand_buffer((char *)(info->reg+ECC_FIFO_0), 32);
+ print_nand_buffer((char *)(chip->oob_poi), 32);
+ printk("\n");*/
+ } else {
+ memcpy(bufpoi, info->reg+ECC_FIFO_0, 20);
+ /*if (!(*(uint32_t *)(info->reg+ECC_FIFO_0) == 0xFFFFFFFF && *(uint32_t *)(info->reg+ECC_FIFO_1) == 0xFFFFFFFF
+ && *(uint32_t *)(info->reg+ECC_FIFO_2) == 0xFFFFFFFF && *(uint32_t *)(info->reg+ECC_FIFO_3) == 0xFFFFFFFF
+ && *(uint32_t *)(info->reg+ECC_FIFO_4) == 0xFFFFFFFF && *(uint32_t *)(info->reg+ECC_FIFO_5) == 0xFFFFFFFF)) {
+ printk("fail to derdmz oob roob page= 0x%x e\n", page);
+ print_nand_buffer((char *)(info->reg+ECC_FIFO_0), 32);
+ //rdmzier_oob((uint8_t *)(info->reg+ECC_FIFO_0), (uint8_t *)(info->reg+ECC_FIFO_0), 5, page, mtd->realwritesize/4);
+ //print_nand_buffer((char *)(info->reg+ECC_FIFO_0), 32);
+ //while(1);
+ }*/
+ }
+ /*chip->read_buf(mtd, bufpoi, 32);*/
+ /*chip->read_buf(mtd, bufpoi + i * 16, 16);*/
+
+ return 1;
+}
+
+static int wmt_nand_read_oob_single(struct mtd_info *mtd, struct nand_chip *chip, int page, int sndcmd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int ret = 0;
+
+ info->cur_lpage = page;
+ info->cur_page = page;
+
+ ret = cache_read_data(mtd, chip, page, NULL);
+ if (!ret) {
+ //printk("re oob lpg=0x%x from cache\n", page);
+ return 0;
+ }
+ ret = 0;
+
+ if (page >= ((mtd->blkcnt - 8)*mtd->pagecnt))
+ mtd->bbt_sw_rdmz = 1;
+ else
+ mtd->bbt_sw_rdmz = 0;
+//printk("11oobRe=0x%x mtd->bbt_sw_rdmz=%d cur_page=0x%x\n", page, mtd->bbt_sw_rdmz, info->cur_page);
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1) {
+ if (wmt_nand_read_oob_noalign(mtd, chip, page, sndcmd))
+ ret = 1;
+ } else {
+ if (wmt_nand_read_oob(mtd, chip, page, sndcmd))
+ ret = 1;
+ }
+
+ return ret;
+}
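+
+/*
+ * The (mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1 test above is a
+ * power-of-two check on the page size in KiB: for pageSizek = 16, ffs()
+ * returns 5 and 16 >> 4 == 1, so the aligned wmt_nand_read_oob() path runs;
+ * for pageSizek = 12, ffs() returns 3 and 12 >> 2 == 3, so the noalign
+ * variant is chosen instead.
+ */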
+
+
+static int wmt_nand_read_oob_plane(struct mtd_info *mtd, struct nand_chip *chip, int page, int sndcmd)
+{
+ //printk("\n wmt_nand_read_oob_plane \n");
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+ int pagecnt = mtd->pagecnt;
+ int ret = 0; //printk("\n wmt_nand_read_oob_plane page=%x =>page=%x\n",page,(page / pagecnt) * pagecnt + page);
+
+ info->cur_lpage = page;
+ ret = cache_read_data(mtd, chip, page, NULL);
+ if (!ret) {
+ //printk("re oob lpg=0x%x from cache\n", page);
+ return 0;
+ }
+ ret = 0;
+
+ if (page >= ((mtd->blkcnt - 8)*mtd->pagecnt))
+ mtd->bbt_sw_rdmz = 1;
+ else
+ mtd->bbt_sw_rdmz = 0;
+
+ page = (page / pagecnt) * pagecnt + page;
+
+ info->cur_page = page;
+ //printk("22oobRe=0x%x mtd->bbt_sw_rdmz=%d hold=%x blkcnt=%d\n", page, mtd->bbt_sw_rdmz, ((mtd->blkcnt - 8)*mtd->pagecnt), mtd->blkcnt);
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1) {
+ if (wmt_nand_read_oob_noalign(mtd, chip, page, 1))
+ ret = 1;
+ } else {
+ if (wmt_nand_read_oob(mtd, chip, page, 1))
+ ret = 1;
+ }
+/* info->oper_step = 1;
+ if(wmt_nand_read_oob(mtd, chip, page+div, 1))ret = 1;
+ //if(ret)printk("ret is 1! \n");
+*/
+ return ret;
+}
+
+
+
+/*
+ * wmt_nand_read_raw_page - read a page with the BCH ECC engine disabled
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+static int wmt_nand_read_raw_page(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ unsigned int bch;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+ /*print_nand_register(mtd);
+ dump_stack();*/
+
+ bch = readb(info->reg + NFCR9_ECC_BCH_CTRL);
+ writeb((bch & (~BCH_INT_EN))| DIS_BCH_ECC, info->reg + NFCR9_ECC_BCH_CTRL);
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ writeb(bch, info->reg + NFCR9_ECC_BCH_CTRL);
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+
+ set_ecc_engine(info, info->ECC_mode);
+
+ return 0;
+}
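+
+/*
+ * Raw reads follow a save/disable/restore pattern around the BCH engine:
+ * NFCR9_ECC_BCH_CTRL is saved, the BCH interrupt is masked and ECC disabled
+ * (DIS_BCH_ECC) so the controller transfers data without correction, the
+ * normal READ0 command path is reused, and the saved control value and ECC
+ * mode are then written back so later reads are corrected again.
+ */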
+
+
+/* - SCAN DEFAULT INVALID BAD BLOCK -
+ * wmt_nand_read_bb_oob - OOB data read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ * @sndcmd: flag whether to issue read command or not
+ */
+static int wmt_nand_read_bb_oob(struct mtd_info *mtd, struct nand_chip *chip,
+int page, int sndcmd)
+{
+ unsigned int bch, bak_time;
+ int i, size = 1024, ofs = mtd->realwritesize;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "enter in wmt_nand_read_bb_oob() page=0x%x\n", page);
+ #endif
+ bch = readb(info->reg + NFCR9_ECC_BCH_CTRL);
+ writeb((bch & (~BCH_INT_EN))| DIS_BCH_ECC, info->reg + NFCR9_ECC_BCH_CTRL);
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ bak_time = readl(info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+ if (!mtd->dwDDR)
+ writel(0x2424, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1) {
+ ofs = ofs + 2048;
+ }
+
+ if (sndcmd) {
+ if ((0xFF&(mtd->id>>24)) == 0x45) {
+ for (i = 0; i < ((ofs/1024)+1); i++) {
+ chip->cmdfunc(mtd, NAND_CMD_READ0, i*1024, page);
+ info->datalen = 0;
+ if (i == (ofs/1024))
+ size = (mtd->realoobsize >= 1024) ? 1024 : mtd->realoobsize;
+ chip->read_buf(mtd, chip->oob_poi - ofs + (i*1024), size);
+ }
+ } else if (mtd->id == 0xECDED57E && mtd->id2 == 0x68440000) {
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ info->datalen = 0;
+ chip->read_buf(mtd, chip->oob_poi, 1);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, ofs, page);
+ info->datalen = 0;
+ chip->read_buf(mtd, chip->oob_poi+1, 63);
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_READ0, ofs, page);
+ info->datalen = 0;
+ chip->read_buf(mtd, chip->oob_poi, 64);
+ }
+ sndcmd = 0;
+ }
+ if (!mtd->dwDDR)
+ writel(bak_time, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+ writeb(bch, info->reg + NFCR9_ECC_BCH_CTRL);
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+
+ set_ecc_engine(info, info->ECC_mode);
+
+ return sndcmd;
+}
+
+/* - SCAN DEFAULT INVALID BAD BLOCK -
+ * wmt_nand_read_bb_oob_multi - OOB data read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ * @sndcmd: flag whether to issue read command or not
+ */
+static int wmt_nand_read_bb_oob_multi(struct mtd_info *mtd, struct nand_chip *chip,
+int page, int sndcmd)
+{
+ unsigned int bch, bak_time;
+ int i, size = 1024, plane, ofs = mtd->realwritesize;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "enter in wmt_nand_read_bb_oob_multi() page=0x%x\n", page);
+ #endif
+ bch = readb(info->reg + NFCR9_ECC_BCH_CTRL);
+ writeb((bch & (~BCH_INT_EN))| DIS_BCH_ECC, info->reg + NFCR9_ECC_BCH_CTRL);
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ bak_time = readl(info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+ if (!mtd->dwDDR)
+ writel(0x2424, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1) {
+ ofs = ofs + 2048;
+ }
+
+ if (sndcmd) {
+ if ((0xFF&(mtd->id>>24)) == 0x45) {
+ plane = (info->oper_step ? (ofs-1024) : mtd->writesize);
+ for (i = 0; i < ((ofs/1024)+1); i++) {
+ chip->cmdfunc(mtd, NAND_CMD_READ0, i*1024, page);
+ info->datalen = 0;
+ if (i == (ofs/1024))
+ size = (mtd->realoobsize >= 1024) ? 1024 : mtd->realoobsize;
+ chip->read_buf(mtd, chip->oob_poi - plane + (i*1024), size);
+ }
+ } else if (mtd->id == 0xECDED57E && mtd->id2 == 0x68440000) {
+ plane = (info->oper_step ? 32 : 0);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ info->datalen = 0;
+ chip->read_buf(mtd, chip->oob_poi+plane, 1);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, ofs, page);
+ info->datalen = 0;
+ chip->read_buf(mtd, chip->oob_poi+1+plane, 31);
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_READ0, ofs, page);
+ info->datalen = 0;
+ plane = (info->oper_step ? 32 : 0);
+ chip->read_buf(mtd, chip->oob_poi+plane, 32);
+ }
+ sndcmd = 0;
+ }
+ if (!mtd->dwDDR)
+ writel(bak_time, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+ writeb(bch, info->reg + NFCR9_ECC_BCH_CTRL);
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+
+ set_ecc_engine(info, info->ECC_mode);
+
+ return sndcmd;
+}
+
+static int wmt_nand_read_bb_oob_plane(struct mtd_info *mtd, struct nand_chip *chip,
+int page, int sndcmd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int div = mtd->erasesize / mtd->writesize;
+ int ret = 0;
+ page = (page / div) * div + page;
+ info->oper_step = 0;
+ if (wmt_nand_read_bb_oob_multi(mtd, chip, page,sndcmd))
+ ret = 1;
+ info->oper_step = 1;
+ if(wmt_nand_read_bb_oob_multi(mtd, chip, page+div,sndcmd))
+ ret = 1;
+ info->oper_step = 0;
+ return ret;
+}
+
+
+/* write oob is no longer supported */
+static int wmt_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ /*int i;*/
+ unsigned int b2r_stat;
+ /*int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;*/
+ int eccsize = chip->ecc.size; /* length = mtd->realoobsize; */
+ /* prepad = chip->ecc.prepad, bytes = chip->ecc.bytes;*/
+
+ int pos, status = 0;
+ /*int steps = chip->ecc.steps;*/ /* Vincent 2008.11.4*/
+ const uint8_t *bufpoi = chip->oob_poi;
+ /* struct nand_oobfree *free = chip->ecc.layout->oobfree;*/
+ /* uint32_t boffs;*/
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "\r enter in wmt_nand_write_oob()\n");
+ #endif
+
+
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ info->datalen = 0;
+ /*chip->write_buf(mtd, bufpoi, 32);*/
+ memcpy(info->reg+ECC_FIFO_0, bufpoi, 32);
+ pos = eccsize * chip->ecc.steps + 8*4;
+ /*pos = eccsize + i * (eccsize + chunk);*/
+ /*wmt_nfc_dma_cfg(mtd, 32, 1, 1, i);*/
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ /* printk(KERN_NOTICE "\r in wmt_nand_write_oob_new(): waitfunc_1\n");*/
+ status = chip->waitfunc(mtd, chip);
+ /* printk(KERN_NOTICE "\r in wmt_nand_write_oob_new(): waitfunc_2\n");*/
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+ /* } */
+ return 0;
+
+}
+
+static int wmt_nand_write_oob_plane(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ uint8_t *bufpoi = chip->oob_poi;
+ /*int i;*/
+ unsigned int b2r_stat;
+ /*int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;*/
+ int eccsize = chip->ecc.size; /* length = mtd->realoobsize; */
+ /* prepad = chip->ecc.prepad, bytes = chip->ecc.bytes;*/
+
+ int pos, status = 0;
+ /*int steps = chip->ecc.steps;*/ /* Vincent 2008.11.4*/
+
+ int div = mtd->erasesize / mtd->writesize;
+
+ page = (page / div) *div + page;
+
+ // if(info->oper_step) bufpoi = chip->oob_poi + mtd->realoobsize;
+ /* struct nand_oobfree *free = chip->ecc.layout->oobfree;*/
+ /* uint32_t boffs;*/
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "\r enter in wmt_nand_write_oob_plane()\n");
+ #endif
+ /*
+ * data-ecc-data-ecc ... ecc-oob
+ * or
+ * 512 7 1 5 0 3
+ * data-ecc-prepad-data-pad-oobecc ....
+ */
+
+ /* for (i = 0; i < steps; i++) {*/
+ /*for (i = 0; i < 4; i++) {*/
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ info->datalen = 0;
+ memcpy(info->reg+ECC_FIFO_0, bufpoi, 32);
+ pos = eccsize * chip->ecc.steps + 8*4;
+ /*pos = eccsize + i * (eccsize + chunk);*/
+ /*wmt_nfc_dma_cfg(mtd, 32, 1, 1, i);*/
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+
+ chip->cmdfunc(mtd, 0x11, -1, -1);
+
+ memcpy(info->reg+ECC_FIFO_0, bufpoi + mtd->realoobsize, 32);
+
+ chip->cmdfunc(mtd, 0x81, pos, page + div);
+
+ chip->cmdfunc(mtd, 0x10, -1, -1);
+ /* printk(KERN_NOTICE "\r in wmt_nand_write_oob_new(): waitfunc_1\n");*/
+ status = chip->waitfunc(mtd, chip);
+ /* printk(KERN_NOTICE "\r in wmt_nand_write_oob_new(): waitfunc_2\n");*/
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+ /* } */
+
+ return 0;
+}
+
+static void wmt_single_plane_erase(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i;
+#if 0
+ /* Send commands to erase a block */
+ if (chip->cur_chip && (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX && prob_end == 1) {
+ if (page < par3_ofs || (page >= par6_ofs && page < par7_ofs)) {
+ //printk("SKIP erase page 0x%x, par4_ofs 0x%x\n", page, par4_ofs);
+ return;
+ }
+ } // nand_base.c nand_erase_nand
+#endif
+ for (i = 0; i < WR_BUF_CNT; i++)
+ if (page <= info->wr_page[i] && (page+mtd->pagecnt) > info->wr_page[i])
+ info->wr_page[i] = -1;
+ info->cur_page = page;
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+}
+
+static void wmt_multi_plane_erase(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i, pagecnt = mtd->pagecnt, page_plane1;
+ //if (((page/pagecnt) * pagecnt + page) != (page<<1) - (page%pagecnt))
+ //printk("erase page %d => page1=%d page2=%d\n", page, (page/pagecnt) * pagecnt + page, (page<<1) - (page%pagecnt));
+
+ for (i = 0; i < WR_BUF_CNT; i++)
+ if (page <= info->wr_page[i] && (page+mtd->pagecnt) > info->wr_page[i])
+ info->wr_page[i] = -1;
+
+ /*if (chip->cur_chip && (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX && prob_end == 1) {
+ if (page < par3_ofs || (page >= par5_ofs && page < par7_ofs)) {
+ printk("SKIP erase page 0x%x, par4_ofs 0x%x\n", page, par3_ofs);
+ //while(1);
+ return;
+ }
+ }*/
+ page = (page / pagecnt) * pagecnt + page;
+ page_plane1 = page + pagecnt;
+ //printk("multi erase page %x => page1=%x page2=%x, pagepl1=%x\n", page, (page/pagecnt) * pagecnt + page, (page<<1) - (page%pagecnt), page_plane1);
+ //printk("blk=%d, blk1=%d\n", page/mtd->pagecnt, page_plane1/mtd->pagecnt);
+
+ info->cur_page = page;
+// chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);//simulate
+// chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); //simulate
+// chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page+div); //simulate
+// chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); //simulate
+/*******************************************************************************/
+ if ((0xFF&(mtd->id>>24)) == NAND_MFR_MICRON || (0xFF&(mtd->id>>24)) == NAND_MFR_INTEL) {
+ //printk(KERN_NOTICE"multi erase0 command=0x%x \n",NAND_CMD_ERASE1);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ //printk(KERN_NOTICE"multi erase1 command=0x%x \n",NAND_CMD_ERASE3);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE3, -1, -1); /* send cmd 0xd0 */
+ //printk(KERN_NOTICE"multi erase1 command=0x%x \n",NAND_CMD_ERASE1);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page_plane1);
+ //printk(KERN_NOTICE"multi erase2 command=0x%x \n",NAND_CMD_ERASE2);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); /* send cmd 0xd0 */
+ } else {
+ //printk(KERN_NOTICE"multi erase0 command=0x%x \n",NAND_CMD_ERASE1);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ //printk(KERN_NOTICE"multi erase1 command=0x%x \n",NAND_CMD_ERASE1);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page_plane1);
+ //printk(KERN_NOTICE"multi erase2 command=0x%x \n",NAND_CMD_ERASE2);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); /* send cmd 0xd0 */
+ }
+/*******************************************************************************/
+}
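+
+/*
+ * Plane address arithmetic used above: with pagecnt pages per block, a
+ * logical page maps onto plane 0 as (page / pagecnt) * pagecnt + page, and
+ * the matching plane-1 page is that value plus pagecnt.  For example, with
+ * pagecnt = 256 and logical page 300:
+ *   plane 0 page = (300 / 256) * 256 + 300 = 556
+ *   plane 1 page = 556 + 256 = 812
+ */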
+
+
+#if 1 //faster encode function
+u32 reverse32 (u32 n)
+{
+ int i;
+ u32 tmp, y;
+
+ y=0;
+ tmp = n;
+
+ for(i=0;i<31;i++)
+ {
+ y = y + (tmp & 0x01);
+ //printf("y=%08x\n",y);
+ tmp >>= 1;
+ y <<= 1;
+ }
+ y = y + (tmp & 0x01);
+
+ return y;
+}
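+
+/*
+ * reverse32() mirrors all 32 bits of its argument, for example:
+ *   reverse32(0x00000001) == 0x80000000
+ *   reverse32(0x0000000F) == 0xF0000000
+ * bch_encoder() below uses it to reverse the bit order of the computed
+ * parity words before handing them back to the caller.
+ */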
+
+int gen_gf2(u8 ecc_mod_bits, u32 *p_bch_gf2)
+{
+// assign bch_GF_60becc = 840'h9A_5FB0C03C_2D3F4F2F_F106E7D9_ED397A28_479724D7_F259A1CD_DB6C78DA_62668B7F_D9D13742_80F0C37C_06664C92_86CCB2D9_DD1A2B4B_3BC4895C_F7212F8C_D75FB017_7FBFE2B9_66646AAA_CEB7855F_6996F036_3D096201_F62357BD_EB9AD670_03F47DD9_73D6AE65_E5A30A27;
+// assign bch_GF_40becc = 560'hC07F_89B1_A0DC_5D96_619F_32D0_4967_54F6_DE9D_4F93_F527_EF14_EFB0_FD53_9915_A82C_CD92_5528_8030_477D_EE3F_338A_59EC_5FA2_10AF_E2EF_DFAE_D244_DF31_4DA5_0762_B724_A002_9CEF_2DC1;
+// assign bch_GF_24becc = 560'h8E94_E024_8D90_9D2B_4525_72D1_EDD9_D098_FE73_0E8E_8D26_C2D2_2893_A3A0_485B_D0AB_6E0B_4992_9A35_6BD4_30EF;
+// assign bch_GF_12becc = 560'hE48_7325_6115_A567_84A6_940A_4C6E_6D7E_1205_E051;
+// assign bch_GF_8becc = 560'h15_F914_E07B_0C13_8741_C5C4_FB23;
+// assign bch_GF_4becc = 560'h4_5230_43AB_86AB;
+u32 bch_GF_60becc[] = { 0x9A, 0x5FB0C03C, 0x2D3F4F2F, 0xF106E7D9, 0xED397A28, 0x479724D7, 0xF259A1CD, 0xDB6C78DA, 0x62668B7F, 0xD9D13742, 0x80F0C37C, 0x06664C92, 0x86CCB2D9, 0xDD1A2B4B, 0x3BC4895C, 0xF7212F8C, 0xD75FB017, 0x7FBFE2B9, 0x66646AAA, 0xCEB7855F, 0x6996F036, 0x3D096201, 0xF62357BD, 0xEB9AD670, 0x03F47DD9, 0x73D6AE65, 0xE5A30A27};
+u32 bch_GF_40becc[] = { 0xC07F, 0x89B1A0DC, 0x5D96619F, 0x32D04967, 0x54F6DE9D, 0x4F93F527, 0xEF14EFB0, 0xFD539915, 0xA82CCD92, 0x55288030, 0x477DEE3F, 0x338A59EC, 0x5FA210AF, 0xE2EFDFAE, 0xD244DF31, 0x4DA50762, 0xB724A002, 0x9CEF2DC1};
+u32 bch_GF_24becc[] = { 0x8E94, 0xE0248D90, 0x9D2B4525, 0x72D1EDD9, 0xD098FE73, 0x0E8E8D26, 0xC2D22893, 0xA3A0485B, 0xD0AB6E0B, 0x49929A35, 0x6BD430EF};
+u32 bch_GF_12becc[] = { 0xE487325, 0x6115A567, 0x84A6940A, 0x4C6E6D7E, 0x1205E051};
+u32 bch_GF_8becc[] = { 0x15, 0xF914E07B, 0x0C138741, 0xC5C4FB23};
+u32 bch_GF_4becc[] = { 0x45230, 0x43AB86AB};
+u32 *p_tmp;
+int i,len,width;
+
+ switch (ecc_mod_bits)
+ {
+ case 4 : width = 52; p_tmp = bch_GF_4becc; break;
+ case 8 : width = 104; p_tmp = bch_GF_8becc; break;
+ case 12 : width = 156; p_tmp = bch_GF_12becc; break;
+ case 24 : width = 336; p_tmp = bch_GF_24becc; break;
+ case 40 : width = 560; p_tmp = bch_GF_40becc; break;
+ case 60 : width = 840; p_tmp = bch_GF_60becc; break;
+ default : width = 52; p_tmp = bch_GF_4becc; break;
+ }
+len = width/32 +1;
+for(i=0;i<len;i++)
+ *(p_bch_gf2+i) = *(p_tmp+len-1-i);
+
+return (width);
+}
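+
+/*
+ * gen_gf2() copies the generator polynomial for the requested correction
+ * strength into p_bch_gf2, least significant word first, and returns its
+ * bit width.  The parity sizing in bch_encoder() follows from that width,
+ * e.g. for 24-bit ECC: width = 336, parity_len = 336/32 + 1 = 11 words and
+ * parity_msb_pos = 336 % 32 = 16.
+ */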
+//calculate ecc with 1 bit of input data
+u32 calc_1b_bch( u32 *parity_buf, u32 *bch_GF2, u32 din, u8 parity_len, u8 parity_msb_pos)
+{
+ //parity_buf: pointer to parity buffer
+ //bch_GF2: pointer to generation polynomial
+ //din: input data, bit31-1 should be 0, only bit0 is valid
+ //parity_len: parity length in DW
+ //parity_msb_pos: the msb position of parity
+ u8 i;
+ u32 mask = ~(0xffffffff << (parity_msb_pos));
+ u32 msb_word = mask & parity_buf[parity_len-1];
+ u32 parity_msb = msb_word >> (parity_msb_pos-1) ;
+
+ for(i=parity_len-1;i>0;i--)
+ {
+ parity_buf[i]= (parity_buf[i]<<1) +(parity_buf[i-1]>>31);
+ if(din ^ parity_msb) parity_buf[i]= parity_buf[i]^ bch_GF2[i];
+ }
+
+ parity_buf[0]= (parity_buf[0]<<1);
+ if(din ^ parity_msb) parity_buf[i]= parity_buf[i]^ bch_GF2[i];
+
+ return (parity_msb );
+}
+
+int bch_encoder(u32 *p_parity, u32 *p_data, u8 ecc_mod_bits, u32 datacnt)
+{
+ //p_parity: pointer to parity buffer
+ //p_data: pointer to input data
+ //ecc_mod_bits: ecc mode select, options are 4,8,12,24,40,60
+ //datacnt: data length in DW
+ int i,j;
+ int bchGF_width;
+ u8 parity_len,parity_msb_pos;
+ u32 bch_GF2[27]; //support 60becc, 105bytes 27DW
+ u32 tmp;
+ u32 *p;
+ u8 align_offset;
+
+ bchGF_width = gen_gf2( ecc_mod_bits, bch_GF2);
+ parity_len = (u8)(bchGF_width /32 + 1);
+ parity_msb_pos = (u8)(bchGF_width %32);
+ align_offset = 32 - parity_msb_pos;
+
+ //p = (u32 *)malloc((parity_len) * sizeof(u32));
+ p = (unsigned int *)kmalloc((parity_len) * sizeof(unsigned int), GFP_KERNEL);
+ if (p == NULL) {
+ printk(KERN_ERR "kmalloc failed\n");
+ return -1;
+ } else {
+ //initialize parity buffer
+ for(i=parity_len-1;i>=0;i--)
+ *(p+i) = 0;
+
+ //Calculate bit by bit
+ for (i=0;i<datacnt;i++) {
+ tmp = p_data[i];
+ for (j=0;j<32;j++) {
+ calc_1b_bch( p, bch_GF2, tmp&0x00000001, parity_len, parity_msb_pos);
+ tmp >>= 1;
+ }
+ }
+ //adjust parity align offset
+ for (i=parity_len -1 ; i >0; i--)
+ p[i] = (p[i] << align_offset) + (p[i-1] >> (32-align_offset));
+
+ p[0] = p[0] << align_offset;
+
+ //reverse parity order
+ for(i=0;i<parity_len;i++)
+ p_parity[parity_len-1-i] =reverse32(p[i]);
+ kfree(p); //release malloc
+ }
+ return 0;
+}
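+
+/*
+ * A minimal usage sketch for the encoder above; the caller, buffer sizes and
+ * data below are hypothetical, the real callers live elsewhere in this file.
+ */
+#if 0
+static int bch_encode_example(void)
+{
+ u32 data[128]; /* one 512-byte data chunk, as 32-bit words */
+ u32 parity[11]; /* 24-bit ECC needs 336/32 + 1 = 11 words */
+
+ memset(data, 0xFF, sizeof(data));
+ return bch_encoder(parity, data, 24, 128); /* returns 0 on success */
+}
+#endif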
+#endif //faster encode function
+
+#if 0 //old fast encode function
+
+unsigned int reverse32 (unsigned int n)
+{
+ int i=0;
+ unsigned int tmp = n, y=0;
+ for(;i<31;i++) {
+ y += tmp&0x00000001;
+ tmp >>= 1;
+ y <<= 1;
+ }
+ y += tmp&0x00000001;
+ return y;
+}
+
+int Gen_GF2(u8 bits, unsigned int *buf)
+{
+// assign bch_GF_40becc = 560'hC07F_89B1_A0DC_5D96_619F_32D0_4967_54F6_DE9D_4F93_F527_EF14_EFB0_FD53_9915_A82C_CD92_5528_8030_477D_EE3F_338A_59EC_5FA2_10AF_E2EF_DFAE_D244_DF31_4DA5_0762_B724_A002_9CEF_2DC1;
+// assign bch_GF_24becc = 560'h8E94_E024_8D90_9D2B_4525_72D1_EDD9_D098_FE73_0E8E_8D26_C2D2_2893_A3A0_485B_D0AB_6E0B_4992_9A35_6BD4_30EF;
+// assign bch_GF_12becc = 560'hE48_7325_6115_A567_84A6_940A_4C6E_6D7E_1205_E051;
+// assign bch_GF_8becc = 560'h15_F914_E07B_0C13_8741_C5C4_FB23;
+// assign bch_GF_4becc = 560'h4_5230_43AB_86AB;
+ unsigned int bch_GF_40becc[] = { 0xC07F, 0x89B1A0DC, 0x5D96619F, 0x32D04967, 0x54F6DE9D, 0x4F93F527, 0xEF14EFB0, 0xFD539915, 0xA82CCD92, 0x55288030, 0x477DEE3F, 0x338A59EC, 0x5FA210AF, 0xE2EFDFAE, 0xD244DF31, 0x4DA50762, 0xB724A002, 0x9CEF2DC1};
+ unsigned int bch_GF_24becc[] = { 0x8E94, 0xE0248D90, 0x9D2B4525, 0x72D1EDD9, 0xD098FE73, 0x0E8E8D26, 0xC2D22893, 0xA3A0485B, 0xD0AB6E0B, 0x49929A35, 0x6BD430EF};
+ unsigned int bch_GF_12becc[] = { 0xE487325, 0x6115A567, 0x84A6940A, 0x4C6E6D7E, 0x1205E051};
+ unsigned int bch_GF_8becc[] = { 0x15, 0xF914E07B, 0x0C138741, 0xC5C4FB23};
+ unsigned int bch_GF_4becc[] = { 0x45230, 0x43AB86AB};
+ unsigned int *p;
+ int i,len,width;
+
+ switch (bits) {
+ case 4 : width = 51; p = bch_GF_4becc; break;
+ case 8 : width = 103; p = bch_GF_8becc; break;
+ case 12 : width = 155; p = bch_GF_12becc; break;
+ case 24 : width = 335; p = bch_GF_24becc; break;
+ case 40 : width = 559; p = bch_GF_40becc; break;
+ default : width = 51; p = bch_GF_4becc; break;
+ }
+ len = width/32 +1;
+ for(i=0;i<len;i++)
+ buf[i] = *(p+len-1-i);
+
+ return (width);
+}
+
+unsigned int Caculat_1b_bch( unsigned int *pariA, unsigned int *bch_GF2, unsigned int din, u8 pari_len, u8 pari_lb)
+{
+ //din: bit31-1 should be 0, only bit0 is valid
+ //pari_len: the index of last DW of the parity
+ //pari_lb: the MSB of the last DW
+ u8 i;
+ unsigned int mask = ~(0xffffffff <<(pari_lb+1));
+ unsigned int lstdw = mask & pariA[pari_len];
+ unsigned int ldwMSB = lstdw >> pari_lb ;
+ // for(i=pari_len;i>=0;i--) printk("%8x",pariA[i]);printk("\n---before\n");
+ for(i=pari_len;i>0;i--) {
+ pariA[i]= (pariA[i]<<1) +(pariA[i-1]>>31);
+ if(din ^ ldwMSB) pariA[i] = pariA[i] ^ bch_GF2[i];
+ }
+ pariA[0]= (pariA[0]<<1);
+ if(din ^ ldwMSB) pariA[i] = pariA[i]^ bch_GF2[i];
+ // for(i=pari_len;i>=0;i--) printk("%8x",pariA[i]);printk("\n---after\n");
+ return (ldwMSB );
+}
+
+int bch_encoder(unsigned int *p_parity, unsigned int *p_data, u8 bits, unsigned int datacnt)
+{
+ int i,j;
+ int bchGF_msb;
+ u8 pari_len,pari_lb;
+ unsigned int bch_GF2[18];
+ unsigned int tmp;
+ unsigned int *p, *p1;
+ u8 *p2;//, p3[50];
+
+ bchGF_msb = Gen_GF2( bits, bch_GF2);
+ pari_len = (u8)(bchGF_msb /32);
+ pari_lb = (u8)(bchGF_msb %32);
+ //p = (unsigned int *)malloc((pari_len+2) * sizeof(unsigned int));
+ p = (unsigned int *)kmalloc((pari_len+2) * sizeof(unsigned int), GFP_KERNEL);
+ if (p == NULL) {
+ printk(KERN_ERR "kmalloc failed\n");
+ return -1;
+ } else {
+ /*gen parity[ bchGF_msb:0] begin*/
+ //Init
+ for(i=pari_len+1;i>=0;i--)
+ *(p+i) = 0;
+ //Calculate
+ p1 = &p[1];
+ for (i=0;i<datacnt;i++) {
+ tmp = p_data[i];
+ for (j=0;j<32;j++) {
+ Caculat_1b_bch( p1, bch_GF2, tmp&0x00000001, pari_len, pari_lb);
+ tmp >>= 1;
+ }
+ }
+ //printk("encode finiah!pari_len=%d p_parity=0x%x\n",pari_len, (unsigned int)p_parity);
+ /*gen parity[ bchGF_msb:0] end*/
+
+ /*reverse order of parity begin*/
+ p2 = (u8 *)p;
+ //printk("pari_lb=%d p2=0x%x\n", pari_lb, (unsigned int)p2);
+ p1 = (unsigned int *)(p2+3-(pari_lb/8));
+ /*p2 = (p2+3-(pari_lb/8));
+ for(i=0;i<((pari_len+1)*4);i++)
+ p3[i] = p2[i];
+ p1 = p3;
+ */
+ //printk("p2=0x%x p3=0x%x\n", (unsigned int)p2, (unsigned int)p3);
+ for(i=0;i<=pari_len;i++) {
+ p_parity[pari_len-i] = reverse32(p1[i]);
+ }
+ /*reverse order of parity end*/
+ //printk("reverse finiah!\n");
+ kfree(p); //release malloc
+ }
+ //printk("leave encode\n");
+ return 0;
+}
+#endif //old fast encode function
+
+#if 0 //slow encode function
+int encode_ecc(unsigned char *src_data, unsigned char *parity, unsigned int ecc_bit, unsigned char *c_len, unsigned int encode_len)
+{
+ //unsigned char src_data[512];//24
+ //unsigned char parity[26];//42
+ //unsigned char ecc_bit;
+ unsigned char c_len1 = *c_len;
+ unsigned int fail;
+
+ //char in_char;
+ int i;
+ //int j,in_v;
+
+
+
+ //for (i=0; i<encode_len; i++) src_data[i] = 0x00;
+ // for (i = 0; i < encode_len; i += 2) {
+ // src_data[i] = 0xFF&(jj>>8);
+ // src_data[i+1] = 0xFF&jj;
+ // jj++;
+ // jj %= 0x10000;
+ // src_data[i] = 0x12;
+ // src_data[i+1] = 0x12;
+ // }
+/*
+ i = 0; j = 0;
+ in_char = getchar();
+ while (in_char != EOF) {
+ in_v = hextoint(in_char);
+ if (in_v != -1) {
+ if (j==0) {
+ src_data[i] = 0;
+ src_data[i] += in_v * 16;
+ j++;
+ } else {
+ src_data[i] += in_v;
+ i++;
+ j = 0;
+ }
+ }
+ in_char = getchar();
+ }*/
+ //printk("start encode\n");
+ fail = wmt_bchencoder(src_data,parity,ecc_bit,&c_len1, encode_len);
+ if (fail)
+ printk("----------------Encode Error Detected! code=%d-----------------\n",fail);
+ else
+ *c_len = c_len1;
+ /*printk("\nCodeLengh=%d %d Parity=",*c_len, c_len1);
+ for (i=(c_len1-1); i>=0; i--)
+ printk("%02x ",parity[i]);
+ printk("\n");*/
+
+ return 0;
+}
+
+int hextoint(char hex)
+// Convert HEX number to Integer
+{
+ int r, h;
+ r = -1;
+ h = (int)hex;
+ if ((h >= 97) && (h <= 102))
+ r = h - 87;
+ else if ((h >= 65) && (h <= 70))
+ r = h - 55;
+ else if ((h >= 48) && (h <= 57))
+ r = h - 48;
+ else if ((h != 10) && (h != 13))
+ printk("Error detected!!! hex=%c",hex);
+ return r;
+}
+
+
+// This function is used to encode the BCH code for the input data
+// data : [IN] The information data to be encoded by BCH. The length of this buffer is fixed at 512 bytes.
+// bch_code : [OUT] Buffer pointer to keep the BCH code.
+// bits : [IN] The number of bits for the BCH error correcting capability.
+// bch_codelen : [IN/OUT] On input, the length of the bch_code buffer in bytes. On output, the length in bytes of the
+// encoded BCH parity for the selected error correcting capability.
+// RETURN : 0 indicates success. Nonzero indicates failure.
+unsigned int wmt_bchencoder (unsigned char *data, unsigned char *bch_code, unsigned char bits, unsigned char *bch_codelen, unsigned int encode_len)
+{
+ unsigned char bch_codelen_in;
+ unsigned char bch_i;
+ /*unsigned char b_data[MAX_BANK_SIZE*8];
+ unsigned char bch_sera[MAX_PARITY_SIZE*8];
+ unsigned char bch_sera_tmp[MAX_PARITY_SIZE*8];*/
+ unsigned char bch_sera_back;
+ unsigned int width;
+ unsigned int i,j,k;
+ unsigned long retval;
+ unsigned char offset;
+
+ unsigned char *bch_GF2;
+ /*unsigned char bch_GF_4becc[MAX_PARITY_SIZE*8] = {0,1,0,0,0,1,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,0,0,1,1,1,0,1,0,1,0,1,1,1,0,0,0,0,1,1,0,1,0,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+ unsigned char bch_GF_8becc[MAX_PARITY_SIZE*8] = {0,0,0,1,0,1,0,1,1,1,1,1,1,0,0,1,0,0,0,1,0,1,0,0,1,1,1,0,0,0,0,0,0,1,1,1,1,0,1,1,0,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,1,0,0,0,0,1,1,1,0,1,0,0,0,0,0,1,1,1,0,0,0,1,0,1,1,1,0,0,0,1,0,0,1,1,1,1,1,0,1,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+ unsigned char bch_GF_12becc[MAX_PARITY_SIZE*8] = {1,1,1,0,0,1,0,0,1,0,0,0,0,1,1,1,0,0,1,1,0,0,1,0,0,1,0,1,0,1,1,0,0,0,0,1,0,0,0,1,0,1,0,1,1,0,1,0,0,1,0,1,0,1,1,0,0,1,1,1,1,0,0,0,0,1,0,0,1,0,1,0,0,1,1,0,1,0,0,1,0,1,0,0,0,0,0,0,1,0,1,0,0,1,0,0,1,1,0,0,0,1,1,0,1,1,1,0,0,1,1,0,1,1,0,1,0,1,1,1,1,1,1,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,1,1,1,1,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+ unsigned char bch_GF_16becc[MAX_PARITY_SIZE*8] = {1,1,0,0,1,0,1,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,1,0,0,0,0,1,1,0,1,1,0,1,1,1,1,1,0,1,1,0,0,0,1,0,1,0,1,1,0,0,0,1,1,1,0,1,1,0,1,0,1,1,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1,1,1,1,0,1,1,1,1,0,1,0,1,0,1,0,0,1,0,0,0,1,0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,1,0,0,1,1,0,1,1,1,1,1,0,1,1,0,0,1,1,0,1,1,1,1,0,0,0,1,0,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,0,0,1,1,0,1,1,1,0,1,0,0,0,0,1,1,1,1,1,1,0,1,1,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+ unsigned char bch_GF_24becc[MAX_PARITY_SIZE*8] = {1,0,0,0,1,1,1,0,1,0,0,1,0,1,0,0,1,1,1,0,0,0,0,0,0,0,1,0,0,1,0,0,1,0,0,0,1,1,0,1,1,0,0,1,0,0,0,0,1,0,0,1,1,1,0,1,0,0,1,0,1,0,1,1,0,1,0,0,0,1,0,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,1,1,0,1,0,0,0,1,1,1,1,0,1,1,0,1,1,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,1,0,0,1,1,0,0,0,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,1,0,0,0,0,1,1,1,0,1,0,0,0,1,1,1,0,1,0,0,0,1,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,1,0,1,1,0,1,0,0,1,0,0,0,1,0,1,0,0,0,1,0,0,1,0,0,1,1,1,0,1,0,0,0,1,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,1,1,1,1,0,1,0,0,0,0,1,0,1,0,1,0,1,1,0,1,1,0,1,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,1,0,0,1,1,0,0,1,0,0,1,0,1,0,0,1,1,0,1,0,0,0,1,1,0,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+ unsigned char bch_GF_40becc[MAX_PARITY_SIZE*8] = {1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,1,0,0,1,1,0,1,1,0,0,0,1,1,0,1,0,0,0,0,0,1,1,0,1,1,1,0,0,0,1,0,1,1,1,0,1,1,0,0,1,0,1,1,0,0,1,1,0,0,0,0,1,1,0,0,1,1,1,1,1,0,0,1,1,0,0,1,0,1,1,0,1,0,0,0,0,0,1,0,0,1,0,0,1,0,1,1,0,0,1,1,1,0,1,0,1,0,1,0,0,1,1,1,1,0,1,1,0,1,1,0,1,1,1,1,0,1,0,0,1,1,1,0,1,0,1,0,0,1,1,1,1,1,0,0,1,0,0,1,1,1,1,1,1,0,1,0,1,0,0,1,0,0,1,1,1,1,1,1,0,1,1,1,1,0,0,0,1,0,1,0,0,1,1,1,0,1,1,1,1,1,0,1,1,0,0,0,0,1,1,1,1,1,1,0,1,0,1,0,1,0,0,1,1,1,0,0,1,1,0,0,1,0,0,0,1,0,1,0,1,1,0,1,0,1,0,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,1,0,0,1,0,1,0,1,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,1,0,0,0,1,1,1,0,1,1,1,1,1,0,1,1,1,1,0,1,1,1,0,0,0,1,1,1,1,1,1,0,0,1,1,0,0,1,1,1,0,0,0,1,0,1,0,0,1,0,1,1,0,0,1,1,1,1,0,1,1,0,0,0,1,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,1,0,1,1,1,1,1,1,1,0,0,0,1,0,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,0,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,1,0,0,1,1,0,1,1,1,1,1,0,0,1,1,0,0,0,1,0,1,0,0,1,1,0,1,1,0,1,0,0,1,0,1,0,0,0,0,0,1,1,1,0,1,1,0,0,0,1,0,1,0,1,1,0,1,1,1,0,0,1,0,0,1,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,1,1,1,0,0,1,1,1,0,1,1,1,1,0,0,1,0,1,1,0,1,1,1,0,0,0,0,0,1};*/
+
+ // initialization
+ retval = 0;
+ for(i=0; i<MAX_PARITY_SIZE*8; i++) {
+ bch_sera[i] = 0;
+ bch_sera_tmp[i] = 0;
+ }
+
+ for (i=0; i <=(encode_len*8); i++) {
+ if ((unsigned char)((unsigned int)(1<<(i%8))) & data[i/8])
+ b_data[i] = 1;
+ else
+ b_data[i] = 0;
+ }
+
+ // select width and poly-nominal
+ switch (bits) {
+ case 4 : width = 51; bch_GF2 = bch_GF_4becc; break;
+ case 8 : width = 103; bch_GF2 = bch_GF_8becc; break;
+ case 12 : width = 155; bch_GF2 = bch_GF_12becc; break;
+ case 16 : width = 207; bch_GF2 = bch_GF_16becc; break;
+ case 24 : width = 335; bch_GF2 = bch_GF_24becc; break;
+ case 40 : width = 559; bch_GF2 = bch_GF_40becc; break;
+ default : width = 51; bch_GF2 = bch_GF_4becc; retval += 1; break;
+ }
+
+ // calculate the parity
+ for (k=0; k<(encode_len*8); k++) {
+ bch_i = b_data[k];
+ bch_sera_back = bch_sera[width] ^ bch_i;
+ bch_sera_tmp[0] = bch_sera_back;
+ for (i=0; i<width; i++) {
+ bch_sera_tmp[i+1] = bch_sera[i] ^ (bch_sera_back * bch_GF2[width-(i+1)]);
+ }
+ for (i=0; i<=width; i++)
+ bch_sera[i] = bch_sera_tmp[i];
+ }
+
+ i = 0;
+ bch_code[0] = 0;
+ bch_codelen_in = *bch_codelen;
+ if(bits == 4 || bits == 12)
+ offset = 4;
+ else
+ offset = 0;
+ for (j = 0; j <= width; j++) {
+ *bch_codelen = i+1;
+ bch_code[i] += bch_sera[j] * (unsigned char)((unsigned int)(1<<(7-((j+offset)%8))));
+ if (i>=bch_codelen_in) {
+ retval += 2;
+ break;
+ }
+ if((j+offset)%8==7) {
+ i++;
+ bch_code[i] = 0;
+ }
+ }
+
+ return(retval);
+}
+#endif //end of #if 0 : slow encode function
+
+/**
+ * wmt_nand_read_page - hardware ecc syndrome based page read
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore
+ * we need a special oob layout and handling.
+ */
+static int wmt_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ uint8_t *bufpoi = chip->oob_poi;
+
+ //#ifdef WMT_SW_RDMZ
+ unsigned int rdmz_mark = 0;//,g1=0,g2=0,g3=0;
+ //#endif
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "\r enter in wmt_nand_read_page()page=0x%x\n", page);
+ #endif
+ if (info->oper_step)
+ bufpoi = chip->oob_poi+20;//bufpoi = chip->oob_poi+mtd->realoobsize;
+//g1 = wmt_read_oscr();
+ info->datalen = 0;
+ if (info->unc_allFF && !mtd->bbt_sw_rdmz) {
+ set_FIFO_FF((uint32_t *)buf, mtd->realwritesize/4);
+ } else
+ chip->read_buf(mtd, buf, mtd->realwritesize);
+//g2 = wmt_read_oscr();
+ if (chip->cur_chip && prob_end == 1 &&
+ (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX) {
+ if (!chip->realplanenum)
+ if (page < par4_ofs && second_chip == 0) {
+ #ifdef ESLC_DEBUG
+ if (page%mtd->pagecnt == 0 || page%mtd->pagecnt == (mtd->pagecnt/2))
+ printk("\nread: \n");
+ #endif
+ page = hynix_eslc_page_address_calculate(mtd, chip, page);
+ if (page < 0)
+ return 0;
+ }
+ }
+ /*if (page == 0xaa00) {
+ print_nand_buffer((uint8_t *)(info->reg+ECC_FIFO_0), 24);
+ rdmzier_oob((uint8_t *)bufpoi, (uint8_t *)(info->reg+ECC_FIFO_0), 6, info->cur_page, mtd->realwritesize/4);
+ print_nand_buffer((uint8_t *)bufpoi, 24);
+ }*/
+ /*if (info->cur_page != page) {
+ printk("cur_page=%x, page=%x\n", info->cur_page, page);
+ while(1);
+ }*/
+ //#ifdef WMT_SW_RDMZ
+ if (mtd->dwRdmz == 1 && mtd->bbt_sw_rdmz) {
+ //printk("check read page derdmz page= 0x%x\n", page);
+ rdmzier_oob((uint8_t *)&rdmz_mark, (uint8_t *)(info->reg+ECC_FIFO_5), 1, page, (mtd->realwritesize+20)/4);
+ if ((*(unsigned int *)(info->reg+ECC_FIFO_5)) == (*(unsigned int *)wmt_rdmz) ||
+ rdmz_mark == (*(unsigned int *)wmt_rdmz)) {
+ //printk("read page derdmz page= 0x%x\n", page);
+ rdmzier(buf, mtd->realwritesize/4, page);
+ }
+ }
+ //#endif
+
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & 0xF7, info->reg + NFCRd_OOB_CTRL);
+
+ //printk("re page=0x%x rdmz_mark=0x%x wmt_rdmz=0x%x fifo5=0x%x\n",page , rdmz_mark, *(unsigned int *)wmt_rdmz, *(unsigned int *)(info->reg+ECC_FIFO_5));
+ if (mtd->dwRdmz == 1 && rdmz_mark == *(unsigned int *)wmt_rdmz && mtd->bbt_sw_rdmz) {
+ //print_nand_buffer((uint8_t *)(info->reg+ECC_FIFO_0), 24);
+ rdmzier_oob((uint8_t *)bufpoi, (uint8_t *)(info->reg+ECC_FIFO_0), 5/*20/4*/, page, mtd->realwritesize/4);
+ //print_nand_buffer((uint8_t *)bufpoi, 24);
+ } else if (info->unc_allFF) {
+ set_FIFO_FF((uint32_t *)(bufpoi), 4);
+ } else
+ memcpy(bufpoi, info->reg+ECC_FIFO_0, 20);
+ /*print_nand_buffer((char *)(chip->oob_poi), 32);
+ print_nand_buffer((char *)(buf), 16);
+ printk("info->unc_bank=%x golden=%x\n", info->unc_bank, ((1<<info->banks)-1));*/
+ /*if (*(uint32_t *)(info->reg+ECC_FIFO_0) != 0xFFFFFFFF) {
+ printk(KERN_NOTICE "rd PID:%d Comm:%s sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x\n",
+ current->pid, current->comm, *(uint32_t *)(info->reg+ECC_FIFO_0),
+ *(uint32_t *)(info->reg+ECC_FIFO_1), *(uint32_t *)(info->reg+ECC_FIFO_2),
+ *(uint32_t *)(info->reg+ECC_FIFO_3), info->cur_page);
+ printk("info->unc_bank=%x golden=%x\n", info->unc_bank, ((1<<info->banks)-1));
+ }*/
+//g3 = wmt_read_oscr();
+ //printk(KERN_DEBUG"g12=%d,g23=%d\n",(g2-g1)/3,(g3-g1)/3);
+ return 0;
+}
+
+/**
+ * wmt_nand_read_page_noalign - hardware ecc syndrome based page read (noalign variant)
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore
+ * we need a special oob layout and handling.
+ */
+static int wmt_nand_read_page_noalign(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ uint8_t *bufpoi = chip->oob_poi;
+
+ unsigned int rdmz_mark = 0;
+
+ if (info->oper_step)
+ bufpoi = chip->oob_poi+20;
+
+ info->datalen = 0;
+ if (info->unc_allFF && !mtd->bbt_sw_rdmz) {
+ set_FIFO_FF((uint32_t *)buf, mtd->realwritesize/4);
+ } else
+ chip->read_buf(mtd, buf, mtd->realwritesize);
+
+ if (chip->cur_chip && prob_end == 1 &&
+ (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX) {
+ if (!chip->realplanenum)
+ if (page < par4_ofs && second_chip == 0) {
+ #ifdef ESLC_DEBUG
+ if (page%mtd->pagecnt == 0 || page%mtd->pagecnt == (mtd->pagecnt/2))
+ printk("\nread: \n");
+ #endif
+ page = hynix_eslc_page_address_calculate(mtd, chip, page);
+ if (page < 0)
+ return 0;
+ }
+ }
+
+ if (mtd->dwRdmz == 1 && mtd->bbt_sw_rdmz) {
+ //printk("check read page derdmz page= 0x%x\n", page);
+ rdmzier_oob((uint8_t *)&rdmz_mark, (uint8_t *)(info->dmabuf + mtd->realwritesize + 20), 1, page, (mtd->realwritesize+20)/4);
+ if ((*(unsigned int *)(info->dmabuf + mtd->realwritesize + 20)) == (*(unsigned int *)wmt_rdmz) ||
+ rdmz_mark == (*(unsigned int *)wmt_rdmz)) {
+ //printk("read page derdmz page= 0x%x\n", page);
+ rdmzier(buf, mtd->realwritesize/4, page);
+ }
+ }
+
+
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & 0xF7, info->reg + NFCRd_OOB_CTRL);
+
+ if (mtd->dwRdmz == 1 && rdmz_mark == *(unsigned int *)wmt_rdmz && mtd->bbt_sw_rdmz) {
+ //print_nand_buffer((uint8_t *)(info->dmabuf + mtd->realwritesize), 24);
+ rdmzier_oob((uint8_t *)bufpoi, (uint8_t *)(info->dmabuf + mtd->realwritesize /*+ 20*/), 5, page, mtd->realwritesize/4);
+ //print_nand_buffer((uint8_t *)bufpoi, 24);
+ } else if (info->unc_allFF) {
+ set_FIFO_FF((uint32_t *)(bufpoi), 6);
+ } else
+ memcpy(bufpoi, info->dmabuf + mtd->realwritesize, 20);
+ //print_nand_buffer((char *)(chip->oob_poi), 32);
+ /*print_nand_buffer((char *)(buf), 16);
+ printk("info->unc_bank=%x golden=%x\n", info->unc_bank, ((1<<info->banks)-1));*/
+
+ /*if (*(uint32_t *)(info->reg+ECC_FIFO_0) != 0xFFFFFFFF) {
+ printk(KERN_NOTICE "rd PID:%d Comm:%s sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x\n",
+ current->pid, current->comm, *(uint32_t *)(info->reg+ECC_FIFO_0),
+ *(uint32_t *)(info->reg+ECC_FIFO_1), *(uint32_t *)(info->reg+ECC_FIFO_2),
+ *(uint32_t *)(info->reg+ECC_FIFO_3), info->cur_page);
+ printk("info->unc_bank=%x golden=%x\n", info->unc_bank, ((1<<info->banks)-1));
+ }*/
+ /*if (info->dmabuf[0] == 1)
+ printk( "R%x:%x ", page, *(uint32_t *)info->dmabuf);*/
+/*printk(KERN_DEBUG "RPG=0x%x : 0x%x 0x%x 0x%x 0x%x\n", page, *(uint32_t *)info->dmabuf,
+*((uint32_t *)info->dmabuf+1), *((uint32_t *)info->dmabuf+2), *((uint32_t *)info->dmabuf+3));*/
+ return 0;
+}
+
+#if 0
+static int wmt_nand_cp_data(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int rdmz_mark = 0;
+
+ info->datalen = 0;
+ if (info->unc_allFF && !mtd->bbt_sw_rdmz) {
+ set_FIFO_FF((uint32_t *)buf, mtd->realwritesize/4);
+ } else
+ chip->read_buf(mtd, buf, mtd->realwritesize);
+
+ if (mtd->dwRdmz == 1 && mtd->bbt_sw_rdmz) {
+ rdmzier_oob((uint8_t *)&rdmz_mark, (uint8_t *)(info->reg+ECC_FIFO_5), 1, page, (mtd->realwritesize+20)/4);
+ if ((*(unsigned int *)(info->reg+ECC_FIFO_5)) == (*(unsigned int *)wmt_rdmz) ||
+ rdmz_mark == (*(unsigned int *)wmt_rdmz)) {
+ rdmzier(buf, mtd->realwritesize/4, page);
+ }
+ }
+ return 0;
+}
+
+static int wmt_nand_cp_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ uint8_t *bufpoi = chip->oob_poi;
+ unsigned int rdmz_mark = 0;
+
+ if (mtd->dwRdmz == 1 && mtd->bbt_sw_rdmz)
+ rdmzier_oob((uint8_t *)&rdmz_mark, (uint8_t *)(info->reg+ECC_FIFO_5), 1, page, (mtd->realwritesize+20)/4);
+
+ if (mtd->dwRdmz == 1 && rdmz_mark == *(unsigned int *)wmt_rdmz && mtd->bbt_sw_rdmz) {
+ rdmzier_oob((uint8_t *)bufpoi, (uint8_t *)(info->reg+ECC_FIFO_0), 5, page, mtd->realwritesize/4);
+ } else if (info->unc_allFF) {
+ set_FIFO_FF((uint32_t *)(bufpoi), 4);
+ } else
+ memcpy(bufpoi, info->reg+ECC_FIFO_0, 20);
+
+ return 0;
+}
+#endif
+
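+/*
+ * reset_nfc - soft-reset the NAND flash controller.
+ * step bit0: save seven NFC registers (into buf when given, or an internal
+ *            backup for step==3) and assert the soft reset.
+ * step bit1: wait for the controller to go idle, release the reset, wait for
+ *            the chip to become ready, then restore the saved registers.
+ */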
+int reset_nfc(struct mtd_info *mtd, unsigned int *buf, int step)
+{
+ int ret = 0;
+ unsigned int backup1[7], *backup;
+ //unsigned int t1, t2;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+
+ backup = backup1;
+ if (step != 3 && buf != NULL)
+ backup = buf;
+
+ if (step&1) {
+ backup[0] = readl(info->reg + NFCR9_ECC_BCH_CTRL);
+ backup[1] = readl(info->reg + NFCRe_CALC_TADL);
+ backup[2] = readl(info->reg + NFCR10_OOB_ECC_SIZE);
+ backup[3] = readl(info->reg + NFCR12_NAND_TYPE_SEL);
+ backup[4] = readl(info->reg + NFCR13_INT_MASK);
+ backup[5] = readl(info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+ backup[6] = readl(info->reg + NFCR7_DLYCOMP);
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ writew(1, info->reg + NFCR12_NAND_TYPE_SEL);
+ writeb(0x2, info->reg + NFCR11_SOFT_RST);
+ }
+ if (step&2) {
+ ret = NFC_WAIT_IDLE(mtd);
+ if (ret)
+ printk("reset nfc, wait idle time out\n");
+ writeb(0x0, info->reg + NFCR11_SOFT_RST);
+
+ ret = wmt_wait_chip_ready(mtd);
+ if (ret) {
+ printk(KERN_ERR "reset nfc, The chip is not ready\n");
+ print_nand_register(mtd);
+ while(1);
+ }
+ writeb(B2R, info->reg + NFCRb_NFC_INT_STAT);
+ writeb(0, info->reg + NFCRd_OOB_CTRL);
+ writel(backup[0], info->reg + NFCR9_ECC_BCH_CTRL);
+ writel(backup[1], info->reg + NFCRe_CALC_TADL);
+ writel(backup[2], info->reg + NFCR10_OOB_ECC_SIZE);
+ writel(backup[3], info->reg + NFCR12_NAND_TYPE_SEL);
+ writel(backup[4], info->reg + NFCR13_INT_MASK);
+ writel(backup[5], info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+ writel(backup[6], info->reg + NFCR7_DLYCOMP);
+ }
+
+ return ret;
+}
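+
+/* nfc_hw_rdmz - turn the hardware data randomizer on or off via NFCRf_CALC_RDMZ */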
+void nfc_hw_rdmz(struct mtd_info *mtd, int on)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ if (on)
+ writel(RDMZ/*|(page%RDMZ)*/, info->reg + NFCRf_CALC_RDMZ);
+ else
+ writel(0, info->reg + NFCRf_CALC_RDMZ);
+}
+
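+/*
+ * hw_encode_oob - run an OOB-only controller cycle (OOB_READ set, data phase
+ * disabled) so the OOB ECC bytes get encoded; the BCH mode is temporarily
+ * capped at mode 5 for the OOB area and restored afterwards.
+ */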
+int hw_encode_oob(struct mtd_info *mtd)
+{
+ int ret = 0;
+ unsigned int ecc_mode, oob_ecc_mode, tmp;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+ tmp = readb(info->reg + NFCR9_ECC_BCH_CTRL);
+ ecc_mode = tmp & ECC_MODE;
+ oob_ecc_mode = ecc_mode;
+ if (ecc_mode > 5)
+ oob_ecc_mode = 5;
+
+ if (oob_ecc_mode != ecc_mode)
+ writeb((tmp & (~ECC_MODE)) | oob_ecc_mode, info->reg + NFCR9_ECC_BCH_CTRL);
+
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) | OOB_READ, info->reg + NFCRd_OOB_CTRL);
+ writew(DPAHSE_DISABLE|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ ret = NFC_WAIT_IDLE(mtd);
+ if (ret)
+ printk("hw encode oob idle time out\n");
+
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & (~OOB_READ), info->reg + NFCRd_OOB_CTRL);
+
+ if (oob_ecc_mode != ecc_mode)
+ writeb(tmp, info->reg + NFCR9_ECC_BCH_CTRL);
+
+ return ret;
+}
+
+/************************Johnny Liu****************************************************/
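+/*
+ * wmt_multi_plane_read - two-plane page read.
+ * Issues the MULTI_READ command cycles, then reads plane 0 and plane 1
+ * (page and page + pagecnt). Any plane that reported an uncorrectable ECC
+ * error is retried with a normal single-plane NAND_CMD_READ0.
+ */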
+static int wmt_multi_plane_read(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+/*
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+ int div = mtd->erasesize / mtd->writesize;
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+
+ wmt_nand_read_page(mtd, chip, buf, page);
+
+ info->oper_step = 1;
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page+div);
+
+ wmt_nand_read_page(mtd, chip, buf+mtd->realwritesize, page+div);
+
+ info->oper_step = 0;
+
+ return 0;
+*/
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int tmp, ret = 0, plane_0_uncor_err = 0, plane_1_uncor_err = 0;
+
+ info->cur_lpage = page;
+
+ tmp = cache_read_data(mtd, chip, page, buf);
+ if (!tmp) {
+ //printk("re lpg=0x%x from cache\n", page);
+ return 0;
+ }
+
+ if (page >= ((mtd->blkcnt - 8)*mtd->pagecnt))
+ mtd->bbt_sw_rdmz = 1;
+ else
+ mtd->bbt_sw_rdmz = 0;
+
+ //printk("re pg=%x bbt_sw_rdmz=%d hold=%x blkcnt=%d\n", page, mtd->bbt_sw_rdmz, ((mtd->blkcnt - 8)*mtd->pagecnt), mtd->blkcnt);
+
+ if (chip->cur_chip && prob_end == 1 && (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX) {
+ #ifdef ESLC_READ_WRITE
+ if (page < par4_ofs && second_chip == 0) {
+ //printk("multi read page=%x ",page);
+ page = hynix_eslc_page_address_calculate(mtd, chip, page);
+ //printk("eslc cal page0=%x page1=0x%x \n", (page / mtd->pagecnt) * mtd->pagecnt + page,
+ //(page / mtd->pagecnt) * mtd->pagecnt + page + mtd->pagecnt);
+ if (page < 0)
+ return 0;
+ }
+ #endif
+ }
+
+ page = (page / mtd->pagecnt) * mtd->pagecnt + page;//dan_multi 65->129, 129->257
+ info->unc_bank = 0;
+ info->unc_allFF = 0;
+ if (/*(0xFF&(mtd->id>>24)) != NAND_MFR_MICRON && (0xFF&(mtd->id>>24)) != NAND_MFR_INTEL &&*/ (0xFF&(mtd->id>>24)) != NAND_MFR_TOSHIBA) {
+
+ #ifdef WMT_HW_RDMZ
+ tmp = DIS_BCH_ECC & readb(info->reg + NFCR9_ECC_BCH_CTRL);
+ if (mtd->dwRdmz) {
+ if (mtd->bbt_sw_rdmz || tmp) {
+ if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+ reset_nfc(mtd, NULL, 3);
+ } else
+ nfc_hw_rdmz(mtd, 1);
+ }
+ #endif
+
+ chip->cmdfunc(mtd, MULTI_READ_1CYCLE, -1, page);
+ chip->cmdfunc(mtd, MULTI_READ_2CYCLE, 0x00, page);
+
+ if (info->data_ecc_uncor_err == 0) {
+ //printk("multi read plane0page=%x\n",page);
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ ret = wmt_nand_read_page_noalign(mtd, chip, buf, page);
+ else
+ ret = wmt_nand_read_page(mtd, chip, buf, page);
+ if (ret) {
+ printk("multi read plane0 data fail\n");
+ ret = 1;
+ } else
+ ret = 0;
+ } else
+ plane_0_uncor_err = 1;
+ info->oper_step = 1;
+ info->unc_bank = 0;
+ info->unc_allFF = 0;
+ chip->cmdfunc(mtd, MULTI_READ_2CYCLE, 0x00, page + mtd->pagecnt);
+
+ if (info->data_ecc_uncor_err == 0) {
+ //printk("multi read plane1 page=%x\n", page+mtd->pagecnt);
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ ret = wmt_nand_read_page_noalign(mtd, chip, buf+mtd->realwritesize, page + mtd->pagecnt);
+ else
+ ret = wmt_nand_read_page(mtd, chip, buf+mtd->realwritesize, page + mtd->pagecnt);
+ if (ret) {
+ printk("multi read plane1 data fail\n");
+ ret = 1;
+ } else
+ ret = 0;
+ } else
+ plane_1_uncor_err = 1;
+ } else {
+ plane_0_uncor_err = 1;
+ plane_1_uncor_err = 1;
+ }
+ //print_nand_buffer((uint8_t *)buf, mtd->writesize);
+
+ info->oper_step = 0;
+ if (plane_0_uncor_err == 1) {
+ //printk("multi read plane_0_uncor_err\n");
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ ret = wmt_nand_read_page_noalign(mtd, chip, buf, page);
+ else
+ ret = wmt_nand_read_page(mtd, chip, buf, page);
+ if (ret)
+ ret = 1;
+ else
+ ret = 0;
+ }
+ info->oper_step = 1;
+ if (plane_1_uncor_err == 1) {
+ //printk("multi read plane_1_uncor_err\n");
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page + mtd->pagecnt);
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ ret = wmt_nand_read_page_noalign(mtd, chip, buf+mtd->realwritesize, page + mtd->pagecnt);
+ else
+ ret = wmt_nand_read_page(mtd, chip, buf+mtd->realwritesize, page + mtd->pagecnt);
+ if (ret)
+ ret = 1;
+ else
+ ret = 0;
+ }
+ info->oper_step = 0;
+ //printk("mrp=%d=0x%x\n", page, page);
+ //print_nand_buffer((uint8_t *)chip->oob_poi, 48);
+ /*printk(KERN_NOTICE "re sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x PID:%d Comm:%s\n",
+ *(uint32_t *)(chip->oob_poi+ECC_FIFO_0),
+ *(uint32_t *)(chip->oob_poi+4), *(uint32_t *)(chip->oob_poi+8),
+ *(uint32_t *)(chip->oob_poi+12), page,current->pid, current->comm);*/
+ if (ret)
+ printk("----------multi read ret=%d\n", ret);
+ return ret;
+}
+
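+/*
+ * wmt_nand_write_page_lowlevel_noalign - low-level DMA write path used for
+ * non-power-of-two page sizes: the 24 OOB bytes are appended to the data in
+ * the DMA buffer (instead of the ECC FIFO) and the DMA length is padded by
+ * 1024 bytes.
+ */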
+static void wmt_nand_write_page_lowlevel_noalign(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int b2r_stat, backup[7];//, w1, w2, w3; backup[] must hold the 7 registers saved by reset_nfc()
+ uint8_t *bufpoi = chip->oob_poi;
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "enter in wmt_nand_page_write_lowlevel() writesize %x\n", mtd->realwritesize);
+ #endif
+
+ info->dma_finish = 0;
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ writeb(0x1B, info->reg + NFCR13_INT_MASK);
+
+ if (mtd->dwRdmz == 1) {
+ *(unsigned int *)(bufpoi+20) = *(unsigned int *)wmt_rdmz;
+ }
+
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & 0xF7, info->reg + NFCRd_OOB_CTRL);
+ //memcpy(info->reg+ECC_FIFO_0, bufpoi, 24);
+
+ //print_nand_buffer((uint8_t *)(info->reg+ECC_FIFO_0), 32);
+ if(!chip->realplanenum) {
+ info->datalen = 0;
+ reset_nfc(mtd, backup, 1);
+ chip->write_buf(mtd, buf, mtd->writesize);
+ memcpy(info->dmabuf + mtd->realwritesize, bufpoi, 24);
+ memset(info->dmabuf + mtd->realwritesize+24, 0x55, 24);
+ reset_nfc(mtd, backup, 2);
+ //hw_encode_oob(mtd);
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize+1024, 1, 0, -1);
+ //print_nand_buffer((uint8_t *)(info->dmabuf+6144), 32);print_nand_register(mtd);
+ } else if (chip->realplanenum && info->datalen == 0) {
+ //printk("copybuf 1\n");
+ //w1 = wmt_read_oscr();
+ reset_nfc(mtd, backup, 1);
+ chip->write_buf(mtd, buf, mtd->realwritesize);
+ memcpy(info->dmabuf + mtd->realwritesize, bufpoi, 24);
+ memset(info->dmabuf + mtd->realwritesize+24, 0x55, 24);
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 1)
+ rdmzier_oob((info->dmabuf + mtd->realwritesize), (info->dmabuf + mtd->realwritesize), 1024/4, info->cur_page, mtd->realwritesize/4);
+ //w2 = wmt_read_oscr();
+ reset_nfc(mtd, backup, 2);
+ //hw_encode_oob(mtd);
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);
+ //w3 = wmt_read_oscr();
+ //printk(KERN_DEBUG "w2-w1=%d w3-w1=%d---------------\n",w2-w1, w3-w1);
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize+1024, 1, 0, -1);
+ //print_nand_register(mtd);
+ } else if (info->datalen == mtd->writesize) {
+ //printk("copybuf 2\n");
+ //info->datalen = mtd->realwritesize;
+ //chip->write_buf(mtd, buf, mtd->writesize);
+ memcpy(info->dmabuf, buf+mtd->realwritesize, mtd->realwritesize);
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize+1024, 1, 0, 2);
+ //print_nand_register(mtd);
+ }
+/*printk(KERN_DEBUG "WPG=0x%x : 0x%x 0x%x 0x%x 0x%x\n", info->cur_page, *(uint32_t *)info->dmabuf,
+*((uint32_t *)info->dmabuf+1), *((uint32_t *)info->dmabuf+2), *((uint32_t *)info->dmabuf+3));*/
+ /*if ((info->cur_page%256) == 0)dannier
+ printk(KERN_NOTICE "wr PID:%d Comm:%s sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x\n",
+ current->pid, current->comm, *(uint32_t *)(info->reg+ECC_FIFO_0),
+ *(uint32_t *)(info->reg+ECC_FIFO_1), *(uint32_t *)(info->reg+ECC_FIFO_2),
+ *(uint32_t *)(info->reg+ECC_FIFO_3), info->cur_page);*/
+}
+//extern unsigned int wmt_read_oscr(void);
+/**
+ * wmt_nand_write_page_lowlevel - hardware ECC syndrome based page write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ *
+ * The hardware generator calculates the error syndrome automatically.
+ * Therefore we need a special oob layout and handling.
+ */
+static void wmt_nand_write_page_lowlevel(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int b2r_stat, backup[7];//, w1, w2, w3; backup[] must hold the 7 registers saved by reset_nfc()
+ uint8_t *bufpoi = chip->oob_poi;
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "enter in wmt_nand_page_write_lowlevel() writesize %x\n", mtd->realwritesize);
+ #endif
+
+ info->dma_finish = 0;
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ writeb(0x1B, info->reg + NFCR13_INT_MASK);
+
+ if (mtd->dwRdmz == 1) {
+ *(unsigned int *)(bufpoi+20) = *(unsigned int *)wmt_rdmz;
+ }
+
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & 0xF7, info->reg + NFCRd_OOB_CTRL);
+ if (mtd->dwRdmz == 1 && mtd->bbt_sw_rdmz) {
+ //print_nand_buffer((uint8_t *)bufpoi, 24);
+ printk(KERN_NOTICE "wr sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x PID:%d Comm:%s\n",
+ *(uint32_t *)(chip->oob_poi+0),
+ *(uint32_t *)(chip->oob_poi+4), *(uint32_t *)(chip->oob_poi+8),
+ *(uint32_t *)(chip->oob_poi+12), info->cur_page,
+ current->pid, current->comm);
+
+ rdmzier_oob((uint8_t *)(info->reg+ECC_FIFO_0), (uint8_t *)bufpoi, 6, info->cur_page, mtd->realwritesize/4);
+ //print_nand_buffer((uint8_t *)(info->reg+ECC_FIFO_0), 64);
+ } else
+ memcpy(info->reg+ECC_FIFO_0, bufpoi, 24);
+
+
+ //print_nand_buffer((uint8_t *)(info->reg+ECC_FIFO_0), 32);
+ if(!chip->realplanenum) {
+ info->datalen = 0;
+ reset_nfc(mtd, backup, 1);
+ chip->write_buf(mtd, buf, mtd->writesize);
+ reset_nfc(mtd, backup, 2);
+ hw_encode_oob(mtd);
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize, 1, 0, -1);
+ } else if (chip->realplanenum && info->datalen == 0) {
+ //printk("copybuf 1\n");
+ //w1 = wmt_read_oscr();
+ reset_nfc(mtd, backup, 1);
+ chip->write_buf(mtd, buf, mtd->writesize);
+ //w2 = wmt_read_oscr();
+ reset_nfc(mtd, backup, 2);
+ hw_encode_oob(mtd);
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);
+ //w3 = wmt_read_oscr();
+ //printk(KERN_DEBUG "w2-w1=%d w3-w1=%d---------------\n",w2-w1, w3-w1);
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize, 1, 0, -1);
+ //print_nand_register(mtd);
+ } else if (info->datalen == mtd->writesize) {
+ //printk("copybuf 2\n");
+ //info->datalen = mtd->realwritesize;
+ //chip->write_buf(mtd, buf, mtd->writesize);
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize, 1, 0, 2);
+ //print_nand_register(mtd);
+ }
+//while(info->datalen);
+
+ #if 0
+ if (info->cur_lpage >= 19456/*992768*/) {
+ if (strcmp(current->comm, "yaffs-bg-1") == 0) {
+ printk(KERN_NOTICE "wr PID:%d Comm:%s sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x\n",
+ current->pid, current->comm, *(uint32_t *)(info->reg+ECC_FIFO_0),
+ *(uint32_t *)(info->reg+ECC_FIFO_1), *(uint32_t *)(info->reg+ECC_FIFO_2),
+ *(uint32_t *)(info->reg+ECC_FIFO_3), info->cur_page);
+ #if 0
+ } else if (strcmp(current->comm, "cp") == 0 /*&& *(uint32_t *)(info->reg+ECC_FIFO_2) > 0x1f90*/
+ && lst_chunkid == 0 /*&& *(uint32_t *)(info->reg+ECC_FIFO_2) != (lst_chunkid+1)*/) {
+ printk(KERN_NOTICE "wr PID:%d Comm:%s sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x\n",
+ current->pid, current->comm, *(uint32_t *)(info->reg+ECC_FIFO_0),
+ *(uint32_t *)(info->reg+ECC_FIFO_1), *(uint32_t *)(info->reg+ECC_FIFO_2),
+ *(uint32_t *)(info->reg+ECC_FIFO_3), info->cur_page);
+ lst_chunkid = 11;
+ //#endif
+ } else if (strcmp(current->comm, "cp") == 0 && *(uint32_t *)(info->reg+ECC_FIFO_2) > 0x1f60) {
+ chunk[idx] = *(uint32_t *)(info->reg+ECC_FIFO_2);
+ cpg[idx] = info->cur_page;
+ idx++;
+ } else if (strcmp(current->comm, "sync") == 0) {
+ printk(KERN_NOTICE "wr PID:%d Comm:%s sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x\n",
+ current->pid, current->comm, *(uint32_t *)(info->reg+ECC_FIFO_0),
+ *(uint32_t *)(info->reg+ECC_FIFO_1), *(uint32_t *)(info->reg+ECC_FIFO_2),
+ *(uint32_t *)(info->reg+ECC_FIFO_3), info->cur_page);
+ if (*(uint32_t *)(info->reg+ECC_FIFO_0) == 0x21 && *(uint32_t *)(info->reg+ECC_FIFO_2) == 0x4)
+ print_nand_buffer((char *)info->dmabuf, mtd->realwritesize);
+ #endif
+ }
+ }
+ #endif
+}
+
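+/*
+ * hynix_eslc_mode_change - for Hynix chips, remap writes that fall into the
+ * eSLC region (below par4_ofs on the first chip) or into the BBT blocks:
+ * the page address is recalculated, the target block is erased at each
+ * half-block boundary, and ESLC mode is enabled on the chip; outside that
+ * region ESLC mode is switched back to the default. Returns the (possibly
+ * remapped) page, or a negative value if the write should be skipped.
+ */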
+static int hynix_eslc_mode_change(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ if (chip->cur_chip && (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX /*&& mtd->dwRetry*/) {
+ #ifdef ESLC_READ_WRITE
+ #ifdef ESLC_DEBUG
+ int ori_page = page;
+ #endif
+ if ((page < par4_ofs && second_chip == 0) || (page >= (mtd->blkcnt-8)*mtd->pagecnt)) {
+ //printk("page=0x%x\n", page);
+ //dump_stack();
+ //while(1);
+ if (page < (mtd->blkcnt-8)*mtd->pagecnt) {
+ page = hynix_eslc_page_address_calculate(mtd, chip, page);
+ if (page < 0)
+ return -1;
+ if (page%(mtd->pagecnt/2) == 0) {
+ if(chip->realplanenum == 0) {
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+ } else if(chip->realplanenum == 1) {
+ wmt_multi_plane_erase(mtd, page);
+ }
+ #ifdef ESLC_DEBUG
+ printk("eslc erase page=0x%x => eslc page = 0x%x when write.\n", ori_page, page);
+ #endif
+ }
+ }
+ if (eslc_write != 2) {
+ eslc_write = 2;
+ chip->cur_chip->set_parameter(mtd, ESLC_MODE, ECC_ERROR_VALUE);
+ #ifdef ESLC_DEBUG
+ printk(KERN_WARNING "page=0x%x----ENABLE ESLC", ori_page);
+ if (page >= (mtd->blkcnt-8)*mtd->pagecnt) {
+ printk(KERN_WARNING "(BBT) page%x,bbtpage=%x pagecnt=%d, blkcnt=%d\n", page, (mtd->blkcnt-8)*mtd->pagecnt,mtd->pagecnt, mtd->blkcnt);
+ dump_stack();
+ } else
+ printk(KERN_WARNING "\n");
+ #endif
+ }
+ } else if (eslc_write == 2) {
+ chip->cur_chip->set_parameter(mtd, ESLC_MODE, DEFAULT_VALUE);
+ eslc_write = 0;
+ #ifdef ESLC_DEBUG
+ printk(KERN_NOTICE "page=0x%x****DIS ESLC\n", page);
+ #endif
+ }
+ #endif
+ }
+ return page;
+}
+
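+/*
+ * cache_read_data - serve a page read from the write cache.
+ * Returns 0 and copies the cached data/OOB into buf and chip->oob_poi when
+ * the requested page is still held in wr_cache, 1 when the caller must read
+ * it from the flash.
+ */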
+int cache_read_data(struct mtd_info *mtd, struct nand_chip *chip, int page, const uint8_t *buf)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int cache_index;
+
+ if (prob_end == 0)
+ return 1;
+
+ if (wr_cache == NULL)
+ return 1;
+
+ cache_index = page%mtd->pagecnt;
+ cache_index %= WR_BUF_CNT;
+
+ if (info->wr_page[cache_index] == page && page >= 0) {
+ if (buf)
+ memcpy((char *)buf, wr_cache+(cache_index*(mtd->writesize+32)), mtd->writesize);
+ memcpy(chip->oob_poi, wr_cache+(cache_index*(mtd->writesize+32)) + mtd->writesize, 32);
+ return 0;
+ }
+ return 1;
+}
+
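+/*
+ * cache_write_data - remember the page being written.
+ * The page data plus 32 OOB bytes are copied into the wr_cache slot selected
+ * by (page % pagecnt) % WR_BUF_CNT; the cache is re-initialised whenever a
+ * write starts at the first page of a block.
+ */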
+void cache_write_data(struct mtd_info *mtd, struct nand_chip *chip, int page, const uint8_t *buf)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int cache_index;
+
+ cache_index = page%mtd->pagecnt;
+ cache_index %= WR_BUF_CNT;
+
+ if (wr_cache == NULL)
+ return;
+
+ if ((page%mtd->pagecnt) == 0 && prob_end == 1)
+ init_wr_cache(mtd);
+
+ if (prob_end == 1) {
+ info->wr_page[cache_index] = page;//printk("wr-cache lpage[%d]=0x%x\n", cache_index, page);
+ memcpy(wr_cache+(cache_index*(mtd->writesize+32)), buf, mtd->writesize);
+ memcpy(wr_cache+(cache_index*(mtd->writesize+32)) + mtd->writesize, chip->oob_poi, 32);
+ }
+}
+
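+/*
+ * wmt_nand_write_page - single-plane page program path: cache the data,
+ * apply eSLC remapping and software/hardware randomization as needed, run
+ * the low-level DMA write, then issue SEQIN/PAGEPROG and check the chip
+ * status.
+ */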
+static int wmt_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+const uint8_t *buf, int page, int cached, int raw)
+{
+ int status;
+ uint8_t *tmp_buf = (uint8_t *)buf;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "enter in wmt_nand_write_page() raw = %d\n", raw);
+ #endif
+
+ cache_write_data(mtd, chip, page, buf);
+ info->cur_lpage = page;
+
+ if (page > ((mtd->blkcnt - 8)*mtd->pagecnt))
+ mtd->bbt_sw_rdmz = 1;
+ else
+ mtd->bbt_sw_rdmz = 0;
+
+ page = hynix_eslc_mode_change(mtd, chip, page);
+
+ if (page < 0)
+ return 0;
+
+ info->cur_page = page;
+ wmb();
+
+ if (mtd->dwRdmz) {
+ if (mtd->bbt_sw_rdmz) {
+ if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+ reset_nfc(mtd, NULL, 3);
+ tmp_buf = buf_rdmz;
+ memcpy(tmp_buf, buf, mtd->realwritesize);//print_nand_buffer(tmp_buf, 64);
+ rdmzier(tmp_buf, mtd->realwritesize/4, page);//print_nand_buffer(tmp_buf, 64);
+ } else
+ nfc_hw_rdmz(mtd, 1);
+ }
+
+ info->datalen = 0;
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ wmt_nand_write_page_lowlevel_noalign(mtd, chip, tmp_buf);
+ else
+ chip->ecc.write_page(mtd, chip, tmp_buf);
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+ status = nand_pdma_handler(mtd);
+ nand_free_pdma(mtd);
+ if (status)
+ printk(KERN_ERR "check write pdma handler status= %x \n", status);
+
+ /*
+ * Cached programming is disabled for now; not sure if it is worth the
+ * trouble. The speed gain is not very impressive (2.3->2.6MiB/s).
+ */
+ cached = 0;
+
+ if (!cached || !(chip->options & NAND_CACHEPRG)) {
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ #ifdef WMT_HW_RDMZ
+ if (mtd->dwRdmz) {
+ //nfc_hw_rdmz(mtd, 1);
+ writeb(0, info->reg + NFCR4_COMPORT3_4);
+ }
+ #endif
+
+ status = chip->waitfunc(mtd, chip);
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ /*
+ * See if the operation failed and additional status checks are
+ * available.
+ */
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+ status = chip->errstat(mtd, chip, FL_WRITING, status, page);
+
+ if (status & NAND_STATUS_FAIL)
+ goto GO_EIO;//return -EIO;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
+
+ #ifdef WMT_HW_RDMZ
+ if (mtd->dwRdmz) {
+ if (mtd->bbt_sw_rdmz) {
+ if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+ reset_nfc(mtd, NULL, 3);
+ tmp_buf = buf_rdmz;
+ memcpy(tmp_buf, buf, mtd->realwritesize);
+ rdmzier(tmp_buf, mtd->realwritesize/4, page);
+ } else
+ nfc_hw_rdmz(mtd, 1);
+ writeb(0, info->reg + NFCR4_COMPORT3_4);
+ }
+ #endif
+
+ status = chip->waitfunc(mtd, chip);
+ }
+
+ #ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+ /* Send command to read back the data */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ if (chip->verify_buf(mtd, buf, mtd->realwritesize))
+ goto GO_EIO;//return -EIO;
+ #endif
+ return 0;
+
+GO_EIO:
+ return -EIO;
+}
+int abcc;
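+
+/*
+ * wmt_multi_plane_program - two-plane page program.
+ * Plane 0 is loaded with command 0x80 followed by 0x11, plane 1 with 0x81
+ * (0x80 again for Micron parts), then a single PAGEPROG commits both planes.
+ * A failing status is recorded in chip->status_plane[] for later handling.
+ */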
+static int wmt_multi_plane_program(struct mtd_info *mtd, struct nand_chip *chip,
+const uint8_t *buf, int page, int cached, int raw)
+{
+ int status, page_plane1;
+
+ uint8_t *tmp_buf = (uint8_t *)buf;
+ int pagecnt = mtd->pagecnt, p1;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ #ifdef ESLC_DEBUG
+ int p0 = page;
+ #endif
+
+ cache_write_data(mtd, chip, page, buf);//logical address
+ info->cur_lpage = page;
+
+ if (page > ((mtd->blkcnt-8)*mtd->pagecnt))
+ mtd->bbt_sw_rdmz = 1;
+ else
+ mtd->bbt_sw_rdmz = 0;
+
+ //printk("multi program page %d => page1=%d page2=%d\n", page, (page/pagecnt) * pagecnt + page, (page<<1) - (page%pagecnt));
+ p1 = page = hynix_eslc_mode_change(mtd, chip, page);
+
+ if (page < 0)
+ return 0;
+
+ page = (page /pagecnt) * pagecnt + page;//1->1, 128 -> 256, 256->512
+ page_plane1 = page + pagecnt;
+ #ifdef ESLC_DEBUG
+ if (p0 != p1)
+ printk("multi program page 0x%x eslc 0x%x => page1=0x%x page2=0x%x \n", p0, p1, page, page_plane1);
+ #endif
+
+ info->lst_wpage = page;
+ //page_plane1 = hynix_eslc_mode_change(mtd, chip, page_plane1);
+ //printk("mw p1=%x page %x => page plane1=%x\n", p1, page, page_plane1);
+
+ info->cur_page = page;
+ wmb();
+ if (mtd->dwRdmz) {
+ if (mtd->bbt_sw_rdmz) {
+ if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+ reset_nfc(mtd, NULL, 3);
+ tmp_buf = buf_rdmz;
+ memcpy(tmp_buf, buf, mtd->writesize);
+ rdmzier(tmp_buf, mtd->realwritesize/4, page);
+ //memcpy(tmp_buf, buf+mtd->realwritesize, mtd->realwritesize);
+ rdmzier(tmp_buf+mtd->realwritesize, mtd->realwritesize/4, page_plane1);
+ } else
+ nfc_hw_rdmz(mtd, 1);
+ }
+
+ info->datalen = 0;
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ wmt_nand_write_page_lowlevel_noalign(mtd, chip, tmp_buf);
+ else
+ chip->ecc.write_page(mtd, chip, tmp_buf);
+ chip->cmdfunc(mtd, 0x80, 0x00, page);
+ status = nand_pdma_handler(mtd);
+ nand_free_pdma(mtd);
+ if (status)
+ printk(KERN_ERR "check write pdma handler status= %x \n", status);
+ /***********************Johnny Liu start**************************************/
+ chip->cmdfunc(mtd, 0x11,-1,-1);
+
+ info->datalen = mtd->writesize;//need
+ info->cur_page = page_plane1;
+
+ /*#ifdef WMT_SW_RDMZ
+ if (mtd->dwRdmz == 1) {
+ tmp_buf = buf_rdmz;
+ //memcpy(tmp_buf, buf+mtd->realwritesize, mtd->realwritesize);
+ //rdmzier(tmp_buf, mtd->realwritesize/4, page_plane1);
+ }
+ #endif
+ #ifdef WMT_HW_RDMZ
+ if (mtd->dwRdmz)
+ nfc_hw_rdmz(mtd, 1);
+ #endif*/
+
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ wmt_nand_write_page_lowlevel_noalign(mtd, chip, tmp_buf);
+ else
+ chip->ecc.write_page(mtd, chip, tmp_buf);
+ if ((0xFF&(mtd->id>>24)) == NAND_MFR_MICRON)
+ chip->cmdfunc(mtd, 0x80, 0x00, page_plane1);
+ else
+ chip->cmdfunc(mtd, 0x81, 0x00, page_plane1);
+
+ status = nand_pdma_handler(mtd);
+ nand_free_pdma(mtd);
+ if (status)
+ printk(KERN_ERR "check write pdma handler status= %x \n", status);
+ /************************Johnny Liu end*************************************/
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ #ifdef WMT_HW_RDMZ
+ if (mtd->dwRdmz) {
+ //nfc_hw_rdmz(mtd, 1);
+ writeb(0, info->reg + NFCR4_COMPORT3_4);
+ }
+ #endif
+ status = chip->waitfunc(mtd, chip);
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+
+ if (chip->realplanenum && (status & NAND_STATUS_FAIL)) {
+ printk(KERN_ERR "multi write page=0x%x fail status= %x\n", page, status);
+ //dump_stack();
+ /*if (abcc != 13479) {
+ status = 0xe3;//0xe5;
+ abcc = 13479;
+ printk("write page=%x error abv=%d\n", page, abcc);
+ dump_stack();
+ }*/
+ chip->status_plane[0] = page;
+ chip->status_plane[1] = status;
+ }
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat)) {
+ printk(KERN_ERR "write fail status= %x\n", status);
+ status = chip->errstat(mtd, chip, FL_WRITING, status, page);
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+#if 0
+static int wmt_multi_plane_copy(struct mtd_info *mtd, struct nand_chip *chip,
+int source, int des)
+{
+// printk("\n copy data from %d to %d",source, des);
+ unsigned int page = 0;
+
+ int div = mtd->erasesize / mtd->writesize;
+ page = (source / div) * div + source;
+ chip->cmdfunc(mtd, MULTI_COPY_1CYCLE, 0x00, page);
+
+ chip->cmdfunc(mtd, MULTI_COPY_2CYCLE, 0x00, page);
+ chip->cmdfunc(mtd, MULTI_COPY_2CYCLE, 0x00, page + div);
+
+ page = (des / div) * div + des;
+ chip->cmdfunc(mtd, MULTI_COPY_3CYCLE, 0x00, page);
+
+ return 0;
+}
+static int wmt_nand_copy_page(struct mtd_info *mtd, struct nand_chip *chip,
+int source, int des)
+{
+ unsigned int page = 0;
+ //int status = -1;
+ //printk("\n copy data from %d to %d", source, des);
+ //First, we calculate the source page
+ page = source;
+ //Copy back read cycle
+ chip->cmdfunc(mtd, COPY_BACK_1CYCLE, 0x00, page);
+
+ //Second, we calculate the des page
+ page = des;
+ //Copy back program cycle
+ chip->cmdfunc(mtd, COPY_BACK_2CYCLE, 0x00, page);
+ return 0;
+}
+#endif
+
+#if 0
+/**
+ * wmt_errstat - perform additional error status checks
+ * @mtd: MTD device structure
+ * @this: NAND chip structure
+ * @state: state of the operation
+ * @status: status code returned from read status
+ * @page: start page inside the chip; must be called with (page & this->pagemask)
+ *
+ * Perform additional error status checks on erase and write failures
+ * to determine if errors are correctable. For this device, correctable
+ * 1-bit errors on erase and write are considered acceptable.
+ *
+ *
+ */
+static int wmt_errstat(struct mtd_info *mtd, struct nand_chip *this,
+ int state, int status, int page)
+{
+ int er_stat = 0;
+ int rtn, retlen;
+ size_t len;
+ uint8_t *buf;
+ int i;
+
+ this->cmdfunc(mtd, NAND_CMD_STATUS_CLEAR, -1, -1);
+
+ if (state == FL_ERASING) {
+
+ for (i = 0; i < 4; i++) {
+ if (!(status & 1 << (i + 1)))
+ continue;
+ this->cmdfunc(mtd, (NAND_CMD_STATUS_ERROR + i + 1),
+ -1, -1);
+ rtn = this->read_byte(mtd);
+ this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
+
+ /* err_ecc_not_avail */
+ //if (!(rtn & ERR_STAT_ECC_AVAILABLE))
+ //er_stat |= 1 << (i + 1);
+ }
+
+ } else if (state == FL_WRITING) {
+
+ unsigned long corrected = mtd->ecc_stats.corrected;
+
+ /* single bank write logic */
+ this->cmdfunc(mtd, NAND_CMD_STATUS_ERROR, -1, -1);
+ rtn = this->read_byte(mtd);
+ this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
+
+ if (!(rtn & ERR_STAT_ECC_AVAILABLE)) {
+ /* err_ecc_not_avail */
+ er_stat |= 1 << 1;
+ goto out;
+ }
+
+ len = mtd->writesize;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf) {
+ er_stat = 1;
+ goto out;
+ }
+
+ /* recovery read */
+ rtn = nand_do_read(mtd, page, len, &retlen, buf);
+
+ /* if read failed or > 1-bit error corrected */
+ if (rtn || (mtd->ecc_stats.corrected - corrected) > 1)
+ er_stat |= 1 << 1;
+ kfree(buf);
+ }
+out:
+ rtn = status;
+ if (er_stat == 0) { /* if ECC is available */
+ rtn = (status & ~NAND_STATUS_FAIL); /* clear the error bit */
+ }
+
+ return rtn;
+}
+#endif
+
+/* wmt_nand_init_chip
+ *
+ * init a single instance of an chip
+ */
+
+static void wmt_nand_init_chip(struct mtd_info *mtd,
+struct ECC_size_info *ECC_size)
+{
+ //struct nand_chip *chip = &nmtd->chip;
+ //struct mtd_info *mtd = &nmtd->mtd;
+ struct nand_chip *chip = mtd->priv;
+
+ /* chip->cmd_ctrl = wmt_nand_hwcontrol;*/
+ #if 0
+ switch (info->cpu_type) {
+ case TYPE_wmt:
+ break;
+
+ case TYPE_vt8620:
+ break;
+
+ case TYPE_vt8610:
+ break;
+ }
+ #endif
+
+ /* nmtd->set = set;*/
+ if (hardware_ecc) {
+ /* chip->ecc.calculate = wmt_nand_calculate_ecc;*/
+ /* chip->ecc.correct = wmt_nand_correct_data;*/
+
+ /*if (mtd->realwritesize == 2048) {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 8;
+ chip->ecc.steps = 4;
+ chip->ecc.layout = &wmt_oobinfo_2048;
+ chip->ecc.prepad = 1;
+ chip->ecc.postpad = 8;
+ } else if (mtd->realwritesize == 4096) {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 20;
+ chip->ecc.steps = 8;
+ chip->ecc.layout = &wmt_oobinfo_4096;
+ chip->ecc.prepad = 1;
+ chip->ecc.postpad = 8;
+ } else if (mtd->realwritesize == 8192) {
+ chip->ecc.size = 1024;
+ chip->ecc.bytes = 42;
+ chip->ecc.steps = 8;
+ chip->ecc.layout = &wmt_oobinfo_8192;
+ chip->ecc.prepad = 1;
+ chip->ecc.postpad = 8;
+ } else if (mtd->realwritesize == 16384) {
+ chip->ecc.size = 1024;
+ chip->ecc.bytes = 70;
+ chip->ecc.steps = 16;
+ chip->ecc.layout = &wmt_oobinfo_16k;
+ chip->ecc.prepad = 1;
+ chip->ecc.postpad = 8;
+ } else { // 512 page
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.steps = 1;
+ chip->ecc.layout = &wmt_oobinfo_512;
+ chip->ecc.prepad = 4;
+ chip->ecc.postpad = 9;
+ }*/
+ chip->ecc.size = (mtd->realwritesize/ECC_size->banks);
+ chip->ecc.bytes = ECC_size->ECC_bytes;
+ chip->ecc.steps = ECC_size->banks;
+
+ chip->write_page = wmt_nand_write_page;
+ //chip->copy_page = wmt_nand_copy_page;
+
+ chip->ecc.write_page = wmt_nand_write_page_lowlevel;
+ chip->ecc.write_oob = wmt_nand_write_oob;
+ chip->ecc.read_page = wmt_nand_read_page;
+ chip->ecc.read_oob = wmt_nand_read_oob_single;
+
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ chip->ecc.read_page = wmt_nand_read_page_noalign;
+
+ chip->ecc.read_bb_oob = wmt_nand_read_bb_oob;
+ chip->erase_cmd = wmt_single_plane_erase;
+ if (chip->realplanenum) {
+ chip->write_page = wmt_multi_plane_program;
+ //chip->copy_page = wmt_multi_plane_copy;
+ chip->ecc.read_page = wmt_multi_plane_read;
+ chip->erase_cmd = wmt_multi_plane_erase;
+ chip->ecc.write_oob = wmt_nand_write_oob_plane;
+ chip->ecc.read_oob = wmt_nand_read_oob_plane;
+ chip->ecc.read_bb_oob = wmt_nand_read_bb_oob_plane;
+ }
+
+
+ /* switch (info->cpu_type) {*/
+ /* case TYPE_wmt:*/
+ chip->ecc.hwctl = wmt_nand_enable_hwecc;
+ /* chip->ecc.calculate = wmt_nand_calculate_ecc;*/
+ /* break;*/
+ #if 0
+ case TYPE_vt8620:
+ chip->ecc.hwctl = vt8620_nand_enable_hwecc;
+ chip->ecc.calculate = vt86203_nand_calculate_ecc;
+ break;
+
+ case TYPE_vt8610:
+ chip->ecc.hwctl = vt8610_nand_enable_hwecc;
+ chip->ecc.calculate = vt8610_nand_calculate_ecc;
+ break;
+ #endif
+ } else
+ chip->ecc.mode = NAND_ECC_SOFT;
+}
+
+
+static int wmt_nand_remove(struct platform_device *pdev)
+{
+ struct wmt_nand_info *info = dev_get_drvdata(&pdev->dev);
+
+ /* struct mtd_info *mtd = dev_get_drvdata(pdev);*/
+ dev_set_drvdata(&pdev->dev, NULL);
+ /* platform_set_drvdata(pdev, NULL);*/
+ /* dev_set_drvdata(pdev, NULL);*/
+ if (info == NULL)
+ return 0;
+
+ /* first thing we need to do is release all our mtds
+ * and their partitions, then go through freeing the
+ * resources used
+ */
+
+ if (info->mtds != NULL) {
+ struct wmt_nand_mtd *ptr = info->mtds;
+ /* int mtdno;*/
+
+ /* for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {*/
+ /* pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);*/
+ nand_release(&ptr->mtd);
+ /* }*/
+ kfree(info->mtds);
+ }
+
+ /* free the common resources */
+
+ if (info->reg != NULL) {
+ //iounmap(info->reg);
+ info->reg = NULL;
+ }
+
+ if (info->area != NULL) {
+ release_resource(info->area);
+ kfree(info->area);
+ info->area = NULL;
+ }
+ kfree(info);
+ if (buf_rdmz)
+ vfree(buf_rdmz);
+ remove_proc_entry(NANDINFO, NULL);
+ return 0;
+}
+
+#if 0
+/*Lch */
+static int wmt_recovery_call(struct notifier_block *nb, unsigned long code, void *_cmd)
+{
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+
+ mtd = container_of(nb, struct mtd_info, reboot_notifier);
+ chip = (struct nand_chip *)mtd->priv;
+ if(chip->cur_chip && (((mtd->id >>24)&0xff) == NAND_MFR_HYNIX)) {
+ auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+ #ifdef RETRY_DEBUG
+ printk("current try times: %d\n", chip->cur_chip->cur_try_times);
+ #endif
+ chip->select_chip(mtd, 0);
+ chip->cur_chip->set_parameter(mtd, READ_RETRY_MODE, DEFAULT_VALUE);
+ //chip->cur_chip->get_parameter(mtd,READ_RETRY_MODE);
+ chip->select_chip(mtd, -1);
+ }
+ return NOTIFY_DONE;
+
+ mtd = container_of(nb, struct mtd_info, reboot_notifier);
+
+ if((code == SYS_RESTART) && _cmd) {
+ char *cmd = _cmd;
+ if (!strcmp(cmd, "recovery")) {
+ err = search_mtd_table("android-data", &ret1);
+ ret = (int)ret1;
+ if (!err) {
+ // printk(KERN_EMERG "Lch jump2 android-data wmt_recovery_call.ret =%d\n",ret);
+ struct erase_info einfo;
+ loff_t to;
+ memset(&einfo, 0, sizeof(einfo));
+ to = nand_partitions[ret].offset;
+ einfo.mtd = mtd;
+ einfo.addr = (unsigned long)to;
+ einfo.len = nand_partitions[ret].size;
+
+ // printk("android-data einfo.addr is %8.8x\n",einfo.addr);
+ // printk("android-data einfo.len is %8.8x\n",einfo.len);
+ // printk("android-data nand_partitions[%d].offset is %8.8x\n",ret,nand_partitions[ret].offset);
+ // printk("android-data nand_partitions[%d].size is %8.8x\n",ret,nand_partitions[ret].size);
+ ret = nand_erase_nand(mtd, &einfo, 0xFF);
+ if (ret < 0)
+ printk("enand_erase_nand result is %x\n",ret);
+ }
+
+ err = search_mtd_table("android-cache", &ret1);
+ ret = (int)ret1;
+ if (!err) {
+ // printk(KERN_EMERG "Lch jump3 wmt_recovery_call.android-cache ret=%d\n",ret);
+ struct erase_info einfo;
+ loff_t to;
+ memset(&einfo, 0, sizeof(einfo));
+ to = nand_partitions[ret].offset;
+ einfo.mtd = mtd;
+ einfo.addr = (unsigned long)to;
+ einfo.len = nand_partitions[ret].size;
+
+ // printk("android-cache einfo.addr is %8.8x\n",einfo.addr);
+ // printk("android-cache einfo.len is %8.8x\n",einfo.len);
+ // printk("android-data nand_partitions[%d].offset is %8.8x\n",ret,nand_partitions[ret].offset);
+ // printk("android-data nand_partitions[%d].size is %8.8x\n",ret,nand_partitions[ret].size);
+ ret = nand_erase_nand(mtd, &einfo, 0xFF);
+ if (ret < 0)
+ printk("enand_erase_nand result is %x\n",ret);
+ }
+ }
+ }
+ return NOTIFY_DONE;
+}
+#endif
+
+/**********************************************************************
+Name : nfc_pdma_isr
+Function : NAND PDMA transfer-complete interrupt handler; masks the DMA
+           interrupt and completes the transfer waited on via info->done_data.
+Calls :
+Called by :
+Parameter : irq - interrupt number; dev_id - pointer to struct wmt_nand_info
+Author : Dannier Chen
+History :
+***********************************************************************/
+static irqreturn_t nfc_pdma_isr(int irq, void *dev_id)
+{
+ struct wmt_nand_info *info = (struct wmt_nand_info *)dev_id;
+ struct mtd_info *mtd = &info->mtds->mtd;
+ disable_irq_nosync(irq);
+ //spin_lock(&host->lock);
+ writel(0, info->reg + NFC_DMA_IER);
+ wmb();
+ //writel(/*readl(info->reg + NFC_DMA_ISR)&*/NAND_PDMA_IER_INT_STS, info->reg + NFC_DMA_ISR);
+ //printk(" pdmaisr finish NFC_DMA_ISR=0x%x\n", readl(info->reg + NFC_DMA_ISR));
+ //print_nand_register(mtd);
+ info->dma_finish++;
+ WARN_ON(info->done_data == NULL);
+ if (info->done_data == NULL) {
+ printk(" pdmaisr finish pointer is null info->dma_finish=%d\n", info->dma_finish);
+ print_nand_register(mtd);
+ dump_stack();
+ //while(1);
+ }
+ if (info->done_data != NULL) {
+ complete(info->done_data);
+ info->done_data = NULL;
+ }
+ //info->done = NULL;
+ //spin_unlock(&host->lock);
+ enable_irq(irq);
+
+ return IRQ_HANDLED;
+}
+
+/**********************************************************************
+Name : nfc_regular_isr
+Function : NFC regular interrupt handler: runs BCH ECC correction for page
+           reads (isr_cmd 0) and OOB reads (isr_cmd 0x50), and handles B2R
+           completion for erase/write commands.
+Calls :
+Called by :
+Parameter : irq - interrupt number; dev_id - pointer to struct wmt_nand_info
+Author : Dannier Chen
+History :
+***********************************************************************/
+//static irqreturn_t nfc_regular_isr(int irq, void *dev_id, struct pt_regs *regs)
+irqreturn_t nfc_regular_isr(int irq, void *dev_id)
+{
+
+ struct wmt_nand_info *info = dev_id;
+ struct mtd_info *mtd = &info->mtds->mtd;
+ unsigned int bank_stat1, bank_stat2=0,status = 0, intsts;
+
+ disable_irq_nosync(irq);
+ //spin_lock(&host->lock);
+ //printk("isrCMD=0x%x\n", info->isr_cmd);
+ if (info->isr_cmd == 0) {
+ //print_nand_register(mtd);
+ bank_stat1 = readb(info->reg + NFCRb_NFC_INT_STAT);
+ if (bank_stat1&(ERR_CORRECT | BCH_ERR)) {
+ while ((bank_stat1&(ERR_CORRECT|BCH_ERR)) != (ERR_CORRECT|BCH_ERR)) {
+ bank_stat1 = readb(info->reg + NFCRb_NFC_INT_STAT);
+ bank_stat2++;
+ if (bank_stat2 >= 0x10000) {
+ printk("ecc error, but ecc correct not assert ecc status=0x%x\n",bank_stat1);
+ print_nand_register(mtd);
+ //while(1);
+ break;
+ }
+ }
+ writeb((B2R | ERR_CORRECT | BCH_ERR), info->reg + NFCRb_NFC_INT_STAT);
+ bank_stat2 = readw(info->reg + NFCR9_ECC_BCH_CTRL);
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE" BCH Read data ecc eror page_addr:%x cmd=%d\n", info->cur_page, info->isr_cmd);
+ #endif
+ if ((bank_stat2 & BANK_DR) || info->oob_ecc_error == 0x50) {
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ bch_data_last_bk_ecc_correct_noalign(mtd);
+ else
+ bch_data_last_bk_ecc_correct(mtd);
+ } else {
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ bch_data_ecc_correct_noalign(mtd);
+ else
+ bch_data_ecc_correct(mtd);
+ }
+ } else {
+ printk("read page error but not ecc error sts=0x%x\n",bank_stat1);
+ print_nand_register(mtd);
+ //while(1);
+ }
+ } else if (info->isr_cmd == 0x50) {
+ //print_nand_register(mtd);
+ wmt_wait_nfc_ready(info);
+ bank_stat1 = readb(info->reg + NFCRb_NFC_INT_STAT);
+ if (bank_stat1&(ERR_CORRECT | BCH_ERR)) {
+ while ((bank_stat1&(ERR_CORRECT|BCH_ERR)) != (ERR_CORRECT|BCH_ERR)) {
+ bank_stat2++;
+ bank_stat1 = readb(info->reg + NFCRb_NFC_INT_STAT);
+ if (bank_stat2 >= 0x10000) {
+ printk("oob ecc error, but ecc correct not assert ecc status=0x%x\n",bank_stat1);
+ print_nand_register(mtd);
+ //while(1);
+ break;
+ }
+ }
+ bank_stat2 = readb(info->reg + NFCRd_OOB_CTRL)&OOB_READ;
+ if (!bank_stat2)
+ printk("oob cmd error, but oob flag is not set\n");
+ bch_redunt_ecc_correct(mtd);
+ }
+ writeb((B2R | ERR_CORRECT | BCH_ERR), info->reg + NFCRb_NFC_INT_STAT);
+ status = NFC_WAIT_IDLE(mtd);
+ if (status)
+ printk("B2R isr not ecc error occurs, but idle fail\n");
+ WARN_ON(info->done_data == NULL);
+ complete(info->done_data);
+ info->done_data = NULL;
+ } else /*if (info->isr_cmd != 0 && info->isr_cmd != 0x50) */{
+ /* only erase/write operation enter for B2R interrupt */
+ intsts = readb(info->reg + NFCRb_NFC_INT_STAT);
+ if (intsts&B2R) {
+ writeb(B2R, info->reg + NFCRb_NFC_INT_STAT);
+ if (readb(info->reg + NFCRb_NFC_INT_STAT) & B2R)
+ printk("[nfc_isr] erase/write cmd B2R staus can't clear\n");
+ } else
+ printk("[nfc_isr] erase/write cmd B2R staus not assert\n");
+
+ status = (readb(info->reg + NFCR13_INT_MASK)&0xFF);
+ if ((status&0x1C) != 0x18) {
+ printk("[nfc_isr] isr is not check busy interrup =0x%x\n", status);
+ dump_stack();
+ print_nand_register(mtd);
+ //while(info->isr_cmd);
+ }
+
+ WARN_ON(info->done_data == NULL);
+ complete(info->done_data);
+ info->done_data = NULL;
+ }
+ //spin_unlock(&host->lock);
+ enable_irq(irq);
+
+ return IRQ_HANDLED;
+}
+
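+/*
+ * wmt_set_logo_offset - publish the byte offsets of the u-boot-logo and
+ * kernel-logo partitions through the wmt.nfc.mtd.* environment variables,
+ * updating them only when the stored value differs.
+ */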
+static void wmt_set_logo_offset(void)
+{
+ int ret1;
+ int err = 0, ret = 0, status = 0, i;
+ unsigned char varval[100], tmp[100];
+ unsigned int varlen;
+ unsigned long long offs_data = 0;
+
+ err = search_mtd_table("u-boot-logo", &ret1);
+ ret = (int) ret1;
+ varlen = 100;
+ status = wmt_getsyspara("wmt.nfc.mtd.u-boot-logo", tmp, &varlen);
+ for (i = 0; i < ret; i++)
+ offs_data += nand_partitions[i].size;
+ sprintf(varval, "0x%llx", offs_data);
+ if (!status && (strcmp(varval, tmp) == 0))
+ status = 0;
+ else
+ status = 1;
+ if (!err && status) {
+ ret = wmt_setsyspara("wmt.nfc.mtd.u-boot-logo", varval);
+ if (ret)
+ printk(KERN_NOTICE "write u-boot-logo offset to env fail\n");
+ } else if (err)
+ printk(KERN_NOTICE "search u-boot-logo partition fail\n");
+
+ err = search_mtd_table("kernel-logo", &ret1);
+ ret = (int) ret1;
+ varlen = 100;
+ status = wmt_getsyspara("wmt.nfc.mtd.kernel-logo", tmp, &varlen);
+ offs_data = 0;
+ for (i = 0; i < ret; i++)
+ offs_data += nand_partitions[i].size;
+ sprintf(varval, "0x%llx", offs_data);
+ if (!status && (strcmp(varval, tmp) == 0))
+ status = 0;
+ else
+ status = 1;
+ if (!err && status) {
+ ret = wmt_setsyspara("wmt.nfc.mtd.kernel-logo", varval);
+ if (ret)
+ printk(KERN_NOTICE "write kernel-logo offset to env fail\n");
+ } else if (err)
+ printk(KERN_NOTICE "search kernel-logo partition fail\n");
+
+}
+
+#if 0
+static void wmt_set_partition_info(struct nand_chip *chip)
+{
+ int ret = 0, status = 0, i, j;
+ unsigned char varval[256], tmp[256];
+ unsigned int varlen = 256;
+ unsigned int offs_data, size;
+
+ varval[0] = '\0';
+ for (i = 0; i < NUM_NAND_PARTITIONS; i++) {
+ if (&nand_partitions[i]) {
+ offs_data = 0;
+ for (j = 0; j < i; j++)
+ offs_data += (unsigned int)(nand_partitions[j].size>>20);
+ if (i < (NUM_NAND_PARTITIONS - 1))
+ size = (unsigned int)(nand_partitions[i].size>>20);
+ else
+ size = (unsigned int)(chip->chipsize>>20) - offs_data;
+
+ if (i == 0)
+ sprintf(tmp, "%dm@%dm(%s)", size, offs_data, nand_partitions[i].name);
+ else
+ sprintf(tmp, ",%dm@%dm(%s)", size, offs_data, nand_partitions[i].name);
+ strcat(varval, tmp);
+ } else
+ break;
+ }
+ printk(KERN_DEBUG "fbparts=%s\n", varval);
+ status = wmt_getsyspara("fbparts", tmp, &varlen);
+ if (status) {
+ printk(KERN_DEBUG "fbparts not found varlen=256=>%d\n", varlen);
+ ret = wmt_setsyspara("fbparts", varval);
+ } else {
+ if (strcmp(tmp, varval) != 0) {
+ printk(KERN_DEBUG "tmp=%s\n", tmp);
+ printk(KERN_WARNING "fbparts not sync => update\n");
+ ret = wmt_setsyspara("fbparts", varval);
+ } else
+ printk(KERN_DEBUG "fbparts env compare pass\n");
+ }
+ if (ret)
+ printk(KERN_ERR "set fbparts env fail\n");
+}
+#endif
+
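+/*
+ * set_ecc_info - translate mtd->dwECCBitNum into the BCH engine mode, program
+ * the OOB ECC size register, derive the per-bank and OOB layout fields in
+ * wmt_nand_info, and hook up the page read/write callbacks via
+ * wmt_nand_init_chip().
+ */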
+void set_ecc_info(struct mtd_info *mtd)
+{
+ unsigned int ecc_bit_mode;
+ struct ECC_size_info ECC_size, *ECC_size_pt;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+ ecc_bit_mode = mtd->dwECCBitNum;
+ if (ecc_bit_mode > 40)
+ ecc_bit_mode = (ecc_bit_mode == 60) ? 7 : (-1);
+ else if (ecc_bit_mode > 24)
+ ecc_bit_mode = (ecc_bit_mode == 40) ? 6 : (-1);
+ else
+ ecc_bit_mode = (ecc_bit_mode > 16) ? ((ecc_bit_mode/4) - 1) : (ecc_bit_mode/4);
+
+ info->ECC_mode = ECC_size.ecc_engine = ecc_bit_mode;
+ calculate_ECC_info(mtd, &ECC_size);
+ writew((ECC_size.oob_ECC_bytes<<8) /*+ (ECC_size.unprotect&0xFF)*/, info->reg + NFCR10_OOB_ECC_SIZE);
+ info->oob_ECC_bytes = ECC_size.oob_ECC_bytes;
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1) {
+ info->last_bank_dmaaddr = info->dmaaddr + mtd->realwritesize;
+ info->oob_col = mtd->realwritesize + (ECC_size.ECC_bytes * ECC_size.banks);
+ info->last_bank_col = info->oob_col;
+ } else {
+ info->last_bank_dmaaddr = info->dmaaddr + mtd->realwritesize - ECC_size.bank_size;
+ info->oob_col = mtd->realwritesize + (ECC_size.ECC_bytes * (ECC_size.banks-1));
+ info->last_bank_col = info->oob_col - ECC_size.bank_size;
+ }
+ info->oob_ECC_mode = ECC_size.oob_ECC_mode;
+ info->oob_ecc_error = 0;
+ info->banks = ECC_size.banks;
+ info->bank_size = ECC_size.bank_size;
+ info->oob_max_bit_error = ECC_size.oob_max_bit_error;
+
+ ECC_size_pt = &ECC_size;
+ wmt_nand_init_chip(mtd, ECC_size_pt);
+
+ printk(KERN_DEBUG "last_bank_dmaaddr=0x%x banks=%d\n", info->last_bank_dmaaddr, info->banks);
+ printk(KERN_DEBUG "oob_col=%d\n", info->oob_col);
+ printk(KERN_DEBUG "last_bank_col=%d\n", info->last_bank_col);
+
+ printk(KERN_NOTICE "BCH ECC %d BIT mode\n", mtd->dwECCBitNum);
+ set_ecc_engine(info, ecc_bit_mode); /* BCH ECC new structure */
+}
+
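+/*
+ * set_partition_size - pick default partition sizes based on the flash
+ * vendor, plane count and page size, allow overrides from the
+ * "wmt.nand.partition" u-boot variable, and pre-compute the par1..par4 page
+ * offsets used by the Hynix eSLC handling.
+ */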
+void set_partition_size(struct mtd_info *mtd)
+{
+ int ret, index;
+ char varval[256], partition_name[32];
+ int varlen = 256;
+ char *s = NULL, *tmp = NULL;
+ uint64_t part_size = 0;
+ struct nand_chip *chip = mtd->priv;
+
+ if(((mtd->id>>24)&0xff) == NAND_MFR_HYNIX) {
+ if(chip->realplanenum == 1) {
+ nand_partitions[0].size = 0x4000000;
+ nand_partitions[1].size = 0x4000000;
+ nand_partitions[2].size = 0x4000000;
+ } else {
+ nand_partitions[0].size = 0x2000000;
+ nand_partitions[1].size = 0x2000000;
+ nand_partitions[2].size = 0x2000000;
+ }
+ }
+
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek) - 1)) != 1) {
+ if (mtd->pageSizek == 12) {
+ nand_partitions[0].size = 0x1080000;
+ nand_partitions[1].size = 0x1080000;
+ nand_partitions[2].size = 0x1080000;
+ nand_partitions[3].size = 0x1080000;
+ nand_partitions[4].size = 0x4200000;
+ nand_partitions[5].size = 0x30000000;
+ nand_partitions[6].size = 0x20100000;
+ nand_partitions[7].size = MTDPART_SIZ_FULL;
+ } else if (mtd->pageSizek == 28) {
+ nand_partitions[0].size = 0x3800000;
+ nand_partitions[1].size = 0x3800000;
+ nand_partitions[2].size = 0x3800000;
+ nand_partitions[3].size = 0x1c00000;
+ nand_partitions[4].size = 0x7000000;
+ nand_partitions[5].size = 0x31000000;
+ nand_partitions[6].size = 0x21400000;
+ nand_partitions[7].size = 0x1c000000;
+ nand_partitions[8].size = MTDPART_SIZ_FULL;
+ }
+ //printk("(pageSizek>>(ffs(pageSizek)-1)=%d\n", mtd->pageSizek >> (ffs(mtd->pageSizek)-1));
+ }
+
+ ret = wmt_getsyspara("wmt.nand.partition", varval, &varlen);
+ if(ret == 0) {
+ printk("wmt.nand.partition: %s\n", varval);
+ s = varval;
+ while(*s != '\0')
+ {
+ index = NUM_NAND_PARTITIONS;
+ memset(partition_name, 0, 32);
+ get_partition_name(s, &tmp, partition_name);
+ search_mtd_table(partition_name, &index);
+ s = tmp + 1;
+ part_size = simple_strtoul(s, &tmp, 16);
+ s = tmp;
+ if(*s == ':')
+ s++;
+
+ //the data partition can't be resized via the u-boot env; its size is whatever NAND space remains.
+ if((index >= 0) && (index < (NUM_NAND_PARTITIONS-1)) && (part_size < chip->chipsize)) {
+ nand_partitions[index].size = part_size;
+ } else {
+ printk("Invalid parameter \"wmt.nand.partition\". Use default partition size for \"%s\" partition.\n", partition_name);
+ }
+ }
+ }
+
+
+
+ if(((mtd->id>>24)&0xff) == NAND_MFR_HYNIX) {
+ par1_ofs = nand_partitions[0].size;
+ par2_ofs = par1_ofs + nand_partitions[1].size;
+ par3_ofs = par2_ofs + nand_partitions[2].size;
+ par4_ofs = par3_ofs + nand_partitions[3].size;
+
+ par1_ofs = ((unsigned int )(par1_ofs >> 10))/mtd->pageSizek;
+ par2_ofs = ((unsigned int )(par2_ofs >> 10))/mtd->pageSizek;
+ par3_ofs = ((unsigned int )(par3_ofs >> 10))/mtd->pageSizek;
+ par4_ofs = ((unsigned int )(par4_ofs >> 10))/mtd->pageSizek;
+ }
+
+
+
+ /*min_partition_size = 0;
+ for (i = 0; i < 11; i++)
+ min_partition_size += nand_partitions[i].size;
+ nand_partitions[11].size = chip->chipsize - min_partition_size - (mtd->erasesize * 8);*/
+}
+
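+/* init_wr_cache - invalidate every write-cache slot */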
+void init_wr_cache(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i;
+
+ for (i = 0; i < WR_BUF_CNT; i++)
+ info->wr_page[i] = -1;
+}
+
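+/* alloc_write_cache - allocate the WR_BUF_CNT-entry page+OOB write cache */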
+int alloc_write_cache(struct mtd_info *mtd)
+{
+ wr_cache = vmalloc((mtd->writesize+32)*WR_BUF_CNT);
+ if (!wr_cache) {
+ printk(KERN_ERR"wr_cache=0x%x alloc fail\n", (mtd->writesize+32)*WR_BUF_CNT);
+ return 1;
+ }
+
+ return 0;
+}
+
+int alloc_rdmz_buffer(struct mtd_info *mtd)
+{
+ if (mtd->dwRdmz == 1) {
+ buf_rdmz = vmalloc(mtd->writesize);
+ if (!buf_rdmz) {
+ printk(KERN_ERR"buf_rdmz alloc fail\n");
+ return 1;
+ }
+ }
+ return 0;
+}
+
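+/* nandinfo_proc_read - /proc handler: report the flash manufacturer and the two raw NAND IDs */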
+static int nandinfo_proc_read(char *page, char **start, off_t off, int count, int *eof, void *data) {
+ char mfr_name[32];
+ int len = 0;
+ int mfr =(mtd_nandinfo->id>>24)&0xff;
+
+ switch(mfr) {
+ case NAND_MFR_SANDISK:
+ strcpy(mfr_name, "Sandisk");
+ break;
+ case NAND_MFR_HYNIX:
+ strcpy(mfr_name, "Hynix");
+ break;
+ case NAND_MFR_TOSHIBA:
+ strcpy(mfr_name, "Toshiba");
+ break;
+ case NAND_MFR_SAMSUNG:
+ strcpy(mfr_name, "Samsung");
+ break;
+ case NAND_MFR_MICRON:
+ strcpy(mfr_name, "Micron");
+ break;
+ case NAND_MFR_INTEL:
+ strcpy(mfr_name, "Intel");
+ break;
+ default:
+ strcpy(mfr_name, "Unknown");
+ break;
+ }
+
+ len = sprintf(page, "Manufacturer : %s\n"
+ "nand id1 : %lu\n"
+ "nand id2 : %lu\n" , mfr_name, mtd_nandinfo->id, mtd_nandinfo->id2);
+ return len;
+}
+
+extern int wmt_recovery_call(struct notifier_block *nb, unsigned long code, void *_cmd);
+static int wmt_nand_probe(struct platform_device *pdev)
+{
+ /* struct wmt_platform_nand *plat = to_nand_plat(pdev);*/
+ /*struct device *dev = &pdev->dev;*/
+ struct wmt_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct wmt_nand_info *info;
+ struct wmt_nand_mtd *nmtd;
+ struct mtd_info *mtd;
+ static const char *part_parsers[] = {"cmdlinepart", NULL};
+ /*struct mtd_part_parser_data ppdata;*/
+ /* struct wmt_nand_set *sets; */ /* extend more chips and partitions structure*/
+ struct resource *res;
+ int err = 0, ret = 0;
+ int size;
+ /* ------------------------*/
+ unsigned char sd_buf[80];
+ int sd_varlen = 80;
+ char *varname = "wmt.sd1.param";
+ int sd_enable = 0, SD1_function = 0; /*0 :disable 1:enable*/
+ /* ------------------------*/
+ buf_rdmz = NULL;
+ wr_cache = NULL;
+ prob_end = 0;
+ eslc_write = 0;
+ /* int nr_sets;*/
+ /* int setno;*/
+ pr_debug("wmt_nand_probe(%p)\n", pdev);
+ ret = wmt_getsyspara("wmt.boot.dev", sd_buf, &sd_varlen);
+ printk("wmt.boot.dev ret = %d\n", ret);
+ if(!ret && (!strncmp(sd_buf, "TF", 2) || (!strncmp(sd_buf, "UDISK", 5))))
+ {
+ printk("Boot from SD card or udisk card.\n");
+ return -1;
+ }
+
+ /*Read system param to identify host function 0: SD/MMC 1:SDIO wifi*/
+ ret = wmt_getsyspara(varname, sd_buf, &sd_varlen);
+ if (ret == 0) {
+ sscanf(sd_buf,"%d:%d", &sd_enable,&SD1_function);
+ if (sd_enable == 1) {
+ printk(KERN_NOTICE "SD1 enabled => NAND probe disabled\n");
+ return -EINVAL;
+ }
+ }
+ /*err = -EINVAL;
+ return err;*/
+ *(volatile unsigned int *)(GPIO_BASE_ADDR + 0x200) &= ~(1<<11); /*PIN_SHARE_SDMMC1_NAND*/
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ dev_err(&pdev->dev, "no memory for flash info\n");
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ memzero(info, sizeof(*info));
+ dev_set_drvdata(&pdev->dev, info);
+ platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ ret = request_irq(IRQ_NFC,
+ nfc_regular_isr,
+ IRQF_SHARED, //SA_SHIRQ, /*SA_INTERRUPT, * that is okay?*/ //zhf: modified by James Tian, should be IRQF_SHARED?
+ "NFC",
+ (void *)info);
+ if (ret) {
+ printk(KERN_ALERT "[NFC driver] Failed to register regular ISR!\n");
+ goto unmap;
+ }
+
+ ret = request_irq(IRQ_NFC_DMA,
+ nfc_pdma_isr,
+ IRQF_DISABLED, // SA_INTERRUPT, //zhf: modified by James Tian
+ "NFC",
+ (void *)info);
+ if (ret) {
+ printk(KERN_ALERT "[NFC driver] Failed to register DMA ISR!\n");
+ goto fr_regular_isr;
+ }
+ spin_lock_init(&info->controller.lock);
+ init_waitqueue_head(&info->controller.wq);
+
+ /* allocate and map the resource */
+
+ /* currently we assume we have the one resource */
+ res = pdev->resource;
+ size = res->end - res->start + 1;
+
+ info->area = request_mem_region(res->start, size, pdev->name);
+ info->oper_step = 0;
+
+
+ if (info->area == NULL) {
+ dev_err(&pdev->dev, "cannot reserve register region\n");
+ err = -ENOENT;
+ goto exit_error;
+ }
+
+ info->device = &pdev->dev;
+ /* info->platform = plat;*/
+ info->reg = (void __iomem *)NF_CTRL_CFG_BASE_ADDR;/*ioremap(res->start, size);*/
+ /* info->cpu_type = cpu_type;*/
+
+ if (info->reg == NULL) {
+ dev_err(&pdev->dev, "cannot reserve register region\n");
+ err = -EIO;
+ goto exit_error;
+ }
+
+/*
+ * * extend more partitions
+ *
+ err = wmt_nand_inithw(info, pdev);
+ if (err != 0)
+ goto exit_error;
+
+ sets = (plat != NULL) ? plat->sets : NULL;
+ nr_sets = (plat != NULL) ? plat->nr_sets : 1;
+
+ info->mtd_count = nr_sets;
+*/
+ /* allocate our information */
+
+/* size = nr_sets * sizeof(*info->mtds);*/
+ size = sizeof(*info->mtds);
+ info->mtds = kmalloc(size, GFP_KERNEL);
+ if (info->mtds == NULL) {
+ dev_err(&pdev->dev, "failed to allocate mtd storage\n");
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ memzero(info->mtds, size);
+
+ /* initialise all possible chips */
+
+ nmtd = info->mtds;
+
+ mtd = &nmtd->mtd;
+ info->dmabuf = dma_alloc_coherent(&pdev->dev, 40960, &info->dmaaddr, GFP_KERNEL);
+
+ if (!info->dmabuf || (info->dmaaddr & 0x0f)) { /* allocation failed or buffer not 16-byte aligned */
+ err = -ENOMEM;
+ goto out_free_dma;
+ }
+ /* nmtd->chip.buffers = (void *)info->dmabuf + 2112;*/
+
+ nmtd->chip.cmdfunc = wmt_nand_cmdfunc;
+ nmtd->chip.dev_ready = wmt_device_ready;
+ nmtd->chip.read_byte = wmt_read_byte;
+ nmtd->chip.write_buf = wmt_nand_write_buf;
+ nmtd->chip.read_buf = wmt_nand_read_buf;
+ nmtd->chip.select_chip = wmt_nand_select_chip;
+ nmtd->chip.get_para = nand_get_para;
+ nmtd->chip.chip_delay = 20;
+ nmtd->chip.priv = nmtd;
+ nmtd->chip.bbt_options = NAND_BBT_LASTBLOCK | NAND_BBT_USE_FLASH | NAND_BBT_PERCHIP | NAND_BBT_NO_OOB_BBM;
+ /* nmtd->chip.controller = &info->controller;*/
+
+ /*nmtd->chip.ecc.steps = 1;
+ nmtd->chip.ecc.prepad = 1;
+ nmtd->chip.ecc.postpad = 8;*/
+
+ nmtd->chip.ecc.mode = NAND_ECC_HW;
+ /*nmtd->chip.ecc.mode = 0;*/
+
+
+ /* for (setno = 0; setno < nr_sets; setno++, nmtd++)*/
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "initialising (%p, info %p)\n", nmtd, info);
+ #endif
+
+ /* Set up DMA address */
+ /*writel(info->dmaaddr & 0xffffffff, info->reg + NFC_DMA_DAR);*/
+
+ /*info->dmabuf = readl(info->reg + WMT_NFC_DMA_TRANS_CONFIG);*/
+
+ /* nmtd->nand.chip_delay = 0;*/
+
+ /* Enable the following for a flash based bad block table */
+ /* nmtd->nand.options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;*/
+
+ nmtd->chip.bbt_td = &wmt_bbt_main_descr_2048;
+ nmtd->chip.bbt_md = &wmt_bbt_mirror_descr_2048;
+ nmtd->chip.retry_pattern = &wmt_rdtry_descr;
+ nmtd->chip.cur_chip = NULL;
+
+ nmtd->info = info;
+ nmtd->mtd.priv = &nmtd->chip;
+ nmtd->mtd.owner = THIS_MODULE;
+ nmtd->mtd.reboot_notifier.notifier_call = wmt_recovery_call;//Lch
+ {/*unsigned int s1, s2;
+ s1 = wmt_read_oscr();*/
+ ret = reset_nfc(mtd, NULL, 3);
+ //s2 = wmt_read_oscr();printk("s2-s1=%d------------\n", (s2-s1)/3);
+ }
+ set_ecc_engine(info, 1);
+
+ info->datalen = 0;
+ /* initialise the hardware */
+ wmt_nfc_init(info, &nmtd->mtd);
+ writeb(0xff, info->reg + NFCR12_NAND_TYPE_SEL+1); //chip disable
+
+ /*rc = set_ECC_mode(mtd);
+ if (rc)
+ goto out_free_dma;*/
+
+ nmtd->chip.ecc.layout = &wmt_oobinfo_16k;
+ writeb(0x0, info->reg + NFCR11_SOFT_RST);
+
+ nmtd->scan_res = nand_scan(&nmtd->mtd, MAX_CHIP);
+ /*nmtd->scan_res = nand_scan(&nmtd->mtd, (sets) ? sets->nr_chips : 1);*/
+
+ if (nmtd->chip.cur_chip && mtd->dwRetry && ((mtd->id>>24)&0xFF) == NAND_MFR_SANDISK) {
+ /* Activating and initializing Dynamic Read Register */
+ auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+ sandisk_init_retry_register(mtd, nmtd->chip.cur_chip);
+ auto_pll_divisor(DEV_NAND, CLK_DISABLE, 0, 0);
+ }
+
+ if (nmtd->scan_res == 0) {
+ if (pdata)
+ pdata->partitions = nand_partitions;
+
+ ret = mtd_device_parse_register(mtd, part_parsers, NULL/*&ppdata*/,
+ pdata ? pdata->partitions : nand_partitions,
+ pdata ? NUM_NAND_PARTITIONS : NUM_NAND_PARTITIONS);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add mtd device\n");
+ goto out_free_dma;
+ }
+ }
+
+ //wmt_set_logo_offset();
+
+ /* write back mtd partition to env */
+ /* wmt_set_partition_info(&nmtd->chip); */
+
+ nandinfo_proc = create_proc_entry(NANDINFO, 0666, NULL);
+ if(nandinfo_proc == NULL) {
+ printk("Failed to create nandinfo proccess device\n");
+ goto out_free_dma;
+ } else {
+ mtd_nandinfo = mtd;
+ }
+ nandinfo_proc->read_proc = nandinfo_proc_read;
+
+ register_reboot_notifier(&mtd->reboot_notifier);//Lch
+
+ /*if (((mtd->id>>24)&0xFF) == NAND_MFR_HYNIX) {
+ auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+ writel(0x1312, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+ printk("prob_end timing=%x\n",readl(info->reg + NFCR14_READ_CYCLE_PULE_CTRL));
+ auto_pll_divisor(DEV_NAND, CLK_DISABLE, 0, 0);
+ }*/
+
+ auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+ if (!mtd->dwDDR) {
+ writeb(RD_DLY|readb(info->reg + NFCR12_NAND_TYPE_SEL), info->reg + NFCR12_NAND_TYPE_SEL);
+ writel(0x1212, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+ } else {
+ //writel(0x0101, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+ while ((*(volatile unsigned long *)(PMCS_ADDR+0x18))&0x7F0038)
+ ;
+ *(volatile unsigned long *)PMNAND_ADDR = (*(volatile unsigned long *)PMNAND_ADDR) - 5;
+ }
+	printk("prob_end timing=%x nfcr12=%x divisor=0x%x\n", readl(info->reg + NFCR14_READ_CYCLE_PULE_CTRL),
+	readb(info->reg + NFCR12_NAND_TYPE_SEL), *(volatile unsigned long *)PMNAND_ADDR);
+ auto_pll_divisor(DEV_NAND, CLK_DISABLE, 0, 0);
+
+ init_wr_cache(mtd);
+
+ printk(KERN_NOTICE "nand initialised ok\n");
+ prob_end = 1;
+ second_chip = 0;
+ return 0;
+
+out_free_dma:
+ dma_free_coherent(&pdev->dev, 32000/*17664 + 0x300*/, info->dmabuf, info->dmaaddr);
+
+fr_regular_isr:
+unmap:
+exit_error:
+ wmt_nand_remove(pdev);
+
+ if (err == 0)
+ err = -EINVAL;
+ return err;
+}
+
+/* PM Support */
+#ifdef CONFIG_PM
+int wmt_nand_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct wmt_nand_info *info = dev_get_drvdata(&pdev->dev);
+ struct mtd_info *mtd = &info->mtds->mtd;
+
+ /*nand_suspend->nand_get_device*/
+ mtd_suspend(mtd);
+
+ if ((STRAP_STATUS_VAL&0x400E) == 0x4008) {
+ auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+ *(volatile unsigned long *)(NF_CTRL_CFG_BASE_ADDR + 0x44) |= (1<<1);
+ printk(KERN_NOTICE "reset nand boot register NF_CTRL_CFG_BASE_ADDR + 0x44\n");
+ *(volatile unsigned long *)(NF_CTRL_CFG_BASE_ADDR + 0x44) &= ~(1<<1);
+ }
+ printk(KERN_NOTICE "wmt_nand_suspend\n");
+ return 0;
+}
+
+int wmt_nand_resume(struct platform_device *pdev)
+{
+ struct wmt_nand_info *info = dev_get_drvdata(&pdev->dev);
+ struct mtd_info *mtd = &info->mtds->mtd;
+ struct wmt_nand_mtd *nmtd;
+ struct nand_chip *chip;
+ unsigned char reset = NAND_CMD_RESET;
+ int i;
+ auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+ if (info) {
+ nmtd = info->mtds;
+ chip = nmtd->mtd.priv;
+ //if ((STRAP_STATUS_VAL&0x400E) == 0x4008)
+ writeb(0x0, info->reg + NFCR11_SOFT_RST);
+ /* initialise the hardware */
+ wmt_nfc_init(info, &nmtd->mtd);
+ set_ecc_engine(info, info->ECC_mode); /* BCH ECC */
+ writew((info->oob_ECC_bytes<<8) /*+ (ECC_size.unprotect&0xFF)*/, info->reg + NFCR10_OOB_ECC_SIZE);
+
+ if ((&nmtd->mtd)->dwDDR)
+ writeb(0x7F, info->reg + NFCR7_DLYCOMP);
+ wmt_nand_select_chip(&nmtd->mtd, 0);
+ write_bytes_cmd(&nmtd->mtd, 1, 0, 0, (uint8_t *)&reset, NULL, NULL);
+ for (i = 1; i < chip->numchips; i++) {
+ wmt_nand_select_chip(&nmtd->mtd, i);
+ write_bytes_cmd(&nmtd->mtd, 1, 0, 0, (uint8_t *)&reset, NULL, NULL);
+ }
+ wmt_init_nfc(&nmtd->mtd, nmtd->mtd.spec_clk, nmtd->mtd.spec_tadl, 0);
+ wmt_nand_select_chip(&nmtd->mtd, -1);
+
+ if ((&nmtd->mtd)->dwRdmz) {
+ nfc_hw_rdmz(&nmtd->mtd, 1);
+ writeb(0, info->reg + NFCR4_COMPORT3_4);
+ }
+ printk(KERN_NOTICE "wmt_nand_resume OK\n");
+ } else
+ printk(KERN_NOTICE "wmt_nand_resume error\n");
+
+ auto_pll_divisor(DEV_NAND, CLK_DISABLE, 0, 0);
+
+ /*nand_resume->nand_release_device*/
+ mtd_resume(mtd);
+
+ return 0;
+}
+
+#else /* !CONFIG_PM */
+#define wmt_nand_suspend NULL
+#define wmt_nand_resume NULL
+#endif
+
+/*struct platform_driver wmt_nand_driver = {*/
+struct platform_driver wmt_nand_driver = {
+ .driver.name = "nand",
+ .probe = wmt_nand_probe,
+ .remove = wmt_nand_remove,
+ .suspend = wmt_nand_suspend,
+ .resume = wmt_nand_resume
+ /*
+	.driver = {
+ .name = "wmt-nand",
+ .owner = THIS_MODULE,
+ },
+ */
+};
+
+static int __init wmt_nand_init(void)
+{
+ //printk(KERN_NOTICE "NAND Driver, WonderMedia Technologies, Inc\n");
+ return platform_driver_register(&wmt_nand_driver);
+}
+
+static void __exit wmt_nand_exit(void)
+{
+ platform_driver_unregister(&wmt_nand_driver);
+}
+
+module_init(wmt_nand_init);
+module_exit(wmt_nand_exit);
+
+MODULE_AUTHOR("WonderMedia Technologies, Inc.");
+MODULE_DESCRIPTION("WMT [Nand Flash Interface] driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/wmt_nand.h b/drivers/mtd/nand/wmt_nand.h
new file mode 100755
index 00000000..e5692ac7
--- /dev/null
+++ b/drivers/mtd/nand/wmt_nand.h
@@ -0,0 +1,365 @@
+/*++
+ Copyright (c) 2008 WonderMedia Technologies, Inc. All Rights Reserved.
+
+ This PROPRIETARY SOFTWARE is the property of WonderMedia Technologies, Inc.
+ and may contain trade secrets and/or other confidential information of
+ WonderMedia Technologies, Inc. This file shall not be disclosed to any third
+ party, in whole or in part, without prior written consent of WonderMedia.
+
+ THIS PROPRIETARY SOFTWARE AND ANY RELATED DOCUMENTATION ARE PROVIDED AS IS,
+ WITH ALL FAULTS, AND WITHOUT WARRANTY OF ANY KIND EITHER EXPRESS OR IMPLIED,
+ AND WonderMedia TECHNOLOGIES, INC. DISCLAIMS ALL EXPRESS OR IMPLIED WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR
+ NON-INFRINGEMENT.
+
+ Module Name:
+
+	$Workfile: wmt_nand.h $
+
+ Abstract:
+
+	WonderMedia NAND flash controller (NFC) register, ECC and PDMA definitions.
+
+ Revision History:
+
+ Dec.04.2008 First created
+	Dec.19.2008 Dannier changed coding style and added support for SPI flash boot with NOR accessible.
+ $JustDate: 2008/12/19 $
+--*/
+
+#ifndef __NFC_H__
+#define __NFC_H__
+
+/* include/asm-arm/arch-vt8630.h or arch-vt8620.h or include/asm-zac/arch-vt8620.h or arch-vt8630.h */
+/* #define NFC_BASE_ADDR 0xd8009000 */
+
+#define NFCR0_DATAPORT 0x00
+#define NFCR1_COMCTRL 0x04
+#define NFCR2_COMPORT0 0x08
+#define NFCR3_COMPORT1_2 0x0c
+#define NFCR4_COMPORT3_4 0x10
+#define NFCR5_COMPORT5_6 0x14
+#define NFCR6_COMPORT7 0x18
+#define NFCR7_DLYCOMP 0x1c
+#define NFCR8_DMA_CNT 0x20
+#define NFCR9_ECC_BCH_CTRL 0x24
+#define NFCRa_NFC_STAT 0x28
+#define NFCRb_NFC_INT_STAT 0x2c
+#define NFCRc_CMD_ADDR 0x30
+#define NFCRd_OOB_CTRL 0x34
+#define NFCRe_CALC_TADL 0x38
+#define NFCRf_CALC_RDMZ 0x3c
+#define NFCR10_OOB_ECC_SIZE 0x40
+#define NFCR11_SOFT_RST 0x44
+#define NFCR12_NAND_TYPE_SEL 0x48
+#define NFCR13_INT_MASK 0x4c
+#define NFCR14_READ_CYCLE_PULE_CTRL 0x50
+#define NFCR15_IDLE_STAT 0x54
+#define NFCR16_TIMER_CNT_CFG 0x58
+#define NFCR17_ECC_BCH_ERR_STAT 0x5c
+#define NFCR18_ECC_BCH_ERR_POS 0x60
+
+#define ECC_FIFO_0 0x1c0
+#define ECC_FIFO_1 0x1c4
+#define ECC_FIFO_2 0x1c8
+#define ECC_FIFO_3 0x1cc
+#define ECC_FIFO_4 0x1d0
+#define ECC_FIFO_5 0x1d4
+#define ECC_FIFO_6 0x1d8
+#define ECC_FIFO_7 0x1dc
+#define ECC_FIFO_8 0x1e0
+#define ECC_FIFO_9 0x1e4
+#define ECC_FIFO_a 0x1e8
+#define ECC_FIFO_b 0x1ec
+#define ECC_FIFO_c 0x1f0
+#define ECC_FIFO_d 0x1f4
+#define ECC_FIFO_e 0x1f8
+#define ECC_FIFO_f 0x1fc
+
+struct ECC_size_info{
+ int ecc_engine;
+ int oob_ECC_mode;
+ int banks;
+ int bank_size;
+ int max_bit_error;
+ int oob_max_bit_error;
+ int ecc_bits_count;
+ int oob_ecc_bits_count;
+	int bank_offset; /* data + data ECC, e.g. 512+8 for the 4-bit engine */
+ int ECC_bytes;
+ int oob_ECC_bytes;
+ int unprotect;
+};
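+/*
+ * Illustrative sketch (an assumption, not part of the original driver): the
+ * per-mode ECC geometry above is normally filled in through
+ * calculate_ECC_info(), declared near the end of this header, e.g.:
+ *
+ *	struct ECC_size_info ecc;
+ *
+ *	calculate_ECC_info(mtd, &ecc);
+ *	printk(KERN_INFO "banks=%d bank_size=%d parity bytes=%d\n",
+ *		ecc.banks, ecc.bank_size, ecc.ECC_bytes);
+ */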
+#define ECC1bit 0
+#define ECC4bit 1
+#define ECC8bit 2
+#define ECC12bit 3
+#define ECC16bit 4
+#define ECC24bitPer1K 5
+#define ECC40bitPer1K 6
+#define ECC60bitPer1K 7
+#define ECC44bitPer1K 7
+#define ECC44bit 8
+#define ECC1bit_bit_count 32
+#define ECC4bit_bit_count 52
+#define ECC8bit_bit_count 104
+#define ECC12bit_bit_count 156
+#define ECC16bit_bit_count 208
+#define ECC24bitPer1K_bit_count 336
+#define ECC40bitPer1K_bit_count 560
+#define ECC60bitPer1K_bit_count 830
+#define ECC44bitPer1K_bit_count 616
+#define ECC44bit_bit_count 576
+#define ECC1bit_byte_count 4
+#define ECC4bit_byte_count 8
+#define ECC8bit_byte_count 16
+#define ECC12bit_byte_count 20
+#define ECC16bit_byte_count 26
+#define ECC24bitPer1K_byte_count 42
+#define ECC40bitPer1K_byte_count 70
+#define ECC60bitPer1K_byte_count 106
+#define ECC44bitPer1K_byte_count 77
+#define ECC44bit_byte_count 72
+#define ECC1bit_unprotect 0
+#define ECC4bit_unprotect 0
+#define ECC8bit_unprotect 50
+#define ECC12bit_unprotect 14
+#define ECC16bit_unprotect 26
+#define ECC24bitPer1K_unprotect 34
+#define ECC40bitPer1K_unprotect 14
+#define ECC60bitPer1K_unprotect 46 //8k+960
+#define ECC44bitPer1K_unprotect 14
+#define ECC44bit_unprotect 72
+#define MAX_BANK_SIZE 1024
+#define MAX_PARITY_SIZE 106
+#define MAX_ECC_BIT_ERROR 60
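+/*
+ * Note (editorial, inferred from the values above): the MAX_* limits match
+ * the strongest mode defined here, 60-bit-per-1K BCH, which uses
+ * ECC60bitPer1K_byte_count (106) parity bytes per MAX_BANK_SIZE (1024-byte)
+ * bank and corrects up to MAX_ECC_BIT_ERROR (60) bit errors.
+ */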
+/*
+ * NAND PDMA
+ */
+#define NAND_DESC_BASE_ADDR 0x00D00000
+
+#define NFC_DMA_GCR 0x100
+#define NFC_DMA_IER 0x104
+#define NFC_DMA_ISR 0x108
+#define NFC_DMA_DESPR 0x10C
+#define NFC_DMA_RBR 0x110
+#define NFC_DMA_DAR 0x114
+#define NFC_DMA_BAR 0x118
+#define NFC_DMA_CPR 0x11C
+#define NFC_DMA_CCR 0X120
+
+#define NAND_GET_FEATURE 0xEE
+#define NAND_SET_FEATURE 0xEF
+/*
+ * NAND PDMA - DMA_GCR : DMA Global Control Register
+ */
+#define NAND_PDMA_GCR_DMA_EN 0x00000001 /* [0] -- DMA controller enable */
+#define NAND_PDMA_GCR_SOFTRESET	0x00000100	/* [8] -- Software reset */
+
+/*
+ * NAND PDMA - DMA_IER : DMA Interrupt Enable Register
+ */
+#define NAND_PDMA_IER_INT_EN 0x00000001 /* [0] -- DMA interrupt enable */
+/*
+ * NAND PDMA - DMA_ISR : DMA Interrupt Status Register
+ */
+#define NAND_PDMA_IER_INT_STS 0x00000001 /* [0] -- DMA interrupt status */
+/*
+ * NAND PDMA - DMA_DESPR : DMA Descriptor base address Pointer Register
+ */
+
+/*
+ * NAND PDMA - DMA_RBR : DMA Residual Bytes Register
+ */
+#define NAND_PDMA_RBR_End 0x80000000 /* [31] -- DMA descriptor end flag */
+#define NAND_PDMA_RBR_Format 0x40000000 /* [30] -- DMA descriptor format */
+/*
+ * NAND PDMA - DMA_DAR : DMA Data Address Register
+ */
+
+/*
+ * NAND PDMA - DMA_BAR : DMA Branch Address Register
+ */
+
+/*
+ * NAND PDMA - DMA_CPR : DMA Command Pointer Register
+ */
+
+/*
+ * NAND PDMA - DMA_CCR : DMA Context Control Register for Channel 0
+ */
+#define NAND_PDMA_READ 0x00
+#define NAND_PDMA_WRITE 0x01
+#define NAND_PDMA_CCR_RUN 0x00000080
+#define NAND_PDMA_CCR_IF_to_peripheral 0x00000000
+#define NAND_PDMA_CCR_peripheral_to_IF 0x00400000
+#define NAND_PDMA_CCR_EvtCode 0x0000000f
+#define NAND_PDMA_CCR_Evt_no_status 0x00000000
+#define NAND_PDMA_CCR_Evt_ff_underrun 0x00000001
+#define NAND_PDMA_CCR_Evt_ff_overrun 0x00000002
+#define NAND_PDMA_CCR_Evt_desp_read 0x00000003
+#define NAND_PDMA_CCR_Evt_data_rw 0x00000004
+#define NAND_PDMA_CCR_Evt_early_end 0x00000005
+#define NAND_PDMA_CCR_Evt_success 0x0000000f
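+/*
+ * Illustrative sketch (an assumption, not part of the original driver): once
+ * a PDMA transfer finishes, the channel event code could be checked against
+ * the masks above, with 'regbase' standing for the NFC MMIO base that the
+ * .c file keeps in info->reg:
+ *
+ *	unsigned int ccr = readl(regbase + NFC_DMA_CCR);
+ *	if ((ccr & NAND_PDMA_CCR_EvtCode) != NAND_PDMA_CCR_Evt_success)
+ *		printk(KERN_ERR "NAND PDMA event code 0x%x\n",
+ *			ccr & NAND_PDMA_CCR_EvtCode);
+ */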
+
+/*
+ * PDMA Descriptor short
+ */
+struct _NAND_PDMA_DESC_S{
+ unsigned int volatile ReqCount : 16; /* bit 0 -15 -Request count */
+ unsigned int volatile i : 1; /* bit 16 -interrupt */
+ unsigned int volatile reserve : 13; /* bit 17-29 -reserved */
+ unsigned int volatile format : 1; /* bit 30 -The descriptor format */
+ unsigned int volatile end : 1; /* bit 31 -End flag of descriptor list*/
+	unsigned int volatile	DataBufferAddr : 32;/* bit 31-0 -Data Buffer address */
+};
+
+/*
+ * PDMA Descriptor long
+ */
+struct _NAND_PDMA_DESC_L{
+ unsigned long volatile ReqCount : 16; /* bit 0 -15 -Request count */
+ unsigned long volatile i : 1; /* bit 16 -interrupt */
+ unsigned long volatile reserve : 13; /* bit 17-29 -reserved */
+ unsigned long volatile format : 1; /* bit 30 -The descriptor format */
+ unsigned long volatile end : 1; /* bit 31 -End flag of descriptor list*/
+ unsigned long volatile DataBufferAddr : 32;/* bit 31-0 -Data Buffer address */
+ unsigned long volatile BranchAddr : 32; /* bit 31-2 -Descriptor Branch address */
+ unsigned long volatile reserve0 : 32; /* bit 31-0 -reserved */
+};
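+/*
+ * Illustrative sketch (an assumption, not part of the original driver): a
+ * single short descriptor for one data-phase transfer would be set up along
+ * the lines of the nand_init_short_desc() prototype declared further below:
+ *
+ *	struct _NAND_PDMA_DESC_S *desc = (struct _NAND_PDMA_DESC_S *)desc_addr;
+ *
+ *	desc->ReqCount       = len;       // bytes to transfer
+ *	desc->i              = 0;         // no per-descriptor interrupt
+ *	desc->format         = 0;         // short descriptor format
+ *	desc->end            = 1;         // last descriptor in the list
+ *	desc->DataBufferAddr = dma_addr;  // physical data buffer address
+ */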
+
+struct NFC_RW_T {
+ unsigned int T_R_setup;
+ unsigned int T_R_hold;
+ unsigned int T_W_setup;
+ unsigned int T_W_hold;
+ unsigned int divisor;
+ unsigned int T_TADL;
+ unsigned int T_TWHR;
+ unsigned int T_TWB;
+ unsigned int T_RHC_THC;
+};
+
+/* cfg_1 */
+#define TWHR 0x800
+#define OLD_CMD 0x400 /* enable old command trigger mode */
+#define DPAHSE_DISABLE 0x80 /*disable data phase */
+#define NAND2NFC 0x40 /* direction : nand to controller */
+#define SING_RW			0x20	/* enable single read/write command */
+#define MUL_CMDS 0x10 /* support cmd+addr+cmd */
+#define NFC_TRIGGER 0x01 /* start cmd&addr sequence */
+/*cfg_9 */
+#define READ_RESUME 1 //0x100
+#define BCH_INT_EN 0x60
+#define BANK_DR 0x10
+#define DIS_BCH_ECC 0x08
+#define USE_HW_ECC 0
+#define ECC_MODE 7
+
+/*cfg_a */
+#define NFC_CMD_RDY 0x04
+#define NFC_BUSY		0x02	/* command and data are being transferred on the flash I/O */
+#define FLASH_RDY 0x01 /* flash is ready */
+/*cfg_b */
+#define B2R			0x08	/* status from busy to ready */
+#define ERR_CORRECT 0x2
+#define BCH_ERR 0x1
+/*cfg_d */
+#define HIGH64FIFO 8 /* read high 64 bytes fifo */
+#define OOB_READ		0x4	/* calculate the side (OOB) info with the BCH decoder */
+#define RED_DIS 0x2 /* do not read out oob area data to FIFO */
+/*cfg_f */
+#define RDMZ 0x10000 /* enable randomizer */
+#define RDMZH 1 /* enable randomizer */
+/*cfg_12 */
+#define PAGE_512 0
+#define PAGE_2K 1
+#define PAGE_4K 2
+#define PAGE_8K 3
+#define PAGE_16K 4
+#define PAGE_32K 5
+#define WD8 0
+#define WIDTH_16 (1<<3)
+#define WP_DISABLE (1<<4) /*disable write protection */
+#define DIRECT_MAP (1<<5)
+#define RD_DLY (1<<6)
+#define TOGGLE (1<<7)
+
+/*cfg_13*/ /* Dannier Add */
+#define B2RMEN			0x08	/* interrupt mask enable of nand flash from busy to ready */
+/*cfg_15 */
+#define NFC_IDLE 0x01
+/*cfg_17 status */
+#define BANK_NUM 0x1F00
+#define BCH_ERR_CNT 0x3F
+/*cfg_18 */
+#define BCH_ERRPOS0 0x3fff
+#define BCH_ERRPOS1 (BCH_ERRPOS0<<16)
+
+#define ADDR_COLUMN 1
+#define ADDR_PAGE 2
+#define ADDR_COLUMN_PAGE 3
+#define WRITE_NAND_COMMAND(d, adr) do { *(volatile unsigned char *)(adr) = (unsigned char)(d); } while (0)
+#define WRITE_NAND_ADDRESS(d, adr) do { *(volatile unsigned char *)(adr) = (unsigned char)(d); } while (0)
+
+
+#define SOURCE_CLOCK 24
+#define MAX_SPEED_MHZ 96
+#define MAX_READ_DELAY 9 /* 8.182 = tSKEW 3.606 + tDLY 4.176 + tSETUP 0.4 */
+#define MAX_WRITE_DELAY 9 /* 8.72 = tDLY 10.24 - tSKEW 1.52*/
+
+#define DMA_SINGNAL 0
+#define DMA_INC4 0x10
+#define DMA_INC8 0x20
+/*#define first4k218 0
+#define second4k218 4314 *//* 4096 + 218 */
+#define NFC_TIMEOUT_TIME (HZ*2)
+
+int nand_init_pdma(struct mtd_info *mtd);
+int nand_free_pdma(struct mtd_info *mtd);
+int nand_alloc_desc_pool(unsigned int *DescAddr);
+int nand_init_short_desc(unsigned int *DescAddr, unsigned int ReqCount, unsigned int *BufferAddr, int End);
+int nand_init_long_desc(unsigned long *DescAddr, unsigned int ReqCount, unsigned long *BufferAddr,
+unsigned long *BranchAddr, int End);
+int nand_config_pdma(struct mtd_info *mtd, unsigned long *DescAddr, unsigned int dir);
+int nand_pdma_handler(struct mtd_info *mtd);
+void nand_hamming_ecc_1bit_correct(struct mtd_info *mtd);
+void bch_data_ecc_correct(struct mtd_info *mtd);
+void bch_redunt_ecc_correct(struct mtd_info *mtd);
+void bch_data_last_bk_ecc_correct(struct mtd_info *mtd);
+void copy_filename (char *dst, char *src, int size);
+int wmt_getsyspara(char *varname, unsigned char *varval, int *varlen);
+int wmt_setsyspara(char *varname, char *varval);
+void calculate_ECC_info(struct mtd_info *mtd, struct ECC_size_info *ECC_size);
+/*unsigned int wmt_bchencoder (unsigned char *data, unsigned char *bch_code, unsigned char bits, unsigned char *bch_codelen, unsigned int encode_len);*/
+int bch_encoder(unsigned int *p_parity, u32 *p_data, u8 bits, u32 datacnt);
+unsigned int Caculat_1b_bch( unsigned int *pariA, unsigned int *bch_GF2, unsigned int din, u8 pari_len, u8 pari_lb);
+int Gen_GF2(u8 bits, unsigned int *buf);
+int nand_get_feature(struct mtd_info *mtd, int addr);
+int nand_set_feature(struct mtd_info *mtd, int cmd, int addrss, int value);
+void rdmzier(uint8_t *buf, int size, int page);
+void rdmzier_oob(uint8_t *buf, uint8_t *src, int size, int page, int ofs);
+int nand_hynix_get_retry_reg(struct mtd_info *mtd, uint8_t *addr, uint8_t *para, int size);
+int nand_hynix_set_retry_reg(struct mtd_info *mtd, int reg);
+int write_bytes_cmd(struct mtd_info *mtd, int cmd_cnt, int addr_cnt, int data_cnt, uint8_t *cmd, uint8_t *addr, uint8_t *data);
+int hynix_read_retry_set_para(struct mtd_info *mtd, int reg);
+int load_hynix_opt_reg(struct mtd_info *mtd, struct nand_chip *chip);
+void wmt_init_nfc(struct mtd_info *mtd, unsigned int spec_clk, unsigned int spec_tadl, int busw);
+void set_ecc_info(struct mtd_info *mtd);
+int alloc_rdmz_buffer(struct mtd_info *mtd);
+int alloc_write_cache(struct mtd_info *mtd);
+void init_wr_cache(struct mtd_info *mtd);
+int cache_read_data(struct mtd_info *mtd, struct nand_chip *chip, int page, const uint8_t *buf);
+void cache_write_data(struct mtd_info *mtd, struct nand_chip *chip, int page, const uint8_t *buf);
+void set_partition_size(struct mtd_info *mtd);
+int get_flash_info_from_env(unsigned int id, unsigned int id2, struct WMT_nand_flash_dev *type);
+int reset_nfc(struct mtd_info *mtd, unsigned int *buf, int step);
+void nfc_hw_rdmz(struct mtd_info *mtd, int on);
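+/*
+ * Illustrative call order (an assumption inferred from the prototypes above,
+ * not taken from the driver itself): a PDMA-backed transfer would roughly be
+ *
+ *	nand_init_pdma(mtd);
+ *	nand_alloc_desc_pool(desc);
+ *	nand_init_short_desc(desc, len, buf, 1);
+ *	nand_config_pdma(mtd, (unsigned long *)desc, NAND_PDMA_READ);
+ *	... trigger the NFC command/data phase ...
+ *	nand_pdma_handler(mtd);
+ *	nand_free_pdma(mtd);
+ */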
+
+
+#define REG_SEED 1
+#define BYTE_SEED 2112
+
+
+
+#endif