summaryrefslogtreecommitdiff
path: root/ANDROID_3.4.5/drivers/mtd
diff options
context:
space:
mode:
Diffstat (limited to 'ANDROID_3.4.5/drivers/mtd')
-rw-r--r--ANDROID_3.4.5/drivers/mtd/Makefile2
-rw-r--r--ANDROID_3.4.5/drivers/mtd/cmdlinepart.c19
-rw-r--r--ANDROID_3.4.5/drivers/mtd/devices/Kconfig5
-rw-r--r--ANDROID_3.4.5/drivers/mtd/devices/Makefile2
-rwxr-xr-xANDROID_3.4.5/drivers/mtd/devices/wmt_sf.c1112
-rwxr-xr-xANDROID_3.4.5/drivers/mtd/devices/wmt_sf.h182
-rwxr-xr-xANDROID_3.4.5/drivers/mtd/devices/wmt_sf_ids.c74
-rw-r--r--ANDROID_3.4.5/drivers/mtd/mtd_blkdevs.c92
-rw-r--r--ANDROID_3.4.5/drivers/mtd/mtdchar.c92
-rw-r--r--ANDROID_3.4.5/drivers/mtd/mtdcore.c65
-rw-r--r--ANDROID_3.4.5/drivers/mtd/mtdpart.c14
-rw-r--r--ANDROID_3.4.5/drivers/mtd/mtdswap.c2700
-rw-r--r--ANDROID_3.4.5/drivers/mtd/nand/Kconfig63
-rw-r--r--ANDROID_3.4.5/drivers/mtd/nand/Makefile3
-rw-r--r--ANDROID_3.4.5/drivers/mtd/nand/nand_base.c916
-rw-r--r--ANDROID_3.4.5/drivers/mtd/nand/nand_bbt.c1496
-rw-r--r--ANDROID_3.4.5/drivers/mtd/nand/nand_ids.c372
-rwxr-xr-xANDROID_3.4.5/drivers/mtd/nand/wmt_nand.c8285
-rwxr-xr-xANDROID_3.4.5/drivers/mtd/nand/wmt_nand.h365
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/Kconfig69
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/Makefile6
-rwxr-xr-xANDROID_3.4.5/drivers/mtd/ubi/attach.c1769
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/build.c356
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/cdev.c56
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/debug.c300
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/debug.h165
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/eba.c384
-rwxr-xr-xANDROID_3.4.5/drivers/mtd/ubi/fastmap.c1668
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/gluebi.c60
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/io.c443
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/kapi.c151
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/misc.c39
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/ubi-media.h145
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/ubi.h389
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/upd.c22
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/vmt.c86
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/vtbl.c240
-rw-r--r--ANDROID_3.4.5/drivers/mtd/ubi/wl.c909
-rwxr-xr-xANDROID_3.4.5/drivers/mtd/wmt_env.c1099
39 files changed, 21177 insertions, 3038 deletions
diff --git a/ANDROID_3.4.5/drivers/mtd/Makefile b/ANDROID_3.4.5/drivers/mtd/Makefile
index f9013542..2c427fff 100644
--- a/ANDROID_3.4.5/drivers/mtd/Makefile
+++ b/ANDROID_3.4.5/drivers/mtd/Makefile
@@ -4,7 +4,7 @@
# Core functionality.
obj-$(CONFIG_MTD) += mtd.o
-mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o
+mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o wmt_env.o
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
diff --git a/ANDROID_3.4.5/drivers/mtd/cmdlinepart.c b/ANDROID_3.4.5/drivers/mtd/cmdlinepart.c
index ddf9ec6d..278df88d 100644
--- a/ANDROID_3.4.5/drivers/mtd/cmdlinepart.c
+++ b/ANDROID_3.4.5/drivers/mtd/cmdlinepart.c
@@ -90,8 +90,8 @@ static struct mtd_partition * newpart(char *s,
int extra_mem_size)
{
struct mtd_partition *parts;
- unsigned long size;
- unsigned long offset = OFFSET_CONTINUOUS;
+ unsigned long long size;
+ unsigned long long offset = OFFSET_CONTINUOUS;
char *name;
int name_len;
unsigned char *extra_mem;
@@ -305,7 +305,9 @@ static int mtdpart_setup_real(char *s)
}
return 1;
}
-
+#ifdef CONFIG_MTD_NAND_WMT
+extern struct mtd_partition nand_partitions[];
+#endif
/*
* Main function to be called from the MTD mapping driver/device to
* obtain the partitioning information. At this point the command line
@@ -317,7 +319,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
- unsigned long offset;
+ unsigned long long offset;
int i;
struct cmdline_mtd_partition *part;
const char *mtd_id = master->name;
@@ -348,6 +350,15 @@ static int parse_cmdline_partitions(struct mtd_info *master,
}
offset += part->parts[i].size;
}
+ #ifdef CONFIG_MTD_NAND_WMT
+ if (!strcmp(part->mtd_id, "WMT.nand")) {
+ for (i = 0; i < part->num_parts; i++) {
+ strcpy(nand_partitions[i].name, part->parts[i].name);
+ nand_partitions[i].offset = part->parts[i].offset;
+ nand_partitions[i].size = part->parts[i].size;
+ }
+ }
+ #endif
*pparts = kmemdup(part->parts,
sizeof(*part->parts) * part->num_parts,
GFP_KERNEL);
diff --git a/ANDROID_3.4.5/drivers/mtd/devices/Kconfig b/ANDROID_3.4.5/drivers/mtd/devices/Kconfig
index 4cdb2af7..40c2890b 100644
--- a/ANDROID_3.4.5/drivers/mtd/devices/Kconfig
+++ b/ANDROID_3.4.5/drivers/mtd/devices/Kconfig
@@ -334,4 +334,9 @@ config MTD_DOCPROBE_55AA
LinuxBIOS or if you need to recover a DiskOnChip Millennium on which
you have managed to wipe the first block.
+config MTD_WMT_SF
+ tristate "WonderMedia SF Support"
+ depends on MTD
+ help
+ Download files using IOCTL to SPI Flash.
endmenu
diff --git a/ANDROID_3.4.5/drivers/mtd/devices/Makefile b/ANDROID_3.4.5/drivers/mtd/devices/Makefile
index a4dd1d82..404608e6 100644
--- a/ANDROID_3.4.5/drivers/mtd/devices/Makefile
+++ b/ANDROID_3.4.5/drivers/mtd/devices/Makefile
@@ -19,5 +19,5 @@ obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
-
+obj-$(CONFIG_MTD_WMT_SF) += wmt_sf_ids.o wmt_sf.o
CFLAGS_docg3.o += -I$(src) \ No newline at end of file
diff --git a/ANDROID_3.4.5/drivers/mtd/devices/wmt_sf.c b/ANDROID_3.4.5/drivers/mtd/devices/wmt_sf.c
new file mode 100755
index 00000000..d0463425
--- /dev/null
+++ b/ANDROID_3.4.5/drivers/mtd/devices/wmt_sf.c
@@ -0,0 +1,1112 @@
+/*++
+drivers/mtd/devices/wmt_sf.c
+
+Copyright (c) 2008 WonderMedia Technologies, Inc.
+
+This program is free software: you can redistribute it and/or modify it under the
+terms of the GNU General Public License as published by the Free Software Foundation,
+either version 2 of the License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details.
+You should have received a copy of the GNU General Public License along with
+this program. If not, see <http://www.gnu.org/licenses/>.
+
+WonderMedia Technologies, Inc.
+10F, 529, Chung-Cheng Road, Hsin-Tien, Taipei 231, R.O.C.
+--*/
+
+/*
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/sizes.h>
+
+#include <asm/arch/vt8610_pmc.h>
+#include <asm/arch/vt8610_gpio.h>
+#include <asm/arch/vt8610_dma.h>
+*/
+
+//#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/mtd/mtd.h>
+#include <asm/io.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <mach/hardware.h>
+#include <linux/delay.h>
+
+#include "wmt_sf.h"
+
+SF_FPTR wmt_sf_prot = 0;
+EXPORT_SYMBOL(wmt_sf_prot);
+
+
+
+typedef enum {
+ FL_READY,
+ FL_READING,
+ FL_WRITING,
+ FL_ERASING,
+} sf_state_t;
+
+struct sf_hw_control {
+ spinlock_t lock;
+ wait_queue_head_t wq;
+ //struct sf_chip *active;
+};
+
+struct wmt_sf_info_t {
+ struct mtd_info *sfmtd;
+ struct mtd_info mtd;
+ struct mutex lock;
+ struct sfreg_t *reg ;
+ void *io_base;
+ struct sf_hw_control controller;
+ sf_state_t state;
+};
+static struct mutex sector_lock;
+static struct sfreg_t *reg_sf;
+extern int wmt_getsyspara(char *varname, unsigned char *varval, int *varlen);
+extern int wmt_setsyspara(char *varname, char *varval);
+extern int wmt_is_secure_enabled(void);
+
+/*
+ * 512k Flash
+ * +------------------------+
+ * | | 0xffff,ffff
+ * | 64k W-Load |
+ * | |
+ * +------------------------+ 0xffff,0000
+ * | |
+ * | 64k OTP env |
+ * | |
+ * +------------------------+ 0xfffe,0000
+ * | |
+ * | 64k uboot env |
+ * | |
+ * +------------------------+ 0xfffd,0000
+ * | |
+ * | |
+ * | 320k uboot.bin |
+ * | |
+ * | |
+ * | |
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~ 0xfff8,0000
+ */
+/* only bootable devices have a default partitioning */
+/*static*/ struct mtd_partition boot_partitions[] = {
+ {
+ .name = "u-boot-SF",
+ .offset = 0x00000000,
+ .size = 0x00050000,
+ },
+ {
+ .name = "u-boot env. cfg. 1-SF",
+ .offset = 0x00050000,
+ .size = 0x00010000,
+ },
+#if 0
+ // for OTP env partition
+ {
+ .name = "otp-env-SF",
+ .offset = 0x00060000,
+ .size = 0x00010000,
+ },
+#endif
+ {
+ .name = "w-load-SF",
+ .offset = 0x00070000,
+ .size = 0x00010000,
+ }
+};
+#define NUM_SF_PARTITIONS ARRAY_SIZE(boot_partitions)
+
+struct mtd_partition boot_partitions_16m[] = {
+ {
+ .name = "boot-img-SF",
+ .offset = 0x00000000,
+ .size = 0x00f40000,
+ },
+ {
+ .name = "logo-SF",
+ .offset = 0x00f40000,
+ .size = 0x00040000,
+ },
+ {
+ .name = "u-boot-SF",
+ .offset = 0x00f80000,
+ .size = 0x00050000,
+ },
+ {
+ .name = "u-boot env. cfg. 1-SF",
+ .offset = 0x00fd0000,
+ .size = 0x00010000,
+ },
+#if 0
+ // for OTP env partition
+ {
+ .name = "otp-env-SF",
+ .offset = 0x00fe0000,
+ .size = 0x00010000,
+ },
+#endif
+ {
+ .name = "w-load-SF",
+ .offset = 0x00ff0000,
+ .size = 0x00010000,
+ }
+};
+
+#define NUM_SF_PARTITIONS_16M ARRAY_SIZE(boot_partitions_16m)
+
+static const char *part_probes[] = { "cmdlinepart", NULL };
+static struct mtd_partition *parts;
+
+static unsigned int g_sf_force_size;
+
+static int __init wmt_force_sf_size(char *str)
+{
+ char dummy;
+
+ sscanf(str, "%d%c", (int *)&g_sf_force_size, &dummy);
+
+ return 1;
+}
+
+__setup("sf_mtd=", wmt_force_sf_size);
+
+struct wmt_flash_info_t g_sf_info[2];
+extern struct wm_sf_dev_t sf_ids[];
+
+unsigned int MTDSF_PHY_ADDR;
+
+int get_sf_info(int index, struct wmt_flash_info_t *info)
+{
+ unsigned int i;
+
+ if (info->id == FLASH_UNKNOW)
+ return -1;
+ for (i = 0; sf_ids[i].id != 0; i++) {
+ if (sf_ids[i].id == info->id) {
+ info->total = (sf_ids[i].size*1024);
+ break;
+ }
+ }
+ if (sf_ids[i].id == 0) {
+ printk(KERN_WARNING "un-know id%d = 0x%x\n", index, info->id);
+ if (index == 0 && info->id != 0) {
+ // if not identified, set size 512K as default.
+ info->id = sf_ids[2].id;
+ info->total = (sf_ids[2].size*1024);
+ } else {
+ info->id = FLASH_UNKNOW;
+ info->total = 0;
+ }
+ return -1;
+ }
+
+ return 0;
+}
+
+int wmt_sfc_ccr(struct wmt_flash_info_t *info)
+{
+ unsigned int cnt = 0, size;
+
+ size = info->total;
+ while (size) {
+ size >>= 1;
+ cnt++;
+ }
+ cnt -= 16;
+ cnt = cnt<<8;
+ info->val = (info->phy|cnt);
+ return 0;
+}
+
+int wmt_sfc_init(struct sfreg_t *sfc)
+{
+ unsigned int tmp;
+ int i, ret;
+
+ tmp = STRAP_STATUS_VAL;
+ if ((tmp & 0x4008) == (0x4000|SPI_FLASH_TYPE)) {
+ MTDSF_PHY_ADDR = 0xFFFFFFFF;
+ /* set default */
+ sfc->SPI_RD_WR_CTR = 0x11;
+ /*sfc->CHIP_SEL_0_CFG = 0xFF800800;*/
+ sfc->SPI_INTF_CFG = 0x00030000;
+ sfc->SPI_ERROR_STATUS = 0x3F;
+ } else {
+ /*MTDSF_PHY_ADDR = 0xEFFFFFFF;*/
+ /* set default */
+ /*sfc->SPI_RD_WR_CTR = 0x11;
+ sfc->CHIP_SEL_0_CFG = 0xEF800800;
+ sfc->SPI_INTF_CFG = 0x00030000;*/
+ printk(KERN_WARNING "strapping not support sf\n");
+ return -EIO;
+ }
+ memset(&g_sf_info[0], 0, 2*sizeof(struct wmt_flash_info_t));
+ g_sf_info[0].id = FLASH_UNKNOW;
+ g_sf_info[1].id = FLASH_UNKNOW;
+
+ /* read id */
+ sfc->SPI_RD_WR_CTR = 0x11;
+ g_sf_info[0].id = sfc->SPI_MEM_0_SR_ACC;
+ sfc->SPI_RD_WR_CTR = 0x01;
+ sfc->SPI_RD_WR_CTR = 0x11;
+ g_sf_info[1].id = sfc->SPI_MEM_1_SR_ACC;
+ sfc->SPI_RD_WR_CTR = 0x01;
+
+ printk("wmt_sfc_init id0 is %x, id1 is %x\n", g_sf_info[0].id, g_sf_info[1].id);
+
+ for (i = 0; i < 2; i++) {
+ ret = get_sf_info(i, &g_sf_info[i]);
+ if (ret)
+ break;
+ }
+ if (g_sf_info[0].id == FLASH_UNKNOW)
+ return -1;
+ g_sf_info[0].phy = (MTDSF_PHY_ADDR-g_sf_info[0].total+1);
+
+ MTDSF_PHY_ADDR = MTDSF_PHY_ADDR-g_sf_info[0].total+1;
+ if (g_sf_info[0].phy&0xFFFF) {
+ printk(KERN_ERR "WMT SFC Err : start address must align to 64KByte\n");
+ return -1;
+ }
+ wmt_sfc_ccr(&g_sf_info[0]);
+ sfc->CHIP_SEL_0_CFG = g_sf_info[0].val;
+ if (g_sf_info[1].id != FLASH_UNKNOW) {
+ g_sf_info[1].phy = (g_sf_info[0].phy-g_sf_info[1].total);
+ MTDSF_PHY_ADDR = MTDSF_PHY_ADDR-g_sf_info[1].total;
+ tmp = g_sf_info[1].phy;
+ g_sf_info[1].phy &= ~(g_sf_info[1].total-1);
+ if (g_sf_info[0].phy&0xFFFF) {
+ printk(KERN_ERR "WMT SFC Err : start address must align to 64KByte\n");
+ printk(KERN_ERR "WMT SFC Err : CS1 could not be used\n");
+ g_sf_info[1].id = FLASH_UNKNOW;
+ return 0;
+ }
+ wmt_sfc_ccr(&g_sf_info[1]);
+ sfc->CHIP_SEL_1_CFG = g_sf_info[1].val;
+ }
+ /*printk("CS0 : 0x%x , CS1 : 0x%x\n",g_sf_info[0].val,g_sf_info[1].val);*/
+
+ if (g_sf_force_size) {
+ tmp = (g_sf_force_size*1024*1024);
+ MTDSF_PHY_ADDR = (0xFFFFFFFF-tmp)+1;
+ }
+
+ return 0;
+}
+
+int flash_error(unsigned long code)
+{
+
+ /* check Timeout */
+ if (code & BIT_TIMEOUT) {
+ printk(KERN_ERR "Serial Flash Timeout\n");/* For UBOOT */
+ return ERR_TIMOUT;
+ }
+
+ if (code & SF_BIT_WR_PROT_ERR) {
+ printk(KERN_ERR "Serial Flash Write Protect Error\n"); /* For UBOOT */
+ return ERR_PROG_ERROR;
+ }
+
+ if (code & SF_BIT_MEM_REGION_ERR) {
+ printk(KERN_ERR "Serial Flash Memory Region Error\n") ;/* For UBOOT */
+ return ERR_PROG_ERROR;
+ }
+
+ if (code & SF_BIT_PWR_DWN_ACC_ERR) {
+ printk(KERN_ERR "Serial Flash Power Down Access Error\n") ;/* For UBOOT */
+ return ERR_PROG_ERROR;
+ }
+
+ if (code & SF_BIT_PCMD_OP_ERR) {
+ printk(KERN_ERR "Serial Flash Program CMD OP Error\n") ;/* For UBOOT */
+ return ERR_PROG_ERROR;
+ }
+
+ if (code & SF_BIT_PCMD_ACC_ERR) {
+ printk(KERN_ERR "Serial Flash Program CMD OP Access Error\n") ;/* For UBOOT */
+ return ERR_PROG_ERROR;
+ }
+
+ if (code & SF_BIT_MASLOCK_ERR) {
+ printk(KERN_ERR "Serial Flash Master Lock Error\n") ;/* For UBOOT */
+ return ERR_PROG_ERROR;
+ }
+
+ /* OK, no error */
+ return ERR_OK;
+}
+int spi_read_status(int chip)
+{
+ struct sfreg_t *sfreg = reg_sf;
+ unsigned long temp, timeout = 0x30000000;
+ int rc;
+
+ auto_pll_divisor(DEV_SF, CLK_ENABLE, 0, 0);
+ do {
+ if (chip == 0)
+ temp = sfreg->SPI_MEM_0_SR_ACC;
+ else
+ temp = sfreg->SPI_MEM_1_SR_ACC;
+		/* please see the SPI flash data sheet */
+ if ((temp & 0x1) == 0x0) {
+ //printk(KERN_ERR "ok re flash status=0x%x\n", (unsigned int)sfreg->SPI_MEM_0_SR_ACC);
+ break;
+ }
+
+ rc = flash_error(sfreg->SPI_ERROR_STATUS);
+ if (rc != ERR_OK) {
+ /*printk(KERN_ERR "re sts flash error rc = 0x%x\n", rc);*/
+ sfreg->SPI_ERROR_STATUS = 0x3F; /* write 1 to clear status*/
+ goto sf_err1;
+ } else if (sfreg->SPI_ERROR_STATUS) {
+ sfreg->SPI_ERROR_STATUS = 0x3F;
+ printk(KERN_ERR "re flash error rc = 0x%x status=0x%x\n", rc, (unsigned int)sfreg->SPI_MEM_0_SR_ACC);
+ }
+ timeout--;
+
+ } while (timeout);
+
+ if (timeout == 0) {
+ printk(KERN_ERR "Check SF status timeout\n");
+ return ERR_TIMOUT;
+ }
+ return 0;
+
+sf_err1:
+ return rc;
+}
+EXPORT_SYMBOL(spi_read_status);
+
+int spi_write_status(int chip, unsigned int value)
+{
+ struct sfreg_t *sfreg = reg_sf;
+ int rc, index = 0, ii;
+ unsigned int temp;
+ ii = *(volatile unsigned char *)(GPIO_BASE_ADDR + 0xDF);
+ auto_pll_divisor(DEV_SF, CLK_ENABLE, 0, 0);
+
+ rc = spi_read_status(chip);
+
+wr_sts:
+ if (chip == 0) {
+ sfreg->SPI_WR_EN_CTR = SF_CS0_WR_EN;
+ sfreg->SPI_MEM_0_SR_ACC = value;
+ } else {
+ sfreg->SPI_WR_EN_CTR = SF_CS1_WR_EN;
+ sfreg->SPI_MEM_1_SR_ACC = value;
+ }
+
+ rc = spi_read_status(chip);
+ temp = sfreg->SPI_MEM_0_SR_ACC;
+ if ((temp&1) == 0 && (value&0x9C) != (temp&0x9C)) {
+ printk(KERN_ERR "0x%x wr sf sts reg 0x%x fail i=%d gpio=0x%x\n",value, temp, index, ii);
+ if (index < 10) {
+ index++;
+ goto wr_sts;
+ } else
+ printk(KERN_ERR "write sf status reg 0x%x fail\n", temp);
+ }
+
+ sfreg->SPI_WR_EN_CTR = SF_CS0_WR_DIS;
+
+ rc = spi_read_status(chip);
+
+ auto_pll_divisor(DEV_SF, CLK_DISABLE, 0, 0);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(spi_write_status);
+
+int spi_flash_sector_erase(unsigned long addr, struct sfreg_t *sfreg)
+{
+ unsigned long timeout = 0x600000;
+ unsigned long temp ;
+ int rc;
+
+ mutex_lock(&sector_lock);
+ auto_pll_divisor(DEV_SF, CLK_ENABLE, 0, 0);
+ /*
+ SPI module chip erase
+ SPI flash write enable control register: write enable on chip sel 0
+ */
+ if ((addr + MTDSF_PHY_ADDR) >= g_sf_info[0].phy) {
+ sfreg->SPI_WR_EN_CTR = SF_CS0_WR_EN ;
+ /* printk("sfreg->SPI_ER_START_ADDR = %x \n",sfreg->SPI_ER_START_ADDR);*/
+ /*printk("!!!! Erase chip 0\n"); */
+
+ /* select sector to erase */
+ addr &= 0xFFFF0000;
+ sfreg->SPI_ER_START_ADDR = (addr+MTDSF_PHY_ADDR);
+
+ /*
+ SPI flash erase control register: start chip erase
+ Auto clear when transmit finishes.
+ */
+ sfreg->SPI_ER_CTR = SF_SEC_ER_EN;
+ /*printk("sfreg->SPI_ER_START_ADDR = %x \n",sfreg->SPI_ER_START_ADDR);*/
+
+ /* poll status reg of chip 0 for chip erase */
+ do {
+ //printk("0s");
+ msleep(50);
+ auto_pll_divisor(DEV_SF, CLK_ENABLE, 0, 0);
+ //printk(" 0e\n");
+ udelay(1);
+ temp = sfreg->SPI_MEM_0_SR_ACC;
+			/* please see the SPI flash data sheet */
+ if ((temp & 0x1) == 0x0)
+ break;
+ timeout--;
+
+ } while (timeout);
+
+ if (timeout == 0)
+ goto er_err_timout;
+
+ rc = flash_error(sfreg->SPI_ERROR_STATUS);
+ if (rc != ERR_OK) {
+ /*printk(KERN_ERR "flash error rc = 0x%x\n", rc);*/
+ sfreg->SPI_ERROR_STATUS = 0x3F; /* write 1 to clear status*/
+ goto sf_err;
+ } else if (sfreg->SPI_ERROR_STATUS) {
+ printk(KERN_ERR "flash error rc = 0x%x status=0x%x\n", rc, (unsigned int)sfreg->SPI_MEM_0_SR_ACC);
+ sfreg->SPI_ERROR_STATUS = 0x3F;
+ printk(KERN_ERR "1flash error rc = 0x%x status=0x%x\n", rc, (unsigned int)sfreg->SPI_MEM_0_SR_ACC);
+ }
+
+ sfreg->SPI_WR_EN_CTR = SF_CS0_WR_DIS;
+ goto sf_OK;
+ } else {
+ sfreg->SPI_WR_EN_CTR = SF_CS1_WR_EN;
+ /* select sector to erase */
+ addr &= 0xFFFF0000;
+ sfreg->SPI_ER_START_ADDR = (addr+MTDSF_PHY_ADDR);
+
+ /*
+ SPI flash erase control register: start chip erase
+ Auto clear when transmit finishes.
+ */
+ sfreg->SPI_ER_CTR = SF_SEC_ER_EN;
+
+ /* poll status reg of chip 0 for chip erase */
+ do {
+ //printk("1s");
+ msleep(50);
+ auto_pll_divisor(DEV_SF, CLK_ENABLE, 0, 0);
+ //printk(" 1e\n");
+ udelay(1);
+ temp = sfreg->SPI_MEM_1_SR_ACC;
+			/* please see the SPI flash data sheet */
+ if ((temp & 0x1) == 0x0)
+ break;
+
+ rc = flash_error(sfreg->SPI_ERROR_STATUS);
+ if (rc != ERR_OK) {
+ sfreg->SPI_ERROR_STATUS = 0x3F ; /* write 1 to clear status*/
+ goto sf_err;
+ }
+ timeout--;
+ } while (timeout);
+
+ if (timeout == 0)
+ goto er_err_timout;
+
+ sfreg->SPI_WR_EN_CTR = SF_CS1_WR_DIS ;
+ goto sf_OK;
+ }
+sf_OK:
+ mutex_unlock(&sector_lock);
+ return ERR_OK;
+sf_err:
+ mutex_unlock(&sector_lock);
+ return rc;
+er_err_timout:
+ mutex_unlock(&sector_lock);
+ return ERR_TIMOUT;
+}
+
+/*
+ We could store these in the mtd structure, but we only support 1 device..
+ static struct mtd_info *mtd_info;
+*/
+static int sf_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ int ret;
+ struct wmt_sf_info_t *info = (struct wmt_sf_info_t *)mtd->priv;
+ struct sfreg_t *sfreg = info->reg;
+
+ mutex_lock(&info->lock);
+ auto_pll_divisor(DEV_SF, CLK_ENABLE, 0, 0);
+ ret = spi_flash_sector_erase((unsigned long)instr->addr, sfreg);
+ auto_pll_divisor(DEV_SF, CLK_DISABLE, 0, 0);
+ mutex_unlock(&info->lock);
+
+ if (ret != ERR_OK) {
+ printk(KERN_ERR "sf_erase() error at address 0x%lx \n", (unsigned long)instr->addr);
+ return -EINVAL;
+ }
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+
+int sf_copy_env(char *dest, char *src, int len)
+{
+ int i = 0;
+ int rc,blk;
+ char *s,*p;
+
+ mutex_lock(&sector_lock);
+ rc = spi_read_status(0);
+ if (rc){
+ printk("sfread: sf0 is busy");
+ }
+
+ s = src;
+ p = dest;
+ blk = len/1024;
+ if(len%1024)
+ blk++;
+
+ auto_pll_divisor(DEV_SF, CLK_ENABLE, 0, 0);
+ memcpy(p, s, 0x400);//1K
+ while((p[0x3fe]|p[0x3ff]) != '\0' && i++ < (blk -1)){
+ s += 0x400;
+ p += 0x400;
+ memcpy(p, s, 0x400);//1K
+ }
+
+ auto_pll_divisor(DEV_SF, CLK_DISABLE, 0, 0);
+ mutex_unlock(&sector_lock);
+
+ return len;
+
+}
+
+
+static int sf_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct wmt_sf_info_t *info = (struct wmt_sf_info_t *)mtd->priv;
+ unsigned char *sf_base_addr = info->io_base;
+ int rc;
+
+ mutex_lock(&info->lock);
+ mutex_lock(&sector_lock);
+//printk("re sf check");
+ if ((from + MTDSF_PHY_ADDR) >= g_sf_info[0].phy) {
+ rc = spi_read_status(0);
+ if (rc)
+ printk("sfread: sf0 is busy");
+ } else {
+ rc = spi_read_status(1);
+ if (rc)
+ printk("sfread: sf1 is busy");
+ }
+ //printk("end\n");
+ /*printk("sf_read(pos:%x, len:%x)\n", (long)from, (long)len);*/
+ if (from + len > mtd->size) {
+ printk(KERN_ERR "sf_read() out of bounds (%lx > %lx)\n", (long)(from + len), (long)mtd->size);
+ return -EINVAL;
+ }
+
+ //printk("sfread: lock from%llx, len=%d\n", from, len);
+ auto_pll_divisor(DEV_SF, CLK_ENABLE, 0, 0);
+ //_memcpy_fromio(buf, (sf_base_addr+from), len);
+ memcpy(buf, (sf_base_addr+from), len);
+ auto_pll_divisor(DEV_SF, CLK_DISABLE, 0, 0);
+ mutex_unlock(&sector_lock);
+ mutex_unlock(&info->lock);
+
+ *retlen = len;
+
+
+ return 0;
+
+}
+
+int spi_flash_sector_write(struct sfreg_t *sfreg, unsigned char *sf_base_addr,
+ loff_t to, size_t len, u_char *buf)
+{
+ unsigned long temp;
+ unsigned int i = 0;
+ int rc ;
+ unsigned long timeout = 0x30000000;
+ size_t retlen;
+
+ mutex_lock(&sector_lock);
+ auto_pll_divisor(DEV_SF, CLK_ENABLE, 0, 0);
+ udelay(1);
+ //printk("wr sf check");
+ if ((to + MTDSF_PHY_ADDR) >= g_sf_info[0].phy) {
+ rc = spi_read_status(0);
+ if (rc)
+ printk("wr c0 wait status ret=%d\n", rc);
+ } else {
+ rc = spi_read_status(1);
+ if (rc)
+ printk("wr c1 wait status ret=%d\n", rc);
+ }
+ //printk("end\n");
+ sfreg->SPI_WR_EN_CTR = 0x03;
+
+ while (len >= 8) {
+ _memcpy_toio(((u_char *)(sf_base_addr+to+i)), buf+i, 4);
+ i += 4;
+ _memcpy_toio(((u_char *)(sf_base_addr+to+i)), (buf+i), 4);
+ i += 4;
+ len -= 8;
+ timeout = 0x30000000;
+ do {
+ temp = sfreg->SPI_MEM_0_SR_ACC ;
+ /* please see SPI flash data sheet */
+ if ((temp & 0x1) == 0x0)
+ break ;
+ rc = flash_error(sfreg->SPI_ERROR_STATUS);
+ if (rc != ERR_OK) {
+ sfreg->SPI_ERROR_STATUS = 0x3F ; /* write 1 to clear status */
+ goto sf_wr_err;
+ }
+ timeout--;
+ } while (timeout);
+
+ if (timeout == 0) {
+ printk(KERN_ERR "time out \n");
+ goto err_timeout;
+ }
+ }
+ while (len >= 4) {
+ _memcpy_toio(((u_char *)(sf_base_addr+to+i)), (u_char*)(buf+i), 4);
+ i += 4;
+ len -= 4;
+ if (len) {
+ _memcpy_toio(((u_char *)(sf_base_addr+to+i)), (u_char*)(buf+i), 1);
+ i++;
+ len--;
+ }
+ timeout = 0x30000000;
+ do {
+ temp = sfreg->SPI_MEM_0_SR_ACC ;
+ /* please see SPI flash data sheet */
+ if ((temp & 0x1) == 0x0)
+ break;
+ rc = flash_error(sfreg->SPI_ERROR_STATUS);
+ if (rc != ERR_OK) {
+ sfreg->SPI_ERROR_STATUS = 0x3F ; /* write 1 to clear status */
+ goto sf_wr_err;
+ }
+ timeout--;
+ } while (timeout);
+ if (timeout == 0) {
+ printk(KERN_ERR "time out \n");
+ goto err_timeout;
+ }
+ }
+ while (len) {
+ _memcpy_toio(((u_char *)(sf_base_addr+to+i)), (buf+i), 1);
+ i++;
+ len--;
+ if (len) {
+ _memcpy_toio(((u_char *)(sf_base_addr+to+i)), (buf+i), 1);
+ i++;
+ len--;
+ }
+ timeout = 0x30000000;
+ do {
+ temp = sfreg->SPI_MEM_0_SR_ACC ;
+ /* please see SPI flash data sheet */
+ if ((temp & 0x1) == 0x0)
+ break;
+ rc = flash_error(sfreg->SPI_ERROR_STATUS);
+ if (rc != ERR_OK) {
+ sfreg->SPI_ERROR_STATUS = 0x3F ; /* write 1 to clear status */
+ goto sf_wr_err;
+ }
+ timeout--;
+ } while (timeout);
+
+ if (timeout == 0) {
+ printk(KERN_ERR "time out \n");
+ goto err_timeout;
+ }
+ }
+
+ retlen = i;
+ sfreg->SPI_WR_EN_CTR = 0x00;
+
+ //REG32_VAL(PMCEU_ADDR) &= ~(SF_CLOCK_EN);
+
+ mutex_unlock(&sector_lock);
+ return retlen;
+
+err_timeout:
+ mutex_unlock(&sector_lock);
+ return ERR_TIMOUT;
+sf_wr_err:
+ mutex_unlock(&sector_lock);
+ return rc;
+}
+
+static int sf_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+
+ struct wmt_sf_info_t *info = (struct wmt_sf_info_t *)mtd->priv;
+ unsigned char *sf_base_addr = info->io_base;
+ struct sfreg_t *sfreg = info->reg;
+ size_t ret;
+
+
+ /*printk("sf_write(pos:0x%x, len:0x%x )\n", (long)to, (long)len);*/
+
+ if (to + len > mtd->size) {
+ printk(KERN_ERR "sf_write() out of bounds (%ld > %ld)\n", (long)(to + len), (long)mtd->size);
+ return -EINVAL;
+ }
+
+ mutex_lock(&info->lock);
+ auto_pll_divisor(DEV_SF, CLK_ENABLE, 0, 0);
+ ret = spi_flash_sector_write(sfreg, sf_base_addr, to, len, (u_char *)buf);
+ auto_pll_divisor(DEV_SF, CLK_DISABLE, 0, 0);
+ mutex_unlock(&info->lock);
+
+
+ *retlen = ret;
+
+ return 0;
+}
+
+#if 0
+void print_reg()
+{
+ printk(KERN_INFO "sfreg->CHIP_SEL_0_CFG = %lx\n", sfreg->CHIP_SEL_0_CFG);
+ printk(KERN_INFO "sfreg->CHIP_SEL_1_CFG = %lx\n", sfreg->CHIP_SEL_1_CFG);
+ printk(KERN_INFO "sfreg->SPI_WR_EN_CTR = %lx \n", sfreg->SPI_WR_EN_CTR);
+ printk(KERN_INFO "sfreg->SPI_ER_CTR = %lx \n", sfreg->SPI_ER_CTR);
+ printk(KERN_INFO "sfreg->SPI_ER_START_ADDR = %lx \n", sfreg->SPI_ER_START_ADDR);
+}
+
+void identify_sf_device_id(int sf_num)
+{
+ sfreg->SPI_RD_WR_CTR = 0x10;
+ if (sf_num == 0)
+ printk(KERN_INFO "sfreg->SPI_MEM_0_SR_ACC=%lx\n", sfreg->SPI_MEM_0_SR_ACC);
+ else if (sf_num == 1)
+ printk(KERN_INFO "sfreg->SPI_MEM_0_SR_ACC=%lx\n", sfreg->SPI_MEM_0_SR_ACC);
+ else
+ printk(KERN_ERR "Unkown spi flash! \n");
+}
+#endif
+
+void config_sf_reg(struct sfreg_t *sfreg)
+{
+#if 0
+ sfreg->CHIP_SEL_0_CFG = (MTDSF_PHY_ADDR | 0x0800800); /*0xff800800;*/
+ sfreg->CHIP_SEL_1_CFG = (MTDSF_PHY_ADDR | 0x0800); /*0xff000800;*/
+ sfreg->SPI_INTF_CFG = 0x00030000;
+ printk(KERN_INFO "Eric %s Enter chip0=%x chip1=%x\n"
+ , __func__, sfreg->CHIP_SEL_0_CFG, sfreg->CHIP_SEL_1_CFG);
+#else
+ if (g_sf_info[0].val)
+ sfreg->CHIP_SEL_0_CFG = g_sf_info[0].val;
+ if (g_sf_info[1].val)
+ sfreg->CHIP_SEL_1_CFG = g_sf_info[1].val;
+ else
+ sfreg->CHIP_SEL_1_CFG = 0xff780800;
+ sfreg->SPI_INTF_CFG = 0x00030000;
+#endif
+}
+/*
+void shift_partition_content(int index)
+{
+ int i, j;
+ for ( i = index, j = 0; i < 6; i++, j++) {
+ boot_partitions[j].name = boot_partitions[i].name;
+ boot_partitions[j].offset = boot_partitions[i].offset;
+ boot_partitions[j].size = boot_partitions[i].size;
+ }
+}
+*/
+
+static int sf_erase_disabled(struct mtd_info *mtd, struct erase_info *instr)
+{
+ printk(KERN_WARNING "sf_erase addr 0x%llx, len 0x%llx denied\n", instr->addr, instr->len);
+ return -EPERM;
+}
+
+static int sf_write_disabled(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ printk(KERN_WARNING "sf_write addr 0x%llx, len 0x%x denied\n", to, len);
+ return -EPERM;
+}
+
+int mtdsf_init_device(struct mtd_info *mtd, unsigned long size, char *name)
+{
+ int i;
+ int nr_parts = 0, cut_parts = 0, ret;
+ int secure = wmt_is_secure_enabled();
+
+ mtd->name = name;
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = size;
+ mtd->erasesize = MTDSF_ERASE_SIZE;
+ mtd->owner = THIS_MODULE;
+ mtd->_read = sf_read;
+ if(secure){
+ mtd->_erase = sf_erase_disabled;
+ mtd->_write = sf_write_disabled;
+ }
+ else {
+ mtd->_erase = sf_erase;
+ mtd->_write = sf_write;
+ }
+ mtd->writesize = 1;
+
+
+ if(size == 0x1000000){
+ parts = boot_partitions_16m;
+ nr_parts = NUM_SF_PARTITIONS_16M;
+ }else{
+ parts = boot_partitions;
+ nr_parts = NUM_SF_PARTITIONS;
+ for(i=0; i < NUM_SF_PARTITIONS; i++){
+ parts[i].offset += (0XFFF80000-MTDSF_PHY_ADDR);
+ }
+
+ }
+
+ printk(KERN_INFO "SF Using builtin partition table count=%d %s\n",
+ nr_parts - cut_parts, secure ? "secure" : "" );
+ ret = mtd_device_parse_register(mtd, part_probes, NULL, parts, nr_parts - cut_parts);
+ /*if (ret) {
+ dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret);
+ }*/
+
+ return ret;
+}
+
+static int wmt_sf_probe(struct platform_device *pdev)
+{
+ int err;
+ /*int retval, len = 40;
+ char *buf[40];
+ char *buf1 = "7533967";*/
+/* struct platform_device *pdev = to_platform_device(dev);*/
+ struct wmt_sf_info_t *info;
+ unsigned int sfsize = 0;
+
+ auto_pll_divisor(DEV_SF, CLK_ENABLE, 0, 0);
+ //REG32_VAL(0xFE130314) = 0xc;
+ printk("sf clock =0x%x \n", REG32_VAL(0xFE130314));
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ mutex_init(&info->lock);
+ mutex_init(&sector_lock);
+
+ info->sfmtd = &info->mtd;
+
+ dev_set_drvdata(&pdev->dev, info);
+
+ info->reg = (struct sfreg_t *)SF_BASE_ADDR;
+ /*config_sf_reg(info->reg);*/
+ if (info->reg)
+ err = wmt_sfc_init(info->reg);
+ else
+ err = -EIO;
+
+ if (err) {
+ printk(KERN_ERR "wmt sf controller initial failed\n");
+ goto exit_error;
+ }
+
+ if (g_sf_force_size)
+ sfsize = (g_sf_force_size*1024*1024);
+ else
+ sfsize = (0xFFFFFFFF-MTDSF_PHY_ADDR)+1;//MTDSF_TOTAL_SIZE;
+ printk("MTDSF_PHY_ADDR = %08X, sfsize = %08X\n",MTDSF_PHY_ADDR,sfsize);
+ if (MTDSF_PHY_ADDR == 0xFFFFFFFF || MTDSF_PHY_ADDR == 0xEFFFFFFF) {
+ MTDSF_PHY_ADDR = MTDSF_PHY_ADDR-sfsize+1;
+ printk("MTDSF_PHY_ADDR = %08X, sfsize = %08X\n",MTDSF_PHY_ADDR,sfsize);
+ }
+ info->io_base = (unsigned char *)ioremap(MTDSF_PHY_ADDR, sfsize);
+ if (info->io_base == NULL) {
+ dev_err(&pdev->dev, "cannot reserve register region\n");
+ err = -EIO;
+ goto exit_error;
+ }
+
+ err = mtdsf_init_device(info->sfmtd, sfsize, "mtdsf device");
+ if (err)
+ goto exit_error;
+
+ info->sfmtd->priv = info;
+ reg_sf = info->reg; //for global use.
+
+/* retval = wmt_getsyspara("dan", buf, &len);
+ printk(KERN_INFO "sf read env buf=%s\n", buf);
+ retval = wmt_setsyspara("dan", buf1);
+ retval = wmt_getsyspara("dan", buf, &len);
+ printk(KERN_INFO "sf read env buf=%s\n", buf);*/
+ auto_pll_divisor(DEV_SF, CLK_DISABLE, 0, 0);
+
+ printk(KERN_INFO "wmt sf controller initial ok\n");
+
+exit_error:
+ return err;
+}
+
+/*static int wmt_sf_remove(struct device *dev)*/
+static int wmt_sf_remove(struct platform_device *pdev)
+{
+ struct wmt_sf_info_t *info = dev_get_drvdata(&pdev->dev);
+ int status;
+
+ pr_debug("%s: remove\n", dev_name(&pdev->dev));
+
+ status = mtd_device_unregister(&info->mtd);
+ if (status == 0) {
+ dev_set_drvdata(&pdev->dev, NULL);
+ if (info->io_base)
+ iounmap(info->io_base);
+ kfree(info);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+int wmt_sf_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ unsigned int boot_value = STRAP_STATUS_VAL;
+ int rc = 0;
+
+	/* Check whether we booted from SPI flash, so the driver can manage its own power */
+ if ((boot_value & 0x4008) == (0x4000|SPI_FLASH_TYPE)) {
+ auto_pll_divisor(DEV_SF, CLK_ENABLE, 0, 0);
+ rc = spi_read_status(0);
+ if (rc)
+ printk("sfread: sf0 is busy");
+ }
+
+ printk("suspend pllc=0x%x, div0x%x\n",
+ *(volatile unsigned int *)(0xfe130208), *(volatile unsigned int *)(0xfe13036c));
+
+ printk(KERN_INFO "wmt_sf_suspend\n");
+
+ return 0;
+}
+
+int wmt_sf_resume(struct platform_device *pdev)
+{
+ struct wmt_sf_info_t *info = dev_get_drvdata(&pdev->dev);
+ struct sfreg_t *sfreg = info->reg;
+
+ auto_pll_divisor(DEV_SF, CLK_ENABLE, 0, 0);
+ if (info->reg)
+ config_sf_reg(info->reg);
+ else
+ printk(KERN_ERR "wmt sf restore state error\n");
+
+ if (g_sf_info[0].id == SF_IDALL(ATMEL_MANUF, AT_25DF041A_ID)) {
+ printk(KERN_INFO "sf resume and set Global Unprotect\n");
+ sfreg->SPI_INTF_CFG |= SF_MANUAL_MODE; /* enter programmable command mode */
+ sfreg->SPI_PROG_CMD_WBF[0] = SF_CMD_WREN;
+ sfreg->SPI_PROG_CMD_CTR = (0x01000000 | (0<<1)); /* set size and chip select */
+ sfreg->SPI_PROG_CMD_CTR |= SF_RUN_CMD; /* enable programmable command */
+ while ((sfreg->SPI_PROG_CMD_CTR & SF_RUN_CMD) != 0)
+ ;
+ sfreg->SPI_PROG_CMD_WBF[0] = SF_CMD_WRSR;
+ sfreg->SPI_PROG_CMD_WBF[1] = 0x00; /* Global Unprotect */
+ sfreg->SPI_PROG_CMD_CTR = (0x02000000 | (0<<1)); /* set size and chip select */
+ sfreg->SPI_PROG_CMD_CTR |= SF_RUN_CMD; /* enable programmable command */
+ while ((sfreg->SPI_PROG_CMD_CTR & SF_RUN_CMD) != 0)
+ ;
+ sfreg->SPI_PROG_CMD_CTR = 0; /* reset programmable command register*/
+ sfreg->SPI_INTF_CFG &= ~SF_MANUAL_MODE; /* leave programmable mode */
+ }
+
+ auto_pll_divisor(DEV_SF, CLK_DISABLE, 0, 0);
+
+ printk("resume pllc=0x%x, div0x%x\n",
+ *(volatile unsigned int *)(0xfe130208), *(volatile unsigned int *)(0xfe13036c));
+
+ return 0;
+}
+
+#else
+#define wmt_sf_suspend NULL
+#define wmt_sf_resume NULL
+#endif
+
+/*
+struct device_driver wmt_sf_driver = {
+ .name = "sf",
+ .bus = &platform_bus_type,
+ .probe = wmt_sf_probe,
+ .remove = wmt_sf_remove,
+ .suspend = wmt_sf_suspend,
+ .resume = wmt_sf_resume
+};
+*/
+
+struct platform_driver wmt_sf_driver = {
+ .driver.name = "sf",
+ .probe = wmt_sf_probe,
+ .remove = wmt_sf_remove,
+ .suspend = wmt_sf_suspend,
+ .resume = wmt_sf_resume
+};
+
+
+static int __init wmt_sf_init(void)
+{
+ //printk(KERN_INFO "WMT SPI Flash Driver, WonderMedia Technologies, Inc\n");
+ return platform_driver_register(&wmt_sf_driver);
+}
+
+static void __exit wmt_sf_exit(void)
+{
+ platform_driver_unregister(&wmt_sf_driver);
+}
+
+module_init(wmt_sf_init);
+module_exit(wmt_sf_exit);
+
+MODULE_AUTHOR("WonderMedia Technologies, Inc.");
+MODULE_DESCRIPTION("WMT [SF] driver");
+MODULE_LICENSE("GPL");
diff --git a/ANDROID_3.4.5/drivers/mtd/devices/wmt_sf.h b/ANDROID_3.4.5/drivers/mtd/devices/wmt_sf.h
new file mode 100755
index 00000000..d6bc1acc
--- /dev/null
+++ b/ANDROID_3.4.5/drivers/mtd/devices/wmt_sf.h
@@ -0,0 +1,182 @@
+/*++
+drivers/mtd/devices/wmt_sf.h
+
+Copyright (c) 2008 WonderMedia Technologies, Inc.
+
+This program is free software: you can redistribute it and/or modify it under the
+terms of the GNU General Public License as published by the Free Software Foundation,
+either version 2 of the License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details.
+You should have received a copy of the GNU General Public License along with
+this program. If not, see <http://www.gnu.org/licenses/>.
+
+WonderMedia Technologies, Inc.
+10F, 529, Chung-Cheng Road, Hsin-Tien, Taipei 231, R.O.C.
+--*/
+
+#ifndef __WMT_SF_H__
+#define __WMT_SF_H__
+
+#define DRIVER_NAME "MTDSF"
+#define MTDSF_TOTAL_SIZE (16 * 1024 * 1024)
+#define MTDSF_ERASE_SIZE (64 * 1024)
+/*#define MTDSF_PHY_ADDR (0xFF000000)*/
+/* Physical base of the flash window differs depending on whether the
+ * serial flash is the boot device. */
+#define MTDSF_BOOT_PHY_ADDR (0xFF000000)
+#define MTDSF_NOT_BOOT_PHY_ADDR (0xEF000000)
+/* Boot/storage device type codes. */
+#define SPI_FLASH_TYPE 0
+#define EMMC_FLASH_TYPE 1
+#define NAND_FLASH_TYPE 2
+#define ROM_EMMC_FLASH_TYPE 3
+#define SD_FLASH_TYPE 4
+#define USB_FLASH_TYPE 5
+/* NOTE(review): NAND_FLASH_TYPE is redefined below with a different value
+ * (2 above, 6 here). The later definition wins and the compiler will warn
+ * about the incompatible redefinition — confirm which value callers expect
+ * and delete the other definition. */
+#define NAND_FLASH_TYPE 6
+#define NOR_FLASH_TYPE 7
+#define BOOT_TYPE_8BIT 0
+#define BOOT_TYPE_16BIT 1
+
+/* NOTE(review): 7 hex digits — 0x0800000 is bit 23 (same as 0x00800000).
+ * If bit 27 (0x08000000) was intended this is a typo; verify against the
+ * clock-enable register map. */
+#define SF_CLOCK_EN 0x0800000
+
+/* Memory-mapped register file of the SPI flash controller. The trailing
+ * comments give the absolute register addresses (base 0xD8002000); the
+ * Res* members are pure padding that keeps the following registers at
+ * their documented offsets, so fields must not be reordered or resized. */
+struct sfreg_t {
+	unsigned long volatile CHIP_SEL_0_CFG ;     /* 0xD8002000*/
+	unsigned long volatile Res1 ;               /* 0x04*/
+	unsigned long volatile CHIP_SEL_1_CFG ;     /* 0xD8002008 */
+	unsigned long volatile Res2[13] ;           /* 0x0C */
+	unsigned long volatile SPI_INTF_CFG ;       /* 0xD8002040 */
+	unsigned long volatile Res3[3] ;            /* 0x44 */
+	unsigned long volatile SPI_RD_WR_CTR ;      /* 0xD8002050 */
+	unsigned long volatile Res4[3] ;            /* 0x54 */
+	unsigned long volatile SPI_WR_EN_CTR ;      /* 0xD8002060 */
+	unsigned long volatile Res5[3] ;            /* 0x64 */
+	unsigned long volatile SPI_ER_CTR ;         /* 0xD8002070 */
+	unsigned long volatile SPI_ER_START_ADDR ;  /* 0xD8002074 */
+	unsigned long volatile Res6[2] ;            /* 0x78 */
+	unsigned long volatile SPI_ERROR_STATUS ;   /* 0xD8002080 */
+	unsigned long volatile Res7[31] ;           /* 0x84 */
+	unsigned long volatile SPI_MEM_0_SR_ACC ;   /* 0xD8002100 */
+	unsigned long volatile Res8[3] ;            /* 0x104 */
+	unsigned long volatile SPI_MEM_1_SR_ACC ;   /* 0xD8002110 */
+	unsigned long volatile Res9[27] ;           /* 0x114 */
+	unsigned long volatile SPI_PDWN_CTR_0 ;     /* 0xD8002180 */
+	unsigned long volatile Res10[3] ;           /* 0x184 */
+	unsigned long volatile SPI_PDWN_CTR_1 ;     /* 0xD8002190 */
+	unsigned long volatile Res11[27] ;          /* 0x194 */
+	unsigned long volatile SPI_PROG_CMD_CTR ;   /* 0xD8002200 */
+	unsigned long volatile Res12[3] ;           /* 0x204 */
+	unsigned long volatile SPI_USER_CMD_VAL ;   /* 0xD8002210 */
+	unsigned long volatile Res13[59] ;          /* 0x214 */
+	unsigned char volatile SPI_PROG_CMD_WBF[64] ; /* 0xD8002300 */
+	unsigned long volatile Res14[16] ;          /* 0x340 */
+	unsigned char volatile SPI_PROG_CMD_RBF[64];  /* 0xD8002380 */
+};
+
+/* Per-chip identification record. Field semantics are not fully visible
+ * here — presumably id is the probed JEDEC id, phy the physical window
+ * base and total the capacity; confirm against the probe code. */
+struct wmt_flash_info_t {
+	unsigned int id;
+	unsigned int phy;
+	unsigned int val;
+	unsigned int total;
+};
+
+/* SPI flash erase control register, 0x70 */
+#define SF_SEC_ER_EN 0x8000 /* [15:15] */
+#define SF_SEC_ER_DIS 0x0 /* [15:15] */
+#define SF_CHIP_ER_EN 0x1 /* [0:0] */
+#define SF_CHIP_ER_DIS 0x0 /* [0:0] */
+
+#define FLASH_UNKNOW 0xFFFFFFFF
+
+#define SF_BIT_WR_PROT_ERR 0x20 /* [5:5] */
+#define SF_BIT_MEM_REGION_ERR 0x10 /* [4:4] */
+#define SF_BIT_PWR_DWN_ACC_ERR 0x8 /* [3:3] */
+#define SF_BIT_PCMD_OP_ERR 0x4 /* [2:2] */
+#define SF_BIT_PCMD_ACC_ERR 0x2 /* [1:1] */
+#define SF_BIT_MASLOCK_ERR 0x1 /* [0:0] */
+#define BIT_SEQUENCE_ERROR 0x00300030
+#define BIT_TIMEOUT 0x80000000
+
+#define ERR_OK 0x0
+#define ERR_TIMOUT 0x11
+#define ERR_PROG_ERROR 0x22
+
+#define EON_MANUFACT 0x1C
+#define NUMONYX_MANUFACT 0x20
+#define MXIC_MANUFACT 0xC2
+#define SPANSION_MANUFACT 0x01
+#define SST_MANUFACT 0xBF
+#define WB_MANUFACT 0xEF
+#define ATMEL_MANUF 0x1F
+#define GD_MANUF 0xC8
+
+/* EON */
+#define EON_25P16_ID 0x2015 /* 2 MB */
+#define EON_25P64_ID 0x2017 /* 8 MB */
+#define EON_25Q64_ID 0x3017 /* 8 MB */
+#define EON_25F40_ID 0x3113 /* 512 KB */
+#define EON_25F16_ID 0x3115 /* 2 MB */
+
+/* NUMONYX */
+#define NX_25P16_ID 0x2015 /* 2 MB */
+#define NX_25P64_ID 0x2017 /* 8 MB */
+
+/* MXIC */
+#define MX_L512_ID 0x2010 /* 64 KB , 4KB*/
+#define MX_L1605D_ID 0x2015 /* 2 MB */
+#define MX_L3205D_ID 0x2016 /* 4 MB */
+#define MX_L6405D_ID 0x2017 /* 8 MB */
+#define MX_L1635D_ID 0x2415 /* 2 MB */
+#define MX_L3235D_ID 0x5E16 /* 4 MB */
+#define MX_L12805D_ID 0x2018 /* 16 MB */
+
+/* SPANSION */
+#define SPAN_FL016A_ID 0x0214 /* 2 MB */
+#define SPAN_FL064A_ID 0x0216 /* 8 MB */
+
+/* SST */
+#define SST_VF016B_ID 0x2541 /* 2 MB */
+
+/* WinBond */
+#define WB_X40BV_ID 0x3013 /* 512KB */
+#define WB_X16A_ID 0x3015 /* 2 MB */
+#define WB_X16C_ID 0x4015 /* 2 MB */
+#define WB_X32_ID 0x3016 /* 4 MB */
+#define WB_X64_ID 0x3017 /* 8 MB */
+#define WB_X64_25Q64_ID 0x4017 /* 8 MB */
+#define WB_X128_ID 0x4018 /* 16 MB */
+
+/* ATMEL */
+#define AT_25DF041A_ID 0x4401 /* 512KB */
+
+/* GD -Giga Device- */
+#define GD_25Q40_ID 0x4013 /* 512KB */
+#define GD_25Q128_ID 0x4018 /* 16MB */
+
+/* ST M25P64 CMD */
+#define SF_CMD_WREN 0x06
+#define SF_CMD_WRDI 0x04
+#define SF_CMD_RDID 0x9F
+#define SF_CMD_RDSR 0x05
+#define SF_CMD_WRSR 0x01
+#define SF_CMD_READ 0x03
+#define SF_CMD_FAST_READ 0x0B
+#define SF_CMD_PP 0x02
+#define SF_CMD_SE 0xD8
+#define SF_CMD_BE 0xC7
+#define SF_CMD_RES 0xAB
+
+/* SPI Interface Configuration Register(0x40) */
+#define SF_MANUAL_MODE 0x40
+
+/* SPI Programmable Command Mode Control Register(0x200) */
+#define SF_RUN_CMD 0x01
+
+/* One entry of the supported-device table (sf_ids in wmt_sf_ids.c). */
+struct wm_sf_dev_t {
+	unsigned int id;
+	unsigned int size; /* KBytes */
+};
+
+/* Combine manufacturer byte @x and 16-bit device id @y into a single
+ * lookup key. Arguments are parenthesized so expression arguments expand
+ * correctly (the original ((x<<16)|y) would mis-expand e.g. a|b or a,b). */
+#define SF_IDALL(x, y) (((x) << 16) | (y))
+void shift_partition_content(int index);
+
+#endif /* __WMT_SF_H__ */
diff --git a/ANDROID_3.4.5/drivers/mtd/devices/wmt_sf_ids.c b/ANDROID_3.4.5/drivers/mtd/devices/wmt_sf_ids.c
new file mode 100755
index 00000000..cf1ddbbe
--- /dev/null
+++ b/ANDROID_3.4.5/drivers/mtd/devices/wmt_sf_ids.c
@@ -0,0 +1,74 @@
+/*++
+Copyright (c) 2010 WonderMedia Technologies, Inc.
+
+This program is free software: you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation, either version 2 of the License, or (at your option) any later
+version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details. You
+should have received a copy of the GNU General Public License along with this
+program. If not, see <http://www.gnu.org/licenses/>.
+
+WonderMedia Technologies, Inc.
+10F, 529, Chung-Cheng Road, Hsin-Tien, Taipei 231, R.O.C.
+--*/
+
+#include <linux/module.h>
+#include "wmt_sf.h"
+
+/*
+* Chip ID list
+*
+* Name. ID code, pagesize, chipsize in MegaByte, eraseblock size,
+* options
+*
+* Pagesize; 0, 256, 512
+* 0 get this information from the extended chip ID
++ 256 256 Byte page size
+* 512 512 Byte page size
+*/
+/* Table of supported serial flash parts, keyed by
+ * SF_IDALL(manufacturer byte, device id); the second field is the chip
+ * capacity in KBytes. The table is terminated by a zero-id sentinel, so
+ * entry order and the trailing {0,} must be preserved. */
+struct wm_sf_dev_t sf_ids[] = {
+	/* EON */
+	{SF_IDALL(EON_MANUFACT, EON_25P16_ID), (2*1024)},
+	{SF_IDALL(EON_MANUFACT, EON_25P64_ID), (8*1024)},
+	{SF_IDALL(EON_MANUFACT, EON_25F40_ID), 512},
+	{SF_IDALL(EON_MANUFACT, EON_25F16_ID), (2*1024)},
+	{SF_IDALL(EON_MANUFACT, EON_25Q64_ID), (8*1024)},
+	/* NUMONYX */
+	{SF_IDALL(NUMONYX_MANUFACT, NX_25P16_ID), (2*1024)},
+	{SF_IDALL(NUMONYX_MANUFACT, NX_25P64_ID), (8*1024)},
+	/* MXIC */
+	{SF_IDALL(MXIC_MANUFACT, MX_L512_ID), 64},
+	{SF_IDALL(MXIC_MANUFACT, MX_L1605D_ID), (2*1024)},
+	{SF_IDALL(MXIC_MANUFACT, MX_L3205D_ID), (4*1024)},
+	{SF_IDALL(MXIC_MANUFACT, MX_L6405D_ID), (8*1024)},
+	{SF_IDALL(MXIC_MANUFACT, MX_L1635D_ID), (2*1024)},
+	{SF_IDALL(MXIC_MANUFACT, MX_L3235D_ID), (4*1024)},
+	{SF_IDALL(MXIC_MANUFACT, MX_L12805D_ID), (16*1024)},
+	/* SPANSION */
+	{SF_IDALL(SPANSION_MANUFACT, SPAN_FL016A_ID), (2*1024)},
+	{SF_IDALL(SPANSION_MANUFACT, SPAN_FL064A_ID), (8*1024)},
+	/* SST */
+	{SF_IDALL(SST_MANUFACT, SST_VF016B_ID), (2*1024)},
+	/*WinBond*/
+	{SF_IDALL(WB_MANUFACT, WB_X16A_ID), (2*1024)},
+	{SF_IDALL(WB_MANUFACT, WB_X16C_ID), (2*1024)},
+	{SF_IDALL(WB_MANUFACT, WB_X32_ID), (4*1024)},
+	{SF_IDALL(WB_MANUFACT, WB_X64_ID), (8*1024)},
+	{SF_IDALL(WB_MANUFACT, WB_X64_25Q64_ID), (8*1024)},
+	{SF_IDALL(WB_MANUFACT, WB_X128_ID), (16*1024)},
+	{SF_IDALL(WB_MANUFACT, WB_X40BV_ID), 512},
+	/* ATMEL */
+	{SF_IDALL(ATMEL_MANUF, AT_25DF041A_ID), 512},
+	/* GD */
+	{SF_IDALL(GD_MANUF, GD_25Q40_ID), 512},
+	{SF_IDALL(GD_MANUF, GD_25Q128_ID), 16*1024},
+	{0, } /* sentinel: end of table */
+};
+EXPORT_SYMBOL(sf_ids);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SF device IDs");
diff --git a/ANDROID_3.4.5/drivers/mtd/mtd_blkdevs.c b/ANDROID_3.4.5/drivers/mtd/mtd_blkdevs.c
index f1f06715..f3b8d15b 100644
--- a/ANDROID_3.4.5/drivers/mtd/mtd_blkdevs.c
+++ b/ANDROID_3.4.5/drivers/mtd/mtd_blkdevs.c
@@ -39,11 +39,12 @@
static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);
+struct mutex wmt_lock;
static void blktrans_dev_release(struct kref *kref)
{
struct mtd_blktrans_dev *dev =
- container_of(kref, struct mtd_blktrans_dev, ref);
+ container_of(kref, struct mtd_blktrans_dev, ref);
dev->disk->private_data = NULL;
blk_cleanup_queue(dev->rq);
@@ -74,16 +75,28 @@ static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
mutex_unlock(&blktrans_ref_mutex);
}
-
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
struct mtd_blktrans_dev *dev,
struct request *req)
{
unsigned long block, nsect;
char *buf;
+#if 0/*original: for use mtdblock */
+ int blkshift = tr->blkshift;
+ int blksize = tr->blksize;
+#else/*4K : for use mtdswap */
+ int blkshift = 12;
+ int blksize = 1<<12;
+
+
+ if(dev->mtd->writesize < blksize) {
+ blkshift = tr->blkshift;
+ blksize = tr->blksize;
+ }
- block = blk_rq_pos(req) << 9 >> tr->blkshift;
- nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
+#endif
+ block = blk_rq_pos(req) << 9 >> blkshift;
+ nsect = blk_rq_cur_bytes(req) >> blkshift;
buf = req->buffer;
@@ -97,9 +110,9 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (req->cmd_flags & REQ_DISCARD)
return tr->discard(dev, block, nsect);
- switch(rq_data_dir(req)) {
+ switch (rq_data_dir(req)) {
case READ:
- for (; nsect > 0; nsect--, block++, buf += tr->blksize)
+ for (; nsect > 0; nsect--, block++, buf += blksize) /*tr->blksize*/
if (tr->readsect(dev, block, buf))
return -EIO;
rq_flush_dcache_pages(req);
@@ -109,7 +122,7 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
return -EIO;
rq_flush_dcache_pages(req);
- for (; nsect > 0; nsect--, block++, buf += tr->blksize)
+ for (; nsect > 0; nsect--, block++, buf += blksize) /*tr->blksize*/
if (tr->writesect(dev, block, buf))
return -EIO;
return 0;
@@ -126,6 +139,7 @@ int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
return dev->bg_stop;
}
+
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
static int mtd_blktrans_thread(void *arg)
@@ -136,6 +150,7 @@ static int mtd_blktrans_thread(void *arg)
struct request *req = NULL;
int background_done = 0;
+ mutex_lock(&wmt_lock);
spin_lock_irq(rq->queue_lock);
while (!kthread_should_stop()) {
@@ -160,10 +175,11 @@ static int mtd_blktrans_thread(void *arg)
if (kthread_should_stop())
set_current_state(TASK_RUNNING);
-
+ mutex_unlock(&wmt_lock);
spin_unlock_irq(rq->queue_lock);
schedule();
spin_lock_irq(rq->queue_lock);
+ mutex_lock(&wmt_lock);
continue;
}
@@ -185,7 +201,7 @@ static int mtd_blktrans_thread(void *arg)
__blk_end_request_all(req, -EIO);
spin_unlock_irq(rq->queue_lock);
-
+ mutex_unlock(&wmt_lock);
return 0;
}
@@ -211,7 +227,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
int ret = 0;
if (!dev)
- return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
+ return -ERESTARTSYS; /* FIXME: busy loop! -arnd */
mutex_lock(&dev->lock);
@@ -299,7 +315,7 @@ unlock:
}
static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg)
{
struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
int ret = -ENXIO;
@@ -326,11 +342,11 @@ unlock:
}
static const struct block_device_operations mtd_blktrans_ops = {
- .owner = THIS_MODULE,
- .open = blktrans_open,
- .release = blktrans_release,
- .ioctl = blktrans_ioctl,
- .getgeo = blktrans_getgeo,
+ .owner = THIS_MODULE,
+ .open = blktrans_open,
+ .release = blktrans_release,
+ .ioctl = blktrans_ioctl,
+ .getgeo = blktrans_getgeo,
};
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
@@ -350,9 +366,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
list_for_each_entry(d, &tr->devs, list) {
if (new->devnum == -1) {
/* Use first free number */
- if (d->devnum != last_devnum+1) {
+ if (d->devnum != last_devnum + 1) {
/* Found a free devnum. Plug it in here */
- new->devnum = last_devnum+1;
+ new->devnum = last_devnum + 1;
list_add_tail(&new->list, &d->list);
goto added;
}
@@ -370,7 +386,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
ret = -EBUSY;
if (new->devnum == -1)
- new->devnum = last_devnum+1;
+ new->devnum = last_devnum + 1;
/* Check that the device and any partitions will get valid
* minor numbers and that the disk naming code below can cope
@@ -382,7 +398,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
list_add_tail(&new->list, &tr->devs);
- added:
+added:
mutex_unlock(&blktrans_ref_mutex);
mutex_init(&new->lock);
@@ -417,6 +433,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
"%s%d", tr->name, new->devnum);
set_capacity(gd, (new->size * tr->blksize) >> 9);
+
+ mutex_init(&wmt_lock);
/* Create the request queue */
spin_lock_init(&new->queue_lock);
@@ -425,6 +443,10 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (!new->rq)
goto error3;
+ elevator_exit(new->rq->elevator);
+ new->rq->elevator = NULL;
+ elevator_init(new->rq, "noop");
+
new->rq->queuedata = new;
blk_queue_logical_block_size(new->rq, tr->blksize);
@@ -440,7 +462,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
/* Create processing thread */
/* TODO: workqueue ? */
new->thread = kthread_run(mtd_blktrans_thread, new,
- "%s%d", tr->name, new->mtd->index);
+ "%s%d", tr->name, new->mtd->index);
if (IS_ERR(new->thread)) {
ret = PTR_ERR(new->thread);
goto error4;
@@ -454,7 +476,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (new->disk_attributes) {
ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
- new->disk_attributes);
+ new->disk_attributes);
WARN_ON(ret);
}
return 0;
@@ -479,12 +501,11 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
if (old->disk_attributes)
sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
- old->disk_attributes);
+ old->disk_attributes);
/* Stop new requests to arrive */
del_gendisk(old->disk);
-
/* Stop the thread */
kthread_stop(old->thread);
@@ -495,7 +516,7 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
spin_unlock_irqrestore(&old->queue_lock, flags);
/* If the device is currently open, tell trans driver to close it,
- then put mtd device, and don't touch it again */
+ then put mtd device, and don't touch it again */
mutex_lock(&old->lock);
if (old->open) {
if (old->tr->release)
@@ -516,9 +537,9 @@ static void blktrans_notify_remove(struct mtd_info *mtd)
struct mtd_blktrans_dev *dev, *next;
list_for_each_entry(tr, &blktrans_majors, list)
- list_for_each_entry_safe(dev, next, &tr->devs, list)
- if (dev->mtd == mtd)
- tr->remove_dev(dev);
+ list_for_each_entry_safe(dev, next, &tr->devs, list)
+ if (dev->mtd == mtd)
+ tr->remove_dev(dev);
}
static void blktrans_notify_add(struct mtd_info *mtd)
@@ -529,7 +550,7 @@ static void blktrans_notify_add(struct mtd_info *mtd)
return;
list_for_each_entry(tr, &blktrans_majors, list)
- tr->add_mtd(tr, mtd);
+ tr->add_mtd(tr, mtd);
}
static struct mtd_notifier blktrans_notifier = {
@@ -548,12 +569,12 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (!blktrans_notifier.list.next)
register_mtd_user(&blktrans_notifier);
-
mutex_lock(&mtd_table_mutex);
ret = register_blkdev(tr->major, tr->name);
if (ret < 0) {
- printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
+ printk(KERN_WARNING
+ "Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret);
mutex_unlock(&mtd_table_mutex);
return ret;
@@ -568,8 +589,8 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
list_add(&tr->list, &blktrans_majors);
mtd_for_each_device(mtd)
- if (mtd->type != MTD_ABSENT)
- tr->add_mtd(tr, mtd);
+ if (mtd->type != MTD_ABSENT)
+ tr->add_mtd(tr, mtd);
mutex_unlock(&mtd_table_mutex);
return 0;
@@ -585,7 +606,7 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
list_del(&tr->list);
list_for_each_entry_safe(dev, next, &tr->devs, list)
- tr->remove_dev(dev);
+ tr->remove_dev(dev);
unregister_blkdev(tr->major, tr->name);
mutex_unlock(&mtd_table_mutex);
@@ -611,4 +632,5 @@ EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
+MODULE_DESCRIPTION
+ ("Common interface to block layer for MTD 'translation layers'");
diff --git a/ANDROID_3.4.5/drivers/mtd/mtdchar.c b/ANDROID_3.4.5/drivers/mtd/mtdchar.c
index f2f482be..6c4a4f21 100644
--- a/ANDROID_3.4.5/drivers/mtd/mtdchar.c
+++ b/ANDROID_3.4.5/drivers/mtd/mtdchar.c
@@ -35,11 +35,16 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>
+#include <linux/vmalloc.h>
#include <asm/uaccess.h>
static DEFINE_MUTEX(mtd_mutex);
+extern int wmt_getsyspara(char *varname, char *varval, int *varlen);
+extern int wmt_setsyspara(char *varname, char *varval);
+extern int wmt_write_signed_image(struct write_signed_image *wsi);
+
/*
* Data structure to hold the pointer to the mtd device as well
* as mode information of various use cases.
@@ -888,6 +893,93 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
break;
}
+ case MEMGETENV:
+ {
+ struct env_info_user env_data;
+
+ if (copy_from_user(&env_data, argp, sizeof(struct env_info_user)))
+ return -EFAULT;
+
+ env_data.varname[sizeof(env_data.varname) -1] = '\0';
+ env_data.varlen = sizeof(env_data.varval);
+ ret = wmt_getsyspara(env_data.varname, env_data.varval, &env_data.varlen);
+ if (ret)
+ return -EIO;
+
+ if (copy_to_user(argp, &env_data, sizeof(struct env_info_user)))
+ return -EFAULT;
+ break;
+ }
+
+ case MEMSETENV:
+ {
+ struct env_info_user env_data;
+
+ if (copy_from_user(&env_data, argp, sizeof(struct env_info_user)))
+ return -EFAULT;
+
+ env_data.varname[sizeof(env_data.varname) -1] = '\0';
+ env_data.varval[sizeof(env_data.varval) -1] = '\0';
+ env_data.varlen = sizeof(env_data.varval);
+
+ if (env_data.varpoint == NULL)
+ ret = wmt_setsyspara(env_data.varname, NULL);
+ else
+ ret = wmt_setsyspara(env_data.varname, env_data.varval);
+
+ if (ret)
+ return -EIO;
+
+ break;
+ }
+
+ case MEM_WRITE_SIGNED_IMAGE:
+ {
+ struct write_signed_image w;
+ char * kimage, *ksig;
+ printk("MEM_WRITE_SIGNED_IMAGE : %x\n", MEM_WRITE_SIGNED_IMAGE);
+
+ //if(!access_ok(VERIFY_READ,argp,size)
+ if (copy_from_user(&w, argp, sizeof(struct write_signed_image)))
+ return -EFAULT;
+
+ if( w.img_len > SZ_512K || w.sig_len > SZ_4K)
+ return -E2BIG;
+
+ printk("begin wmt_write_signed_image: type %d/ imglen:%d signlen:%d\n",
+ w.type, w.img_len - 1, w.sig_len);
+
+ kimage = vmalloc(w.img_len);
+ if(!kimage)
+ return -ENOMEM;
+
+ ksig = vmalloc(w.sig_len);
+ if(!ksig) {
+ vfree(kimage);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(kimage, w.img_data, w.img_len) ||
+ copy_from_user(ksig, w.sig_data, w.sig_len)) {
+ vfree(kimage);
+ vfree(ksig);
+ return -EFAULT;
+ }
+
+ w.img_data = kimage;
+ w.sig_data = ksig;
+
+ ret = wmt_write_signed_image(&w);
+
+ printk(" wmt_write_signed_image: type %d/ %x-%x %x-%x return %d\n",
+ w.type, kimage[0], kimage[w.img_len - 1],
+ ksig[0], ksig[w.sig_len - 1], ret);
+
+ vfree(kimage);
+ vfree(ksig);
+ break;
+ }
+
#ifdef CONFIG_HAVE_MTD_OTP
case OTPSELECT:
{
diff --git a/ANDROID_3.4.5/drivers/mtd/mtdcore.c b/ANDROID_3.4.5/drivers/mtd/mtdcore.c
index c837507d..923561d9 100644
--- a/ANDROID_3.4.5/drivers/mtd/mtdcore.c
+++ b/ANDROID_3.4.5/drivers/mtd/mtdcore.c
@@ -454,15 +454,20 @@ out_error:
*
* Returns zero in case of success and a negative error code in case of failure.
*/
+#if defined(CONFIG_MTD_NAND)
+extern struct mtd_partition nand_partitions[];
+#endif
int mtd_device_parse_register(struct mtd_info *mtd, const char **types,
struct mtd_part_parser_data *parser_data,
const struct mtd_partition *parts,
int nr_parts)
{
- int err;
+ int err, i, env_nr_parts = 0;
struct mtd_partition *real_parts;
err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
+ if (err > 0)
+ env_nr_parts = err;
if (err <= 0 && nr_parts && parts) {
real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
GFP_KERNEL);
@@ -471,7 +476,18 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char **types,
else
err = nr_parts;
}
-
+#if defined(CONFIG_MTD_NAND)
+ if (env_nr_parts) {
+ nr_parts = (env_nr_parts < nr_parts) ? env_nr_parts : nr_parts;
+ for (i = 0; i < nr_parts; i++) {
+ if (!strcmp(nand_partitions[i].name, real_parts[i].name)) {
+ /*pdata->partitions[i].offset = real_parts[i].offset;*/
+ nand_partitions[i].offset = real_parts[i].offset;
+ nand_partitions[i].size = real_parts[i].size;
+ }
+ }
+ }
+#endif
if (err > 0) {
err = add_mtd_partitions(mtd, real_parts, err);
kfree(real_parts);
@@ -1021,6 +1037,9 @@ void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
void *kbuf;
+ if (*size > KMALLOC_MAX_SIZE && KMALLOC_MAX_SIZE >=(4*1024*1024) && (mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ *size >>= 1;
+
*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
while (*size > min_alloc) {
@@ -1040,12 +1059,13 @@ void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
-#ifdef CONFIG_PROC_FS
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_MTD_NAND)
/*====================================================================*/
/* Support for /proc/mtd */
static struct proc_dir_entry *proc_mtd;
+static struct proc_dir_entry *proc_wmt_mtd;
static int mtd_proc_show(struct seq_file *m, void *v)
{
@@ -1062,17 +1082,49 @@ static int mtd_proc_show(struct seq_file *m, void *v)
return 0;
}
+extern struct mtd_partition nand_partitions[];
+/* seq_file show handler for /proc/wmt_mtd: like /proc/mtd but prints each
+ * device's cumulative start offset rather than its size. The running
+ * offset accumulates mtd->size over the device list and is reset to 0 when
+ * the entry matches the first NAND partition's name or is mtd0, so offsets
+ * are relative to the start of that device's partition chain. */
+static int wmt_mtd_proc_show(struct seq_file *m, void *v)
+{
+	struct mtd_info *mtd;
+	unsigned long long size = 0; /* running offset of the next entry */
+
+	seq_puts(m, "dev: offset name\n");
+	mutex_lock(&mtd_table_mutex);
+	mtd_for_each_device(mtd) {
+		if(!strcmp(mtd->name, nand_partitions[0].name) || mtd->index == 0) {
+			size = 0;
+		}
+		seq_printf(m, "mtd%d: %8.8llx \"%s\"\n", mtd->index, size, mtd->name);
+		size +=mtd->size;
+	}
+	mutex_unlock(&mtd_table_mutex);
+	return 0;
+}
+
static int mtd_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, mtd_proc_show, NULL);
}
+static int wmt_mtd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wmt_mtd_proc_show, NULL);
+}
+
static const struct file_operations mtd_proc_ops = {
.open = mtd_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
+
+static const struct file_operations wmt_mtd_proc_ops = {
+ .open = wmt_mtd_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
#endif /* CONFIG_PROC_FS */
/*====================================================================*/
@@ -1112,8 +1164,9 @@ static int __init init_mtd(void)
if (ret)
goto err_bdi3;
-#ifdef CONFIG_PROC_FS
+#if defined(CONFIG_PROC_FS)&&defined(CONFIG_MTD_NAND)
proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
+ proc_wmt_mtd = proc_create("wmt_mtd", 0, NULL, &wmt_mtd_proc_ops);
#endif /* CONFIG_PROC_FS */
return 0;
@@ -1130,9 +1183,11 @@ err_reg:
static void __exit cleanup_mtd(void)
{
-#ifdef CONFIG_PROC_FS
+#if defined(CONFIG_PROC_FS)&&defined(CONFIG_MTD_NAND)
if (proc_mtd)
remove_proc_entry( "mtd", NULL);
+ if(proc_wmt_mtd)
+ remove_proc_entry( "wmt_mtd", NULL);
#endif /* CONFIG_PROC_FS */
class_unregister(&mtd_class);
bdi_destroy(&mtd_bdi_unmappable);
diff --git a/ANDROID_3.4.5/drivers/mtd/mtdpart.c b/ANDROID_3.4.5/drivers/mtd/mtdpart.c
index 9651c06d..d3f3ce3e 100644
--- a/ANDROID_3.4.5/drivers/mtd/mtdpart.c
+++ b/ANDROID_3.4.5/drivers/mtd/mtdpart.c
@@ -171,13 +171,14 @@ static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
struct mtd_part *part = PART(mtd);
return part->master->_get_fact_prot_info(part->master, buf, len);
}
-
static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
+ int ret;
struct mtd_part *part = PART(mtd);
- return part->master->_write(part->master, to + part->offset, len,
+ ret = part->master->_write(part->master, to + part->offset, len,
retlen, buf);
+ return ret;
}
static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
@@ -758,3 +759,12 @@ int mtd_is_partition(struct mtd_info *mtd)
return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);
+
+/**
+ * mtd_get_device_size - size of the whole device backing @mtd
+ * @mtd: MTD device or partition
+ *
+ * Returns the size of the master device when @mtd is a partition,
+ * otherwise @mtd's own size.
+ */
+uint64_t mtd_get_device_size(struct mtd_info *mtd)
+{
+	if (!mtd_is_partition(mtd))
+		return mtd->size;
+
+	return PART(mtd)->master->size;
+}
+EXPORT_SYMBOL_GPL(mtd_get_device_size);
diff --git a/ANDROID_3.4.5/drivers/mtd/mtdswap.c b/ANDROID_3.4.5/drivers/mtd/mtdswap.c
index c92f0f6b..1435f880 100644
--- a/ANDROID_3.4.5/drivers/mtd/mtdswap.c
+++ b/ANDROID_3.4.5/drivers/mtd/mtdswap.c
@@ -1,1591 +1,1109 @@
-/*
- * Swap block device support for MTDs
- * Turns an MTD device into a swap device with block wear leveling
- *
- * Copyright © 2007,2011 Nokia Corporation. All rights reserved.
- *
- * Authors: Jarkko Lavinen <jarkko.lavinen@nokia.com>
- *
- * Based on Richard Purdie's earlier implementation in 2007. Background
- * support and lock-less operation written by Adrian Hunter.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/blktrans.h>
-#include <linux/rbtree.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/genhd.h>
-#include <linux/swap.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/device.h>
-#include <linux/math64.h>
-
-#define MTDSWAP_PREFIX "mtdswap"
-
-/*
- * The number of free eraseblocks when GC should stop
- */
-#define CLEAN_BLOCK_THRESHOLD 20
-
-/*
- * Number of free eraseblocks below which GC can also collect low frag
- * blocks.
- */
-#define LOW_FRAG_GC_TRESHOLD 5
-
-/*
- * Wear level cost amortization. We want to do wear leveling on the background
- * without disturbing gc too much. This is made by defining max GC frequency.
- * Frequency value 6 means 1/6 of the GC passes will pick an erase block based
- * on the biggest wear difference rather than the biggest dirtiness.
- *
- * The lower freq2 should be chosen so that it makes sure the maximum erase
- * difference will decrease even if a malicious application is deliberately
- * trying to make erase differences large.
- */
-#define MAX_ERASE_DIFF 4000
-#define COLLECT_NONDIRTY_BASE MAX_ERASE_DIFF
-#define COLLECT_NONDIRTY_FREQ1 6
-#define COLLECT_NONDIRTY_FREQ2 4
-
-#define PAGE_UNDEF UINT_MAX
-#define BLOCK_UNDEF UINT_MAX
-#define BLOCK_ERROR (UINT_MAX - 1)
-#define BLOCK_MAX (UINT_MAX - 2)
-
-#define EBLOCK_BAD (1 << 0)
-#define EBLOCK_NOMAGIC (1 << 1)
-#define EBLOCK_BITFLIP (1 << 2)
-#define EBLOCK_FAILED (1 << 3)
-#define EBLOCK_READERR (1 << 4)
-#define EBLOCK_IDX_SHIFT 5
-
-struct swap_eb {
- struct rb_node rb;
- struct rb_root *root;
-
- unsigned int flags;
- unsigned int active_count;
- unsigned int erase_count;
- unsigned int pad; /* speeds up pointer decrement */
-};
-
-#define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
- rb)->erase_count)
-#define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \
- rb)->erase_count)
-
-struct mtdswap_tree {
- struct rb_root root;
- unsigned int count;
-};
-
-enum {
- MTDSWAP_CLEAN,
- MTDSWAP_USED,
- MTDSWAP_LOWFRAG,
- MTDSWAP_HIFRAG,
- MTDSWAP_DIRTY,
- MTDSWAP_BITFLIP,
- MTDSWAP_FAILING,
- MTDSWAP_TREE_CNT,
-};
-
-struct mtdswap_dev {
- struct mtd_blktrans_dev *mbd_dev;
- struct mtd_info *mtd;
- struct device *dev;
-
- unsigned int *page_data;
- unsigned int *revmap;
-
- unsigned int eblks;
- unsigned int spare_eblks;
- unsigned int pages_per_eblk;
- unsigned int max_erase_count;
- struct swap_eb *eb_data;
-
- struct mtdswap_tree trees[MTDSWAP_TREE_CNT];
-
- unsigned long long sect_read_count;
- unsigned long long sect_write_count;
- unsigned long long mtd_write_count;
- unsigned long long mtd_read_count;
- unsigned long long discard_count;
- unsigned long long discard_page_count;
-
- unsigned int curr_write_pos;
- struct swap_eb *curr_write;
-
- char *page_buf;
- char *oob_buf;
-
- struct dentry *debugfs_root;
-};
-
-struct mtdswap_oobdata {
- __le16 magic;
- __le32 count;
-} __attribute__((packed));
-
-#define MTDSWAP_MAGIC_CLEAN 0x2095
-#define MTDSWAP_MAGIC_DIRTY (MTDSWAP_MAGIC_CLEAN + 1)
-#define MTDSWAP_TYPE_CLEAN 0
-#define MTDSWAP_TYPE_DIRTY 1
-#define MTDSWAP_OOBSIZE sizeof(struct mtdswap_oobdata)
-
-#define MTDSWAP_ERASE_RETRIES 3 /* Before marking erase block bad */
-#define MTDSWAP_IO_RETRIES 3
-
-enum {
- MTDSWAP_SCANNED_CLEAN,
- MTDSWAP_SCANNED_DIRTY,
- MTDSWAP_SCANNED_BITFLIP,
- MTDSWAP_SCANNED_BAD,
-};
-
-/*
- * In the worst case mtdswap_writesect() has allocated the last clean
- * page from the current block and is then pre-empted by the GC
- * thread. The thread can consume a full erase block when moving a
- * block.
- */
-#define MIN_SPARE_EBLOCKS 2
-#define MIN_ERASE_BLOCKS (MIN_SPARE_EBLOCKS + 1)
-
-#define TREE_ROOT(d, name) (&d->trees[MTDSWAP_ ## name].root)
-#define TREE_EMPTY(d, name) (TREE_ROOT(d, name)->rb_node == NULL)
-#define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name))
-#define TREE_COUNT(d, name) (d->trees[MTDSWAP_ ## name].count)
-
-#define MTDSWAP_MBD_TO_MTDSWAP(dev) ((struct mtdswap_dev *)dev->priv)
-
-static char partitions[128] = "";
-module_param_string(partitions, partitions, sizeof(partitions), 0444);
-MODULE_PARM_DESC(partitions, "MTD partition numbers to use as swap "
- "partitions=\"1,3,5\"");
-
-static unsigned int spare_eblocks = 10;
-module_param(spare_eblocks, uint, 0444);
-MODULE_PARM_DESC(spare_eblocks, "Percentage of spare erase blocks for "
- "garbage collection (default 10%)");
-
-static bool header; /* false */
-module_param(header, bool, 0444);
-MODULE_PARM_DESC(header,
- "Include builtin swap header (default 0, without header)");
-
-static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background);
-
-static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb)
-{
- return (loff_t)(eb - d->eb_data) * d->mtd->erasesize;
-}
-
-static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb)
-{
- unsigned int oldidx;
- struct mtdswap_tree *tp;
-
- if (eb->root) {
- tp = container_of(eb->root, struct mtdswap_tree, root);
- oldidx = tp - &d->trees[0];
-
- d->trees[oldidx].count--;
- rb_erase(&eb->rb, eb->root);
- }
-}
-
-static void __mtdswap_rb_add(struct rb_root *root, struct swap_eb *eb)
-{
- struct rb_node **p, *parent = NULL;
- struct swap_eb *cur;
-
- p = &root->rb_node;
- while (*p) {
- parent = *p;
- cur = rb_entry(parent, struct swap_eb, rb);
- if (eb->erase_count > cur->erase_count)
- p = &(*p)->rb_right;
- else
- p = &(*p)->rb_left;
- }
-
- rb_link_node(&eb->rb, parent, p);
- rb_insert_color(&eb->rb, root);
-}
-
-static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx)
-{
- struct rb_root *root;
-
- if (eb->root == &d->trees[idx].root)
- return;
-
- mtdswap_eb_detach(d, eb);
- root = &d->trees[idx].root;
- __mtdswap_rb_add(root, eb);
- eb->root = root;
- d->trees[idx].count++;
-}
-
-static struct rb_node *mtdswap_rb_index(struct rb_root *root, unsigned int idx)
-{
- struct rb_node *p;
- unsigned int i;
-
- p = rb_first(root);
- i = 0;
- while (i < idx && p) {
- p = rb_next(p);
- i++;
- }
-
- return p;
-}
-
-static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
-{
- int ret;
- loff_t offset;
-
- d->spare_eblks--;
- eb->flags |= EBLOCK_BAD;
- mtdswap_eb_detach(d, eb);
- eb->root = NULL;
-
- /* badblocks not supported */
- if (!mtd_can_have_bb(d->mtd))
- return 1;
-
- offset = mtdswap_eb_offset(d, eb);
- dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
- ret = mtd_block_markbad(d->mtd, offset);
-
- if (ret) {
- dev_warn(d->dev, "Mark block bad failed for block at %08llx "
- "error %d\n", offset, ret);
- return ret;
- }
-
- return 1;
-
-}
-
-static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
-{
- unsigned int marked = eb->flags & EBLOCK_FAILED;
- struct swap_eb *curr_write = d->curr_write;
-
- eb->flags |= EBLOCK_FAILED;
- if (curr_write == eb) {
- d->curr_write = NULL;
-
- if (!marked && d->curr_write_pos != 0) {
- mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
- return 0;
- }
- }
-
- return mtdswap_handle_badblock(d, eb);
-}
-
-static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
- struct mtd_oob_ops *ops)
-{
- int ret = mtd_read_oob(d->mtd, from, ops);
-
- if (mtd_is_bitflip(ret))
- return ret;
-
- if (ret) {
- dev_warn(d->dev, "Read OOB failed %d for block at %08llx\n",
- ret, from);
- return ret;
- }
-
- if (ops->oobretlen < ops->ooblen) {
- dev_warn(d->dev, "Read OOB return short read (%zd bytes not "
- "%zd) for block at %08llx\n",
- ops->oobretlen, ops->ooblen, from);
- return -EIO;
- }
-
- return 0;
-}
-
-static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
-{
- struct mtdswap_oobdata *data, *data2;
- int ret;
- loff_t offset;
- struct mtd_oob_ops ops;
-
- offset = mtdswap_eb_offset(d, eb);
-
- /* Check first if the block is bad. */
- if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
- return MTDSWAP_SCANNED_BAD;
-
- ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
- ops.oobbuf = d->oob_buf;
- ops.ooboffs = 0;
- ops.datbuf = NULL;
- ops.mode = MTD_OPS_AUTO_OOB;
-
- ret = mtdswap_read_oob(d, offset, &ops);
-
- if (ret && !mtd_is_bitflip(ret))
- return ret;
-
- data = (struct mtdswap_oobdata *)d->oob_buf;
- data2 = (struct mtdswap_oobdata *)
- (d->oob_buf + d->mtd->ecclayout->oobavail);
-
- if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
- eb->erase_count = le32_to_cpu(data->count);
- if (mtd_is_bitflip(ret))
- ret = MTDSWAP_SCANNED_BITFLIP;
- else {
- if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)
- ret = MTDSWAP_SCANNED_DIRTY;
- else
- ret = MTDSWAP_SCANNED_CLEAN;
- }
- } else {
- eb->flags |= EBLOCK_NOMAGIC;
- ret = MTDSWAP_SCANNED_DIRTY;
- }
-
- return ret;
-}
-
-static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
- u16 marker)
-{
- struct mtdswap_oobdata n;
- int ret;
- loff_t offset;
- struct mtd_oob_ops ops;
-
- ops.ooboffs = 0;
- ops.oobbuf = (uint8_t *)&n;
- ops.mode = MTD_OPS_AUTO_OOB;
- ops.datbuf = NULL;
-
- if (marker == MTDSWAP_TYPE_CLEAN) {
- n.magic = cpu_to_le16(MTDSWAP_MAGIC_CLEAN);
- n.count = cpu_to_le32(eb->erase_count);
- ops.ooblen = MTDSWAP_OOBSIZE;
- offset = mtdswap_eb_offset(d, eb);
- } else {
- n.magic = cpu_to_le16(MTDSWAP_MAGIC_DIRTY);
- ops.ooblen = sizeof(n.magic);
- offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
- }
-
- ret = mtd_write_oob(d->mtd, offset, &ops);
-
- if (ret) {
- dev_warn(d->dev, "Write OOB failed for block at %08llx "
- "error %d\n", offset, ret);
- if (ret == -EIO || mtd_is_eccerr(ret))
- mtdswap_handle_write_error(d, eb);
- return ret;
- }
-
- if (ops.oobretlen != ops.ooblen) {
- dev_warn(d->dev, "Short OOB write for block at %08llx: "
- "%zd not %zd\n",
- offset, ops.oobretlen, ops.ooblen);
- return ret;
- }
-
- return 0;
-}
-
-/*
- * Are there any erase blocks without MAGIC_CLEAN header, presumably
- * because power was cut off after erase but before header write? We
- * need to guestimate the erase count.
- */
-static void mtdswap_check_counts(struct mtdswap_dev *d)
-{
- struct rb_root hist_root = RB_ROOT;
- struct rb_node *medrb;
- struct swap_eb *eb;
- unsigned int i, cnt, median;
-
- cnt = 0;
- for (i = 0; i < d->eblks; i++) {
- eb = d->eb_data + i;
-
- if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
- continue;
-
- __mtdswap_rb_add(&hist_root, eb);
- cnt++;
- }
-
- if (cnt == 0)
- return;
-
- medrb = mtdswap_rb_index(&hist_root, cnt / 2);
- median = rb_entry(medrb, struct swap_eb, rb)->erase_count;
-
- d->max_erase_count = MTDSWAP_ECNT_MAX(&hist_root);
-
- for (i = 0; i < d->eblks; i++) {
- eb = d->eb_data + i;
-
- if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR))
- eb->erase_count = median;
-
- if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
- continue;
-
- rb_erase(&eb->rb, &hist_root);
- }
-}
-
-static void mtdswap_scan_eblks(struct mtdswap_dev *d)
-{
- int status;
- unsigned int i, idx;
- struct swap_eb *eb;
-
- for (i = 0; i < d->eblks; i++) {
- eb = d->eb_data + i;
-
- status = mtdswap_read_markers(d, eb);
- if (status < 0)
- eb->flags |= EBLOCK_READERR;
- else if (status == MTDSWAP_SCANNED_BAD) {
- eb->flags |= EBLOCK_BAD;
- continue;
- }
-
- switch (status) {
- case MTDSWAP_SCANNED_CLEAN:
- idx = MTDSWAP_CLEAN;
- break;
- case MTDSWAP_SCANNED_DIRTY:
- case MTDSWAP_SCANNED_BITFLIP:
- idx = MTDSWAP_DIRTY;
- break;
- default:
- idx = MTDSWAP_FAILING;
- }
-
- eb->flags |= (idx << EBLOCK_IDX_SHIFT);
- }
-
- mtdswap_check_counts(d);
-
- for (i = 0; i < d->eblks; i++) {
- eb = d->eb_data + i;
-
- if (eb->flags & EBLOCK_BAD)
- continue;
-
- idx = eb->flags >> EBLOCK_IDX_SHIFT;
- mtdswap_rb_add(d, eb, idx);
- }
-}
-
-/*
- * Place eblk into a tree corresponding to its number of active blocks
- * it contains.
- */
-static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb)
-{
- unsigned int weight = eb->active_count;
- unsigned int maxweight = d->pages_per_eblk;
-
- if (eb == d->curr_write)
- return;
-
- if (eb->flags & EBLOCK_BITFLIP)
- mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
- else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED))
- mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
- if (weight == maxweight)
- mtdswap_rb_add(d, eb, MTDSWAP_USED);
- else if (weight == 0)
- mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
- else if (weight > (maxweight/2))
- mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG);
- else
- mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG);
-}
-
-
-static void mtdswap_erase_callback(struct erase_info *done)
-{
- wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
- wake_up(wait_q);
-}
-
-static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb)
-{
- struct mtd_info *mtd = d->mtd;
- struct erase_info erase;
- wait_queue_head_t wq;
- unsigned int retries = 0;
- int ret;
-
- eb->erase_count++;
- if (eb->erase_count > d->max_erase_count)
- d->max_erase_count = eb->erase_count;
-
-retry:
- init_waitqueue_head(&wq);
- memset(&erase, 0, sizeof(struct erase_info));
-
- erase.mtd = mtd;
- erase.callback = mtdswap_erase_callback;
- erase.addr = mtdswap_eb_offset(d, eb);
- erase.len = mtd->erasesize;
- erase.priv = (u_long)&wq;
-
- ret = mtd_erase(mtd, &erase);
- if (ret) {
- if (retries++ < MTDSWAP_ERASE_RETRIES) {
- dev_warn(d->dev,
- "erase of erase block %#llx on %s failed",
- erase.addr, mtd->name);
- yield();
- goto retry;
- }
-
- dev_err(d->dev, "Cannot erase erase block %#llx on %s\n",
- erase.addr, mtd->name);
-
- mtdswap_handle_badblock(d, eb);
- return -EIO;
- }
-
- ret = wait_event_interruptible(wq, erase.state == MTD_ERASE_DONE ||
- erase.state == MTD_ERASE_FAILED);
- if (ret) {
- dev_err(d->dev, "Interrupted erase block %#llx erassure on %s",
- erase.addr, mtd->name);
- return -EINTR;
- }
-
- if (erase.state == MTD_ERASE_FAILED) {
- if (retries++ < MTDSWAP_ERASE_RETRIES) {
- dev_warn(d->dev,
- "erase of erase block %#llx on %s failed",
- erase.addr, mtd->name);
- yield();
- goto retry;
- }
-
- mtdswap_handle_badblock(d, eb);
- return -EIO;
- }
-
- return 0;
-}
-
-static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
- unsigned int *block)
-{
- int ret;
- struct swap_eb *old_eb = d->curr_write;
- struct rb_root *clean_root;
- struct swap_eb *eb;
-
- if (old_eb == NULL || d->curr_write_pos >= d->pages_per_eblk) {
- do {
- if (TREE_EMPTY(d, CLEAN))
- return -ENOSPC;
-
- clean_root = TREE_ROOT(d, CLEAN);
- eb = rb_entry(rb_first(clean_root), struct swap_eb, rb);
- rb_erase(&eb->rb, clean_root);
- eb->root = NULL;
- TREE_COUNT(d, CLEAN)--;
-
- ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
- } while (ret == -EIO || mtd_is_eccerr(ret));
-
- if (ret)
- return ret;
-
- d->curr_write_pos = 0;
- d->curr_write = eb;
- if (old_eb)
- mtdswap_store_eb(d, old_eb);
- }
-
- *block = (d->curr_write - d->eb_data) * d->pages_per_eblk +
- d->curr_write_pos;
-
- d->curr_write->active_count++;
- d->revmap[*block] = page;
- d->curr_write_pos++;
-
- return 0;
-}
-
-static unsigned int mtdswap_free_page_cnt(struct mtdswap_dev *d)
-{
- return TREE_COUNT(d, CLEAN) * d->pages_per_eblk +
- d->pages_per_eblk - d->curr_write_pos;
-}
-
-static unsigned int mtdswap_enough_free_pages(struct mtdswap_dev *d)
-{
- return mtdswap_free_page_cnt(d) > d->pages_per_eblk;
-}
-
-static int mtdswap_write_block(struct mtdswap_dev *d, char *buf,
- unsigned int page, unsigned int *bp, int gc_context)
-{
- struct mtd_info *mtd = d->mtd;
- struct swap_eb *eb;
- size_t retlen;
- loff_t writepos;
- int ret;
-
-retry:
- if (!gc_context)
- while (!mtdswap_enough_free_pages(d))
- if (mtdswap_gc(d, 0) > 0)
- return -ENOSPC;
-
- ret = mtdswap_map_free_block(d, page, bp);
- eb = d->eb_data + (*bp / d->pages_per_eblk);
-
- if (ret == -EIO || mtd_is_eccerr(ret)) {
- d->curr_write = NULL;
- eb->active_count--;
- d->revmap[*bp] = PAGE_UNDEF;
- goto retry;
- }
-
- if (ret < 0)
- return ret;
-
- writepos = (loff_t)*bp << PAGE_SHIFT;
- ret = mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf);
- if (ret == -EIO || mtd_is_eccerr(ret)) {
- d->curr_write_pos--;
- eb->active_count--;
- d->revmap[*bp] = PAGE_UNDEF;
- mtdswap_handle_write_error(d, eb);
- goto retry;
- }
-
- if (ret < 0) {
- dev_err(d->dev, "Write to MTD device failed: %d (%zd written)",
- ret, retlen);
- goto err;
- }
-
- if (retlen != PAGE_SIZE) {
- dev_err(d->dev, "Short write to MTD device: %zd written",
- retlen);
- ret = -EIO;
- goto err;
- }
-
- return ret;
-
-err:
- d->curr_write_pos--;
- eb->active_count--;
- d->revmap[*bp] = PAGE_UNDEF;
-
- return ret;
-}
-
-static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
- unsigned int *newblock)
-{
- struct mtd_info *mtd = d->mtd;
- struct swap_eb *eb, *oldeb;
- int ret;
- size_t retlen;
- unsigned int page, retries;
- loff_t readpos;
-
- page = d->revmap[oldblock];
- readpos = (loff_t) oldblock << PAGE_SHIFT;
- retries = 0;
-
-retry:
- ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
-
- if (ret < 0 && !mtd_is_bitflip(ret)) {
- oldeb = d->eb_data + oldblock / d->pages_per_eblk;
- oldeb->flags |= EBLOCK_READERR;
-
- dev_err(d->dev, "Read Error: %d (block %u)\n", ret,
- oldblock);
- retries++;
- if (retries < MTDSWAP_IO_RETRIES)
- goto retry;
-
- goto read_error;
- }
-
- if (retlen != PAGE_SIZE) {
- dev_err(d->dev, "Short read: %zd (block %u)\n", retlen,
- oldblock);
- ret = -EIO;
- goto read_error;
- }
-
- ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1);
- if (ret < 0) {
- d->page_data[page] = BLOCK_ERROR;
- dev_err(d->dev, "Write error: %d\n", ret);
- return ret;
- }
-
- eb = d->eb_data + *newblock / d->pages_per_eblk;
- d->page_data[page] = *newblock;
- d->revmap[oldblock] = PAGE_UNDEF;
- eb = d->eb_data + oldblock / d->pages_per_eblk;
- eb->active_count--;
-
- return 0;
-
-read_error:
- d->page_data[page] = BLOCK_ERROR;
- d->revmap[oldblock] = PAGE_UNDEF;
- return ret;
-}
-
-static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb)
-{
- unsigned int i, block, eblk_base, newblock;
- int ret, errcode;
-
- errcode = 0;
- eblk_base = (eb - d->eb_data) * d->pages_per_eblk;
-
- for (i = 0; i < d->pages_per_eblk; i++) {
- if (d->spare_eblks < MIN_SPARE_EBLOCKS)
- return -ENOSPC;
-
- block = eblk_base + i;
- if (d->revmap[block] == PAGE_UNDEF)
- continue;
-
- ret = mtdswap_move_block(d, block, &newblock);
- if (ret < 0 && !errcode)
- errcode = ret;
- }
-
- return errcode;
-}
-
-static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)
-{
- int idx, stopat;
-
- if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD)
- stopat = MTDSWAP_LOWFRAG;
- else
- stopat = MTDSWAP_HIFRAG;
-
- for (idx = MTDSWAP_BITFLIP; idx >= stopat; idx--)
- if (d->trees[idx].root.rb_node != NULL)
- return idx;
-
- return -1;
-}
-
-static int mtdswap_wlfreq(unsigned int maxdiff)
-{
- unsigned int h, x, y, dist, base;
-
- /*
- * Calculate linear ramp down from f1 to f2 when maxdiff goes from
- * MAX_ERASE_DIFF to MAX_ERASE_DIFF + COLLECT_NONDIRTY_BASE. Similar
- * to triangle with height f1 - f1 and width COLLECT_NONDIRTY_BASE.
- */
-
- dist = maxdiff - MAX_ERASE_DIFF;
- if (dist > COLLECT_NONDIRTY_BASE)
- dist = COLLECT_NONDIRTY_BASE;
-
- /*
- * Modelling the slop as right angular triangle with base
- * COLLECT_NONDIRTY_BASE and height freq1 - freq2. The ratio y/x is
- * equal to the ratio h/base.
- */
- h = COLLECT_NONDIRTY_FREQ1 - COLLECT_NONDIRTY_FREQ2;
- base = COLLECT_NONDIRTY_BASE;
-
- x = dist - base;
- y = (x * h + base / 2) / base;
-
- return COLLECT_NONDIRTY_FREQ2 + y;
-}
-
-static int mtdswap_choose_wl_tree(struct mtdswap_dev *d)
-{
- static unsigned int pick_cnt;
- unsigned int i, idx = -1, wear, max;
- struct rb_root *root;
-
- max = 0;
- for (i = 0; i <= MTDSWAP_DIRTY; i++) {
- root = &d->trees[i].root;
- if (root->rb_node == NULL)
- continue;
-
- wear = d->max_erase_count - MTDSWAP_ECNT_MIN(root);
- if (wear > max) {
- max = wear;
- idx = i;
- }
- }
-
- if (max > MAX_ERASE_DIFF && pick_cnt >= mtdswap_wlfreq(max) - 1) {
- pick_cnt = 0;
- return idx;
- }
-
- pick_cnt++;
- return -1;
-}
-
-static int mtdswap_choose_gc_tree(struct mtdswap_dev *d,
- unsigned int background)
-{
- int idx;
-
- if (TREE_NONEMPTY(d, FAILING) &&
- (background || (TREE_EMPTY(d, CLEAN) && TREE_EMPTY(d, DIRTY))))
- return MTDSWAP_FAILING;
-
- idx = mtdswap_choose_wl_tree(d);
- if (idx >= MTDSWAP_CLEAN)
- return idx;
-
- return __mtdswap_choose_gc_tree(d);
-}
-
-static struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d,
- unsigned int background)
-{
- struct rb_root *rp = NULL;
- struct swap_eb *eb = NULL;
- int idx;
-
- if (background && TREE_COUNT(d, CLEAN) > CLEAN_BLOCK_THRESHOLD &&
- TREE_EMPTY(d, DIRTY) && TREE_EMPTY(d, FAILING))
- return NULL;
-
- idx = mtdswap_choose_gc_tree(d, background);
- if (idx < 0)
- return NULL;
-
- rp = &d->trees[idx].root;
- eb = rb_entry(rb_first(rp), struct swap_eb, rb);
-
- rb_erase(&eb->rb, rp);
- eb->root = NULL;
- d->trees[idx].count--;
- return eb;
-}
-
-static unsigned int mtdswap_test_patt(unsigned int i)
-{
- return i % 2 ? 0x55555555 : 0xAAAAAAAA;
-}
-
-static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
- struct swap_eb *eb)
-{
- struct mtd_info *mtd = d->mtd;
- unsigned int test, i, j, patt, mtd_pages;
- loff_t base, pos;
- unsigned int *p1 = (unsigned int *)d->page_buf;
- unsigned char *p2 = (unsigned char *)d->oob_buf;
- struct mtd_oob_ops ops;
- int ret;
-
- ops.mode = MTD_OPS_AUTO_OOB;
- ops.len = mtd->writesize;
- ops.ooblen = mtd->ecclayout->oobavail;
- ops.ooboffs = 0;
- ops.datbuf = d->page_buf;
- ops.oobbuf = d->oob_buf;
- base = mtdswap_eb_offset(d, eb);
- mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize;
-
- for (test = 0; test < 2; test++) {
- pos = base;
- for (i = 0; i < mtd_pages; i++) {
- patt = mtdswap_test_patt(test + i);
- memset(d->page_buf, patt, mtd->writesize);
- memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
- ret = mtd_write_oob(mtd, pos, &ops);
- if (ret)
- goto error;
-
- pos += mtd->writesize;
- }
-
- pos = base;
- for (i = 0; i < mtd_pages; i++) {
- ret = mtd_read_oob(mtd, pos, &ops);
- if (ret)
- goto error;
-
- patt = mtdswap_test_patt(test + i);
- for (j = 0; j < mtd->writesize/sizeof(int); j++)
- if (p1[j] != patt)
- goto error;
-
- for (j = 0; j < mtd->ecclayout->oobavail; j++)
- if (p2[j] != (unsigned char)patt)
- goto error;
-
- pos += mtd->writesize;
- }
-
- ret = mtdswap_erase_block(d, eb);
- if (ret)
- goto error;
- }
-
- eb->flags &= ~EBLOCK_READERR;
- return 1;
-
-error:
- mtdswap_handle_badblock(d, eb);
- return 0;
-}
-
-static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
-{
- struct swap_eb *eb;
- int ret;
-
- if (d->spare_eblks < MIN_SPARE_EBLOCKS)
- return 1;
-
- eb = mtdswap_pick_gc_eblk(d, background);
- if (!eb)
- return 1;
-
- ret = mtdswap_gc_eblock(d, eb);
- if (ret == -ENOSPC)
- return 1;
-
- if (eb->flags & EBLOCK_FAILED) {
- mtdswap_handle_badblock(d, eb);
- return 0;
- }
-
- eb->flags &= ~EBLOCK_BITFLIP;
- ret = mtdswap_erase_block(d, eb);
- if ((eb->flags & EBLOCK_READERR) &&
- (ret || !mtdswap_eblk_passes(d, eb)))
- return 0;
-
- if (ret == 0)
- ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN);
-
- if (ret == 0)
- mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
- else if (ret != -EIO && !mtd_is_eccerr(ret))
- mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
-
- return 0;
-}
-
-static void mtdswap_background(struct mtd_blktrans_dev *dev)
-{
- struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
- int ret;
-
- while (1) {
- ret = mtdswap_gc(d, 1);
- if (ret || mtd_blktrans_cease_background(dev))
- return;
- }
-}
-
-static void mtdswap_cleanup(struct mtdswap_dev *d)
-{
- vfree(d->eb_data);
- vfree(d->revmap);
- vfree(d->page_data);
- kfree(d->oob_buf);
- kfree(d->page_buf);
-}
-
-static int mtdswap_flush(struct mtd_blktrans_dev *dev)
-{
- struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
-
- mtd_sync(d->mtd);
- return 0;
-}
-
-static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size)
-{
- loff_t offset;
- unsigned int badcnt;
-
- badcnt = 0;
-
- if (mtd_can_have_bb(mtd))
- for (offset = 0; offset < size; offset += mtd->erasesize)
- if (mtd_block_isbad(mtd, offset))
- badcnt++;
-
- return badcnt;
-}
-
-static int mtdswap_writesect(struct mtd_blktrans_dev *dev,
- unsigned long page, char *buf)
-{
- struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
- unsigned int newblock, mapped;
- struct swap_eb *eb;
- int ret;
-
- d->sect_write_count++;
-
- if (d->spare_eblks < MIN_SPARE_EBLOCKS)
- return -ENOSPC;
-
- if (header) {
- /* Ignore writes to the header page */
- if (unlikely(page == 0))
- return 0;
-
- page--;
- }
-
- mapped = d->page_data[page];
- if (mapped <= BLOCK_MAX) {
- eb = d->eb_data + (mapped / d->pages_per_eblk);
- eb->active_count--;
- mtdswap_store_eb(d, eb);
- d->page_data[page] = BLOCK_UNDEF;
- d->revmap[mapped] = PAGE_UNDEF;
- }
-
- ret = mtdswap_write_block(d, buf, page, &newblock, 0);
- d->mtd_write_count++;
-
- if (ret < 0)
- return ret;
-
- eb = d->eb_data + (newblock / d->pages_per_eblk);
- d->page_data[page] = newblock;
-
- return 0;
-}
-
-/* Provide a dummy swap header for the kernel */
-static int mtdswap_auto_header(struct mtdswap_dev *d, char *buf)
-{
- union swap_header *hd = (union swap_header *)(buf);
-
- memset(buf, 0, PAGE_SIZE - 10);
-
- hd->info.version = 1;
- hd->info.last_page = d->mbd_dev->size - 1;
- hd->info.nr_badpages = 0;
-
- memcpy(buf + PAGE_SIZE - 10, "SWAPSPACE2", 10);
-
- return 0;
-}
-
-static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
- unsigned long page, char *buf)
-{
- struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
- struct mtd_info *mtd = d->mtd;
- unsigned int realblock, retries;
- loff_t readpos;
- struct swap_eb *eb;
- size_t retlen;
- int ret;
-
- d->sect_read_count++;
-
- if (header) {
- if (unlikely(page == 0))
- return mtdswap_auto_header(d, buf);
-
- page--;
- }
-
- realblock = d->page_data[page];
- if (realblock > BLOCK_MAX) {
- memset(buf, 0x0, PAGE_SIZE);
- if (realblock == BLOCK_UNDEF)
- return 0;
- else
- return -EIO;
- }
-
- eb = d->eb_data + (realblock / d->pages_per_eblk);
- BUG_ON(d->revmap[realblock] == PAGE_UNDEF);
-
- readpos = (loff_t)realblock << PAGE_SHIFT;
- retries = 0;
-
-retry:
- ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf);
-
- d->mtd_read_count++;
- if (mtd_is_bitflip(ret)) {
- eb->flags |= EBLOCK_BITFLIP;
- mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
- ret = 0;
- }
-
- if (ret < 0) {
- dev_err(d->dev, "Read error %d\n", ret);
- eb->flags |= EBLOCK_READERR;
- mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
- retries++;
- if (retries < MTDSWAP_IO_RETRIES)
- goto retry;
-
- return ret;
- }
-
- if (retlen != PAGE_SIZE) {
- dev_err(d->dev, "Short read %zd\n", retlen);
- return -EIO;
- }
-
- return 0;
-}
-
-static int mtdswap_discard(struct mtd_blktrans_dev *dev, unsigned long first,
- unsigned nr_pages)
-{
- struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
- unsigned long page;
- struct swap_eb *eb;
- unsigned int mapped;
-
- d->discard_count++;
-
- for (page = first; page < first + nr_pages; page++) {
- mapped = d->page_data[page];
- if (mapped <= BLOCK_MAX) {
- eb = d->eb_data + (mapped / d->pages_per_eblk);
- eb->active_count--;
- mtdswap_store_eb(d, eb);
- d->page_data[page] = BLOCK_UNDEF;
- d->revmap[mapped] = PAGE_UNDEF;
- d->discard_page_count++;
- } else if (mapped == BLOCK_ERROR) {
- d->page_data[page] = BLOCK_UNDEF;
- d->discard_page_count++;
- }
- }
-
- return 0;
-}
-
-static int mtdswap_show(struct seq_file *s, void *data)
-{
- struct mtdswap_dev *d = (struct mtdswap_dev *) s->private;
- unsigned long sum;
- unsigned int count[MTDSWAP_TREE_CNT];
- unsigned int min[MTDSWAP_TREE_CNT];
- unsigned int max[MTDSWAP_TREE_CNT];
- unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
- uint64_t use_size;
- char *name[] = {"clean", "used", "low", "high", "dirty", "bitflip",
- "failing"};
-
- mutex_lock(&d->mbd_dev->lock);
-
- for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
- struct rb_root *root = &d->trees[i].root;
-
- if (root->rb_node) {
- count[i] = d->trees[i].count;
- min[i] = rb_entry(rb_first(root), struct swap_eb,
- rb)->erase_count;
- max[i] = rb_entry(rb_last(root), struct swap_eb,
- rb)->erase_count;
- } else
- count[i] = 0;
- }
-
- if (d->curr_write) {
- cw = 1;
- cwp = d->curr_write_pos;
- cwecount = d->curr_write->erase_count;
- }
-
- sum = 0;
- for (i = 0; i < d->eblks; i++)
- sum += d->eb_data[i].erase_count;
-
- use_size = (uint64_t)d->eblks * d->mtd->erasesize;
- bb_cnt = mtdswap_badblocks(d->mtd, use_size);
-
- mapped = 0;
- pages = d->mbd_dev->size;
- for (i = 0; i < pages; i++)
- if (d->page_data[i] != BLOCK_UNDEF)
- mapped++;
-
- mutex_unlock(&d->mbd_dev->lock);
-
- for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
- if (!count[i])
- continue;
-
- if (min[i] != max[i])
- seq_printf(s, "%s:\t%5d erase blocks, erased min %d, "
- "max %d times\n",
- name[i], count[i], min[i], max[i]);
- else
- seq_printf(s, "%s:\t%5d erase blocks, all erased %d "
- "times\n", name[i], count[i], min[i]);
- }
-
- if (bb_cnt)
- seq_printf(s, "bad:\t%5u erase blocks\n", bb_cnt);
-
- if (cw)
- seq_printf(s, "current erase block: %u pages used, %u free, "
- "erased %u times\n",
- cwp, d->pages_per_eblk - cwp, cwecount);
-
- seq_printf(s, "total erasures: %lu\n", sum);
-
- seq_printf(s, "\n");
-
- seq_printf(s, "mtdswap_readsect count: %llu\n", d->sect_read_count);
- seq_printf(s, "mtdswap_writesect count: %llu\n", d->sect_write_count);
- seq_printf(s, "mtdswap_discard count: %llu\n", d->discard_count);
- seq_printf(s, "mtd read count: %llu\n", d->mtd_read_count);
- seq_printf(s, "mtd write count: %llu\n", d->mtd_write_count);
- seq_printf(s, "discarded pages count: %llu\n", d->discard_page_count);
-
- seq_printf(s, "\n");
- seq_printf(s, "total pages: %u\n", pages);
- seq_printf(s, "pages mapped: %u\n", mapped);
-
- return 0;
-}
-
-static int mtdswap_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mtdswap_show, inode->i_private);
-}
-
-static const struct file_operations mtdswap_fops = {
- .open = mtdswap_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int mtdswap_add_debugfs(struct mtdswap_dev *d)
-{
- struct gendisk *gd = d->mbd_dev->disk;
- struct device *dev = disk_to_dev(gd);
-
- struct dentry *root;
- struct dentry *dent;
-
- root = debugfs_create_dir(gd->disk_name, NULL);
- if (IS_ERR(root))
- return 0;
-
- if (!root) {
- dev_err(dev, "failed to initialize debugfs\n");
- return -1;
- }
-
- d->debugfs_root = root;
-
- dent = debugfs_create_file("stats", S_IRUSR, root, d,
- &mtdswap_fops);
- if (!dent) {
- dev_err(d->dev, "debugfs_create_file failed\n");
- debugfs_remove_recursive(root);
- d->debugfs_root = NULL;
- return -1;
- }
-
- return 0;
-}
-
-static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
- unsigned int spare_cnt)
-{
- struct mtd_info *mtd = d->mbd_dev->mtd;
- unsigned int i, eblk_bytes, pages, blocks;
- int ret = -ENOMEM;
-
- d->mtd = mtd;
- d->eblks = eblocks;
- d->spare_eblks = spare_cnt;
- d->pages_per_eblk = mtd->erasesize >> PAGE_SHIFT;
-
- pages = d->mbd_dev->size;
- blocks = eblocks * d->pages_per_eblk;
-
- for (i = 0; i < MTDSWAP_TREE_CNT; i++)
- d->trees[i].root = RB_ROOT;
-
- d->page_data = vmalloc(sizeof(int)*pages);
- if (!d->page_data)
- goto page_data_fail;
-
- d->revmap = vmalloc(sizeof(int)*blocks);
- if (!d->revmap)
- goto revmap_fail;
-
- eblk_bytes = sizeof(struct swap_eb)*d->eblks;
- d->eb_data = vzalloc(eblk_bytes);
- if (!d->eb_data)
- goto eb_data_fail;
-
- for (i = 0; i < pages; i++)
- d->page_data[i] = BLOCK_UNDEF;
-
- for (i = 0; i < blocks; i++)
- d->revmap[i] = PAGE_UNDEF;
-
- d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!d->page_buf)
- goto page_buf_fail;
-
- d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL);
- if (!d->oob_buf)
- goto oob_buf_fail;
-
- mtdswap_scan_eblks(d);
-
- return 0;
-
-oob_buf_fail:
- kfree(d->page_buf);
-page_buf_fail:
- vfree(d->eb_data);
-eb_data_fail:
- vfree(d->revmap);
-revmap_fail:
- vfree(d->page_data);
-page_data_fail:
- printk(KERN_ERR "%s: init failed (%d)\n", MTDSWAP_PREFIX, ret);
- return ret;
-}
-
-static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
-{
- struct mtdswap_dev *d;
- struct mtd_blktrans_dev *mbd_dev;
- char *parts;
- char *this_opt;
- unsigned long part;
- unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
- uint64_t swap_size, use_size, size_limit;
- struct nand_ecclayout *oinfo;
- int ret;
-
- parts = &partitions[0];
- if (!*parts)
- return;
-
- while ((this_opt = strsep(&parts, ",")) != NULL) {
- if (strict_strtoul(this_opt, 0, &part) < 0)
- return;
-
- if (mtd->index == part)
- break;
- }
-
- if (mtd->index != part)
- return;
-
- if (mtd->erasesize < PAGE_SIZE || mtd->erasesize % PAGE_SIZE) {
- printk(KERN_ERR "%s: Erase size %u not multiple of PAGE_SIZE "
- "%lu\n", MTDSWAP_PREFIX, mtd->erasesize, PAGE_SIZE);
- return;
- }
-
- if (PAGE_SIZE % mtd->writesize || mtd->writesize > PAGE_SIZE) {
- printk(KERN_ERR "%s: PAGE_SIZE %lu not multiple of write size"
- " %u\n", MTDSWAP_PREFIX, PAGE_SIZE, mtd->writesize);
- return;
- }
-
- oinfo = mtd->ecclayout;
- if (!oinfo) {
- printk(KERN_ERR "%s: mtd%d does not have OOB\n",
- MTDSWAP_PREFIX, mtd->index);
- return;
- }
-
- if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
- printk(KERN_ERR "%s: Not enough free bytes in OOB, "
- "%d available, %zu needed.\n",
- MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
- return;
- }
-
- if (spare_eblocks > 100)
- spare_eblocks = 100;
-
- use_size = mtd->size;
- size_limit = (uint64_t) BLOCK_MAX * PAGE_SIZE;
-
- if (mtd->size > size_limit) {
- printk(KERN_WARNING "%s: Device too large. Limiting size to "
- "%llu bytes\n", MTDSWAP_PREFIX, size_limit);
- use_size = size_limit;
- }
-
- eblocks = mtd_div_by_eb(use_size, mtd);
- use_size = eblocks * mtd->erasesize;
- bad_blocks = mtdswap_badblocks(mtd, use_size);
- eavailable = eblocks - bad_blocks;
-
- if (eavailable < MIN_ERASE_BLOCKS) {
- printk(KERN_ERR "%s: Not enough erase blocks. %u available, "
- "%d needed\n", MTDSWAP_PREFIX, eavailable,
- MIN_ERASE_BLOCKS);
- return;
- }
-
- spare_cnt = div_u64((uint64_t)eavailable * spare_eblocks, 100);
-
- if (spare_cnt < MIN_SPARE_EBLOCKS)
- spare_cnt = MIN_SPARE_EBLOCKS;
-
- if (spare_cnt > eavailable - 1)
- spare_cnt = eavailable - 1;
-
- swap_size = (uint64_t)(eavailable - spare_cnt) * mtd->erasesize +
- (header ? PAGE_SIZE : 0);
-
- printk(KERN_INFO "%s: Enabling MTD swap on device %lu, size %llu KB, "
- "%u spare, %u bad blocks\n",
- MTDSWAP_PREFIX, part, swap_size / 1024, spare_cnt, bad_blocks);
-
- d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL);
- if (!d)
- return;
-
- mbd_dev = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
- if (!mbd_dev) {
- kfree(d);
- return;
- }
-
- d->mbd_dev = mbd_dev;
- mbd_dev->priv = d;
-
- mbd_dev->mtd = mtd;
- mbd_dev->devnum = mtd->index;
- mbd_dev->size = swap_size >> PAGE_SHIFT;
- mbd_dev->tr = tr;
-
- if (!(mtd->flags & MTD_WRITEABLE))
- mbd_dev->readonly = 1;
-
- if (mtdswap_init(d, eblocks, spare_cnt) < 0)
- goto init_failed;
-
- if (add_mtd_blktrans_dev(mbd_dev) < 0)
- goto cleanup;
-
- d->dev = disk_to_dev(mbd_dev->disk);
-
- ret = mtdswap_add_debugfs(d);
- if (ret < 0)
- goto debugfs_failed;
-
- return;
-
-debugfs_failed:
- del_mtd_blktrans_dev(mbd_dev);
-
-cleanup:
- mtdswap_cleanup(d);
-
-init_failed:
- kfree(mbd_dev);
- kfree(d);
-}
-
-static void mtdswap_remove_dev(struct mtd_blktrans_dev *dev)
-{
- struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
-
- debugfs_remove_recursive(d->debugfs_root);
- del_mtd_blktrans_dev(dev);
- mtdswap_cleanup(d);
- kfree(d);
-}
-
-static struct mtd_blktrans_ops mtdswap_ops = {
- .name = "mtdswap",
- .major = 0,
- .part_bits = 0,
- .blksize = PAGE_SIZE,
- .flush = mtdswap_flush,
- .readsect = mtdswap_readsect,
- .writesect = mtdswap_writesect,
- .discard = mtdswap_discard,
- .background = mtdswap_background,
- .add_mtd = mtdswap_add_mtd,
- .remove_dev = mtdswap_remove_dev,
- .owner = THIS_MODULE,
-};
-
-static int __init mtdswap_modinit(void)
-{
- return register_mtd_blktrans(&mtdswap_ops);
-}
-
-static void __exit mtdswap_modexit(void)
-{
- deregister_mtd_blktrans(&mtdswap_ops);
-}
-
-module_init(mtdswap_modinit);
-module_exit(mtdswap_modexit);
-
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
-MODULE_DESCRIPTION("Block device access to an MTD suitable for using as "
- "swap space");
+/*++
+Some descriptions of this software.
+Copyright © 2014 WonderMedia Technologies, Inc.
+This program is free software: you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation, either version 2 of the License, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+the GNU General Public License for more details. You should have received
+a copy of the GNU General Public License along with this program. If not,
+see <http://www.gnu.org/licenses/>.
+WonderMedia Technologies, Inc.
+4F, 533, Chung-Cheng Road, Hsin-Tien, Taipei 231, R.O.C.
+--*/
+
+/*
+ * Swap block device support for MTDs
+ * Turns an MTD device into a swap device with block wear leveling
+ *
+ * Copyright © 2007,2011 Nokia Corporation. All rights reserved.
+ *
+ * Authors: Jarkko Lavinen <jarkko.lavinen@nokia.com>
+ *
+ * Based on Richard Purdie's earlier implementation in 2007. Background
+ * support and lock-less operation written by Adrian Hunter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/kthread.h>
+#include <linux/blkdev.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/genhd.h>
+#include <linux/swap.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/device.h>
+#include <linux/math64.h>
+#include <linux/random.h>
+#include <linux/suspend.h>
+
+#define MTDSWAP_VERSION "1.0"
+
+/* Logical sector size exported when the flash page size allows it;
+ * otherwise the driver falls back to 512-byte sectors. */
+#define MTDSWAP_SECTOR_SIZE 4096
+#define MTDSWAP_SECTOR_SHIFT 12
+
+/* OOB magic values: erased flash reads 0xff, 0x55 marks a block in use. */
+#define STATUS_FREE (0xff)
+#define STATUS_USED (0x55)
+
+/* Max erase/write attempts before an I/O is given up as -EIO. */
+#define MTDSWAP_IO_RETRIES 3
+
+/* __nosavedata: these survive a hibernation image restore so the
+ * logical->physical mapping built before suspend stays valid after it. */
+int suspend_counts __nosavedata;
+int eba_tbl[1024] __nosavedata;	/* lnum -> pnum map; -1 = unmapped */
+
+/* Classification of a physical erase block after scanning its OOB. */
+enum {
+ MTDSWAP_SCANNED_FREE,
+ MTDSWAP_SCANNED_USED,
+ MTDSWAP_SCANNED_BAD,
+};
+
+/* On-flash record kept in the OOB area of a block's first page. */
+struct mtdswap_oobdata {
+ unsigned int magic;
+ unsigned int erase_count;
+ unsigned int lnum;
+ unsigned int seq_number;
+};
+
+/* In-core state of one physical erase block. */
+struct mtdswap_eb {
+
+ union {
+ struct rb_node rb;	/* linked into d->used or d->free */
+ struct rb_root *root;
+ } u;
+
+ unsigned int erase_count;
+ unsigned int lnum;	/* logical block currently mapped here */
+ unsigned int pnum;	/* own physical index (== position in eb_data) */
+ unsigned int seq_number;	/* age stamp; larger = newer mapping */
+};
+
+/* Per-device state for one mtdswap block-translation instance. */
+struct mtdswap_dev {
+
+ struct mtd_blktrans_dev *mbd;
+ struct mtd_info *mtd; /* mtd device */
+ struct device *dev;
+ struct notifier_block pm_notifier;	/* hibernation hooks */
+
+ struct mtdswap_eb *eb_data;	/* array of pblocks entries */
+ spinlock_t mtdswap_lock;	/* protects the two rb-trees */
+ struct rb_root used;
+ struct rb_root free;
+
+ unsigned int pblocks;	/* total physical erase blocks */
+ unsigned int badblock;
+ unsigned int freeblock;
+ unsigned int usedblock;
+
+ unsigned int page_per_block;
+ unsigned int sector_per_block;
+ unsigned int mean_count;	/* average erase count of used blocks */
+ unsigned int seq_number;	/* highest age stamp seen */
+
+ /* Single erase-block sized write-back cache, guarded by cache_mutex. */
+ struct mutex cache_mutex;
+ unsigned char *cache_data;
+ unsigned long cache_offset;
+ unsigned int cache_size;
+ unsigned char *oob_data;
+ enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
+
+};
+
+#define MTDSWAP_MBD_TO_MTDSWAP(dev) ((struct mtdswap_dev *)dev->priv)
+
+/* Partition-number string extracted from resume_file, e.g. "16". */
+unsigned char partitions[32] = "16";
+
+extern char resume_file[64]; /*defined in hibernation.c*/
+static char *parts = NULL; /* str: mtd part number defined by resume_file */
+static unsigned long part = 0; /* mtd part number defined by resume_file */
+
+static DEFINE_MUTEX(mtdswap_lock);
+
+extern void print_nand_buffer(char *value, unsigned int length);
+
+static void mtdswap_cleanup(struct mtdswap_dev *d);
+static int mtdswap_check_resume(struct mtdswap_dev *d);
+static int swap_tree_add(struct mtdswap_eb *eb, struct rb_root *root);
+static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct mtdswap_eb *eb);
+
+/* Debug helper: dump the whole logical->physical erase-block map. */
+void print_mapping_table(struct mtdswap_dev *d)
+{
+ int i;
+ for (i = 0; i < d->pblocks; i++)
+ printk("\n After checking, lnum%d pnum%d", i, eba_tbl[i]);
+}
+
+/*
+ * Tear down an rb-tree bottom-up without rebalancing: descend to a leaf,
+ * detach it from its parent, repeat.  The nodes themselves are slots in
+ * d->eb_data (vfree'd wholesale in mtdswap_cleanup), hence the
+ * intentionally disabled kfree below.
+ */
+static void swaptree_destroy(struct rb_root *root)
+{
+ struct rb_node *rb;
+ struct mtdswap_eb *e;
+
+ rb = root->rb_node;
+ while (rb) {
+ if (rb->rb_left)
+ rb = rb->rb_left;
+ else if (rb->rb_right)
+ rb = rb->rb_right;
+ else {
+ e = rb_entry(rb, struct mtdswap_eb, u.rb);
+
+ /* leaf: back up and unlink it from the parent */
+ rb = rb_parent(rb);
+ if (rb) {
+ if (rb->rb_left == &e->u.rb)
+ rb->rb_left = NULL;
+ else
+ rb->rb_right = NULL;
+ }
+ /* kfree(e); */
+ }
+ }
+}
+
+/*
+ * Release all per-device state allocated by mtdswap_init().
+ *
+ * The PM notifier must be unregistered first: mtdswap_init() registered
+ * it, and the original code never removed it, so a hibernation event
+ * after device removal would run swap_power_event() against freed
+ * memory (use-after-free).
+ */
+static void mtdswap_cleanup(struct mtdswap_dev *d)
+{
+	unregister_pm_notifier(&d->pm_notifier);
+	swaptree_destroy(&d->used);
+	swaptree_destroy(&d->free);
+	vfree(d->eb_data);
+	vfree(d->cache_data);
+	vfree(d->oob_data);
+}
+
+/* Index of the erase block that byte offset @pos falls in. */
+static unsigned int get_logic_block(struct mtdswap_dev *d, unsigned int pos)
+{
+	unsigned int eb_size = d->mtd->erasesize;
+
+	return pos / eb_size;
+}
+
+/* Byte offset of @pos within its erase block. */
+static unsigned int get_logic_page(struct mtdswap_dev *d, unsigned int pos)
+{
+	unsigned int eb_size = d->mtd->erasesize;
+
+	return pos % eb_size;
+}
+
+/*
+ * Pick the block with the largest erase_count still <= @diff from @root
+ * (wear-leveling selection); falls back to the tree minimum when every
+ * count exceeds @diff.
+ *
+ * Returns NULL when the tree is empty.  The original code fed
+ * rb_first() == NULL straight into rb_entry(), producing a bogus
+ * non-NULL pointer, so the NULL checks in find_new_block() could never
+ * trigger and an empty tree led to a wild dereference.
+ */
+struct mtdswap_eb *find_mtdswap_eb(struct rb_root *root, int diff)
+{
+	struct rb_node *p;
+	struct mtdswap_eb *e;
+
+	p = rb_first(root);
+	if (!p)
+		return NULL;	/* empty tree */
+	e = rb_entry(p, struct mtdswap_eb, u.rb);
+
+	p = root->rb_node;
+	while (p) {
+		struct mtdswap_eb *e1;
+
+		e1 = rb_entry(p, struct mtdswap_eb, u.rb);
+		if (e1->erase_count > diff)
+			p = p->rb_left;
+		else {
+			p = p->rb_right;
+			e = e1;
+		}
+	}
+	return e;
+}
+
+/*
+ * Map logical block @lnum to a fresh physical block and record it in
+ * eba_tbl.  Prefers the free tree; when that is exhausted it steals a
+ * block from the used tree (its previous contents are discarded —
+ * presumably safe because stale swap data is disposable; TODO confirm).
+ * Returns the physical block number, or -1 when no block is available.
+ * Caller holds d->mtdswap_lock.
+ */
+static int find_new_block(struct mtdswap_dev *d, int lnum)
+{
+ /* first we find block from free tree */
+ int key = 0;
+ struct mtdswap_eb *eb;
+
+ d->seq_number++;
+ eb = find_mtdswap_eb(&d->free, key);
+
+ if (eb == NULL) {
+ eb = find_mtdswap_eb(&d->used, key);
+ if (eb == NULL)
+ return -1;
+ rb_erase(&eb->u.rb, &d->used);
+ eb->erase_count++;
+ eb->lnum = lnum;
+ eb->seq_number = d->seq_number;
+
+ } else {
+ rb_erase(&eb->u.rb, &d->free);
+ /* never-used block: start it at the device average */
+ if (eb->erase_count == 0)
+ eb->erase_count = d->mean_count;
+ eb->lnum = lnum;
+ eb->seq_number = d->seq_number;
+ }
+ eba_tbl[lnum] = eb->pnum;
+ return eb->pnum;
+}
+
+/*
+ * Mark @eb bad in the flash's bad-block table after a failed erase or
+ * write.  Note the unusual convention: returns 1 on success (or when
+ * the device has no bad-block support) and the mtd error code when the
+ * mark itself fails; callers ignore the value.
+ */
+static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct mtdswap_eb *eb)
+{
+ int ret;
+ loff_t offset;
+
+ if (!mtd_can_have_bb(d->mtd))
+ return 1;
+
+ offset = mtdswap_eb_offset(d, eb);
+ dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
+ ret = mtd_block_markbad(d->mtd, offset);
+ if (ret) {
+ dev_warn(d->dev, "Mark block bad failed for block at %08llx "
+ "error %d\n", offset, ret);
+ return ret;
+ }
+
+ return 1;
+
+}
+
+/*
+ * Erase with remap: the logical block addressed by @erase->addr is given
+ * a new physical block (wear leveling), the old one — if mapped — is
+ * returned to the used tree for later reclaim.  @erase->addr is
+ * rewritten to the new physical location.  Retries with a fresh block,
+ * marking failures bad, up to MTDSWAP_IO_RETRIES times.
+ */
+static int swap_erase(struct mtdswap_dev *d, struct erase_info *erase)
+{
+ struct mtd_info *mtd = d->mtd;
+ struct mtdswap_eb *eb;
+ unsigned long pos = erase->addr;
+ int lnum = get_logic_block(d, pos);
+ int page = get_logic_page(d, pos);
+ int pnum, ret = 0, retries = 0;
+
+ /* recycle the block previously backing this logical block */
+ if (eba_tbl[lnum] != -1) {
+ eb = d->eb_data + eba_tbl[lnum];
+ spin_lock(&d->mtdswap_lock);
+ swap_tree_add(eb, &d->used);
+ spin_unlock(&d->mtdswap_lock);
+ }
+
+RETRY:
+ spin_lock(&d->mtdswap_lock);
+ pnum = find_new_block(d, lnum);
+ /*printk("\n lnum %d -> %d", lnum, pnum); */
+ spin_unlock(&d->mtdswap_lock);
+ if (pnum == -1)
+ return -EIO;
+
+ eb = d->eb_data + pnum;
+ erase->addr = pnum * mtd->erasesize + page;
+
+ ret = mtd_erase(mtd, erase);
+
+ if (ret) {
+ /* bad block: mark it and try yet another physical block */
+ mtdswap_handle_badblock(d, eb);
+ retries++;
+ if (retries > MTDSWAP_IO_RETRIES)
+ return -EIO;
+ goto RETRY;
+ }
+ return 0;
+}
+
+/*
+ * Write the first data page of a block together with its OOB record
+ * (magic / erase_count / lnum / seq_number) in one mtd_write_oob call.
+ * At most one flash page of @buf is written here; swap_write() handles
+ * the remainder.  ooblen 16 == sizeof(struct mtdswap_oobdata).
+ */
+static int mtdswap_write_marker(struct mtdswap_dev *d, struct mtdswap_eb *eb,
+ loff_t offset, size_t len, unsigned char *buf)
+{
+ struct mtdswap_oobdata *data;
+ struct mtd_info *mtd = d->mtd;
+ int ret;
+ struct mtd_oob_ops ops;
+
+ data = (struct mtdswap_oobdata *)d->oob_data;
+ ops.len = ((len >= mtd->writesize) ? mtd->writesize : len);
+ ops.ooblen = 16;
+ ops.oobbuf = d->oob_data;
+ ops.ooboffs = 0;
+ ops.datbuf = buf;
+ ops.mode = MTD_OPS_AUTO_OOB;
+
+ data->magic = cpu_to_le32(STATUS_USED);
+ data->erase_count = cpu_to_le32(eb->erase_count);
+ data->lnum = cpu_to_le32(eb->lnum);
+ data->seq_number = cpu_to_le32(eb->seq_number);
+
+ ret = mtd_write_oob(mtd, offset, &ops);
+
+ return ret;
+}
+
+/*
+ * Write @len bytes at logical position @pos through the current
+ * eba_tbl mapping.  The first flash page goes out with the OOB marker
+ * via mtdswap_write_marker(); the rest is a plain mtd_write().
+ *
+ * Fix: @len is size_t, so the old "len -= mtd->writesize; if (len <= 0)"
+ * wrapped around for writes shorter than one page ('len <= 0' can only
+ * be true for exactly 0 on an unsigned type) and then issued a huge
+ * bogus mtd_write.  Compare before subtracting instead.
+ */
+static int swap_write(struct mtdswap_dev *d, unsigned long pos, size_t len,
+		       size_t *retlen, unsigned char *buf)
+{
+	struct mtd_info *mtd = d->mtd;
+	int lnum = get_logic_block(d, pos);
+	int page = get_logic_page(d, pos);
+	int pnum = eba_tbl[lnum];
+	unsigned long addr = pnum * mtd->erasesize + page;
+	struct mtdswap_eb *eb = d->eb_data + pnum;
+	int ret;
+
+	*retlen = len;
+	/* First, write datbuf and oobbuf */
+	ret = mtdswap_write_marker(d, eb, addr, len, buf);
+	if (ret) {
+		mtdswap_handle_badblock(d, eb);
+		return ret;
+	}
+	/* Second, just write databuf (whatever exceeds the first page) */
+	if (len <= mtd->writesize)
+		return 0;
+	len -= mtd->writesize;
+	ret =
+	    mtd_write(mtd, addr + mtd->writesize, len, retlen,
+		      buf + mtd->writesize);
+	/*printk("\nwrite data to %d, %s", pnum, current->comm); */
+	if (ret) {
+		mtdswap_handle_badblock(d, eb);
+		return ret;
+	}
+	*retlen += mtd->writesize;
+
+	return ret;
+}
+
+/*
+ * Read @len bytes at logical position @pos through eba_tbl.  An
+ * unmapped block (pnum == -1) is reported as success with *retlen set,
+ * leaving @buf untouched — callers get whatever was in the buffer;
+ * NOTE(review): presumably acceptable for never-written swap pages,
+ * but verify against callers.
+ */
+static int swap_read(struct mtdswap_dev *d, unsigned long pos, size_t len,
+ size_t *retlen, unsigned char *buf)
+{
+ struct mtd_info *mtd = d->mtd;
+ int lnum = get_logic_block(d, pos);
+ int page = get_logic_page(d, pos);
+ int pnum = eba_tbl[lnum];
+ unsigned long addr = pnum * mtd->erasesize + page;
+ /*
+ printk("\nread data from pos 0x%lx, lnum %d, pnum%d page%d",
+ pos, lnum, pnum, page);
+ */
+ if (pnum == -1) {
+ *retlen = len;
+ return 0;
+ }
+
+ return mtd_read(mtd, addr, len, retlen, buf);
+}
+
+/* Thin wrapper: read OOB (and optionally data) at @from using @ops. */
+static int swap_read_oob(struct mtdswap_dev *d, loff_t from,
+		struct mtd_oob_ops *ops)
+{
+	return mtd_read_oob(d->mtd, from, ops);
+}
+
+/* Erase-completion callback: wake the waiter parked on ->priv. */
+static void erase_callback(struct erase_info *done)
+{
+	wait_queue_head_t *waiters = (wait_queue_head_t *) done->priv;
+
+	wake_up(waiters);
+}
+
+/*
+ * Erase the target block (with remap via swap_erase) and then write
+ * @len bytes of @buf to it.  Sleeps on a local waitqueue until the
+ * asynchronous erase completes (woken by erase_callback).  The whole
+ * erase+write is retried on write failure up to MTDSWAP_IO_RETRIES.
+ */
+static int erase_write(struct mtdswap_dev *d, unsigned long pos,
+ int len, unsigned char *buf)
+{
+ struct erase_info erase;
+ struct mtd_info *mtd = d->mtd;
+ DECLARE_WAITQUEUE(wait, current);
+ wait_queue_head_t wait_q;
+ size_t retlen;
+ int ret, retries = 0;
+ /*
+ * First, let's erase the flash block.
+ */
+#if 0
+ if (pos == 0x0)
+ printk("\n Update Swap Header!");
+#endif
+RETRY:
+ init_waitqueue_head(&wait_q);
+ erase.mtd = mtd;
+ erase.callback = erase_callback;
+ erase.len = len;
+ erase.addr = pos;
+ erase.priv = (u_long) & wait_q;
+
+ /* queue ourselves before starting the erase so the wake-up
+ * from erase_callback() cannot be missed */
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&wait_q, &wait);
+ ret = swap_erase(d, &erase);
+ if (ret) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&wait_q, &wait);
+ return ret;
+ }
+
+ schedule(); /* Wait for erase to finish. */
+ remove_wait_queue(&wait_q, &wait);
+ /*
+ * Next, write the data to flash.
+ */
+
+ ret = swap_write(d, pos, len, &retlen, buf);
+ if (ret) {
+ retries++;
+ if (retries > MTDSWAP_IO_RETRIES)
+ return -EIO;
+ goto RETRY;
+ }
+ if (retlen != len)
+ return -EIO;
+ return 0;
+}
+
+/*
+ * Flush the single-block write-back cache to flash when dirty.
+ * Callers hold d->cache_mutex.  Returns 0 when clean/empty or on a
+ * successful flush, otherwise the erase_write() error.
+ */
+static int write_cached_data(struct mtdswap_dev *d)
+{
+	int err;
+
+	if (d->cache_state != STATE_DIRTY)
+		return 0;
+
+	err = erase_write(d, d->cache_offset, d->cache_size, d->cache_data);
+	if (!err)
+		d->cache_state = STATE_EMPTY;
+	return err;
+}
+
+/*
+ * Write @len bytes at @pos through the one-erase-block write-back
+ * cache.  Whole-sector writes bypass the cache (direct erase_write);
+ * partial writes read-modify-write into cache_data and are flushed
+ * lazily by write_cached_data().
+ */
+static int do_cached_write(struct mtdswap_dev *d, unsigned long pos,
+ unsigned int len, unsigned char *buf)
+{
+ unsigned int sect_size = d->cache_size;
+ size_t retlen;
+ int ret;
+ /* print_nand_buffer(buf, len); */
+ while (len > 0) {
+ unsigned long sect_start = (pos / sect_size) * sect_size;
+ unsigned int offset = pos - sect_start;
+ unsigned int size = sect_size - offset;
+ if (size > len)
+ size = len;
+ if (size == sect_size) {
+ /* full sector: no point going through the cache */
+ ret = erase_write(d, pos, size, buf);
+ if (ret)
+ return ret;
+ } else {
+ /* dirty cache holds a different sector: flush first */
+ if (d->cache_state == STATE_DIRTY &&
+ d->cache_offset != sect_start) {
+ mutex_lock(&d->cache_mutex);
+ ret = write_cached_data(d);
+ mutex_unlock(&d->cache_mutex);
+ if (ret)
+ return ret;
+ }
+
+ /* populate the cache with the target sector */
+ if (d->cache_state == STATE_EMPTY ||
+ d->cache_offset != sect_start) {
+ d->cache_state = STATE_EMPTY;
+ ret = swap_read(d, sect_start, sect_size,
+ &retlen, d->cache_data);
+ if (ret)
+ return ret;
+
+ if (retlen != sect_size)
+ return -EIO;
+
+ d->cache_offset = sect_start;
+ d->cache_state = STATE_CLEAN;
+ }
+ memcpy(d->cache_data + offset, buf, size);
+ d->cache_state = STATE_DIRTY;
+ }
+ buf += size;
+ pos += size;
+ len -= size;
+ }
+ return 0;
+}
+
+/*
+ * Read @len bytes at @pos, serving from the write-back cache when the
+ * sector is resident, otherwise straight from flash via swap_read().
+ *
+ * Fix: the original returned from inside the loop on swap_read()
+ * failure or a short read while still holding d->cache_mutex,
+ * deadlocking every later cache operation.  All exits now funnel
+ * through the unlock.
+ */
+static int do_cached_read(struct mtdswap_dev *d, unsigned long pos,
+		int len, char *buf)
+{
+	unsigned int sect_size = d->cache_size;
+	size_t retlen;
+	int ret = 0;
+
+	mutex_lock(&d->cache_mutex);
+	while (len > 0) {
+
+		unsigned long sect_start = (pos / sect_size) * sect_size;
+		unsigned int offset = pos - sect_start;
+		unsigned int size = sect_size - offset;
+
+		if (size > len)
+			size = len;
+		if (d->cache_state != STATE_EMPTY &&
+		    d->cache_offset == sect_start) {
+			/* cache hit */
+			memcpy(buf, d->cache_data + offset, size);
+		} else {
+			ret = swap_read(d, pos, size, &retlen, buf);
+			if (ret)
+				goto out;
+			if (retlen != size) {
+				ret = -EIO;
+				goto out;
+			}
+		}
+		buf += size;
+		pos += size;
+		len -= size;
+	}
+
+out:
+	mutex_unlock(&d->cache_mutex);
+	return ret;
+}
+
+/*
+ * blktrans flush hook: push out the dirty cache sector and sync the
+ * underlying mtd.  NOTE(review): write_cached_data()'s return value is
+ * discarded, so a failed flush is reported as success — confirm whether
+ * callers can act on an error here.
+ */
+static int mtdswap_flush(struct mtd_blktrans_dev *dev)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ mutex_lock(&d->cache_mutex);
+ write_cached_data(d);
+ mutex_unlock(&d->cache_mutex);
+ mtd_sync(d->mtd);
+ return 0;
+}
+
+/*
+ * blktrans read hook: translate the sector number into a byte position
+ * and go through the read path.  4 KiB sectors are used when the flash
+ * page is large enough, 512-byte sectors otherwise.
+ */
+static int mtdswap_readsect(struct mtd_blktrans_dev *dev, unsigned long block,
+		char *buf)
+{
+	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+	unsigned long pos;
+	unsigned int sect_size;
+
+	if (likely(dev->mtd->writesize >= MTDSWAP_SECTOR_SIZE)) {
+		pos = block << MTDSWAP_SECTOR_SHIFT;
+		sect_size = MTDSWAP_SECTOR_SIZE;
+	} else {
+		pos = block << 9;
+		sect_size = 512;
+	}
+
+	return do_cached_read(d, pos, sect_size, buf);
+}
+
+/*
+ * blktrans write hook: mirror of mtdswap_readsect() for the write
+ * path, using the same sector-size selection.
+ */
+static int mtdswap_writesect(struct mtd_blktrans_dev *dev, unsigned long block,
+		char *buf)
+{
+	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+	unsigned long pos;
+	unsigned int sect_size;
+
+	if (likely(dev->mtd->writesize >= MTDSWAP_SECTOR_SIZE)) {
+		pos = block << MTDSWAP_SECTOR_SHIFT;
+		sect_size = MTDSWAP_SECTOR_SIZE;
+	} else {
+		pos = block << 9;
+		sect_size = 512;
+	}
+
+	return do_cached_write(d, pos, sect_size, buf);
+}
+
+/* blktrans removal hook: detach the device and free all driver state. */
+static void mtdswap_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ del_mtd_blktrans_dev(dev);
+ mtdswap_cleanup(d);
+ kfree(d);
+}
+
+/* Flash byte offset of @eb, derived from its index in d->eb_data. */
+static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct mtdswap_eb *eb)
+{
+	loff_t index = eb - d->eb_data;
+
+	return index * d->mtd->erasesize;
+}
+
+/*
+ * Classify physical block @eb by reading its OOB record: bad (per the
+ * flash BBT), used (magic == STATUS_USED; restores erase_count/lnum/
+ * seq_number and updates the device tallies) or free.
+ * NOTE(review): the swap_read_oob() status is ignored — a failed OOB
+ * read classifies the block from stale oob_data; confirm intent.
+ */
+static int mtdswap_read_markers(struct mtdswap_dev *d, struct mtdswap_eb *eb)
+{
+ struct mtdswap_oobdata *data;
+ int ret;
+ loff_t offset;
+ struct mtd_oob_ops ops;
+
+ offset = mtdswap_eb_offset(d, eb);
+ if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset)) {
+ d->badblock++;
+ return MTDSWAP_SCANNED_BAD;
+ }
+
+ ops.ooblen = 16;
+ ops.oobbuf = d->oob_data;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ret = swap_read_oob(d, offset, &ops);
+ data = (struct mtdswap_oobdata *)d->oob_data;
+
+ if (le32_to_cpu(data->magic) == STATUS_USED) {
+ eb->erase_count = le32_to_cpu(data->erase_count);
+ eb->lnum = le32_to_cpu(data->lnum);
+ eb->seq_number = le32_to_cpu(data->seq_number);
+ d->usedblock++;
+ d->mean_count += eb->erase_count;
+
+ /* track the newest mapping stamp seen on flash */
+ if (eb->seq_number > d->seq_number)
+ d->seq_number = eb->seq_number;
+ ret = MTDSWAP_SCANNED_USED;
+ } else {
+ eb->erase_count = 0;
+ d->freeblock++;
+ ret = MTDSWAP_SCANNED_FREE;
+ }
+ eb->pnum = (unsigned int)(eb - d->eb_data);
+ return ret;
+
+}
+
+/*
+ * Insert @eb into @root ordered by erase_count, with pnum as the
+ * tie-breaker.  A node already present (same pnum at the same count)
+ * is left untouched.  Always returns 0.  Caller holds d->mtdswap_lock.
+ */
+static int swap_tree_add(struct mtdswap_eb *eb, struct rb_root *root)
+{
+ struct rb_node **p, *parent = NULL;
+
+ p = &root->rb_node;
+ while (*p) {
+ struct mtdswap_eb *eb1;
+ parent = *p;
+ eb1 = rb_entry(parent, struct mtdswap_eb, u.rb);
+
+ if (eb->erase_count < eb1->erase_count)
+ p = &(*p)->rb_left;
+ else if (eb->erase_count > eb1->erase_count)
+ p = &(*p)->rb_right;
+ else {
+ /* equal wear: order by physical block number */
+ if (eb->pnum == eb1->pnum)
+ return 0;
+
+ if (eb->pnum < eb1->pnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ }
+
+ rb_link_node(&eb->u.rb, parent, p);
+ rb_insert_color(&eb->u.rb, root);
+
+ return 0;
+}
+
+/*
+ * Record @eb in eba_tbl for its logical block.  When two physical
+ * blocks claim the same lnum (stale copy left by a power cut), the one
+ * with the larger seq_number — the newer write — wins.
+ */
+static int build_mapping_table(struct mtdswap_dev *d, struct mtdswap_eb *eb)
+{
+
+ int pnum;
+ struct mtdswap_eb *eb1;
+ pnum = eba_tbl[eb->lnum];
+
+ if (pnum >= 0) {
+ eb1 = d->eb_data + pnum;
+ if (eb1->seq_number > eb->seq_number)
+ return 0;
+ }
+
+ eba_tbl[eb->lnum] = eb->pnum;
+ return 0;
+}
+
+/* Non-zero when used+free+bad blocks fail to add up to the total. */
+static int mtdswap_check_counts(struct mtdswap_dev *d)
+{
+	unsigned int accounted = d->usedblock + d->freeblock + d->badblock;
+
+	return d->pblocks != accounted;
+}
+
+/*
+ * Scan every physical block, classify it via its OOB marker and file
+ * it into the free/used tree.  When @need_build is set the on-flash
+ * lnum records are also replayed into eba_tbl (initial attach); the
+ * suspend/resume paths pass 0 and keep the __nosavedata table.
+ */
+static int mtdswap_scan_eblks(struct mtdswap_dev *d, unsigned int need_build)
+{
+ int status, i;
+ struct mtdswap_eb *eb;
+
+ for (i = 0; i < d->pblocks; i++) {
+ eb = d->eb_data + i;
+ eb->pnum = i;
+ status = mtdswap_read_markers(d, eb);
+ if (status == MTDSWAP_SCANNED_BAD)
+ continue;
+ switch (status) {
+ case MTDSWAP_SCANNED_FREE:
+ spin_lock(&d->mtdswap_lock);
+ swap_tree_add(eb, &d->free);
+ spin_unlock(&d->mtdswap_lock);
+ break;
+ case MTDSWAP_SCANNED_USED:
+ spin_lock(&d->mtdswap_lock);
+ swap_tree_add(eb, &d->used);
+ spin_unlock(&d->mtdswap_lock);
+ if(need_build)
+ build_mapping_table(d, eb);
+ break;
+ }
+ }
+
+ if (mtdswap_check_counts(d))
+ printk(KERN_CRIT "\n NOTICE: MTDSWAP counts are illegal");
+
+ return 0;
+}
+
+#if 0
+/*
+ * Compiled-out write/read/verify stress test.  References
+ * sector_buffer/read_sector_buffer which are not defined in this file;
+ * it would need those (2 MiB each) restored before re-enabling.
+ */
+static void test_swap(struct mtdswap_dev *d)
+{
+ unsigned long start_sector = 0x0;
+ unsigned long sector_count = 0;
+ unsigned long rand_seed = 544;
+ unsigned char write_data = 0;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < 10000; i++) {
+ /* seed the randome: no seed to freeze the test case */
+ srandom32(random32() + i + rand_seed);
+
+/* start_sector = (unsigned long)(random32()%(d->sector_per_block * 64)) & (~(32-1));
+ rand_seed = (unsigned long)(random32()%(d->sector_per_block * 64-start_sector));
+*/
+ write_data = (unsigned char)(random32() % ((unsigned char)-1));
+ sector_count = 1;
+
+ /* set data */
+ memset(sector_buffer, (unsigned char)write_data, 2097152);
+
+ /* write */
+/* ret = ONFM_Write(c, start_sector, sector_count, sector_buffer); */
+ ret = do_cached_write(d, start_sector, 2097152, sector_buffer);
+/* ret = erase_write(d, start_sector, 512, sector_buffer); */
+ if (ret == 0) {
+ /* read and check */
+ ret =
+ do_cached_read(d, start_sector, 2097152,
+ read_sector_buffer);
+ if (ret == 0) {
+ ret =
+ memcmp(sector_buffer, read_sector_buffer,
+ 2097152);
+ }
+ }
+
+ /* print */
+ if (ret != 0) {
+ printk
+ ("\n%d:*FAIL* start address: %d, sector count: %d, data: %d",
+ i, start_sector, sector_count, write_data);
+ break;
+ } else {
+ printk
+ ("\n%d-PASS. start address: %d, sector count: %d, data: %d.",
+ i, start_sector, sector_count, write_data);
+ start_sector += 0x200;
+ }
+ }
+}
+#endif
+
+/*
+ * Rebuild all in-core state after a hibernation restore: drop and
+ * rescan both rb-trees, reset the cache, then rescan the flash without
+ * rebuilding eba_tbl (the __nosavedata table survived the restore).
+ * The block backing logical block 0 (the swap header) is pulled out of
+ * the used tree so it is not handed out for reuse.
+ */
+static int mtdswap_check_resume(struct mtdswap_dev *d)
+{
+ struct mtd_info *mtd = d->mtd;
+ struct mtdswap_eb *eb;
+
+ spin_lock(&d->mtdswap_lock);
+ swaptree_destroy(&d->used);
+ swaptree_destroy(&d->free);
+ spin_unlock(&d->mtdswap_lock);
+ d->mean_count = 1;
+ d->used = d->free = RB_ROOT;
+ d->badblock = d->freeblock = d->usedblock = 0;
+ memset(d->eb_data, 0x00, sizeof(struct mtdswap_eb) * d->pblocks);
+
+ mutex_lock(&d->cache_mutex);
+ d->cache_size = mtd->erasesize;
+ d->cache_state = STATE_EMPTY;
+ d->cache_offset = -1;
+ memset(d->cache_data, 0xFF, mtd->erasesize);
+ mutex_unlock(&d->cache_mutex);
+
+ memset(d->oob_data, 0xFF, mtd->oobsize);
+ mtdswap_scan_eblks(d, 0);
+ eb = d->eb_data + eba_tbl[0];
+ spin_lock(&d->mtdswap_lock);
+ rb_erase(&eb->u.rb, &d->used);
+ spin_unlock(&d->mtdswap_lock);
+#if 0
+ for (i = 0; i < d->pblocks; i++) {
+ if (eba_tbl[i] != -1) {
+ eb = d->eb_data + eba_tbl[i];
+ printk("\n Remove %d from used tree", eb->pnum);
+ rb_erase(&eb->u.rb, &d->used);
+ }
+ }
+#endif
+ /* recompute the average wear accumulated by the scan */
+ if (d->usedblock)
+ d->mean_count = d->mean_count / d->usedblock;
+ return 0;
+}
+
+/*
+ * Prepare for hibernation: same tree/cache reset and rescan as
+ * mtdswap_check_resume(), but on the very first suspend since boot
+ * (suspend_counts == 0) the mapping table is invalidated for all
+ * blocks except logical block 0 before the scan, so only the swap
+ * header mapping is kept.
+ */
+static int mtdswap_check_suspend(struct mtdswap_dev *d)
+{
+ struct mtd_info *mtd = d->mtd;
+ struct mtdswap_eb *eb;
+ int i;
+
+ spin_lock(&d->mtdswap_lock);
+ swaptree_destroy(&d->used);
+ swaptree_destroy(&d->free);
+ spin_unlock(&d->mtdswap_lock);
+
+ d->mean_count = 1;
+ d->used = d->free = RB_ROOT;
+ d->badblock = d->freeblock = d->usedblock = 0;
+ memset(d->eb_data, 0x00, sizeof(struct mtdswap_eb) * d->pblocks);
+ mutex_lock(&d->cache_mutex);
+ d->cache_size = mtd->erasesize;
+ d->cache_state = STATE_EMPTY;
+ d->cache_offset = -1;
+ memset(d->cache_data, 0xFF, mtd->erasesize);
+ mutex_unlock(&d->cache_mutex);
+ memset(d->oob_data, 0xFF, mtd->oobsize);
+
+ if(!suspend_counts) {
+ for (i = 1; i < d->pblocks; i++)
+ eba_tbl[i] = -1;
+ }
+ mtdswap_scan_eblks(d, 0);
+ eb = d->eb_data + eba_tbl[0];
+ spin_lock(&d->mtdswap_lock);
+ rb_erase(&eb->u.rb, &d->used);
+ spin_unlock(&d->mtdswap_lock);
+ suspend_counts = 1;
+#if 0
+ for (i = 0; i < d->pblocks; i++) {
+ if (eba_tbl[i] != -1) {
+ eb = d->eb_data + eba_tbl[i];
+ rb_erase(&eb->u.rb, &d->used);
+ }
+ }
+#endif
+ if (d->usedblock)
+ d->mean_count = d->mean_count / d->usedblock;
+ return 0;
+}
+
+/* Rebuild in-core state after hibernation restore (always 0). */
+static int mtdswap_resume(struct mtdswap_dev *d)
+{
+	return mtdswap_check_resume(d);
+}
+
+/* Prepare in-core state for hibernation (always 0). */
+static int mtdswap_suspend(struct mtdswap_dev *d)
+{
+	return mtdswap_check_suspend(d);
+}
+
+/*
+ * PM notifier callback wired up in mtdswap_init(): rebuilds the
+ * mapping around hibernation.  NOTE(review): PM_HIBERNATION_FINISH is
+ * not a mainline 3.4 PM event — presumably a WMT platform addition;
+ * confirm it is delivered on this kernel.
+ */
+static int swap_power_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct mtdswap_dev *d =
+ container_of(this, struct mtdswap_dev, pm_notifier);
+ switch (event) {
+ case PM_POST_RESTORE: /* in case hibernation restore fail */
+ case PM_POST_HIBERNATION: /* normal case for hibernation finished */
+ mtdswap_resume(d);
+ break;
+ case PM_HIBERNATION_PREPARE:
+ mtdswap_suspend(d);
+ break;
+ case PM_HIBERNATION_FINISH:
+ /* image written: push out any dirty cached sector */
+ mutex_lock(&d->cache_mutex);
+ write_cached_data(d);
+ mutex_unlock(&d->cache_mutex);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/*
+ * Initialise per-device state and perform the first flash scan.
+ * Returns 0 on success, -EINVAL when the device has more erase blocks
+ * than the fixed eba_tbl[] can map, -ENOMEM on allocation failure.
+ *
+ * Fixes: the vmalloc() results were dereferenced unchecked; eba_tbl[]
+ * (1024 entries) was indexed with up to @eblocks entries with no bound
+ * check; and the PM notifier was registered before the buffers its
+ * callback touches existed.
+ */
+static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks)
+{
+	struct mtd_info *mtd = d->mbd->mtd;
+	struct mtdswap_eb *eb;
+	int i;
+
+	d->mtd = mtd;
+	d->pblocks = eblocks;
+	/* eba_tbl is a fixed-size __nosavedata array; refuse devices that
+	 * would overflow it instead of scribbling past the end. */
+	if (d->pblocks > ARRAY_SIZE(eba_tbl))
+		return -EINVAL;
+
+	d->page_per_block = mtd->erasesize / mtd->writesize;
+	d->sector_per_block = mtd->erasesize >> MTDSWAP_SECTOR_SHIFT;
+	d->mean_count = 1;
+	d->used = d->free = RB_ROOT;
+	spin_lock_init(&d->mtdswap_lock);
+	mutex_init(&d->cache_mutex);
+
+	d->badblock = d->freeblock = d->usedblock = 0;
+
+	d->cache_data = vmalloc(mtd->erasesize);
+	d->cache_size = mtd->erasesize;
+	d->cache_state = STATE_EMPTY;
+	d->cache_offset = -1;
+	d->oob_data = vmalloc(mtd->oobsize);
+	d->eb_data = vmalloc(sizeof(struct mtdswap_eb) * d->pblocks);
+	if (!d->cache_data || !d->oob_data || !d->eb_data) {
+		vfree(d->eb_data);
+		vfree(d->oob_data);
+		vfree(d->cache_data);
+		return -ENOMEM;
+	}
+
+	memset(d->eb_data, 0x00, sizeof(struct mtdswap_eb) * d->pblocks);
+	memset(d->cache_data, 0xFF, mtd->erasesize);
+	memset(d->oob_data, 0xFF, mtd->oobsize);
+
+	/* only advertise the PM callback once the state it uses exists */
+	d->pm_notifier.notifier_call = swap_power_event;
+	register_pm_notifier(&d->pm_notifier);
+
+	for (i = 0; i < d->pblocks; i++)
+		eba_tbl[i] = -1;
+
+	mtdswap_scan_eblks(d, 1);
+
+	/* mapped blocks are live; keep them out of the reuse pool */
+	for (i = 0; i < d->pblocks; i++) {
+		if (eba_tbl[i] != -1) {
+			eb = d->eb_data + eba_tbl[i];
+			rb_erase(&eb->u.rb, &d->used);
+		}
+	}
+#if 0
+	for (i = 0; i < d->pblocks; i++)
+		printk("\n lnum%d pnum%d", i, eba_tbl[i]);
+#endif
+	if (d->usedblock)
+		d->mean_count = d->mean_count / d->usedblock;
+	/* test_swap(d); */
+
+	return 0;
+}
+
+/*
+ * Extract the partition-number suffix that follows @source inside
+ * @target (e.g. resume_file "/dev/mtdswap3" with source "mtdswap"
+ * yields "3") into the global 'partitions'.  Returns 1 when @source
+ * occurs in @target, 0 otherwise.
+ *
+ * Fix: the old strncpy(partitions, temp + slen, tlen - slen + 1) could
+ * overrun partitions[32] for a long suffix and never guaranteed NUL
+ * termination; snprintf bounds the copy and always terminates.
+ */
+static int mtdswap_find_mtd(unsigned char *target, unsigned char *source)
+{
+	unsigned char *temp;
+	unsigned int slen = strlen((const char *)source);
+
+	temp = (unsigned char *)strstr((const char *)target,
+				       (const char *)source);
+	if (!temp)
+		return 0;	/*find mtd = false*/
+
+	snprintf((char *)partitions, sizeof(partitions), "%s", temp + slen);
+	return 1;		/*find mtd = true*/
+}
+
+
+/*
+ * blktrans attach hook: bind to the mtd partition named exactly "swap"
+ * and register it as a block device.  When the partition index differs
+ * from the one parsed out of resume_file, resume_file (and the
+ * 'partitions' string) are rewritten to match reality.
+ *
+ * Fixes: resume_file was rebuilt with strncat calls whose size
+ * arguments were sizeof(source) rather than remaining space — replaced
+ * with one bounded snprintf; dropped the unused 'info' local; failures
+ * of mtdswap_init() are now real error codes.
+ */
+static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+	struct mtdswap_dev *d;
+	struct mtd_blktrans_dev *mbd_dev;
+	unsigned long use_size;
+	int eblocks;
+
+	/* sizeof("swap") compares the NUL too: name must be exactly "swap" */
+	if (memcmp(mtd->name, "swap", sizeof("swap")) != 0)
+		return;
+	if (mtd->index != part) {
+		printk(KERN_WARNING"\n Find swap partition mtdswap%d != mtdswap%lu\n", mtd->index, part);
+		/*replace original resume_file with what we actaully find.*/
+		snprintf((char *)partitions, sizeof(partitions), "%d",
+			 mtd->index);
+		snprintf(resume_file, sizeof(resume_file), "/dev/mtdswap%d",
+			 mtd->index);
+		printk(KERN_WARNING"Replace resume_file As : %s\n", resume_file);
+	}
+
+	printk(KERN_INFO "Enabling MTD swap on device %d, size %lldMB, ",
+	       mtd->index, mtd->size / 1024 / 1024);
+
+	use_size = mtd->size;
+	eblocks = mtd_div_by_eb(use_size, mtd);
+
+	d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL);
+	if (!d)
+		return;
+	mbd_dev = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
+	if (!mbd_dev) {
+		kfree(d);
+		return;
+	}
+
+	d->mbd = mbd_dev;
+	mbd_dev->priv = d;
+
+	mbd_dev->mtd = mtd;
+	mbd_dev->devnum = mtd->index;
+	mbd_dev->size = use_size >> 9;	/* blktrans size is in 512B units */
+	mbd_dev->tr = tr;
+
+	if (!(mtd->flags & MTD_WRITEABLE))
+		mbd_dev->readonly = 1;
+
+	if (mtdswap_init(d, eblocks) < 0)
+		goto init_failed;
+	if (add_mtd_blktrans_dev(mbd_dev) < 0)
+		goto cleanup;
+	d->dev = disk_to_dev(mbd_dev->disk);
+	return;
+
+cleanup:
+	mtdswap_cleanup(d);
+
+init_failed:
+	kfree(mbd_dev);
+	kfree(d);
+}
+
+/* blktrans open hook: no per-open state to set up. */
+static int mtdswap_open(struct mtd_blktrans_dev *dev)
+{
+	return 0;
+}
+
+/* blktrans release hook: flush any dirty cached sector to flash. */
+static int mtdswap_release(struct mtd_blktrans_dev *dev)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ mutex_lock(&d->cache_mutex);
+ write_cached_data(d);
+ mutex_unlock(&d->cache_mutex);
+ return 0;
+}
+
+/*
+ * Block-translation operations.  blksize is advertised as 512; the
+ * readsect/writesect hooks internally switch to 4 KiB transfers on
+ * large-page flash.  major 0 = dynamically allocated.
+ */
+static struct mtd_blktrans_ops mtdswap_ops = {
+ .name = "mtdswap",
+ .major = 0,
+ .part_bits = 0,
+ .blksize = 512,
+ .open = mtdswap_open,
+ .flush = mtdswap_flush,
+ .release = mtdswap_release,
+ .readsect = mtdswap_readsect,
+ .writesect = mtdswap_writesect,
+ .add_mtd = mtdswap_add_mtd,
+ .remove_dev = mtdswap_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Module init: only registers the blktrans driver when resume_file
+ * names an mtdswap partition; the partition number is parsed into
+ * 'part'.  NOTE(review): on a malformed number kstrtoul leaves 'part'
+ * at its initial 0 and registration proceeds anyway — confirm that
+ * falling back to partition 0 is intended.
+ */
+static int __init mtdswap_modinit(void)
+{
+ /* find if resume_file name contains "mtdswap" */
+ int ret = mtdswap_find_mtd(resume_file, "mtdswap");
+ if (!ret){
+ printk(KERN_WARNING"\n[mtdswap] Resume Partition Is Not mtdswap !!!\n");
+ return 0;
+ }
+ parts = &partitions[0];
+ printk(KERN_WARNING"[mtdswap] resume_file:%s, parts=%s\n", resume_file, parts);
+ if(kstrtoul(parts, 0, &part) < 0){
+ printk(KERN_WARNING"[mtdswap] Invalid MTDSWAP Partition Number!!!\n");
+ }
+ return register_mtd_blktrans(&mtdswap_ops);
+}
+
+/* Module exit: detach the blktrans driver (removes all devices). */
+static void __exit mtdswap_modexit(void)
+{
+ deregister_mtd_blktrans(&mtdswap_ops);
+}
+
+module_init(mtdswap_modinit);
+module_exit(mtdswap_modexit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Johnny Liu <johnnyliu@viatech.com.cn>");
+MODULE_DESCRIPTION("Block device access to an MTD suitable for using as "
+ "swap space");
diff --git a/ANDROID_3.4.5/drivers/mtd/nand/Kconfig b/ANDROID_3.4.5/drivers/mtd/nand/Kconfig
index 884f99ca..8a13c2f3 100644
--- a/ANDROID_3.4.5/drivers/mtd/nand/Kconfig
+++ b/ANDROID_3.4.5/drivers/mtd/nand/Kconfig
@@ -343,12 +343,12 @@ config MTD_NAND_SHARPSL
depends on ARCH_PXA
config MTD_NAND_CAFE
- tristate "NAND support for OLPC CAFÉ chip"
+ tristate "NAND support for OLPC CAFÉ chip"
depends on PCI
select REED_SOLOMON
select REED_SOLOMON_DEC16
help
- Use NAND flash attached to the CAFÉ chip designed for the OLPC
+ Use NAND flash attached to the CAFÉ chip designed for the OLPC
laptop.
config MTD_NAND_CS553X
@@ -571,4 +571,63 @@ config MTD_NAND_FSMC
Enables support for NAND Flash chips on the ST Microelectronics
Flexible Static Memory Controller (FSMC)
+config MTD_NAND_WMT
+ tristate "NAND Flash support for WMT SoC"
+# depends on ARCH_VT8500
+ help
+ This enables the NAND flash controller on the WMT
+ SoCs
+
+ No board specific support is done by this driver, each board
+ must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_CHIP_NUM
+ int "NAND Flash numbers select verbosity (1 = CE0, 2 = CE0,CE1)"
+ depends on MTD_NAND_WMT
+ default "2"
+ help
+ Determines the verbosity numbers of nand chip supported by WMT.
+
+config MTD_NAND_WMT_HWECC
+ bool "WMT NAND Hardware ECC"
+ depends on MTD_NAND_WMT
+ default y
+ help
+ Enable the use of the WMT's internal ECC generator when
+ using NAND.
+
+config MTD_NAND_HM_ECC
+ int "WMT NAND Hardware ECC Algorithm select verbosity(Harming ECC: =1, BCH ECC: =2)"
+ depends on MTD_NAND_WMT
+ default "2"
+ help
+ Enable the use of the WMT's internal ECC generator when
+ using NAND.
+
+choice
+ prompt "WMT NAND Partition for System"
+ default MTD_NAND_WMT_ANDROID
+ depends on MTD_NAND_WMT
+ help
+ Partition Nand Flash for Android, Ubuntu or Android/Ubuntu Dual system
+
+config MTD_NAND_WMT_ANDROID
+ bool "Android"
+ depends on MTD_NAND_WMT
+ help
+ Partition Nand Flash for WMT Android System
+
+config MTD_NAND_WMT_UBUNTU
+ bool "Ubuntu"
+ depends on MTD_NAND_WMT
+ help
+ Partition Nand Flash for WMT Ubuntu System
+
+config MTD_NAND_WMT_ANDROID_UBUNTU_DUALOS
+ bool "Android + Ubuntu"
+ depends on MTD_NAND_WMT
+ help
+ Partition Nand Flash for WMT Android/Ubuntu Dual System
+endchoice
+
endif # MTD_NAND
diff --git a/ANDROID_3.4.5/drivers/mtd/nand/Makefile b/ANDROID_3.4.5/drivers/mtd/nand/Makefile
index d4b4d873..41fb98f5 100644
--- a/ANDROID_3.4.5/drivers/mtd/nand/Makefile
+++ b/ANDROID_3.4.5/drivers/mtd/nand/Makefile
@@ -51,5 +51,6 @@ obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
+obj-$(CONFIG_MTD_NAND_WMT) += wmt_nand.o
-nand-objs := nand_base.o nand_bbt.o
+nand-$(CONFIG_MTD_NAND) := nand_base.o nand_bbt.o
diff --git a/ANDROID_3.4.5/drivers/mtd/nand/nand_base.c b/ANDROID_3.4.5/drivers/mtd/nand/nand_base.c
index 47b19c0b..00be451a 100644
--- a/ANDROID_3.4.5/drivers/mtd/nand/nand_base.c
+++ b/ANDROID_3.4.5/drivers/mtd/nand/nand_base.c
@@ -47,8 +47,74 @@
#include <linux/bitops.h>
#include <linux/leds.h>
#include <linux/io.h>
+#include <mach/hardware.h>
#include <linux/mtd/partitions.h>
+#include "../../../arch/arm/mach-wmt/wmt_clk.h"
+#include "wmt_nand.h"
+#define myDEBUG
+//#undef myDEBUG
+#ifdef myDEBUG
+#define DPRINTK(fmt, args...) printk("%s: " fmt, __FUNCTION__ , ## args)
+#else
+#define DPRINTK(fmt, args...)
+#endif
+
+//#define DBG_60BIT_ECC
+
+#ifdef NAND_BBT_BCH_ECC
+
+#if(CONFIG_MTD_NAND_PAGE_SIZE == 2048)
+static struct nand_ecclayout wmt_oobinfo_2048_backup = {
+ /* nand flash new structure and BCH ECC oob info */
+ .eccbytes = 40,
+ .eccpos = { 0, 1, 2, 3, 4, 5, 6, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 61, 62, 63},
+ .oobavail = 16,
+ .oobfree = {{9, 4},{25, 4},{41, 4},{57, 4}}
+};
+
+static struct nand_ecclayout wmt_hm_oobinfo_2048_backup = {
+ /* nand flash old structure and Harming ECC oob info */
+ .eccbytes = 14,
+ .eccpos = { 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45, 46, 48, 49},
+ .oobavail = 32,
+ .oobfree = {{0, 32}}
+};
+#else
+
+static struct nand_ecclayout wmt_hm_oobinfo_4096_backup = {
+ /* nand flash old structure and Harming ECC oob info */
+ .eccbytes = 27,
+ .eccpos = { 64, 65, 66, 68, 69, 70, 72, 73, 74, 76, 77, 78,
+ 80, 81, 82, 84, 85, 86, 88, 89, 90, 92, 93, 94,
+ 96, 97, 98},
+ .oobavail = 64,
+ .oobfree = {{0, 32}}
+};
+
+static struct nand_ecclayout wmt_oobinfo_4096_backup = {
+ /* nand flash old structure and Harming ECC oob info */
+ .eccbytes = 80,
+ .eccpos = { 0, 1, 2, 3, 4, 5, 6, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 61, 62, 63},
+ // 64, 65, 66, 67, 68, 69, 70, 77, 78, 79,
+ // 80, 81, 82, 83, 84, 85, 86, 93, 94, 95,
+ // 96, 97, 98, 99, 100,101,102,109,110,111,
+ // 112,113,114,115,116,117,118,125,126,127},
+ .oobavail = 16,
+ .oobfree = {{9, 4},{25, 4},{41, 4},{57, 4}}
+ // .oobfree = {{9, 4},{25, 4},{41, 4},{57, 4},{73,4},{89,4},{105,4},{121,4}}
+};
+#endif
+
+#endif
+extern struct nand_bbt_descr largepage_flashbased;
+extern int second_chip;
/* Define default oob placement schemes for large and small page devices */
static struct nand_ecclayout nand_oob_8 = {
.eccbytes = 3,
@@ -103,7 +169,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
* For devices which display every fart in the system on a separate LED. Is
* compiled away when LED support is disabled.
*/
-DEFINE_LED_TRIGGER(nand_led_trigger);
+//DEFINE_LED_TRIGGER(nand_led_trigger);
static int check_offs_len(struct mtd_info *mtd,
loff_t ofs, uint64_t len)
@@ -145,6 +211,7 @@ static void nand_release_device(struct mtd_info *mtd)
chip->state = FL_READY;
wake_up(&chip->controller->wq);
spin_unlock(&chip->controller->lock);
+ auto_pll_divisor(DEV_NAND, CLK_DISABLE, 0, 0);
}
/**
@@ -159,6 +226,28 @@ static uint8_t nand_read_byte(struct mtd_info *mtd)
return readb(chip->IO_ADDR_R);
}
+/**
+ * wmt_recovery_call - reboot notifier: restore Hynix read-retry default
+ * @nb: notifier block embedded in the mtd_info (reboot_notifier field)
+ * @code: notifier event code (unused)
+ * @_cmd: notifier payload (unused)
+ *
+ * When the chip has per-chip state (cur_chip) and the manufacturer id in
+ * mtd->id bits 31..24 is Hynix, reset the vendor read-retry parameter to
+ * its default so the device is in a known state after reboot.  The chip
+ * access is bracketed by nand_get_device()/nand_release_device().
+ *
+ * Returns NOTIFY_DONE unconditionally.
+ */
+int wmt_recovery_call(struct notifier_block *nb, unsigned long code, void *_cmd)
+{
+ struct mtd_info *mtd = NULL;
+ struct nand_chip *chip = NULL;
+ /* Recover the mtd_info that embeds this notifier block. */
+ mtd = container_of(nb, struct mtd_info, reboot_notifier);
+ chip = (struct nand_chip *)mtd->priv;
+
+ if(chip->cur_chip && (((mtd->id >>24)&0xff) == NAND_MFR_HYNIX)) {
+ nand_get_device(chip, mtd, FL_WRITING);
+ #ifdef RETRY_DEBUG
+ printk("current try times: %d\n", chip->cur_chip->cur_try_times);
+ #endif
+ chip->select_chip(mtd, 0);
+ /* Put the vendor read-retry register back to its power-on default. */
+ chip->cur_chip->set_parameter(mtd, READ_RETRY_MODE, DEFAULT_VALUE);
+ //chip->cur_chip->get_parameter(mtd,READ_RETRY_MODE);
+ chip->select_chip(mtd, -1);
+ nand_release_device(mtd);
+ }
+ return NOTIFY_DONE;
+}
+EXPORT_SYMBOL(wmt_recovery_call);
+
/**
* nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip
* nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
@@ -335,14 +424,22 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
int page, chipnr, res = 0, i = 0;
struct nand_chip *chip = mtd->priv;
u16 bad;
+ int page1 = 0, pagecnt = mtd->pagecnt;
if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
ofs += mtd->erasesize - mtd->writesize;
- page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+ if (mtd->planenum > 1) {//dan_multi
+ page = ((int)(ofs >> chip->page_shift) * mtd->planenum);
+ page1 = page + pagecnt;
+ page &= chip->pagemask;
+ page1 &= chip->pagemask;
+ } else
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
if (getchip) {
- chipnr = (int)(ofs >> chip->chip_shift);
+ //chipnr = (int)(ofs >> chip->chip_shift);
+ chipnr = ((int)(ofs >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
nand_get_device(chip, mtd, FL_READING);
@@ -357,18 +454,27 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
bad = cpu_to_le16(chip->read_word(mtd));
if (chip->badblockpos & 0x1)
bad >>= 8;
- else
- bad &= 0xFF;
+ /*else
+ bad &= 0xFF;*/ //masked dan_multi
+ if ((bad & 0xFF) != 0xff)//dan_multi
+ res = 1;
} else {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
- page);
- bad = chip->read_byte(mtd);
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
+ //bad = chip->read_byte(mtd);
+ if (chip->read_byte(mtd) != 0xff)
+ res = 1;
+ if (mtd->planenum > 1) {
+ //printk("\n multiplane block bad check! \n");
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page1);
+ if (chip->read_byte(mtd) != 0xff)
+ res = 1;
+ }
}
- if (likely(chip->badblockbits == 8))
+ /*if (likely(chip->badblockbits == 8))
res = bad != 0xFF;
else
- res = hweight8(bad) < chip->badblockbits;
+ res = hweight8(bad) < chip->badblockbits;*/ //masked dan_multi
ofs += mtd->writesize;
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
i++;
@@ -395,11 +501,11 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
* Note that we retain the first error encountered in (3) or (4), finish the
* procedures, and dump the error in the end.
*/
-static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs, int type)
{
struct nand_chip *chip = mtd->priv;
uint8_t buf[2] = { 0, 0 };
- int block, res, ret = 0, i = 0;
+ int block, res = 0, ret = 0, i = 0, bits;
int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM);
if (write_oob) {
@@ -409,15 +515,39 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
memset(&einfo, 0, sizeof(einfo));
einfo.mtd = mtd;
einfo.addr = ofs;
- einfo.len = 1 << chip->phys_erase_shift;
+ //einfo.len = 1 << chip->phys_erase_shift;
+ einfo.len = mtd->erasesize;
nand_erase_nand(mtd, &einfo, 0);
}
/* Get block number */
- block = (int)(ofs >> chip->bbt_erase_shift);
+ //block = (int)(ofs >> chip->bbt_erase_shift);
+ block = (((int)(ofs >> 10))/mtd->pageSizek) >> chip->pagecnt_shift;
/* Mark block bad in memory-based BBT */
- if (chip->bbt)
- chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+ if (chip->bbt) {
+ if (chip->realplanenum) {
+ if (block == (chip->status_plane[0]/mtd->pagecnt && (chip->status_plane[1]&7))) {
+ if ((0xFF&(mtd->id>>24)) == NAND_MFR_TOSHIBA)
+ bits = ((chip->status_plane[1]&2) ? 1 : 0) + ((chip->status_plane[1]&4) ? 4 : 0);//toshiba
+ else
+ bits = ((chip->status_plane[1]&1) ? 1 : 0) + ((chip->status_plane[1]&2) ? 4 : 0);//others
+ chip->bbt[block >> 1] &= (~(0xF << ((block & 0x01) << 2)));//prevent from mark read fail then mark wort out!
+ chip->bbt[block >> 1] |= bits << ((block & 0x01) << 2);
+ } else {
+ //printk("markbad block=%d diff last err block=%d\n", block, (chip->status_plane[0]/mtd->pagecnt));
+ bits = 5;
+ if (type == 1)
+ bits = 0xa;
+ chip->bbt[block >> 1] |= bits << ((block & 0x01) << 2);
+ }
+ } else {
+ bits = 1;
+ if (type == 1)
+ bits = 0x2;
+ chip->bbt[block >> 2] &= (~(3 << ((block & 0x03) << 1)));//prevent from mark read fail then mark wort out!
+ chip->bbt[block >> 2] |= bits << ((block & 0x03) << 1);
+ }
+ }
/* Write bad block marker to OOB */
if (write_oob) {
@@ -458,7 +588,7 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
if (!ret)
ret = res;
}
-
+//printk("markbad blk fin res=%d\n",res);
if (!ret)
mtd->ecc_stats.badblocks++;
@@ -496,7 +626,7 @@ static int nand_check_wp(struct mtd_info *mtd)
* calling of the scan function.
*/
static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
- int allowbbt)
+ int allowbbt, int allow_readfail)
{
struct nand_chip *chip = mtd->priv;
@@ -504,7 +634,10 @@ static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
return chip->block_bad(mtd, ofs, getchip);
/* Return info from the table */
- return nand_isbad_bbt(mtd, ofs, allowbbt);
+ if (chip->realplanenum)
+ return nand_isbad_bbt_multi(mtd, ofs, allowbbt, allow_readfail);
+ else
+ return nand_isbad_bbt(mtd, ofs, allowbbt, allow_readfail);
}
/**
@@ -539,14 +672,14 @@ void nand_wait_ready(struct mtd_info *mtd)
if (in_interrupt() || oops_in_progress)
return panic_nand_wait_ready(mtd, 400);
- led_trigger_event(nand_led_trigger, LED_FULL);
+// led_trigger_event(nand_led_trigger, LED_FULL);
/* Wait until command is processed or timeout occurs */
do {
if (chip->dev_ready(mtd))
break;
- touch_softlockup_watchdog();
+// touch_softlockup_watchdog();
} while (time_before(jiffies, timeo));
- led_trigger_event(nand_led_trigger, LED_OFF);
+// led_trigger_event(nand_led_trigger, LED_OFF);
}
EXPORT_SYMBOL_GPL(nand_wait_ready);
@@ -803,6 +936,7 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
spinlock_t *lock = &chip->controller->lock;
wait_queue_head_t *wq = &chip->controller->wq;
DECLARE_WAITQUEUE(wait, current);
+ auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
retry:
spin_lock(lock);
@@ -876,7 +1010,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
else
timeo += (HZ * 20) / 1000;
- led_trigger_event(nand_led_trigger, LED_FULL);
+// led_trigger_event(nand_led_trigger, LED_FULL);
/*
* Apply this short delay always to ensure that we do wait tWB in any
@@ -884,9 +1018,18 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
*/
ndelay(100);
- if ((state == FL_ERASING) && (chip->options & NAND_IS_AND))
- chip->cmdfunc(mtd, NAND_CMD_STATUS_MULTI, -1, -1);
- else
+ if ((state == FL_ERASING || state == FL_WRITING) &&
+ ((chip->options & NAND_IS_AND) || chip->realplanenum)) {
+ /*if (state == FL_ERASING)
+ printk("read status multi erase\n");
+ if (state == FL_WRITING)
+ printk("read status multi write\n");*/
+ //printk("read status multi write id=0x%x\n", 0xFF&(mtd->id>>24));
+ if ((0xFF&(mtd->id>>24)) == NAND_MFR_HYNIX || (0xFF&(mtd->id>>24)) == NAND_MFR_MICRON || (0xFF&(mtd->id>>24)) == NAND_MFR_INTEL) {
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ } else
+ chip->cmdfunc(mtd, NAND_CMD_STATUS_MULTI, -1, -1);
+ } else
chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
if (in_interrupt() || oops_in_progress)
@@ -903,9 +1046,17 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
cond_resched();
}
}
- led_trigger_event(nand_led_trigger, LED_OFF);
+// led_trigger_event(nand_led_trigger, LED_OFF);
status = (int)chip->read_byte(mtd);
+ /*if ((0xFF&(mtd->id>>24)) == 0xAD && chip->realplanenum)
+ while (status&0x1 || !(status&0x40)) {
+ chip->cmdfunc(mtd, 0x75, -1, -1);
+ status = (int)chip->read_byte(mtd);
+ printk("read status 75 multi=%x\n", status);
+ if (status&0x40)
+ break;
+ }*/
return status;
}
@@ -976,7 +1127,8 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
nand_get_device(chip, mtd, FL_UNLOCKING);
/* Shift to get chip number */
- chipnr = ofs >> chip->chip_shift;
+ //chipnr = ofs >> chip->chip_shift;
+ chipnr = ((int)(ofs >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
chip->select_chip(mtd, chipnr);
@@ -1025,7 +1177,8 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
nand_get_device(chip, mtd, FL_LOCKING);
/* Shift to get chip number */
- chipnr = ofs >> chip->chip_shift;
+ //chipnr = ofs >> chip->chip_shift;
+ chipnr = ((int)(ofs >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
chip->select_chip(mtd, chipnr);
@@ -1180,7 +1333,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
int index = 0;
-
+printk(KERN_NOTICE "r nand_read_subpage -------------------------\n");
/* Column address within the page aligned to ECC size (256bytes) */
start_step = data_offs / chip->ecc.size;
end_step = (data_offs + readlen - 1) / chip->ecc.size;
@@ -1462,9 +1615,10 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
int chipnr, page, realpage, col, bytes, aligned;
struct nand_chip *chip = mtd->priv;
struct mtd_ecc_stats stats;
- int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+ //int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+ int blkcheck = mtd->pagecnt -1;
int sndcmd = 1;
- int ret = 0;
+ int ret = 0, nocache = 1;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
@@ -1473,38 +1627,64 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
uint8_t *bufpoi, *oob, *buf;
stats = mtd->ecc_stats;
+ mtd->ecc_err_cnt = 0;
- chipnr = (int)(from >> chip->chip_shift);
+ //chipnr = (int)(from >> chip->chip_shift);
+ chipnr = ((int)(from >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
chip->select_chip(mtd, chipnr);
-
- realpage = (int)(from >> chip->page_shift);
+ if(chipnr > 0) {
+ second_chip = 1;
+ } else {
+ second_chip = 0;
+ }
+ //realpage = (int)(from >> chip->page_shift);
+ realpage = ((int)(from >> 10))/mtd->pageSizek;
page = realpage & chip->pagemask;
- col = (int)(from & (mtd->writesize - 1));
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) == 1) {
+ col = (int)(from & (mtd->writesize - 1));
+ } else {
+ col = ((int)(from>>10)) % mtd->pageSizek;
+ col = col << 10;
+ }
+ //printk("chip=%d realpage=0x%x page=0x%x mask=0x%x col=0x%x \n",chipnr, realpage, page, chip->pagemask, col);
buf = ops->datbuf;
oob = ops->oobbuf;
while (1) {
+ nocache = 1;
bytes = min(mtd->writesize - col, readlen);
aligned = (bytes == mtd->writesize);
-
+ //if (!aligned || col)
+//printk("readlen=%d byte=%d align=%d col=%d\n", readlen, bytes, aligned, col);
/* Is the current page in the buffer? */
if (realpage != chip->pagebuf || oob) {
bufpoi = aligned ? buf : chip->buffers->databuf;
if (likely(sndcmd)) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ if (!chip->realplanenum) {//dan_multi
+ /*page = (page / pagecnt) * pagecnt + page;//dan_multi 65->129, 129->257
+ else*/
+ if (aligned)
+ nocache = cache_read_data(mtd, chip, page, buf);
+ if (nocache)
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ }
sndcmd = 0;
}
/* Now read the page into the buffer */
- if (unlikely(ops->mode == MTD_OPS_RAW))
+ /*if (unlikely(ops->mode == MTD_OPS_RAW))
ret = chip->ecc.read_page_raw(mtd, chip,
bufpoi, page);
else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
ret = chip->ecc.read_subpage(mtd, chip,
col, bytes, bufpoi);
+ else*/
+ /* dannier comment: copy data + oob to bufpoi */
+ if (!chip->realplanenum && nocache == 0)
+ ret = 0;
else
ret = chip->ecc.read_page(mtd, chip, bufpoi,
page);
@@ -1594,9 +1774,16 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
+ if (mtd->ecc_err_cnt > mtd->ecc_err_level) {
+ return -NEED_REPLACEMENT;
+ }
+
return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
}
+static int nand_block_markbad_wmt(struct mtd_info *mtd, loff_t ofs, int type);
+
+
/**
* nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
* @mtd: MTD device structure
@@ -1622,6 +1809,11 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
ret = nand_do_read_ops(mtd, from, &ops);
*retlen = ops.retlen;
nand_release_device(mtd);
+
+ if (ret == -EBADMSG) {
+ nand_block_markbad_wmt(mtd, from, 1);
+ }
+
return ret;
}
@@ -1764,6 +1956,108 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
return status & NAND_STATUS_FAIL ? -EIO : 0;
}
+
+/**
+ * nand_do_read_bb_oob - [Intern] NAND read out-of-band (bad-block scan path)
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operations description structure
+ *
+ * Reads OOB (spare area) data via chip->ecc.read_bb_oob, which is the
+ * read path used when scanning for factory bad-block marks.  Validates
+ * ops->ooboffs and the read range against the device size, then loops
+ * page by page, transferring OOB bytes into ops->oobbuf until
+ * ops->ooblen bytes have been gathered.
+ *
+ * Returns 0 on success, -EINVAL on a bad range, -EBADMSG if new ECC
+ * failures were recorded, or -EUCLEAN if only corrected errors occurred.
+ */
+static int nand_do_read_bb_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int page, realpage, chipnr, sndcmd = 1;
+ struct nand_chip *chip = mtd->priv;
+ struct mtd_ecc_stats stats;
+ /* Pages-per-erase-block mask, used to re-issue the read command at
+  * block boundaries for chips without auto page increment. */
+ int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+ int readlen = ops->ooblen;
+ int len;
+ uint8_t *buf = ops->oobbuf;
+
+ pr_debug("%s: from = 0x%08Lx, len = %i\n",
+ __func__, (unsigned long long)from, readlen);
+
+ stats = mtd->ecc_stats;
+ len = mtd->oobsize;
+
+ if (unlikely(ops->ooboffs >= len)) {
+ pr_debug("%s: attempt to start read outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Do not allow reads past end of device */
+ if (unlikely(from >= mtd->size ||
+ ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
+ (from >> chip->page_shift)) * len)) {
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Chip number derived from pageSizek/blkcnt geometry rather than
+  * chip_shift (WMT-specific addressing; see the other converted
+  * chipnr computations in this file). */
+ //chipnr = (int)(from >> chip->chip_shift);
+ chipnr = ((int)(from >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
+ chip->select_chip(mtd, chipnr);
+
+ /* Shift to get page */
+ //realpage = (int)(from >> chip->page_shift);
+ realpage = ((int)(from >> 10))/mtd->pageSizek;
+ page = realpage & chip->pagemask;
+
+ while(1) {
+ sndcmd = chip->ecc.read_bb_oob(mtd, chip, page, sndcmd);
+
+ len = min(len, readlen);
+ /* Manufacturer id 0x45 special case: copy 1KB starting before
+  * oob_poi instead of the normal OOB transfer.  0x45 looks like
+  * the SanDisk manufacturer id -- TODO confirm why this vendor
+  * needs the page data region here. */
+ if (((mtd->id>>24)&0xff) == 0x45) {
+ memcpy(buf, chip->oob_poi - mtd->writesize, 1024);
+ len = min((int)mtd->oobsize, readlen);
+ } else
+ buf = nand_transfer_oob(chip, buf, ops, len);
+
+ if (!(chip->options & NAND_NO_READRDY)) {
+ /*
+ * Apply delay or wait for ready/busy pin. Do this
+ * before the AUTOINCR check, so no problems arise if a
+ * chip which does auto increment is marked as
+ * NOAUTOINCR by the board driver.
+ */
+ if (!chip->dev_ready)
+ udelay(chip->chip_delay);
+ else
+ nand_wait_ready(mtd);
+ }
+
+ readlen -= len;
+ if (!readlen)
+ break;
+
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+
+ /* Check, if the chip supports auto page increment
+ * or if we have hit a block boundary.
+ */
+ if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
+ sndcmd = 1;
+ }
+
+ ops->oobretlen = ops->ooblen;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
/**
* nand_do_read_oob - [INTERN] NAND read out-of-band
* @mtd: MTD device structure
@@ -1781,7 +2075,9 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
int readlen = ops->ooblen;
int len;
- uint8_t *buf = ops->oobbuf;
+ uint8_t *buf = ops->oobbuf, *buf1;
+
+ mtd->ecc_err_cnt = 0;
pr_debug("%s: from = 0x%08Lx, len = %i\n",
__func__, (unsigned long long)from, readlen);
@@ -1808,13 +2104,20 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- chipnr = (int)(from >> chip->chip_shift);
+ //chipnr = (int)(from >> chip->chip_shift);
+ chipnr = ((int)(from >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
chip->select_chip(mtd, chipnr);
/* Shift to get page */
- realpage = (int)(from >> chip->page_shift);
+ //realpage = (int)(from >> chip->page_shift);
+ realpage = ((int)(from >> 10))/mtd->pageSizek;
page = realpage & chip->pagemask;
-
+ if(chipnr > 0) {
+ second_chip = 1;
+ } else {
+ second_chip = 0;
+ }
+ buf1 = buf;
while (1) {
if (ops->mode == MTD_OPS_RAW)
sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd);
@@ -1865,6 +2168,9 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
+ if (mtd->ecc_err_cnt > mtd->ecc_err_level) {
+ return -NEED_REPLACEMENT;
+ }
return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
}
@@ -1903,10 +2209,78 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
goto out;
}
- if (!ops->datbuf)
- ret = nand_do_read_oob(mtd, from, ops);
- else
+ if (!ops->datbuf) {
+ /* DannierChen20101022 : Patch for avoiding yaffs2 read checkpoint signature from a bad block*/
+ if (chip->bbt && nand_block_checkbad(mtd, from, 1, 0xFF, 1)) {
+ memset(ops->oobbuf, 0xff, ops->ooblen);
+ //printk("nand_do_read_oob: memset ops->ooblen=%d Byte\n", ops->ooblen);
+ /* DannierChen20101022 : Patch end */
+ } else {
+ ret = nand_do_read_oob(mtd, from, ops);
+ if (ret == -EBADMSG) {
+ nand_release_device(mtd);
+ nand_block_markbad_wmt(mtd, from, 1);
+ return ret;
+ }
+ }
+ } else {
+ //printk("In nand_read_oob() call nand_do_read_ops():and ops->len is %d\n", ops->len);
ret = nand_do_read_ops(mtd, from, ops);
+ if (ret == -EBADMSG) {
+ nand_release_device(mtd);
+ nand_block_markbad_wmt(mtd, from, 1);
+ return ret;
+ }
+ }
+
+ out:
+ nand_release_device(mtd);
+ return ret;
+}
+
+
+/**
+ * nand_read_bbt_facmk - [MTD Interface] NAND read data and/or out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * NAND read factory-marked bad block information
+ */
+static int nand_read_bbt_facmk(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = -ENOTSUPP;
+ //printk("enter nand_read_bbt_facmk\n");
+ ops->retlen = 0;
+
+ /* Do not allow reads past end of device */
+ if (ops->datbuf && (from + ops->len) > mtd->size) {
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ nand_get_device(chip, mtd, FL_READING);
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf) {
+ ret = nand_do_read_bb_oob(mtd, from, ops);
+ //printk("enter nand_read_bbt_facmk nand_do_read_bb_oob yes\n");
+ } else {
+ //printk("enter nand_read_bbt_facmk nand_do_read_ops no\n");
+ ret = nand_do_read_ops(mtd, from, ops);
+ }
out:
nand_release_device(mtd);
@@ -2214,23 +2588,34 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- column = to & (mtd->writesize - 1);
- subpage = column || (writelen & (mtd->writesize - 1));
-
+ //column = to & (mtd->writesize - 1);
+ column = ((int)(to>>10)) % mtd->pageSizek;
+ column = column << 10;
+ //subpage = column || (writelen & (mtd->writesize - 1));
+ subpage = column || (writelen < mtd->writesize);
+//printk("column=%d subpage=%d writelen=%d\n", column, subpage, writelen);
if (subpage && oob)
return -EINVAL;
- chipnr = (int)(to >> chip->chip_shift);
+ //chipnr = (int)(to >> chip->chip_shift);
+ chipnr = ((int)(to >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
chip->select_chip(mtd, chipnr);
/* Check, if it is write protected */
if (nand_check_wp(mtd))
return -EIO;
- realpage = (int)(to >> chip->page_shift);
+ //realpage = (int)(to >> chip->page_shift);
+ realpage = ((int)(to >> 10))/mtd->pageSizek;
page = realpage & chip->pagemask;
- blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+ //blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+ blockmask = (1 << (chip->pagecnt_shift)) - 1;
+ if(chipnr > 0) {
+ second_chip = 1;
+ } else {
+ second_chip = 0;
+ }
/* Invalidate the page cache, when we write to the cached page */
if (to <= (chip->pagebuf << chip->page_shift) &&
(chip->pagebuf << chip->page_shift) < (to + ops->len))
@@ -2257,6 +2642,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (unlikely(oob)) {
size_t len = min(oobwritelen, oobmaxlen);
+ memset(chip->oob_poi, 0xff, mtd->oobsize); /* edward wan add 20080606 */
oob = nand_fill_oob(mtd, oob, len, ops);
oobwritelen -= len;
} else {
@@ -2264,8 +2650,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
memset(chip->oob_poi, 0xff, mtd->oobsize);
}
- ret = chip->write_page(mtd, chip, wbuf, page, cached,
- (ops->mode == MTD_OPS_RAW));
+ // ret = chip->write_page(mtd, chip, wbuf, page, cached,
+ // (ops->mode == MTD_OOB_RAW));
+ ret = chip->write_page(mtd, chip, wbuf, page, cached, ops->mode);
if (ret)
break;
@@ -2400,7 +2787,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- chipnr = (int)(to >> chip->chip_shift);
+ //chipnr = (int)(to >> chip->chip_shift);
+ chipnr = ((int)(to >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
chip->select_chip(mtd, chipnr);
/* Shift to get page */
@@ -2481,16 +2869,41 @@ out:
}
/**
+ * get_para - [MTD Interface] NAND get retry and eslc information
+ * @mtd: MTD device structure
+ * @chipnr: chip number to select while querying the parameters
+ *
+ * Selects the given chip under the device lock, asks the low-level
+ * driver for its retry/eslc parameters via chip->get_para(), then
+ * deselects the chip and releases the device.
+ *
+ * NOTE(review): ret is initialized to -ENOTSUPP and never updated, so
+ * the function always returns -ENOTSUPP -- confirm callers ignore the
+ * return value before relying on it.
+ */
+static int get_para(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = -ENOTSUPP;
+
+ nand_get_device(chip, mtd, FL_READING);
+
+
+ chip->select_chip(mtd, chipnr);
+
+ chip->get_para(mtd, chip);
+
+ chip->select_chip(mtd, -1);
+
+
+ nand_release_device(mtd);
+ return ret;
+}
+/*
* single_erase_cmd - [GENERIC] NAND standard block erase command function
* @mtd: MTD device structure
* @page: the page address of the block which will be erased
*
* Standard erase command for NAND chips.
*/
+extern unsigned int par4_ofs;
+extern unsigned int prob_end;
static void single_erase_cmd(struct mtd_info *mtd, int page)
{
struct nand_chip *chip = mtd->priv;
- /* Send commands to erase a block */
chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
}
@@ -2554,11 +2967,29 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
nand_get_device(chip, mtd, FL_ERASING);
/* Shift to get first page */
- page = (int)(instr->addr >> chip->page_shift);
- chipnr = (int)(instr->addr >> chip->chip_shift);
+ //page = (int)(instr->addr >> chip->page_shift);
+ page = ((int)(instr->addr >> 10))/mtd->pageSizek;
+ //chipnr = (int)(instr->addr >> chip->chip_shift);
+ chipnr = ((int)(instr->addr >> (10+chip->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
+
+ if(chipnr > 0)
+ second_chip = 1;
+ else
+ second_chip = 0;
+
+ if (chip->cur_chip && (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX && prob_end == 1) {
+ if (page < par4_ofs && second_chip == 0) {
+ //printk("SKIP Multi erase page 0x%x, par4_ofs 0x%x\n", page, par4_ofs);
+ instr->state = MTD_ERASE_DONE;
+ ret = 0;
+ nand_release_device(mtd);
+ return ret;
+ }
+ }
/* Calculate pages in each block */
- pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+ //pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+ pages_per_block = 1 << chip->pagecnt_shift;
/* Select the NAND device */
chip->select_chip(mtd, chipnr);
@@ -2587,13 +3018,17 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
while (len) {
/* Check if we have a bad block, we do not erase bad blocks! */
- if (nand_block_checkbad(mtd, ((loff_t) page) <<
- chip->page_shift, 0, allowbbt)) {
+ if(allowbbt != 0xFF) { /* normal flow */
+ //if (nand_block_checkbad(mtd, ((loff_t) page) << chip->page_shift, 0, allowbbt)) {
+ if (nand_block_checkbad(mtd, ((loff_t) (page*mtd->pageSizek)) << 10, 0, allowbbt, 1)) {
pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
__func__, page);
+ printk("nand_erase: attempt to erase a "
+ "bad block at page 0x%08x\n", page);
instr->state = MTD_ERASE_FAILED;
goto erase_exit;
}
+ }
/*
* Invalidate the page cache, if we erase the block which
@@ -2607,6 +3042,18 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
status = chip->waitfunc(mtd, chip);
+ if (chip->realplanenum && (status & NAND_STATUS_FAIL)) {
+ /*if (abv != 13479) {
+ status = 0xe3;//0xe5;
+ abv = 13479;
+ printk("erase page=%x error abv=%d\n", page, abv);
+ }*/
+ chip->status_plane[0] = page;
+ chip->status_plane[1] = status;
+ printk("erase blk=%x error status=0x%x\n", page/mtd->pagecnt, status);
+ //while(1);
+ }
+
/*
* See if operation failed and additional status checks are
* available
@@ -2619,9 +3066,22 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
if (status & NAND_STATUS_FAIL) {
pr_debug("%s: failed erase, page 0x%08x\n",
__func__, page);
+ printk( "nand_erase: "
+ "Failed erase, page 0x%08x ", page);
+ if(allowbbt == 0xFF) {
+ //len -= (1 << chip->phys_erase_shift);
+ len -= mtd->erasesize;
+ page += pages_per_block;
+ printk( "continue next\n");
+ continue;
+ } else
+ printk( "\n");
+
instr->state = MTD_ERASE_FAILED;
instr->fail_addr =
- ((loff_t)page << chip->page_shift);
+ //((loff_t)page << chip->page_shift);
+ ((loff_t)(page*mtd->pageSizek)) << 10;
+ printk("nand_erase: goto erase_exit\n");
goto erase_exit;
}
@@ -2632,12 +3092,15 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
if (bbt_masked_page != 0xffffffff &&
(page & BBT_PAGE_MASK) == bbt_masked_page)
rewrite_bbt[chipnr] =
- ((loff_t)page << chip->page_shift);
+ //((loff_t)page << chip->page_shift);
+ ((loff_t)(page*mtd->pageSizek)) << 10;
/* Increment page address and decrement length */
- len -= (1 << chip->phys_erase_shift);
+ //len -= (1 << chip->phys_erase_shift);
+ len -= mtd->erasesize;
page += pages_per_block;
-
+ if (len)
+printk("-----------------------------------er%d=blk=%d len=%llu\n",page,page/256, (unsigned long long)len);
/* Check, if we cross a chip boundary */
if (len && !(page & chip->pagemask)) {
chipnr++;
@@ -2681,6 +3144,9 @@ erase_exit:
pr_debug("%s: nand_update_bbt (%d:0x%0llx 0x%0x)\n",
__func__, chipnr, rewrite_bbt[chipnr],
chip->bbt_td->pages[chipnr]);
+ printk( "nand_erase_nand: nand_update_bbt "
+ "(%d:0x%0llx 0x%0x) page=%x\n", chipnr, rewrite_bbt[chipnr],
+ chip->bbt_td->pages[chipnr], page);
nand_update_bbt(mtd, rewrite_bbt[chipnr]);
}
@@ -2713,9 +3179,37 @@ static void nand_sync(struct mtd_info *mtd)
*/
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
- return nand_block_checkbad(mtd, offs, 1, 0);
+ return nand_block_checkbad(mtd, offs, 1, 0, 1);
+}
+
+/* Like nand_block_isbad() but with allow_readfail = 0, so blocks marked
+ * for read failure in the BBT are reported differently from the normal
+ * MTD isbad path (which passes allow_readfail = 1). */
+static int nand_block_isbad_wmt(struct mtd_info *mtd, loff_t offs)
+{
+ return nand_block_checkbad(mtd, offs, 1, 0, 0);
+}
+
+
+/**
+ * nand_block_markbad_wmt - [MTD Interface] Mark block at the given offset as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ * @type: bad-block category passed through to chip->block_markbad();
+ *        0 = worn out, 1 = reserved/unrecoverable read error (the two
+ *        values select different BBT bit patterns in
+ *        nand_default_block_markbad).
+ *
+ * If the block is already marked bad this is a no-op returning 0;
+ * a negative return from the isbad check is propagated unchanged.
+ */
+static int nand_block_markbad_wmt(struct mtd_info *mtd, loff_t ofs, int type)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ ret = nand_block_isbad_wmt(mtd, ofs);
+ if (ret) {
+ /* If it was bad already, return success and do nothing */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ return chip->block_markbad(mtd, ofs, type);
+}
+
/**
* nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
* @mtd: MTD device structure
@@ -2734,7 +3228,7 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
return ret;
}
- return chip->block_markbad(mtd, ofs);
+ return chip->block_markbad(mtd, ofs, 0);
}
/**
@@ -2759,8 +3253,7 @@ static void nand_resume(struct mtd_info *mtd)
if (chip->state == FL_PM_SUSPENDED)
nand_release_device(mtd);
else
- pr_err("%s called for a chip which is not in suspended state\n",
- __func__);
+ pr_err("called for a chip which is not in suspended state\n");
}
/* Set default functions */
@@ -2804,6 +3297,7 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
}
}
+#if 0
/* Sanitize ONFI strings so we can safely print them */
static void sanitize_string(uint8_t *s, size_t len)
@@ -2834,7 +3328,20 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
return crc;
}
-
+#endif
+/* Count the trailing zero bits of @value (i.e. log2(value) when value
+ * is a power of two).  The count is capped at 63, which is also what a
+ * @value of 0 returns -- callers must not rely on 0 being meaningful. */
+static int shift_bit(uint64_t value)
+{
+ int i = 0;
+ while (!(value & 1)) {
+ value >>= 1;
+ i++;
+ if (i == 63)
+ break;
+ }
+ /* return the number count of "zero" bit */
+ return i;
+}
+#if 0
/*
* Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
*/
@@ -3161,6 +3668,233 @@ ident_done:
return type;
}
+#endif
+/*
+ * Get the flash and manufacturer id and lookup if the type is supported
+ */
+static struct WMT_nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int busw, int *maf_id)
+{
+ struct WMT_nand_flash_dev *type = NULL, type_env;
+ int i, dev_id, maf_idx, ret = 0, varlen = 10;
+ unsigned int id = 0, id_5th = 0, id1, flash_bank;
+ char varval[10];
+
+ /* Select the device */
+ chip->select_chip(mtd, 0);
+
+ /* reset test: edwardwan add for debug 20071229 start*/
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ /* reset test: edwardwan add for debug 20071229 end*/
+
+ /* Send the command for reading device ID */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+ /* Read manufacturer and device IDs */
+ *maf_id = chip->read_byte(mtd);
+ for (i = 0; i < 3; i++) {
+ dev_id = chip->read_byte(mtd);
+ id += ((unsigned char)dev_id) <<((2-i)*8);
+ }
+ for (i = 0; i < 4; i++) {
+ dev_id = chip->read_byte(mtd);
+ id_5th += ((unsigned char)dev_id) <<((3-i)*8);
+ }
+ printk("nand chip device id = 0x%x 0x%x\n", id, id_5th);
+ #ifdef NAND_DEBUG
+ printk("nand chip device maf_id is %x, and dev_id is %x\n",*maf_id,dev_id);
+ #endif
+ id1 = (unsigned int)id + ((*maf_id)<<24);
+
+
+ /* Lookup the flash id */
+ /*for (i = 0; nand_flash_ids[i].name != NULL; i++) {
+ if (dev_id == nand_flash_ids[i].id) {*/
+ for (i = 0; WMT_nand_flash_ids[i].dwFlashID != 0; i++) {
+ if (((unsigned int)id + ((*maf_id)<<24)) == WMT_nand_flash_ids[i].dwFlashID) {
+ if (WMT_nand_flash_ids[i].dwFlashID == 0x98D79432)
+ if (id_5th != WMT_nand_flash_ids[i].dwFlashID2)
+ continue;
+ if (WMT_nand_flash_ids[i].dwFlashID == 0x98DE8493)
+ if (id_5th != WMT_nand_flash_ids[i].dwFlashID2)
+ continue;
+ type = &WMT_nand_flash_ids[i];
+ //printk("find nand chip device id\n");
+ break;
+ }
+ }
+ #ifdef CONFIG_MTD_NAND_WMT
+ ret = get_flash_info_from_env(id1, id_5th, &type_env);
+
+ if (!ret) {
+ if (type)
+ printk(KERN_WARNING "Both table and env have flash id info, use env info first\n");
+ type = &type_env;
+ }
+ #endif
+
+ if (!type) {
+ return ERR_PTR(-ENODEV);
+ }
+ if (!mtd->name)
+ /*mtd->name = type->name;*/
+ mtd->name = "WMT.nand";
+
+ if (wmt_getsyspara("wmt.nand.ecc", varval, &varlen) == 0) {
+ varlen = simple_strtoul(varval, NULL, 10);
+ #ifdef DBG_60BIT_ECC
+ printk("wmt_nand_ecc=%s len=%d\n", varval, varlen);
+ printk("val=%s len=%d\n", varval, varlen);
+ #endif
+ flash_bank = type->dwPageSize >> 10;
+ if ((type->dwFlashID == 0x2C64444B && type->dwFlashID2 == 0xA9000000)
+ || (type->dwFlashID == 0xADDE94EB && type->dwFlashID2 == 0x74440000)) {
+ if (varlen > type->dwECCBitNum) {
+ type->dwPageSize = type->dwPageSize - 2048;
+ type->dwBlockSize = (type->dwBlockSize/flash_bank)*(flash_bank-2);
+ type->dwECCBitNum = varlen;
+ }
+ }
+ #ifdef DBG_60BIT_ECC
+ printk("blksize=0x%x pagesize=0x%x ecc=%d\n", type->dwBlockSize, type->dwPageSize, type->dwECCBitNum);
+ #endif
+ }
+
+ /*chip->chipsize = type->chipsize << 20;*/
+ chip->chipsize = (uint64_t)type->dwBlockCount * (uint64_t)type->dwBlockSize;
+ if (((PLANE2_READ|PLANE2_PROG|PLANE2_ERASE) & type->dwSpeedUpCmd)
+ == (PLANE2_READ|PLANE2_PROG|PLANE2_ERASE)) {
+ chip->realplanenum = 1;
+ printk("\n ****realplanenum**** is %d",chip->realplanenum);
+ } else
+ chip->realplanenum = 0;
+
+ /* get all information from table */
+ mtd->blkcnt = type->dwBlockCount;
+ chip->cellinfo = type->dwNandType << 2;
+ mtd->realwritesize = mtd->writesize = type->dwPageSize;
+ mtd->realoobsize = mtd->oobsize = type->dwSpareSize;
+ mtd->realerasesize = mtd->erasesize = type->dwBlockSize;
+ if (chip->realplanenum) {//dan_multi
+ mtd->planenum = 2;
+ mtd->writesize *= 2;
+ mtd->erasesize *= 2;
+ mtd->oobsize *= 2;
+ mtd->blkcnt >>= 1;
+ } else
+ mtd->planenum = 1;
+ mtd->dwECCBitNum = type->dwECCBitNum;
+ mtd->ecc_err_level = 20;
+ if (mtd->dwECCBitNum >= 40)
+ mtd->ecc_err_level = mtd->dwECCBitNum - 10;
+
+ mtd->dwRetry = type->dwRetry;
+ mtd->dwRdmz = type->dwRdmz;
+ mtd->id = type->dwFlashID;
+ mtd->id2 = type->dwFlashID2;
+ if (((mtd->id>>24)&0xFF) == NAND_MFR_TOSHIBA && type->dwDDR == 2)
+ mtd->dwDDR = type->dwDDR;
+ else
+ mtd->dwDDR = 0;
+ mtd->pageSizek = mtd->writesize >> 10;
+ mtd->pagecnt = mtd->erasesize/mtd->writesize;
+ mtd->spec_clk = type->dwRWTimming;
+ mtd->spec_tadl = type->dwTadl;
+
+ busw = type->dwDataWidth ? NAND_BUSWIDTH_16 : 0;
+ chip->page_offset[0] = type->dwBI0Position;
+ chip->page_offset[1] = type->dwBI1Position;
+
+ /* Try to identify manufacturer */
+ for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
+ if (nand_manuf_ids[maf_idx].id == *maf_id)
+ break;
+ }
+
+ /*
+ * Check, if buswidth is correct. Hardware drivers should set
+ * chip correct !
+ */
+ if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ printk(KERN_INFO "NAND device: Manufacturer ID:"
+ " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
+ /*dev_id, nand_manuf_ids[maf_idx].name, mtd->name);*/
+ id, nand_manuf_ids[maf_idx].name, mtd->name);
+ printk(KERN_WARNING "NAND bus width %d instead %d bit\n",
+ (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
+ busw ? 16 : 8);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Calculate the address shift from the page size */
+ chip->page_shift = ffs(mtd->writesize) - 1;
+ chip->pagecnt_shift = ffs(mtd->pagecnt) - 1;
+ //printk("------------------page_shift=%d pgcnt_shift=%d\n", chip->page_shift, chip->pagecnt_shift);
+ /* Convert chipsize to number of pages per chip -1. */
+ //chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+ chip->pagemask = (mtd->blkcnt*mtd->pagecnt) - 1;
+
+ chip->bbt_erase_shift = chip->phys_erase_shift =
+ ffs(mtd->erasesize) - 1;
+ if (chip->chipsize > 0x80000000)
+ chip->chip_shift = shift_bit(chip->chipsize);
+ else
+ chip->chip_shift = ffs(chip->chipsize) - 1;
+ //chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
+
+ chip->badblockbits = 8;
+ /* Set the bad block position */
+ chip->badblockpos = mtd->writesize > 512 ?
+ NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
+
+ /* Get chip options, preserve non chip based options */
+ chip->options &= ~NAND_CHIPOPTIONS_MSK;
+ chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
+
+ /*
+ * Set chip as a default. Board drivers can override it, if necessary
+ */
+ chip->options |= NAND_NO_AUTOINCR;
+
+ /* Check if chip is a not a samsung device. Do not clear the
+ * options for chips which are not having an extended id.
+ */
+ /*if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)*//* Dannier:to support new table*/
+ if (*maf_id != NAND_MFR_SAMSUNG && type->dwPageSize > 512)
+ chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
+
+ chip->options |= NAND_BBT_SCAN2NDPAGE;
+ /* Check for AND chips with 4 page planes */
+ if (!chip->realplanenum) {//dan_multi
+ if (chip->options & NAND_4PAGE_ARRAY)
+ chip->erase_cmd = multi_erase_cmd;
+ else
+ chip->erase_cmd = single_erase_cmd;
+ }
+
+ /* Do not replace user supplied command function ! */
+ if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+ chip->cmdfunc = nand_command_lp;
+
+ printk(KERN_INFO "NAND device: Manufacturer ID:"
+ " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, id,
+ nand_manuf_ids[maf_idx].name, type->ProductName);
+
+#ifdef CONFIG_MTD_NAND_WMT
+ set_partition_size(mtd);
+ wmt_init_nfc(mtd, mtd->spec_clk, mtd->spec_tadl, busw);
+ set_ecc_info(mtd);
+ ret = alloc_write_cache(mtd);
+ if (ret)
+ return 0;
+ ret = alloc_rdmz_buffer(mtd);
+ if (ret)
+ return 0;
+#endif
+
+ return type;
+}
/**
* nand_scan_ident - [NAND Interface] Scan for the NAND device
@@ -3176,9 +3910,9 @@ ident_done:
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_flash_dev *table)
{
- int i, busw, nand_maf_id, nand_dev_id;
+ int i = 1, busw, nand_maf_id/*, nand_dev_id*/;
struct nand_chip *chip = mtd->priv;
- struct nand_flash_dev *type;
+ struct WMT_nand_flash_dev *type;
/* Get buswidth to select the correct functions */
busw = chip->options & NAND_BUSWIDTH_16;
@@ -3186,8 +3920,9 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
nand_set_defaults(chip, busw);
/* Read the flash type */
- type = nand_get_flash_type(mtd, chip, busw,
- &nand_maf_id, &nand_dev_id, table);
+ type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id);
+ //type = nand_get_flash_type(mtd, chip, busw,
+ //&nand_maf_id, &nand_dev_id, table);
if (IS_ERR(type)) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
@@ -3205,7 +3940,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
/* Read manufacturer and device IDs */
if (nand_maf_id != chip->read_byte(mtd) ||
- nand_dev_id != chip->read_byte(mtd))
+ /*nand_dev_id != chip->read_byte(mtd))*/
+ ((type->dwFlashID>>16)&0xFF) != chip->read_byte(mtd))
break;
}
if (i > 1)
@@ -3449,7 +4185,11 @@ int nand_scan_tail(struct mtd_info *mtd)
break;
}
}
- chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
+ //chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
+ if (mtd->dwECCBitNum >= 24)
+ chip->subpagesize = 1024;
+ else
+ chip->subpagesize = 512;
/* Initialize state */
chip->state = FL_READY;
@@ -3480,11 +4220,14 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->_block_isbad = nand_block_isbad;
mtd->_block_markbad = nand_block_markbad;
mtd->writebufsize = mtd->writesize;
+ mtd->get_para = get_para;
/* propagate ecc info to mtd_info */
mtd->ecclayout = chip->ecc.layout;
mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps;
+ /* edwardwan add support 4 bits BCH ECC */
+ mtd->read_bbinfo_facmk = nand_read_bbt_facmk;
/* Check, if we should skip the bad block table scan */
if (chip->options & NAND_SKIP_BBTSCAN)
return 0;
@@ -3519,16 +4262,27 @@ EXPORT_SYMBOL(nand_scan_tail);
int nand_scan(struct mtd_info *mtd, int maxchips)
{
int ret;
+ unsigned int ret1;
/* Many callers got this wrong, so check for it for a while... */
- if (!mtd->owner && caller_is_module()) {
+ /*if (!mtd->owner && caller_is_module()) {
pr_crit("%s called with NULL mtd->owner!\n", __func__);
BUG();
- }
-
+ }*/
+ ret1 = *(volatile unsigned long *)PMCEU_ADDR;
+ if (!(ret1&0x0010000))
+ printk(KERN_NOTICE "1 pmc_nand: 0x%x\n", ret1);
+ auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+ ret1 = *(volatile unsigned long *)PMCEU_ADDR;
+ if (!(ret1&0x0010000))
+ printk(KERN_NOTICE "2 pmc_nand: 0x%x\n", ret1);
ret = nand_scan_ident(mtd, maxchips, NULL);
if (!ret)
ret = nand_scan_tail(mtd);
+ auto_pll_divisor(DEV_NAND, CLK_DISABLE, 0, 0);
+ ret1 = *(volatile unsigned long *)PMCEU_ADDR;
+ if (ret1&0x0010000)
+ printk(KERN_NOTICE "3 pmc_nand: 0x%x\n", ret1);
return ret;
}
EXPORT_SYMBOL(nand_scan);
@@ -3560,13 +4314,13 @@ EXPORT_SYMBOL_GPL(nand_release);
static int __init nand_base_init(void)
{
- led_trigger_register_simple("nand-disk", &nand_led_trigger);
+// led_trigger_register_simple("nand-disk", &nand_led_trigger);
return 0;
}
static void __exit nand_base_exit(void)
{
- led_trigger_unregister_simple(nand_led_trigger);
+// led_trigger_unregister_simple(nand_led_trigger);
}
module_init(nand_base_init);
diff --git a/ANDROID_3.4.5/drivers/mtd/nand/nand_bbt.c b/ANDROID_3.4.5/drivers/mtd/nand/nand_bbt.c
index 30d1319f..a6a9e661 100644
--- a/ANDROID_3.4.5/drivers/mtd/nand/nand_bbt.c
+++ b/ANDROID_3.4.5/drivers/mtd/nand/nand_bbt.c
@@ -68,7 +68,8 @@
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
-
+#include <mach/hardware.h>
+//#define RETRY_DEBUG
static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
{
int ret;
@@ -131,7 +132,7 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc
* good / bad block identifiers. Same as check_pattern, but no optional empty
* check.
*/
-static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
+static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td, int ano_bytes)
{
int i;
uint8_t *p = buf;
@@ -141,6 +142,16 @@ static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
if (p[td->offs + i] != td->pattern[i])
return -1;
}
+ if (ano_bytes) {
+ //printk("sandisk flash");
+ for (i = 0; i < ano_bytes; i++) {
+ //printk("of=0x%x da=0x%x len=%x\n", td->offs + i, p[td->offs + i], td->len);
+ if (p[i] != td->pattern[0]) {
+ printk("p[%d]=0x%x of=0x%x da=0x%x len=%x\n", i, p[i], td->offs + i, p[td->offs + i], td->len);
+ return -1;
+ }
+ }
+ }
return 0;
}
@@ -188,10 +199,12 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
totlen = (num * bits) >> 3;
marker_len = add_marker_len(td);
- from = ((loff_t)page) << this->page_shift;
+ //from = ((loff_t)page) << this->page_shift;
+ from = ((loff_t)page*mtd->pageSizek) << 10;
while (totlen) {
- len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
+ //len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
+ len = min(totlen, (size_t)(mtd->erasesize));
if (marker_len) {
/*
* In case the BBT marker is not in the OOB area it
@@ -225,8 +238,9 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
if (tmp == msk)
continue;
if (reserved_block_code && (tmp == reserved_block_code)) {
- pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
- (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+ pr_info("nand_read_bbt: (read fail)reserved block at 0x%012llx\n",
+ //(loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+ (loff_t)(((offs << 2) + (act >> 1))*mtd->pageSizek) << (10+this->pagecnt_shift));
this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
mtd->ecc_stats.bbtblocks++;
continue;
@@ -235,8 +249,10 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
* Leave it for now, if it's matured we can
* move this message to pr_debug.
*/
- pr_info("nand_read_bbt: bad block at 0x%012llx\n",
- (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+ pr_info("nand_read_bbt: bad block at 0x%012llx (block%d)\n",
+ //(loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift,
+ (loff_t)(((offs << 2) + (act >> 1))*mtd->pageSizek) << (10+this->pagecnt_shift),
+ (offs << 2) + (act >> 1));
/* Factory marked bad or worn out? */
if (tmp == 0)
this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
@@ -250,6 +266,111 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
}
return ret;
}
+extern void print_nand_buffer(char *value, unsigned int length);
+/**
+ * read_bbt_multi - [GENERIC] Read the bad block table starting from page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td: the bbt describtion table
+ * @offs: offset in the memory table
+ *
+ * Read the bad block table starting from page.
+ */
+static int read_bbt_multi(struct mtd_info *mtd, uint8_t *buf, int page, int num,
+ struct nand_bbt_descr *td, int offs)
+{
+ int res, ret = 0, i, j, act = 0;
+ struct nand_chip *this = mtd->priv;
+ size_t retlen, len, totlen;
+ loff_t from;
+ int bits = (td->options & NAND_BBT_NRBITS_MSK)<<1;
+ uint8_t msk = (uint8_t)((1 << bits) - 1);
+ u32 marker_len;
+ int reserved_block_code = td->reserved_block_code;//=0
+//printk("--------bit=%d, msk=%d, code=%d\n",bits, msk, reserved_block_code);
+ totlen = (num * bits) >> 3;
+ marker_len = add_marker_len(td);
+ //from = ((loff_t)page) << this->page_shift;
+ from = ((loff_t)page*mtd->pageSizek) << 10;
+//printk("----totlen=%d, marker_len=%d, page=%d\n", totlen, marker_len, page);
+ while (totlen) {
+ //len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
+ len = min(totlen, (size_t)(mtd->erasesize));
+ if (marker_len) {
+ /*
+ * In case the BBT marker is not in the OOB area it
+ * will be just in the first page.
+ */
+ len -= marker_len;
+ from += marker_len;
+ marker_len = 0;
+ }
+ res = mtd_read(mtd, from, len, &retlen, buf);
+ if (res < 0) {
+ if (mtd_is_eccerr(res)) {
+ pr_info("nand_bbt: ECC error in BBT at "
+ "0x%012llx\n", from & ~mtd->writesize);
+ return res;
+ } else if (mtd_is_bitflip(res)) {
+ pr_info("nand_bbt: corrected error in BBT at "
+ "0x%012llx\n", from & ~mtd->writesize);
+ ret = res;
+ } else {
+ pr_info("nand_bbt: error reading BBT\n");
+ return res;
+ }
+ }
+//printk("+++++++++++++++++len=%d, offs=%d\n", len, offs);
+//print_nand_buffer(buf, 8192+64);
+//print_nand_buffer(buf+8192, 8192+64);
+ /* Analyse data */
+ for (i = 0; i < len; i++) {
+ uint8_t dat = buf[i];
+ if (this->bbt_plane[0] == page || this->bbt_plane[1] == page)
+ dat = buf[i+mtd->realwritesize];
+ for (j = 0; j < 8; j += bits, act += 4) {
+ uint8_t tmp = (dat >> j) & msk;
+ if (tmp == msk)
+ continue;
+ if (reserved_block_code && (tmp == reserved_block_code)) {
+ pr_info("nand_read_bbt: (read fail)reserved block at 0x%012llx\n",
+ //(loff_t)((offs << 1) + (act >> 2)) << this->bbt_erase_shift);
+ (loff_t)(((offs << 1) + (act >> 2))*mtd->pageSizek) << (10+this->pagecnt_shift));
+ this->bbt[offs + (act >> 3)] |= 0xa << (act & 0x04);
+ mtd->ecc_stats.bbtblocks++;
+ continue;
+ }
+ /*
+ * Leave it for now, if it's matured we can
+ * move this message to pr_debug.
+ */
+ pr_info("nand_read_bbt: bad block at 0x%012llx (block%d)\n",
+ //(loff_t)((offs << 1) + (act >> 2)) << this->bbt_erase_shift,
+ ((loff_t)(((offs << 1) + (act >> 2))*mtd->pageSizek)) << (10+this->pagecnt_shift),
+ (offs << 1) + (act >> 2));
+
+ /* Factory marked bad or worn out? */
+ if (tmp == 0) {
+ this->bbt[offs + (act >> 3)] |= 0xf << (act & 0x04);
+ //printk("bbt[%d]=0x%x", offs + (act >> 3), this->bbt[offs + (act >> 3)] |= 0xf << (act & 0x04));
+ } else if (tmp == 0x3) {
+ this->bbt[offs + (act >> 3)] |= 0xc << (act & 0x04);
+ //printk("bbt[%d]=0x%x", offs + (act >> 3), this->bbt[offs + (act >> 3)] |= 0xc << (act & 0x04));
+ } else if (tmp == 0xc) {
+ this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x04);
+ //printk("bbt[%d]=0x%x", offs + (act >> 3), this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x04));
+ } else
+ this->bbt[offs + (act >> 3)] |= 0x5 << (act & 0x04);
+ mtd->ecc_stats.badblocks++;
+ }
+ }
+ totlen -= len;
+ from += len;
+ }
+ return ret;
+}
/**
* read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
@@ -270,17 +391,155 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
if (td->options & NAND_BBT_PERCHIP) {
int offs = 0;
for (i = 0; i < this->numchips; i++) {
- if (chip == -1 || chip == i)
- res = read_bbt(mtd, buf, td->pages[i],
- this->chipsize >> this->bbt_erase_shift,
- td, offs);
+ if (chip == -1 || chip == i) {
+ if (this->realplanenum) {
+ /* multi plane mode use 4-bit as an block instead of 2-bit */
+ res = read_bbt_multi(mtd, buf, td->pages[i],
+ //this->chipsize >> this->bbt_erase_shift,
+ (int)(this->chipsize >> (10+this->pagecnt_shift))/mtd->pageSizek,
+ td, offs);
+ } else {
+ res = read_bbt(mtd, buf, td->pages[i],
+ //this->chipsize >> this->bbt_erase_shift,
+ (int)(this->chipsize >> (10+this->pagecnt_shift))/mtd->pageSizek,
+ td, offs);
+ }
+ }
if (res)
return res;
- offs += this->chipsize >> (this->bbt_erase_shift + 2);
+ if (this->realplanenum) {
+ //offs += this->chipsize >> (this->bbt_erase_shift + 1);
+ offs += ((int)(this->chipsize >> (10+this->pagecnt_shift+1))/mtd->pageSizek);
+ } else {
+ //offs += this->chipsize >> (this->bbt_erase_shift + 2);
+ offs += ((int)(this->chipsize >> (10+this->pagecnt_shift+2))/mtd->pageSizek);
+ }
}
} else {
- res = read_bbt(mtd, buf, td->pages[0],
- mtd->size >> this->bbt_erase_shift, td, 0);
+ if (this->realplanenum) {
+ /* multi plane mode use 4-bit as an block instead of 2-bit */
+ res = read_bbt_multi(mtd, buf, td->pages[0],
+ //mtd->size >> this->bbt_erase_shift, td, 0);
+ (int)(mtd->size >> (10+this->pagecnt_shift))/mtd->pageSizek, td, 0);
+ } else {
+ res = read_bbt(mtd, buf, td->pages[0],
+ //mtd->size >> this->bbt_erase_shift, td, 0);
+ (int)(mtd->size >> (10+this->pagecnt_shift))/mtd->pageSizek, td, 0);
+ if (res)
+ return res;
+ }
+ }
+ return 0;
+}
+
+
+/**
+ * read_retry_table - [GENERIC] Read the retry table starting from page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td: the bbt describtion table
+ * @offs: offset in the memory table
+ *
+ * Read the read retry table starting from page.
+ *
+ */
+static int read_retry_table(struct mtd_info *mtd, uint8_t *buf, int page, int chip)
+{
+ int res;
+ struct nand_chip *this = mtd->priv;
+ struct nand_read_retry_param *rdtry;
+ size_t retlen;
+ loff_t from;
+
+ //from = ((loff_t) page) << this->page_shift;
+ from = ((loff_t) (page*mtd->pageSizek)) << 10;
+
+ res = mtd->_read(mtd, from, mtd->writesize, &retlen, buf);
+ if (res < 0) {
+ if (retlen != mtd->writesize) {
+ printk(KERN_INFO "nand_bbt: Error reading retry table\n");
+ return res;
+ }
+ printk(KERN_WARNING "nand_bbt: ECC error while reading retry table\n");
+ }
+
+ /* Analyse data */
+ rdtry = (struct nand_read_retry_param *)buf;
+ #ifdef RETRY_DEBUG
+ print_nand_buffer((uint8_t *)this->cur_chip, sizeof(chip_table[0]));
+ #endif
+ if (strcmp("readretry", rdtry->magic) /*|| info->data_ecc_uncor_err == 2*/) {
+ printk(KERN_WARNING "nand_bbt: retry table magic number wrong%s\n", rdtry->magic);
+ return -1;
+ }
+ #ifdef RETRY_DEBUG
+ printk(KERN_WARNING "nand_bbt: copy from buf\n");
+ #endif
+ memcpy(/*(uint8_t *)*/this->cur_chip, buf, sizeof(chip_table[0])-16);
+ this->cur_chip->retry_def_value[this->cur_chip->retry_reg_num] = 0xff;
+ this->cur_chip->retry_def_value[this->cur_chip->retry_reg_num+1] = 0xff;
+ #ifdef RETRY_DEBUG
+ print_nand_buffer((uint8_t *)this->cur_chip, sizeof(chip_table[0]));
+ #endif
+
+ /*if (rdtry->eslc_reg_num) {
+ if (rdtry->eslc_reg_num > 5)
+ printk(KERN_WARNING "nand_bbt: eslc reg size=%d is too big\n", rdtry->eslc_reg_num);
+ this->eslc_reg_num = rdtry->eslc_reg_num;
+ this->eslc_cmd = kzalloc(this->eslc_reg_num, GFP_KERNEL);
+ if (!this->eslc_cmd) {
+ printk(KERN_ERR "nand_scan_bbt: create eslc_cmd Out of memory\n");
+ return -ENOMEM;
+ }
+ }
+ memcpy(this->eslc_cmd, ((uint8_t *)&rdtry->retry_reg_num)+4, this->eslc_reg_num);
+ print_nand_buffer(this->eslc_cmd, this->eslc_reg_num);
+
+ if (rdtry->total_retry_cnt && rdtry->retry_reg_num) {
+ if ((rdtry->total_retry_cnt * rdtry->retry_reg_num) > 64)
+ printk(KERN_WARNING "nand_bbt: eslc reg size=%d is too big\n",
+ (rdtry->total_retry_cnt * rdtry->retry_reg_num));
+ this->total_retry_cnt = rdtry->total_retry_cnt;
+ this->retry_reg_num = rdtry->retry_reg_num;
+ this->retry_cmd = kzalloc((this->retry_reg_num*this->total_retry_cnt), GFP_KERNEL);
+ if (!this->retry_cmd) {
+ printk(KERN_ERR "nand_scan_bbt: create retry_cmd Out of memory\n");
+ return -ENOMEM;
+ }
+ }
+ memcpy(this->retry_cmd, ((uint8_t *)&rdtry->retry_reg_num)+4+this->eslc_reg_num,
+ (this->retry_reg_num*this->total_retry_cnt));
+
+
+ for (i = 0; i < this->total_retry_cnt; i++) {
+ print_nand_buffer(&this->retry_cmd[i*this->retry_reg_num], this->retry_reg_num);
+ }*/
+
+ return 0;
+}
+
+/**
+ * read_abs_retry_table - [GENERIC] Read the retry table starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 read all chips.
+ *
+ * Read the retry table for all chips starting at a given page
+*/
+static int read_abs_retry_table(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
+{
+ //struct nand_chip *this = mtd->priv;
+ int res = 0, i, chips;
+
+ //chips = this->numchips
+ chips = 1;
+
+ for (i = 0; i < chips; i++) {
+ if (chip == -1 || chip == i)
+ res = read_retry_table(mtd, buf, td->pages[i], chip);
if (res)
return res;
}
@@ -351,7 +610,7 @@ static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = oob;
ops.len = len;
- return mtd_write_oob(mtd, offs, &ops);
+ return mtd_write_oob(mtd, offs, &ops); /* call mtd->_write_oob*/
}
static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
@@ -376,11 +635,12 @@ static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md)
{
- struct nand_chip *this = mtd->priv;
+ //struct nand_chip *this = mtd->priv;
/* Read the primary version, if available */
if (td->options & NAND_BBT_VERSION) {
- scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
+ //scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
+ scan_read_raw(mtd, buf, (loff_t)(td->pages[0]*mtd->pageSizek) << 10,
mtd->writesize, td);
td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
pr_info("Bad block table at page %d, version 0x%02X\n",
@@ -389,7 +649,8 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
/* Read the mirror version, if available */
if (md && (md->options & NAND_BBT_VERSION)) {
- scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
+ //scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
+ scan_read_raw(mtd, buf, (loff_t)(md->pages[0]*mtd->pageSizek) << 10,
mtd->writesize, td);
md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
pr_info("Bad block table at page %d, version 0x%02X\n",
@@ -422,7 +683,7 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
loff_t offs, uint8_t *buf, int len)
{
struct mtd_oob_ops ops;
- int j, ret;
+ int j, ret, more_bytes = 0, flag = 0;
ops.ooblen = mtd->oobsize;
ops.oobbuf = buf;
@@ -430,20 +691,54 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
ops.datbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
+ if ((mtd->id>>24) == 0x45) {
+ more_bytes = 6;
+ }
for (j = 0; j < len; j++) {
/*
* Read the full oob until read_oob is fixed to handle single
* byte reads for 16 bit buswidth.
*/
- ret = mtd_read_oob(mtd, offs, &ops);
+ /* Dannier Chen patch 2010.04.20: start
+ check invalid bad block on which page of blocks
+ should be based on flash spec, for example flash type:HY27UT088G2M-T(P) bad block
+ is marked at page 125 and 127 of each block.
+ NOT always page 0 and 1.
+ */
+ //printk("scan_block_fast: j=%d len=%d bd->page_offset[0]=%d offset[1]=%d\n ", j, len, bd->page_offset[0], bd->page_offset[1]);
+#ifdef CONFIG_MTD_NAND_WMT_HWECC
+ ret = mtd->read_bbinfo_facmk(mtd, offs + bd->page_offset[j]*mtd->writesize, &ops);
+#else
+ ret = mtd->read_oob(mtd, offs + bd->page_offset[j]*mtd->writesize, &ops);
+#endif
/* Ignore ECC errors when checking for BBM */
if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
- if (check_short_pattern(buf, bd))
- return 1;
-
- offs += mtd->writesize;
+ if (check_short_pattern(buf, bd, more_bytes))
+ flag |= 1;//return 1;
+
+ if ((flag&1) == 0)
+ if (mtd->id == 0xECDED57E && mtd->id2 == 0x68440000)
+ if (check_short_pattern(buf+1, bd, 0))
+ flag |= 1;//return 1;
+
+ if (mtd->planenum > 1) {
+ if (check_short_pattern(buf+more_bytes, bd, more_bytes))
+ flag |= 2;//return 1;
+
+ if (check_short_pattern(buf+32, bd, 0))
+ flag |= 2;//return 1;
+
+ if ((flag&2) == 0)
+ if (mtd->id == 0xECDED57E && mtd->id2 == 0x68440000)
+ if (check_short_pattern(buf+33, bd, 0))
+ flag |= 2;//return 1;
+ }
+ if (flag)
+ return flag;
+ /*offs += mtd->writesize;*/
+ /* Dannier Chen patch 2010.04.20: end */
}
return 0;
}
@@ -471,7 +766,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
pr_info("Scanning device for bad blocks\n");
if (bd->options & NAND_BBT_SCANALLPAGES)
- len = 1 << (this->bbt_erase_shift - this->page_shift);
+ //len = 1 << (this->bbt_erase_shift - this->page_shift);
+ len = 1 << (this->pagecnt_shift);
else if (bd->options & NAND_BBT_SCAN2NDPAGE)
len = 2;
else
@@ -492,7 +788,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
* Note that numblocks is 2 * (real numblocks) here, see i+=2
* below as it makes shifting and masking less painful
*/
- numblocks = mtd->size >> (this->bbt_erase_shift - 1);
+ //numblocks = mtd->size >> (this->bbt_erase_shift - 1);
+ numblocks = ((int)(mtd->size >> (10+this->pagecnt_shift-1)))/mtd->pageSizek;
startblock = 0;
from = 0;
} else {
@@ -501,10 +798,12 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
chip + 1, this->numchips);
return -EINVAL;
}
- numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
+ //numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
+ numblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift-1)))/mtd->pageSizek;
startblock = chip * numblocks;
numblocks += startblock;
- from = (loff_t)startblock << (this->bbt_erase_shift - 1);
+ //from = (loff_t)startblock << (this->bbt_erase_shift - 1);
+ from = (loff_t)(startblock*mtd->pageSizek) << (10+this->pagecnt_shift-1);
}
if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
@@ -512,6 +811,19 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
for (i = startblock; i < numblocks;) {
int ret;
+
+ if (((mtd->id>>24)&0xFF) == 0x45) {
+ /* dannierchen add to erase sandisk all blocks before check bad block 20121217 */
+ /*printk(KERN_INFO "create_bbt: erase all blocks for sandisk\n");*/
+ struct erase_info einfo;
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = from;
+ //einfo.len = 1 << this->bbt_erase_shift;
+ einfo.len = mtd->erasesize;
+ /*printk("einfo.addr is %llx einfo.len is %llx\n", einfo.addr, einfo.len);*/
+ nand_erase_nand(mtd, &einfo, 0xFF);
+ } /* end of dannierchen erase 20121217 */
BUG_ON(bd->options & NAND_BBT_NO_OOB);
@@ -529,14 +841,251 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
pr_warn("Bad eraseblock %d at 0x%012llx\n",
i >> 1, (unsigned long long)from);
mtd->ecc_stats.badblocks++;
- }
+
+ /* edwardwan add for debug 20071229 start */
+#if 0
+ if(mtd->ecc_stats.badblocks > 10){
+ printk("\rnand flash bad block number is greater than 10\n");
+ return 0;
+ }
+ /* edwardwan add for debug 20071229 end */
+#endif
+ } else if (((mtd->id>>24)&0xFF) != 0x45) { /* dannierchen add to erase good block when first creat table 20091014 */
+ /*printk(KERN_INFO "create_bbt: erase good blocks\n");*/
+ struct erase_info einfo;
+ int res = 0;
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = from;
+ //einfo.len = 1 << this->bbt_erase_shift;
+ einfo.len = mtd->erasesize;
+ /*printk("einfo.addr is %llx\n",einfo.addr);
+ printk("einfo.len is %llx\n",einfo.len);*/
+ res = nand_erase_nand(mtd, &einfo, 0xFF);
+ if (res < 0)
+ printk("enand_erase_nand addr 0x%llx result is %x\n", einfo.addr, res);
+ } /* end of dannierchen erase 20091014 */
i += 2;
- from += (1 << this->bbt_erase_shift);
+ //from += (1 << this->bbt_erase_shift);
+ from += (mtd->erasesize);
+ }
+ return 0;
+}
+
+/**
+ * create_bbt_multi - [GENERIC] Create a bad block table by scanning the device
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips; applies only
+ * if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device for the given good/bad block
+ * identify pattern.
+ */
+static int create_bbt_multi(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *bd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, numblocks, len, scanlen;
+ int startblock;
+ loff_t from;
+ size_t readlen;
+
+ pr_info("Scanning device for bad blocks\n");
+
+ if (bd->options & NAND_BBT_SCANALLPAGES)
+ //len = 1 << (this->bbt_erase_shift - this->page_shift);
+ len = 1 << this->pagecnt_shift;
+ else if (bd->options & NAND_BBT_SCAN2NDPAGE)
+ len = 2;
+ else
+ len = 1;
+
+ if (!(bd->options & NAND_BBT_SCANEMPTY)) {
+ /* We need only read few bytes from the OOB area */
+ scanlen = 0;
+ readlen = bd->len;
+ } else {
+ /* Full page content should be read */
+ scanlen = mtd->writesize + mtd->oobsize;
+ readlen = len * mtd->writesize;
+ }
+
+ if (chip == -1) {
+ /*
+ * Note that numblocks is 2 * (real numblocks) here, see i+=2
+ * below as it makes shifting and masking less painful
+ */
+ //numblocks = mtd->size >> (this->bbt_erase_shift - 2);
+ numblocks = ((int)(mtd->size >> (10+this->pagecnt_shift-2)))/mtd->pageSizek;
+ startblock = 0;
+ from = 0;
+ } else {
+ if (chip >= this->numchips) {
+ pr_warn("create_bbt_multi(): chipnr (%d) > available chips (%d)\n",
+ chip + 1, this->numchips);
+ return -EINVAL;
+ }
+ //numblocks = this->chipsize >> (this->bbt_erase_shift - 2);
+ numblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift-2)))/mtd->pageSizek;
+ startblock = chip * numblocks;
+ numblocks += startblock;
+ //from = (loff_t)startblock << (this->bbt_erase_shift - 2);
+ from = (loff_t)(startblock*mtd->pageSizek) << (10+this->pagecnt_shift-2);
+ }
+
+ if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
+ from += mtd->erasesize - (mtd->writesize * len);
+
+ for (i = startblock; i < numblocks;) {
+ int ret;
+
+ if ((mtd->id>>24) == 0x45) {
+ /* dannierchen add to erase sandisk all blocks before check bad block 20121217 */
+ /*printk(KERN_INFO "create_bbt_multi: erase all blocks for sandisk\n");*/
+ struct erase_info einfo;
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = from;
+ //einfo.len = 1 << this->bbt_erase_shift;
+ einfo.len = mtd->erasesize;
+ /*printk("einfo.addr is %llx einfo.len is %llx\n", einfo.addr, einfo.len);*/
+ nand_erase_nand(mtd, &einfo, 0xFF);
+ } /* end of dannierchen erase 20121217 */
+
+ BUG_ON(bd->options & NAND_BBT_NO_OOB);
+
+ if (bd->options & NAND_BBT_SCANALLPAGES)
+ ret = scan_block_full(mtd, bd, from, buf, readlen,
+ scanlen, len);
+ else
+ ret = scan_block_fast(mtd, bd, from, buf, len);
+
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ this->bbt[i >> 3] |= 0x0F << (i & 0x4);
+ pr_warn("Bad eraseblock %d at 0x%012llx\n",
+ i >> 2, (unsigned long long)from);
+ mtd->ecc_stats.badblocks++;
+
+ /* edwardwan add for debug 20071229 start */
+#if 0
+ if(mtd->ecc_stats.badblocks > 10){
+ printk("\rnand flash bad block number is greater than 10\n");
+ return 0;
+ }
+ /* edwardwan add for debug 20071229 end */
+#endif
+ } else if ((mtd->id>>24) != 0x45) { /* dannierchen add to erase good block when first creat table 20091014 */
+ /*printk(KERN_INFO "create_bbt_multi: erase good blocks\n");*/
+ struct erase_info einfo;
+ int res = 0;
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = from;
+ //einfo.len = 1 << this->bbt_erase_shift;
+ einfo.len = mtd->erasesize;
+ /*printk("einfo.addr is %llx\n",einfo.addr);
+ printk("einfo.len is %llx\n",einfo.len);*/
+ res = nand_erase_nand(mtd, &einfo, 0xFF);
+ if (res < 0)
+ printk("enand_erase_nand addr 0x%llx result is %x\n", einfo.addr, res);
+ } /* end of dannierchen erase 20091014 */
+
+ i += 4;
+ //from += (1 << this->bbt_erase_shift);
+ from += (mtd->erasesize);
+ }
+ return 0;
+}
+
+int create_hynix_table(struct mtd_info *mtd, int chip)
+{
+ int res;
+ res = mtd->get_para(mtd, chip);
+
+ return res;
+}
+
+static int check_retry_pattern(uint8_t *buf, int paglen, struct nand_bbt_descr *td)
+{
+ int i;
+ uint8_t *p = buf+paglen;
+
+ for (i = 0; i < 10; i++) {
+ if (p[i] != td->pattern[i])
+ return -1;
+ }
+ return 0;
+}
+/*
+*
+* read oob to search retry table
+*
+*/
+
+static int search_hynix_retry_table(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, chips;
+ int startblock, block, dir;
+ int bbtblocks;
+ //int blocktopage = this->bbt_erase_shift - this->page_shift;
+ int blocktopage = this->pagecnt_shift;
+
+ /* Search direction top -> down ? */
+ //if (td->options & NAND_BBT_LASTBLOCK) {
+ //startblock = (mtd->size >> this->bbt_erase_shift) - 1;
+ startblock = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek - 1;
+ dir = -1;
+ /*} else {
+ startblock = 0;
+ dir = 1;
+ }*/
+
+ //so far use first chip parameter for read retry on 2-die chip
+ //chips = this->numchips;
+ chips = 1;
+
+ //bbtblocks = this->chipsize >> this->bbt_erase_shift;
+ bbtblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ startblock &= bbtblocks - 5;
+
+ for (i = 0; i < chips; i++) {
+ td->pages[i] = -1;
+ /* Scan the maximum number of blocks */
+ for (block = 0; block < td->maxblocks; block++) {
+
+ int actblock = startblock + dir * block;
+ //loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
+ loff_t offs = (loff_t)(actblock*mtd->pageSizek) << (10+this->pagecnt_shift);
+
+ /* Read first page */
+ scan_read_raw(mtd, buf, offs, mtd->writesize, td);
+
+ if (!check_retry_pattern(buf, mtd->writesize, this->retry_pattern)) {
+ td->pages[i] = actblock << blocktopage;
+ break;
+ }
+ }
+ //startblock += this->chipsize >> this->bbt_erase_shift;
+ startblock += ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ }
+ /* Check, if we found a bbt for each requested chip */
+ for (i = 0; i < chips; i++) {
+ if (td->pages[i] == -1)
+ printk(KERN_WARNING "Retry block table not found for chip %d\n", i);
+ else
+ printk(KERN_WARNING "Retry block table is found for chip %d\n", i);
}
return 0;
}
+
/**
* search_bbt - [GENERIC] scan the device for a specific bad block table
* @mtd: MTD device structure
@@ -559,11 +1108,13 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
int bits, startblock, block, dir;
int scanlen = mtd->writesize + mtd->oobsize;
int bbtblocks;
- int blocktopage = this->bbt_erase_shift - this->page_shift;
+ //int blocktopage = this->bbt_erase_shift - this->page_shift;
+ int blocktopage = this->pagecnt_shift;
/* Search direction top -> down? */
if (td->options & NAND_BBT_LASTBLOCK) {
- startblock = (mtd->size >> this->bbt_erase_shift) - 1;
+ //startblock = (mtd->size >> this->bbt_erase_shift) - 1;
+ startblock = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek - 1;
dir = -1;
} else {
startblock = 0;
@@ -573,15 +1124,19 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chips = this->numchips;
- bbtblocks = this->chipsize >> this->bbt_erase_shift;
+ //bbtblocks = this->chipsize >> this->bbt_erase_shift;
+ bbtblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
startblock &= bbtblocks - 1;
} else {
chips = 1;
- bbtblocks = mtd->size >> this->bbt_erase_shift;
+ //bbtblocks = mtd->size >> this->bbt_erase_shift;
+ bbtblocks = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek;
}
/* Number of bits for each erase block in the bbt */
bits = td->options & NAND_BBT_NRBITS_MSK;
+ if (this->realplanenum)
+ bits<<=1;
for (i = 0; i < chips; i++) {
/* Reset version information */
@@ -591,20 +1146,39 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
for (block = 0; block < td->maxblocks; block++) {
int actblock = startblock + dir * block;
- loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
+ //loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
+ loff_t offs = (loff_t)(actblock*mtd->pageSizek) << (10+this->pagecnt_shift);
/* Read first page */
scan_read_raw(mtd, buf, offs, mtd->writesize, td);
+ //print_nand_buffer(buf+mtd->writesize, 64);
if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
td->pages[i] = actblock << blocktopage;
+ printk("get bbt0 from %x\n",td->pages[i]);
+ this->bbt_plane[i] = 0;
if (td->options & NAND_BBT_VERSION) {
- offs = bbt_get_ver_offs(mtd, td);
- td->version[i] = buf[offs];
+ u32 offs_ver;
+ offs_ver = bbt_get_ver_offs(mtd, td);
+ td->version[i] = buf[offs_ver];
}
break;
}
+ if (this->realplanenum)
+ if (!check_pattern(buf, scanlen, mtd->writesize+20, td)) {
+ td->pages[i] = actblock << blocktopage;
+ //printk("get bbt1 from %x\n",td->pages[i]);
+ this->bbt_plane[i] = td->pages[i];
+ //printk("get bbt plane[%d] from %x\n",i, this->bbt_plane[i]);
+ if (td->options & NAND_BBT_VERSION) {
+ u32 offs_ver;
+ offs_ver = bbt_get_ver_offs(mtd, td);
+ td->version[i] = buf[20+offs_ver];
+ }
+ break;
+ }
}
- startblock += this->chipsize >> this->bbt_erase_shift;
+ //startblock += this->chipsize >> this->bbt_erase_shift;
+ startblock += ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
}
/* Check, if we found a bbt for each requested chip */
for (i = 0; i < chips; i++) {
@@ -617,6 +1191,9 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
return 0;
}
+extern int reset_nfc(struct mtd_info *mtd, unsigned int *buf, int step);
+extern void nfc_hw_rdmz(struct mtd_info *mtd, int on);
+extern void print_nand_register(struct mtd_info *mtd);
/**
* search_read_bbts - [GENERIC] scan the device for bad block table(s)
* @mtd: MTD device structure
@@ -673,7 +1250,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
rcode = 0xff;
/* Write bad block table per chip rather than per device? */
if (td->options & NAND_BBT_PERCHIP) {
- numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ //numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ numblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
/* Full device write or specific chip? */
if (chipsel == -1) {
nrchips = this->numchips;
@@ -682,7 +1260,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
chip = chipsel;
}
} else {
- numblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ //numblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ numblocks = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek;
nrchips = 1;
}
@@ -720,7 +1299,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
continue;
}
page = block <<
- (this->bbt_erase_shift - this->page_shift);
+ //(this->bbt_erase_shift - this->page_shift);
+ this->pagecnt_shift;
/* Check, if the block is used by the mirror table */
if (!md || md->pages[chip] != page)
goto write;
@@ -731,7 +1311,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
/* Set up shift count and masks for the flash table */
bits = td->options & NAND_BBT_NRBITS_MSK;
- msk[2] = ~rcode;
+ msk[2] = 2;//~rcode;
switch (bits) {
case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
msk[3] = 0x01;
@@ -749,14 +1329,20 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
}
bbtoffs = chip * (numblocks >> 2);
+ if (this->realplanenum)
+ bbtoffs = chip * (numblocks >> 1);
- to = ((loff_t)page) << this->page_shift;
+ //to = ((loff_t)page) << this->page_shift;
+ to = ((loff_t) (page*mtd->pageSizek)) << 10;
/* Must we save the block contents? */
if (td->options & NAND_BBT_SAVECONTENT) {
+ printk("inlegal------not go-----------\n");
/* Make it block aligned */
- to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
- len = 1 << this->bbt_erase_shift;
+ //to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
+ to &= ~((loff_t)((mtd->erasesize) - 1));//danbbg
+ //len = 1 << this->bbt_erase_shift;
+ len = mtd->erasesize;
res = mtd_read(mtd, to, len, &retlen, buf);
if (res < 0) {
if (retlen != len) {
@@ -768,15 +1354,18 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
"block for writing bad block table\n");
}
/* Read oob data */
- ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
+ //ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
+ ops.ooblen = (mtd->pagecnt) * mtd->oobsize;
ops.oobbuf = &buf[len];
res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
if (res < 0 || ops.oobretlen != ops.ooblen)
goto outerr;
/* Calc the byte offset in the buffer */
- pageoffs = page - (int)(to >> this->page_shift);
- offs = pageoffs << this->page_shift;
+ //pageoffs = page - (int)(to >> this->page_shift);
+ pageoffs = page - ((int)(to >> 10))/mtd->pageSizek;
+ //offs = pageoffs << this->page_shift;
+ offs = (pageoffs*mtd->pageSizek) << 10;
/* Preset the bbt area with 0xff */
memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
ooboffs = len + (pageoffs * mtd->oobsize);
@@ -801,9 +1390,12 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
len = (size_t)(numblocks >> sft);
/* Make it page aligned! */
len = ALIGN(len, mtd->writesize);
+ if (len < mtd->writesize)
+ len = mtd->writesize;
/* Preset the buffer with 0xff */
memset(buf, 0xff, len +
- (len >> this->page_shift)* mtd->oobsize);
+ //(len >> this->page_shift)* mtd->oobsize);
+ mtd->pagecnt* mtd->oobsize);
offs = 0;
ooboffs = len;
/* Pattern is located in oob area of first page */
@@ -820,17 +1412,276 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
for (j = 0; j < 4; j++, i++) {
int sftcnt = (i << (3 - sft)) & sftmsk;
/* Do not store the reserved bbt blocks! */
- buf[offs + (i >> sft)] &=
- ~(msk[dat & 0x03] << sftcnt);
+
+ /* dannier 2014/03/01 add a condition only retry and bbt blocks are not store
+ read retry command fail blocks are marked as reserved blk and need to be stored to flash */
+ if (i >= (numblocks - td->maxblocks - 4) && (dat&0x3) == 0x2) {
+ //buf[offs + (i >> sft)] &= ~(msk[dat & 0x03] << sftcnt);
+ //printk("offs + (i >> sft)=%d data=0x%x, dat=0x%x sft=%d\n",offs + (i >> sft), ~(msk[dat & 0x03] << sftcnt), dat, sft);
+ } else
+ buf[offs + (i >> sft)] &= ~(msk[dat & 0x03] << sftcnt);
dat >>= 2;
}
}
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = to;
+ //einfo.len = 1 << this->bbt_erase_shift;
+ einfo.len = mtd->erasesize;
+ res = nand_erase_nand(mtd, &einfo, 1);
+ if (res < 0)
+ goto outerr;
+
+ res = scan_write_bbt(mtd, to, len, buf,
+ td->options & NAND_BBT_NO_OOB ? NULL :
+ &buf[len]);
+ if (res < 0)
+ goto outerr;
+ pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
+ (unsigned long long)to, td->version[chip]);
+//while(1);
+ /* Mark it as used */
+ td->pages[chip] = page;
+ }
+ return 0;
+
+ outerr:
+ pr_warn("nand_bbt: error while writing bad block table %d\n", res);
+ return res;
+}
+
+/**
+ * write_bbt - [GENERIC] (Re)write the bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
+ *
+ * (Re)write the bad block table.
+ */
+static int write_bbt_multi(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md,
+ int chipsel)
+{
+ struct nand_chip *this = mtd->priv;
+ struct erase_info einfo;
+ int i, j, res, chip = 0;
+ int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
+ int nrchips, bbtoffs, pageoffs, ooboffs;
+ uint8_t msk[16];
+ uint8_t rcode = td->reserved_block_code;
+ size_t retlen, len = 0;
+ loff_t to;
+ struct mtd_oob_ops ops;
+
+ ops.ooblen = mtd->oobsize;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ if (!rcode)
+ rcode = 0xff;
+ /* Write bad block table per chip rather than per device? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ //numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ numblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ /* Full device write or specific chip? */
+ if (chipsel == -1) {
+ nrchips = this->numchips;
+ } else {
+ nrchips = chipsel + 1;
+ chip = chipsel;
+ }
+ } else {
+ //numblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ numblocks = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ nrchips = 1;
+ }
+
+ /* Loop through the chips */
+ for (; chip < nrchips; chip++) {
+ /*
+ * There was already a version of the table, reuse the page
+ * This applies for absolute placement too, as we have the
+ * page nr. in td->pages.
+ */
+ if (td->pages[chip] != -1) {
+ page = td->pages[chip];
+ goto write;
+ }
+
+ /*
+ * Automatic placement of the bad block table. Search direction
+ * top -> down?
+ */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = numblocks * (chip + 1) - 1;
+ dir = -1;
+ } else {
+ startblock = chip * numblocks;
+ dir = 1;
+ }
+
+ for (i = 0; i < td->maxblocks; i++) {
+ int block = startblock + dir * i;
+ //printk("blockstatus=%x\n",((this->bbt[block >> 1] >> (2 * (block & 0x01))) & 0x0F));
+ //printk("block=%x, sht=%d\n",block,(4 * (block & 0x01)) & 0x0F);
+ /* Check, if the block is bad */
+ switch ((this->bbt[block >> 1] >>
+ (4 * (block & 0x01))) & 0x0F) {
+ case 0x01:
+ case 0x04:
+ case 0x05://case 0x07: case 0x0D: not exist for bad_fact+bad_wort
+ case 0x03://case 0x0B: case 0x0E: not exist for bad_fact+reserved
+ case 0x0C://case 0x02: case 0x08: not exist for good + reserved
+ case 0x0F://case 0x06: case 0x09: not exist for bad_wort+reserved
+ continue;
+ //case 0x00: case 0x0A: only good or reserved is used (so far no reserved)
+ }
+ page = block <<
+ //(this->bbt_erase_shift - this->page_shift);
+ this->pagecnt_shift;
+ /* Check, if the block is used by the mirror table */
+ if (!md || md->pages[chip] != page)
+ goto write;
+ }
+ pr_err("No space left to write bad block table\n");
+ return -ENOSPC;
+ write:
+
+ /* Set up shift count and masks for the flash table */
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+ if (this->realplanenum)
+ bits<<=1;
+ msk[2] = ~rcode;
+ switch (bits) {
+ case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x01;
+ break;
+ case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x03;
+ break;
+ case 4: sft = 1; sftmsk = 0x04;
+ msk[0] = 0x00; msk[1] = 0x01; msk[2] = 0x2; msk[3] = 0x03;
+ msk[4] = 0x04; msk[5] = 0x05; msk[6] = 0x06; msk[7] = 0x07;
+ msk[8] = 0x08; msk[9] = 0x09; msk[10] = /*~rcode*/0x0a; msk[11] = 0x0b;
+ msk[12] = 0x0c; msk[13] = 0x0d; msk[14] = 0x0e; msk[15] = 0x0f;
+ break;
+ case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
+ msk[3] = 0xff;
+ break;
+ default: return -EINVAL;
+ }
+
+ bbtoffs = chip * (numblocks >> 2);
+ if (this->realplanenum)
+ bbtoffs = chip * (numblocks >> 1);
+
+ //to = ((loff_t)page) << this->page_shift;
+ to = ((loff_t) (page*mtd->pageSizek)) << 10;
+
+ /* Must we save the block contents? */
+ if (td->options & NAND_BBT_SAVECONTENT) {
+ /* Make it block aligned */printk("write bbt multi inlegal-----------------\n");
+ //to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
+ to &= ~((loff_t)((mtd->erasesize) - 1));//danbbg
+ //len = 1 << this->bbt_erase_shift;
+ len = mtd->erasesize;
+ res = mtd_read(mtd, to, len, &retlen, buf);
+ if (res < 0) {
+ if (retlen != len) {
+ pr_info("nand_bbt: error reading block "
+ "for writing the bad block table\n");
+ return res;
+ }
+ pr_warn("nand_bbt: ECC error while reading "
+ "block for writing bad block table\n");
+ }
+ /* Read oob data */
+ //ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
+ ops.ooblen = (mtd->pagecnt) * mtd->oobsize;
+ ops.oobbuf = &buf[len];
+ res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
+ if (res < 0 || ops.oobretlen != ops.ooblen)
+ goto outerr;
+
+ /* Calc the byte offset in the buffer */
+ //pageoffs = page - (int)(to >> this->page_shift);
+ pageoffs = page - ((int)(to >> 10))/mtd->pageSizek;
+ //offs = pageoffs << this->page_shift;
+ offs = (pageoffs*mtd->pageSizek) << 10;
+ /* Preset the bbt area with 0xff */
+ memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
+ ooboffs = len + (pageoffs * mtd->oobsize);
+
+ } else if (td->options & NAND_BBT_NO_OOB) {
+ ooboffs = 0;
+ offs = td->len;
+ /* The version byte */
+ if (td->options & NAND_BBT_VERSION)
+ offs++;
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ len += offs;
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len);
+ /* Pattern is located at the begin of first page */
+ memcpy(buf, td->pattern, td->len);
+ } else {
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ if (len < mtd->writesize)
+ len = mtd->writesize;
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len +
+ //(len >> this->page_shift)* mtd->oobsize);
+ mtd->pagecnt* mtd->oobsize);
+ offs = 0;
+ ooboffs = len;
+ /* Pattern is located in oob area of first page */
+ memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
+ //printk("td->len=%d ooboffs=%d td->offs=%d\n", td->len, ooboffs, td->offs);
+ }
+
+ if (td->options & NAND_BBT_VERSION)
+ buf[ooboffs + td->veroffs] = td->version[chip];
+
+ /* Walk through the memory table */
+ for (i = 0; i < numblocks;) {
+ uint8_t dat;
+ dat = this->bbt[bbtoffs + (i >> 1)];
+ for (j = 0; j < 2; j++, i++) {
+ int sftcnt = (i << (3 - sft)) & sftmsk;
+ /* Do not store the reserved bbt blocks! */
+ /* dannier 2014/03/01 add a condition only retry and bbt blocks are not store
+ read retry command fail blocks are marked as reserved blk and need to be stored to flash */
+ if (i >= (numblocks - td->maxblocks - 4) && (dat&0xF)==0xa) {
+ //buf[offs + (i >> sft)] &= ~(msk[dat & 0x0F] << sftcnt);
+ //printk("offs + (i >> sft)=%d data=0x%x, dat=0x%x sft=%d\n",offs + (i >> sft), ~(msk[dat & 0x0F] << sftcnt), dat, sft);
+ } else
+ buf[offs + (i >> sft)] &= ~(msk[dat & 0x0F] << sftcnt);
+
+ dat >>= 4;
+ }
+ }
+ memcpy(&buf[mtd->realwritesize], buf, (numblocks>>1));
+//printk("print bbt write info ");print_nand_buffer(buf, 1536);
+/*printk("Bad block table written to 0x%012llx, version 0x%02X\n",
+ (unsigned long long)to, td->version[chip]);dump_stack();while(1);*/
+//printk("erase blk=%d, page=0x%x len=%d copy=%d\n", (unsigned int)(to>>this->bbt_erase_shift), page, len, (numblocks>>1));
memset(&einfo, 0, sizeof(einfo));
einfo.mtd = mtd;
einfo.addr = to;
- einfo.len = 1 << this->bbt_erase_shift;
+ //einfo.len = 1 << this->bbt_erase_shift;
+ einfo.len = mtd->erasesize;
res = nand_erase_nand(mtd, &einfo, 1);
+ //printk("erase ret=%d\n",res);
if (res < 0)
goto outerr;
@@ -849,7 +1700,154 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
return 0;
outerr:
- pr_warn("nand_bbt: error while writing bad block table %d\n", res);
+ pr_warn("nand_bbt: multi error while writing bad block table %d\n", res);
+ return res;
+}
+
+void copy_retry_info_to_buf(struct mtd_info *mtd, uint8_t *buf)
+{
+ /*uint8_t *bf;
+ struct nand_read_retry_param *rdtry = (struct nand_read_retry_param *)buf;*/
+ struct nand_chip *this = mtd->priv;
+
+ memcpy(buf, /*(uint8_t *)*/this->cur_chip, sizeof(chip_table[0])-16);
+ #ifdef RETRY_DEBUG
+ print_nand_buffer((uint8_t *)this->cur_chip, sizeof(chip_table[0]));
+ #endif
+
+ /*
+ memcpy(buf, "ANDROID!", 8);
+ rdtry->nand_id = FlashId;//this->nand_id;
+ //rdtry->nand_id_5th = this->nand_id_5th;
+ rdtry->eslc_reg_num = this->eslc_reg_num;
+ rdtry->total_retry_cnt = this->total_retry_cnt;
+ rdtry->retry_reg_num = this->retry_reg_num;
+ bf = buf + 28;
+ if (this->eslc_reg_num)
+ memcpy(bf, this->eslc_cmd, this->eslc_reg_num);
+ bf = buf + this->eslc_reg_num;
+ if (this->retry_reg_num)
+ memcpy(bf, this->retry_cmd, this->retry_reg_num * this->total_retry_cnt);
+ else
+ printk("no retry param is writen to retry table block\n");
+
+ printk("save rdtry to block\n");
+ print_nand_buffer(buf, 128);
+ */
+}
+
+/**
+ * write_hynix_table - [GENERIC] (Re)write the hynix table
+ *
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the retry table block
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
+ *
+ * (Re)write the bad block table
+ *
+*/
+static int write_hynix_table(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, int chipsel)
+{
+ struct nand_chip *this = mtd->priv;
+ struct erase_info einfo;
+ int i, res, chip = 0;
+ int startblock, dir, page, numblocks, nrchips;
+ uint8_t rcode = td->reserved_block_code;
+ size_t len = 0;
+ loff_t to;
+ struct mtd_oob_ops ops;
+
+ ops.ooblen = mtd->oobsize;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ if (!rcode)
+ rcode = 0xff;
+
+ //numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ numblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ nrchips = chipsel + 1;
+ chip = chipsel;
+
+ /* Loop through the chips */
+ for (; chip < nrchips; chip++) {
+
+ /* Search direction top -> down ? */
+ startblock = numblocks * (chip + 1) - 5;
+ dir = -1;
+
+ for (i = 0; i < td->maxblocks; i++) {
+ int block = startblock + dir * i;
+ /* Check, if the block is bad */
+ if (this->realplanenum) {
+ switch ((this->bbt[block >> 1] >>
+ (4 * (block & 0x01))) & 0x0F) {
+ case 0x01:
+ case 0x04:
+ case 0x05://case 0x07: case 0x0D: not exist for bad_fact+bad_wort
+ case 0x03://case 0x0B: case 0x0E: not exist for bad_fact+reserved
+ case 0x0C://case 0x02: case 0x08: not exist for good + reserved
+ case 0x0F://case 0x06: case 0x09: not exist for bad_wort+reserved
+ continue;
+ //case 0x00: case 0x0A: only good or reserved is used (so far no reserved)
+ }
+ } else {
+ switch ((this->bbt[block >> 2] >>
+ (2 * (block & 0x03))) & 0x03) {
+ case 0x01:
+ case 0x03:
+ continue;
+ }
+ }
+ //page = block << (this->bbt_erase_shift - this->page_shift);
+ page = block << this->pagecnt_shift;
+ goto write;
+ }
+ printk(KERN_ERR "No space left to write read retry table\n");
+ return -ENOSPC;
+ write:
+
+ //to = ((loff_t) page) << this->page_shift;
+ to = ((loff_t) (page*mtd->pageSizek)) << 10;
+ len = mtd->writesize;
+ /* Preset the buffer with 0xff */
+ //memset(buf, 0xff, len + (len >> this->page_shift)* mtd->oobsize);
+ memset(buf, 0xff, len + mtd->pagecnt* mtd->oobsize);
+ /* Pattern is located in oob area of first page */
+ memcpy(&buf[len], td->pattern, 10);
+
+ //------write signature into buf retry into--/
+ #ifdef RETRY_DEBUG
+ printk("save rdtry to page=0x%x\n", page);
+ #endif
+ copy_retry_info_to_buf(mtd, buf);
+
+ //------erase block-----------/
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = to;
+ //einfo.len = 1 << this->bbt_erase_shift;
+ einfo.len = mtd->erasesize;
+ res = nand_erase_nand(mtd, &einfo, 1);
+ if (res < 0)
+ goto outerr;
+ printk("writing rdtry to nand flash and page addr is 0x%x, len=0x%x\n", page, len);
+ res = scan_write_bbt(mtd, to, len, buf, &buf[len]);
+ if (res < 0)
+ goto outerr;
+
+ /* Mark it as used */
+ td->pages[chip] = page;
+ }
+ return 0;
+
+ outerr:
+ printk(KERN_WARNING
+ "nand_bbt: Error while writing read retry table %d\n", res);
return res;
}
@@ -866,7 +1864,10 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
struct nand_chip *this = mtd->priv;
bd->options &= ~NAND_BBT_SCANEMPTY;
- return create_bbt(mtd, this->buffers->databuf, bd, -1);
+ if (this->realplanenum)
+ return create_bbt_multi(mtd, this->buffers->databuf, bd, -1);
+ else
+ return create_bbt(mtd, this->buffers->databuf, bd, -1);
}
/**
@@ -939,8 +1940,19 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
continue;
/* Create the table in memory by scanning the chip(s) */
- if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
- create_bbt(mtd, buf, bd, chipsel);
+ if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY)) {
+ //print_nand_register(mtd);
+ if (mtd->dwRdmz)
+ reset_nfc(mtd, NULL, 3);
+ //print_nand_register(mtd);
+
+ if (this->realplanenum)
+ create_bbt_multi(mtd, buf, bd, chipsel);
+ else
+ create_bbt(mtd, buf, bd, chipsel);
+
+
+ }
td->version[i] = 1;
if (md)
@@ -982,14 +1994,20 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
/* Write the bad block table to the device? */
if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
- res = write_bbt(mtd, buf, td, md, chipsel);
+ if (this->realplanenum)
+ res = write_bbt_multi(mtd, buf, td, md, chipsel);
+ else
+ res = write_bbt(mtd, buf, td, md, chipsel);
if (res < 0)
return res;
}
/* Write the mirror bad block table to the device? */
if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
- res = write_bbt(mtd, buf, md, td, chipsel);
+ if (this->realplanenum)
+ res = write_bbt_multi(mtd, buf, md, td, chipsel);
+ else
+ res = write_bbt(mtd, buf, md, td, chipsel);
if (res < 0)
return res;
}
@@ -997,6 +2015,53 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
return 0;
}
+static int check_retry_table(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *rd)
+{
+ int i, chips, chipsel, res = 0, need_save = 0;
+ struct nand_chip *this = mtd->priv;
+
+ /* Do we have a retry table per chip ? */
+ /* so far, use chip 0 retry param on chip 0 and chip 1 */
+ //chips = this->numchips;
+ chips = 1;
+ for (i = 0; i < chips; i++) {
+ /* Per chip */
+ chipsel = i;
+ if (rd->pages[i] == -1) {
+ goto create;
+ }
+ #ifdef RETRY_DEBUG
+ printk("read_abs_retry_table\n");
+ #endif
+ /* Read the retry table starting at a given page */
+ res = read_abs_retry_table(mtd, buf, rd, chipsel);
+ if (res == 0) {
+ if(this->cur_chip != NULL) {
+ this->select_chip(mtd, 0);
+ this->cur_chip->get_parameter(mtd, READ_RETRY_MODE);
+ this->select_chip(mtd, -1);
+ }
+ break;
+ }
+
+ create:
+ #ifdef RETRY_DEBUG
+ printk("create_hynix_table\n");
+ #endif
+ /* Create the table in memory by get feature or get otp cmd */
+ create_hynix_table(mtd, chipsel);
+
+ need_save = 1;
+
+ //printk("dannier write_hynix_table\n");
+ /* Write the retry block table to the device ? => leave it saved after bbt searched*/
+ /*res = write_hynix_table(mtd, buf, rd, chipsel);
+ if (res < 0)
+ return res;*/
+ }
+
+ return need_save;
+}
/**
* mark_bbt_regions - [GENERIC] mark the bad block table regions
* @mtd: MTD device structure
@@ -1014,10 +2079,12 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chips = this->numchips;
- nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ //nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ nrblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
} else {
chips = 1;
- nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ //nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ nrblocks = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek;
}
for (i = 0; i < chips; i++) {
@@ -1025,19 +2092,25 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
!(td->options & NAND_BBT_WRITE)) {
if (td->pages[i] == -1)
continue;
- block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
+ //block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
+ block = td->pages[i] >> (this->pagecnt_shift);
block <<= 1;
oldval = this->bbt[(block >> 3)];
newval = oldval | (0x2 << (block & 0x06));
this->bbt[(block >> 3)] = newval;
- if ((oldval != newval) && td->reserved_block_code)
- nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1));
+ if ((oldval != newval) && 0/*td->reserved_block_code*/)
+ //nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1));
+ nand_update_bbt(mtd, (loff_t)(block*mtd->pageSizek) << (10+this->pagecnt_shift-1));
continue;
}
update = 0;
- if (td->options & NAND_BBT_LASTBLOCK)
+ if (td->options & NAND_BBT_LASTBLOCK) {
block = ((i + 1) * nrblocks) - td->maxblocks;
- else
+ if (td->pattern[0] == 'r' && td->pattern[1] == 'e') {
+ block = ((i + 1) * nrblocks) - td->maxblocks - 4;
+ //printk("mark_bbt_region set blocks =%d ~ %d\n", block, block+3);
+ }
+ } else
block = i * nrblocks;
block <<= 1;
for (j = 0; j < td->maxblocks; j++) {
@@ -1053,12 +2126,83 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
* new ones have been marked, then we need to update the stored
* bbts. This should only happen once.
*/
- if (update && td->reserved_block_code)
- nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
+ if (update && 0/*td->reserved_block_code*/)
+ //nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
+ nand_update_bbt(mtd, (loff_t)((block - 2)*mtd->pageSizek) << (10+this->pagecnt_shift-1));
}
}
/**
+ * mark_bbt_regions_multi - [GENERIC] mark the bad block table regions
+ * @mtd: MTD device structure
+ * @td: bad block table descriptor
+ *
+ * The bad block table regions are marked as "bad" to prevent accidental
+ * erasures / writes. The regions are identified by the mark 0x02.
+ */
+static void mark_bbt_region_multi(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, j, chips, block, nrblocks, update;
+ uint8_t oldval, newval;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = this->numchips;
+ //nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ nrblocks = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ } else {
+ chips = 1;
+ //nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ nrblocks = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek;
+ }
+
+ for (i = 0; i < chips; i++) {
+ if ((td->options & NAND_BBT_ABSPAGE) ||
+ !(td->options & NAND_BBT_WRITE)) {
+ if (td->pages[i] == -1)
+ continue;
+ //block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
+ block = td->pages[i] >> (this->pagecnt_shift);
+ block <<= 2;
+ oldval = this->bbt[(block >> 3)];
+ newval = oldval | (0xA << (block & 0x04));
+ this->bbt[(block >> 3)] = newval;
+ if ((oldval != newval) && 0/*td->reserved_block_code*/)
+ //nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 2));
+ nand_update_bbt(mtd, (loff_t)(block*mtd->pageSizek) << (10+this->pagecnt_shift-2));
+ continue;
+ }
+ update = 0;
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ block = ((i + 1) * nrblocks) - td->maxblocks;
+ if (td->pattern[0] == 'r' && td->pattern[1] == 'e') {
+ block = ((i + 1) * nrblocks) - td->maxblocks - 4;
+ //printk("mark_bbt_region set blocks =%d ~ %d\n", block, block+3);
+ }
+ } else
+ block = i * nrblocks;
+ block <<= 2;
+ for (j = 0; j < td->maxblocks; j++) {
+ oldval = this->bbt[(block >> 3)];
+ newval = oldval | (0xA << (block & 0x04));
+ this->bbt[(block >> 3)] = newval;
+ if (oldval != newval)
+ update = 1;
+ block += 4;
+ }
+ /*
+ * If we want reserved blocks to be recorded to flash, and some
+ * new ones have been marked, then we need to update the stored
+ * bbts. This should only happen once.
+ */
+ if (update && 0/*td->reserved_block_code*/)
+ //nand_update_bbt(mtd, (loff_t)(block - 4) << (this->bbt_erase_shift - 2));
+ nand_update_bbt(mtd, (loff_t)((block - 4)*mtd->pageSizek) << (10+this->pagecnt_shift-2));
+ }//print_nand_buffer(this->bbt, 2048);
+}
+
+/**
* verify_bbt_descr - verify the bad block description
* @mtd: MTD device structure
* @bd: the table to verify
@@ -1078,6 +2222,8 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
pattern_len = bd->len;
bits = bd->options & NAND_BBT_NRBITS_MSK;
+ if (this->realplanenum)
+ bits<<=1;
BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
!(this->bbt_options & NAND_BBT_USE_FLASH));
@@ -1096,14 +2242,17 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
}
if (bd->options & NAND_BBT_PERCHIP)
- table_size = this->chipsize >> this->bbt_erase_shift;
+ //table_size = this->chipsize >> this->bbt_erase_shift;
+ table_size = ((int)(this->chipsize >> (10+this->pagecnt_shift)))/mtd->pageSizek;
else
- table_size = mtd->size >> this->bbt_erase_shift;
+ //table_size = mtd->size >> this->bbt_erase_shift;
+ table_size = ((int)(mtd->size >> (10+this->pagecnt_shift)))/mtd->pageSizek;
table_size >>= 3;
table_size *= bits;
if (bd->options & NAND_BBT_NO_OOB)
table_size += pattern_len;
- BUG_ON(table_size > (1 << this->bbt_erase_shift));
+ //BUG_ON(table_size > (1 << this->bbt_erase_shift));
+ BUG_ON(table_size > mtd->erasesize);
}
/**
@@ -1118,15 +2267,21 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
* The bad block table memory is allocated here. It must be freed by calling
* the nand_free_bbt function.
*/
+extern struct nand_read_retry_param chip_table[];
int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct nand_chip *this = mtd->priv;
- int len, res = 0;
+ int len, res = 0, i, need_save = 0;
uint8_t *buf;
struct nand_bbt_descr *td = this->bbt_td;
struct nand_bbt_descr *md = this->bbt_md;
+
+ this->bbt_plane[1] = this->bbt_plane[0] = 0;
- len = mtd->size >> (this->bbt_erase_shift + 2);
+ //len = mtd->size >> (this->bbt_erase_shift + 2);
+ len = ((int)(mtd->size >> (10+this->pagecnt_shift+2)))/mtd->pageSizek;
+ if (this->realplanenum)
+ len <<=1;
/*
* Allocate memory (2bit per block) and clear the memory bad block
* table.
@@ -1135,6 +2290,8 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
if (!this->bbt)
return -ENOMEM;
+ /* Clear the memory bad block table */
+ memset (this->bbt, 0x00, len);
/*
* If no primary table decriptor is given, scan the device to build a
* memory based bad block table.
@@ -1151,14 +2308,40 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
verify_bbt_descr(mtd, md);
/* Allocate a temporary buffer for one eraseblock incl. oob */
- len = (1 << this->bbt_erase_shift);
- len += (len >> this->page_shift) * mtd->oobsize;
+ //len = (1 << this->bbt_erase_shift);
+ len = mtd->erasesize;
+ //len += (len >> this->page_shift) * mtd->oobsize;
+ len += (mtd->pagecnt) * mtd->oobsize;
buf = vmalloc(len);
if (!buf) {
kfree(this->bbt);
this->bbt = NULL;
return -ENOMEM;
}
+
+ if (mtd->dwRetry /*&& (mtd->id>>24) == NAND_MFR_HYNIX*/) {
+ for (i = 0; /*i < READ_RETRY_CHIP_NUM*/; i++) {
+ if (chip_table[i].nand_id == 0 && chip_table[i].nand_id_5th == 0)
+ break;
+ if (mtd->id == chip_table[i].nand_id && mtd->id2 == chip_table[i].nand_id_5th) {
+ #ifdef RETRY_DEBUG
+ printk("get retry table id 0x%x, 0x%x\n", chip_table[i].nand_id, chip_table[i].nand_id_5th);
+ #endif
+ this->cur_chip = &chip_table[i];
+ break;
+ }
+ }
+ if(this->cur_chip != NULL && chip_table[i].nand_id != 0) {
+ #ifdef RETRY_DEBUG
+ printk("search_hynix_retry_table\n");
+ #endif
+ search_hynix_retry_table(mtd, buf, this->retry_pattern);
+ #ifdef RETRY_DEBUG
+ printk("check_retry_table\n");
+ #endif
+ need_save = check_retry_table(mtd, buf, this->retry_pattern);
+ }
+ }
/* Is the bbt at a given page? */
if (td->options & NAND_BBT_ABSPAGE) {
@@ -1171,10 +2354,34 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
if (res)
res = check_create(mtd, buf, bd);
+
+ if (mtd->dwRetry && this->cur_chip != NULL && need_save) {
+ //printk("dannier write_hynix_table\n");
+ /* Write the retry block table to the device ? */
+ res = write_hynix_table(mtd, buf, this->retry_pattern, 0);
+
+ //testing
+ //this->cur_chip->cur_try_times = 5;
+ }
+
+ /* Prevent the rdtry block regions from erasing / writing */
+ if (this->realplanenum)
+ mark_bbt_region_multi(mtd, this->retry_pattern);
+ else
+ mark_bbt_region(mtd, this->retry_pattern);
+
/* Prevent the bbt regions from erasing / writing */
- mark_bbt_region(mtd, td);
- if (md)
- mark_bbt_region(mtd, md);
+ if (this->realplanenum)
+ mark_bbt_region_multi(mtd, td);
+ else
+ mark_bbt_region(mtd, td);
+ if (md) {
+ if (this->realplanenum)
+ mark_bbt_region_multi(mtd, md);
+ else
+ mark_bbt_region(mtd, md);
+ }
+
vfree(buf);
return res;
@@ -1200,15 +2407,20 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
return -EINVAL;
/* Allocate a temporary buffer for one eraseblock incl. oob */
- len = (1 << this->bbt_erase_shift);
- len += (len >> this->page_shift) * mtd->oobsize;
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
+ //len = (1 << this->bbt_erase_shift);
+ len = mtd->erasesize;
+ //len += (len >> this->page_shift) * mtd->oobsize;
+ len += (mtd->pagecnt) * mtd->oobsize;
+ //buf = kmalloc(len, GFP_KERNEL);
+ buf = vmalloc(len);
+ if (!buf) {
+ printk(KERN_ERR "nand_update_bbt: Out of memory\n");
return -ENOMEM;
-
+ }
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
- chip = (int)(offs >> this->chip_shift);
+ //chip = (int)(offs >> this->chip_shift);
+ chip = ((int)(offs >> (10+this->pagecnt_shift)))/(mtd->pageSizek*mtd->blkcnt);
chipsel = chip;
} else {
chip = 0;
@@ -1221,17 +2433,24 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
/* Write the bad block table to the device? */
if (td->options & NAND_BBT_WRITE) {
- res = write_bbt(mtd, buf, td, md, chipsel);
+ if (this->realplanenum)
+ res = write_bbt_multi(mtd, buf, td, md, chipsel);
+ else
+ res = write_bbt(mtd, buf, td, md, chipsel);
if (res < 0)
goto out;
}
/* Write the mirror bad block table to the device? */
if (md && (md->options & NAND_BBT_WRITE)) {
- res = write_bbt(mtd, buf, md, td, chipsel);
+ if (this->realplanenum)
+ res = write_bbt_multi(mtd, buf, md, td, chipsel);
+ else
+ res = write_bbt(mtd, buf, md, td, chipsel);
}
out:
- kfree(buf);
+ vfree(buf);
+ //printk("nand_update_bbt free mem res=%d\n", res);
return res;
}
@@ -1307,6 +2526,7 @@ static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
static int nand_create_badblock_pattern(struct nand_chip *this)
{
struct nand_bbt_descr *bd;
+ //struct mtd_info *mtd = this->priv;
if (this->badblock_pattern) {
pr_warn("Bad block pattern already allocated; not replacing\n");
return -EINVAL;
@@ -1319,6 +2539,10 @@ static int nand_create_badblock_pattern(struct nand_chip *this)
bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
bd->pattern = scan_ff_pattern;
bd->options |= NAND_BBT_DYNAMICSTRUCT;
+ //if ((0xFF&(mtd->id>>24)) == 0x45 || (0xFF&(mtd->id>>24)) == NAND_MFR_HYNIX)
+ bd->options |= NAND_BBT_SCAN2NDPAGE;//All type of flash need to scan 2 page per block.
+ bd->page_offset[0] = this->page_offset[0];
+ bd->page_offset[1] = this->page_offset[1];
this->badblock_pattern = bd;
return 0;
}
@@ -1366,6 +2590,11 @@ int nand_default_bbt(struct mtd_info *mtd)
this->bbt_td = NULL;
this->bbt_md = NULL;
}
+
+ if (this->bbt_td->reserved_block_code && this->realplanenum) {
+ this->bbt_td->reserved_block_code = 0x5;
+ this->bbt_md->reserved_block_code = 0x5;
+ }
if (!this->badblock_pattern)
nand_create_badblock_pattern(this);
@@ -1379,30 +2608,113 @@ int nand_default_bbt(struct mtd_info *mtd)
* @offs: offset in the device
* @allowbbt: allow access to bad block table region
*/
-int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
+int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt, int allow_readfail)
{
struct nand_chip *this = mtd->priv;
int block;
uint8_t res;
/* Get block number * 2 */
- block = (int)(offs >> (this->bbt_erase_shift - 1));
+ //block = (int)(offs >> (this->bbt_erase_shift - 1));
+ block = ((int)(offs >> (10+this->pagecnt_shift - 1)))/mtd->pageSizek;
res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
- pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: "
+ pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08llx: "
"(block %d) 0x%02x\n",
- (unsigned int)offs, block >> 1, res);
+ offs, block >> 1, res);
switch ((int)res) {
case 0x00:
return 0;
case 0x01:
+ #if 0
+ if (allowbbt != 0xFF && this->state == FL_READY) {
+ struct erase_info einfo;
+ int res1 = 0;
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = offs;
+ //einfo.len = (1 << this->bbt_erase_shift);
+ einfo.len = mtd->erasesize;
+ printk("einfo.addr is 0x%llx\n",einfo.addr);
+ printk("einfo.len is 0x%llx\n", einfo.len);
+ res1 = nand_erase_nand(mtd, &einfo, 0xFF);
+ if (res1 < 0)
+ printk("nand_erase_nand addr 0x%llx result is %d\n", einfo.addr, res1);
+ }
+ #endif
return 1;
case 0x02:
- return allowbbt ? 0 : 1;
+ if ((block>>1) < (mtd->blkcnt - 8)) {
+ if (allow_readfail)
+ return 0;
+ else
+ return 1;
+ } else
+ return allowbbt ? 0 : 1;
}
return 1;
}
+/**
+ * nand_isbad_bbt_multi - [NAND Interface] Check if a block is bad
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ */
+int nand_isbad_bbt_multi(struct mtd_info *mtd, loff_t offs, int allowbbt, int allow_readfail)
+{
+ struct nand_chip *this = mtd->priv;
+ int block;
+ uint8_t res;
+
+ /* Get block number * 4 */
+ //block = (int)(offs >> (this->bbt_erase_shift - 2));
+ block = ((int)(offs >> (10+this->pagecnt_shift - 2)))/mtd->pageSizek;
+ res = (this->bbt[block >> 3] >> (block & 0x4)) & 0x0F;
+
+ pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08llx: "
+ "(block %d) 0x%02x\n",
+ offs, block >> 2, res);
+ /*printk("nand_isbad_bbt(): bbt info for offs 0x%08llx: "
+ "(block %d) 0x%02x\n",
+ offs, block >> 2, res);*/
+
+ switch ((int)res) {
+ case 0x00:
+ return 0;
+ case 0x01:
+ case 0x04:
+ case 0x05://1 or both 2 blocks worn out!
+ #if 0
+ if (allowbbt != 0xFF && this->state == FL_READY) {
+ struct erase_info einfo;
+ int res1 = 0;
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = offs;
+ //einfo.len = (1 << this->bbt_erase_shift);
+ einfo.len = mtd->erasesize;
+ printk("einfo.addr is 0x%llx\n",einfo.addr);
+ printk("einfo.len is 0x%llx\n", einfo.len);
+ res1 = nand_erase_nand(mtd, &einfo, 0xFF);
+ if (res1 < 0)
+ printk("nand_erase_nand addr 0x%llx result is %d\n", einfo.addr, res1);
+ }
+ #endif
+ return 1;
+ case 0x0A://usually two block are reserved
+ if ((block>>2) < (mtd->blkcnt - 8)) {
+ if (allow_readfail)
+ return 0;
+ else
+ return 1;
+ } else
+ return allowbbt ? 0 : 1;
+ }
+ return 1;
+}
+
+
EXPORT_SYMBOL(nand_scan_bbt);
EXPORT_SYMBOL(nand_default_bbt);
diff --git a/ANDROID_3.4.5/drivers/mtd/nand/nand_ids.c b/ANDROID_3.4.5/drivers/mtd/nand/nand_ids.c
index af4fe8ca..723379af 100644
--- a/ANDROID_3.4.5/drivers/mtd/nand/nand_ids.c
+++ b/ANDROID_3.4.5/drivers/mtd/nand/nand_ids.c
@@ -164,6 +164,373 @@ struct nand_flash_dev nand_flash_ids[] = {
{NULL,}
};
+#define MLC 1
+#define SLC 0
+#define WD8 0
+struct WMT_nand_flash_dev WMT_nand_flash_ids[] = {
+
+
+ {0xADD314A5, 4096, 2048, 64, 0x40000, 5, 125, 127, 0, WD8, 1, 0, 1, MLC, 4, 0x140A0C12, 0x64780046, 0, 0, 0, 0x00000000, 0x00000000, "HY27UT088G2M-T(P)", LP_OPTIONS},
+ {0xADF1801D, 1024, 2048, 64, 0x20000, 4, 0, 1, 0, WD8, 4, 0, 1, SLC, 4, 0x140A0F12, 0x64780064, 0, 0, 0, 0x00000000, 0x00000000, "HY27UF081G2A", LP_OPTIONS},
+ {0xADF1001D, 1024, 2048, 64, 0x20000, 4, 0, 1, 0, WD8, 4, 0, 1, SLC, 4, 0x140A0C12, 0x64780046, 0, 0, 0, 0x00000000, 0x00000000, "H27U1G8F2BFR", LP_OPTIONS},
+ {0xADD59425, 4096, 4096, 218, 0x80000, 5, 125, 127, 0, WD8, 1, 0, 1, MLC, 12, 0x140A0F12, 0x64780064, 0, 0, 0, 0x00000000, 0x00000000, "HY27UAG8T2A", LP_OPTIONS},
+ {0xADD7949A, 2048, 8192, 448,0x200000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 24, 0x140A0C12, 0x64500064, 0, 0, 0, 0x74420000, 0x00000000, "H27UBG8T2ATR", LP_OPTIONS},
+ {0xADD5949A, 1024, 8192, 448,0x200000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 24, 0x140A0C12, 0x64640064, 0, 0, 0, 0x74420000, 0x00000000, "H27UAG8T2BTR-BC", LP_OPTIONS},
+ {0xADD794DA, 2048, 8192, 640,0x200000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 24, 0x10080A12, 0x645000C8, 0, 1, 1, 0x74C30000, 0x00000000, "H27UBG8T2BTR", LP_OPTIONS},
+ {0xADD79491, 2048, 8192, 640,0x200000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10060812, 0x647800C8, 0, 1, 1, 0x00000000, 0x00003FFF, "H27UBG8T2CTR-F20", LP_OPTIONS},
+ {0xADDE94DA, 4096, 8192, 640,0x200000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10060812, 0x647800C8, 1, 1, 1, 0x00000000, 0x00003FFF, "H27UCG8T2ATR-F20", LP_OPTIONS},
+ {0xADDE94EB, 2048,16384,1280,0x400000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10060812, 0x647800C8, 0, 1, 1, 0x74440000, 0x00003FFF, "H27UCG8T2BTR-F20", LP_OPTIONS},
+
+ {0xECD314A5, 4096, 2048, 64, 0x40000, 5, 127, 0, 0, WD8, 1, 0, 1, MLC, 4, 0x140A0C12, 0x64400064, 0, 0, 0, 0x00000000, 0x00000000, "K9G8G08X0A", LP_OPTIONS},
+ {0xECD59429, 4096, 4096, 218, 0x80000, 5, 127, 0, 0, WD8, 1, 0, 1, MLC, 12, 0x140A0F12, 0x64400064, 0, 0, 0, 0x00000000, 0x00000000, "K9GAG08UXD", LP_OPTIONS},
+ {0xECF10095, 1024, 2048, 64, 0x20000, 4, 0, 1, 0, WD8, 4, 0, 1, SLC, 4, 0x140a1412, 0x64400064, 0, 0, 0, 0x00000000, 0x00000000, "K9F1G08U0B", LP_OPTIONS},
+ {0xECD514B6, 4096, 4096, 128, 0x80000, 5, 127, 0, 0, WD8, 1, 0, 1, MLC, 4, 0x140A0C12, 0x64400064, 0, 0, 0, 0x00000000, 0x00000000, "K9GAG08U0M", LP_OPTIONS},
+ {0xECD755B6, 8192, 4096, 128, 0x80000, 5, 127, 0, 0, WD8, 1, 0, 1, MLC, 4, 0x140A0C12, 0x64400064, 0, 0, 0, 0x00000000, 0x00000000, "K9LBG08U0M", LP_OPTIONS},
+ {0xECD58472, 2048, 8192, 436,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 24, 0x140A0F12, 0x6440012C, 0, 0, 0, 0x00000000, 0x00000000, "K9GAG08U0E", LP_OPTIONS},
+ {0xECD7947A, 4096, 8192, 448,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 24, 0x140A0F12, 0x6478012C, 0, 0, 1, 0x54430000, 0x00003FFF, "K9GBG08U0A", LP_OPTIONS},
+ {0xECD59476, 2048, 8192, 448,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 24, 0x140A0C12, 0x6478012C, 0, 0, 0, 0x00000000, 0x00000000, "K9GAG08U0F", LP_OPTIONS},
+ {0xECD7947E, 4096, 8192,1024,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 40, 0x140B0B12, 0x6478012C, 0, 1, 1, 0x64440000, 0x00000000, "K9GBG08U0B", LP_OPTIONS},
+ {0xECDED57A, 8192, 8192, 640,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 24, 0x140A0C12, 0x6478012C, 0, 0, 1, 0x58430000, 0x00000000, "K9LCG08U0A", LP_OPTIONS},
+ {0xECDED57E, 8192, 8192,1024,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 40, 0x140B0C12, 0x6478012C, 0, 1, 1, 0x68440000, 0x00000000, "K9LCG08U0B", LP_OPTIONS},
+
+ {0x98D594BA, 4096, 4096, 218, 0x80000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 12, 0x190F0F12, 0x64b40070, 0, 0, 0, 0x00000000, 0x00000000, "TC58NVG4D1DTG0", LP_OPTIONS},
+ {0x98D19015, 1024, 2048, 64, 0x20000, 4, 0, 1, 0, WD8, 4, 0, 1, SLC, 4, 0x140A0C12, 0x64B40011, 0, 0, 0, 0x00000000, 0x00000000, "TC58NVG0S3ETA00", LP_OPTIONS},
+ {0x98D59432, 2048, 8192, 448,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 24, 0x100A0C12, 0x64B40084, 0, 0, 0, 0x00000000, 0x00000000, "TC58NVG4D2FTA00", LP_OPTIONS},
+ {0x98D58432, 2048, 8192, 640,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 40, 0x100A0F12, 0x64B4012C, 0, 1, 1, 0x72560000, 0x00000000, "TC58NVG4D2HTA00", LP_OPTIONS},
+ {0x98DE8493, 2048,16384,1280,0x400000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x64B4012C, 0, 1, 1, 0x72570000, 0x00000000, "TC58NVG6DCJTA00", LP_OPTIONS},
+ {0x98DE8493, 2048,16384,1280,0x400000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x64B4012C, 2, 1, 1, 0x72D70000, 0x00000000, "TC58TEG6DCJTA00-DDR", LP_OPTIONS},
+ {0x98D79432, 4096, 8192, 448,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 24, 0x100A0C12, 0x64FF0078, 0, 0, 0, 0x76550000, 0x00000000, "TC58NVG5D2FTAI0", LP_OPTIONS},
+ {0x98D79432, 4096, 8192, 640,0x100000, 5, 0, 127, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x64B4012C, 0, 1, 1, 0x76560000, 0x00000000, "TC58NVG5D2HTA00", LP_OPTIONS},
+ {0x98D78493, 1024,16384,1280,0x400000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x6478012C, 1, 1, 1, 0x72570000, 0x00000000, "TC58TEG5DCJTA00", LP_OPTIONS},
+ {0x98DE9493, 2048,16384,1280,0x400000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x6478012C, 1, 1, 1, 0x76570000, 0x00000000, "TC58TEG6DDKTA00", LP_OPTIONS},
+ {0x98D78493, 1024,16384,1280,0x400000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x6478012C, 1, 1, 1, 0x72500000, 0x00000000, "TC58TEG5DCKTA00", LP_OPTIONS},
+
+ {0x2C88044B, 4096, 8192, 448,0x200000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 24, 0x100B1210/*0x321E32C8*/, 0x647800C8, 0, 0, 0, 0x00000000, 0x00000000, "MT29F64G08CBAAA", LP_OPTIONS},
+ {0x2C88044B, 4096, 8192, 448,0x200000, 5, 0, 255, 0, WD8, 1, 0, 1, MLC, 24, 0x100B1210/*0x321E32C8*/, 0x647800C8, 0, 0, 0, 0x00000000, 0x00000000, "MT29F128G08CFAAA", LP_OPTIONS},
+ {0x2C48044A, 2048, 4096, 224,0x100000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 12, 0x100B1210/*0x321E32C8*/, 0x647800C8, 0, 0, 0, 0xA5000000, 0x00003FFF, "MT29F16G08CBACA", LP_OPTIONS},
+ {0x2C68044A, 4096, 4096, 224,0x100000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 12, 0x100B1210/*0x321E32C8*/, 0x647800C8, 0, 0, 0, 0xA9000000, 0x00003FFF, "MT29F32G08CBACA", LP_OPTIONS},
+ {0x2C64444B, 4096, 8192, 744,0x200000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 40, 0x100B1210/*0x321E32C8*/, 0x647800C8, 0, 1, 0, 0xA9000000, 0x00000000, "MT29F64G08CBABA", LP_OPTIONS},
+ {0x2C44444B, 2048, 8192, 744,0x200000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 40, 0x100B1210/*0x321E32C8*/, 0x647800C8, 0, 1, 0, 0xA9000000, 0x00000000, "MT29F32G08CBADA", LP_OPTIONS},
+
+ {0x45DE9493, 2048,16384,1280,0x400000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x64B40140, 1, 1, 1, 0x76570000, 0x00003FFF, "SDTNQGAMA-008G", LP_OPTIONS},
+ {0x45D78493, 1024,16384,1280,0x400000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x64B40140, 1, 1, 1, 0x72570000, 0x00000000, "SDTNQFAMA-004G", LP_OPTIONS},
+
+ {0x8968044A, 4096, 4096, 224,0x100000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 12, 0x100B1210, 0x647800C8, 0, 0, 0, 0xA9000000, 0x00003FFF, "JS29F32G08AAME1", LP_OPTIONS},
+ {0x8988244B, 4096, 8192, 448,0x200000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 24, 0x10070A12, 0x64400046, 0, 0, 0, 0xA9000000, 0x00000000, "JS29F64G08AAME1", LP_OPTIONS},
+ {0x8988244B, 4096, 8192, 744,0x200000, 5, 0, 1, 0, WD8, 1, 0, 1, MLC, 40, 0x10070A12, 0x64B40046, 0, 0, 0, 0xA9840000, 0x00000000, "JS29F64G08AAMF1", LP_OPTIONS},
+
+
+ {0xC2F1801D, 1024, 2048, 64, 0x20000, 4, 0, 1, 0, WD8, 4, 0, 1, SLC, 4, 0x140A0F12, 0x64400064, 0, 0, 0, 0x00000000, 0x00000000, "MX30LF1G08AA", LP_OPTIONS},
+
+ {0x92F18095, 1024, 2048, 64, 0x20000, 4, 0, 1, 0, WD8, 4, 0, 1, SLC, 4, 0x140A0C12, 0x64400064, 0, 0, 0, 0x40000000, 0x00000000, "PSU1GA(3/4)0HT", LP_OPTIONS},
+ {0,}
+ /*add new product item here.*/
+};
+
+struct nand_read_retry_param chip_table[] = {
+#ifdef CONFIG_MTD_NAND_WMT
+ //Hynix
+ {
+ .magic = "readretry",
+ .nand_id = 0xADD794DA,
+ .nand_id_5th = 0x74C30000,
+ .eslc_reg_num = 5,
+ .eslc_offset = {0xa0, 0xa1, 0xb0, 0xb1, 0xc9},
+ .eslc_set_value = {0x26, 0x26, 0x26, 0x26, 0x1},
+ .retry_reg_num = 4,
+ .retry_offset = {0xa7, 0xad, 0xae, 0xaf},
+ .retry_value = {0, 0x6,0xa, 0x6, 0x0, 0x3, 0x7, 0x8, 0, 0x6, 0xd, 0xf, 0x0, 0x9, 0x14, 0x17, 0x0, 0x0, 0x1a, 0x1e, 0x0, 0x0, 0x20, 0x25},
+ .total_try_times = 6,
+ .cur_try_times = -1,
+ .set_parameter = hynix_set_parameter,
+ .get_parameter = hynix_get_parameter,
+ .get_otp_table = NULL,
+ .retry = 0
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0xADDE94DA,
+ .nand_id_5th = 0,
+ .eslc_reg_num = 4,
+ .eslc_offset = {0xb0, 0xb1, 0xa0, 0xa1},
+ .eslc_set_value = {0xa, 0xa, 0xa, 0xa},
+ .retry_reg_num = 8,
+ .retry_offset = {0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf},
+ .otp_len = 2,
+ .otp_offset = {0xff, 0xcc},
+ .otp_data = {0x40, 0x4d},
+ .total_try_times = 7,
+ .cur_try_times = -1,
+ .set_parameter = hynix_set_parameter,
+ .get_parameter = hynix_get_parameter,
+ .get_otp_table = hynix_get_otp,
+ .retry = 0
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0xADDE94EB,
+ .nand_id_5th = 0x74440000,
+ .eslc_reg_num = 4,
+ .eslc_offset = {0xa0, 0xa1, 0xa7, 0xa8},
+ .eslc_set_value = {0xa, 0xa, 0xa, 0xa},
+ .retry_reg_num = 8,
+ .retry_offset = {0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7},
+ .otp_len = 2,
+ .otp_offset = {0xae, 0xb0},
+ .otp_data = {0x00, 0x4d},
+ .total_try_times = 7,
+ .cur_try_times = -1,
+ .set_parameter = hynix_set_parameter,
+ .get_parameter = hynix_get_parameter,
+ .get_otp_table = hynix_get_otp,
+ .retry = 0
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0xADD79491,
+ .nand_id_5th = 0x0,
+ .eslc_reg_num = 4,
+ .eslc_offset = {0xa0, 0xa1, 0xa7, 0xa8},
+ .eslc_set_value = {0xa, 0xa, 0xa, 0xa},
+ .retry_reg_num = 8,
+ .retry_offset = {0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7},
+ .otp_len = 2,
+ .otp_offset = {0xae, 0xb0},
+ .otp_data = {0x00, 0x4d},
+ .total_try_times = 7,
+ .cur_try_times = -1,
+ .set_parameter = hynix_set_parameter,
+ .get_parameter = hynix_get_parameter,
+ .get_otp_table = hynix_get_otp,
+ .retry = 0
+ },
+ //Toshiba
+ {
+ .magic = "readretry",
+ .nand_id = 0x98D58432,
+ .nand_id_5th = 0x72560000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x98DE8493,
+ .nand_id_5th = 0x72570000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x98DE8493,
+ .nand_id_5th = 0x72D70000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x98DE9482,
+ .nand_id_5th = 0x72570000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x98D79432,
+ .nand_id_5th = 0x76560000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x98D78493,
+ .nand_id_5th = 0x72570000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x98D78493,
+ .nand_id_5th = 0x72500000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x98DE9493,
+ .nand_id_5th = 0x76570000,
+ .retry_reg_num = 4,
+ .retry_offset = {4, 5, 6, 7},
+ .retry_value = {0, 0, 0, 0, 4, 4, 4, 4, 0x7c, 0x7c, 0x7c, 0x7c, 0x78, 0x78, 0x78, 0x78, 0x74, 0x74, 0x74, 0x74, 0x8, 0x8, 0x8, 0x8},
+ .total_try_times = 6,
+ .cur_try_times = 0,
+ .set_parameter = toshiba_set_parameter,
+ .get_parameter = toshiba_get_parameter,
+ .retry = 0,
+ },
+
+ //samsung
+ {
+ .magic = "readretry",
+ .nand_id = 0xECD7947E,
+ .nand_id_5th = 0x64440000,
+ .retry_reg_num = 4,
+ .retry_offset = {0xA7, 0xA4, 0xA5, 0xA6},
+ .retry_def_value = {0, 0, 0, 0},
+ .retry_value = {5, 0xA, 0, 0, 0x28, 0, 0xEC, 0xD8, 0xED, 0xF5, 0xED, 0xE6, 0xA, 0xF, 5, 0,
+ 0xF, 0xA, 0xFB, 0xEC, 0xE8, 0xEF, 0xE8, 0xDC, 0xF1, 0xFB, 0xFE, 0xF0, 0xA, 0x0, 0xFB, 0xEC,
+ 0xD0, 0xE2, 0xD0, 0xC2, 0x14, 0xF, 0xFB, 0xEC, 0xE8, 0xFB, 0xE8, 0xDC, 0x1E, 0x14, 0xFB, 0xEC,
+ 0xFB, 0xFF, 0xFB, 0xF8, 0x7, 0xC, 0x2, 0},
+ .total_try_times = 14,
+ .cur_try_times = 0,
+ .set_parameter = samsung_set_parameter,
+ .get_parameter = samsung_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0xECDED57E,
+ .nand_id_5th = 0x68440000,
+ .retry_reg_num = 4,
+ .retry_offset = {0xA7, 0xA4, 0xA5, 0xA6},
+ .retry_def_value = {0, 0, 0, 0},
+ .retry_value = {5, 0xA, 0, 0, 0x28, 0, 0xEC, 0xD8, 0xED, 0xF5, 0xED, 0xE6, 0xA, 0xF, 5, 0,
+ 0xF, 0xA, 0xFB, 0xEC, 0xE8, 0xEF, 0xE8, 0xDC, 0xF1, 0xFB, 0xFE, 0xF0, 0xA, 0x0, 0xFB, 0xEC,
+ 0xD0, 0xE2, 0xD0, 0xC2, 0x14, 0xF, 0xFB, 0xEC, 0xE8, 0xFB, 0xE8, 0xDC, 0x1E, 0x14, 0xFB, 0xEC,
+ 0xFB, 0xFF, 0xFB, 0xF8, 0x7, 0xC, 0x2, 0},
+ .total_try_times = 14,
+ .cur_try_times = 0,
+ .set_parameter = samsung_set_parameter,
+ .get_parameter = samsung_get_parameter,
+ .retry = 0,
+ },
+ //Sandisk
+ {
+ .magic = "readretry",
+ .nand_id = 0x45DE9493,
+ .nand_id_5th = 0x76570000,
+ .retry_reg_num = 3,
+ .retry_offset = {4, 5, 7},
+ .retry_def_value = {0, 0, 0, 0xFF, 0xFF},
+ .retry_value = {0xF0, 0, 0xF0, 0xE0, 0, 0xE0, 0xD0, 0, 0xD0, 0x10, 0, 0x10, 0x20, 0, 0x20, 0x30, 0, 0x30,
+ 0xC0, 0, 0xD0, 0x00, 0, 0x10, 0x00, 0, 0x20, 0x10, 0, 0x20, 0xB0, 0, 0xD0, 0xA0, 0, 0xD0,
+ 0x90, 0, 0xD0, 0xB0, 0, 0xC0, 0xA0, 0, 0xC0, 0x90, 0, 0xC0,//lower page retry parameter
+ 0x00, 0xF0, 0, 0x0F, 0xE0, 0, 0x0F, 0xD0, 0, 0x0E, 0xE0, 0, 0x0E, 0xD0, 0, 0x0D, 0xF0, 0,
+ 0x0D, 0xE0, 0, 0x0D, 0xD0, 0, 0x01, 0x10, 0, 0x02, 0x20, 0, 0x02, 0x10, 0, 0x03, 0x20, 0,
+ 0x0F, 0x00, 0, 0x0E, 0xF0, 0, 0x0D, 0xC0, 0, 0x0F, 0xF0, 0, 0x01, 0x00, 0, 0x02, 0x00, 0,
+ 0x0D, 0xB0, 0, 0x0C, 0xA0, 0},//upper page retry parameter
+ .otp_len = 9,
+ .otp_offset = {0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC},
+ .otp_data = {0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .total_try_times = 0x1410,//bit15~8 for upper page, bit7~0 for lower page
+ .cur_try_times = -1,
+ .set_parameter = sandisk_set_parameter,
+ .get_parameter = sandisk_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x45D78493,
+ .nand_id_5th = 0x72570000,
+ .retry_reg_num = 3,
+ .retry_offset = {4, 5, 7},
+ .retry_def_value = {0, 0, 0, 0xFF, 0xFF},
+ .retry_value = {0xF0, 0, 0xF0, 0xE0, 0, 0xE0, 0xD0, 0, 0xD0, 0x10, 0, 0x10, 0x20, 0, 0x20, 0x30, 0, 0x30,
+ 0xC0, 0, 0xD0, 0x00, 0, 0x10, 0x00, 0, 0x20, 0x10, 0, 0x20, 0xB0, 0, 0xD0, 0xA0, 0, 0xD0,
+ 0x90, 0, 0xD0, 0xB0, 0, 0xC0, 0xA0, 0, 0xC0, 0x90, 0, 0xC0,//lower page retry parameter
+ 0x00, 0xF0, 0, 0x0F, 0xE0, 0, 0x0F, 0xD0, 0, 0x0E, 0xE0, 0, 0x0E, 0xD0, 0, 0x0D, 0xF0, 0,
+ 0x0D, 0xE0, 0, 0x0D, 0xD0, 0, 0x01, 0x10, 0, 0x02, 0x20, 0, 0x02, 0x10, 0, 0x03, 0x20, 0,
+ 0x0F, 0x00, 0, 0x0E, 0xF0, 0, 0x0D, 0xC0, 0, 0x0F, 0xF0, 0, 0x01, 0x00, 0, 0x02, 0x00, 0,
+ 0x0D, 0xB0, 0, 0x0C, 0xA0, 0},//upper page retry parameter
+ .otp_len = 9,
+ .otp_offset = {0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC},
+ .otp_data = {0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .total_try_times = 0x1410,//bit15~8 for upper page, bit7~0 for lower page
+ .cur_try_times = -1,
+ .set_parameter = sandisk_set_parameter,
+ .get_parameter = sandisk_get_parameter,
+ .retry = 0,
+ },
+
+ //Micron
+ {
+ .magic = "readretry",
+ .nand_id = 0x2C64444B,
+ .nand_id_5th = 0xA9000000,
+ .retry_reg_num = 1,
+ .retry_offset = {0x89},
+ .retry_def_value = {0},
+ .retry_value = {1, 2, 3, 4, 5, 6, 7},
+ .total_try_times = 7,
+ .cur_try_times = 0,
+ .set_parameter = micron_set_parameter,
+ .get_parameter = micron_get_parameter,
+ .retry = 0,
+ },
+ {
+ .magic = "readretry",
+ .nand_id = 0x2C44444B,
+ .nand_id_5th = 0xA9000000,
+ .retry_reg_num = 1,
+ .retry_offset = {0x89},
+ .retry_def_value = {0},
+ .retry_value = {1, 2, 3, 4, 5, 6, 7},
+ .total_try_times = 7,
+ .cur_try_times = 0,
+ .set_parameter = micron_set_parameter,
+ .get_parameter = micron_get_parameter,
+ .retry = 0,
+ },
+#endif
+ {
+ .nand_id = 0,
+ .nand_id_5th = 0,
+ }
+};
+
+
+
/*
* Manufacturer ID list
*/
@@ -176,13 +543,18 @@ struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_STMICRO, "ST Micro"},
{NAND_MFR_HYNIX, "Hynix"},
{NAND_MFR_MICRON, "Micron"},
+ {NAND_MFR_SANDISK, "Sandisk"},
{NAND_MFR_AMD, "AMD"},
+ {NAND_MFR_INTEL, "Intel"},
{NAND_MFR_MACRONIX, "Macronix"},
+ {NAND_MFR_MXIC, "Mxic"},
+ {NAND_MFR_MIRA, "Mira"},
{0x0, "Unknown"}
};
EXPORT_SYMBOL(nand_manuf_ids);
EXPORT_SYMBOL(nand_flash_ids);
+EXPORT_SYMBOL(WMT_nand_flash_ids);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
diff --git a/ANDROID_3.4.5/drivers/mtd/nand/wmt_nand.c b/ANDROID_3.4.5/drivers/mtd/nand/wmt_nand.c
new file mode 100755
index 00000000..0fa2bde0
--- /dev/null
+++ b/ANDROID_3.4.5/drivers/mtd/nand/wmt_nand.c
@@ -0,0 +1,8285 @@
+/*++
+linux/drivers/mtd/nand/wmt_nand.c
+
+Copyright (c) 2008 WonderMedia Technologies, Inc.
+
+This program is free software: you can redistribute it and/or modify it under the
+terms of the GNU General Public License as published by the Free Software Foundation,
+either version 2 of the License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details.
+You should have received a copy of the GNU General Public License along with
+this program. If not, see <http://www.gnu.org/licenses/>.
+
+WonderMedia Technologies, Inc.
+10F, 529, Chung-Cheng Road, Hsin-Tien, Taipei 231, R.O.C.
+--*/
+
+//#include <linux/config.h>
+#include <linux/module.h>
+/*#include <linux/types.h>*/
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+/*#include <linux/platform_device.h>*/
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+/*#include <linux/clk.h>*/
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+/*#include <linux/mtd/partitions.h>*/
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/reboot.h> //Lch
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/sizes.h>
+#include <mach/irqs.h>
+#include <mach/hardware.h>
+#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
+#include "wmt_nand.h"
+
+
+/* memzero(): fall back to a plain memset() when no other header provides it. */
+#ifndef memzero
+#define memzero(s, n) memset ((s), 0, (n))
+#endif
+/* Uncomment RETRY_DEBUG to enable read-retry debug output in this driver. */
+//#define RETRY_DEBUG
+/* WMT_HW_RDMZ: presumably selects the controller's hardware (de-)randomizer
+ * path (the commented-out WMT_SW_RDMZ guard below is the software variant)
+ * -- TODO confirm against the code that tests this macro. */
+#define WMT_HW_RDMZ
+//#ifdef WMT_SW_RDMZ
+unsigned int rdmz[BYTE_SEED]= {
+ 0xC5F7B49E, 0x85AD42B6, 0x1888A48B, 0xFBA90A42, 0xE20E7129, 0x37E8086E, 0x6F1C1918, 0x31510E20,
+ 0x382771CB, 0x6107F49D, 0x901B6D0B, 0x3CD489E1, 0xA9B9CE07, 0x6B41AC61, 0x749F181D, 0xA7DDA658,
+ 0x405276C0, 0xB67EFB43, 0xC5EE35A6, 0xF8406534, 0x73D8093A, 0xD98028A3, 0x084CE1AF, 0xB4744210,
+ 0x951E02A0, 0xF657482D, 0x6C64F9D0, 0x68DB8651, 0xD1E64A45, 0x3A0FCB39, 0x9C9BB663, 0x05322DAE,
+ 0xA4F40077, 0x801BA102, 0xB73BE0DD, 0xA2E34B6A, 0x5A50E576, 0x83CD0C99, 0x63C1440B, 0x2F82661D,
+ 0x6846C973, 0xA74C29E6, 0x880E86A2, 0xB1D7E000, 0xF9B6F2B5, 0x71E5F26C, 0xE707DE1E, 0x439D5A63,
+ 0x1F697817, 0x23DFB560, 0xE87F6BD0, 0xBD1BBCC3, 0xB1D3A074, 0x6C1B7C0A, 0xE2823FDB, 0x17F45131,
+ 0x9082625D, 0xDFD364FD, 0x88DF4E2B, 0xB6FE752D, 0x5B04FF38, 0xB27648A9, 0x8C4EF297, 0x1C595F00,
+ 0x9E7B4520, 0x826ADDFF, 0xF83FE0EE, 0xF981B0B0, 0x1F9233D7, 0xA2C148CB, 0xF73C908E, 0x18F36125,
+ 0xE45D3D77, 0xB77BA7EA, 0x6D962E25, 0xFF4BF3B8, 0x7C06714F, 0x812DFFDA, 0xE499B45A, 0x73498684,
+ 0x11DCD8C1, 0x0FE5FAEC, 0x882C8503, 0x1CBB95F8, 0x62889F09, 0xF6798B10, 0x7FFE1FE9, 0x464DBD35,
+ 0x476EA249, 0xD7D7428D, 0xD885740A, 0xA034FA2C, 0xB37FD49C, 0x9AC07AD5, 0xAEFA9F54, 0x80B1AC25,
+ 0xAFE642C5, 0x55249024, 0xC3BD79F8, 0x78D3CAB0, 0x71523E07, 0x179AD53B, 0x4C6DE12B, 0x545E4957,
+ 0xE19CDBF1, 0xB9CA4748, 0xD401EF16, 0x0C7FD0DC, 0x2D55D75B, 0x8169F899, 0xBE415FAA, 0x45355DFD,
+ 0x1EE42A38, 0x3E167903, 0x838D4BAE, 0xACB42144, 0x8A9970D3, 0x978DB4A5, 0x45A09237, 0x431554E5,
+ 0xAAD8AFF7, 0x4F260392, 0xF60E8E22, 0xDEFCBB1D, 0xA6903D2E, 0x0C041572, 0x32A1E06E, 0xD41C2E5A,
+ 0xE43F79E1, 0xD562B75D, 0x53B35557, 0x871CF712, 0x06130B69, 0x4FE6CACB, 0xA79121F3, 0x31D1804E,
+ 0xA6CDBB55, 0x2B31D900, 0x6F8D96A6, 0xF90DFE42, 0x3F8E6A88, 0x5D5F338E, 0x19BEFA53, 0xA80B5EC5,
+ 0x33A4BCC7, 0x7C6435D9, 0xE334EF6D, 0xDABCCF28, 0x0B1E822E, 0x6BC9A2E7, 0xC12ECFFD, 0xCB2410AA,
+ 0x5E239332, 0xD599FC9D, 0xD2ADA8FC, 0x985F0C4C, 0xA3FBD68F, 0x1A6857C8, 0x7CF1FA13, 0xBEC591B0,
+ 0x4E7219DC, 0xC7B5CA12, 0x31730D81, 0x954B0433, 0xFA399921, 0x17871477, 0xA42D4816, 0xAC692951,
+ 0x3346763F, 0x8097EFF0, 0x9727B982, 0x5D7D302F, 0xB4D28FAB, 0x33353379, 0xB438C5BB, 0xE49DF42E,
+ 0xE6E4083B, 0x82BB1576, 0xFF1675C3, 0x5B33BD3D, 0xDC018912, 0xC9886442, 0xA8F895ED, 0x99E15C12,
+ 0x45E855E8, 0xA73B2CD4, 0x290C2256, 0x510A601B, 0xB2DC458E, 0x9493508F, 0xEB9E844E, 0x0796D9AE,
+ 0x79741BD6, 0xEAAC9AE2, 0xC1990396, 0x3BB91B8F, 0x51D3287A, 0x9EAECDDD, 0x10EEC66D, 0xC9EA20D4,
+ 0xCAE1855A, 0xA7C42760, 0x3DBF5142, 0xDD2E56F2, 0xE7C71747, 0x1202F5B2, 0xF0444344, 0x2382331B,
+ 0xCF4AA7A2, 0xE037CA0B, 0x9CC2706C, 0xB7AA6F63, 0x6ABFBB08, 0x5DF9FE35, 0xBF95CB8A, 0xEA64D353,
+ 0xBB5DB139, 0xF25BBBB3, 0xB069B05E, 0x1FA571D2, 0xCCB68970, 0xB2FA065B, 0xAC52ABC8, 0xE3C72445,
+ 0x70F92FFD, 0x3292E21F, 0x2FC6615E, 0x329E2283, 0x9130F29F, 0x8736745B, 0x802463EF, 0xF2173C18,
+ 0xC1EA46D0, 0x0F1631C4, 0x226965D6, 0x2537F5C9, 0x26875CB0, 0x05C9666E, 0x25EAFDDC, 0x9F585A5C,
+ 0x12D33D3B, 0xF76DD669, 0x81303E96, 0x0CD91D67, 0x8B7EE682, 0xC306750F, 0x36B85254, 0xCB0AD397,
+ 0x4DB9750B, 0xFB0FC7F9, 0x442540F0, 0x758785F8, 0xE7E514E6, 0xBF6E804A, 0x6B7A2EF8, 0xA41E4A67,
+ 0x57B36655, 0xE5E72D5D, 0xC4C5AA32, 0x43A2988E, 0x5A45A4D2, 0x40D6B8DA, 0xBD39BF62, 0x1CBFD58C,
+ 0xF72511B6, 0x651E46A7, 0x8F0D90C6, 0x9552850B, 0x87D4BEA3, 0x7CD7B9C6, 0x86046AF7, 0x462BB9D7,
+ 0xB0DA3C41, 0x7A95F448, 0x5021FF8F, 0x093EB834, 0xBD0EFD67, 0x72C81437, 0xB2E38763, 0xD1BF8C4A,
+ 0x889789F4, 0x52D00D1C, 0xD8D07299, 0xAC5A2B20, 0xC89C393B, 0x5636B492, 0xD375FC40, 0x89F81123,
+ 0xB3EA1B56, 0xC7310408, 0x3A3449A0, 0x4C1AE419, 0xF55CEDA3, 0x01415BEA, 0xF2A0F073, 0x31774DF5,
+ 0x00E68A8C, 0x695E5496, 0xE7749B58, 0x77327028, 0x6CD335BB, 0x98468D74, 0xDE16F10D, 0x7138FA79,
+ 0x5ED8D8F2, 0x54870136, 0xCDEE53A2, 0x3DB7D1AA, 0xF6754B8C, 0xC1088C28, 0xF3E5EBED, 0x567A3339,
+ 0xA2F60ACE, 0x994B5135, 0x5D35F7F0, 0x50FCF79A, 0xB0E1BED8, 0xAA14A632, 0xA04F3F82, 0xAC8BE3A9,
+ 0xCFB5AC16, 0xF484B91F, 0x10E64685, 0xE2B13DAA, 0xEC2E1E35, 0x4623393F, 0x9B81213F, 0x5C5A6F27,
+ 0xB1C6E1D0, 0xAF00C849, 0x3C7AC4B2, 0x24C9E2A0, 0x0FE1BA98, 0x1D810BBC, 0x8FDC584F, 0x927B1026,
+ 0x2566B32E, 0xBF440303, 0xED4D467E, 0x19EFBCB4, 0x31C80176, 0xDB209CD7, 0x406174B1, 0x4DA4B447,
+ 0x134F6EC4, 0xBC1220F6, 0xA75D2836, 0xDEB8BC5E, 0xFC48D6DE, 0x3A78CE0B, 0x3D991297, 0xE5EFADB5,
+ 0xEF9EB74C, 0x656D03E1, 0xBBA2BA8D, 0xE6E8C8A7, 0x3C4D86B7, 0x4ABE231B, 0x4A272C4D, 0xA920C151,
+ 0x8846417D, 0x55F99831, 0x7A627F14, 0x6FC991E5, 0xA3D515B2, 0x09F2B1F1, 0x5267C177, 0x284D79BC,
+ 0xA3AA9068, 0x83AB087B, 0x9475DA03, 0x82C0D0D8, 0xE0E242F6, 0x0E466BFE, 0x867FAF59, 0x59DF8EE2,
+ 0xE5AFEA82, 0x20EBD203, 0xC076152F, 0x4469C75B, 0x04047376, 0xF75654F0, 0x51B16CEC, 0xFCB7DD6A,
+ 0x2ECBBD1F, 0xB1BD247E, 0xB0F4FF7C, 0x690F1271, 0x7EB7C4EB, 0x9FB65038, 0x50D674D3, 0x36D6D65E,
+ 0x17E550E1, 0xC63458A1, 0x924C5223, 0x4B117295, 0xFA8295D6, 0x59EC8C93, 0x1E75A586, 0xF64A8961,
+ 0x842450ED, 0x90ECE657, 0x033CE78B, 0x03526381, 0xDFBDE0F7, 0x5430CD5D, 0x3D735887, 0x32476AE2,
+ 0xBD427ACC, 0x034BE2B9, 0xA250C775, 0x3F6060EC, 0x1F5A7A66, 0xD805FA64, 0x3EDE30B2, 0xF949F901,
+ 0x65568178, 0x6B23E8F7, 0x168608AA, 0x99F8DD2A, 0x3805726A, 0xCC6B8165, 0x0B2500B7, 0xBB48F09D,
+ 0x31400FF0, 0x6E914B37, 0x2C98C243, 0x53D551B5, 0x70A8691A, 0xAB51BDAC, 0xC742414E, 0x0E9B63EB,
+ 0x3FA0A9B5, 0x4EC5D5B7, 0x3728C137, 0x3E83B6C9, 0xDE7C3573, 0x387AF7B0, 0x463238EF, 0xCD371BC3,
+ 0x11C559F9, 0x7208DD6E, 0xE37C28B2, 0x3E92B719, 0x88CA0F8F, 0x75E5C16E, 0x85FC0451, 0x814BFB38,
+ 0x132D2A52, 0xDE0B3041, 0x99785344, 0xA6EFB8F4, 0x865DACF8, 0xF4B3FB1A, 0x7E91873E, 0xA777AB7F,
+ 0x588FD4D8, 0x41B9200D, 0x5C03A928, 0x035EA31D, 0x614B7336, 0xE1989B85, 0x2C67C9F7, 0x476622A1,
+ 0xFC8C5FF3, 0xFE4AEF65, 0x41D3E473, 0x1541A4E1, 0x1BB44300, 0xF8FB69C3, 0x3DB391DE, 0x63D8C533,
+ 0x526F419F, 0x031664C2, 0x85650B07, 0x624C1624, 0x324BAA7E, 0x03B4E90D, 0xB6E3B461, 0xB3445605,
+ 0x4A4128AF, 0x5E945F59, 0x2504F7B8, 0xDD5D13B4, 0xD3683D0C, 0x61B8B81E, 0x4BDD7B50, 0x15EBA9C6,
+ 0x0369E118, 0x0F3CB28D, 0xA45E0D50, 0x98C6031A, 0x40FC3B93, 0x3B0ED7E4, 0xA14E235F, 0x915E7695,
+ 0x5BD9F72D, 0x0BA94E45, 0x9B54A9C2, 0xCEDE74B5, 0x801321EA, 0x9C60FDA3, 0x842CD005, 0xBBB7FB29,
+ 0x25F37CE4, 0xE2B57DDE, 0x7983908A, 0xD544F488, 0x6B72AE10, 0x8F455719, 0x717CFD3A, 0x04003302,
+ 0x62FBDA4F, 0xC2D6A15B, 0x0C445245, 0xFDD48521, 0x71073894, 0x1BF40437, 0x378E0C8C, 0x98A88710,
+ 0x9C13B8E5, 0xB083FA4E, 0xC80DB685, 0x9E6A44F0, 0xD4DCE703, 0xB5A0D630, 0x3A4F8C0E, 0x53EED32C,
+ 0xA0293B60, 0x5B3F7DA1, 0x62F71AD3, 0x7C20329A, 0xB9EC049D, 0xECC01451, 0x042670D7, 0x5A3A2108,
+ 0xCA8F0150, 0x7B2BA416, 0xB6327CE8, 0xB46DC328, 0xE8F32522, 0x9D07E59C, 0x4E4DDB31, 0x829916D7,
+ 0x527A003B, 0xC00DD081, 0x5B9DF06E, 0x5171A5B5, 0xAD2872BB, 0xC1E6864C, 0xB1E0A205, 0x97C1330E,
+ 0x342364B9, 0x53A614F3, 0x44074351, 0xD8EBF000, 0x7CDB795A, 0x38F2F936, 0xF383EF0F, 0xA1CEAD31,
+ 0x0FB4BC0B, 0x11EFDAB0, 0xF43FB5E8, 0x5E8DDE61, 0x58E9D03A, 0xB60DBE05, 0xF1411FED, 0x8BFA2898,
+ 0xC841312E, 0xEFE9B27E, 0xC46FA715, 0x5B7F3A96, 0xAD827F9C, 0xD93B2454, 0x4627794B, 0x0E2CAF80,
+ 0xCF3DA290, 0x41356EFF, 0x7C1FF077, 0xFCC0D858, 0x8FC919EB, 0x5160A465, 0xFB9E4847, 0x8C79B092,
+ 0x722E9EBB, 0xDBBDD3F5, 0x36CB1712, 0xFFA5F9DC, 0x3E0338A7, 0x4096FFED, 0x724CDA2D, 0xB9A4C342,
+ 0x08EE6C60, 0x87F2FD76, 0x44164281, 0x8E5DCAFC, 0x31444F84, 0xFB3CC588, 0xBFFF0FF4, 0xA326DE9A,
+ 0xA3B75124, 0x6BEBA146, 0x6C42BA05, 0x501A7D16, 0xD9BFEA4E, 0x4D603D6A, 0xD77D4FAA, 0xC058D612,
+ 0x57F32162, 0x2A924812, 0x61DEBCFC, 0xBC69E558, 0xB8A91F03, 0x8BCD6A9D, 0xA636F095, 0xAA2F24AB,
+ 0x70CE6DF8, 0x5CE523A4, 0x6A00F78B, 0x863FE86E, 0x96AAEBAD, 0x40B4FC4C, 0xDF20AFD5, 0x229AAEFE,
+ 0x8F72151C, 0x1F0B3C81, 0x41C6A5D7, 0xD65A10A2, 0xC54CB869, 0xCBC6DA52, 0xA2D0491B, 0xA18AAA72,
+ 0x556C57FB, 0x279301C9, 0xFB074711, 0x6F7E5D8E, 0x53481E97, 0x06020AB9, 0x1950F037, 0xEA0E172D,
+ 0xF21FBCF0, 0xEAB15BAE, 0x29D9AAAB, 0xC38E7B89, 0x830985B4, 0xA7F36565, 0x53C890F9, 0x98E8C027,
+ 0x5366DDAA, 0x1598EC80, 0x37C6CB53, 0x7C86FF21, 0x1FC73544, 0xAEAF99C7, 0x8CDF7D29, 0xD405AF62,
+ 0x99D25E63, 0xBE321AEC, 0x719A77B6, 0x6D5E6794, 0x858F4117, 0xB5E4D173, 0x609767FE, 0x65920855,
+ 0xAF11C999, 0x6ACCFE4E, 0x6956D47E, 0xCC2F8626, 0x51FDEB47, 0x8D342BE4, 0x3E78FD09, 0x5F62C8D8,
+ 0x27390CEE, 0xE3DAE509, 0x98B986C0, 0xCAA58219, 0xFD1CCC90, 0x0BC38A3B, 0xD216A40B, 0xD63494A8,
+ 0x19A33B1F, 0x404BF7F8, 0xCB93DCC1, 0xAEBE9817, 0xDA6947D5, 0x999A99BC, 0x5A1C62DD, 0xF24EFA17,
+ 0x7372041D, 0xC15D8ABB, 0xFF8B3AE1, 0x2D99DE9E, 0x6E00C489, 0xE4C43221, 0x547C4AF6, 0x4CF0AE09,
+ 0x22F42AF4, 0x539D966A, 0x9486112B, 0x2885300D, 0xD96E22C7, 0x4A49A847, 0x75CF4227, 0x03CB6CD7,
+ 0x3CBA0DEB, 0x75564D71, 0xE0CC81CB, 0x1DDC8DC7, 0xA8E9943D, 0xCF5766EE, 0x08776336, 0x64F5106A,
+ 0x6570C2AD, 0x53E213B0, 0x1EDFA8A1, 0xEE972B79, 0x73E38BA3, 0x09017AD9, 0xF82221A2, 0x11C1198D,
+ 0xE7A553D1, 0x701BE505, 0xCE613836, 0x5BD537B1, 0xB55FDD84, 0x2EFCFF1A, 0xDFCAE5C5, 0xF53269A9,
+ 0xDDAED89C, 0x792DDDD9, 0x5834D82F, 0x0FD2B8E9, 0xE65B44B8, 0x597D032D, 0xD62955E4, 0xF1E39222,
+ 0xB87C97FE, 0x1949710F, 0x97E330AF, 0x994F1141, 0xC898794F, 0xC39B3A2D, 0x401231F7, 0x790B9E0C,
+ 0x60F52368, 0x078B18E2, 0x9134B2EB, 0x129BFAE4, 0x1343AE58, 0x02E4B337, 0x12F57EEE, 0xCFAC2D2E,
+ 0x89699E9D, 0x7BB6EB34, 0xC0981F4B, 0x066C8EB3, 0xC5BF7341, 0x61833A87, 0x9B5C292A, 0xE58569CB,
+ 0xA6DCBA85, 0x7D87E3FC, 0x2212A078, 0x3AC3C2FC, 0x73F28A73, 0x5FB74025, 0xB5BD177C, 0xD20F2533,
+ 0xABD9B32A, 0x72F396AE, 0x6262D519, 0x21D14C47, 0x2D22D269, 0x206B5C6D, 0x5E9CDFB1, 0x0E5FEAC6,
+ 0xFB9288DB, 0x328F2353, 0xC786C863, 0xCAA94285, 0x43EA5F51, 0xBE6BDCE3, 0xC302357B, 0xA315DCEB,
+ 0x586D1E20, 0xBD4AFA24, 0x2810FFC7, 0x849F5C1A, 0xDE877EB3, 0xB9640A1B, 0x5971C3B1, 0x68DFC625,
+ 0x444BC4FA, 0xA968068E, 0x6C68394C, 0xD62D1590, 0x644E1C9D, 0x2B1B5A49, 0xE9BAFE20, 0x44FC0891,
+ 0x59F50DAB, 0x63988204, 0x9D1A24D0, 0xA60D720C, 0x7AAE76D1, 0x80A0ADF5, 0xF9507839, 0x18BBA6FA,
+ 0x00734546, 0x34AF2A4B, 0x73BA4DAC, 0xBB993814, 0x36699ADD, 0xCC2346BA, 0xEF0B7886, 0x389C7D3C,
+ 0x2F6C6C79, 0x2A43809B, 0x66F729D1, 0x1EDBE8D5, 0x7B3AA5C6, 0xE0844614, 0xF9F2F5F6, 0x2B3D199C,
+ 0xD17B0567, 0x4CA5A89A, 0x2E9AFBF8, 0x287E7BCD, 0x5870DF6C, 0x550A5319, 0xD0279FC1, 0x5645F1D4,
+ 0xE7DAD60B, 0xFA425C8F, 0x08732342, 0xF1589ED5, 0xF6170F1A, 0xA3119C9F, 0xCDC0909F, 0x2E2D3793,
+ 0xD8E370E8, 0x57806424, 0x1E3D6259, 0x1264F150, 0x07F0DD4C, 0x8EC085DE, 0x47EE2C27, 0x493D8813,
+ 0x92B35997, 0x5FA20181, 0x76A6A33F, 0x0CF7DE5A, 0x98E400BB, 0xED904E6B, 0xA030BA58, 0x26D25A23,
+ 0x09A7B762, 0x5E09107B, 0x53AE941B, 0x6F5C5E2F, 0xFE246B6F, 0x9D3C6705, 0x9ECC894B, 0x72F7D6DA,
+ 0xF7CF5BA6, 0xB2B681F0, 0xDDD15D46, 0xF3746453, 0x9E26C35B, 0xA55F118D, 0xA5139626, 0xD49060A8,
+ 0xC42320BE, 0x2AFCCC18, 0xBD313F8A, 0x37E4C8F2, 0xD1EA8AD9, 0x84F958F8, 0x2933E0BB, 0x1426BCDE,
+ 0xD1D54834, 0xC1D5843D, 0x4A3AED01, 0x4160686C, 0x7071217B, 0x872335FF, 0x433FD7AC, 0x2CEFC771,
+ 0xF2D7F541, 0x9075E901, 0xE03B0A97, 0x2234E3AD, 0x020239BB, 0x7BAB2A78, 0x28D8B676, 0xFE5BEEB5,
+ 0x1765DE8F, 0x58DE923F, 0xD87A7FBE, 0xB4878938, 0x3F5BE275, 0xCFDB281C, 0x286B3A69, 0x9B6B6B2F,
+ 0x8BF2A870, 0xE31A2C50, 0xC9262911, 0x2588B94A, 0xFD414AEB, 0x2CF64649, 0x8F3AD2C3, 0xFB2544B0,
+ 0xC2122876, 0xC876732B, 0x819E73C5, 0x81A931C0, 0xEFDEF07B, 0xAA1866AE, 0x1EB9AC43, 0x1923B571,
+ 0xDEA13D66, 0x81A5F15C, 0x512863BA, 0x1FB03076, 0x0FAD3D33, 0x6C02FD32, 0x9F6F1859, 0x7CA4FC80,
+ 0xB2AB40BC, 0x3591F47B, 0x0B430455, 0x4CFC6E95, 0x9C02B935, 0xE635C0B2, 0x8592805B, 0x5DA4784E,
+ 0x98A007F8, 0xB748A59B, 0x964C6121, 0x29EAA8DA, 0x3854348D, 0x55A8DED6, 0xE3A120A7, 0x874DB1F5,
+ 0x9FD054DA, 0xA762EADB, 0x9B94609B, 0x9F41DB64, 0x6F3E1AB9, 0x9C3D7BD8, 0xA3191C77, 0xE69B8DE1,
+ 0x08E2ACFC, 0x39046EB7, 0xF1BE1459, 0x9F495B8C, 0x446507C7, 0xBAF2E0B7, 0x42FE0228, 0x40A5FD9C,
+ 0x89969529, 0x6F059820, 0x4CBC29A2, 0x5377DC7A, 0x432ED67C, 0x7A59FD8D, 0xBF48C39F, 0x53BBD5BF,
+ 0xAC47EA6C, 0x20DC9006, 0xAE01D494, 0x01AF518E, 0xB0A5B99B, 0xF0CC4DC2, 0x9633E4FB, 0xA3B31150,
+ 0xFE462FF9, 0xFF2577B2, 0xA0E9F239, 0x0AA0D270, 0x8DDA2180, 0x7C7DB4E1, 0x9ED9C8EF, 0xB1EC6299,
+ 0x2937A0CF, 0x818B3261, 0x42B28583, 0x31260B12, 0x9925D53F, 0x81DA7486, 0xDB71DA30, 0xD9A22B02,
+ 0xA5209457, 0x2F4A2FAC, 0x12827BDC, 0x6EAE89DA, 0x69B41E86, 0x30DC5C0F, 0x25EEBDA8, 0x0AF5D4E3,
+ 0x81B4F08C, 0x079E5946, 0x522F06A8, 0xCC63018D, 0x207E1DC9, 0x9D876BF2, 0xD0A711AF, 0xC8AF3B4A,
+ 0xADECFB96, 0x05D4A722, 0xCDAA54E1, 0x676F3A5A, 0xC00990F5, 0xCE307ED1, 0xC2166802, 0x5DDBFD94,
+ 0x12F9BE72, 0x715ABEEF, 0x3CC1C845, 0x6AA27A44, 0xB5B95708, 0x47A2AB8C, 0x38BE7E9D, 0x82001981,
+ 0xB17DED27, 0xE16B50AD, 0x86222922, 0x7EEA4290, 0xB8839C4A, 0x0DFA021B, 0x1BC70646, 0xCC544388,
+ 0x4E09DC72, 0xD841FD27, 0x6406DB42, 0xCF352278, 0x6A6E7381, 0x5AD06B18, 0x1D27C607, 0x29F76996,
+ 0xD0149DB0, 0xAD9FBED0, 0x317B8D69, 0xBE10194D, 0xDCF6024E, 0xF6600A28, 0x0213386B, 0x2D1D1084,
+ 0x654780A8, 0x3D95D20B, 0x5B193E74, 0x5A36E194, 0x74799291, 0xCE83F2CE, 0xA726ED98, 0xC14C8B6B,
+ 0xA93D001D, 0x6006E840, 0xADCEF837, 0xA8B8D2DA, 0x5694395D, 0xE0F34326, 0x58F05102, 0xCBE09987,
+ 0x9A11B25C, 0xA9D30A79, 0x2203A1A8, 0x6C75F800, 0x3E6DBCAD, 0x9C797C9B, 0xF9C1F787, 0xD0E75698,
+ 0x07DA5E05, 0x08F7ED58, 0xFA1FDAF4, 0x2F46EF30, 0xAC74E81D, 0xDB06DF02, 0x78A08FF6, 0x45FD144C,
+ 0x64209897, 0xF7F4D93F, 0x6237D38A, 0x2DBF9D4B, 0x56C13FCE, 0xEC9D922A, 0x2313BCA5, 0x071657C0,
+ 0xE79ED148, 0xA09AB77F, 0x3E0FF83B, 0xFE606C2C, 0xC7E48CF5, 0xA8B05232, 0x7DCF2423, 0xC63CD849,
+ 0xB9174F5D, 0x6DDEE9FA, 0x1B658B89, 0xFFD2FCEE, 0x9F019C53, 0xA04B7FF6, 0x39266D16, 0x5CD261A1,
+ 0x04773630, 0xC3F97EBB, 0x220B2140, 0x472EE57E, 0x18A227C2, 0x7D9E62C4, 0x5FFF87FA, 0x51936F4D,
+ 0x51DBA892, 0xB5F5D0A3, 0x36215D02, 0x280D3E8B, 0x6CDFF527, 0x26B01EB5, 0x6BBEA7D5, 0x602C6B09,
+ 0x2BF990B1, 0x15492409, 0x30EF5E7E, 0xDE34F2AC, 0xDC548F81, 0xC5E6B54E, 0xD31B784A, 0x55179255,
+ 0x386736FC, 0xAE7291D2, 0x35007BC5, 0xC31FF437, 0x4B5575D6, 0xA05A7E26, 0x6F9057EA, 0x114D577F,
+ 0xC7B90A8E, 0x8F859E40, 0x20E352EB, 0xEB2D0851, 0x62A65C34, 0xE5E36D29, 0x5168248D, 0xD0C55539,
+ 0xAAB62BFD, 0x93C980E4, 0x7D83A388, 0xB7BF2EC7, 0xA9A40F4B, 0x8301055C, 0x8CA8781B, 0x75070B96,
+ 0x790FDE78, 0xF558ADD7, 0x94ECD555, 0x61C73DC4, 0xC184C2DA, 0xD3F9B2B2, 0xA9E4487C, 0x4C746013,
+ 0x29B36ED5, 0x8ACC7640, 0x9BE365A9, 0x3E437F90, 0x8FE39AA2, 0xD757CCE3, 0x466FBE94, 0xEA02D7B1,
+ 0x4CE92F31, 0x5F190D76, 0x38CD3BDB, 0xB6AF33CA, 0xC2C7A08B, 0x5AF268B9, 0xB04BB3FF, 0xB2C9042A,
+ 0x5788E4CC, 0x35667F27, 0x34AB6A3F, 0xE617C313, 0x28FEF5A3, 0xC69A15F2, 0x1F3C7E84, 0x2FB1646C,
+ 0x939C8677, 0x71ED7284, 0xCC5CC360, 0x6552C10C, 0xFE8E6648, 0x85E1C51D, 0x690B5205, 0xEB1A4A54,
+ 0x0CD19D8F, 0xA025FBFC, 0xE5C9EE60, 0xD75F4C0B, 0x6D34A3EA, 0xCCCD4CDE, 0xAD0E316E, 0xF9277D0B,
+ 0xB9B9020E, 0xE0AEC55D, 0x7FC59D70, 0x96CCEF4F, 0xB7006244, 0x72621910, 0xAA3E257B, 0x26785704,
+ 0x117A157A, 0xA9CECB35, 0xCA430895, 0x94429806, 0xECB71163, 0xA524D423, 0xBAE7A113, 0x81E5B66B,
+ 0x9E5D06F5, 0xBAAB26B8, 0xF06640E5, 0x8EEE46E3, 0x5474CA1E, 0x67ABB377, 0x043BB19B, 0xB27A8835,
+ 0x32B86156, 0xA9F109D8, 0x8F6FD450, 0xF74B95BC, 0xB9F1C5D1, 0x0480BD6C, 0xFC1110D1, 0x88E08CC6,
+ 0xF3D2A9E8, 0x380DF282, 0xE7309C1B, 0x2DEA9BD8, 0x5AAFEEC2, 0x977E7F8D, 0xEFE572E2, 0x7A9934D4,
+ 0xEED76C4E, 0xBC96EEEC, 0xAC1A6C17, 0x07E95C74, 0xF32DA25C, 0x2CBE8196, 0x6B14AAF2, 0x78F1C911,
+ 0xDC3E4BFF, 0x8CA4B887, 0xCBF19857, 0xCCA788A0, 0xE44C3CA7, 0xE1CD9D16, 0x200918FB, 0x3C85CF06,
+ 0x307A91B4, 0x83C58C71, 0x489A5975, 0x094DFD72, 0x89A1D72C, 0x0172599B, 0x097ABF77, 0xE7D61697,
+ 0x44B4CF4E, 0xBDDB759A, 0xE04C0FA5, 0x83364759, 0xE2DFB9A0, 0x30C19D43, 0xCDAE1495, 0xF2C2B4E5,
+ 0x536E5D42, 0x3EC3F1FE, 0x1109503C, 0x9D61E17E, 0xB9F94539, 0x2FDBA012, 0xDADE8BBE, 0x69079299,
+ 0x55ECD995, 0xB979CB57, 0xB1316A8C, 0x90E8A623, 0x96916934, 0x9035AE36, 0x2F4E6FD8, 0x872FF563,
+ 0xFDC9446D, 0x994791A9, 0xE3C36431, 0xE554A142, 0xA1F52FA8, 0xDF35EE71, 0xE1811ABD, 0x518AEE75,
+ 0x2C368F10, 0xDEA57D12, 0x14087FE3, 0xC24FAE0D, 0xEF43BF59, 0xDCB2050D, 0xACB8E1D8, 0x346FE312,
+ 0x2225E27D, 0x54B40347, 0x36341CA6, 0xEB168AC8, 0xB2270E4E, 0x158DAD24, 0xF4DD7F10, 0xA27E0448,
+ 0x2CFA86D5, 0x31CC4102, 0x4E8D1268, 0xD306B906, 0xBD573B68, 0xC05056FA, 0x7CA83C1C, 0x0C5DD37D,
+ 0x8039A2A3, 0x1A579525, 0x39DD26D6, 0xDDCC9C0A, 0x1B34CD6E, 0x6611A35D, 0x7785BC43, 0x9C4E3E9E,
+ 0x97B6363C, 0x9521C04D, 0xB37B94E8, 0x0F6DF46A, 0x3D9D52E3, 0x7042230A, 0x7CF97AFB, 0x959E8CCE,
+ 0x68BD82B3, 0x2652D44D, 0x974D7DFC, 0x143F3DE6, 0xAC386FB6, 0xAA85298C, 0x6813CFE0, 0xAB22F8EA,
+ 0xF3ED6B05, 0x7D212E47, 0x843991A1, 0x78AC4F6A, 0xFB0B878D, 0xD188CE4F, 0xE6E0484F, 0x17169BC9,
+ 0x6C71B874, 0xABC03212, 0x0F1EB12C, 0x093278A8, 0x03F86EA6, 0xC76042EF, 0xA3F71613, 0xA49EC409,
+ 0xC959ACCB, 0xAFD100C0, 0x3B53519F, 0x867BEF2D, 0xCC72005D, 0x76C82735, 0xD0185D2C, 0x13692D11,
+ 0x84D3DBB1, 0xAF04883D, 0xA9D74A0D, 0xB7AE2F17, 0xFF1235B7, 0xCE9E3382, 0x4F6644A5, 0x397BEB6D,
+ 0x7BE7ADD3, 0x595B40F8, 0xEEE8AEA3, 0xF9BA3229, 0xCF1361AD, 0x52AF88C6, 0x5289CB13, 0x6A483054,
+ 0x6211905F, 0x157E660C, 0x5E989FC5, 0x9BF26479, 0x68F5456C, 0xC27CAC7C, 0x1499F05D, 0x0A135E6F,
+ 0xE8EAA41A, 0xE0EAC21E, 0x251D7680, 0xA0B03436, 0xB83890BD, 0x43919AFF, 0xA19FEBD6, 0x9677E3B8,
+ 0xF96BFAA0, 0xC83AF480, 0xF01D854B, 0x911A71D6, 0x01011CDD, 0x3DD5953C, 0x946C5B3B, 0xFF2DF75A,
+ 0x8BB2EF47, 0x2C6F491F, 0x6C3D3FDF, 0xDA43C49C, 0x1FADF13A, 0xE7ED940E, 0x94359D34, 0x4DB5B597,
+ 0x45F95438, 0xF18D1628, 0x64931488, 0x92C45CA5, 0xFEA0A575, 0x967B2324, 0x479D6961, 0x7D92A258,
+ 0xE109143B, 0xE43B3995, 0x40CF39E2, 0xC0D498E0, 0x77EF783D, 0xD50C3357, 0x8F5CD621, 0x0C91DAB8,
+ 0x6F509EB3, 0x40D2F8AE, 0x289431DD, 0x8FD8183B, 0x07D69E99, 0xB6017E99, 0x4FB78C2C, 0x3E527E40,
+ 0xD955A05E, 0x9AC8FA3D, 0x85A1822A, 0xA67E374A, 0x4E015C9A, 0xF31AE059, 0x42C9402D, 0x2ED23C27,
+ 0xCC5003FC, 0xDBA452CD, 0x4B263090, 0x94F5546D, 0x1C2A1A46, 0xAAD46F6B, 0xF1D09053, 0x43A6D8FA,
+ 0xCFE82A6D, 0xD3B1756D, 0x4DCA304D, 0xCFA0EDB2, 0x379F0D5C, 0xCE1EBDEC, 0xD18C8E3B, 0x734DC6F0,
+ 0x8471567E, 0x9C82375B, 0x78DF0A2C, 0xCFA4ADC6, 0xA23283E3, 0x5D79705B, 0x217F0114, 0xA052FECE,
+ 0x44CB4A94, 0x3782CC10, 0x265E14D1, 0x29BBEE3D, 0xA1976B3E, 0xBD2CFEC6, 0xDFA461CF, 0x29DDEADF,
+ 0x5623F536, 0x106E4803, 0x5700EA4A, 0x80D7A8C7, 0x5852DCCD, 0xF86626E1, 0x4B19F27D, 0xD1D988A8,
+ 0x7F2317FC, 0xFF92BBD9, 0x5074F91C, 0x05506938, 0xC6ED10C0, 0xBE3EDA70, 0xCF6CE477, 0xD8F6314C,
+ 0x949BD067, 0xC0C59930, 0x215942C1, 0x98930589, 0x4C92EA9F, 0x40ED3A43, 0x6DB8ED18, 0xECD11581,
+ 0x52904A2B, 0x17A517D6, 0x09413DEE, 0x375744ED, 0xB4DA0F43, 0x186E2E07, 0x92F75ED4, 0x057AEA71,
+ 0x40DA7846, 0x03CF2CA3, 0xA9178354, 0xE63180C6, 0x103F0EE4, 0xCEC3B5F9, 0x685388D7, 0x64579DA5,
+ 0x56F67DCB, 0x82EA5391, 0x66D52A70, 0xB3B79D2D, 0xE004C87A, 0x67183F68, 0x610B3401, 0x2EEDFECA,
+ 0x897CDF39, 0xB8AD5F77, 0x1E60E422, 0x35513D22, 0x5ADCAB84, 0xA3D155C6, 0x9C5F3F4E, 0xC1000CC0,
+ 0xD8BEF693, 0x70B5A856, 0x43111491, 0x3F752148, 0xDC41CE25, 0x06FD010D, 0x0DE38323, 0x662A21C4,
+ 0xA704EE39, 0x6C20FE93, 0x32036DA1, 0xE79A913C, 0x353739C0, 0xAD68358C, 0x0E93E303, 0x14FBB4CB,
+ 0x680A4ED8, 0xD6CFDF68, 0x98BDC6B4, 0x5F080CA6, 0x6E7B0127, 0xFB300514, 0x01099C35, 0x168E8842,
+ 0xB2A3C054, 0x1ECAE905, 0x2D8C9F3A, 0xAD1B70CA, 0x3A3CC948, 0x6741F967, 0xD39376CC, 0xE0A645B5,
+ 0x549E800E, 0xB0037420, 0x56E77C1B, 0xD45C696D, 0x2B4A1CAE, 0x7079A193, 0xAC782881, 0x65F04CC3,
+ 0xCD08D92E, 0x54E9853C, 0x1101D0D4, 0xB63AFC00, 0x9F36DE56, 0xCE3CBE4D, 0x7CE0FBC3, 0xE873AB4C,
+ 0x03ED2F02, 0x047BF6AC, 0x7D0FED7A, 0x97A37798, 0x563A740E, 0x6D836F81, 0x3C5047FB, 0xA2FE8A26,
+ 0xB2104C4B, 0x7BFA6C9F, 0xB11BE9C5, 0x16DFCEA5, 0x2B609FE7, 0xF64EC915, 0x1189DE52, 0x038B2BE0,
+ 0xF3CF68A4, 0xD04D5BBF, 0x1F07FC1D, 0xFF303616, 0x63F2467A, 0xD4582919, 0xBEE79211, 0xE31E6C24,
+ 0x5C8BA7AE, 0xB6EF74FD, 0x0DB2C5C4, 0xFFE97E77, 0x4F80CE29, 0x5025BFFB, 0x9C93368B, 0x2E6930D0,
+ 0x823B9B18, 0x61FCBF5D, 0x110590A0, 0x239772BF, 0x0C5113E1, 0x3ECF3162, 0xAFFFC3FD, 0x28C9B7A6,
+ 0xA8EDD449, 0x5AFAE851, 0x9B10AE81, 0x94069F45, 0xB66FFA93, 0x93580F5A, 0xB5DF53EA, 0xB0163584,
+ 0x95FCC858, 0x0AA49204, 0x1877AF3F, 0xEF1A7956, 0x6E2A47C0, 0x62F35AA7, 0xE98DBC25, 0x2A8BC92A,
+ 0x1C339B7E, 0xD73948E9, 0x9A803DE2, 0x618FFA1B, 0x25AABAEB, 0x502D3F13, 0xB7C82BF5, 0x08A6ABBF,
+ 0x63DC8547, 0xC7C2CF20, 0x9071A975, 0x75968428, 0xB1532E1A, 0xF2F1B694, 0xA8B41246, 0xE862AA9C,
+ 0x555B15FE, 0x49E4C072, 0xBEC1D1C4, 0xDBDF9763, 0x54D207A5, 0xC18082AE, 0x46543C0D, 0x3A8385CB,
+ 0xBC87EF3C, 0xFAAC56EB, 0x4A766AAA, 0x30E39EE2, 0x60C2616D, 0x69FCD959, 0xD4F2243E, 0xA63A3009,
+ 0x14D9B76A, 0xC5663B20, 0x4DF1B2D4, 0x1F21BFC8, 0xC7F1CD51, 0x6BABE671, 0xA337DF4A, 0xF5016BD8,
+ 0x26749798, 0xAF8C86BB, 0x1C669DED, 0xDB5799E5, 0xE163D045, 0xAD79345C, 0x5825D9FF, 0x59648215,
+ 0xABC47266, 0x9AB33F93, 0x9A55B51F, 0xF30BE189, 0x147F7AD1, 0x634D0AF9, 0x0F9E3F42, 0x97D8B236,
+ 0x49CE433B, 0x38F6B942, 0x662E61B0, 0x32A96086, 0xFF473324, 0xC2F0E28E, 0x3485A902, 0xF58D252A,
+ 0x0668CEC7, 0x5012FDFE, 0xF2E4F730, 0x6BAFA605, 0x369A51F5, 0x6666A66F, 0xD68718B7, 0x7C93BE85,
+ 0xDCDC8107, 0x705762AE, 0xBFE2CEB8, 0x4B6677A7, 0x5B803122, 0xB9310C88, 0x551F12BD, 0x133C2B82,
+ 0x88BD0ABD, 0xD4E7659A, 0x6521844A, 0xCA214C03, 0xF65B88B1, 0xD2926A11, 0xDD73D089, 0xC0F2DB35,
+ 0x4F2E837A, 0xDD55935C, 0xF8332072, 0x47772371, 0xAA3A650F, 0xB3D5D9BB, 0x821DD8CD, 0x593D441A,
+ 0x195C30AB, 0x54F884EC, 0x47B7EA28, 0xFBA5CADE, 0x5CF8E2E8, 0x82405EB6, 0x7E088868, 0x44704663,
+ 0x79E954F4, 0x9C06F941, 0x73984E0D, 0x16F54DEC, 0xAD57F761, 0x4BBF3FC6, 0x77F2B971, 0x3D4C9A6A,
+ 0x776BB627, 0xDE4B7776, 0x560D360B, 0x03F4AE3A, 0x7996D12E, 0x165F40CB, 0xB58A5579, 0xBC78E488,
+ 0xEE1F25FF, 0xC6525C43, 0x65F8CC2B, 0xE653C450, 0x72261E53, 0xF0E6CE8B, 0x10048C7D, 0x1E42E783,
+ 0x983D48DA, 0xC1E2C638, 0x244D2CBA, 0x04A6FEB9, 0xC4D0EB96, 0x80B92CCD, 0x84BD5FBB, 0x73EB0B4B,
+ 0x225A67A7, 0xDEEDBACD, 0xF02607D2, 0x419B23AC, 0xF16FDCD0, 0x9860CEA1, 0xE6D70A4A, 0x79615A72,
+ 0x29B72EA1, 0x1F61F8FF, 0x0884A81E, 0xCEB0F0BF, 0x5CFCA29C, 0x17EDD009, 0xED6F45DF, 0xB483C94C,
+ 0xAAF66CCA, 0x5CBCE5AB, 0xD898B546, 0x48745311, 0x4B48B49A, 0x481AD71B, 0x97A737EC, 0xC397FAB1,
+ 0xFEE4A236, 0xCCA3C8D4, 0x71E1B218, 0x72AA50A1, 0xD0FA97D4, 0xEF9AF738, 0xF0C08D5E, 0x28C5773A,
+ 0x161B4788, 0xEF52BE89, 0x8A043FF1, 0xE127D706, 0xF7A1DFAC, 0x6E590286, 0x565C70EC, 0x9A37F189,
+ 0x9112F13E, 0x2A5A01A3, 0x1B1A0E53, 0x758B4564, 0x59138727, 0x0AC6D692, 0x7A6EBF88, 0xD13F0224,
+ 0x167D436A, 0x18E62081, 0x27468934, 0x69835C83, 0x5EAB9DB4, 0x60282B7D, 0xBE541E0E, 0x862EE9BE,
+ 0xC01CD151, 0x0D2BCA92, 0x1CEE936B, 0x6EE64E05, 0x8D9A66B7, 0xB308D1AE, 0x3BC2DE21, 0x4E271F4F,
+ 0xCBDB1B1E, 0x4A90E026, 0x59BDCA74, 0x87B6FA35, 0x1ECEA971, 0xB8211185, 0x3E7CBD7D, 0xCACF4667,
+ 0xB45EC159, 0x13296A26, 0x4BA6BEFE, 0x0A1F9EF3, 0x561C37DB, 0x554294C6, 0x3409E7F0, 0xD5917C75,
+ 0xF9F6B582, 0xBE909723, 0x421CC8D0, 0xBC5627B5, 0xFD85C3C6, 0xE8C46727, 0xF3702427, 0x0B8B4DE4,
+ 0x3638DC3A, 0x55E01909, 0x078F5896, 0x04993C54, 0x81FC3753, 0xE3B02177, 0xD1FB8B09, 0xD24F6204,
+ 0x64ACD665, 0xD7E88060, 0x9DA9A8CF, 0xC33DF796, 0xE639002E, 0x3B64139A, 0xE80C2E96, 0x89B49688,
+ 0xC269EDD8, 0xD782441E, 0xD4EBA506, 0xDBD7178B, 0x7F891ADB, 0xE74F19C1, 0xA7B32252, 0x9CBDF5B6,
+ 0x3DF3D6E9, 0xACADA07C, 0xF7745751, 0xFCDD1914, 0x6789B0D6, 0xA957C463, 0x2944E589, 0xB524182A,
+ 0x3108C82F, 0x8ABF3306, 0xAF4C4FE2, 0x4DF9323C, 0x347AA2B6, 0xE13E563E, 0x8A4CF82E, 0x0509AF37,
+ 0x7475520D, 0x7075610F, 0x128EBB40, 0xD0581A1B, 0xDC1C485E, 0x21C8CD7F, 0x50CFF5EB, 0x4B3BF1DC,
+ 0x7CB5FD50, 0xE41D7A40, 0x780EC2A5, 0xC88D38EB, 0x00808E6E, 0x9EEACA9E, 0x4A362D9D, 0xFF96FBAD,
+ 0xC5D977A3, 0x9637A48F, 0x361E9FEF, 0x6D21E24E, 0x0FD6F89D, 0x73F6CA07, 0xCA1ACE9A, 0x26DADACB,
+ 0x22FCAA1C, 0x78C68B14, 0xB2498A44, 0xC9622E52, 0x7F5052BA, 0xCB3D9192, 0x23CEB4B0, 0xBEC9512C,
+ 0xF0848A1D, 0x721D9CCA, 0x20679CF1, 0xE06A4C70, 0xBBF7BC1E, 0xEA8619AB, 0x47AE6B10, 0x8648ED5C,
+ 0x37A84F59, 0xA0697C57, 0x944A18EE, 0xC7EC0C1D, 0x83EB4F4C, 0x5B00BF4C, 0x27DBC616, 0x1F293F20,
+ 0xECAAD02F, 0x4D647D1E, 0x42D0C115, 0x533F1BA5, 0xA700AE4D, 0xF98D702C, 0xA164A016, 0x17691E13,
+ 0xE62801FE, 0x6DD22966, 0xA5931848, 0x4A7AAA36, 0x8E150D23, 0xD56A37B5, 0x78E84829, 0xA1D36C7D,
+ 0xE7F41536, 0xE9D8BAB6, 0x26E51826, 0x67D076D9, 0x1BCF86AE, 0xE70F5EF6, 0x68C6471D, 0x39A6E378,
+ 0xC238AB3F, 0x4E411BAD, 0x3C6F8516, 0xE7D256E3, 0xD11941F1, 0x2EBCB82D, 0x10BF808A, 0x50297F67,
+ 0x2265A54A, 0x9BC16608, 0x932F0A68, 0x14DDF71E, 0x50CBB59F, 0xDE967F63, 0xEFD230E7, 0x14EEF56F,
+ 0xAB11FA9B, 0x08372401, 0xAB807525, 0xC06BD463, 0xAC296E66, 0xFC331370, 0x258CF93E, 0x68ECC454,
+ 0xBF918BFE, 0x7FC95DEC, 0x283A7C8E, 0x02A8349C, 0x63768860, 0xDF1F6D38, 0x67B6723B, 0xEC7B18A6,
+ 0x4A4DE833, 0xE062CC98, 0x90ACA160, 0xCC4982C4, 0xA649754F, 0x20769D21, 0xB6DC768C, 0xF6688AC0,
+ 0x29482515, 0x0BD28BEB, 0x84A09EF7, 0x9BABA276, 0xDA6D07A1, 0x0C371703, 0xC97BAF6A, 0x02BD7538,
+ 0xA06D3C23, 0x01E79651, 0x548BC1AA, 0x7318C063, 0x881F8772, 0xE761DAFC, 0xB429C46B, 0xB22BCED2,
+ 0xAB7B3EE5, 0x417529C8, 0xB36A9538, 0x59DBCE96, 0x7002643D, 0xB38C1FB4, 0x30859A00, 0x9776FF65,
+ 0xC4BE6F9C, 0x5C56AFBB, 0x0F307211, 0x1AA89E91, 0x2D6E55C2, 0x51E8AAE3, 0x4E2F9FA7, 0xE0800660,
+ 0x6C5F7B49, 0xB85AD42B, 0x21888A48, 0x9FBA90A4, 0xEE20E712, 0x837E8086, 0x06F1C191, 0xB31510E2,
+ 0xD382771C, 0xB6107F49, 0x1901B6D0, 0x73CD489E, 0x1A9B9CE0, 0xD6B41AC6, 0x8749F181, 0x0A7DDA65,
+ 0x3405276C, 0x6B67EFB4, 0x4C5EE35A, 0xAF840653, 0x373D8093, 0xFD98028A, 0x0084CE1A, 0x0B474421,
+ 0xD951E02A, 0x0F657482, 0x16C64F9D, 0x568DB865, 0x9D1E64A4, 0x33A0FCB3, 0xE9C9BB66, 0x705322DA,
+ 0x2A4F4007, 0xD801BA10, 0xAB73BE0D, 0x6A2E34B6, 0x95A50E57, 0xB83CD0C9, 0xD63C1440, 0x32F82661,
+ 0x66846C97, 0x2A74C29E, 0x0880E86A, 0x5B1D7E00, 0xCF9B6F2B, 0xE71E5F26, 0x3E707DE1, 0x7439D5A6,
+ 0x01F69781, 0x023DFB56, 0x3E87F6BD, 0x4BD1BBCC, 0xAB1D3A07, 0xB6C1B7C0, 0x1E2823FD, 0xD17F4513,
+ 0xD9082625, 0xBDFD364F, 0xD88DF4E2, 0x8B6FE752, 0x95B04FF3, 0x7B27648A, 0x08C4EF29, 0x01C595F0,
+};
+//#endif
+
+#define WR_BUF_CNT 16 /* write-buffer slot count; presumably sizes wr_cache -- confirm at the allocation site */
+#define NANDINFO "nandinfo" /* name of the procfs entry created for NAND info */
+static struct proc_dir_entry *nandinfo_proc = NULL; /* /proc/nandinfo entry handle */
+static struct mtd_info *mtd_nandinfo = NULL; /* MTD device the procfs entry reports on */
+uint8_t *buf_rdmz, *wr_cache; /* NOTE(review): names suggest randomizer bounce buffer and write cache -- verify where they are allocated */
+/*#define NAND_DEBUG*/
+unsigned int wmt_version; /* controller/SoC version word, set elsewhere in the driver */
+uint32_t par1_ofs, par2_ofs, par3_ofs, par4_ofs, eslc_write, prob_end; /* partition offsets and probe/eSLC state shared across the driver */
+#include <linux/mtd/partitions.h>
+#define NUM_NAND_PARTITIONS ARRAY_SIZE(nand_partitions) /* element count of nand_partitions[] below */
+
+#ifndef CONFIG_MTD_NAND_WMT_UBUNTU
+
+/*
+ * Default (Android) flash layout. All partitions are laid out back-to-back
+ * (MTDPART_OFS_APPEND); the last one takes the remaining space.
+ */
+struct mtd_partition nand_partitions[] = {
+	{
+		.name = "logo",
+		.offset = MTDPART_OFS_APPEND,
+		.size = 0x1000000,		/* 16 MiB */
+	},
+	{
+		.name = "boot",
+		.offset = MTDPART_OFS_APPEND,
+		.size = 0x1000000,		/* 16 MiB */
+	},
+	{
+		.name = "recovery",
+		.offset = MTDPART_OFS_APPEND,
+		.size = 0x1000000,		/* 16 MiB */
+	},
+	{
+		.name = "misc",
+		.offset = MTDPART_OFS_APPEND,
+		.size = 0x1000000,		/* 16 MiB */
+	},
+	{
+		.name = "keydata",
+		.offset = MTDPART_OFS_APPEND,
+		.size = 0x4000000,		/* 64 MiB */
+	},
+	{
+		.name = "system",
+		.offset = MTDPART_OFS_APPEND,
+		.size = 0x40000000,		/* 1 GiB */
+	},
+	{
+		.name = "cache",
+		.offset = MTDPART_OFS_APPEND,
+		.size = 0x20000000,		/* 512 MiB */
+	},
+	{
+		.name = "swap",
+		.offset = MTDPART_OFS_APPEND,
+		.size = 0x10000000,		/* 256 MiB */
+	},
+#ifndef CONFIG_MTD_NAND_WMT_ANDROID_UBUNTU_DUALOS
+	/* Android-only: "data" takes everything left on the chip. */
+	{
+		.name = "data",
+		.offset = MTDPART_OFS_APPEND,
+		.size = MTDPART_SIZ_FULL,
+	}
+#else // #ifdef CONFIG_MTD_NAND_WMT_ANDROID_UBUNTU_DUALOS
+	/* Dual-OS: cap "data" and append Ubuntu boot/rootfs partitions. */
+	{
+		.name = "data",
+		.offset = MTDPART_OFS_APPEND,
+		.size = 0x88000000,		/* 2176 MiB */
+	},
+	{	.name = "ubuntu-boot",
+		.offset = MTDPART_OFS_APPEND,
+		.size = 0x1000000,		/* 16 MiB */
+	},
+	{
+		.name = "ubuntu-rootfs",
+		.offset = MTDPART_OFS_APPEND,
+		.size = MTDPART_SIZ_FULL,	/* remainder of the device */
+	}
+#endif
+};
+
+#else // #ifdef CONFIG_MTD_NAND_WMT_UBUNTU
+
+/* Ubuntu-only flash layout: logo, boot, then rootfs filling the device. */
+struct mtd_partition nand_partitions[] = {
+	{
+		.name = "ubuntu-logo",
+		.offset = MTDPART_OFS_APPEND,
+		.size = 0x1000000,		/* 16 MiB */
+	},
+	{
+		.name = "ubuntu-boot",
+		.offset = MTDPART_OFS_APPEND,
+		.size = 0x1000000,		/* 16 MiB */
+	},
+	{
+		.name = "ubuntu-rootfs",
+		.offset = MTDPART_OFS_APPEND,
+		.size = MTDPART_SIZ_FULL,	/* remainder of the device */
+	}
+};
+
+#endif
+
+EXPORT_SYMBOL(nand_partitions);
+
+/* Non-zero once a second NAND chip is in use -- shared with other modules. */
+int second_chip = 0;
+EXPORT_SYMBOL(second_chip);
+
+#ifdef CONFIG_MTD_NAND_WMT_HWECC
+	static int MAX_CHIP = CONFIG_MTD_NAND_CHIP_NUM;
+	static int hardware_ecc = 1;	/* use the controller's hardware ECC engine */
+#else
+	#define MAX_CHIP 1
+	static int hardware_ecc = 0;	/* software ECC fallback */
+#endif
+
+/* OOB ECC encoding mode: hardware by default; SW_ENCODE_OOB selects software. */
+#define HW_ENCODE_OOB
+//#define SW_ENCODE_OOB
+
+#ifdef SW_ENCODE_OOB
+static unsigned char parity[MAX_PARITY_SIZE];
+#endif
+/* Bit-error positions reported by the BCH decoder for the current page.
+ * FIX(review): the original declared this array twice in a row; the duplicate
+ * tentative definition has been removed. */
+static unsigned int bch_err_pos[MAX_ECC_BIT_ERROR];
+
+/*
+ * Software de-randomizer table for READ ID / READ STATUS replies.
+ * When the randomizer is active, bytes returned for these commands come back
+ * XOR-scrambled; this 128-byte sequence is used to undo that.
+ * NOTE(review): purpose taken from the original comment -- confirm at the
+ * call sites that consume rdmz_tb.
+ */
+unsigned char rdmz_tb[128] = {
+	0x84, 0x4a, 0x37, 0xbe, 0xd7, 0xd2, 0x39, 0x03, 0x8e, 0x77, 0xb9, 0x41, 0x99, 0xa7, 0x78, 0x62,
+	0x53, 0x88, 0x12, 0xf4, 0x75, 0x21, 0xf0, 0x27, 0xc2, 0x0f, 0x04, 0x80, 0xd7, 0x5a, 0xce, 0x37,
+	0x56, 0xb1, 0x1c, 0xdc, 0x61, 0x9a, 0x86, 0x10, 0xae, 0xec, 0x73, 0x54, 0xa1, 0x5a, 0x56, 0xdc,
+	0x2b, 0x45, 0x5e, 0x09, 0x99, 0xb7, 0x64, 0x2b, 0x7f, 0x0c, 0x62, 0x91, 0xa0, 0xfe, 0x35, 0x84,
+	0xdf, 0x7a, 0xa0, 0x21, 0xa7, 0x42, 0x30, 0x38, 0x80, 0x05, 0x6e, 0x6b, 0xda, 0x23, 0x3f, 0xf3,
+	0x8e, 0x5d, 0xf7, 0x63, 0xbd, 0x34, 0x92, 0x19, 0x7d, 0x84, 0xcf, 0x66, 0xe9, 0x0d, 0x23, 0x32,
+	0x55, 0xed, 0x5f, 0xc0, 0xcd, 0x76, 0xaf, 0x87, 0x9e, 0x83, 0x96, 0xa3, 0xf8, 0xb5, 0x09, 0x46,
+	0x25, 0xa2, 0xc4, 0x3d, 0x2c, 0x46, 0x58, 0x89, 0x14, 0x2e, 0x3b, 0x29, 0x9a, 0x96, 0x0c, 0xe7
+};
+
+/*
+ * Erased-page check table: each row holds the randomized image of all-0xFF
+ * OOB bytes for one randomizer seed (page) value, used to recognize a page
+ * that is erased but read back through the de-randomizer.
+ */
+unsigned char rdmz_FF[18][24] = {
+/*{0xac,0x77,0xed,0x0b,0x8a,0xde,0x0f,0xd8},
+{0xd6,0xbb,0xf6,0x85,0x45,0xef,0x07,0x6c},
+{0xeb,0xdd,0xfb,0x42,0x22,0x77,0x03,0xb6},
+{0x75,0xee,0xfd,0xa1,0x11,0xbb,0x81,0x5b},
+{0x3a,0x77,0x7e,0xd0,0x88,0x5d,0x40,0xad},
+{0x1d,0xbb,0xbf,0x68,0x44,0xae,0x20,0x56},
+{0x8e,0xdd,0x5f,0xb4,0xa2,0xd7,0x90,0x2b},
+{0x47,0xee,0xaf,0x5a,0xd1,0x6b,0xc8,0x95},
+{0xa3,0x77,0x57,0xad,0x68,0xb5,0xe4,0x4a},
+{0x51,0xbb,0xab,0xd6,0x34,0x5a,0xf2,0x25},
+{0x28,0xdd,0x55,0xeb,0x9a,0xad,0xf9,0x12},
+{0x94,0xee,0xaa,0x75,0x4d,0xd6,0xfc,0x09},
+{0xca,0x77,0xd5,0xba,0xa6,0xeb,0xfe,0x84},
+{0x65,0xbb,0x6a,0x5d,0x53,0xf5,0x7f,0xc2},
+{0xb2,0xdd,0xb5,0x2e,0x29,0x7a,0x3f,0x61},
+{0x59,0xee,0xda,0x17,0x14,0xbd,0x1f,0xb0}*//* byte 24 ~ byte 48 */
+{0x3d,0xf0,0xfb,0x7f,0x28,0xa5,0x31,0xc8,0xa9,0x4e,0xe3,0x23,0x9e,0x65,0x79,0xef,0x51,0x13,0x8c,0xab,0x5e,0xa5,0xa9,0x23},
+{0x1e,0xf8,0xfd,0xbf,0x94,0x52,0x18,0x64,0x54,0x27,0xf1,0x91,0xcf,0x32,0x3c,0xf7,0xa8,0x09,0xc6,0x55,0xaf,0xd2,0xd4,0x91},
+{0x8f,0xfc,0xfe,0xdf,0xca,0xa9,0x8c,0x32,0x2a,0x93,0xf8,0x48,0xe7,0x99,0x9e,0xfb,0x54,0x84,0x63,0x2a,0x57,0xe9,0xea,0x48},
+{0xc7,0x7e,0x7f,0xef,0x65,0xd4,0x46,0x99,0x95,0xc9,0x7c,0x24,0xf3,0xcc,0x4f,0xfd,0xaa,0x42,0x31,0x95,0xab,0x74,0x75,0x24},
+{0xe3,0x3f,0x3f,0xf7,0xb2,0xea,0x23,0x4c,0x4a,0x64,0x3e,0x12,0x79,0x66,0xa7,0xfe,0xd5,0xa1,0x18,0xca,0xd5,0xba,0xba,0x12},
+{0x71,0x9f,0x1f,0xfb,0xd9,0x75,0x11,0xa6,0xa5,0x32,0x9f,0x09,0x3c,0x33,0xd3,0x7f,0x6a,0x50,0x0c,0xe5,0x6a,0x5d,0x5d,0x89},
+{0xb8,0x4f,0x8f,0xfd,0x6c,0xba,0x08,0x53,0x52,0x19,0x4f,0x84,0x1e,0x99,0xe9,0x3f,0x35,0x28,0x06,0xf2,0x35,0x2e,0x2e,0x44},
+{0xdc,0xa7,0x47,0xfe,0x36,0xdd,0x04,0xa9,0x29,0x0c,0x27,0x42,0x0f,0x4c,0xf4,0x9f,0x9a,0x94,0x03,0x79,0x1a,0x97,0x97,0x22},
+{0x6e,0x53,0x23,0x7f,0x1b,0xee,0x02,0x54,0x94,0x06,0x93,0x21,0x87,0xa6,0x7a,0x4f,0xcd,0xca,0x81,0x3c,0x8d,0xcb,0x4b,0x91},
+{0xb7,0x29,0x91,0xbf,0x0d,0xf7,0x81,0x2a,0xca,0x03,0xc9,0x90,0x43,0xd3,0xbd,0xa7,0xe6,0xe5,0x40,0x9e,0x46,0xe5,0xa5,0xc8},
+{0x5b,0x14,0xc8,0xdf,0x06,0x7b,0x40,0x15,0x65,0x81,0xe4,0xc8,0xa1,0x69,0x5e,0xd3,0x73,0xf2,0x20,0xcf,0xa3,0x72,0x52,0xe4},
+{0xad,0x0a,0x64,0xef,0x03,0xbd,0x20,0x0a,0x32,0xc0,0x72,0x64,0xd0,0xb4,0x2f,0xe9,0x39,0x79,0x90,0x67,0xd1,0xb9,0x29,0x72},
+{0xd6,0x05,0xb2,0xf7,0x81,0x5e,0x10,0x85,0x99,0xe0,0x39,0x32,0xe8,0x5a,0x97,0xf4,0x1c,0x3c,0xc8,0xb3,0xe8,0x5c,0x94,0x39},
+{0xeb,0x82,0xd9,0xfb,0x40,0x2f,0x88,0x42,0x4c,0x70,0x1c,0x19,0xf4,0x2d,0xcb,0x7a,0x8e,0x9e,0x64,0x59,0xf4,0x2e,0x4a,0x1c},
+{0xf5,0xc1,0xec,0xfd,0xa0,0x97,0xc4,0x21,0xa6,0x38,0x8e,0x8c,0x7a,0x96,0xe5,0xbd,0x47,0x4f,0x32,0xac,0x7a,0x97,0xa5,0x8e},
+{0x7a,0xe0,0xf6,0xfe,0x50,0x4b,0x62,0x90,0x53,0x9c,0xc7,0x46,0x3d,0xcb,0xf2,0xde,0xa3,0x27,0x19,0x56,0xbd,0x4b,0x52,0x47}
+};
/* randomized bad-block marker patterns — presumably one row per marker variant; verify against users */
unsigned int rdmz_badblk[2][6] = {
{0x80040fc2,0x37ce5ad7,0xdc1cb156,0x10869a61,0x5473ecae,0xdc565aa1},
{0x400207e1,0x9be7ad6b,0x6e0ed8ab,0x8c3cd30,0xaa39f657,0x6e2b2d50}
};
+
/*
 * eSLC page mapping: logical page pair -> physical page pair.
 * After the first pair (0,1), the table selects pages (4k+2, 4k+3) of every
 * 4-page group — presumably the SLC-usable lower pages of each MLC group;
 * TODO confirm against the flash vendor's eSLC documentation.
 */
unsigned char eslc_map_table[128] = {
	0x0, 0x1,
	0x2, 0x3,
	0x6, 0x7,
	0xa, 0xb,
	0xe, 0xf,
	0x12, 0x13,
	0x16, 0x17,
	0x1a, 0x1b,
	0x1e, 0x1f,
	0x22, 0x23,
	0x26, 0x27,
	0x2a, 0x2b,
	0x2e, 0x2f,
	0x32, 0x33,
	0x36, 0x37,
	0x3a, 0x3b,
	0x3e, 0x3f,
	0x42, 0x43,
	0x46, 0x47,
	0x4a, 0x4b,
	0x4e, 0x4f,
	0x52, 0x53,
	0x56, 0x57,
	0x5a, 0x5b,
	0x5e, 0x5f,
	0x62, 0x63,
	0x66, 0x67,
	0x6a, 0x6b,
	0x6e, 0x6f,
	0x72, 0x73,
	0x76, 0x77,
	0x7a, 0x7b,
	0x7e, 0x7f,
	0x82, 0x83,
	0x86, 0x87,
	0x8a, 0x8b,
	0x8e, 0x8f,
	0x92, 0x93,
	0x96, 0x97,
	0x9a, 0x9b,
	0x9e, 0x9f,
	0xa2, 0xa3,
	0xa6, 0xa7,
	0xaa, 0xab,
	0xae, 0xaf,
	0xb2, 0xb3,
	0xb6, 0xb7,
	0xba, 0xbb,
	0xbe, 0xbf,
	0xc2, 0xc3,
	0xc6, 0xc7,
	0xca, 0xcb,
	0xce, 0xcf,
	0xd2, 0xd3,
	0xd6, 0xd7,
	0xda, 0xdb,
	0xde, 0xdf,
	0xe2, 0xe3,
	0xe6, 0xe7,
	0xea, 0xeb,
	0xee, 0xef,
	0xf2, 0xf3,
	0xf6, 0xf7,
	0xfa, 0xfb
};
+
+/*
+ * hardware specific Out Of Band information
+*/
+
+/*
+* new oob placement block for use with hardware ecc generation
+*/
+/*
+static struct nand_ecclayout wmt_oobinfo_2048 = {
+ .eccbytes = 7,
+ .eccpos = { 24, 25, 26, 27, 28, 29, 30},
+ .oobavail = 24,
+ .oobfree = {{0, 24} }
+};
+
+
+static struct nand_ecclayout wmt_12bit_oobinfo_4096 = {
+ .eccbytes = 20,
+ .eccpos = { 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43},
+ .oobavail = 24,
+ .oobfree = {{0, 24} }
+};
+
+static struct nand_ecclayout wmt_oobinfo_8192 = {
+ .eccbytes = 42,
+ .eccpos = { 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65},
+ .oobavail = 24,
+ .oobfree = {{0, 24} }
+};
+*/
/* OOB layout for 16 KiB pages: 70 ECC bytes at offsets 24..93, bytes 0..23 free for user data */
static struct nand_ecclayout wmt_oobinfo_16k = {
	.eccbytes = 70,
	.eccpos = { 24, 25, 26, 27, 28, 29, 30,
			31, 32, 33, 34, 35, 36, 37,
			38, 39, 40, 41, 42, 43, 44,
			45, 46, 47, 48, 49, 50, 51,
			52, 53, 54, 55, 56, 57, 58,
			59, 60, 61, 62, 63, 64, 65,
			66, 67, 68, 69, 70, 71, 72,
			73, 74, 75, 76, 77, 78, 79,
			80, 81, 82, 83, 84, 85, 86,
			87, 88, 89, 90, 91, 92, 93},
	.oobavail = 24,
	.oobfree = {{0, 24} }
};
+
+
/* Ick. The BBT code really ought to be able to work this bit out
   for itself from the above, at least for the 2KiB case
*/
/* OOB signatures of the primary ("Bbt0") and mirror ("1tbB") bad-block tables */
static uint8_t wmt_bbt_pattern_2048[] = { 'B', 'b', 't', '0' };
static uint8_t wmt_mirror_pattern_2048[] = { '1', 't', 'b', 'B' };

/* marker ("rdmz" reversed) flagging a randomized page, and the read-retry table signature */
static uint8_t wmt_rdmz[] = { 'z', 'm', 'd', 'r' };
static uint8_t retry_table[] = {'r','e','t','r','y','t','a','b','l','e'};
+
/* BBT descriptor locating the per-chip read-retry parameter table in the last blocks */
static struct nand_bbt_descr wmt_rdtry_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 0,		/* pattern at OOB offset 0 */
	.len = 10,		/* "retrytable" */
	.veroffs = 0,
	.maxblocks = 4,		/* search at most the last 4 blocks */
	.pattern = retry_table,
	.reserved_block_code = 1
};
+
/* primary bad-block-table descriptor: "Bbt0" pattern at OOB offset 4, version at 0 */
static struct nand_bbt_descr wmt_bbt_main_descr_2048 = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 4,
	.len = 4,
	.veroffs = 0,
	.maxblocks = 4,
	.pattern = wmt_bbt_pattern_2048,
	.reserved_block_code = 1
};
+
/*
 * mirror bad-block-table descriptor: "1tbB" pattern at OOB offset 4.
 * NOTE(review): unlike the main descriptor, no reserved_block_code is set
 * here — confirm whether that asymmetry is intentional.
 */
static struct nand_bbt_descr wmt_bbt_mirror_descr_2048 = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 4,
	.len = 4,
	.veroffs = 0,
	.maxblocks = 4,
	.pattern = wmt_mirror_pattern_2048,
};
+
+
+/* controller and mtd information */
+extern unsigned int wmt_read_oscr(void);
+/*static*/ void print_nand_register(struct mtd_info *mtd);
+void print_nand_buffer(char *value, unsigned int length);
+#ifdef NAND_DEBUG
+static void print_nand_buffer_int(unsigned int *value, unsigned int length);
+#endif
+
/* describes one set of NAND chips and its partition layout */
struct wmt_nand_set {
	int nr_chips;				/* number of chips in this set */
	int nr_partitions;			/* entries in *partitions */
	char *name;
	int *nr_map;				/* chip-number remap table — TODO confirm semantics */
	struct mtd_partition *partitions;
};
+
/*
 * Platform data for the WMT NAND controller. The first members mirror
 * struct platform_device's layout, with the partition table appended.
 */
struct wmt_nand_platform_data {
	const char * name;
	int id;
	struct device dev;
	u32 num_resources;
	struct resource * resource;

	const struct platform_device_id *id_entry;

	/* MFD cell pointer */
	struct mfd_cell *mfd_cell;

	/* arch specific additions */
	struct pdev_archdata archdata;
	struct mtd_partition *partitions;	/* board-specific partition table */
};
+
#if 0
	/* unused candidate platform-data layout (never compiled) */
	struct wmt_platform_nand {
	/* timing information for controller, all times in nanoseconds */

	int tacls;	/* time for active CLE/ALE to nWE/nOE */
	int twrph0;	/* active time for nWE/nOE */
	int twrph1;	/* time for release CLE/ALE from nWE/nOE inactive */

	int nr_sets;
	struct wmt_nand_set *sets;
	void (*select_chip)(struct s3c2410_nand_set *, int chip);
	}
#endif
+
struct wmt_nand_info;

/* per-device bundle: the mtd_info/nand_chip pair plus a link back to the controller state */
struct wmt_nand_mtd {
	struct mtd_info mtd;
	struct nand_chip chip;
	/*struct wmt_nand_set* set;*/
	struct wmt_nand_info *info;	/* owning controller */
	int scan_res;			/* result of the device scan at probe time */
};
+
+/* overview of the wmt nand state */
+
struct wmt_nand_info {
	/* mtd info */
	struct nand_hw_control controller;	/* shared lock/queue for the chips below */
	struct wmt_nand_mtd *mtds;
	struct wmt_platform_nand *platform;
	int oper_step;
	/* device info */
	struct device *device;
	struct resource *area;			/* claimed register MMIO region */
	void __iomem *reg;			/* mapped controller register base */
	int cpu_type;
	int datalen;
	int nr_data;
	int data_pos;
	int page_addr;
	dma_addr_t dmaaddr;			/* bus address of dmabuf */
	dma_addr_t last_bank_dmaaddr;
	int dma_finish;
	int phase;
	void *done_data; /* completion data */
	unsigned int isr_state;			/* ISR state machine bookkeeping */
	unsigned int isr_cmd;
	unsigned int cur_lpage;			/* current logical page — TODO confirm */
	unsigned int cur_page;
	unsigned int last_bank_col;
	unsigned int oob_col;			/* column where OOB starts in the transfer */
	//void (*done)(void *data);/* completion function */
	unsigned char *dmabuf;			/* kernel-virtual DMA bounce buffer */

#ifdef CONFIG_MTD_NAND_DIRECT_WRITE

	int vmalloc_flag;
	int sglen;
	struct scatterlist *sglist;
	dma_addr_t data_address0;
	dma_addr_t data_address1;
	dma_addr_t tempaddr;
	unsigned char *tempbuf;

#endif

	int ECC_bytes;				/* ECC bytes per data bank */
	int oob_ECC_bytes;			/* ECC bytes protecting the OOB area */
	int oob_ecc_error;
	int data_ecc_uncor_err; /* use read retry for data area has uncorrectable error*/
	int unc_bank;				/* bank index of the uncorrectable error */
	int unc_allFF;				/* set when the failing bank is actually erased */
	int bank_size;				/* data bytes per ECC bank (512 or 1024) */
	int ECC_mode;
	int oob_ECC_mode;
	unsigned int lst_wpage;
	int wr_page[WR_BUF_CNT];
	char banks;				/* number of ECC banks per page */
	char oob_max_bit_error;
};
+
+/* conversion functions */
+
+static struct wmt_nand_mtd *wmt_nand_mtd_toours(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct wmt_nand_mtd, mtd);
+}
+
+static struct wmt_nand_info *wmt_nand_mtd_toinfo(struct mtd_info *mtd)
+{
+ return wmt_nand_mtd_toours(mtd)->info;
+}
+
+/*
+static struct wmt_nand_info *to_nand_info(struct platform_device *dev)
+{
+ return platform_get_drvdata(dev);
+}
+*/
+/*
+static struct platform_device *to_platform(struct device *dev)
+{
+ return container_of(dev, struct platform_device, dev);
+}
+*/
#if 0
/* unused: fetch the platform data attached to the platform device */
static struct wmt_platform_nand *to_nand_plat(struct platform_device *dev)
{
	return dev->dev.platform_data;
}
#endif
+
/*
 * Copy a possibly double-quoted string from src into dst, stripping a
 * leading quote and stopping at a closing quote, end-of-string, or when
 * dst (of capacity `size` bytes) is full. dst is always NUL-terminated.
 *
 * Fix: guard size <= 0 — the original unconditionally wrote the
 * terminator, overflowing a zero-sized destination.
 */
void copy_filename(char *dst, char *src, int size)
{
	if (size <= 0)
		return;	/* no room even for the terminator */

	/* skip a leading double quote, if present */
	if (*src && (*src == '"')) {
		++src;
		--size;
	}

	while ((--size > 0) && *src && (*src != '"'))
		*dst++ = *src++;
	*dst = '\0';
}
+
+int set_ECC_mode(struct mtd_info *mtd)
+{
+ unsigned int ECCbit = mtd->dwECCBitNum;
+ unsigned int ECC_mode;
+ switch (ECCbit) {
+ case 1:
+ ECC_mode = ECC1bit;
+ break;
+ case 4:
+ ECC_mode = ECC4bit;
+ break;
+ case 8:
+ ECC_mode = ECC8bit;
+ break;
+ case 12:
+ ECC_mode = ECC12bit;
+ break;
+ case 16:
+ ECC_mode = ECC16bit;
+ break;
+ case 24:
+ ECC_mode = ECC24bitPer1K;
+ break;
+ case 40:
+ ECC_mode = ECC40bitPer1K;
+ break;
+ case 60:
+ ECC_mode = ECC60bitPer1K;
+ break;
+ default:
+ printk("ecc mode input not support ECCbit=%d\n", ECCbit);
+ return -1;
+ }
+ return ECC_mode;
+}
+void calculate_ECC_info(struct mtd_info *mtd, struct ECC_size_info *ECC_size)
+{
+ switch (ECC_size->ecc_engine) {
+ case ECC4bit:
+ ECC_size->oob_ecc_bits_count = ECC_size->ecc_bits_count = ECC4bit_bit_count;
+ ECC_size->oob_max_bit_error = ECC_size->max_bit_error = 4;
+ ECC_size->banks = mtd->realwritesize/512;
+ ECC_size->bank_size = 512;
+ ECC_size->bank_offset = mtd->realwritesize/ECC_size->banks + ECC4bit_byte_count;
+ ECC_size->oob_ECC_bytes = ECC_size->ECC_bytes = ECC4bit_byte_count;
+ ECC_size->oob_ECC_mode = ECC4bit;
+ ECC_size->unprotect = mtd->realoobsize - ECC4bit_byte_count*(ECC_size->banks+1) - 24;
+ break;
+ case ECC8bit:
+ ECC_size->oob_ecc_bits_count = ECC_size->ecc_bits_count = ECC8bit_bit_count;
+ ECC_size->oob_max_bit_error = ECC_size->max_bit_error = 8;
+ ECC_size->banks = mtd->realwritesize/512;
+ ECC_size->bank_size = 512;
+ ECC_size->bank_offset = mtd->realwritesize/ECC_size->banks + ECC8bit_byte_count;
+ ECC_size->oob_ECC_bytes = ECC_size->ECC_bytes = ECC8bit_byte_count;
+ ECC_size->oob_ECC_mode = ECC8bit;
+ ECC_size->unprotect = mtd->realoobsize - ECC8bit_byte_count*(ECC_size->banks+1) - 24;
+ break;
+ case ECC12bit:
+ ECC_size->oob_ecc_bits_count = ECC_size->ecc_bits_count = ECC12bit_bit_count;
+ ECC_size->oob_max_bit_error = ECC_size->max_bit_error = 12;
+ ECC_size->banks = mtd->realwritesize/512;
+ ECC_size->bank_size = 512;
+ ECC_size->bank_offset = mtd->realwritesize/ECC_size->banks + ECC12bit_byte_count;
+ ECC_size->oob_ECC_bytes = ECC_size->ECC_bytes = ECC12bit_byte_count;
+ ECC_size->oob_ECC_mode = ECC12bit;
+ ECC_size->unprotect = mtd->realoobsize - ECC12bit_byte_count*(ECC_size->banks+1) - 24;
+ break;
+ case ECC16bit:
+ ECC_size->oob_ecc_bits_count = ECC_size->ecc_bits_count = ECC16bit_bit_count;
+ ECC_size->oob_max_bit_error = ECC_size->max_bit_error = 16;
+ ECC_size->banks = mtd->realwritesize/512;
+ ECC_size->bank_size = 512;
+ ECC_size->bank_offset = mtd->realwritesize/ECC_size->banks + ECC16bit_byte_count;
+ ECC_size->oob_ECC_bytes = ECC_size->ECC_bytes = ECC16bit_byte_count;
+ ECC_size->oob_ECC_mode = ECC16bit;
+ ECC_size->unprotect = mtd->realoobsize - ECC16bit_byte_count*(ECC_size->banks+1) - 24;
+ break;
+ case ECC24bitPer1K:
+ ECC_size->oob_ecc_bits_count = ECC_size->ecc_bits_count = ECC24bitPer1K_bit_count;
+ ECC_size->oob_max_bit_error = ECC_size->max_bit_error = 24;
+ ECC_size->banks = mtd->realwritesize/1024;
+ ECC_size->bank_size = 1024;
+ ECC_size->bank_offset = mtd->realwritesize/ECC_size->banks + ECC24bitPer1K_byte_count;
+ ECC_size->oob_ECC_bytes = ECC_size->ECC_bytes = ECC24bitPer1K_byte_count;
+ ECC_size->oob_ECC_mode = ECC24bitPer1K;
+ ECC_size->unprotect = mtd->realoobsize - ECC24bitPer1K_byte_count*(ECC_size->banks+1) - 24;
+ break;
+ case ECC40bitPer1K:
+ ECC_size->ecc_bits_count = ECC40bitPer1K_bit_count;
+ ECC_size->oob_ecc_bits_count = ECC24bitPer1K_bit_count;
+ ECC_size->max_bit_error = 40;
+ ECC_size->oob_max_bit_error = 24;
+ ECC_size->banks = mtd->realwritesize/1024;
+ ECC_size->bank_size = 1024;
+ ECC_size->bank_offset = mtd->realwritesize/ECC_size->banks + ECC40bitPer1K_byte_count;
+ ECC_size->ECC_bytes = ECC40bitPer1K_byte_count;
+ ECC_size->oob_ECC_bytes = ECC24bitPer1K_byte_count;
+ ECC_size->oob_ECC_mode = ECC24bitPer1K;
+ ECC_size->unprotect = mtd->realoobsize - ECC40bitPer1K_byte_count*ECC_size->banks - ECC24bitPer1K_byte_count - 24;
+ break;
+ case ECC60bitPer1K:
+ ECC_size->ecc_bits_count = ECC60bitPer1K_bit_count;
+ ECC_size->oob_ecc_bits_count = ECC24bitPer1K_bit_count;
+ ECC_size->max_bit_error = 60;
+ ECC_size->oob_max_bit_error = 24;
+ ECC_size->banks = mtd->realwritesize/1024;
+ ECC_size->bank_size = 1024;
+ ECC_size->bank_offset = mtd->realwritesize/ECC_size->banks + ECC60bitPer1K_byte_count;
+ ECC_size->ECC_bytes = ECC60bitPer1K_byte_count;
+ ECC_size->oob_ECC_bytes = ECC24bitPer1K_byte_count;
+ ECC_size->oob_ECC_mode = ECC24bitPer1K;
+ ECC_size->unprotect = mtd->realoobsize - ECC60bitPer1K_byte_count*ECC_size->banks - ECC24bitPer1K_byte_count - 24;
+ break;
+ default:
+ printk("%d-bit ECC engine is not support:\r\n", ECC_size->ecc_engine);
+ break;;
+ }
+ return;
+}
+
/*
 * Copy the partition name preceding the first ':' in src into buffer,
 * NUL-terminate it, and point *endpp at the ':' in src.
 * Returns the name length, or -1 on NULL arguments or a missing ':'.
 *
 * Fixes: the original wrote the terminator at buffer[i] AFTER advancing
 * buffer by i bytes (landing at offset 2*i), and ran past the end of src
 * when no ':' was present.
 */
int get_partition_name(const char *src, char** endpp, char* buffer)
{
	int i = 0;

	if (NULL == src || NULL == buffer)
		return -1;

	while (*src != ':') {
		if (*src == '\0')
			return -1;	/* malformed input: no ':' separator */
		*buffer++ = *src++;
		i++;
	}
	if (endpp)
		*endpp = (char *)src;
	*buffer = '\0';	/* buffer already advanced past the copied name */
	return i;
}
+
+int search_mtd_table(char *string, int *ret)
+{
+ int i, err = 0;
+ for (i = 0; i < NUM_NAND_PARTITIONS; i++) {
+ // printk(KERN_DEBUG "MTD dev%d size: %8.8llx \"%s\"\n",
+ //i, nand_partitions[i].size, nand_partitions[i].name);
+ if (strcmp(string, nand_partitions[i].name) == 0) {
+ *ret = i;
+ break;
+ }
+ }
+ return err;
+}
+
+/*
+ * Get the flash and manufacturer id and lookup if the type is supported
+ */
+int get_flash_info_from_env(unsigned int id, unsigned int id2, struct WMT_nand_flash_dev *type)
+{
+ int ret, sf_boot_nand_en = 0x4000;
+ char varval[200], *s = NULL, *tmp, varname[] = "wmt.io.nand";
+ unsigned int varlen = 200, value;
+
+ value = STRAP_STATUS_VAL;
+ if ((value&0x4008) != sf_boot_nand_en)
+ return 1;
+ ret = wmt_getsyspara(varname, varval, &varlen);
+ if (!ret) {
+ s = varval;
+ value = simple_strtoul(s, &tmp, 16); type->dwFlashID = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwBlockCount = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwPageSize = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwSpareSize = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwBlockSize = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwAddressCycle = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwBI0Position = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwBI1Position = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwBIOffset = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwDataWidth = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwPageProgramLimit = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwSeqRowReadSupport = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwSeqPageProgram = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwNandType = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwECCBitNum = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwRWTimming = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwTadl = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwDDR = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwRetry = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwRdmz = value; s = tmp+1;
+ value = simple_strtoul(s, &tmp, 16); type->dwFlashID2 = value; s = tmp+1;
+ copy_filename(type->ProductName, s, MAX_PRODUCT_NAME_LENGTH);
+
+ if (type->dwBlockCount < 1024 || type->dwBlockCount > 16384) {
+ printk(KERN_INFO "dwBlockCount = 0x%x is abnormal\n", type->dwBlockCount);
+ return 2;
+ }
+ if (type->dwPageSize < 512 || type->dwPageSize > 16384) {
+ printk(KERN_INFO "dwPageSize = 0x%x is abnormal\n", type->dwPageSize);
+ return 2;
+ }
+ if (type->dwPageSize > 512)
+ type->options = NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR;
+ if (type->dwBlockSize < (1024*64) || type->dwBlockSize > (16384*256)) {
+ printk(KERN_INFO "dwBlockSize = 0x%x is abnormal\n", type->dwBlockSize);
+ return 2;
+ }
+ if (type->dwAddressCycle < 3 || type->dwAddressCycle > 5) {
+ printk(KERN_INFO "dwAddressCycle = 0x%x is abnoraml\n", type->dwAddressCycle);
+ return 2;
+ }
+ if (type->dwBI0Position != 0 &&
+ type->dwBI0Position > ((type->dwBlockSize/type->dwPageSize)-1)) {
+ printk(KERN_INFO "dwBI0Position = 0x%x is abnoraml\n", type->dwBI0Position);
+ return 2;
+ }
+ if (type->dwBI1Position != 0 &&
+ type->dwBI1Position > ((type->dwBlockSize/type->dwPageSize)-1)) {
+ printk(KERN_INFO "dwBI1Position = 0x%x is abnoraml\n", type->dwBI1Position);
+ return 2;
+ }
+ if (type->dwBIOffset != 0 && type->dwBIOffset != 5) {
+ printk(KERN_INFO "dwBIOffset = 0x%x is abnoraml\n", type->dwBIOffset);
+ return 2;
+ }
+ if (type->dwDataWidth != 0/* && type->dwDataWidth != 1*/) {
+ printk(KERN_INFO "dwDataWidth = 0x%x is abnoraml\n", type->dwDataWidth);
+ return 2;
+ }
+ printk(KERN_DEBUG "dwFlashID = 0x%x\n", type->dwFlashID);
+ printk(KERN_DEBUG "dwBlockCount = 0x%x\n", type->dwBlockCount);
+ printk(KERN_DEBUG "dwPageSize = 0x%x\n", type->dwPageSize);
+ printk(KERN_DEBUG "dwSpareSize = 0x%x\n", type->dwSpareSize);
+ printk(KERN_DEBUG "dwBlockSize = 0x%x\n", type->dwBlockSize);
+ printk(KERN_DEBUG "dwAddressCycle = 0x%x\n", type->dwAddressCycle);
+ printk(KERN_DEBUG "dwBI0Position = 0x%x\n", type->dwBI0Position);
+ printk(KERN_DEBUG "dwBI1Position = 0x%x\n", type->dwBI1Position);
+ printk(KERN_DEBUG "dwBIOffset = 0x%x\n", type->dwBIOffset);
+ printk(KERN_DEBUG "dwDataWidth = 0x%x\n", type->dwDataWidth);
+ printk(KERN_DEBUG "dwPageProgramLimit = 0x%x\n", type->dwPageProgramLimit);
+ printk(KERN_DEBUG "dwSeqRowReadSupport = 0x%x\n", type->dwSeqRowReadSupport);
+ printk(KERN_DEBUG "dwSeqPageProgram = 0x%x\n", type->dwSeqPageProgram);
+ printk(KERN_DEBUG "dwNandType = 0x%x\n", type->dwNandType);
+ printk(KERN_DEBUG "dwECCBitNum = 0x%x\n", type->dwECCBitNum);
+ printk(KERN_DEBUG "dwRWTimming = 0x%x\n", type->dwRWTimming);
+ printk(KERN_DEBUG "dwTadl = 0x%x\n", type->dwTadl);
+ printk(KERN_DEBUG "dwDDR = 0x%x\n", type->dwDDR);
+ printk(KERN_DEBUG "dwRetry = 0x%x\n", type->dwRetry);
+ printk(KERN_DEBUG "dwRdmz = 0x%x\n", type->dwRdmz);
+ printk(KERN_DEBUG "dwFlashID2 = 0x%x\n", type->dwFlashID2);
+ printk(KERN_DEBUG "cProductName = %s\n", type->ProductName);
+ if (id != type->dwFlashID || id2 != type->dwFlashID2) {
+ printk(KERN_ERR "env flash id is different from real id = 0x%x 0x%x\n",
+ type->dwFlashID, type->dwFlashID2);
+ return 3;
+ }
+ }
+ return ret;
+}
+
/*
 * Compute NFC clock divisor and timing counts from the flash spec words.
 * spec_clk packs tREA/tREH/tWP/tWH (one byte each, high to low);
 * spec_tadl packs tWB/tWHR (top bytes) and tADL (low 16 bits), all in ns.
 * Results go into *nfc_rw. Returns 0 on success, 1 if no divisor gives a
 * usable clock (caller falls back to safe defaults).
 */
static int wmt_calc_clock(struct mtd_info *mtd, unsigned int spec_clk, unsigned int spec_tadl, struct NFC_RW_T *nfc_rw)
{
	unsigned int i, div1=0, clk1, clk = 0, PLLB;
	unsigned int tREA, tREH, tADL, tWP, divisor = 11, tWH, tWB, tWHR, margin;

	/*print_nand_register(mtd);*/
	/* decode the PLLB register into a multiplier for SOURCE_CLOCK */
	PLLB = *(volatile unsigned int *)PMPMB_ADDR;
	PLLB = (2*(((PLLB>>16)&0x7F)+1))/((((PLLB>>8)&0x1F)+1)*(1<<(PLLB&3)));
	printk(KERN_DEBUG "PLLB=0x%x, spec_clk=0x%x\n", PLLB, spec_clk);
	tREA = (spec_clk>>24)&0xFF;
	tREH = (spec_clk>>16)&0xFF;
	tWP = (spec_clk>>8)&0xFF;
	tWH = spec_clk&0xFF;
	tWB = (spec_tadl>>24)&0xFF;
	tWHR = (spec_tadl>>16)&0xFF;
	tADL = spec_tadl&0xFFFF;
	/* smallest divisor that keeps the NFC at or below MAX_SPEED_MHZ */
	for (i = 1; i < 16; i++) {
		if (MAX_SPEED_MHZ >= (PLLB*SOURCE_CLOCK)/i) {
			div1 = i;
			break;
		}
	}

	/* required read-cycle margin in 0.1ns units; two IDs get a tighter margin */
	margin = (tREA+10)*10+15;
	if (mtd->id == 0x98D78493 && mtd->id2 == 0x72570000)
		margin = (tREA+6)*10;
	else if (mtd->id == 0x45D78493 && mtd->id2 == 0x72570000)
		margin = (tREA+6)*10;

	/* raise the divisor until two clock periods cover the margin */
	for (i = div1; i < 32; i++) {
		clk1 = (10000 * i)/(PLLB*SOURCE_CLOCK);	/* period in 0.1ns */
		if ((2*clk1) >= margin) {
			divisor = i;
			clk = clk1/10;	/* period in ns */
			//printk("div=%d tREA=%d 2*clk=%d\n", i, (tREA+10)*10+15, clk*2);
			break;
		}
	}
	nfc_rw->T_R_hold = 1;
	nfc_rw->T_R_setup = 1;
	nfc_rw->divisor = divisor;
	nfc_rw->T_W_hold = 1;
	nfc_rw->T_W_setup = 1;

	/* convert each ns requirement into a cycle count (capped at 50) */
	i = 0;
	while ((i*clk) < tADL && i < 50)
		i++;
	nfc_rw->T_TADL = i;
	i = 0;
	while ((i*clk) < tWHR && i < 50)
		i++;
	nfc_rw->T_TWHR = i;
	i = 0;
	while ((i*clk) < tWB && i < 50)
		i++;
	nfc_rw->T_TWB = i;

	/* pack read hold/cycle and write setup/cycle into the NFCR14 format */
	nfc_rw->T_RHC_THC =
		((nfc_rw->T_R_hold&0xFF) << 12) +
		(((nfc_rw->T_R_setup&0xFF) + (nfc_rw->T_R_hold&0xFF)) << 8) +
		((nfc_rw->T_W_setup&0xF) << 4) +
		((nfc_rw->T_W_setup + nfc_rw->T_W_hold)&0xF);

	if ((MAX_SPEED_MHZ < (PLLB*SOURCE_CLOCK)/(divisor)) || clk == 0 || clk > 45)
		return 1;

	return 0;
}
/*
 * The #if 0'd old_wmt_calc_clock() that lived here was never-compiled
 * dead code superseded by wmt_calc_clock() above; it has been removed.
 */
+
/*
 * Minimal controller bring-up before the chip is identified: 2KiB page
 * mode, write-protect off, direct mapping, default timing, interrupt
 * status and OOB control cleared. (mtd is currently unused.)
 */
static void wmt_nfc_init(struct wmt_nand_info *info, struct mtd_info *mtd)
{
	writeb((PAGE_2K|WP_DISABLE|DIRECT_MAP), info->reg + NFCR12_NAND_TYPE_SEL);

	/* conservative default read/write cycle timing */
	writel(0x2424, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
	/* clear any stale busy-to-ready status — presumably write-1-to-clear; confirm */
	writeb(B2R, info->reg + NFCRb_NFC_INT_STAT);
	writeb(0x0, info->reg + NFCRd_OOB_CTRL);

}
+
/*
 * Full controller configuration for the identified flash: page-size
 * field, bus width, calculated (or fallback) timing, clock divisor, and
 * optional DDR mode setup.
 */
void wmt_init_nfc(struct mtd_info *mtd, unsigned int spec_clk, unsigned int spec_tadl, int busw)
{
	unsigned int status = 0, page_size, divisor, NFC_RWTimming;
	struct nand_chip *chip = mtd->priv;
	struct NFC_RW_T nfc_rw;
	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
	unsigned short cfg = 0;

	writeb(B2R, info->reg + NFCRb_NFC_INT_STAT);
	writel(0x0, info->reg + NFCRd_OOB_CTRL);

	/* map the real page size onto the controller's page-size selector */
	if (mtd->realwritesize == 2048) {
		page_size = PAGE_2K;
	} else if (mtd->realwritesize == 4096) {
		page_size = PAGE_4K;
	} else if (mtd->realwritesize == 6144) {
		/* 6KiB parts are driven in 8K mode — TODO confirm */
		page_size = PAGE_8K;
	} else if (mtd->realwritesize == 8192) {
		page_size = PAGE_8K;
	} else if (mtd->realwritesize == 16384 || mtd->realwritesize == 15360) {
		page_size = PAGE_16K;
	} else
		page_size = PAGE_32K;

	cfg = WP_DISABLE|DIRECT_MAP|page_size;
	/* after probe, enable the read-delay compensation for non-DDR parts */
	if (prob_end == 1 && !mtd->dwDDR)
		cfg |= RD_DLY;

	if (busw) {
		cfg |= WIDTH_16;
		printk(KERN_WARNING "nand flash use 16-bit witdth mode\n");
	}
	writeb(cfg, info->reg + NFCR12_NAND_TYPE_SEL);

	/* compute timing from the spec words; fall back to safe defaults on failure */
	status = wmt_calc_clock(mtd, spec_clk, spec_tadl, &nfc_rw);
	if (status) {
		printk(KERN_ERR "timming calculate fail\n");
		nfc_rw.T_RHC_THC = 0x2424;
		nfc_rw.T_TADL = 0x3c;
		nfc_rw.T_TWHR = 0x12;
		nfc_rw.T_TWB = 0xa;
		nfc_rw.divisor = 10;
	}
	NFC_RWTimming = nfc_rw.T_RHC_THC;
	divisor = nfc_rw.divisor;
	/* DDR parts get a slower clock during probing */
	if (prob_end == 0 && mtd->dwDDR)
		divisor = divisor + 5;

	/* some Micron/other IDs need an explicit timing-mode feature set */
	switch(mtd->id)
	{
		case 0x2C88044B:
		case 0x2C68044A:
		case 0x2C64444B:
		case 0x2C44444B:
		case 0x2C48044A:
		case 0x8968044A:
			//NFC_RWTimming = 0x2424;
			//divisor = 9;
			//nand_get_feature(mtd, 1);
			nand_set_feature(mtd, NAND_SET_FEATURE, 01, 05);
			nand_get_feature(mtd, 1);
			break;
	}
	//chip->select_chip(mtd, -1);
	/* program the NAND clock divisor, waiting for the PMC to go idle around the write */
	if (!status) {
		while ((*(volatile unsigned long *)(PMCS_ADDR+0x18))&0x7F0038)
			;
		*(volatile unsigned long *)PMNAND_ADDR = divisor;
		while ((*(volatile unsigned long *)(PMCS_ADDR+0x18))&0x7F0038)
			;
	}
	divisor = *(volatile unsigned long *)PMNAND_ADDR;
	/*
	 * NOTE(review): this Hynix special case is dead — NFC_RWTimming is
	 * unconditionally overwritten by the if/else just below. Confirm
	 * which assignment is the intended one.
	 */
	if (((mtd->id>>24)&0xFF) == NAND_MFR_HYNIX) {
		if (prob_end == 1)
			NFC_RWTimming = 0x1312;//0x2424;
		else
			NFC_RWTimming = 0x2424;
	}

	if (prob_end == 1)
		NFC_RWTimming = 0x1212;
	else
		NFC_RWTimming = 0x2424;

	printk(KERN_NOTICE "TWB=%dT, tWHR=%dT, tadl=%dT, div=0x%x, (RH/RC/WH/WC)=0x%x\n",
	nfc_rw.T_TWB, nfc_rw.T_TWHR, nfc_rw.T_TADL, divisor, NFC_RWTimming);
	writel((nfc_rw.T_TWB<<16) + (nfc_rw.T_TWHR<<8) + nfc_rw.T_TADL, info->reg + NFCRe_CALC_TADL);
	writel(NFC_RWTimming, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);

	/* DDR parts: switch the interface mode, then enable DDR in the controller */
	if (mtd->dwDDR) {
		if (mtd->dwDDR == 1) {
			if (mtd->dwRdmz)
				reset_nfc(mtd, NULL, 3);
			nand_get_feature(mtd, 0x80);
			nand_set_feature(mtd, NAND_SET_FEATURE, 0x80, 0);
			nand_get_feature(mtd, 0x80);
		}
		writel(0x0101, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
		writeb(0x7F, info->reg + NFCR7_DLYCOMP);
		writeb(readb(info->reg + NFCR12_NAND_TYPE_SEL)|0x80, info->reg + NFCR12_NAND_TYPE_SEL);
	}
	printk("DDR=%d\n", mtd->dwDDR);
	/*print_nand_register(mtd);*/
	chip->select_chip(mtd, -1);
}
+
#if 0
/* unused helpers toggling redundant-area output and Hamming-ECC OOB read bits */
static void disable_redunt_out_bch_ctrl(struct wmt_nand_info *info, int flag)
{
	if (flag == 1)
		writeb(readb(info->reg + NFCRd_OOB_CTRL)|RED_DIS, info->reg + NFCRd_OOB_CTRL);
	else
		writeb(readb(info->reg + NFCRd_OOB_CTRL)&(~RED_DIS), info->reg + NFCRd_OOB_CTRL);
}
static void redunt_read_hm_ecc_ctrl(struct wmt_nand_info *info, int flag)
{
	if (flag == 1)
		writeb(readb(info->reg + NFCRd_OOB_CTRL) | OOB_READ, info->reg + NFCRd_OOB_CTRL);
	else
		writeb(readb(info->reg + NFCRd_OOB_CTRL) & (~OOB_READ), info->reg + NFCRd_OOB_CTRL);
}
#endif
+
/*
 * Select the ECC engine mode in NFCR9, enable the BCH interrupt for any
 * multi-bit engine (disable it for 1-bit Hamming), then resume the engine.
 */
static void set_ecc_engine(struct wmt_nand_info *info, int type)
{
	/*struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);*/
	/* replace the ECC_MODE field with the requested engine type */
	writel((readl(info->reg + NFCR9_ECC_BCH_CTRL) & (~ECC_MODE)) | type,
	info->reg + NFCR9_ECC_BCH_CTRL);

	if (type > ECC1bit) { /* enable BCH ecc interrupt */
		writel(readl(info->reg + NFCR9_ECC_BCH_CTRL) | BCH_INT_EN, info->reg + NFCR9_ECC_BCH_CTRL);
	} else
		writel(readl(info->reg + NFCR9_ECC_BCH_CTRL) & (~BCH_INT_EN), info->reg + NFCR9_ECC_BCH_CTRL);

	/* kick the engine out of any paused-read state */
	writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
}
+
+
/*
 * Wait for the flash busy-to-ready (B2R) flag, then clear it.
 * Returns 0 on success, -1 if the flag never rises (~1M polls),
 * -2 if the flag will not clear.
 */
static int wmt_nand_ready(struct mtd_info *mtd)
{
	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
	//unsigned int b2r_stat;
	int i = 0;

	while (1) {
		if (readb(info->reg + NFCRb_NFC_INT_STAT) & B2R)
			break;
		if ((++i>>20)) {	/* give up after 2^20 polls */
			printk(KERN_ERR "nand flash is not ready\n");
			/*print_nand_register(mtd);*/
			/* while (1);*/
			return -1;
		}
	}
	//b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
	/* clear B2R — presumably write-1-to-clear; confirm against the datasheet */
	writeb(B2R, info->reg + NFCRb_NFC_INT_STAT);
	wmb();
	if (readb(info->reg + NFCRb_NFC_INT_STAT) & B2R) {
		printk(KERN_ERR "NFC err : B2R status not clean\n");
		return -2;
	}
	return 0;
}
+
+
+static int wmt_nfc_transfer_ready(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i = 0;
+
+ while (1) {
+ if (!(readb(info->reg + NFCRa_NFC_STAT) & NFC_BUSY))
+ break;
+
+ if (++i>>20)
+ return -3;
+ }
+ return 0;
+}
+/* Vincent 2008.11.3*/
+static int wmt_wait_chip_ready(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i = 0;
+
+ while (1) {
+ if ((readb(info->reg + NFCRa_NFC_STAT) & FLASH_RDY))
+ break;
+ if (++i>>20)
+ return -3;
+ }
+ return 0;
+}
+static int wmt_wait_cmd_ready(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i = 0;
+
+ while (1) {
+ if (!(readb(info->reg + NFCRa_NFC_STAT) & NFC_CMD_RDY))
+ break;
+ if (++i>>20)
+ return -3;
+ }
+ return 0;
+}
+
+/* #if (NAND_PAGE_SIZE == 512) Vincent 2008.11.4
+static int wmt_wait_dma_ready(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i = 0;
+
+ while (1) {
+ if (!(readb(info->reg + NFC_IDLE) & 0x02))
+ break;
+ if (++i>>20) {
+ printk(KERN_ERR"\r DMA NOT Ready!\n");
+ print_nand_register(mtd);
+ return -3;
+ }
+ }
+ return 0;
+}
+#endif Vincent 2008.11.4*/
+
+/*
+ * Busy-wait until the NAND controller leaves the BUSY state, or until a
+ * BCH ECC event (ERR_CORRECT | BCH_ERR both set) is flagged -- the ECC
+ * engine pauses the transfer on an error, so BUSY would never clear on
+ * its own in that case.
+ * Gives up silently after ~2^20 polls; callers get no error indication.
+ */
+static void wmt_wait_nfc_ready(struct wmt_nand_info *info)
+{
+	unsigned int bank_stat1, i = 0;
+	while (1) {
+		/* NOTE(review): INT_STAT is sampled before the BUSY check,
+		 * presumably so an ECC event firing between the two reads is
+		 * not missed -- confirm against the NFC datasheet. */
+		bank_stat1 = readw(info->reg + NFCRb_NFC_INT_STAT);
+		if (!(readb(info->reg + NFCRa_NFC_STAT) & NFC_BUSY))
+			break;
+		else if ((bank_stat1 & (ERR_CORRECT | BCH_ERR)) == (ERR_CORRECT | BCH_ERR))
+			break;
+
+		if (i>>20)
+			return;
+		i++;
+	}
+}
+
+/*
+ * Flip (correct) one bit inside a byte, as reported by the BCH engine.
+ * @c:   pointer to the byte containing the erroneous bit
+ * @pos: bit position within the byte (0..7)
+ *
+ * The original expression rebuilt the byte bit-by-bit
+ * ((c^m)&m | c&~m); that reduces to a plain XOR of the target bit,
+ * which is exactly what a BCH single-bit correction is.  The dead
+ * "#if 0" block referencing undeclared variables has been removed.
+ */
+static void bit_correct(uint8_t *c, uint8_t pos)
+{
+	c[0] ^= (uint8_t)(0x01 << pos);
+}
+
+/*
+ * flag = 0, need check BCH ECC
+ * flag = 1, don't check ECC
+ * flag = 2, need check Hamming ECC
+ * (note: wmt_nfc_wait_idle() below currently ignores 'flag')
+*/
+
+/*
+ * Spin until NFCR15_IDLE_STAT reports the controller idle.
+ * Returns 0 on idle; after ~2^20 polls logs a timeout, dumps the NFC
+ * registers and returns -1.
+ */
+static int NFC_WAIT_IDLE(struct mtd_info *mtd)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	int poll;
+
+	for (poll = 0; poll <= (1 << 20); poll++) {
+		if (readb(info->reg + NFCR15_IDLE_STAT) & NFC_IDLE)
+			return 0;
+	}
+	printk(KERN_NOTICE "nfc_wait_idle() time out\n");
+	print_nand_register(mtd);
+	return -1;
+}
+
+/*
+ * Wait for NFC idle, then issue READ_RESUME so the next bank is read and
+ * its BCH ECC calculated.  READ_RESUME is written on the timeout path as
+ * well, so the pipeline is never left paused.
+ * The flag/command/column/page_addr arguments are unused; they are kept
+ * for interface compatibility with existing callers.
+ * Returns 0 on success, -1 on timeout (~2^20 polls).
+ */
+static int wmt_nfc_wait_idle(struct mtd_info *mtd, unsigned int flag, int command,
+int column, unsigned int page_addr)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	int poll;
+
+	for (poll = 0; poll <= (1 << 20); poll++) {
+		if (readb(info->reg + NFCR15_IDLE_STAT) & NFC_IDLE) {
+			/* continue read next bank and calc BCH ECC */
+			writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+			return 0;
+		}
+	}
+	writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+	printk(KERN_NOTICE "nfc_wait_idle time out\n");
+	print_nand_register(mtd);
+	return -1;
+}
+
+/*
+ * Check whether the 4-byte randomizer mark stored at bytes 4..7 of 'buf'
+ * matches the expected wmt_rdmz pattern.
+ * Returns 0 when at least 2 of the 4 bytes match (mark considered
+ * present), 1 otherwise.  A partial match (1..3 bytes) is logged as
+ * suspicious.  'size', 'oob' and 'mtd' are unused apart from debug code.
+ */
+int check_rdmz_mark(unsigned int *buf, int size, int oob, struct mtd_info *mtd)
+{
+	/*struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);*/
+	int i = 0, k = 0;
+	uint8_t *bm = (uint8_t *) &buf[1];
+	for (i = 0; i < 4; i++) {
+		if (bm[i] == wmt_rdmz[i])
+			k++;
+	}
+	if (k > 0 && k < 4) {
+		/* NOTE(review): (buf-1) dereferences one word BEFORE the
+		 * buffer passed in -- debug-only, but it looks like an
+		 * out-of-bounds read; confirm callers always have a valid
+		 * word in front of 'buf'. */
+		printk("buf=0x%x 0x%x mark=0x%x\n", buf[0], *(unsigned int *)(buf-1), *(unsigned int *)wmt_rdmz);
+		//printk("nfcrf=%x oob=%d page=0x%x\n", readl(info->reg + NFCRf_CALC_RDMZ), oob, info->cur_page);
+	}
+	if (k >= 2)
+		return 0;
+	else
+		return 1;
+}
+/* Fill the first 'size' 32-bit words of 'buf' with the erased-flash
+ * pattern 0xFFFFFFFF. */
+void set_FIFO_FF(unsigned int *buf, int size)
+{
+	int idx = size;
+
+	while (idx-- > 0)
+		buf[idx] = 0xFFFFFFFF;
+}
+/*
+ * Decide whether a bank that failed BCH decoding is really an erased
+ * page.  Compares 'size' 32-bit words of 'buf' against the expected
+ * erased-page pattern: with HW randomization active (dwRdmz set,
+ * bbt_sw_rdmz clear) that is the scrambled all-FF table rdmz_FF (or the
+ * bad-block variant rdmz_badblk), selected by the current page number;
+ * otherwise plain 0xFFFFFFFF (or 0x00000000) words.
+ * Returns 1 when more than half the words match (treat as erased),
+ * 0 otherwise.
+ */
+int check_all_FF(unsigned int *buf, int size, struct mtd_info *mtd)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	int i = 0, j = 0, k = 0;
+	unsigned int *bf = (unsigned int *)&rdmz_FF[info->cur_page%16][0];
+	//unsigned int *bf = (unsigned int *)&info->dmabuf[24];
+	unsigned int *bf1 = &rdmz_badblk[info->cur_page%2][0];
+
+	if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0) {
+		for (i = 0; i < size; i++) {
+			if (buf[i] != bf[i] && buf[i] != bf1[i]) {
+				k++;
+				/*if (info->cur_page < ((mtd->blkcnt - 4) * mtd->pagecnt))
+				printk("need retry %d=[%x][%x] \n",i, buf[i],bf[i]);*/
+			} else
+				j++;
+		}
+		if (j > (size/2))
+			return 1;
+	} else {
+		/* NOTE(review): the last word is skipped for ECC_mode <= 3 --
+		 * presumably it holds parity rather than data; confirm. */
+		if (info->ECC_mode <= 3)
+			size--;
+		for (i = 0; i < size; i++) {
+			if (buf[i] != 0xFFFFFFFF && buf[i] != 0) {
+				k++;
+				/*printk("unc %d=[%x]\n",i, buf[i]);*/
+			} else
+				j++;
+		}
+		if (j > (size/2))
+			return 1;
+	}
+	/*if (info->cur_lpage < ((mtd->blkcnt - 4) * mtd->pagecnt)) {
+		print_nand_register(mtd);
+		printk("cur page 0x%x\n",info->cur_page);
+	}*/
+	return 0;
+}
+
+#if 1
+/*
+ * Software check for an erased buffer: returns 1 when more than half of
+ * the 'size' 32-bit words equal 0xFFFFFFFF, 0 otherwise.
+ * 'mtd' is unused; kept for symmetry with check_all_FF().
+ */
+int check_all_FF_sw(unsigned int *buf, int size, struct mtd_info *mtd)
+{
+	int idx, ff_words = 0;
+
+	for (idx = 0; idx < size; idx++) {
+		if (buf[idx] == 0xFFFFFFFF)
+			ff_words++;
+	}
+	return (ff_words > (size / 2)) ? 1 : 0;
+}
+#endif
+
+/*
+ * Acknowledge a BCH ECC interrupt (clear ERR_CORRECT | BCH_ERR in the
+ * interrupt status register) and then issue READ_RESUME so the NFC
+ * continues the transfer that the ECC engine paused.
+ * The wmb() barriers order the two MMIO writes.
+ */
+void clear_ecc_resume_dma(struct wmt_nand_info *info)
+{
+	writeb((ERR_CORRECT | BCH_ERR), info->reg + NFCRb_NFC_INT_STAT);
+	wmb();
+	writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+	wmb();
+}
+
+
+
+/*
+ * Handle a BCH ECC event raised while reading the DATA area of a page.
+ *
+ * NFCR17 reports which bank flagged the event and how many bit errors
+ * were counted.  If the count saturates at BCH_ERR_CNT the bank is
+ * uncorrectable; the routine then distinguishes:
+ *   - an erased page (last bank's OOB words match the expected erased
+ *     pattern) -> sets info->unc_allFF and returns;
+ *   - a chip that supports read-retry (mtd->dwRetry, cur_chip set) ->
+ *     sets info->data_ecc_uncor_err (2 = retry forbidden for the Hynix
+ *     retry-table blocks) and returns so the caller can retry;
+ *   - a genuine failure -> bumps mtd->ecc_stats.failed.
+ * Otherwise each reported bit position is corrected in place, in the DMA
+ * data buffer, or in the ECC FIFO (OOB) area for the last bank.
+ * clear_ecc_resume_dma() acks the interrupt and resumes the transfer on
+ * every path.
+ */
+void bch_data_ecc_correct(struct mtd_info *mtd)
+{
+	int i, all_FF = 0;
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	struct nand_chip *this = mtd->priv;
+	unsigned int bank_stat2, bch_ecc_idx, bank;
+	unsigned int bank_size;
+
+	/* BCH ECC err process */
+	bank_stat2 = readw(info->reg + NFCR17_ECC_BCH_ERR_STAT);
+	bch_ecc_idx = bank_stat2 & BCH_ERR_CNT;
+	bank = (bank_stat2 & BANK_NUM) >> 8;
+	/* for data area */
+	/*if (bch_ecc_idx > 15)
+		printk(KERN_NOTICE "pg=0x%x bk%d=%d\n",info->cur_page, bank, bch_ecc_idx);*/
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "Read data \n");
+	#endif
+	bank_size = info->bank_size;
+	/*if (this->cur_chip && (info->cur_page%4) == 0)
+	if ((info->cur_lpage < 0x7FB00) && this->cur_chip->cur_try_times < 5 && this->cur_chip != 0 && info->isr_cmd == 0x0) {
+		printk("----------------------------------set unc error by dannier info->cur_page0x%x\n", info->cur_page);
+		bch_ecc_idx = BCH_ERR_CNT;
+	}*/
+	/* Error count saturated => this bank is uncorrectable. */
+	if (bch_ecc_idx >= BCH_ERR_CNT) {
+		//unsigned int oob_parity_size = readb(info->reg + NFCR10_OOB_ECC_SIZE+1);
+		if (bank == 0)
+			info->unc_bank = 1;
+		else
+			info->unc_bank |= (1<<bank);
+		/*if (oob_parity_size >= 40)
+			oob_parity_size = 40;*/
+
+		clear_ecc_resume_dma(info);
+
+		/* Last bank carries the OOB: use it to detect an erased page. */
+		if (bank >= (info->banks-1)) {
+			//all_FF = check_rdmz_mark((uint32_t *)(info->reg+ECC_FIFO_4), 1, 0, mtd);
+			all_FF = check_all_FF((uint32_t *)(info->reg+ECC_FIFO_6), 6, mtd);
+
+			if (all_FF) {
+				info->data_ecc_uncor_err = 0;
+				info->unc_allFF = 1;
+				/*set_FIFO_FF((uint32_t *)(info->reg+ECC_FIFO_0), 5);
+				set_FIFO_FF((uint32_t *)info->dmabuf, mtd->realwritesize/4);*/
+				return;
+			} /*else
+				printk("**********lost check all FF case *********af%x, bk%x\n",
+				info->unc_bank,((1<<info->banks)-1));*/
+		}
+
+		/* Normal page read on a retry-capable chip: let caller retry. */
+		if (info->isr_cmd == 0x0 && mtd->dwRetry && this->cur_chip) {
+			info->data_ecc_uncor_err = 1;
+			if ((info->cur_lpage >= ((mtd->blkcnt-8)*mtd->pagecnt) &&
+			info->cur_lpage < ((mtd->blkcnt-4)*mtd->pagecnt)) &&
+			((this->cur_chip->nand_id>>24)&0xFF) == NAND_MFR_HYNIX) {
+				/* read retry table not allowed to use read retry */
+				info->data_ecc_uncor_err = 2;
+				if (bank >= (info->banks-1))
+					printk(KERN_ERR "data area bank %d uncor err page=0x%x no retry\n", bank, info->cur_page);
+			}
+			#ifdef RETRY_DEBUG
+			else {
+				if (bank >= (info->banks-1))
+					printk(KERN_ERR "data area bank %d uncor err page=0x%x use retry\n", bank, info->cur_page);
+			}
+			#endif
+
+			return;
+		} else {
+			if (bank >= (info->banks-1)) {
+				printk("reda lpage=%x bbt_sw_rdmz=%d hold=%x blkcnt=%d\n", info->cur_lpage, mtd->bbt_sw_rdmz, ((mtd->blkcnt - 8)*mtd->pagecnt), mtd->blkcnt);
+				printk(KERN_ERR "data area uncor err page=0x%x,blk=%d no retry\n", info->cur_page, info->cur_page/mtd->pagecnt);
+				/*print_nand_buffer(info->dmabuf, 32);printk("isrcmd 0x=%x\n", info->isr_cmd);
+				print_nand_buffer((uint8_t *)(info->reg+ECC_FIFO_0), 48);
+				print_nand_register(mtd);
+				while(1);*/
+			} else
+				return;
+		}
+		printk(KERN_ERR "data area unc++ page=0x%x no retry\n", info->cur_page);
+		mtd->ecc_stats.failed++;
+		return; /* uncorrected err */
+	}
+	/* Correctable: remember worst error count seen for this read. */
+	if (mtd->ecc_err_cnt < bch_ecc_idx)
+		mtd->ecc_err_cnt = bch_ecc_idx;
+	/* mtd->ecc_stats.corrected += (bank_stat2 & BCH_ERR_CNT);*/
+	/* BCH ECC correct */
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "data area %d bit corrected err on bank %d \n", bch_ecc_idx, bank);
+	#endif
+	/*if (bank >= (info->banks-1)) {
+		print_nand_register(mtd);
+	}*/
+
+	/* Latch all error positions before resuming, as the resume may
+	 * overwrite the position registers with the next bank's results. */
+	for (i = 0; i < bch_ecc_idx; i++)
+		bch_err_pos[i] = (readw(info->reg + NFCR18_ECC_BCH_ERR_POS + 2*i) & BCH_ERRPOS0);
+
+	/* continue read next bank and calc BCH ECC */
+	clear_ecc_resume_dma(info);
+
+	for (i = 0; i < bch_ecc_idx; i++) {
+		//bch_err_pos[i] = (readw(info->reg + NFCR18_ECC_BCH_ERR_POS + 2*i) & BCH_ERRPOS0);
+		//if (bank >= (info->banks-1))
+		//printk(KERN_NOTICE "data area byte=%d corrected err on bank %d bs=%d, banks=%d\n", bch_err_pos[i]>>3, bank, bank_size,info->banks);
+		//printk(KERN_NOTICE "data page=0x%x byte=%d corrected err on bank %d bs=%d, banks=%d\n",
+		//info->cur_page, bch_err_pos[i]>>3, bank, bank_size,info->banks);
+		if((bch_err_pos[i] >> 3) < bank_size) {
+			//if (bank >= (info->banks-1))
+			//printk(KERN_NOTICE "bank%d area value=%x ", bank, info->dmabuf[bank_size* bank + (bch_err_pos[i] >> 3)]);
+			bit_correct(&info->dmabuf[bank_size* bank + (bch_err_pos[i] >> 3)], bch_err_pos[i] & 0x07);
+			//if (bank >= (info->banks-1))
+			//printk(KERN_NOTICE "bank%d area c-value=%x \n", bank, info->dmabuf[bank_size* bank + (bch_err_pos[i] >> 3)]);
+		} else if ((bch_err_pos[i] >> 3) < (bank_size + 24) && bank >= (info->banks-1)) {//oob area
+			//if (bank >= (info->banks-1))
+			//printk(KERN_NOTICE "red area value=%x ", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+			bit_correct((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size, (bch_err_pos[i] & 0x07));
+			//if (bank >= (info->banks-1))
+			//printk(KERN_NOTICE "red area c-value=%x \n", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+		}
+		#ifdef NAND_DEBUG
+		printk(KERN_NOTICE "data area %xth ecc error position is byte%d bit%d\n",
+		i, bank_size * bank + (bch_err_pos[i] >> 3), (bch_err_pos[i] & 0x07));
+		#endif
+	}
+}
+
+/*
+ * Handle a BCH ECC event raised while reading only the redundant (OOB)
+ * area -- valid only for the read-OOB command (isr_cmd == 0x50); any
+ * other command is treated as fatal and deliberately hangs (while(1))
+ * after dumping the registers.
+ * Uncorrectable banks are classified like bch_data_ecc_correct():
+ * erased page -> unc_allFF; retry-capable chip -> data_ecc_uncor_err
+ * and oob_ecc_error = 0x50; otherwise ecc_stats.failed++.
+ * Correctable bit errors are flipped inside the 24-byte ECC FIFO area.
+ */
+void bch_redunt_ecc_correct(struct mtd_info *mtd)
+{
+	int i, all_FF = 1;
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	struct nand_chip *this = mtd->priv;
+	unsigned int bank_stat2, bch_ecc_idx, bank;
+	unsigned int bank_size;
+
+	/* BCH ECC err process */
+	bank_stat2 = readw(info->reg + NFCR17_ECC_BCH_ERR_STAT);
+	bch_ecc_idx = bank_stat2 & BCH_ERR_CNT;
+	bank = (bank_stat2 & BANK_NUM) >> 8;
+	/* for data area */
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "Read oob \n");
+	#endif
+
+	if (info->isr_cmd != 0x50) {
+		printk("bch_redunt_ecc_correct cmd not read oob \n");
+		print_nand_register(mtd);
+		/* intentional hang: reaching here means the ISR dispatched the
+		 * wrong handler and driver state can no longer be trusted */
+		while(1)
+			;
+	}
+	/*oob_parity_size = readb(info->reg + NFCR10_OOB_ECC_SIZE+1);
+	if (oob_parity_size >= 40)
+		oob_parity_size = 40;*/
+	if (bch_ecc_idx >= BCH_ERR_CNT) {
+		info->unc_bank = 1;
+		all_FF = check_all_FF((uint32_t *)(info->reg+ECC_FIFO_6), 6, mtd);
+
+		clear_ecc_resume_dma(info);
+
+		if (all_FF > 0) {
+			info->unc_allFF = 1;
+			return;
+		}
+		/*printk("red unc err\n");
+		print_nand_register(mtd);
+		rdmzier_oob((uint8_t *)(info->reg+ECC_FIFO_0), (uint8_t *)(info->reg+ECC_FIFO_0), 6, info->cur_page, mtd->realwritesize/4);
+		print_nand_register(mtd);
+		rdmzier_oob((uint8_t *)(info->reg+ECC_FIFO_0), (uint8_t *)(info->reg+ECC_FIFO_0), 6, info->cur_page, mtd->realwritesize/4);
+		while(1);*/
+		if (mtd->dwRetry && this->cur_chip) {
+			info->data_ecc_uncor_err = 1;
+			info->oob_ecc_error = 0x50;
+			if ((info->cur_lpage >= ((mtd->blkcnt-8)*mtd->pagecnt) &&
+			info->cur_lpage < ((mtd->blkcnt-4)*mtd->pagecnt)) &&
+			((this->cur_chip->nand_id>>24)&0xFF) == NAND_MFR_HYNIX) {
+				/* read retry table not allowed to use read retry */
+				info->data_ecc_uncor_err = 2;
+				printk(KERN_ERR "red area bank %d uncor err page=0x%x no retry\n", bank, info->cur_page);
+			}
+			#ifdef RETRY_DEBUG
+			else
+				printk(KERN_ERR "red area bank %d uncor err page=0x%x use retry\n", bank, info->cur_page);
+			#endif
+
+			return;
+		} else {
+			printk(KERN_ERR "red area uncor err page=0x%x no retry\n", info->cur_page);
+		}
+		mtd->ecc_stats.failed++;
+		printk(KERN_ERR "red area unc++ page=0x%x no retry\n", info->cur_page);
+		return; /* uncorrected err */
+	}
+	bank_size = info->bank_size;
+	/* mtd->ecc_stats.corrected += (bank_stat2 & BCH_ERR_CNT);*/
+	/* BCH ECC correct */
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "redunt area %d bit corrected err on bank %d \n", bch_ecc_idx, bank);
+	#endif
+	for (i = 0; i < bch_ecc_idx; i++) {
+		bch_err_pos[i] = (readw(info->reg + NFCR18_ECC_BCH_ERR_POS + 2*i) & BCH_ERRPOS0);
+		//printk(KERN_NOTICE "data area byte=%d corrected err on bank %d bs=%d, banks=%d\n", bch_err_pos[i]>>3, bank, bank_size,info->banks);
+		if((bch_err_pos[i] >> 3) < 24) {
+			//printk(KERN_NOTICE "area value=%d ", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3)));
+			bit_correct((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3), (bch_err_pos[i] & 0x07));
+		}
+		#ifdef NAND_DEBUG
+		printk(KERN_NOTICE "redunt area %xth ecc error position is byte%d bit%d\n",
+		i, bank_size * bank + (bch_err_pos[i] >> 3), (bch_err_pos[i] & 0x07));
+		#endif
+	}
+	/* continue read next bank and calc BCH ECC */
+	clear_ecc_resume_dma(info);
+}
+
+/*
+ * Handle a BCH ECC event for the LAST bank of a page, whose parity covers
+ * both its data slice and the 24-byte OOB (ECC FIFO) area.
+ * Uncorrectable: erased page -> unc_allFF; retry-capable chip ->
+ * data_ecc_uncor_err; otherwise ecc_stats.failed++.
+ * Correctable: bit positions below bank_size are flipped in the last
+ * bank's slice of the DMA buffer, positions in [bank_size, bank_size+24)
+ * in the ECC FIFO.
+ */
+void bch_data_last_bk_ecc_correct(struct mtd_info *mtd)
+{
+	int i, all_FF;
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	struct nand_chip *this = mtd->priv;
+	unsigned int bank_stat2, bch_ecc_idx, bank, bank_size;
+
+	/* BCH ECC err process */
+	bank_stat2 = readw(info->reg + NFCR17_ECC_BCH_ERR_STAT);
+	bch_ecc_idx = bank_stat2 & BCH_ERR_CNT;
+	bank = (bank_stat2 & BANK_NUM) >> 8;
+	/* mtd->ecc_stats.corrected += (bank_stat2 & BCH_ERR_CNT);*/
+	/* BCH ECC correct */
+	bank_size = info->bank_size;
+	if (bch_ecc_idx >= BCH_ERR_CNT) {
+		info->unc_bank = 1;
+		//unsigned int oob_parity_size = readb(info->reg + NFCR10_OOB_ECC_SIZE+1);
+		all_FF = check_all_FF((uint32_t *)(info->reg+ECC_FIFO_6), 6/*oob_parity_size/4*/, mtd);
+		clear_ecc_resume_dma(info);
+		if (all_FF > 0) {
+			info->unc_allFF = 1;
+			return;
+		}
+		if (mtd->dwRetry && this->cur_chip) {
+			info->data_ecc_uncor_err = 1;
+			printk(KERN_ERR
+			"last bank data area uncorrected err cur_page=%d use retry\n",info->cur_page);
+			return;
+		} else
+			printk(KERN_ERR
+			"last bank data area uncorrected err cur_page=%d no retry\n",info->cur_page);
+		mtd->ecc_stats.failed++;
+		printk(KERN_ERR "lst area unc++ page=0x%x no retry\n", info->cur_page);
+		//while(bank_stat1);
+		return;
+	}
+	/* Correctable: track the worst per-read error count. */
+	if (mtd->ecc_err_cnt < bch_ecc_idx)
+		mtd->ecc_err_cnt = bch_ecc_idx;
+	/* mtd->ecc_stats.corrected += (bank_stat2 & BCH_ERR_CNT);*/
+	/* BCH ECC correct */
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "last bank %d bit corrected error\n", bch_ecc_idx);
+	#endif
+	for (i = 0; i < bch_ecc_idx; i++) {
+		bch_err_pos[i] = (readw(info->reg + NFCR18_ECC_BCH_ERR_POS + 2*i) & BCH_ERRPOS0);
+		//printk(KERN_NOTICE "data area byte=%d corrected err on bank %d bs=%d, banks=%d\n", bch_err_pos[i]>>3, bank, bank_size,info->banks);
+		if((bch_err_pos[i] >> 3) < bank_size) {
+			bit_correct(&info->dmabuf[bank_size * (info->banks-1) + (bch_err_pos[i] >> 3)], bch_err_pos[i] & 0x07);
+		} else if ((bch_err_pos[i] >> 3) < (bank_size + 24)) {//oob area of last bank
+			//printk(KERN_NOTICE "redundant area value=%d ", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+			bit_correct((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size, (bch_err_pos[i] & 0x07));
+			//printk(KERN_NOTICE "redundant area value=%d \n", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+		}
+
+		#ifdef NAND_DEBUG
+		printk(KERN_NOTICE "data area last bank %xth ecc error position is byte%d bit%d\n",
+		i, bank_size * bank + (bch_err_pos[i] >> 3), (bch_err_pos[i] & 0x07));
+		#endif
+	}
+	/* continue read next bank and calc BCH ECC */
+	clear_ecc_resume_dma(info);
+}
+
+/*
+ * Variant of bch_data_ecc_correct() for non-aligned page layouts where
+ * the OOB is transferred as an extra DMA bank (dmabank = banks + 1) and
+ * lives at the tail of the DMA buffer (realwritesize + 24) instead of the
+ * ECC FIFO.  Classification of uncorrectable banks (erased / retry /
+ * failed) and the bit-correction loop are otherwise identical.
+ */
+void bch_data_ecc_correct_noalign(struct mtd_info *mtd)
+{
+	int i, all_FF = 0;
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	struct nand_chip *this = mtd->priv;
+	unsigned int bank_stat2, bch_ecc_idx, bank, dmabank = info->banks;
+	unsigned int bank_size;
+
+	/* one extra DMA bank carries the OOB in the no-align layout */
+	dmabank = info->banks + 1;
+
+	/* BCH ECC err process */
+	bank_stat2 = readw(info->reg + NFCR17_ECC_BCH_ERR_STAT);
+	bch_ecc_idx = bank_stat2 & BCH_ERR_CNT;
+	bank = (bank_stat2 & BANK_NUM) >> 8;
+	bank_size = info->bank_size;
+	/* for data area */
+	/*if (bch_ecc_idx >= 50)
+		printk(KERN_NOTICE "pg=0x%x=blk%d bk%d=%d\n",info->cur_page, info->cur_page/mtd->pagecnt, bank, bch_ecc_idx);*/
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "Read data \n");//print_nand_register(mtd);
+	#endif
+
+	if (bch_ecc_idx >= BCH_ERR_CNT) {
+		/*if (bank >= (dmabank-1))
+			print_nand_buffer((uint8_t *)info->dmabuf+bank_size * (dmabank-1), 32);*/
+		//unsigned int oob_parity_size = readb(info->reg + NFCR10_OOB_ECC_SIZE+1);
+		if (bank == 0)
+			info->unc_bank = 1;
+		else
+			info->unc_bank |= (1<<bank);
+
+		clear_ecc_resume_dma(info);
+
+		/* last DMA bank: check for an erased page before failing */
+		if (bank >= (dmabank-1)) {
+			if (dmabank == (info->banks + 1))
+				all_FF = check_all_FF((uint32_t *)(info->dmabuf+mtd->realwritesize+24), 6, mtd);
+			else
+				all_FF = check_all_FF((uint32_t *)(info->reg+ECC_FIFO_6), 6, mtd);
+			if (all_FF) {
+				info->data_ecc_uncor_err = 0;
+				info->unc_allFF = 1;
+				return;
+			} /*else
+				printk("**********lost check all FF case *********af%x, bk%x\n",
+				info->unc_bank,((1<<dmabank)-1));*/
+		}
+
+		if (info->isr_cmd == 0x0 && mtd->dwRetry && this->cur_chip) {
+			info->data_ecc_uncor_err = 1;
+			if ((info->cur_lpage >= ((mtd->blkcnt-8)*mtd->pagecnt) &&
+			info->cur_lpage < ((mtd->blkcnt-4)*mtd->pagecnt)) &&
+			((this->cur_chip->nand_id>>24)&0xFF) == NAND_MFR_HYNIX) {
+				/* read retry table not allowed to use read retry */
+				info->data_ecc_uncor_err = 2;
+				if (bank >= (dmabank-1))
+					printk(KERN_ERR "data area bank %d uncor err at eslc page=0x%x no retry\n", bank, info->cur_page);
+			}
+			#ifdef RETRY_DEBUG
+			else {
+				if (bank >= (dmabank-1))
+					printk(KERN_ERR "data area bank %d uncor err page=0x%x use retry\n", bank, info->cur_page);
+			}
+			#endif
+
+			return;
+		} else {
+			if (bank >= (dmabank-1)) {
+				printk("reda lpage=%x bbt_sw_rdmz=%d hold=%x blkcnt=%d\n", info->cur_lpage, mtd->bbt_sw_rdmz, ((mtd->blkcnt - 8)*mtd->pagecnt), mtd->blkcnt);
+				printk(KERN_ERR "data area uncor err page=0x%x,blk=%d no retry\n", info->cur_page, info->cur_page/mtd->pagecnt);
+				/*print_nand_buffer(info->dmabuf, 32);printk("isrcmd 0x=%x\n", info->isr_cmd);
+				print_nand_buffer((uint8_t *)(info->reg+ECC_FIFO_0), 48);
+				print_nand_register(mtd);
+				while(1);*/
+			} else
+				return;
+		}
+		printk(KERN_ERR "data area unc++ page=0x%x no retry\n", info->cur_page);
+		mtd->ecc_stats.failed++;
+		return; /* uncorrected err */
+	}
+	if (mtd->ecc_err_cnt < bch_ecc_idx)
+		mtd->ecc_err_cnt = bch_ecc_idx;
+	/* mtd->ecc_stats.corrected += (bank_stat2 & BCH_ERR_CNT);*/
+	/* BCH ECC correct */
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "data area %d bit corrected err on bank %d \n", bch_ecc_idx, bank);
+	#endif
+	/*if (bank >= (dmabank-1)) {
+		print_nand_register(mtd);
+	}*/
+
+	/* Latch every error position before resuming the transfer. */
+	for (i = 0; i < bch_ecc_idx; i++)
+		bch_err_pos[i] = (readw(info->reg + NFCR18_ECC_BCH_ERR_POS + 2*i) & BCH_ERRPOS0);
+
+	/* continue read next bank and calc BCH ECC */
+	clear_ecc_resume_dma(info);
+
+	for (i = 0; i < bch_ecc_idx; i++) {
+		//if (bank >= (dmabank-1))
+		//printk(KERN_NOTICE "data area byte=%d corrected err on bank %d bs=%d, banks=%d\n", bch_err_pos[i]>>3, bank, bank_size,dmabank);
+		//printk(KERN_NOTICE "data page=0x%x byte=%d corrected err on bank %d bs=%d, banks=%d\n",
+		//info->cur_page, bch_err_pos[i]>>3, bank, bank_size,dmabank);
+		if((bch_err_pos[i] >> 3) < bank_size) {
+			//printk(KERN_NOTICE "bank%d area value=%x ", bank, info->dmabuf[bank_size* bank + (bch_err_pos[i] >> 3)]);
+			bit_correct(&info->dmabuf[bank_size* bank + (bch_err_pos[i] >> 3)], bch_err_pos[i] & 0x07);
+			//printk(KERN_NOTICE "bank%d area c-value=%x \n", bank, info->dmabuf[bank_size* bank + (bch_err_pos[i] >> 3)]);
+		} else if ((bch_err_pos[i] >> 3) < (bank_size + 24) && bank >= (dmabank-1)) {//oob area
+			//printk(KERN_NOTICE "red area value=%x ", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+			bit_correct((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size, (bch_err_pos[i] & 0x07));
+			//printk(KERN_NOTICE "red area c-value=%x \n", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+		}
+		#ifdef NAND_DEBUG
+		printk(KERN_NOTICE "data area %xth ecc error position is byte%d bit%d\n",
+		i, bank_size * bank + (bch_err_pos[i] >> 3), (bch_err_pos[i] & 0x07));
+		#endif
+	}
+}
+
+/*
+ * Variant of bch_data_last_bk_ecc_correct() for the non-aligned layout:
+ * the OOB bank is the (banks+1)-th DMA bank stored at
+ * dmabuf[bank_size * banks ..], so both the erased-page check and the
+ * bit corrections operate on the DMA buffer, never the ECC FIFO (the
+ * FIFO branch of the correction loop is commented out).
+ */
+void bch_data_last_bk_ecc_correct_noalign(struct mtd_info *mtd)
+{
+	int i, all_FF;
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	struct nand_chip *this = mtd->priv;
+	unsigned int bank_stat2, bch_ecc_idx, bank, bank_size, bank_oob = info->banks;
+
+	bank_stat2 = readw(info->reg + NFCR17_ECC_BCH_ERR_STAT);
+	bch_ecc_idx = bank_stat2 & BCH_ERR_CNT;
+	bank = (bank_stat2 & BANK_NUM) >> 8;
+	/* mtd->ecc_stats.corrected += (bank_stat2 & BCH_ERR_CNT);*/
+	/* BCH ECC correct */
+
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "Read lst bk data \n");
+	#endif
+
+	bank_size = info->bank_size;
+	if (bch_ecc_idx >= BCH_ERR_CNT) {
+		//print_nand_buffer((uint8_t *)info->dmabuf+bank_size * bank_oob, 32);
+		//unsigned int oob_parity_size = readb(info->reg + NFCR10_OOB_ECC_SIZE+1);
+		all_FF = check_all_FF((uint32_t *)(info->dmabuf+bank_size * bank_oob + 24), 6, mtd);//print_nand_buffer(info->dmabuf+bank_size * bank_oob + 24, 24);
+		clear_ecc_resume_dma(info);
+		//printk(KERN_ERR "lstbk err cur_page=0x%x %d all_FF=%d\n",info->cur_page, info->cur_page, all_FF);
+		if (all_FF > 0) {
+			info->unc_allFF = 1;
+			return;
+		}
+		if (mtd->dwRetry && this->cur_chip) {
+			info->data_ecc_uncor_err = 1;
+			printk(KERN_ERR
+			"last bank data area uncorrected err cur_page=%d use retry\n",info->cur_page);
+			//print_nand_buffer(info->dmabuf+bank_size * bank_oob/* + 24*/, 48);
+			return;
+		} else
+			printk(KERN_ERR
+			"last bank data area uncorrected err cur_page=%d no retry\n",info->cur_page);
+		mtd->ecc_stats.failed++;
+		//while(bank_stat1);
+		return;
+	}
+	/* Correctable: track the worst per-read error count. */
+	if (mtd->ecc_err_cnt < bch_ecc_idx)
+		mtd->ecc_err_cnt = bch_ecc_idx;
+	/* mtd->ecc_stats.corrected += (bank_stat2 & BCH_ERR_CNT);*/
+	/* BCH ECC correct */
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "last bank %d bit corrected error\n", bch_ecc_idx);
+	#endif
+	for (i = 0; i < bch_ecc_idx; i++) {
+		bch_err_pos[i] = (readw(info->reg + NFCR18_ECC_BCH_ERR_POS + 2*i) & BCH_ERRPOS0);
+		//printk(KERN_NOTICE "data area byte=%d corrected err on bank %d bs=%d, banks=%d\n", bch_err_pos[i]>>3, bank, bank_size,bank_oob+1);
+		if((bch_err_pos[i] >> 3) < bank_size) {
+			bit_correct(&info->dmabuf[bank_size * bank_oob + (bch_err_pos[i] >> 3)], bch_err_pos[i] & 0x07);
+		} /*else if ((bch_err_pos[i] >> 3) < (bank_size + 24)) {//oob area of last bank
+			//printk(KERN_NOTICE "redundant area value=%d ", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+			bit_correct((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size, (bch_err_pos[i] & 0x07));
+			//printk(KERN_NOTICE "redundant area value=%d \n", *((uint8_t *)(info->reg+ECC_FIFO_0)+(bch_err_pos[i] >> 3) - bank_size));
+		}*/
+
+		#ifdef NAND_DEBUG
+		printk(KERN_NOTICE "data area last bank %xth ecc error position is byte%d bit%d\n",
+		i, bank_size * bank + (bch_err_pos[i] >> 3), (bch_err_pos[i] & 0x07));
+		#endif
+	}
+	/* continue read next bank and calc BCH ECC */
+	clear_ecc_resume_dma(info);
+}
+
+/*
+ * Issue a NAND status-read command (typically 0x70) and return the status
+ * byte, or a negative value on controller timeout.
+ *
+ * Sequence: (1) if HW randomization is active, pre-load the descrambling
+ * state via nfc_hw_rdmz() so the returned byte can be de-randomized;
+ * (2) send the command with the data phase disabled; (3) issue a single
+ * read cycle and fetch the byte from the data port; (4) with
+ * randomization active, XOR with rdmz_tb[0] to recover the raw status.
+ * On any timeout READ_RESUME is written so the BCH pipeline is not left
+ * paused.
+ */
+static int wmt_read_nand_status(struct mtd_info *mtd, unsigned char cmd)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	int cfg = 0, status = -1;
+	unsigned int b2r_stat;
+
+	#ifdef WMT_HW_RDMZ
+	unsigned int rdmz;
+	rdmz = readb(info->reg + NFCRf_CALC_RDMZ+2);
+	if (mtd->dwRdmz && rdmz) {
+		//dump_stack();
+		nfc_hw_rdmz(mtd, 1);
+		writeb(0, info->reg + NFCR4_COMPORT3_4);
+	}
+	#endif
+
+	writeb(cmd, info->reg + NFCR2_COMPORT0);
+	cfg = TWHR|DPAHSE_DISABLE|(1<<1);
+
+	/* clear any stale busy-to-ready status before triggering */
+	b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+	writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+//print_nand_register(mtd);
+	writew(cfg|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+	status = wmt_wait_cmd_ready(mtd);
+	if (status) {
+		printk(KERN_ERR "NFC command transfer1 is not ready\n");
+		writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+		return status;
+	}
+	b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+	writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+	/* single-byte read cycle: flash -> NFC data port */
+	cfg = SING_RW|NAND2NFC;
+	writew(cfg|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+//print_nand_register(mtd);
+	status = wmt_wait_cmd_ready(mtd);
+	if (status) {
+		printk(KERN_ERR "NFC command transfer2 is not ready\n");
+		writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+		return status;
+	}
+	status = wmt_nfc_transfer_ready(mtd);
+	/* status = wmt_nand_wait_idle(mtd);*/
+	if (status) {
+		printk(KERN_ERR "NFC IO transfer is not ready\n");
+		writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+		/*print_nand_register(mtd);*/
+		return status;
+	}
+	info->datalen = 0;
+	info->dmabuf[0] = readb(info->reg + NFCR0_DATAPORT) & 0xff;
+	#ifdef WMT_HW_RDMZ
+	if (mtd->dwRdmz && rdmz) {
+		//printk(KERN_ERR "sts=%x\n", info->dmabuf[0]);
+		/* de-scramble; NOTE(review): 0xe0 appears to be the expected
+		 * idle status pattern here -- confirm against the datasheet */
+		info->dmabuf[0] = info->dmabuf[0]^rdmz_tb[0];
+		if ((info->dmabuf[0]&0xFF) != 0xe0) {
+			printk(KERN_ERR "de-rdmz sts=%x page=%x\n", info->dmabuf[0],info->cur_page);
+			//if (info->cur_page != 0x7ff00) {
+				print_nand_register(mtd);
+				dump_stack();
+				//while(1);
+			//}
+		}
+	}
+	#endif
+
+	status = info->dmabuf[0];
+	//printk( "read status=0x%x\n", status);
+	return status;
+}
+
+/*
+ * Build a chain of short PDMA descriptors covering 'len' bytes of 'buf',
+ * split into 'bank_size'-byte pieces.  The final descriptor (either the
+ * last full piece or a trailing partial piece) is flagged as the end of
+ * the chain.
+ */
+void fill_desc(unsigned int *Desc, unsigned int len, unsigned char *buf, unsigned int bank_size)
+{
+	unsigned char *cursor = (unsigned char *)Desc;
+	unsigned int full = len / bank_size, rem = len % bank_size, i;
+
+	for (i = 0; i < full; i++) {
+		nand_init_short_desc((unsigned int *)cursor, bank_size,
+			(unsigned int *)(buf + i * bank_size),
+			((i == full - 1) && (rem == 0)) ? 1 : 0);
+		cursor += sizeof(struct _NAND_PDMA_DESC_S);
+	}
+	if (rem)
+		nand_init_short_desc((unsigned int *)cursor, rem,
+			(unsigned int *)(buf + full * bank_size), 1);
+}
+
+/*
+ * Configure the NAND PDMA for one transfer of 'len' bytes.
+ * @wr:        non-zero = write (memory-to-device), 0 = read.
+ * @data_flag: 0 selects the data+ECC FIFO path (clears bit 3 of NFCRd).
+ * @Nbank:     2 selects the second-plane source offset for multi-plane
+ *             writes; any other value uses the start of the DMA buffer.
+ *
+ * The descriptor chains live inside the DMA buffer itself, past the page
+ * and OOB data: read descriptors at ofs+0x100, write at ofs+0x200.
+ * Returns 0 on success, 1 when len == 0.
+ */
+static int wmt_nfc_dma_cfg(struct mtd_info *mtd, unsigned int len, unsigned int wr,
+int data_flag, int Nbank)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	int status;
+	unsigned int *ReadDesc, *WriteDesc, ofs;
+	ofs = mtd->writesize + mtd->oobsize + 0x1000 - (mtd->oobsize%0x1000);
+	ReadDesc = (unsigned int *)(info->dmabuf + ofs + 0x100);
+	WriteDesc = (unsigned int *)(info->dmabuf + ofs + 0x200);
+	/*
+	printk(KERN_ERR "info->dmabuf = 0x%x\r\n", (unsigned int) info->dmabuf);
+	printk(KERN_ERR "info->dmaaddr = 0x%x\r\n", (unsigned int) info->dmaaddr);
+	printk(KERN_ERR "ReadDesc addr = 0x%x\r\n", (unsigned int) ReadDesc);
+	printk(KERN_ERR "WriteDesc addr = 0x%x\r\n", (unsigned int) WriteDesc);
+	*/
+	if (len == 0) {
+		printk(KERN_ERR "DMA transfer length = 0\r\n");
+		return 1;
+	}
+	/* With BCH disabled, clamp the transfer to one ECC-bank's worth. */
+	if (len > 1024 && readb(info->reg + NFCR9_ECC_BCH_CTRL)&DIS_BCH_ECC) {
+		len = 512;
+		if (mtd->realwritesize > 8192)
+			len = 1024;
+	}
+
+	if (data_flag == 0) {
+		writeb(readb(info->reg + NFCRd_OOB_CTRL) & 0xF7, info->reg + NFCRd_OOB_CTRL);
+	}
+	writew(len - 1, info->reg + NFCR8_DMA_CNT);
+	status = nand_init_pdma(mtd);
+	if (status)
+		printk(KERN_ERR "nand_init_pdma fail status = 0x%x", status);
+
+	if (readl(info->reg + NFC_DMA_ISR) & NAND_PDMA_IER_INT_STS)
+		writel(NAND_PDMA_IER_INT_STS, info->reg + NFC_DMA_ISR);
+
+	if (readl(info->reg + NFC_DMA_ISR) & NAND_PDMA_IER_INT_STS) {
+		printk(KERN_ERR "PDMA interrupt status can't be clear ");
+		printk(KERN_ERR "NFC_DMA_ISR = 0x%8.8x \n", (unsigned int)readl(info->reg + NFC_DMA_ISR));
+	}
+
+	nand_alloc_desc_pool((wr) ? WriteDesc : ReadDesc);
+	/*nand_init_short_desc((wr)?WriteDesc : ReadDesc, len, (unsigned long *)buf);*/
+	/* pick the DMA source/target: last-bank buffer for a pending OOB
+	 * retry, second-plane offset for Nbank == 2, otherwise the start
+	 * of the DMA buffer */
+	if (info->oob_ecc_error == 0x50 && len != 1 && len != 3) {
+		fill_desc((wr)?WriteDesc : ReadDesc, len, (unsigned char *)info->last_bank_dmaaddr, 1024);
+		if (len != 1024 && len != 512)
+			printk("oob_ecc_error len!=1024, len=%d \n", len);
+	} else if (Nbank == 2) {//for multi-plane 2nd plane wr dma cfg
+		if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+			fill_desc((wr)?WriteDesc : ReadDesc, len, (unsigned char *)info->dmaaddr, 1024);
+		else
+			fill_desc((wr)?WriteDesc : ReadDesc, len, (unsigned char *)info->dmaaddr + mtd->realwritesize, 1024);
+	} else
+		fill_desc((wr)?WriteDesc : ReadDesc, len, (unsigned char *)info->dmaaddr, 1024);
+	/*printk(KERN_ERR "dma wr=%d, len=0x%x\n", wr, len);*/
+
+	nand_config_pdma(mtd,
+		(wr) ? (unsigned long *)(info->dmaaddr + ofs + 0x200)
+		: (unsigned long *)(info->dmaaddr + ofs + 0x100), wr);
+
+	return 0;
+}
+
+/*
+ * Soft-reset the NAND PDMA engine, then enable it.
+ * Returns 0 when the enable bit reads back set, 1 on failure.
+ */
+int nand_init_pdma(struct mtd_info *mtd)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+	writel(NAND_PDMA_GCR_SOFTRESET, info->reg + NFC_DMA_GCR);
+	writel(NAND_PDMA_GCR_DMA_EN, info->reg + NFC_DMA_GCR);
+	return (readl(info->reg + NFC_DMA_GCR) & NAND_PDMA_GCR_DMA_EN) ? 0 : 1;
+}
+
+
+/*
+ * Tear down the PDMA engine: clear the descriptor base pointer and the
+ * global control register (disables the engine).  Always returns 0.
+ */
+int nand_free_pdma(struct mtd_info *mtd)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	writel(0, info->reg + NFC_DMA_DESPR);
+	writel(0, info->reg + NFC_DMA_GCR);
+	return 0;
+}
+
+
+/* Zero the 0x80-byte descriptor pool before it is (re)filled.
+ * Always returns 0. */
+int nand_alloc_desc_pool(unsigned int *DescAddr)
+{
+	memset(DescAddr, 0x00, 0x80);
+	return 0;
+}
+
+/*
+ * Initialise one short-format PDMA descriptor.
+ * @End: non-zero marks this descriptor as the last in the chain (sets
+ *       both the interrupt flag 'i' and the 'end' flag).
+ * Always returns 0.
+ */
+int nand_init_short_desc(unsigned int *DescAddr, unsigned int ReqCount, unsigned int *BufferAddr, int End)
+{
+	struct _NAND_PDMA_DESC_S *desc = (struct _NAND_PDMA_DESC_S *)DescAddr;
+
+	desc->ReqCount = ReqCount;
+	desc->i = End;
+	desc->end = End;
+	desc->format = 0;
+	desc->DataBufferAddr = (unsigned long)BufferAddr;
+	return 0;
+}
+
+/*
+ * Initialise one long-format PDMA descriptor.
+ * @End: non-zero marks the last descriptor of the chain (sets both the
+ *       interrupt flag 'i' and the 'end' flag).
+ * Always returns 0.
+ *
+ * Fix: the original only wrote CurDes_L->end inside the End branch, so a
+ * non-terminal descriptor handed whatever the memory held to the DMA
+ * engine (safe only when the pool was memset first).  'end' and 'i' are
+ * now always assigned, matching nand_init_short_desc().
+ */
+int nand_init_long_desc(unsigned long *DescAddr, unsigned int ReqCount, unsigned long *BufferAddr,
+unsigned long *BranchAddr, int End)
+{
+	struct _NAND_PDMA_DESC_L *CurDes_L;
+	CurDes_L = (struct _NAND_PDMA_DESC_L *) DescAddr;
+	CurDes_L->ReqCount = ReqCount;
+	CurDes_L->format = 1;
+	CurDes_L->DataBufferAddr = (unsigned long)BufferAddr;
+	CurDes_L->BranchAddr = (unsigned long)BranchAddr;
+	CurDes_L->i = End ? 1 : 0;
+	CurDes_L->end = End ? 1 : 0;
+
+	return 0;
+}
+/*
+int nand_config_desc(unsigned long *DescAddr, unsigned long *BufferAddr, int Blk_Cnt)
+{
+ int i = 0 ;
+ unsigned long *CurDes = DescAddr;
+
+ nand_alloc_desc_pool(CurDes);
+
+
+ for (i = 0 ; i < 3 ; i++) {
+ nand_init_short_desc(CurDes, 0x80, BufferAddr);
+ BufferAddr += (i * 0x80);
+ CurDes += (i * sizeof(NAND_PDMA_DESC_S));
+ }
+ if (Blk_Cnt > 1) {
+ nand_init_long_desc(CurDes, 0x80, BufferAddr, CurDes + sizeof(NAND_PDMA_DESC_L), 0);
+ BufferAddr += (i * 0x80);
+ CurDes += (i * sizeof(NAND_PDMA_DESC_L));
+
+ nand_init_long_desc(CurDes, (Blk_Cnt - 1) * 512, BufferAddr,
+ CurDes + sizeof(NAND_PDMA_DESC_L), 1);
+ } else {
+ nand_init_long_desc(CurDes, 0x80, BufferAddr, CurDes + sizeof(NAND_PDMA_DESC_L), 1);
+ }
+
+ return 0;
+}
+*/
+
+/*
+ * Point the PDMA engine at a descriptor chain and start it.
+ * @DescAddr: physical address of the first descriptor.
+ * @dir:      NAND_PDMA_READ selects device-to-memory, otherwise
+ *            memory-to-device.
+ * Interrupt generation is skipped for SET_FEATURE and the 0x36/0x37
+ * commands, which are completed by polling instead.  The wmb() barriers
+ * order the configuration writes before the RUN bit.  Always returns 0.
+ */
+int nand_config_pdma(struct mtd_info *mtd, unsigned long *DescAddr, unsigned int dir)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	if (info->isr_cmd != NAND_SET_FEATURE && info->isr_cmd != 0x37 && info->isr_cmd != 0x36)
+		writel(NAND_PDMA_IER_INT_EN, info->reg + NFC_DMA_IER);
+	writel((unsigned long)DescAddr, info->reg + NFC_DMA_DESPR);
+	if (dir == NAND_PDMA_READ)
+		writel(readl(info->reg + NFC_DMA_CCR)|NAND_PDMA_CCR_peripheral_to_IF,
+		info->reg + NFC_DMA_CCR);
+	else
+		writel(readl(info->reg + NFC_DMA_CCR)&(~NAND_PDMA_CCR_peripheral_to_IF),
+		info->reg + NFC_DMA_CCR);
+	wmb();
+	/*mask_interrupt(IRQ_NFC_DMA);*/
+	writel(readl(info->reg + NFC_DMA_CCR)|NAND_PDMA_CCR_RUN, info->reg + NFC_DMA_CCR);
+	/*printk(KERN_ERR "NFC_DMA_CCR = 0x%8.8x\r\n", readl(info->reg + NFC_DMA_CCR));*/
+	/*print_nand_register(mtd);*/
+	wmb();
+	return 0;
+}
+
+/*
+ * Post-transfer PDMA status handler: read the event code from the CCR,
+ * clear the interrupt status bit and log any abnormal completion
+ * (FIFO under/overrun, descriptor read error, memory r/w error, early
+ * end).  Always returns 0.
+ *
+ * NOTE(review): with the polling branch compiled out (#if 0), 'count'
+ * is never decremented, so the final "PDMA TimeOut" hang is unreachable.
+ */
+int nand_pdma_handler(struct mtd_info *mtd)
+{
+	unsigned long status = 0;
+	unsigned long count = 0;
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+	count = 0x100000;
+#if 0
+	/* polling CSR TC status */
+	if (!(readl(info->reg + NFC_DMA_CCR)|NAND_PDMA_CCR_peripheral_to_IF)) {
+		do {
+			count--;
+			if (readl(info->reg + NFC_DMA_ISR) & NAND_PDMA_IER_INT_STS) {
+				status = readl(info->reg + NFC_DMA_CCR) & NAND_PDMA_CCR_EvtCode;
+				writel(readl(info->reg + NFC_DMA_ISR)&NAND_PDMA_IER_INT_STS, info->reg + NFC_DMA_ISR);
+				printk(KERN_ERR "NFC_DMA_ISR = 0x%8.8x\r\n",
+				(unsigned int)readl(info->reg + NFC_DMA_ISR));
+				break;
+			}
+			if (count == 0) {
+				printk(KERN_ERR "PDMA Time Out!\n");
+				printk(KERN_ERR "NFC_DMA_CCR = 0x%8.8x\r\n",
+				(unsigned int)readl(info->reg + NFC_DMA_CCR));
+				/*print_nand_register(mtd);*/
+				count = 0x100000;
+				/*break;*/
+			}
+		} while (1);
+} else
+#endif
+	status = readl(info->reg + NFC_DMA_CCR) & NAND_PDMA_CCR_EvtCode;
+	writel(readl(info->reg + NFC_DMA_ISR)&NAND_PDMA_IER_INT_STS, info->reg + NFC_DMA_ISR);
+	if (status == NAND_PDMA_CCR_Evt_ff_underrun)
+		printk(KERN_ERR "PDMA Buffer under run!\n");
+
+	if (status == NAND_PDMA_CCR_Evt_ff_overrun)
+		printk(KERN_ERR "PDMA Buffer over run!\n");
+
+	if (status == NAND_PDMA_CCR_Evt_desp_read)
+		printk(KERN_ERR "PDMA read Descriptor error!\n");
+
+	if (status == NAND_PDMA_CCR_Evt_data_rw)
+		printk(KERN_ERR "PDMA read/write memory descriptor error!\n");
+
+	if (status == NAND_PDMA_CCR_Evt_early_end)
+		printk(KERN_ERR "PDMA read early end!\n");
+
+	if (count == 0) {
+		printk(KERN_ERR "PDMA TimeOut!\n");
+		while (1)
+			;
+	}
+	return 0;
+}
+
+/*
+ * nand_get_feature - issue GET FEATURES (0xEE) and read 4 parameter bytes.
+ * @addr: feature address to query.
+ *
+ * The four parameter bytes are fetched one at a time through the single
+ * byte data port and stored in info->dmabuf[0..3]. Returns 0 on success
+ * or the failing wait-status code.
+ */
+int nand_get_feature(struct mtd_info *mtd, int addr)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	unsigned int cfg = 0, i = 0;
+	int status = -1;
+
+	/* Command 0xEE + one address cycle, data phase disabled. */
+	writeb(0xEE, info->reg + NFCR2_COMPORT0);
+	writeb(addr, info->reg + NFCR3_COMPORT1_2);
+	cfg = DPAHSE_DISABLE|(0x02<<1);
+	writew(cfg|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+	status = wmt_wait_cmd_ready(mtd);
+
+	if (status) {
+		printk(KERN_ERR "nand_get_feature(): wait cmd is not ready\n");
+		writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+		return status;
+	}
+	status = wmt_wait_chip_ready(mtd);
+	if (status)
+		printk(KERN_ERR "flash is not ready\n");
+
+	/* Wait for the device busy-to-ready transition before reading data. */
+	status = wmt_nand_ready(mtd);
+	if (status)
+		printk(KERN_ERR "get feature wait B2R fail\n");
+
+	/* Single-byte reads, device-to-NFC direction. */
+	cfg = NAND2NFC|SING_RW;
+	for (i = 0; i < 4; i++) {
+		writew(cfg|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+		status = wmt_wait_cmd_ready(mtd);
+		if (status)
+			return status;
+		status = wmt_nfc_transfer_ready(mtd);
+		if (status) {
+			printk(KERN_ERR "in nand_get_feature(): wait transfer cmd is not ready\n");
+			return status;
+		}
+		info->dmabuf[i] = readb(info->reg + NFCR0_DATAPORT) & 0xff;
+	}
+	//#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "nand get feature %x %x %x %x\n",
+	info->dmabuf[0], info->dmabuf[1], info->dmabuf[2], info->dmabuf[3]);
+	//#endif
+	info->datalen = 0;
+	return 0;
+}
+
+/*
+ * nand_set_feature - issue SET FEATURES with a 4-byte payload via PDMA.
+ * @cmd: command byte to send (normally NAND_SET_FEATURE).
+ * @addrss: feature address.
+ * @value: first parameter byte (P1); P2..P4 are sent as zero.
+ *
+ * BCH ECC is disabled for the duration of the operation. The B2R interrupt
+ * is unmasked (0x1B) and the ISR signals @complete when the device returns
+ * to ready; info->isr_cmd tells the ISR how to treat this command.
+ * Returns 0 on success or the failing wait-status code.
+ *
+ * NOTE(review): some diagnostic strings below say "get feature" — they
+ * predate this function's rename and refer to this set-feature path.
+ */
+int nand_set_feature(struct mtd_info *mtd, int cmd, int addrss, int value)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	unsigned int cfg = 0, len = 4;
+	int status = -1;
+	DECLARE_COMPLETION(complete);
+	//unsigned char id[4] = {value, 0, 0, 0};
+	/* Stage the 4-byte parameter payload for the PDMA write. */
+	info->dmabuf[0] = value;
+	info->dmabuf[1] = 0;
+	info->dmabuf[2] = 0;
+	info->dmabuf[3] = 0;
+	info->isr_cmd = cmd;
+	info->done_data = &complete;
+	writel(readl(info->reg + NFCR9_ECC_BCH_CTRL) | DIS_BCH_ECC, info->reg + NFCR9_ECC_BCH_CTRL);
+	//printk("set feature cycle1\n");
+
+	/* Clear any stale B2R status, then unmask the B2R interrupt. */
+	writeb(0x1F, info->reg + NFCR13_INT_MASK);
+	writel(B2R, info->reg + NFCRb_NFC_INT_STAT);
+	if (readb(info->reg + NFCRb_NFC_INT_STAT) & B2R)
+		printk("nand get feature B2R can't clear\n");
+	writeb(0x1B, info->reg + NFCR13_INT_MASK);
+
+	//printk("set feature cycle2\n");
+
+	/* Configure a 4-byte DMA write (host to device). */
+	wmt_nfc_dma_cfg(mtd, len, 1, 0, -1);
+	//print_nand_register(nfc);
+
+	/* Command + one address cycle, data phase enabled. */
+	writeb(cmd, info->reg + NFCR2_COMPORT0);
+	writeb(addrss, info->reg + NFCR3_COMPORT1_2);
+	cfg = (0x02<<1);
+	//print_nand_register(mtd);
+	//printk("set feature cycle trigg = 0x%x\n", cfg|NFC_TRIGGER|OLD_CMD);
+	writew(cfg|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+	//print_nand_register(mtd);
+	//printk("set feature cycle3\n");
+	wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+	status = NFC_WAIT_IDLE(mtd);
+	if (status) {
+		printk("get feature nand flash idle time out\n");
+		return status;
+	}
+
+	/* Mask everything except the global enable again. */
+	writeb(0x80, info->reg + NFCR13_INT_MASK);
+	//printk("set feature cycle5\n");
+	status = wmt_nfc_transfer_ready(mtd);
+	/* status = wmt_nand_wait_idle(mtd);*/
+	if (status) {
+		printk(KERN_ERR "NFC IO transfer is not ready\n");
+		/*print_nand_register(mtd);*/
+		return status;
+	}
+
+	status = NFC_WAIT_IDLE(mtd);
+	if (status) {
+		printk("set feature nand flash idle time out\n");
+		return status;
+	}
+
+	/* Reap the DMA transfer and release the descriptor. */
+	status = nand_pdma_handler(mtd);
+	nand_free_pdma(mtd);
+	if (status)
+		printk(KERN_ERR "check write pdma handler status= %x \n", status);
+	/* Re-enable BCH ECC now the raw transfer is done. */
+	writel(readl(info->reg + NFCR9_ECC_BCH_CTRL) & ~DIS_BCH_ECC, info->reg + NFCR9_ECC_BCH_CTRL);
+	printk(KERN_DEBUG " MICRON flash set feature timing mode %d\n", value);
+	return status;
+}
+
+/*
+ * get_parameter - read back vendor retry registers (Hynix command 0x37).
+ * @buf: output buffer, receives @size register values.
+ * @addr: list of @size register addresses to read.
+ * @size: number of registers to read.
+ *
+ * Each register is fetched with a one-byte PDMA read through the high ECC
+ * FIFO: the first iteration sends command 0x37 plus the first address, the
+ * following iterations send an address cycle only. Returns 0 on success or
+ * the first failing wait-status code.
+ *
+ * Fix: the PDMA completion poll was written "!readl(...) & MASK", which
+ * applies logical NOT before the bitwise mask (C precedence: unary ! binds
+ * tighter than binary &), so the loop tested "(readl()==0) & MASK" instead
+ * of "interrupt bit not yet set". The test is now parenthesized correctly
+ * and bounded so a dead engine cannot hang the CPU forever.
+ */
+int get_parameter(struct mtd_info *mtd, uint8_t *buf, uint8_t *addr, int size)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	unsigned int cfg = 0, len = 1, poll;
+	int i, status = -1, regc = size;
+	unsigned char *FIFO = (unsigned char *) (info->reg+ECC_FIFO_c);
+
+	for (i = 0; i < regc; i++) {
+		info->isr_cmd = 0x37;	/* polled path: DMA IRQ stays disabled */
+		writeb(0x1F, info->reg + NFCR13_INT_MASK);
+		writel(B2R, info->reg + NFCRb_NFC_INT_STAT); /* write-to-clear */
+		if (readb(info->reg + NFCRb_NFC_INT_STAT) & B2R)
+			printk("B2R can't clear\n");
+
+		/* One-byte DMA read (device to host). */
+		wmt_nfc_dma_cfg(mtd, len, 0, 0, -1);
+		writeb(readb(info->reg + NFCRd_OOB_CTRL) | HIGH64FIFO, info->reg + NFCRd_OOB_CTRL);
+		if (i == 0) {
+			/* Command byte + first register address. */
+			FIFO[0] = 0x37;
+			FIFO[1] = addr[0];
+			writel(0x00020001, info->reg + NFCRc_CMD_ADDR);
+			cfg = (0x02<<1);
+		} else {
+			/* Subsequent registers need only an address cycle. */
+			FIFO[0] = addr[i];
+			writel(0x00010000, info->reg + NFCRc_CMD_ADDR);
+			cfg = (0x01<<1);
+		}
+		writew(NAND2NFC|cfg|NFC_TRIGGER, info->reg + NFCR1_COMCTRL);
+
+		/* Bounded poll for PDMA completion (precedence fix, see above). */
+		poll = 0x100000;
+		while (!(readl(info->reg + NFC_DMA_ISR) & NAND_PDMA_IER_INT_STS)) {
+			if (--poll == 0) {
+				printk(KERN_ERR "get_parameter: PDMA poll timeout\n");
+				break;
+			}
+		}
+		status = NFC_WAIT_IDLE(mtd);
+		if (status) {
+			printk("get feature nand flash idle time out\n");
+			return status;
+		}
+		writeb(0x80, info->reg + NFCR13_INT_MASK);
+		status = wmt_nfc_transfer_ready(mtd);
+		if (status) {
+			printk(KERN_ERR "NFC IO transfer is not ready\n");
+			return status;
+		}
+
+		status = NFC_WAIT_IDLE(mtd);
+		if (status) {
+			printk("set feature nand flash idle time out\n");
+			return status;
+		}
+
+		/* Reap the DMA transfer and release the descriptor. */
+		status = nand_pdma_handler(mtd);
+		nand_free_pdma(mtd);
+		if (status)
+			printk(KERN_ERR "check write pdma handler status= %x \n", status);
+
+		buf[i] = info->dmabuf[0];
+	}
+
+	#ifdef RETRY_DEBUG
+	printk("retry param buf =");
+	for (i = 0; i < regc; i++)
+		printk(" 0x%x", buf[i]);
+	printk("\n");
+	#endif
+
+	/* Route the FIFO back to its normal mapping. */
+	writeb(readb(info->reg + NFCRd_OOB_CTRL) & ~HIGH64FIFO, info->reg + NFCRd_OOB_CTRL);
+	return status;
+}
+
+/*
+ * hynix_get_parameter - snapshot Hynix retry/ESLC registers via cmd 0x37.
+ * @mode: ESLC_MODE or READ_RETRY_MODE.
+ *
+ * The def_value tables are "sealed" by writing 0xff to the two bytes after
+ * the last register: while unsealed (first call), the values read back
+ * become the stored defaults — for ESLC the deltas in eslc_set_value are
+ * additionally rebased onto them. Once sealed, a retry-mode call instead
+ * matches the read-back against the retry table to recover the current
+ * try index. The hardware randomizer is suspended around the raw read.
+ * Returns 0 on success, negative on unsupported mode or read failure.
+ */
+int hynix_get_parameter(struct mtd_info *mtd, int mode)
+{
+	struct nand_chip *this = mtd->priv;
+	struct nand_read_retry_param *cur_chip = this->cur_chip;
+	unsigned char buf[16] = {0};
+	unsigned char *offset = NULL;
+	unsigned char *set_value = NULL;
+	unsigned char *def_value = NULL;
+	unsigned int reg_num;
+	int i = 0, j = 0;
+	int rc = -1;
+
+	if (mode == ESLC_MODE) {
+		reg_num = cur_chip->eslc_reg_num;
+		offset = cur_chip->eslc_offset;
+		def_value = cur_chip->eslc_def_value;
+		set_value = cur_chip->eslc_set_value;
+	} else if (mode == READ_RETRY_MODE) {
+		reg_num = cur_chip->retry_reg_num;
+		offset = cur_chip->retry_offset;
+		def_value = cur_chip->retry_def_value;
+	} else {
+		printk("Not support this mode %d\n", mode);
+		return rc;
+	}
+	/* Read raw registers with the randomizer off. */
+	if (mtd->dwRdmz)
+		reset_nfc(mtd, NULL, 3);
+	rc = get_parameter(mtd, buf, offset, reg_num);
+	if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+		nfc_hw_rdmz(mtd, 1);//enable rdmz
+	if (rc != 0)
+		return rc;
+
+	if (mode == ESLC_MODE) {
+		if((def_value[reg_num] != 0xff) && (def_value[reg_num + 1] != 0xff)) {
+			/* First read: store defaults and rebase the ESLC deltas. */
+			for(i = 0; i < reg_num; i++) {
+				def_value[i] = buf[i];
+				set_value[i] += buf[i];
+			}
+			def_value[reg_num] = 0xff;
+			def_value[reg_num + 1] = 0xff;
+			//printk("ESLC: ");
+			print_nand_buffer(buf, reg_num);
+		} else {
+			//printk("ESLC Current: ");
+			//print_nand_buffer(buf, reg_num);
+		}
+	} else if (mode == READ_RETRY_MODE) {
+		if ((def_value[reg_num] != 0xff) && (def_value[reg_num + 1] != 0xff)) {
+			/* First read: store retry defaults and seal the table. */
+			for (i = 0; i < reg_num; i++)
+				def_value[i] = buf[i];
+			def_value[reg_num] = 0xff;
+			def_value[reg_num + 1] = 0xff;
+			//printk("Retry : ");
+			//print_nand_buffer(buf, reg_num);
+		} else {
+			//printk("Retry Current: ");
+			//print_nand_buffer(buf, reg_num);
+			//printk("\n");
+			/* Match the read-back against the retry table to find
+			 * which retry set is currently active. */
+			for(j = 0; j < cur_chip->total_try_times; j++) {
+				for(i = 0; i < reg_num; i++) {
+					if(buf[i] != cur_chip->retry_value[j*reg_num+i])
+						break;
+				}
+				if(i == reg_num) {
+					cur_chip->cur_try_times = j;
+					printk("Get current try times %d from current register.\n", j);
+					break;
+				}
+			}
+
+		}
+	}
+	return rc;
+}
+
+/*
+ * write_bytes_cmd - issue an arbitrary command/address/data byte sequence.
+ * @cmd_cnt: number of command bytes in @cmd (may be 0, @cmd NULL).
+ * @addr_cnt: number of address bytes in @addr (may be 0, @addr NULL).
+ * @data_cnt: number of data bytes in @data (may be 0, @data NULL).
+ *
+ * Command and address bytes are staged in the high ECC FIFO with their
+ * cycle types encoded in NFCRc (low 16 bits = command cycles, high 16 bits
+ * = address cycles); data bytes, if any, go out through a small PDMA write.
+ * A NAND_CMD_RESET command additionally waits for the device-ready (B2R)
+ * indication. Returns 0 on success, non-zero wait status otherwise.
+ *
+ * Fix: the PDMA completion poll was "!readl(...) & MASK" — unary ! binds
+ * tighter than binary &, so the register value was negated before masking.
+ * Now parenthesized so the interrupt bit is actually tested.
+ */
+int write_bytes_cmd(struct mtd_info *mtd, int cmd_cnt, int addr_cnt, int data_cnt, uint8_t *cmd, uint8_t *addr, uint8_t *data)
+{
+	int i, status = 0;
+	unsigned int cmd_addr_cycle = 0, cfg = 0, cfg_bit8 = 0, counter = 10000;
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	unsigned char *FIFO = (unsigned char *) (info->reg+ECC_FIFO_c);
+
+	writeb(0x1F, info->reg + NFCR13_INT_MASK);
+
+	status = NFC_WAIT_IDLE(mtd);
+	if (status) {
+		printk("nand flash idle time out\n");
+		return status;
+	}
+
+	if (data_cnt > 0) {
+		/* Stage the payload and configure a small host-to-device DMA. */
+		info->isr_cmd = 0x36;
+		memcpy(info->dmabuf, data, data_cnt);
+		wmt_nfc_dma_cfg(mtd, data_cnt, 1, 0, -1);
+	}
+	writeb(readb(info->reg + NFCRd_OOB_CTRL) | HIGH64FIFO, info->reg + NFCRd_OOB_CTRL);
+	/* Command bytes first; low 16 bits of NFCRc flag command cycles. */
+	for (i = 0; i < cmd_cnt; i++) {
+		FIFO[i] = cmd[i];
+		cmd_addr_cycle |= (1<<i);
+	}
+	/* Then address bytes; high 16 bits flag address cycles. */
+	for (i = cmd_cnt; i < (addr_cnt+cmd_cnt); i++) {
+		FIFO[i] = addr[i-cmd_cnt];
+		cmd_addr_cycle |= (1<<(i+16));
+	}
+	writel(cmd_addr_cycle, info->reg + NFCRc_CMD_ADDR);
+
+	#ifdef RETRY_DEBUG
+	printk("FIFO = ");
+	for (i = 0; i < (addr_cnt+cmd_cnt); i++)
+		printk("0x%x ", FIFO[i]);
+	if (data_cnt > 0) {
+		printk("data = ");
+		for (i = 0; i < data_cnt; i++) {
+			printk("0x%x ", data[i]);
+		}
+		printk("\n");
+	} else
+		printk("\n");
+	#endif
+
+	/* Cycle count: bits 3..1 hold the low 3 bits, bit 8+ the extension. */
+	cfg = ((cmd_cnt + addr_cnt)&0x7)<<1;
+	cfg_bit8 = (((cmd_cnt + addr_cnt)&0x18)>>3)<<8;
+
+	if (data_cnt == 0)
+		cfg |= DPAHSE_DISABLE;	/* no data phase for this command */
+
+	writew(cfg_bit8|cfg|NFC_TRIGGER, info->reg + NFCR1_COMCTRL);
+
+	status = wmt_nfc_transfer_ready(mtd);
+	if (status) {
+		writeb(readb(info->reg + NFCRd_OOB_CTRL) & ~HIGH64FIFO, info->reg + NFCRd_OOB_CTRL);
+		printk(KERN_ERR "NFC IO transfer is not ready\n");
+		goto go_fail;
+	}
+	status = NFC_WAIT_IDLE(mtd);
+	if (status) {
+		printk("retry c1 wait idle time out\n");
+		goto go_fail;
+	}
+	/* A reset command must also wait for the busy-to-ready transition. */
+	if (cmd_cnt > 0 && cmd) {
+		if (cmd[0] == NAND_CMD_RESET) {
+			status = wmt_nand_ready(mtd);
+			if (status) {
+				printk(KERN_ERR "Reset err, nand device is not ready\n");
+				writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+			}
+		}
+	}
+	if (data_cnt > 0) {
+		/* Bounded poll for PDMA completion (precedence fix, see above;
+		 * `counter` is unsigned, so test == 0 rather than <= 0). */
+		while (!(readl(info->reg + NFC_DMA_ISR) & NAND_PDMA_IER_INT_STS)) {
+			if (counter == 0)
+				break;
+			counter--;
+		}
+		status = nand_pdma_handler(mtd);
+		nand_free_pdma(mtd);
+		if (status) {
+			printk(KERN_ERR "check write pdma handler status= %x \n", status);
+			goto go_fail;
+		}
+	}
+
+go_fail:
+	writeb(0x80, info->reg + NFCR13_INT_MASK);
+	writeb(readb(info->reg + NFCRd_OOB_CTRL) & ~HIGH64FIFO, info->reg + NFCRd_OOB_CTRL);
+
+	return status;
+}
+
+/*
+ * set_parameter - program a sequence of Hynix retry/ESLC registers.
+ * @buf: values to write, one per register.
+ * @offset: register addresses, one per value.
+ * @regn: number of registers.
+ *
+ * Sends 0x36 + first address + first value in one transaction, then each
+ * remaining address/value pair, and finally the 0x16 commit command.
+ * Returns the status of the last transaction issued.
+ */
+int set_parameter(struct mtd_info *mtd, unsigned char *buf, unsigned char *offset, int regn)
+{
+	unsigned char enter_cmd = 0x36;	/* enter register-write mode */
+	unsigned char commit_cmd = 0x16;	/* commit and leave */
+	int idx;
+	int ret;
+
+	/* First register: command + address + data together. */
+	ret = write_bytes_cmd(mtd, 1, 1, 1, &enter_cmd, offset, buf);
+	if (ret)
+		printk("hynix_set read retry reg: phase 0 fail");
+
+	/* Remaining registers: address + data cycles only. */
+	for (idx = 1; idx < regn; idx++) {
+		ret = write_bytes_cmd(mtd, 0, 1, 1, NULL, offset + idx, buf + idx);
+		if (ret)
+			printk("hynix_set read retry reg: phase %d fail", idx);
+	}
+
+	ret = write_bytes_cmd(mtd, 1, 0, 0, &commit_cmd, NULL, NULL);
+	if (ret)
+		printk("load_hynix_opt_reg: phase 3 fail");
+
+	return ret;
+}
+
+/*
+ * dummy_read - perform a throwaway page read (cmd 0x00/0x30, address 0).
+ *
+ * Issued after restoring Hynix ESLC defaults so the register change takes
+ * effect; waits for the busy-to-ready transition before returning.
+ */
+void dummy_read(struct mtd_info *mtd)
+{
+	uint8_t read_cmd = 0x00, confirm_cmd = 0x30;
+	uint8_t addr_cycles[5] = {0, 0, 0, 0, 0};
+	int err;
+
+	err = write_bytes_cmd(mtd, 1, 5, 0, &read_cmd, addr_cycles, NULL);
+	if (err)
+		printk("dummy read cmd(00) + addr fail\n");
+
+	err = write_bytes_cmd(mtd, 1, 0, 0, &confirm_cmd, NULL, NULL);
+	if (err)
+		printk("dummy read cmd(0x30) fail\n");
+
+	/* check busy to ready status*/
+	err = wmt_nand_ready(mtd);
+	if (err)
+		printk(KERN_ERR "NFC check B2R time out\n");
+}
+
+/*
+ * hynix_set_parameter - program Hynix retry or ESLC registers.
+ * @mode: ESLC_MODE or READ_RETRY_MODE.
+ * @def_value: ECC_ERROR_VALUE advances to the next retry/ESLC value set;
+ *             any other value (DEFAULT_VALUE) restores the stored defaults.
+ *
+ * In retry mode cur_try_times walks the retry table and wraps to -1
+ * (defaults) once total_try_times is exceeded. The randomizer is suspended
+ * around the raw register writes. Restoring ESLC defaults is followed by a
+ * dummy page read so the new values latch. Returns 0 on success.
+ */
+int hynix_set_parameter(struct mtd_info *mtd, int mode, int def_value)
+{struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	struct nand_chip *this = mtd->priv;
+	struct nand_read_retry_param *cur_chip = this->cur_chip;
+	unsigned char *offset = NULL;
+	unsigned char *set_value = NULL;
+	unsigned int reg_num;
+	int rc = -1;
+
+
+	if (mode == ESLC_MODE) {
+		reg_num = cur_chip->eslc_reg_num;
+		offset = cur_chip->eslc_offset;
+		if (def_value == ECC_ERROR_VALUE) {
+			set_value = cur_chip->eslc_set_value;
+		} else {
+			set_value = cur_chip->eslc_def_value;
+		}
+	} else {
+		reg_num = cur_chip->retry_reg_num;
+		offset = cur_chip->retry_offset;
+		if (def_value == ECC_ERROR_VALUE) {
+			/* Advance to the next retry set; -1 means "defaults". */
+			cur_chip->cur_try_times++;
+			if (cur_chip->cur_try_times >= cur_chip->total_try_times)
+				cur_chip->cur_try_times = -1;
+			if ((cur_chip->cur_try_times >= 0) && (cur_chip->cur_try_times < cur_chip->total_try_times))
+				set_value = cur_chip->retry_value + cur_chip->cur_try_times* cur_chip->retry_reg_num;
+			else
+				set_value = cur_chip->retry_def_value;
+
+		} else {
+			set_value = cur_chip->retry_def_value;
+			cur_chip->cur_try_times = -1;
+		}
+	}
+#ifdef RETRY_DEBUG
+	printk("hynix set value: cur_try_times=%d\n", cur_chip->cur_try_times);
+	for(rc = 0; rc < reg_num; rc++)
+		printk(" 0x%x:0x%x ", offset[rc], set_value[rc]);
+	printk("reg_num = %d\n", reg_num);
+#endif
+
+	/* Suspend the randomizer around the raw register writes. */
+	if (mtd->dwRdmz)
+		reset_nfc(mtd, NULL, 3);
+
+	rc = set_parameter(mtd, set_value, offset, reg_num);
+
+	if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+		nfc_hw_rdmz(mtd, 1);//enable rdmz
+
+	if(rc) {
+		printk("set_parameter fail.\n");
+		return rc;
+	}
+
+	/* A dummy read is needed for restored ESLC defaults to take effect. */
+	if (def_value == DEFAULT_VALUE && mode == ESLC_MODE) {
+		printk("dummy read: rpage=%x wpage%x\n", info->cur_page, info->lst_wpage);
+		dummy_read(mtd);
+	}
+
+	return rc;
+}
+
+/*
+ * toshiba_pre_condition - send the Toshiba retry pre-condition prefix.
+ *
+ * Issues the 0x5C/0xC5 command pair that must precede Toshiba retry
+ * register programming. Returns the transaction status (0 on success).
+ */
+int toshiba_pre_condition(struct mtd_info *mtd)
+{
+	unsigned char prefix[2] = {0x5c, 0xc5};
+	int err;
+
+	err = write_bytes_cmd(mtd, 2, 0, 0, prefix, NULL, NULL);
+	if (err)
+		printk("toshiba pre condition cmd1 time out.\n");
+	else
+		printk("toshiba pre condition OK.\n");
+
+	return err;
+}
+
+/*
+ * toshiba_get_parameter - retry register read-back is not implemented for
+ * Toshiba parts; stub kept for vendor-dispatch interface symmetry.
+ */
+int toshiba_get_parameter(struct mtd_info *mtd, int mode)
+{
+	return 0;
+}
+
+/*
+ * toshiba_set_parameter - advance Toshiba read-retry registers.
+ *
+ * Each register is written with command 0x55 + address + value, then the
+ * 0x26/0x5D pair applies the new set. cur_try_times wraps to 0 after
+ * total_try_times. The randomizer is suspended around the raw writes.
+ * Returns the status of the last transaction.
+ *
+ * NOTE(review): the loop writes exactly 4 registers rather than
+ * cur_chip->retry_reg_num — presumably retry_reg_num is 4 for the Toshiba
+ * parts in chip_table; confirm before reusing with other parts.
+ */
+int toshiba_set_parameter(struct mtd_info *mtd, int mode, int def_mode)
+{
+	int i, status = -1;
+	struct nand_chip *this = mtd->priv;
+	struct nand_read_retry_param *cur_chip = this->cur_chip;
+	unsigned char cmd2[1] = {0x55};
+	unsigned char cmd3[2] = {0x26, 0x5d};
+	unsigned char *set_value = NULL;
+	unsigned char *offset = NULL;
+
+
+	if (mtd->dwRdmz)
+		reset_nfc(mtd, NULL, 3);
+
+
+	/* Wrap back to the first retry set once all have been tried. */
+	if (cur_chip->cur_try_times >= cur_chip->total_try_times)
+		cur_chip->cur_try_times = 0;
+	set_value = cur_chip->retry_value + cur_chip->cur_try_times*cur_chip->retry_reg_num;
+	offset = cur_chip->retry_offset;
+
+	cur_chip->cur_try_times++;
+	#ifdef RETRY_DEBUG
+	printk("toshiba set cur_try_times=%d\n", cur_chip->cur_try_times);
+	#endif
+	for (i = 0; i < 4; i++) {
+		status = write_bytes_cmd(mtd, 1, 1, 1, cmd2, &offset[i], &set_value[i]);
+		if (status)
+			printk("toshiba set read retry reg: phase %d fail", i);
+	}
+
+	/* Apply the new register set. */
+	status = write_bytes_cmd(mtd, 2, 0, 0, cmd3, NULL, NULL);
+	if (status) {
+		printk("pre condition cmd2 time out\n");
+	}
+
+	if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+		nfc_hw_rdmz(mtd, 1);//enable rdmz
+
+	return status;
+}
+
+/*
+ * samsung_get_parameter - retry register read-back is not implemented for
+ * Samsung parts; stub kept for vendor-dispatch interface symmetry.
+ */
+int samsung_get_parameter(struct mtd_info *mtd, int mode)
+{
+	return 0;
+}
+
+/*
+ * samsung_set_parameter - program Samsung read-retry registers.
+ * @def_mode: ECC_ERROR_VALUE selects the next retry set (and advances
+ *            cur_try_times); otherwise the defaults are restored and the
+ *            counter reset.
+ *
+ * Each register is written with vendor command 0xA1 and a 3-byte payload
+ * {0x00, offset, value}. The randomizer is suspended around the writes.
+ * Returns the status of the last transaction (-1 if reg_num is 0).
+ */
+int samsung_set_parameter(struct mtd_info *mtd, int mode, int def_mode)
+{
+	struct nand_chip *this = mtd->priv;
+	struct nand_read_retry_param *cur_chip = this->cur_chip;
+	unsigned char *offset = NULL;
+	unsigned char *set_value = NULL;
+	unsigned int reg_num;
+	int rc = -1, i;
+	uint8_t cmd[1] = {0xA1};
+	uint8_t data[3] = {0, 0, 0};
+
+	if (mtd->dwRdmz)
+		reset_nfc(mtd, NULL, 3);
+
+	reg_num = cur_chip->retry_reg_num;
+	offset = cur_chip->retry_offset;
+	if (def_mode == ECC_ERROR_VALUE) {
+		set_value = cur_chip->retry_value + cur_chip->cur_try_times * reg_num;
+		cur_chip->cur_try_times++;
+	} else {
+		set_value = cur_chip->retry_def_value;
+		cur_chip->cur_try_times = 0;
+	}
+
+	#ifdef RETRY_DEBUG
+	printk("samsung set value: cur_try_times=%d\n", cur_chip->cur_try_times);
+	for(i = 0; i < reg_num; i++)
+		printk(" 0x%x:0x%x ", offset[i], set_value[i]);
+	printk("reg_num = %d\n", reg_num);
+	#endif
+
+	/* One 0xA1 transaction per register: payload {0x00, addr, value}. */
+	for (i = 0; i < reg_num; i++) {
+		data[1] = offset[i];
+		data[2] = set_value[i];
+		rc = write_bytes_cmd(mtd, 1, 0, 3, cmd, NULL, data);
+		if (rc)
+			printk("samsung read retry reg: phase %d fail\n", i);
+	}
+
+	if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+		nfc_hw_rdmz(mtd, 1);//enable rdmz
+
+	return rc;
+}
+
+/*
+ * sandisk_get_parameter - retry register read-back is not implemented for
+ * SanDisk parts; stub kept for vendor-dispatch interface symmetry.
+ */
+int sandisk_get_parameter(struct mtd_info *mtd, int mode)
+{
+	return 0;
+}
+
+/*
+ * sandisk_set_parameter - program SanDisk read-retry registers.
+ * @total_try_times: try count for the page type being retried; if it
+ *        differs from the low byte of cur_chip->total_try_times the
+ *        upper-page half of the retry table is used.
+ * @def_value: ECC_ERROR_VALUE advances cur_try_times (wrapping to -1 =
+ *        defaults); otherwise defaults are restored and the counter reset.
+ *
+ * Registers are written after the 0x3B/0xB9 prefix, one 0x53 transaction
+ * per register. The randomizer is suspended around the writes.
+ * Returns the status of the last transaction.
+ */
+int sandisk_set_parameter(struct mtd_info *mtd, int total_try_times, int def_value)
+{
+	struct nand_chip *this = mtd->priv;
+	struct nand_read_retry_param *cur_chip = this->cur_chip;
+	unsigned char *offset = NULL;
+	unsigned char *set_value = NULL;
+	unsigned int reg_num, upper_page = 0;
+	int i, rc = -1;
+	uint8_t cmd[4] = {0x3B, 0xB9, 0x53, 0x54};
+
+	/* Caller's try count differing from the low byte selects upper-page. */
+	if (total_try_times != (cur_chip->total_try_times&0xFF))
+		upper_page = 1;
+
+	if (mtd->dwRdmz)
+		reset_nfc(mtd, NULL, 3);
+
+	reg_num = cur_chip->retry_reg_num;
+	offset = cur_chip->retry_offset;
+	if (def_value == ECC_ERROR_VALUE) {
+		/* Advance to the next retry set; -1 means "defaults". */
+		cur_chip->cur_try_times++;
+		if (cur_chip->cur_try_times >= total_try_times)
+			cur_chip->cur_try_times = -1;
+		if ((cur_chip->cur_try_times >= 0) && (cur_chip->cur_try_times < total_try_times)) {
+			if (upper_page)
+				set_value = cur_chip->retry_value +
+				(cur_chip->cur_try_times + (cur_chip->total_try_times&0xFF))* reg_num;
+			else
+				set_value = cur_chip->retry_value + cur_chip->cur_try_times * reg_num;
+		} else
+			set_value = cur_chip->retry_def_value;
+
+	} else {
+		set_value = cur_chip->retry_def_value;
+		cur_chip->cur_try_times = -1;
+	}
+#ifdef RETRY_DEBUG
+	printk("sandisk set value: upper_page=%d, cur_try_times=%d\n", upper_page, cur_chip->cur_try_times);
+	for(i = 0; i < reg_num; i++)
+		printk(" 0x%x:0x%x ", offset[i], set_value[i]);
+	printk("reg_num = %d\n", reg_num);
+#endif
+	/* Prefix 0x3B/0xB9, then one 0x53 write per register. */
+	rc = write_bytes_cmd(mtd, 2, 0, 0, cmd, NULL, NULL);
+	if (rc)
+		printk("sandisk read retry reg: set cmd fail\n");
+	for (i = 0; i < reg_num; i++) {
+		rc = write_bytes_cmd(mtd, 1, 1, 1, &cmd[2], &offset[i], &set_value[i]);
+		if (rc)
+			printk("sandisk set retry reg: phase %d fail\n", i);
+	}
+
+	if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+		nfc_hw_rdmz(mtd, 1);//enable rdmz
+
+	return rc;
+}
+
+/*
+ * sandisk_init_retry_register - one-time SanDisk retry initialization.
+ *
+ * Writes the chip-table's OTP register list (otp_offset/otp_data, otp_len
+ * entries) after the 0x3B/0xB9 prefix, one 0x53 transaction per register.
+ * The randomizer is suspended around the writes. Returns the status of
+ * the last transaction.
+ */
+int sandisk_init_retry_register(struct mtd_info *mtd, struct nand_read_retry_param *cur_chip)
+{
+	int i,status = -1;
+	unsigned char cmd[4] = {0x3B, 0xB9, 0x53, 0x54};
+	unsigned char *offset = cur_chip->otp_offset;
+	unsigned char *data = cur_chip->otp_data;
+	unsigned int regc = cur_chip->otp_len;
+
+	if (mtd->dwRdmz)
+		reset_nfc(mtd, NULL, 3);
+
+	#ifdef RETRY_DEBUG
+	printk("set sandisk init retry register offset addr: 0x%x, 0x%x\n", offset[0], offset[1]);
+	#endif
+	status = write_bytes_cmd(mtd, 2, 0, 0, cmd, NULL, NULL);
+	if (status) {
+		printk("send sandisk_init_retry_register cmd fail\n");
+	}
+	for (i = 0; i < regc; i++) {
+		status = write_bytes_cmd(mtd, 1, 1, 1, &cmd[2], &offset[i], &data[i]);
+		if (status)
+			printk("sandisk_init_retry_register : phase %d fail", i);
+	}
+
+	if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+		nfc_hw_rdmz(mtd, 1);//enable rdmz
+
+	return status;
+}
+
+/*
+ * micron_get_parameter - retry register read-back is not implemented for
+ * Micron parts; stub kept for vendor-dispatch interface symmetry.
+ */
+int micron_get_parameter(struct mtd_info *mtd, int mode)
+{
+	return 0;
+}
+
+/*
+ * micron_set_parameter - program Micron read-retry via SET FEATURES.
+ * @def_mode: ECC_ERROR_VALUE selects the next retry set (and advances
+ *            cur_try_times); otherwise defaults are restored and the
+ *            counter reset.
+ *
+ * Fix: the loop previously passed `offset` / `set_value` unindexed, so it
+ * wrote the FIRST register reg_num times and never touched the others.
+ * The sibling hynix/sandisk paths index with [i]; do the same here.
+ * (Identical behavior when reg_num == 1.)
+ *
+ * NOTE(review): ONFI SET FEATURES takes 4 parameter bytes (P1-P4); this
+ * sends a single data byte per transaction — confirm the controller pads
+ * the remaining cycles for the parts in chip_table.
+ *
+ * Returns the status of the last transaction (-1 if reg_num is 0).
+ */
+int micron_set_parameter(struct mtd_info *mtd, int mode, int def_mode)
+{
+	struct nand_chip *this = mtd->priv;
+	struct nand_read_retry_param *cur_chip = this->cur_chip;
+	unsigned char *offset = NULL;
+	unsigned char *set_value = NULL;
+	unsigned int reg_num;
+	int rc = -1, i;
+	uint8_t cmd[1] = {NAND_SET_FEATURE};
+
+	if (mtd->dwRdmz)
+		reset_nfc(mtd, NULL, 3);
+
+	reg_num = cur_chip->retry_reg_num;
+	offset = cur_chip->retry_offset;
+	if (def_mode == ECC_ERROR_VALUE) {
+		set_value = cur_chip->retry_value + cur_chip->cur_try_times * reg_num;
+		cur_chip->cur_try_times++;
+	} else {
+		set_value = cur_chip->retry_def_value;
+		cur_chip->cur_try_times = 0;
+	}
+
+	#ifdef RETRY_DEBUG
+	printk("micron set value: cur_try_times=%d\n", cur_chip->cur_try_times);
+	for(i = 0; i < reg_num; i++)
+		printk(" 0x%x:0x%x ", offset[i], set_value[i]);
+	printk("reg_num = %d\n", reg_num);
+	#endif
+
+	/* One SET FEATURES transaction per retry register. */
+	for (i = 0; i < reg_num; i++) {
+		rc = write_bytes_cmd(mtd, 1, 1, 1, cmd, &offset[i], &set_value[i]);
+		if (rc)
+			printk("micron read retry reg: phase %d fail\n", i);
+	}
+
+	if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+		nfc_hw_rdmz(mtd, 1);//enable rdmz
+
+	return rc;
+}
+
+static int wmt_nand_read_raw_page(struct mtd_info *mtd, struct nand_chip *chip, int page);
+/*
+ * hynix_get_otp - read the Hynix retry-value table out of the OTP area.
+ *
+ * Unlocks OTP access (reset, 0x36 + two register writes, then the
+ * 0x16/0x17/0x04/0x19 sequence), raw-reads page 0x200 into info->dmabuf,
+ * then resets and sends NAND_CMD_HYNIX_RETRY_END to leave OTP mode.
+ *
+ * dmabuf layout: byte 0 = total try count, byte 1 = registers per set,
+ * followed by 8 copies of the table stored twice (normal + bit-inverted).
+ * Copies are validated pairwise via the inversion check; the first valid
+ * copy supplies retry_def_value (set 0) and retry_value (sets 1..N-1).
+ * The def_value table is sealed with two trailing 0xff markers (see
+ * hynix_get_parameter). Returns 0 on success, -1 if the table header is
+ * implausible (either dimension > 8).
+ */
+int hynix_get_otp(struct mtd_info *mtd, struct nand_chip *chip)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	struct nand_read_retry_param *cur_chip = chip->cur_chip;
+	int i, j, status = -1;
+	//unsigned char data[2] = {0x00, 0x4D};
+	unsigned char cmd[5] = {0x36, 0x16, 0x17, 0x04, 0x19};
+	//unsigned char addr[2] = {0xAE , 0xB0};
+	unsigned int page = 0x200;
+	unsigned char *buff, reset = NAND_CMD_RESET, retry_end = NAND_CMD_HYNIX_RETRY_END;
+	unsigned char *offset = cur_chip->otp_offset;
+	unsigned char *data = cur_chip->otp_data;
+	unsigned int retry_times, retry_regs, chk = 0;
+	unsigned char *bf, *bf2;
+
+	if (mtd->dwRdmz)
+		reset_nfc(mtd, NULL, 3);
+
+	printk("get otp offset addr: 0x%x, 0x%x\n", offset[0], offset[1]);
+	//chip->cmdfunc(mtd, NAND_CMD_RESET_NO_STATUS_READ, -1, -1);
+
+	/* Enter OTP access mode. */
+	status = write_bytes_cmd(mtd, 1, 0, 0, (uint8_t *)&reset, NULL, NULL);
+	if (status) {
+		printk("load_hynix_opt_reg: reset fail");
+	}
+	status = write_bytes_cmd(mtd, 1, 1, 1, (uint8_t *)&cmd[0], (uint8_t *)&offset[0], (uint8_t *)&data[0]);
+	if (status)
+		printk("load_hynix_opt_reg: phase 1 fail");
+	status = write_bytes_cmd(mtd, 0, 1, 1, NULL, (uint8_t *)&offset[1], (uint8_t *)&data[1]);
+	if (status)
+		printk("load_hynix_opt_reg: phase 2 fail");
+	status = write_bytes_cmd(mtd, 4, 0, 0, (uint8_t *)&cmd[1], NULL, NULL);
+	if (status)
+		printk("load_hynix_opt_reg: phase 3 fail");
+	//status = HY_nand_read(0, page, buf, 1026, ecc_code, nfc, 0);
+	/* Raw-read the OTP page into info->dmabuf. */
+	wmt_nand_read_raw_page(mtd, chip, page);
+	/*if (status != 0) {
+		printk("load_hynix_opt_reg: phase 3 fail status = %d\n", status);
+		//return -1;
+	}*/
+	/* Leave OTP mode. */
+	status = write_bytes_cmd(mtd, 1, 0, 0, (uint8_t *)&reset, NULL, NULL);
+	if (status) {
+		printk("load_hynix_opt_reg: reset fail");
+	}
+	status = write_bytes_cmd(mtd, 1, 0, 0, (uint8_t *)&retry_end, NULL, NULL);
+	if (status) {
+		printk("load_hynix_opt_reg: OTP end 0x38 fail");
+	}
+
+	if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+		nfc_hw_rdmz(mtd, 1);//enable rdmz
+
+	print_nand_buffer((uint8_t *)info->dmabuf, 1040);
+	buff = info->dmabuf;
+	/* Sanity-check the table header before using it as loop bounds. */
+	if (buff[0] > 8 || buff[1] > 8) {
+		printk("retry_cmd buff is not big enough for size %d\n", buff[0]*buff[1]);
+		return -1;
+	}
+
+	retry_times = buff[0];
+	retry_regs = buff[1];
+
+	cur_chip->total_try_times = buff[0] - 1;
+	cur_chip->retry_reg_num = buff[1];
+	/* Validate copies pairwise: each table is stored normal + inverted. */
+	for (i = 0; i < 16; i+=2) {
+		bf = &buff[i * retry_times * retry_regs + 2];
+		bf2 = &buff[(i+1) * retry_times * retry_regs + 2];
+		for (j = 0; j < (retry_times*retry_regs); j++) {
+			if ((bf[j] ^ bf2[j]) != 0xFF) {
+				printk("inverse check fail %x %x\n", bf[j], bf2[j]);
+				break;
+			}
+		}
+		if (j >= (retry_times*retry_regs)) {
+			chk = 1;
+			break;
+		}
+	}
+
+	/* NOTE(review): on total check failure the last copy is still used. */
+	if (chk == 0) {
+		printk("hynix : no valid otp data checked\n");
+	}
+
+	/* Set 0 holds the defaults; sets 1..N-1 are the retry values. */
+	for (j = 0; j < retry_regs; j++)
+		cur_chip->retry_def_value[j] = bf[j];
+
+	print_nand_buffer(cur_chip->retry_def_value, retry_regs);
+
+	for (i = 0; i < (retry_times-1); i++) {
+		for (j = 0; j < retry_regs; j++) {
+			cur_chip->retry_value[i*retry_regs + j] = bf[(i+1)*retry_regs + j];
+		}
+		print_nand_buffer(&cur_chip->retry_value[i*retry_regs], retry_regs);
+	}
+	/* Seal the default table (see hynix_get_parameter). */
+	cur_chip->retry_def_value[buff[1]] = 0xff;
+	cur_chip->retry_def_value[buff[1]+1] = 0xff;
+
+
+	return 0;
+}
+
+/*
+ * nand_get_para - populate the chip's retry parameters at probe time.
+ *
+ * Prefers the vendor OTP table loader (get_otp_table) when the chip-table
+ * entry provides one, otherwise falls back to get_parameter() in
+ * READ_RETRY_MODE; also loads ESLC defaults when eslc_reg_num is set.
+ * On any failure chip->cur_chip is cleared so retry support is disabled.
+ * Returns 0 on success.
+ */
+int nand_get_para(struct mtd_info *mtd, struct nand_chip *chip)
+{
+	int ret = 0;
+	struct nand_read_retry_param *cur_chip = chip->cur_chip;
+
+	if (cur_chip->get_otp_table) {
+		ret = cur_chip->get_otp_table(mtd, chip);
+		if (ret) {
+			printk("get otp para error\n");
+			chip->cur_chip = NULL;	/* disable retry support */
+			return ret;
+		} else
+			printk("get otp retry para end\n");
+	} else if (cur_chip->get_parameter) {
+		ret = cur_chip->get_parameter(mtd, READ_RETRY_MODE);
+		if (ret) {
+			printk("get default retry para error\n");
+			chip->cur_chip = NULL;	/* disable retry support */
+			return ret;
+		} else
+			printk("get default retry para end\n");
+	}
+
+	if (cur_chip->eslc_reg_num) {
+		ret = cur_chip->get_parameter(mtd, ESLC_MODE);
+		if (ret) {
+			printk("get default eslc error\n");
+			chip->cur_chip = NULL;
+		} else
+			printk("get eslc param end\n");
+	}
+
+	print_nand_buffer((uint8_t *)cur_chip, sizeof(chip_table[0]));
+
+	return ret;
+}
+
+/*
+ * wmt_nand_readID - issue READ ID (0x90, address 0x00) and capture 6 bytes.
+ *
+ * ID bytes are read one at a time through the single-byte data port into
+ * info->dmabuf[0..5]. Returns 0 on success or the failing wait status.
+ */
+static int wmt_nand_readID(struct mtd_info *mtd)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	unsigned int cfg = 0, i = 0;
+	int status = -1;
+
+	/* Command 0x90 + one address cycle, data phase disabled. */
+	writeb(NAND_CMD_READID, info->reg + NFCR2_COMPORT0);
+	writeb(0x00, info->reg + NFCR3_COMPORT1_2);
+	cfg = DPAHSE_DISABLE|(0x02<<1);
+	writew(cfg|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+	status = wmt_wait_cmd_ready(mtd);
+	/* status = wmt_nfc_ready(mtd);*/
+
+	if (status) {
+		printk(KERN_ERR "in wmt_nand_readID(): wait cmd is not ready\n");
+		writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+		return status;
+	}
+	/* Single-byte reads, device-to-NFC direction. */
+	cfg = NAND2NFC|SING_RW;
+	for (i = 0; i < 6; i++) {
+		writew(cfg|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+		status = wmt_wait_cmd_ready(mtd);
+		/* status = wmt_nfc_ready(mtd);*/
+		if (status)
+			return status;
+		status = wmt_nfc_transfer_ready(mtd);
+		/* status = wmt_nand_wait_idle(mtd);*/
+		if (status) {
+			printk(KERN_ERR "in wmt_nand_readID(): wait transfer cmd is not ready\n");
+			return status;
+		}
+		info->dmabuf[i] = readb(info->reg + NFCR0_DATAPORT) & 0xff;
+
+		#ifdef NAND_DEBUG
+		printk(KERN_NOTICE "readID is %x\n", readb(info->reg + NFCR0_DATAPORT));
+		#endif
+	}
+	info->datalen = 0;
+	return 0;
+}
+
+/*
+ * wmt_device_ready - mtd dev_ready hook.
+ * Returns non-zero (bit 0 of NFCRa_NFC_STAT) when the flash busy pin
+ * reports ready, 0 while the device is busy.
+ */
+static int wmt_device_ready(struct mtd_info *mtd)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	return readb(info->reg + NFCRa_NFC_STAT) & 0x01;
+}
+
+
+/*
+ * wmt_nand_enable_hwecc - toggle the BCH ECC engine.
+ * @mode: hardware_ecc clears the DIS_BCH_ECC bit (engine on); any other
+ *        mode sets it (engine bypassed).
+ */
+static void wmt_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	if (mode == hardware_ecc)
+		writeb(readb(info->reg + NFCR9_ECC_BCH_CTRL) & (~DIS_BCH_ECC), info->reg + NFCR9_ECC_BCH_CTRL);
+	else
+		writeb(readb(info->reg + NFCR9_ECC_BCH_CTRL) | DIS_BCH_ECC, info->reg + NFCR9_ECC_BCH_CTRL);
+}
+
+/*
+ * print_nand_register - debug dump of the first 0x200 bytes of NFC
+ * register space, four 32-bit registers per line.
+ */
+/*static*/ void print_nand_register(struct mtd_info *mtd)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	int j;
+
+	for (j = 0; j < 0x200; j += 16)
+		printk(KERN_NOTICE "NFCR%x ~ NFCR%x = 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\r\n",
+			j/4, (j+12)/4,
+			readl(info->reg + j + 0),
+			readl(info->reg + j + 4),
+			readl(info->reg + j + 8),
+			readl(info->reg + j + 12));
+}
+
+/*
+ * print_nand_buffer - hex-dump a byte buffer, 16 bytes per row.
+ * @value: buffer to dump.
+ * @length: number of valid bytes.
+ *
+ * Fixes over the previous version: the old code always printed 16 bytes
+ * per row, reading up to 15 bytes past @length when it was not a multiple
+ * of 16 (several callers pass reg_num-sized buffers); and it passed plain
+ * `char` to %2.2x, so bytes >= 0x80 printed sign-extended as "ffffffxx".
+ * Rows are now built per valid byte with the value cast to unsigned char.
+ */
+void print_nand_buffer(char *value, unsigned int length)
+{
+	unsigned int row, col;
+	int pos;
+	char line[3 * 16 + 1];	/* "xx" plus "-" separator per byte */
+
+	for (row = 0; row < length; row += 16) {
+		pos = 0;
+		for (col = 0; col < 16 && (row + col) < length; col++)
+			pos += sprintf(line + pos, "%s%2.2x",
+				col ? "-" : "", (unsigned char)value[row + col]);
+		printk(KERN_NOTICE "Row%3.3x:%s\n", row, line);
+	}
+}
+/*
+ * print_nand_buffer_int - hex-dump a 32-bit word buffer, 8 words per row.
+ * @value: buffer to dump.
+ * @length: number of valid words.
+ *
+ * Fix: the old code always printed 8 words per row, reading up to 7 words
+ * past @length when it was not a multiple of 8. Rows are now built only
+ * from valid elements.
+ */
+void print_nand_buffer_int(unsigned int *value, unsigned int length)
+{
+	unsigned int row, col;
+	int pos;
+	char line[9 * 8 + 1];	/* "%8.2x" plus "-" separator per word */
+
+	for (row = 0; row < length; row += 8) {
+		pos = 0;
+		for (col = 0; col < 8 && (row + col) < length; col++)
+			pos += sprintf(line + pos, "%s%8.2x",
+				col ? "-" : "", value[row + col]);
+		printk(KERN_NOTICE"Row%3.3x:%s\n", row, line);
+	}
+}
+
+/*
+ * set_read_addr - load column/row address cycles into the NFC comport
+ * registers and report how many cycles were written.
+ * @address_cycle: out, number of address cycles loaded.
+ * @column: byte offset within the page, or -1 for row-only addressing.
+ * @page_addr: page (row) number, or -1 for column-only addressing.
+ *
+ * Large-page devices (realwritesize != 512) take 2 column cycles; 512-byte
+ * devices take 1. Row cycles follow, with one extra cycle once the chip
+ * size crosses the per-page-size threshold (128MiB @ 2KiB, 256MiB @ 4KiB,
+ * 512MiB @ 8KiB, 1GiB @ 16KiB, 32MiB @ 512B).
+ */
+static void set_read_addr(struct mtd_info *mtd, unsigned int *address_cycle, int column, int page_addr)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	struct nand_chip *chip = mtd->priv;
+	unsigned int addr_cycle = 0;
+
+	if (column != -1) {
+		/* Column cycles: 2 for large-page parts, 1 for 512-byte pages. */
+		writeb(column, info->reg + NFCR3_COMPORT1_2);
+		addr_cycle++;
+		if (mtd->realwritesize != 512) {
+			writeb(column >> 8, (unsigned char *)(info->reg + NFCR3_COMPORT1_2) + 1);
+			addr_cycle++;
+		}
+		if (page_addr != -1) {
+			/* Two row cycles; register placement depends on how many
+			 * column cycles were emitted above. */
+			if (mtd->realwritesize != 512) {
+				writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+				page_addr >>= 8;
+				writeb(page_addr, (unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+				addr_cycle += 2;
+			} else {
+				writeb(page_addr, (unsigned char *)(info->reg + NFCR3_COMPORT1_2) + 1);
+				page_addr >>= 8;
+				writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+				addr_cycle += 2;
+			}
+
+			if (mtd->realwritesize == 2048) {
+				/* One more address cycle for devices > 128MiB */
+				if (chip->chipsize > (128 << 20)) {
+					page_addr >>= 8;
+					if (mtd->realwritesize != 512)
+						writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+					else
+						writeb(page_addr,
+						(unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+					addr_cycle++;
+				}
+			} else if (mtd->realwritesize == 4096) {
+				/* One more address cycle for devices > 256MiB */
+				if (chip->chipsize > (256 << 20)) {
+					page_addr >>= 8;
+					if (mtd->realwritesize != 512)
+						writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+					else
+						writeb(page_addr,
+						(unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+					addr_cycle++;
+				}
+			} else if (mtd->realwritesize == 8192) {
+				/* One more address cycle for devices > 512MiB */
+				if (chip->chipsize > (512 << 20)) {
+					page_addr >>= 8;
+					if (mtd->realwritesize != 512)
+						writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+					else
+						writeb(page_addr,
+						(unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+					addr_cycle++;
+				}
+			} else if (mtd->realwritesize == 16384) {
+				/* One more address cycle for devices > 1024MiB */
+				if (chip->chipsize > (1024 << 20)) {
+					page_addr >>= 8;
+					writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+					addr_cycle++;
+				}
+			} else {/*page size 512*/
+				/* One more address cycle for devices > 32MiB */
+				if (chip->chipsize > (32 << 20)) {
+					page_addr >>= 8;
+					if (mtd->realwritesize != 512)
+						writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+					else
+						writeb(page_addr,
+						(unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+					addr_cycle++;
+				}
+			}
+		}
+	/* } else if (page_addr != -1) {*/
+	} else if ((page_addr != -1) && (column == -1)) {
+		/* Row-only addressing (e.g. erase): cycles start at COMPORT1_2. */
+		writeb(page_addr & 0xff, info->reg + NFCR3_COMPORT1_2);
+		page_addr >>= 8;
+		writeb(page_addr & 0xff, (unsigned char *)(info->reg + NFCR3_COMPORT1_2) + 1);
+		addr_cycle += 2;
+
+		if (mtd->realwritesize == 2048) {
+			/* One more address cycle for devices > 128MiB */
+			if (chip->chipsize > (128 << 20)) {
+				page_addr >>= 8;
+				writeb(page_addr & 0xff,
+				info->reg + NFCR4_COMPORT3_4);
+				addr_cycle++;
+			}
+		} else if (mtd->realwritesize == 4096) {
+			/* One more address cycle for devices > 256MiB */
+			if (chip->chipsize > (256 << 20)) {
+				page_addr >>= 8;
+				writeb(page_addr & 0xff,
+				info->reg + NFCR4_COMPORT3_4);
+				addr_cycle++;
+			}
+		} else if (mtd->realwritesize == 8192) {
+			/* One more address cycle for devices > 512MiB */
+			if (chip->chipsize > (512 << 20)) {
+				page_addr >>= 8;
+				writeb(page_addr & 0xff,
+				info->reg + NFCR4_COMPORT3_4);
+				addr_cycle++;
+			}
+		} else if (mtd->realwritesize == 16384) {
+			/* One more address cycle for devices > 1024MiB */
+			if (chip->chipsize > (1024 << 20)) {
+				page_addr >>= 8;
+				writeb(page_addr & 0xff,
+				info->reg + NFCR4_COMPORT3_4);
+				addr_cycle++;
+			}
+		} else {/*page size = 512 bytes */
+			/* One more address cycle for devices > 32MiB */
+			if (chip->chipsize > (32 << 20)) {
+
+				/* One more address cycle for devices > 128MiB */
+				/* if (chip->chipsize > (128 << 20)) {*/
+				page_addr >>= 8;
+				/* writeb(page_addr,
+				info->reg + NFCR4_COMPORT3_4 + 1); */
+				/* before, may be a little error */
+				writeb(page_addr & 0xff,
+				info->reg + NFCR4_COMPORT3_4);
+				addr_cycle++;
+			}
+		}
+	}
+	*address_cycle = addr_cycle;
+}
+
+/**
+ * wmt_multi_page_start_micron - kick off a Micron two-plane page read
+ * @mtd: MTD device structure
+ * @command: issuing command code (not used inside this helper)
+ * @colum: column address (not used inside this helper)
+ * @page: page address of the first plane
+ *
+ * Sends 00h + 5 address cycles + 32h for plane 0, waits for ready,
+ * then 00h + address for plane 1 (page + pagecnt) and the final 30h
+ * confirm.  Completion is signalled by the B2R interrupt.  Always
+ * returns 0; failures are only logged via printk.
+ */
+static int wmt_multi_page_start_micron(struct mtd_info *mtd, unsigned command, int colum, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ //struct nand_chip *chip = mtd->priv;
+ unsigned int pagecnt = mtd->pagecnt;
+ unsigned int b2r_stat;
+ int status = 0, i;
+ DECLARE_COMPLETION(complete);
+
+ /* cmd[0]=00h (read), cmd[1]=32h (multi-plane confirm); addr[0..1] are
+  * the column bytes (0), addr[2..4] carry the row (page) bytes. */
+ uint8_t cmd[2] = {0x00, 0x32}, addr[5] = {0, 0, 0, 0, 0};
+
+ for (i = 0; i < 3; i++) {
+ addr[2+i] = 0xFF&(page>>(8*i));
+ }
+
+ status = write_bytes_cmd(mtd, 1, 5, 0, &cmd[0], addr, NULL);
+ if (status)
+ printk("micron multi read cmd(00) + addr fail\n");
+ status = write_bytes_cmd(mtd, 1, 0, 0, &cmd[1], NULL, NULL);
+ if (status)
+ printk("micron multi read cmd(32) + addr fail\n");
+
+ /* check busy to ready status*/
+ status = wmt_nand_ready(mtd);
+
+ /* second plane: same column, row advanced by one block worth of pages */
+ for (i = 0; i < 3; i++) {
+ addr[2+i] = 0xFF&((page + pagecnt)>>(8*i));
+ }
+
+ status = write_bytes_cmd(mtd, 1, 5, 0, &cmd[0], addr, NULL);
+ if (status)
+ printk("micron multi read cmd(00) + addr fail\n");
+
+ /* 30h confirm starts the array read */
+ writeb(0x30, info->reg + NFCR2_COMPORT0);
+
+ /* clear any stale busy-to-ready status before arming the interrupt */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ if (B2R&b2r_stat) {
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ status = wmt_wait_chip_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"The chip is not ready\n");
+ }
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ writeb(0x1B, info->reg + NFCR13_INT_MASK);
+
+ info->done_data = &complete;
+ info->isr_cmd = 0x60;
+
+ /* trigger the command phase only (data phase disabled) */
+ writew(DPAHSE_DISABLE|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ info->datalen = 0;
+
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ //writeb(0x80, info->reg + NFCR13_INT_MASK);
+
+ status = wmt_nfc_wait_idle(mtd, 1, 1, -1, -1); /* write page, don't check ecc */
+ //b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ //writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ status = wmt_wait_cmd_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "Multi_read_start err: nfc command is not ready\n");
+ }
+ /* restore the default interrupt mask */
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ return 0;
+}
+
+/**
+ * wmt_multi_page_start - kick off a generic two-plane page read
+ * @mtd: MTD device structure
+ * @command: issuing command code (not used inside this helper)
+ * @colum: column address (not used inside this helper)
+ * @page: page address of the first plane
+ *
+ * Uses the chip's cmdfunc to send 60h + row address for each plane
+ * (the second plane is one block, i.e. pagecnt pages, later), then
+ * issues the 30h confirm and waits for the B2R interrupt.  Always
+ * returns 0; failures are only logged.
+ */
+static int wmt_multi_page_start(struct mtd_info *mtd, unsigned command, int colum, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned int pagecnt = mtd->pagecnt;
+ unsigned int b2r_stat;
+ int status = 0;
+ DECLARE_COMPLETION(complete);
+
+ chip->cmdfunc(mtd, 0x60, -1, page);
+ chip->cmdfunc(mtd, 0x60, -1, page + pagecnt);
+
+ /* 30h confirm starts the array read */
+ writeb(0x30, info->reg + NFCR2_COMPORT0);
+
+ /* clear any stale busy-to-ready status before arming the interrupt */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ if (B2R&b2r_stat) {
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ status = wmt_wait_chip_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"The chip is not ready\n");
+ }
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ writeb(0x1B, info->reg + NFCR13_INT_MASK);
+
+ info->done_data = &complete;
+ info->isr_cmd = 0x60;
+
+ /* trigger the command phase only (data phase disabled) */
+ writew(DPAHSE_DISABLE|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ info->datalen = 0;
+
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ //writeb(0x80, info->reg + NFCR13_INT_MASK);
+
+ status = wmt_nfc_wait_idle(mtd, 1, 1, -1, -1); /* write page, don't check ecc */
+ //b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ //writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ status = wmt_wait_cmd_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "Multi_read_start err: nfc command is not ready\n");
+ }
+ /* restore the default interrupt mask */
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ return 0;
+}
+//unsigned int r1,r2,r3,r4,r5,r6,r7,r8,r9,r10;
+/**
+ * wmt_multi_page_read - DMA the data phase of a started multi-plane read
+ * @mtd: MTD device structure
+ * @command: command code recorded in info->isr_cmd for the ISR
+ * @column: column address, -1 if none
+ * @page_addr: page (row) address
+ *
+ * Sends 00h + address cycles, then 05h (random data out) + column,
+ * stages E0h plus the low row byte through the high command FIFO,
+ * configures DMA for one page (plus 1KB when pageSizek is not a power
+ * of two), and triggers the data phase.  If the completion times out,
+ * the DMA ISR is polled directly as a fallback.  Always returns 0.
+ */
+static int wmt_multi_page_read(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ //struct nand_chip *chip = mtd->priv;
+ DECLARE_COMPLETION(complete);
+ unsigned int addr_cycle = 0 /*b2r_stat, bank_stat1, bank_stat2=0*/;
+ int status = -1;
+ unsigned char *FIFO = (unsigned char *) (info->reg+ECC_FIFO_c);
+
+ info->isr_cmd = command;
+ info->data_ecc_uncor_err = 0;
+ info->dma_finish = 0;
+ info->done_data = &complete;
+
+ set_read_addr(mtd, &addr_cycle, column, page_addr);
+
+ /* 00h + the address cycles loaded by set_read_addr(), no data phase */
+ writeb(NAND_CMD_READ0, info->reg + NFCR2_COMPORT0);
+ //printk("multi read page=%x blk=%d, addr_cycle=%d trig=%x\n",page_addr, page_addr/128, addr_cycle, DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD);
+ //print_nand_register(mtd);
+ writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ wmb();
+ status = wmt_wait_cmd_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "Multi_read_s2 err: nfc command is not ready\n");
+ }
+
+ /* 05h (random data out) restarts output at the requested column */
+ addr_cycle = 0;
+ if (column != -1) {
+ writeb(column, info->reg + NFCR3_COMPORT1_2);
+ writeb(column, info->reg + NFCR3_COMPORT1_2 + 1);
+ addr_cycle += 2;
+ }
+
+ writeb(NAND_CMD_RNDOUT, info->reg + NFCR2_COMPORT0);
+ writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ wmb();
+ status = wmt_wait_cmd_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "Multi_read_s2 err: nfc command is not ready\n");
+ }
+
+ /* stage E0h + low row byte in the high command FIFO, then point the
+  * controller at it via NFCRc_CMD_ADDR */
+ writeb(0x1C, info->reg + NFCR13_INT_MASK);
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) | HIGH64FIFO, info->reg + NFCRd_OOB_CTRL);
+ FIFO[0] = NAND_CMD_RNDOUTSTART;
+ FIFO[3] = 0xFF&page_addr;
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & ~HIGH64FIFO, info->reg + NFCRd_OOB_CTRL);
+ writel(0x80001, info->reg + NFCRc_CMD_ADDR);
+
+ /* non-power-of-two page sizes carry an extra 1KB in the transfer */
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize + 1024, 0, -1, -1);
+ else
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize, 0, -1, -1);//r3 = wmt_read_oscr();
+
+ info->datalen = 0;
+
+ //printk("2page=%x blk=%d, addr_cycle=%d trig=%x\n",page_addr, page_addr/256, addr_cycle, NAND2NFC|MUL_CMDS|((addr_cycle + 2)<<1)|NFC_TRIGGER|OLD_CMD);
+//print_nand_register(mtd);
+ //writew(NAND2NFC|MUL_CMDS|((addr_cycle + 2)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ //writew(NAND2NFC|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ writew(NAND2NFC|(1<<1)|NFC_TRIGGER, info->reg + NFCR1_COMCTRL);
+ wmb();
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ /* fallback: if the ISR never completed us, poll the DMA status bit */
+ if (info->dma_finish != 1) {
+ printk("read page wait dma time out info->dma_finish=%d\n",info->dma_finish);
+ print_nand_register(mtd);
+ dump_stack();
+ while(info->dma_finish == 0) {
+ if (readl(info->reg + NFC_DMA_ISR)&1) {
+ writel(0, info->reg + NFC_DMA_IER);
+ info->dma_finish++;
+ if (info->done_data != NULL) {
+ //complete(info->done_data);
+ info->done_data = NULL;
+ }
+ }
+ }
+ }
+
+ status = nand_pdma_handler(mtd);
+ nand_free_pdma(mtd);
+ if (status)
+ printk(KERN_ERR "dma transfer data time out: %x\n",
+ readb(info->reg + NFCRa_NFC_STAT));
+
+ wmt_nfc_transfer_ready(mtd);
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ status = wmt_nfc_wait_idle(mtd, 0, command, column, page_addr);
+ if (status) {
+ printk(KERN_NOTICE"multi-read page wait idle status =%d\n", status);
+ }
+ return 0;
+}
+
+/*
+ * Poll the PDMA interrupt status register until the transfer-done bit
+ * (NAND_PDMA_IER_INT_STS) is set.  Gives up after 2^20 polls.
+ * Returns 0 on success, -3 on timeout.
+ */
+static int wmt_dma_ready(struct mtd_info *mtd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int poll;
+
+ for (poll = 0; poll < (1 << 20); poll++) {
+ if (readb(info->reg + NFC_DMA_ISR) & NAND_PDMA_IER_INT_STS)
+ return 0;
+ }
+ return -3;
+}
+
+//#define RE_PORFO
+/**
+ * wmt_nand_page_read - read one page with DMA, ECC, and vendor read-retry
+ * @mtd: MTD device structure
+ * @command: NAND command being emulated (NAND_CMD_READ0)
+ * @column: column address, -1 if none
+ * @page_addr: page (row) address
+ *
+ * Programs the DMA, issues 00h + address cycles + 30h and waits for the
+ * DMA completion.  When the controller reports an uncorrectable ECC
+ * error and the chip has a vendor retry table (chip->cur_chip), the
+ * read is repeated with vendor-specific retry parameters; Hynix,
+ * Toshiba, Samsung/Micron and SanDisk each have their own enter/exit
+ * sequences.  Returns 0 on success or silent give-up, or the wait-idle
+ * status once the retry budget is exhausted.
+ */
+static int wmt_nand_page_read(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_chip *chip = mtd->priv;
+ struct nand_read_retry_param *cur_chip = chip->cur_chip;
+ unsigned int addr_cycle = 0, b2r_stat;
+ int status = -1;
+ unsigned int bank_stat, id = 0, pageInBlk = 0;
+ int i, total_times = 1, total_try_times = 0, tmp = 0;
+ /* 0xB6/0xD6: SanDisk vendor commands to enter/leave retry mode */
+ unsigned char reset = NAND_CMD_RESET, retry_enable =0xB6, retry_disable = 0xD6;
+ DECLARE_COMPLETION(complete);
+
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "read data cmd: 0x%x col:0x%x, page:0x%x\n", command, column, page_addr);
+ #endif
+ /*info->phase = 0;
+ if (readl(info->reg + NFCR9_ECC_BCH_CTRL) & DIS_BCH_ECC)
+ info->phase = 2;*/
+
+ /* total passes = 1 normal read + vendor retry count.  SanDisk encodes
+  * separate lower/upper-page retry counts in the two bytes of
+  * total_try_times. */
+ if (cur_chip != NULL) {
+ total_times = cur_chip->total_try_times + 1;
+ id = (cur_chip->nand_id>>24)&0xFF;
+ if (id == NAND_MFR_SANDISK) {
+ pageInBlk = page_addr%mtd->pagecnt;
+ if (((pageInBlk%2) == 1 || pageInBlk == 0) && pageInBlk != (mtd->pagecnt - 1))
+ total_try_times = cur_chip->total_try_times&0xFF;//Lower page
+ else
+ total_try_times = (cur_chip->total_try_times>>8)&0xFF;//Upper page
+ } else
+ total_try_times = cur_chip->total_try_times&0xFF;
+ //printk("read page--cur_times = %d, totoal_times = %d \n", cur_chip->cur_try_times, total_times);
+ }
+ //cur_chip->cur_try_times = 4;
+ for (i = 0; i < total_times; i++) {
+ info->unc_bank = 0;
+ info->unc_allFF = 0;
+ if (i > 0)
+ info->isr_cmd = command;
+
+ info->data_ecc_uncor_err = 0;
+ info->dma_finish = 0;
+ writeb(0x1C, info->reg + NFCR13_INT_MASK);
+ info->done_data = &complete;
+ /* 1: read, 0:data, -1: */
+ if (info->phase == 2) {//disable bch read
+ tmp = (mtd->realoobsize > 512) ? mtd->realoobsize : 512;
+ wmt_nfc_dma_cfg(mtd, tmp, 0, -1, -1);
+ } else {
+ if (info->oob_ecc_error == 0x50) {//read last bank for oob in DDR mode
+ wmt_nfc_dma_cfg(mtd, chip->ecc.size, 0, -1, -1);
+ } else {//read whole page
+ /* non-power-of-two page sizes carry an extra 1KB */
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize + 1024, 0, -1, -1);
+ else
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize, 0, -1, -1);
+ }
+ }
+ /*print_nand_register(mtd);*/
+ wmb();
+ info->datalen = 0;
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ /* printk(KERN_NOTICE "RB is %d\n", b2r_stat & 0x02);*/
+
+ set_read_addr(mtd, &addr_cycle, column, page_addr);
+
+ bank_stat = readw(info->reg + NFCRb_NFC_INT_STAT);
+ writew(bank_stat|0x101, info->reg + NFCRb_NFC_INT_STAT);
+
+ status = wmt_wait_chip_ready(mtd); /*Vincent 2008.11.3*/
+ if (status)
+ printk(KERN_ERR "The chip is not ready\n");
+ /* 00h ... 30h; the confirm byte goes into the comport slot that
+  * matches the number of address cycles */
+ writeb(NAND_CMD_READ0, info->reg + NFCR2_COMPORT0);
+ if (addr_cycle == 4)
+ writeb(NAND_CMD_READSTART, info->reg + NFCR5_COMPORT5_6);
+ else if (addr_cycle == 5)
+ writeb(NAND_CMD_READSTART, (unsigned char *)(info->reg + NFCR5_COMPORT5_6) + 1);
+ wmb();
+ writew(NAND2NFC|MUL_CMDS|((addr_cycle + 2)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ wmb();
+ //printk("read page wait for completion\n");
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ /* fallback: if the ISR never completed us, poll the DMA status bit */
+ if (info->dma_finish != 1) {
+ printk("read page wait dma time out info->dma_finish=%d\n",info->dma_finish);
+ print_nand_register(mtd);
+ dump_stack();
+ while(info->dma_finish == 0) {
+ if (readl(info->reg + NFC_DMA_ISR)&1) {
+ writel(0, info->reg + NFC_DMA_IER);
+ info->dma_finish++;
+ if (info->done_data != NULL) {
+ //complete(info->done_data);
+ info->done_data = NULL;
+ }
+ }
+ }
+ }
+ status = nand_pdma_handler(mtd);
+ //printk(KERN_ERR "check status pdma handler status= %x \n", status);
+ nand_free_pdma(mtd);
+ if (status)
+ printk(KERN_ERR "dma transfer data time out: %x\n",
+ readb(info->reg + NFCRa_NFC_STAT));
+//printk("read page 3\n");
+ wmt_nfc_transfer_ready(mtd);
+ /*status = wmt_nand_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"B2R not clear status=0x%x\n", status);*/
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+//printk("read page 4\n");
+ status = wmt_nfc_wait_idle(mtd, 0, command, column, page_addr);
+//printk("read page 5\n");
+ if (status) {
+ printk(KERN_NOTICE"read page wait idle status =%d\n", status);
+ /*print_nand_register(mtd);*/
+ /*while(1);*/
+ }
+ /* without retry support an uncorrectable error is final */
+ if (info->unc_allFF == 0 && info->unc_bank && mtd->dwRetry == 0) {
+ mtd->ecc_stats.failed++;
+ printk("no retry flash occur uncoverable ecc error uncor_err=%d\n", info->data_ecc_uncor_err);
+ }
+
+ if(info->data_ecc_uncor_err == 1) {
+ /* uncorrectable: enter (or continue) the vendor retry sequence */
+ if((cur_chip != NULL)) {
+ mtd->ecc_err_cnt = 0;
+ if (prob_end == 1 && page_addr < ((mtd->blkcnt - 8) * mtd->pagecnt)){
+ if((id != NAND_MFR_HYNIX) ||((id == NAND_MFR_HYNIX) && (cur_chip->cur_try_times >=5)))
+ printk("Unc_Err %d_th pg=0x%x cur_retry=%d\n", i, page_addr, cur_chip->cur_try_times);
+ }
+
+ if (id == NAND_MFR_HYNIX) {
+ //printk("set retry mode cur_try_times=%d\n", cur_chip->cur_try_times);
+ cur_chip->set_parameter(mtd, READ_RETRY_MODE, ECC_ERROR_VALUE);
+ cur_chip->retry = 1;
+
+ if (i == total_try_times) {
+ cur_chip->retry = 0;
+ /* read retry many times still ecc uncorrectable error */
+ cur_chip->set_parameter(mtd, READ_RETRY_MODE, DEFAULT_VALUE);
+ if (prob_end == 1 && page_addr < ((mtd->blkcnt - 8) * mtd->pagecnt))
+ printk("read page after retry still uncor err\n");
+ mtd->ecc_stats.failed++;
+ //dump_stack();
+ //while(cur_chip);
+ return status;
+ }
+ } else if (id == NAND_MFR_TOSHIBA) {
+ if (cur_chip->cur_try_times >= total_try_times) {
+ /* send reset cmd after read retry finish(fail) for toshiba */
+ write_bytes_cmd(mtd, 1, 0, 0, (uint8_t *)&reset, NULL, NULL);
+ cur_chip->cur_try_times = 0;
+ cur_chip->retry = 0;
+ if (prob_end == 1 && page_addr < ((mtd->blkcnt - 8) * mtd->pagecnt))
+ printk("read page after retry still uncor err\n");
+ mtd->ecc_stats.failed++;
+ //while(cur_chip);
+ return status;
+ }
+ if (cur_chip->cur_try_times == 0 && cur_chip->retry != 1)
+ toshiba_pre_condition(mtd);
+ cur_chip->set_parameter(mtd, 0, 0);
+ cur_chip->retry = 1;
+ } else if (id == NAND_MFR_SAMSUNG || id == NAND_MFR_MICRON) {
+ if (cur_chip->cur_try_times >= total_try_times) {
+ /* send default cmd after read retry finish(fail) for samsung */
+ cur_chip->set_parameter(mtd, READ_RETRY_MODE, DEFAULT_VALUE);
+ cur_chip->cur_try_times = 0;
+ cur_chip->retry = 0;
+ if (prob_end == 1 && page_addr < ((mtd->blkcnt - 8) * mtd->pagecnt))
+ printk("read page after retry still uncor err\n");
+ mtd->ecc_stats.failed++;
+ //while(cur_chip);
+ return status;
+ }
+ cur_chip->set_parameter(mtd, READ_RETRY_MODE, ECC_ERROR_VALUE);
+ cur_chip->retry = 1;
+ } else if (id == NAND_MFR_SANDISK) {
+ //printk("set retry mode cur_try_times=%d\n", cur_chip->cur_try_times);
+ cur_chip->set_parameter(mtd, total_try_times, ECC_ERROR_VALUE);
+ if (i == 0 && cur_chip->retry != 1)
+ write_bytes_cmd(mtd, 1, 0, 0, &retry_enable, NULL, NULL);
+ cur_chip->retry = 1;
+
+ if (i == total_try_times) {
+ write_bytes_cmd(mtd, 1, 0, 0, &retry_disable, NULL, NULL);
+ cur_chip->retry = 0;
+ /* read retry many times still ecc uncorrectable error */
+ if (prob_end == 1 && page_addr < ((mtd->blkcnt - 8) * mtd->pagecnt))
+ printk("read page after retry still uncor err\n");
+ mtd->ecc_stats.failed++;
+ //while(cur_chip);
+ return status;
+ }
+ }
+ } else {
+ printk("read page uncor err but cur_chip = NULL!\n");
+ break;
+ }
+ } else {
+ /* read succeeded; if we were retrying, restore the vendor default
+  * parameters.  bakeup preserves the first DMA word because the
+  * exit commands below may clobber the data buffer; it is only
+  * read on the retry==1 path (the else breaks out first). */
+ if (cur_chip) {
+ unsigned int bakeup;
+ if (cur_chip->retry == 1) {
+ if((id != NAND_MFR_HYNIX) || ((id == NAND_MFR_HYNIX)&&(cur_chip->cur_try_times >= 5)))
+ printk("read retry PASS cur_try_times=%d\n", cur_chip->cur_try_times);
+ bakeup = *(uint32_t *)info->dmabuf;
+ } else
+ break;
+ /* send reset cmd after read retry finish(pass) for toshiba */
+ if (id == NAND_MFR_TOSHIBA) {
+ write_bytes_cmd(mtd, 1, 0, 0, (uint8_t *)&reset, NULL, NULL);
+ printk("reset cmd to finish retry\n");
+ cur_chip->cur_try_times = 0;
+ } else if (id == NAND_MFR_SAMSUNG || id == NAND_MFR_MICRON) {
+ cur_chip->set_parameter(mtd, READ_RETRY_MODE, DEFAULT_VALUE);
+ cur_chip->cur_try_times = 0;
+ } else if (id == NAND_MFR_SANDISK) {
+ write_bytes_cmd(mtd, 1, 0, 0, &retry_disable, NULL, NULL);
+ //set retry default value need before page program
+ cur_chip->set_parameter(mtd, total_try_times, DEFAULT_VALUE);
+ //should we reset cur_try_times to zero?
+ cur_chip->cur_try_times = -1;
+ /* NOTE(review): "} if" below was almost certainly meant to be
+  * "} else if".  Harmless as written (id cannot match both
+  * SanDisk and Hynix), but worth fixing. */
+ } if (id == NAND_MFR_HYNIX) {
+ cur_chip->set_parameter(mtd, READ_RETRY_MODE, DEFAULT_VALUE);
+ cur_chip->cur_try_times = -1;
+ }
+ cur_chip->retry = 0;
+ *(uint32_t *)info->dmabuf = bakeup;
+ }
+ break;
+ }
+ } //end of retry for loop
+
+ return 0;
+}
+#if 0
+/*
+ * NOTE: the five copy-back / multi-plane-copy helpers below are
+ * compiled out and kept only as a reference for the hardware command
+ * sequences (60h/35h copy-back read, 85h/11h/10h copy-back program).
+ */
+/* Issue 60h row addresses for both planes, then the 35h copy-back confirm. */
+static int wmt_multi_copy_start(struct mtd_info *mtd, unsigned command, int column, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ struct nand_chip *chip = mtd->priv;
+
+ unsigned int div = mtd->erasesize / mtd->writesize;
+
+ unsigned int b2r_stat;
+
+ int status = 0;
+
+ chip->cmdfunc(mtd, 0x60, -1, page);
+
+ chip->cmdfunc(mtd, 0x60, -1, page + div);
+
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writeb(0x35, info->reg + NFCR2_COMPORT0);
+
+ writew(NAND2NFC|DPAHSE_DISABLE|1<<1|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);// cost lots of time
+
+ status = wmt_wait_cmd_ready(mtd);
+
+ if (status) {
+
+ printk(KERN_ERR "Multi_read err: nfc command is not ready\n");
+ }
+
+ return 0;
+}
+/* Read back the copy-back data: 00h + addresses, then 05h/E0h random-out. */
+static int wmt_multi_copy_read(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ //struct nand_chip *chip = mtd->priv;
+
+ unsigned int addr_cycle = 0;//, b2r_stat, bank_stat1, bank_stat2=0;
+ int status = -1;
+ //unsigned int bank_stat, id = 0, pageInBlk = 0;
+
+ set_read_addr(mtd, &addr_cycle, column, page_addr);
+ // bank_stat = readw(info->reg + NFCRb_NFC_INT_STAT);
+ // writew(bank_stat|0x101, info->reg + NFCRb_NFC_INT_STAT);
+
+ //status = wmt_wait_chip_ready(mtd); /*Vincent 2008.11.3*/ //problem
+
+ // if (status)
+ // printk(KERN_ERR "The chip is not ready\n");
+ writeb(NAND_CMD_READ0, info->reg + NFCR2_COMPORT0);
+
+ writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_wait_cmd_ready(mtd);
+ if (status) {
+ printk(KERN_ERR "Multi_read err: nfc command is not ready\n");
+ }
+
+ addr_cycle = 0;
+ if (column != -1) {
+ writeb(column, info->reg + NFCR3_COMPORT1_2);
+ writeb(column, info->reg + NFCR3_COMPORT1_2 + 1);
+ addr_cycle += 2;
+ }
+
+ // writeb(0x07, info->reg + WMT_NFC_REDUNT_ECC_STAT);
+ // writel(0xffffffff, info->reg + WMT_NFC_BANK18_ECC_STAT);
+ // b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ // writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writeb(NAND_CMD_RNDOUT, info->reg + NFCR2_COMPORT0);
+
+ writeb(NAND_CMD_RNDOUTSTART, info->reg + NFCR4_COMPORT3_4);
+
+ writew(DPAHSE_DISABLE|MUL_CMDS|((addr_cycle + 2)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_nfc_wait_idle(mtd, 0, command, column, page_addr);
+ if(status) {
+ printk(KERN_NOTICE"WaitIdle is not ready=%d\n", status);
+ }
+ return status;
+}
+
+/* Two-plane copy-back program: 85h + addr + 11h, then 81h + addr + 10h. */
+static int wmt_multi_copy_write(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int div = mtd->erasesize / mtd->writesize;
+ unsigned int addr_cycle = 0;
+ int status = -1;
+ int b2r_stat = 0;
+
+ set_read_addr(mtd, &addr_cycle, column, page_addr);
+ writeb(0x85, info->reg + NFCR2_COMPORT0);
+ writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_wait_cmd_ready(mtd);
+ if (status)
+ printk(KERN_ERR "erase command is not ready\n");
+
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writeb(0x11, info->reg + NFCR2_COMPORT0);
+ writew(DPAHSE_DISABLE|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+ status = wmt_nand_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"B2R not clear status=0x%x\n", status);
+ status = wmt_nfc_wait_idle(mtd, 0, command, column, page_addr);
+
+ if (status) {
+ printk(KERN_NOTICE"read page wait idle status =%d\n", status);
+ }
+ set_read_addr(mtd, &addr_cycle, column, page_addr+div);
+ writeb(0x81, info->reg + NFCR2_COMPORT0);
+ writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_wait_cmd_ready(mtd);
+ if (status)
+ printk(KERN_ERR "command is not ready\n");
+
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writeb(0x10, info->reg + NFCR2_COMPORT0);
+ writew(DPAHSE_DISABLE|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+ status = wmt_nand_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"B2R not clear status=0x%x\n", status);
+ status = wmt_nfc_wait_idle(mtd, 0, command, column, page_addr);
+
+ if (status) {
+ printk(KERN_NOTICE"read page wait idle status =%d\n", status);
+ }
+ //printk("\n wmt_copy_back_write is OK!");
+ return status;
+}
+
+/* Single-plane copy-back read: 00h + addresses + 35h confirm. */
+static int wmt_copy_back_read(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int addr_cycle = 0, b2r_stat;
+ int status = -1;
+
+ set_read_addr(mtd, &addr_cycle, column, page_addr);
+
+ writeb(NAND_CMD_READ0, info->reg + NFCR2_COMPORT0);
+
+ writew(NAND2NFC|DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD,info->reg + NFCR1_COMCTRL);
+
+ status = wmt_wait_cmd_ready(mtd);
+ if (status)
+ printk(KERN_ERR "Read 0x00 cmd is not ready\n");
+
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writeb(0x35, info->reg + NFCR2_COMPORT0);
+
+ writew(DPAHSE_DISABLE|1<<1|NFC_TRIGGER|OLD_CMD,info->reg + NFCR1_COMCTRL);
+
+ status = wmt_nand_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"B2R not clear status=0x%x\n", status);
+ status = wmt_nfc_wait_idle(mtd, 0, command, column, page_addr);
+
+ if (status) {
+ printk(KERN_NOTICE"read page wait idle status =%d\n", status);
+ }
+ //printk("\n wmt_copy_back_read is OK! ");
+ return status;
+}
+
+/* Single-plane copy-back program: 85h + addresses + 10h confirm. */
+static int wmt_copy_back_write(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int addr_cycle = 0;
+ int status = -1;
+ int b2r_stat = 0;
+ set_read_addr(mtd, &addr_cycle, column, page_addr);
+ writeb(0x85, info->reg + NFCR2_COMPORT0);
+ writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ status = wmt_wait_cmd_ready(mtd);
+ if (status)
+ printk(KERN_ERR "erase command is not ready\n");
+
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ writeb(0x10, info->reg + NFCR2_COMPORT0);
+ writew(DPAHSE_DISABLE|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+ status = wmt_nand_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"B2R not clear status=0x%x\n", status);
+ status = wmt_nfc_wait_idle(mtd, 0, command, column, page_addr);
+
+ if (status) {
+ printk(KERN_NOTICE"read page wait idle status =%d\n", status);
+ }
+ //printk("\n wmt_copy_back_write is OK!");
+ return status;
+}
+#endif
+
+
+/**
+ * wmt_nand_oob_read - read the spare (OOB) area of one page
+ * @mtd: MTD device structure
+ * @command: NAND command being emulated (NAND_CMD_READOOB)
+ * @column: column address, -1 if none
+ * @page_addr: page (row) address
+ *
+ * Issues 00h + address cycles + 30h and waits for the B2R interrupt.
+ * The OOB read has no DMA data phase; completion is signalled by B2R
+ * only.  Errors are logged; a wait-idle status of -4 returns silently.
+ */
+static void wmt_nand_oob_read(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ //struct nand_chip *chip = mtd->priv;
+ unsigned int addr_cycle = 0, b2r_stat;
+ int status = -1;
+ unsigned int bank_stat;
+ int mycolumn = column, mypage_addr = page_addr;
+ DECLARE_COMPLETION(complete);
+
+ info->data_ecc_uncor_err = 0;
+
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "wmt_nand_oob_read: readoob col=0x%x, page=0x%x\n", column, page_addr);
+ #endif
+
+ /* clear stale B2R status and unmask the interrupt sources we need */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ writeb(0x18, info->reg + NFCR13_INT_MASK);
+ info->done_data = &complete;
+
+ info->datalen = 0;
+ /* write to clear B2R */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ /* printk(KERN_NOTICE "RB is %d\n", b2r_stat & 0x02);*/
+
+ set_read_addr(mtd, &addr_cycle, column, page_addr);
+
+ /* acknowledge any pending ECC status before triggering */
+ bank_stat = readw(info->reg + NFCRb_NFC_INT_STAT);
+ if (bank_stat)
+ writew(B2R|(ERR_CORRECT | BCH_ERR), info->reg + NFCRb_NFC_INT_STAT);
+
+ status = wmt_wait_chip_ready(mtd); /*Vincent 2008.11.3*/
+ if (status)
+ printk(KERN_ERR "The chip is not ready\n");
+ /* 00h ... 30h; the confirm byte goes into the comport slot that
+  * matches the number of address cycles */
+ writeb(NAND_CMD_READ0, info->reg + NFCR2_COMPORT0);
+ if (addr_cycle == 4)
+ writeb(NAND_CMD_READSTART, info->reg + NFCR5_COMPORT5_6);
+ else if (addr_cycle == 5)
+ writeb(NAND_CMD_READSTART, (unsigned char *)(info->reg + NFCR5_COMPORT5_6) + 1);
+
+ writew(NAND2NFC|MUL_CMDS|((addr_cycle + 2)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+
+ /* read oob has no dma but assert B2R status */
+ //printk("read oob wait for completion");
+ wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+ status = wmt_nfc_transfer_ready(mtd);
+ if (status)
+ printk(KERN_NOTICE"oob read wait NFC_BUSY time out\n");
+ //wmt_nand_ready(mtd);
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+
+ status = wmt_nfc_wait_idle(mtd, 0, command, mycolumn, mypage_addr);
+
+ if (status) {
+ if (status == -4)
+ return;
+ printk(KERN_ERR "wmt_nfc_wait_idle status =%d\n", status);
+ printk(KERN_ERR "command =0x%x\n", command);
+ printk(KERN_ERR "Read ERR ,NFC is not idle\n");
+ /*print_nand_register(mtd);*/
+ /* resume the stalled BCH engine before bailing out */
+ writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+ /*while(1);*/
+ }
+//printk(KERN_NOTICE "rbe|");
+ return;
+}
+
+/**
+ * wmt_isbad_bbt - [NAND Interface] look up a block in the in-RAM bbt
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure holding the bbt bitmap
+ * @block: block number to query
+ *
+ * Each block owns a 2-bit entry in chip->bbt: 00 means good, any other
+ * value (bad / worn / reserved) means unusable.  Returns 0 for a good
+ * block, 1 otherwise (including when mtd/chip/bbt are uninitialised).
+*/
+int wmt_isbad_bbt(struct mtd_info *mtd, struct nand_chip *chip, int block)
+{
+ unsigned int entry;
+
+ if (!mtd || !chip) {
+ printk(KERN_ERR "nand not init, check bad block fail.\n");
+ return 1;
+ }
+ if (!chip->bbt) {
+ printk(KERN_ERR "nand bbt not init, check bad block fail.\n");
+ return 1;
+ }
+
+ /* Four 2-bit entries per bbt byte; pick this block's entry. */
+ entry = (chip->bbt[block >> 2] >> ((block << 1) & 0x06)) & 0x03;
+
+ return entry ? 1 : 0;
+}
+
+/**
+ * wmt_isbad_bbt_multi - [NAND Interface] look up a block in the in-RAM
+ * bbt (multi-plane layout)
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure holding the bbt bitmap
+ * @block: block number to query
+ *
+ * In the multi-plane layout each block owns a 4-bit entry in chip->bbt:
+ * 0000 means good, any other value means unusable.  Returns 0 for a
+ * good block, 1 otherwise (including when mtd/chip/bbt are
+ * uninitialised).
+*/
+int wmt_isbad_bbt_multi(struct mtd_info *mtd, struct nand_chip *chip, int block)
+{
+ unsigned int nibble;
+
+ if (!mtd || !chip) {
+ printk(KERN_ERR "nand not init, check bad block fail.\n");
+ return 1;
+ }
+ if (!chip->bbt) {
+ printk(KERN_ERR "nand bbt not init, check bad block fail.\n");
+ return 1;
+ }
+
+ /* Two 4-bit entries per bbt byte; pick this block's nibble. */
+ nibble = (chip->bbt[block >> 1] >> ((block << 2) & 0x4)) & 0x0F;
+
+ return nibble ? 1 : 0;
+}
+
+//#define ESLC_DEBUG
+#define ESLC_READ_WRITE
+#ifdef ESLC_READ_WRITE
+/**
+ * hynix_eslc_page_address_calculate - remap a logical page to its eSLC page
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure (bbt and shift values)
+ * @page: logical page address
+ *
+ * In Hynix eSLC mode only half the pages of a physical block are used,
+ * so one logical block consumes two good physical blocks.  This finds
+ * the partition containing @page (boundaries in par1..par4_ofs, page
+ * units), skips bad blocks recorded in the bbt, locates the physical
+ * block holding the requested half-block, and maps the in-block page
+ * offset through eslc_map_table.  Returns the physical page address,
+ * or -1 when the remapped address falls outside the partition.
+ */
+static int hynix_eslc_page_address_calculate(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ int status = -1, page_in_blk, par_page_start = 0, par_page_end, block;
+ int good_blk = 0, bad_blk = 0, par_blk_start, par_blk_end, i, j, blk_page_shift;
+ unsigned int par_blk_ofs = 0, real_need_blk, real_page;
+
+ blk_page_shift = chip->phys_erase_shift - chip->page_shift;
+ block = page >> blk_page_shift;
+ page_in_blk = page%mtd->pagecnt;
+
+ /* find the partition (page-offset boundaries) that contains @page */
+ if (page < par1_ofs/4) {
+ par_page_start = 0;
+ par_page_end = par1_ofs/4;
+ } else if (page < par1_ofs) {
+ par_page_start = par1_ofs/4;
+ par_page_end = par1_ofs;
+ } else if (page < par2_ofs) {
+ par_page_start = par1_ofs;
+ par_page_end = par2_ofs;
+ } else if (page < par3_ofs) {
+ par_page_start = par2_ofs;
+ par_page_end = par3_ofs;
+ } else {
+ par_page_start = par3_ofs;
+ par_page_end = par4_ofs;
+ }
+ par_blk_start = par_page_start >> blk_page_shift;
+ par_blk_end = par_page_end >> blk_page_shift;
+ par_blk_ofs = block - par_blk_start;
+
+ /* count bad blocks between the partition start and the logical block */
+ for (j = par_blk_start; j < block; j++) {
+ if (chip->realplanenum)
+ status = wmt_isbad_bbt_multi(mtd, chip, j);
+ else
+ status = wmt_isbad_bbt(mtd, chip, j);
+ if (status) {
+ #ifdef ESLC_DEBUG
+ if (page_in_blk == 0 || page_in_blk == (mtd->pagecnt/2))
+ printk("skip blk%d bad\n", j);
+ #endif
+ bad_blk++;
+ }
+ }
+ par_blk_ofs = par_blk_ofs - bad_blk;
+ /* each logical block needs two physical blocks; the upper half-block
+  * (page_in_blk >= pagecnt/2) lives in the second one */
+ real_need_blk = par_blk_ofs*2 + ((page_in_blk >= (mtd->pagecnt/2)) ? 1 : 0);
+
+ /* walk the partition until enough good blocks have been seen */
+ for (i = par_blk_start; i < par_blk_end; i++) {
+ //printk("i=%d, par_blk_start=0x%x, par_blk_end=0x%x real_need_blk=0x%x\n", i, par_blk_start, par_blk_end, real_need_blk);
+ if (chip->realplanenum)
+ status = wmt_isbad_bbt_multi(mtd, chip, i);
+ else
+ status = wmt_isbad_bbt(mtd, chip, i);
+ if (status == 0) {
+ #ifdef ESLC_DEBUG
+ if (page_in_blk == 0 || page_in_blk == (mtd->pagecnt/2))
+ printk("blk%d good\n",i);
+ #endif
+ good_blk++;
+ }
+ if (good_blk >= (real_need_blk + 1)) {
+ #ifdef ESLC_DEBUG
+ if (page_in_blk == 0 || page_in_blk == (mtd->pagecnt/2))
+ printk("wr blk%d \n",i);
+ #endif
+ break;
+ }
+ }
+ if (i >= par_blk_end) {
+ if (page_in_blk == 0 || page_in_blk == (mtd->pagecnt/2))
+ printk(KERN_ERR "eslc addr is out of partition size, skip page=0x%x"
+ ", par_page_end=0x%x, end_blk=%d\n", page, par_page_end, i);
+ return -1;
+ }
+ /* physical page = chosen block base + eSLC-mapped in-block offset */
+ real_page = (i << blk_page_shift) + eslc_map_table[(page_in_blk%(mtd->pagecnt/2))];
+ if (page_in_blk == 0 || page_in_blk == (mtd->pagecnt/2))
+ printk(KERN_NOTICE "page = 0x%x ======> eslc page = 0x%x\n", page, real_page);
+
+ return real_page;
+}
+#endif
+
+/*
+ * wmt_nand_cmdfunc - send a command (and its address cycles) to the NAND device
+ * @mtd: MTD device structure
+ * @command: the command to be sent (NAND_CMD_* or driver-private opcodes)
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This is the version for the new large page
+ * devices. We don't have the separate regions as we have in the small page
+ * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
+ *
+ * Reads/OOB-reads are delegated to wmt_nand_page_read()/wmt_nand_oob_read();
+ * program/erase/reset/status commands are driven directly through the NFC
+ * command registers below. Completion is signalled by the NFC interrupt via
+ * info->done_data.
+ */
+static void wmt_nand_cmdfunc(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	struct nand_chip *chip = mtd->priv;
+	unsigned int addr_cycle = 0, b2r_stat, pmc_nand, chip_en, tmp;
+	int status = -1, i;
+	int mycolumn, mypage_addr;
+	DECLARE_COMPLETION(complete);
+
+	/*
+	 * For single-plane reads, remember the logical page and decide whether
+	 * software de-randomization applies: the last 8 erase blocks (BBT area)
+	 * are handled with bbt_sw_rdmz = 1.
+	 */
+	if (!chip->realplanenum && (command == NAND_CMD_READ0)) {
+		info->cur_lpage = page_addr;
+		if (page_addr >= ((mtd->blkcnt - 8)*mtd->pagecnt))
+			mtd->bbt_sw_rdmz = 1;
+		else
+			mtd->bbt_sw_rdmz = 0;
+	}
+//printk(KERN_DEBUG "cmd %x col:%x, page:0x%x hold=0x%x \n", command, column, page_addr, ((mtd->blkcnt - 8)*mtd->pagecnt));
+	/*
+	 * Chip-ID-specific page remap: for flash ID 0xECDED57A, pages at or
+	 * beyond 4096*128 are offset by 0x80000. Presumably this skips a
+	 * reserved address region of that part — TODO confirm vs. datasheet.
+	 */
+	if (mtd->id == 0xECDED57A) {
+		if (page_addr >= (4096*128)) {
+			page_addr = page_addr + 0x80000;
+			//printk(KERN_NOTICE "cmd %x col:%x, page:0x%x\n", command, column, page_addr);
+		}
+	} else if (command == NAND_CMD_READ0 && chip->cur_chip && prob_end == 1 &&
+		(chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX) {
+		/*
+		 * Hynix eSLC mode: pages in the first partition area are
+		 * remapped through the eSLC page table. NOTE(review): the
+		 * braces below only balance when ESLC_READ_WRITE is defined;
+		 * with it undefined this function does not compile — the
+		 * #endif placement looks unintentional.
+		 */
+		#ifdef ESLC_READ_WRITE
+		if (!chip->realplanenum)
+		if (command == NAND_CMD_READ0) {
+			if ((page_addr < par4_ofs && second_chip == 0)) {
+				#ifdef ESLC_DEBUG
+				if (page_addr%mtd->pagecnt == 0 || page_addr%mtd->pagecnt == (mtd->pagecnt/2))
+					printk("\ncmdfunc: \n");
+				#endif
+				page_addr = hynix_eslc_page_address_calculate(mtd, chip, page_addr);
+				/* negative return means out of partition: abort */
+				if (page_addr < 0)
+					return;
+			}
+		#endif
+		}
+	}
+	mycolumn = column;
+	mypage_addr = page_addr;
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "enter in wmt_nand_cmdfunc() command: %x column:%x, page_addr:%x\n",
+	command, column, page_addr);
+	//if (command == 0x70)
+	//dump_stack();
+	#endif
+	info->isr_cmd = command;
+	/* NOTE(review): both constants compare equal for an int page_addr —
+	 * the second test is redundant. */
+	if (page_addr != 0xFFFFFFFF && page_addr != -1)
+		info->cur_page = page_addr;
+	/* phase 2 marks "BCH ECC disabled" for the ISR bookkeeping */
+	info->phase = 0;
+	if (readl(info->reg + NFCR9_ECC_BCH_CTRL) & DIS_BCH_ECC)
+		info->phase = 2;
+	/* Warn if the NAND clock gate (PMCEU bit 16, presumably) is off. */
+	pmc_nand = *(volatile unsigned long *)PMCEU_ADDR;// |= (0x0010000);//add by vincent
+	if (!(pmc_nand&0x0010000))
+		printk(KERN_NOTICE "pmc_nand=0x%x\n", pmc_nand);
+
+	/* Chip-enable lines are active-low; all-ones (0x7) means no chip
+	 * selected — force CE0 as a fallback. */
+	chip_en = readb(info->reg + NFCR12_NAND_TYPE_SEL+1);
+	if ((chip_en&7) == 7) {
+		printk(KERN_NOTICE "chip 0, or 1, is not select chip_sel=%x\n", chip_en);
+		writeb(0xfe, info->reg + NFCR12_NAND_TYPE_SEL+1);
+	}
+
+	switch (command) {
+	case NAND_CMD_READ0:
+		/* Arm or disarm the hardware randomizer to match this page. */
+		#ifdef WMT_HW_RDMZ
+		tmp = DIS_BCH_ECC & readb(info->reg + NFCR9_ECC_BCH_CTRL);
+		if (mtd->dwRdmz) {
+			if (mtd->bbt_sw_rdmz || tmp) {
+				if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+					reset_nfc(mtd, NULL, 3);
+			} else
+				nfc_hw_rdmz(mtd, 1);
+		}
+		#endif
+		wmt_nand_page_read(mtd, command, column, page_addr);
+		/*#ifdef WMT_HW_RDMZ
+		if (mtd->dwRdmz)
+			nfc_hw_rdmz(mtd, 1);
+		#endif*/
+		return;
+	case NAND_CMD_READOOB:
+		#ifdef WMT_HW_RDMZ
+		if (mtd->dwRdmz) {
+			if (mtd->bbt_sw_rdmz) {
+				if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+					reset_nfc(mtd, NULL, 3);
+			} else
+				nfc_hw_rdmz(mtd, 1);
+		}
+		#endif
+		//printk("oobRe=%x mtd->bbt_sw_rdmz=%d dwRdmz=%d\n", page_addr, mtd->bbt_sw_rdmz, mtd->dwRdmz);
+		wmt_nand_oob_read(mtd, command, column, page_addr);
+		/*#ifdef WMT_HW_RDMZ
+		if (mtd->dwRdmz)
+			nfc_hw_rdmz(mtd, 1);
+		#endif*/
+		return;
+
+	/* Driver-private multi-plane read: cycle 1 issues the plane commands,
+	 * cycle 2 transfers the data (command byte rewritten to 0x00). */
+	case MULTI_READ_1CYCLE:
+		if ((0xFF&(mtd->id>>24)) == NAND_MFR_MICRON || (0xFF&(mtd->id>>24)) == NAND_MFR_INTEL)
+			wmt_multi_page_start_micron(mtd, command, column, page_addr);
+		else
+			wmt_multi_page_start(mtd, command, column, page_addr);
+		return;
+	case MULTI_READ_2CYCLE:
+		info->isr_cmd = 0x00;
+		command = 0x00;
+		wmt_multi_page_read(mtd, command, column, page_addr);
+		return;
+	/*case MULTI_COPY_1CYCLE:
+		info->isr_cmd = 0x60;
+		command = 0x60;
+		wmt_multi_copy_start(mtd, command, column, page_addr);
+		return;
+	case MULTI_COPY_2CYCLE:
+		info->isr_cmd = 0x00;
+		command = 0x00;
+		wmt_multi_copy_read(mtd, command, column, page_addr);
+		return;
+	case MULTI_COPY_3CYCLE:
+		info->isr_cmd = 0x85;
+		command = 0x85;
+		wmt_multi_copy_write(mtd, command, column, page_addr);
+		return;
+	case COPY_BACK_1CYCLE:
+		info->isr_cmd = 0x00;
+		command = 0x00;
+		wmt_copy_back_read(mtd, command, column, page_addr);
+		return;
+	case COPY_BACK_2CYCLE:
+		info->isr_cmd = 0x85;
+		command = 0x85;
+		wmt_copy_back_write(mtd, command, column, page_addr);
+		return;*/
+
+	/* Program (0x80/0x81 second-plane), and erase address phase: load the
+	 * required column/row address cycles into the NFC address ports. */
+	case 0x81:
+	case NAND_CMD_SEQIN:
+	case NAND_CMD_ERASE1:
+	/*	printk(KERN_NOTICE "command is %x\n", command);*/
+		if (column != -1) {
+			writeb(column, info->reg + NFCR3_COMPORT1_2);
+			addr_cycle++;
+			/*#ifndef PAGE_ADDR*/
+			/* large-page parts take a 2-byte column address */
+			if (mtd->realwritesize != 512) {
+				writeb(column >> 8, (unsigned char *)(info->reg + NFCR3_COMPORT1_2) + 1);
+				addr_cycle++;
+			}/*#endif*/
+			if (page_addr != -1) {
+				/*#ifndef PAGE_ADDR*/
+				if (mtd->realwritesize != 512) {
+					writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+					page_addr >>= 8;
+					writeb(page_addr, (unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+					addr_cycle += 2;
+				/*#else*/
+				} else {
+					writeb(page_addr, (unsigned char *)(info->reg + NFCR3_COMPORT1_2) + 1);
+					page_addr >>= 8;
+					writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+					addr_cycle += 2;
+				} /*#endif*/
+
+				/* Third row-address byte depends on page size and
+				 * total chip size (per ONFI-style addressing). */
+				if (mtd->realwritesize == 2048) {
+					/* One more address cycle for devices > 128MiB */
+					if (chip->chipsize > (128 << 20)) {
+						page_addr >>= 8;
+						/*#ifndef PAGE_ADDR*/
+						if (mtd->realwritesize != 512)
+							writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+						else /*#else*/
+							writeb(page_addr, (unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+						/*#endif*/
+						addr_cycle++;
+					}
+				} else if (mtd->realwritesize == 4096) {
+					/* One more address cycle for devices > 256MiB */
+					if (chip->chipsize > (256 << 20)) {
+						page_addr >>= 8;
+						/*#ifndef PAGE_ADDR*/
+						if (mtd->realwritesize != 512)
+							writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+						else /*#else*/
+							writeb(page_addr, (unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+						/*#endif*/
+						addr_cycle++;
+					}
+				} else if (mtd->realwritesize == 8192) {
+					/* One more address cycle for devices > 512MiB */
+					if (chip->chipsize > (512 << 20)) {
+						page_addr >>= 8;
+						if (mtd->realwritesize != 512)
+							writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+						addr_cycle++;
+					}
+				} else if (mtd->realwritesize == 16384) {
+					/* One more address cycle for devices > 1024MiB */
+					if (chip->chipsize > (1024 << 20)) {
+						page_addr >>= 8;
+						writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+						addr_cycle++;
+					}
+				} else {
+					/* One more address cycle for devices > 32MiB */
+					if (chip->chipsize > (32 << 20)) {
+						page_addr >>= 8;
+						/*#ifndef PAGE_ADDR*/
+						if (mtd->realwritesize != 512)
+							writeb(page_addr, info->reg + NFCR5_COMPORT5_6);
+						else /*#else*/
+							writeb(page_addr, (unsigned char *)(info->reg + NFCR4_COMPORT3_4) + 1);
+						/*#endif*/
+						addr_cycle++;
+					}
+				}
+			}
+		/*} else if (page_addr != -1) {*/
+		} else if ((page_addr != -1) && (column == -1)) {
+			/* Row-only addressing (erase): 2-3 row address cycles. */
+			writeb(page_addr & 0xff, info->reg + NFCR3_COMPORT1_2);
+			page_addr >>= 8;
+			writeb(page_addr & 0xff, (unsigned char *)(info->reg + NFCR3_COMPORT1_2) + 1);
+			addr_cycle += 2;
+
+			if (mtd->realwritesize == 2048) {
+				/* One more address cycle for devices > 128MiB */
+				if (chip->chipsize > (128 << 20)) {
+					page_addr >>= 8;
+					writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+					addr_cycle++;
+				}
+			} else if (mtd->realwritesize == 4096) {
+				/* One more address cycle for devices > 256MiB */
+				if (chip->chipsize > (256 << 20)) {
+					page_addr >>= 8;
+					writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+					addr_cycle++;
+				}
+			} else if (mtd->realwritesize == 8192) {
+				/* One more address cycle for devices > 512MiB */
+				if (chip->chipsize > (512 << 20)) {
+					page_addr >>= 8;
+					writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+					addr_cycle++;
+				}
+			} else if (mtd->realwritesize == 16384) {
+				/* One more address cycle for devices > 1024MiB */
+				if (chip->chipsize > (1024 << 20)) {
+					page_addr >>= 8;
+					writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+					addr_cycle++;
+				}
+			} else {
+				/* One more address cycle for devices > 32MiB */
+				if (chip->chipsize > (32 << 20)) {
+					page_addr >>= 8;
+					writeb(page_addr, info->reg + NFCR4_COMPORT3_4);
+					addr_cycle++;
+				}
+			}
+		}
+
+		/* set command 1 cycle */
+		writeb(command, info->reg + NFCR2_COMPORT0);
+		if (command == NAND_CMD_SEQIN || command == 0x81) {
+			/* program: keep the data phase enabled and wait via ISR */
+			wmb();
+			info->done_data = &complete;
+			writew(((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+		} else {
+			/* erase: command+address only, no data phase */
+			/* writeb(read(info->reg + NFCR12_NAND_TYPE_SEL) | WP_DISABLE ,
+			info->reg + NFCR12_NAND_TYPE_SEL);*/
+			writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD,
+			info->reg + NFCR1_COMCTRL);
+		}
+		wmb();
+
+		if (command == NAND_CMD_ERASE1) {//printk("erpg=0x%x\n", page_addr);
+			status = wmt_wait_cmd_ready(mtd);
+			/* status = wmt_nfc_ready(mtd); */
+			if (status)
+				printk(KERN_ERR "command is not ready\n");
+			writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+		} else {
+			wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+			status = wmt_nfc_transfer_ready(mtd);
+			/*status = wmt_wait_dma_ready(mtd);*/ /*dannier mask*/
+			wmt_wait_nfc_ready(info);
+			if (status) {
+				printk(KERN_ERR "dma transfer data is not ready: %x\n",
+				readb(info->reg + NFCRa_NFC_STAT));
+				writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+				/*printk(KERN_NOTICE "\rwait transfer data is not ready: %x\n",
+				readb(info->reg + NFCRa_NFC_STAT));*/
+				/*print_nand_register(mtd);*/
+				/* while (1);*/
+				/* return;*/
+			}
+		}
+		return;
+
+
+	/* 0x11: dummy-program confirm for the first plane of a multi-plane
+	 * program sequence. */
+	case 0x11:
+		//printk("\n0x11 is here \n");
+		writeb(command, info->reg + NFCR2_COMPORT0);
+		/* write to clear B2R */
+		b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+		writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+		//writeb(0x1B, info->reg + NFCR13_INT_MASK);
+		info->done_data = &complete;
+		writew(DPAHSE_DISABLE|(1<<1)|NFC_TRIGGER|0x400, info->reg + NFCR1_COMCTRL);
+		wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+		//print_nand_register(mtd);
+		writeb(0x80, info->reg + NFCR13_INT_MASK);
+		status = wmt_wait_chip_ready(mtd);
+		if (status)
+			printk(KERN_NOTICE"The chip is not ready\n");
+		status = wmt_nfc_wait_idle(mtd, 1, 1, -1, -1); /* write page, don't check ecc */
+		if (status < 0)
+			printk(KERN_ERR "page multi plane err, nand controller is not idle\n");
+		return;
+
+
+	/* Confirm commands: program confirm (0x10) and erase confirm. Wait for
+	 * the busy-to-ready (B2R) interrupt, then for controller idle. */
+	case NAND_CMD_PAGEPROG:
+	/*	case NAND_CMD_READSTART:*/
+	case NAND_CMD_ERASE2:
+	case NAND_CMD_ERASE3:
+		/*printk(KERN_NOTICE "command is %x\n", command);*/
+		writeb(command, info->reg + NFCR2_COMPORT0);
+		b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+		if (B2R&b2r_stat) {
+			/* stale B2R from a previous operation: clear it first */
+			printk(KERN_NOTICE"flash B2R status assert command=0x%x statu%x\n",command, b2r_stat);
+			writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+			status = wmt_wait_chip_ready(mtd); /*Vincent 2008.11.3*/
+			if (status)
+				printk(KERN_NOTICE"The chip is not ready\n");
+		}
+
+		if (NAND_CMD_ERASE2 == command || NAND_CMD_ERASE3 == command) {
+			b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+			writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+			/* unmask the B2R interrupt for the erase completion */
+			writeb(0x1B, info->reg + NFCR13_INT_MASK);
+		}
+		info->done_data = &complete;
+		writew(DPAHSE_DISABLE|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+		info->datalen = 0;
+		wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+		writeb(0x80, info->reg + NFCR13_INT_MASK);
+		#if 0 /* for debug */
+		if (command == NAND_CMD_ERASE2 || NAND_CMD_ERASE3 == command) {
+			wmt_read_nand_status(mtd, NAND_CMD_STATUS);
+			if ((readb(info->reg + NFCR0_DATAPORT) & 0xff) == 0xc0) {
+				printk(KERN_NOTICE "wmt_func: erase block OK\n");
+				printk(KERN_NOTICE "read nand status is %x\n",
+				readb(info->reg + NFCR0_DATAPORT) & 0xff);
+			} else
+				printk(KERN_NOTICE "wmt_func: erase block failed\n");
+		}
+		#endif
+
+		status = wmt_nfc_wait_idle(mtd, 1, 1, -1, -1); /* write page, don't check ecc */
+		if (status < 0) {
+			printk(KERN_ERR "page program or erase err, nand controller is not idle\n");
+			/*print_nand_register(mtd);*/
+			/* while (1);*/
+			#if 0
+			status = wmt_read_nand_status(mtd, NAND_CMD_STATUS);
+			if (status < 0)
+				printk(KERN_NOTICE "\rNFC or NAND is not ready\n");
+			else if (status & NAND_STATUS_FAIL)
+				printk(KERN_NOTICE "\r status : fail\n");
+			else if (!(status & NAND_STATUS_READY))
+				printk(KERN_NOTICE "\r status : busy\n");
+			else if (!(status & NAND_STATUS_WP))
+				printk(KERN_NOTICE "\r status : protect\n");
+			#endif
+			return;
+		}
+
+		return;
+
+	/* Reset variants that must not be followed by a status read (used to
+	 * terminate Hynix read-retry sequences). */
+	case NAND_CMD_RESET_NO_STATUS_READ:
+	case NAND_CMD_HYNIX_RETRY_END:
+
+		if (!chip->dev_ready)
+			break;
+		udelay(chip->chip_delay);
+		writeb(command, info->reg + NFCR2_COMPORT0);
+		/* write to clear B2R */
+		b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+		writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+		writew(DPAHSE_DISABLE|(0x01<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+		status = wmt_nand_ready(mtd);
+		if (status) {
+			printk(KERN_ERR "Reset err, nand device is not ready\n");
+			writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+		}
+
+		return;
+
+	case NAND_CMD_RESET:
+
+		if (!chip->dev_ready)
+			break;
+		udelay(chip->chip_delay);
+		writeb(command, info->reg + NFCR2_COMPORT0);
+		/* write to clear B2R */
+		b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+		writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+		writew(DPAHSE_DISABLE|(0x01<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+		status = wmt_nand_ready(mtd);
+		if (status) {
+			b2r_stat = readb(info->reg + NFCR12_NAND_TYPE_SEL+1);
+			printk(KERN_ERR "Reset err, nand device chip %d is not ready\n", ((~b2r_stat)&0xFF)>>1);
+			writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+		}
+
+		/* Poll the status register until the device reports ready,
+		 * with a bounded (4096-iteration) spin. */
+		wmt_read_nand_status(mtd, NAND_CMD_STATUS);
+		/* while (!(chip->read_byte(mtd) & NAND_STATUS_READY));*/
+		i = 0;
+		while (!((readb(info->reg + NFCR0_DATAPORT) & 0xff) & NAND_STATUS_READY)) {
+			if (i>>12) {
+				printk("reset flash chip%d time out\n", ~readb(info->reg + NFCR12_NAND_TYPE_SEL+1));
+				break;
+			}
+			i++;
+		}
+
+		#ifdef NAND_DEBUG
+		printk(KERN_NOTICE "Reset status is ok\n");
+		#endif
+		return;
+
+	case NAND_CMD_READID:
+
+		status = wmt_nand_readID(mtd);
+		#ifdef NAND_DEBUG
+		printk(KERN_NOTICE "readID status is %d\n", status);
+		#endif
+		return;
+
+	/* GET FEATURE must run with the randomizer off; restore it after. */
+	case NAND_GET_FEATURE:
+		if (mtd->dwRdmz)
+			reset_nfc(mtd, NULL, 3);
+		status = nand_get_feature(mtd, 0x1);
+		if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+			nfc_hw_rdmz(mtd, 1);//enable rdmz
+		return;
+
+	case NAND_CMD_STATUS:
+
+		wmt_read_nand_status(mtd, command);
+		return;
+
+	case NAND_CMD_STATUS_MULTI:
+
+		wmt_read_nand_status(mtd, command);
+		return;
+
+	/* Random data input (0x85): reposition the column within the page
+	 * currently being programmed. */
+	case NAND_CMD_RNDIN:
+		if (column != -1) {
+			writeb(column, info->reg + NFCR3_COMPORT1_2);
+			addr_cycle++;
+			if (mtd->realwritesize != 512) {
+				writeb(column >> 8, (unsigned char *)(info->reg + NFCR3_COMPORT1_2) + 1);
+				addr_cycle++;
+			}
+		}
+		info->done_data = &complete;
+		/* set command 1 cycle */
+		writeb(command, info->reg + NFCR2_COMPORT0);
+
+		writew(((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+		wait_for_completion_timeout(&complete, NFC_TIMEOUT_TIME);
+		status = wmt_nfc_wait_idle(mtd, 1, -1, -1, -1); /* don't check ecc, wait nfc idle */
+		/* status = wmt_wait_cmd_ready(mtd);*/
+		/* status = wmt_nfc_ready(mtd);*/
+		if (status)
+			printk(KERN_ERR "Ramdom input err: nfc is not idle\n");
+
+		return;
+
+	/* Random data output (0x05/0xE0 pair): reposition the read column.
+	 * NOTE(review): the second writeb stores the low column byte again
+	 * instead of column >> 8 — looks suspicious; confirm vs. controller
+	 * docs before relying on columns >= 256 here. */
+	case NAND_CMD_RNDOUT:
+
+		if (column != -1) {
+			writeb(column, info->reg + NFCR3_COMPORT1_2);
+			writeb(column, info->reg + NFCR3_COMPORT1_2 + 1);
+			addr_cycle += 2;
+		}
+
+		/* CLEAR ECC BIT */
+		//writeb(0x1B, info->reg + NFCR13_INT_MASK);
+		/* write to clear B2R */
+		b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+		writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+		/* set command 1 cycle */
+		writeb(command, info->reg + NFCR2_COMPORT0);
+
+		writew(DPAHSE_DISABLE|((addr_cycle + 1)<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+		status = wmt_wait_cmd_ready(mtd);
+		/* status = wmt_nfc_ready(mtd);*/
+		if (status) {
+			printk(KERN_ERR "Ramdom output err: nfc command is not ready\n");
+			/* return;*/
+		}
+
+		/* second cycle: 0xE0 confirm, then transfer data from NAND */
+		writeb(NAND_CMD_RNDOUTSTART, info->reg + NFCR2_COMPORT0);
+		/* write to clear B2R */
+		b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+		writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+		writew(NAND2NFC|(1<<1)|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+
+		status = wmt_wait_cmd_ready(mtd);
+		/* status = wmt_nand_ready(mtd);*/
+		if (status) {
+			printk(KERN_ERR "Ramdom output err: nfc io transfer is not finished\n");
+			/* return;*/
+		}
+		/* reduntant aera check ecc, wait nfc idle */
+		status = wmt_nfc_wait_idle(mtd, 0, -1, -1, -1);
+		/* status = wmt_nand_wait_idle(mtd);*/
+		if (status)
+			printk(KERN_ERR "Ramdom output err: nfc is not idle\n");
+		return;
+
+
+	case NAND_CMD_STATUS_ERROR:
+	case NAND_CMD_STATUS_ERROR0:
+		udelay(chip->chip_delay);
+		return;
+
+
+	default:
+		/*
+		 * If we don't have access to the busy pin, we apply the given
+		 * command delay
+		 */
+
+		/* trigger command and addrress cycle */
+
+		if (!chip->dev_ready) {
+			udelay(chip->chip_delay);
+			return;
+		}
+	}
+	/* Apply this short delay always to ensure that we do wait tWB in */
+	/* any case on any machine.*/
+	/* ndelay(100);*/
+	wmt_device_ready(mtd);
+}
+
+
+/*
+ * wmt_nand_select_chip - select (or deselect) a NAND die on the controller
+ * @mtd: MTD device structure
+ * @chipnr: chip number to select, -1 to deselect all
+ *
+ * Ensures the NAND clock is enabled, clears any pending busy-to-ready
+ * interrupt, then drives the active-low chip-enable lines in
+ * NFCR12_NAND_TYPE_SEL.
+ */
+static void wmt_nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	unsigned int b2r_stat;
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "\r enter in wmt_nand_select_chip()\n");
+	#endif
+	/* re-enable the NAND clock if the PMC gate (bit 16) is off */
+	if (!((*(volatile unsigned long *)PMCEU_ADDR)&0x0010000))
+		auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+	if (chipnr > 1)
+		printk(KERN_WARNING "There are only support two chip sets\n");
+
+	/* write-1-to-clear any stale busy-to-ready status */
+	b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+	writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+	/* Swap logical chip 1 <-> 2: the second die sits on CE2 of this
+	 * controller (the else-if keeps the swap one-directional). */
+	if (chipnr == 1)
+		chipnr++;
+	else if (chipnr == 2)
+		chipnr--;
+
+	/* chip enables are active-low, one bit per CE line */
+	if (chipnr >= 0 && chipnr < 4)
+		writeb(~(1<<chipnr), info->reg + NFCR12_NAND_TYPE_SEL+1);
+	else if (chipnr < 0)
+		writeb(~0, info->reg + NFCR12_NAND_TYPE_SEL+1);
+	else
+		printk(KERN_WARNING "There are only support two chip sets. chipnr = %d\n", chipnr);
+}
+
+/*
+ * rdmzier - XOR-(de)randomize a buffer in place.
+ * @buf: buffer to scramble; treated as an array of 32-bit words
+ * @size: number of 32-bit words to process
+ * @page: page number; seeds the starting index into the rdmz[] table
+ *
+ * XOR is its own inverse, so the same call both randomizes and
+ * de-randomizes. The seed index starts at page %% 256 and wraps at
+ * BYTE_SEED.
+ */
+void rdmzier(uint8_t *buf, int size, int page)
+{
+	unsigned int *word = (unsigned int *)buf;
+	int seed_idx = page % 256;
+	int n;
+
+	for (n = 0; n < size; n++) {
+		word[n] ^= rdmz[seed_idx];
+		if (++seed_idx >= BYTE_SEED)
+			seed_idx = 0;
+	}
+}
+/*
+ * rdmzier_oob - XOR-(de)randomize OOB data from @src into @buf.
+ * @buf: destination buffer (32-bit word aligned)
+ * @src: source buffer (32-bit word aligned); not modified
+ * @size: number of 32-bit words to process
+ * @page: page number seeding the rdmz[] table index
+ * @ofs: additional word offset into the seed stream (e.g. where the OOB
+ *       area begins relative to the start of the page)
+ *
+ * Like rdmzier(), but out-of-place and with the seed index advanced by
+ * @ofs so the OOB bytes line up with their position in the page stream.
+ */
+void rdmzier_oob(uint8_t *buf, uint8_t *src, int size, int page, int ofs)
+{
+	unsigned int *dst_word = (unsigned int *)buf;
+	unsigned int *src_word = (unsigned int *)src;
+	int seed_idx = ((page % 256) + ofs) % BYTE_SEED;
+	int n;
+
+	for (n = 0; n < size; n++) {
+		dst_word[n] = src_word[n] ^ rdmz[seed_idx];
+		if (++seed_idx >= BYTE_SEED)
+			seed_idx = 0;
+	}
+}
+
+
+/*
+ * wmt_nand_write_buf - stage write data into the driver's DMA buffer
+ * @mtd: MTD device structure
+ * @buf: data to append
+ * @len: number of bytes to append
+ *
+ * Copies @len bytes to the current fill position (info->datalen) of the
+ * DMA buffer and advances the position; the actual transfer to flash
+ * happens later in the program path.
+ */
+static void wmt_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "enter in wmt_nand_write_buf()\n");
+	#endif
+	memcpy(&info->dmabuf[info->datalen], buf, len);
+	info->datalen += len;
+}
+
+/*
+ * wmt_nand_read_buf - copy already-transferred page data out of the DMA buffer
+ * @mtd: MTD device structure
+ * @buf: destination buffer
+ * @len: number of bytes to copy
+ *
+ * Reads @len bytes from the current read position (info->datalen) of the
+ * DMA buffer and advances the position.
+ */
+static void wmt_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "enter in wmt_nand_read_buf() len: %x infoDatalen :%x\n", len, info->datalen);
+	#endif
+	memcpy(buf, &info->dmabuf[info->datalen], len);
+	info->datalen += len;
+}
+
+/*
+ * wmt_read_byte - read one byte from the driver's DMA buffer
+ * @mtd: MTD device structure
+ *
+ * Thin wrapper over wmt_nand_read_buf() that fetches a single byte at
+ * the current read position and returns it.
+ */
+static uint8_t wmt_read_byte(struct mtd_info *mtd)
+{
+	uint8_t byte;
+
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "enter in wmt_nand_read_byte()\n");
+	#endif
+	wmt_nand_read_buf(mtd, &byte, 1);
+	return byte;
+}
+
+/*
+ * wmt_nand_read_oob_noalign - OOB read path for non-power-of-two page sizes
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @page: physical page to read
+ * @sndcmd: unused here (kept for the nand_chip ecc.read_oob signature)
+ *
+ * Always falls through to a full page read (the dedicated OOB-only path is
+ * compiled out below), then copies the spare bytes that follow the data
+ * area in the DMA buffer into chip->oob_poi. Returns 1 unconditionally.
+ */
+static int wmt_nand_read_oob_noalign(struct mtd_info *mtd, struct nand_chip *chip, int page, int sndcmd)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	uint8_t *buf = chip->oob_poi;
+	uint8_t *bufpoi = buf;
+
+	info->unc_bank = 0;
+	info->unc_allFF = 0;
+
+	// read redundant area cmd
+	//printk(KERN_NOTICE "scan oob page=0x%x\n", page);dannier
+	info->oob_ecc_error = 0x0;
+	#if 0
+	if (!mtd->dwDDR) {
+		writeb(readb(info->reg + NFCRd_OOB_CTRL) | OOB_READ,
+		info->reg + NFCRd_OOB_CTRL);
+		//writeb((info->oob_ECC_bytes+1), info->reg + NFCR10_OOB_ECC_SIZE+1);
+		if (info->ECC_mode != info->oob_ECC_mode)
+			set_ecc_engine(info, info->oob_ECC_mode);
+		//pos = info->oob_col/*+ i * (eccsize + chunk);*/
+		//print_nand_register(mtd);
+		chip->cmdfunc(mtd, NAND_CMD_READOOB, info->oob_col, page);
+		if (info->ECC_mode != info->oob_ECC_mode)
+			set_ecc_engine(info, info->ECC_mode);
+		//writeb(info->oob_ECC_bytes, info->reg + NFCR10_OOB_ECC_SIZE+1);
+		writeb(readb(info->reg + NFCRd_OOB_CTRL) & (~OOB_READ),
+		info->reg + NFCRd_OOB_CTRL);
+	} else
+	#endif
+	{
+		/* force the "fall back to full page read" path below */
+		info->data_ecc_uncor_err = 0;
+		info->oob_ecc_error = 0x50;
+	}
+
+	/* arm/disarm the HW randomizer to match this page's area */
+	if (mtd->dwRdmz) {
+		if (mtd->bbt_sw_rdmz) {
+			if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+				reset_nfc(mtd, NULL, 3);
+		} else
+			nfc_hw_rdmz(mtd, 1);
+	}
+
+	/* 0x50 sentinel (set above) or an uncorrectable OOB error: read the
+	 * whole page so the spare area lands in the DMA buffer */
+	if (info->data_ecc_uncor_err == 1 || info->oob_ecc_error == 0x50) {
+		if (info->data_ecc_uncor_err == 1)
+			printk(KERN_WARNING "**************page0x%x, read oob unc err goto read page\n", page);
+		info->isr_cmd = 0;
+		wmt_nand_page_read(mtd, 0, info->last_bank_col, page);
+		info->oob_ecc_error = 0;
+	}
+
+	if (info->unc_allFF) {
+		/* erased page: report all-FF spare bytes */
+		set_FIFO_FF((uint32_t *)(chip->oob_poi), 6);//set_FIFO_FF((uint32_t *)(info->reg+ECC_FIFO_0), 4);
+		/*printk("oobRe=%x \n", page);
+		print_nand_buffer((char *)(info->reg+ECC_FIFO_0), 32);
+		print_nand_buffer((char *)(chip->oob_poi), 32);
+		printk("\n");*/
+	} else {
+		/* spare bytes follow the data area in the DMA buffer */
+		memcpy(bufpoi, info->dmabuf + mtd->realwritesize, 24);
+		//print_nand_buffer((char *)(chip->oob_poi), 32);
+		//print_nand_buffer((char *)(info->dmabuf + mtd->realwritesize), 32);
+		/*if (!(*(uint32_t *)(info->reg+ECC_FIFO_0) == 0xFFFFFFFF && *(uint32_t *)(info->reg+ECC_FIFO_1) == 0xFFFFFFFF
+		&& *(uint32_t *)(info->reg+ECC_FIFO_2) == 0xFFFFFFFF && *(uint32_t *)(info->reg+ECC_FIFO_3) == 0xFFFFFFFF
+		&& *(uint32_t *)(info->reg+ECC_FIFO_4) == 0xFFFFFFFF && *(uint32_t *)(info->reg+ECC_FIFO_5) == 0xFFFFFFFF)) {
+			printk("fail to derdmz oob roob page= 0x%x e\n", page);
+			print_nand_buffer((char *)(info->reg+ECC_FIFO_0), 32);
+			//rdmzier_oob((uint8_t *)(info->reg+ECC_FIFO_0), (uint8_t *)(info->reg+ECC_FIFO_0), 5, page, mtd->realwritesize/4);
+			//print_nand_buffer((char *)(info->reg+ECC_FIFO_0), 32);
+			//while(1);
+		}*/
+	}
+
+	return 1;
+}
+
+/*
+ * wmt_nand_read_oob - OOB read path for power-of-two page sizes
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @page: physical page to read
+ * @sndcmd: unused here (kept for the nand_chip ecc.read_oob signature)
+ *
+ * For non-DDR parts issues a dedicated OOB read (switching the ECC engine
+ * to the OOB configuration around it); for DDR parts falls back to a full
+ * page read. The spare bytes arrive in the controller's ECC FIFO and are
+ * copied (optionally software-de-randomized) into chip->oob_poi.
+ * Returns 1 unconditionally.
+ */
+static int wmt_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page, int sndcmd)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	uint8_t *buf = chip->oob_poi;
+	/*	int length = mtd->realoobsize; */ /* prepad = chip->ecc.prepad, bytes = chip->ecc.bytes;*/
+	/*	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;*/
+	/*	int eccsize = chip->ecc.size;*/
+	uint8_t *bufpoi = buf;
+	/*	struct nand_oobfree *free = chip->ecc.layout->oobfree;*/
+	/*	uint32_t boffs;*/
+	/*	int pos; */ /* toread, sndrnd = 1;*/
+	#ifdef WMT_SW_RDMZ
+	unsigned int rdmz_mark = 0;
+	#endif
+
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "\r enter in wmt_nand_read_oob() page =0x%x cur_page=0x%x\n", page, info->cur_page);
+	#endif
+
+	info->unc_bank = 0;
+	info->unc_allFF = 0;
+
+	// read redundant area cmd
+	//printk(KERN_NOTICE "scan oob page=%d\n", page);
+	info->oob_ecc_error = 0x0;
+	#if 1
+	if (!mtd->dwDDR) {
+		/* OOB-only read: raise OOB_READ and temporarily switch the ECC
+		 * engine to the spare-area configuration */
+		writeb(readb(info->reg + NFCRd_OOB_CTRL) | OOB_READ,
+		info->reg + NFCRd_OOB_CTRL);
+		//writeb((info->oob_ECC_bytes+1), info->reg + NFCR10_OOB_ECC_SIZE+1);
+		if (info->ECC_mode != info->oob_ECC_mode)
+			set_ecc_engine(info, info->oob_ECC_mode);
+		//pos = info->oob_col/*+ i * (eccsize + chunk);*/
+		//print_nand_register(mtd);
+		chip->cmdfunc(mtd, NAND_CMD_READOOB, info->oob_col, page);
+		if (info->ECC_mode != info->oob_ECC_mode)
+			set_ecc_engine(info, info->ECC_mode);
+		//writeb(info->oob_ECC_bytes, info->reg + NFCR10_OOB_ECC_SIZE+1);
+		writeb(readb(info->reg + NFCRd_OOB_CTRL) & (~OOB_READ),
+		info->reg + NFCRd_OOB_CTRL);
+	} else
+	#endif
+	{
+		/* DDR parts: force the full-page fallback below */
+		info->data_ecc_uncor_err = 0;
+		info->oob_ecc_error = 0x50;
+	}
+
+	/* arm/disarm the HW randomizer to match this page's area */
+	if (mtd->dwRdmz) {
+		if (mtd->bbt_sw_rdmz) {
+			if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+				reset_nfc(mtd, NULL, 3);
+		} else
+			nfc_hw_rdmz(mtd, 1);
+	}
+
+	/* 0x50 sentinel or uncorrectable error: re-read the whole page
+	 * (bit 0x10 in NFCR9 toggled around the read — presumably an
+	 * OOB-related controller mode; confirm vs. register docs). */
+	if (info->data_ecc_uncor_err == 1 || info->oob_ecc_error == 0x50) {
+		if (info->data_ecc_uncor_err == 1)
+			printk(KERN_WARNING "**************page0x%x, read oob unc err goto read page\n", page);
+		info->isr_cmd = 0;
+		writeb(readb(info->reg + NFCR9_ECC_BCH_CTRL) | 0x10, info->reg + NFCR9_ECC_BCH_CTRL);
+		wmt_nand_page_read(mtd, 0, info->last_bank_col, page);
+		writeb(readb(info->reg + NFCR9_ECC_BCH_CTRL) & 0xEF, info->reg + NFCR9_ECC_BCH_CTRL);
+		info->oob_ecc_error = 0;
+	}
+//print_nand_buffer((char *)(info->reg+ECC_FIFO_0), 16);
+	#ifdef WMT_SW_RDMZ
+	/* If the randomizer marker in ECC FIFO slot 5 de-scrambles to the
+	 * expected signature, software-de-randomize the 5 spare words. */
+	rdmzier_oob((uint8_t *)&rdmz_mark, (uint8_t *)(info->reg+ECC_FIFO_5), 1, page, (mtd->realwritesize+20)/4);
+	//printk("re oob page=0x%x rdmz_mark=0x%x wmt_rdmz=0x%x fifo5=0x%x\n",page , rdmz_mark, *(unsigned int *)wmt_rdmz, *(unsigned int *)(info->reg+ECC_FIFO_5));
+	if (mtd->dwRdmz == 1 && rdmz_mark == *(unsigned int *)wmt_rdmz) {
+		rdmzier_oob(bufpoi, (uint8_t *)(info->reg+ECC_FIFO_0), 5, page, mtd->realwritesize/4);
+		//print_nand_buffer(info->reg+ECC_FIFO_0, 24);
+	} else
+	#endif
+	if (info->unc_allFF) {
+		/* erased page: report all-FF spare bytes */
+		set_FIFO_FF((uint32_t *)(chip->oob_poi), 5);//set_FIFO_FF((uint32_t *)(info->reg+ECC_FIFO_0), 4);
+		/*printk("oobRe=%x \n", page);
+		print_nand_buffer((char *)(info->reg+ECC_FIFO_0), 32);
+		print_nand_buffer((char *)(chip->oob_poi), 32);
+		printk("\n");*/
+	} else {
+		/* plain copy of the spare bytes from the ECC FIFO */
+		memcpy(bufpoi, info->reg+ECC_FIFO_0, 20);
+		/*if (!(*(uint32_t *)(info->reg+ECC_FIFO_0) == 0xFFFFFFFF && *(uint32_t *)(info->reg+ECC_FIFO_1) == 0xFFFFFFFF
+		&& *(uint32_t *)(info->reg+ECC_FIFO_2) == 0xFFFFFFFF && *(uint32_t *)(info->reg+ECC_FIFO_3) == 0xFFFFFFFF
+		&& *(uint32_t *)(info->reg+ECC_FIFO_4) == 0xFFFFFFFF && *(uint32_t *)(info->reg+ECC_FIFO_5) == 0xFFFFFFFF)) {
+			printk("fail to derdmz oob roob page= 0x%x e\n", page);
+			print_nand_buffer((char *)(info->reg+ECC_FIFO_0), 32);
+			//rdmzier_oob((uint8_t *)(info->reg+ECC_FIFO_0), (uint8_t *)(info->reg+ECC_FIFO_0), 5, page, mtd->realwritesize/4);
+			//print_nand_buffer((char *)(info->reg+ECC_FIFO_0), 32);
+			//while(1);
+		}*/
+	}
+	/*chip->read_buf(mtd, bufpoi, 32);*/
+	/*chip->read_buf(mtd, bufpoi + i * 16, 16);*/
+
+	return 1;
+}
+
+/*
+ * wmt_nand_read_oob_single - OOB read entry point for single-plane parts
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @page: logical page to read
+ * @sndcmd: passed through to the low-level OOB readers
+ *
+ * Serves the request from the driver's page cache when possible, sets the
+ * software-de-randomization flag for pages in the BBT area (last 8 erase
+ * blocks), and dispatches to the aligned or non-aligned OOB reader based
+ * on whether the page size in KiB is a power of two. Returns 0 on a cache
+ * hit, otherwise the low-level reader's truth value (1).
+ */
+static int wmt_nand_read_oob_single(struct mtd_info *mtd, struct nand_chip *chip, int page, int sndcmd)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	int pow2_pagesize;
+
+	info->cur_lpage = page;
+	info->cur_page = page;
+
+	/* cache hit: the OOB data is already available */
+	if (cache_read_data(mtd, chip, page, NULL) == 0)
+		return 0;
+
+	/* BBT area (last 8 blocks) uses software de-randomization */
+	mtd->bbt_sw_rdmz = (page >= (mtd->blkcnt - 8) * mtd->pagecnt) ? 1 : 0;
+//printk("11oobRe=0x%x mtd->bbt_sw_rdmz=%d cur_page=0x%x\n", page, mtd->bbt_sw_rdmz, info->cur_page);
+	pow2_pagesize = ((mtd->pageSizek >> (ffs(mtd->pageSizek) - 1)) == 1);
+	if (pow2_pagesize)
+		return wmt_nand_read_oob(mtd, chip, page, sndcmd) ? 1 : 0;
+
+	return wmt_nand_read_oob_noalign(mtd, chip, page, sndcmd) ? 1 : 0;
+}
+
+
+/*
+ * wmt_nand_read_oob_plane - OOB read entry point for multi-plane parts
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @page: logical page to read
+ * @sndcmd: unused; the low-level readers are invoked with sndcmd = 1
+ *
+ * Like wmt_nand_read_oob_single() but first translates the logical page
+ * to the physical multi-plane page number (block index doubled). Returns
+ * 0 on a cache hit, otherwise the low-level reader's truth value (1).
+ */
+static int wmt_nand_read_oob_plane(struct mtd_info *mtd, struct nand_chip *chip, int page, int sndcmd)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	int pages_per_blk = mtd->pagecnt;
+	int pow2_pagesize;
+
+	info->cur_lpage = page;
+
+	/* cache hit: the OOB data is already available */
+	if (cache_read_data(mtd, chip, page, NULL) == 0)
+		return 0;
+
+	/* BBT area (last 8 blocks) uses software de-randomization */
+	mtd->bbt_sw_rdmz = (page >= (mtd->blkcnt - 8) * mtd->pagecnt) ? 1 : 0;
+
+	/* logical -> physical multi-plane page: double the block index */
+	page = (page / pages_per_blk) * pages_per_blk + page;
+	info->cur_page = page;
+
+	pow2_pagesize = ((mtd->pageSizek >> (ffs(mtd->pageSizek) - 1)) == 1);
+	if (pow2_pagesize)
+		return wmt_nand_read_oob(mtd, chip, page, 1) ? 1 : 0;
+
+	return wmt_nand_read_oob_noalign(mtd, chip, page, 1) ? 1 : 0;
+}
+
+
+
+/*
+ * wmt_nand_read_raw_page - read a page with BCH ECC disabled
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ *
+ * Temporarily disables the BCH engine (and its interrupt) so the raw,
+ * uncorrected page contents land in the DMA buffer, then restores the
+ * previous ECC configuration. Always returns 0.
+ */
+static int wmt_nand_read_raw_page(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+	unsigned int bch;
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+	/*print_nand_register(mtd);
+	dump_stack();*/
+
+	/* save ECC control, then disable BCH and its interrupt for the read */
+	bch = readb(info->reg + NFCR9_ECC_BCH_CTRL);
+	writeb((bch & (~BCH_INT_EN))| DIS_BCH_ECC, info->reg + NFCR9_ECC_BCH_CTRL);
+	writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+	/* restore the saved ECC control and re-arm the engine */
+	writeb(bch, info->reg + NFCR9_ECC_BCH_CTRL);
+	writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+
+	set_ecc_engine(info, info->ECC_mode);
+
+	return 0;
+}
+
+
+/* - SCAN DEFAULT INVALID BAD BLOCK -
+ * wmt_nand_read_bb_oob - OOB data read function used during bad-block scan
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ * @sndcmd: flag whether to issue read command or not
+ *
+ * Reads the factory bad-block marker area with ECC disabled (and, for
+ * non-DDR parts, temporarily relaxed read timing). The read layout is
+ * chip-specific: Sandisk-style parts (manufacturer 0x45) read the page in
+ * 1 KiB slices, one Samsung part needs a split 1+63 byte read, everything
+ * else reads 64 spare bytes at the OOB column. Returns 0 once the read
+ * was issued, or the incoming @sndcmd if it was 0.
+ */
+static int wmt_nand_read_bb_oob(struct mtd_info *mtd, struct nand_chip *chip,
+int page, int sndcmd)
+{
+	unsigned int bch, bak_time;
+	int i, size = 1024, ofs = mtd->realwritesize;
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "enter in wmt_nand_read_bb_oob() page=0x%x\n", page);
+	#endif
+	/* disable BCH + its interrupt; save registers to restore later */
+	bch = readb(info->reg + NFCR9_ECC_BCH_CTRL);
+	writeb((bch & (~BCH_INT_EN))| DIS_BCH_ECC, info->reg + NFCR9_ECC_BCH_CTRL);
+	writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+	bak_time = readl(info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+	if (!mtd->dwDDR)
+		writel(0x2424, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+
+	/* non-power-of-two page sizes carry an extra 2 KiB before the OOB */
+	if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1) {
+		ofs = ofs + 2048;
+	}
+
+	if (sndcmd) {
+		if ((0xFF&(mtd->id>>24)) == 0x45) {
+			/* manufacturer 0x45: read page + spare in 1 KiB slices */
+			for (i = 0; i < ((ofs/1024)+1); i++) {
+				chip->cmdfunc(mtd, NAND_CMD_READ0, i*1024, page);
+				info->datalen = 0;
+				if (i == (ofs/1024))
+					size = (mtd->realoobsize >= 1024) ? 1024 : mtd->realoobsize;
+				chip->read_buf(mtd, chip->oob_poi - ofs + (i*1024), size);
+			}
+		} else if (mtd->id == 0xECDED57E && mtd->id2 == 0x68440000) {
+			/* this Samsung part: 1 byte at column 0, 63 at the OOB */
+			chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+			info->datalen = 0;
+			chip->read_buf(mtd, chip->oob_poi, 1);
+			chip->cmdfunc(mtd, NAND_CMD_READ0, ofs, page);
+			info->datalen = 0;
+			chip->read_buf(mtd, chip->oob_poi+1, 63);
+		} else {
+			chip->cmdfunc(mtd, NAND_CMD_READ0, ofs, page);
+			info->datalen = 0;
+			chip->read_buf(mtd, chip->oob_poi, 64);
+		}
+		sndcmd = 0;
+	}
+	/* restore timing, ECC control and engine configuration */
+	if (!mtd->dwDDR)
+		writel(bak_time, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+	writeb(bch, info->reg + NFCR9_ECC_BCH_CTRL);
+	writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+
+	set_ecc_engine(info, info->ECC_mode);
+
+	return sndcmd;
+}
+
+/* - SCAN DEFAULT INVALID BAD BLOCK -
+ * wmt_nand_read_bb_oob_multi - per-plane OOB read for the bad-block scan
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ * @sndcmd: flag whether to issue read command or not
+ *
+ * Multi-plane variant of wmt_nand_read_bb_oob(): the same ECC-off raw
+ * read, but the destination offset inside chip->oob_poi is shifted by 32
+ * bytes when info->oper_step selects the second plane. Returns 0 once the
+ * read was issued, or the incoming @sndcmd if it was 0.
+ */
+static int wmt_nand_read_bb_oob_multi(struct mtd_info *mtd, struct nand_chip *chip,
+int page, int sndcmd)
+{
+	unsigned int bch, bak_time;
+	int i, size = 1024, plane, ofs = mtd->realwritesize;
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "enter in wmt_nand_read_bb_oob() page=0x%x\n", page);
+	#endif
+	/* disable BCH + its interrupt; save registers to restore later */
+	bch = readb(info->reg + NFCR9_ECC_BCH_CTRL);
+	writeb((bch & (~BCH_INT_EN))| DIS_BCH_ECC, info->reg + NFCR9_ECC_BCH_CTRL);
+	writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+	bak_time = readl(info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+	if (!mtd->dwDDR)
+		writel(0x2424, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+
+	/* non-power-of-two page sizes carry an extra 2 KiB before the OOB */
+	if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1) {
+		ofs = ofs + 2048;
+	}
+
+	if (sndcmd) {
+		if ((0xFF&(mtd->id>>24)) == 0x45) {
+			/* manufacturer 0x45: read page + spare in 1 KiB slices */
+			plane = (info->oper_step ? (ofs-1024) : mtd->writesize);
+			for (i = 0; i < ((ofs/1024)+1); i++) {
+				chip->cmdfunc(mtd, NAND_CMD_READ0, i*1024, page);
+				info->datalen = 0;
+				if (i == (ofs/1024))
+					size = (mtd->realoobsize >= 1024) ? 1024 : mtd->realoobsize;
+				chip->read_buf(mtd, chip->oob_poi - plane + (i*1024), size);
+			}
+		} else if (mtd->id == 0xECDED57E && mtd->id2 == 0x68440000) {
+			/* this Samsung part: 1 byte at column 0, 31 at the OOB */
+			plane = (info->oper_step ? 32 : 0);
+			chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+			info->datalen = 0;
+			chip->read_buf(mtd, chip->oob_poi+plane, 1);
+			chip->cmdfunc(mtd, NAND_CMD_READ0, ofs, page);
+			info->datalen = 0;
+			chip->read_buf(mtd, chip->oob_poi+1+plane, 31);
+		} else {
+			chip->cmdfunc(mtd, NAND_CMD_READ0, ofs, page);
+			info->datalen = 0;
+			plane = (info->oper_step ? 32 : 0);
+			chip->read_buf(mtd, chip->oob_poi+plane, 32);
+		}
+		sndcmd = 0;
+	}
+	/* restore timing, ECC control and engine configuration */
+	if (!mtd->dwDDR)
+		writel(bak_time, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+	writeb(bch, info->reg + NFCR9_ECC_BCH_CTRL);
+	writeb(READ_RESUME, info->reg + NFCR9_ECC_BCH_CTRL + 1);
+
+	set_ecc_engine(info, info->ECC_mode);
+
+	return sndcmd;
+}
+
+/* Multi-plane variant of the raw bad-block OOB read: reads the spare area of
+ * the matching page on both planes.  Returns nonzero if either per-plane read
+ * asks the caller to re-issue the read command. */
+static int wmt_nand_read_bb_oob_plane(struct mtd_info *mtd, struct nand_chip *chip,
+int page, int sndcmd)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int div = mtd->erasesize / mtd->writesize;
+ int ret = 0;
+ /* Map the logical page onto plane 0's physical page; the matching page
+  * on plane 1 is 'div' (pages per block) further on.  oper_step tells the
+  * single-plane reader which half of oob_poi to fill. */
+ page = (page / div) * div + page;
+ info->oper_step = 0;
+ if (wmt_nand_read_bb_oob_multi(mtd, chip, page,sndcmd))
+ ret = 1;
+ info->oper_step = 1;
+ if(wmt_nand_read_bb_oob_multi(mtd, chip, page+div,sndcmd))
+ ret = 1;
+ info->oper_step = 0;
+ return ret;
+}
+
+
+/* Writing the OOB area on its own is no longer supported; this helper programs only the 32 spare bytes of a page. */
+static int wmt_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ /*int i;*/
+ unsigned int b2r_stat;
+ /*int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;*/
+ int eccsize = chip->ecc.size; /* length = mtd->realoobsize; */
+ /* prepad = chip->ecc.prepad, bytes = chip->ecc.bytes;*/
+
+ int pos, status = 0;
+ /*int steps = chip->ecc.steps;*/ /* Vincent 2008.11.4*/
+ const uint8_t *bufpoi = chip->oob_poi;
+ /* struct nand_oobfree *free = chip->ecc.layout->oobfree;*/
+ /* uint32_t boffs;*/
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "\r enter in wmt_nand_write_oob()\n");
+ #endif
+
+
+ /* Acknowledge any pending busy-to-ready interrupt before programming. */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ info->datalen = 0;
+ /*chip->write_buf(mtd, bufpoi, 32);*/
+ /* Stage 32 OOB bytes in the controller's ECC FIFO, then program them at
+  * column 'pos' (just past the ECC-covered data area). */
+ memcpy(info->reg+ECC_FIFO_0, bufpoi, 32);
+ pos = eccsize * chip->ecc.steps + 8*4;
+ /*pos = eccsize + i * (eccsize + chunk);*/
+ /*wmt_nfc_dma_cfg(mtd, 32, 1, 1, i);*/
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ /* printk(KERN_NOTICE "\r in wmt_nand_write_oob_new(): waitfunc_1\n");*/
+ status = chip->waitfunc(mtd, chip);
+ /* printk(KERN_NOTICE "\r in wmt_nand_write_oob_new(): waitfunc_2\n");*/
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+ /* } */
+ return 0;
+
+}
+
+/* Two-plane variant of the OOB write: programs 32 spare bytes into the same
+ * page of both planes using the interleaved program command sequence
+ * (0x80/0x11 for plane 0, then 0x81/0x10 to commit both). */
+static int wmt_nand_write_oob_plane(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ uint8_t *bufpoi = chip->oob_poi;
+ /*int i;*/
+ unsigned int b2r_stat;
+ /*int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;*/
+ int eccsize = chip->ecc.size; /* length = mtd->realoobsize; */
+ /* prepad = chip->ecc.prepad, bytes = chip->ecc.bytes;*/
+
+ int pos, status = 0;
+ /*int steps = chip->ecc.steps;*/ /* Vincent 2008.11.4*/
+
+ int div = mtd->erasesize / mtd->writesize;
+
+ /* Map the logical page onto plane 0; plane 1's page is 'div' (pages per
+  * block) further on. */
+ page = (page / div) *div + page;
+
+ // if(info->oper_step) bufpoi = chip->oob_poi + mtd->realoobsize;
+ /* struct nand_oobfree *free = chip->ecc.layout->oobfree;*/
+ /* uint32_t boffs;*/
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "\r enter in wmt_nand_write_oob()\n");
+ #endif
+ /*
+ * data-ecc-data-ecc ... ecc-oob
+ * or
+ * 512 7 1 5 0 3
+ * data-ecc-prepad-data-pad-oobecc ....
+ */
+
+ /* for (i = 0; i < steps; i++) {*/
+ /*for (i = 0; i < 4; i++) {*/
+ /* Acknowledge any pending busy-to-ready interrupt before programming. */
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+
+ info->datalen = 0;
+ /* Plane 0: stage its 32 OOB bytes and issue SEQIN (0x80) followed by the
+  * dummy-program command 0x11. */
+ memcpy(info->reg+ECC_FIFO_0, bufpoi, 32);
+ pos = eccsize * chip->ecc.steps + 8*4;
+ /*pos = eccsize + i * (eccsize + chunk);*/
+ /*wmt_nfc_dma_cfg(mtd, 32, 1, 1, i);*/
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+
+ chip->cmdfunc(mtd, 0x11, -1, -1);
+
+ /* Plane 1: its OOB bytes start at oob_poi + realoobsize; command 0x81
+  * addresses the second plane and 0x10 confirms both programs. */
+ memcpy(info->reg+ECC_FIFO_0, bufpoi + mtd->realoobsize, 32);
+
+ chip->cmdfunc(mtd, 0x81, pos, page + div);
+
+ chip->cmdfunc(mtd, 0x10, -1, -1);
+ /* printk(KERN_NOTICE "\r in wmt_nand_write_oob_new(): waitfunc_1\n");*/
+ status = chip->waitfunc(mtd, chip);
+ /* printk(KERN_NOTICE "\r in wmt_nand_write_oob_new(): waitfunc_2\n");*/
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+ /* } */
+
+ return 0;
+}
+
+/* Issue a single-plane block erase for the block containing 'page', after
+ * invalidating any write-page cache entries that fall inside that block. */
+static void wmt_single_plane_erase(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i;
+#if 0
+ /* Send commands to erase a block */
+ if (chip->cur_chip && (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX && prob_end == 1) {
+ if (page < par3_ofs || (page >= par6_ofs && page < par7_ofs)) {
+ //printk("SKIP erase page 0x%x, par4_ofs 0x%x\n", page, par4_ofs);
+ return;
+ }
+ } // nand_base.c nand_erase_nand
+#endif
+ /* Drop cached write pages in [page, page + pagecnt): they are about to be
+  * erased. */
+ for (i = 0; i < WR_BUF_CNT; i++)
+ if (page <= info->wr_page[i] && (page+mtd->pagecnt) > info->wr_page[i])
+ info->wr_page[i] = -1;
+ info->cur_page = page;
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+}
+
+/* Issue a two-plane block erase for the blocks containing 'page' on both
+ * planes.  The command sequence is manufacturer dependent: Micron/Intel get a
+ * confirm after each plane address, other vendors queue both plane addresses
+ * and then send a single confirm. */
+static void wmt_multi_plane_erase(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int i, pagecnt = mtd->pagecnt, page_plane1;
+ //if (((page/pagecnt) * pagecnt + page) != (page<<1) - (page%pagecnt))
+ //printk("erase page %d => page1=%d page2=%d\n", page, (page/pagecnt) * pagecnt + page, (page<<1) - (page%pagecnt));
+
+ /* Drop cached write pages inside the logical block being erased. */
+ for (i = 0; i < WR_BUF_CNT; i++)
+ if (page <= info->wr_page[i] && (page+mtd->pagecnt) > info->wr_page[i])
+ info->wr_page[i] = -1;
+
+ /*if (chip->cur_chip && (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX && prob_end == 1) {
+ if (page < par3_ofs || (page >= par5_ofs && page < par7_ofs)) {
+ printk("SKIP erase page 0x%x, par4_ofs 0x%x\n", page, par3_ofs);
+ //while(1);
+ return;
+ }
+ }*/
+ /* Map the logical page onto plane 0; plane 1's page is one block later. */
+ page = (page / pagecnt) * pagecnt + page;
+ page_plane1 = page + pagecnt;
+ //printk("multi erase page %x => page1=%x page2=%x, pagepl1=%x\n", page, (page/pagecnt) * pagecnt + page, (page<<1) - (page%pagecnt), page_plane1);
+ //printk("blk=%d, blk1=%d\n", page/mtd->pagecnt, page_plane1/mtd->pagecnt);
+
+ info->cur_page = page;
+// chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);//simulate
+// chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); //simulate
+// chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page+div); //simulate
+// chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); //simulate
+/*******************************************************************************/
+ if ((0xFF&(mtd->id>>24)) == NAND_MFR_MICRON || (0xFF&(mtd->id>>24)) == NAND_MFR_INTEL) {
+ /* Micron/Intel: 0x60 + plane-0 address, confirm (ERASE3), then 0x60 +
+  * plane-1 address, confirm (ERASE2). */
+ //printk(KERN_NOTICE"multi erase0 command=0x%x \n",NAND_CMD_ERASE1);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ //printk(KERN_NOTICE"multi erase1 command=0x%x \n",NAND_CMD_ERASE3);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE3, -1, -1); /* send cmd 0xd0 */
+ //printk(KERN_NOTICE"multi erase1 command=0x%x \n",NAND_CMD_ERASE1);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page_plane1);
+ //printk(KERN_NOTICE"multi erase2 command=0x%x \n",NAND_CMD_ERASE2);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); /* send cmd 0xd0 */
+ } else {
+ /* Other vendors: two 0x60 + address cycles, then one confirm. */
+ //printk(KERN_NOTICE"multi erase0 command=0x%x \n",NAND_CMD_ERASE1);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ //printk(KERN_NOTICE"multi erase1 command=0x%x \n",NAND_CMD_ERASE1);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page_plane1);
+ //printk(KERN_NOTICE"multi erase2 command=0x%x \n",NAND_CMD_ERASE2);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); /* send cmd 0xd0 */
+ }
+/*******************************************************************************/
+}
+
+
+#if 1 //faster encode function
+/* Return 'n' with its 32 bits mirrored (bit 0 <-> bit 31, bit 1 <-> bit 30,
+ * ...).  Used by bch_encoder() to reverse the bit order of each parity word. */
+u32 reverse32 (u32 n)
+{
+ int i;
+ u32 tmp, y;
+
+ y=0;
+ tmp = n;
+
+ /* Shift bits out of tmp LSB-first while shifting them into y MSB-first.
+  * 31 loop iterations plus a final unshifted step, so the last bit is not
+  * over-shifted. */
+ for(i=0;i<31;i++)
+ {
+ y = y + (tmp & 0x01);
+ //printf("y=%08x\n",y);
+ tmp >>= 1;
+ y <<= 1;
+ }
+ y = y + (tmp & 0x01);
+
+ return y;
+}
+
+/* Copy the BCH generator polynomial for the requested correction strength
+ * (ecc_mod_bits = 4/8/12/24/40/60) into p_bch_gf2, reversing the 32-bit word
+ * order (least-significant word first).  Returns the parity width in bits.
+ * Unrecognized strengths silently fall back to the 4-bit polynomial.
+ * NOTE: the caller must provide room for width/32 + 1 words (27 max). */
+int gen_gf2(u8 ecc_mod_bits, u32 *p_bch_gf2)
+{
+// assign bch_GF_60becc = 840'h9A_5FB0C03C_2D3F4F2F_F106E7D9_ED397A28_479724D7_F259A1CD_DB6C78DA_62668B7F_D9D13742_80F0C37C_06664C92_86CCB2D9_DD1A2B4B_3BC4895C_F7212F8C_D75FB017_7FBFE2B9_66646AAA_CEB7855F_6996F036_3D096201_F62357BD_EB9AD670_03F47DD9_73D6AE65_E5A30A27;
+// assign bch_GF_40becc = 560'hC07F_89B1_A0DC_5D96_619F_32D0_4967_54F6_DE9D_4F93_F527_EF14_EFB0_FD53_9915_A82C_CD92_5528_8030_477D_EE3F_338A_59EC_5FA2_10AF_E2EF_DFAE_D244_DF31_4DA5_0762_B724_A002_9CEF_2DC1;
+// assign bch_GF_24becc = 560'h8E94_E024_8D90_9D2B_4525_72D1_EDD9_D098_FE73_0E8E_8D26_C2D2_2893_A3A0_485B_D0AB_6E0B_4992_9A35_6BD4_30EF;
+// assign bch_GF_12becc = 560'hE48_7325_6115_A567_84A6_940A_4C6E_6D7E_1205_E051;
+// assign bch_GF_8becc = 560'h15_F914_E07B_0C13_8741_C5C4_FB23;
+// assign bch_GF_4becc = 560'h4_5230_43AB_86AB;
+/* Generator polynomials packed most-significant word first (see the RTL
+ * 'assign' lines above for the original bit vectors). */
+u32 bch_GF_60becc[] = { 0x9A, 0x5FB0C03C, 0x2D3F4F2F, 0xF106E7D9, 0xED397A28, 0x479724D7, 0xF259A1CD, 0xDB6C78DA, 0x62668B7F, 0xD9D13742, 0x80F0C37C, 0x06664C92, 0x86CCB2D9, 0xDD1A2B4B, 0x3BC4895C, 0xF7212F8C, 0xD75FB017, 0x7FBFE2B9, 0x66646AAA, 0xCEB7855F, 0x6996F036, 0x3D096201, 0xF62357BD, 0xEB9AD670, 0x03F47DD9, 0x73D6AE65, 0xE5A30A27};
+u32 bch_GF_40becc[] = { 0xC07F, 0x89B1A0DC, 0x5D96619F, 0x32D04967, 0x54F6DE9D, 0x4F93F527, 0xEF14EFB0, 0xFD539915, 0xA82CCD92, 0x55288030, 0x477DEE3F, 0x338A59EC, 0x5FA210AF, 0xE2EFDFAE, 0xD244DF31, 0x4DA50762, 0xB724A002, 0x9CEF2DC1};
+u32 bch_GF_24becc[] = { 0x8E94, 0xE0248D90, 0x9D2B4525, 0x72D1EDD9, 0xD098FE73, 0x0E8E8D26, 0xC2D22893, 0xA3A0485B, 0xD0AB6E0B, 0x49929A35, 0x6BD430EF};
+u32 bch_GF_12becc[] = { 0xE487325, 0x6115A567, 0x84A6940A, 0x4C6E6D7E, 0x1205E051};
+u32 bch_GF_8becc[] = { 0x15, 0xF914E07B, 0x0C138741, 0xC5C4FB23};
+u32 bch_GF_4becc[] = { 0x45230, 0x43AB86AB};
+u32 *p_tmp;
+int i,len,width;
+
+ switch (ecc_mod_bits)
+ {
+ case 4 : width = 52; p_tmp = bch_GF_4becc; break;
+ case 8 : width = 104; p_tmp = bch_GF_8becc; break;
+ case 12 : width = 156; p_tmp = bch_GF_12becc; break;
+ case 24 : width = 336; p_tmp = bch_GF_24becc; break;
+ case 40 : width = 560; p_tmp = bch_GF_40becc; break;
+ case 60 : width = 840; p_tmp = bch_GF_60becc; break;
+ default : width = 52; p_tmp = bch_GF_4becc; break;
+ }
+/* Reverse word order on copy: output word 0 is the table's last word. */
+len = width/32 +1;
+for(i=0;i<len;i++)
+ *(p_bch_gf2+i) = *(p_tmp+len-1-i);
+
+return (width);
+}
+//calculate ecc with 1bit data
+u32 calc_1b_bch( u32 *parity_buf, u32 *bch_GF2, u32 din, u8 parity_len, u8 parity_msb_pos)
+{
+ //parity_buf: pointer to parity buffer
+ //bch_GF2: pointer to generation polynomial
+ //din: input data, bit31-1 should be 0, only bit0 is valid
+ //parity_len: parity length in DW
+ //parity_msb_pos: the msb position of parity
+ u8 i;
+ u32 mask = ~(0xffffffff << (parity_msb_pos));
+ u32 msb_word = mask & parity_buf[parity_len-1];
+ u32 parity_msb = msb_word >> (parity_msb_pos-1) ;
+
+ for(i=parity_len-1;i>0;i--)
+ {
+ parity_buf[i]= (parity_buf[i]<<1) +(parity_buf[i-1]>>31);
+ if(din ^ parity_msb) parity_buf[i]= parity_buf[i]^ bch_GF2[i];
+ }
+
+ parity_buf[0]= (parity_buf[0]<<1);
+ if(din ^ parity_msb) parity_buf[i]= parity_buf[i]^ bch_GF2[i];
+
+ return (parity_msb );
+}
+
+/* Software BCH encoder: computes the parity words for 'datacnt' 32-bit data
+ * words at the selected correction strength and stores them, bit-reversed and
+ * word-reversed, into p_parity.  Returns 0 on success, -1 if the temporary
+ * parity buffer cannot be allocated (GFP_KERNEL — may sleep). */
+int bch_encoder(u32 *p_parity, u32 *p_data, u8 ecc_mod_bits, u32 datacnt)
+{
+ //p_parity: pointer to parity buffer
+ //p_data: pointer to input data
+ //ecc_mod_bits: ecc mode select, options are 4,8,12,24,40,60
+ //datacnt: data length in DW
+ int i,j;
+ int bchGF_width;
+ u8 parity_len,parity_msb_pos;
+ u32 bch_GF2[27]; //support 60becc, 105bytes 27DW
+ u32 tmp;
+ u32 *p;
+ u8 align_offset;
+
+ bchGF_width = gen_gf2( ecc_mod_bits, bch_GF2);
+ parity_len = (u8)(bchGF_width /32 + 1);
+ parity_msb_pos = (u8)(bchGF_width %32);
+ /* For all supported widths parity_msb_pos is in [8,28], so align_offset
+  * stays in [4,24] and the shifts below never reach 32. */
+ align_offset = 32 - parity_msb_pos;
+
+ //p = (u32 *)malloc((parity_len) * sizeof(u32));
+ p = (unsigned int *)kmalloc((parity_len) * sizeof(unsigned int), GFP_KERNEL);
+ if (p == NULL) {
+ printk("malloc Error!");
+ return -1;
+ } else {
+ //initialize parity buffer
+ for(i=parity_len-1;i>=0;i--)
+ *(p+i) = 0;
+
+ //Caculate bit by bit
+ /* Feed each data word into the LFSR LSB-first. */
+ for (i=0;i<datacnt;i++) {
+ tmp = p_data[i];
+ for (j=0;j<32;j++) {
+ calc_1b_bch( p, bch_GF2, tmp&0x00000001, parity_len, parity_msb_pos);
+ tmp >>= 1;
+ }
+ }
+ //adjust parity align offset
+ /* Left-justify the parity bits within the word array. */
+ for (i=parity_len -1 ; i >0; i--)
+ p[i] = (p[i] << align_offset) + (p[i-1] >> (32-align_offset));
+
+ p[0] = p[0] << align_offset;
+
+ //reverse parity order
+ /* Output words in reverse order, each with its bits mirrored. */
+ for(i=0;i<parity_len;i++)
+ p_parity[parity_len-1-i] =reverse32(p[i]);
+ kfree(p); //release malloc
+ }
+ return 0;
+}
+#endif //faster encode function
+
+#if 0 //old fast encode function
+
+/* Disabled (#if 0) legacy bit-mirror; superseded by reverse32() above. */
+unsigned int reverse32 (unsigned int n)
+{
+ int i=0;
+ unsigned int tmp = n, y=0;
+ for(;i<31;i++) {
+ y += tmp&0x00000001;
+ tmp >>= 1;
+ y <<= 1;
+ }
+ y += tmp&0x00000001;
+ return y;
+}
+
+/* Disabled (#if 0) legacy polynomial lookup; superseded by gen_gf2() above.
+ * Note the widths here are one less than gen_gf2()'s (MSB index vs. width). */
+int Gen_GF2(u8 bits, unsigned int *buf)
+{
+// assign bch_GF_40becc = 560'hC07F_89B1_A0DC_5D96_619F_32D0_4967_54F6_DE9D_4F93_F527_EF14_EFB0_FD53_9915_A82C_CD92_5528_8030_477D_EE3F_338A_59EC_5FA2_10AF_E2EF_DFAE_D244_DF31_4DA5_0762_B724_A002_9CEF_2DC1;
+// assign bch_GF_24becc = 560'h8E94_E024_8D90_9D2B_4525_72D1_EDD9_D098_FE73_0E8E_8D26_C2D2_2893_A3A0_485B_D0AB_6E0B_4992_9A35_6BD4_30EF;
+// assign bch_GF_12becc = 560'hE48_7325_6115_A567_84A6_940A_4C6E_6D7E_1205_E051;
+// assign bch_GF_8becc = 560'h15_F914_E07B_0C13_8741_C5C4_FB23;
+// assign bch_GF_4becc = 560'h4_5230_43AB_86AB;
+ unsigned int bch_GF_40becc[] = { 0xC07F, 0x89B1A0DC, 0x5D96619F, 0x32D04967, 0x54F6DE9D, 0x4F93F527, 0xEF14EFB0, 0xFD539915, 0xA82CCD92, 0x55288030, 0x477DEE3F, 0x338A59EC, 0x5FA210AF, 0xE2EFDFAE, 0xD244DF31, 0x4DA50762, 0xB724A002, 0x9CEF2DC1};
+ unsigned int bch_GF_24becc[] = { 0x8E94, 0xE0248D90, 0x9D2B4525, 0x72D1EDD9, 0xD098FE73, 0x0E8E8D26, 0xC2D22893, 0xA3A0485B, 0xD0AB6E0B, 0x49929A35, 0x6BD430EF};
+ unsigned int bch_GF_12becc[] = { 0xE487325, 0x6115A567, 0x84A6940A, 0x4C6E6D7E, 0x1205E051};
+ unsigned int bch_GF_8becc[] = { 0x15, 0xF914E07B, 0x0C138741, 0xC5C4FB23};
+ unsigned int bch_GF_4becc[] = { 0x45230, 0x43AB86AB};
+ unsigned int *p;
+ int i,len,width;
+
+ switch (bits) {
+ case 4 : width = 51; p = bch_GF_4becc; break;
+ case 8 : width = 103; p = bch_GF_8becc; break;
+ case 12 : width = 155; p = bch_GF_12becc; break;
+ case 24 : width = 335; p = bch_GF_24becc; break;
+ case 40 : width = 559; p = bch_GF_40becc; break;
+ default : width = 51; p = bch_GF_4becc; break;
+ }
+ len = width/32 +1;
+ for(i=0;i<len;i++)
+ buf[i] = *(p+len-1-i);
+
+ return (width);
+}
+
+/* Disabled (#if 0) legacy single-bit LFSR step; superseded by calc_1b_bch()
+ * above (which uses a length/MSB-position convention instead of last-index). */
+unsigned int Caculat_1b_bch( unsigned int *pariA, unsigned int *bch_GF2, unsigned int din, u8 pari_len, u8 pari_lb)
+{
+ //din: bit31-1 should be 0, only bit0 is valid
+ //pari_len: the index of last DW of the parity
+ //pari_lb: the MSB of the last DW
+ u8 i;
+ unsigned int mask = ~(0xffffffff <<(pari_lb+1));
+ unsigned int lstdw = mask & pariA[pari_len];
+ unsigned int ldwMSB = lstdw >> pari_lb ;
+ // for(i=pari_len;i>=0;i--) printk("%8x",pariA[i]);printk("\n---before\n");
+ for(i=pari_len;i>0;i--) {
+ pariA[i]= (pariA[i]<<1) +(pariA[i-1]>>31);
+ if(din ^ ldwMSB) pariA[i] = pariA[i] ^ bch_GF2[i];
+ }
+ pariA[0]= (pariA[0]<<1);
+ /* i == 0 here after the loop: final feedback into word 0. */
+ if(din ^ ldwMSB) pariA[i] = pariA[i]^ bch_GF2[i];
+ // for(i=pari_len;i>=0;i--) printk("%8x",pariA[i]);printk("\n---after\n");
+ return (ldwMSB );
+}
+
+/* Disabled (#if 0) legacy encoder; superseded by bch_encoder() above, which
+ * replaces the byte-offset realignment trick here with explicit shifts. */
+int bch_encoder(unsigned int *p_parity, unsigned int *p_data, u8 bits, unsigned int datacnt)
+{
+ int i,j;
+ int bchGF_msb;
+ u8 pari_len,pari_lb;
+ unsigned int bch_GF2[18];
+ unsigned int tmp;
+ unsigned int *p, *p1;
+ u8 *p2;//, p3[50];
+
+ bchGF_msb = Gen_GF2( bits, bch_GF2);
+ pari_len = (u8)(bchGF_msb /32);
+ pari_lb = (u8)(bchGF_msb %32);
+ //p = (unsigned int *)malloc((pari_len+2) * sizeof(unsigned int));
+ p = (unsigned int *)kmalloc((pari_len+2) * sizeof(unsigned int), GFP_KERNEL);
+ if (p == NULL) {
+ printk("malloc Error!");
+ return -1;
+ } else {
+ /*gen parity[ bchGF_msb:0] begin*/
+ //Init
+ for(i=pari_len+1;i>=0;i--)
+ *(p+i) = 0;
+ //Caculate
+ p1 = &p[1];
+ for (i=0;i<datacnt;i++) {
+ tmp = p_data[i];
+ for (j=0;j<32;j++) {
+ Caculat_1b_bch( p1, bch_GF2, tmp&0x00000001, pari_len, pari_lb);
+ tmp >>= 1;
+ }
+ }
+ //printk("encode finiah!pari_len=%d p_parity=0x%x\n",pari_len, (unsigned int)p_parity);
+ /*gen parity[ bchGF_msb:0] end*/
+
+ /*reverse oder of parity begin*/
+ p2 = (u8 *)p;
+ //printk("pari_lb=%d p2=0x%x\n", pari_lb, (unsigned int)p2);
+ /* Realign by re-reading at a byte offset — assumes little-endian and
+  * unaligned 32-bit loads. */
+ p1 = (unsigned int *)(p2+3-(pari_lb/8));
+ /*p2 = (p2+3-(pari_lb/8));
+ for(i=0;i<((pari_len+1)*4);i++)
+ p3[i] = p2[i];
+ p1 = p3;
+ */
+ //printk("p2=0x%x p3=0x%x\n", (unsigned int)p2, (unsigned int)p3);
+ for(i=0;i<=pari_len;i++) {
+ p_parity[pari_len-i] = reverse32(p1[i]);
+ }
+ /*reverse oder of parity end*/
+ //printk("reverse finiah!\n");
+ kfree(p); //release malloc
+ }
+ //printk("leave encode\n");
+ return 0;
+}
+#endif //old fast encode function
+
+#if 0 //slow encode function
+/* Disabled (#if 0) wrapper around the bit-array reference encoder below;
+ * updates *c_len only when wmt_bchencoder() succeeds. */
+int encode_ecc(unsigned char *src_data, unsigned char *parity, unsigned int ecc_bit, unsigned char *c_len, unsigned int encode_len)
+{
+ //unsigned char src_data[512];//24
+ //unsigned char parity[26];//42
+ //unsigned char ecc_bit;
+ unsigned char c_len1 = *c_len;
+ unsigned int fail;
+
+ //char in_char;
+ int i;
+ //int j,in_v;
+
+
+
+ //for (i=0; i<encode_len; i++) src_data[i] = 0x00;
+ // for (i = 0; i < encode_len; i += 2) {
+ // src_data[i] = 0xFF&(jj>>8);
+ // src_data[i+1] = 0xFF&jj;
+ // jj++;
+ // jj %= 0x10000;
+ // src_data[i] = 0x12;
+ // src_data[i+1] = 0x12;
+ // }
+/*
+ i = 0; j = 0;
+ in_char = getchar();
+ while (in_char != EOF) {
+ in_v = hextoint(in_char);
+ if (in_v != -1) {
+ if (j==0) {
+ src_data[i] = 0;
+ src_data[i] += in_v * 16;
+ j++;
+ } else {
+ src_data[i] += in_v;
+ i++;
+ j = 0;
+ }
+ }
+ in_char = getchar();
+ }*/
+ //printk("start encode\n");
+ fail = wmt_bchencoder(src_data,parity,ecc_bit,&c_len1, encode_len);
+ if (fail)
+ printk("----------------Encode Error Detected! code=%d-----------------\n",fail);
+ else
+ *c_len = c_len1;
+ /*printk("\nCodeLengh=%d %d Parity=",*c_len, c_len1);
+ for (i=(c_len1-1); i>=0; i--)
+ printk("%02x ",parity[i]);
+ printk("\n");*/
+
+ return 0;
+}
+
+/* Disabled (#if 0) helper: convert one hex digit ('0'-'9','a'-'f','A'-'F')
+ * to its value; returns -1 for CR/LF, logs and returns -1 for other input. */
+int hextoint(char hex)
+// Convert HEX number to Integer
+{
+ int r, h;
+ r = -1;
+ h = (int)hex;
+ if ((h >= 97) && (h <= 102))
+ r = h - 87;
+ else if ((h >= 65) && (h <= 70))
+ r = h - 55;
+ else if ((h >= 48) && (h <= 57))
+ r = h - 48;
+ else if ((h != 10) && (h != 13))
+ printk("Error detected!!! hex=%c",hex);
+ return r;
+}
+
+
+// This function is used to encode the BCH code for the input data
+// data : [IN] The information data to be encoded by BCH. The length of this buffer is fixed at 512 bytes.
+// bch_code : [OUT] Buffer pointer used to keep the BCH code.
+// bits : [IN] The number of bits of BCH error-correcting capability.
+// bch_codelen : [IN/OUT] On input, the length of the bch_code buffer in bytes; on output, the length of the
+// encoded BCH code for the data at the given error-correcting capability.
+// RETURN : 0 indicates success. Nonzero indicates failure.
+/* Disabled (#if 0) bit-per-byte reference encoder.  NOTE(review): it relies
+ * on b_data/bch_sera/bch_sera_tmp buffers and the unpacked bch_GF_*becc
+ * tables whose declarations are commented out below, so it cannot compile
+ * as-is; retained for documentation of the algorithm only. */
+unsigned int wmt_bchencoder (unsigned char *data, unsigned char *bch_code, unsigned char bits, unsigned char *bch_codelen, unsigned int encode_len)
+{
+ unsigned char bch_codelen_in;
+ unsigned char bch_i;
+ /*unsigned char b_data[MAX_BANK_SIZE*8];
+ unsigned char bch_sera[MAX_PARITY_SIZE*8];
+ unsigned char bch_sera_tmp[MAX_PARITY_SIZE*8];*/
+ unsigned char bch_sera_back;
+ unsigned int width;
+ unsigned int i,j,k;
+ unsigned long retval;
+ unsigned char offset;
+
+ unsigned char *bch_GF2;
+ /* (Commented-out, one-bit-per-byte generator polynomial tables for
+  * 4/8/12/16/24/40-bit ECC removed for brevity — gen_gf2() above carries
+  * the packed 32-bit-word equivalents for all strengths except 16.) */
+
+ // initialization
+ retval = 0;
+ for(i=0; i<MAX_PARITY_SIZE*8; i++) {
+ bch_sera[i] = 0;
+ bch_sera_tmp[i] = 0;
+ }
+
+ /* Unpack the input into one bit per byte, LSB-first within each byte. */
+ for (i=0; i <=(encode_len*8); i++) {
+ if ((unsigned char)((unsigned int)(1<<(i%8))) & data[i/8])
+ b_data[i] = 1;
+ else
+ b_data[i] = 0;
+ }
+
+ // select width and poly-nominal
+ switch (bits) {
+ case 4 : width = 51; bch_GF2 = bch_GF_4becc; break;
+ case 8 : width = 103; bch_GF2 = bch_GF_8becc; break;
+ case 12 : width = 155; bch_GF2 = bch_GF_12becc; break;
+ case 16 : width = 207; bch_GF2 = bch_GF_16becc; break;
+ case 24 : width = 335; bch_GF2 = bch_GF_24becc; break;
+ case 40 : width = 559; bch_GF2 = bch_GF_40becc; break;
+ default : width = 51; bch_GF2 = bch_GF_4becc; retval += 1; break;
+ }
+
+ // calculate the parity
+ /* Classic serial LFSR: one shift/XOR pass per input bit. */
+ for (k=0; k<(encode_len*8); k++) {
+ bch_i = b_data[k];
+ bch_sera_back = bch_sera[width] ^ bch_i;
+ bch_sera_tmp[0] = bch_sera_back;
+ for (i=0; i<width; i++) {
+ bch_sera_tmp[i+1] = bch_sera[i] ^ (bch_sera_back * bch_GF2[width-(i+1)]);
+ }
+ for (i=0; i<=width; i++)
+ bch_sera[i] = bch_sera_tmp[i];
+ }
+
+ /* Repack the parity bits into bytes, MSB-first, with a 4-bit lead-in
+  * offset for the strengths whose width is not byte-aligned. */
+ i = 0;
+ bch_code[0] = 0;
+ bch_codelen_in = *bch_codelen;
+ if(bits == 4 || bits == 12)
+ offset = 4;
+ else
+ offset = 0;
+ for (j = 0; j <= width; j++) {
+ *bch_codelen = i+1;
+ bch_code[i] += bch_sera[j] * (unsigned char)((unsigned int)(1<<(7-((j+offset)%8))));
+ if (i>=bch_codelen_in) {
+ retval += 2;
+ break;
+ }
+ if((j+offset)%8==7) {
+ i++;
+ bch_code[i] = 0;
+ }
+ }
+
+ return(retval);
+}
+#endif //end of #if 0 : slow encode function
+
+/**
+ * wmt_nand_read_page - hardware ECC syndrome based page read
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ *
+ * The hardware generator calculates the error syndrome automatically.
+ * Therefore we need a special OOB layout and handling.
+ */
+static int wmt_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ uint8_t *bufpoi = chip->oob_poi;
+
+ //#ifdef WMT_SW_RDMZ
+ unsigned int rdmz_mark = 0;//,g1=0,g2=0,g3=0;
+ //#endif
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "\r enter in wmt_nand_read_page()page=0x%x\n", page);
+ #endif
+ /* Second plane's 20 OOB bytes land after the first plane's. */
+ if (info->oper_step)
+ bufpoi = chip->oob_poi+20;//bufpoi = chip->oob_poi+mtd->realoobsize;
+//g1 = wmt_read_oscr();
+ info->datalen = 0;
+ /* unc_allFF: a previous step flagged this page as uncorrectable-but-
+  * erased, so synthesize an all-0xFF page instead of copying data out. */
+ if (info->unc_allFF && !mtd->bbt_sw_rdmz) {
+ set_FIFO_FF((uint32_t *)buf, mtd->realwritesize/4);
+ } else
+ chip->read_buf(mtd, buf, mtd->realwritesize);
+//g2 = wmt_read_oscr();
+ /* Hynix eSLC mode: remap the page address within the first partition
+  * region (negative result means the page is skipped). */
+ if (chip->cur_chip && prob_end == 1 &&
+ (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX) {
+ if (!chip->realplanenum)
+ if (page < par4_ofs && second_chip == 0) {
+ #ifdef ESLC_DEBUG
+ if (page%mtd->pagecnt == 0 || page%mtd->pagecnt == (mtd->pagecnt/2))
+ printk("\nread: \n");
+ #endif
+ page = hynix_eslc_page_address_calculate(mtd, chip, page);
+ if (page < 0)
+ return 0;
+ }
+ }
+ /*if (page == 0xaa00) {
+ print_nand_buffer((uint8_t *)(info->reg+ECC_FIFO_0), 24);
+ rdmzier_oob((uint8_t *)bufpoi, (uint8_t *)(info->reg+ECC_FIFO_0), 6, info->cur_page, mtd->realwritesize/4);
+ print_nand_buffer((uint8_t *)bufpoi, 24);
+ }*/
+ /*if (info->cur_page != page) {
+ printk("cur_page=%x, page=%x\n", info->cur_page, page);
+ while(1);
+ }*/
+ //#ifdef WMT_SW_RDMZ
+ /* Software de-randomizer: ECC_FIFO_5 holds a marker word; if it matches
+  * wmt_rdmz (either raw or after descrambling), the page data was
+  * randomized on write and must be descrambled here. */
+ if (mtd->dwRdmz == 1 && mtd->bbt_sw_rdmz) {
+ //printk("check read page derdmz page= 0x%x\n", page);
+ rdmzier_oob((uint8_t *)&rdmz_mark, (uint8_t *)(info->reg+ECC_FIFO_5), 1, page, (mtd->realwritesize+20)/4);
+ if ((*(unsigned int *)(info->reg+ECC_FIFO_5)) == (*(unsigned int *)wmt_rdmz) ||
+ rdmz_mark == (*(unsigned int *)wmt_rdmz)) {
+ //printk("read page derdmz page= 0x%x\n", page);
+ rdmzier(buf, mtd->realwritesize/4, page);
+ }
+ }
+ //#endif
+
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & 0xF7, info->reg + NFCRd_OOB_CTRL);
+
+ //printk("re page=0x%x rdmz_mark=0x%x wmt_rdmz=0x%x fifo5=0x%x\n",page , rdmz_mark, *(unsigned int *)wmt_rdmz, *(unsigned int *)(info->reg+ECC_FIFO_5));
+ /* OOB bytes: descramble from the ECC FIFO if randomized, synthesize 0xFF
+  * for an erased page, otherwise copy the 20 bytes straight out. */
+ if (mtd->dwRdmz == 1 && rdmz_mark == *(unsigned int *)wmt_rdmz && mtd->bbt_sw_rdmz) {
+ //print_nand_buffer((uint8_t *)(info->reg+ECC_FIFO_0), 24);
+ rdmzier_oob((uint8_t *)bufpoi, (uint8_t *)(info->reg+ECC_FIFO_0), 5/*20/4*/, page, mtd->realwritesize/4);
+ //print_nand_buffer((uint8_t *)bufpoi, 24);
+ } else if (info->unc_allFF) {
+ set_FIFO_FF((uint32_t *)(bufpoi), 4);
+ } else
+ memcpy(bufpoi, info->reg+ECC_FIFO_0, 20);
+ /*print_nand_buffer((char *)(chip->oob_poi), 32);
+ print_nand_buffer((char *)(buf), 16);
+ printk("info->unc_bank=%x golden=%x\n", info->unc_bank, ((1<<info->banks)-1));*/
+ /*if (*(uint32_t *)(info->reg+ECC_FIFO_0) != 0xFFFFFFFF) {
+ printk(KERN_NOTICE "rd PID:%d Comm:%s sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x\n",
+ current->pid, current->comm, *(uint32_t *)(info->reg+ECC_FIFO_0),
+ *(uint32_t *)(info->reg+ECC_FIFO_1), *(uint32_t *)(info->reg+ECC_FIFO_2),
+ *(uint32_t *)(info->reg+ECC_FIFO_3), info->cur_page);
+ printk("info->unc_bank=%x golden=%x\n", info->unc_bank, ((1<<info->banks)-1));
+ }*/
+//g3 = wmt_read_oscr();
+ //printk(KERN_DEBUG"g12=%d,g23=%d\n",(g2-g1)/3,(g3-g1)/3);
+ return 0;
+}
+
+/**
+ * wmt_nand_read_page_noalign - hardware ECC syndrome based page read (DMA-buffer variant)
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ *
+ * The hardware generator calculates the error syndrome automatically.
+ * Therefore we need a special OOB layout and handling.
+ */
+static int wmt_nand_read_page_noalign(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ uint8_t *bufpoi = chip->oob_poi;
+
+ unsigned int rdmz_mark = 0;
+
+ if (info->oper_step)
+ bufpoi = chip->oob_poi+20;
+
+ info->datalen = 0;
+ if (info->unc_allFF && !mtd->bbt_sw_rdmz) {
+ set_FIFO_FF((uint32_t *)buf, mtd->realwritesize/4);
+ } else
+ chip->read_buf(mtd, buf, mtd->realwritesize);
+
+ if (chip->cur_chip && prob_end == 1 &&
+ (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX) {
+ if (!chip->realplanenum)
+ if (page < par4_ofs && second_chip == 0) {
+ #ifdef ESLC_DEBUG
+ if (page%mtd->pagecnt == 0 || page%mtd->pagecnt == (mtd->pagecnt/2))
+ printk("\nread: \n");
+ #endif
+ page = hynix_eslc_page_address_calculate(mtd, chip, page);
+ if (page < 0)
+ return 0;
+ }
+ }
+
+ if (mtd->dwRdmz == 1 && mtd->bbt_sw_rdmz) {
+ //printk("check read page derdmz page= 0x%x\n", page);
+ rdmzier_oob((uint8_t *)&rdmz_mark, (uint8_t *)(info->dmabuf + mtd->realwritesize + 20), 1, page, (mtd->realwritesize+20)/4);
+ if ((*(unsigned int *)(info->dmabuf + mtd->realwritesize + 20)) == (*(unsigned int *)wmt_rdmz) ||
+ rdmz_mark == (*(unsigned int *)wmt_rdmz)) {
+ //printk("read page derdmz page= 0x%x\n", page);
+ rdmzier(buf, mtd->realwritesize/4, page);
+ }
+ }
+
+
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & 0xF7, info->reg + NFCRd_OOB_CTRL);
+
+ if (mtd->dwRdmz == 1 && rdmz_mark == *(unsigned int *)wmt_rdmz && mtd->bbt_sw_rdmz) {
+ //print_nand_buffer((uint8_t *)(info->dmabuf + mtd->realwritesize), 24);
+ rdmzier_oob((uint8_t *)bufpoi, (uint8_t *)(info->dmabuf + mtd->realwritesize /*+ 20*/), 5, page, mtd->realwritesize/4);
+ //print_nand_buffer((uint8_t *)bufpoi, 24);
+ } else if (info->unc_allFF) {
+ set_FIFO_FF((uint32_t *)(bufpoi), 6);
+ } else
+ memcpy(bufpoi, info->dmabuf + mtd->realwritesize, 20);
+ //print_nand_buffer((char *)(chip->oob_poi), 32);
+ /*print_nand_buffer((char *)(buf), 16);
+ printk("info->unc_bank=%x golden=%x\n", info->unc_bank, ((1<<info->banks)-1));*/
+
+ /*if (*(uint32_t *)(info->reg+ECC_FIFO_0) != 0xFFFFFFFF) {
+ printk(KERN_NOTICE "rd PID:%d Comm:%s sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x\n",
+ current->pid, current->comm, *(uint32_t *)(info->reg+ECC_FIFO_0),
+ *(uint32_t *)(info->reg+ECC_FIFO_1), *(uint32_t *)(info->reg+ECC_FIFO_2),
+ *(uint32_t *)(info->reg+ECC_FIFO_3), info->cur_page);
+ printk("info->unc_bank=%x golden=%x\n", info->unc_bank, ((1<<info->banks)-1));
+ }*/
+ /*if (info->dmabuf[0] == 1)
+ printk( "R%x:%x ", page, *(uint32_t *)info->dmabuf);*/
+/*printk(KERN_DEBUG "RPG=0x%x : 0x%x 0x%x 0x%x 0x%x\n", page, *(uint32_t *)info->dmabuf,
+*((uint32_t *)info->dmabuf+1), *((uint32_t *)info->dmabuf+2), *((uint32_t *)info->dmabuf+3));*/
+ return 0;
+}
+
+/* NOTE(review): dead code — wmt_nand_cp_data()/wmt_nand_cp_oob() are
+ * compiled out via #if 0 and kept for reference only. */
+#if 0
+static int wmt_nand_cp_data(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int rdmz_mark = 0;
+
+ info->datalen = 0;
+ if (info->unc_allFF && !mtd->bbt_sw_rdmz) {
+ set_FIFO_FF((uint32_t *)buf, mtd->realwritesize/4);
+ } else
+ chip->read_buf(mtd, buf, mtd->realwritesize);
+
+ if (mtd->dwRdmz == 1 && mtd->bbt_sw_rdmz) {
+ rdmzier_oob((uint8_t *)&rdmz_mark, (uint8_t *)(info->reg+ECC_FIFO_5), 1, page, (mtd->realwritesize+20)/4);
+ if ((*(unsigned int *)(info->reg+ECC_FIFO_5)) == (*(unsigned int *)wmt_rdmz) ||
+ rdmz_mark == (*(unsigned int *)wmt_rdmz)) {
+ rdmzier(buf, mtd->realwritesize/4, page);
+ }
+ }
+ return 0;
+}
+
+static int wmt_nand_cp_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ uint8_t *bufpoi = chip->oob_poi;
+ unsigned int rdmz_mark = 0;
+
+ if (mtd->dwRdmz == 1 && mtd->bbt_sw_rdmz)
+ rdmzier_oob((uint8_t *)&rdmz_mark, (uint8_t *)(info->reg+ECC_FIFO_5), 1, page, (mtd->realwritesize+20)/4);
+
+ if (mtd->dwRdmz == 1 && rdmz_mark == *(unsigned int *)wmt_rdmz && mtd->bbt_sw_rdmz) {
+ rdmzier_oob((uint8_t *)bufpoi, (uint8_t *)(info->reg+ECC_FIFO_0), 5, page, mtd->realwritesize/4);
+ } else if (info->unc_allFF) {
+ set_FIFO_FF((uint32_t *)(bufpoi), 4);
+ } else
+ memcpy(bufpoi, info->reg+ECC_FIFO_0, 20);
+
+ return 0;
+}
+#endif
+
+/*
+ * reset_nfc - soft-reset the NAND flash controller while preserving its
+ * configuration registers.
+ * @buf:  optional caller-supplied save area of 7 words, so that the save
+ *        (step bit 0) and restore (step bit 1) can be issued in separate
+ *        calls; ignored when step == 3 (an internal array is used).
+ * @step: bit 0 = save 7 config registers, mask interrupts, assert reset;
+ *        bit 1 = wait for idle, de-assert reset, restore the registers.
+ *
+ * Returns 0 on success, non-zero if the controller did not go idle.
+ *
+ * NOTE(review): a step == 2 call with buf == NULL would restore from the
+ * uninitialized local array — callers must pair step-1/step-2 calls with
+ * the same buf (all callers in this file do).
+ * NOTE(review): spins forever (while(1)) if the chip never becomes ready.
+ */
+int reset_nfc(struct mtd_info *mtd, unsigned int *buf, int step)
+{
+ int ret = 0;
+ unsigned int backup1[7], *backup;
+ //unsigned int t1, t2;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+
+ backup = backup1;
+ if (step != 3 && buf != NULL)
+ backup = buf;
+
+ if (step&1) {
+ backup[0] = readl(info->reg + NFCR9_ECC_BCH_CTRL);
+ backup[1] = readl(info->reg + NFCRe_CALC_TADL);
+ backup[2] = readl(info->reg + NFCR10_OOB_ECC_SIZE);
+ backup[3] = readl(info->reg + NFCR12_NAND_TYPE_SEL);
+ backup[4] = readl(info->reg + NFCR13_INT_MASK);
+ backup[5] = readl(info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+ backup[6] = readl(info->reg + NFCR7_DLYCOMP);
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ writew(1, info->reg + NFCR12_NAND_TYPE_SEL);
+ writeb(0x2, info->reg + NFCR11_SOFT_RST);
+ }
+ if (step&2) {
+ ret = NFC_WAIT_IDLE(mtd);
+ if (ret)
+ printk("reset nfc, wait idle time out\n");
+ writeb(0x0, info->reg + NFCR11_SOFT_RST);
+
+ ret = wmt_wait_chip_ready(mtd);
+ if (ret) {
+ printk(KERN_ERR "reset nfc, The chip is not ready\n");
+ print_nand_register(mtd);
+ while(1);
+ }
+ writeb(B2R, info->reg + NFCRb_NFC_INT_STAT);
+ writeb(0, info->reg + NFCRd_OOB_CTRL);
+ writel(backup[0], info->reg + NFCR9_ECC_BCH_CTRL);
+ writel(backup[1], info->reg + NFCRe_CALC_TADL);
+ writel(backup[2], info->reg + NFCR10_OOB_ECC_SIZE);
+ writel(backup[3], info->reg + NFCR12_NAND_TYPE_SEL);
+ writel(backup[4], info->reg + NFCR13_INT_MASK);
+ writel(backup[5], info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+ writel(backup[6], info->reg + NFCR7_DLYCOMP);
+ }
+
+ return ret;
+}
+/* Enable (on != 0) or disable the NFC hardware randomizer for subsequent
+ * transfers by programming the RDMZ calculation register. */
+void nfc_hw_rdmz(struct mtd_info *mtd, int on)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+ writel(on ? RDMZ : 0, info->reg + NFCRf_CALC_RDMZ);
+}
+
+/*
+ * hw_encode_oob - trigger a data-phase-less command so the controller
+ * generates the OOB ECC bytes in hardware.
+ *
+ * If the data ECC mode is stronger than mode 5, the mode is temporarily
+ * capped at 5 for the OOB pass and restored afterwards. OOB_READ is set
+ * around the trigger and cleared again.
+ *
+ * Returns the NFC_WAIT_IDLE() result (non-zero on idle timeout).
+ */
+int hw_encode_oob(struct mtd_info *mtd)
+{
+ int ret = 0;
+ unsigned int ecc_mode, oob_ecc_mode, tmp;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+ tmp = readb(info->reg + NFCR9_ECC_BCH_CTRL);
+ ecc_mode = tmp & ECC_MODE;
+ oob_ecc_mode = ecc_mode;
+ if (ecc_mode > 5)
+ oob_ecc_mode = 5;
+
+ if (oob_ecc_mode != ecc_mode)
+ writeb((tmp & (~ECC_MODE)) | oob_ecc_mode, info->reg + NFCR9_ECC_BCH_CTRL);
+
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) | OOB_READ, info->reg + NFCRd_OOB_CTRL);
+ writew(DPAHSE_DISABLE|NFC_TRIGGER|OLD_CMD, info->reg + NFCR1_COMCTRL);
+ ret = NFC_WAIT_IDLE(mtd);
+ if (ret)
+ printk("hw encode oob idle time out\n");
+
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & (~OOB_READ), info->reg + NFCRd_OOB_CTRL);
+
+ /* restore the original (stronger) data ECC mode */
+ if (oob_ecc_mode != ecc_mode)
+ writeb(tmp, info->reg + NFCR9_ECC_BCH_CTRL);
+
+ return ret;
+}
+
+/************************Johnny Liu****************************************************/
+/*
+ * wmt_multi_plane_read - read one logical page that is striped across two
+ * planes (plane 0 data into buf, plane 1 into buf+realwritesize).
+ *
+ * Serves the read from the write cache when possible, maps the logical
+ * page to the physical two-plane addresses, and falls back to two
+ * single-plane NAND_CMD_READ0 reads for any plane whose multi-plane read
+ * reported an uncorrectable ECC error (always for Toshiba parts).
+ * Returns 0 on success, 1 if a plane read failed.
+ */
+static int wmt_multi_plane_read(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+/*
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+ int div = mtd->erasesize / mtd->writesize;
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+
+ wmt_nand_read_page(mtd, chip, buf, page);
+
+ info->oper_step = 1;
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page+div);
+
+ wmt_nand_read_page(mtd, chip, buf+mtd->realwritesize, page+div);
+
+ info->oper_step = 0;
+
+ return 0;
+*/
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int tmp, ret = 0, plane_0_uncor_err = 0, plane_1_uncor_err = 0;
+
+ info->cur_lpage = page;
+
+ /* serve from the RAM write cache when the page was just written */
+ tmp = cache_read_data(mtd, chip, page, buf);
+ if (!tmp) {
+ //printk("re lpg=0x%x from cache\n", page);
+ return 0;
+ }
+
+ /* last 8 blocks hold the BBT: those pages use software randomizing */
+ if (page >= ((mtd->blkcnt - 8)*mtd->pagecnt))
+ mtd->bbt_sw_rdmz = 1;
+ else
+ mtd->bbt_sw_rdmz = 0;
+
+ //printk("re pg=%x bbt_sw_rdmz=%d hold=%x blkcnt=%d\n", page, mtd->bbt_sw_rdmz, ((mtd->blkcnt - 8)*mtd->pagecnt), mtd->blkcnt);
+
+ if (chip->cur_chip && prob_end == 1 && (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX) {
+ #ifdef ESLC_READ_WRITE
+ if (page < par4_ofs && second_chip == 0) {
+ //printk("multi read page=%x ",page);
+ page = hynix_eslc_page_address_calculate(mtd, chip, page);
+ //printk("eslc cal page0=%x page1=0x%x \n", (page / mtd->pagecnt) * mtd->pagecnt + page,
+ //(page / mtd->pagecnt) * mtd->pagecnt + page + mtd->pagecnt);
+ if (page < 0)
+ return 0;
+ }
+ #endif
+ }
+
+ page = (page / mtd->pagecnt) * mtd->pagecnt + page;//dan_multi 65->129, 129->257
+ info->unc_bank = 0;
+ info->unc_allFF = 0;
+ if (/*(0xFF&(mtd->id>>24)) != NAND_MFR_MICRON && (0xFF&(mtd->id>>24)) != NAND_MFR_INTEL &&*/ (0xFF&(mtd->id>>24)) != NAND_MFR_TOSHIBA) {
+
+ #ifdef WMT_HW_RDMZ
+ tmp = DIS_BCH_ECC & readb(info->reg + NFCR9_ECC_BCH_CTRL);
+ if (mtd->dwRdmz) {
+ if (mtd->bbt_sw_rdmz || tmp) {
+ if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+ reset_nfc(mtd, NULL, 3);
+ } else
+ nfc_hw_rdmz(mtd, 1);
+ }
+ #endif
+
+ chip->cmdfunc(mtd, MULTI_READ_1CYCLE, -1, page);
+ chip->cmdfunc(mtd, MULTI_READ_2CYCLE, 0x00, page);
+
+ if (info->data_ecc_uncor_err == 0) {
+ //printk("multi read plane0page=%x\n",page);
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ ret = wmt_nand_read_page_noalign(mtd, chip, buf, page);
+ else
+ ret = wmt_nand_read_page(mtd, chip, buf, page);
+ if (ret) {
+ printk("multi read plane0 data fail\n");
+ ret = 1;
+ } else
+ ret = 0;
+ } else
+ plane_0_uncor_err = 1;
+ info->oper_step = 1;
+ info->unc_bank = 0;
+ info->unc_allFF = 0;
+ chip->cmdfunc(mtd, MULTI_READ_2CYCLE, 0x00, page + mtd->pagecnt);
+
+ if (info->data_ecc_uncor_err == 0) {
+ //printk("multi read plane1 page=%x\n", page+mtd->pagecnt);
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ ret = wmt_nand_read_page_noalign(mtd, chip, buf+mtd->realwritesize, page + mtd->pagecnt);
+ else
+ ret = wmt_nand_read_page(mtd, chip, buf+mtd->realwritesize, page + mtd->pagecnt);
+ if (ret) {
+ printk("multi read plane1 data fail\n");
+ ret = 1;
+ } else
+ ret = 0;
+ } else
+ plane_1_uncor_err = 1;
+ } else {
+ /* Toshiba: always use the single-plane fallback path below */
+ plane_0_uncor_err = 1;
+ plane_1_uncor_err = 1;
+ }
+ //print_nand_buffer((uint8_t *)buf, mtd->writesize);
+
+ /* single-plane retry for any plane that failed the multi-plane read */
+ info->oper_step = 0;
+ if (plane_0_uncor_err == 1) {
+ //printk("multi read plane_0_uncor_err\n");
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ ret = wmt_nand_read_page_noalign(mtd, chip, buf, page);
+ else
+ ret = wmt_nand_read_page(mtd, chip, buf, page);
+ if (ret)
+ ret = 1;
+ else
+ ret = 0;
+ }
+ info->oper_step = 1;
+ if (plane_1_uncor_err == 1) {
+ //printk("multi read plane_1_uncor_err\n");
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page + mtd->pagecnt);
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ ret = wmt_nand_read_page_noalign(mtd, chip, buf+mtd->realwritesize, page + mtd->pagecnt);
+ else
+ ret = wmt_nand_read_page(mtd, chip, buf+mtd->realwritesize, page + mtd->pagecnt);
+ if (ret)
+ ret = 1;
+ else
+ ret = 0;
+ }
+ info->oper_step = 0;
+ //printk("mrp=%d=0x%x\n", page, page);
+ //print_nand_buffer((uint8_t *)chip->oob_poi, 48);
+ /*printk(KERN_NOTICE "re sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x PID:%d Comm:%s\n",
+ *(uint32_t *)(chip->oob_poi+ECC_FIFO_0),
+ *(uint32_t *)(chip->oob_poi+4), *(uint32_t *)(chip->oob_poi+8),
+ *(uint32_t *)(chip->oob_poi+12), page,current->pid, current->comm);*/
+ if (ret)
+ printk("----------multi read ret=%d\n", ret);
+ return ret;
+}
+
+/*
+ * wmt_nand_write_page_lowlevel_noalign - low-level page program for page
+ * sizes (in KiB) that are not a power of two: the 24 OOB bytes are
+ * appended to the DMA buffer (padded with 0x55) instead of going through
+ * the ECC FIFO, and the DMA transfer is realwritesize+1024 bytes.
+ *
+ * Handles three cases: single-plane write, first half of a multi-plane
+ * write (datalen == 0), and second half (datalen == writesize).
+ *
+ * BUGFIX: backup[] was declared with 6 elements but reset_nfc() saves and
+ * restores SEVEN registers (indices 0..6, matching its internal
+ * backup1[7]), so backup[6] overflowed the array on the stack. The array
+ * is now sized 7; no other behavior changed.
+ */
+static void wmt_nand_write_page_lowlevel_noalign(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int b2r_stat, backup[7];//, w1, w2, w3;
+ uint8_t *bufpoi = chip->oob_poi;
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "enter in wmt_nand_page_write_lowlevel() writesize %x\n", mtd->realwritesize);
+ #endif
+
+ info->dma_finish = 0;
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ writeb(0x1B, info->reg + NFCR13_INT_MASK);
+
+ /* tag the OOB with the randomizer marker so reads can detect it */
+ if (mtd->dwRdmz == 1) {
+ *(unsigned int *)(bufpoi+20) = *(unsigned int *)wmt_rdmz;
+ }
+
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & 0xF7, info->reg + NFCRd_OOB_CTRL);
+ //memcpy(info->reg+ECC_FIFO_0, bufpoi, 24);
+
+ //print_nand_buffer((uint8_t *)(info->reg+ECC_FIFO_0), 32);
+ if(!chip->realplanenum) {
+ info->datalen = 0;
+ reset_nfc(mtd, backup, 1);
+ chip->write_buf(mtd, buf, mtd->writesize);
+ memcpy(info->dmabuf + mtd->realwritesize, bufpoi, 24);
+ memset(info->dmabuf + mtd->realwritesize+24, 0x55, 24);
+ reset_nfc(mtd, backup, 2);
+ //hw_encode_oob(mtd);
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize+1024, 1, 0, -1);
+ //print_nand_buffer((uint8_t *)(info->dmabuf+6144), 32);print_nand_register(mtd);
+ } else if (chip->realplanenum && info->datalen == 0) {
+ //printk("copybuf 1\n");
+ //w1 = wmt_read_oscr();
+ reset_nfc(mtd, backup, 1);
+ chip->write_buf(mtd, buf, mtd->realwritesize);
+ memcpy(info->dmabuf + mtd->realwritesize, bufpoi, 24);
+ memset(info->dmabuf + mtd->realwritesize+24, 0x55, 24);
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 1)
+ rdmzier_oob((info->dmabuf + mtd->realwritesize), (info->dmabuf + mtd->realwritesize), 1024/4, info->cur_page, mtd->realwritesize/4);
+ //w2 = wmt_read_oscr();
+ reset_nfc(mtd, backup, 2);
+ //hw_encode_oob(mtd);
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);
+ //w3 = wmt_read_oscr();
+ //printk(KERN_DEBUG "w2-w1=%d w3-w1=%d---------------\n",w2-w1, w3-w1);
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize+1024, 1, 0, -1);
+ //print_nand_register(mtd);
+ } else if (info->datalen == mtd->writesize) {
+ //printk("copybuf 2\n");
+ //info->datalen = mtd->realwritesize;
+ //chip->write_buf(mtd, buf, mtd->writesize);
+ memcpy(info->dmabuf, buf+mtd->realwritesize, mtd->realwritesize);
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize+1024, 1, 0, 2);
+ //print_nand_register(mtd);
+ }
+/*printk(KERN_DEBUG "WPG=0x%x : 0x%x 0x%x 0x%x 0x%x\n", info->cur_page, *(uint32_t *)info->dmabuf,
+*((uint32_t *)info->dmabuf+1), *((uint32_t *)info->dmabuf+2), *((uint32_t *)info->dmabuf+3));*/
+ /*if ((info->cur_page%256) == 0)dannier
+ printk(KERN_NOTICE "wr PID:%d Comm:%s sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x\n",
+ current->pid, current->comm, *(uint32_t *)(info->reg+ECC_FIFO_0),
+ *(uint32_t *)(info->reg+ECC_FIFO_1), *(uint32_t *)(info->reg+ECC_FIFO_2),
+ *(uint32_t *)(info->reg+ECC_FIFO_3), info->cur_page);*/
+}
+//extern unsigned int wmt_read_oscr(void);
+/**
+ * wmt_nand_write_page_lowlevel - hardware ecc syndrom based page write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ *
+ * The hw generator calculates the error syndrome automatically. Therefor
+ * we need a special oob layout and handling.
+ *
+ * The 24 OOB bytes are loaded into the ECC FIFO (software-randomized for
+ * BBT-area pages) and hw_encode_oob() generates their ECC before the data
+ * DMA is configured.
+ *
+ * BUGFIX: backup[] was declared with 6 elements but reset_nfc() saves and
+ * restores SEVEN registers (indices 0..6, matching its internal
+ * backup1[7]), so backup[6] overflowed the array on the stack. The array
+ * is now sized 7; no other behavior changed.
+ */
+static void wmt_nand_write_page_lowlevel(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ unsigned int b2r_stat, backup[7];//, w1, w2, w3;
+ uint8_t *bufpoi = chip->oob_poi;
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "enter in wmt_nand_page_write_lowlevel() writesize %x\n", mtd->realwritesize);
+ #endif
+
+ info->dma_finish = 0;
+ b2r_stat = readb(info->reg + NFCRb_NFC_INT_STAT);
+ writeb(B2R|b2r_stat, info->reg + NFCRb_NFC_INT_STAT);
+ writeb(0x1B, info->reg + NFCR13_INT_MASK);
+
+ /* tag the OOB with the randomizer marker so reads can detect it */
+ if (mtd->dwRdmz == 1) {
+ *(unsigned int *)(bufpoi+20) = *(unsigned int *)wmt_rdmz;
+ }
+
+ writeb(readb(info->reg + NFCRd_OOB_CTRL) & 0xF7, info->reg + NFCRd_OOB_CTRL);
+ if (mtd->dwRdmz == 1 && mtd->bbt_sw_rdmz) {
+ //print_nand_buffer((uint8_t *)bufpoi, 24);
+ printk(KERN_NOTICE "wr sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x PID:%d Comm:%s\n",
+ *(uint32_t *)(chip->oob_poi+0),
+ *(uint32_t *)(chip->oob_poi+4), *(uint32_t *)(chip->oob_poi+8),
+ *(uint32_t *)(chip->oob_poi+12), info->cur_page,
+ current->pid, current->comm);
+
+ rdmzier_oob((uint8_t *)(info->reg+ECC_FIFO_0), (uint8_t *)bufpoi, 6, info->cur_page, mtd->realwritesize/4);
+ //print_nand_buffer((uint8_t *)(info->reg+ECC_FIFO_0), 64);
+ } else
+ memcpy(info->reg+ECC_FIFO_0, bufpoi, 24);
+
+
+ //print_nand_buffer((uint8_t *)(info->reg+ECC_FIFO_0), 32);
+ if(!chip->realplanenum) {
+ info->datalen = 0;
+ reset_nfc(mtd, backup, 1);
+ chip->write_buf(mtd, buf, mtd->writesize);
+ reset_nfc(mtd, backup, 2);
+ hw_encode_oob(mtd);
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize, 1, 0, -1);
+ } else if (chip->realplanenum && info->datalen == 0) {
+ //printk("copybuf 1\n");
+ //w1 = wmt_read_oscr();
+ reset_nfc(mtd, backup, 1);
+ chip->write_buf(mtd, buf, mtd->writesize);
+ //w2 = wmt_read_oscr();
+ reset_nfc(mtd, backup, 2);
+ hw_encode_oob(mtd);
+ if (mtd->dwRdmz && mtd->bbt_sw_rdmz == 0)
+ nfc_hw_rdmz(mtd, 1);
+ //w3 = wmt_read_oscr();
+ //printk(KERN_DEBUG "w2-w1=%d w3-w1=%d---------------\n",w2-w1, w3-w1);
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize, 1, 0, -1);
+ //print_nand_register(mtd);
+ } else if (info->datalen == mtd->writesize) {
+ //printk("copybuf 2\n");
+ //info->datalen = mtd->realwritesize;
+ //chip->write_buf(mtd, buf, mtd->writesize);
+ wmt_nfc_dma_cfg(mtd, mtd->realwritesize, 1, 0, 2);
+ //print_nand_register(mtd);
+ }
+//while(info->datalen);
+
+ #if 0
+ if (info->cur_lpage >= 19456/*992768*/) {
+ if (strcmp(current->comm, "yaffs-bg-1") == 0) {
+ printk(KERN_NOTICE "wr PID:%d Comm:%s sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x\n",
+ current->pid, current->comm, *(uint32_t *)(info->reg+ECC_FIFO_0),
+ *(uint32_t *)(info->reg+ECC_FIFO_1), *(uint32_t *)(info->reg+ECC_FIFO_2),
+ *(uint32_t *)(info->reg+ECC_FIFO_3), info->cur_page);
+ #if 0
+ } else if (strcmp(current->comm, "cp") == 0 /*&& *(uint32_t *)(info->reg+ECC_FIFO_2) > 0x1f90*/
+ && lst_chunkid == 0 /*&& *(uint32_t *)(info->reg+ECC_FIFO_2) != (lst_chunkid+1)*/) {
+ printk(KERN_NOTICE "wr PID:%d Comm:%s sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x\n",
+ current->pid, current->comm, *(uint32_t *)(info->reg+ECC_FIFO_0),
+ *(uint32_t *)(info->reg+ECC_FIFO_1), *(uint32_t *)(info->reg+ECC_FIFO_2),
+ *(uint32_t *)(info->reg+ECC_FIFO_3), info->cur_page);
+ lst_chunkid = 11;
+ //#endif
+ } else if (strcmp(current->comm, "cp") == 0 && *(uint32_t *)(info->reg+ECC_FIFO_2) > 0x1f60) {
+ chunk[idx] = *(uint32_t *)(info->reg+ECC_FIFO_2);
+ cpg[idx] = info->cur_page;
+ idx++;
+ } else if (strcmp(current->comm, "sync") == 0) {
+ printk(KERN_NOTICE "wr PID:%d Comm:%s sqNum=0x%x, objId=0x%x, lgcAdr=0x%x Byte=0x%x page=0x%x\n",
+ current->pid, current->comm, *(uint32_t *)(info->reg+ECC_FIFO_0),
+ *(uint32_t *)(info->reg+ECC_FIFO_1), *(uint32_t *)(info->reg+ECC_FIFO_2),
+ *(uint32_t *)(info->reg+ECC_FIFO_3), info->cur_page);
+ if (*(uint32_t *)(info->reg+ECC_FIFO_0) == 0x21 && *(uint32_t *)(info->reg+ECC_FIFO_2) == 0x4)
+ print_nand_buffer((char *)info->dmabuf, mtd->realwritesize);
+ #endif
+ }
+ }
+ #endif
+}
+
+/*
+ * hynix_eslc_mode_change - for Hynix parts, remap a logical page into the
+ * eSLC (enhanced-SLC) area before a write and toggle the chip's eSLC
+ * parameter set (enabled while writing eSLC/BBT pages, restored to the
+ * default otherwise, tracked via the eslc_write flag).
+ *
+ * Returns the (possibly remapped) page number, or -1 when the remapped
+ * address is invalid and the caller should skip the write. Also erases
+ * the eSLC block when a write lands on a half-block boundary.
+ */
+static int hynix_eslc_mode_change(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ if (chip->cur_chip && (chip->cur_chip->nand_id>>24) == NAND_MFR_HYNIX /*&& mtd->dwRetry*/) {
+ #ifdef ESLC_READ_WRITE
+ #ifdef ESLC_DEBUG
+ int ori_page = page;
+ #endif
+ if ((page < par4_ofs && second_chip == 0) || (page >= (mtd->blkcnt-8)*mtd->pagecnt)) {
+ //printk("page=0x%x\n", page);
+ //dump_stack();
+ //while(1);
+ if (page < (mtd->blkcnt-8)*mtd->pagecnt) {
+ page = hynix_eslc_page_address_calculate(mtd, chip, page);
+ if (page < 0)
+ return -1;
+ if (page%(mtd->pagecnt/2) == 0) {
+ if(chip->realplanenum == 0) {
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+ } else if(chip->realplanenum == 1) {
+ wmt_multi_plane_erase(mtd, page);
+ }
+ #ifdef ESLC_DEBUG
+ printk("eslc erase page=0x%x => eslc page = 0x%x when write.\n", ori_page, page);
+ #endif
+ }
+ }
+ if (eslc_write != 2) {
+ eslc_write = 2;
+ chip->cur_chip->set_parameter(mtd, ESLC_MODE, ECC_ERROR_VALUE);
+ #ifdef ESLC_DEBUG
+ printk(KERN_WARNING "page=0x%x----ENABLE ESLC", ori_page);
+ if (page >= (mtd->blkcnt-8)*mtd->pagecnt) {
+ printk(KERN_WARNING "(BBT) page%x,bbtpage=%x pagecnt=%d, blkcnt=%d\n", page, (mtd->blkcnt-8)*mtd->pagecnt,mtd->pagecnt, mtd->blkcnt);
+ dump_stack();
+ } else
+ printk(KERN_WARNING "\n");
+ #endif
+ }
+ } else if (eslc_write == 2) {
+ chip->cur_chip->set_parameter(mtd, ESLC_MODE, DEFAULT_VALUE);
+ eslc_write = 0;
+ #ifdef ESLC_DEBUG
+ printk(KERN_NOTICE "page=0x%x****DIS ESLC\n", page);
+ #endif
+ }
+ #endif
+ }
+ return page;
+}
+
+/*
+ * cache_read_data - serve a page read from the in-RAM write cache.
+ * @buf: destination for the page data; may be NULL to fetch only the
+ *       32 OOB bytes into chip->oob_poi.
+ *
+ * Returns 0 on a cache hit (data/OOB copied out), 1 on a miss or when
+ * the cache is unavailable (still probing, or never allocated).
+ *
+ * BUGFIX: the original evaluated info->wr_page[cache_index] before its
+ * "page >= 0" guard in the same && expression, so a negative page
+ * produced a negative index and an out-of-bounds read. Negative pages
+ * are now rejected before the index is derived; behavior for valid
+ * pages is unchanged.
+ */
+int cache_read_data(struct mtd_info *mtd, struct nand_chip *chip, int page, const uint8_t *buf)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int cache_index;
+
+ if (prob_end == 0)
+ return 1;
+
+ if (wr_cache == NULL)
+ return 1;
+
+ if (page < 0)
+ return 1;
+
+ cache_index = page%mtd->pagecnt;
+ cache_index %= WR_BUF_CNT;
+
+ if (info->wr_page[cache_index] == page) {
+ if (buf)
+ memcpy((char *)buf, wr_cache+(cache_index*(mtd->writesize+32)), mtd->writesize);
+ memcpy(chip->oob_poi, wr_cache+(cache_index*(mtd->writesize+32)) + mtd->writesize, 32);
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * cache_write_data - remember the most recently written pages (data plus
+ * 32 OOB bytes) in the RAM write cache so cache_read_data() can serve
+ * them back without touching the flash.
+ * No-op while probing (prob_end == 0) or when no cache was allocated.
+ * NOTE(review): assumes page >= 0 — a negative page would yield a
+ * negative cache index; confirm callers never pass one.
+ */
+void cache_write_data(struct mtd_info *mtd, struct nand_chip *chip, int page, const uint8_t *buf)
+{
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ int cache_index;
+
+ cache_index = page%mtd->pagecnt;
+ cache_index %= WR_BUF_CNT;
+
+ if (wr_cache == NULL)
+ return;
+
+ /* restart the cache at every block boundary */
+ if ((page%mtd->pagecnt) == 0 && prob_end == 1)
+ init_wr_cache(mtd);
+
+ if (prob_end == 1) {
+ info->wr_page[cache_index] = page;//printk("wr-cache lpage[%d]=0x%x\n", cache_index, page);
+ memcpy(wr_cache+(cache_index*(mtd->writesize+32)), buf, mtd->writesize);
+ memcpy(wr_cache+(cache_index*(mtd->writesize+32)) + mtd->writesize, chip->oob_poi, 32);
+ }
+}
+
+/*
+ * wmt_nand_write_page - single-plane page program entry point.
+ *
+ * Caches the page in RAM, applies the Hynix eSLC remap, randomizes the
+ * data (software for BBT-area pages, hardware otherwise), drives the
+ * low-level DMA write, then issues SEQIN/PAGEPROG and checks the chip
+ * status. Returns 0 on success, -EIO on program failure.
+ */
+static int wmt_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+const uint8_t *buf, int page, int cached, int raw)
+{
+ int status;
+ uint8_t *tmp_buf = (uint8_t *)buf;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ #ifdef NAND_DEBUG
+ printk(KERN_NOTICE "enter in wmt_nand_write_page() raw = %d\n", raw);
+ #endif
+
+ cache_write_data(mtd, chip, page, buf);
+ info->cur_lpage = page;
+
+ /* NOTE(review): the read path (wmt_multi_plane_read) uses '>=' for this
+ * BBT-area test — confirm the boundary page is handled consistently. */
+ if (page > ((mtd->blkcnt - 8)*mtd->pagecnt))
+ mtd->bbt_sw_rdmz = 1;
+ else
+ mtd->bbt_sw_rdmz = 0;
+
+ page = hynix_eslc_mode_change(mtd, chip, page);
+
+ if (page < 0)
+ return 0;
+
+ info->cur_page = page;
+ wmb();
+
+ /* software-randomize BBT-area pages into a scratch buffer; otherwise
+ * let the controller randomize in hardware */
+ if (mtd->dwRdmz) {
+ if (mtd->bbt_sw_rdmz) {
+ if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+ reset_nfc(mtd, NULL, 3);
+ tmp_buf = buf_rdmz;
+ memcpy(tmp_buf, buf, mtd->realwritesize);//print_nand_buffer(tmp_buf, 64);
+ rdmzier(tmp_buf, mtd->realwritesize/4, page);//print_nand_buffer(tmp_buf, 64);
+ } else
+ nfc_hw_rdmz(mtd, 1);
+ }
+
+ info->datalen = 0;
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ wmt_nand_write_page_lowlevel_noalign(mtd, chip, tmp_buf);
+ else
+ chip->ecc.write_page(mtd, chip, tmp_buf);
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+ status = nand_pdma_handler(mtd);
+ nand_free_pdma(mtd);
+ if (status)
+ printk(KERN_ERR "check write pdma handler status= %x \n", status);
+
+ /*
+ * * * Cached progamming disabled for now, Not sure if its worth the
+ * * * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s)
+ * * */
+ cached = 0;
+
+ if (!cached || !(chip->options & NAND_CACHEPRG)) {
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ #ifdef WMT_HW_RDMZ
+ if (mtd->dwRdmz) {
+ //nfc_hw_rdmz(mtd, 1);
+ writeb(0, info->reg + NFCR4_COMPORT3_4);
+ }
+ #endif
+
+ status = chip->waitfunc(mtd, chip);
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+ /*
+ * * See if operation failed and additional status checks are
+ * * available
+ * *
+ */
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+ status = chip->errstat(mtd, chip, FL_WRITING, status, page);
+
+ if (status & NAND_STATUS_FAIL)
+ goto GO_EIO;//return -EIO;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
+
+ #ifdef WMT_HW_RDMZ
+ if (mtd->dwRdmz) {
+ if (mtd->bbt_sw_rdmz) {
+ if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+ reset_nfc(mtd, NULL, 3);
+ tmp_buf = buf_rdmz;
+ memcpy(tmp_buf, buf, mtd->realwritesize);
+ rdmzier(tmp_buf, mtd->realwritesize/4, page);
+ } else
+ nfc_hw_rdmz(mtd, 1);
+ writeb(0, info->reg + NFCR4_COMPORT3_4);
+ }
+ #endif
+
+ status = chip->waitfunc(mtd, chip);
+ }
+
+ #ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+ /* Send command to read back the data */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ if (chip->verify_buf(mtd, buf, mtd->realwritesize))
+ goto GO_EIO;//return -EIO;
+ #endif
+ return 0;
+
+GO_EIO:
+ return -EIO;
+}
+int abcc; /* NOTE(review): debug leftover, referenced only in commented-out code */
+/*
+ * wmt_multi_plane_program - program one logical page striped across two
+ * planes: first half of buf to `page`, second half to `page + pagecnt`,
+ * using the 0x80/0x11 then 0x80-or-0x81/0x10 multi-plane sequence
+ * (Micron uses 0x80 for the second plane, others 0x81).
+ * Records a failing page/status in chip->status_plane[] for retry logic.
+ * Returns 0 on success, -EIO on program failure.
+ */
+static int wmt_multi_plane_program(struct mtd_info *mtd, struct nand_chip *chip,
+const uint8_t *buf, int page, int cached, int raw)
+{
+ int status, page_plane1;
+
+ uint8_t *tmp_buf = (uint8_t *)buf;
+ int pagecnt = mtd->pagecnt, p1;
+ struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+ #ifdef ESLC_DEBUG
+ int p0 = page;
+ #endif
+
+ cache_write_data(mtd, chip, page, buf);//logical address
+ info->cur_lpage = page;
+
+ if (page > ((mtd->blkcnt-8)*mtd->pagecnt))
+ mtd->bbt_sw_rdmz = 1;
+ else
+ mtd->bbt_sw_rdmz = 0;
+
+ //printk("multi program page %d => page1=%d page2=%d\n", page, (page/pagecnt) * pagecnt + page, (page<<1) - (page%pagecnt));
+ p1 = page = hynix_eslc_mode_change(mtd, chip, page);
+
+ if (page < 0)
+ return 0;
+
+ page = (page /pagecnt) * pagecnt + page;//1->1, 128 -> 256, 256->512
+ page_plane1 = page + pagecnt;
+ #ifdef ESLC_DEBUG
+ if (p0 != p1)
+ printk("multi program page 0x%x eslc 0x%x => page1=0x%x page2=0x%x \n", p0, p1, page, page_plane1);
+ #endif
+
+ info->lst_wpage = page;
+ //page_plane1 = hynix_eslc_mode_change(mtd, chip, page_plane1);
+ //printk("mw p1=%x page %x => page plane1=%x\n", p1, page, page_plane1);
+
+ info->cur_page = page;
+ wmb();
+ /* software-randomize both planes for BBT-area pages, else use hw rdmz */
+ if (mtd->dwRdmz) {
+ if (mtd->bbt_sw_rdmz) {
+ if ((RDMZ & readl(info->reg + NFCRf_CALC_RDMZ)) == RDMZ)
+ reset_nfc(mtd, NULL, 3);
+ tmp_buf = buf_rdmz;
+ memcpy(tmp_buf, buf, mtd->writesize);
+ rdmzier(tmp_buf, mtd->realwritesize/4, page);
+ //memcpy(tmp_buf, buf+mtd->realwritesize, mtd->realwritesize);
+ rdmzier(tmp_buf+mtd->realwritesize, mtd->realwritesize/4, page_plane1);
+ } else
+ nfc_hw_rdmz(mtd, 1);
+ }
+
+ info->datalen = 0;
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ wmt_nand_write_page_lowlevel_noalign(mtd, chip, tmp_buf);
+ else
+ chip->ecc.write_page(mtd, chip, tmp_buf);
+ chip->cmdfunc(mtd, 0x80, 0x00, page);
+ status = nand_pdma_handler(mtd);
+ nand_free_pdma(mtd);
+ if (status)
+ printk(KERN_ERR "check write pdma handler status= %x \n", status);
+ /***********************Johnny Liu start**************************************/
+ chip->cmdfunc(mtd, 0x11,-1,-1);
+
+ info->datalen = mtd->writesize;//need
+ info->cur_page = page_plane1;
+
+ /*#ifdef WMT_SW_RDMZ
+ if (mtd->dwRdmz == 1) {
+ tmp_buf = buf_rdmz;
+ //memcpy(tmp_buf, buf+mtd->realwritesize, mtd->realwritesize);
+ //rdmzier(tmp_buf, mtd->realwritesize/4, page_plane1);
+ }
+ #endif
+ #ifdef WMT_HW_RDMZ
+ if (mtd->dwRdmz)
+ nfc_hw_rdmz(mtd, 1);
+ #endif*/
+
+ if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+ wmt_nand_write_page_lowlevel_noalign(mtd, chip, tmp_buf);
+ else
+ chip->ecc.write_page(mtd, chip, tmp_buf);
+ if ((0xFF&(mtd->id>>24)) == NAND_MFR_MICRON)
+ chip->cmdfunc(mtd, 0x80, 0x00, page_plane1);
+ else
+ chip->cmdfunc(mtd, 0x81, 0x00, page_plane1);
+
+ status = nand_pdma_handler(mtd);
+ nand_free_pdma(mtd);
+ if (status)
+ printk(KERN_ERR "check write pdma handler status= %x \n", status);
+ /************************Johnny Liu end*************************************/
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ #ifdef WMT_HW_RDMZ
+ if (mtd->dwRdmz) {
+ //nfc_hw_rdmz(mtd, 1);
+ writeb(0, info->reg + NFCR4_COMPORT3_4);
+ }
+ #endif
+ status = chip->waitfunc(mtd, chip);
+ writeb(0x80, info->reg + NFCR13_INT_MASK);
+
+ /* remember the failing page/status so upper layers can retry/mark bad */
+ if (chip->realplanenum && (status & NAND_STATUS_FAIL)) {
+ printk(KERN_ERR "multi write page=0x%x fail status= %x\n", page, status);
+ //dump_stack();
+ /*if (abcc != 13479) {
+ status = 0xe3;//0xe5;
+ abcc = 13479;
+ printk("write page=%x error abv=%d\n", page, abcc);
+ dump_stack();
+ }*/
+ chip->status_plane[0] = page;
+ chip->status_plane[1] = status;
+ }
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat)) {
+ printk(KERN_ERR "write fail status= %x\n", status);
+ status = chip->errstat(mtd, chip, FL_WRITING, status, page);
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+/* NOTE(review): dead code — multi-plane and copy-back helpers compiled
+ * out via #if 0; kept for reference only. */
+#if 0
+static int wmt_multi_plane_copy(struct mtd_info *mtd, struct nand_chip *chip,
+int source, int des)
+{
+// printk("\n copy data from %d to %d",source, des);
+ unsigned int page = 0;
+
+ int div = mtd->erasesize / mtd->writesize;
+ page = (source / div) * div + source;
+ chip->cmdfunc(mtd, MULTI_COPY_1CYCLE, 0x00, page);
+
+ chip->cmdfunc(mtd, MULTI_COPY_2CYCLE, 0x00, page);
+ chip->cmdfunc(mtd, MULTI_COPY_2CYCLE, 0x00, page + div);
+
+ page = (des / div) * div + des;
+ chip->cmdfunc(mtd, MULTI_COPY_3CYCLE, 0x00, page);
+
+ return 0;
+}
+static int wmt_nand_copy_page(struct mtd_info *mtd, struct nand_chip *chip,
+int source, int des)
+{
+ unsigned int page = 0;
+ //int status = -1;
+ //printk("\n copy data from %d to %d", source, des);
+ //First, we calculate the source page
+ page = source;
+ //Copy back read cycle
+ chip->cmdfunc(mtd, COPY_BACK_1CYCLE, 0x00, page);
+
+ //Second, we calculate the des page
+ page = des;
+ //Copy back program cycle
+ chip->cmdfunc(mtd, COPY_BACK_2CYCLE, 0x00, page);
+ return 0;
+}
+#endif
+
+/* NOTE(review): dead code — wmt_errstat() is compiled out via #if 0 and
+ * kept for reference only. */
+#if 0
+/**
+ * wmt_errstat - perform additional error status checks
+ * @mtd: MTD device structure
+ * @this: NAND chip structure
+ * @state: state or the operation
+ * @status: status code returned from read status
+ * @page: startpage inside the chip, must be called with (page & this->pagemask)
+ *
+ * Perform additional error status checks on erase and write failures
+ * to determine if errors are correctable. For this device, correctable
+ * 1-bit errors on erase and write are considered acceptable.
+ *
+ *
+ */
+static int wmt_errstat(struct mtd_info *mtd, struct nand_chip *this,
+ int state, int status, int page)
+{
+ int er_stat = 0;
+ int rtn, retlen;
+ size_t len;
+ uint8_t *buf;
+ int i;
+
+ this->cmdfunc(mtd, NAND_CMD_STATUS_CLEAR, -1, -1);
+
+ if (state == FL_ERASING) {
+
+ for (i = 0; i < 4; i++) {
+ if (!(status & 1 << (i + 1)))
+ continue;
+ this->cmdfunc(mtd, (NAND_CMD_STATUS_ERROR + i + 1),
+ -1, -1);
+ rtn = this->read_byte(mtd);
+ this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
+
+ /* err_ecc_not_avail */
+ //if (!(rtn & ERR_STAT_ECC_AVAILABLE))
+ //er_stat |= 1 << (i + 1);
+ }
+
+ } else if (state == FL_WRITING) {
+
+ unsigned long corrected = mtd->ecc_stats.corrected;
+
+ /* single bank write logic */
+ this->cmdfunc(mtd, NAND_CMD_STATUS_ERROR, -1, -1);
+ rtn = this->read_byte(mtd);
+ this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
+
+ if (!(rtn & ERR_STAT_ECC_AVAILABLE)) {
+ /* err_ecc_not_avail */
+ er_stat |= 1 << 1;
+ goto out;
+ }
+
+ len = mtd->writesize;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf) {
+ er_stat = 1;
+ goto out;
+ }
+
+ /* recovery read */
+ rtn = nand_do_read(mtd, page, len, &retlen, buf);
+
+ /* if read failed or > 1-bit error corrected */
+ if (rtn || (mtd->ecc_stats.corrected - corrected) > 1)
+ er_stat |= 1 << 1;
+ kfree(buf);
+ }
+out:
+ rtn = status;
+ if (er_stat == 0) { /* if ECC is available */
+ rtn = (status & ~NAND_STATUS_FAIL); /* clear the error bit */
+ }
+
+ return rtn;
+}
+#endif
+
+/*
+ * wmt_nand_init_chip
+ *
+ * Bind the WMT-specific ECC geometry and page-operation callbacks to a
+ * single nand_chip instance.  With hardware ECC enabled the step size,
+ * ECC bytes and step count come from the pre-computed ECC_size info;
+ * multi-plane devices get the multi-plane program/read/erase handlers.
+ */
+static void wmt_nand_init_chip(struct mtd_info *mtd,
+struct ECC_size_info *ECC_size)
+{
+	struct nand_chip *chip = mtd->priv;
+
+	if (!hardware_ecc) {
+		/* No controller ECC: fall back to software ECC. */
+		chip->ecc.mode = NAND_ECC_SOFT;
+		return;
+	}
+
+	/* One ECC step per bank, sized from the detected page geometry. */
+	chip->ecc.size = (mtd->realwritesize/ECC_size->banks);
+	chip->ecc.bytes = ECC_size->ECC_bytes;
+	chip->ecc.steps = ECC_size->banks;
+
+	/* Default single-plane handlers. */
+	chip->write_page = wmt_nand_write_page;
+	chip->ecc.write_page = wmt_nand_write_page_lowlevel;
+	chip->ecc.write_oob = wmt_nand_write_oob;
+	chip->ecc.read_page = wmt_nand_read_page;
+	chip->ecc.read_oob = wmt_nand_read_oob_single;
+
+	/* Non-power-of-two page sizes (in KiB) need the unaligned read path. */
+	if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+		chip->ecc.read_page = wmt_nand_read_page_noalign;
+
+	chip->ecc.read_bb_oob = wmt_nand_read_bb_oob;
+	chip->erase_cmd = wmt_single_plane_erase;
+
+	/* Multi-plane parts override the page/oob/erase handlers. */
+	if (chip->realplanenum) {
+		chip->write_page = wmt_multi_plane_program;
+		chip->ecc.read_page = wmt_multi_plane_read;
+		chip->erase_cmd = wmt_multi_plane_erase;
+		chip->ecc.write_oob = wmt_nand_write_oob_plane;
+		chip->ecc.read_oob = wmt_nand_read_oob_plane;
+		chip->ecc.read_bb_oob = wmt_nand_read_bb_oob_plane;
+	}
+
+	chip->ecc.hwctl = wmt_nand_enable_hwecc;
+}
+
+
+/*
+ * wmt_nand_remove
+ *
+ * Platform-driver remove hook: releases the MTD (and its partitions),
+ * then the register/memory resources, then the driver state itself,
+ * and finally the randomizer bounce buffer and the /proc entry.
+ */
+static int wmt_nand_remove(struct platform_device *pdev)
+{
+	struct wmt_nand_info *info = dev_get_drvdata(&pdev->dev);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+	if (!info)
+		return 0;
+
+	/* Release the MTD and its partitions before freeing resources. */
+	if (info->mtds) {
+		nand_release(&info->mtds->mtd);
+		kfree(info->mtds);
+	}
+
+	/* Registers were never ioremap()ed, so just drop the pointer. */
+	if (info->reg)
+		info->reg = NULL;
+
+	if (info->area) {
+		release_resource(info->area);
+		kfree(info->area);
+		info->area = NULL;
+	}
+
+	kfree(info);
+
+	if (buf_rdmz)
+		vfree(buf_rdmz);
+	remove_proc_entry(NANDINFO, NULL);
+	return 0;
+}
+
+#if 0
+/*
+ * wmt_recovery_call (DISABLED, compiled out by #if 0; a live version is
+ * declared extern above wmt_nand_probe)
+ *
+ * Reboot-notifier callback.  First half: for Hynix chips with a retry
+ * context, restore the default read-retry register set before reboot.
+ * NOTE(review): everything after the first "return NOTIFY_DONE;" is
+ * unreachable, and the recovery branch uses undeclared variables
+ * (err/ret/ret1) — this block would not compile if re-enabled as-is.
+ */
+static int wmt_recovery_call(struct notifier_block *nb, unsigned long code, void *_cmd)
+{
+	struct mtd_info *mtd;
+	struct nand_chip *chip;
+
+	mtd = container_of(nb, struct mtd_info, reboot_notifier);
+	chip = (struct nand_chip *)mtd->priv;
+	if(chip->cur_chip && (((mtd->id >>24)&0xff) == NAND_MFR_HYNIX)) {
+		auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+		#ifdef RETRY_DEBUG
+		printk("current try times: %d\n", chip->cur_chip->cur_try_times);
+		#endif
+		chip->select_chip(mtd, 0);
+		/* Restore the chip's default read-retry parameters. */
+		chip->cur_chip->set_parameter(mtd, READ_RETRY_MODE, DEFAULT_VALUE);
+		chip->select_chip(mtd, -1);
+	}
+	return NOTIFY_DONE;
+
+	/* --- unreachable from here on --- */
+	mtd = container_of(nb, struct mtd_info, reboot_notifier);
+
+	if((code == SYS_RESTART) && _cmd) {
+		char *cmd = _cmd;
+		if (!strcmp(cmd, "recovery")) {
+			/* On "reboot recovery": erase the data partition. */
+			err = search_mtd_table("android-data", &ret1);
+			ret = (int)ret1;
+			if (!err) {
+				struct erase_info einfo;
+				loff_t to;
+				memset(&einfo, 0, sizeof(einfo));
+				to = nand_partitions[ret].offset;
+				einfo.mtd = mtd;
+				einfo.addr = (unsigned long)to;
+				einfo.len = nand_partitions[ret].size;
+
+				ret = nand_erase_nand(mtd, &einfo, 0xFF);
+				if (ret < 0)
+					printk("enand_erase_nand result is %x\n",ret);
+			}
+
+			/* ...and the cache partition. */
+			err = search_mtd_table("android-cache", &ret1);
+			ret = (int)ret1;
+			if (!err) {
+				struct erase_info einfo;
+				loff_t to;
+				memset(&einfo, 0, sizeof(einfo));
+				to = nand_partitions[ret].offset;
+				einfo.mtd = mtd;
+				einfo.addr = (unsigned long)to;
+				einfo.len = nand_partitions[ret].size;
+
+				ret = nand_erase_nand(mtd, &einfo, 0xFF);
+				if (ret < 0)
+					printk("enand_erase_nand result is %x\n",ret);
+			}
+		}
+	}
+	return NOTIFY_DONE;
+}
+#endif
+
+/**********************************************************************
+Name		: nfc_pdma_isr
+Function	: PDMA transfer-complete interrupt handler.  Masks further
+		  DMA interrupts, counts the completion and wakes the
+		  thread waiting on info->done_data.
+Calls		: complete
+Called by	: registered for IRQ_NFC_DMA in wmt_nand_probe
+Parameter	: irq - interrupt number; dev_id - struct wmt_nand_info *
+Author		: Dannier Chen
+History	:
+***********************************************************************/
+static irqreturn_t nfc_pdma_isr(int irq, void *dev_id)
+{
+	struct wmt_nand_info *info = (struct wmt_nand_info *)dev_id;
+	struct mtd_info *mtd = &info->mtds->mtd;
+	disable_irq_nosync(irq);
+	/* Mask DMA interrupts until the next transfer is armed. */
+	writel(0, info->reg + NFC_DMA_IER);
+	wmb();
+	info->dma_finish++;
+	/* A NULL done_data means no waiter was armed (or it was already
+	 * completed) — dump controller state for debugging, then continue. */
+	WARN_ON(info->done_data == NULL);
+	if (info->done_data == NULL) {
+		printk(" pdmaisr finish pointer is null info->dma_finish=%d\n", info->dma_finish);
+		print_nand_register(mtd);
+		dump_stack();
+	}
+	if (info->done_data != NULL) {
+		complete(info->done_data);
+		info->done_data = NULL;
+	}
+	enable_irq(irq);
+
+	return IRQ_HANDLED;
+}
+
+/**********************************************************************
+Name		: nfc_regular_isr
+Function	: Main NFC interrupt handler.  Dispatches on the command
+		  that armed the interrupt (info->isr_cmd):
+		    0    - page read: wait for ECC-correct + BCH-error
+		           flags, then run the BCH data correction path
+		           (last-bank vs whole-page, aligned vs unaligned);
+		    0x50 - OOB read: same wait, then OOB (redundant-area)
+		           correction;
+		    else - erase/program completion: acknowledge the B2R
+		           ("busy to ready") status and wake the waiter.
+Calls		: bch_data_ecc_correct*, bch_redunt_ecc_correct, complete
+Called by	: registered for IRQ_NFC in wmt_nand_probe
+Parameter	: irq - interrupt number; dev_id - struct wmt_nand_info *
+Author		: Dannier Chen
+History	:
+***********************************************************************/
+irqreturn_t nfc_regular_isr(int irq, void *dev_id)
+{
+
+	struct wmt_nand_info *info = dev_id;
+	struct mtd_info *mtd = &info->mtds->mtd;
+	unsigned int bank_stat1, bank_stat2=0,status = 0, intsts;
+
+	disable_irq_nosync(irq);
+	if (info->isr_cmd == 0) {
+		/* Page-read completion: ECC engine flagged an error. */
+		bank_stat1 = readb(info->reg + NFCRb_NFC_INT_STAT);
+		if (bank_stat1&(ERR_CORRECT | BCH_ERR)) {
+			/* Poll (bounded) until both ERR_CORRECT and BCH_ERR
+			 * are asserted together; bank_stat2 doubles as the
+			 * timeout counter here. */
+			while ((bank_stat1&(ERR_CORRECT|BCH_ERR)) != (ERR_CORRECT|BCH_ERR)) {
+				bank_stat1 = readb(info->reg + NFCRb_NFC_INT_STAT);
+				bank_stat2++;
+				if (bank_stat2 >= 0x10000) {
+					printk("ecc error, but ecc correct not assert ecc status=0x%x\n",bank_stat1);
+					print_nand_register(mtd);
+					break;
+				}
+			}
+			/* Ack the interrupt causes before correcting. */
+			writeb((B2R | ERR_CORRECT | BCH_ERR), info->reg + NFCRb_NFC_INT_STAT);
+			bank_stat2 = readw(info->reg + NFCR9_ECC_BCH_CTRL);
+			#ifdef NAND_DEBUG
+			printk(KERN_NOTICE" BCH Read data ecc eror page_addr:%x cmd=%d\n", info->cur_page, info->isr_cmd);
+			#endif
+			/* BANK_DR (or a pending OOB ECC error) selects the
+			 * last-bank correction path. */
+			if ((bank_stat2 & BANK_DR) || info->oob_ecc_error == 0x50) {
+				if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+					bch_data_last_bk_ecc_correct_noalign(mtd);
+				else
+					bch_data_last_bk_ecc_correct(mtd);
+			} else {
+				if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1)
+					bch_data_ecc_correct_noalign(mtd);
+				else
+					bch_data_ecc_correct(mtd);
+			}
+		} else {
+			printk("read page error but not ecc error sts=0x%x\n",bank_stat1);
+			print_nand_register(mtd);
+		}
+	} else if (info->isr_cmd == 0x50) {
+		/* OOB-read completion. */
+		wmt_wait_nfc_ready(info);
+		bank_stat1 = readb(info->reg + NFCRb_NFC_INT_STAT);
+		if (bank_stat1&(ERR_CORRECT | BCH_ERR)) {
+			while ((bank_stat1&(ERR_CORRECT|BCH_ERR)) != (ERR_CORRECT|BCH_ERR)) {
+				bank_stat2++;
+				bank_stat1 = readb(info->reg + NFCRb_NFC_INT_STAT);
+				if (bank_stat2 >= 0x10000) {
+					printk("oob ecc error, but ecc correct not assert ecc status=0x%x\n",bank_stat1);
+					print_nand_register(mtd);
+					break;
+				}
+			}
+			bank_stat2 = readb(info->reg + NFCRd_OOB_CTRL)&OOB_READ;
+			if (!bank_stat2)
+				printk("oob cmd error, but oob flag is not set\n");
+			bch_redunt_ecc_correct(mtd);
+		}
+		writeb((B2R | ERR_CORRECT | BCH_ERR), info->reg + NFCRb_NFC_INT_STAT);
+		status = NFC_WAIT_IDLE(mtd);
+		if (status)
+			printk("B2R isr not ecc error occurs, but idle fail\n");
+		WARN_ON(info->done_data == NULL);
+		complete(info->done_data);
+		info->done_data = NULL;
+	} else /*if (info->isr_cmd != 0 && info->isr_cmd != 0x50) */{
+		/* only erase/write operation enter for B2R interrupt */
+		intsts = readb(info->reg + NFCRb_NFC_INT_STAT);
+		if (intsts&B2R) {
+			writeb(B2R, info->reg + NFCRb_NFC_INT_STAT);
+			if (readb(info->reg + NFCRb_NFC_INT_STAT) & B2R)
+				printk("[nfc_isr] erase/write cmd B2R staus can't clear\n");
+		} else
+			printk("[nfc_isr] erase/write cmd B2R staus not assert\n");
+
+		/* Sanity-check that the busy interrupt mask looks as expected. */
+		status = (readb(info->reg + NFCR13_INT_MASK)&0xFF);
+		if ((status&0x1C) != 0x18) {
+			printk("[nfc_isr] isr is not check busy interrup =0x%x\n", status);
+			dump_stack();
+			print_nand_register(mtd);
+		}
+
+		WARN_ON(info->done_data == NULL);
+		complete(info->done_data);
+		info->done_data = NULL;
+	}
+	enable_irq(irq);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * wmt_set_logo_offset
+ *
+ * Publish the byte offsets of the "u-boot-logo" and "kernel-logo"
+ * partitions to the firmware environment (wmt.nfc.mtd.*), so the boot
+ * loader can find them.  For each partition: sum the sizes of all
+ * preceding entries in nand_partitions[], compare against the current
+ * env value, and rewrite the variable only when it is missing or stale.
+ */
+static void wmt_set_logo_offset(void)
+{
+	int ret1;
+	int err = 0, ret = 0, status = 0, i;
+	unsigned char varval[100], tmp[100];
+	unsigned int varlen;
+	unsigned long long offs_data = 0;
+
+	err = search_mtd_table("u-boot-logo", &ret1);
+	ret = (int) ret1;
+	varlen = 100;
+	status = wmt_getsyspara("wmt.nfc.mtd.u-boot-logo", tmp, &varlen);
+	/* Offset = sum of all partitions before this one. */
+	for (i = 0; i < ret; i++)
+		offs_data += nand_partitions[i].size;
+	sprintf(varval, "0x%llx", offs_data);
+	/* status 0 => env already correct; 1 => needs (re)write. */
+	if (!status && (strcmp(varval, tmp) == 0))
+		status = 0;
+	else
+		status = 1;
+	if (!err && status) {
+		ret = wmt_setsyspara("wmt.nfc.mtd.u-boot-logo", varval);
+		if (ret)
+			printk(KERN_NOTICE "write u-boot-logo offset to env fail\n");
+	} else if (err)
+		printk(KERN_NOTICE "search u-boot-logo partition fail\n");
+
+	/* Same procedure for the kernel-logo partition. */
+	err = search_mtd_table("kernel-logo", &ret1);
+	ret = (int) ret1;
+	varlen = 100;
+	status = wmt_getsyspara("wmt.nfc.mtd.kernel-logo", tmp, &varlen);
+	offs_data = 0;
+	for (i = 0; i < ret; i++)
+		offs_data += nand_partitions[i].size;
+	sprintf(varval, "0x%llx", offs_data);
+	if (!status && (strcmp(varval, tmp) == 0))
+		status = 0;
+	else
+		status = 1;
+	if (!err && status) {
+		ret = wmt_setsyspara("wmt.nfc.mtd.kernel-logo", varval);
+		if (ret)
+			printk(KERN_NOTICE "write kernel-logo offset to env fail\n");
+	} else if (err)
+		printk(KERN_NOTICE "search kernel-logo partition fail\n");
+
+}
+
+#if 0
+/*
+ * wmt_set_partition_info (DISABLED, compiled out by #if 0)
+ *
+ * Build an "fbparts" description string ("<size>m@<offset>m(<name>),...")
+ * from nand_partitions[] and sync it to the firmware environment, writing
+ * only when the env value is missing or differs.
+ * NOTE(review): "if (&nand_partitions[i])" is always true (address of an
+ * array element), so the else/break branch is dead — would need fixing if
+ * this code is ever re-enabled.
+ */
+static void wmt_set_partition_info(struct nand_chip *chip)
+{
+	int ret = 0, status = 0, i, j;
+	unsigned char varval[256], tmp[256];
+	unsigned int varlen = 256;
+	unsigned int offs_data, size;
+
+	varval[0] = '\0';
+	for (i = 0; i < NUM_NAND_PARTITIONS; i++) {
+		if (&nand_partitions[i]) {
+			offs_data = 0;
+			for (j = 0; j < i; j++)
+				offs_data += (unsigned int)(nand_partitions[j].size>>20);
+			/* Last partition takes whatever space remains. */
+			if (i < (NUM_NAND_PARTITIONS - 1))
+				size = (unsigned int)(nand_partitions[i].size>>20);
+			else
+				size = (unsigned int)(chip->chipsize>>20) - offs_data;
+
+			if (i == 0)
+				sprintf(tmp, "%dm@%dm(%s)", size, offs_data, nand_partitions[i].name);
+			else
+				sprintf(tmp, ",%dm@%dm(%s)", size, offs_data, nand_partitions[i].name);
+			strcat(varval, tmp);
+		} else
+			break;
+	}
+	printk(KERN_DEBUG "fbparts=%s\n", varval);
+	status = wmt_getsyspara("fbparts", tmp, &varlen);
+	if (status) {
+		printk(KERN_DEBUG "fbparts not found varlen=256=>%d\n", varlen);
+		ret = wmt_setsyspara("fbparts", varval);
+	} else {
+		if (strcmp(tmp, varval) != 0) {
+			printk(KERN_DEBUG "tmp=%s\n", tmp);
+			printk(KERN_WARNING "fbparts not sync => update\n");
+			ret = wmt_setsyspara("fbparts", varval);
+		} else
+			printk(KERN_DEBUG "fbparts env compare pass\n");
+	}
+	if (ret)
+		printk(KERN_ERR "set fbparts env fail\n");
+}
+#endif
+
+/*
+ * set_ecc_info
+ *
+ * Translate the chip's required ECC strength (mtd->dwECCBitNum, in bits)
+ * into the controller's ECC-engine mode code, compute the per-bank ECC
+ * layout, program the OOB ECC size register, cache the derived geometry
+ * in the driver info, and finish by wiring up the nand_chip callbacks
+ * (wmt_nand_init_chip) and enabling the BCH engine.
+ */
+void set_ecc_info(struct mtd_info *mtd)
+{
+	unsigned int ecc_bit_mode;
+	struct ECC_size_info ECC_size, *ECC_size_pt;
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+
+	/* Map bit-strength to engine mode: 60->7, 40->6, >16 -> n/4-1,
+	 * otherwise n/4; unsupported strengths map to (unsigned)-1. */
+	ecc_bit_mode = mtd->dwECCBitNum;
+	if (ecc_bit_mode > 40)
+		ecc_bit_mode = (ecc_bit_mode == 60) ? 7 : (-1);
+	else if (ecc_bit_mode > 24)
+		ecc_bit_mode = (ecc_bit_mode == 40) ? 6 : (-1);
+	else
+		ecc_bit_mode = (ecc_bit_mode > 16) ? ((ecc_bit_mode/4) - 1) : (ecc_bit_mode/4);
+
+	info->ECC_mode = ECC_size.ecc_engine = ecc_bit_mode;
+	calculate_ECC_info(mtd, &ECC_size);
+	writew((ECC_size.oob_ECC_bytes<<8) /*+ (ECC_size.unprotect&0xFF)*/, info->reg + NFCR10_OOB_ECC_SIZE);
+	info->oob_ECC_bytes = ECC_size.oob_ECC_bytes;
+	/* Column offsets differ for non-power-of-two page sizes (in KiB). */
+	if ((mtd->pageSizek >> (ffs(mtd->pageSizek)-1)) != 1) {
+		info->last_bank_dmaaddr = info->dmaaddr + mtd->realwritesize;
+		info->oob_col = mtd->realwritesize + (ECC_size.ECC_bytes * ECC_size.banks);
+		info->last_bank_col = info->oob_col;
+	} else {
+		info->last_bank_dmaaddr = info->dmaaddr + mtd->realwritesize - ECC_size.bank_size;
+		info->oob_col = mtd->realwritesize + (ECC_size.ECC_bytes * (ECC_size.banks-1));
+		info->last_bank_col = info->oob_col - ECC_size.bank_size;
+	}
+	info->oob_ECC_mode = ECC_size.oob_ECC_mode;
+	info->oob_ecc_error = 0;
+	info->banks = ECC_size.banks;
+	info->bank_size = ECC_size.bank_size;
+	info->oob_max_bit_error = ECC_size.oob_max_bit_error;
+
+	ECC_size_pt = &ECC_size;
+	wmt_nand_init_chip(mtd, ECC_size_pt);
+
+	printk(KERN_DEBUG "last_bank_dmaaddr=0x%x banks=%d\n", info->last_bank_dmaaddr, info->banks);
+	printk(KERN_DEBUG "oob_col=%d\n", info->oob_col);
+	printk(KERN_DEBUG "last_bank_col=%d\n", info->last_bank_col);
+
+	printk(KERN_NOTICE "BCH ECC %d BIT mode\n", mtd->dwECCBitNum);
+	set_ecc_engine(info, ecc_bit_mode); /* BCH ECC new structure */
+}
+
+/*
+ * set_partition_size
+ *
+ * Fix up the default nand_partitions[] table for the detected chip:
+ *  - Hynix parts get smaller fixed sizes for the first three partitions
+ *    (halved again on single-plane devices);
+ *  - non-power-of-two page sizes (12K / 28K) get page-aligned tables;
+ *  - the "wmt.nand.partition" env variable ("name:size[:name:size...]",
+ *    sizes in hex) may override individual partition sizes, except the
+ *    final data partition which always takes the remaining space;
+ *  - for Hynix, the first four partition boundaries are also cached as
+ *    page numbers (par1_ofs..par4_ofs).
+ */
+void set_partition_size(struct mtd_info *mtd)
+{
+	int ret, index;
+	char varval[256], partition_name[32];
+	int varlen = 256;
+	char *s = NULL, *tmp = NULL;
+	uint64_t part_size = 0;
+	struct nand_chip *chip = mtd->priv;
+
+	if(((mtd->id>>24)&0xff) == NAND_MFR_HYNIX) {
+		if(chip->realplanenum == 1) {
+			nand_partitions[0].size = 0x4000000;
+			nand_partitions[1].size = 0x4000000;
+			nand_partitions[2].size = 0x4000000;
+		} else {
+			nand_partitions[0].size = 0x2000000;
+			nand_partitions[1].size = 0x2000000;
+			nand_partitions[2].size = 0x2000000;
+		}
+	}
+
+	/* Non-power-of-two page sizes need page-multiple partition sizes. */
+	if ((mtd->pageSizek >> (ffs(mtd->pageSizek) - 1)) != 1) {
+		if (mtd->pageSizek == 12) {
+			nand_partitions[0].size = 0x1080000;
+			nand_partitions[1].size = 0x1080000;
+			nand_partitions[2].size = 0x1080000;
+			nand_partitions[3].size = 0x1080000;
+			nand_partitions[4].size = 0x4200000;
+			nand_partitions[5].size = 0x30000000;
+			nand_partitions[6].size = 0x20100000;
+			nand_partitions[7].size = MTDPART_SIZ_FULL;
+		} else if (mtd->pageSizek == 28) {
+			nand_partitions[0].size = 0x3800000;
+			nand_partitions[1].size = 0x3800000;
+			nand_partitions[2].size = 0x3800000;
+			nand_partitions[3].size = 0x1c00000;
+			nand_partitions[4].size = 0x7000000;
+			nand_partitions[5].size = 0x31000000;
+			nand_partitions[6].size = 0x21400000;
+			nand_partitions[7].size = 0x1c000000;
+			nand_partitions[8].size = MTDPART_SIZ_FULL;
+		}
+	}
+
+	/* Optional per-partition size overrides from the boot environment. */
+	ret = wmt_getsyspara("wmt.nand.partition", varval, &varlen);
+	if(ret == 0) {
+		printk("wmt.nand.partition: %s\n", varval);
+		s = varval;
+		while(*s != '\0')
+		{
+			index = NUM_NAND_PARTITIONS;
+			memset(partition_name, 0, 32);
+			get_partition_name(s, &tmp, partition_name);
+			search_mtd_table(partition_name, &index);
+			s = tmp + 1;
+			part_size = simple_strtoul(s, &tmp, 16);
+			s = tmp;
+			if(*s == ':')
+				s++;
+
+			//data can't be resized by uboot env, its size is left whole nand.
+			if((index >= 0) && (index < (NUM_NAND_PARTITIONS-1)) && (part_size < chip->chipsize)) {
+				nand_partitions[index].size = part_size;
+			} else {
+				printk("Invalid parameter \"wmt.nand.partition\". Use default partition size for \"%s\" partition.\n", partition_name);
+			}
+		}
+	}
+
+
+
+	/* Hynix retry handling needs the first four boundaries as page
+	 * numbers: (offset >> 10) / pageSizek == offset / page size. */
+	if(((mtd->id>>24)&0xff) == NAND_MFR_HYNIX) {
+		par1_ofs = nand_partitions[0].size;
+		par2_ofs = par1_ofs + nand_partitions[1].size;
+		par3_ofs = par2_ofs + nand_partitions[2].size;
+		par4_ofs = par3_ofs + nand_partitions[3].size;
+
+		par1_ofs = ((unsigned int )(par1_ofs >> 10))/mtd->pageSizek;
+		par2_ofs = ((unsigned int )(par2_ofs >> 10))/mtd->pageSizek;
+		par3_ofs = ((unsigned int )(par3_ofs >> 10))/mtd->pageSizek;
+		par4_ofs = ((unsigned int )(par4_ofs >> 10))/mtd->pageSizek;
+	}
+
+
+
+	/*min_partition_size = 0;
+	for (i = 0; i < 11; i++)
+		min_partition_size += nand_partitions[i].size;
+	nand_partitions[11].size = chip->chipsize - min_partition_size - (mtd->erasesize * 8);*/
+}
+
+/* init_wr_cache - mark every write-cache slot as unused (page == -1). */
+void init_wr_cache(struct mtd_info *mtd)
+{
+	struct wmt_nand_info *info = wmt_nand_mtd_toinfo(mtd);
+	int slot;
+
+	for (slot = WR_BUF_CNT - 1; slot >= 0; slot--)
+		info->wr_page[slot] = -1;
+}
+
+/*
+ * alloc_write_cache - allocate the shared multi-page write cache
+ * (WR_BUF_CNT slots of page size + 32 spare bytes each).
+ * Returns 0 on success, 1 when the allocation fails.
+ */
+int alloc_write_cache(struct mtd_info *mtd)
+{
+	wr_cache = vmalloc((mtd->writesize+32)*WR_BUF_CNT);
+	if (wr_cache)
+		return 0;
+
+	printk(KERN_ERR"wr_cache=0x%x alloc fail\n", (mtd->writesize+32)*WR_BUF_CNT);
+	return 1;
+}
+
+/*
+ * alloc_rdmz_buffer - allocate the one-page de-randomization bounce
+ * buffer, needed only when the chip uses randomized data (dwRdmz == 1).
+ * Returns 0 on success, 1 on allocation failure.
+ */
+int alloc_rdmz_buffer(struct mtd_info *mtd)
+{
+	if (mtd->dwRdmz != 1)
+		return 0;
+
+	buf_rdmz = vmalloc(mtd->writesize);
+	if (!buf_rdmz) {
+		printk(KERN_ERR"buf_rdmz alloc fail\n");
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * nandinfo_proc_read - legacy read_proc handler for /proc/nandinfo.
+ * Decodes the manufacturer from the high byte of the chip id and prints
+ * it together with both raw id words.
+ */
+static int nandinfo_proc_read(char *page, char **start, off_t off, int count, int *eof, void *data) {
+	char mfr_name[32];
+	int len = 0;
+	int mfr =(mtd_nandinfo->id>>24)&0xff;
+
+	switch(mfr) {
+	case NAND_MFR_SANDISK:
+		strcpy(mfr_name, "Sandisk");
+		break;
+	case NAND_MFR_HYNIX:
+		strcpy(mfr_name, "Hynix");
+		break;
+	case NAND_MFR_TOSHIBA:
+		strcpy(mfr_name, "Toshiba");
+		break;
+	case NAND_MFR_SAMSUNG:
+		strcpy(mfr_name, "Samsung");
+		break;
+	case NAND_MFR_MICRON:
+		strcpy(mfr_name, "Micron");
+		break;
+	case NAND_MFR_INTEL:
+		strcpy(mfr_name, "Intel");
+		break;
+	default:
+		strcpy(mfr_name, "Unknown");
+		break;
+	}
+
+	len = sprintf(page, "Manufacturer : %s\n"
+				"nand id1 : %lu\n"
+				"nand id2 : %lu\n" , mfr_name, mtd_nandinfo->id, mtd_nandinfo->id2);
+	/* FIX: mark EOF so the proc core treats this as a one-shot read
+	 * instead of calling back for more data indefinitely. */
+	*eof = 1;
+	return len;
+}
+
+extern int wmt_recovery_call(struct notifier_block *nb, unsigned long code, void *_cmd);
+/*
+ * wmt_nand_probe - platform probe for the WMT NAND flash controller.
+ *
+ * Bails out early when the board booted from SD/UDISK or when SD1 owns
+ * the shared pins.  Otherwise: allocates the driver state, registers
+ * the regular and PDMA controller IRQs, claims the register resource,
+ * allocates the coherent DMA buffer, wires up the nand_chip callbacks,
+ * scans the chip, registers the MTD partitions, creates /proc/nandinfo
+ * and installs the reboot notifier.
+ *
+ * Fixes vs. the previous revision:
+ *  - the DMA allocation check used && (failure only when the buffer was
+ *    NULL *and* misaligned); it now fails when either is true;
+ *  - dma_free_coherent() now frees the same 40960 bytes that were
+ *    allocated (previously 32000), and only when the buffer exists;
+ *  - the error labels now free the registered IRQs, and the post-IRQ
+ *    failures (area/reg/mtds) unwind through them instead of leaking.
+ */
+static int wmt_nand_probe(struct platform_device *pdev)
+{
+	struct wmt_nand_platform_data *pdata = pdev->dev.platform_data;
+	struct wmt_nand_info *info;
+	struct wmt_nand_mtd *nmtd;
+	struct mtd_info *mtd;
+	static const char *part_parsers[] = {"cmdlinepart", NULL};
+	struct resource *res;
+	int err = 0, ret = 0;
+	int size;
+	unsigned char sd_buf[80];
+	int sd_varlen = 80;
+	char *varname = "wmt.sd1.param";
+	int sd_enable = 0, SD1_function = 0; /*0 :disable 1:enable*/
+
+	buf_rdmz = NULL;
+	wr_cache = NULL;
+	prob_end = 0;
+	eslc_write = 0;
+	pr_debug("wmt_nand_probe(%p)\n", pdev);
+
+	/* NAND is unused when the system booted from SD card or UDISK. */
+	ret = wmt_getsyspara("wmt.boot.dev", sd_buf, &sd_varlen);
+	printk("wmt.boot.dev ret = %d\n", ret);
+	if(!ret && (!strncmp(sd_buf, "TF", 2) || (!strncmp(sd_buf, "UDISK", 5))))
+	{
+		printk("Boot from SD card or udisk card.\n");
+		return -1;
+	}
+
+	/* Read system param to identify host function 0: SD/MMC 1:SDIO wifi.
+	 * SD1 shares pins with the NAND controller. */
+	ret = wmt_getsyspara(varname, sd_buf, &sd_varlen);
+	if (ret == 0) {
+		sscanf(sd_buf,"%d:%d", &sd_enable,&SD1_function);
+		if (sd_enable == 1) {
+			printk(KERN_NOTICE "SD1 enabled => NAND probe disabled\n");
+			return -EINVAL;
+		}
+	}
+
+	*(volatile unsigned int *)(GPIO_BASE_ADDR + 0x200) &= ~(1<<11); /*PIN_SHARE_SDMMC1_NAND*/
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (info == NULL) {
+		dev_err(&pdev->dev, "no memory for flash info\n");
+		err = -ENOMEM;
+		goto exit_error;
+	}
+
+	memzero(info, sizeof(*info));
+	dev_set_drvdata(&pdev->dev, info);
+	/* NOTE(review): the return value was always discarded here; the
+	 * resource is taken from pdev->resource below instead. */
+	platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	ret = request_irq(IRQ_NFC,
+			  nfc_regular_isr,
+			  IRQF_SHARED,
+			  "NFC",
+			  (void *)info);
+	if (ret) {
+		printk(KERN_ALERT "[NFC driver] Failed to register regular ISR!\n");
+		goto unmap;
+	}
+
+	ret = request_irq(IRQ_NFC_DMA,
+			  nfc_pdma_isr,
+			  IRQF_DISABLED,
+			  "NFC",
+			  (void *)info);
+	if (ret) {
+		printk(KERN_ALERT "[NFC driver] Failed to register DMA ISR!\n");
+		goto fr_regular_isr;
+	}
+	spin_lock_init(&info->controller.lock);
+	init_waitqueue_head(&info->controller.wq);
+
+	/* allocate and map the resource; we assume the one resource */
+	res = pdev->resource;
+	size = res->end - res->start + 1;
+
+	info->area = request_mem_region(res->start, size, pdev->name);
+	info->oper_step = 0;
+
+	if (info->area == NULL) {
+		dev_err(&pdev->dev, "cannot reserve register region\n");
+		err = -ENOENT;
+		goto out_free_dma;	/* FIX: was exit_error; free the IRQs */
+	}
+
+	info->device = &pdev->dev;
+	info->reg = (void __iomem *)NF_CTRL_CFG_BASE_ADDR;/*ioremap(res->start, size);*/
+
+	if (info->reg == NULL) {
+		dev_err(&pdev->dev, "cannot reserve register region\n");
+		err = -EIO;
+		goto out_free_dma;	/* FIX: was exit_error; free the IRQs */
+	}
+
+	/* allocate our information (a single chip/set) */
+	size = sizeof(*info->mtds);
+	info->mtds = kmalloc(size, GFP_KERNEL);
+	if (info->mtds == NULL) {
+		dev_err(&pdev->dev, "failed to allocate mtd storage\n");
+		err = -ENOMEM;
+		goto out_free_dma;	/* FIX: was exit_error; free the IRQs */
+	}
+
+	memzero(info->mtds, size);
+
+	/* initialise all possible chips */
+	nmtd = info->mtds;
+	mtd = &nmtd->mtd;
+
+	info->dmabuf = dma_alloc_coherent(&pdev->dev, 40960, &info->dmaaddr, GFP_KERNEL);
+	/* FIX: was "&&" — fail when the allocation failed OR the returned
+	 * address is not 16-byte aligned. */
+	if (!info->dmabuf || (info->dmaaddr & 0x0f)) {
+		err = -ENOMEM;
+		goto out_free_dma;
+	}
+
+	/* Low-level chip callbacks. */
+	nmtd->chip.cmdfunc = wmt_nand_cmdfunc;
+	nmtd->chip.dev_ready = wmt_device_ready;
+	nmtd->chip.read_byte = wmt_read_byte;
+	nmtd->chip.write_buf = wmt_nand_write_buf;
+	nmtd->chip.read_buf = wmt_nand_read_buf;
+	nmtd->chip.select_chip = wmt_nand_select_chip;
+	nmtd->chip.get_para = nand_get_para;
+	nmtd->chip.chip_delay = 20;
+	nmtd->chip.priv = nmtd;
+	nmtd->chip.bbt_options = NAND_BBT_LASTBLOCK | NAND_BBT_USE_FLASH | NAND_BBT_PERCHIP | NAND_BBT_NO_OOB_BBM;
+
+	nmtd->chip.ecc.mode = NAND_ECC_HW;
+
+	#ifdef NAND_DEBUG
+	printk(KERN_NOTICE "initialising (%p, info %p)\n", nmtd, info);
+	#endif
+
+	/* Flash-based bad block table descriptors and retry pattern. */
+	nmtd->chip.bbt_td = &wmt_bbt_main_descr_2048;
+	nmtd->chip.bbt_md = &wmt_bbt_mirror_descr_2048;
+	nmtd->chip.retry_pattern = &wmt_rdtry_descr;
+	nmtd->chip.cur_chip = NULL;
+
+	nmtd->info = info;
+	nmtd->mtd.priv = &nmtd->chip;
+	nmtd->mtd.owner = THIS_MODULE;
+	nmtd->mtd.reboot_notifier.notifier_call = wmt_recovery_call;
+
+	ret = reset_nfc(mtd, NULL, 3);
+	set_ecc_engine(info, 1);
+
+	info->datalen = 0;
+	/* initialise the hardware */
+	wmt_nfc_init(info, &nmtd->mtd);
+	writeb(0xff, info->reg + NFCR12_NAND_TYPE_SEL+1); /* chip disable */
+
+	nmtd->chip.ecc.layout = &wmt_oobinfo_16k;
+	writeb(0x0, info->reg + NFCR11_SOFT_RST);
+
+	nmtd->scan_res = nand_scan(&nmtd->mtd, MAX_CHIP);
+
+	if (nmtd->chip.cur_chip && mtd->dwRetry && ((mtd->id>>24)&0xFF) == NAND_MFR_SANDISK) {
+		/* Activating and initializing Dynamic Read Register */
+		auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+		sandisk_init_retry_register(mtd, nmtd->chip.cur_chip);
+		auto_pll_divisor(DEV_NAND, CLK_DISABLE, 0, 0);
+	}
+
+	if (nmtd->scan_res == 0) {
+		if (pdata)
+			pdata->partitions = nand_partitions;
+
+		ret = mtd_device_parse_register(mtd, part_parsers, NULL,
+				pdata ? pdata->partitions : nand_partitions,
+				pdata ? NUM_NAND_PARTITIONS : NUM_NAND_PARTITIONS);
+
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to add mtd device\n");
+			goto out_free_dma;
+		}
+	}
+
+	nandinfo_proc = create_proc_entry(NANDINFO, 0666, NULL);
+	if(nandinfo_proc == NULL) {
+		printk("Failed to create nandinfo proccess device\n");
+		goto out_free_dma;
+	} else {
+		mtd_nandinfo = mtd;
+	}
+	nandinfo_proc->read_proc = nandinfo_proc_read;
+
+	register_reboot_notifier(&mtd->reboot_notifier);
+
+	/* Final timing setup (DDR vs. non-DDR parts). */
+	auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+	if (!mtd->dwDDR) {
+		writeb(RD_DLY|readb(info->reg + NFCR12_NAND_TYPE_SEL), info->reg + NFCR12_NAND_TYPE_SEL);
+		writel(0x1212, info->reg + NFCR14_READ_CYCLE_PULE_CTRL);
+	} else {
+		while ((*(volatile unsigned long *)(PMCS_ADDR+0x18))&0x7F0038)
+			;
+		*(volatile unsigned long *)PMNAND_ADDR = (*(volatile unsigned long *)PMNAND_ADDR) - 5;
+	}
+	printk("prob_end timing=%x nfcr12%x divisor=0x%x\n",readl(info->reg + NFCR14_READ_CYCLE_PULE_CTRL),
+	readb(info->reg + NFCR12_NAND_TYPE_SEL), *(volatile unsigned long *)PMNAND_ADDR);
+	auto_pll_divisor(DEV_NAND, CLK_DISABLE, 0, 0);
+
+	init_wr_cache(mtd);
+
+	printk(KERN_NOTICE "nand initialised ok\n");
+	prob_end = 1;
+	second_chip = 0;
+	return 0;
+
+out_free_dma:
+	/* FIX: only free when allocated, and with the allocation size
+	 * (40960 — was mismatched at 32000). */
+	if (info->dmabuf)
+		dma_free_coherent(&pdev->dev, 40960, info->dmabuf, info->dmaaddr);
+	/* FIX: release both IRQs registered above (previously leaked). */
+	free_irq(IRQ_NFC_DMA, (void *)info);
+fr_regular_isr:
+	free_irq(IRQ_NFC, (void *)info);
+unmap:
+exit_error:
+	wmt_nand_remove(pdev);
+
+	if (err == 0)
+		err = -EINVAL;
+	return err;
+}
+
+/* PM Support */
+#ifdef CONFIG_PM
+/*
+ * wmt_nand_suspend - PM suspend hook: quiesce the MTD (mtd_suspend
+ * acquires the device), then, when the strap pins indicate NAND boot,
+ * pulse bit 1 of the boot-control register to reset the nand-boot state.
+ */
+int wmt_nand_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct wmt_nand_info *info = dev_get_drvdata(&pdev->dev);
+	struct mtd_info *mtd = &info->mtds->mtd;
+
+	/*nand_suspend->nand_get_device*/
+	mtd_suspend(mtd);
+
+	/* NOTE(review): the 0x400E/0x4008 strap comparison presumably
+	 * identifies NAND-boot configurations — confirm against the SoC
+	 * strap documentation. */
+	if ((STRAP_STATUS_VAL&0x400E) == 0x4008) {
+		auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+		*(volatile unsigned long *)(NF_CTRL_CFG_BASE_ADDR + 0x44) |= (1<<1);
+		printk(KERN_NOTICE "reset nand boot register NF_CTRL_CFG_BASE_ADDR + 0x44\n");
+		*(volatile unsigned long *)(NF_CTRL_CFG_BASE_ADDR + 0x44) &= ~(1<<1);
+	}
+	printk(KERN_NOTICE "wmt_nand_suspend\n");
+	return 0;
+}
+
+/*
+ * wmt_nand_resume - PM resume hook: re-initialize the controller
+ * (soft reset, NFC init, ECC engine, OOB ECC size), send a RESET
+ * command to every chip, restore timing and the hardware randomizer
+ * state, then release the MTD (mtd_resume).
+ */
+int wmt_nand_resume(struct platform_device *pdev)
+{
+	struct wmt_nand_info *info = dev_get_drvdata(&pdev->dev);
+	struct mtd_info *mtd = &info->mtds->mtd;
+	struct wmt_nand_mtd *nmtd;
+	struct nand_chip *chip;
+	unsigned char reset = NAND_CMD_RESET;
+	int i;
+	auto_pll_divisor(DEV_NAND, CLK_ENABLE, 0, 0);
+	if (info) {
+		nmtd = info->mtds;
+		chip = nmtd->mtd.priv;
+		writeb(0x0, info->reg + NFCR11_SOFT_RST);
+		/* initialise the hardware */
+		wmt_nfc_init(info, &nmtd->mtd);
+		set_ecc_engine(info, info->ECC_mode); /* BCH ECC */
+		writew((info->oob_ECC_bytes<<8) /*+ (ECC_size.unprotect&0xFF)*/, info->reg + NFCR10_OOB_ECC_SIZE);
+
+		if ((&nmtd->mtd)->dwDDR)
+			writeb(0x7F, info->reg + NFCR7_DLYCOMP);
+		/* Reset every chip in the array, one chip-select at a time. */
+		wmt_nand_select_chip(&nmtd->mtd, 0);
+		write_bytes_cmd(&nmtd->mtd, 1, 0, 0, (uint8_t *)&reset, NULL, NULL);
+		for (i = 1; i < chip->numchips; i++) {
+			wmt_nand_select_chip(&nmtd->mtd, i);
+			write_bytes_cmd(&nmtd->mtd, 1, 0, 0, (uint8_t *)&reset, NULL, NULL);
+		}
+		wmt_init_nfc(&nmtd->mtd, nmtd->mtd.spec_clk, nmtd->mtd.spec_tadl, 0);
+		wmt_nand_select_chip(&nmtd->mtd, -1);
+
+		/* Re-enable hardware randomization if the chip needs it. */
+		if ((&nmtd->mtd)->dwRdmz) {
+			nfc_hw_rdmz(&nmtd->mtd, 1);
+			writeb(0, info->reg + NFCR4_COMPORT3_4);
+		}
+		printk(KERN_NOTICE "wmt_nand_resume OK\n");
+	} else
+		printk(KERN_NOTICE "wmt_nand_resume error\n");
+
+	auto_pll_divisor(DEV_NAND, CLK_DISABLE, 0, 0);
+
+	/*nand_resume->nand_release_device*/
+	mtd_resume(mtd);
+
+	return 0;
+}
+
+#else /* else of #define PM */
+#define wmt_nand_suspend NULL
+#define wmt_nand_resume NULL
+#endif
+
+/* Platform driver glue: matches the platform device named "nand". */
+struct platform_driver wmt_nand_driver = {
+	.driver.name = "nand",
+	.probe = wmt_nand_probe,
+	.remove = wmt_nand_remove,
+	.suspend = wmt_nand_suspend,
+	.resume = wmt_nand_resume
+};
+
+/* wmt_nand_init - module entry point: register the platform driver. */
+static int __init wmt_nand_init(void)
+{
+	return platform_driver_register(&wmt_nand_driver);
+}
+
+/* wmt_nand_exit - module exit point: unregister the platform driver. */
+static void __exit wmt_nand_exit(void)
+{
+	platform_driver_unregister(&wmt_nand_driver);
+}
+
+/* Module entry/exit registration and metadata. */
+module_init(wmt_nand_init);
+module_exit(wmt_nand_exit);
+
+MODULE_AUTHOR("WonderMedia Technologies, Inc.");
+MODULE_DESCRIPTION("WMT [Nand Flash Interface] driver");
+MODULE_LICENSE("GPL");
diff --git a/ANDROID_3.4.5/drivers/mtd/nand/wmt_nand.h b/ANDROID_3.4.5/drivers/mtd/nand/wmt_nand.h
new file mode 100755
index 00000000..e5692ac7
--- /dev/null
+++ b/ANDROID_3.4.5/drivers/mtd/nand/wmt_nand.h
@@ -0,0 +1,365 @@
+/*++
+ Copyright (c) 2008 WonderMedia Technologies, Inc. All Rights Reserved.
+
+ This PROPRIETARY SOFTWARE is the property of WonderMedia Technologies, Inc.
+ and may contain trade secrets and/or other confidential information of
+ WonderMedia Technologies, Inc. This file shall not be disclosed to any third
+ party, in whole or in part, without prior written consent of WonderMedia.
+
+ THIS PROPRIETARY SOFTWARE AND ANY RELATED DOCUMENTATION ARE PROVIDED AS IS,
+ WITH ALL FAULTS, AND WITHOUT WARRANTY OF ANY KIND EITHER EXPRESS OR IMPLIED,
+ AND WonderMedia TECHNOLOGIES, INC. DISCLAIMS ALL EXPRESS OR IMPLIED WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR
+ NON-INFRINGEMENT.
+
+ Module Name:
+
+ $Workfile: post_nand.c $
+
+ Abstract:
+
+ POST functions, called by main().
+
+ Revision History:
+
+ Dec.04.2008 First created
+ Dec.19.2008 Dannier change coding style and support spi flash boot with nor accessible.
+ $JustDate: 2008/12/19 $
+--*/
+
+#ifndef __NFC_H__
+#define __NFC_H__
+
+/* include/asm-arm/arch-vt8630.h or arch-vt8620.h or include/asm-zac/arch-vt8620.h or arch-vt8630.h */
+/* #define NFC_BASE_ADDR 0xd8009000 */
+
+#define NFCR0_DATAPORT 0x00
+#define NFCR1_COMCTRL 0x04
+#define NFCR2_COMPORT0 0x08
+#define NFCR3_COMPORT1_2 0x0c
+#define NFCR4_COMPORT3_4 0x10
+#define NFCR5_COMPORT5_6 0x14
+#define NFCR6_COMPORT7 0x18
+#define NFCR7_DLYCOMP 0x1c
+#define NFCR8_DMA_CNT 0x20
+#define NFCR9_ECC_BCH_CTRL 0x24
+#define NFCRa_NFC_STAT 0x28
+#define NFCRb_NFC_INT_STAT 0x2c
+#define NFCRc_CMD_ADDR 0x30
+#define NFCRd_OOB_CTRL 0x34
+#define NFCRe_CALC_TADL 0x38
+#define NFCRf_CALC_RDMZ 0x3c
+#define NFCR10_OOB_ECC_SIZE 0x40
+#define NFCR11_SOFT_RST 0x44
+#define NFCR12_NAND_TYPE_SEL 0x48
+#define NFCR13_INT_MASK 0x4c
+#define NFCR14_READ_CYCLE_PULE_CTRL 0x50
+#define NFCR15_IDLE_STAT 0x54
+#define NFCR16_TIMER_CNT_CFG 0x58
+#define NFCR17_ECC_BCH_ERR_STAT 0x5c
+#define NFCR18_ECC_BCH_ERR_POS 0x60
+
+#define ECC_FIFO_0 0x1c0
+#define ECC_FIFO_1 0x1c4
+#define ECC_FIFO_2 0x1c8
+#define ECC_FIFO_3 0x1cc
+#define ECC_FIFO_4 0x1d0
+#define ECC_FIFO_5 0x1d4
+#define ECC_FIFO_6 0x1d8
+#define ECC_FIFO_7 0x1dc
+#define ECC_FIFO_8 0x1e0
+#define ECC_FIFO_9 0x1e4
+#define ECC_FIFO_a 0x1e8
+#define ECC_FIFO_b 0x1ec
+#define ECC_FIFO_c 0x1f0
+#define ECC_FIFO_d 0x1f4
+#define ECC_FIFO_e 0x1f8
+#define ECC_FIFO_f 0x1fc
+
+/*
+ * Layout of one NAND page as seen by the BCH ECC engine. Filled in by
+ * calculate_ECC_info(); the ECCxxbit_* constants below supply the
+ * per-strength bit/byte/unprotect values.
+ */
+struct ECC_size_info{
+ int ecc_engine; /* ECC strength for the data area (ECC1bit..ECC44bit) */
+ int oob_ECC_mode; /* ECC strength used for the OOB (spare) area */
+ int banks; /* number of ECC banks (chunks) per page */
+ int bank_size; /* data bytes per bank — presumably <= MAX_BANK_SIZE */
+ int max_bit_error; /* correctable bits per data bank */
+ int oob_max_bit_error; /* correctable bits in the OOB bank */
+ int ecc_bits_count; /* parity bits per data bank (ECCxx_bit_count) */
+ int oob_ecc_bits_count; /* parity bits for the OOB bank */
+ int bank_offset; /* add data + data ecc ex: 512+8 4-bit engine */
+ int ECC_bytes; /* parity bytes per data bank (ECCxx_byte_count) */
+ int oob_ECC_bytes; /* OOB parity bytes; written to NFCR10_OOB_ECC_SIZE */
+ int unprotect; /* trailing bytes not covered by ECC (ECCxx_unprotect) */
+};
+#define ECC1bit 0
+#define ECC4bit 1
+#define ECC8bit 2
+#define ECC12bit 3
+#define ECC16bit 4
+#define ECC24bitPer1K 5
+#define ECC40bitPer1K 6
+#define ECC60bitPer1K 7
+#define ECC44bitPer1K 7
+#define ECC44bit 8
+#define ECC1bit_bit_count 32
+#define ECC4bit_bit_count 52
+#define ECC8bit_bit_count 104
+#define ECC12bit_bit_count 156
+#define ECC16bit_bit_count 208
+#define ECC24bitPer1K_bit_count 336
+#define ECC40bitPer1K_bit_count 560
+#define ECC60bitPer1K_bit_count 830
+#define ECC44bitPer1K_bit_count 616
+#define ECC44bit_bit_count 576
+#define ECC1bit_byte_count 4
+#define ECC4bit_byte_count 8
+#define ECC8bit_byte_count 16
+#define ECC12bit_byte_count 20
+#define ECC16bit_byte_count 26
+#define ECC24bitPer1K_byte_count 42
+#define ECC40bitPer1K_byte_count 70
+#define ECC60bitPer1K_byte_count 106
+#define ECC44bitPer1K_byte_count 77
+#define ECC44bit_byte_count 72
+#define ECC1bit_unprotect 0
+#define ECC4bit_unprotect 0
+#define ECC8bit_unprotect 50
+#define ECC12bit_unprotect 14
+#define ECC16bit_unprotect 26
+#define ECC24bitPer1K_unprotect 34
+#define ECC40bitPer1K_unprotect 14
+#define ECC60bitPer1K_unprotect 46 //8k+960
+#define ECC44bitPer1K_unprotect 14
+#define ECC44bit_unprotect 72
+#define MAX_BANK_SIZE 1024
+#define MAX_PARITY_SIZE 106
+#define MAX_ECC_BIT_ERROR 60
+/*
+ * NAND PDMA
+ */
+#define NAND_DESC_BASE_ADDR 0x00D00000
+
+#define NFC_DMA_GCR 0x100
+#define NFC_DMA_IER 0x104
+#define NFC_DMA_ISR 0x108
+#define NFC_DMA_DESPR 0x10C
+#define NFC_DMA_RBR 0x110
+#define NFC_DMA_DAR 0x114
+#define NFC_DMA_BAR 0x118
+#define NFC_DMA_CPR 0x11C
+#define NFC_DMA_CCR 0X120
+
+#define NAND_GET_FEATURE 0xEE
+#define NAND_SET_FEATURE 0xEF
+/*
+ * NAND PDMA - DMA_GCR : DMA Global Control Register
+ */
+#define NAND_PDMA_GCR_DMA_EN 0x00000001 /* [0] -- DMA controller enable */
+#define NAND_PDMA_GCR_SOFTRESET 0x00000100 /* [8] -- Software reset */
+
+/*
+ * NAND PDMA - DMA_IER : DMA Interrupt Enable Register
+ */
+#define NAND_PDMA_IER_INT_EN 0x00000001 /* [0] -- DMA interrupt enable */
+/*
+ * NAND PDMA - DMA_ISR : DMA Interrupt Status Register
+ */
+#define NAND_PDMA_IER_INT_STS 0x00000001 /* [0] -- DMA interrupt status */
+/*
+ * NAND PDMA - DMA_DESPR : DMA Descriptor base address Pointer Register
+ */
+
+/*
+ * NAND PDMA - DMA_RBR : DMA Residual Bytes Register
+ */
+#define NAND_PDMA_RBR_End 0x80000000 /* [31] -- DMA descriptor end flag */
+#define NAND_PDMA_RBR_Format 0x40000000 /* [30] -- DMA descriptor format */
+/*
+ * NAND PDMA - DMA_DAR : DMA Data Address Register
+ */
+
+/*
+ * NAND PDMA - DMA_BAR : DMA Branch Address Register
+ */
+
+/*
+ * NAND PDMA - DMA_CPR : DMA Command Pointer Register
+ */
+
+/*
+ * NAND PDMA - DMA_CCR : DMA Context Control Register for Channel 0
+ */
+#define NAND_PDMA_READ 0x00
+#define NAND_PDMA_WRITE 0x01
+#define NAND_PDMA_CCR_RUN 0x00000080
+#define NAND_PDMA_CCR_IF_to_peripheral 0x00000000
+#define NAND_PDMA_CCR_peripheral_to_IF 0x00400000
+#define NAND_PDMA_CCR_EvtCode 0x0000000f
+#define NAND_PDMA_CCR_Evt_no_status 0x00000000
+#define NAND_PDMA_CCR_Evt_ff_underrun 0x00000001
+#define NAND_PDMA_CCR_Evt_ff_overrun 0x00000002
+#define NAND_PDMA_CCR_Evt_desp_read 0x00000003
+#define NAND_PDMA_CCR_Evt_data_rw 0x00000004
+#define NAND_PDMA_CCR_Evt_early_end 0x00000005
+#define NAND_PDMA_CCR_Evt_success 0x0000000f
+
+/*
+ * PDMA Descriptor short
+ */
+/* Short-format PDMA descriptor: one control word + a buffer address.
+ * NOTE(review): bit-field order is compiler-dependent; this layout
+ * assumes the LSB-first allocation of the vendor toolchain — confirm. */
+struct _NAND_PDMA_DESC_S{
+ unsigned int volatile ReqCount : 16; /* bit 0 -15 -Request count */
+ unsigned int volatile i : 1; /* bit 16 -interrupt */
+ unsigned int volatile reserve : 13; /* bit 17-29 -reserved */
+ unsigned int volatile format : 1; /* bit 30 -The descriptor format */
+ unsigned int volatile end : 1; /* bit 31 -End flag of descriptor list*/
+ unsigned int volatile DataBufferAddr : 32;/* bit 31-0 of 2nd word -Data Buffer address */
+};
+
+/*
+ * PDMA Descriptor long
+ */
+/* Long-format PDMA descriptor: control word, buffer address, branch
+ * address and one reserved word (cf. NAND_PDMA_RBR_Format flag).
+ * NOTE(review): bit-field order is compiler-dependent — confirm the
+ * toolchain allocates LSB-first as the bit comments assume. */
+struct _NAND_PDMA_DESC_L{
+ unsigned long volatile ReqCount : 16; /* bit 0 -15 -Request count */
+ unsigned long volatile i : 1; /* bit 16 -interrupt */
+ unsigned long volatile reserve : 13; /* bit 17-29 -reserved */
+ unsigned long volatile format : 1; /* bit 30 -The descriptor format */
+ unsigned long volatile end : 1; /* bit 31 -End flag of descriptor list*/
+ unsigned long volatile DataBufferAddr : 32;/* bit 31-0 -Data Buffer address */
+ unsigned long volatile BranchAddr : 32; /* bit 31-2 -Descriptor Branch address */
+ unsigned long volatile reserve0 : 32; /* bit 31-0 -reserved */
+};
+
+/* NAND interface timing parameters, consumed by wmt_init_nfc().
+ * NOTE(review): units (controller clock cycles) assumed from the divisor
+ * field — confirm against wmt_init_nfc(). T_T* names follow the ONFI
+ * timing symbols (tADL, tWHR, tWB). */
+struct NFC_RW_T {
+ unsigned int T_R_setup; /* read cycle setup time */
+ unsigned int T_R_hold; /* read cycle hold time */
+ unsigned int T_W_setup; /* write cycle setup time */
+ unsigned int T_W_hold; /* write cycle hold time */
+ unsigned int divisor; /* NFC source-clock divisor */
+ unsigned int T_TADL; /* tADL: address-to-data-loading delay */
+ unsigned int T_TWHR; /* tWHR: WE# high to RE# low */
+ unsigned int T_TWB; /* tWB: WE# high to R/B# busy */
+ unsigned int T_RHC_THC; /* tRHC/tHC read/hold cycle time */
+};
+
+/* cfg_1 */
+#define TWHR 0x800
+#define OLD_CMD 0x400 /* enable old command trigger mode */
+#define DPAHSE_DISABLE 0x80 /*disable data phase */
+#define NAND2NFC 0x40 /* direction : nand to controller */
+#define SING_RW 0x20 /* enable single read/write command */
+#define MUL_CMDS 0x10 /* support cmd+addr+cmd */
+#define NFC_TRIGGER 0x01 /* start cmd&addr sequence */
+/*cfg_9 */
+#define READ_RESUME 1 //0x100
+#define BCH_INT_EN 0x60
+#define BANK_DR 0x10
+#define DIS_BCH_ECC 0x08
+#define USE_HW_ECC 0
+#define ECC_MODE 7
+
+/*cfg_a */
+#define NFC_CMD_RDY 0x04
+#define NFC_BUSY 0x02 /* command and data is being transfer in flash I/O */
+#define FLASH_RDY 0x01 /* flash is ready */
+/*cfg_b */
+#define B2R 0x08 /* status form busy to ready */
+#define ERR_CORRECT 0x2
+#define BCH_ERR 0x1
+/*cfg_d */
+#define HIGH64FIFO 8 /* read high 64 bytes fifo */
+#define OOB_READ 0x4 /* calculate the side info BCH decoder */
+#define RED_DIS 0x2 /* do not read out oob area data to FIFO */
+/*cfg_f */
+#define RDMZ 0x10000 /* enable randomizer */
+#define RDMZH 1 /* enable randomizer */
+/*cfg_12 */
+#define PAGE_512 0
+#define PAGE_2K 1
+#define PAGE_4K 2
+#define PAGE_8K 3
+#define PAGE_16K 4
+#define PAGE_32K 5
+#define WD8 0
+#define WIDTH_16 (1<<3)
+#define WP_DISABLE (1<<4) /*disable write protection */
+#define DIRECT_MAP (1<<5)
+#define RD_DLY (1<<6)
+#define TOGGLE (1<<7)
+
+/*cfg_13*/ /* Dannier Add */
+#define B2RMEN 0x08 /* interrupt mask enable of nand flash form busy to ready */
+/*cfg_15 */
+#define NFC_IDLE 0x01
+/*cfg_17 status */
+#define BANK_NUM 0x1F00
+#define BCH_ERR_CNT 0x3F
+/*cfg_18 */
+#define BCH_ERRPOS0 0x3fff
+#define BCH_ERRPOS1 (BCH_ERRPOS0<<16)
+
+#define ADDR_COLUMN 1
+#define ADDR_PAGE 2
+#define ADDR_COLUMN_PAGE 3
+#define WRITE_NAND_COMMAND(d, adr) do { *(volatile unsigned char *)(adr) = (unsigned char)(d); } while (0)
+#define WRITE_NAND_ADDRESS(d, adr) do { *(volatile unsigned char *)(adr) = (unsigned char)(d); } while (0)
+
+
+#define SOURCE_CLOCK 24
+#define MAX_SPEED_MHZ 96
+#define MAX_READ_DELAY 9 /* 8.182 = tSKEW 3.606 + tDLY 4.176 + tSETUP 0.4 */
+#define MAX_WRITE_DELAY 9 /* 8.72 = tDLY 10.24 - tSKEW 1.52*/
+
+#define DMA_SINGNAL 0
+#define DMA_INC4 0x10
+#define DMA_INC8 0x20
+/*#define first4k218 0
+#define second4k218 4314 *//* 4096 + 218 */
+#define NFC_TIMEOUT_TIME (HZ*2)
+
+int nand_init_pdma(struct mtd_info *mtd);
+int nand_free_pdma(struct mtd_info *mtd);
+int nand_alloc_desc_pool(unsigned int *DescAddr);
+int nand_init_short_desc(unsigned int *DescAddr, unsigned int ReqCount, unsigned int *BufferAddr, int End);
+int nand_init_long_desc(unsigned long *DescAddr, unsigned int ReqCount, unsigned long *BufferAddr,
+unsigned long *BranchAddr, int End);
+int nand_config_pdma(struct mtd_info *mtd, unsigned long *DescAddr, unsigned int dir);
+int nand_pdma_handler(struct mtd_info *mtd);
+void nand_hamming_ecc_1bit_correct(struct mtd_info *mtd);
+void bch_data_ecc_correct(struct mtd_info *mtd);
+void bch_redunt_ecc_correct(struct mtd_info *mtd);
+void bch_data_last_bk_ecc_correct(struct mtd_info *mtd);
+void copy_filename (char *dst, char *src, int size);
+int wmt_getsyspara(char *varname, unsigned char *varval, int *varlen);
+int wmt_setsyspara(char *varname, char *varval);
+void calculate_ECC_info(struct mtd_info *mtd, struct ECC_size_info *ECC_size);
+/*unsigned int wmt_bchencoder (unsigned char *data, unsigned char *bch_code, unsigned char bits, unsigned char *bch_codelen, unsigned int encode_len);*/
+int bch_encoder(unsigned int *p_parity, u32 *p_data, u8 bits, u32 datacnt);
+unsigned int Caculat_1b_bch( unsigned int *pariA, unsigned int *bch_GF2, unsigned int din, u8 pari_len, u8 pari_lb);
+int Gen_GF2(u8 bits, unsigned int *buf);
+int nand_get_feature(struct mtd_info *mtd, int addr);
+int nand_set_feature(struct mtd_info *mtd, int cmd, int addrss, int value);
+void rdmzier(uint8_t *buf, int size, int page);
+void rdmzier_oob(uint8_t *buf, uint8_t *src, int size, int page, int ofs);
+int nand_hynix_get_retry_reg(struct mtd_info *mtd, uint8_t *addr, uint8_t *para, int size);
+int nand_hynix_set_retry_reg(struct mtd_info *mtd, int reg);
+int write_bytes_cmd(struct mtd_info *mtd, int cmd_cnt, int addr_cnt, int data_cnt, uint8_t *cmd, uint8_t *addr, uint8_t *data);
+int hynix_read_retry_set_para(struct mtd_info *mtd, int reg);
+int load_hynix_opt_reg(struct mtd_info *mtd, struct nand_chip *chip);
+void wmt_init_nfc(struct mtd_info *mtd, unsigned int spec_clk, unsigned int spec_tadl, int busw);
+void set_ecc_info(struct mtd_info *mtd);
+int alloc_rdmz_buffer(struct mtd_info *mtd);
+int alloc_write_cache(struct mtd_info *mtd);
+void init_wr_cache(struct mtd_info *mtd);
+int cache_read_data(struct mtd_info *mtd, struct nand_chip *chip, int page, const uint8_t *buf);
+void cache_write_data(struct mtd_info *mtd, struct nand_chip *chip, int page, const uint8_t *buf);
+void set_partition_size(struct mtd_info *mtd);
+int get_flash_info_from_env(unsigned int id, unsigned int id2, struct WMT_nand_flash_dev *type);
+int reset_nfc(struct mtd_info *mtd, unsigned int *buf, int step);
+void nfc_hw_rdmz(struct mtd_info *mtd, int on);
+
+
+#define REG_SEED 1
+#define BYTE_SEED 2112
+
+
+
+#endif
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/Kconfig b/ANDROID_3.4.5/drivers/mtd/ubi/Kconfig
index 4dcc752a..1fd786bf 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/Kconfig
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/Kconfig
@@ -27,20 +27,55 @@ config MTD_UBI_WL_THRESHOLD
life-cycle less than 10000, the threshold should be lessened (e.g.,
to 128 or 256, although it does not have to be power of 2).
-config MTD_UBI_BEB_RESERVE
- int "Percentage of reserved eraseblocks for bad eraseblocks handling"
- default 1
- range 0 25
+config MTD_UBI_BEB_LIMIT
+ int "Maximum expected bad eraseblock count per 1024 eraseblocks"
+ default 10
+ range 0 768
help
- If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI
- reserves some amount of physical eraseblocks to handle new bad
- eraseblocks. For example, if a flash physical eraseblock becomes bad,
- UBI uses these reserved physical eraseblocks to relocate the bad one.
- This option specifies how many physical eraseblocks will be reserved
- for bad eraseblock handling (percents of total number of good flash
- eraseblocks). If the underlying flash does not admit of bad
- eraseblocks (e.g. NOR flash), this value is ignored and nothing is
- reserved. Leave the default value if unsure.
+ This option specifies the maximum bad physical eraseblocks UBI
+ expects on the MTD device (per 1024 eraseblocks). If the underlying
+ flash does not admit of bad eraseblocks (e.g. NOR flash), this value
+ is ignored.
+
+ NAND datasheets often specify the minimum and maximum NVM (Number of
+ Valid Blocks) for the flashes' endurance lifetime. The maximum
+ expected bad eraseblocks per 1024 eraseblocks then can be calculated
+ as "1024 * (1 - MinNVB / MaxNVB)", which gives 20 for most NANDs
+ (MaxNVB is basically the total count of eraseblocks on the chip).
+
+ To put it differently, if this value is 20, UBI will try to reserve
+ about 1.9% of physical eraseblocks for bad blocks handling. And that
+ will be 1.9% of eraseblocks on the entire NAND chip, not just the MTD
+ partition UBI attaches. This means that if you have, say, a NAND
+ flash chip that admits a maximum of 40 bad eraseblocks, and it is
+ split into two MTD partitions of the same size, UBI will reserve 40
+ eraseblocks when attaching a partition.
+
+ This option can be overridden by the "mtd=" UBI module parameter or
+ by the "attach" ioctl.
+
+ Leave the default value if unsure.
+
+config MTD_UBI_FASTMAP
+ bool "UBI Fastmap (Experimental feature)"
+ default y
+ help
+ Important: this feature is experimental so far and the on-flash
+ format for fastmap may change in the next kernel versions
+
+ Fastmap is a mechanism which allows attaching an UBI device
+ in nearly constant time. Instead of scanning the whole MTD device it
+ only has to locate a checkpoint (called fastmap) on the device.
+ The on-flash fastmap contains all information needed to attach
+ the device. Using fastmap only makes sense on large devices where
+ attaching by scanning takes long. UBI will not automatically install
+ a fastmap on old images, but you can set the UBI module parameter
+ fm_autoconvert to 1 if you want so. Please note that fastmap-enabled
+ images are still usable with UBI implementations without
+ fastmap support. On typical flash devices the whole fastmap fits
+ into one PEB. UBI will reserve PEBs to hold two fastmaps.
+
+ If in doubt, say "N".
config MTD_UBI_GLUEBI
tristate "MTD devices emulation driver (gluebi)"
@@ -52,12 +87,4 @@ config MTD_UBI_GLUEBI
work on top of UBI. Do not enable this unless you use legacy
software.
-config MTD_UBI_DEBUG
- bool "UBI debugging"
- depends on SYSFS
- select DEBUG_FS
- select KALLSYMS
- help
- This option enables UBI debugging.
-
endif # MTD_UBI
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/Makefile b/ANDROID_3.4.5/drivers/mtd/ubi/Makefile
index c9302a54..b46b0c97 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/Makefile
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_MTD_UBI) += ubi.o
-ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o scan.o
-ubi-y += misc.o
+ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
+ubi-y += misc.o debug.o
+ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
-ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/attach.c b/ANDROID_3.4.5/drivers/mtd/ubi/attach.c
new file mode 100755
index 00000000..cc1af19f
--- /dev/null
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/attach.c
@@ -0,0 +1,1769 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * UBI attaching sub-system.
+ *
+ * This sub-system is responsible for attaching MTD devices and it also
+ * implements flash media scanning.
+ *
+ * The attaching information is represented by a &struct ubi_attach_info
+ * object. Information about volumes is represented by &struct ubi_ainf_volume
+ * objects which are kept in volume RB-tree with root at the @volumes field.
+ * The RB-tree is indexed by the volume ID.
+ *
+ * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
+ * objects are kept in per-volume RB-trees with the root at the corresponding
+ * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
+ * per-volume objects and each of these objects is the root of RB-tree of
+ * per-LEB objects.
+ *
+ * Corrupted physical eraseblocks are put to the @corr list, free physical
+ * eraseblocks are put to the @free list and the physical eraseblock to be
+ * erased are put to the @erase list.
+ *
+ * About corruptions
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * UBI protects EC and VID headers with CRC-32 checksums, so it can detect
+ * whether the headers are corrupted or not. Sometimes UBI also protects the
+ * data with CRC-32, e.g., when it executes the atomic LEB change operation, or
+ * when it moves the contents of a PEB for wear-leveling purposes.
+ *
+ * UBI tries to distinguish between 2 types of corruptions.
+ *
+ * 1. Corruptions caused by power cuts. These are expected corruptions and UBI
+ * tries to handle them gracefully, without printing too many warnings and
+ * error messages. The idea is that we do not lose important data in these
+ * cases - we may lose only the data which were being written to the media just
+ * before the power cut happened, and the upper layers (e.g., UBIFS) are
+ * supposed to handle such data losses (e.g., by using the FS journal).
+ *
+ * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
+ * the reason is a power cut, UBI puts this PEB to the @erase list, and all
+ * PEBs in the @erase list are scheduled for erasure later.
+ *
+ * 2. Unexpected corruptions which are not caused by power cuts. During
+ * attaching, such PEBs are put to the @corr list and UBI preserves them.
+ * Obviously, this lessens the amount of available PEBs, and if at some point
+ * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
+ * about such PEBs every time the MTD device is attached.
+ *
+ * However, it is difficult to reliably distinguish between these types of
+ * corruptions and UBI's strategy is as follows (in case of attaching by
+ * scanning). UBI assumes corruption type 2 if the VID header is corrupted and
+ * the data area does not contain all 0xFFs, and there were no bit-flips or
+ * integrity errors (e.g., ECC errors in case of NAND) while reading the data
+ * area. Otherwise UBI assumes corruption type 1. So the decision criteria
+ * are as follows.
+ * o If the data area contains only 0xFFs, there are no data, and it is safe
+ * to just erase this PEB - this is corruption type 1.
+ * o If the data area has bit-flips or data integrity errors (ECC errors on
+ * NAND), it is probably a PEB which was being erased when power cut
+ * happened, so this is corruption type 1. However, this is just a guess,
+ * which might be wrong.
+ * o Otherwise this is corruption type 2.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/math64.h>
+#include <linux/random.h>
+#include "ubi.h"
+
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
+
+/* Temporary variables used during scanning */
+static struct ubi_ec_hdr *ech;
+static struct ubi_vid_hdr *vidh;
+
+/**
+ * add_to_list - add physical eraseblock to a list.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to add
+ * @vol_id: the last used volume id for the PEB
+ * @lnum: the last used LEB number for the PEB
+ * @ec: erase counter of the physical eraseblock
+ * @to_head: if not zero, add to the head of the list
+ * @list: the list to add to
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for physical
+ * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
+ * It stores the @lnum and @vol_id alongside, which can both be
+ * %UBI_UNKNOWN if they are not available, not readable, or not assigned.
+ * If @to_head is not zero, PEB will be added to the head of the list, which
+ * basically means it will be processed first later. E.g., we add corrupted
+ * PEBs (corrupted due to power cuts) to the head of the erase list to make
+ * sure we erase them first and get rid of corruptions ASAP. This function
+ * returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
+ int lnum, int ec, int to_head, struct list_head *list)
+{
+ struct ubi_ainf_peb *aeb;
+
+ if (list == &ai->free) {
+ dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
+ } else if (list == &ai->erase) {
+ dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
+ } else if (list == &ai->alien) {
+ dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
+ ai->alien_peb_count += 1;
+ } else
+ BUG(); /* caller passed a list this helper does not manage */
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->ec = ec;
+ /* head insertion makes this PEB be processed first later (see kerneldoc
+ * above: e.g. power-cut-corrupted PEBs should be erased ASAP) */
+ if (to_head)
+ list_add(&aeb->u.list, list);
+ else
+ list_add_tail(&aeb->u.list, list);
+ return 0;
+}
+
+/**
+ * add_corrupted - add a corrupted physical eraseblock.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to add
+ * @ec: erase counter of the physical eraseblock
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for a corrupted
+ * physical eraseblock @pnum and adds it to the 'corr' list. The corruption
+ * was presumably not caused by a power cut. Returns zero in case of success
+ * and a negative error code in case of failure.
+ */
+static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
+{
+ struct ubi_ainf_peb *aeb;
+
+ dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ /* type-2 corruptions are preserved, not erased — keep a count of them */
+ ai->corr_peb_count += 1;
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ list_add(&aeb->u.list, &ai->corr);
+ return 0;
+}
+
+/**
+ * validate_vid_hdr - check volume identifier header.
+ * @vid_hdr: the volume identifier header to check
+ * @av: information about the volume this logical eraseblock belongs to
+ * @pnum: physical eraseblock number the VID header came from
+ *
+ * This function checks that data stored in @vid_hdr is consistent. Returns
+ * non-zero if an inconsistency was found and zero if not.
+ *
+ * Note, UBI does sanity check of everything it reads from the flash media.
+ * Most of the checks are done in the I/O sub-system. Here we check that the
+ * information in the VID header is consistent to the information in other VID
+ * headers of the same volume.
+ */
+static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
+ const struct ubi_ainf_volume *av, int pnum)
+{
+ int vol_type = vid_hdr->vol_type;
+ int vol_id = be32_to_cpu(vid_hdr->vol_id);
+ int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ int data_pad = be32_to_cpu(vid_hdr->data_pad);
+
+ /* with no previous LEBs recorded there is nothing to compare against */
+ if (av->leb_count != 0) {
+ int av_vol_type;
+
+ /*
+ * This is not the first logical eraseblock belonging to this
+ * volume. Ensure that the data in its VID header is consistent
+ * to the data in previous logical eraseblock headers.
+ */
+
+ if (vol_id != av->vol_id) {
+ ubi_err("inconsistent vol_id");
+ goto bad;
+ }
+
+ /* map the volume-table type to the on-flash VID type for compare */
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av_vol_type = UBI_VID_STATIC;
+ else
+ av_vol_type = UBI_VID_DYNAMIC;
+
+ if (vol_type != av_vol_type) {
+ ubi_err("inconsistent vol_type");
+ goto bad;
+ }
+
+ if (used_ebs != av->used_ebs) {
+ ubi_err("inconsistent used_ebs");
+ goto bad;
+ }
+
+ if (data_pad != av->data_pad) {
+ ubi_err("inconsistent data_pad");
+ goto bad;
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err("inconsistent VID header at PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ ubi_dump_av(av);
+ return -EINVAL;
+}
+
+/**
+ * add_volume - add volume to the attaching information.
+ * @ai: attaching information
+ * @vol_id: ID of the volume to add
+ * @pnum: physical eraseblock number
+ * @vid_hdr: volume identifier header
+ *
+ * If the volume corresponding to the @vid_hdr logical eraseblock is already
+ * present in the attaching information, this function does nothing. Otherwise
+ * it adds corresponding volume to the attaching information. Returns a pointer
+ * to the allocated "av" object in case of success and a negative error code in
+ * case of failure.
+ */
+static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
+ int vol_id, int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
+
+ /* Walk the volume RB-tree to look if this volume is already present */
+ while (*p) {
+ parent = *p;
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (vol_id == av->vol_id)
+ return av;
+
+ /* note: this tree keeps LARGER vol_ids to the left */
+ if (vol_id > av->vol_id)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ /* The volume is absent - add it */
+ av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+ if (!av)
+ return ERR_PTR(-ENOMEM);
+
+ /* seed the per-volume state from this first-seen VID header */
+ av->highest_lnum = av->leb_count = 0;
+ av->vol_id = vol_id;
+ av->root = RB_ROOT;
+ av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ av->data_pad = be32_to_cpu(vid_hdr->data_pad);
+ av->compat = vid_hdr->compat;
+ av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
+ : UBI_STATIC_VOLUME;
+ if (vol_id > ai->highest_vol_id)
+ ai->highest_vol_id = vol_id;
+
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+ ai->vols_found += 1;
+ dbg_bld("added volume %d", vol_id);
+ return av;
+}
+
+/**
+ * ubi_compare_lebs - find out which logical eraseblock is newer.
+ * @ubi: UBI device description object
+ * @aeb: first logical eraseblock to compare
+ * @pnum: physical eraseblock number of the second logical eraseblock to
+ * compare
+ * @vid_hdr: volume identifier header of the second logical eraseblock
+ *
+ * This function compares 2 copies of a LEB and informs which one is newer. In
+ * case of success this function returns a positive value, in case of failure, a
+ * negative error code is returned. The success return codes use the following
+ * bits:
+ * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
+ * second PEB (described by @pnum and @vid_hdr);
+ * o bit 0 is set: the second PEB is newer;
+ * o bit 1 is cleared: no bit-flips were detected in the newer LEB;
+ * o bit 1 is set: bit-flips were detected in the newer LEB;
+ * o bit 2 is cleared: the older LEB is not corrupted;
+ * o bit 2 is set: the older LEB is corrupted.
+ */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ int pnum, const struct ubi_vid_hdr *vid_hdr)
+{
+ int len, err, second_is_newer, bitflips = 0, corrupted = 0;
+ uint32_t data_crc, crc;
+ struct ubi_vid_hdr *vh = NULL;
+ unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
+
+ if (sqnum2 == aeb->sqnum) {
+ /*
+ * This must be a really ancient UBI image which has been
+ * created before sequence numbers support has been added. At
+ * that times we used 32-bit LEB versions stored in logical
+ * eraseblocks. That was before UBI got into mainline. We do not
+ * support these images anymore. Well, those images still work,
+ * but only if no unclean reboots happened.
+ */
+ ubi_err("unsupported on-flash UBI format");
+ return -EINVAL;
+ }
+
+ /* Obviously the LEB with lower sequence counter is older */
+ second_is_newer = (sqnum2 > aeb->sqnum);
+
+ /*
+ * Now we know which copy is newer. If the copy flag of the PEB with
+ * newer version is not set, then we just return, otherwise we have to
+ * check data CRC. For the second PEB we already have the VID header,
+ * for the first one - we'll need to re-read it from flash.
+ *
+ * Note: this may be optimized so that we wouldn't read twice.
+ */
+
+ if (second_is_newer) {
+ if (!vid_hdr->copy_flag) {
+ /* It is not a copy, so it is newer */
+ dbg_bld("second PEB %d is newer, copy_flag is unset",
+ pnum);
+ return 1;
+ }
+ } else {
+ if (!aeb->copy_flag) {
+ /* It is not a copy, so it is newer */
+ dbg_bld("first PEB %d is newer, copy_flag is unset",
+ pnum);
+ return bitflips << 1;
+ }
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh)
+ return -ENOMEM;
+
+ /* redirect the CRC check below at the FIRST (older-numbered) PEB */
+ pnum = aeb->pnum;
+ err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (err) {
+ if (err == UBI_IO_BITFLIPS)
+ bitflips = 1;
+ else {
+ ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d",
+ pnum, err);
+ if (err > 0)
+ err = -EIO;
+
+ goto out_free_vidh;
+ }
+ }
+
+ vid_hdr = vh;
+ }
+
+ /* Read the data of the copy and check the CRC */
+
+ len = be32_to_cpu(vid_hdr->data_size);
+
+ mutex_lock(&ubi->buf_mutex);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
+ goto out_unlock;
+
+ data_crc = be32_to_cpu(vid_hdr->data_crc);
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
+ if (crc != data_crc) {
+ dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
+ pnum, crc, data_crc);
+ corrupted = 1;
+ bitflips = 0;
+ /* the "newer" copy is broken, so fall back to the older one */
+ second_is_newer = !second_is_newer;
+ } else {
+ dbg_bld("PEB %d CRC is OK", pnum);
+ /* a tolerated read error at this point means bit-flips occurred */
+ bitflips = !!err;
+ }
+ mutex_unlock(&ubi->buf_mutex);
+
+ ubi_free_vid_hdr(ubi, vh);
+
+ if (second_is_newer)
+ dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
+ else
+ dbg_bld("first PEB %d is newer, copy_flag is set", pnum);
+
+ /* pack the three result bits documented in the kerneldoc above */
+ return second_is_newer | (bitflips << 1) | (corrupted << 2);
+
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
+out_free_vidh:
+ ubi_free_vid_hdr(ubi, vh);
+ return err;
+}
+
+/**
+ * ubi_add_to_av - add used physical eraseblock to the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: the physical eraseblock number
+ * @ec: erase counter
+ * @vid_hdr: the volume identifier header
+ * @bitflips: if bit-flips were detected when this physical eraseblock was read
+ *
+ * This function adds information about a used physical eraseblock to the
+ * 'used' tree of the corresponding volume. The function is rather complex
+ * because it has to handle cases when this is not the first physical
+ * eraseblock belonging to the same logical eraseblock, and the newer one has
+ * to be picked, while the older one has to be dropped. This function returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+		  int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
+{
+	int err, vol_id, lnum;
+	unsigned long long sqnum;
+	struct ubi_ainf_volume *av;
+	struct ubi_ainf_peb *aeb;
+	struct rb_node **p, *parent = NULL;
+
+	vol_id = be32_to_cpu(vid_hdr->vol_id);
+	lnum = be32_to_cpu(vid_hdr->lnum);
+	sqnum = be64_to_cpu(vid_hdr->sqnum);
+
+	dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
+		pnum, vol_id, lnum, ec, sqnum, bitflips);
+
+	av = add_volume(ai, vol_id, pnum, vid_hdr);
+	if (IS_ERR(av))
+		return PTR_ERR(av);
+
+	if (ai->max_sqnum < sqnum)
+		ai->max_sqnum = sqnum;
+
+	/*
+	 * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
+	 * if this is the first instance of this logical eraseblock or not.
+	 */
+	p = &av->root.rb_node;
+	while (*p) {
+		int cmp_res;
+
+		parent = *p;
+		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+		if (lnum != aeb->lnum) {
+			if (lnum < aeb->lnum)
+				p = &(*p)->rb_left;
+			else
+				p = &(*p)->rb_right;
+			continue;
+		}
+
+		/*
+		 * There is already a physical eraseblock describing the same
+		 * logical eraseblock present.
+		 */
+
+		dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
+			aeb->pnum, aeb->sqnum, aeb->ec);
+
+		/*
+		 * Make sure that the logical eraseblocks have different
+		 * sequence numbers. Otherwise the image is bad.
+		 *
+		 * However, if the sequence number is zero, we assume it must
+		 * be an ancient UBI image from the era when UBI did not have
+		 * sequence numbers. We still can attach these images, unless
+		 * there is a need to distinguish between old and new
+		 * eraseblocks, in which case we'll refuse the image in
+		 * 'ubi_compare_lebs()'. In other words, we attach old clean
+		 * images, but refuse attaching old images with duplicated
+		 * logical eraseblocks because there was an unclean reboot.
+		 */
+		if (aeb->sqnum == sqnum && sqnum != 0) {
+			ubi_err("two LEBs with same sequence number %llu",
+				sqnum);
+			ubi_dump_aeb(aeb, 0);
+			ubi_dump_vid_hdr(vid_hdr);
+			return -EINVAL;
+		}
+
+		/*
+		 * Now we have to drop the older one and preserve the newer
+		 * one.
+		 *
+		 * cmp_res bit meanings (see 'ubi_compare_lebs()'):
+		 *   bit 0 - the new PEB (@pnum) is newer than @aeb
+		 *   bit 1 - bit-flips were seen, schedule for scrubbing
+		 *   bit 2 - the older copy is corrupted
+		 */
+		cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
+		if (cmp_res < 0)
+			return cmp_res;
+
+		if (cmp_res & 1) {
+			/*
+			 * This logical eraseblock is newer than the one
+			 * found earlier.
+			 */
+			err = validate_vid_hdr(vid_hdr, av, pnum);
+			if (err)
+				return err;
+
+			/* Queue the superseded PEB for erasure */
+			err = add_to_list(ai, aeb->pnum, aeb->vol_id,
+					  aeb->lnum, aeb->ec, cmp_res & 4,
+					  &ai->erase);
+			if (err)
+				return err;
+
+			/* Re-use the existing tree node for the new PEB */
+			aeb->ec = ec;
+			aeb->pnum = pnum;
+			aeb->vol_id = vol_id;
+			aeb->lnum = lnum;
+			aeb->scrub = ((cmp_res & 2) || bitflips);
+			aeb->copy_flag = vid_hdr->copy_flag;
+			aeb->sqnum = sqnum;
+
+			if (av->highest_lnum == lnum)
+				av->last_data_size =
+					be32_to_cpu(vid_hdr->data_size);
+
+			return 0;
+		} else {
+			/*
+			 * This logical eraseblock is older than the one found
+			 * previously.
+			 */
+			return add_to_list(ai, pnum, vol_id, lnum, ec,
+					   cmp_res & 4, &ai->erase);
+		}
+	}
+
+	/*
+	 * We've met this logical eraseblock for the first time, add it to the
+	 * attaching information.
+	 */
+
+	err = validate_vid_hdr(vid_hdr, av, pnum);
+	if (err)
+		return err;
+
+	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+	if (!aeb)
+		return -ENOMEM;
+
+	aeb->ec = ec;
+	aeb->pnum = pnum;
+	aeb->vol_id = vol_id;
+	aeb->lnum = lnum;
+	aeb->scrub = bitflips;
+	aeb->copy_flag = vid_hdr->copy_flag;
+	aeb->sqnum = sqnum;
+
+	if (av->highest_lnum <= lnum) {
+		av->highest_lnum = lnum;
+		av->last_data_size = be32_to_cpu(vid_hdr->data_size);
+	}
+
+	av->leb_count += 1;
+	rb_link_node(&aeb->u.rb, parent, p);
+	rb_insert_color(&aeb->u.rb, &av->root);
+	return 0;
+}
+
+/**
+ * ubi_find_av - find volume in the attaching information.
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ *
+ * This function returns a pointer to the volume description or %NULL if there
+ * are no data about this volume in the attaching information.
+ */
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+				    int vol_id)
+{
+	struct rb_node *node = ai->volumes.rb_node;
+
+	while (node) {
+		struct ubi_ainf_volume *av;
+
+		av = rb_entry(node, struct ubi_ainf_volume, rb);
+		if (av->vol_id == vol_id)
+			return av;
+
+		/*
+		 * NOTE: larger volume IDs descend to the left here - keep
+		 * this direction in sync with the tree insertion code.
+		 */
+		node = vol_id > av->vol_id ? node->rb_left : node->rb_right;
+	}
+
+	return NULL;
+}
+
+/**
+ * ubi_remove_av - delete attaching information about a volume.
+ * @ai: attaching information
+ * @av: the volume attaching information to delete
+ */
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+	struct rb_node *node;
+
+	dbg_bld("remove attaching information about volume %d", av->vol_id);
+
+	/* Detach every PEB of this volume and queue it for erasure */
+	for (node = rb_first(&av->root); node; node = rb_first(&av->root)) {
+		struct ubi_ainf_peb *aeb;
+
+		aeb = rb_entry(node, struct ubi_ainf_peb, u.rb);
+		rb_erase(&aeb->u.rb, &av->root);
+		list_add_tail(&aeb->u.list, &ai->erase);
+	}
+
+	rb_erase(&av->rb, &ai->volumes);
+	kfree(av);
+	ai->vols_found -= 1;
+}
+
+/**
+ * early_erase_peb - erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @ai: attaching information (currently unused)
+ * @pnum: physical eraseblock number to erase;
+ * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
+ *
+ * This function erases physical eraseblock 'pnum', and writes the erase
+ * counter header to it. This function should only be used on UBI device
+ * initialization stages, when the EBA sub-system had not been yet initialized.
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int early_erase_peb(struct ubi_device *ubi,
+			   const struct ubi_attach_info *ai, int pnum, int ec)
+{
+	int err;
+	struct ubi_ec_hdr *ec_hdr;
+
+	/* Widen before comparing so the check cannot be fooled by overflow */
+	if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
+		/*
+		 * Erase counter overflow. Upgrade UBI and use 64-bit
+		 * erase counters internally.
+		 */
+		ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
+		return -EINVAL;
+	}
+
+	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ec_hdr)
+		return -ENOMEM;
+
+	ec_hdr->ec = cpu_to_be64(ec);
+
+	err = ubi_io_sync_erase(ubi, pnum, 0);
+	if (err < 0)
+		goto out_free;
+
+	err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+
+out_free:
+	kfree(ec_hdr);
+	return err;
+}
+
+/**
+ * ubi_early_get_peb - get a free physical eraseblock.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns a free physical eraseblock. It is supposed to be
+ * called on the UBI initialization stages when the wear-leveling sub-system is
+ * not initialized yet. This function picks a physical eraseblocks from one of
+ * the lists, writes the EC header if it is needed, and removes it from the
+ * list.
+ *
+ * This function returns a pointer to the "aeb" of the found free PEB in case
+ * of success and an error code in case of failure.
+ */
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+				       struct ubi_attach_info *ai)
+{
+	int err = 0;
+	struct ubi_ainf_peb *aeb, *tmp_aeb;
+
+	/* Fast path: a PEB on the free list needs no erasure */
+	if (!list_empty(&ai->free)) {
+		aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
+		list_del(&aeb->u.list);
+		dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
+		return aeb;
+	}
+
+	/*
+	 * We try to erase the first physical eraseblock from the erase list
+	 * and pick it if we succeed, or try to erase the next one if not. And
+	 * so forth. We don't want to take care about bad eraseblocks here -
+	 * they'll be handled later.
+	 */
+	list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
+		if (aeb->ec == UBI_UNKNOWN)
+			aeb->ec = ai->mean_ec;
+
+		/* EC goes up by one because we are erasing the PEB now */
+		err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
+		if (err)
+			continue;
+
+		aeb->ec += 1;
+		list_del(&aeb->u.list);
+		dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
+		return aeb;
+	}
+
+	ubi_err("no free eraseblocks");
+	return ERR_PTR(-ENOSPC);
+}
+
+/**
+ * check_corruption - check the data area of PEB.
+ * @ubi: UBI device description object
+ * @vid_hdr: the (corrupted) VID header of this PEB
+ * @pnum: the physical eraseblock number to check
+ *
+ * This is a helper function which is used to distinguish between VID header
+ * corruptions caused by power cuts and other reasons. If the PEB contains only
+ * 0xFF bytes in the data area, the VID header is most probably corrupted
+ * because of a power cut (%0 is returned in this case). Otherwise, it was
+ * probably corrupted for some other reasons (%1 is returned in this case). A
+ * negative error code is returned if a read error occurred.
+ *
+ * If the corruption reason was a power cut, UBI can safely erase this PEB.
+ * Otherwise, it should preserve it to avoid possibly destroying important
+ * information.
+ */
+static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
+			    int pnum)
+{
+	int err;
+
+	/* ubi->peb_buf is shared - serialize access with buf_mutex */
+	mutex_lock(&ubi->buf_mutex);
+	memset(ubi->peb_buf, 0x00, ubi->leb_size);
+
+	err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
+			  ubi->leb_size);
+	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
+		/*
+		 * Bit-flips or integrity errors while reading the data area.
+		 * It is difficult to say for sure what type of corruption is
+		 * this, but presumably a power cut happened while this PEB was
+		 * erased, so it became unstable and corrupted, and should be
+		 * erased.
+		 */
+		err = 0;
+		goto out_unlock;
+	}
+
+	if (err)
+		goto out_unlock;
+
+	/* All 0xFF in the data area - typical power-cut signature (err = 0) */
+	if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
+		goto out_unlock;
+
+	ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
+		pnum);
+	ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
+	ubi_dump_vid_hdr(vid_hdr);
+	pr_err("hexdump of PEB %d offset %d, length %d",
+	       pnum, ubi->leb_start, ubi->leb_size);
+	ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+			       ubi->peb_buf, ubi->leb_size, 1);
+	err = 1;
+
+out_unlock:
+	mutex_unlock(&ubi->buf_mutex);
+	return err;
+}
+
+/**
+ * scan_peb - scan and process UBI headers of a PEB.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: the physical eraseblock number
+ * @vid: The volume ID of the found volume will be stored in this pointer
+ * @sqnum: The sqnum of the found volume will be stored in this pointer
+ *
+ * This function reads UBI headers of PEB @pnum, checks them, and adds
+ * information about this PEB to the corresponding list or RB-tree in the
+ * "attaching info" structure. Returns zero if the physical eraseblock was
+ * successfully handled and a negative error code in case of failure.
+ *
+ * NOTE: this function reads into the file-global 'ech' and 'vidh' buffers,
+ * which must have been allocated by the caller (see scan_all()/scan_fast()).
+ */
+static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		    int pnum, int *vid, unsigned long long *sqnum)
+{
+	long long uninitialized_var(ec);
+	int err, bitflips = 0, vol_id = -1, ec_err = 0;
+
+	dbg_bld("scan PEB %d", pnum);
+
+	/* Skip bad physical eraseblocks */
+	err = ubi_io_is_bad(ubi, pnum);
+	if (err < 0)
+		return err;
+	else if (err) {
+		ai->bad_peb_count += 1;
+		return 0;
+	}
+
+	err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+	if (err < 0)
+		return err;
+	switch (err) {
+	case 0:
+		break;
+	case UBI_IO_BITFLIPS:
+		bitflips = 1;
+		break;
+	case UBI_IO_FF:
+		ai->empty_peb_count += 1;
+		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+				   UBI_UNKNOWN, 0, &ai->erase);
+	case UBI_IO_FF_BITFLIPS:
+		ai->empty_peb_count += 1;
+		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+				   UBI_UNKNOWN, 1, &ai->erase);
+	case UBI_IO_BAD_HDR_EBADMSG:
+	case UBI_IO_BAD_HDR:
+		/*
+		 * We have to also look at the VID header, possibly it is not
+		 * corrupted. Set %bitflips flag in order to make this PEB be
+		 * moved and EC be re-created.
+		 */
+		ec_err = err;
+		ec = UBI_UNKNOWN;
+		bitflips = 1;
+		break;
+	default:
+		ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err);
+		return -EINVAL;
+	}
+
+	if (!ec_err) {
+		int image_seq;
+
+		/* Make sure UBI version is OK */
+		if (ech->version != UBI_VERSION) {
+			ubi_err("this UBI version is %d, image version is %d",
+				UBI_VERSION, (int)ech->version);
+			return -EINVAL;
+		}
+
+		ec = be64_to_cpu(ech->ec);
+		if (ec > UBI_MAX_ERASECOUNTER) {
+			/*
+			 * Erase counter overflow. The EC headers have 64 bits
+			 * reserved, but we anyway make use of only 31 bit
+			 * values, as this seems to be enough for any existing
+			 * flash. Upgrade UBI and use 64-bit erase counters
+			 * internally.
+			 */
+			ubi_err("erase counter overflow, max is %d",
+				UBI_MAX_ERASECOUNTER);
+			ubi_dump_ec_hdr(ech);
+			return -EINVAL;
+		}
+
+		/*
+		 * Make sure that all PEBs have the same image sequence number.
+		 * This allows us to detect situations when users flash UBI
+		 * images incorrectly, so that the flash has the new UBI image
+		 * and leftovers from the old one. This feature was added
+		 * relatively recently, and the sequence number was always
+		 * zero, because old UBI implementations always set it to zero.
+		 * For this reasons, we do not panic if some PEBs have zero
+		 * sequence number, while other PEBs have non-zero sequence
+		 * number.
+		 */
+		image_seq = be32_to_cpu(ech->image_seq);
+		if (!ubi->image_seq)
+			ubi->image_seq = image_seq;
+		if (image_seq && ubi->image_seq != image_seq) {
+			ubi_err("bad image sequence number %d in PEB %d, expected %d",
+				image_seq, pnum, ubi->image_seq);
+			ubi_dump_ec_hdr(ech);
+			return -EINVAL;
+		}
+	}
+
+	/* OK, we've done with the EC header, let's look at the VID header */
+
+	err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
+	if (err < 0)
+		return err;
+	switch (err) {
+	case 0:
+		break;
+	case UBI_IO_BITFLIPS:
+		bitflips = 1;
+		break;
+	case UBI_IO_BAD_HDR_EBADMSG:
+		if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
+			/*
+			 * Both EC and VID headers are corrupted and were read
+			 * with data integrity error, probably this is a bad
+			 * PEB, but it is not marked as bad yet. This may also
+			 * be a result of power cut during erasure.
+			 */
+			ai->maybe_bad_peb_count += 1;
+		/* fall through */
+	case UBI_IO_BAD_HDR:
+		if (ec_err)
+			/*
+			 * Both headers are corrupted. There is a possibility
+			 * that this a valid UBI PEB which has corresponding
+			 * LEB, but the headers are corrupted. However, it is
+			 * impossible to distinguish it from a PEB which just
+			 * contains garbage because of a power cut during erase
+			 * operation. So we just schedule this PEB for erasure.
+			 *
+			 * Besides, in case of NOR flash, we deliberately
+			 * corrupt both headers because NOR flash erasure is
+			 * slow and can start from the end.
+			 */
+			err = 0;
+		else
+			/*
+			 * The EC was OK, but the VID header is corrupted. We
+			 * have to check what is in the data area.
+			 */
+			err = check_corruption(ubi, vidh, pnum);
+
+		if (err < 0)
+			return err;
+		else if (!err)
+			/* This corruption is caused by a power cut */
+			err = add_to_list(ai, pnum, UBI_UNKNOWN,
+					  UBI_UNKNOWN, ec, 1, &ai->erase);
+		else
+			/* This is an unexpected corruption */
+			err = add_corrupted(ai, pnum, ec);
+		if (err)
+			return err;
+		goto adjust_mean_ec;
+	case UBI_IO_FF_BITFLIPS:
+		err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+				  ec, 1, &ai->erase);
+		if (err)
+			return err;
+		goto adjust_mean_ec;
+	case UBI_IO_FF:
+		if (ec_err || bitflips)
+			err = add_to_list(ai, pnum, UBI_UNKNOWN,
+					  UBI_UNKNOWN, ec, 1, &ai->erase);
+		else
+			err = add_to_list(ai, pnum, UBI_UNKNOWN,
+					  UBI_UNKNOWN, ec, 0, &ai->free);
+		if (err)
+			return err;
+		goto adjust_mean_ec;
+	default:
+		ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d",
+			err);
+		return -EINVAL;
+	}
+
+	vol_id = be32_to_cpu(vidh->vol_id);
+	if (vid)
+		*vid = vol_id;
+	if (sqnum)
+		*sqnum = be64_to_cpu(vidh->sqnum);
+	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
+		int lnum = be32_to_cpu(vidh->lnum);
+
+		/* Unsupported internal volume */
+		switch (vidh->compat) {
+		case UBI_COMPAT_DELETE:
+			/* Fastmap volumes are deleted silently */
+			if (vol_id != UBI_FM_SB_VOLUME_ID
+			    && vol_id != UBI_FM_DATA_VOLUME_ID) {
+				ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
+					vol_id, lnum);
+			}
+			err = add_to_list(ai, pnum, vol_id, lnum,
+					  ec, 1, &ai->erase);
+			if (err)
+				return err;
+			return 0;
+
+		case UBI_COMPAT_RO:
+			ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
+				vol_id, lnum);
+			ubi->ro_mode = 1;
+			break;
+
+		case UBI_COMPAT_PRESERVE:
+			ubi_msg("\"preserve\" compatible internal volume %d:%d found",
+				vol_id, lnum);
+			err = add_to_list(ai, pnum, vol_id, lnum,
+					  ec, 0, &ai->alien);
+			if (err)
+				return err;
+			return 0;
+
+		case UBI_COMPAT_REJECT:
+			ubi_err("incompatible internal volume %d:%d found",
+				vol_id, lnum);
+			return -EINVAL;
+		}
+	}
+
+	if (ec_err)
+		ubi_warn("valid VID header but corrupted EC header at PEB %d",
+			 pnum);
+	err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
+	if (err)
+		return err;
+
+adjust_mean_ec:
+	/* Only PEBs with a valid EC header contribute to the statistics */
+	if (!ec_err) {
+		ai->ec_sum += ec;
+		ai->ec_count += 1;
+		if (ec > ai->max_ec)
+			ai->max_ec = ec;
+		if (ec < ai->min_ec)
+			ai->min_ec = ec;
+	}
+
+	return 0;
+}
+
+/**
+ * late_analysis - analyze the overall situation with PEB.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This is a helper function which takes a look what PEBs we have after we
+ * gather information about all of them ("ai" is complete). It decides whether
+ * the flash is empty and should be formatted or whether there are too many
+ * corrupted PEBs and we should not attach this MTD device. Returns zero if we
+ * should proceed with attaching the MTD device, and %-EINVAL if we should not.
+ */
+static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+	struct ubi_ainf_peb *aeb;
+	int max_corr, peb_count;
+
+	peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
+	/* Tolerate up to 5% corrupted PEBs, but at least 8 (GNU "?:") */
+	max_corr = peb_count / 20 ?: 8;
+
+	/*
+	 * Few corrupted PEBs is not a problem and may be just a result of
+	 * unclean reboots. However, many of them may indicate some problems
+	 * with the flash HW or driver.
+	 */
+	if (ai->corr_peb_count) {
+		ubi_err("%d PEBs are corrupted and preserved",
+			ai->corr_peb_count);
+		pr_err("Corrupted PEBs are:");
+		list_for_each_entry(aeb, &ai->corr, u.list)
+			pr_cont(" %d", aeb->pnum);
+		pr_cont("\n");
+
+		/*
+		 * If too many PEBs are corrupted, we refuse attaching,
+		 * otherwise, only print a warning.
+		 */
+		if (ai->corr_peb_count >= max_corr) {
+			ubi_err("too many corrupted PEBs, refusing");
+			return -EINVAL;
+		}
+	}
+
+	if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
+		/*
+		 * All PEBs are empty, or almost all - a couple PEBs look like
+		 * they may be bad PEBs which were not marked as bad yet.
+		 *
+		 * This piece of code basically tries to distinguish between
+		 * the following situations:
+		 *
+		 * 1. Flash is empty, but there are few bad PEBs, which are not
+		 *    marked as bad so far, and which were read with error. We
+		 *    want to go ahead and format this flash. While formatting,
+		 *    the faulty PEBs will probably be marked as bad.
+		 *
+		 * 2. Flash contains non-UBI data and we do not want to format
+		 *    it and destroy possibly important information.
+		 */
+		if (ai->maybe_bad_peb_count <= 2) {
+			ai->is_empty = 1;
+			ubi_msg("empty MTD device detected");
+			/* Fresh image sequence number for the new image */
+			get_random_bytes(&ubi->image_seq,
+					 sizeof(ubi->image_seq));
+		} else {
+			ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
+			return -EINVAL;
+		}
+
+	}
+
+	return 0;
+}
+
+/**
+ * destroy_av - free volume attaching information.
+ * @ai: attaching information
+ * @av: volume attaching information
+ *
+ * This function destroys the volume attaching information. The tree is
+ * destroyed bottom-up, without recursion, by detaching each leaf from its
+ * parent before freeing it.
+ */
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+	struct ubi_ainf_peb *aeb;
+	struct rb_node *this = av->root.rb_node;
+
+	while (this) {
+		/* Descend to a leaf first */
+		if (this->rb_left)
+			this = this->rb_left;
+		else if (this->rb_right)
+			this = this->rb_right;
+		else {
+			aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
+			this = rb_parent(this);
+			if (this) {
+				/* Detach the freed leaf from its parent */
+				if (this->rb_left == &aeb->u.rb)
+					this->rb_left = NULL;
+				else
+					this->rb_right = NULL;
+			}
+
+			kmem_cache_free(ai->aeb_slab_cache, aeb);
+		}
+	}
+	kfree(av);
+}
+
+/**
+ * destroy_ai - destroy attaching information.
+ * @ai: attaching information
+ *
+ * Frees every PEB object on the four lists, every volume tree (via
+ * 'destroy_av()'), the slab cache, and finally @ai itself.
+ */
+static void destroy_ai(struct ubi_attach_info *ai)
+{
+	struct ubi_ainf_peb *aeb, *aeb_tmp;
+	struct ubi_ainf_volume *av;
+	struct rb_node *rb;
+
+	list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
+		list_del(&aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, aeb);
+	}
+	list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
+		list_del(&aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, aeb);
+	}
+	list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
+		list_del(&aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, aeb);
+	}
+	list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
+		list_del(&aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, aeb);
+	}
+
+	/* Destroy the volume RB-tree (bottom-up, non-recursive) */
+	rb = ai->volumes.rb_node;
+	while (rb) {
+		if (rb->rb_left)
+			rb = rb->rb_left;
+		else if (rb->rb_right)
+			rb = rb->rb_right;
+		else {
+			av = rb_entry(rb, struct ubi_ainf_volume, rb);
+
+			rb = rb_parent(rb);
+			if (rb) {
+				/* Detach the freed leaf from its parent */
+				if (rb->rb_left == &av->rb)
+					rb->rb_left = NULL;
+				else
+					rb->rb_right = NULL;
+			}
+
+			destroy_av(ai, av);
+		}
+	}
+
+	/* The cache may be NULL if alloc_ai() failed half-way */
+	if (ai->aeb_slab_cache)
+		kmem_cache_destroy(ai->aeb_slab_cache);
+
+	kfree(ai);
+}
+
+/**
+ * scan_all - scan entire MTD device.
+ * @ubi: UBI device description object
+ * @ai: attach info object
+ * @start: start scanning at this PEB
+ *
+ * This function does full scanning of an MTD device and returns complete
+ * information about it in form of a "struct ubi_attach_info" object. In case
+ * of failure, an error code is returned.
+ *
+ * NOTE: allocates the file-global 'ech' and 'vidh' buffers used by
+ * 'scan_peb()' and 'self_check_ai()', and frees them before returning.
+ */
+static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		    int start)
+{
+	int err, pnum;
+	struct rb_node *rb1, *rb2;
+	struct ubi_ainf_volume *av;
+	struct ubi_ainf_peb *aeb;
+
+	err = -ENOMEM;
+
+	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ech)
+		return err;
+
+	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!vidh)
+		goto out_ech;
+
+	for (pnum = start; pnum < ubi->peb_count; pnum++) {
+		cond_resched();
+
+		dbg_gen("process PEB %d", pnum);
+		err = scan_peb(ubi, ai, pnum, NULL, NULL);
+		if (err < 0)
+			goto out_vidh;
+	}
+
+	ubi_msg("scanning is finished");
+
+	/* Calculate mean erase counter */
+	if (ai->ec_count)
+		ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+
+	err = late_analysis(ubi, ai);
+	if (err)
+		goto out_vidh;
+
+	/*
+	 * In case of unknown erase counter we use the mean erase counter
+	 * value.
+	 */
+	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+			if (aeb->ec == UBI_UNKNOWN)
+				aeb->ec = ai->mean_ec;
+	}
+
+	list_for_each_entry(aeb, &ai->free, u.list) {
+		if (aeb->ec == UBI_UNKNOWN)
+			aeb->ec = ai->mean_ec;
+	}
+
+	list_for_each_entry(aeb, &ai->corr, u.list)
+		if (aeb->ec == UBI_UNKNOWN)
+			aeb->ec = ai->mean_ec;
+
+	list_for_each_entry(aeb, &ai->erase, u.list)
+		if (aeb->ec == UBI_UNKNOWN)
+			aeb->ec = ai->mean_ec;
+
+	err = self_check_ai(ubi, ai);
+	if (err)
+		goto out_vidh;
+
+	ubi_free_vid_hdr(ubi, vidh);
+	kfree(ech);
+
+	return 0;
+
+out_vidh:
+	ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+	kfree(ech);
+	return err;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+
+/**
+ * scan_fast - try to find a fastmap and attach from it.
+ * @ubi: UBI device description object
+ * @ai: attach info object
+ *
+ * Scans the first %UBI_FM_MAX_START PEBs looking for a fastmap superblock.
+ * When several superblocks are found, the one with the highest sequence
+ * number wins; superseded anchors are counted as empty and remembered in
+ * @ubi->old_anchor (vendor extension).
+ *
+ * Returns 0 on success, negative return values indicate an internal
+ * error.
+ * UBI_NO_FASTMAP denotes that no fastmap was found.
+ * UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
+ */
+static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+	int err, pnum, fm_anchor = -1;
+	unsigned long long max_sqnum = 0;
+
+	err = -ENOMEM;
+	ubi->old_anchor = 0;
+	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ech)
+		goto out;
+
+	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!vidh)
+		goto out_ech;
+
+	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
+		int vol_id = -1;
+		unsigned long long sqnum = -1;
+
+		cond_resched();
+
+		dbg_gen("process PEB %d", pnum);
+		err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
+		if (err < 0)
+			goto out_vidh;
+
+		if (vol_id == UBI_FM_SB_VOLUME_ID) {
+			if (sqnum > max_sqnum) {
+				max_sqnum = sqnum;
+				/* Demote the previously found anchor */
+				if (fm_anchor != -1) {
+					ai->empty_peb_count += 1;
+					ubi->old_anchor = fm_anchor;
+				}
+				fm_anchor = pnum;
+			} else {
+				/* An older superblock - remember and skip */
+				ai->empty_peb_count += 1;
+				ubi->old_anchor = pnum;
+			}
+		}
+	}
+
+	ubi_free_vid_hdr(ubi, vidh);
+	kfree(ech);
+
+	if (fm_anchor < 0)
+		return UBI_NO_FASTMAP;
+
+	return ubi_scan_fastmap(ubi, ai, fm_anchor);
+out_vidh:
+	ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+	kfree(ech);
+out:
+	return err;
+}
+
+#endif
+
+/*
+ * alloc_ai - allocate and initialize an empty attach-info object.
+ * @slab_name: name for the per-attach PEB slab cache
+ *
+ * Returns the new object, or %NULL if out of memory.
+ */
+static struct ubi_attach_info *alloc_ai(const char *slab_name)
+{
+	struct ubi_attach_info *info;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return NULL;
+
+	info->volumes = RB_ROOT;
+	INIT_LIST_HEAD(&info->corr);
+	INIT_LIST_HEAD(&info->free);
+	INIT_LIST_HEAD(&info->erase);
+	INIT_LIST_HEAD(&info->alien);
+
+	info->aeb_slab_cache = kmem_cache_create(slab_name,
+						 sizeof(struct ubi_ainf_peb),
+						 0, 0, NULL);
+	if (!info->aeb_slab_cache) {
+		kfree(info);
+		return NULL;
+	}
+
+	return info;
+}
+
+/**
+ * ubi_attach - attach an MTD device.
+ * @ubi: UBI device descriptor
+ * @force_scan: if set to non-zero attach by scanning
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_attach(struct ubi_device *ubi, int force_scan)
+{
+	int err;
+	struct ubi_attach_info *ai;
+retry:
+	ai = alloc_ai("ubi_aeb_slab_cache");
+	if (!ai)
+		return -ENOMEM;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	/* On small flash devices we disable fastmap in any case. */
+	if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
+		ubi->fm_disabled = 1;
+		force_scan = 1;
+	}
+
+	if (force_scan)
+		err = scan_all(ubi, ai, 0);
+	else {
+		err = scan_fast(ubi, ai);
+		if (err > 0) {
+			if (err != UBI_NO_FASTMAP) {
+				/*
+				 * Bad fastmap: drop everything gathered so
+				 * far and start over with fresh attach info.
+				 *
+				 * NOTE(review): if scan_fast() keeps failing
+				 * with UBI_BAD_FASTMAP this loops; presumably
+				 * ubi_scan_fastmap() invalidates the bad
+				 * anchor - verify against that code.
+				 */
+				destroy_ai(ai);
+				goto retry;
+			}
+			/* No fastmap found - scan the rest of the device */
+			ubi->fm_idx = 1; /* UBI_FM_MAX_START */
+			err = scan_all(ubi, ai, UBI_FM_MAX_START);
+		}
+	}
+#else
+	err = scan_all(ubi, ai, 0);
+#endif
+	if (err)
+		goto out_ai;
+
+	ubi->bad_peb_count = ai->bad_peb_count;
+	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
+	ubi->corr_peb_count = ai->corr_peb_count;
+	ubi->max_ec = ai->max_ec;
+	ubi->mean_ec = ai->mean_ec;
+	dbg_gen("max. sequence number: %llu", ai->max_sqnum);
+
+	err = ubi_read_volume_table(ubi, ai);
+	if (err)
+		goto out_ai;
+
+	err = ubi_wl_init(ubi, ai);
+	if (err)
+		goto out_vtbl;
+
+	err = ubi_eba_init(ubi, ai);
+	if (err)
+		goto out_wl;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
+		struct ubi_attach_info *scan_ai;
+
+		scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
+		if (!scan_ai) {
+			/*
+			 * Fix: the old code jumped to out_wl with err still
+			 * 0 (ubi_eba_init() succeeded), so this error path
+			 * tore everything down yet returned "success".
+			 */
+			err = -ENOMEM;
+			goto out_wl;
+		}
+
+		err = scan_all(ubi, scan_ai, 0);
+		if (err) {
+			destroy_ai(scan_ai);
+			goto out_wl;
+		}
+
+		err = self_check_eba(ubi, ai, scan_ai);
+		destroy_ai(scan_ai);
+
+		if (err)
+			goto out_wl;
+	}
+#endif
+
+	destroy_ai(ai);
+	return 0;
+
+out_wl:
+	ubi_wl_close(ubi);
+out_vtbl:
+	ubi_free_internal_volumes(ubi);
+	vfree(ubi->vtbl);
+out_ai:
+	destroy_ai(ai);
+	return err;
+}
+
+/**
+ * self_check_ai - check the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns zero if the attaching information is all right, and a
+ * negative error code if not or if an error occurred.
+ *
+ * This is a debugging self-check: it is a no-op unless general checks are
+ * enabled via 'ubi_dbg_chk_gen()'. It re-reads VID headers into the
+ * file-global 'vidh' buffer, which the caller (scan_all()) has allocated.
+ */
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+	int pnum, err, vols_found = 0;
+	struct rb_node *rb1, *rb2;
+	struct ubi_ainf_volume *av;
+	struct ubi_ainf_peb *aeb, *last_aeb;
+	uint8_t *buf;
+
+	if (!ubi_dbg_chk_gen(ubi))
+		return 0;
+
+	/*
+	 * At first, check that attaching information is OK.
+	 */
+	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+		int leb_count = 0;
+
+		cond_resched();
+
+		vols_found += 1;
+
+		if (ai->is_empty) {
+			ubi_err("bad is_empty flag");
+			goto bad_av;
+		}
+
+		if (av->vol_id < 0 || av->highest_lnum < 0 ||
+		    av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
+		    av->data_pad < 0 || av->last_data_size < 0) {
+			ubi_err("negative values");
+			goto bad_av;
+		}
+
+		if (av->vol_id >= UBI_MAX_VOLUMES &&
+		    av->vol_id < UBI_INTERNAL_VOL_START) {
+			ubi_err("bad vol_id");
+			goto bad_av;
+		}
+
+		if (av->vol_id > ai->highest_vol_id) {
+			ubi_err("highest_vol_id is %d, but vol_id %d is there",
+				ai->highest_vol_id, av->vol_id);
+			goto out;
+		}
+
+		if (av->vol_type != UBI_DYNAMIC_VOLUME &&
+		    av->vol_type != UBI_STATIC_VOLUME) {
+			ubi_err("bad vol_type");
+			goto bad_av;
+		}
+
+		if (av->data_pad > ubi->leb_size / 2) {
+			ubi_err("bad data_pad");
+			goto bad_av;
+		}
+
+		last_aeb = NULL;
+		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+			cond_resched();
+
+			last_aeb = aeb;
+			leb_count += 1;
+
+			if (aeb->pnum < 0 || aeb->ec < 0) {
+				ubi_err("negative values");
+				goto bad_aeb;
+			}
+
+			if (aeb->ec < ai->min_ec) {
+				ubi_err("bad ai->min_ec (%d), %d found",
+					ai->min_ec, aeb->ec);
+				goto bad_aeb;
+			}
+
+			if (aeb->ec > ai->max_ec) {
+				ubi_err("bad ai->max_ec (%d), %d found",
+					ai->max_ec, aeb->ec);
+				goto bad_aeb;
+			}
+
+			if (aeb->pnum >= ubi->peb_count) {
+				ubi_err("too high PEB number %d, total PEBs %d",
+					aeb->pnum, ubi->peb_count);
+				goto bad_aeb;
+			}
+
+			if (av->vol_type == UBI_STATIC_VOLUME) {
+				if (aeb->lnum >= av->used_ebs) {
+					ubi_err("bad lnum or used_ebs");
+					goto bad_aeb;
+				}
+			} else {
+				if (av->used_ebs != 0) {
+					ubi_err("non-zero used_ebs");
+					goto bad_aeb;
+				}
+			}
+
+			if (aeb->lnum > av->highest_lnum) {
+				ubi_err("incorrect highest_lnum or lnum");
+				goto bad_aeb;
+			}
+		}
+
+		if (av->leb_count != leb_count) {
+			ubi_err("bad leb_count, %d objects in the tree",
+				leb_count);
+			goto bad_av;
+		}
+
+		if (!last_aeb)
+			continue;
+
+		/* The rightmost tree entry must carry the highest LEB number */
+		aeb = last_aeb;
+
+		if (aeb->lnum != av->highest_lnum) {
+			ubi_err("bad highest_lnum");
+			goto bad_aeb;
+		}
+	}
+
+	if (vols_found != ai->vols_found) {
+		ubi_err("bad ai->vols_found %d, should be %d",
+			ai->vols_found, vols_found);
+		goto out;
+	}
+
+	/* Check that attaching information is correct */
+	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+		last_aeb = NULL;
+		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+			int vol_type;
+
+			cond_resched();
+
+			last_aeb = aeb;
+
+			/* Re-read the VID header and cross-check each field */
+			err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
+			if (err && err != UBI_IO_BITFLIPS) {
+				ubi_err("VID header is not OK (%d)", err);
+				if (err > 0)
+					err = -EIO;
+				return err;
+			}
+
+			vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
+				   UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
+			if (av->vol_type != vol_type) {
+				ubi_err("bad vol_type");
+				goto bad_vid_hdr;
+			}
+
+			if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
+				ubi_err("bad sqnum %llu", aeb->sqnum);
+				goto bad_vid_hdr;
+			}
+
+			if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
+				ubi_err("bad vol_id %d", av->vol_id);
+				goto bad_vid_hdr;
+			}
+
+			if (av->compat != vidh->compat) {
+				ubi_err("bad compat %d", vidh->compat);
+				goto bad_vid_hdr;
+			}
+
+			if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
+				ubi_err("bad lnum %d", aeb->lnum);
+				goto bad_vid_hdr;
+			}
+
+			if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
+				ubi_err("bad used_ebs %d", av->used_ebs);
+				goto bad_vid_hdr;
+			}
+
+			if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
+				ubi_err("bad data_pad %d", av->data_pad);
+				goto bad_vid_hdr;
+			}
+		}
+
+		if (!last_aeb)
+			continue;
+
+		if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
+			ubi_err("bad highest_lnum %d", av->highest_lnum);
+			goto bad_vid_hdr;
+		}
+
+		if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
+			ubi_err("bad last_data_size %d", av->last_data_size);
+			goto bad_vid_hdr;
+		}
+	}
+
+	/*
+	 * Make sure that all the physical eraseblocks are in one of the lists
+	 * or trees. buf[pnum] != 0 marks a PEB as accounted for (or bad).
+	 */
+	buf = kzalloc(ubi->peb_count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+		err = ubi_io_is_bad(ubi, pnum);
+		if (err < 0) {
+			kfree(buf);
+			return err;
+		} else if (err)
+			buf[pnum] = 1;
+	}
+
+	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+			buf[aeb->pnum] = 1;
+
+	list_for_each_entry(aeb, &ai->free, u.list)
+		buf[aeb->pnum] = 1;
+
+	list_for_each_entry(aeb, &ai->corr, u.list)
+		buf[aeb->pnum] = 1;
+
+	list_for_each_entry(aeb, &ai->erase, u.list)
+		buf[aeb->pnum] = 1;
+
+	list_for_each_entry(aeb, &ai->alien, u.list)
+		buf[aeb->pnum] = 1;
+
+	err = 0;
+	for (pnum = 0; pnum < ubi->peb_count; pnum++)
+		if (!buf[pnum]) {
+			ubi_err("PEB %d is not referred", pnum);
+			err = 1;
+		}
+
+	kfree(buf);
+	if (err)
+		goto out;
+	return 0;
+
+bad_aeb:
+	ubi_err("bad attaching information about LEB %d", aeb->lnum);
+	ubi_dump_aeb(aeb, 0);
+	ubi_dump_av(av);
+	goto out;
+
+bad_av:
+	ubi_err("bad attaching information about volume %d", av->vol_id);
+	ubi_dump_av(av);
+	goto out;
+
+bad_vid_hdr:
+	ubi_err("bad attaching information about volume %d", av->vol_id);
+	ubi_dump_av(av);
+	ubi_dump_vid_hdr(vidh);
+
+out:
+	dump_stack();
+	return -EINVAL;
+}
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/build.c b/ANDROID_3.4.5/drivers/mtd/ubi/build.c
index 0fde9fc7..ac3ab45d 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/build.c
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/build.c
@@ -27,10 +27,6 @@
* module load parameters or the kernel boot parameters. If MTD devices were
* specified, UBI does not attach any MTD device, but it is possible to do
* later using the "UBI control device".
- *
- * At the moment we only attach UBI devices by scanning, which will become a
- * bottleneck when flashes reach certain large size. Then one may improve UBI
- * and add other methods, although it does not seem to be easy to do.
*/
#include <linux/err.h>
@@ -40,6 +36,7 @@
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
+#include <linux/mtd/partitions.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
@@ -49,6 +46,12 @@
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
+/* Maximum number of comma-separated items in the 'mtd=' parameter */
+#define MTD_PARAM_MAX_COUNT 3
+
+/* Maximum value for the number of bad PEBs per 1024 PEBs */
+#define MAX_MTD_UBI_BEB_LIMIT 768
+
#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
@@ -60,10 +63,12 @@
* @name: MTD character device node path, MTD device name, or MTD device number
* string
* @vid_hdr_offs: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
*/
struct mtd_dev_param {
char name[MTD_PARAM_LEN_MAX];
int vid_hdr_offs;
+ int max_beb_per1024;
};
/* Numbers of elements set in the @mtd_dev_param array */
@@ -71,7 +76,10 @@ static int __initdata mtd_devs;
/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
-
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/* UBI module parameter to enable fastmap automatically on non-fastmap images */
+static bool fm_autoconvert;
+#endif
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;
@@ -148,6 +156,19 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
ubi_do_get_device_info(ubi, &nt.di);
ubi_do_get_volume_info(ubi, vol, &nt.vi);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ switch (ntype) {
+ case UBI_VOLUME_ADDED:
+ case UBI_VOLUME_REMOVED:
+ case UBI_VOLUME_RESIZED:
+ case UBI_VOLUME_RENAMED:
+ if (ubi_update_fastmap(ubi)) {
+ ubi_err("Unable to update fastmap!");
+ ubi_ro_mode(ubi);
+ }
+ }
+#endif
return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}
@@ -554,10 +575,10 @@ static void uif_close(struct ubi_device *ubi)
}
/**
- * free_internal_volumes - free internal volumes.
+ * ubi_free_internal_volumes - free internal volumes.
* @ubi: UBI device description object
*/
-static void free_internal_volumes(struct ubi_device *ubi)
+void ubi_free_internal_volumes(struct ubi_device *ubi)
{
int i;
@@ -568,62 +589,38 @@ static void free_internal_volumes(struct ubi_device *ubi)
}
}
-/**
- * attach_by_scanning - attach an MTD device using scanning method.
- * @ubi: UBI device descriptor
- *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
- *
- * Note, currently this is the only method to attach UBI devices. Hopefully in
- * the future we'll have more scalable attaching methods and avoid full media
- * scanning. But even in this case scanning will be needed as a fall-back
- * attaching method if there are some on-flash table corruptions.
- */
-static int attach_by_scanning(struct ubi_device *ubi)
+static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
- int err;
- struct ubi_scan_info *si;
-
- si = ubi_scan(ubi);
- if (IS_ERR(si))
- return PTR_ERR(si);
+ int limit, device_pebs;
+ uint64_t device_size;
- ubi->bad_peb_count = si->bad_peb_count;
- ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
- ubi->corr_peb_count = si->corr_peb_count;
- ubi->max_ec = si->max_ec;
- ubi->mean_ec = si->mean_ec;
- ubi_msg("max. sequence number: %llu", si->max_sqnum);
-
- err = ubi_read_volume_table(ubi, si);
- if (err)
- goto out_si;
-
- err = ubi_wl_init_scan(ubi, si);
- if (err)
- goto out_vtbl;
+ if (!max_beb_per1024)
+ return 0;
- err = ubi_eba_init_scan(ubi, si);
- if (err)
- goto out_wl;
+ /*
+ * Here we are using size of the entire flash chip and
+ * not just the MTD partition size because the maximum
+ * number of bad eraseblocks is a percentage of the
+ * whole device and bad eraseblocks are not fairly
+ * distributed over the flash chip. So the worst case
+ * is that all the bad eraseblocks of the chip are in
+ * the MTD partition we are attaching (ubi->mtd).
+ */
+ device_size = mtd_get_device_size(ubi->mtd);
+ device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
+ limit = mult_frac(device_pebs, max_beb_per1024, 1024);
- ubi_scan_destroy_si(si);
- return 0;
+ /* Round it up */
+ if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
+ limit += 1;
-out_wl:
- ubi_wl_close(ubi);
-out_vtbl:
- free_internal_volumes(ubi);
- vfree(ubi->vtbl);
-out_si:
- ubi_scan_destroy_si(si);
- return err;
+ return limit;
}
/**
* io_init - initialize I/O sub-system for a given UBI device.
* @ubi: UBI device description object
+ * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
*
* If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
* assumed:
@@ -636,8 +633,11 @@ out_si:
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int io_init(struct ubi_device *ubi)
+static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
+ dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
+ dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
+
if (ubi->mtd->numeraseregions != 0) {
/*
* Some flashes have several erase regions. Different regions
@@ -664,8 +664,10 @@ static int io_init(struct ubi_device *ubi)
ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
ubi->flash_size = ubi->mtd->size;
- if (mtd_can_have_bb(ubi->mtd))
+ if (mtd_can_have_bb(ubi->mtd)) {
ubi->bad_allowed = 1;
+ ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
+ }
if (ubi->mtd->type == MTD_NORFLASH) {
ubi_assert(ubi->mtd->writesize == 1);
@@ -707,11 +709,11 @@ static int io_init(struct ubi_device *ubi)
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
- dbg_msg("min_io_size %d", ubi->min_io_size);
- dbg_msg("max_write_size %d", ubi->max_write_size);
- dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
- dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
- dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
+ dbg_gen("min_io_size %d", ubi->min_io_size);
+ dbg_gen("max_write_size %d", ubi->max_write_size);
+ dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+ dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
+ dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
if (ubi->vid_hdr_offset == 0)
/* Default offset */
@@ -728,10 +730,10 @@ static int io_init(struct ubi_device *ubi)
ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
- dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
- dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
- dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift);
- dbg_msg("leb_start %d", ubi->leb_start);
+ dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
+ dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
+ dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
+ dbg_gen("leb_start %d", ubi->leb_start);
/* The shift must be aligned to 32-bit boundary */
if (ubi->vid_hdr_shift % 4) {
@@ -757,7 +759,7 @@ static int io_init(struct ubi_device *ubi)
ubi->max_erroneous = ubi->peb_count / 10;
if (ubi->max_erroneous < 16)
ubi->max_erroneous = 16;
- dbg_msg("max_erroneous %d", ubi->max_erroneous);
+ dbg_gen("max_erroneous %d", ubi->max_erroneous);
/*
* It may happen that EC and VID headers are situated in one minimal
@@ -765,36 +767,24 @@ static int io_init(struct ubi_device *ubi)
* read-only mode.
*/
if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
- ubi_warn("EC and VID headers are in the same minimal I/O unit, "
- "switch to read-only mode");
+ ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
ubi->ro_mode = 1;
}
ubi->leb_size = ubi->peb_size - ubi->leb_start;
if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
- ubi_msg("MTD device %d is write-protected, attach in "
- "read-only mode", ubi->mtd->index);
+ ubi_msg("MTD device %d is write-protected, attach in read-only mode",
+ ubi->mtd->index);
ubi->ro_mode = 1;
}
- ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
- ubi->peb_size, ubi->peb_size >> 10);
- ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
- ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
- if (ubi->hdrs_min_io_size != ubi->min_io_size)
- ubi_msg("sub-page size: %d",
- ubi->hdrs_min_io_size);
- ubi_msg("VID header offset: %d (aligned %d)",
- ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
- ubi_msg("data offset: %d", ubi->leb_start);
-
/*
- * Note, ideally, we have to initialize ubi->bad_peb_count here. But
+ * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
* unfortunately, MTD does not provide this information. We should loop
* over all physical eraseblocks and invoke mtd->block_is_bad() for
- * each physical eraseblock. So, we skip ubi->bad_peb_count
- * uninitialized and initialize it after scanning.
+ * each physical eraseblock. So, we leave @ubi->bad_peb_count
+ * uninitialized so far.
*/
return 0;
@@ -805,7 +795,7 @@ static int io_init(struct ubi_device *ubi)
* @ubi: UBI device description object
* @vol_id: ID of the volume to re-size
*
- * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
+ * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
* the volume table to the largest possible size. See comments in ubi-header.h
* for more description of the flag. Returns zero in case of success and a
* negative error code in case of failure.
@@ -816,6 +806,11 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
struct ubi_volume *vol = ubi->volumes[vol_id];
int err, old_reserved_pebs = vol->reserved_pebs;
+ if (ubi->ro_mode) {
+ ubi_warn("skip auto-resize because of R/O mode");
+ return 0;
+ }
+
/*
* Clear the auto-resize flag in the volume in-memory copy of the
* volume table, and 'ubi_resize_volume()' will propagate this change
@@ -830,8 +825,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* No available PEBs to re-size the volume, clear the flag on
* flash and exit.
*/
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol_id];
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
ubi_err("cannot clean auto-resize flag for volume %d",
@@ -857,6 +851,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* @mtd: MTD device description object
* @ubi_num: number to assign to the new UBI device
* @vid_hdr_offset: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
*
* This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
* to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -867,11 +862,18 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* Note, the invocations of this function has to be serialized by the
* @ubi_devices_mutex.
*/
-int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024)
{
struct ubi_device *ubi;
int i, err, ref = 0;
+ if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
+ return -EINVAL;
+
+ if (!max_beb_per1024)
+ max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
+
/*
* Check if we already have the same MTD device attached.
*
@@ -881,7 +883,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
if (ubi && mtd->index == ubi->mtd->index) {
- dbg_err("mtd%d is already attached to ubi%d",
+ ubi_err("mtd%d is already attached to ubi%d",
mtd->index, i);
return -EEXIST;
}
@@ -896,8 +898,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
* no sense to attach emulated MTD devices, so we prohibit this.
*/
if (mtd->type == MTD_UBIVOLUME) {
- ubi_err("refuse attaching mtd%d - it is already emulated on "
- "top of UBI", mtd->index);
+ ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
+ mtd->index);
return -EINVAL;
}
@@ -907,7 +909,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (!ubi_devices[ubi_num])
break;
if (ubi_num == UBI_MAX_DEVICES) {
- dbg_err("only %d UBI devices may be created",
+ ubi_err("only %d UBI devices may be created",
UBI_MAX_DEVICES);
return -ENFILE;
}
@@ -917,7 +919,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
/* Make sure ubi_num is not busy */
if (ubi_devices[ubi_num]) {
- dbg_err("ubi%d already exists", ubi_num);
+ ubi_err("ubi%d already exists", ubi_num);
return -EEXIST;
}
}
@@ -931,16 +933,44 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
ubi->vid_hdr_offset = vid_hdr_offset;
ubi->autoresize_vol_id = -1;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_pool.used = ubi->fm_pool.size = 0;
+ ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
+ fm_autoconvert = 1;
+ /*
+ * fm_pool.max_size is 5% of the total number of PEBs but it's also
+ * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
+ */
+ ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
+ ubi->mtd) / 100) * 2, UBI_FM_MAX_POOL_SIZE);
+ if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
+ ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
+
+ ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
+ ubi->fm_disabled = !fm_autoconvert;
+
+ if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
+ <= UBI_FM_MAX_START) {
+ ubi_err("More than %i PEBs are needed for fastmap, sorry.",
+ UBI_FM_MAX_START);
+ ubi->fm_disabled = 1;
+ }
+
+ ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+#else
+ ubi->fm_disabled = 1;
+#endif
mutex_init(&ubi->buf_mutex);
mutex_init(&ubi->ckvol_mutex);
mutex_init(&ubi->device_mutex);
spin_lock_init(&ubi->volumes_lock);
+ mutex_init(&ubi->fm_mutex);
+ init_rwsem(&ubi->fm_sem);
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
- dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb));
- dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
- err = io_init(ubi);
+ err = io_init(ubi, max_beb_per1024);
if (err)
goto out_free;
@@ -949,14 +979,19 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (!ubi->peb_buf)
goto out_free;
- err = ubi_debugging_init_dev(ubi);
- if (err)
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_size = ubi_calc_fm_size(ubi);
+ ubi->fm_buf = vzalloc(ubi->leb_size);
+ ubi->fm_cur = vzalloc(ubi->fm_size);
+ ubi->fm_cnt = 0;
+ ubi->fm_idx = 0;
+ if (!ubi->fm_buf)
goto out_free;
-
- err = attach_by_scanning(ubi);
+#endif
+ err = ubi_attach(ubi, 0);
if (err) {
- dbg_err("failed to attach by scanning, error %d", err);
- goto out_debugging;
+ ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
+ goto out_free;
}
if (ubi->autoresize_vol_id != -1) {
@@ -981,23 +1016,24 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
goto out_debugfs;
}
- ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
- ubi_msg("MTD device name: \"%s\"", mtd->name);
- ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
- ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
- ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
- ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count);
- ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
- ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
- ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
- ubi_msg("number of user volumes: %d",
- ubi->vol_count - UBI_INT_VOL_COUNT);
- ubi_msg("available PEBs: %d", ubi->avail_pebs);
- ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
- ubi_msg("number of PEBs reserved for bad PEB handling: %d",
- ubi->beb_rsvd_pebs);
- ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
- ubi_msg("image sequence number: %d", ubi->image_seq);
+ ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
+ mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
+ ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
+ ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
+ ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
+ ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
+ ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
+ ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
+ ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
+ ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
+ ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
+ ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
+ ubi->vtbl_slots);
+ ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
+ ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
+ ubi->image_seq);
+ ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
+ ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
/*
* The below lock makes sure we do not race with 'ubi_thread()' which
@@ -1020,12 +1056,11 @@ out_uif:
uif_close(ubi);
out_detach:
ubi_wl_close(ubi);
- free_internal_volumes(ubi);
+ ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
-out_debugging:
- ubi_debugging_exit_dev(ubi);
out_free:
vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
if (ref)
put_device(&ubi->dev);
else
@@ -1074,8 +1109,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_assert(ubi_num == ubi->ubi_num);
ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
- dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
-
+ ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* If we don't write a new fastmap at detach time we lose all
+ * EC updates that have been made since the last written fastmap. */
+ ubi->fm_idx = 0;
+ ubi_update_fastmap(ubi);
+#endif
/*
* Before freeing anything, we have to stop the background thread to
* prevent it from doing anything on this device while we are freeing.
@@ -1091,12 +1131,14 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_debugfs_exit_dev(ubi);
uif_close(ubi);
+
ubi_wl_close(ubi);
- free_internal_volumes(ubi);
+ ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
- ubi_debugging_exit_dev(ubi);
vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
+ vfree(ubi->fm_cur);
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
put_device(&ubi->dev);
return 0;
@@ -1229,7 +1271,7 @@ static int __init ubi_init(void)
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
- p->vid_hdr_offs);
+ p->vid_hdr_offs, p->max_beb_per1024);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
ubi_err("cannot attach mtd%d", mtd->index);
@@ -1275,7 +1317,7 @@ out:
ubi_err("UBI error: cannot initialize UBI, error %d", err);
return err;
}
-module_init(ubi_init);
+late_initcall(ubi_init);
static void __exit ubi_exit(void)
{
@@ -1309,8 +1351,7 @@ static int __init bytes_str_to_int(const char *str)
result = simple_strtoul(str, &endp, 0);
if (str == endp || result >= INT_MAX) {
- printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
- str);
+ ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str);
return -EINVAL;
}
@@ -1326,8 +1367,7 @@ static int __init bytes_str_to_int(const char *str)
case '\0':
break;
default:
- printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
- str);
+ ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str);
return -EINVAL;
}
@@ -1348,27 +1388,26 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
struct mtd_dev_param *p;
char buf[MTD_PARAM_LEN_MAX];
char *pbuf = &buf[0];
- char *tokens[2] = {NULL, NULL};
+ char *tokens[MTD_PARAM_MAX_COUNT];
if (!val)
return -EINVAL;
if (mtd_devs == UBI_MAX_DEVICES) {
- printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
- UBI_MAX_DEVICES);
+ ubi_err("UBI error: too many parameters, max. is %d\n",
+ UBI_MAX_DEVICES);
return -EINVAL;
}
len = strnlen(val, MTD_PARAM_LEN_MAX);
if (len == MTD_PARAM_LEN_MAX) {
- printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
- "max. is %d\n", val, MTD_PARAM_LEN_MAX);
+ ubi_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
+ val, MTD_PARAM_LEN_MAX);
return -EINVAL;
}
if (len == 0) {
- printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
- "ignored\n");
+ pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
return 0;
}
@@ -1378,12 +1417,11 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
if (buf[len - 1] == '\n')
buf[len - 1] = '\0';
- for (i = 0; i < 2; i++)
+ for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
tokens[i] = strsep(&pbuf, ",");
if (pbuf) {
- printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
- val);
+ ubi_err("UBI error: too many arguments at \"%s\"\n", val);
return -EINVAL;
}
@@ -1396,24 +1434,36 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
if (p->vid_hdr_offs < 0)
return p->vid_hdr_offs;
+ if (tokens[2]) {
+ int err = kstrtoint(tokens[2], 10, &p->max_beb_per1024);
+
+ if (err) {
+ ubi_err("UBI error: bad value for max_beb_per1024 parameter: %s",
+ tokens[2]);
+ return -EINVAL;
+ }
+ }
+
mtd_devs += 1;
return 0;
}
module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
-MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
- "mtd=<name|num|path>[,<vid_hdr_offs>].\n"
+MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024]].\n"
"Multiple \"mtd\" parameters may be specified.\n"
- "MTD devices may be specified by their number, name, or "
- "path to the MTD character device node.\n"
- "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
- "header position to be used by UBI.\n"
- "Example 1: mtd=/dev/mtd0 - attach MTD device "
- "/dev/mtd0.\n"
- "Example 2: mtd=content,1984 mtd=4 - attach MTD device "
- "with name \"content\" using VID header offset 1984, and "
- "MTD device number 4 with default VID header offset.");
-
+ "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
+ "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n"
+ "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
+ __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
+ "\n"
+ "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
+ "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
+ "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
+ "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
+#ifdef CONFIG_MTD_UBI_FASTMAP
+module_param(fm_autoconvert, bool, 0644);
+MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
+#endif
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/cdev.c b/ANDROID_3.4.5/drivers/mtd/ubi/cdev.c
index ad76592f..dfcc65b3 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/cdev.c
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/cdev.c
@@ -63,7 +63,7 @@ static int get_exclusive(struct ubi_volume_desc *desc)
users = vol->readers + vol->writers + vol->exclusive;
ubi_assert(users > 0);
if (users > 1) {
- dbg_err("%d users for volume %d", users, vol->vol_id);
+ ubi_err("%d users for volume %d", users, vol->vol_id);
err = -EBUSY;
} else {
vol->readers = vol->writers = 0;
@@ -140,9 +140,9 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
vol->updating = 0;
vfree(vol->upd_buf);
} else if (vol->changing_leb) {
- dbg_gen("only %lld of %lld bytes received for atomic LEB change"
- " for volume %d:%d, cancel", vol->upd_received,
- vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
+ dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
+ vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
+ vol->vol_id);
vol->changing_leb = 0;
vfree(vol->upd_buf);
}
@@ -159,7 +159,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
if (vol->updating) {
/* Update is in progress, seeking is prohibited */
- dbg_err("updating");
+ ubi_err("updating");
return -EBUSY;
}
@@ -178,7 +178,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
}
if (new_offset < 0 || new_offset > vol->used_bytes) {
- dbg_err("bad seek %lld", new_offset);
+ ubi_err("bad seek %lld", new_offset);
return -EINVAL;
}
@@ -189,7 +189,8 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
return new_offset;
}
-static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
+ int datasync)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_device *ubi = desc->vol->ubi;
@@ -216,11 +217,11 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
count, *offp, vol->vol_id);
if (vol->updating) {
- dbg_err("updating");
+ ubi_err("updating");
return -EBUSY;
}
if (vol->upd_marker) {
- dbg_err("damaged volume, update marker is set");
+ ubi_err("damaged volume, update marker is set");
return -EBADF;
}
if (*offp == vol->used_bytes || count == 0)
@@ -300,7 +301,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
if (off & (ubi->min_io_size - 1)) {
- dbg_err("unaligned position");
+ ubi_err("unaligned position");
return -EINVAL;
}
@@ -309,7 +310,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
/* We can write only in fractions of the minimum I/O unit */
if (count & (ubi->min_io_size - 1)) {
- dbg_err("unaligned write length");
+ ubi_err("unaligned write length");
return -EINVAL;
}
@@ -334,8 +335,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
break;
}
- err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
- UBI_UNKNOWN);
+ err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len);
if (err)
break;
@@ -477,9 +477,6 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
req.bytes < 0 || req.lnum >= vol->usable_leb_size)
break;
- if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
- req.dtype != UBI_UNKNOWN)
- break;
err = get_exclusive(desc);
if (err < 0)
@@ -518,7 +515,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
if (err)
break;
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
break;
}
@@ -532,7 +529,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
err = -EFAULT;
break;
}
- err = ubi_leb_map(desc, req.lnum, req.dtype);
+ err = ubi_leb_map(desc, req.lnum);
break;
}
@@ -647,8 +644,8 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
return 0;
bad:
- dbg_err("bad volume creation request");
- ubi_dbg_dump_mkvol_req(req);
+ ubi_err("bad volume creation request");
+ ubi_dump_mkvol_req(req);
return err;
}
@@ -713,12 +710,12 @@ static int rename_volumes(struct ubi_device *ubi,
for (i = 0; i < req->count - 1; i++) {
for (n = i + 1; n < req->count; n++) {
if (req->ents[i].vol_id == req->ents[n].vol_id) {
- dbg_err("duplicated volume id %d",
+ ubi_err("duplicated volume id %d",
req->ents[i].vol_id);
return -EINVAL;
}
if (!strcmp(req->ents[i].name, req->ents[n].name)) {
- dbg_err("duplicated volume name \"%s\"",
+ ubi_err("duplicated volume name \"%s\"",
req->ents[i].name);
return -EINVAL;
}
@@ -741,7 +738,7 @@ static int rename_volumes(struct ubi_device *ubi,
re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
if (IS_ERR(re->desc)) {
err = PTR_ERR(re->desc);
- dbg_err("cannot open volume %d, error %d", vol_id, err);
+ ubi_err("cannot open volume %d, error %d", vol_id, err);
kfree(re);
goto out_free;
}
@@ -757,7 +754,7 @@ static int rename_volumes(struct ubi_device *ubi,
re->new_name_len = name_len;
memcpy(re->new_name, name, name_len);
list_add_tail(&re->list, &rename_list);
- dbg_msg("will rename volume %d from \"%s\" to \"%s\"",
+ dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
vol_id, re->desc->vol->name, name);
}
@@ -800,7 +797,7 @@ static int rename_volumes(struct ubi_device *ubi,
continue;
/* The volume exists but busy, or an error occurred */
- dbg_err("cannot open volume \"%s\", error %d",
+ ubi_err("cannot open volume \"%s\", error %d",
re->new_name, err);
goto out_free;
}
@@ -815,7 +812,7 @@ static int rename_volumes(struct ubi_device *ubi,
re1->remove = 1;
re1->desc = desc;
list_add(&re1->list, &rename_list);
- dbg_msg("will remove volume %d, name \"%s\"",
+ dbg_gen("will remove volume %d, name \"%s\"",
re1->desc->vol->vol_id, re1->desc->vol->name);
}
@@ -946,7 +943,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
{
struct ubi_rnvol_req *req;
- dbg_msg("re-name volumes");
+ dbg_gen("re-name volumes");
req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
if (!req) {
err = -ENOMEM;
@@ -1014,7 +1011,8 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
* 'ubi_attach_mtd_dev()'.
*/
mutex_lock(&ubi_devices_mutex);
- err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
+ err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
+ req.max_beb_per1024);
mutex_unlock(&ubi_devices_mutex);
if (err < 0)
put_mtd_device(mtd);
@@ -1030,7 +1028,7 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
{
int ubi_num;
- dbg_gen("dettach MTD device");
+ dbg_gen("detach MTD device");
err = get_user(ubi_num, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/debug.c b/ANDROID_3.4.5/drivers/mtd/ubi/debug.c
index 61af9bb5..63cb1d72 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/debug.c
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/debug.c
@@ -18,243 +18,203 @@
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
-/*
- * Here we keep all the UBI debugging stuff which should normally be disabled
- * and compiled-out, but it is extremely helpful when hunting bugs or doing big
- * changes.
- */
-
-#ifdef CONFIG_MTD_UBI_DEBUG
-
#include "ubi.h"
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+
/**
- * ubi_dbg_dump_ec_hdr - dump an erase counter header.
+ * ubi_dump_flash - dump a region of flash.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to dump
+ * @offset: the starting offset within the physical eraseblock to dump
+ * @len: the length of the region to dump
+ */
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
+{
+ int err;
+ size_t read;
+ void *buf;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+ buf = vmalloc(len);
+ if (!buf)
+ return;
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err && err != -EUCLEAN) {
+ ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
+ goto out;
+ }
+
+ ubi_msg("dumping %d bytes of data from PEB %d, offset %d",
+ len, pnum, offset);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+out:
+ vfree(buf);
+ return;
+}
+
+/**
+ * ubi_dump_ec_hdr - dump an erase counter header.
* @ec_hdr: the erase counter header to dump
*/
-void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
{
- printk(KERN_DEBUG "Erase counter header dump:\n");
- printk(KERN_DEBUG "\tmagic %#08x\n",
- be32_to_cpu(ec_hdr->magic));
- printk(KERN_DEBUG "\tversion %d\n", (int)ec_hdr->version);
- printk(KERN_DEBUG "\tec %llu\n",
- (long long)be64_to_cpu(ec_hdr->ec));
- printk(KERN_DEBUG "\tvid_hdr_offset %d\n",
- be32_to_cpu(ec_hdr->vid_hdr_offset));
- printk(KERN_DEBUG "\tdata_offset %d\n",
- be32_to_cpu(ec_hdr->data_offset));
- printk(KERN_DEBUG "\timage_seq %d\n",
- be32_to_cpu(ec_hdr->image_seq));
- printk(KERN_DEBUG "\thdr_crc %#08x\n",
- be32_to_cpu(ec_hdr->hdr_crc));
- printk(KERN_DEBUG "erase counter header hexdump:\n");
+ pr_err("Erase counter header dump:\n");
+ pr_err("\tmagic %#08x\n", be32_to_cpu(ec_hdr->magic));
+ pr_err("\tversion %d\n", (int)ec_hdr->version);
+ pr_err("\tec %llu\n", (long long)be64_to_cpu(ec_hdr->ec));
+ pr_err("\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset));
+ pr_err("\tdata_offset %d\n", be32_to_cpu(ec_hdr->data_offset));
+ pr_err("\timage_seq %d\n", be32_to_cpu(ec_hdr->image_seq));
+ pr_err("\thdr_crc %#08x\n", be32_to_cpu(ec_hdr->hdr_crc));
+ pr_err("erase counter header hexdump:\n");
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
ec_hdr, UBI_EC_HDR_SIZE, 1);
}
/**
- * ubi_dbg_dump_vid_hdr - dump a volume identifier header.
+ * ubi_dump_vid_hdr - dump a volume identifier header.
* @vid_hdr: the volume identifier header to dump
*/
-void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
{
- printk(KERN_DEBUG "Volume identifier header dump:\n");
- printk(KERN_DEBUG "\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
- printk(KERN_DEBUG "\tversion %d\n", (int)vid_hdr->version);
- printk(KERN_DEBUG "\tvol_type %d\n", (int)vid_hdr->vol_type);
- printk(KERN_DEBUG "\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
- printk(KERN_DEBUG "\tcompat %d\n", (int)vid_hdr->compat);
- printk(KERN_DEBUG "\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
- printk(KERN_DEBUG "\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
- printk(KERN_DEBUG "\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
- printk(KERN_DEBUG "\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
- printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
- printk(KERN_DEBUG "\tsqnum %llu\n",
+ pr_err("Volume identifier header dump:\n");
+ pr_err("\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
+ pr_err("\tversion %d\n", (int)vid_hdr->version);
+ pr_err("\tvol_type %d\n", (int)vid_hdr->vol_type);
+ pr_err("\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
+ pr_err("\tcompat %d\n", (int)vid_hdr->compat);
+ pr_err("\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
+ pr_err("\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
+ pr_err("\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
+ pr_err("\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
+ pr_err("\tsqnum %llu\n",
(unsigned long long)be64_to_cpu(vid_hdr->sqnum));
- printk(KERN_DEBUG "\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
- printk(KERN_DEBUG "Volume identifier header hexdump:\n");
+ pr_err("\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
+ pr_err("Volume identifier header hexdump:\n");
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
vid_hdr, UBI_VID_HDR_SIZE, 1);
}
/**
- * ubi_dbg_dump_vol_info- dump volume information.
+ * ubi_dump_vol_info - dump volume information.
* @vol: UBI volume description object
*/
-void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
+void ubi_dump_vol_info(const struct ubi_volume *vol)
{
- printk(KERN_DEBUG "Volume information dump:\n");
- printk(KERN_DEBUG "\tvol_id %d\n", vol->vol_id);
- printk(KERN_DEBUG "\treserved_pebs %d\n", vol->reserved_pebs);
- printk(KERN_DEBUG "\talignment %d\n", vol->alignment);
- printk(KERN_DEBUG "\tdata_pad %d\n", vol->data_pad);
- printk(KERN_DEBUG "\tvol_type %d\n", vol->vol_type);
- printk(KERN_DEBUG "\tname_len %d\n", vol->name_len);
- printk(KERN_DEBUG "\tusable_leb_size %d\n", vol->usable_leb_size);
- printk(KERN_DEBUG "\tused_ebs %d\n", vol->used_ebs);
- printk(KERN_DEBUG "\tused_bytes %lld\n", vol->used_bytes);
- printk(KERN_DEBUG "\tlast_eb_bytes %d\n", vol->last_eb_bytes);
- printk(KERN_DEBUG "\tcorrupted %d\n", vol->corrupted);
- printk(KERN_DEBUG "\tupd_marker %d\n", vol->upd_marker);
+ pr_err("Volume information dump:\n");
+ pr_err("\tvol_id %d\n", vol->vol_id);
+ pr_err("\treserved_pebs %d\n", vol->reserved_pebs);
+ pr_err("\talignment %d\n", vol->alignment);
+ pr_err("\tdata_pad %d\n", vol->data_pad);
+ pr_err("\tvol_type %d\n", vol->vol_type);
+ pr_err("\tname_len %d\n", vol->name_len);
+ pr_err("\tusable_leb_size %d\n", vol->usable_leb_size);
+ pr_err("\tused_ebs %d\n", vol->used_ebs);
+ pr_err("\tused_bytes %lld\n", vol->used_bytes);
+ pr_err("\tlast_eb_bytes %d\n", vol->last_eb_bytes);
+ pr_err("\tcorrupted %d\n", vol->corrupted);
+ pr_err("\tupd_marker %d\n", vol->upd_marker);
if (vol->name_len <= UBI_VOL_NAME_MAX &&
strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
- printk(KERN_DEBUG "\tname %s\n", vol->name);
+ pr_err("\tname %s\n", vol->name);
} else {
- printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
vol->name[0], vol->name[1], vol->name[2],
vol->name[3], vol->name[4]);
}
}
/**
- * ubi_dbg_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
+ * ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
* @r: the object to dump
* @idx: volume table index
*/
-void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
{
int name_len = be16_to_cpu(r->name_len);
- printk(KERN_DEBUG "Volume table record %d dump:\n", idx);
- printk(KERN_DEBUG "\treserved_pebs %d\n",
- be32_to_cpu(r->reserved_pebs));
- printk(KERN_DEBUG "\talignment %d\n", be32_to_cpu(r->alignment));
- printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(r->data_pad));
- printk(KERN_DEBUG "\tvol_type %d\n", (int)r->vol_type);
- printk(KERN_DEBUG "\tupd_marker %d\n", (int)r->upd_marker);
- printk(KERN_DEBUG "\tname_len %d\n", name_len);
+ pr_err("Volume table record %d dump:\n", idx);
+ pr_err("\treserved_pebs %d\n", be32_to_cpu(r->reserved_pebs));
+ pr_err("\talignment %d\n", be32_to_cpu(r->alignment));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(r->data_pad));
+ pr_err("\tvol_type %d\n", (int)r->vol_type);
+ pr_err("\tupd_marker %d\n", (int)r->upd_marker);
+ pr_err("\tname_len %d\n", name_len);
if (r->name[0] == '\0') {
- printk(KERN_DEBUG "\tname NULL\n");
+ pr_err("\tname NULL\n");
return;
}
if (name_len <= UBI_VOL_NAME_MAX &&
strnlen(&r->name[0], name_len + 1) == name_len) {
- printk(KERN_DEBUG "\tname %s\n", &r->name[0]);
+ pr_err("\tname %s\n", &r->name[0]);
} else {
- printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
r->name[0], r->name[1], r->name[2], r->name[3],
r->name[4]);
}
- printk(KERN_DEBUG "\tcrc %#08x\n", be32_to_cpu(r->crc));
+ pr_err("\tcrc %#08x\n", be32_to_cpu(r->crc));
}
/**
- * ubi_dbg_dump_sv - dump a &struct ubi_scan_volume object.
- * @sv: the object to dump
+ * ubi_dump_av - dump a &struct ubi_ainf_volume object.
+ * @av: the object to dump
*/
-void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv)
+void ubi_dump_av(const struct ubi_ainf_volume *av)
{
- printk(KERN_DEBUG "Volume scanning information dump:\n");
- printk(KERN_DEBUG "\tvol_id %d\n", sv->vol_id);
- printk(KERN_DEBUG "\thighest_lnum %d\n", sv->highest_lnum);
- printk(KERN_DEBUG "\tleb_count %d\n", sv->leb_count);
- printk(KERN_DEBUG "\tcompat %d\n", sv->compat);
- printk(KERN_DEBUG "\tvol_type %d\n", sv->vol_type);
- printk(KERN_DEBUG "\tused_ebs %d\n", sv->used_ebs);
- printk(KERN_DEBUG "\tlast_data_size %d\n", sv->last_data_size);
- printk(KERN_DEBUG "\tdata_pad %d\n", sv->data_pad);
+ pr_err("Volume attaching information dump:\n");
+ pr_err("\tvol_id %d\n", av->vol_id);
+ pr_err("\thighest_lnum %d\n", av->highest_lnum);
+ pr_err("\tleb_count %d\n", av->leb_count);
+ pr_err("\tcompat %d\n", av->compat);
+ pr_err("\tvol_type %d\n", av->vol_type);
+ pr_err("\tused_ebs %d\n", av->used_ebs);
+ pr_err("\tlast_data_size %d\n", av->last_data_size);
+ pr_err("\tdata_pad %d\n", av->data_pad);
}
/**
- * ubi_dbg_dump_seb - dump a &struct ubi_scan_leb object.
- * @seb: the object to dump
+ * ubi_dump_aeb - dump a &struct ubi_ainf_peb object.
+ * @aeb: the object to dump
* @type: object type: 0 - not corrupted, 1 - corrupted
*/
-void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type)
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type)
{
- printk(KERN_DEBUG "eraseblock scanning information dump:\n");
- printk(KERN_DEBUG "\tec %d\n", seb->ec);
- printk(KERN_DEBUG "\tpnum %d\n", seb->pnum);
+ pr_err("eraseblock attaching information dump:\n");
+ pr_err("\tec %d\n", aeb->ec);
+ pr_err("\tpnum %d\n", aeb->pnum);
if (type == 0) {
- printk(KERN_DEBUG "\tlnum %d\n", seb->lnum);
- printk(KERN_DEBUG "\tscrub %d\n", seb->scrub);
- printk(KERN_DEBUG "\tsqnum %llu\n", seb->sqnum);
+ pr_err("\tlnum %d\n", aeb->lnum);
+ pr_err("\tscrub %d\n", aeb->scrub);
+ pr_err("\tsqnum %llu\n", aeb->sqnum);
}
}
/**
- * ubi_dbg_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
+ * ubi_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
* @req: the object to dump
*/
-void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req)
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
{
char nm[17];
- printk(KERN_DEBUG "Volume creation request dump:\n");
- printk(KERN_DEBUG "\tvol_id %d\n", req->vol_id);
- printk(KERN_DEBUG "\talignment %d\n", req->alignment);
- printk(KERN_DEBUG "\tbytes %lld\n", (long long)req->bytes);
- printk(KERN_DEBUG "\tvol_type %d\n", req->vol_type);
- printk(KERN_DEBUG "\tname_len %d\n", req->name_len);
+ pr_err("Volume creation request dump:\n");
+ pr_err("\tvol_id %d\n", req->vol_id);
+ pr_err("\talignment %d\n", req->alignment);
+ pr_err("\tbytes %lld\n", (long long)req->bytes);
+ pr_err("\tvol_type %d\n", req->vol_type);
+ pr_err("\tname_len %d\n", req->name_len);
memcpy(nm, req->name, 16);
nm[16] = 0;
- printk(KERN_DEBUG "\t1st 16 characters of name: %s\n", nm);
-}
-
-/**
- * ubi_dbg_dump_flash - dump a region of flash.
- * @ubi: UBI device description object
- * @pnum: the physical eraseblock number to dump
- * @offset: the starting offset within the physical eraseblock to dump
- * @len: the length of the region to dump
- */
-void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
-{
- int err;
- size_t read;
- void *buf;
- loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
-
- buf = vmalloc(len);
- if (!buf)
- return;
- err = mtd_read(ubi->mtd, addr, len, &read, buf);
- if (err && err != -EUCLEAN) {
- ubi_err("error %d while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, len, pnum, offset, read);
- goto out;
- }
-
- dbg_msg("dumping %d bytes of data from PEB %d, offset %d",
- len, pnum, offset);
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
-out:
- vfree(buf);
- return;
-}
-
-/**
- * ubi_debugging_init_dev - initialize debugging for an UBI device.
- * @ubi: UBI device description object
- *
- * This function initializes debugging-related data for UBI device @ubi.
- * Returns zero in case of success and a negative error code in case of
- * failure.
- */
-int ubi_debugging_init_dev(struct ubi_device *ubi)
-{
- ubi->dbg = kzalloc(sizeof(struct ubi_debug_info), GFP_KERNEL);
- if (!ubi->dbg)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * ubi_debugging_exit_dev - free debugging data for an UBI device.
- * @ubi: UBI device description object
- */
-void ubi_debugging_exit_dev(struct ubi_device *ubi)
-{
- kfree(ubi->dbg);
+ pr_err("\t1st 16 characters of name: %s\n", nm);
}
/*
@@ -271,6 +231,9 @@ static struct dentry *dfs_rootdir;
*/
int ubi_debugfs_init(void)
{
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
dfs_rootdir = debugfs_create_dir("ubi", NULL);
if (IS_ERR_OR_NULL(dfs_rootdir)) {
int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
@@ -288,7 +251,8 @@ int ubi_debugfs_init(void)
*/
void ubi_debugfs_exit(void)
{
- debugfs_remove(dfs_rootdir);
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove(dfs_rootdir);
}
/* Read an UBI debugfs file */
@@ -305,7 +269,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
- d = ubi->dbg;
+ d = &ubi->dbg;
if (dent == d->dfs_chk_gen)
val = d->chk_gen;
@@ -351,7 +315,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
- d = ubi->dbg;
+ d = &ubi->dbg;
buf_size = min_t(size_t, count, (sizeof(buf) - 1));
if (copy_from_user(buf, user_buf, buf_size)) {
@@ -408,7 +372,10 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
unsigned long ubi_num = ubi->ubi_num;
const char *fname;
struct dentry *dent;
- struct ubi_debug_info *d = ubi->dbg;
+ struct ubi_debug_info *d = &ubi->dbg;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
ubi->ubi_num);
@@ -477,7 +444,6 @@ out:
*/
void ubi_debugfs_exit_dev(struct ubi_device *ubi)
{
- debugfs_remove_recursive(ubi->dbg->dfs_dir);
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove_recursive(ubi->dbg.dfs_dir);
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/debug.h b/ANDROID_3.4.5/drivers/mtd/ubi/debug.h
index ead2cd16..33f8f3b2 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/debug.h
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/debug.h
@@ -21,31 +21,26 @@
#ifndef __UBI_DEBUG_H__
#define __UBI_DEBUG_H__
-#ifdef CONFIG_MTD_UBI_DEBUG
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
+
#include <linux/random.h>
#define ubi_assert(expr) do { \
if (unlikely(!(expr))) { \
- printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
+ pr_crit("UBI assert failed in %s at %u (pid %d)\n", \
__func__, __LINE__, current->pid); \
- ubi_dbg_dump_stack(); \
+ dump_stack(); \
} \
} while (0)
-#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
-
-#define ubi_dbg_dump_stack() dump_stack()
-
-#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
+#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
print_hex_dump(l, ps, pt, r, g, b, len, a)
#define ubi_dbg_msg(type, fmt, ...) \
- pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
-
-/* Just a debugging messages not related to any specific UBI subsystem */
-#define dbg_msg(fmt, ...) \
- printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
- current->pid, __func__, ##__VA_ARGS__)
+ pr_debug("UBI DBG " type " (pid %d): " fmt "\n", current->pid, \
+ ##__VA_ARGS__)
/* General debugging messages */
#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
@@ -58,62 +53,18 @@
/* Initialization and build messages */
#define dbg_bld(fmt, ...) ubi_dbg_msg("bld", fmt, ##__VA_ARGS__)
-void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
-void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
-void ubi_dbg_dump_vol_info(const struct ubi_volume *vol);
-void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
-void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv);
-void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
-void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
-void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
-int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
-int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
- int offset, int len);
-int ubi_debugging_init_dev(struct ubi_device *ubi);
-void ubi_debugging_exit_dev(struct ubi_device *ubi);
+void ubi_dump_vol_info(const struct ubi_volume *vol);
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
+void ubi_dump_av(const struct ubi_ainf_volume *av);
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type);
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req);
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
+ int len);
int ubi_debugfs_init(void);
void ubi_debugfs_exit(void);
int ubi_debugfs_init_dev(struct ubi_device *ubi);
void ubi_debugfs_exit_dev(struct ubi_device *ubi);
-/*
- * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
- * + 2 for the number plus 1 for the trailing zero byte.
- */
-#define UBI_DFS_DIR_NAME "ubi%d"
-#define UBI_DFS_DIR_LEN (3 + 2 + 1)
-
-/**
- * struct ubi_debug_info - debugging information for an UBI device.
- *
- * @chk_gen: if UBI general extra checks are enabled
- * @chk_io: if UBI I/O extra checks are enabled
- * @disable_bgt: disable the background task for testing purposes
- * @emulate_bitflips: emulate bit-flips for testing purposes
- * @emulate_io_failures: emulate write/erase failures for testing purposes
- * @dfs_dir_name: name of debugfs directory containing files of this UBI device
- * @dfs_dir: direntry object of the UBI device debugfs directory
- * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
- * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
- * @dfs_disable_bgt: debugfs knob to disable the background task
- * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
- * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
- */
-struct ubi_debug_info {
- unsigned int chk_gen:1;
- unsigned int chk_io:1;
- unsigned int disable_bgt:1;
- unsigned int emulate_bitflips:1;
- unsigned int emulate_io_failures:1;
- char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
- struct dentry *dfs_dir;
- struct dentry *dfs_chk_gen;
- struct dentry *dfs_chk_io;
- struct dentry *dfs_disable_bgt;
- struct dentry *dfs_emulate_bitflips;
- struct dentry *dfs_emulate_io_failures;
-};
-
/**
* ubi_dbg_is_bgt_disabled - if the background thread is disabled.
* @ubi: UBI device description object
@@ -123,7 +74,7 @@ struct ubi_debug_info {
*/
static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
{
- return ubi->dbg->disable_bgt;
+ return ubi->dbg.disable_bgt;
}
/**
@@ -134,7 +85,7 @@ static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_bitflips)
+ if (ubi->dbg.emulate_bitflips)
return !(random32() % 200);
return 0;
}
@@ -148,7 +99,7 @@ static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_io_failures)
+ if (ubi->dbg.emulate_io_failures)
return !(random32() % 500);
return 0;
}
@@ -162,78 +113,18 @@ static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_io_failures)
+ if (ubi->dbg.emulate_io_failures)
return !(random32() % 400);
return 0;
}
-#else
-
-/* Use "if (0)" to make compiler check arguments even if debugging is off */
-#define ubi_assert(expr) do { \
- if (0) { \
- printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
- __func__, __LINE__, current->pid); \
- } \
-} while (0)
-
-#define dbg_err(fmt, ...) do { \
- if (0) \
- ubi_err(fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define ubi_dbg_msg(fmt, ...) do { \
- if (0) \
- printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
-} while (0)
-
-#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_gen(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_eba(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_wl(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_io(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_bld(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-
-static inline void ubi_dbg_dump_stack(void) { return; }
-static inline void
-ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) { return; }
-static inline void
-ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) { return; }
-static inline void
-ubi_dbg_dump_vol_info(const struct ubi_volume *vol) { return; }
-static inline void
-ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) { return; }
-static inline void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) { return; }
-static inline void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb,
- int type) { return; }
-static inline void
-ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req) { return; }
-static inline void ubi_dbg_dump_flash(struct ubi_device *ubi,
- int pnum, int offset, int len) { return; }
-static inline void
-ubi_dbg_print_hex_dump(const char *l, const char *ps, int pt, int r,
- int g, const void *b, size_t len, bool a) { return; }
-static inline int ubi_dbg_check_all_ff(struct ubi_device *ubi,
- int pnum, int offset,
- int len) { return 0; }
-static inline int ubi_dbg_check_write(struct ubi_device *ubi,
- const void *buf, int pnum,
- int offset, int len) { return 0; }
-
-static inline int ubi_debugging_init_dev(struct ubi_device *ubi) { return 0; }
-static inline void ubi_debugging_exit_dev(struct ubi_device *ubi) { return; }
-static inline int ubi_debugfs_init(void) { return 0; }
-static inline void ubi_debugfs_exit(void) { return; }
-static inline int ubi_debugfs_init_dev(struct ubi_device *ubi) { return 0; }
-static inline void ubi_debugfs_exit_dev(struct ubi_device *ubi) { return; }
-
-static inline int
-ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi) { return 0; }
-static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi) { return 0; }
-static inline int
-ubi_dbg_is_write_failure(const struct ubi_device *ubi) { return 0; }
-static inline int
-ubi_dbg_is_erase_failure(const struct ubi_device *ubi) { return 0; }
+static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_io;
+}
-#endif /* !CONFIG_MTD_UBI_DEBUG */
+static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_gen;
+}
#endif /* !__UBI_DEBUG_H__ */
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/eba.c b/ANDROID_3.4.5/drivers/mtd/ubi/eba.c
index 2455d620..5501b5ab 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/eba.c
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/eba.c
@@ -57,7 +57,7 @@
* global sequence counter value. It also increases the global sequence
* counter.
*/
-static unsigned long long next_sqnum(struct ubi_device *ubi)
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
{
unsigned long long sqnum;
@@ -340,8 +340,10 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
- err = ubi_wl_put_peb(ubi, pnum, 0);
+ up_read(&ubi->fm_sem);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
out_unlock:
leb_write_unlock(ubi, vol_id, lnum);
@@ -420,9 +422,8 @@ retry:
*/
if (err == UBI_IO_BAD_HDR_EBADMSG ||
err == UBI_IO_BAD_HDR) {
- ubi_warn("corrupted VID header at PEB "
- "%d, LEB %d:%d", pnum, vol_id,
- lnum);
+ ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
+ pnum, vol_id, lnum);
err = -EBADMSG;
} else
ubi_ro_mode(ubi);
@@ -479,6 +480,60 @@ out_unlock:
return err;
}
+int ubi_eba_read_leb_oob(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int offset, int len, void *spare)
+{
+ int err, pnum, scrub = 0, vol_id = vol->vol_id;
+ uint32_t uninitialized_var(crc);
+
+ err = leb_read_lock(ubi, vol_id, lnum);
+ if (err)
+ return err;
+
+ pnum = vol->eba_tbl[lnum];
+ if (pnum < 0) {
+ /*
+ * The logical eraseblock is not mapped, fill the whole buffer
+ * with 0xFF bytes. The exception is static volumes for which
+ * it is an error to read unmapped logical eraseblocks.
+ */
+ dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
+ len, offset, vol_id, lnum);
+ leb_read_unlock(ubi, vol_id, lnum);
+ ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
+ if(buf) memset(buf, 0xFF, len);
+ memset(spare, 0xFF, 8);
+ return 0;
+ }
+
+ dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+ err = ubi_io_read_data_oob(ubi, buf, pnum, offset, len, spare);
+
+ if (err) {
+ if (err == UBI_IO_BITFLIPS) {
+ scrub = 1;
+ err = 0;
+ } else if (mtd_is_eccerr(err)) {
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ goto out_unlock;
+ scrub = 1;
+ } else
+ goto out_unlock;
+ }
+
+ if (scrub)
+ err = ubi_wl_scrub_peb(ubi, pnum);
+
+ leb_read_unlock(ubi, vol_id, lnum);
+ return err;
+
+out_unlock:
+ leb_read_unlock(ubi, vol_id, lnum);
+ return err;
+}
+
+
/**
* recover_peb - recover from write failure.
* @ubi: UBI device description object
@@ -507,7 +562,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
return -ENOMEM;
retry:
- new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
+ new_pnum = ubi_wl_get_peb(ubi);
if (new_pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
return new_pnum;
@@ -522,7 +577,7 @@ retry:
goto out_put;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
if (err)
goto write_error;
@@ -549,8 +604,10 @@ retry:
mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = new_pnum;
- ubi_wl_put_peb(ubi, pnum, 1);
+ up_read(&ubi->fm_sem);
+ ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
ubi_msg("data was successfully recovered");
return 0;
@@ -558,7 +615,7 @@ retry:
out_unlock:
mutex_unlock(&ubi->buf_mutex);
out_put:
- ubi_wl_put_peb(ubi, new_pnum, 1);
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -568,7 +625,7 @@ write_error:
* get another one.
*/
ubi_warn("failed to write to PEB %d", new_pnum);
- ubi_wl_put_peb(ubi, new_pnum, 1);
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
if (++tries > UBI_IO_RETRIES) {
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -585,7 +642,6 @@ write_error:
* @buf: the data to write
* @offset: offset within the logical eraseblock where to write
* @len: how many bytes to write
- * @dtype: data type
*
* This function writes data to logical eraseblock @lnum of a dynamic volume
* @vol. Returns zero in case of success and a negative error code in case
@@ -593,11 +649,10 @@ write_error:
* written to the flash media, but may be some garbage.
*/
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
- const void *buf, int offset, int len, int dtype)
+ const void *buf, int offset, int len)
{
int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
-
if (ubi->ro_mode)
return -EROFS;
@@ -634,14 +689,14 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
}
vid_hdr->vol_type = UBI_VID_DYNAMIC;
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
@@ -661,6 +716,115 @@ retry:
if (len) {
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
if (err) {
+ ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+ goto write_error;
+ }
+ }
+
+ down_read(&ubi->fm_sem);
+ vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_sem);
+
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return 0;
+
+write_error:
+ if (err != -EIO || !ubi->bad_allowed) {
+ ubi_ro_mode(ubi);
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ /*
+ * Fortunately, this is the first write operation to this physical
+ * eraseblock, so just put it and request a new one. We assume that if
+ * this physical eraseblock went bad, the erase code will handle that.
+ */
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ if (err || ++tries > UBI_IO_RETRIES) {
+ ubi_ro_mode(ubi);
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ubi_msg("try another PEB");
+ goto retry;
+}
+
+int ubi_eba_write_leb_oob(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ const void *buf, int offset, int len, void *spare, int dtype)
+{
+ int err, pnum, tries = 0, vol_id = vol->vol_id;
+ struct ubi_vid_hdr *vid_hdr;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ return err;
+
+ pnum = vol->eba_tbl[lnum];
+ if (pnum >= 0) {
+ dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ err = ubi_io_write_data_oob(ubi, buf, pnum, offset, len, spare);
+ if (err) {
+ ubi_warn("failed to write data to PEB %d", pnum);
+ if (err == -EIO && ubi->bad_allowed)
+ err = recover_peb(ubi, pnum, vol_id, lnum, buf,
+ offset, len);
+ if (err)
+ ubi_ro_mode(ubi);
+ }
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+ }
+
+ /*
+ * The logical eraseblock is not mapped. We have to get a free physical
+ * eraseblock and write the volume identifier header there first.
+ */
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr) {
+ leb_write_unlock(ubi, vol_id, lnum);
+ return -ENOMEM;
+ }
+
+ vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
+ vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
+retry:
+ pnum = ubi_wl_get_peb(ubi);
+ if (pnum < 0) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ leb_write_unlock(ubi, vol_id, lnum);
+ return pnum;
+ }
+
+ dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
+ if (err) {
+ ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
+ vol_id, lnum, pnum);
+ goto write_error;
+ }
+
+ if (len) {
+ err = ubi_io_write_data_oob(ubi, buf, pnum, offset, len, spare);
+ if (err) {
ubi_warn("failed to write %d bytes at offset %d of "
"LEB %d:%d, PEB %d", len, offset, vol_id,
lnum, pnum);
@@ -687,7 +851,7 @@ write_error:
* eraseblock, so just put it and request a new one. We assume that if
* this physical eraseblock went bad, the erase code will handle that.
*/
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
@@ -695,7 +859,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -707,7 +871,6 @@ write_error:
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
- * @dtype: data type
* @used_ebs: how many logical eraseblocks will this volume contain
*
* This function writes data to logical eraseblock @lnum of static volume
@@ -724,8 +887,7 @@ write_error:
* code in case of failure.
*/
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype,
- int used_ebs)
+ int lnum, const void *buf, int len, int used_ebs)
{
int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
@@ -750,7 +912,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -763,7 +925,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
vid_hdr->data_crc = cpu_to_be32(crc);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
@@ -788,7 +950,9 @@ retry:
}
ubi_assert(vol->eba_tbl[lnum] < 0);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_sem);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -807,7 +971,7 @@ write_error:
return err;
}
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
@@ -815,7 +979,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -827,7 +991,6 @@ write_error:
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
- * @dtype: data type
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
@@ -839,7 +1002,7 @@ write_error:
* LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
*/
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype)
+ int lnum, const void *buf, int len)
{
int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
@@ -856,7 +1019,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
return err;
- return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
}
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -868,7 +1031,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
if (err)
goto out_mutex;
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -881,7 +1044,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
vid_hdr->data_crc = cpu_to_be32(crc);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
err = pnum;
goto out_leb_unlock;
@@ -905,12 +1068,14 @@ retry:
}
if (vol->eba_tbl[lnum] >= 0) {
- err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
if (err)
goto out_leb_unlock;
}
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_sem);
out_leb_unlock:
leb_write_unlock(ubi, vol_id, lnum);
@@ -930,13 +1095,13 @@ write_error:
goto out_leb_unlock;
}
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
goto out_leb_unlock;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -1044,9 +1209,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* cancel it.
*/
if (vol->eba_tbl[lnum] != from) {
- dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to "
- "PEB %d, cancel", vol_id, lnum, from,
- vol->eba_tbl[lnum]);
+ dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
+ vol_id, lnum, from, vol->eba_tbl[lnum]);
err = MOVE_CANCEL_RACE;
goto out_unlock_leb;
}
@@ -1096,7 +1260,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
vid_hdr->data_size = cpu_to_be32(data_size);
vid_hdr->data_crc = cpu_to_be32(crc);
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
if (err) {
@@ -1111,8 +1275,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
if (err) {
if (err != UBI_IO_BITFLIPS) {
- ubi_warn("error %d while reading VID header back from "
- "PEB %d", err, to);
+ ubi_warn("error %d while reading VID header back from PEB %d",
+ err, to);
if (is_error_sane(err))
err = MOVE_TARGET_RD_ERR;
} else
@@ -1138,8 +1302,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
if (err) {
if (err != UBI_IO_BITFLIPS) {
- ubi_warn("error %d while reading data back "
- "from PEB %d", err, to);
+ ubi_warn("error %d while reading data back from PEB %d",
+ err, to);
if (is_error_sane(err))
err = MOVE_TARGET_RD_ERR;
} else
@@ -1150,15 +1314,17 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
cond_resched();
if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
- ubi_warn("read data back from PEB %d and it is "
- "different", to);
+ ubi_warn("read data back from PEB %d and it is different",
+ to);
err = -EINVAL;
goto out_unlock_buf;
}
}
ubi_assert(vol->eba_tbl[lnum] == from);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = to;
+ up_read(&ubi->fm_sem);
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
@@ -1171,7 +1337,7 @@ out_unlock_leb:
* print_rsvd_warning - warn about not having enough reserved PEBs.
* @ubi: UBI device description object
*
- * This is a helper function for 'ubi_eba_init_scan()' which is called when UBI
+ * This is a helper function for 'ubi_eba_init()' which is called when UBI
* cannot reserve enough PEBs for bad block handling. This function makes a
* decision whether we have to print a warning or not. The algorithm is as
* follows:
@@ -1186,13 +1352,13 @@ out_unlock_leb:
* reported by real users.
*/
static void print_rsvd_warning(struct ubi_device *ubi,
- struct ubi_scan_info *si)
+ struct ubi_attach_info *ai)
{
/*
* The 1 << 18 (256KiB) number is picked randomly, just a reasonably
* large number to distinguish between newly flashed and used images.
*/
- if (si->max_sqnum > (1 << 18)) {
+ if (ai->max_sqnum > (1 << 18)) {
int min = ubi->beb_rsvd_level / 10;
if (!min)
@@ -1201,27 +1367,123 @@ static void print_rsvd_warning(struct ubi_device *ubi,
return;
}
- ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d,"
- " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
+ ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
+ ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
if (ubi->corr_peb_count)
ubi_warn("%d PEBs are corrupted and not used",
- ubi->corr_peb_count);
+ ubi->corr_peb_count);
+}
+
+/**
+ * self_check_eba - run a self check on the EBA table constructed by fastmap.
+ * @ubi: UBI device description object
+ * @ai_fastmap: UBI attach info object created by fastmap
+ * @ai_scan: UBI attach info object created by scanning
+ *
+ * Returns < 0 in case of an internal error, 0 otherwise.
+ * If a bad EBA table entry was found it will be printed out and
+ * ubi_assert() triggers.
+ */
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+		   struct ubi_attach_info *ai_scan)
+{
+	int i, j, num_volumes, ret = 0;
+	int **scan_eba, **fm_eba;
+	struct ubi_ainf_volume *av;
+	struct ubi_volume *vol;
+	struct ubi_ainf_peb *aeb;
+	struct rb_node *rb;
+
+	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+	/*
+	 * Zero-initialize the per-volume pointer arrays so the cleanup loop
+	 * below never kfree()s an uninitialized slot when we bail out early
+	 * (kfree(NULL) is a no-op).
+	 */
+	scan_eba = kzalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
+	if (!scan_eba)
+		return -ENOMEM;
+
+	fm_eba = kzalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
+	if (!fm_eba) {
+		kfree(scan_eba);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_volumes; i++) {
+		vol = ubi->volumes[i];
+		if (!vol)
+			continue;
+
+		scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
+				      GFP_KERNEL);
+		if (!scan_eba[i]) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
+				    GFP_KERNEL);
+		if (!fm_eba[i]) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		/* Start with both tables fully unmapped */
+		for (j = 0; j < vol->reserved_pebs; j++)
+			scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
+
+		av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
+		if (!av)
+			continue;
+
+		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+			scan_eba[i][aeb->lnum] = aeb->pnum;
+
+		av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
+		if (!av)
+			continue;
+
+		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+			fm_eba[i][aeb->lnum] = aeb->pnum;
+
+		for (j = 0; j < vol->reserved_pebs; j++) {
+			if (scan_eba[i][j] != fm_eba[i][j]) {
+				/*
+				 * A mapping present on only one side is not an
+				 * inconsistency (pool changes); only two
+				 * different mapped PEBs are.
+				 */
+				if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
+					fm_eba[i][j] == UBI_LEB_UNMAPPED)
+					continue;
+
+				/* Report the LEB number (j), not the volume index */
+				ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
+					vol->vol_id, j, fm_eba[i][j],
+					scan_eba[i][j]);
+				ubi_assert(0);
+			}
+		}
+	}
+
+out_free:
+	for (i = 0; i < num_volumes; i++) {
+		if (!ubi->volumes[i])
+			continue;
+
+		kfree(scan_eba[i]);
+		kfree(fm_eba[i]);
+	}
+
+	kfree(scan_eba);
+	kfree(fm_eba);
+	return ret;
+}
/**
- * ubi_eba_init_scan - initialize the EBA sub-system using scanning information.
+ * ubi_eba_init - initialize the EBA sub-system using attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int i, j, err, num_volumes;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
struct rb_node *rb;
dbg_eba("initialize EBA sub-system");
@@ -1230,7 +1492,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
mutex_init(&ubi->alc_mutex);
ubi->ltree = RB_ROOT;
- ubi->global_sqnum = si->max_sqnum + 1;
+ ubi->global_sqnum = ai->max_sqnum + 1;
num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
for (i = 0; i < num_volumes; i++) {
@@ -1250,18 +1512,18 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
for (j = 0; j < vol->reserved_pebs; j++)
vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
- sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
- if (!sv)
+ av = ubi_find_av(ai, idx2vol_id(ubi, i));
+ if (!av)
continue;
- ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
- if (seb->lnum >= vol->reserved_pebs)
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ if (aeb->lnum >= vol->reserved_pebs)
/*
* This may happen in case of an unclean reboot
* during re-size.
*/
- ubi_scan_move_to_list(sv, seb, &si->erase);
- vol->eba_tbl[seb->lnum] = seb->pnum;
+ ubi_move_aeb_to_list(av, aeb, &ai->erase);
+ vol->eba_tbl[aeb->lnum] = aeb->pnum;
}
}
@@ -1283,7 +1545,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
if (ubi->avail_pebs < ubi->beb_rsvd_level) {
/* No enough free physical eraseblocks */
ubi->beb_rsvd_pebs = ubi->avail_pebs;
- print_rsvd_warning(ubi, si);
+ print_rsvd_warning(ubi, ai);
} else
ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/fastmap.c b/ANDROID_3.4.5/drivers/mtd/ubi/fastmap.c
new file mode 100755
index 00000000..50a976b3
--- /dev/null
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/fastmap.c
@@ -0,0 +1,1668 @@
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ */
+
+#include <linux/crc32.h>
+#include "ubi.h"
+
+void print_nand_buffer(char *value, unsigned int length);
+
+/**
+ * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
+ * @ubi: UBI device description object
+ */
+size_t ubi_calc_fm_size(struct ubi_device *ubi)
+{
+	size_t size;
+
+	/* Worst-case on-flash layout: SB header + two pools + one EC entry
+	 * per PEB + one EBA slot per PEB + a volume header per possible
+	 * volume. */
+	size = sizeof(struct ubi_fm_hdr) + \
+		sizeof(struct ubi_fm_scan_pool) + \
+		sizeof(struct ubi_fm_scan_pool) + \
+		(ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
+		(sizeof(struct ubi_fm_eba) + \
+		(ubi->peb_count * sizeof(__be32))) + \
+		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+	/* NOTE(review): this fork rounds to min_io_size, not leb_size as
+	 * mainline does — presumably to pack several fastmaps per LEB;
+	 * confirm against the writer side before changing. */
+	return roundup(size, ubi->min_io_size);
+}
+
+
+/**
+ * new_fm_vhdr - allocate a new volume header for fastmap usage.
+ * @ubi: UBI device description object
+ * @vol_id: the VID of the new header
+ *
+ * Returns a new struct ubi_vid_hdr on success.
+ * NULL indicates out of memory.
+ */
+static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
+{
+	struct ubi_vid_hdr *new;
+
+	/* Returns NULL on allocation failure; caller must check. */
+	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!new)
+		goto out;
+
+	new->vol_type = UBI_VID_DYNAMIC;
+	new->vol_id = cpu_to_be32(vol_id);
+
+	/* UBI implementations without fastmap support have to delete the
+	 * fastmap.
+	 */
+	new->compat = UBI_COMPAT_DELETE;
+
+out:
+	return new;
+}
+
+/**
+ * add_aeb - create and add a attach erase block to a given list.
+ * @ai: UBI attach info object
+ * @list: the target list
+ * @pnum: PEB number of the new attach erase block
+ * @ec: erease counter of the new LEB
+ * @scrub: scrub this PEB after attaching
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
+	    int pnum, int ec, int scrub)
+{
+	struct ubi_ainf_peb *aeb;
+
+	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+	if (!aeb)
+		return -ENOMEM;
+
+	aeb->pnum = pnum;
+	aeb->ec = ec;
+	aeb->lnum = -1;		/* not assigned to any LEB yet */
+	aeb->scrub = scrub;
+	aeb->copy_flag = aeb->sqnum = 0;
+
+	/* Keep the attach-wide erase-counter statistics up to date so
+	 * mean/min/max EC stay correct for wear-leveling decisions. */
+	ai->ec_sum += aeb->ec;
+	ai->ec_count++;
+
+	if (ai->max_ec < aeb->ec)
+		ai->max_ec = aeb->ec;
+
+	if (ai->min_ec > aeb->ec)
+		ai->min_ec = aeb->ec;
+
+	list_add_tail(&aeb->u.list, list);
+
+	return 0;
+}
+
+/**
+ * add_vol - create and add a new volume to ubi_attach_info.
+ * @ai: ubi_attach_info object
+ * @vol_id: VID of the new volume
+ * @used_ebs: number of used EBS
+ * @data_pad: data padding value of the new volume
+ * @vol_type: volume type
+ * @last_eb_bytes: number of bytes in the last LEB
+ *
+ * Returns the new struct ubi_ainf_volume on success.
+ * NULL indicates an error.
+ */
+static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
+				       int used_ebs, int data_pad, u8 vol_type,
+				       int last_eb_bytes)
+{
+	struct ubi_ainf_volume *av;
+	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+	/*
+	 * Descend to the insertion point. The original code repeated the
+	 * same '>' test in both branches, so for vol_id <= av->vol_id
+	 * neither branch advanced @p and the loop spun forever; the second
+	 * comparison must be '<', and an equal vol_id must not be inserted
+	 * twice (that would corrupt the rb-tree). The '>' -> rb_left
+	 * ordering matches the lookup in process_pool_aeb().
+	 */
+	while (*p) {
+		parent = *p;
+		av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+		if (vol_id > av->vol_id)
+			p = &(*p)->rb_left;
+		else if (vol_id < av->vol_id)
+			p = &(*p)->rb_right;
+		else
+			/* duplicate volume ID — treat as failure */
+			return NULL;
+	}
+
+	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+	if (!av)
+		goto out;
+
+	av->highest_lnum = av->leb_count = 0;
+	av->vol_id = vol_id;
+	av->used_ebs = used_ebs;
+	av->data_pad = data_pad;
+	av->last_data_size = last_eb_bytes;
+	av->compat = 0;
+	av->vol_type = vol_type;
+	av->root = RB_ROOT;
+
+	dbg_bld("found volume (ID %i)", vol_id);
+
+	rb_link_node(&av->rb, parent, p);
+	rb_insert_color(&av->rb, &ai->volumes);
+
+out:
+	return av;
+}
+
+/**
+ * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
+ * from it's original list.
+ * @ai: ubi_attach_info object
+ * @aeb: the to be assigned SEB
+ * @av: target scan volume
+ */
+static void assign_aeb_to_av(struct ubi_attach_info *ai,
+			     struct ubi_ainf_peb *aeb,
+			     struct ubi_ainf_volume *av)
+{
+	struct ubi_ainf_peb *tmp_aeb;
+	/* NOTE(review): the &ai->volumes initializer is dead — @p is
+	 * immediately repointed at the volume's own LEB tree below. */
+	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+	p = &av->root.rb_node;
+	while (*p) {
+		parent = *p;
+
+		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+		if (aeb->lnum != tmp_aeb->lnum) {
+			if (aeb->lnum < tmp_aeb->lnum)
+				p = &(*p)->rb_left;
+			else
+				p = &(*p)->rb_right;
+
+			continue;
+		} else
+			/* LEB already present: link at the occupied slot.
+			 * Callers are expected to have removed any older
+			 * entry first. */
+			break;
+	}
+
+	/* Move the PEB off its scan list and into the volume's rb-tree,
+	 * keyed by LEB number. */
+	list_del(&aeb->u.list);
+	av->leb_count++;
+
+	rb_link_node(&aeb->u.rb, parent, p);
+	rb_insert_color(&aeb->u.rb, &av->root);
+}
+
+/**
+ * update_vol - inserts or updates a LEB which was found a pool.
+ * @ubi: the UBI device object
+ * @ai: attach info object
+ * @av: the volume this LEB belongs to
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
+		      struct ubi_ainf_peb *new_aeb)
+{
+	struct rb_node **p = &av->root.rb_node, *parent = NULL;
+	struct ubi_ainf_peb *aeb, *victim;
+	int cmp_res;
+
+	/* Walk the volume's LEB tree looking for an entry with the same
+	 * LEB number as @new_aeb. */
+	while (*p) {
+		parent = *p;
+		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+
+		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
+			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
+				p = &(*p)->rb_left;
+			else
+				p = &(*p)->rb_right;
+
+			continue;
+		}
+
+		/* This case can happen if the fastmap gets written
+		 * because of a volume change (creation, deletion, ..).
+		 * Then a PEB can be within the persistent EBA and the pool.
+		 */
+		if (aeb->pnum == new_aeb->pnum) {
+			ubi_assert(aeb->lnum == new_aeb->lnum);
+			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+			return 0;
+		}
+
+		/* Same LEB, different PEB: let sqnum/copy-flag arbitration
+		 * decide which copy is current. */
+		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
+		if (cmp_res < 0)
+			return cmp_res;
+
+		/* new_aeb is newer */
+		if (cmp_res & 1) {
+			/* The old PEB is obsolete: queue it for erasure via
+			 * a throw-away copy, then take over the tree node
+			 * in place with the new PEB's identity. */
+			victim = kmem_cache_alloc(ai->aeb_slab_cache,
+				GFP_KERNEL);
+			if (!victim)
+				return -ENOMEM;
+
+			victim->ec = aeb->ec;
+			victim->pnum = aeb->pnum;
+			list_add_tail(&victim->u.list, &ai->erase);
+
+			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
+				av->last_data_size = \
+					be32_to_cpu(new_vh->data_size);
+
+			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
+				av->vol_id, aeb->lnum, new_aeb->pnum);
+
+			aeb->ec = new_aeb->ec;
+			aeb->pnum = new_aeb->pnum;
+			aeb->copy_flag = new_vh->copy_flag;
+			aeb->scrub = new_aeb->scrub;
+			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+		/* new_aeb is older */
+		} else {
+			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
+				av->vol_id, aeb->lnum, new_aeb->pnum);
+			list_add_tail(&new_aeb->u.list, &ai->erase);
+		}
+
+		return 0;
+	}
+	/* This LEB is new, let's add it to the volume */
+
+	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
+		av->highest_lnum = be32_to_cpu(new_vh->lnum);
+		av->last_data_size = be32_to_cpu(new_vh->data_size);
+	}
+
+	if (av->vol_type == UBI_STATIC_VOLUME)
+		av->used_ebs = be32_to_cpu(new_vh->used_ebs);
+
+	av->leb_count++;
+
+	/* @parent/@p still point at the insertion slot found above. */
+	rb_link_node(&new_aeb->u.rb, parent, p);
+	rb_insert_color(&new_aeb->u.rb, &av->root);
+
+	return 0;
+}
+
+/**
+ * process_pool_aeb - we found a non-empty PEB in a pool.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+			    struct ubi_vid_hdr *new_vh,
+			    struct ubi_ainf_peb *new_aeb)
+{
+	struct ubi_ainf_volume *av, *tmp_av = NULL;
+	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+	int found = 0;
+
+	/* Fastmap's own volumes never appear in the EBA; drop such PEBs. */
+	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
+		be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
+		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+		return 0;
+	}
+
+	/* Find the volume this SEB belongs to */
+	/* ('>' descends left — the same reversed ordering add_vol() uses
+	 * for insertion, so the lookup is consistent with the tree.) */
+	while (*p) {
+		parent = *p;
+		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
+			p = &(*p)->rb_left;
+		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
+			p = &(*p)->rb_right;
+		else {
+			found = 1;
+			break;
+		}
+	}
+
+	if (found)
+		av = tmp_av;
+	else {
+		/* A pool PEB referencing an unknown volume means the
+		 * fastmap is stale/corrupt — fall back to full scan. */
+		ubi_err("orphaned volume in fastmap pool!");
+		return UBI_BAD_FASTMAP;
+	}
+
+	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
+
+	return update_vol(ubi, ai, av, new_vh, new_aeb);
+}
+
+/**
+ * unmap_peb - unmap a PEB.
+ * If fastmap detects a free PEB in the pool it has to check whether
+ * this PEB has been unmapped after writing the fastmap.
+ *
+ * @ai: UBI attach info object
+ * @pnum: The PEB to be unmapped
+ */
+static void unmap_peb(struct ubi_attach_info *ai, int pnum)
+{
+	struct ubi_ainf_volume *av;
+	struct rb_node *node, *node2;
+	struct ubi_ainf_peb *aeb;
+
+	/* Linear search of every volume's LEB tree for @pnum; a PEB can be
+	 * mapped at most once, so we can stop at the first match. */
+	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
+		av = rb_entry(node, struct ubi_ainf_volume, rb);
+
+		for (node2 = rb_first(&av->root); node2;
+		     node2 = rb_next(node2)) {
+			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
+			if (aeb->pnum == pnum) {
+				rb_erase(&aeb->u.rb, &av->root);
+				kmem_cache_free(ai->aeb_slab_cache, aeb);
+				return;
+			}
+		}
+	}
+}
+
+/**
+ * scan_pool - scans a pool for changed (no longer empty PEBs).
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @pebs: an array of all PEB numbers in the to be scanned pool
+ * @pool_size: size of the pool (number of entries in @pebs)
+ * @max_sqnum: pointer to the maximal sequence number
+ * @eba_orphans: list of PEBs which need to be scanned
+ * @free: list of PEBs which are most likely free (and go into @ai->free)
+ *
+ * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
+ * < 0 indicates an internal error.
+ */
+static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		     int *pebs, int pool_size, unsigned long long *max_sqnum,
+		     struct list_head *eba_orphans, struct list_head *free)
+{
+	struct ubi_vid_hdr *vh;
+	struct ubi_ec_hdr *ech;
+	struct ubi_ainf_peb *new_aeb, *tmp_aeb;
+	int i, pnum, err, found_orphan, ret = 0;
+
+	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ech)
+		return -ENOMEM;
+
+	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!vh) {
+		kfree(ech);
+		return -ENOMEM;
+	}
+
+	dbg_bld("scanning fastmap pool: size = %i", pool_size);
+
+	/*
+	 * Now scan all PEBs in the pool to find changes which have been made
+	 * after the creation of the fastmap
+	 */
+	for (i = 0; i < pool_size; i++) {
+		int scrub = 0;
+		int image_seq;
+
+		/* @pebs holds raw on-flash big-endian entries */
+		pnum = be32_to_cpu(pebs[i]);
+
+		if (ubi_io_is_bad(ubi, pnum)) {
+			ubi_err("bad PEB in fastmap pool!");
+			ret = UBI_BAD_FASTMAP;
+			goto out;
+		}
+
+		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+		if (err == UBI_IO_FF) {
+			/* Fully erased PEB: queue for (re)erase */
+			add_aeb(ai, &ai->erase, pnum, ai->mean_ec, 1);
+			continue;
+		}
+		else if (err && err != UBI_IO_BITFLIPS) {
+			ubi_err("unable to read EC header! PEB:%i err:%i",
+				pnum, err);
+			ret = err > 0 ? UBI_BAD_FASTMAP : err;
+			goto out;
+		} else if (err == UBI_IO_BITFLIPS)
+			/* Fix: the original tested 'ret' (still 0) here, so
+			 * bitflipped EC headers never got scrubbed. */
+			scrub = 1;
+		image_seq = be32_to_cpu(ech->image_seq);
+		if (image_seq && (image_seq != ubi->image_seq)) {
+			ubi_err("bad image seq: 0x%x, expected: 0x%x",
+				be32_to_cpu(ech->image_seq), ubi->image_seq);
+			/* Fix: must set 'ret' — the original set 'err' and
+			 * then returned ret == 0, silently accepting a bad
+			 * fastmap. */
+			ret = UBI_BAD_FASTMAP;
+			goto out;
+		}
+
+		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
+			unsigned long long ec = be64_to_cpu(ech->ec);
+			/* The PEB was unmapped after the fastmap was written;
+			 * drop any stale EBA reference and treat it as free. */
+			unmap_peb(ai, pnum);
+			dbg_bld("Adding PEB to free: %i", pnum);
+			if (err == UBI_IO_FF_BITFLIPS)
+				add_aeb(ai, free, pnum, ec, 1);
+			else
+				add_aeb(ai, free, pnum, ec, 0);
+			continue;
+		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
+			dbg_bld("Found non empty PEB:%i in pool", pnum);
+
+			if (err == UBI_IO_BITFLIPS)
+				scrub = 1;
+
+			/* If this PEB was on the EBA-orphan list it is now
+			 * accounted for — remove the placeholder. */
+			found_orphan = 0;
+			list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
+				if (tmp_aeb->pnum == pnum) {
+					found_orphan = 1;
+					break;
+				}
+			}
+			if (found_orphan) {
+				list_del(&tmp_aeb->u.list);
+				kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+			}
+
+			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
+						   GFP_KERNEL);
+			if (!new_aeb) {
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			new_aeb->ec = be64_to_cpu(ech->ec);
+			new_aeb->pnum = pnum;
+			new_aeb->lnum = be32_to_cpu(vh->lnum);
+			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
+			new_aeb->copy_flag = vh->copy_flag;
+			new_aeb->scrub = scrub;
+
+			if (*max_sqnum < new_aeb->sqnum)
+				*max_sqnum = new_aeb->sqnum;
+
+			/* Merge into the volume tree; update_vol() consumes
+			 * or frees new_aeb in all cases. */
+			err = process_pool_aeb(ubi, ai, vh, new_aeb);
+			if (err) {
+				ret = err > 0 ? UBI_BAD_FASTMAP : err;
+				goto out;
+			}
+		} else {
+			/* We are paranoid and fall back to scanning mode */
+			ubi_err("fastmap pool PEBs contains damaged PEBs!");
+			ret = err > 0 ? UBI_BAD_FASTMAP : err;
+			goto out;
+		}
+
+	}
+
+out:
+	ubi_free_vid_hdr(ubi, vh);
+	kfree(ech);
+	return ret;
+}
+
+/**
+ * count_fastmap_pebs - Counts the PEBs found by fastmap.
+ * @ai: The UBI attach info object
+ */
+static int count_fastmap_pebs(struct ubi_attach_info *ai)
+{
+	struct ubi_ainf_peb *aeb;
+	struct ubi_ainf_volume *av;
+	struct rb_node *rb1, *rb2;
+	int n = 0;
+
+	/* PEBs queued for erasure */
+	list_for_each_entry(aeb, &ai->erase, u.list)
+		n++;
+
+	/* free PEBs */
+	list_for_each_entry(aeb, &ai->free, u.list)
+		n++;
+
+	/* PEBs mapped in some volume's LEB tree */
+	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+			n++;
+
+	return n;
+}
+#if 0
+/* NOTE(review): dead code, compiled out. Appears to binary-search a LEB for
+ * the last non-empty fastmap slot (fm_size-sized records packed into one
+ * LEB); kept for reference only — delete once the packed-fastmap scheme is
+ * confirmed unused. */
+static int search_blk(struct ubi_device *ubi, int lnum)
+{
+	int tmp_low, tmp_high, tmp_mid;
+	int tmp_offs;
+
+	tmp_low = 0;
+	tmp_high = ubi->leb_size / ubi->fm_size - 1;
+
+
+	while(tmp_low <= tmp_high)
+	{
+		tmp_mid = (tmp_low + tmp_high) / 2;
+		tmp_offs = tmp_mid * ubi->fm_size;
+		ubi_io_read(ubi, ubi->fm_buf, lnum, ubi->leb_start + tmp_offs, ubi->min_io_size);
+		if (ubi_check_pattern(ubi->fm_buf, 0xFF, ubi->min_io_size)) {
+			tmp_high = tmp_mid - 1;
+		} else {
+			tmp_low = tmp_mid + 1;
+		}
+	}
+	tmp_mid = (tmp_low + tmp_high) / 2;
+	return tmp_mid;
+}
+#endif
+
+/**
+ * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info object
+ * @fm: the fastmap to be attached
+ *
+ * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
+ * < 0 indicates an internal error.
+ */
+static int ubi_attach_fastmap(struct ubi_device *ubi,
+ struct ubi_attach_info *ai,
+ struct ubi_fastmap_layout *fm)
+{
+ struct list_head used, eba_orphans, free;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
+ struct ubi_ec_hdr *ech;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmhdr;
+ struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+ struct ubi_fm_ec *fmec;
+ struct ubi_fm_volhdr *fmvhdr;
+ struct ubi_fm_eba *fm_eba;
+ int ret, i, j, pool_size, wl_pool_size;
+ size_t fm_pos = 0, fm_size = ubi->fm_size;
+ unsigned long long max_sqnum = 0;
+ void *fm_raw = ubi->fm_tmp;
+ INIT_LIST_HEAD(&used);
+ INIT_LIST_HEAD(&free);
+ INIT_LIST_HEAD(&eba_orphans);
+ INIT_LIST_HEAD(&ai->corr);
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
+ ai->volumes = RB_ROOT;
+ ai->min_ec = UBI_MAX_ERASECOUNTER;
+
+ ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
+ sizeof(struct ubi_ainf_peb),
+ 0, 0, NULL);
+ if (!ai->aeb_slab_cache) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ fmsb = (struct ubi_fm_sb *)(fm_raw);
+ ai->max_sqnum = fmsb->sqnum;
+ fm_pos += sizeof(struct ubi_fm_sb);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
+ ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl1);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl2);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ pool_size = be16_to_cpu(fmpl1->size);
+ wl_pool_size = be16_to_cpu(fmpl2->size);
+ fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
+ fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
+
+ if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
+ ubi_err("bad pool size: %i", pool_size);
+ goto fail_bad;
+ }
+
+ if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
+ ubi_err("bad WL pool size: %i", wl_pool_size);
+ goto fail_bad;
+ }
+
+
+ if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_pool_size < 0) {
+ ubi_err("bad maximal pool size: %i", fm->max_pool_size);
+ goto fail_bad;
+ }
+
+ if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_wl_pool_size < 0) {
+ ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
+ goto fail_bad;
+ }
+
+ /* read EC values from free list */
+ for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ }
+ /* read EC values from used list */
+ for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ }
+
+ /* read EC values from scrub list */
+ for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ }
+ /* read EC values from erase list */
+ for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ }
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+ ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
+ if(ubi->old_anchor)
+ add_aeb(ai, &ai->erase, ubi->old_anchor, ai->mean_ec, 1);
+ /* Iterate over all volumes and read their EBA table */
+ for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
+ fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmvhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
+ ubi_err("bad fastmap vol header magic: 0x%x, " \
+ "expected: 0x%x",
+ be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
+ goto fail_bad;
+ }
+
+ av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
+ be32_to_cpu(fmvhdr->used_ebs),
+ be32_to_cpu(fmvhdr->data_pad),
+ fmvhdr->vol_type,
+ be32_to_cpu(fmvhdr->last_eb_bytes));
+
+ if (!av)
+ goto fail_bad;
+
+ ai->vols_found++;
+ if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
+ ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
+
+ fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fm_eba);
+ fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
+ ubi_err("bad fastmap EBA header magic: 0x%x, " \
+ "expected: 0x%x",
+ be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
+ goto fail_bad;
+ }
+
+ for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
+ int pnum = be32_to_cpu(fm_eba->pnum[j]);
+
+ if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
+ continue;
+
+ aeb = NULL;
+ list_for_each_entry(tmp_aeb, &used, u.list) {
+ if (tmp_aeb->pnum == pnum) {
+ aeb = tmp_aeb;
+ break;
+ }
+ }
+
+ /* This can happen if a PEB is already in an EBA known
+ * by this fastmap but the PEB itself is not in the used
+ * list.
+ * In this case the PEB can be within the fastmap pool
+ * or while writing the fastmap it was in the protection
+ * queue.
+ */
+ if (!aeb) {
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache,
+ GFP_KERNEL);
+ if (!aeb) {
+ ret = -ENOMEM;
+
+ goto fail;
+ }
+
+ aeb->lnum = j;
+ aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
+ aeb->ec = -1;
+ aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
+ list_add_tail(&aeb->u.list, &eba_orphans);
+ continue;
+ }
+
+ aeb->lnum = j;
+
+ if (av->highest_lnum <= aeb->lnum)
+ av->highest_lnum = aeb->lnum;
+
+ assign_aeb_to_av(ai, aeb, av);
+
+ dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
+ aeb->pnum, aeb->lnum, av->vol_id);
+ }
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
+ u.list) {
+ int err;
+
+ if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
+ ubi_err("bad PEB in fastmap EBA orphan list");
+ ret = UBI_BAD_FASTMAP;
+ kfree(ech);
+ goto fail;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
+ if (err == UBI_IO_FF) {
+ add_aeb(ai, &ai->erase, tmp_aeb->pnum, ai->mean_ec, 1);
+ continue;
+ }
+ else if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read EC header! PEB:%i " \
+ "err:%i", tmp_aeb->pnum, err);
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ kfree(ech);
+
+ goto fail;
+ } else if (err == UBI_IO_BITFLIPS)
+ tmp_aeb->scrub = 1;
+
+ tmp_aeb->ec = be64_to_cpu(ech->ec);
+ assign_aeb_to_av(ai, tmp_aeb, av);
+ }
+
+ kfree(ech);
+ }
+//Test by Johnny
+#if 0
+ ret = UBI_BAD_FASTMAP;
+ kfree(ech);
+ goto fail;
+#endif
+ ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
+ &eba_orphans, &free);
+ if (ret)
+ goto fail;
+
+ ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
+ &eba_orphans, &free);
+ if (ret)
+ goto fail;
+ if (max_sqnum > ai->max_sqnum)
+ ai->max_sqnum = max_sqnum;
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
+ list_move_tail(&tmp_aeb->u.list, &ai->free);
+
+ /*
+ * If fastmap is leaking PEBs (must not happen), raise a
+ * fat warning and fall back to scanning mode.
+ * We do this here because in ubi_wl_init() it's too late
+ * and we cannot fall back to scanning.
+ */
+ if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
+ ai->bad_peb_count - fm->used_blocks)) {
+ goto fail_bad;
+ }
+
+ return 0;
+
+fail_bad:
+ ret = UBI_BAD_FASTMAP;
+fail:
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ list_del(&tmp_aeb->u.list);
+ }
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ list_del(&tmp_aeb->u.list);
+ }
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ list_del(&tmp_aeb->u.list);
+ }
+ return ret;
+}
+
+/**
+ * ubi_scan_fastmap - scan the fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info to be filled
+ * @fm_anchor: The fastmap starts at this PEB
+ *
+ * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
+ * UBI_BAD_FASTMAP if one was found but is not usable.
+ * < 0 indicates an internal error.
+ *
+ * NOTE(review): this vendor variant hard-codes used_blocks to 1 and reads
+ * the whole fastmap image straight from @fm_anchor; the upstream
+ * per-block EC/VID validation loop is compiled out below -- confirm this
+ * is intentional for this flash layout.
+ */
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		     int fm_anchor)
+{
+	struct ubi_fm_sb *fmsb, *fmsb2;
+	struct ubi_vid_hdr *vh;
+	struct ubi_ec_hdr *ech;
+	struct ubi_fastmap_layout *fm;
+	int i, used_blocks, ret = 0;
+	__be32 crc, tmp_crc;
+	unsigned long long sqnum = 0;
+
+	mutex_lock(&ubi->fm_mutex);
+	memset(ubi->fm_buf, 0, ubi->leb_size);
+	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
+	if (!fmsb) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+	if (!fm) {
+		ret = -ENOMEM;
+		kfree(fmsb);
+		goto out;
+	}
+	/* Read the fastmap superblock from the anchor PEB. */
+	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
+	if (ret && ret != UBI_IO_BITFLIPS)
+		goto free_fm_sb;
+	else if (ret == UBI_IO_BITFLIPS)
+		fm->to_be_tortured[0] = 1;
+
+	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
+		ubi_err("bad super block magic: 0x%x, expected: 0x%x",
+			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
+		ret = UBI_BAD_FASTMAP;
+		goto free_fm_sb_1;
+	}
+
+	if (fmsb->version != UBI_FM_FMT_VERSION) {
+		ubi_err("bad fastmap version: %i, expected: %i",
+			fmsb->version, UBI_FM_FMT_VERSION);
+		ret = UBI_BAD_FASTMAP;
+		goto free_fm_sb_1;
+	}
+
+	/* NOTE(review): fmsb->used_blocks is deliberately ignored; the
+	 * vendor layout always keeps the fastmap in a single block. */
+// used_blocks = be32_to_cpu(fmsb->used_blocks);
+	used_blocks = 1;
+	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
+		ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
+		ret = UBI_BAD_FASTMAP;
+		goto free_fm_sb_1;
+	}
+#if 0
+	fm_size = ubi->leb_size * used_blocks;
+	if (fm_size != ubi->fm_size) {
+		ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
+			ubi->fm_size);
+		ret = UBI_BAD_FASTMAP;
+		goto free_fm_sb_1;
+	}
+#endif
+	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ech) {
+		ret = -ENOMEM;
+		goto free_fm_sb;
+	}
+
+	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!vh) {
+		ret = -ENOMEM;
+		goto free_hdr;
+	}
+	/* Compiled-out upstream loop: per-block EC/VID header validation
+	 * and multi-block fastmap read. Note that with it disabled, sqnum
+	 * below stays 0. */
+#if 0
+	for (i = 0; i < used_blocks; i++) {
+		pnum = be32_to_cpu(fmsb->block_loc[i]);
+
+		if (ubi_io_is_bad(ubi, pnum)) {
+			ret = UBI_BAD_FASTMAP;
+			goto free_hdr;
+		}
+
+		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+		if (ret && ret != UBI_IO_BITFLIPS) {
+			ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
+				i, pnum);
+			if (ret > 0)
+				ret = UBI_BAD_FASTMAP;
+			goto free_hdr;
+		} else if (ret == UBI_IO_BITFLIPS)
+			fm->to_be_tortured[i] = 1;
+
+		if (!ubi->image_seq)
+			ubi->image_seq = be32_to_cpu(ech->image_seq);
+
+		if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+			ret = UBI_BAD_FASTMAP;
+			goto free_hdr;
+		}
+
+		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+		if (ret && ret != UBI_IO_BITFLIPS) {
+			ubi_err("unable to read fastmap block# %i (PEB: %i)",
+				i, pnum);
+			goto free_hdr;
+		}
+
+		if (i == 0) {
+			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
+				ubi_err("bad fastmap anchor vol_id: 0x%x," \
+					" expected: 0x%x",
+					be32_to_cpu(vh->vol_id),
+					UBI_FM_SB_VOLUME_ID);
+				ret = UBI_BAD_FASTMAP;
+				goto free_hdr;
+			}
+		} else {
+			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
+				ubi_err("bad fastmap data vol_id: 0x%x," \
+					" expected: 0x%x",
+					be32_to_cpu(vh->vol_id),
+					UBI_FM_DATA_VOLUME_ID);
+				ret = UBI_BAD_FASTMAP;
+				goto free_hdr;
+			}
+		}
+
+		if (sqnum < be64_to_cpu(vh->sqnum))
+			sqnum = be64_to_cpu(vh->sqnum);
+
+		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
+				  ubi->leb_start, ubi->leb_size);
+		if (ret && ret != UBI_IO_BITFLIPS) {
+			ubi_err("unable to read fastmap block# %i (PEB: %i, " \
+				"err: %i)", i, pnum, ret);
+			ret = UBI_BAD_FASTMAP;
+			goto free_hdr;
+		}
+	}
+#endif
+/*
+	ret = ubi_io_read(ubi, ubi->fm_buf, fm_anchor, ubi->leb_start, ubi->leb_size);
+	i = 0;
+	while (1) {
+		ubi->fm_tmp = ubi->fm_buf + ubi->fm_size * i;
+		if(ubi_check_pattern(ubi->fm_tmp, 0xFF, ubi->fm_size))
+			break;
+		i++;
+		if(i == ubi->fm_cnt)
+			break;
+	}
+	ubi->fm_idx = i;
+	//printk("\n fm_idx is %d", ubi->fm_idx);
+	if(ubi->fm_idx > 0)ubi->fm_tmp = ubi->fm_buf + ubi->fm_size * (i - 1);
+*/
+
+	//ubi->fm_idx = search_blk(ubi, fm_anchor);
+	ubi->fm_idx = 0;
+#if 0
+	ret = ubi_io_read(ubi, ubi->fm_buf, fm_anchor,
+			ubi->leb_start + (ubi->fm_idx + 1) * ubi->fm_size,
+			ubi->min_io_size);
+
+	if (!ubi_check_pattern(ubi->fm_buf, 0xFF, ubi->min_io_size)) {
+
+		ubi_err("fastmap data is invalid");
+		ret = UBI_BAD_FASTMAP;
+		goto free_hdr;
+	}
+#endif
+	/* Read the whole fastmap image in one go.
+	 * NOTE(review): the return value of this read is never checked; a
+	 * failed read is only caught indirectly by the CRC test below. */
+	ret = ubi_io_read(ubi, ubi->fm_buf, fm_anchor, ubi->leb_start + ubi->fm_idx * ubi->fm_size,
+			ubi->fm_size);
+	ubi->fm_tmp = ubi->fm_buf;
+	ubi->fm_idx = 1;
+	/* The superblock copy is no longer needed; NULLing fmsb also
+	 * disarms the fmsb-based erase paths in the cleanup labels below. */
+	kfree(fmsb);
+	fmsb = NULL;
+	//print_nand_buffer(ubi->fm_buf, ubi->fm_size);
+	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_tmp);
+	/* Verify the CRC over the full image; data_crc is zeroed first
+	 * because it was zero when the checksum was computed on write. */
+	tmp_crc = be32_to_cpu(fmsb2->data_crc);
+	fmsb2->data_crc = 0;
+	crc = crc32(UBI_CRC32_INIT, ubi->fm_tmp, ubi->fm_size);
+	if (crc != tmp_crc) {
+		ubi_err("fastmap data CRC is invalid");
+		ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
+		ret = UBI_BAD_FASTMAP;
+		goto free_hdr;
+	}
+
+	/* sqnum is still 0 here because the VID-header scan above is
+	 * compiled out -- TODO confirm this is acceptable. */
+	fmsb2->sqnum = sqnum;
+
+	fm->used_blocks = used_blocks;
+	ret = ubi_attach_fastmap(ubi, ai, fm);
+	if (ret) {
+		if (ret > 0)
+			ret = UBI_BAD_FASTMAP;
+		goto free_hdr;
+	}
+
+	/* Build wear-leveling entries for every PEB holding the fastmap. */
+	for (i = 0; i < used_blocks; i++) {
+		struct ubi_wl_entry *e;
+
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+		if (!e) {
+			while (i--)
+				kfree(fm->e[i]);
+
+			ret = -ENOMEM;
+			goto free_hdr;
+		}
+
+		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
+		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
+		fm->e[i] = e;
+	}
+	ubi->fm = fm;
+	ubi->fm_cnt = 1;
+	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
+	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
+	ubi_msg("attached by fastmap");
+	ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
+	ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+	ubi->fm_disabled = 0;
+	ubi_free_vid_hdr(ubi, vh);
+	kfree(ech);
+	/* Success path falls through into the common unlock/exit code. */
+out:
+	mutex_unlock(&ubi->fm_mutex);
+	if (ret == UBI_BAD_FASTMAP) {
+
+		if(ubi->old_anchor) erase_block(ubi, ubi->old_anchor);
+		ubi_err("Attach by fastmap failed, doing a full scan!");
+
+} return ret;
+
+/* Error unwind: each label below additionally erases the PEB(s) believed
+ * to hold the bad fastmap so a stale copy is not picked up next attach.
+ * fmsb2 is only dereferenced on paths where ret == UBI_BAD_FASTMAP, all
+ * of which occur after fmsb2 was assigned. */
+free_hdr:
+	if (ret == UBI_BAD_FASTMAP) {
+		if(fmsb2) {
+			for(i = 0; i < used_blocks; i++)
+				erase_block(ubi, be32_to_cpu(fmsb2->block_loc[i]));
+
+		}
+	}
+	ubi_free_vid_hdr(ubi, vh);
+	kfree(ech);
+free_fm_sb:
+	if (ret == UBI_BAD_FASTMAP) {
+		if(fmsb) {
+			for(i = 1; i < used_blocks; i++)
+				erase_block(ubi, be32_to_cpu(fmsb->block_loc[i]));
+
+		}
+	}
+free_fm_sb_1:
+	if (ret == UBI_BAD_FASTMAP) {
+		if(fmsb) erase_block(ubi, be32_to_cpu(fmsb->block_loc[0]));
+	}
+	kfree(fmsb);
+	kfree(fm);
+	goto out;
+}
+
+/**
+ * ubi_write_fastmap - writes a fastmap.
+ * @ubi: UBI device object
+ * @new_fm: the to be written fastmap
+ *
+ * Serializes the current UBI state (PEB pools, free/used/scrub trees,
+ * pending erase work and every volume's EBA table) into ubi->fm_cur and
+ * writes it to the anchor PEB of @new_fm.
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int ubi_write_fastmap(struct ubi_device *ubi,
+			     struct ubi_fastmap_layout *new_fm)
+{
+	size_t fm_pos = 0;
+	void *fm_raw;
+	struct ubi_fm_sb *fmsb;
+	struct ubi_fm_hdr *fmh;
+	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+	struct ubi_fm_ec *fec;
+	struct ubi_fm_volhdr *fvh;
+	struct ubi_fm_eba *feba;
+	struct rb_node *node;
+	struct ubi_wl_entry *wl_e;
+	struct ubi_volume *vol;
+	struct ubi_vid_hdr *avhdr, *dvhdr;
+	struct ubi_work *ubi_wrk;
+	int ret, i, j, free_peb_count, used_peb_count, vol_count;
+	int scrub_peb_count, erase_peb_count;
+
+	//printk("\n ubi_write_fastmap!!!");
+
+#if 0
+	if(new_fm) {
+		for ( i = 0; i < new_fm->used_blocks; i++)
+			printk("\n new pnum is %d",new_fm->e[i]->pnum);
+	}
+#endif
+
+	/* Serialize into the preallocated buffer; no allocation here. */
+	fm_raw = ubi->fm_cur;
+	memset(ubi->fm_cur, 0, ubi->fm_size);
+
+	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+	if (!avhdr) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* dvhdr is only consumed by the compiled-out multi-block loop
+	 * below, but it is still allocated and freed unconditionally. */
+	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
+	if (!dvhdr) {
+		ret = -ENOMEM;
+		goto out_kfree;
+	}
+
+	/* Freeze volume and wear-leveling state while serializing. */
+	spin_lock(&ubi->volumes_lock);
+	spin_lock(&ubi->wl_lock);
+
+	fmsb = (struct ubi_fm_sb *)fm_raw;
+	fm_pos += sizeof(*fmsb);
+	ubi_assert(fm_pos <= ubi->fm_size);
+
+	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmh);
+	ubi_assert(fm_pos <= ubi->fm_size);
+
+	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
+	fmsb->version = UBI_FM_FMT_VERSION;
+	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
+	/* the max sqnum will be filled in while *reading* the fastmap */
+	fmsb->sqnum = 0;
+
+	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
+	free_peb_count = 0;
+	used_peb_count = 0;
+	scrub_peb_count = 0;
+	erase_peb_count = 0;
+	vol_count = 0;
+
+	/* Serialize both PEB pools. */
+	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmpl1);
+	fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
+	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
+
+	for (i = 0; i < ubi->fm_pool.size; i++)
+		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+
+	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmpl2);
+	fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
+	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
+
+	for (i = 0; i < ubi->fm_wl_pool.size; i++)
+		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
+
+	/* Serialize the free-PEB tree. */
+	for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
+		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+		fec->pnum = cpu_to_be32(wl_e->pnum);
+		fec->ec = cpu_to_be32(wl_e->ec);
+
+		free_peb_count++;
+		fm_pos += sizeof(*fec);
+		ubi_assert(fm_pos <= ubi->fm_size);
+	}
+	fmh->free_peb_count = cpu_to_be32(free_peb_count);
+
+	/* Serialize the used-PEB tree. */
+	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
+		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+		fec->pnum = cpu_to_be32(wl_e->pnum);
+		fec->ec = cpu_to_be32(wl_e->ec);
+
+		used_peb_count++;
+		fm_pos += sizeof(*fec);
+		ubi_assert(fm_pos <= ubi->fm_size);
+	}
+	fmh->used_peb_count = cpu_to_be32(used_peb_count);
+
+	/* Serialize PEBs queued for scrubbing. */
+	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
+		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+		fec->pnum = cpu_to_be32(wl_e->pnum);
+		fec->ec = cpu_to_be32(wl_e->ec);
+
+		scrub_peb_count++;
+		fm_pos += sizeof(*fec);
+		ubi_assert(fm_pos <= ubi->fm_size);
+	}
+	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
+
+
+	/* Serialize PEBs with a pending erase work item. */
+	list_for_each_entry(ubi_wrk, &ubi->works, list) {
+		if (ubi_is_erase_work(ubi_wrk)) {
+			wl_e = ubi_wrk->e;
+			ubi_assert(wl_e);
+
+			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+			fec->pnum = cpu_to_be32(wl_e->pnum);
+			fec->ec = cpu_to_be32(wl_e->ec);
+
+			erase_peb_count++;
+			fm_pos += sizeof(*fec);
+			ubi_assert(fm_pos <= ubi->fm_size);
+		}
+	}
+	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
+
+	/* Serialize every volume header together with its EBA table. */
+	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
+		vol = ubi->volumes[i];
+
+		if (!vol)
+			continue;
+
+		vol_count++;
+
+		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*fvh);
+		ubi_assert(fm_pos <= ubi->fm_size);
+
+		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
+		fvh->vol_id = cpu_to_be32(vol->vol_id);
+		fvh->vol_type = vol->vol_type;
+		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
+		fvh->data_pad = cpu_to_be32(vol->data_pad);
+		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
+
+		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
+			vol->vol_type == UBI_STATIC_VOLUME);
+
+		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
+		ubi_assert(fm_pos <= ubi->fm_size);
+
+		for (j = 0; j < vol->reserved_pebs; j++)
+			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
+
+		feba->reserved_pebs = cpu_to_be32(j);
+		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
+	}
+	fmh->vol_count = cpu_to_be32(vol_count);
+	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
+
+	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+	avhdr->lnum = 0;
+
+	spin_unlock(&ubi->wl_lock);
+	spin_unlock(&ubi->volumes_lock);
+	/* Write the anchor VID header only for a fresh fastmap block. */
+	if(!ubi->fm_idx) {
+	#if 0
+	ret = ubi_io_read(ubi, ubi->fm_buf, new_fm->e[0]->pnum, ubi->leb_start, ubi->fm_size);
+	if(ret) {
+		ubi_err("ubable to check the fastmap SB!");
+		goto out_kfree;
+	}
+	if(!ubi_check_pattern(ubi->fm_buf, 0xFF, ubi->fm_size)) {
+		ret = erase_block(ubi, new_fm->e[0]->pnum);
+		if(ret) ubi_err("erase failed!!!");
+		else ubi_msg("\n Block is not clean, fix it now");
+	}
+	#endif
+	//printk("\n update anchor block here, %d@@@", new_fm->e[0]->pnum);
+	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
+	if (ret) {
+		ubi_err("unable to write vid_hdr to fastmap SB!");
+		goto out_kfree;
+	}
+	}
+	for (i = 0; i < new_fm->used_blocks; i++) {
+		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
+		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
+	}
+
+	/* CRC covers the whole image with data_crc itself zeroed. */
+	fmsb->data_crc = 0;
+	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
+			ubi->fm_size));
+#if 0
+	for (i = 1; i < new_fm->used_blocks; i++) {
+		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+		dvhdr->lnum = cpu_to_be32(i);
+		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
+			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
+		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
+		if (ret) {
+			ubi_err("unable to write vid_hdr to PEB %i!",
+				new_fm->e[i]->pnum);
+			goto out_kfree;
+		}
+	}
+#endif
+	for (i = 0; i < new_fm->used_blocks; i++) {
+		/* NOTE(review): if fm_idx != 0 the write is skipped and
+		 * 'ret' is read uninitialized below; the sole caller
+		 * guarantees fm_idx == 0 here, but initializing ret at
+		 * declaration would be safer. */
+		if(!ubi->fm_idx)ret = ubi_io_write(ubi, fm_raw,
+			new_fm->e[i]->pnum, ubi->leb_start + ubi->fm_size * ubi->fm_idx, ubi->fm_size);
+		if (ret) {
+			ubi_err("unable to write fastmap to PEB %i!",
+				new_fm->e[i]->pnum);
+			goto out_kfree;
+		}
+	}
+	//print_nand_buffer((char *)fm_raw, ubi->fm_size);
+	ubi_assert(new_fm);
+	ubi->fm = new_fm;
+
+	dbg_bld("fastmap written!");
+
+	/* Success path falls through: the VID headers are freed either way. */
+out_kfree:
+	ubi_free_vid_hdr(ubi, avhdr);
+	ubi_free_vid_hdr(ubi, dvhdr);
+out:
+	return ret;
+}
+
+/**
+ * erase_block - Manually erase a PEB.
+ * @ubi: UBI device object
+ * @pnum: PEB to be erased
+ *
+ * Reads the PEB's erase-counter header, synchronously erases the block,
+ * then writes the header back with the erase counter incremented.
+ *
+ * Returns the new EC value on success, < 0 indicates an internal error.
+ */
+int erase_block(struct ubi_device *ubi, int pnum)
+{
+	int ret;
+	struct ubi_ec_hdr *ec_hdr;
+	long long ec;
+
+	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ec_hdr)
+		return -ENOMEM;
+
+	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
+	if (ret < 0)
+		goto out;
+	else if (ret && ret != UBI_IO_BITFLIPS) {
+		/* Header unreadable or empty: refuse to guess the EC. */
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = ubi_io_sync_erase(ubi, pnum, 0);
+	if (ret < 0)
+		goto out;
+
+	/* ubi_io_sync_erase() returns the number of erasures performed;
+	 * add it to the stored erase counter. */
+	ec = be64_to_cpu(ec_hdr->ec);
+	ec += ret;
+	if (ec > UBI_MAX_ERASECOUNTER) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ec_hdr->ec = cpu_to_be64(ec);
+	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+	if (ret < 0)
+		goto out;
+
+	/* Report the updated erase counter to the caller. */
+	ret = ec;
+out:
+	kfree(ec_hdr);
+	return ret;
+}
+
+#if 0
+/**
+ * invalidate_fastmap - destroys a fastmap.
+ * @ubi: UBI device object
+ * @fm: the fastmap to be destroyed
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ *
+ * NOTE(review): currently compiled out; the ubi_update_fastmap() error
+ * path that called it is commented out as well.
+ */
+static int invalidate_fastmap(struct ubi_device *ubi,
+			      struct ubi_fastmap_layout *fm)
+{
+	int ret;
+	struct ubi_vid_hdr *vh;
+
+	ret = erase_block(ubi, fm->e[0]->pnum);
+	if (ret < 0)
+		return ret;
+
+	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+	if (!vh)
+		return -ENOMEM;
+
+	/* deleting the current fastmap SB is not enough, an old SB may exist,
+	 * so create a (corrupted) SB such that fastmap will find it and fall
+	 * back to scanning mode in any case */
+	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+	ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
+
+	/* NOTE(review): vh is never freed on this path -- harmless while
+	 * the function is compiled out, but fix before re-enabling. */
+// for (i = 0; i < fm->used_blocks; i++)
+// ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);
+
+	return ret;
+}
+#endif
+
+/**
+ * ubi_update_fastmap - will be called by UBI if a volume changes or
+ * a fastmap pool becomes full.
+ * @ubi: UBI device object
+ *
+ * Allocates a new fastmap layout, picks (or reuses) an anchor PEB and
+ * serializes the current state to flash via ubi_write_fastmap().
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+int ubi_update_fastmap(struct ubi_device *ubi)
+{
+	int ret, i;
+	struct ubi_fastmap_layout *new_fm, *old_fm;
+	struct ubi_wl_entry *tmp_e;
+
+	mutex_lock(&ubi->fm_mutex);
+
+	ubi_refill_pools(ubi);
+	/* Nothing to do when a fastmap image is already staged
+	 * (fm_idx != 0), in read-only mode, or with fastmap disabled. */
+	if (ubi->fm_idx || ubi->ro_mode || ubi->fm_disabled) {
+		mutex_unlock(&ubi->fm_mutex);
+		return 0;
+	}
+#if 0
+	ret = ubi_ensure_anchor_pebs(ubi);
+	if (ret) {
+		mutex_unlock(&ubi->fm_mutex);
+		return ret;
+	}
+#endif
+	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+	if (!new_fm) {
+		mutex_unlock(&ubi->fm_mutex);
+		return -ENOMEM;
+	}
+	//new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
+	/* The vendor layout always stores the fastmap in one block. */
+	new_fm->used_blocks = 1;
+	for (i = 0; i < new_fm->used_blocks; i++) {
+		new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+		if (!new_fm->e[i]) {
+			while (i--)
+				kfree(new_fm->e[i]);
+
+			kfree(new_fm);
+			mutex_unlock(&ubi->fm_mutex);
+			return -ENOMEM;
+		}
+	}
+#if 0
+	if(ubi->fm_idx&&ubi->fm_idx < ubi->fm_cnt) {
+		new_fm->e[0]->pnum = ubi->fm->e[0]->pnum;
+		new_fm->e[0]->ec = ubi->fm->e[0]->ec;
+
+	} else {
+#endif
+	/* fm_idx == 0 is guaranteed by the early-return above, so this
+	 * branch is always taken and old_fm is always assigned. */
+	if (!ubi->fm_idx) {
+		old_fm = ubi->fm;
+		/* NOTE(review): if the write below fails, ubi->fm stays
+		 * NULL and the error is swallowed (ret forced to 0 at
+		 * 'err') -- confirm this is the intended fallback. */
+		ubi->fm = NULL;
+		if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
+			ubi_err("fastmap too large");
+			ret = -ENOSPC;
+			goto err;
+		}
+#if 0
+		for (i = 1; i < new_fm->used_blocks; i++) {
+			spin_lock(&ubi->wl_lock);
+			tmp_e = ubi_wl_get_fm_peb(ubi, 0);
+			spin_unlock(&ubi->wl_lock);
+
+			if (!tmp_e && !old_fm) {
+				int j;
+				ubi_err("could not get any free erase block");
+
+				for (j = 1; j < i; j++)
+					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
+
+				ret = -ENOSPC;
+				goto err;
+			} else if (!tmp_e && old_fm) {
+				ret = erase_block(ubi, old_fm->e[i]->pnum);
+				if (ret < 0) {
+					int j;
+
+					for (j = 1; j < i; j++)
+						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
+								  j, 0);
+
+					ubi_err("could not erase old fastmap PEB");
+					goto err;
+				}
+
+				new_fm->e[i]->pnum = old_fm->e[i]->pnum;
+				new_fm->e[i]->ec = old_fm->e[i]->ec;
+			} else {
+				new_fm->e[i]->pnum = tmp_e->pnum;
+				new_fm->e[i]->ec = tmp_e->ec;
+
+				if (old_fm)
+					ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
+							  old_fm->to_be_tortured[i]);
+			}
+		}
+#endif
+
+		/* Pick an anchor PEB for the new fastmap. */
+		spin_lock(&ubi->wl_lock);
+		tmp_e = ubi_wl_get_fm_peb(ubi, 1);
+		spin_unlock(&ubi->wl_lock);
+
+		if (old_fm) {
+			/* no fresh anchor PEB was found, reuse the old one */
+			if (!tmp_e) {
+				ret = erase_block(ubi, old_fm->e[0]->pnum);
+				if (ret < 0) {
+					int i;
+					ubi_err("could not erase old anchor PEB");
+
+					for (i = 1; i < new_fm->used_blocks; i++)
+						ubi_wl_put_fm_peb(ubi, new_fm->e[i],
+								  i, 0);
+					goto err;
+				}
+
+				/* erase_block() returned the new EC value */
+				new_fm->e[0]->pnum = old_fm->e[0]->pnum;
+				new_fm->e[0]->ec = ret;
+			} else {
+				/* we've got a new anchor PEB, return the old one */
+				ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
+						  old_fm->to_be_tortured[0]);
+				new_fm->e[0]->pnum = tmp_e->pnum;
+				new_fm->e[0]->ec = tmp_e->ec;
+			}
+		} else {
+			if (!tmp_e) {
+				int i;
+				ubi_err("could not find any anchor PEB");
+
+				for (i = 1; i < new_fm->used_blocks; i++)
+					ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
+
+				ret = -ENOSPC;
+				goto err;
+			}
+
+			new_fm->e[0]->pnum = tmp_e->pnum;
+			new_fm->e[0]->ec = tmp_e->ec;
+		}
+	}//add to else
+	/* Block background workers and EBA changes while serializing. */
+	down_write(&ubi->work_sem);
+	down_write(&ubi->fm_sem);
+	ret = ubi_write_fastmap(ubi, new_fm);
+	up_write(&ubi->fm_sem);
+	up_write(&ubi->work_sem);
+	if (ret)
+		goto err;
+
+out_unlock:
+	mutex_unlock(&ubi->fm_mutex);
+	/* old_fm is always set here since fm_idx == 0 on this path. */
+	if(ubi->fm_idx == 0)kfree(old_fm);
+	return ret;
+
+err:
+	/* NOTE(review): only the layout struct is freed; the new_fm->e[i]
+	 * wl-entries allocated above appear to leak on this path --
+	 * confirm. */
+	kfree(new_fm);
+
+	ubi_warn("Unable to write new fastmap, err=%i", ret);
+
+	/* Deliberately swallow the error: UBI keeps running without a
+	 * fastmap and falls back to a full scan on the next attach. */
+	ret = 0;
+/* if (old_fm) {
+		ret = invalidate_fastmap(ubi, old_fm);
+		if (ret < 0)
+			ubi_err("Unable to invalidiate current fastmap!");
+		else if (ret)
+			ret = 0;
+	}
+*/
+	goto out_unlock;
+}
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/gluebi.c b/ANDROID_3.4.5/drivers/mtd/ubi/gluebi.c
index 90b98822..b93807b4 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/gluebi.c
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/gluebi.c
@@ -41,7 +41,7 @@
#include "ubi-media.h"
#define err_msg(fmt, ...) \
- printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \
+ pr_err("gluebi (pid %d): %s: " fmt "\n", \
current->pid, __func__, ##__VA_ARGS__)
/**
@@ -171,17 +171,17 @@ static void gluebi_put_device(struct mtd_info *mtd)
static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, unsigned char *buf)
{
- int err = 0, lnum, offs, total_read;
+ int err = 0, lnum, offs, bytes_left;
struct gluebi_device *gluebi;
gluebi = container_of(mtd, struct gluebi_device, mtd);
lnum = div_u64_rem(from, mtd->erasesize, &offs);
- total_read = len;
- while (total_read) {
+ bytes_left = len;
+ while (bytes_left) {
size_t to_read = mtd->erasesize - offs;
- if (to_read > total_read)
- to_read = total_read;
+ if (to_read > bytes_left)
+ to_read = bytes_left;
err = ubi_read(gluebi->desc, lnum, buf, offs, to_read);
if (err)
@@ -189,11 +189,11 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
lnum += 1;
offs = 0;
- total_read -= to_read;
+ bytes_left -= to_read;
buf += to_read;
}
- *retlen = len - total_read;
+ *retlen = len - bytes_left;
return err;
}
@@ -211,7 +211,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
- int err = 0, lnum, offs, total_written;
+ int err = 0, lnum, offs, bytes_left;
struct gluebi_device *gluebi;
gluebi = container_of(mtd, struct gluebi_device, mtd);
@@ -220,24 +220,24 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
if (len % mtd->writesize || offs % mtd->writesize)
return -EINVAL;
- total_written = len;
- while (total_written) {
+ bytes_left = len;
+ while (bytes_left) {
size_t to_write = mtd->erasesize - offs;
- if (to_write > total_written)
- to_write = total_written;
+ if (to_write > bytes_left)
+ to_write = bytes_left;
- err = ubi_write(gluebi->desc, lnum, buf, offs, to_write);
+ err = ubi_leb_write(gluebi->desc, lnum, buf, offs, to_write);
if (err)
break;
lnum += 1;
offs = 0;
- total_written -= to_write;
+ bytes_left -= to_write;
buf += to_write;
}
- *retlen = len - total_written;
+ *retlen = len - bytes_left;
return err;
}
@@ -341,9 +341,8 @@ static int gluebi_create(struct ubi_device_info *di,
mutex_lock(&devices_mutex);
g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (g)
- err_msg("gluebi MTD device %d form UBI device %d volume %d "
- "already exists", g->mtd.index, vi->ubi_num,
- vi->vol_id);
+ err_msg("gluebi MTD device %d form UBI device %d volume %d already exists",
+ g->mtd.index, vi->ubi_num, vi->vol_id);
mutex_unlock(&devices_mutex);
if (mtd_device_register(mtd, NULL, 0)) {
@@ -376,8 +375,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
mutex_lock(&devices_mutex);
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
- err_msg("got remove notification for unknown UBI device %d "
- "volume %d", vi->ubi_num, vi->vol_id);
+ err_msg("got remove notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
err = -ENOENT;
} else if (gluebi->refcnt)
err = -EBUSY;
@@ -390,9 +389,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
mtd = &gluebi->mtd;
err = mtd_device_unregister(mtd);
if (err) {
- err_msg("cannot remove fake MTD device %d, UBI device %d, "
- "volume %d, error %d", mtd->index, gluebi->ubi_num,
- gluebi->vol_id, err);
+ err_msg("cannot remove fake MTD device %d, UBI device %d, volume %d, error %d",
+ mtd->index, gluebi->ubi_num, gluebi->vol_id, err);
mutex_lock(&devices_mutex);
list_add_tail(&gluebi->list, &gluebi_devices);
mutex_unlock(&devices_mutex);
@@ -422,8 +420,8 @@ static int gluebi_updated(struct ubi_volume_info *vi)
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
mutex_unlock(&devices_mutex);
- err_msg("got update notification for unknown UBI device %d "
- "volume %d", vi->ubi_num, vi->vol_id);
+ err_msg("got update notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
return -ENOENT;
}
@@ -449,8 +447,8 @@ static int gluebi_resized(struct ubi_volume_info *vi)
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
mutex_unlock(&devices_mutex);
- err_msg("got update notification for unknown UBI device %d "
- "volume %d", vi->ubi_num, vi->vol_id);
+ err_msg("got update notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
return -ENOENT;
}
gluebi->mtd.size = vi->used_bytes;
@@ -507,9 +505,9 @@ static void __exit ubi_gluebi_exit(void)
err = mtd_device_unregister(mtd);
if (err)
- err_msg("error %d while removing gluebi MTD device %d, "
- "UBI device %d, volume %d - ignoring", err,
- mtd->index, gluebi->ubi_num, gluebi->vol_id);
+ err_msg("error %d while removing gluebi MTD device %d, UBI device %d, volume %d - ignoring",
+ err, mtd->index, gluebi->ubi_num,
+ gluebi->vol_id);
kfree(mtd->name);
kfree(gluebi);
}
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/io.c b/ANDROID_3.4.5/drivers/mtd/ubi/io.c
index 43f1a001..eede4c5e 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/io.c
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/io.c
@@ -91,21 +91,15 @@
#include <linux/slab.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_ec_hdr *ec_hdr);
-static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_vid_hdr *vid_hdr);
-#else
-#define paranoid_check_not_bad(ubi, pnum) 0
-#define paranoid_check_peb_ec_hdr(ubi, pnum) 0
-#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0
-#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
-#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
-#endif
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr);
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr);
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len);
/**
* ubi_io_read - read data from a physical eraseblock.
@@ -142,7 +136,7 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
ubi_assert(len > 0);
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err)
return err;
@@ -171,6 +165,10 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
err = mtd_read(ubi->mtd, addr, len, &read, buf);
+
+ if (err == -NEED_REPLACEMENT)
+ err = 0;
+
if (err) {
const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
@@ -183,22 +181,21 @@ retry:
* enabled. A corresponding message will be printed
* later, when it is has been scrubbed.
*/
- dbg_msg("fixable bit-flip detected at PEB %d", pnum);
+ ubi_msg("fixable bit-flip detected at PEB %d", pnum);
ubi_assert(len == read);
return UBI_IO_BITFLIPS;
}
if (retries++ < UBI_IO_RETRIES) {
- dbg_io("error %d%s while reading %d bytes from PEB "
- "%d:%d, read only %zd bytes, retry",
- err, errstr, len, pnum, offset, read);
+ ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
+ err, errstr, len, pnum, offset, read);
yield();
goto retry;
}
- ubi_err("error %d%s while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, errstr, len, pnum, offset, read);
- ubi_dbg_dump_stack();
+ ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, errstr, len, pnum, offset, read);
+ dump_stack();
/*
* The driver should never return -EBADMSG if it failed to read
@@ -221,6 +218,62 @@ retry:
return err;
}
+int ubi_io_read_oob(const struct ubi_device *ubi, void *buf, int pnum, int offset,
+ int len, void * spare)
+{
+ int err, retries = 0;
+ loff_t addr;
+ struct mtd_oob_ops ops;
+ err = self_check_not_bad(ubi, pnum);
+ if(err)
+ return err;
+
+ if(buf) *((uint8_t *)buf) ^= 0xFF;
+
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.ooblen = 8;
+ ops.len = len;
+ ops.ooboffs = 0;
+ ops.datbuf = buf;
+ ops.oobbuf = spare;
+
+ addr = (loff_t)pnum * ubi->peb_size + offset;
+retry:
+ err = ubi->mtd->_read_oob(ubi->mtd, addr, &ops);
+ if (err == -NEED_REPLACEMENT)
+ err = 0;
+ if (err) {
+ const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
+
+ if (mtd_is_bitflip(err)) {
+ ubi_msg("fixable bit-flip detected at PEB %d", pnum);
+ return UBI_IO_BITFLIPS;
+ }
+
+ if (retries++ < UBI_IO_RETRIES) {
+ dbg_io("oob error %d%s while reading %d bytes from PEB "
+ "%d, read only %zd bytes, retry",
+ err, errstr, len, pnum, offset);
+ yield();
+ goto retry;
+ }
+ printk("\n UBI_IO_READ_OOB failed!");
+ dump_stack();
+
+ /*
+ * The driver should never return -EBADMSG if it failed to read
+ * all the requested data. But some buggy drivers might do
+ * this, so we change it to -EIO.
+ */
+ if (mtd_is_eccerr(err)) {
+ ubi_assert(0);
+ err = -EIO;
+ }
+ }
+ return err;
+}
+
+
/**
* ubi_io_write - write data to a physical eraseblock.
* @ubi: UBI device description object
@@ -245,7 +298,6 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
size_t written;
loff_t addr;
- dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
@@ -257,14 +309,12 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
return -EROFS;
}
- /* The below has to be compiled out if paranoid checks are disabled */
-
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err)
return err;
/* The area we are writing to has to contain all 0xFF bytes */
- err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
if (err)
return err;
@@ -273,33 +323,107 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
* We write to the data area of the physical eraseblock. Make
* sure it has valid EC and VID headers.
*/
- err = paranoid_check_peb_ec_hdr(ubi, pnum);
+ err = self_check_peb_ec_hdr(ubi, pnum);
if (err)
return err;
- err = paranoid_check_peb_vid_hdr(ubi, pnum);
+ err = self_check_peb_vid_hdr(ubi, pnum);
if (err)
return err;
}
if (ubi_dbg_is_write_failure(ubi)) {
- dbg_err("cannot write %d bytes to PEB %d:%d "
- "(emulated)", len, pnum, offset);
- ubi_dbg_dump_stack();
+ ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
+ len, pnum, offset);
+ dump_stack();
return -EIO;
}
addr = (loff_t)pnum * ubi->peb_size + offset;
err = mtd_write(ubi->mtd, addr, len, &written, buf);
if (err) {
- ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
- "%zd bytes", err, len, pnum, offset, written);
- ubi_dbg_dump_stack();
- ubi_dbg_dump_flash(ubi, pnum, offset, len);
+ ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
+ err, len, pnum, offset, written);
+ dump_stack();
+ ubi_dump_flash(ubi, pnum, offset, len);
} else
ubi_assert(written == len);
+ if (!err) {
+ err = self_check_write(ubi, buf, pnum, offset, len);
+ if (err)
+ return err;
+
+ /*
+ * Since we always write sequentially, the rest of the PEB has
+ * to contain only 0xFF bytes.
+ */
+ offset += len;
+ len = ubi->peb_size - offset;
+ if (len)
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
+ }
+
+ return err;
+}
+
+int ubi_io_write_oob(struct ubi_device *ubi, const void *buf, int pnum, int offset,
+ int len, void *spare)
+{
+ int err;
+ loff_t addr;
+ struct mtd_oob_ops ops;
+
+ if (ubi->ro_mode) {
+ ubi_err("\nread-only mode");
+ return -EROFS;
+ }
+
+ err = self_check_not_bad(ubi, pnum);
+ if (err)
+ return err;
+
+ /* The area we are writing to has to contain all 0xFF bytes */
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
+ if (err)
+ return err;
+
+ if (offset >= ubi->leb_start) {
+ /*
+ * We write to the data area of the physical eraseblock. Make
+ * sure it has valid EC and VID headers.
+ */
+ err = self_check_peb_ec_hdr(ubi, pnum);
+ if (err)
+ return err;
+ err = self_check_peb_vid_hdr(ubi, pnum);
+ if (err)
+ return err;
+ }
+
+ if (ubi_dbg_is_write_failure(ubi)) {
+ ubi_err("cannot write %d bytes to PEB %d:%d "
+ "(emulated)", len, pnum, offset);
+ dump_stack();
+ return -EIO;
+ }
+
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.ooblen = 32;
+ ops.len = len;
+ ops.ooboffs = 0;
+ ops.datbuf = (u8 *)buf;
+ ops.oobbuf = (u8 *)spare;
+
+ addr = (loff_t)pnum * ubi->peb_size + offset;
+ err = ubi->mtd->_write_oob(ubi->mtd, addr, &ops);
+
+ if (err) {
+ printk("\n UBI_IO_WRITE_OOB failed!");
+ dump_stack();
+ ubi_dump_flash(ubi, pnum, offset, len);
+ }
if (!err) {
- err = ubi_dbg_check_write(ubi, buf, pnum, offset, len);
+ err = self_check_write(ubi, buf, pnum, offset, len);
if (err)
return err;
@@ -310,12 +434,13 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
offset += len;
len = ubi->peb_size - offset;
if (len)
- err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
}
return err;
}
+
/**
* erase_callback - MTD erasure call-back.
* @ei: MTD erase information object.
@@ -364,13 +489,13 @@ retry:
err = mtd_erase(ubi->mtd, &ei);
if (err) {
if (retries++ < UBI_IO_RETRIES) {
- dbg_io("error %d while erasing PEB %d, retry",
- err, pnum);
+ ubi_warn("error %d while erasing PEB %d, retry",
+ err, pnum);
yield();
goto retry;
}
ubi_err("cannot erase PEB %d, error %d", pnum, err);
- ubi_dbg_dump_stack();
+ dump_stack();
return err;
}
@@ -383,21 +508,21 @@ retry:
if (ei.state == MTD_ERASE_FAILED) {
if (retries++ < UBI_IO_RETRIES) {
- dbg_io("error while erasing PEB %d, retry", pnum);
+ ubi_warn("error while erasing PEB %d, retry", pnum);
yield();
goto retry;
}
ubi_err("cannot erase PEB %d", pnum);
- ubi_dbg_dump_stack();
+ dump_stack();
return -EIO;
}
- err = ubi_dbg_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+ err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
if (err)
return err;
if (ubi_dbg_is_erase_failure(ubi)) {
- dbg_err("cannot erase PEB %d (emulated)", pnum);
+ ubi_err("cannot erase PEB %d (emulated)", pnum);
return -EIO;
}
@@ -521,8 +646,7 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
* It is important to first invalidate the EC header, and then the VID
* header. Otherwise a power cut may lead to valid EC header and
* invalid VID header, in which case UBI will treat this PEB as
- * corrupted and will try to preserve it, and print scary warnings (see
- * the header comment in scan.c for more information).
+ * corrupted and will try to preserve it, and print scary warnings.
*/
addr = (loff_t)pnum * ubi->peb_size;
err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
@@ -563,7 +687,7 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
*/
ubi_err("cannot invalidate PEB %d, write returned %d read returned %d",
pnum, err, err1);
- ubi_dbg_dump_flash(ubi, pnum, 0, ubi->peb_size);
+ ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
return -EIO;
}
@@ -589,7 +713,7 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err != 0)
return err;
@@ -694,8 +818,7 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
leb_start = be32_to_cpu(ec_hdr->data_offset);
if (ec_hdr->version != UBI_VERSION) {
- ubi_err("node with incompatible UBI version found: "
- "this UBI version is %d, image version is %d",
+ ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
UBI_VERSION, (int)ec_hdr->version);
goto bad;
}
@@ -721,8 +844,8 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
bad:
ubi_err("bad EC header");
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
return 1;
}
@@ -786,10 +909,10 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
/* The physical eraseblock is supposedly empty */
if (verbose)
- ubi_warn("no EC header found at PEB %d, "
- "only 0xFF bytes", pnum);
- dbg_bld("no EC header found at PEB %d, "
- "only 0xFF bytes", pnum);
+ ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
if (!read_err)
return UBI_IO_FF;
else
@@ -801,12 +924,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
* 0xFF bytes. Report that the header is corrupted.
*/
if (verbose) {
- ubi_warn("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
- ubi_dbg_dump_ec_hdr(ec_hdr);
+ ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
+ ubi_dump_ec_hdr(ec_hdr);
}
- dbg_bld("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
return UBI_IO_BAD_HDR;
}
@@ -815,12 +938,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (hdr_crc != crc) {
if (verbose) {
- ubi_warn("bad EC header CRC at PEB %d, calculated "
- "%#08x, read %#08x", pnum, crc, hdr_crc);
- ubi_dbg_dump_ec_hdr(ec_hdr);
+ ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_dump_ec_hdr(ec_hdr);
}
- dbg_bld("bad EC header CRC at PEB %d, calculated "
- "%#08x, read %#08x", pnum, crc, hdr_crc);
+ dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
if (!read_err)
return UBI_IO_BAD_HDR;
@@ -874,7 +997,7 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
ec_hdr->hdr_crc = cpu_to_be32(crc);
- err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
if (err)
return err;
@@ -905,40 +1028,40 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
int usable_leb_size = ubi->leb_size - data_pad;
if (copy_flag != 0 && copy_flag != 1) {
- dbg_err("bad copy_flag");
+ ubi_err("bad copy_flag");
goto bad;
}
if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
data_pad < 0) {
- dbg_err("negative values");
+ ubi_err("negative values");
goto bad;
}
if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
- dbg_err("bad vol_id");
+ ubi_err("bad vol_id");
goto bad;
}
if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
- dbg_err("bad compat");
+ ubi_err("bad compat");
goto bad;
}
if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
compat != UBI_COMPAT_REJECT) {
- dbg_err("bad compat");
+ ubi_err("bad compat");
goto bad;
}
if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
- dbg_err("bad vol_type");
+ ubi_err("bad vol_type");
goto bad;
}
if (data_pad >= ubi->leb_size / 2) {
- dbg_err("bad data_pad");
+ ubi_err("bad data_pad");
goto bad;
}
@@ -950,45 +1073,45 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
* mapped logical eraseblocks.
*/
if (used_ebs == 0) {
- dbg_err("zero used_ebs");
+ ubi_err("zero used_ebs");
goto bad;
}
if (data_size == 0) {
- dbg_err("zero data_size");
+ ubi_err("zero data_size");
goto bad;
}
if (lnum < used_ebs - 1) {
if (data_size != usable_leb_size) {
- dbg_err("bad data_size");
+ ubi_err("bad data_size");
goto bad;
}
} else if (lnum == used_ebs - 1) {
if (data_size == 0) {
- dbg_err("bad data_size at last LEB");
+ ubi_err("bad data_size at last LEB");
goto bad;
}
} else {
- dbg_err("too high lnum");
+ ubi_err("too high lnum");
goto bad;
}
} else {
if (copy_flag == 0) {
if (data_crc != 0) {
- dbg_err("non-zero data CRC");
+ ubi_err("non-zero data CRC");
goto bad;
}
if (data_size != 0) {
- dbg_err("non-zero data_size");
+ ubi_err("non-zero data_size");
goto bad;
}
} else {
if (data_size == 0) {
- dbg_err("zero data_size of copy");
+ ubi_err("zero data_size of copy");
goto bad;
}
}
if (used_ebs != 0) {
- dbg_err("bad used_ebs");
+ ubi_err("bad used_ebs");
goto bad;
}
}
@@ -997,8 +1120,8 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
bad:
ubi_err("bad VID header");
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
return 1;
}
@@ -1041,10 +1164,10 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
if (verbose)
- ubi_warn("no VID header found at PEB %d, "
- "only 0xFF bytes", pnum);
- dbg_bld("no VID header found at PEB %d, "
- "only 0xFF bytes", pnum);
+ ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
if (!read_err)
return UBI_IO_FF;
else
@@ -1052,12 +1175,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
}
if (verbose) {
- ubi_warn("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
- ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
+ ubi_dump_vid_hdr(vid_hdr);
}
- dbg_bld("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
return UBI_IO_BAD_HDR;
}
@@ -1066,12 +1189,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (hdr_crc != crc) {
if (verbose) {
- ubi_warn("bad CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
- ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_dump_vid_hdr(vid_hdr);
}
- dbg_bld("bad CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
+ dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
if (!read_err)
return UBI_IO_BAD_HDR;
else
@@ -1112,7 +1235,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
dbg_io("write VID header to PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- err = paranoid_check_peb_ec_hdr(ubi, pnum);
+ err = self_check_peb_ec_hdr(ubi, pnum);
if (err)
return err;
@@ -1121,7 +1244,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
vid_hdr->hdr_crc = cpu_to_be32(crc);
- err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
if (err)
return err;
@@ -1131,34 +1254,32 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
return err;
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_check_not_bad - ensure that a physical eraseblock is not bad.
+ * self_check_not_bad - ensure that a physical eraseblock is not bad.
* @ubi: UBI device description object
* @pnum: physical eraseblock number to check
*
* This function returns zero if the physical eraseblock is good, %-EINVAL if
* it is bad and a negative error code if an error occurred.
*/
-static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
{
int err;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
err = ubi_io_is_bad(ubi, pnum);
if (!err)
return err;
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_stack();
+ ubi_err("self-check failed for PEB %d", pnum);
+ dump_stack();
return err > 0 ? -EINVAL : err;
}
/**
- * paranoid_check_ec_hdr - check if an erase counter header is all right.
+ * self_check_ec_hdr - check if an erase counter header is all right.
* @ubi: UBI device description object
* @pnum: physical eraseblock number the erase counter header belongs to
* @ec_hdr: the erase counter header to check
@@ -1166,13 +1287,13 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
* This function returns zero if the erase counter header contains valid
* values, and %-EINVAL if not.
*/
-static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_ec_hdr *ec_hdr)
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr)
{
int err;
uint32_t magic;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
magic = be32_to_cpu(ec_hdr->magic);
@@ -1184,33 +1305,33 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
err = validate_ec_hdr(ubi, ec_hdr);
if (err) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
goto fail;
}
return 0;
fail:
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
return -EINVAL;
}
/**
- * paranoid_check_peb_ec_hdr - check erase counter header.
+ * self_check_peb_ec_hdr - check erase counter header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
* This function returns zero if the erase counter header is all right and and
* a negative error code if not or if an error occurred.
*/
-static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
{
int err;
uint32_t crc, hdr_crc;
struct ubi_ec_hdr *ec_hdr;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1225,14 +1346,14 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
if (hdr_crc != crc) {
ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
err = -EINVAL;
goto exit;
}
- err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
exit:
kfree(ec_hdr);
@@ -1240,7 +1361,7 @@ exit:
}
/**
- * paranoid_check_vid_hdr - check that a volume identifier header is all right.
+ * self_check_vid_hdr - check that a volume identifier header is all right.
* @ubi: UBI device description object
* @pnum: physical eraseblock number the volume identifier header belongs to
* @vid_hdr: the volume identifier header to check
@@ -1248,13 +1369,13 @@ exit:
* This function returns zero if the volume identifier header is all right, and
* %-EINVAL if not.
*/
-static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_vid_hdr *vid_hdr)
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
{
int err;
uint32_t magic;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
magic = be32_to_cpu(vid_hdr->magic);
@@ -1266,36 +1387,36 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
err = validate_vid_hdr(ubi, vid_hdr);
if (err) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
goto fail;
}
return err;
fail:
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
return -EINVAL;
}
/**
- * paranoid_check_peb_vid_hdr - check volume identifier header.
+ * self_check_peb_vid_hdr - check volume identifier header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
* This function returns zero if the volume identifier header is all right,
* and a negative error code if not or if an error occurred.
*/
-static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
{
int err;
uint32_t crc, hdr_crc;
struct ubi_vid_hdr *vid_hdr;
void *p;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -1311,16 +1432,16 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
- ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
+ ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
err = -EINVAL;
goto exit;
}
- err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
exit:
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -1328,7 +1449,7 @@ exit:
}
/**
- * ubi_dbg_check_write - make sure write succeeded.
+ * self_check_write - make sure write succeeded.
* @ubi: UBI device description object
* @buf: buffer with data which were written
* @pnum: physical eraseblock number the data were written to
@@ -1339,15 +1460,15 @@ exit:
* the original data buffer - the data have to match. Returns zero if the data
* match and a negative error code if not or in case of failure.
*/
-int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
- int offset, int len)
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len)
{
int err, i;
size_t read;
void *buf1;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
@@ -1357,6 +1478,10 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
}
err = mtd_read(ubi->mtd, addr, len, &read, buf1);
+
+ if (err == -NEED_REPLACEMENT)
+ err = 0;
+
if (err && !mtd_is_bitflip(err))
goto out_free;
@@ -1368,7 +1493,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
if (c == c1)
continue;
- ubi_err("paranoid check failed for PEB %d:%d, len %d",
+ ubi_err("self-check failed for PEB %d:%d, len %d",
pnum, offset, len);
ubi_msg("data differ at position %d", i);
dump_len = max_t(int, 128, len - i);
@@ -1380,7 +1505,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
i, i + dump_len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
buf1 + i, dump_len, 1);
- ubi_dbg_dump_stack();
+ dump_stack();
err = -EINVAL;
goto out_free;
}
@@ -1394,7 +1519,7 @@ out_free:
}
/**
- * ubi_dbg_check_all_ff - check that a region of flash is empty.
+ * ubi_self_check_all_ff - check that a region of flash is empty.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @offset: the starting offset within the physical eraseblock to check
@@ -1404,14 +1529,14 @@ out_free:
* @offset of the physical eraseblock @pnum, and a negative error code if not
* or if an error occurred.
*/
-int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
size_t read;
int err;
void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
@@ -1421,16 +1546,20 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
}
err = mtd_read(ubi->mtd, addr, len, &read, buf);
+
+ if (err == -NEED_REPLACEMENT)
+ err = 0;
+
if (err && !mtd_is_bitflip(err)) {
- ubi_err("error %d while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, len, pnum, offset, read);
+ ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
goto error;
}
err = ubi_check_pattern(buf, 0xFF, len);
if (err == 0) {
- ubi_err("flash region at PEB %d:%d, length %d does not "
- "contain all 0xFF bytes", pnum, offset, len);
+ ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
+ pnum, offset, len);
goto fail;
}
@@ -1438,14 +1567,12 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
return 0;
fail:
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
ubi_msg("hex dump of the %d-%d region", offset, offset + len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
err = -EINVAL;
error:
- ubi_dbg_dump_stack();
+ dump_stack();
vfree(buf);
return err;
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/kapi.c b/ANDROID_3.4.5/drivers/mtd/ubi/kapi.c
index 9fdb3536..06dd33b3 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/kapi.c
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/kapi.c
@@ -221,12 +221,25 @@ out_free:
kfree(desc);
out_put_ubi:
ubi_put_device(ubi);
- dbg_err("cannot open device %d, volume %d, error %d",
+ ubi_err("cannot open device %d, volume %d, error %d",
ubi_num, vol_id, err);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ubi_open_volume);
+void ubi_update_volume(struct ubi_volume_desc *desc){
+
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_idx = 0;
+ ubi_update_fastmap(ubi);
+ ubi->fm_idx = 1;
+ #endif
+}
+EXPORT_SYMBOL_GPL(ubi_update_volume);
+
/**
* ubi_open_volume_nm - open UBI volume by name.
* @ubi_num: UBI device number
@@ -320,7 +333,20 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(ubi_open_volume_path);
+void ubi_set_volume(struct ubi_volume_desc *desc)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ spin_lock(&ubi->volumes_lock);
+ ubi->ro_mode = 1;
+ spin_unlock(&ubi->volumes_lock);
+ put_device(&vol->dev);
+ ubi_put_device(ubi);
+ module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(ubi_set_volume);
/**
* ubi_close_volume - close UBI volume.
* @desc: volume descriptor
@@ -419,6 +445,42 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
}
EXPORT_SYMBOL_GPL(ubi_leb_read);
+int ubi_leb_read_oob(struct ubi_volume_desc *desc, int lnum, void *buf, int offset,
+ int len, void *spare)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err, vol_id = vol->vol_id;
+
+ dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
+ lnum >= vol->used_ebs || offset < 0 || len < 0 ||
+ offset + len > vol->usable_leb_size)
+ return -EINVAL;
+
+ if (vol->vol_type == UBI_STATIC_VOLUME) {
+ if (vol->used_ebs == 0)
+ /* Empty static UBI volume */
+ return 0;
+ if (lnum == vol->used_ebs - 1 &&
+ offset + len > vol->last_eb_bytes)
+ return -EINVAL;
+ }
+
+ if (vol->upd_marker)
+ return -EBADF;
+ if (len == 0)
+ return 0;
+
+ err = ubi_eba_read_leb_oob(ubi, vol, lnum, buf, offset, len, spare);
+ if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
+ ubi_warn("mark volume %d as corrupted", vol_id);
+ vol->corrupted = 1;
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_leb_read_oob);
/**
* ubi_leb_write - write data.
* @desc: volume descriptor
@@ -426,11 +488,9 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
* @buf: data to write
* @offset: offset within the logical eraseblock where to write
* @len: how many bytes to write
- * @dtype: expected data type
*
* This function writes @len bytes of data from @buf to offset @offset of
- * logical eraseblock @lnum. The @dtype argument describes expected lifetime of
- * the data.
+ * logical eraseblock @lnum.
*
* This function takes care of physical eraseblock write failures. If write to
* the physical eraseblock write operation fails, the logical eraseblock is
@@ -447,7 +507,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
* returns immediately with %-EBADF code.
*/
int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
- int offset, int len, int dtype)
+ int offset, int len)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
@@ -466,27 +526,50 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (len == 0)
return 0;
- return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
}
EXPORT_SYMBOL_GPL(ubi_leb_write);
+int ubi_leb_write_oob(struct ubi_volume_desc *desc, int lnum, const void *buf,
+ int offset, int len, void *spare, int dtype)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ dbg_gen("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset);
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
+ return -EINVAL;
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 ||
+ offset + len > vol->usable_leb_size ||
+ offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (len == 0)
+ return 0;
+ return ubi_eba_write_leb_oob(ubi, vol, lnum, buf, offset, len, spare, dtype);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_write_oob);
+
/*
* ubi_leb_change - change logical eraseblock atomically.
* @desc: volume descriptor
* @lnum: logical eraseblock number to change
* @buf: data to write
* @len: how many bytes to write
- * @dtype: expected data type
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
@@ -497,7 +580,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
* code in case of failure.
*/
int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
- int len, int dtype)
+ int len)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
@@ -515,17 +598,13 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (len == 0)
return 0;
- return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
+ return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
}
EXPORT_SYMBOL_GPL(ubi_leb_change);
@@ -562,7 +641,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
if (err)
return err;
- return ubi_wl_flush(ubi);
+ return ubi_wl_flush(ubi, vol->vol_id, lnum);
}
EXPORT_SYMBOL_GPL(ubi_leb_erase);
@@ -626,7 +705,6 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
* ubi_leb_map - map logical eraseblock to a physical eraseblock.
* @desc: volume descriptor
* @lnum: logical eraseblock number
- * @dtype: expected data type
*
* This function maps an un-mapped logical eraseblock @lnum to a physical
* eraseblock. This means, that after a successful invocation of this
@@ -639,7 +717,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
* eraseblock is already mapped, and other negative error codes in case of
* other failures.
*/
-int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
@@ -652,17 +730,13 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
if (lnum < 0 || lnum >= vol->reserved_pebs)
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (vol->eba_tbl[lnum] >= 0)
return -EBADMSG;
- return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(ubi_leb_map);
@@ -720,6 +794,33 @@ int ubi_sync(int ubi_num)
}
EXPORT_SYMBOL_GPL(ubi_sync);
+/**
+ * ubi_flush - flush UBI work queue.
+ * @ubi_num: UBI device to flush work queue
+ * @vol_id: volume id to flush for
+ * @lnum: logical eraseblock number to flush for
+ *
+ * This function executes all pending works for a particular volume id / logical
+ * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
+ * a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_flush(int ubi_num, int vol_id, int lnum)
+{
+ struct ubi_device *ubi;
+ int err = 0;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ err = ubi_wl_flush(ubi, vol_id, lnum);
+ ubi_put_device(ubi);
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_flush);
+
BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
/**
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/misc.c b/ANDROID_3.4.5/drivers/mtd/ubi/misc.c
index f6a7d7ac..f913d701 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/misc.c
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/misc.c
@@ -92,16 +92,45 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
}
/**
- * ubi_calculate_rsvd_pool - calculate how many PEBs must be reserved for bad
+ * ubi_update_reserved - update bad eraseblock handling accounting data.
+ * @ubi: UBI device description object
+ *
+ * This function calculates the gap between current number of PEBs reserved for
+ * bad eraseblock handling and the required level of PEBs that must be
+ * reserved, and if necessary, reserves more PEBs to fill that gap, according
+ * to availability. Should be called with ubi->volumes_lock held.
+ */
+void ubi_update_reserved(struct ubi_device *ubi)
+{
+ int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
+
+ if (need <= 0 || ubi->avail_pebs == 0)
+ return;
+
+ need = min_t(int, need, ubi->avail_pebs);
+ ubi->avail_pebs -= need;
+ ubi->rsvd_pebs += need;
+ ubi->beb_rsvd_pebs += need;
+ ubi_msg("reserved more %d PEBs for bad PEB handling", need);
+}
+
+/**
+ * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
* eraseblock handling.
* @ubi: UBI device description object
*/
void ubi_calculate_reserved(struct ubi_device *ubi)
{
- ubi->beb_rsvd_level = ubi->good_peb_count/100;
- ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE;
- if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS)
- ubi->beb_rsvd_level = MIN_RESEVED_PEBS;
+ /*
+ * Calculate the actual number of PEBs currently needed to be reserved
+ * for future bad eraseblock handling.
+ */
+ ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
+ if (ubi->beb_rsvd_level < 0) {
+ ubi->beb_rsvd_level = 0;
+ ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
+ ubi->bad_peb_count, ubi->bad_peb_limit);
+ }
}
/**
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/ubi-media.h b/ANDROID_3.4.5/drivers/mtd/ubi/ubi-media.h
index 6fb8ec21..ac2b24d1 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/ubi-media.h
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/ubi-media.h
@@ -149,10 +149,10 @@ enum {
* The @image_seq field is used to validate a UBI image that has been prepared
* for a UBI device. The @image_seq value can be any value, but it must be the
* same on all eraseblocks. UBI will ensure that all new erase counter headers
- * also contain this value, and will check the value when scanning at start-up.
+ * also contain this value, and will check the value when attaching the flash.
* One way to make use of @image_seq is to increase its value by one every time
* an image is flashed over an existing image, then, if the flashing does not
- * complete, UBI will detect the error when scanning.
+ * complete, UBI will detect the error when attaching the media.
*/
struct ubi_ec_hdr {
__be32 magic;
@@ -298,8 +298,8 @@ struct ubi_vid_hdr {
#define UBI_INT_VOL_COUNT 1
/*
- * Starting ID of internal volumes. There is reserved room for 4096 internal
- * volumes.
+ * Starting ID of internal volumes: 0x7fffefff.
+ * There is reserved room for 4096 internal volumes.
*/
#define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096)
@@ -375,4 +375,141 @@ struct ubi_vtbl_record {
__be32 crc;
} __packed;
+/* UBI fastmap on-flash data structures */
+
+#define UBI_FM_SB_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 1)
+#define UBI_FM_DATA_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 2)
+
+/* fastmap on-flash data structure format version */
+#define UBI_FM_FMT_VERSION 1
+
+#define UBI_FM_SB_MAGIC 0x7B11D69F
+#define UBI_FM_HDR_MAGIC 0xD4B82EF7
+#define UBI_FM_VHDR_MAGIC 0xFA370ED1
+#define UBI_FM_POOL_MAGIC 0x67AF4D08
+#define UBI_FM_EBA_MAGIC 0xf0c040a8
+
+/* A fastmap super block can be located between PEB 0 and
+ * UBI_FM_MAX_START */
+#define UBI_FM_MAX_START 64
+
+/* A fastmap can use up to UBI_FM_MAX_BLOCKS PEBs */
+#define UBI_FM_MAX_BLOCKS 32
+
+/* 5% of the total number of PEBs have to be scanned while attaching
+ * from a fastmap.
+ * But the size of this pool is limited to be between UBI_FM_MIN_POOL_SIZE and
+ * UBI_FM_MAX_POOL_SIZE */
+#define UBI_FM_MIN_POOL_SIZE 8
+#define UBI_FM_MAX_POOL_SIZE 256
+
+#define UBI_FM_WL_POOL_SIZE 25
+
+/**
+ * struct ubi_fm_sb - UBI fastmap super block
+ * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
+ * @version: format version of this fastmap
+ * @data_crc: CRC over the fastmap data
+ * @used_blocks: number of PEBs used by this fastmap
+ * @block_loc: an array containing the location of all PEBs of the fastmap
+ * @block_ec: the erase counter of each used PEB
+ * @sqnum: highest sequence number value at the time while taking the fastmap
+ *
+ */
+struct ubi_fm_sb {
+ __be32 magic;
+ __u8 version;
+ __u8 padding1[3];
+ __be32 data_crc;
+ __be32 used_blocks;
+ __be32 block_loc[UBI_FM_MAX_BLOCKS];
+ __be32 block_ec[UBI_FM_MAX_BLOCKS];
+ __be64 sqnum;
+ __u8 padding2[32];
+} __packed;
+
+/**
+ * struct ubi_fm_hdr - header of the fastmap data set
+ * @magic: fastmap header magic number (%UBI_FM_HDR_MAGIC)
+ * @free_peb_count: number of free PEBs known by this fastmap
+ * @used_peb_count: number of used PEBs known by this fastmap
+ * @scrub_peb_count: number of to be scrubbed PEBs known by this fastmap
+ * @bad_peb_count: number of bad PEBs known by this fastmap
+ * @erase_peb_count: number of bad PEBs which have to be erased
+ * @vol_count: number of UBI volumes known by this fastmap
+ */
+struct ubi_fm_hdr {
+ __be32 magic;
+ __be32 free_peb_count;
+ __be32 used_peb_count;
+ __be32 scrub_peb_count;
+ __be32 bad_peb_count;
+ __be32 erase_peb_count;
+ __be32 vol_count;
+ __u8 padding[4];
+} __packed;
+
+/* struct ubi_fm_hdr is followed by two struct ubi_fm_scan_pool */
+
+/**
+ * struct ubi_fm_scan_pool - Fastmap pool PEBs to be scanned while attaching
+ * @magic: pool magic number (%UBI_FM_POOL_MAGIC)
+ * @size: current pool size
+ * @max_size: maximal pool size
+ * @pebs: an array containing the location of all PEBs in this pool
+ */
+struct ubi_fm_scan_pool {
+ __be32 magic;
+ __be16 size;
+ __be16 max_size;
+ __be32 pebs[UBI_FM_MAX_POOL_SIZE];
+ __be32 padding[4];
+} __packed;
+
+/* ubi_fm_scan_pool is followed by nfree+nused struct ubi_fm_ec records */
+
+/**
+ * struct ubi_fm_ec - stores the erase counter of a PEB
+ * @pnum: PEB number
+ * @ec: ec of this PEB
+ */
+struct ubi_fm_ec {
+ __be32 pnum;
+ __be32 ec;
+} __packed;
+
+/**
+ * struct ubi_fm_volhdr - Fastmap volume header
+ * it identifies the start of an eba table
+ * @magic: Fastmap volume header magic number (%UBI_FM_VHDR_MAGIC)
+ * @vol_id: volume id of the fastmapped volume
+ * @vol_type: type of the fastmapped volume
+ * @data_pad: data_pad value of the fastmapped volume
+ * @used_ebs: number of used LEBs within this volume
+ * @last_eb_bytes: number of bytes used in the last LEB
+ */
+struct ubi_fm_volhdr {
+ __be32 magic;
+ __be32 vol_id;
+ __u8 vol_type;
+ __u8 padding1[3];
+ __be32 data_pad;
+ __be32 used_ebs;
+ __be32 last_eb_bytes;
+ __u8 padding2[8];
+} __packed;
+
+/* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba record */
+
+/**
+ * struct ubi_fm_eba - denotes an association between a PEB and LEB
+ * @magic: EBA table magic number
+ * @reserved_pebs: number of table entries
+ * @pnum: PEB number of LEB (LEB is the index)
+ */
+struct ubi_fm_eba {
+ __be32 magic;
+ __be32 reserved_pebs;
+ __be32 pnum[0];
+} __packed;
#endif /* !__UBI_MEDIA_H__ */
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/ubi.h b/ANDROID_3.4.5/drivers/mtd/ubi/ubi.h
index b1627907..1a39a08d 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/ubi.h
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/ubi.h
@@ -43,7 +43,6 @@
#include <asm/pgtable.h>
#include "ubi-media.h"
-#include "scan.h"
/* Maximum number of supported UBI devices */
#define UBI_MAX_DEVICES 32
@@ -52,21 +51,21 @@
#define UBI_NAME_STR "ubi"
/* Normal UBI messages */
-#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
+#define ubi_msg(fmt, ...) pr_notice("UBI: " fmt "\n", ##__VA_ARGS__)
/* UBI warning messages */
-#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \
- __func__, ##__VA_ARGS__)
+#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n", \
+ __func__, ##__VA_ARGS__)
/* UBI error messages */
-#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \
+#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n", \
__func__, ##__VA_ARGS__)
-/* Lowest number PEBs reserved for bad PEB handling */
-#define MIN_RESEVED_PEBS 2
-
/* Background thread name pattern */
#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
-/* This marker in the EBA table means that the LEB is um-mapped */
+/*
+ * This marker in the EBA table means that the LEB is un-mapped.
+ * NOTE! It has to have the same value as %UBI_ALL.
+ */
#define UBI_LEB_UNMAPPED -1
/*
@@ -82,6 +81,16 @@
*/
#define UBI_PROT_QUEUE_LEN 10
+/* The volume ID/LEB number/erase counter is unknown */
+#define UBI_UNKNOWN -1
+
+/*
+ * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
+ * + 2 for the number plus 1 for the trailing zero byte).
+ */
+#define UBI_DFS_DIR_NAME "ubi%d"
+#define UBI_DFS_DIR_LEN (3 + 2 + 1)
+
/*
* Error codes returned by the I/O sub-system.
*
@@ -131,6 +140,17 @@ enum {
MOVE_RETRY,
};
+/*
+ * Return codes of the fastmap sub-system
+ *
+ * UBI_NO_FASTMAP: No fastmap super block was found
+ * UBI_BAD_FASTMAP: A fastmap was found but it's unusable
+ */
+enum {
+ UBI_NO_FASTMAP = 1,
+ UBI_BAD_FASTMAP,
+};
+
/**
* struct ubi_wl_entry - wear-leveling entry.
* @u.rb: link in the corresponding (free/used) RB-tree
@@ -197,6 +217,41 @@ struct ubi_rename_entry {
struct ubi_volume_desc;
/**
+ * struct ubi_fastmap_layout - in-memory fastmap data structure.
+ * @e: PEBs used by the current fastmap
+ * @to_be_tortured: if non-zero tortured this PEB
+ * @used_blocks: number of used PEBs
+ * @max_pool_size: maximal size of the user pool
+ * @max_wl_pool_size: maximal size of the pool used by the WL sub-system
+ */
+struct ubi_fastmap_layout {
+ struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
+ int to_be_tortured[UBI_FM_MAX_BLOCKS];
+ int used_blocks;
+ int max_pool_size;
+ int max_wl_pool_size;
+};
+
+/**
+ * struct ubi_fm_pool - in-memory fastmap pool
+ * @pebs: PEBs in this pool
+ * @used: number of used PEBs
+ * @size: total number of PEBs in this pool
+ * @max_size: maximal size of the pool
+ *
+ * A pool gets filled with up to max_size.
+ * If all PEBs within the pool are used a new fastmap will be written
+ * to the flash and the pool gets refilled with empty PEBs.
+ *
+ */
+struct ubi_fm_pool {
+ int pebs[UBI_FM_MAX_POOL_SIZE];
+ int used;
+ int size;
+ int max_size;
+};
+
+/**
* struct ubi_volume - UBI volume description data structure.
* @dev: device object to make use of the the Linux device model
* @cdev: character device object to create character device
@@ -222,8 +277,6 @@ struct ubi_volume_desc;
* @upd_ebs: how many eraseblocks are expected to be updated
* @ch_lnum: LEB number which is being changing by the atomic LEB change
* operation
- * @ch_dtype: data persistency type which is being changing by the atomic LEB
- * change operation
* @upd_bytes: how many bytes are expected to be received for volume update or
* atomic LEB change
* @upd_received: how many bytes were already received for volume update or
@@ -270,7 +323,6 @@ struct ubi_volume {
int upd_ebs;
int ch_lnum;
- int ch_dtype;
long long upd_bytes;
long long upd_received;
void *upd_buf;
@@ -297,6 +349,37 @@ struct ubi_volume_desc {
struct ubi_wl_entry;
/**
+ * struct ubi_debug_info - debugging information for an UBI device.
+ *
+ * @chk_gen: if UBI general extra checks are enabled
+ * @chk_io: if UBI I/O extra checks are enabled
+ * @disable_bgt: disable the background task for testing purposes
+ * @emulate_bitflips: emulate bit-flips for testing purposes
+ * @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @dfs_dir_name: name of debugfs directory containing files of this UBI device
+ * @dfs_dir: direntry object of the UBI device debugfs directory
+ * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
+ * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_disable_bgt: debugfs knob to disable the background task
+ * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
+ * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
+ */
+struct ubi_debug_info {
+ unsigned int chk_gen:1;
+ unsigned int chk_io:1;
+ unsigned int disable_bgt:1;
+ unsigned int emulate_bitflips:1;
+ unsigned int emulate_io_failures:1;
+ char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_chk_gen;
+ struct dentry *dfs_chk_io;
+ struct dentry *dfs_disable_bgt;
+ struct dentry *dfs_emulate_bitflips;
+ struct dentry *dfs_emulate_io_failures;
+};
+
+/**
* struct ubi_device - UBI device description structure
* @dev: UBI device object to use the the Linux device model
* @cdev: character device object to create character device
@@ -334,9 +417,21 @@ struct ubi_wl_entry;
* @ltree: the lock tree
* @alc_mutex: serializes "atomic LEB change" operations
*
+ * @fm_disabled: non-zero if fastmap is disabled (default)
+ * @fm: in-memory data structure of the currently used fastmap
+ * @fm_pool: in-memory data structure of the fastmap pool
+ * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
+ * sub-system
+ * @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf
+ * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
+ * @fm_size: fastmap size in bytes
+ * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
+ * @fm_work: fastmap work queue
+ *
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
* @free: RB-tree of free physical eraseblocks
+ * @free_count: Contains the number of elements in @free
* @scrub: RB-tree of physical eraseblocks which need scrubbing
* @pq: protection queue (contain physical eraseblocks which are temporarily
* protected from the wear-leveling worker)
@@ -361,6 +456,7 @@ struct ubi_wl_entry;
* @flash_size: underlying MTD device size (in bytes)
* @peb_count: count of physical eraseblocks on the MTD device
* @peb_size: physical eraseblock size
+ * @bad_peb_limit: top limit of expected bad physical eraseblocks
* @bad_peb_count: count of bad physical eraseblocks
* @good_peb_count: count of good physical eraseblocks
* @corr_peb_count: count of corrupted physical eraseblocks (preserved and not
@@ -408,6 +504,7 @@ struct ubi_device {
int avail_pebs;
int beb_rsvd_pebs;
int beb_rsvd_level;
+ int bad_peb_limit;
int autoresize_vol_id;
int vtbl_slots;
@@ -425,10 +522,30 @@ struct ubi_device {
struct rb_root ltree;
struct mutex alc_mutex;
+ /* Fastmap stuff */
+ int fm_disabled;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_fm_pool fm_pool;
+ struct ubi_fm_pool fm_wl_pool;
+ struct rw_semaphore fm_sem;
+ struct mutex fm_mutex;
+
+ void *fm_buf;
+ void *fm_cur;
+ void *fm_tmp;
+ size_t fm_size;
+ size_t fm_cnt;
+ size_t fm_idx;
+
+ int old_anchor;
+
+ struct work_struct fm_work;
+
/* Wear-leveling sub-system's stuff */
struct rb_root used;
struct rb_root erroneous;
struct rb_root free;
+ int free_count;
struct rb_root scrub;
struct list_head pq[UBI_PROT_QUEUE_LEN];
int pq_head;
@@ -474,7 +591,151 @@ struct ubi_device {
struct mutex buf_mutex;
struct mutex ckvol_mutex;
- struct ubi_debug_info *dbg;
+ struct ubi_debug_info dbg;
+};
+
+/**
+ * struct ubi_ainf_peb - attach information about a physical eraseblock.
+ * @ec: erase counter (%UBI_UNKNOWN if it is unknown)
+ * @pnum: physical eraseblock number
+ * @vol_id: ID of the volume this LEB belongs to
+ * @lnum: logical eraseblock number
+ * @scrub: if this physical eraseblock needs scrubbing
+ * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
+ * @sqnum: sequence number
+ * @u: unions RB-tree or @list links
+ * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects
+ * @u.list: link in one of the eraseblock lists
+ *
+ * One object of this type is allocated for each physical eraseblock when
+ * attaching an MTD device. Note, if this PEB does not belong to any LEB /
+ * volume, the @vol_id and @lnum fields are initialized to %UBI_UNKNOWN.
+ */
+struct ubi_ainf_peb {
+ int ec;
+ int pnum;
+ int vol_id;
+ int lnum;
+ unsigned int scrub:1;
+ unsigned int copy_flag:1;
+ unsigned long long sqnum;
+ union {
+ struct rb_node rb;
+ struct list_head list;
+ } u;
+};
+
+/**
+ * struct ubi_ainf_volume - attaching information about a volume.
+ * @vol_id: volume ID
+ * @highest_lnum: highest logical eraseblock number in this volume
+ * @leb_count: number of logical eraseblocks in this volume
+ * @vol_type: volume type
+ * @used_ebs: number of used logical eraseblocks in this volume (only for
+ * static volumes)
+ * @last_data_size: amount of data in the last logical eraseblock of this
+ * volume (always equivalent to the usable logical eraseblock
+ * size in case of dynamic volumes)
+ * @data_pad: how many bytes at the end of logical eraseblocks of this volume
+ * are not used (due to volume alignment)
+ * @compat: compatibility flags of this volume
+ * @rb: link in the volume RB-tree
+ * @root: root of the RB-tree containing all the eraseblock belonging to this
+ * volume (&struct ubi_ainf_peb objects)
+ *
+ * One object of this type is allocated for each volume when attaching an MTD
+ * device.
+ */
+struct ubi_ainf_volume {
+ int vol_id;
+ int highest_lnum;
+ int leb_count;
+ int vol_type;
+ int used_ebs;
+ int last_data_size;
+ int data_pad;
+ int compat;
+ struct rb_node rb;
+ struct rb_root root;
+};
+
+/**
+ * struct ubi_attach_info - MTD device attaching information.
+ * @volumes: root of the volume RB-tree
+ * @corr: list of corrupted physical eraseblocks
+ * @free: list of free physical eraseblocks
+ * @erase: list of physical eraseblocks which have to be erased
+ * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
+ * those belonging to "preserve"-compatible internal volumes)
+ * @corr_peb_count: count of PEBs in the @corr list
+ * @empty_peb_count: count of PEBs which are presumably empty (contain only
+ * 0xFF bytes)
+ * @alien_peb_count: count of PEBs in the @alien list
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
+ * as bad yet, but which look like bad
+ * @vols_found: number of volumes found
+ * @highest_vol_id: highest volume ID
+ * @is_empty: flag indicating whether the MTD device is empty or not
+ * @min_ec: lowest erase counter value
+ * @max_ec: highest erase counter value
+ * @max_sqnum: highest sequence number value
+ * @mean_ec: mean erase counter value
+ * @ec_sum: a temporary variable used when calculating @mean_ec
+ * @ec_count: a temporary variable used when calculating @mean_ec
+ * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
+ *
+ * This data structure contains the result of attaching an MTD device and may
+ * be used by other UBI sub-systems to build final UBI data structures, further
+ * error-recovery and so on.
+ */
+struct ubi_attach_info {
+ struct rb_root volumes;
+ struct list_head corr;
+ struct list_head free;
+ struct list_head erase;
+ struct list_head alien;
+ int corr_peb_count;
+ int empty_peb_count;
+ int alien_peb_count;
+ int bad_peb_count;
+ int maybe_bad_peb_count;
+ int vols_found;
+ int highest_vol_id;
+ int is_empty;
+ int min_ec;
+ int max_ec;
+ unsigned long long max_sqnum;
+ int mean_ec;
+ uint64_t ec_sum;
+ int ec_count;
+ struct kmem_cache *aeb_slab_cache;
+};
+
+/**
+ * struct ubi_work - UBI work description data structure.
+ * @list: a link in the list of pending works
+ * @func: worker function
+ * @e: physical eraseblock to erase
+ * @vol_id: the volume ID on which this erasure is being performed
+ * @lnum: the logical eraseblock number
+ * @torture: if the physical eraseblock has to be tortured
+ * @anchor: produce an anchor PEB to be used by fastmap
+ *
+ * The @func pointer points to the worker function. If the @cancel argument is
+ * not zero, the worker has to free the resources and exit immediately. The
+ * worker has to return zero in case of success and a negative error code in
+ * case of failure.
+ */
+struct ubi_work {
+ struct list_head list;
+ int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
+ /* The below fields are only relevant to erasure works */
+ struct ubi_wl_entry *e;
+ int vol_id;
+ int lnum;
+ int torture;
+ int anchor;
};
#include "debug.h"
@@ -487,12 +748,23 @@ extern struct class *ubi_class;
extern struct mutex ubi_devices_mutex;
extern struct blocking_notifier_head ubi_notifiers;
+/* attach.c */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id);
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai);
+int ubi_attach(struct ubi_device *ubi, int force_scan);
+void ubi_destroy_ai(struct ubi_attach_info *ai);
+
/* vtbl.c */
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
struct ubi_vtbl_record *vtbl_rec);
int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
struct list_head *rename_list);
-int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai);
/* vmt.c */
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
@@ -516,6 +788,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
int length);
int ubi_check_volume(struct ubi_device *ubi, int vol_id);
+void ubi_update_reserved(struct ubi_device *ubi);
void ubi_calculate_reserved(struct ubi_device *ubi);
int ubi_check_pattern(const void *buf, uint8_t patt, int size);
@@ -525,30 +798,49 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
void *buf, int offset, int len, int check);
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
- const void *buf, int offset, int len, int dtype);
+ const void *buf, int offset, int len);
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype,
- int used_ebs);
+ int lnum, const void *buf, int len, int used_ebs);
+int ubi_eba_read_leb_oob(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, void *buf, int offset, int len, void *spare);
+int ubi_eba_write_leb_oob(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ const void *buf, int offset, int len, void *spare, int dtype);
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype);
+ int lnum, const void *buf, int len);
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr);
-int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan);
/* wl.c */
-int ubi_wl_get_peb(struct ubi_device *ubi, int dtype);
-int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture);
-int ubi_wl_flush(struct ubi_device *ubi);
+int ubi_wl_get_peb(struct ubi_device *ubi);
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture);
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum);
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
-int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
void ubi_wl_close(struct ubi_device *ubi);
int ubi_thread(void *u);
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
+ int lnum, int torture);
+int ubi_wl_put_erased_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
+ int lnum, int torture);
+int ubi_is_erase_work(struct ubi_work *wrk);
+void ubi_refill_pools(struct ubi_device *ubi);
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
int len);
int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
int len);
+int ubi_io_read_oob(const struct ubi_device *ubi, void *buf, int pnum, int offset,
+ int len, void *spare);
+int ubi_io_write_oob(struct ubi_device *ubi, const void *buf, int pnum, int offset,
+ int len, void *spare);
int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture);
int ubi_io_is_bad(const struct ubi_device *ubi, int pnum);
int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum);
@@ -562,7 +854,8 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr);
/* build.c */
-int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024);
int ubi_detach_mtd_dev(int ubi_num, int anyway);
struct ubi_device *ubi_get_device(int ubi_num);
void ubi_put_device(struct ubi_device *ubi);
@@ -573,12 +866,24 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_notify_all(struct ubi_device *ubi, int ntype,
struct notifier_block *nb);
int ubi_enumerate_volumes(struct notifier_block *nb);
+void ubi_free_internal_volumes(struct ubi_device *ubi);
/* kapi.c */
void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_volume_info *vi);
+/* scan.c */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ int pnum, const struct ubi_vid_hdr *vid_hdr);
+/* fastmap.c */
+int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
+ int pnum, int ec, int scrub);
+size_t ubi_calc_fm_size(struct ubi_device *ubi);
+int ubi_update_fastmap(struct ubi_device *ubi);
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int fm_anchor);
+int erase_block(struct ubi_device *ubi, int pnum);
/*
* ubi_rb_for_each_entry - walk an RB-tree.
* @rb: a pointer to type 'struct rb_node' to use as a loop counter
@@ -593,6 +898,21 @@ void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
rb = rb_next(rb), \
pos = (rb ? container_of(rb, typeof(*pos), member) : NULL))
+/*
+ * ubi_move_aeb_to_list - move a PEB from the volume tree to a list.
+ *
+ * @av: volume attaching information
+ * @aeb: attaching eraseblock information
+ * @list: the list to move to
+ */
+static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av,
+ struct ubi_ainf_peb *aeb,
+ struct list_head *list)
+{
+ rb_erase(&aeb->u.rb, &av->root);
+ list_add_tail(&aeb->u.list, list);
+}
+
/**
* ubi_zalloc_vid_hdr - allocate a volume identifier header object.
* @ubi: UBI device description object
@@ -646,6 +966,12 @@ static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf,
return ubi_io_read(ubi, buf, pnum, offset + ubi->leb_start, len);
}
+static inline int ubi_io_read_data_oob(const struct ubi_device *ubi, void *buf,
+ int pnum, int offset, int len, void *spare)
+{
+ ubi_assert(offset >= 0);
+ return ubi_io_read_oob(ubi, buf, pnum, offset + ubi->leb_start, len, spare);
+}
/*
* This function is equivalent to 'ubi_io_write()', but @offset is relative to
* the beginning of the logical eraseblock, not to the beginning of the
@@ -655,9 +981,22 @@ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
int pnum, int offset, int len)
{
ubi_assert(offset >= 0);
+
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+ if(ubi->fm_cnt) {
+ ubi->fm_cnt = 0;
+ erase_block(ubi, ubi->fm->e[0]->pnum);
+ }
+ #endif
return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
}
+static inline int ubi_io_write_data_oob(struct ubi_device *ubi, const void *buf,
+ int pnum, int offset, int len, void *spare)
+{
+ ubi_assert(offset >= 0);
+ return ubi_io_write_oob(ubi, buf, pnum, offset + ubi->leb_start, len, spare);
+}
/**
* ubi_ro_mode - switch to read-only mode.
* @ubi: UBI device description object
@@ -667,7 +1006,7 @@ static inline void ubi_ro_mode(struct ubi_device *ubi)
if (!ubi->ro_mode) {
ubi->ro_mode = 1;
ubi_warn("switch to read-only mode");
- ubi_dbg_dump_stack();
+ dump_stack();
}
}
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/upd.c b/ANDROID_3.4.5/drivers/mtd/ubi/upd.c
index 425bf5a3..ec2c2dc1 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/upd.c
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/upd.c
@@ -64,8 +64,7 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
return 0;
}
- memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol->vol_id];
vtbl_rec.upd_marker = 1;
mutex_lock(&ubi->device_mutex);
@@ -93,8 +92,7 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_gen("clear update marker for volume %d", vol->vol_id);
- memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol->vol_id];
ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
vtbl_rec.upd_marker = 0;
@@ -147,7 +145,7 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
}
if (bytes == 0) {
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
if (err)
return err;
@@ -186,14 +184,12 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_gen("start changing LEB %d:%d, %u bytes",
vol->vol_id, req->lnum, req->bytes);
if (req->bytes == 0)
- return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
- req->dtype);
+ return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0);
vol->upd_bytes = req->bytes;
vol->upd_received = 0;
vol->changing_leb = 1;
vol->ch_lnum = req->lnum;
- vol->ch_dtype = req->dtype;
vol->upd_buf = vmalloc(req->bytes);
if (!vol->upd_buf)
@@ -246,8 +242,7 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
return 0;
}
- err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len,
- UBI_UNKNOWN);
+ err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
} else {
/*
* When writing static volume, and this is the last logical
@@ -259,8 +254,7 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
* contain zeros, not random trash.
*/
memset(buf + len, 0, vol->usable_leb_size - len);
- err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
- UBI_UNKNOWN, used_ebs);
+ err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, used_ebs);
}
return err;
@@ -365,7 +359,7 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
ubi_assert(vol->upd_received <= vol->upd_bytes);
if (vol->upd_received == vol->upd_bytes) {
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
if (err)
return err;
/* The update is finished, clear the update marker */
@@ -421,7 +415,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
len - vol->upd_bytes);
len = ubi_calc_data_len(ubi, vol->upd_buf, len);
err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
- vol->upd_buf, len, UBI_UNKNOWN);
+ vol->upd_buf, len);
if (err)
return err;
}
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/vmt.c b/ANDROID_3.4.5/drivers/mtd/ubi/vmt.c
index 863835f4..8330703c 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/vmt.c
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/vmt.c
@@ -29,11 +29,7 @@
#include <linux/export.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_volumes(struct ubi_device *ubi);
-#else
-#define paranoid_check_volumes(ubi) 0
-#endif
+static int self_check_volumes(struct ubi_device *ubi);
static ssize_t vol_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf);
@@ -227,7 +223,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
}
if (vol_id == UBI_VOL_NUM_AUTO) {
- dbg_err("out of volume IDs");
+ ubi_err("out of volume IDs");
err = -ENFILE;
goto out_unlock;
}
@@ -241,7 +237,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Ensure that this volume does not exist */
err = -EEXIST;
if (ubi->volumes[vol_id]) {
- dbg_err("volume %d already exists", vol_id);
+ ubi_err("volume %d already exists", vol_id);
goto out_unlock;
}
@@ -250,7 +246,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (ubi->volumes[i] &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
- dbg_err("volume \"%s\" exists (ID %d)", req->name, i);
+ ubi_err("volume \"%s\" exists (ID %d)", req->name, i);
goto out_unlock;
}
@@ -261,9 +257,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Reserve physical eraseblocks */
if (vol->reserved_pebs > ubi->avail_pebs) {
- dbg_err("not enough PEBs, only %d available", ubi->avail_pebs);
+ ubi_err("not enough PEBs, only %d available", ubi->avail_pebs);
if (ubi->corr_peb_count)
- dbg_err("%d PEBs are corrupted and not used",
+ ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
err = -ENOSPC;
goto out_unlock;
@@ -284,7 +280,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
* Finish all pending erases because there may be some LEBs belonging
* to the same volume ID.
*/
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
if (err)
goto out_acc;
@@ -360,8 +356,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
spin_unlock(&ubi->volumes_lock);
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
- if (paranoid_check_volumes(ubi))
- dbg_err("check failed while creating volume %d", vol_id);
+ self_check_volumes(ubi);
return err;
out_sysfs:
@@ -448,21 +443,13 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= reserved_pebs;
ubi->avail_pebs += reserved_pebs;
- i = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
- if (i > 0) {
- i = ubi->avail_pebs >= i ? i : ubi->avail_pebs;
- ubi->avail_pebs -= i;
- ubi->rsvd_pebs += i;
- ubi->beb_rsvd_pebs += i;
- if (i > 0)
- ubi_msg("reserve more %d PEBs", i);
- }
+ ubi_update_reserved(ubi);
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
- if (!no_vtbl && paranoid_check_volumes(ubi))
- dbg_err("check failed while removing volume %d", vol_id);
+ if (!no_vtbl)
+ self_check_volumes(ubi);
return err;
@@ -500,7 +487,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (vol->vol_type == UBI_STATIC_VOLUME &&
reserved_pebs < vol->used_ebs) {
- dbg_err("too small size %d, %d LEBs contain data",
+ ubi_err("too small size %d, %d LEBs contain data",
reserved_pebs, vol->used_ebs);
return -EINVAL;
}
@@ -529,10 +516,10 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (pebs > 0) {
spin_lock(&ubi->volumes_lock);
if (pebs > ubi->avail_pebs) {
- dbg_err("not enough PEBs: requested %d, available %d",
+ ubi_err("not enough PEBs: requested %d, available %d",
pebs, ubi->avail_pebs);
if (ubi->corr_peb_count)
- dbg_err("%d PEBs are corrupted and not used",
+ ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
spin_unlock(&ubi->volumes_lock);
err = -ENOSPC;
@@ -548,7 +535,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
}
/* Change volume table record */
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol_id];
vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
@@ -563,15 +550,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs += pebs;
ubi->avail_pebs -= pebs;
- pebs = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
- if (pebs > 0) {
- pebs = ubi->avail_pebs >= pebs ? pebs : ubi->avail_pebs;
- ubi->avail_pebs -= pebs;
- ubi->rsvd_pebs += pebs;
- ubi->beb_rsvd_pebs += pebs;
- if (pebs > 0)
- ubi_msg("reserve more %d PEBs", pebs);
- }
+ ubi_update_reserved(ubi);
for (i = 0; i < reserved_pebs; i++)
new_mapping[i] = vol->eba_tbl[i];
kfree(vol->eba_tbl);
@@ -588,8 +567,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
}
ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
- if (paranoid_check_volumes(ubi))
- dbg_err("check failed while re-sizing volume %d", vol_id);
+ self_check_volumes(ubi);
return err;
out_acc:
@@ -638,8 +616,8 @@ int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
}
}
- if (!err && paranoid_check_volumes(ubi))
- ;
+ if (!err)
+ self_check_volumes(ubi);
return err;
}
@@ -686,8 +664,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
return err;
}
- if (paranoid_check_volumes(ubi))
- dbg_err("check failed while adding volume %d", vol_id);
+ self_check_volumes(ubi);
return err;
out_cdev:
@@ -712,16 +689,14 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
volume_sysfs_close(vol);
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_check_volume - check volume information.
+ * self_check_volume - check volume information.
* @ubi: UBI device description object
* @vol_id: volume ID
*
 * Returns zero if volume is all right and a negative error code if not.
*/
-static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
+static int self_check_volume(struct ubi_device *ubi, int vol_id)
{
int idx = vol_id2idx(ubi, vol_id);
int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
@@ -771,7 +746,7 @@ static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
}
if (vol->upd_marker && vol->corrupted) {
- dbg_err("update marker and corrupted simultaneously");
+ ubi_err("update marker and corrupted simultaneously");
goto fail;
}
@@ -853,34 +828,33 @@ static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
return 0;
fail:
- ubi_err("paranoid check failed for volume %d", vol_id);
+ ubi_err("self-check failed for volume %d", vol_id);
if (vol)
- ubi_dbg_dump_vol_info(vol);
- ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+ ubi_dump_vol_info(vol);
+ ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
dump_stack();
spin_unlock(&ubi->volumes_lock);
return -EINVAL;
}
/**
- * paranoid_check_volumes - check information about all volumes.
+ * self_check_volumes - check information about all volumes.
* @ubi: UBI device description object
*
 * Returns zero if volumes are all right and a negative error code if not.
*/
-static int paranoid_check_volumes(struct ubi_device *ubi)
+static int self_check_volumes(struct ubi_device *ubi)
{
int i, err = 0;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < ubi->vtbl_slots; i++) {
- err = paranoid_check_volume(ubi, i);
+ err = self_check_volume(ubi, i);
if (err)
break;
}
return err;
}
-#endif
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/vtbl.c b/ANDROID_3.4.5/drivers/mtd/ubi/vtbl.c
index 17cec0c0..d77b1c1d 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/vtbl.c
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/vtbl.c
@@ -37,16 +37,15 @@
* LEB 1. This scheme guarantees recoverability from unclean reboots.
*
* In this UBI implementation the on-flash volume table does not contain any
- * information about how many data static volumes contain. This information may
- * be found from the scanning data.
+ * information about how much data static volumes contain.
*
* But it would still be beneficial to store this information in the volume
* table. For example, suppose we have a static volume X, and all its physical
* eraseblocks became bad for some reasons. Suppose we are attaching the
- * corresponding MTD device, the scanning has found no logical eraseblocks
+ * corresponding MTD device, for some reason we find no logical eraseblocks
* corresponding to the volume X. According to the volume table volume X does
* exist. So we don't know whether it is just empty or all its physical
- * eraseblocks went bad. So we cannot alarm the user about this corruption.
+ * eraseblocks went bad. So we cannot alarm the user properly.
*
* The volume table also stores so-called "update marker", which is used for
* volume updates. Before updating the volume, the update marker is set, and
@@ -62,11 +61,7 @@
#include <asm/div64.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static void paranoid_vtbl_check(const struct ubi_device *ubi);
-#else
-#define paranoid_vtbl_check(ubi)
-#endif
+static void self_vtbl_check(const struct ubi_device *ubi);
/* Empty volume table record */
static struct ubi_vtbl_record empty_vtbl_record;
@@ -106,12 +101,12 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
return err;
err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
- ubi->vtbl_size, UBI_LONGTERM);
+ ubi->vtbl_size);
if (err)
return err;
}
- paranoid_vtbl_check(ubi);
+ self_vtbl_check(ubi);
return 0;
}
@@ -158,7 +153,7 @@ int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
return err;
err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
- ubi->vtbl_size, UBI_LONGTERM);
+ ubi->vtbl_size);
if (err)
return err;
}
@@ -197,7 +192,7 @@ static int vtbl_check(const struct ubi_device *ubi,
if (be32_to_cpu(vtbl[i].crc) != crc) {
ubi_err("bad CRC at record %u: %#08x, not %#08x",
i, crc, be32_to_cpu(vtbl[i].crc));
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[i], i);
return 1;
}
@@ -229,7 +224,7 @@ static int vtbl_check(const struct ubi_device *ubi,
n = ubi->leb_size % alignment;
if (data_pad != n) {
- dbg_err("bad data_pad, has to be %d", n);
+ ubi_err("bad data_pad, has to be %d", n);
err = 6;
goto bad;
}
@@ -245,7 +240,7 @@ static int vtbl_check(const struct ubi_device *ubi,
}
if (reserved_pebs > ubi->good_peb_count) {
- dbg_err("too large reserved_pebs %d, good PEBs %d",
+ ubi_err("too large reserved_pebs %d, good PEBs %d",
reserved_pebs, ubi->good_peb_count);
err = 9;
goto bad;
@@ -275,10 +270,10 @@ static int vtbl_check(const struct ubi_device *ubi,
if (len1 > 0 && len1 == len2 &&
!strncmp(vtbl[i].name, vtbl[n].name, len1)) {
- ubi_err("volumes %d and %d have the same name"
- " \"%s\"", i, n, vtbl[i].name);
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
- ubi_dbg_dump_vtbl_record(&vtbl[n], n);
+ ubi_err("volumes %d and %d have the same name \"%s\"",
+ i, n, vtbl[i].name);
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[n], n);
return -EINVAL;
}
}
@@ -288,37 +283,37 @@ static int vtbl_check(const struct ubi_device *ubi,
bad:
ubi_err("volume table check failed: record %d, error %d", i, err);
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[i], i);
return -EINVAL;
}
/**
* create_vtbl - create a copy of volume table.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @copy: number of the volume table copy
* @vtbl: contents of the volume table
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
+static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
int copy, void *vtbl)
{
int err, tries = 0;
struct ubi_vid_hdr *vid_hdr;
- struct ubi_scan_leb *new_seb;
+ struct ubi_ainf_peb *new_aeb;
- ubi_msg("create volume table (copy #%d)", copy + 1);
+ dbg_gen("create volume table (copy #%d)", copy + 1);
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vid_hdr)
return -ENOMEM;
retry:
- new_seb = ubi_scan_get_free_peb(ubi, si);
- if (IS_ERR(new_seb)) {
- err = PTR_ERR(new_seb);
+ new_aeb = ubi_early_get_peb(ubi, ai);
+ if (IS_ERR(new_aeb)) {
+ err = PTR_ERR(new_aeb);
goto out_free;
}
@@ -328,25 +323,24 @@ retry:
vid_hdr->data_size = vid_hdr->used_ebs =
vid_hdr->data_pad = cpu_to_be32(0);
vid_hdr->lnum = cpu_to_be32(copy);
- vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum);
+ vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
/* The EC header is already there, write the VID header */
- err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
+ err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr);
if (err)
goto write_error;
/* Write the layout volume contents */
- err = ubi_io_write_data(ubi, vtbl, new_seb->pnum, 0, ubi->vtbl_size);
+ err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size);
if (err)
goto write_error;
/*
- * And add it to the scanning information. Don't delete the old version
- * of this LEB as it will be deleted and freed in 'ubi_scan_add_used()'.
+ * And add it to the attaching information. Don't delete the old version
+ * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
*/
- err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
- vid_hdr, 0);
- kfree(new_seb);
+ err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -356,10 +350,10 @@ write_error:
* Probably this physical eraseblock went bad, try to pick
* another one.
*/
- list_add(&new_seb->u.list, &si->erase);
+ list_add(&new_aeb->u.list, &ai->erase);
goto retry;
}
- kfree(new_seb);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
out_free:
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -369,20 +363,20 @@ out_free:
/**
* process_lvol - process the layout volume.
* @ubi: UBI device description object
- * @si: scanning information
- * @sv: layout volume scanning information
+ * @ai: attaching information
+ * @av: layout volume attaching information
*
* This function is responsible for reading the layout volume, ensuring it is
* not corrupted, and recovering from corruptions if needed. Returns volume
* table in case of success and a negative error code in case of failure.
*/
static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
- struct ubi_scan_info *si,
- struct ubi_scan_volume *sv)
+ struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av)
{
int err;
struct rb_node *rb;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};
@@ -414,14 +408,14 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
dbg_gen("check layout volume");
/* Read both LEB 0 and LEB 1 into memory */
- ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
- leb[seb->lnum] = vzalloc(ubi->vtbl_size);
- if (!leb[seb->lnum]) {
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ leb[aeb->lnum] = vzalloc(ubi->vtbl_size);
+ if (!leb[aeb->lnum]) {
err = -ENOMEM;
goto out_free;
}
- err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
+ err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0,
ubi->vtbl_size);
if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
/*
@@ -429,12 +423,12 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
* uncorrectable ECC error, but we have our own CRC and
* the data will be checked later. If the data is OK,
* the PEB will be scrubbed (because we set
- * seb->scrub). If the data is not OK, the contents of
+ * aeb->scrub). If the data is not OK, the contents of
* the PEB will be recovered from the second copy, and
- * seb->scrub will be cleared in
- * 'ubi_scan_add_used()'.
+ * aeb->scrub will be cleared in
+ * 'ubi_add_to_av()'.
*/
- seb->scrub = 1;
+ aeb->scrub = 1;
else if (err)
goto out_free;
}
@@ -453,7 +447,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
ubi->vtbl_size);
if (leb_corrupted[1]) {
ubi_warn("volume table copy #2 is corrupted");
- err = create_vtbl(ubi, si, 1, leb[0]);
+ err = create_vtbl(ubi, ai, 1, leb[0]);
if (err)
goto out_free;
ubi_msg("volume table was restored");
@@ -476,7 +470,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
}
ubi_warn("volume table copy #1 is corrupted");
- err = create_vtbl(ubi, si, 0, leb[1]);
+ err = create_vtbl(ubi, ai, 0, leb[1]);
if (err)
goto out_free;
ubi_msg("volume table was restored");
@@ -494,13 +488,13 @@ out_free:
/**
* create_empty_lvol - create empty layout volume.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns volume table contents in case of success and a
* negative error code in case of failure.
*/
static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
- struct ubi_scan_info *si)
+ struct ubi_attach_info *ai)
{
int i;
struct ubi_vtbl_record *vtbl;
@@ -515,7 +509,7 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
int err;
- err = create_vtbl(ubi, si, i, vtbl);
+ err = create_vtbl(ubi, ai, i, vtbl);
if (err) {
vfree(vtbl);
return ERR_PTR(err);
@@ -528,18 +522,19 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
/**
* init_volumes - initialize volume information for existing volumes.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @vtbl: volume table
*
* This function allocates volume description objects for existing volumes.
* Returns zero in case of success and a negative error code in case of
* failure.
*/
-static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
+static int init_volumes(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai,
const struct ubi_vtbl_record *vtbl)
{
int i, reserved_pebs = 0;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
for (i = 0; i < ubi->vtbl_slots; i++) {
@@ -567,8 +562,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
/* Auto re-size flag may be set only for one volume */
if (ubi->autoresize_vol_id != -1) {
- ubi_err("more than one auto-resize volume (%d "
- "and %d)", ubi->autoresize_vol_id, i);
+ ubi_err("more than one auto-resize volume (%d and %d)",
+ ubi->autoresize_vol_id, i);
kfree(vol);
return -EINVAL;
}
@@ -595,8 +590,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
}
/* Static volumes only */
- sv = ubi_scan_find_sv(si, i);
- if (!sv) {
+ av = ubi_find_av(ai, i);
+ if (!av) {
/*
* No eraseblocks belonging to this volume found. We
* don't actually know whether this static volume is
@@ -608,22 +603,22 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
continue;
}
- if (sv->leb_count != sv->used_ebs) {
+ if (av->leb_count != av->used_ebs) {
/*
* We found a static volume which misses several
* eraseblocks. Treat it as corrupted.
*/
ubi_warn("static volume %d misses %d LEBs - corrupted",
- sv->vol_id, sv->used_ebs - sv->leb_count);
+ av->vol_id, av->used_ebs - av->leb_count);
vol->corrupted = 1;
continue;
}
- vol->used_ebs = sv->used_ebs;
+ vol->used_ebs = av->used_ebs;
vol->used_bytes =
(long long)(vol->used_ebs - 1) * vol->usable_leb_size;
- vol->used_bytes += sv->last_data_size;
- vol->last_eb_bytes = sv->last_data_size;
+ vol->used_bytes += av->last_data_size;
+ vol->last_eb_bytes = av->last_data_size;
}
/* And add the layout volume */
@@ -664,105 +659,104 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
}
/**
- * check_sv - check volume scanning information.
+ * check_av - check volume attaching information.
* @vol: UBI volume description object
- * @sv: volume scanning information
+ * @av: volume attaching information
*
- * This function returns zero if the volume scanning information is consistent
+ * This function returns zero if the volume attaching information is consistent
 * to the data read from the volume table, and %-EINVAL if not.
*/
-static int check_sv(const struct ubi_volume *vol,
- const struct ubi_scan_volume *sv)
+static int check_av(const struct ubi_volume *vol,
+ const struct ubi_ainf_volume *av)
{
int err;
- if (sv->highest_lnum >= vol->reserved_pebs) {
+ if (av->highest_lnum >= vol->reserved_pebs) {
err = 1;
goto bad;
}
- if (sv->leb_count > vol->reserved_pebs) {
+ if (av->leb_count > vol->reserved_pebs) {
err = 2;
goto bad;
}
- if (sv->vol_type != vol->vol_type) {
+ if (av->vol_type != vol->vol_type) {
err = 3;
goto bad;
}
- if (sv->used_ebs > vol->reserved_pebs) {
+ if (av->used_ebs > vol->reserved_pebs) {
err = 4;
goto bad;
}
- if (sv->data_pad != vol->data_pad) {
+ if (av->data_pad != vol->data_pad) {
err = 5;
goto bad;
}
return 0;
bad:
- ubi_err("bad scanning information, error %d", err);
- ubi_dbg_dump_sv(sv);
- ubi_dbg_dump_vol_info(vol);
+ ubi_err("bad attaching information, error %d", err);
+ ubi_dump_av(av);
+ ubi_dump_vol_info(vol);
return -EINVAL;
}
/**
- * check_scanning_info - check that scanning information.
+ * check_attaching_info - check the attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* Even though we protect on-flash data by CRC checksums, we still don't trust
- * the media. This function ensures that scanning information is consistent to
- * the information read from the volume table. Returns zero if the scanning
+ * the media. This function ensures that attaching information is consistent to
+ * the information read from the volume table. Returns zero if the attaching
* information is OK and %-EINVAL if it is not.
*/
-static int check_scanning_info(const struct ubi_device *ubi,
- struct ubi_scan_info *si)
+static int check_attaching_info(const struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
{
int err, i;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
- if (si->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
- ubi_err("scanning found %d volumes, maximum is %d + %d",
- si->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
+ if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
+ ubi_err("found %d volumes while attaching, maximum is %d + %d",
+ ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
return -EINVAL;
}
- if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
- si->highest_vol_id < UBI_INTERNAL_VOL_START) {
- ubi_err("too large volume ID %d found by scanning",
- si->highest_vol_id);
+ if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
+ ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err("too large volume ID %d found", ai->highest_vol_id);
return -EINVAL;
}
for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
cond_resched();
- sv = ubi_scan_find_sv(si, i);
+ av = ubi_find_av(ai, i);
vol = ubi->volumes[i];
if (!vol) {
- if (sv)
- ubi_scan_rm_volume(si, sv);
+ if (av)
+ ubi_remove_av(ai, av);
continue;
}
if (vol->reserved_pebs == 0) {
ubi_assert(i < ubi->vtbl_slots);
- if (!sv)
+ if (!av)
continue;
/*
- * During scanning we found a volume which does not
+ * During attaching we found a volume which does not
* exist according to the information in the volume
* table. This must have happened due to an unclean
* reboot while the volume was being removed. Discard
* these eraseblocks.
*/
- ubi_msg("finish volume %d removal", sv->vol_id);
- ubi_scan_rm_volume(si, sv);
- } else if (sv) {
- err = check_sv(vol, sv);
+ ubi_msg("finish volume %d removal", av->vol_id);
+ ubi_remove_av(ai, av);
+ } else if (av) {
+ err = check_av(vol, av);
if (err)
return err;
}
@@ -774,16 +768,16 @@ static int check_scanning_info(const struct ubi_device *ubi,
/**
* ubi_read_volume_table - read the volume table.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function reads volume table, checks it, recover from errors if needed,
* or creates it if needed. Returns zero in case of success and a negative
* error code in case of failure.
*/
-int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int i, err;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
@@ -798,8 +792,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
- sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
- if (!sv) {
+ av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID);
+ if (!av) {
/*
* No logical eraseblocks belonging to the layout volume were
* found. This could mean that the flash is just empty. In
@@ -808,8 +802,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
* But if flash is not empty this must be a corruption or the
* MTD device just contains garbage.
*/
- if (si->is_empty) {
- ubi->vtbl = create_empty_lvol(ubi, si);
+ if (ai->is_empty) {
+ ubi->vtbl = create_empty_lvol(ubi, ai);
if (IS_ERR(ubi->vtbl))
return PTR_ERR(ubi->vtbl);
} else {
@@ -817,14 +811,14 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
return -EINVAL;
}
} else {
- if (sv->leb_count > UBI_LAYOUT_VOLUME_EBS) {
+ if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
/* This must not happen with proper UBI images */
- dbg_err("too many LEBs (%d) in layout volume",
- sv->leb_count);
+ ubi_err("too many LEBs (%d) in layout volume",
+ av->leb_count);
return -EINVAL;
}
- ubi->vtbl = process_lvol(ubi, si, sv);
+ ubi->vtbl = process_lvol(ubi, ai, av);
if (IS_ERR(ubi->vtbl))
return PTR_ERR(ubi->vtbl);
}
@@ -835,15 +829,15 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
* The layout volume is OK, initialize the corresponding in-RAM data
* structures.
*/
- err = init_volumes(ubi, si, ubi->vtbl);
+ err = init_volumes(ubi, ai, ubi->vtbl);
if (err)
goto out_free;
/*
- * Make sure that the scanning information is consistent to the
+ * Make sure that the attaching information is consistent to the
* information stored in the volume table.
*/
- err = check_scanning_info(ubi, si);
+ err = check_attaching_info(ubi, ai);
if (err)
goto out_free;
@@ -858,21 +852,17 @@ out_free:
return err;
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_vtbl_check - check volume table.
+ * self_vtbl_check - check volume table.
* @ubi: UBI device description object
*/
-static void paranoid_vtbl_check(const struct ubi_device *ubi)
+static void self_vtbl_check(const struct ubi_device *ubi)
{
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return;
if (vtbl_check(ubi, ubi->vtbl)) {
- ubi_err("paranoid check failed");
+ ubi_err("self-check failed");
BUG();
}
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/ANDROID_3.4.5/drivers/mtd/ubi/wl.c b/ANDROID_3.4.5/drivers/mtd/ubi/wl.c
index 7c1a9bf8..6bda6a4b 100644
--- a/ANDROID_3.4.5/drivers/mtd/ubi/wl.c
+++ b/ANDROID_3.4.5/drivers/mtd/ubi/wl.c
@@ -41,12 +41,6 @@
* physical eraseblocks with low erase counter to free physical eraseblocks
* with high erase counter.
*
- * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
- * an "optimal" physical eraseblock. For example, when it is known that the
- * physical eraseblock will be "put" soon because it contains short-term data,
- * the WL sub-system may pick a free physical eraseblock with low erase
- * counter, and so forth.
- *
* If the WL sub-system fails to erase a physical eraseblock, it marks it as
* bad.
*
@@ -70,8 +64,7 @@
* to the user; instead, we first want to let users fill them up with data;
*
* o there is a chance that the user will put the physical eraseblock very
- * soon, so it makes sense not to move it for some time, but wait; this is
- * especially important in case of "short term" physical eraseblocks.
+ * soon, so it makes sense not to move it for some time, but wait.
*
* Physical eraseblocks stay protected only for limited time. But the "time" is
* measured in erase cycles in this case. This is implemented with help of the
@@ -142,37 +135,46 @@
*/
#define WL_MAX_FAILURES 32
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root);
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
/**
- * struct ubi_work - UBI work description data structure.
- * @list: a link in the list of pending works
- * @func: worker function
- * @e: physical eraseblock to erase
- * @torture: if the physical eraseblock has to be tortured
- *
- * The @func pointer points to the worker function. If the @cancel argument is
- * not zero, the worker has to free the resources and exit immediately. The
- * worker has to return zero in case of success and a negative error code in
- * case of failure.
+ * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
+ * @wrk: the work description object
*/
-struct ubi_work {
- struct list_head list;
- int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
- /* The below fields are only relevant to erasure works */
- struct ubi_wl_entry *e;
- int torture;
-};
-
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
-static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
- struct ubi_wl_entry *e,
- struct rb_root *root);
-static int paranoid_check_in_pq(const struct ubi_device *ubi,
- struct ubi_wl_entry *e);
+static void update_fastmap_work_fn(struct work_struct *wrk)
+{
+ struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
+ ubi_update_fastmap(ubi);
+}
+
+/**
+ * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
+ * @ubi: UBI device description object
+ * @pnum: the to be checked PEB
+ */
+static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
+{
+ int i;
+
+ if (!ubi->fm)
+ return 0;
+
+ for (i = 0; i < ubi->fm->used_blocks; i++)
+ if (ubi->fm->e[i]->pnum == pnum)
+ return 1;
+
+ return 0;
+}
#else
-#define paranoid_check_ec(ubi, pnum, ec) 0
-#define paranoid_check_in_wl_tree(ubi, e, root)
-#define paranoid_check_in_pq(ubi, e) 0
+static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
+{
+ return 0;
+}
#endif
/**
@@ -271,18 +273,16 @@ static int produce_free_peb(struct ubi_device *ubi)
{
int err;
- spin_lock(&ubi->wl_lock);
while (!ubi->free.rb_node) {
spin_unlock(&ubi->wl_lock);
dbg_wl("do one work synchronously");
err = do_work(ubi);
- if (err)
- return err;
spin_lock(&ubi->wl_lock);
+ if (err)
+ return err;
}
- spin_unlock(&ubi->wl_lock);
return 0;
}
@@ -349,16 +349,18 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
/**
* find_wl_entry - find wear-leveling entry closest to certain erase counter.
+ * @ubi: UBI device description object
* @root: the RB-tree where to look for
* @diff: maximum possible difference from the smallest erase counter
*
* This function looks for a wear leveling entry with erase counter closest to
* min + @diff, where min is the smallest erase counter.
*/
-static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff)
+static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root, int diff)
{
struct rb_node *p;
- struct ubi_wl_entry *e;
+ struct ubi_wl_entry *e, *prev_e = NULL;
int max;
e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
@@ -373,39 +375,143 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff)
p = p->rb_left;
else {
p = p->rb_right;
+ prev_e = e;
e = e1;
}
}
+ /* If no fastmap has been written and this WL entry can be used
+ * as anchor PEB, hold it back and return the second best WL entry
+ * such that fastmap can use the anchor PEB later. */
+ if (prev_e && !ubi->fm_disabled &&
+ !ubi->fm && e->pnum < UBI_FM_MAX_START)
+ return prev_e;
+
return e;
}
/**
- * ubi_wl_get_peb - get a physical eraseblock.
+ * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
* @ubi: UBI device description object
- * @dtype: type of data which will be stored in this physical eraseblock
+ * @root: the RB-tree where to look for
*
- * This function returns a physical eraseblock in case of success and a
- * negative error code in case of failure. Might sleep.
+ * This function looks for a wear leveling entry with medium erase counter,
+ * but not greater or equivalent than the lowest erase counter plus
+ * %WL_FREE_MAX_DIFF/2.
*/
-int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
+static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root)
{
- int err;
struct ubi_wl_entry *e, *first, *last;
- ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
- dtype == UBI_UNKNOWN);
+ first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+ last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
+
+ if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
+ e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* If no fastmap has been written and this WL entry can be used
+ * as anchor PEB, hold it back and return the second best
+ * WL entry such that fastmap can use the anchor PEB later. */
+ if (e && !ubi->fm_disabled && !ubi->fm &&
+ e->pnum < UBI_FM_MAX_START)
+ e = rb_entry(rb_next(root->rb_node),
+ struct ubi_wl_entry, u.rb);
+#endif
+ } else
+ e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
+
+ return e;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * find_anchor_wl_entry - find wear-leveling entry to be used as anchor PEB.
+ * @root: the RB-tree where to look for
+ */
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e, *victim = NULL;
+ int max_ec = UBI_MAX_ERASECOUNTER;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb) {
+ if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
+ victim = e;
+ max_ec = e->ec;
+ }
+ }
+
+ return victim;
+}
+
+static int anchor_pebs_avalible(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb)
+ if (e->pnum < UBI_FM_MAX_START)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * @ubi: UBI device description object
+ * @anchor: This PEB will be used as anchor PEB by fastmap
+ *
+ * The function returns a physical erase block with a given maximal number
+ * and removes it from the wl subsystem.
+ * Must be called with wl_lock held!
+ */
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
+{
+ struct ubi_wl_entry *e = NULL;
+
+ if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ goto out;
+
+ if (anchor)
+ e = find_anchor_wl_entry(&ubi->free);
+ else
+ e = find_mean_wl_entry(ubi, &ubi->free);
+
+ if (!e)
+ goto out;
+
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
+ /* remove it from the free list,
+ * the wl subsystem does no longer know this erase block */
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+out:
+ return e;
+}
+#endif
+
+/**
+ * __wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ */
+static int __wl_get_peb(struct ubi_device *ubi)
+{
+ int err;
+ struct ubi_wl_entry *e;
retry:
- spin_lock(&ubi->wl_lock);
if (!ubi->free.rb_node) {
if (ubi->works_count == 0) {
- ubi_assert(list_empty(&ubi->works));
ubi_err("no free eraseblocks");
- spin_unlock(&ubi->wl_lock);
+ ubi_assert(list_empty(&ubi->works));
return -ENOSPC;
}
- spin_unlock(&ubi->wl_lock);
err = produce_free_peb(ubi);
if (err < 0)
@@ -413,64 +519,182 @@ retry:
goto retry;
}
- switch (dtype) {
- case UBI_LONGTERM:
- /*
- * For long term data we pick a physical eraseblock with high
- * erase counter. But the highest erase counter we can pick is
- * bounded by the the lowest erase counter plus
- * %WL_FREE_MAX_DIFF.
- */
- e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- break;
- case UBI_UNKNOWN:
- /*
- * For unknown data we pick a physical eraseblock with medium
- * erase counter. But we by no means can pick a physical
- * eraseblock with erase counter greater or equivalent than the
- * lowest erase counter plus %WL_FREE_MAX_DIFF/2.
- */
- first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
- u.rb);
- last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
-
- if (last->ec - first->ec < WL_FREE_MAX_DIFF)
- e = rb_entry(ubi->free.rb_node,
- struct ubi_wl_entry, u.rb);
- else
- e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2);
- break;
- case UBI_SHORTTERM:
- /*
- * For short term data we pick a physical eraseblock with the
- * lowest erase counter as we expect it will be erased soon.
- */
- e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
- break;
- default:
- BUG();
+ e = find_mean_wl_entry(ubi, &ubi->free);
+ if (!e) {
+ ubi_err("no free eraseblocks");
+ return -ENOSPC;
}
- paranoid_check_in_wl_tree(ubi, e, &ubi->free);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
/*
* Move the physical eraseblock to the protection queue where it will
* be protected from being moved for some time.
*/
rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+#ifndef CONFIG_MTD_UBI_FASTMAP
+ /* We have to enqueue e only if fastmap is disabled,
+ * is fastmap enabled prot_queue_add() will be called by
+ * ubi_wl_get_peb() after removing e from the pool. */
prot_queue_add(ubi, e);
+#endif
+ return e->pnum;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * return_unused_pool_pebs - returns unused PEB to the free tree.
+ * @ubi: UBI device description object
+ * @pool: fastmap pool description object
+ */
+static void return_unused_pool_pebs(struct ubi_device *ubi,
+ struct ubi_fm_pool *pool)
+{
+ int i;
+ struct ubi_wl_entry *e;
+
+ for (i = pool->used; i < pool->size; i++) {
+ e = ubi->lookuptbl[pool->pebs[i]];
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ }
+}
+
+/**
+ * refill_wl_pool - refills all the fastmap pool used by the
+ * WL sub-system.
+ * @ubi: UBI device description object
+ */
+static void refill_wl_pool(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+
+ return_unused_pool_pebs(ubi, pool);
+
+ for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
+ if (!ubi->free.rb_node ||
+ (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ break;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+
+ pool->pebs[pool->size] = e->pnum;
+ }
+ pool->used = 0;
+}
+
+/**
+ * refill_wl_user_pool - refills all the fastmap pool used by ubi_wl_get_peb.
+ * @ubi: UBI device description object
+ */
+static void refill_wl_user_pool(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+
+ return_unused_pool_pebs(ubi, pool);
+
+ for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
+ pool->pebs[pool->size] = __wl_get_peb(ubi);
+ if (pool->pebs[pool->size] < 0)
+ break;
+ }
+ pool->used = 0;
+}
+
+/**
+ * ubi_refill_pools - refills all fastmap PEB pools.
+ * @ubi: UBI device description object
+ */
+void ubi_refill_pools(struct ubi_device *ubi)
+{
+ spin_lock(&ubi->wl_lock);
+ refill_wl_pool(ubi);
+ refill_wl_user_pool(ubi);
+ spin_unlock(&ubi->wl_lock);
+}
+
+/* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of
+ * the fastmap pool.
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int ret;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+
+ if (!pool->size || !wl_pool->size || pool->used == pool->size ||
+ wl_pool->used == wl_pool->size) {
+ ubi_update_fastmap(ubi);
+ }
+ /* we got not a single free PEB */
+ if (!pool->size)
+ ret = -ENOSPC;
+ else {
+ spin_lock(&ubi->wl_lock);
+ ret = pool->pebs[pool->used++];
+ prot_queue_add(ubi, ubi->lookuptbl[ret]);
+ spin_unlock(&ubi->wl_lock);
+ }
+
+ return ret;
+}
+
+/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int pnum;
+
+ if (pool->used == pool->size || !pool->size) {
+ /* We cannot update the fastmap here because this
+ * function is called in atomic context.
+ * Let's fail here and refill/update it as soon as possible. */
+ schedule_work(&ubi->fm_work);
+ return NULL;
+ } else {
+ pnum = pool->pebs[pool->used++];
+ return ubi->lookuptbl[pnum];
+ }
+}
+#else
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ rb_erase(&e->u.rb, &ubi->free);
+
+ return e;
+}
+
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int peb, err;
+
+ spin_lock(&ubi->wl_lock);
+ peb = __wl_get_peb(ubi);
spin_unlock(&ubi->wl_lock);
- err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
- ubi->peb_size - ubi->vid_hdr_aloffset);
+ err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
+ ubi->peb_size - ubi->vid_hdr_aloffset);
if (err) {
- ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
+ ubi_err("new PEB %d does not contain all 0xFF bytes", peb);
return err;
}
- return e->pnum;
+ return peb;
}
+#endif
/**
* prot_queue_del - remove a physical eraseblock from the protection queue.
@@ -488,7 +712,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
if (!e)
return -ENODEV;
- if (paranoid_check_in_pq(ubi, e))
+ if (self_check_in_pq(ubi, e))
return -ENODEV;
list_del(&e->u.list);
@@ -514,7 +738,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
- err = paranoid_check_ec(ubi, e->pnum, e->ec);
+ err = self_check_ec(ubi, e->pnum, e->ec);
if (err)
return -EINVAL;
@@ -602,14 +826,14 @@ repeat:
}
/**
- * schedule_ubi_work - schedule a work.
+ * __schedule_ubi_work - schedule a work.
* @ubi: UBI device description object
* @wrk: the work to schedule
*
* This function adds a work defined by @wrk to the tail of the pending works
- * list.
+ * list. Can only be used if ubi->work_sem is already held in read mode!
*/
-static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
spin_lock(&ubi->wl_lock);
list_add_tail(&wrk->list, &ubi->works);
@@ -620,23 +844,54 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
spin_unlock(&ubi->wl_lock);
}
+/**
+ * schedule_ubi_work - schedule a work.
+ * @ubi: UBI device description object
+ * @wrk: the work to schedule
+ *
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list.
+ */
+static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+{
+ down_read(&ubi->work_sem);
+ __schedule_ubi_work(ubi, wrk);
+ up_read(&ubi->work_sem);
+}
+
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int cancel);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_is_erase_work - checks whether a work is erase work.
+ * @wrk: The work object to be checked
+ */
+int ubi_is_erase_work(struct ubi_work *wrk)
+{
+ return wrk->func == erase_worker;
+}
+#endif
+
/**
* schedule_erase - schedule an erase work.
* @ubi: UBI device description object
* @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
*
* This function returns zero in case of success and a %-ENOMEM in case of
* failure.
*/
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
- int torture)
+ int vol_id, int lnum, int torture)
{
struct ubi_work *wl_wrk;
+ ubi_assert(e);
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
e->pnum, e->ec, torture);
@@ -646,6 +901,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
wl_wrk->func = &erase_worker;
wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
wl_wrk->torture = torture;
schedule_ubi_work(ubi, wl_wrk);
@@ -653,6 +910,110 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
}
/**
+ * do_sync_erase - run the erase worker synchronously.
+ * @ubi: UBI device description object
+ * @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ */
+static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int vol_id, int lnum, int torture)
+{
+ struct ubi_work *wl_wrk;
+
+ dbg_wl("sync erase of PEB %i", e->pnum);
+
+ wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wl_wrk)
+ return -ENOMEM;
+
+ wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
+ wl_wrk->torture = torture;
+
+ return erase_worker(ubi, wl_wrk, 0);
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+
+int ubi_wl_put_erased_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
+ int lnum, int torture)
+{
+ struct ubi_wl_entry *e;
+ int pnum = fm_e->pnum;
+
+ dbg_wl("PEB %d", pnum);
+
+ ubi_assert(pnum >= 0);
+ ubi_assert(pnum < ubi->peb_count);
+
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+
+ if (!e) {
+ e = fm_e;
+ ubi_assert(e->ec >= 0);
+ ubi->lookuptbl[pnum] = e;
+ } else {
+ e->ec = fm_e->ec;
+ kfree(fm_e);
+ }
+
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ spin_unlock(&ubi->wl_lock);
+
+ return 0;
+}
+
+/**
+ * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
+ * sub-system.
+ * see: ubi_wl_put_peb()
+ *
+ * @ubi: UBI device description object
+ * @fm_e: physical eraseblock to return
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if this physical eraseblock has to be tortured
+ */
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
+ int lnum, int torture)
+{
+ struct ubi_wl_entry *e;
+ int vol_id, pnum = fm_e->pnum;
+
+ dbg_wl("PEB %d", pnum);
+
+ ubi_assert(pnum >= 0);
+ ubi_assert(pnum < ubi->peb_count);
+
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+
+ /* This can happen if we recovered from a fastmap the very
+ * first time and writing now a new one. In this case the wl system
+ * has never seen any PEB used by the original fastmap.
+ */
+ if (!e) {
+ e = fm_e;
+ ubi_assert(e->ec >= 0);
+ ubi->lookuptbl[pnum] = e;
+ } else {
+ e->ec = fm_e->ec;
+ kfree(fm_e);
+ }
+
+ spin_unlock(&ubi->wl_lock);
+
+ vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
+ return schedule_erase(ubi, e, vol_id, lnum, torture);
+}
+#endif
+
+/**
* wear_leveling_worker - wear-leveling worker function.
* @ubi: UBI device description object
* @wrk: the work object
@@ -667,6 +1028,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
int vol_id = -1, uninitialized_var(lnum);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int anchor = wrk->anchor;
+#endif
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_hdr *vid_hdr;
@@ -700,21 +1065,46 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
goto out_cancel;
}
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* Check whether we need to produce an anchor PEB */
+ if (!anchor)
+ anchor = !anchor_pebs_avalible(&ubi->free);
+ if (anchor) {
+ e1 = find_anchor_wl_entry(&ubi->used);
+ if (!e1)
+ goto out_cancel;
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
+ rb_erase(&e1->u.rb, &ubi->used);
+ dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
+ } else if (!ubi->scrub.rb_node) {
+#else
if (!ubi->scrub.rb_node) {
+#endif
/*
* Now pick the least worn-out used physical eraseblock and a
* highly worn-out free physical eraseblock. If the erase
* counters differ much enough, start wear-leveling.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
dbg_wl("no WL needed: min used EC %d, max free EC %d",
e1->ec, e2->ec);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ pool->used--;
+#else
+ wl_tree_add(e2, &ubi->free);
+#endif
goto out_cancel;
}
- paranoid_check_in_wl_tree(ubi, e1, &ubi->used);
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("move PEB %d EC %d to PEB %d EC %d",
e1->pnum, e1->ec, e2->pnum, e2->ec);
@@ -722,14 +1112,15 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
/* Perform scrubbing */
scrubbing = 1;
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- paranoid_check_in_wl_tree(ubi, e1, &ubi->scrub);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->scrub);
rb_erase(&e1->u.rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
- paranoid_check_in_wl_tree(ubi, e2, &ubi->free);
- rb_erase(&e2->u.rb, &ubi->free);
ubi->move_from = e1;
ubi->move_to = e2;
spin_unlock(&ubi->wl_lock);
@@ -846,7 +1237,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- err = schedule_erase(ubi, e1, 0);
+ err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e1);
if (e2)
@@ -861,7 +1252,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
*/
dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
e2->pnum, vol_id, lnum);
- err = schedule_erase(ubi, e2, 0);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e2);
goto out_ro;
@@ -900,7 +1291,7 @@ out_not_moved:
spin_unlock(&ubi->wl_lock);
ubi_free_vid_hdr(ubi, vid_hdr);
- err = schedule_erase(ubi, e2, torture);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e2);
goto out_ro;
@@ -941,12 +1332,13 @@ out_cancel:
/**
* ensure_wear_leveling - schedule wear-leveling if it is needed.
* @ubi: UBI device description object
+ * @nested: set to non-zero if this function is called from UBI worker
*
* This function checks if it is time to start wear-leveling and schedules it
* if yes. This function returns zero in case of success and a negative error
* code in case of failure.
*/
-static int ensure_wear_leveling(struct ubi_device *ubi)
+static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
int err = 0;
struct ubi_wl_entry *e1;
@@ -974,7 +1366,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
* %UBI_WL_THRESHOLD.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+ e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
goto out_unlock;
@@ -991,8 +1383,12 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
goto out_cancel;
}
+ wrk->anchor = 0;
wrk->func = &wear_leveling_worker;
- schedule_ubi_work(ubi, wrk);
+ if (nested)
+ __schedule_ubi_work(ubi, wrk);
+ else
+ schedule_ubi_work(ubi, wrk);
return err;
out_cancel:
@@ -1003,6 +1399,38 @@ out_unlock:
return err;
}
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
+ * @ubi: UBI device description object
+ */
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+{
+ struct ubi_work *wrk;
+
+ spin_lock(&ubi->wl_lock);
+ if (ubi->wl_scheduled) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+ ubi->wl_scheduled = 1;
+ spin_unlock(&ubi->wl_lock);
+
+ wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wrk) {
+ spin_lock(&ubi->wl_lock);
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ return -ENOMEM;
+ }
+
+ wrk->anchor = 1;
+ wrk->func = &wear_leveling_worker;
+ schedule_ubi_work(ubi, wrk);
+ return 0;
+}
+#endif
+
/**
* erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
@@ -1018,7 +1446,10 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int cancel)
{
struct ubi_wl_entry *e = wl_wrk->e;
- int pnum = e->pnum, err, need;
+ int pnum = e->pnum;
+ int vol_id = wl_wrk->vol_id;
+ int lnum = wl_wrk->lnum;
+ int err, available_consumed = 0;
if (cancel) {
dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1027,7 +1458,10 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
return 0;
}
- dbg_wl("erase PEB %d EC %d", pnum, e->ec);
+ dbg_wl("erase PEB %d EC %d LEB %d:%d",
+ pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
+
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
@@ -1036,6 +1470,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
spin_unlock(&ubi->wl_lock);
/*
@@ -1045,7 +1480,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
serve_prot_queue(ubi);
/* And take care about wear-leveling */
- err = ensure_wear_leveling(ubi);
+ err = ensure_wear_leveling(ubi, 1);
return err;
}
@@ -1057,7 +1492,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int err1;
/* Re-schedule the LEB for erasure */
- err1 = schedule_erase(ubi, e, 0);
+ err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
if (err1) {
err = err1;
goto out_ro;
@@ -1082,20 +1517,14 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
}
spin_lock(&ubi->volumes_lock);
- need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
- if (need > 0) {
- need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
- ubi->avail_pebs -= need;
- ubi->rsvd_pebs += need;
- ubi->beb_rsvd_pebs += need;
- if (need > 0)
- ubi_msg("reserve more %d PEBs", need);
- }
-
if (ubi->beb_rsvd_pebs == 0) {
- spin_unlock(&ubi->volumes_lock);
- ubi_err("no reserved physical eraseblocks");
- goto out_ro;
+ if (ubi->avail_pebs == 0) {
+ spin_unlock(&ubi->volumes_lock);
+ ubi_err("no reserved/available physical eraseblocks");
+ goto out_ro;
+ }
+ ubi->avail_pebs -= 1;
+ available_consumed = 1;
}
spin_unlock(&ubi->volumes_lock);
@@ -1105,19 +1534,36 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
goto out_ro;
spin_lock(&ubi->volumes_lock);
- ubi->beb_rsvd_pebs -= 1;
+ if (ubi->beb_rsvd_pebs > 0) {
+ if (available_consumed) {
+ /*
+ * The amount of reserved PEBs increased since we last
+ * checked.
+ */
+ ubi->avail_pebs += 1;
+ available_consumed = 0;
+ }
+ ubi->beb_rsvd_pebs -= 1;
+ }
ubi->bad_peb_count += 1;
ubi->good_peb_count -= 1;
ubi_calculate_reserved(ubi);
- if (ubi->beb_rsvd_pebs)
+ if (available_consumed)
+ ubi_warn("no PEBs in the reserved pool, used an available PEB");
+ else if (ubi->beb_rsvd_pebs)
ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
else
- ubi_warn("last PEB from the reserved pool was used");
+ ubi_warn("last PEB from the reserve was used");
spin_unlock(&ubi->volumes_lock);
return err;
out_ro:
+ if (available_consumed) {
+ spin_lock(&ubi->volumes_lock);
+ ubi->avail_pebs += 1;
+ spin_unlock(&ubi->volumes_lock);
+ }
ubi_ro_mode(ubi);
return err;
}
@@ -1125,6 +1571,8 @@ out_ro:
/**
* ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
* @ubi: UBI device description object
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
* @pnum: physical eraseblock to return
* @torture: if this physical eraseblock has to be tortured
*
@@ -1133,7 +1581,8 @@ out_ro:
* occurred to this @pnum and it has to be tested. This function returns zero
* in case of success, and a negative error code in case of failure.
*/
-int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture)
{
int err;
struct ubi_wl_entry *e;
@@ -1175,13 +1624,13 @@ retry:
return 0;
} else {
if (in_wl_tree(e, &ubi->used)) {
- paranoid_check_in_wl_tree(ubi, e, &ubi->used);
+ self_check_in_wl_tree(ubi, e, &ubi->used);
rb_erase(&e->u.rb, &ubi->used);
} else if (in_wl_tree(e, &ubi->scrub)) {
- paranoid_check_in_wl_tree(ubi, e, &ubi->scrub);
+ self_check_in_wl_tree(ubi, e, &ubi->scrub);
rb_erase(&e->u.rb, &ubi->scrub);
} else if (in_wl_tree(e, &ubi->erroneous)) {
- paranoid_check_in_wl_tree(ubi, e, &ubi->erroneous);
+ self_check_in_wl_tree(ubi, e, &ubi->erroneous);
rb_erase(&e->u.rb, &ubi->erroneous);
ubi->erroneous_peb_count -= 1;
ubi_assert(ubi->erroneous_peb_count >= 0);
@@ -1199,7 +1648,7 @@ retry:
}
spin_unlock(&ubi->wl_lock);
- err = schedule_erase(ubi, e, torture);
+ err = schedule_erase(ubi, e, vol_id, lnum, torture);
if (err) {
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->used);
@@ -1223,7 +1672,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
struct ubi_wl_entry *e;
- dbg_msg("schedule PEB %d for scrubbing", pnum);
+ ubi_msg("schedule PEB %d for scrubbing", pnum);
retry:
spin_lock(&ubi->wl_lock);
@@ -1248,7 +1697,7 @@ retry:
}
if (in_wl_tree(e, &ubi->used)) {
- paranoid_check_in_wl_tree(ubi, e, &ubi->used);
+ self_check_in_wl_tree(ubi, e, &ubi->used);
rb_erase(&e->u.rb, &ubi->used);
} else {
int err;
@@ -1269,29 +1718,60 @@ retry:
* Technically scrubbing is the same as wear-leveling, so it is done
* by the WL worker.
*/
- return ensure_wear_leveling(ubi);
+ return ensure_wear_leveling(ubi, 0);
}
/**
* ubi_wl_flush - flush all pending works.
* @ubi: UBI device description object
+ * @vol_id: the volume id to flush for
+ * @lnum: the logical eraseblock number to flush for
*
- * This function returns zero in case of success and a negative error code in
- * case of failure.
+ * This function executes all pending works for a particular volume id /
+ * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
+ * acts as a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
*/
-int ubi_wl_flush(struct ubi_device *ubi)
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
{
- int err;
+ int err = 0;
+ int found = 1;
/*
* Erase while the pending works queue is not empty, but not more than
* the number of currently pending works.
*/
- dbg_wl("flush (%d pending works)", ubi->works_count);
- while (ubi->works_count) {
- err = do_work(ubi);
- if (err)
- return err;
+ dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
+ vol_id, lnum, ubi->works_count);
+
+ while (found) {
+ struct ubi_work *wrk;
+ found = 0;
+
+ down_read(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
+ list_for_each_entry(wrk, &ubi->works, list) {
+ if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
+ (lnum == UBI_ALL || wrk->lnum == lnum)) {
+ list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ spin_unlock(&ubi->wl_lock);
+
+ err = wrk->func(ubi, wrk, 0);
+ if (err) {
+ up_read(&ubi->work_sem);
+ return err;
+ }
+
+ spin_lock(&ubi->wl_lock);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
}
/*
@@ -1301,18 +1781,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
down_write(&ubi->work_sem);
up_write(&ubi->work_sem);
- /*
- * And in case last was the WL worker and it canceled the LEB
- * movement, flush again.
- */
- while (ubi->works_count) {
- dbg_wl("flush more (%d pending works)", ubi->works_count);
- err = do_work(ubi);
- if (err)
- return err;
- }
-
- return 0;
+ return err;
}
/**
@@ -1421,27 +1890,30 @@ static void cancel_pending(struct ubi_device *ubi)
}
/**
- * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
+ * ubi_wl_init - initialize the WL sub-system using attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns zero in case of success, and a negative error code in
* case of failure.
*/
-int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
- int err, i;
+ int err, i, reserved_pebs, found_pebs = 0;
struct rb_node *rb1, *rb2;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb, *tmp;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp;
struct ubi_wl_entry *e;
ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
spin_lock_init(&ubi->wl_lock);
mutex_init(&ubi->move_mutex);
init_rwsem(&ubi->work_sem);
- ubi->max_ec = si->max_ec;
+ ubi->max_ec = ai->max_ec;
INIT_LIST_HEAD(&ubi->works);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
+#endif
sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
@@ -1454,48 +1926,58 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
INIT_LIST_HEAD(&ubi->pq[i]);
ubi->pq_head = 0;
- list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
+ list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, 0)) {
+ if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
kmem_cache_free(ubi_wl_entry_slab, e);
goto out_free;
}
+ found_pebs++;
}
- list_for_each_entry(seb, &si->free, u.list) {
+ ubi->free_count = 0;
+ list_for_each_entry(aeb, &ai->free, u.list) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
ubi_assert(e->ec >= 0);
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+
ubi->lookuptbl[e->pnum] = e;
+
+ found_pebs++;
}
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
ubi->lookuptbl[e->pnum] = e;
- if (!seb->scrub) {
+
+ if (!aeb->scrub) {
dbg_wl("add PEB %d EC %d to the used tree",
e->pnum, e->ec);
wl_tree_add(e, &ubi->used);
@@ -1504,22 +1986,36 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
e->pnum, e->ec);
wl_tree_add(e, &ubi->scrub);
}
+
+ found_pebs++;
}
}
- if (ubi->avail_pebs < WL_RESERVED_PEBS) {
+ dbg_wl("found %i PEBs", found_pebs);
+ if (ubi->fm)
+ ubi_assert(ubi->good_peb_count == \
+ found_pebs + ubi->fm->used_blocks);
+ else
+ ubi_assert(ubi->good_peb_count == found_pebs);
+ reserved_pebs = WL_RESERVED_PEBS;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* Reserve enough LEBs to store two fastmaps. */
+ reserved_pebs += 2;
+#endif
+
+ if (ubi->avail_pebs < reserved_pebs) {
ubi_err("no enough physical eraseblocks (%d, need %d)",
- ubi->avail_pebs, WL_RESERVED_PEBS);
+ ubi->avail_pebs, reserved_pebs);
if (ubi->corr_peb_count)
ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
goto out_free;
}
- ubi->avail_pebs -= WL_RESERVED_PEBS;
- ubi->rsvd_pebs += WL_RESERVED_PEBS;
+ ubi->avail_pebs -= reserved_pebs;
+ ubi->rsvd_pebs += reserved_pebs;
/* Schedule wear-leveling if needed */
- err = ensure_wear_leveling(ubi);
+ err = ensure_wear_leveling(ubi, 0);
if (err)
goto out_free;
@@ -1567,10 +2063,8 @@ void ubi_wl_close(struct ubi_device *ubi)
kfree(ubi->lookuptbl);
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
+ * self_check_ec - make sure that the erase counter of a PEB is correct.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @ec: the erase counter to check
@@ -1579,13 +2073,13 @@ void ubi_wl_close(struct ubi_device *ubi)
* is equivalent to @ec, and a negative error code if not or if an error
* occurred.
*/
-static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
int err;
long long read_ec;
struct ubi_ec_hdr *ec_hdr;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1600,10 +2094,10 @@ static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
}
read_ec = be64_to_cpu(ec_hdr->ec);
- if (ec != read_ec) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ if (ec != read_ec && read_ec - ec > 1) {
+ ubi_err("self-check failed for PEB %d", pnum);
ubi_err("read EC is %lld, should be %d", read_ec, ec);
- ubi_dbg_dump_stack();
+ dump_stack();
err = 1;
} else
err = 0;
@@ -1614,7 +2108,7 @@ out_free:
}
/**
- * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
+ * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
* @ubi: UBI device description object
* @e: the wear-leveling entry to check
* @root: the root of the tree
@@ -1622,37 +2116,36 @@ out_free:
* This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
* is not.
*/
-static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
- struct ubi_wl_entry *e,
- struct rb_root *root)
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root)
{
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
if (in_wl_tree(e, root))
return 0;
- ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
+ ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ",
e->pnum, e->ec, root);
- ubi_dbg_dump_stack();
+ dump_stack();
return -EINVAL;
}
/**
- * paranoid_check_in_pq - check if wear-leveling entry is in the protection
+ * self_check_in_pq - check if wear-leveling entry is in the protection
* queue.
* @ubi: UBI device description object
* @e: the wear-leveling entry to check
*
* This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
*/
-static int paranoid_check_in_pq(const struct ubi_device *ubi,
- struct ubi_wl_entry *e)
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
{
struct ubi_wl_entry *p;
int i;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
@@ -1660,10 +2153,8 @@ static int paranoid_check_in_pq(const struct ubi_device *ubi,
if (p == e)
return 0;
- ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
+ ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
e->pnum, e->ec);
- ubi_dbg_dump_stack();
+ dump_stack();
return -EINVAL;
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/ANDROID_3.4.5/drivers/mtd/wmt_env.c b/ANDROID_3.4.5/drivers/mtd/wmt_env.c
new file mode 100755
index 00000000..814dc60c
--- /dev/null
+++ b/ANDROID_3.4.5/drivers/mtd/wmt_env.c
@@ -0,0 +1,1099 @@
+/*
+ * This file is derived from crc32.c in U-Boot 1.1.4.
+ * For conditions of distribution and use, see copyright in crc32.c
+ */
+/*
+ * Some descriptions of such software. Copyright (c) 2008 WonderMedia Technologies, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify it under the
+ * terms of the GNU General Public License as published by the Free Software Foundation,
+ * either version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ * PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * WonderMedia Technologies, Inc.
+ * 10F, 529, Chung-Cheng Road, Hsin-Tien, Taipei 231, R.O.C.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/super.h>
+#include <mach/hardware.h>
+#include <linux/delay.h>
+#include <linux/semaphore.h>
+#include <linux/err.h>
+#include <asm/io.h>
+#include "mtdcore.h"
+#include "devices/wmt_sf.h"
+#include <linux/sha256.h>
+
+static DEFINE_SEMAPHORE(s_wmt_env_lock);
+
+#undef pr_err
+#undef pr_warning
+#undef pr_info
+#define pr_err(fmt, args...) printk("[WMTENV] %s, %d: " fmt, __func__, __LINE__, ##args)
+#define pr_warning(fmt, args...) printk("[WMTENV] %s, %d: " fmt, __func__, __LINE__, ##args)
+#define pr_info(fmt, args...) printk("[WMTENV] %s, %d: " fmt, __func__, __LINE__, ##args)
+
+
+extern unsigned int MTDSF_PHY_ADDR;
+extern int wmt_sfc_init(struct sfreg_t *sfc);
+extern int spi_flash_sector_erase(unsigned long addr, struct sfreg_t *sfreg);
+extern int spi_flash_sector_write(struct sfreg_t *sfreg,
+ unsigned char *sf_base_addr,
+ loff_t to, size_t len, u_char *buf);
+extern int sf_copy_env(char *dest, char *src, int len);
+extern int rsa_check(unsigned int pub_key_addr, unsigned int pub_key_size,
+ unsigned int sig_addr, unsigned int sig_size,
+ u8 *out_buf);
+
+extern int wmt_is_secure_enabled(void);
+
/**
 * White list applied when secure mode is enabled (wmt_is_secure_enabled()):
 * a non-"otp." variable that is not on this list is rejected with -EPERM
 * by wmt_setsyspara(). The list is NULL-terminated.
 */
static const char *env_white_list[] = {
	"boot-method",
	"ril.imei",
	"wmt.modem.wakeuplight",
	"wmt.display.tvformat",
	"wmt.display.fb1",
	"wmt.display.fb2",
	"ethaddr",
	NULL
};
+
/*
 * CRC-32 (IEEE 802.3, reflected polynomial 0xEDB88320) lookup table,
 * identical to U-Boot's crc32.c table so checksums over the environment
 * area agree between the bootloader and this driver.
 */
static const unsigned int sf_crc_table[256] = {
	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
	0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
	0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
	0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
	0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
	0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
	0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
	0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
	0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
	0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
	0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
	0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
	0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
	0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
	0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
	0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
	0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
	0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
	0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
	0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
	0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
	0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
	0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
	0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
	0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
	0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
	0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
	0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
	0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
	0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
	0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
	0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
	0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
	0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
	0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
	0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
	0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
	0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
	0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
	0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
	0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
	0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
	0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
	0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
	0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
	0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
	0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
	0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
	0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
	0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
	0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
	0x2d02ef8d
};

/**
 * uboot_crc32 - compute a U-Boot compatible CRC-32 over a buffer
 * @crc: seed; pass 0 for a fresh checksum, or a previous return value to
 *       continue an incremental computation (same type as kernel u32)
 * @buf: bytes to checksum
 * @len: number of bytes in @buf
 *
 * Standard reflected CRC-32: seed and result are bit-inverted around the
 * table-driven update loop. The old DO1/DO2/DO4/DO8 macro unrolling is
 * replaced with a plain loop — the compiler unrolls this just as well and
 * the function-like macros (multiple-evaluation hazard, file-scope macro
 * namespace pollution) go away.
 */
static unsigned long uboot_crc32(unsigned int crc, unsigned char const *buf, size_t len)
{
	crc ^= 0xffffffff;
	while (len--)
		crc = sf_crc_table[(crc ^ *buf++) & 0xff] ^ (crc >> 8);
	return crc ^ 0xffffffff;
}
+
/* Memory-mapped base address of the boot SPI flash window. */
#define SPI_FLASH_BASE 0xfff80000
/* One environment copy occupies a full 64KB erase sector. */
#define ENV_MAX_SIZE SZ_64K
/* Payload bytes in a copy: the sector minus the leading CRC word.
 * NOTE(review): struct env_t stores the CRC as unsigned long but this
 * subtracts sizeof(unsigned int) — these agree only on 32-bit ARM. */
#define ENV_DATA_SIZE (ENV_MAX_SIZE - sizeof(unsigned int))
/* Indices of the two redundant environment copies. */
#define ENV1 0
#define ENV2 1
/* Per-variable limits used when syncing persistent variables. */
#define MAX_NAME_SIZE 256
#define MAX_VALUE_SIZE (4*1024)

/* On-flash layout of one environment copy. */
struct env_t {
	unsigned long crc; /* CRC32 over data bytes */
	unsigned char data[ENV_DATA_SIZE]; /* NUL-separated "name=value" entries, double-NUL terminated */
};

/* Driver state: RAM cache of both copies plus the flash mapping. */
struct uboot_env {
	struct env_t *env[2]; /* cached copies: env[ENV1] primary, env[ENV2] backup */
	bool env_readed; /* cache has been loaded from flash */
	bool env_init; /* SF controller has been initialized */

	// raw
	void *io_base; /* ioremap of the 512KB flash window at 0xFFF80000 */
	uint32_t offset[2]; /* offset of each copy within the window */
	size_t size[2]; /* size of each copy (64KB each) */
};

/* Lazily allocated by env_init_if_needed(); access guarded by s_wmt_env_lock. */
static struct uboot_env *uboot_env;
+
+//GPIO_BASE_ADDR+0x100 should be bootstrap gpio, however, this is not true on some device.
+//As all wm8880 device boot from spi, i comment this out
+//static inline int boot_type(void)
+//{
+// uint32_t val = *((volatile unsigned int *)(GPIO_BASE_ADDR + 0x100));
+//
+// val = (val >> 1) & 0x3;
+// switch (val) {
+// case 0:
+// return SPI_FLASH_TYPE;
+// case 1:
+// return NAND_FLASH_TYPE;
+// case 2:
+// return NOR_FLASH_TYPE;
+// }
+// return -EINVAL;
+//}
+
+static inline unsigned char env_get_char(int type, int index)
+{
+ return uboot_env->env[type]->data[index];
+}
+
/*
 * envmatch - match a name (or name=value pair) against a stored entry
 * @type: which cached environment copy (ENV1/ENV2)
 * @s1:   either a bare 'name' or a full 'name=value' string
 * @i2:   index into the environment data of a stored 'name2=value2' entry
 *
 * Returns the data index of value2 when the names match, -1 otherwise.
 */
static int envmatch(int type, unsigned char *s1, int i2)
{
	for (;;) {
		/* i2 advances on every comparison, including the failing one */
		unsigned char c = env_get_char(type, i2++);

		if (*s1 != c)
			break;
		if (*s1++ == '=')
			return i2;
	}

	/* s1 was a bare name: it matches when the entry's '=' comes next */
	if (*s1 == '\0' && env_get_char(type, i2 - 1) == '=')
		return i2;
	return -1;
}
+
+
+/*
+ * raw interface
+ */
+static int raw_uboot_env_erase(int type)
+{
+ uint32_t offset;
+ int rc;
+
+ offset = uboot_env->offset[type]+0xfff80000-MTDSF_PHY_ADDR;
+
+ rc = spi_flash_sector_erase(offset, (struct sfreg_t *)SF_BASE_ADDR);
+ if (rc != ERR_OK) {
+ pr_err("spi_flash_sector_erase failed, try again\n");
+ rc = spi_flash_sector_erase(offset, (struct sfreg_t *)SF_BASE_ADDR);
+ if(rc != ERR_OK){
+ pr_err("spi_flash_sector_erase failed again\n");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int raw_uboot_env_write(int type,struct env_t *env)
+{
+ uint32_t offset;
+ void *io_base = uboot_env->io_base;
+ int rc;
+
+ offset = uboot_env->offset[type];
+
+ rc = spi_flash_sector_write(((struct sfreg_t *)SF_BASE_ADDR),
+ io_base,
+ offset,
+ ENV_MAX_SIZE,
+ (u_char *)env);
+ if (rc != ENV_MAX_SIZE)
+ pr_err("spi_flash_sector_write failed: 0x%x\n", rc);
+
+ return 0;
+}
+
+
+static int raw_uboot_env_read(void)
+{
+ unsigned long crc32;
+ int i;
+
+ //REG32_VAL(PMCEU_ADDR) |= SF_CLOCK_EN;
+ //wmt_sfc_init((struct sfreg_t *)SF_BASE_ADDR);
+
+ // ubootenv
+ for (i = 0; i < 2; ++i) {
+ struct env_t *env = uboot_env->env[i];
+ uint32_t offset = uboot_env->offset[i];
+
+ _memcpy_fromio((void *)env, uboot_env->io_base + offset, ENV_MAX_SIZE);
+ crc32 = uboot_crc32(0, env->data, ENV_DATA_SIZE);
+ if (env->crc != crc32) {
+ pr_err("ERROR:crc32 0x%lx, env->crc 0x%lx ????\n\n", crc32, env->crc);
+ if (i == 0) // uboot env must pass crc32
+ {
+ return -EINVAL;
+ }
+ }
+ }
+ //REG32_VAL(PMCEU_ADDR) &= ~(SF_CLOCK_EN);
+ return 0;
+}
+
+static int save_env(int index, struct env_t *env)
+{
+ int ret;
+
+ if(env == NULL || index > 2|| index < 0)
+ return -1;
+
+ env->crc = uboot_crc32(0, env->data, ENV_DATA_SIZE);
+
+ ret = raw_uboot_env_erase(index);
+ if (ret)
+ return ret;
+
+ raw_uboot_env_write(index,env);
+
+ return 0;
+
+}
+
+
+static int env_init_if_needed(void)
+{
+ int i;
+
+ //if (boot_type() != SPI_FLASH_TYPE){
+ // pr_err("unsupported boot type!");
+ // return -EINVAL;
+ //}
+
+ if (!uboot_env) {
+ uboot_env = kzalloc(sizeof(*uboot_env), GFP_KERNEL);
+ if (!uboot_env){
+ pr_err("out of memory!\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < 2; ++i) {
+ uboot_env->env[i] = kmalloc(ENV_MAX_SIZE, GFP_KERNEL);
+ if (!uboot_env->env[i]){
+ pr_err("out of memory!\n");
+ return -ENOMEM;
+ }
+ }
+ }
+
+ if (!uboot_env->io_base) {
+ uboot_env->io_base = (void *)ioremap(0xFFF80000, SZ_512K);
+ if (!uboot_env->io_base) {
+ printk(KERN_WARNING "uboot_env ioremap fail\n");
+ return -EIO;
+ }
+ uboot_env->offset[0] = 0x50000; // 0xfffd0000
+ uboot_env->size[0] = 0x10000;
+ uboot_env->offset[1] = 0x60000; // 0xfffe0000
+ uboot_env->size[1] = 0x10000;
+ }
+
+ if(!uboot_env->env_init){
+ wmt_sfc_init((struct sfreg_t *)SF_BASE_ADDR);
+ uboot_env->env_init = true;
+ }
+ // read the uboot env only once
+ if (uboot_env->env_readed == false) {
+ if (raw_uboot_env_read()){
+ pr_err("read env fail!\n");
+ return -EINVAL;
+ }
+ uboot_env->env_readed = true;
+ }
+ return 0;
+}
+
+/*
+ * return 0 have env
+ * return others no such env
+ */
+int search_env(int index, char *varname)
+{
+ int i, j, k, nxt = 0;
+ int rcode = 0;
+
+ if (env_init_if_needed()){
+ rcode = -EIO;
+ goto out;
+ }
+
+ k = -1;
+ i = 0;
+ for (j = 0; env_get_char(index, j) != '\0'; j = nxt+1) {
+
+ for (nxt = j; env_get_char(index, nxt) != '\0'; ++nxt)
+ ;
+ k = envmatch(index, (unsigned char *)varname, j);
+ if (k < 0)
+ continue;
+ break;
+ }
+
+ if (k < 0) {
+ rcode++;
+ }
+
+out:
+ return rcode;
+}
+
+/*
+ * insert env to env buf
+ */
+int insert_env(int index, char *varname, char *varval)
+{
+ int len, oldval;
+ int rcode = 0;
+ unsigned char *env, *nxt = NULL;
+ unsigned char *env_data;
+
+ if(*varname == '\0'|| index > 2 || index < 0)
+ return -EINVAL;
+
+ env_data = uboot_env->env[index]->data;
+
+ /*
+ * search if variable with this name already exists
+ */
+ oldval = -1;
+ for (env = env_data; *env; env = nxt+1) {
+ for (nxt = env; *nxt; ++nxt)
+ ;
+ oldval = envmatch(index, (unsigned char *)varname, env - env_data);
+ if (oldval >= 0)
+ break;
+ }
+
+ /*
+ * Delete any existing definition
+ */
+ if (oldval >= 0) {
+ /* otp env can not overwrite */
+ //if (is_otp_env) {
+ // rcode = -EEXIST;
+ // goto out;
+ //}
+
+ if (*++nxt == '\0') {
+ if (env > env_data)
+ env--;
+ else
+ *env = '\0';
+ } else {
+ for (;;) {
+ *env = *nxt++;
+ if ((*env == '\0') && (*nxt == '\0'))
+ break;
+ ++env;
+ }
+ }
+ *++env = '\0';
+ }
+
+ if ((varval == NULL) || (*varval == '\0')) {
+ if (oldval < 0) {
+ pr_info("No assigned any value for %s\n", varname);
+ rcode++;
+ } else {
+ rcode = 0;
+ }
+ goto out;
+ }
+
+ /*
+ * Append new definition at the end
+ */
+ for (env = env_data; *env || *(env+1); ++env)
+ ;
+ if (env > env_data)
+ ++env;
+ /*
+ * Overflow when:
+ * "varname" + "=" + "val" +"\0\0" > ENV_SIZE - (env-env_data)
+ */
+ len = strlen(varname) + 2;
+ /* add '=' for first arg, ' ' for all others */
+ len += strlen(varval);
+ if (len > (&env_data[ENV_MAX_SIZE]-env)) {
+ printk(KERN_WARNING "## Warning: environment overflow, \"%s\" deleted\n",
+ varname);
+ rcode++;
+ goto out;
+ }
+ while ((*env = *varname++) != '\0')
+ env++;
+
+ *env = '=';
+ while ((*++env = *varval++) != '\0')
+ ;
+
+ /* end is marked with double '\0' */
+ *++env = '\0';
+
+ rcode = 0;
+
+out:
+
+ return rcode;
+}
+
/*
 * is_persist - check whether an env variable must survive an env sync
 * @name: variable name (or the leading part of a 'name=value' entry)
 *
 * Returns 0 when @name starts with one of the persistent prefixes,
 * -1 otherwise. The prefix table is now static const so it is laid out
 * once at compile time instead of being rebuilt on every call.
 */
int is_persist(char *name)
{
	static const char *const persistlist[] = {
		"otp.", "ethaddr", "wmt.ethaddr.persist",
		"androidboot.serialno", "btaddr", "wmt.btaddr.persist",
		"pcba.serialno", "serialnum", "persist.", NULL
	};
	int i;

	for (i = 0; persistlist[i] != NULL; i++) {
		if (!strncmp(name, persistlist[i], strlen(persistlist[i])))
			return 0;
	}

	return -1;
}
+
+/*
+ * sync env2's persist to env1, then update to env1 and env2
+ */
+int sync_persist_env(struct env_t *env1, struct env_t *env2)
+{
+ int i;
+ int updated=0;
+ unsigned char name[MAX_NAME_SIZE] = {0};
+ unsigned char *val = NULL,*valbuf=NULL;
+ unsigned char *s;
+
+ valbuf = kzalloc(MAX_VALUE_SIZE,GFP_KERNEL);
+ if(!valbuf){
+ printk("alloc mem failed!\n");
+ return -ENOMEM;
+ }
+
+ for(s = env2->data; s < (env2->data+ENV_DATA_SIZE) && *s!='\0'; ){
+
+ if(is_persist(s)==0){
+ i=0;
+ while(*s != '=' && *s != '\0' && i < (sizeof(name)-1))
+ name[i++] = *s++;
+
+ name[i] = '\0';
+
+ i=0;
+ s++;//skip '='
+ val = valbuf;
+ while(*s != '\0')
+ val[i++] = *s++;
+
+ val[i] = '\0';
+ s++;
+ //printk("%s=%s\n",name,val);
+
+ if(search_env(ENV1,name)){
+ printk("insert %s=%s to env1\n",name,val);
+ insert_env(ENV1,name,val);
+ //updated ++;
+ }
+ }
+ else{
+ s += (strlen(s)+1);
+ }
+ }
+
+ //printk("sync %d otps to env1\n",updated);
+ save_env(ENV1,env1);
+ save_env(ENV2,env1);
+ memcpy(env2,env1,sizeof(struct env_t));
+ kfree(valbuf);
+
+ return 0;
+}
+
+
+/*
+ * sync and update env to SF
+ */
+static int update_env(int to_update)
+{
+ struct env_t *env1 = uboot_env->env[ENV1];
+ struct env_t *env2 = uboot_env->env[ENV2];
+
+ if(!to_update){
+ return 0;
+ }
+
+ if(uboot_env->env_readed)
+ return sync_persist_env(env1,env2);
+
+ return -EIO;
+}
+
+static int esync(void)
+{
+ int i;
+ int ret;
+ u32 crc1,crc2;
+ char *dest,*src;
+ struct env_t *env1,*env2;
+
+ /* copy env raw data */
+ for(i = 0; i < 2; i++){
+ dest = (char *)uboot_env->env[i];
+ src = (char *)(uboot_env->offset[i] + uboot_env->io_base);
+ memset(dest, 0x00, ENV_MAX_SIZE);
+ sf_copy_env(dest, src, ENV_MAX_SIZE);
+ }
+
+ env1 = uboot_env->env[ENV1];
+ crc1 = uboot_crc32(0, env1->data, ENV_DATA_SIZE);
+
+ env2 = uboot_env->env[ENV2];
+ crc2 = uboot_crc32(0, env2->data, ENV_DATA_SIZE);
+
+ printk("crc1:%08x,%08x; crc2:%08x,%08x\n", env1->crc,crc1,env2->crc,crc2);
+
+ if( crc1 == env1->crc && crc2 == env2->crc && crc1 == crc2){
+ printk("env1==env2\n");
+ }else if(crc1 != env1->crc && crc2 == env2->crc){
+ printk("env2->env1\n");
+ return save_env(ENV1, env2);
+ }else if(crc1 == env1->crc && crc2 != env2->crc ){
+ printk("env1->env2\n");
+ return save_env(ENV2, env1);
+ }else if(crc1 == env1->crc && crc2 == env2->crc && crc1 != crc2 ){
+ printk("env1<->env2\n");
+ return sync_persist_env(env1,env2);
+ }else{
+ printk("env1,env2 invalid\n");
+ }
+
+ return 0;
+}
+
/* Get the system parameter if existed.
 *
 * - varname: parameter name
 * - varval : a buffer to store the parameter
 * - varlen : the buffer size for the varval pointer
 *
 * Looks @varname up in the cached primary environment copy (ENV1) under
 * s_wmt_env_lock, lazily initializing the cache on first use.
 *
 * return 0 if success; 10 when the value was truncated (then *varlen is
 * updated to the size actually needed); 1 when the variable does not
 * exist; -EAGAIN when the lock is interrupted; -EIO on init failure.
 */
int wmt_getsyspara(char *varname, unsigned char *varval, int *varlen)
{
	int i, j, k, nxt = 0;
	int rcode = 0;

	int ret = down_interruptible(&s_wmt_env_lock);
	if (ret) {
		printk(KERN_WARNING "lock s_wmt_env_lock error: %d\n", ret);
		return -EAGAIN;
	}

	/* lazily map the flash window and load the env cache */
	if (env_init_if_needed()){
		rcode = -EIO;
		goto out;
	}

	k = -1;
	i = 0;
	/* walk the NUL-separated 'name=value' entries of ENV1 */
	for (j = 0; env_get_char(ENV1, j) != '\0'; j = nxt+1) {

		for (nxt = j; env_get_char(ENV1, nxt) != '\0'; ++nxt)
			;
		k = envmatch(ENV1, (unsigned char *)varname, j);
		if (k < 0)
			continue;
		/* copy the value, bounded by the caller's buffer length */
		while (k < nxt && i < *varlen)
			varval[i++] = env_get_char(ENV1, k++);
		/* NOTE(review): when the value exactly fills *varlen bytes,
		 * this terminator writes varval[*varlen], one past the
		 * declared length — existing callers pass a length one
		 * smaller than their buffer; confirm before relying on it. */
		if( k == nxt)
			varval[i] = '\0';
		break;
	}

	/* value found but truncated: report the size actually needed */
	if (k < nxt && k > 0) {
		printk(KERN_WARNING "## Warning: \"%s\" size(%d) exceed buffer size(%d)\n",
			varname, i+(nxt-k), *varlen);
		*varlen = i+(nxt-k);
		rcode = 10;
		goto out;
	}

	/* k < 0: variable not found */
	if (k < 0) {
		rcode++;
	}
out:
	up(&s_wmt_env_lock);
	return rcode;
}
EXPORT_SYMBOL_GPL(wmt_getsyspara);
+
+/* Set the system parameter.
+ *
+ * - varname: parameter name
+ * - varval : the buffer to store the system parameter value for setting.
+ *
+ * If the pointer is NULL and the system parameter is existed,
+ * then the system parameter will be clear.
+ *
+ * return 0 if success.
+ */
+int wmt_setsyspara(char *name, char *varval)
+{
+ int len, oldval;
+ int rcode = 0;
+ unsigned char *env, *nxt = NULL;
+ unsigned char *env_data;
+ int is_otp_env;
+ int do_wsf = 1;
+ unsigned char *varname = name;
+
+ if(*varname == '\0')
+ return -EINVAL;
+
+ int ret = down_interruptible(&s_wmt_env_lock);
+ if (ret) {
+ printk(KERN_WARNING "lock s_wmt_env_lock error: %d\n", ret);
+ return -EAGAIN;
+ }
+
+ /*
+ * parameter name start with '~' is store in buf
+ */
+ if(*varname == '~'){
+ do_wsf = 0;
+ varname++;
+ }
+
+ if (env_init_if_needed()){
+ rcode = -EIO;
+ goto out;
+ }
+
+ if(!strcmp(varname,"esync")){
+ rcode = esync();
+ goto out;
+ }
+
+ is_otp_env = !strncmp(varname, "otp.", 4);
+
+ if( !is_otp_env && wmt_is_secure_enabled() ) {
+ //check white list for u-boot env
+ int i;
+ for( i = 0; env_white_list[i]; i++) {
+ if( !strcmp(varname, env_white_list[i]))
+ break;
+ }
+ if(!env_white_list[i]) {
+ printk("Not in env white list, disable write <%s>\n", varname);
+ rcode = -EPERM;
+ goto out;
+ }
+ }
+
+ if (strcmp(varname, "boot-method") == 0) {
+ if( strcmp(varval, "boot-nand-ota-normal") &&
+ strcmp(varval, "boot-nand-ota-recovery")&&
+ strcmp(varval, "boot-nand-otz-normal") &&
+ strcmp(varval, "boot-nand-otz-recovery")) {
+ printk("boot-method unsupported varval: %s\n", varval);
+ rcode = -EINVAL;
+ goto out;
+ }
+ }
+
+ env_data = uboot_env->env[ENV1]->data;
+
+ /*
+ * search if variable with this name already exists
+ */
+ oldval = -1;
+ for (env = env_data; *env; env = nxt+1) {
+ for (nxt = env; *nxt; ++nxt)
+ ;
+ oldval = envmatch(ENV1, (unsigned char *)varname, env - env_data);
+ if (oldval >= 0)
+ break;
+ }
+
+ /*
+ * Delete any existing definition
+ */
+ if (oldval >= 0) {
+ /* otp env can not overwrite */
+ if (is_otp_env) {
+ rcode = -EEXIST;
+ printk("Warning:OTP env can not overwrite!\n");
+ goto out;
+ }
+
+ if (*++nxt == '\0') {
+ if (env > env_data)
+ env--;
+ else
+ *env = '\0';
+ } else {
+ for (;;) {
+ *env = *nxt++;
+ if ((*env == '\0') && (*nxt == '\0'))
+ break;
+ ++env;
+ }
+ }
+ *++env = '\0';
+ }
+
+ if ((varval == NULL) || (*varval == '\0')) {
+ if (oldval < 0) {
+ pr_info("No assigned any value for %s\n", varname);
+ rcode++;
+ } else {
+ /*
+ * varname will be clear
+ */
+ pr_info("Delete environment variable: %s\n", varname);
+ if (update_env(do_wsf))
+ rcode++;
+ else
+ rcode = 0;
+ }
+ goto out;
+ }
+
+ /*
+ * Append new definition at the end
+ */
+ for (env = env_data; *env || *(env+1); ++env)
+ ;
+ if (env > env_data)
+ ++env;
+ /*
+ * Overflow when:
+ * "varname" + "=" + "val" +"\0\0" > ENV_SIZE - (env-env_data)
+ */
+ len = strlen(varname) + 2;
+ /* add '=' for first arg, ' ' for all others */
+ len += strlen(varval);
+ if (len > (&env_data[ENV_MAX_SIZE]-env)) {
+ printk(KERN_WARNING "## Warning: environment overflow, \"%s\" deleted\n",
+ varname);
+ rcode++;
+ goto out;
+ }
+ while ((*env = *varname++) != '\0')
+ env++;
+
+ *env = '=';
+ while ((*++env = *varval++) != '\0')
+ ;
+
+ /* end is marked with double '\0' */
+ *++env = '\0';
+
+ if (update_env(do_wsf))
+ rcode++;
+ else
+ rcode = 0;
+
+out:
+ up(&s_wmt_env_lock);
+ return rcode;
+}
+EXPORT_SYMBOL_GPL(wmt_setsyspara);
+
/*
 * Get the WMT SoC chipid & bondingid.
 *
 * NOTE(review): this stores the SCC_CHIP_ID_ADDR / BONDING_OPTION_4BYTE_ADDR
 * macros themselves into the out-parameters — if those macros expand to
 * plain register addresses rather than REG32-style accessors, callers
 * receive addresses, not register values; confirm against mach/hardware.h.
 */
int wmt_getsocinfo(unsigned int *chipid, unsigned int *bondingid)
{
	*chipid = SCC_CHIP_ID_ADDR;
	*bondingid = BONDING_OPTION_4BYTE_ADDR;
	return 0;
}
EXPORT_SYMBOL_GPL(wmt_getsocinfo);
+
/*
 * wmt_is_secure_enabled - report whether secure boot is provisioned
 *
 * Probes once for the "otp.rsa.pem" public key in the environment and
 * caches the outcome in a static flag (0 = not probed yet, 1 = present,
 * -1 = absent). Returns 1 when secure mode is enabled, 0 otherwise.
 */
int wmt_is_secure_enabled(void)
{
	static int secure_enabled;

	if (secure_enabled == 0) {
		char value[512] = {'\0',};
		int len = 511;

		if (wmt_getsyspara("otp.rsa.pem", value, &len) == 0 && len > 0)
			secure_enabled = 1;
		else
			secure_enabled = -1;
	}

	return secure_enabled == 1;
}
EXPORT_SYMBOL_GPL(wmt_is_secure_enabled);
+
/* Map an ASCII hex digit ('0'-'9', 'a'-'f'; lowercase only, matching what
 * the old ladder accepted) to its value, or -1 for any other character. */
static int hex_nibble(uint8_t c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return -1;
}

/*
 * do_rsa - recover the image hash from an RSA signature
 * @sig_data:       signature blob
 * @sig_len:        signature length in bytes
 * @publickey:      PEM public key (NUL-terminated)
 * @hash_signature: out, the 32 decoded hash bytes
 *
 * rsa_check() yields the hash as 64 lowercase-hex characters; decode
 * each character pair into one byte. The old version did this with a
 * 16-way if/else ladder per nibble — replaced by hex_nibble().
 *
 * Returns 0 on success, 2 when the RSA operation fails, 3 when a
 * non-hex character is encountered.
 */
static int do_rsa(uint8_t *sig_data, size_t sig_len, uint8_t *publickey,
		  uint8_t *hash_signature)
{
	uint8_t out_buf[128];
	int i, j;

	if (rsa_check((unsigned int)publickey, strlen(publickey),
		      (uint32_t)sig_data, sig_len, out_buf)) {
		printk("decode signature fail\n");
		return 2;
	}

	for (i = 0, j = 0; i < 64; i += 2, j++) {
		int hi = hex_nibble(out_buf[i]);
		int lo = hex_nibble(out_buf[i + 1]);

		if (hi < 0 || lo < 0) {
			printk("change from character to digit fail out_buf[%d]=%c\n", i, out_buf[i]);
			return 3;
		}
		hash_signature[j] = (uint8_t)((hi << 4) | lo);
	}

	return 0;
}
+
/*
 * do_hash - compute the SHA-256 digest of a buffer
 * @buf: data to hash
 * @len: number of bytes in @buf
 * @sha256sum: out, the 32-byte digest
 *
 * Thin wrapper around the sha256_* helpers (linux/sha256.h).
 * Always returns 0.
 */
static int do_hash(uint8_t *buf, size_t len, unsigned char sha256sum[32])
{
	sha256_context ctx;

	sha256_starts(&ctx);

	sha256_update(&ctx, buf, len);

	sha256_finish(&ctx, sha256sum);

#ifdef DEBUG
	/* dump the digest as hex in DEBUG builds */
	{
		int j;
		for (j = 0; j < 32; j++) {
			printk( "%02x", sha256sum[j] );
		}
		printk("\n");
	}
#endif
	return 0;
}
+
+int wmt_write_signed_image(struct write_signed_image *w)
+{
+ uint8_t hash_sig[64], hash_img[32];
+ char publickey[400];
+ size_t len = sizeof(publickey);
+ uint32_t offset;
+ size_t size;
+ void *io_base;
+ int rc = 0, i;
+
+
+ if (wmt_getsyspara("otp.rsa.pem", publickey, &len) == 0) {
+
+ if (do_rsa(w->sig_data, w->sig_len, publickey, hash_sig)) {
+ printk("do rsa failed\n");
+ return -1;
+ }
+
+
+
+ if (do_hash(w->img_data, w->img_len, hash_img)) {
+ printk("do hash failed\n");
+ return -2;
+ }
+
+ if (memcmp(hash_sig, hash_img, 32)) {
+ for (i = 0; i < 32; i++)
+ printk("%2.2x", hash_sig[i]);
+ printk("\n");
+ for (i = 0; i < 32; i++)
+ printk("%2.2x", hash_img[i]);
+ printk("\n image check fail\n");
+ return -3;
+ }
+ pr_info("Decrypto signature success\n");
+ } else
+ pr_info("otp.rsa.pem not found\n");
+
+ // update
+
+ switch (w->type) {
+ case SIGNED_IMAGE_TYPE_WLOAD: // "w-load-SF",
+ offset = 0x00070000;
+ size = 0x00010000;
+ break;
+ case SIGNED_IMAGE_TYPE_UBOOT: // "u-boot-SF",
+ offset = 0x00000000;
+ size = 0x00050000;
+ break;
+ case SIGNED_IMAGE_TYPE_UBOOT_ENV: // "u-boot env. cfg. 1-SF",
+ offset = 0x00050000;
+ size = 0x00010000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (w->img_len > size) {
+ printk(" max size 0x%x\n", size);
+ return -E2BIG;
+ }
+
+ rc = down_interruptible(&s_wmt_env_lock);
+ if (rc) {
+ printk(KERN_WARNING "lock s_wmt_env_lock error: %d\n", rc);
+ return -EAGAIN;
+ }
+
+ // Erase
+ for (len = 0; len < size; len += 0x10000) {
+ printk(" Erase flash 0x%x\n", offset + len);
+
+ rc = spi_flash_sector_erase(offset + len,
+ (struct sfreg_t *)SF_BASE_ADDR);
+ if (rc != ERR_OK) {
+ pr_err("spi_flash_sector_erase failed\n");
+ rc = -EIO;
+ goto out;
+ }
+ }
+
+ // Write
+ io_base = uboot_env->io_base;
+
+ spi_flash_sector_write(((struct sfreg_t *)SF_BASE_ADDR),
+ io_base, offset, w->img_len, w->img_data);
+
+out:
+ up(&s_wmt_env_lock);
+ return rc;
+}
+
+
+/*
+ * reload the env from partition.
+ * After Hibernation restore, call this function since
+ * the env may has been changed before restoration.
+ * This function is considered only be called by hibernation related code.
+ * Do not call this function from different kernel thread at the same time.
+ */
+int env_cache_flush(void)
+{
+ int i;
+ int ret;
+ u32 crc1,crc2;
+ char *dest,*src;
+ struct env_t *env1,*env2;
+
+ /* copy env raw data */
+ for(i = 0; i < 2; i++){
+ dest = (char *)uboot_env->env[i];
+ src = (char *)(uboot_env->offset[i] + uboot_env->io_base);
+ memset(dest, 0x00, ENV_MAX_SIZE);
+ sf_copy_env(dest, src, ENV_MAX_SIZE);
+ }
+
+ env1 = uboot_env->env[ENV1];
+ crc1 = uboot_crc32(0, env1->data, ENV_DATA_SIZE);
+
+ env2 = uboot_env->env[ENV2];
+ crc2 = uboot_crc32(0, env2->data, ENV_DATA_SIZE);
+
+ if(crc1 != env1->crc){
+ printk("Error:env1 crc error!");
+ return 1;
+ }
+
+ if(crc2 != env2->crc)
+ printk("Warning:env2 crc error!");
+
+ return 0;
+}