author    Srikant Patnaik  2015-01-11 12:28:04 +0530
committer Srikant Patnaik  2015-01-11 12:28:04 +0530
commit    871480933a1c28f8a9fed4c4d34d06c439a7a422 (patch)
tree      8718f573808810c2a1e8cb8fb6ac469093ca2784 /ANDROID_3.4.5/drivers/edac
parent    9d40ac5867b9aefe0722bc1f110b965ff294d30d (diff)
Moved, renamed, and deleted files
The original directory structure was scattered and unorganized. These changes reorganize the tree to follow the standard kernel source layout.
Diffstat (limited to 'ANDROID_3.4.5/drivers/edac')
-rw-r--r--  ANDROID_3.4.5/drivers/edac/Kconfig  297
-rw-r--r--  ANDROID_3.4.5/drivers/edac/Makefile  57
-rw-r--r--  ANDROID_3.4.5/drivers/edac/amd64_edac.c  2849
-rw-r--r--  ANDROID_3.4.5/drivers/edac/amd64_edac.h  462
-rw-r--r--  ANDROID_3.4.5/drivers/edac/amd64_edac_dbg.c  72
-rw-r--r--  ANDROID_3.4.5/drivers/edac/amd64_edac_inj.c  213
-rw-r--r--  ANDROID_3.4.5/drivers/edac/amd76x_edac.c  366
-rw-r--r--  ANDROID_3.4.5/drivers/edac/amd8111_edac.c  594
-rw-r--r--  ANDROID_3.4.5/drivers/edac/amd8111_edac.h  130
-rw-r--r--  ANDROID_3.4.5/drivers/edac/amd8131_edac.c  379
-rw-r--r--  ANDROID_3.4.5/drivers/edac/amd8131_edac.h  119
-rw-r--r--  ANDROID_3.4.5/drivers/edac/cell_edac.c  262
-rw-r--r--  ANDROID_3.4.5/drivers/edac/cpc925_edac.c  1080
-rw-r--r--  ANDROID_3.4.5/drivers/edac/e752x_edac.c  1449
-rw-r--r--  ANDROID_3.4.5/drivers/edac/e7xxx_edac.c  576
-rw-r--r--  ANDROID_3.4.5/drivers/edac/edac_core.h  528
-rw-r--r--  ANDROID_3.4.5/drivers/edac/edac_device.c  723
-rw-r--r--  ANDROID_3.4.5/drivers/edac/edac_device_sysfs.c  886
-rw-r--r--  ANDROID_3.4.5/drivers/edac/edac_mc.c  918
-rw-r--r--  ANDROID_3.4.5/drivers/edac/edac_mc_sysfs.c  1064
-rw-r--r--  ANDROID_3.4.5/drivers/edac/edac_module.c  145
-rw-r--r--  ANDROID_3.4.5/drivers/edac/edac_module.h  81
-rw-r--r--  ANDROID_3.4.5/drivers/edac/edac_pci.c  499
-rw-r--r--  ANDROID_3.4.5/drivers/edac/edac_pci_sysfs.c  770
-rw-r--r--  ANDROID_3.4.5/drivers/edac/edac_stub.c  91
-rw-r--r--  ANDROID_3.4.5/drivers/edac/i3000_edac.c  553
-rw-r--r--  ANDROID_3.4.5/drivers/edac/i3200_edac.c  528
-rw-r--r--  ANDROID_3.4.5/drivers/edac/i5000_edac.c  1580
-rw-r--r--  ANDROID_3.4.5/drivers/edac/i5100_edac.c  1085
-rw-r--r--  ANDROID_3.4.5/drivers/edac/i5400_edac.c  1465
-rw-r--r--  ANDROID_3.4.5/drivers/edac/i7300_edac.c  1248
-rw-r--r--  ANDROID_3.4.5/drivers/edac/i7core_edac.c  2506
-rw-r--r--  ANDROID_3.4.5/drivers/edac/i82443bxgx_edac.c  467
-rw-r--r--  ANDROID_3.4.5/drivers/edac/i82860_edac.c  353
-rw-r--r--  ANDROID_3.4.5/drivers/edac/i82875p_edac.c  596
-rw-r--r--  ANDROID_3.4.5/drivers/edac/i82975x_edac.c  698
-rw-r--r--  ANDROID_3.4.5/drivers/edac/mce_amd.c  822
-rw-r--r--  ANDROID_3.4.5/drivers/edac/mce_amd.h  93
-rw-r--r--  ANDROID_3.4.5/drivers/edac/mce_amd_inj.c  173
-rw-r--r--  ANDROID_3.4.5/drivers/edac/mpc85xx_edac.c  1235
-rw-r--r--  ANDROID_3.4.5/drivers/edac/mpc85xx_edac.h  166
-rw-r--r--  ANDROID_3.4.5/drivers/edac/mv64x60_edac.c  890
-rw-r--r--  ANDROID_3.4.5/drivers/edac/mv64x60_edac.h  114
-rw-r--r--  ANDROID_3.4.5/drivers/edac/pasemi_edac.c  306
-rw-r--r--  ANDROID_3.4.5/drivers/edac/ppc4xx_edac.c  1439
-rw-r--r--  ANDROID_3.4.5/drivers/edac/ppc4xx_edac.h  172
-rw-r--r--  ANDROID_3.4.5/drivers/edac/r82600_edac.c  420
-rw-r--r--  ANDROID_3.4.5/drivers/edac/sb_edac.c  1899
-rw-r--r--  ANDROID_3.4.5/drivers/edac/tile_edac.c  258
-rw-r--r--  ANDROID_3.4.5/drivers/edac/x38_edac.c  523
50 files changed, 0 insertions, 34199 deletions
diff --git a/ANDROID_3.4.5/drivers/edac/Kconfig b/ANDROID_3.4.5/drivers/edac/Kconfig
deleted file mode 100644
index fdffa1be..00000000
--- a/ANDROID_3.4.5/drivers/edac/Kconfig
+++ /dev/null
@@ -1,297 +0,0 @@
-#
-# EDAC Kconfig
-# Copyright (c) 2008 Doug Thompson www.softwarebitmaker.com
-# Licensed and distributed under the GPL
-#
-
-menuconfig EDAC
- bool "EDAC (Error Detection And Correction) reporting"
- depends on HAS_IOMEM
- depends on X86 || PPC || TILE
- help
- EDAC is designed to report errors in the core system.
- These are low-level errors that are reported in the CPU or
- supporting chipset or other subsystems:
- memory errors, cache errors, PCI errors, thermal throttling, etc.
- If unsure, select 'Y'.
-
- If this code is reporting problems on your system, please
- see the EDAC project web pages for more information at:
-
- <http://bluesmoke.sourceforge.net/>
-
- and:
-
- <http://buttersideup.com/edacwiki>
-
- There is also a mailing list for the EDAC project, which can
- be found via the sourceforge page.
-
-if EDAC
-
-comment "Reporting subsystems"
-
-config EDAC_DEBUG
- bool "Debugging"
- help
- This turns on debugging information for the entire EDAC
- sub-system. You can load the module with "debug_level=x"; currently
- there are four debug levels (x=0,1,2,3 from low to high).
- Usually you should select 'N'.
-
-config EDAC_DECODE_MCE
- tristate "Decode MCEs in human-readable form (only on AMD for now)"
- depends on CPU_SUP_AMD && X86_MCE_AMD
- default y
- ---help---
- Enable this option if you want to decode Machine Check Exceptions
- occurring on your machine in human-readable form.
-
- You should definitely say Y here in case you want to decode MCEs
- which occur really early upon boot, before the module infrastructure
- has been initialized.
-
-config EDAC_MCE_INJ
- tristate "Simple MCE injection interface over /sysfs"
- depends on EDAC_DECODE_MCE
- default n
- help
- This is a simple interface to inject MCEs over /sysfs and test
- the MCE decoding code in EDAC.
-
- This is currently AMD-only.
-
-config EDAC_MM_EDAC
- tristate "Main Memory EDAC (Error Detection And Correction) reporting"
- help
- Some systems are able to detect and correct errors in main
- memory. EDAC can report statistics on memory error
- detection and correction (EDAC - or commonly referred to as ECC
- errors). EDAC will also try to decode where these errors
- occurred so that a particular failing memory module can be
- replaced. If unsure, select 'Y'.
-
-config EDAC_AMD64
- tristate "AMD64 (Opteron, Athlon64) K8, F10h"
- depends on EDAC_MM_EDAC && AMD_NB && X86_64 && EDAC_DECODE_MCE
- help
- Support for error detection and correction of DRAM ECC errors on
- the AMD64 families of memory controllers (K8 and F10h)
-
-config EDAC_AMD64_ERROR_INJECTION
- bool "Sysfs HW Error injection facilities"
- depends on EDAC_AMD64
- help
- Recent Opterons (Family 10h and later) provide for Memory Error
- Injection into the ECC detection circuits. The amd64_edac module
- allows the operator/user to inject Uncorrectable and Correctable
- errors into DRAM.
-
- When enabled, in each of the respective memory controller directories
- (/sys/devices/system/edac/mc/mcX), there are 3 input files:
-
- - inject_section (0..3, 16-byte section of 64-byte cacheline),
- - inject_word (0..8, 16-bit word of 16-byte section),
- - inject_ecc_vector (hex ecc vector: select bits of inject word)
-
- In addition, there are two control files, inject_read and inject_write,
- which trigger the DRAM ECC Read and Write respectively.
-
-config EDAC_AMD76X
- tristate "AMD 76x (760, 762, 768)"
- depends on EDAC_MM_EDAC && PCI && X86_32
- help
- Support for error detection and correction on the AMD 76x
- series of chipsets used with the Athlon processor.
-
-config EDAC_E7XXX
- tristate "Intel e7xxx (e7205, e7500, e7501, e7505)"
- depends on EDAC_MM_EDAC && PCI && X86_32
- help
- Support for error detection and correction on the Intel
- E7205, E7500, E7501 and E7505 server chipsets.
-
-config EDAC_E752X
- tristate "Intel e752x (e7520, e7525, e7320) and 3100"
- depends on EDAC_MM_EDAC && PCI && X86 && HOTPLUG
- help
- Support for error detection and correction on the Intel
- E7520, E7525, E7320 server chipsets.
-
-config EDAC_I82443BXGX
- tristate "Intel 82443BX/GX (440BX/GX)"
- depends on EDAC_MM_EDAC && PCI && X86_32
- depends on BROKEN
- help
- Support for error detection and correction on the Intel
- 82443BX/GX memory controllers (440BX/GX chipsets).
-
-config EDAC_I82875P
- tristate "Intel 82875p (D82875P, E7210)"
- depends on EDAC_MM_EDAC && PCI && X86_32
- help
- Support for error detection and correction on the Intel
- D82875P and E7210 server chipsets.
-
-config EDAC_I82975X
- tristate "Intel 82975x (D82975x)"
- depends on EDAC_MM_EDAC && PCI && X86
- help
- Support for error detection and correction on the Intel
- D82975x server chipsets.
-
-config EDAC_I3000
- tristate "Intel 3000/3010"
- depends on EDAC_MM_EDAC && PCI && X86
- help
- Support for error detection and correction on the Intel
- 3000 and 3010 server chipsets.
-
-config EDAC_I3200
- tristate "Intel 3200"
- depends on EDAC_MM_EDAC && PCI && X86 && EXPERIMENTAL
- help
- Support for error detection and correction on the Intel
- 3200 and 3210 server chipsets.
-
-config EDAC_X38
- tristate "Intel X38"
- depends on EDAC_MM_EDAC && PCI && X86
- help
- Support for error detection and correction on the Intel
- X38 server chipsets.
-
-config EDAC_I5400
- tristate "Intel 5400 (Seaburg) chipsets"
- depends on EDAC_MM_EDAC && PCI && X86
- help
- Support for error detection and correction on the Intel
- i5400 MCH chipset (Seaburg).
-
-config EDAC_I7CORE
- tristate "Intel i7 Core (Nehalem) processors"
- depends on EDAC_MM_EDAC && PCI && X86 && X86_MCE_INTEL
- help
- Support for error detection and correction on the Intel
- i7 Core (Nehalem) Integrated Memory Controller that exists on
- newer processors like i7 Core, i7 Core Extreme, Xeon 35xx
- and Xeon 55xx processors.
-
-config EDAC_I82860
- tristate "Intel 82860"
- depends on EDAC_MM_EDAC && PCI && X86_32
- help
- Support for error detection and correction on the Intel
- 82860 chipset.
-
-config EDAC_R82600
- tristate "Radisys 82600 embedded chipset"
- depends on EDAC_MM_EDAC && PCI && X86_32
- help
- Support for error detection and correction on the Radisys
- 82600 embedded chipset.
-
-config EDAC_I5000
- tristate "Intel Greencreek/Blackford chipset"
- depends on EDAC_MM_EDAC && X86 && PCI
- help
- Support for error detection and correction on the Intel
- Greencreek/Blackford chipsets.
-
-config EDAC_I5100
- tristate "Intel San Clemente MCH"
- depends on EDAC_MM_EDAC && X86 && PCI
- help
- Support for error detection and correction on the Intel
- San Clemente MCH.
-
-config EDAC_I7300
- tristate "Intel Clarksboro MCH"
- depends on EDAC_MM_EDAC && X86 && PCI
- help
- Support for error detection and correction on the Intel
- Clarksboro MCH (Intel 7300 chipset).
-
-config EDAC_SBRIDGE
- tristate "Intel Sandy-Bridge Integrated MC"
- depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
- depends on PCI_MMCONFIG && EXPERIMENTAL
- help
- Support for error detection and correction on the Intel
- Sandy Bridge Integrated Memory Controller.
-
-config EDAC_MPC85XX
- tristate "Freescale MPC83xx / MPC85xx"
- depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || PPC_85xx)
- help
- Support for error detection and correction on the Freescale
- MPC8349, MPC8560, MPC8540, MPC8548
-
-config EDAC_MV64X60
- tristate "Marvell MV64x60"
- depends on EDAC_MM_EDAC && MV64X60
- help
- Support for error detection and correction on the Marvell
- MV64360 and MV64460 chipsets.
-
-config EDAC_PASEMI
- tristate "PA Semi PWRficient"
- depends on EDAC_MM_EDAC && PCI
- depends on PPC_PASEMI
- help
- Support for error detection and correction on PA Semi
- PWRficient.
-
-config EDAC_CELL
- tristate "Cell Broadband Engine memory controller"
- depends on EDAC_MM_EDAC && PPC_CELL_COMMON
- help
- Support for error detection and correction on the
- Cell Broadband Engine internal memory controller
- on platforms without a hypervisor.
-
-config EDAC_PPC4XX
- tristate "PPC4xx IBM DDR2 Memory Controller"
- depends on EDAC_MM_EDAC && 4xx
- help
- This enables support for EDAC on the ECC memory used
- with the IBM DDR2 memory controller found in various
- PowerPC 4xx embedded processors such as the 405EX[r],
- 440SP, 440SPe, 460EX, 460GT and 460SX.
-
-config EDAC_AMD8131
- tristate "AMD8131 HyperTransport PCI-X Tunnel"
- depends on EDAC_MM_EDAC && PCI && PPC_MAPLE
- help
- Support for error detection and correction on the
- AMD8131 HyperTransport PCI-X Tunnel chip.
- Note: add further Kconfig dependencies if this driver is adopted
- on machines other than Maple.
-
-config EDAC_AMD8111
- tristate "AMD8111 HyperTransport I/O Hub"
- depends on EDAC_MM_EDAC && PCI && PPC_MAPLE
- help
- Support for error detection and correction on the
- AMD8111 HyperTransport I/O Hub chip.
- Note: add further Kconfig dependencies if this driver is adopted
- on machines other than Maple.
-
-config EDAC_CPC925
- tristate "IBM CPC925 Memory Controller (PPC970FX)"
- depends on EDAC_MM_EDAC && PPC64
- help
- Support for error detection and correction on the
- IBM CPC925 Bridge and Memory Controller, which is
- a companion chip to the PowerPC 970 family of
- processors.
-
-config EDAC_TILE
- tristate "Tilera Memory Controller"
- depends on EDAC_MM_EDAC && TILE
- default y
- help
- Support for error detection and correction on the
- Tilera memory controller.
-
-endif # EDAC
diff --git a/ANDROID_3.4.5/drivers/edac/Makefile b/ANDROID_3.4.5/drivers/edac/Makefile
deleted file mode 100644
index 196a63dd..00000000
--- a/ANDROID_3.4.5/drivers/edac/Makefile
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# Makefile for the Linux kernel EDAC drivers.
-#
-# Copyright 02 Jul 2003, Linux Networx (http://lnxi.com)
-# This file may be distributed under the terms of the
-# GNU General Public License.
-#
-
-obj-$(CONFIG_EDAC) := edac_stub.o
-obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o
-
-edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o
-edac_core-y += edac_module.o edac_device_sysfs.o
-
-ifdef CONFIG_PCI
-edac_core-y += edac_pci.o edac_pci_sysfs.o
-endif
-
-obj-$(CONFIG_EDAC_MCE_INJ) += mce_amd_inj.o
-
-edac_mce_amd-y := mce_amd.o
-obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o
-
-obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o
-obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o
-obj-$(CONFIG_EDAC_I5000) += i5000_edac.o
-obj-$(CONFIG_EDAC_I5100) += i5100_edac.o
-obj-$(CONFIG_EDAC_I5400) += i5400_edac.o
-obj-$(CONFIG_EDAC_I7300) += i7300_edac.o
-obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
-obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o
-obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
-obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
-obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
-obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o
-obj-$(CONFIG_EDAC_I82975X) += i82975x_edac.o
-obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
-obj-$(CONFIG_EDAC_I3200) += i3200_edac.o
-obj-$(CONFIG_EDAC_X38) += x38_edac.o
-obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
-obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
-
-amd64_edac_mod-y := amd64_edac.o
-amd64_edac_mod-$(CONFIG_EDAC_DEBUG) += amd64_edac_dbg.o
-amd64_edac_mod-$(CONFIG_EDAC_AMD64_ERROR_INJECTION) += amd64_edac_inj.o
-
-obj-$(CONFIG_EDAC_AMD64) += amd64_edac_mod.o
-
-obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
-obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac.o
-obj-$(CONFIG_EDAC_MV64X60) += mv64x60_edac.o
-obj-$(CONFIG_EDAC_CELL) += cell_edac.o
-obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o
-obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
-obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
-
-obj-$(CONFIG_EDAC_TILE) += tile_edac.o
diff --git a/ANDROID_3.4.5/drivers/edac/amd64_edac.c b/ANDROID_3.4.5/drivers/edac/amd64_edac.c
deleted file mode 100644
index 7ef73c91..00000000
--- a/ANDROID_3.4.5/drivers/edac/amd64_edac.c
+++ /dev/null
@@ -1,2849 +0,0 @@
-#include "amd64_edac.h"
-#include <asm/amd_nb.h>
-
-static struct edac_pci_ctl_info *amd64_ctl_pci;
-
-static int report_gart_errors;
-module_param(report_gart_errors, int, 0644);
-
-/*
- * Set by command line parameter. If BIOS has enabled the ECC, this override is
- * cleared to prevent re-enabling the hardware by this driver.
- */
-static int ecc_enable_override;
-module_param(ecc_enable_override, int, 0644);
-
-static struct msr __percpu *msrs;
-
-/*
- * count successfully initialized driver instances for setup_pci_device()
- */
-static atomic_t drv_instances = ATOMIC_INIT(0);
-
-/* Per-node driver instances */
-static struct mem_ctl_info **mcis;
-static struct ecc_settings **ecc_stngs;
-
-/*
- * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
- * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
- * or higher value'.
- *
- *FIXME: Produce a better mapping/linearisation.
- */
-struct scrubrate {
- u32 scrubval; /* bit pattern for scrub rate */
- u32 bandwidth; /* bandwidth consumed (bytes/sec) */
-} scrubrates[] = {
- { 0x01, 1600000000UL},
- { 0x02, 800000000UL},
- { 0x03, 400000000UL},
- { 0x04, 200000000UL},
- { 0x05, 100000000UL},
- { 0x06, 50000000UL},
- { 0x07, 25000000UL},
- { 0x08, 12284069UL},
- { 0x09, 6274509UL},
- { 0x0A, 3121951UL},
- { 0x0B, 1560975UL},
- { 0x0C, 781440UL},
- { 0x0D, 390720UL},
- { 0x0E, 195300UL},
- { 0x0F, 97650UL},
- { 0x10, 48854UL},
- { 0x11, 24427UL},
- { 0x12, 12213UL},
- { 0x13, 6101UL},
- { 0x14, 3051UL},
- { 0x15, 1523UL},
- { 0x16, 761UL},
- { 0x00, 0UL}, /* scrubbing off */
-};
-
-static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
- u32 *val, const char *func)
-{
- int err = 0;
-
- err = pci_read_config_dword(pdev, offset, val);
- if (err)
- amd64_warn("%s: error reading F%dx%03x.\n",
- func, PCI_FUNC(pdev->devfn), offset);
-
- return err;
-}
-
-int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
- u32 val, const char *func)
-{
- int err = 0;
-
- err = pci_write_config_dword(pdev, offset, val);
- if (err)
- amd64_warn("%s: error writing to F%dx%03x.\n",
- func, PCI_FUNC(pdev->devfn), offset);
-
- return err;
-}
-
-/*
- *
- * Depending on the family, F2 DCT reads need special handling:
- *
- * K8: has a single DCT only
- *
- * F10h: each DCT has its own set of regs
- * DCT0 -> F2x040..
- * DCT1 -> F2x140..
- *
- * F15h: we select which DCT we access using F1x10C[DctCfgSel]
- *
- */
-static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
- const char *func)
-{
- if (addr >= 0x100)
- return -EINVAL;
-
- return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
-}
-
-static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
- const char *func)
-{
- return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
-}
-
-/*
- * Select DCT to which PCI cfg accesses are routed
- */
-static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
-{
- u32 reg = 0;
-
- amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
- reg &= 0xfffffffe;
- reg |= dct;
- amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
-}
-
-static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
- const char *func)
-{
- u8 dct = 0;
-
- if (addr >= 0x140 && addr <= 0x1a0) {
- dct = 1;
- addr -= 0x100;
- }
-
- f15h_select_dct(pvt, dct);
-
- return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
-}
-
-/*
- * Memory scrubber control interface. For K8, memory scrubbing is handled by
- * hardware and can involve L2 cache, dcache as well as the main memory. With
- * F10, this is extended to L3 cache scrubbing on CPU models sporting that
- * functionality.
- *
- * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
- * (dram) over to cache lines. This is nasty, so we will use bandwidth in
- * bytes/sec for the setting.
- *
- * Currently, we only do dram scrubbing. If the scrubbing is done in software on
- * other archs, we might not have access to the caches directly.
- */
-
-/*
- * Scan the scrub rate mapping table for a close or matching bandwidth value to
- * issue. If the requested rate is too big, use the last maximum value found.
- */
-static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
-{
- u32 scrubval;
- int i;
-
- /*
- * map the configured rate (new_bw) to a value specific to the AMD64
- * memory controller and apply to register. Search for the first
- * bandwidth entry that is greater or equal than the setting requested
- * and program that. If at last entry, turn off DRAM scrubbing.
- */
- for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
- /*
- * skip scrub rates which aren't recommended
- * (see F10 BKDG, F3x58)
- */
- if (scrubrates[i].scrubval < min_rate)
- continue;
-
- if (scrubrates[i].bandwidth <= new_bw)
- break;
-
- /*
- * if no suitable bandwidth found, turn off DRAM scrubbing
- * entirely by falling back to the last element in the
- * scrubrates array.
- */
- }
-
- scrubval = scrubrates[i].scrubval;
-
- pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
-
- if (scrubval)
- return scrubrates[i].bandwidth;
-
- return 0;
-}
-
-static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- u32 min_scrubrate = 0x5;
-
- if (boot_cpu_data.x86 == 0xf)
- min_scrubrate = 0x0;
-
- /* F15h Erratum #505 */
- if (boot_cpu_data.x86 == 0x15)
- f15h_select_dct(pvt, 0);
-
- return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
-}
-
-static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- u32 scrubval = 0;
- int i, retval = -EINVAL;
-
- /* F15h Erratum #505 */
- if (boot_cpu_data.x86 == 0x15)
- f15h_select_dct(pvt, 0);
-
- amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
-
- scrubval = scrubval & 0x001F;
-
- for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
- if (scrubrates[i].scrubval == scrubval) {
- retval = scrubrates[i].bandwidth;
- break;
- }
- }
- return retval;
-}
-
-/*
- * returns true if the SysAddr given by sys_addr matches the
- * DRAM base/limit associated with node_id
- */
-static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
- unsigned nid)
-{
- u64 addr;
-
- /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
- * all ones if the most significant implemented address bit is 1.
- * Here we discard bits 63-40. See section 3.4.2 of AMD publication
- * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
- * Application Programming.
- */
- addr = sys_addr & 0x000000ffffffffffull;
-
- return ((addr >= get_dram_base(pvt, nid)) &&
- (addr <= get_dram_limit(pvt, nid)));
-}
-
-/*
- * Attempt to map a SysAddr to a node. On success, return a pointer to the
- * mem_ctl_info structure for the node that the SysAddr maps to.
- *
- * On failure, return NULL.
- */
-static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
- u64 sys_addr)
-{
- struct amd64_pvt *pvt;
- unsigned node_id;
- u32 intlv_en, bits;
-
- /*
- * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
- * 3.4.4.2) registers to map the SysAddr to a node ID.
- */
- pvt = mci->pvt_info;
-
- /*
- * The value of this field should be the same for all DRAM Base
- * registers. Therefore we arbitrarily choose to read it from the
- * register for node 0.
- */
- intlv_en = dram_intlv_en(pvt, 0);
-
- if (intlv_en == 0) {
- for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
- if (amd64_base_limit_match(pvt, sys_addr, node_id))
- goto found;
- }
- goto err_no_match;
- }
-
- if (unlikely((intlv_en != 0x01) &&
- (intlv_en != 0x03) &&
- (intlv_en != 0x07))) {
- amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
- return NULL;
- }
-
- bits = (((u32) sys_addr) >> 12) & intlv_en;
-
- for (node_id = 0; ; ) {
- if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
- break; /* intlv_sel field matches */
-
- if (++node_id >= DRAM_RANGES)
- goto err_no_match;
- }
-
- /* sanity test for sys_addr */
- if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
- amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
- "range for node %d with node interleaving enabled.\n",
- __func__, sys_addr, node_id);
- return NULL;
- }
-
-found:
- return edac_mc_find((int)node_id);
-
-err_no_match:
- debugf2("sys_addr 0x%lx doesn't match any node\n",
- (unsigned long)sys_addr);
-
- return NULL;
-}
-
-/*
- * compute the CS base address of the @csrow on the DRAM controller @dct.
- * For details see F2x[5C:40] in the processor's BKDG
- */
-static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
- u64 *base, u64 *mask)
-{
- u64 csbase, csmask, base_bits, mask_bits;
- u8 addr_shift;
-
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
- csbase = pvt->csels[dct].csbases[csrow];
- csmask = pvt->csels[dct].csmasks[csrow];
- base_bits = GENMASK(21, 31) | GENMASK(9, 15);
- mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
- addr_shift = 4;
- } else {
- csbase = pvt->csels[dct].csbases[csrow];
- csmask = pvt->csels[dct].csmasks[csrow >> 1];
- addr_shift = 8;
-
- if (boot_cpu_data.x86 == 0x15)
- base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
- else
- base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
- }
-
- *base = (csbase & base_bits) << addr_shift;
-
- *mask = ~0ULL;
- /* poke holes for the csmask */
- *mask &= ~(mask_bits << addr_shift);
- /* OR them in */
- *mask |= (csmask & mask_bits) << addr_shift;
-}
-
-#define for_each_chip_select(i, dct, pvt) \
- for (i = 0; i < pvt->csels[dct].b_cnt; i++)
-
-#define chip_select_base(i, dct, pvt) \
- pvt->csels[dct].csbases[i]
-
-#define for_each_chip_select_mask(i, dct, pvt) \
- for (i = 0; i < pvt->csels[dct].m_cnt; i++)
-
-/*
- * @input_addr is an InputAddr associated with the node given by mci. Return the
- * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
- */
-static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
-{
- struct amd64_pvt *pvt;
- int csrow;
- u64 base, mask;
-
- pvt = mci->pvt_info;
-
- for_each_chip_select(csrow, 0, pvt) {
- if (!csrow_enabled(csrow, 0, pvt))
- continue;
-
- get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
-
- mask = ~mask;
-
- if ((input_addr & mask) == (base & mask)) {
- debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
- (unsigned long)input_addr, csrow,
- pvt->mc_node_id);
-
- return csrow;
- }
- }
- debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
- (unsigned long)input_addr, pvt->mc_node_id);
-
- return -1;
-}
-
-/*
- * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
- * for the node represented by mci. Info is passed back in *hole_base,
- * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
- * info is invalid. Info may be invalid for either of the following reasons:
- *
- * - The revision of the node is not E or greater. In this case, the DRAM Hole
- * Address Register does not exist.
- *
- * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
- * indicating that its contents are not valid.
- *
- * The values passed back in *hole_base, *hole_offset, and *hole_size are
- * complete 32-bit values despite the fact that the bitfields in the DHAR
- * only represent bits 31-24 of the base and offset values.
- */
-int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
- u64 *hole_offset, u64 *hole_size)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- u64 base;
-
- /* only revE and later have the DRAM Hole Address Register */
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
- debugf1(" revision %d for node %d does not support DHAR\n",
- pvt->ext_model, pvt->mc_node_id);
- return 1;
- }
-
- /* valid for Fam10h and above */
- if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
- debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
- return 1;
- }
-
- if (!dhar_valid(pvt)) {
- debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
- pvt->mc_node_id);
- return 1;
- }
-
- /* This node has Memory Hoisting */
-
- /* +------------------+--------------------+--------------------+-----
- * | memory | DRAM hole | relocated |
- * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
- * | | | DRAM hole |
- * | | | [0x100000000, |
- * | | | (0x100000000+ |
- * | | | (0xffffffff-x))] |
- * +------------------+--------------------+--------------------+-----
- *
- * Above is a diagram of physical memory showing the DRAM hole and the
- * relocated addresses from the DRAM hole. As shown, the DRAM hole
- * starts at address x (the base address) and extends through address
- * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
- * addresses in the hole so that they start at 0x100000000.
- */
-
- base = dhar_base(pvt);
-
- *hole_base = base;
- *hole_size = (0x1ull << 32) - base;
-
- if (boot_cpu_data.x86 > 0xf)
- *hole_offset = f10_dhar_offset(pvt);
- else
- *hole_offset = k8_dhar_offset(pvt);
-
- debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
- pvt->mc_node_id, (unsigned long)*hole_base,
- (unsigned long)*hole_offset, (unsigned long)*hole_size);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
-
-/*
- * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
- * assumed that sys_addr maps to the node given by mci.
- *
- * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
- * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
- * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
- * then it is also involved in translating a SysAddr to a DramAddr. Sections
- * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
- * These parts of the documentation are unclear. I interpret them as follows:
- *
- * When node n receives a SysAddr, it processes the SysAddr as follows:
- *
- * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
- * Limit registers for node n. If the SysAddr is not within the range
- * specified by the base and limit values, then node n ignores the Sysaddr
- * (since it does not map to node n). Otherwise continue to step 2 below.
- *
- * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
- * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
- * the range of relocated addresses (starting at 0x100000000) from the DRAM
- * hole. If not, skip to step 3 below. Else get the value of the
- * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
- * offset defined by this value from the SysAddr.
- *
- * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
- * Base register for node n. To obtain the DramAddr, subtract the base
- * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
- */
-static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
- int ret = 0;
-
- dram_base = get_dram_base(pvt, pvt->mc_node_id);
-
- ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
- &hole_size);
- if (!ret) {
- if ((sys_addr >= (1ull << 32)) &&
- (sys_addr < ((1ull << 32) + hole_size))) {
- /* use DHAR to translate SysAddr to DramAddr */
- dram_addr = sys_addr - hole_offset;
-
- debugf2("using DHAR to translate SysAddr 0x%lx to "
- "DramAddr 0x%lx\n",
- (unsigned long)sys_addr,
- (unsigned long)dram_addr);
-
- return dram_addr;
- }
- }
-
- /*
- * Translate the SysAddr to a DramAddr as shown near the start of
- * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
- * only deals with 40-bit values. Therefore we discard bits 63-40 of
- * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
- * discard are all 1s. Otherwise the bits we discard are all 0s. See
- * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
- * Programmer's Manual Volume 1 Application Programming.
- */
- dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
-
- debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
- "DramAddr 0x%lx\n", (unsigned long)sys_addr,
- (unsigned long)dram_addr);
- return dram_addr;
-}
-
-/*
- * @intlv_en is the value of the IntlvEn field from a DRAM Base register
- * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
- * for node interleaving.
- */
-static int num_node_interleave_bits(unsigned intlv_en)
-{
- static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
- int n;
-
- BUG_ON(intlv_en > 7);
- n = intlv_shift_table[intlv_en];
- return n;
-}
-
-/* Translate the DramAddr given by @dram_addr to an InputAddr. */
-static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
-{
- struct amd64_pvt *pvt;
- int intlv_shift;
- u64 input_addr;
-
- pvt = mci->pvt_info;
-
- /*
- * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
- * concerning translating a DramAddr to an InputAddr.
- */
- intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
- input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
- (dram_addr & 0xfff);
-
- debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
- intlv_shift, (unsigned long)dram_addr,
- (unsigned long)input_addr);
-
- return input_addr;
-}
-
-/*
- * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
- * assumed that @sys_addr maps to the node given by mci.
- */
-static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
-{
- u64 input_addr;
-
- input_addr =
- dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
-
- debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
- (unsigned long)sys_addr, (unsigned long)input_addr);
-
- return input_addr;
-}
-
-
-/*
- * @input_addr is an InputAddr associated with the node represented by mci.
- * Translate @input_addr to a DramAddr and return the result.
- */
-static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
-{
- struct amd64_pvt *pvt;
- unsigned node_id, intlv_shift;
- u64 bits, dram_addr;
- u32 intlv_sel;
-
- /*
- * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
- * shows how to translate a DramAddr to an InputAddr. Here we reverse
- * this procedure. When translating from a DramAddr to an InputAddr, the
- * bits used for node interleaving are discarded. Here we recover these
- * bits from the IntlvSel field of the DRAM Limit register (section
- * 3.4.4.2) for the node that input_addr is associated with.
- */
- pvt = mci->pvt_info;
- node_id = pvt->mc_node_id;
-
- BUG_ON(node_id > 7);
-
- intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
- if (intlv_shift == 0) {
- debugf1(" InputAddr 0x%lx translates to DramAddr of "
- "same value\n", (unsigned long)input_addr);
-
- return input_addr;
- }
-
- bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
- (input_addr & 0xfff);
-
- intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
- dram_addr = bits + (intlv_sel << 12);
-
- debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
- "(%d node interleave bits)\n", (unsigned long)input_addr,
- (unsigned long)dram_addr, intlv_shift);
-
- return dram_addr;
-}
-
-/*
- * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
- * @dram_addr to a SysAddr.
- */
-static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- u64 hole_base, hole_offset, hole_size, base, sys_addr;
- int ret = 0;
-
- ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
- &hole_size);
- if (!ret) {
- if ((dram_addr >= hole_base) &&
- (dram_addr < (hole_base + hole_size))) {
- sys_addr = dram_addr + hole_offset;
-
- debugf1("using DHAR to translate DramAddr 0x%lx to "
- "SysAddr 0x%lx\n", (unsigned long)dram_addr,
- (unsigned long)sys_addr);
-
- return sys_addr;
- }
- }
-
- base = get_dram_base(pvt, pvt->mc_node_id);
- sys_addr = dram_addr + base;
-
- /*
- * The sys_addr we have computed up to this point is a 40-bit value
- * because the k8 deals with 40-bit values. However, the value we are
- * supposed to return is a full 64-bit physical address. The AMD
- * x86-64 architecture specifies that the most significant implemented
- * address bit through bit 63 of a physical address must be either all
- * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
- * 64-bit value below. See section 3.4.2 of AMD publication 24592:
- * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
- * Programming.
- */
- sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
-
- debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
- pvt->mc_node_id, (unsigned long)dram_addr,
- (unsigned long)sys_addr);
-
- return sys_addr;
-}
-
-/*
- * @input_addr is an InputAddr associated with the node given by mci. Translate
- * @input_addr to a SysAddr.
- */
-static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
- u64 input_addr)
-{
- return dram_addr_to_sys_addr(mci,
- input_addr_to_dram_addr(mci, input_addr));
-}
-
-/*
- * Find the minimum and maximum InputAddr values that map to the given @csrow.
- * Pass back these values in *input_addr_min and *input_addr_max.
- */
-static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
- u64 *input_addr_min, u64 *input_addr_max)
-{
- struct amd64_pvt *pvt;
- u64 base, mask;
-
- pvt = mci->pvt_info;
- BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
-
- get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
-
- *input_addr_min = base & ~mask;
- *input_addr_max = base | mask;
-}
-
-/* Map the Error address to a PAGE and PAGE OFFSET. */
-static inline void error_address_to_page_and_offset(u64 error_address,
- u32 *page, u32 *offset)
-{
- *page = (u32) (error_address >> PAGE_SHIFT);
- *offset = ((u32) error_address) & ~PAGE_MASK;
-}
-
-/*
- * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
- * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
- * of a node that detected an ECC memory error. mci represents the node that
- * the error address maps to (possibly different from the node that detected
- * the error). Return the number of the csrow that sys_addr maps to, or -1 on
- * error.
- */
-static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
-{
- int csrow;
-
- csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
-
- if (csrow == -1)
- amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
- "address 0x%lx\n", (unsigned long)sys_addr);
- return csrow;
-}
-
-static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
-
-/*
- * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
- * are ECC capable.
- */
-static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
-{
- u8 bit;
- unsigned long edac_cap = EDAC_FLAG_NONE;
-
- bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
- ? 19
- : 17;
-
- if (pvt->dclr0 & BIT(bit))
- edac_cap = EDAC_FLAG_SECDED;
-
- return edac_cap;
-}
-
-static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
-
-static void amd64_dump_dramcfg_low(u32 dclr, int chan)
-{
- debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
-
- debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
- (dclr & BIT(16)) ? "un" : "",
- (dclr & BIT(19)) ? "yes" : "no");
-
- debugf1(" PAR/ERR parity: %s\n",
- (dclr & BIT(8)) ? "enabled" : "disabled");
-
- if (boot_cpu_data.x86 == 0x10)
- debugf1(" DCT 128bit mode width: %s\n",
- (dclr & BIT(11)) ? "128b" : "64b");
-
- debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
- (dclr & BIT(12)) ? "yes" : "no",
- (dclr & BIT(13)) ? "yes" : "no",
- (dclr & BIT(14)) ? "yes" : "no",
- (dclr & BIT(15)) ? "yes" : "no");
-}
-
-/* Display and decode various NB registers for debug purposes. */
-static void dump_misc_regs(struct amd64_pvt *pvt)
-{
- debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
-
- debugf1(" NB two channel DRAM capable: %s\n",
- (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
-
- debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
-
- amd64_dump_dramcfg_low(pvt->dclr0, 0);
-
- debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
-
- debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
- "offset: 0x%08x\n",
- pvt->dhar, dhar_base(pvt),
- (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
- : f10_dhar_offset(pvt));
-
- debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
-
- amd64_debug_display_dimm_sizes(pvt, 0);
-
- /* everything below this point is Fam10h and above */
- if (boot_cpu_data.x86 == 0xf)
- return;
-
- amd64_debug_display_dimm_sizes(pvt, 1);
-
- amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
-
- /* Only if NOT ganged does dclr1 have valid info */
- if (!dct_ganging_enabled(pvt))
- amd64_dump_dramcfg_low(pvt->dclr1, 1);
-}
-
-/*
- * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
- */
-static void prep_chip_selects(struct amd64_pvt *pvt)
-{
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
- pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
- pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
- } else {
- pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
- pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
- }
-}
-
-/*
- * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
- */
-static void read_dct_base_mask(struct amd64_pvt *pvt)
-{
- int cs;
-
- prep_chip_selects(pvt);
-
- for_each_chip_select(cs, 0, pvt) {
- int reg0 = DCSB0 + (cs * 4);
- int reg1 = DCSB1 + (cs * 4);
- u32 *base0 = &pvt->csels[0].csbases[cs];
- u32 *base1 = &pvt->csels[1].csbases[cs];
-
- if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
- debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, *base0, reg0);
-
- if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
- continue;
-
- if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
- debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, *base1, reg1);
- }
-
- for_each_chip_select_mask(cs, 0, pvt) {
- int reg0 = DCSM0 + (cs * 4);
- int reg1 = DCSM1 + (cs * 4);
- u32 *mask0 = &pvt->csels[0].csmasks[cs];
- u32 *mask1 = &pvt->csels[1].csmasks[cs];
-
- if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
- debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, *mask0, reg0);
-
- if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
- continue;
-
- if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
- debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, *mask1, reg1);
- }
-}
-
-static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
-{
- enum mem_type type;
-
- /* F15h supports only DDR3 */
- if (boot_cpu_data.x86 >= 0x15)
- type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
- else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
- if (pvt->dchr0 & DDR3_MODE)
- type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
- else
- type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
- } else {
- type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
- }
-
- amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
-
- return type;
-}
-
-/* Get the number of DCT channels the memory controller is using. */
-static int k8_early_channel_count(struct amd64_pvt *pvt)
-{
- int flag;
-
- if (pvt->ext_model >= K8_REV_F)
- /* RevF (NPT) and later */
- flag = pvt->dclr0 & WIDTH_128;
- else
- /* RevE and earlier */
- flag = pvt->dclr0 & REVE_WIDTH_128;
-
- /* not used */
- pvt->dclr1 = 0;
-
- return (flag) ? 2 : 1;
-}
-
-/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
-static u64 get_error_address(struct mce *m)
-{
- struct cpuinfo_x86 *c = &boot_cpu_data;
- u64 addr;
- u8 start_bit = 1;
- u8 end_bit = 47;
-
- if (c->x86 == 0xf) {
- start_bit = 3;
- end_bit = 39;
- }
-
- addr = m->addr & GENMASK(start_bit, end_bit);
-
- /*
- * Erratum 637 workaround
- */
- if (c->x86 == 0x15) {
- struct amd64_pvt *pvt;
- u64 cc6_base, tmp_addr;
- u32 tmp;
- u8 mce_nid, intlv_en;
-
- if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
- return addr;
-
- mce_nid = amd_get_nb_id(m->extcpu);
- pvt = mcis[mce_nid]->pvt_info;
-
- amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
- intlv_en = tmp >> 21 & 0x7;
-
- /* add [47:27] + 3 trailing bits */
- cc6_base = (tmp & GENMASK(0, 20)) << 3;
-
- /* reverse and add DramIntlvEn */
- cc6_base |= intlv_en ^ 0x7;
-
- /* pin at [47:24] */
- cc6_base <<= 24;
-
- if (!intlv_en)
- return cc6_base | (addr & GENMASK(0, 23));
-
- amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
-
- /* faster log2 */
- tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);
-
- /* OR DramIntlvSel into bits [14:12] */
- tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;
-
- /* add remaining [11:0] bits from original MC4_ADDR */
- tmp_addr |= addr & GENMASK(0, 11);
-
- return cc6_base | tmp_addr;
- }
-
- return addr;
-}
-
-static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
-{
- struct cpuinfo_x86 *c = &boot_cpu_data;
- int off = range << 3;
-
- amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
- amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
-
- if (c->x86 == 0xf)
- return;
-
- if (!dram_rw(pvt, range))
- return;
-
- amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
- amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
-
- /* Factor in CC6 save area by reading dst node's limit reg */
- if (c->x86 == 0x15) {
- struct pci_dev *f1 = NULL;
- u8 nid = dram_dst_node(pvt, range);
- u32 llim;
-
- f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
- if (WARN_ON(!f1))
- return;
-
- amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
-
- pvt->ranges[range].lim.lo &= GENMASK(0, 15);
-
- /* {[39:27],111b} */
- pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
-
- pvt->ranges[range].lim.hi &= GENMASK(0, 7);
-
- /* [47:40] */
- pvt->ranges[range].lim.hi |= llim >> 13;
-
- pci_dev_put(f1);
- }
-}
-
-static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
- u16 syndrome)
-{
- struct mem_ctl_info *src_mci;
- struct amd64_pvt *pvt = mci->pvt_info;
- int channel, csrow;
- u32 page, offset;
-
- /* CHIPKILL enabled */
- if (pvt->nbcfg & NBCFG_CHIPKILL) {
- channel = get_channel_from_ecc_syndrome(mci, syndrome);
- if (channel < 0) {
- /*
- * Syndrome didn't map, so we don't know which of the
- * 2 DIMMs is in error. So we need to ID 'both' of them
- * as suspect.
- */
- amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
- "error reporting race\n", syndrome);
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
- return;
- }
- } else {
- /*
- * non-chipkill ecc mode
- *
- * The k8 documentation is unclear about how to determine the
- * channel number when using non-chipkill memory. This method
- * was obtained from email communication with someone at AMD.
- * (Wish the email was placed in this comment - norsk)
- */
- channel = ((sys_addr & BIT(3)) != 0);
- }
-
- /*
- * Find out which node the error address belongs to. This may be
- * different from the node that detected the error.
- */
- src_mci = find_mc_by_sys_addr(mci, sys_addr);
- if (!src_mci) {
- amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
- (unsigned long)sys_addr);
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
- return;
- }
-
- /* Now map the sys_addr to a CSROW */
- csrow = sys_addr_to_csrow(src_mci, sys_addr);
- if (csrow < 0) {
- edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
- } else {
- error_address_to_page_and_offset(sys_addr, &page, &offset);
-
- edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
- channel, EDAC_MOD_STR);
- }
-}
-
-static int ddr2_cs_size(unsigned i, bool dct_width)
-{
- unsigned shift = 0;
-
- if (i <= 2)
- shift = i;
- else if (!(i & 0x1))
- shift = i >> 1;
- else
- shift = (i + 1) >> 1;
-
- return 128 << (shift + !!dct_width);
-}
-
-static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
- unsigned cs_mode)
-{
- u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
-
- if (pvt->ext_model >= K8_REV_F) {
- WARN_ON(cs_mode > 11);
- return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
- }
- else if (pvt->ext_model >= K8_REV_D) {
- unsigned diff;
- WARN_ON(cs_mode > 10);
-
- /*
- * the below calculation, besides trying to win an obfuscated C
- * contest, maps cs_mode values to DIMM chip select sizes. The
- * mappings are:
- *
- * cs_mode CS size (mb)
- * ======= ============
- * 0 32
- * 1 64
- * 2 128
- * 3 128
- * 4 256
- * 5 512
- * 6 256
- * 7 512
- * 8 1024
- * 9 1024
- * 10 2048
- *
- * Basically, it calculates a value with which to shift the
- * smallest CS size of 32MB.
- *
- * ddr[23]_cs_size have a similar purpose.
- */
- diff = cs_mode/3 + (unsigned)(cs_mode > 5);
-
- return 32 << (cs_mode - diff);
- }
- else {
- WARN_ON(cs_mode > 6);
- return 32 << cs_mode;
- }
-}
-
-/*
- * Get the number of DCT channels in use.
- *
- * Return:
- * number of Memory Channels in operation
- * Pass back:
- * contents of the DCL0_LOW register
- */
-static int f1x_early_channel_count(struct amd64_pvt *pvt)
-{
- int i, j, channels = 0;
-
- /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
- if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
- return 2;
-
- /*
- * Need to check if we are in unganged mode: in that case, there are 2 channels,
- * but they are not in 128 bit mode and thus the above 'dclr0' status
- * bit will be OFF.
- *
- * Need to check DCT0[0] and DCT1[0] to see if only one of them has
- * their CSEnable bit on. If so, then SINGLE DIMM case.
- */
- debugf0("Data width is not 128 bits - need more decoding\n");
-
- /*
- * Check DRAM Bank Address Mapping values for each DIMM to see if there
- * is more than just one DIMM present in unganged mode. Need to check
- * both controllers since DIMMs can be placed in either one.
- */
- for (i = 0; i < 2; i++) {
- u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
-
- for (j = 0; j < 4; j++) {
- if (DBAM_DIMM(j, dbam) > 0) {
- channels++;
- break;
- }
- }
- }
-
- if (channels > 2)
- channels = 2;
-
- amd64_info("MCT channel count: %d\n", channels);
-
- return channels;
-}
-
-static int ddr3_cs_size(unsigned i, bool dct_width)
-{
- unsigned shift = 0;
- int cs_size = 0;
-
- if (i == 0 || i == 3 || i == 4)
- cs_size = -1;
- else if (i <= 2)
- shift = i;
- else if (i == 12)
- shift = 7;
- else if (!(i & 0x1))
- shift = i >> 1;
- else
- shift = (i + 1) >> 1;
-
- if (cs_size != -1)
- cs_size = (128 * (1 << !!dct_width)) << shift;
-
- return cs_size;
-}
-
-static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
- unsigned cs_mode)
-{
- u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
-
- WARN_ON(cs_mode > 11);
-
- if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
- return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
- else
- return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
-}
-
-/*
- * F15h supports only 64bit DCT interfaces
- */
-static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
- unsigned cs_mode)
-{
- WARN_ON(cs_mode > 12);
-
- return ddr3_cs_size(cs_mode, false);
-}
-
-static void read_dram_ctl_register(struct amd64_pvt *pvt)
-{
-
- if (boot_cpu_data.x86 == 0xf)
- return;
-
- if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
- debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
- pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
-
- debugf0(" DCTs operate in %s mode.\n",
- (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
-
- if (!dct_ganging_enabled(pvt))
- debugf0(" Address range split per DCT: %s\n",
- (dct_high_range_enabled(pvt) ? "yes" : "no"));
-
- debugf0(" data interleave for ECC: %s, "
- "DRAM cleared since last warm reset: %s\n",
- (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
- (dct_memory_cleared(pvt) ? "yes" : "no"));
-
- debugf0(" channel interleave: %s, "
- "interleave bits selector: 0x%x\n",
- (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
- dct_sel_interleave_addr(pvt));
- }
-
- amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
-}
-
-/*
- * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
- * Interleaving Modes.
- */
-static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
- bool hi_range_sel, u8 intlv_en)
-{
- u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
-
- if (dct_ganging_enabled(pvt))
- return 0;
-
- if (hi_range_sel)
- return dct_sel_high;
-
- /*
- * see F2x110[DctSelIntLvAddr] - channel interleave mode
- */
- if (dct_interleave_enabled(pvt)) {
- u8 intlv_addr = dct_sel_interleave_addr(pvt);
-
- /* return DCT select function: 0=DCT0, 1=DCT1 */
- if (!intlv_addr)
- return sys_addr >> 6 & 1;
-
- if (intlv_addr & 0x2) {
- u8 shift = intlv_addr & 0x1 ? 9 : 6;
- u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
-
- return ((sys_addr >> shift) & 1) ^ temp;
- }
-
- return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
- }
-
- if (dct_high_range_enabled(pvt))
- return ~dct_sel_high & 1;
-
- return 0;
-}
-
-/* Convert the sys_addr to the normalized DCT address */
-static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
- u64 sys_addr, bool hi_rng,
- u32 dct_sel_base_addr)
-{
- u64 chan_off;
- u64 dram_base = get_dram_base(pvt, range);
- u64 hole_off = f10_dhar_offset(pvt);
- u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
-
- if (hi_rng) {
- /*
- * if
- * base address of high range is below 4Gb
- * (bits [47:27] at [31:11])
- * DRAM address space on this DCT is hoisted above 4Gb &&
- * sys_addr > 4Gb
- *
- * remove hole offset from sys_addr
- * else
- * remove high range offset from sys_addr
- */
- if ((!(dct_sel_base_addr >> 16) ||
- dct_sel_base_addr < dhar_base(pvt)) &&
- dhar_valid(pvt) &&
- (sys_addr >= BIT_64(32)))
- chan_off = hole_off;
- else
- chan_off = dct_sel_base_off;
- } else {
- /*
- * if
- * we have a valid hole &&
- * sys_addr > 4Gb
- *
- * remove hole
- * else
- * remove dram base to normalize to DCT address
- */
- if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
- chan_off = hole_off;
- else
- chan_off = dram_base;
- }
-
- return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47));
-}
-
-/*
- * checks if the csrow passed in is marked as SPARED, if so returns the new
- * spare row
- */
-static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
-{
- int tmp_cs;
-
- if (online_spare_swap_done(pvt, dct) &&
- csrow == online_spare_bad_dramcs(pvt, dct)) {
-
- for_each_chip_select(tmp_cs, dct, pvt) {
- if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
- csrow = tmp_cs;
- break;
- }
- }
- }
- return csrow;
-}
-
-/*
- * Iterate over the DRAM DCT "base" and "mask" registers looking for a
- * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
- *
- * Return:
- * -EINVAL: NOT FOUND
- * 0..csrow = Chip-Select Row
- */
-static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
-{
- struct mem_ctl_info *mci;
- struct amd64_pvt *pvt;
- u64 cs_base, cs_mask;
- int cs_found = -EINVAL;
- int csrow;
-
- mci = mcis[nid];
- if (!mci)
- return cs_found;
-
- pvt = mci->pvt_info;
-
- debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
-
- for_each_chip_select(csrow, dct, pvt) {
- if (!csrow_enabled(csrow, dct, pvt))
- continue;
-
- get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
-
- debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
- csrow, cs_base, cs_mask);
-
- cs_mask = ~cs_mask;
-
- debugf1(" (InputAddr & ~CSMask)=0x%llx "
- "(CSBase & ~CSMask)=0x%llx\n",
- (in_addr & cs_mask), (cs_base & cs_mask));
-
- if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
- cs_found = f10_process_possible_spare(pvt, dct, csrow);
-
- debugf1(" MATCH csrow=%d\n", cs_found);
- break;
- }
- }
- return cs_found;
-}
-
-/*
- * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
- * swapped with a region located at the bottom of memory so that the GPU can use
- * the interleaved region and thus two channels.
- */
-static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
-{
- u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
-
- if (boot_cpu_data.x86 == 0x10) {
- /* only revC3 and revE have that feature */
- if (boot_cpu_data.x86_model < 4 ||
- (boot_cpu_data.x86_model < 0xa &&
- boot_cpu_data.x86_mask < 3))
- return sys_addr;
- }
-
- amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
-
- if (!(swap_reg & 0x1))
- return sys_addr;
-
- swap_base = (swap_reg >> 3) & 0x7f;
- swap_limit = (swap_reg >> 11) & 0x7f;
- rgn_size = (swap_reg >> 20) & 0x7f;
- tmp_addr = sys_addr >> 27;
-
- if (!(sys_addr >> 34) &&
- (((tmp_addr >= swap_base) &&
- (tmp_addr <= swap_limit)) ||
- (tmp_addr < rgn_size)))
- return sys_addr ^ (u64)swap_base << 27;
-
- return sys_addr;
-}
-
-/* For a given @dram_range, check if @sys_addr falls within it. */
-static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
- u64 sys_addr, int *nid, int *chan_sel)
-{
- int cs_found = -EINVAL;
- u64 chan_addr;
- u32 dct_sel_base;
- u8 channel;
- bool high_range = false;
-
- u8 node_id = dram_dst_node(pvt, range);
- u8 intlv_en = dram_intlv_en(pvt, range);
- u32 intlv_sel = dram_intlv_sel(pvt, range);
-
- debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
- range, sys_addr, get_dram_limit(pvt, range));
-
- if (dhar_valid(pvt) &&
- dhar_base(pvt) <= sys_addr &&
- sys_addr < BIT_64(32)) {
- amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
- sys_addr);
- return -EINVAL;
- }
-
- if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
- return -EINVAL;
-
- sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
-
- dct_sel_base = dct_sel_baseaddr(pvt);
-
- /*
- * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
- * select between DCT0 and DCT1.
- */
- if (dct_high_range_enabled(pvt) &&
- !dct_ganging_enabled(pvt) &&
- ((sys_addr >> 27) >= (dct_sel_base >> 11)))
- high_range = true;
-
- channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
-
- chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
- high_range, dct_sel_base);
-
- /* Remove node interleaving, see F1x120 */
- if (intlv_en)
- chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
- (chan_addr & 0xfff);
-
- /* remove channel interleave */
- if (dct_interleave_enabled(pvt) &&
- !dct_high_range_enabled(pvt) &&
- !dct_ganging_enabled(pvt)) {
-
- if (dct_sel_interleave_addr(pvt) != 1) {
- if (dct_sel_interleave_addr(pvt) == 0x3)
- /* hash 9 */
- chan_addr = ((chan_addr >> 10) << 9) |
- (chan_addr & 0x1ff);
- else
- /* A[6] or hash 6 */
- chan_addr = ((chan_addr >> 7) << 6) |
- (chan_addr & 0x3f);
- } else
- /* A[12] */
- chan_addr = ((chan_addr >> 13) << 12) |
- (chan_addr & 0xfff);
- }
-
- debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);
-
- cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
-
- if (cs_found >= 0) {
- *nid = node_id;
- *chan_sel = channel;
- }
- return cs_found;
-}
-
-static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
- int *node, int *chan_sel)
-{
- int cs_found = -EINVAL;
- unsigned range;
-
- for (range = 0; range < DRAM_RANGES; range++) {
-
- if (!dram_rw(pvt, range))
- continue;
-
- if ((get_dram_base(pvt, range) <= sys_addr) &&
- (get_dram_limit(pvt, range) >= sys_addr)) {
-
- cs_found = f1x_match_to_this_node(pvt, range,
- sys_addr, node,
- chan_sel);
- if (cs_found >= 0)
- break;
- }
- }
- return cs_found;
-}
-
-/*
- * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
- * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
- *
- * The @sys_addr is usually an error address received from the hardware
- * (MCX_ADDR).
- */
-static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
- u16 syndrome)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- u32 page, offset;
- int nid, csrow, chan = 0;
-
- csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
-
- if (csrow < 0) {
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
- return;
- }
-
- error_address_to_page_and_offset(sys_addr, &page, &offset);
-
- /*
- * We need the syndromes for channel detection only when we're
- * ganged. Otherwise @chan should already contain the channel at
- * this point.
- */
- if (dct_ganging_enabled(pvt))
- chan = get_channel_from_ecc_syndrome(mci, syndrome);
-
- if (chan >= 0)
- edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
- EDAC_MOD_STR);
- else
- /*
- * Channel unknown, report all channels on this CSROW as failed.
- */
- for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
- edac_mc_handle_ce(mci, page, offset, syndrome,
- csrow, chan, EDAC_MOD_STR);
-}
-
-/*
- * debug routine to display the memory sizes of all logical DIMMs and their
- * CSROWs
- */
-static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
-{
- int dimm, size0, size1, factor = 0;
- u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
- u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
-
- if (boot_cpu_data.x86 == 0xf) {
- if (pvt->dclr0 & WIDTH_128)
- factor = 1;
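-		/* 128-bit DCT mode (WIDTH_128) doubles the sizes reported below */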
-
- /* K8 families < revF not supported yet */
- if (pvt->ext_model < K8_REV_F)
- return;
- else
- WARN_ON(ctrl != 0);
- }
-
- dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
- dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
- : pvt->csels[0].csbases;
-
- debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
-
- edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
-
- /* Dump memory sizes for DIMM and its CSROWs */
- for (dimm = 0; dimm < 4; dimm++) {
-
- size0 = 0;
- if (dcsb[dimm*2] & DCSB_CS_ENABLE)
- size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
- DBAM_DIMM(dimm, dbam));
-
- size1 = 0;
- if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
- size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
- DBAM_DIMM(dimm, dbam));
-
- amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
- dimm * 2, size0 << factor,
- dimm * 2 + 1, size1 << factor);
- }
-}
-
-static struct amd64_family_type amd64_family_types[] = {
- [K8_CPUS] = {
- .ctl_name = "K8",
- .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
- .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
- .ops = {
- .early_channel_count = k8_early_channel_count,
- .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
- .dbam_to_cs = k8_dbam_to_chip_select,
- .read_dct_pci_cfg = k8_read_dct_pci_cfg,
- }
- },
- [F10_CPUS] = {
- .ctl_name = "F10h",
- .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
- .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f10_dbam_to_chip_select,
- .read_dct_pci_cfg = f10_read_dct_pci_cfg,
- }
- },
- [F15_CPUS] = {
- .ctl_name = "F15h",
- .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
- .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f15_dbam_to_chip_select,
- .read_dct_pci_cfg = f15_read_dct_pci_cfg,
- }
- },
-};
-
-static struct pci_dev *pci_get_related_function(unsigned int vendor,
- unsigned int device,
- struct pci_dev *related)
-{
- struct pci_dev *dev = NULL;
-
- dev = pci_get_device(vendor, device, dev);
- while (dev) {
- if ((dev->bus->number == related->bus->number) &&
- (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
- break;
- dev = pci_get_device(vendor, device, dev);
- }
-
- return dev;
-}
-
-/*
- * These are tables of eigenvectors (one per line) which can be used for the
- * construction of the syndrome tables. The modified syndrome search algorithm
- * uses those to find the symbol in error and thus the DIMM.
- *
- * Algorithm courtesy of Ross LaFetra from AMD.
- */
-static u16 x4_vectors[] = {
- 0x2f57, 0x1afe, 0x66cc, 0xdd88,
- 0x11eb, 0x3396, 0x7f4c, 0xeac8,
- 0x0001, 0x0002, 0x0004, 0x0008,
- 0x1013, 0x3032, 0x4044, 0x8088,
- 0x106b, 0x30d6, 0x70fc, 0xe0a8,
- 0x4857, 0xc4fe, 0x13cc, 0x3288,
- 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
- 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
- 0x15c1, 0x2a42, 0x89ac, 0x4758,
- 0x2b03, 0x1602, 0x4f0c, 0xca08,
- 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
- 0x8ba7, 0x465e, 0x244c, 0x1cc8,
- 0x2b87, 0x164e, 0x642c, 0xdc18,
- 0x40b9, 0x80de, 0x1094, 0x20e8,
- 0x27db, 0x1eb6, 0x9dac, 0x7b58,
- 0x11c1, 0x2242, 0x84ac, 0x4c58,
- 0x1be5, 0x2d7a, 0x5e34, 0xa718,
- 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
- 0x4c97, 0xc87e, 0x11fc, 0x33a8,
- 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
- 0x16b3, 0x3d62, 0x4f34, 0x8518,
- 0x1e2f, 0x391a, 0x5cac, 0xf858,
- 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
- 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
- 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
- 0x4397, 0xc27e, 0x17fc, 0x3ea8,
- 0x1617, 0x3d3e, 0x6464, 0xb8b8,
- 0x23ff, 0x12aa, 0xab6c, 0x56d8,
- 0x2dfb, 0x1ba6, 0x913c, 0x7328,
- 0x185d, 0x2ca6, 0x7914, 0x9e28,
- 0x171b, 0x3e36, 0x7d7c, 0xebe8,
- 0x4199, 0x82ee, 0x19f4, 0x2e58,
- 0x4807, 0xc40e, 0x130c, 0x3208,
- 0x1905, 0x2e0a, 0x5804, 0xac08,
- 0x213f, 0x132a, 0xadfc, 0x5ba8,
- 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
-};
-
-static u16 x8_vectors[] = {
- 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
- 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
- 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
- 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
- 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
- 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
- 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
- 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
- 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
- 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
- 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
- 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
- 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
- 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
- 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
- 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
- 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
- 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
- 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
-};
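-
-/*
- * x4_vectors holds 144 entries, i.e. 144/4 = 36 error symbols (0x00-0x23);
- * x8_vectors holds 152, i.e. 152/8 = 19 (0x00-0x12). decode_syndrome()
- * returns the index of the matching symbol, which map_err_sym_to_channel()
- * then folds down to a DCT channel number.
- */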
-
-static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
- unsigned v_dim)
-{
- unsigned int i, err_sym;
-
- for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
- u16 s = syndrome;
- unsigned v_idx = err_sym * v_dim;
- unsigned v_end = (err_sym + 1) * v_dim;
-
- /* walk over all 16 bits of the syndrome */
- for (i = 1; i < (1U << 16); i <<= 1) {
-
- /* if bit is set in that eigenvector... */
- if (v_idx < v_end && vectors[v_idx] & i) {
- u16 ev_comp = vectors[v_idx++];
-
- /* ... and bit set in the modified syndrome, */
- if (s & i) {
- /* remove it. */
- s ^= ev_comp;
-
- if (!s)
- return err_sym;
- }
-
- } else if (s & i)
- /* can't get to zero, move to next symbol */
- break;
- }
- }
-
- debugf0("syndrome(%x) not found\n", syndrome);
- return -1;
-}
-
-static int map_err_sym_to_channel(int err_sym, int sym_size)
-{
- if (sym_size == 4)
- switch (err_sym) {
- case 0x20:
- case 0x21:
- return 0;
- break;
- case 0x22:
- case 0x23:
- return 1;
- break;
- default:
- return err_sym >> 4;
- break;
- }
- /* x8 symbols */
- else
- switch (err_sym) {
- /* imaginary bits not in a DIMM */
- case 0x10:
- WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
- err_sym);
- return -1;
- break;
-
- case 0x11:
- return 0;
- break;
- case 0x12:
- return 1;
- break;
- default:
- return err_sym >> 3;
- break;
- }
- return -1;
-}
-
-static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- int err_sym = -1;
-
- if (pvt->ecc_sym_sz == 8)
- err_sym = decode_syndrome(syndrome, x8_vectors,
- ARRAY_SIZE(x8_vectors),
- pvt->ecc_sym_sz);
- else if (pvt->ecc_sym_sz == 4)
- err_sym = decode_syndrome(syndrome, x4_vectors,
- ARRAY_SIZE(x4_vectors),
- pvt->ecc_sym_sz);
- else {
- amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
- return err_sym;
- }
-
- return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
-}
-
-/*
- * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
- * ADDRESS and process.
- */
-static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- u64 sys_addr;
- u16 syndrome;
-
- /* Ensure that the Error Address is VALID */
- if (!(m->status & MCI_STATUS_ADDRV)) {
- amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
- return;
- }
-
- sys_addr = get_error_address(m);
- syndrome = extract_syndrome(m->status);
-
- amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
-
- pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
-}
-
-/* Handle any Un-correctable Errors (UEs) */
-static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
-{
- struct mem_ctl_info *log_mci, *src_mci = NULL;
- int csrow;
- u64 sys_addr;
- u32 page, offset;
-
- log_mci = mci;
-
- if (!(m->status & MCI_STATUS_ADDRV)) {
- amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
- return;
- }
-
- sys_addr = get_error_address(m);
-
- /*
- * Find out which node the error address belongs to. This may be
- * different from the node that detected the error.
- */
- src_mci = find_mc_by_sys_addr(mci, sys_addr);
- if (!src_mci) {
- amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
- (unsigned long)sys_addr);
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
- return;
- }
-
- log_mci = src_mci;
-
- csrow = sys_addr_to_csrow(log_mci, sys_addr);
- if (csrow < 0) {
- amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
- (unsigned long)sys_addr);
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
- } else {
- error_address_to_page_and_offset(sys_addr, &page, &offset);
- edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
- }
-}
-
-static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
- struct mce *m)
-{
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, 0x1f);
- u8 ecc_type = (m->status >> 45) & 0x3;
-
-	/* Bail out early if this was an 'observed' error */
- if (PP(ec) == NBSL_PP_OBS)
- return;
-
- /* Do only ECC errors */
- if (xec && xec != F10_NBSL_EXT_ERR_ECC)
- return;
-
- if (ecc_type == 2)
- amd64_handle_ce(mci, m);
- else if (ecc_type == 1)
- amd64_handle_ue(mci, m);
-}
-
-void amd64_decode_bus_error(int node_id, struct mce *m)
-{
- __amd64_decode_bus_error(mcis[node_id], m);
-}
-
-/*
- * Use pvt->F2 which contains the F2 CPU PCI device to get the related
- * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
- */
-static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
-{
- /* Reserve the ADDRESS MAP Device */
- pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
- if (!pvt->F1) {
- amd64_err("error address map device not found: "
- "vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_AMD, f1_id);
- return -ENODEV;
- }
-
- /* Reserve the MISC Device */
- pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
- if (!pvt->F3) {
- pci_dev_put(pvt->F1);
- pvt->F1 = NULL;
-
- amd64_err("error F3 device not found: "
- "vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_AMD, f3_id);
-
- return -ENODEV;
- }
- debugf1("F1: %s\n", pci_name(pvt->F1));
- debugf1("F2: %s\n", pci_name(pvt->F2));
- debugf1("F3: %s\n", pci_name(pvt->F3));
-
- return 0;
-}
-
-static void free_mc_sibling_devs(struct amd64_pvt *pvt)
-{
- pci_dev_put(pvt->F1);
- pci_dev_put(pvt->F3);
-}
-
-/*
- * Retrieve the hardware registers of the memory controller (this includes the
- * 'Address Map' and 'Misc' device regs)
- */
-static void read_mc_regs(struct amd64_pvt *pvt)
-{
- struct cpuinfo_x86 *c = &boot_cpu_data;
- u64 msr_val;
- u32 tmp;
- unsigned range;
-
- /*
- * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
- * those are Read-As-Zero
- */
- rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
- debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
-
- /* check first whether TOP_MEM2 is enabled */
- rdmsrl(MSR_K8_SYSCFG, msr_val);
- if (msr_val & (1U << 21)) {
- rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
- debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
- } else
- debugf0(" TOP_MEM2 disabled.\n");
-
- amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
-
- read_dram_ctl_register(pvt);
-
- for (range = 0; range < DRAM_RANGES; range++) {
- u8 rw;
-
- /* read settings for this DRAM range */
- read_dram_base_limit_regs(pvt, range);
-
- rw = dram_rw(pvt, range);
- if (!rw)
- continue;
-
- debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
- range,
- get_dram_base(pvt, range),
- get_dram_limit(pvt, range));
-
- debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
- dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
- (rw & 0x1) ? "R" : "-",
- (rw & 0x2) ? "W" : "-",
- dram_intlv_sel(pvt, range),
- dram_dst_node(pvt, range));
- }
-
- read_dct_base_mask(pvt);
-
- amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
- amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
-
- amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
-
- amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
- amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
-
- if (!dct_ganging_enabled(pvt)) {
- amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
- amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
- }
-
- pvt->ecc_sym_sz = 4;
-
- if (c->x86 >= 0x10) {
- amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
- amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
-
- /* F10h, revD and later can do x8 ECC too */
- if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
- pvt->ecc_sym_sz = 8;
- }
- dump_misc_regs(pvt);
-}
-
-/*
- * NOTE: CPU Revision Dependent code
- *
- * Input:
- * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
- *	@pvt		k8 private pointer, giving access to:
- *			the DRAM Bank Address mapping (DBAM) register,
- *			node_id, and
- *			the DCL register where dual_channel_active is
- *
- * The DBAM register consists of 4 fields of 4 bits each, defined as follows:
- *
- * Bits: CSROWs
- * 0-3 CSROWs 0 and 1
- * 4-7 CSROWs 2 and 3
- * 8-11 CSROWs 4 and 5
- * 12-15 CSROWs 6 and 7
- *
- * Values range from 0 to 15.
- * The meaning of the values depends on CPU revision and dual-channel state;
- * see the relevant BKDG for more info.
- *
- * The memory controller provides for a total of only 8 CSROWs in its current
- * architecture. Each "pair" of CSROWs normally represents just one DIMM in
- * single-channel mode or two (2) DIMMs in dual-channel mode.
- *
- * The following code logic collapses the various tables for CSROW based on CPU
- * revision.
- *
- * Returns:
- *	The number of PAGE_SIZE pages that the specified CSROW encompasses
- *
- */
-static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
-{
- u32 cs_mode, nr_pages;
- u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
-
- /*
- * The math on this doesn't look right on the surface because x/2*4 can
- * be simplified to x*2 but this expression makes use of the fact that
- * it is integral math where 1/2=0. This intermediate value becomes the
- * number of bits to shift the DBAM register to extract the proper CSROW
- * field.
- */
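-	/*
-	 * e.g. csrow_nr == 5: 5/2 = 2 and 2*4 = 8, so bits 11:8 of DBAM
-	 * (the field covering CSROWs 4 and 5) are extracted.
-	 */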
- cs_mode = (dbam >> ((csrow_nr / 2) * 4)) & 0xF;
-
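-	/*
-	 * dbam_to_cs() returns the chip select size in MB; shifting left by
-	 * (20 - PAGE_SHIFT) converts megabytes to PAGE_SIZE pages.
-	 */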
- nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
-
- debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
- debugf0(" nr_pages= %u channel-count = %d\n",
- nr_pages, pvt->channel_count);
-
- return nr_pages;
-}
-
-/*
- * Initialize the array of csrow attribute instances, based on the values
- * from pci config hardware registers.
- */
-static int init_csrows(struct mem_ctl_info *mci)
-{
- struct csrow_info *csrow;
- struct amd64_pvt *pvt = mci->pvt_info;
- u64 input_addr_min, input_addr_max, sys_addr, base, mask;
- u32 val;
- int i, empty = 1;
-
- amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
-
- pvt->nbcfg = val;
-
- debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- pvt->mc_node_id, val,
- !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
-
- for_each_chip_select(i, 0, pvt) {
- csrow = &mci->csrows[i];
-
- if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) {
- debugf1("----CSROW %d EMPTY for node %d\n", i,
- pvt->mc_node_id);
- continue;
- }
-
- debugf1("----CSROW %d VALID for MC node %d\n",
- i, pvt->mc_node_id);
-
- empty = 0;
- if (csrow_enabled(i, 0, pvt))
- csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
- if (csrow_enabled(i, 1, pvt))
- csrow->nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
- find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
- sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
- csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
- sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
- csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
-
- get_cs_base_and_mask(pvt, i, 0, &base, &mask);
- csrow->page_mask = ~mask;
- /* 8 bytes of resolution */
-
- csrow->mtype = amd64_determine_memory_type(pvt, i);
-
- debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
- debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
- (unsigned long)input_addr_min,
- (unsigned long)input_addr_max);
- debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
- (unsigned long)sys_addr, csrow->page_mask);
- debugf1(" nr_pages: %u first_page: 0x%lx "
- "last_page: 0x%lx\n",
- (unsigned)csrow->nr_pages,
- csrow->first_page, csrow->last_page);
-
- /*
- * determine whether CHIPKILL or JUST ECC or NO ECC is operating
- */
- if (pvt->nbcfg & NBCFG_ECC_ENABLE)
- csrow->edac_mode =
- (pvt->nbcfg & NBCFG_CHIPKILL) ?
- EDAC_S4ECD4ED : EDAC_SECDED;
- else
- csrow->edac_mode = EDAC_NONE;
- }
-
- return empty;
-}
-
-/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
-{
- int cpu;
-
- for_each_online_cpu(cpu)
- if (amd_get_nb_id(cpu) == nid)
- cpumask_set_cpu(cpu, mask);
-}
-
-/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
-{
- cpumask_var_t mask;
- int cpu, nbe;
- bool ret = false;
-
- if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
- amd64_warn("%s: Error allocating mask\n", __func__);
- return false;
- }
-
- get_cpus_on_this_dct_cpumask(mask, nid);
-
- rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
-
- for_each_cpu(cpu, mask) {
- struct msr *reg = per_cpu_ptr(msrs, cpu);
- nbe = reg->l & MSR_MCGCTL_NBE;
-
- debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
- cpu, reg->q,
- (nbe ? "enabled" : "disabled"));
-
- if (!nbe)
- goto out;
- }
- ret = true;
-
-out:
- free_cpumask_var(mask);
- return ret;
-}
-
-static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
-{
- cpumask_var_t cmask;
- int cpu;
-
- if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
- amd64_warn("%s: error allocating mask\n", __func__);
- return false;
- }
-
- get_cpus_on_this_dct_cpumask(cmask, nid);
-
- rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
-
- for_each_cpu(cpu, cmask) {
-
- struct msr *reg = per_cpu_ptr(msrs, cpu);
-
- if (on) {
- if (reg->l & MSR_MCGCTL_NBE)
- s->flags.nb_mce_enable = 1;
-
- reg->l |= MSR_MCGCTL_NBE;
- } else {
- /*
-			 * Turn off NB MCE reporting only if it was off before we enabled it
- */
- if (!s->flags.nb_mce_enable)
- reg->l &= ~MSR_MCGCTL_NBE;
- }
- }
- wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
-
- free_cpumask_var(cmask);
-
- return 0;
-}
-
-static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
- struct pci_dev *F3)
-{
- bool ret = true;
- u32 value, mask = 0x3; /* UECC/CECC enable */
-
- if (toggle_ecc_err_reporting(s, nid, ON)) {
- amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
- return false;
- }
-
- amd64_read_pci_cfg(F3, NBCTL, &value);
-
- s->old_nbctl = value & mask;
- s->nbctl_valid = true;
-
- value |= mask;
- amd64_write_pci_cfg(F3, NBCTL, value);
-
- amd64_read_pci_cfg(F3, NBCFG, &value);
-
- debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
- nid, value, !!(value & NBCFG_ECC_ENABLE));
-
- if (!(value & NBCFG_ECC_ENABLE)) {
- amd64_warn("DRAM ECC disabled on this node, enabling...\n");
-
- s->flags.nb_ecc_prev = 0;
-
- /* Attempt to turn on DRAM ECC Enable */
- value |= NBCFG_ECC_ENABLE;
- amd64_write_pci_cfg(F3, NBCFG, value);
-
- amd64_read_pci_cfg(F3, NBCFG, &value);
-
- if (!(value & NBCFG_ECC_ENABLE)) {
-			amd64_warn("Hardware rejected DRAM ECC enable, "
- "check memory DIMM configuration.\n");
- ret = false;
- } else {
- amd64_info("Hardware accepted DRAM ECC Enable\n");
- }
- } else {
- s->flags.nb_ecc_prev = 1;
- }
-
- debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
- nid, value, !!(value & NBCFG_ECC_ENABLE));
-
- return ret;
-}
-
-static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
- struct pci_dev *F3)
-{
- u32 value, mask = 0x3; /* UECC/CECC enable */
-
-
- if (!s->nbctl_valid)
- return;
-
- amd64_read_pci_cfg(F3, NBCTL, &value);
- value &= ~mask;
- value |= s->old_nbctl;
-
- amd64_write_pci_cfg(F3, NBCTL, value);
-
- /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
- if (!s->flags.nb_ecc_prev) {
- amd64_read_pci_cfg(F3, NBCFG, &value);
- value &= ~NBCFG_ECC_ENABLE;
- amd64_write_pci_cfg(F3, NBCFG, value);
- }
-
- /* restore the NB Enable MCGCTL bit */
- if (toggle_ecc_err_reporting(s, nid, OFF))
- amd64_warn("Error restoring NB MCGCTL settings!\n");
-}
-
-/*
- * EDAC requires that the BIOS have ECC enabled before
- * taking over the processing of ECC errors. A command line
- * option allows force-enabling hardware ECC later in
- * enable_ecc_error_reporting().
- */
-static const char *ecc_msg =
- "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
- " Either enable ECC checking or force module loading by setting "
- "'ecc_enable_override'.\n"
- " (Note that use of the override may cause unknown side effects.)\n";
-
-static bool ecc_enabled(struct pci_dev *F3, u8 nid)
-{
- u32 value;
- u8 ecc_en = 0;
- bool nb_mce_en = false;
-
- amd64_read_pci_cfg(F3, NBCFG, &value);
-
- ecc_en = !!(value & NBCFG_ECC_ENABLE);
- amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
-
- nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
- if (!nb_mce_en)
- amd64_notice("NB MCE bank disabled, set MSR "
- "0x%08x[4] on node %d to enable.\n",
- MSR_IA32_MCG_CTL, nid);
-
- if (!ecc_en || !nb_mce_en) {
- amd64_notice("%s", ecc_msg);
- return false;
- }
- return true;
-}
-
-struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
- ARRAY_SIZE(amd64_inj_attrs) +
- 1];
-
-struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
-
-static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
-{
- unsigned int i = 0, j = 0;
-
- for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
- sysfs_attrs[i] = amd64_dbg_attrs[i];
-
- if (boot_cpu_data.x86 >= 0x10)
- for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
- sysfs_attrs[i] = amd64_inj_attrs[j];
-
- sysfs_attrs[i] = terminator;
-
- mci->mc_driver_sysfs_attributes = sysfs_attrs;
-}
-
-static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
- struct amd64_family_type *fam)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
-
- mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
- mci->edac_ctl_cap = EDAC_FLAG_NONE;
-
- if (pvt->nbcap & NBCAP_SECDED)
- mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
-
- if (pvt->nbcap & NBCAP_CHIPKILL)
- mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
-
- mci->edac_cap = amd64_determine_edac_cap(pvt);
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = EDAC_AMD64_VERSION;
- mci->ctl_name = fam->ctl_name;
- mci->dev_name = pci_name(pvt->F2);
- mci->ctl_page_to_phys = NULL;
-
- /* memory scrubber interface */
- mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
- mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
-}
-
-/*
- * returns a pointer to the family descriptor on success, NULL otherwise.
- */
-static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
-{
- u8 fam = boot_cpu_data.x86;
- struct amd64_family_type *fam_type = NULL;
-
- switch (fam) {
- case 0xf:
- fam_type = &amd64_family_types[K8_CPUS];
- pvt->ops = &amd64_family_types[K8_CPUS].ops;
- break;
-
- case 0x10:
- fam_type = &amd64_family_types[F10_CPUS];
- pvt->ops = &amd64_family_types[F10_CPUS].ops;
- break;
-
- case 0x15:
- fam_type = &amd64_family_types[F15_CPUS];
- pvt->ops = &amd64_family_types[F15_CPUS].ops;
- break;
-
- default:
- amd64_err("Unsupported family!\n");
- return NULL;
- }
-
- pvt->ext_model = boot_cpu_data.x86_model >> 4;
-
- amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
- (fam == 0xf ?
- (pvt->ext_model >= K8_REV_F ? "revF or later "
- : "revE or earlier ")
- : ""), pvt->mc_node_id);
- return fam_type;
-}
-
-static int amd64_init_one_instance(struct pci_dev *F2)
-{
- struct amd64_pvt *pvt = NULL;
- struct amd64_family_type *fam_type = NULL;
- struct mem_ctl_info *mci = NULL;
- int err = 0, ret;
- u8 nid = get_node_id(F2);
-
- ret = -ENOMEM;
- pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
- if (!pvt)
- goto err_ret;
-
- pvt->mc_node_id = nid;
- pvt->F2 = F2;
-
- ret = -EINVAL;
- fam_type = amd64_per_family_init(pvt);
- if (!fam_type)
- goto err_free;
-
- ret = -ENODEV;
- err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
- if (err)
- goto err_free;
-
- read_mc_regs(pvt);
-
- /*
- * We need to determine how many memory channels there are. Then use
- * that information for calculating the size of the dynamic instance
- * tables in the 'mci' structure.
- */
- ret = -EINVAL;
- pvt->channel_count = pvt->ops->early_channel_count(pvt);
- if (pvt->channel_count < 0)
- goto err_siblings;
-
- ret = -ENOMEM;
- mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
- if (!mci)
- goto err_siblings;
-
- mci->pvt_info = pvt;
- mci->dev = &pvt->F2->dev;
-
- setup_mci_misc_attrs(mci, fam_type);
-
- if (init_csrows(mci))
- mci->edac_cap = EDAC_FLAG_NONE;
-
- set_mc_sysfs_attrs(mci);
-
- ret = -ENODEV;
- if (edac_mc_add_mc(mci)) {
- debugf1("failed edac_mc_add_mc()\n");
- goto err_add_mc;
- }
-
- /* register stuff with EDAC MCE */
- if (report_gart_errors)
- amd_report_gart_errors(true);
-
- amd_register_ecc_decoder(amd64_decode_bus_error);
-
- mcis[nid] = mci;
-
- atomic_inc(&drv_instances);
-
- return 0;
-
-err_add_mc:
- edac_mc_free(mci);
-
-err_siblings:
- free_mc_sibling_devs(pvt);
-
-err_free:
- kfree(pvt);
-
-err_ret:
- return ret;
-}
-
-static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
- const struct pci_device_id *mc_type)
-{
- u8 nid = get_node_id(pdev);
- struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
- struct ecc_settings *s;
- int ret = 0;
-
- ret = pci_enable_device(pdev);
- if (ret < 0) {
- debugf0("ret=%d\n", ret);
- return -EIO;
- }
-
- ret = -ENOMEM;
- s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
- if (!s)
- goto err_out;
-
- ecc_stngs[nid] = s;
-
- if (!ecc_enabled(F3, nid)) {
- ret = -ENODEV;
-
- if (!ecc_enable_override)
- goto err_enable;
-
- amd64_warn("Forcing ECC on!\n");
-
- if (!enable_ecc_error_reporting(s, nid, F3))
- goto err_enable;
- }
-
- ret = amd64_init_one_instance(pdev);
- if (ret < 0) {
- amd64_err("Error probing instance: %d\n", nid);
- restore_ecc_error_reporting(s, nid, F3);
- }
-
- return ret;
-
-err_enable:
- kfree(s);
- ecc_stngs[nid] = NULL;
-
-err_out:
- return ret;
-}
-
-static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
- struct amd64_pvt *pvt;
- u8 nid = get_node_id(pdev);
- struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
- struct ecc_settings *s = ecc_stngs[nid];
-
- /* Remove from EDAC CORE tracking list */
- mci = edac_mc_del_mc(&pdev->dev);
- if (!mci)
- return;
-
- pvt = mci->pvt_info;
-
- restore_ecc_error_reporting(s, nid, F3);
-
- free_mc_sibling_devs(pvt);
-
- /* unregister from EDAC MCE */
- amd_report_gart_errors(false);
- amd_unregister_ecc_decoder(amd64_decode_bus_error);
-
- kfree(ecc_stngs[nid]);
- ecc_stngs[nid] = NULL;
-
- /* Free the EDAC CORE resources */
- mci->pvt_info = NULL;
- mcis[nid] = NULL;
-
- kfree(pvt);
- edac_mc_free(mci);
-}
-
-/*
- * This table is part of the interface for loading drivers for PCI devices. The
- * PCI core identifies what devices are on a system during boot, and then
- * queries this table to see if this driver handles a given device it found.
- */
-static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
- {
- .vendor = PCI_VENDOR_ID_AMD,
- .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .class = 0,
- .class_mask = 0,
- },
- {
- .vendor = PCI_VENDOR_ID_AMD,
- .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .class = 0,
- .class_mask = 0,
- },
- {
- .vendor = PCI_VENDOR_ID_AMD,
- .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .class = 0,
- .class_mask = 0,
- },
-
- {0, }
-};
-MODULE_DEVICE_TABLE(pci, amd64_pci_table);
-
-static struct pci_driver amd64_pci_driver = {
- .name = EDAC_MOD_STR,
- .probe = amd64_probe_one_instance,
- .remove = __devexit_p(amd64_remove_one_instance),
- .id_table = amd64_pci_table,
-};
-
-static void setup_pci_device(void)
-{
- struct mem_ctl_info *mci;
- struct amd64_pvt *pvt;
-
- if (amd64_ctl_pci)
- return;
-
- mci = mcis[0];
- if (mci) {
-
- pvt = mci->pvt_info;
- amd64_ctl_pci =
- edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
-
- if (!amd64_ctl_pci) {
- pr_warning("%s(): Unable to create PCI control\n",
- __func__);
-
- pr_warning("%s(): PCI error report via EDAC not set\n",
- __func__);
- }
- }
-}
-
-static int __init amd64_edac_init(void)
-{
- int err = -ENODEV;
-
- printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
-
- opstate_init();
-
- if (amd_cache_northbridges() < 0)
- goto err_ret;
-
- err = -ENOMEM;
- mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
- ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
- if (!(mcis && ecc_stngs))
- goto err_free;
-
- msrs = msrs_alloc();
- if (!msrs)
- goto err_free;
-
- err = pci_register_driver(&amd64_pci_driver);
- if (err)
- goto err_pci;
-
- err = -ENODEV;
- if (!atomic_read(&drv_instances))
- goto err_no_instances;
-
- setup_pci_device();
- return 0;
-
-err_no_instances:
- pci_unregister_driver(&amd64_pci_driver);
-
-err_pci:
- msrs_free(msrs);
- msrs = NULL;
-
-err_free:
- kfree(mcis);
- mcis = NULL;
-
- kfree(ecc_stngs);
- ecc_stngs = NULL;
-
-err_ret:
- return err;
-}
-
-static void __exit amd64_edac_exit(void)
-{
- if (amd64_ctl_pci)
- edac_pci_release_generic_ctl(amd64_ctl_pci);
-
- pci_unregister_driver(&amd64_pci_driver);
-
- kfree(ecc_stngs);
- ecc_stngs = NULL;
-
- kfree(mcis);
- mcis = NULL;
-
- msrs_free(msrs);
- msrs = NULL;
-}
-
-module_init(amd64_edac_init);
-module_exit(amd64_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
- "Dave Peterson, Thayne Harbaugh");
-MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
- EDAC_AMD64_VERSION);
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/ANDROID_3.4.5/drivers/edac/amd64_edac.h b/ANDROID_3.4.5/drivers/edac/amd64_edac.h
deleted file mode 100644
index 9a666cb9..00000000
--- a/ANDROID_3.4.5/drivers/edac/amd64_edac.h
+++ /dev/null
@@ -1,462 +0,0 @@
-/*
- * AMD64 class Memory Controller kernel module
- *
- * Copyright (c) 2009 SoftwareBitMaker.
- * Copyright (c) 2009 Advanced Micro Devices, Inc.
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Originally Written by Thayne Harbaugh
- *
- * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
- * - K8 CPU Revision D and greater support
- *
- * Changes by Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>:
- * - Module largely rewritten, with new (and hopefully correct)
- * code for dealing with node and chip select interleaving,
- * various code cleanup, and bug fixes
- * - Added support for memory hoisting using DRAM hole address
- * register
- *
- * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
- * -K8 Rev (1207) revision support added, required Revision
- * specific mini-driver code to support Rev F as well as
- * prior revisions
- *
- * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
- * -Family 10h revision support added. New PCI Device IDs,
- * indicating new changes. Actual registers modified
- * were slight, less than the Rev E to Rev F transition
- * but changing the PCI Device ID was the proper thing to
- *		do, as it provides for almost automatic family
- * detection. The mods to Rev F required more family
- * information detection.
- *
- * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
- * - misc fixes and code cleanups
- *
- * This module is based on the following documents
- * (available from http://www.amd.com/):
- *
- * Title: BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
- * Opteron Processors
- * AMD publication #: 26094
- *	Revision: 3.26
- *
- * Title: BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
- * Processors
- * AMD publication #: 32559
- * Revision: 3.00
- * Issue Date: May 2006
- *
- * Title: BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
- * Processors
- * AMD publication #: 31116
- * Revision: 3.00
- * Issue Date: September 07, 2007
- *
- * Sections in the first 2 documents are no longer in sync with each other.
- * The Family 10h BKDG was totally re-written from scratch with a new
- * presentation model.
- * Therefore, comments that refer to a Document section might be off.
- */
-
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/slab.h>
-#include <linux/mmzone.h>
-#include <linux/edac.h>
-#include <asm/msr.h>
-#include "edac_core.h"
-#include "mce_amd.h"
-
-#define amd64_debug(fmt, arg...) \
- edac_printk(KERN_DEBUG, "amd64", fmt, ##arg)
-
-#define amd64_info(fmt, arg...) \
- edac_printk(KERN_INFO, "amd64", fmt, ##arg)
-
-#define amd64_notice(fmt, arg...) \
- edac_printk(KERN_NOTICE, "amd64", fmt, ##arg)
-
-#define amd64_warn(fmt, arg...) \
- edac_printk(KERN_WARNING, "amd64", fmt, ##arg)
-
-#define amd64_err(fmt, arg...) \
- edac_printk(KERN_ERR, "amd64", fmt, ##arg)
-
-#define amd64_mc_warn(mci, fmt, arg...) \
- edac_mc_chipset_printk(mci, KERN_WARNING, "amd64", fmt, ##arg)
-
-#define amd64_mc_err(mci, fmt, arg...) \
- edac_mc_chipset_printk(mci, KERN_ERR, "amd64", fmt, ##arg)
-
-/*
- * Throughout the comments in this code, the following terms are used:
- *
- * SysAddr, DramAddr, and InputAddr
- *
- * These terms come directly from the amd64 documentation
- * (AMD publication #26094). They are defined as follows:
- *
- * SysAddr:
- * This is a physical address generated by a CPU core or a device
- * doing DMA. If generated by a CPU core, a SysAddr is the result of
- * a virtual to physical address translation by the CPU core's address
- * translation mechanism (MMU).
- *
- * DramAddr:
- * A DramAddr is derived from a SysAddr by subtracting an offset that
- * depends on which node the SysAddr maps to and whether the SysAddr
- * is within a range affected by memory hoisting. The DRAM Base
- * (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers
- * determine which node a SysAddr maps to.
- *
- * If the DRAM Hole Address Register (DHAR) is enabled and the SysAddr
- * is within the range of addresses specified by this register, then
- * a value x from the DHAR is subtracted from the SysAddr to produce a
- * DramAddr. Here, x represents the base address for the node that
- * the SysAddr maps to plus an offset due to memory hoisting. See
- * section 3.4.8 and the comments in amd64_get_dram_hole_info() and
- * sys_addr_to_dram_addr() below for more information.
- *
- * If the SysAddr is not affected by the DHAR then a value y is
- * subtracted from the SysAddr to produce a DramAddr. Here, y is the
- * base address for the node that the SysAddr maps to. See section
- * 3.4.4 and the comments in sys_addr_to_dram_addr() below for more
- * information.
- *
- * InputAddr:
- * A DramAddr is translated to an InputAddr before being passed to the
- * memory controller for the node that the DramAddr is associated
- * with. The memory controller then maps the InputAddr to a csrow.
- * If node interleaving is not in use, then the InputAddr has the same
- * value as the DramAddr. Otherwise, the InputAddr is produced by
- * discarding the bits used for node interleaving from the DramAddr.
- * See section 3.4.4 for more information.
- *
- * The memory controller for a given node uses its DRAM CS Base and
- * DRAM CS Mask registers to map an InputAddr to a csrow. See
- * sections 3.5.4 and 3.5.5 for more information.
- */
-
-#define EDAC_AMD64_VERSION "3.4.0"
-#define EDAC_MOD_STR "amd64_edac"
-
-/* Extended Model from CPUID, for CPU Revision numbers */
-#define K8_REV_D 1
-#define K8_REV_E 2
-#define K8_REV_F 4
-
-/* Hardware limit on ChipSelect rows per MC and processors per system */
-#define NUM_CHIPSELECTS 8
-#define DRAM_RANGES 8
-
-#define ON true
-#define OFF false
-
-/*
- * Create a contiguous bitmask starting at bit position @lo and ending at
- * position @hi. For example
- *
- * GENMASK(21, 39) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))
-
-/*
- * PCI-defined configuration space registers
- */
-#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
-#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
-
-
-/*
- * Function 1 - Address Map
- */
-#define DRAM_BASE_LO 0x40
-#define DRAM_LIMIT_LO 0x44
-
-#define dram_intlv_en(pvt, i) ((u8)((pvt->ranges[i].base.lo >> 8) & 0x7))
-#define dram_rw(pvt, i) ((u8)(pvt->ranges[i].base.lo & 0x3))
-#define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7))
-#define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7))
-
-#define DHAR 0xf0
-#define dhar_valid(pvt) ((pvt)->dhar & BIT(0))
-#define dhar_mem_hoist_valid(pvt) ((pvt)->dhar & BIT(1))
-#define dhar_base(pvt) ((pvt)->dhar & 0xff000000)
-#define k8_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff00) << 16)
-
- /* NOTE: Extra mask bit vs K8 */
-#define f10_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff80) << 16)
-
-#define DCT_CFG_SEL 0x10C
-
-#define DRAM_LOCAL_NODE_BASE 0x120
-#define DRAM_LOCAL_NODE_LIM 0x124
-
-#define DRAM_BASE_HI 0x140
-#define DRAM_LIMIT_HI 0x144
-
-
-/*
- * Function 2 - DRAM controller
- */
-#define DCSB0 0x40
-#define DCSB1 0x140
-#define DCSB_CS_ENABLE BIT(0)
-
-#define DCSM0 0x60
-#define DCSM1 0x160
-
-#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
-
-#define DBAM0 0x80
-#define DBAM1 0x180
-
-/* Extract the 'type' field for the i'th DIMM from the DBAM reg value passed */
-#define DBAM_DIMM(i, reg) ((((reg) >> (4*i))) & 0xF)
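-/* e.g. DBAM_DIMM(2, reg) extracts bits 11:8, the field for CSROWs 4 and 5 */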
-
-#define DBAM_MAX_VALUE 11
-
-#define DCLR0 0x90
-#define DCLR1 0x190
-#define REVE_WIDTH_128 BIT(16)
-#define WIDTH_128 BIT(11)
-
-#define DCHR0 0x94
-#define DCHR1 0x194
-#define DDR3_MODE BIT(8)
-
-#define DCT_SEL_LO 0x110
-#define dct_sel_baseaddr(pvt) ((pvt)->dct_sel_lo & 0xFFFFF800)
-#define dct_sel_interleave_addr(pvt) (((pvt)->dct_sel_lo >> 6) & 0x3)
-#define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0))
-#define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2))
-
-#define dct_ganging_enabled(pvt) ((boot_cpu_data.x86 == 0x10) && ((pvt)->dct_sel_lo & BIT(4)))
-
-#define dct_data_intlv_enabled(pvt) ((pvt)->dct_sel_lo & BIT(5))
-#define dct_memory_cleared(pvt) ((pvt)->dct_sel_lo & BIT(10))
-
-#define SWAP_INTLV_REG 0x10c
-
-#define DCT_SEL_HI 0x114
-
-/*
- * Function 3 - Misc Control
- */
-#define NBCTL 0x40
-
-#define NBCFG 0x44
-#define NBCFG_CHIPKILL BIT(23)
-#define NBCFG_ECC_ENABLE BIT(22)
-
-/* F3x48: NBSL */
-#define F10_NBSL_EXT_ERR_ECC 0x8
-#define NBSL_PP_OBS 0x2
-
-#define SCRCTRL 0x58
-
-#define F10_ONLINE_SPARE 0xB0
-#define online_spare_swap_done(pvt, c) (((pvt)->online_spare >> (1 + 2 * (c))) & 0x1)
-#define online_spare_bad_dramcs(pvt, c) (((pvt)->online_spare >> (4 + 4 * (c))) & 0x7)
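-
-/*
- * i.e. the swap-done flag for channel c is bit (1 + 2*c) of F10_ONLINE_SPARE
- * and the bad DRAM chip select is the 3-bit field starting at bit (4 + 4*c).
- */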
-
-#define F10_NB_ARRAY_ADDR 0xB8
-#define F10_NB_ARRAY_DRAM_ECC BIT(31)
-
-/* Bits [2:1] are used to select a 16-byte section within a 64-byte cacheline */
-#define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1)
-
-#define F10_NB_ARRAY_DATA 0xBC
-#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
- (BIT(((word) & 0xF) + 20) | \
- BIT(17) | bits)
-#define SET_NB_DRAM_INJECTION_READ(word, bits) \
- (BIT(((word) & 0xF) + 20) | \
- BIT(16) | bits)
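-
-/*
- * In both macros, bit ((word & 0xF) + 20) selects the 16-bit word within the
- * chosen section, BIT(17) requests a write injection, BIT(16) a read
- * injection, and 'bits' is the ECC vector to inject.
- */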
-
-#define NBCAP 0xE8
-#define NBCAP_CHIPKILL BIT(4)
-#define NBCAP_SECDED BIT(3)
-#define NBCAP_DCT_DUAL BIT(0)
-
-#define EXT_NB_MCA_CFG 0x180
-
-/* MSRs */
-#define MSR_MCGCTL_NBE BIT(4)
-
-/* AMD places the first MC node at PCI device number 0x18. */
-static inline u8 get_node_id(struct pci_dev *pdev)
-{
- return PCI_SLOT(pdev->devfn) - 0x18;
-}
-
-enum amd_families {
- K8_CPUS = 0,
- F10_CPUS,
- F15_CPUS,
- NUM_FAMILIES,
-};
-
-/* Error injection control structure */
-struct error_injection {
- u32 section;
- u32 word;
- u32 bit_map;
-};
-
-/* low and high part of PCI config space regs */
-struct reg_pair {
- u32 lo, hi;
-};
-
-/*
- * See F1x[1, 0][7C:40] DRAM Base/Limit Registers
- */
-struct dram_range {
- struct reg_pair base;
- struct reg_pair lim;
-};
-
-/* A DCT's collection of chip selects */
-struct chip_select {
- u32 csbases[NUM_CHIPSELECTS];
- u8 b_cnt;
-
- u32 csmasks[NUM_CHIPSELECTS];
- u8 m_cnt;
-};
-
-struct amd64_pvt {
- struct low_ops *ops;
-
- /* pci_device handles which we utilize */
- struct pci_dev *F1, *F2, *F3;
-
- unsigned mc_node_id; /* MC index of this MC node */
- int ext_model; /* extended model value of this node */
- int channel_count;
-
- /* Raw registers */
- u32 dclr0; /* DRAM Configuration Low DCT0 reg */
- u32 dclr1; /* DRAM Configuration Low DCT1 reg */
- u32 dchr0; /* DRAM Configuration High DCT0 reg */
- u32 dchr1; /* DRAM Configuration High DCT1 reg */
- u32 nbcap; /* North Bridge Capabilities */
- u32 nbcfg; /* F10 North Bridge Configuration */
- u32 ext_nbcfg; /* Extended F10 North Bridge Configuration */
- u32 dhar; /* DRAM Hoist reg */
- u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
- u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
-
- /* one for each DCT */
- struct chip_select csels[2];
-
- /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
- struct dram_range ranges[DRAM_RANGES];
-
- u64 top_mem; /* top of memory below 4GB */
- u64 top_mem2; /* top of memory above 4GB */
-
- u32 dct_sel_lo; /* DRAM Controller Select Low */
- u32 dct_sel_hi; /* DRAM Controller Select High */
- u32 online_spare; /* On-Line spare Reg */
-
- /* x4 or x8 syndromes in use */
- u8 ecc_sym_sz;
-
- /* place to store error injection parameters prior to issue */
- struct error_injection injection;
-};
-
-static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
-{
- u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
-
- if (boot_cpu_data.x86 == 0xf)
- return addr;
-
- return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
-}
-
-static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i)
-{
- u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;
-
- if (boot_cpu_data.x86 == 0xf)
- return lim;
-
- return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim;
-}
-
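-/*
- * The 16-bit DRAM ECC syndrome is split across the MCA status register:
- * bits [54:47] hold syndrome[7:0] and bits [31:24] hold syndrome[15:8].
- */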
-static inline u16 extract_syndrome(u64 status)
-{
- return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00);
-}
-
-/*
- * per-node ECC settings descriptor
- */
-struct ecc_settings {
- u32 old_nbctl;
- bool nbctl_valid;
-
- struct flags {
- unsigned long nb_mce_enable:1;
- unsigned long nb_ecc_prev:1;
- } flags;
-};
-
-#ifdef CONFIG_EDAC_DEBUG
-#define NUM_DBG_ATTRS 5
-#else
-#define NUM_DBG_ATTRS 0
-#endif
-
-#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
-#define NUM_INJ_ATTRS 5
-#else
-#define NUM_INJ_ATTRS 0
-#endif
-
-extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
- amd64_inj_attrs[NUM_INJ_ATTRS];
-
-/*
- * Each of the PCI Device ID types has its own set of hardware accessor
- * functions and per-device encoding/decoding logic.
- */
-struct low_ops {
- int (*early_channel_count) (struct amd64_pvt *pvt);
- void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
- u16 syndrome);
- int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
- int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset,
- u32 *val, const char *func);
-};
-
-struct amd64_family_type {
- const char *ctl_name;
- u16 f1_id, f3_id;
- struct low_ops ops;
-};
-
-int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
- u32 val, const char *func);
-
-#define amd64_read_pci_cfg(pdev, offset, val) \
- __amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
-
-#define amd64_write_pci_cfg(pdev, offset, val) \
- __amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
-
-#define amd64_read_dct_pci_cfg(pvt, offset, val) \
- pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__)
-
-int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
- u64 *hole_offset, u64 *hole_size);
diff --git a/ANDROID_3.4.5/drivers/edac/amd64_edac_dbg.c b/ANDROID_3.4.5/drivers/edac/amd64_edac_dbg.c
deleted file mode 100644
index e3562288..00000000
--- a/ANDROID_3.4.5/drivers/edac/amd64_edac_dbg.c
+++ /dev/null
@@ -1,72 +0,0 @@
-#include "amd64_edac.h"
-
-#define EDAC_DCT_ATTR_SHOW(reg) \
-static ssize_t amd64_##reg##_show(struct mem_ctl_info *mci, char *data) \
-{ \
- struct amd64_pvt *pvt = mci->pvt_info; \
- return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
-}
-
-EDAC_DCT_ATTR_SHOW(dhar);
-EDAC_DCT_ATTR_SHOW(dbam0);
-EDAC_DCT_ATTR_SHOW(top_mem);
-EDAC_DCT_ATTR_SHOW(top_mem2);
-
-static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data)
-{
- u64 hole_base = 0;
- u64 hole_offset = 0;
- u64 hole_size = 0;
-
- amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
-
- return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
- hole_size);
-}
-
-/*
- * update NUM_DBG_ATTRS in case you add new members
- */
-struct mcidev_sysfs_attribute amd64_dbg_attrs[] = {
-
- {
- .attr = {
- .name = "dhar",
- .mode = (S_IRUGO)
- },
- .show = amd64_dhar_show,
- .store = NULL,
- },
- {
- .attr = {
- .name = "dbam",
- .mode = (S_IRUGO)
- },
- .show = amd64_dbam0_show,
- .store = NULL,
- },
- {
- .attr = {
- .name = "topmem",
- .mode = (S_IRUGO)
- },
- .show = amd64_top_mem_show,
- .store = NULL,
- },
- {
- .attr = {
- .name = "topmem2",
- .mode = (S_IRUGO)
- },
- .show = amd64_top_mem2_show,
- .store = NULL,
- },
- {
- .attr = {
- .name = "dram_hole",
- .mode = (S_IRUGO)
- },
- .show = amd64_hole_show,
- .store = NULL,
- },
-};
diff --git a/ANDROID_3.4.5/drivers/edac/amd64_edac_inj.c b/ANDROID_3.4.5/drivers/edac/amd64_edac_inj.c
deleted file mode 100644
index 303f10e0..00000000
--- a/ANDROID_3.4.5/drivers/edac/amd64_edac_inj.c
+++ /dev/null
@@ -1,213 +0,0 @@
-#include "amd64_edac.h"
-
-static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- return sprintf(buf, "0x%x\n", pvt->injection.section);
-}
-
-/*
- * store error injection section value which refers to one of 4 16-byte sections
- * within a 64-byte cacheline
- *
- * range: 0..3
- */
-static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- unsigned long value;
- int ret = 0;
-
- ret = strict_strtoul(data, 10, &value);
- if (ret != -EINVAL) {
-
- if (value > 3) {
- amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
- return -EINVAL;
- }
-
- pvt->injection.section = (u32) value;
- return count;
- }
- return ret;
-}
-
-static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- return sprintf(buf, "0x%x\n", pvt->injection.word);
-}
-
-/*
- * store error injection word value which refers to one of the 9 16-bit words of the
- * 16-byte (128-bit + ECC bits) section
- *
- * range: 0..8
- */
-static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- unsigned long value;
- int ret = 0;
-
- ret = strict_strtoul(data, 10, &value);
- if (ret != -EINVAL) {
-
- if (value > 8) {
- amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
- return -EINVAL;
- }
-
- pvt->injection.word = (u32) value;
- return count;
- }
- return ret;
-}
-
-static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
-}
-
-/*
- * store the 16-bit error injection vector which enables injecting errors into
- * the corresponding bits within the error injection word above. When used
- * during a DRAM ECC read, it holds the contents of the DRAM ECC bits.
- */
-static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- unsigned long value;
- int ret = 0;
-
- ret = strict_strtoul(data, 16, &value);
- if (ret != -EINVAL) {
-
- if (value & 0xFFFF0000) {
- amd64_warn("%s: invalid EccVector: 0x%lx\n",
- __func__, value);
- return -EINVAL;
- }
-
- pvt->injection.bit_map = (u32) value;
- return count;
- }
- return ret;
-}
-
-/*
- * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
- * fields needed by the injection registers and read the NB Array Data Port.
- */
-static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- unsigned long value;
- u32 section, word_bits;
- int ret = 0;
-
- ret = strict_strtoul(data, 10, &value);
- if (ret != -EINVAL) {
-
- /* Form value to choose 16-byte section of cacheline */
- section = F10_NB_ARRAY_DRAM_ECC |
- SET_NB_ARRAY_ADDRESS(pvt->injection.section);
- amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
-
- word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
- pvt->injection.bit_map);
-
- /* Issue 'word' and 'bit' along with the READ request */
- amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
-
- debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
-
- return count;
- }
- return ret;
-}
-
-/*
- * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
- * fields needed by the injection registers.
- */
-static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- unsigned long value;
- u32 section, word_bits;
- int ret = 0;
-
- ret = strict_strtoul(data, 10, &value);
- if (ret != -EINVAL) {
-
- /* Form value to choose 16-byte section of cacheline */
- section = F10_NB_ARRAY_DRAM_ECC |
- SET_NB_ARRAY_ADDRESS(pvt->injection.section);
- amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
-
- word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
- pvt->injection.bit_map);
-
-		/* Issue 'word' and 'bit' along with the WRITE request */
- amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
-
- debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
-
- return count;
- }
- return ret;
-}
-
-/*
- * update NUM_INJ_ATTRS in case you add new members
- */
-struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
-
- {
- .attr = {
- .name = "inject_section",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = amd64_inject_section_show,
- .store = amd64_inject_section_store,
- },
- {
- .attr = {
- .name = "inject_word",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = amd64_inject_word_show,
- .store = amd64_inject_word_store,
- },
- {
- .attr = {
- .name = "inject_ecc_vector",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = amd64_inject_ecc_vector_show,
- .store = amd64_inject_ecc_vector_store,
- },
- {
- .attr = {
- .name = "inject_write",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = NULL,
- .store = amd64_inject_write_store,
- },
- {
- .attr = {
- .name = "inject_read",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = NULL,
- .store = amd64_inject_read_store,
- },
-};
diff --git a/ANDROID_3.4.5/drivers/edac/amd76x_edac.c b/ANDROID_3.4.5/drivers/edac/amd76x_edac.c
deleted file mode 100644
index f8fd3c80..00000000
--- a/ANDROID_3.4.5/drivers/edac/amd76x_edac.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * AMD 76x Memory Controller kernel module
- * (C) 2003 Linux Networx (http://lnxi.com)
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Written by Thayne Harbaugh
- * Based on work by Dan Hollis <goemon at anime dot net> and others.
- * http://www.anime.net/~goemon/linux-ecc/
- *
- * $Id: edac_amd76x.c,v 1.4.2.5 2005/10/05 00:43:44 dsp_llnl Exp $
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/edac.h>
-#include "edac_core.h"
-
-#define AMD76X_REVISION " Ver: 2.0.2"
-#define EDAC_MOD_STR "amd76x_edac"
-
-#define amd76x_printk(level, fmt, arg...) \
- edac_printk(level, "amd76x", fmt, ##arg)
-
-#define amd76x_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
-
-#define AMD76X_NR_CSROWS 8
-#define AMD76X_NR_CHANS 1
-#define AMD76X_NR_DIMMS 4
-
-/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
-
-#define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b)
- *
- * 31:16 reserved
- * 15:14 SERR enabled: x1=ue 1x=ce
- * 13 reserved
- * 12 diag: disabled, enabled
- * 11:10 mode: dis, EC, ECC, ECC+scrub
- * 9:8 status: x1=ue 1x=ce
- * 7:4 UE cs row
- * 3:0 CE cs row
- */
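-
-/*
- * Bits 8 (UE) and 9 (CE) above are the pending status flags;
- * amd76x_get_error_info() writes them back after latching the row fields so
- * that further errors will be reported.
- */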
-
-#define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b)
- *
- * 31:26 clock disable 5 - 0
- * 25 SDRAM init
- * 24 reserved
- * 23 mode register service
- * 22:21 suspend to RAM
- * 20 burst refresh enable
- * 19 refresh disable
- * 18 reserved
- * 17:16 cycles-per-refresh
- * 15:8 reserved
- * 7:0 x4 mode enable 7 - 0
- */
-
-#define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b)
- *
- * 31:23 chip-select base
- * 22:16 reserved
- * 15:7 chip-select mask
- * 6:3 reserved
- * 2:1 address mode
- * 0 chip-select enable
- */
-
-struct amd76x_error_info {
- u32 ecc_mode_status;
-};
-
-enum amd76x_chips {
- AMD761 = 0,
- AMD762
-};
-
-struct amd76x_dev_info {
- const char *ctl_name;
-};
-
-static const struct amd76x_dev_info amd76x_devs[] = {
- [AMD761] = {
- .ctl_name = "AMD761"},
- [AMD762] = {
- .ctl_name = "AMD762"},
-};
-
-static struct edac_pci_ctl_info *amd76x_pci;
-
-/**
- * amd76x_get_error_info - fetch error information
- * @mci: Memory controller
- * @info: Info to fill in
- *
- * Fetch and store the AMD76x ECC status. Clear pending status
- * on the chip so that further errors will be reported
- */
-static void amd76x_get_error_info(struct mem_ctl_info *mci,
- struct amd76x_error_info *info)
-{
- struct pci_dev *pdev;
-
- pdev = to_pci_dev(mci->dev);
- pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS,
- &info->ecc_mode_status);
-
- if (info->ecc_mode_status & BIT(8))
- pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
- (u32) BIT(8), (u32) BIT(8));
-
- if (info->ecc_mode_status & BIT(9))
- pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
- (u32) BIT(9), (u32) BIT(9));
-}
-
-/**
- * amd76x_process_error_info - Error check
- * @mci: Memory controller
- * @info: Previously fetched information from chip
- * @handle_errors: 1 if we should do recovery
- *
- * Process the chip state and decide if an error has occurred.
- * A return of 1 indicates an error. Also if handle_errors is true
- * then attempt to handle and clean up after the error
- */
-static int amd76x_process_error_info(struct mem_ctl_info *mci,
- struct amd76x_error_info *info,
- int handle_errors)
-{
- int error_found;
- u32 row;
-
- error_found = 0;
-
- /*
- * Check for an uncorrectable error
- */
- if (info->ecc_mode_status & BIT(8)) {
- error_found = 1;
-
- if (handle_errors) {
- row = (info->ecc_mode_status >> 4) & 0xf;
- edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
- row, mci->ctl_name);
- }
- }
-
- /*
- * Check for a correctable error
- */
- if (info->ecc_mode_status & BIT(9)) {
- error_found = 1;
-
- if (handle_errors) {
- row = info->ecc_mode_status & 0xf;
- edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
- 0, row, 0, mci->ctl_name);
- }
- }
-
- return error_found;
-}
-
-/**
- * amd76x_check - Poll the controller
- * @mci: Memory controller
- *
- * Called by the poll handlers this function reads the status
- * from the controller and checks for errors.
- */
-static void amd76x_check(struct mem_ctl_info *mci)
-{
- struct amd76x_error_info info;
- debugf3("%s()\n", __func__);
- amd76x_get_error_info(mci, &info);
- amd76x_process_error_info(mci, &info, 1);
-}
-
-static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
- enum edac_type edac_mode)
-{
- struct csrow_info *csrow;
- u32 mba, mba_base, mba_mask, dms;
- int index;
-
- for (index = 0; index < mci->nr_csrows; index++) {
- csrow = &mci->csrows[index];
-
- /* find the DRAM Chip Select Base address and mask */
- pci_read_config_dword(pdev,
- AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
-
- if (!(mba & BIT(0)))
- continue;
-
- mba_base = mba & 0xff800000UL;
- mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
- pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
- csrow->first_page = mba_base >> PAGE_SHIFT;
- csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->page_mask = mba_mask >> PAGE_SHIFT;
- csrow->grain = csrow->nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_RDDR;
- csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
- csrow->edac_mode = edac_mode;
- }
-}
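
/*
 * Worked example for the decode above (hypothetical register value, not
 * read from real hardware): with mba = 0x00800001 the chip select is
 * enabled (bit 0), mba_base = 0x00800000 and mba_mask = 0x007fffff, so
 * with 4 KiB pages the csrow gets first_page = 0x800 and
 * nr_pages = 0x800 (2048 pages, i.e. an 8 MiB row).
 */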
-
-/**
- * amd76x_probe1 - Perform set up for detected device
- * @pdev: PCI device detected
- * @dev_idx: Device type index
- *
- * We have found an AMD76x and now need to set up the memory
- * controller status reporting. We configure and set up the
- * memory controller reporting and claim the device.
- */
-static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
-{
- static const enum edac_type ems_modes[] = {
- EDAC_NONE,
- EDAC_EC,
- EDAC_SECDED,
- EDAC_SECDED
- };
- struct mem_ctl_info *mci = NULL;
- u32 ems;
- u32 ems_mode;
- struct amd76x_error_info discard;
-
- debugf0("%s()\n", __func__);
- pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
- ems_mode = (ems >> 10) & 0x3;
- mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0);
-
- if (mci == NULL) {
- return -ENOMEM;
- }
-
- debugf0("%s(): mci = %p\n", __func__, mci);
- mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_RDDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- mci->edac_cap = ems_mode ?
- (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = AMD76X_REVISION;
- mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
- mci->dev_name = pci_name(pdev);
- mci->edac_check = amd76x_check;
- mci->ctl_page_to_phys = NULL;
-
- amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]);
- amd76x_get_error_info(mci, &discard); /* clear counters */
-
- /* Here we assume that we will never see multiple instances of this
- * type of memory controller. The ID is therefore hardcoded to 0.
- */
- if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
- goto fail;
- }
-
- /* allocating generic PCI control info */
- amd76x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
- if (!amd76x_pci) {
- printk(KERN_WARNING
- "%s(): Unable to create PCI control\n",
- __func__);
- printk(KERN_WARNING
- "%s(): PCI error report via EDAC not setup\n",
- __func__);
- }
-
- /* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
- return 0;
-
-fail:
- edac_mc_free(mci);
- return -ENODEV;
-}
-
-/* returns count (>= 0), or negative on error */
-static int __devinit amd76x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- debugf0("%s()\n", __func__);
-
- /* don't need to call pci_enable_device() */
- return amd76x_probe1(pdev, ent->driver_data);
-}
-
-/**
- * amd76x_remove_one - driver shutdown
- * @pdev: PCI device being handed back
- *
- * Called when the driver is unloaded. Find the matching mci
- * structure for the device then delete the mci and free the
- * resources.
- */
-static void __devexit amd76x_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
-
- debugf0("%s()\n", __func__);
-
- if (amd76x_pci)
- edac_pci_release_generic_ctl(amd76x_pci);
-
- if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
- return;
-
- edac_mc_free(mci);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(amd76x_pci_tbl) = {
- {
- PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- AMD762},
- {
- PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- AMD761},
- {
- 0,
- } /* 0 terminated list. */
-};
-
-MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
-
-static struct pci_driver amd76x_driver = {
- .name = EDAC_MOD_STR,
- .probe = amd76x_init_one,
- .remove = __devexit_p(amd76x_remove_one),
- .id_table = amd76x_pci_tbl,
-};
-
-static int __init amd76x_init(void)
-{
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- return pci_register_driver(&amd76x_driver);
-}
-
-static void __exit amd76x_exit(void)
-{
- pci_unregister_driver(&amd76x_driver);
-}
-
-module_init(amd76x_init);
-module_exit(amd76x_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
-MODULE_DESCRIPTION("MC support for AMD 76x memory controllers");
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/ANDROID_3.4.5/drivers/edac/amd8111_edac.c b/ANDROID_3.4.5/drivers/edac/amd8111_edac.c
deleted file mode 100644
index ddd89005..00000000
--- a/ANDROID_3.4.5/drivers/edac/amd8111_edac.c
+++ /dev/null
@@ -1,594 +0,0 @@
-/*
- * amd8111_edac.c, AMD8111 Hyper Transport chip EDAC kernel module
- *
- * Copyright (c) 2008 Wind River Systems, Inc.
- *
- * Authors: Cao Qingtao <qingtao.cao@windriver.com>
- * Benjamin Walsh <benjamin.walsh@windriver.com>
- * Hu Yongqi <yongqi.hu@windriver.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
-#include <linux/edac.h>
-#include <linux/pci_ids.h>
-#include <asm/io.h>
-
-#include "edac_core.h"
-#include "edac_module.h"
-#include "amd8111_edac.h"
-
-#define AMD8111_EDAC_REVISION " Ver: 1.0.0"
-#define AMD8111_EDAC_MOD_STR "amd8111_edac"
-
-#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
-
-enum amd8111_edac_devs {
- LPC_BRIDGE = 0,
-};
-
-enum amd8111_edac_pcis {
- PCI_BRIDGE = 0,
-};
-
-/* Wrapper functions for accessing PCI configuration space */
-static int edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
-{
- int ret;
-
- ret = pci_read_config_dword(dev, reg, val32);
- if (ret != 0)
- printk(KERN_ERR AMD8111_EDAC_MOD_STR
- " PCI Access Read Error at 0x%x\n", reg);
-
- return ret;
-}
-
-static void edac_pci_read_byte(struct pci_dev *dev, int reg, u8 *val8)
-{
- int ret;
-
- ret = pci_read_config_byte(dev, reg, val8);
- if (ret != 0)
- printk(KERN_ERR AMD8111_EDAC_MOD_STR
- " PCI Access Read Error at 0x%x\n", reg);
-}
-
-static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
-{
- int ret;
-
- ret = pci_write_config_dword(dev, reg, val32);
- if (ret != 0)
- printk(KERN_ERR AMD8111_EDAC_MOD_STR
- " PCI Access Write Error at 0x%x\n", reg);
-}
-
-static void edac_pci_write_byte(struct pci_dev *dev, int reg, u8 val8)
-{
- int ret;
-
- ret = pci_write_config_byte(dev, reg, val8);
- if (ret != 0)
- printk(KERN_ERR AMD8111_EDAC_MOD_STR
- " PCI Access Write Error at 0x%x\n", reg);
-}
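
/*
 * Note on the wrappers above: only edac_pci_read_dword() propagates its
 * return value; the byte read and both write wrappers just log a failure
 * and return void, so configuration-space access in this driver is
 * treated as best effort.
 */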
-
-/*
- * device-specific methods for amd8111 PCI Bridge Controller
- *
- * Error reporting and handling for the amd8111 chipset can be found
- * in section 3.1.2 of its datasheet, p. 37
- */
-static void amd8111_pci_bridge_init(struct amd8111_pci_info *pci_info)
-{
- u32 val32;
- struct pci_dev *dev = pci_info->dev;
-
- /* First clear error detection flags on the host interface */
-
- /* Clear SSE/SMA/STA flags in the global status register*/
- edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
- if (val32 & PCI_STSCMD_CLEAR_MASK)
- edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
-
- /* Clear CRC and Link Fail flags in HT Link Control reg */
- edac_pci_read_dword(dev, REG_HT_LINK, &val32);
- if (val32 & HT_LINK_CLEAR_MASK)
- edac_pci_write_dword(dev, REG_HT_LINK, val32);
-
- /* Second clear all fault on the secondary interface */
-
- /* Clear error flags in the memory-base limit reg. */
- edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
- if (val32 & MEM_LIMIT_CLEAR_MASK)
- edac_pci_write_dword(dev, REG_MEM_LIM, val32);
-
- /* Clear Discard Timer Expired flag in Interrupt/Bridge Control reg */
- edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
- if (val32 & PCI_INTBRG_CTRL_CLEAR_MASK)
- edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
-
- /* Last enable error detections */
- if (edac_op_state == EDAC_OPSTATE_POLL) {
- /* Enable System Error reporting in global status register */
- edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
- val32 |= PCI_STSCMD_SERREN;
- edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
-
- /* Enable CRC Sync flood packets to HyperTransport Link */
- edac_pci_read_dword(dev, REG_HT_LINK, &val32);
- val32 |= HT_LINK_CRCFEN;
- edac_pci_write_dword(dev, REG_HT_LINK, val32);
-
- /* Enable SSE reporting etc in Interrupt control reg */
- edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
- val32 |= PCI_INTBRG_CTRL_POLL_MASK;
- edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
- }
-}
-
-static void amd8111_pci_bridge_exit(struct amd8111_pci_info *pci_info)
-{
- u32 val32;
- struct pci_dev *dev = pci_info->dev;
-
- if (edac_op_state == EDAC_OPSTATE_POLL) {
- /* Disable System Error reporting */
- edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
- val32 &= ~PCI_STSCMD_SERREN;
- edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
-
- /* Disable CRC flood packets */
- edac_pci_read_dword(dev, REG_HT_LINK, &val32);
- val32 &= ~HT_LINK_CRCFEN;
- edac_pci_write_dword(dev, REG_HT_LINK, val32);
-
- /* Disable DTSERREN/MARSP/SERREN in Interrupt Control reg */
- edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
- val32 &= ~PCI_INTBRG_CTRL_POLL_MASK;
- edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
- }
-}
-
-static void amd8111_pci_bridge_check(struct edac_pci_ctl_info *edac_dev)
-{
- struct amd8111_pci_info *pci_info = edac_dev->pvt_info;
- struct pci_dev *dev = pci_info->dev;
- u32 val32;
-
- /* Check out PCI Bridge Status and Command Register */
- edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
- if (val32 & PCI_STSCMD_CLEAR_MASK) {
-		printk(KERN_INFO "Error(s) in PCI bridge status and command "
- "register on device %s\n", pci_info->ctl_name);
- printk(KERN_INFO "SSE: %d, RMA: %d, RTA: %d\n",
- (val32 & PCI_STSCMD_SSE) != 0,
- (val32 & PCI_STSCMD_RMA) != 0,
- (val32 & PCI_STSCMD_RTA) != 0);
-
- val32 |= PCI_STSCMD_CLEAR_MASK;
- edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check out HyperTransport Link Control Register */
- edac_pci_read_dword(dev, REG_HT_LINK, &val32);
- if (val32 & HT_LINK_LKFAIL) {
-		printk(KERN_INFO "Error(s) in HyperTransport link control "
- "register on device %s\n", pci_info->ctl_name);
- printk(KERN_INFO "LKFAIL: %d\n",
- (val32 & HT_LINK_LKFAIL) != 0);
-
- val32 |= HT_LINK_LKFAIL;
- edac_pci_write_dword(dev, REG_HT_LINK, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check out PCI Interrupt and Bridge Control Register */
- edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
- if (val32 & PCI_INTBRG_CTRL_DTSTAT) {
-		printk(KERN_INFO "Error(s) in PCI interrupt and bridge control "
- "register on device %s\n", pci_info->ctl_name);
- printk(KERN_INFO "DTSTAT: %d\n",
- (val32 & PCI_INTBRG_CTRL_DTSTAT) != 0);
-
- val32 |= PCI_INTBRG_CTRL_DTSTAT;
- edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check out PCI Bridge Memory Base-Limit Register */
- edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
- if (val32 & MEM_LIMIT_CLEAR_MASK) {
- printk(KERN_INFO
- "Error(s) in mem limit register on %s device\n",
- pci_info->ctl_name);
- printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
- "RTA: %d, STA: %d, MDPE: %d\n",
- (val32 & MEM_LIMIT_DPE) != 0,
- (val32 & MEM_LIMIT_RSE) != 0,
- (val32 & MEM_LIMIT_RMA) != 0,
- (val32 & MEM_LIMIT_RTA) != 0,
- (val32 & MEM_LIMIT_STA) != 0,
- (val32 & MEM_LIMIT_MDPE) != 0);
-
- val32 |= MEM_LIMIT_CLEAR_MASK;
- edac_pci_write_dword(dev, REG_MEM_LIM, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-}
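
/*
 * Note on the pattern used in the init and check routines above (an
 * observation from the code, not a quote from the datasheet): each status
 * register is read, the latched error bits are OR-ed back in and the value
 * is written back.  That only clears the condition because these status
 * bits behave as write-one-to-clear, so writing the set bits back
 * acknowledges exactly the errors that were observed.
 */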
-
-static struct resource *legacy_io_res;
-static int at_compat_reg_broken;
-#define LEGACY_NR_PORTS 1
-
-/* device-specific methods for amd8111 LPC Bridge device */
-static void amd8111_lpc_bridge_init(struct amd8111_dev_info *dev_info)
-{
- u8 val8;
- struct pci_dev *dev = dev_info->dev;
-
- /* First clear REG_AT_COMPAT[SERR, IOCHK] if necessary */
- legacy_io_res = request_region(REG_AT_COMPAT, LEGACY_NR_PORTS,
- AMD8111_EDAC_MOD_STR);
- if (!legacy_io_res)
- printk(KERN_INFO "%s: failed to request legacy I/O region "
- "start %d, len %d\n", __func__,
- REG_AT_COMPAT, LEGACY_NR_PORTS);
- else {
- val8 = __do_inb(REG_AT_COMPAT);
- if (val8 == 0xff) { /* buggy port */
- printk(KERN_INFO "%s: port %d is buggy, not supported"
- " by hardware?\n", __func__, REG_AT_COMPAT);
- at_compat_reg_broken = 1;
- release_region(REG_AT_COMPAT, LEGACY_NR_PORTS);
- legacy_io_res = NULL;
- } else {
- u8 out8 = 0;
- if (val8 & AT_COMPAT_SERR)
- out8 = AT_COMPAT_CLRSERR;
- if (val8 & AT_COMPAT_IOCHK)
- out8 |= AT_COMPAT_CLRIOCHK;
- if (out8 > 0)
- __do_outb(out8, REG_AT_COMPAT);
- }
- }
-
- /* Second clear error flags on LPC bridge */
- edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8);
- if (val8 & IO_CTRL_1_CLEAR_MASK)
- edac_pci_write_byte(dev, REG_IO_CTRL_1, val8);
-}
-
-static void amd8111_lpc_bridge_exit(struct amd8111_dev_info *dev_info)
-{
- if (legacy_io_res)
- release_region(REG_AT_COMPAT, LEGACY_NR_PORTS);
-}
-
-static void amd8111_lpc_bridge_check(struct edac_device_ctl_info *edac_dev)
-{
- struct amd8111_dev_info *dev_info = edac_dev->pvt_info;
- struct pci_dev *dev = dev_info->dev;
- u8 val8;
-
- edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8);
- if (val8 & IO_CTRL_1_CLEAR_MASK) {
- printk(KERN_INFO
- "Error(s) in IO control register on %s device\n",
- dev_info->ctl_name);
- printk(KERN_INFO "LPC ERR: %d, PW2LPC: %d\n",
- (val8 & IO_CTRL_1_LPC_ERR) != 0,
- (val8 & IO_CTRL_1_PW2LPC) != 0);
-
- val8 |= IO_CTRL_1_CLEAR_MASK;
- edac_pci_write_byte(dev, REG_IO_CTRL_1, val8);
-
- edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
- }
-
- if (at_compat_reg_broken == 0) {
- u8 out8 = 0;
- val8 = __do_inb(REG_AT_COMPAT);
- if (val8 & AT_COMPAT_SERR)
- out8 = AT_COMPAT_CLRSERR;
- if (val8 & AT_COMPAT_IOCHK)
- out8 |= AT_COMPAT_CLRIOCHK;
- if (out8 > 0) {
- __do_outb(out8, REG_AT_COMPAT);
- edac_device_handle_ue(edac_dev, 0, 0,
- edac_dev->ctl_name);
- }
- }
-}
-
-/* General devices represented by edac_device_ctl_info */
-static struct amd8111_dev_info amd8111_devices[] = {
- [LPC_BRIDGE] = {
- .err_dev = PCI_DEVICE_ID_AMD_8111_LPC,
- .ctl_name = "lpc",
- .init = amd8111_lpc_bridge_init,
- .exit = amd8111_lpc_bridge_exit,
- .check = amd8111_lpc_bridge_check,
- },
- {0},
-};
-
-/* PCI controllers represented by edac_pci_ctl_info */
-static struct amd8111_pci_info amd8111_pcis[] = {
- [PCI_BRIDGE] = {
- .err_dev = PCI_DEVICE_ID_AMD_8111_PCI,
- .ctl_name = "AMD8111_PCI_Controller",
- .init = amd8111_pci_bridge_init,
- .exit = amd8111_pci_bridge_exit,
- .check = amd8111_pci_bridge_check,
- },
- {0},
-};
-
-static int amd8111_dev_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- struct amd8111_dev_info *dev_info = &amd8111_devices[id->driver_data];
-
- dev_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
- dev_info->err_dev, NULL);
-
- if (!dev_info->dev) {
-		printk(KERN_ERR "EDAC device not found: "
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, dev_info->err_dev,
- dev_info->ctl_name);
- return -ENODEV;
- }
-
- if (pci_enable_device(dev_info->dev)) {
- pci_dev_put(dev_info->dev);
-		printk(KERN_ERR "failed to enable: "
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, dev_info->err_dev,
- dev_info->ctl_name);
- return -ENODEV;
- }
-
- /*
-	 * We do not allocate an extra private structure for the
-	 * edac_device_ctl_info, but make use of the existing one
-	 * instead.
- */
- dev_info->edac_idx = edac_device_alloc_index();
- dev_info->edac_dev =
- edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1,
- NULL, 0, 0,
- NULL, 0, dev_info->edac_idx);
- if (!dev_info->edac_dev)
- return -ENOMEM;
-
- dev_info->edac_dev->pvt_info = dev_info;
- dev_info->edac_dev->dev = &dev_info->dev->dev;
- dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
- dev_info->edac_dev->ctl_name = dev_info->ctl_name;
- dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- dev_info->edac_dev->edac_check = dev_info->check;
-
- if (dev_info->init)
- dev_info->init(dev_info);
-
- if (edac_device_add_device(dev_info->edac_dev) > 0) {
- printk(KERN_ERR "failed to add edac_dev for %s\n",
- dev_info->ctl_name);
- edac_device_free_ctl_info(dev_info->edac_dev);
- return -ENODEV;
- }
-
- printk(KERN_INFO "added one edac_dev on AMD8111 "
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, dev_info->err_dev,
- dev_info->ctl_name);
-
- return 0;
-}
-
-static void amd8111_dev_remove(struct pci_dev *dev)
-{
- struct amd8111_dev_info *dev_info;
-
- for (dev_info = amd8111_devices; dev_info->err_dev; dev_info++)
- if (dev_info->dev->device == dev->device)
- break;
-
- if (!dev_info->err_dev) /* should never happen */
- return;
-
- if (dev_info->edac_dev) {
- edac_device_del_device(dev_info->edac_dev->dev);
- edac_device_free_ctl_info(dev_info->edac_dev);
- }
-
- if (dev_info->exit)
- dev_info->exit(dev_info);
-
- pci_dev_put(dev_info->dev);
-}
-
-static int amd8111_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- struct amd8111_pci_info *pci_info = &amd8111_pcis[id->driver_data];
-
- pci_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
- pci_info->err_dev, NULL);
-
- if (!pci_info->dev) {
-		printk(KERN_ERR "EDAC device not found: "
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, pci_info->err_dev,
- pci_info->ctl_name);
- return -ENODEV;
- }
-
- if (pci_enable_device(pci_info->dev)) {
- pci_dev_put(pci_info->dev);
-		printk(KERN_ERR "failed to enable: "
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, pci_info->err_dev,
- pci_info->ctl_name);
- return -ENODEV;
- }
-
- /*
-	 * We do not allocate an extra private structure for the
-	 * edac_pci_ctl_info, but make use of the existing one
-	 * instead.
- */
- pci_info->edac_idx = edac_pci_alloc_index();
- pci_info->edac_dev = edac_pci_alloc_ctl_info(0, pci_info->ctl_name);
- if (!pci_info->edac_dev)
- return -ENOMEM;
-
- pci_info->edac_dev->pvt_info = pci_info;
- pci_info->edac_dev->dev = &pci_info->dev->dev;
- pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
- pci_info->edac_dev->ctl_name = pci_info->ctl_name;
- pci_info->edac_dev->dev_name = dev_name(&pci_info->dev->dev);
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- pci_info->edac_dev->edac_check = pci_info->check;
-
- if (pci_info->init)
- pci_info->init(pci_info);
-
- if (edac_pci_add_device(pci_info->edac_dev, pci_info->edac_idx) > 0) {
- printk(KERN_ERR "failed to add edac_pci for %s\n",
- pci_info->ctl_name);
- edac_pci_free_ctl_info(pci_info->edac_dev);
- return -ENODEV;
- }
-
- printk(KERN_INFO "added one edac_pci on AMD8111 "
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, pci_info->err_dev,
- pci_info->ctl_name);
-
- return 0;
-}
-
-static void amd8111_pci_remove(struct pci_dev *dev)
-{
- struct amd8111_pci_info *pci_info;
-
- for (pci_info = amd8111_pcis; pci_info->err_dev; pci_info++)
- if (pci_info->dev->device == dev->device)
- break;
-
- if (!pci_info->err_dev) /* should never happen */
- return;
-
- if (pci_info->edac_dev) {
- edac_pci_del_device(pci_info->edac_dev->dev);
- edac_pci_free_ctl_info(pci_info->edac_dev);
- }
-
- if (pci_info->exit)
- pci_info->exit(pci_info);
-
- pci_dev_put(pci_info->dev);
-}
-
-/* PCI Device ID table for general EDAC device */
-static const struct pci_device_id amd8111_edac_dev_tbl[] = {
- {
- PCI_VEND_DEV(AMD, 8111_LPC),
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .class = 0,
- .class_mask = 0,
- .driver_data = LPC_BRIDGE,
- },
- {
- 0,
- } /* table is NULL-terminated */
-};
-MODULE_DEVICE_TABLE(pci, amd8111_edac_dev_tbl);
-
-static struct pci_driver amd8111_edac_dev_driver = {
- .name = "AMD8111_EDAC_DEV",
- .probe = amd8111_dev_probe,
- .remove = amd8111_dev_remove,
- .id_table = amd8111_edac_dev_tbl,
-};
-
-/* PCI Device ID table for EDAC PCI controller */
-static const struct pci_device_id amd8111_edac_pci_tbl[] = {
- {
- PCI_VEND_DEV(AMD, 8111_PCI),
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .class = 0,
- .class_mask = 0,
- .driver_data = PCI_BRIDGE,
- },
- {
- 0,
- } /* table is NULL-terminated */
-};
-MODULE_DEVICE_TABLE(pci, amd8111_edac_pci_tbl);
-
-static struct pci_driver amd8111_edac_pci_driver = {
- .name = "AMD8111_EDAC_PCI",
- .probe = amd8111_pci_probe,
- .remove = amd8111_pci_remove,
- .id_table = amd8111_edac_pci_tbl,
-};
-
-static int __init amd8111_edac_init(void)
-{
- int val;
-
- printk(KERN_INFO "AMD8111 EDAC driver " AMD8111_EDAC_REVISION "\n");
- printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
-
- /* Only POLL mode supported so far */
- edac_op_state = EDAC_OPSTATE_POLL;
-
- val = pci_register_driver(&amd8111_edac_dev_driver);
- val |= pci_register_driver(&amd8111_edac_pci_driver);
-
- return val;
-}
-
-static void __exit amd8111_edac_exit(void)
-{
- pci_unregister_driver(&amd8111_edac_pci_driver);
- pci_unregister_driver(&amd8111_edac_dev_driver);
-}
-
-
-module_init(amd8111_edac_init);
-module_exit(amd8111_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n");
-MODULE_DESCRIPTION("AMD8111 HyperTransport I/O Hub EDAC kernel module");
diff --git a/ANDROID_3.4.5/drivers/edac/amd8111_edac.h b/ANDROID_3.4.5/drivers/edac/amd8111_edac.h
deleted file mode 100644
index 35794331..00000000
--- a/ANDROID_3.4.5/drivers/edac/amd8111_edac.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * amd8111_edac.h, EDAC defs for AMD8111 hypertransport chip
- *
- * Copyright (c) 2008 Wind River Systems, Inc.
- *
- * Authors: Cao Qingtao <qingtao.cao@windriver.com>
- * Benjamin Walsh <benjamin.walsh@windriver.com>
- * Hu Yongqi <yongqi.hu@windriver.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _AMD8111_EDAC_H_
-#define _AMD8111_EDAC_H_
-
-/************************************************************
- * PCI Bridge Status and Command Register, DevA:0x04
- ************************************************************/
-#define REG_PCI_STSCMD 0x04
-enum pci_stscmd_bits {
- PCI_STSCMD_SSE = BIT(30),
- PCI_STSCMD_RMA = BIT(29),
- PCI_STSCMD_RTA = BIT(28),
- PCI_STSCMD_SERREN = BIT(8),
- PCI_STSCMD_CLEAR_MASK = (PCI_STSCMD_SSE |
- PCI_STSCMD_RMA |
- PCI_STSCMD_RTA)
-};
-
-/************************************************************
- * PCI Bridge Memory Base-Limit Register, DevA:0x1c
- ************************************************************/
-#define REG_MEM_LIM 0x1c
-enum mem_limit_bits {
- MEM_LIMIT_DPE = BIT(31),
- MEM_LIMIT_RSE = BIT(30),
- MEM_LIMIT_RMA = BIT(29),
- MEM_LIMIT_RTA = BIT(28),
- MEM_LIMIT_STA = BIT(27),
- MEM_LIMIT_MDPE = BIT(24),
- MEM_LIMIT_CLEAR_MASK = (MEM_LIMIT_DPE |
- MEM_LIMIT_RSE |
- MEM_LIMIT_RMA |
- MEM_LIMIT_RTA |
- MEM_LIMIT_STA |
- MEM_LIMIT_MDPE)
-};
-
-/************************************************************
- * HyperTransport Link Control Register, DevA:0xc4
- ************************************************************/
-#define REG_HT_LINK 0xc4
-enum ht_link_bits {
- HT_LINK_LKFAIL = BIT(4),
- HT_LINK_CRCFEN = BIT(1),
- HT_LINK_CLEAR_MASK = (HT_LINK_LKFAIL)
-};
-
-/************************************************************
- * PCI Bridge Interrupt and Bridge Control, DevA:0x3c
- ************************************************************/
-#define REG_PCI_INTBRG_CTRL 0x3c
-enum pci_intbrg_ctrl_bits {
- PCI_INTBRG_CTRL_DTSERREN = BIT(27),
- PCI_INTBRG_CTRL_DTSTAT = BIT(26),
- PCI_INTBRG_CTRL_MARSP = BIT(21),
- PCI_INTBRG_CTRL_SERREN = BIT(17),
- PCI_INTBRG_CTRL_PEREN = BIT(16),
- PCI_INTBRG_CTRL_CLEAR_MASK = (PCI_INTBRG_CTRL_DTSTAT),
- PCI_INTBRG_CTRL_POLL_MASK = (PCI_INTBRG_CTRL_DTSERREN |
- PCI_INTBRG_CTRL_MARSP |
- PCI_INTBRG_CTRL_SERREN)
-};
-
-/************************************************************
- * I/O Control 1 Register, DevB:0x40
- ************************************************************/
-#define REG_IO_CTRL_1 0x40
-enum io_ctrl_1_bits {
- IO_CTRL_1_NMIONERR = BIT(7),
- IO_CTRL_1_LPC_ERR = BIT(6),
- IO_CTRL_1_PW2LPC = BIT(1),
- IO_CTRL_1_CLEAR_MASK = (IO_CTRL_1_LPC_ERR | IO_CTRL_1_PW2LPC)
-};
-
-/************************************************************
- * Legacy I/O Space Registers
- ************************************************************/
-#define REG_AT_COMPAT 0x61
-enum at_compat_bits {
- AT_COMPAT_SERR = BIT(7),
- AT_COMPAT_IOCHK = BIT(6),
- AT_COMPAT_CLRIOCHK = BIT(3),
- AT_COMPAT_CLRSERR = BIT(2),
-};
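
/*
 * Background note (an assumption based on the legacy PC/AT architecture,
 * not stated in this header): port 0x61 is the classic NMI status/control
 * port.  AT_COMPAT_SERR and AT_COMPAT_IOCHK latch SERR#/IOCHK# conditions,
 * and amd8111_lpc_bridge_init()/_check() in amd8111_edac.c write
 * AT_COMPAT_CLRSERR and AT_COMPAT_CLRIOCHK to acknowledge them.
 */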
-
-struct amd8111_dev_info {
- u16 err_dev; /* PCI Device ID */
- struct pci_dev *dev;
- int edac_idx; /* device index */
- char *ctl_name;
- struct edac_device_ctl_info *edac_dev;
- void (*init)(struct amd8111_dev_info *dev_info);
- void (*exit)(struct amd8111_dev_info *dev_info);
- void (*check)(struct edac_device_ctl_info *edac_dev);
-};
-
-struct amd8111_pci_info {
- u16 err_dev; /* PCI Device ID */
- struct pci_dev *dev;
- int edac_idx; /* pci index */
- const char *ctl_name;
- struct edac_pci_ctl_info *edac_dev;
- void (*init)(struct amd8111_pci_info *dev_info);
- void (*exit)(struct amd8111_pci_info *dev_info);
- void (*check)(struct edac_pci_ctl_info *edac_dev);
-};
-
-#endif /* _AMD8111_EDAC_H_ */
diff --git a/ANDROID_3.4.5/drivers/edac/amd8131_edac.c b/ANDROID_3.4.5/drivers/edac/amd8131_edac.c
deleted file mode 100644
index a5c68056..00000000
--- a/ANDROID_3.4.5/drivers/edac/amd8131_edac.c
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * amd8131_edac.c, AMD8131 hypertransport chip EDAC kernel module
- *
- * Copyright (c) 2008 Wind River Systems, Inc.
- *
- * Authors: Cao Qingtao <qingtao.cao@windriver.com>
- * Benjamin Walsh <benjamin.walsh@windriver.com>
- * Hu Yongqi <yongqi.hu@windriver.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/edac.h>
-#include <linux/pci_ids.h>
-
-#include "edac_core.h"
-#include "edac_module.h"
-#include "amd8131_edac.h"
-
-#define AMD8131_EDAC_REVISION " Ver: 1.0.0"
-#define AMD8131_EDAC_MOD_STR "amd8131_edac"
-
-/* Wrapper functions for accessing PCI configuration space */
-static void edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
-{
- int ret;
-
- ret = pci_read_config_dword(dev, reg, val32);
- if (ret != 0)
- printk(KERN_ERR AMD8131_EDAC_MOD_STR
- " PCI Access Read Error at 0x%x\n", reg);
-}
-
-static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
-{
- int ret;
-
- ret = pci_write_config_dword(dev, reg, val32);
- if (ret != 0)
- printk(KERN_ERR AMD8131_EDAC_MOD_STR
- " PCI Access Write Error at 0x%x\n", reg);
-}
-
-static char * const bridge_str[] = {
- [NORTH_A] = "NORTH A",
- [NORTH_B] = "NORTH B",
- [SOUTH_A] = "SOUTH A",
- [SOUTH_B] = "SOUTH B",
- [NO_BRIDGE] = "NO BRIDGE",
-};
-
-/* Support up to two AMD8131 chipsets on a platform */
-static struct amd8131_dev_info amd8131_devices[] = {
- {
- .inst = NORTH_A,
- .devfn = DEVFN_PCIX_BRIDGE_NORTH_A,
- .ctl_name = "AMD8131_PCIX_NORTH_A",
- },
- {
- .inst = NORTH_B,
- .devfn = DEVFN_PCIX_BRIDGE_NORTH_B,
- .ctl_name = "AMD8131_PCIX_NORTH_B",
- },
- {
- .inst = SOUTH_A,
- .devfn = DEVFN_PCIX_BRIDGE_SOUTH_A,
- .ctl_name = "AMD8131_PCIX_SOUTH_A",
- },
- {
- .inst = SOUTH_B,
- .devfn = DEVFN_PCIX_BRIDGE_SOUTH_B,
- .ctl_name = "AMD8131_PCIX_SOUTH_B",
- },
- {.inst = NO_BRIDGE,},
-};
-
-static void amd8131_pcix_init(struct amd8131_dev_info *dev_info)
-{
- u32 val32;
- struct pci_dev *dev = dev_info->dev;
-
- /* First clear error detection flags */
- edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
- if (val32 & MEM_LIMIT_MASK)
- edac_pci_write_dword(dev, REG_MEM_LIM, val32);
-
- /* Clear Discard Timer Timedout flag */
- edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
- if (val32 & INT_CTLR_DTS)
- edac_pci_write_dword(dev, REG_INT_CTLR, val32);
-
- /* Clear CRC Error flag on link side A */
- edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
- if (val32 & LNK_CTRL_CRCERR_A)
- edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
-
- /* Clear CRC Error flag on link side B */
- edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
- if (val32 & LNK_CTRL_CRCERR_B)
- edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
-
- /*
- * Then enable all error detections.
- *
- * Setup Discard Timer Sync Flood Enable,
- * System Error Enable and Parity Error Enable.
- */
- edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
- val32 |= INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE;
- edac_pci_write_dword(dev, REG_INT_CTLR, val32);
-
- /* Enable overall SERR Error detection */
- edac_pci_read_dword(dev, REG_STS_CMD, &val32);
- val32 |= STS_CMD_SERREN;
- edac_pci_write_dword(dev, REG_STS_CMD, val32);
-
- /* Setup CRC Flood Enable for link side A */
- edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
- val32 |= LNK_CTRL_CRCFEN;
- edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
-
- /* Setup CRC Flood Enable for link side B */
- edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
- val32 |= LNK_CTRL_CRCFEN;
- edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
-}
-
-static void amd8131_pcix_exit(struct amd8131_dev_info *dev_info)
-{
- u32 val32;
- struct pci_dev *dev = dev_info->dev;
-
- /* Disable SERR, PERR and DTSE Error detection */
- edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
- val32 &= ~(INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE);
- edac_pci_write_dword(dev, REG_INT_CTLR, val32);
-
- /* Disable overall System Error detection */
- edac_pci_read_dword(dev, REG_STS_CMD, &val32);
- val32 &= ~STS_CMD_SERREN;
- edac_pci_write_dword(dev, REG_STS_CMD, val32);
-
- /* Disable CRC Sync Flood on link side A */
- edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
- val32 &= ~LNK_CTRL_CRCFEN;
- edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
-
- /* Disable CRC Sync Flood on link side B */
- edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
- val32 &= ~LNK_CTRL_CRCFEN;
- edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
-}
-
-static void amd8131_pcix_check(struct edac_pci_ctl_info *edac_dev)
-{
- struct amd8131_dev_info *dev_info = edac_dev->pvt_info;
- struct pci_dev *dev = dev_info->dev;
- u32 val32;
-
- /* Check PCI-X Bridge Memory Base-Limit Register for errors */
- edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
- if (val32 & MEM_LIMIT_MASK) {
- printk(KERN_INFO "Error(s) in mem limit register "
- "on %s bridge\n", dev_info->ctl_name);
- printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
- "RTA: %d, STA: %d, MDPE: %d\n",
-			(val32 & MEM_LIMIT_DPE) != 0,
-			(val32 & MEM_LIMIT_RSE) != 0,
-			(val32 & MEM_LIMIT_RMA) != 0,
-			(val32 & MEM_LIMIT_RTA) != 0,
-			(val32 & MEM_LIMIT_STA) != 0,
-			(val32 & MEM_LIMIT_MDPE) != 0);
-
- val32 |= MEM_LIMIT_MASK;
- edac_pci_write_dword(dev, REG_MEM_LIM, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check if Discard Timer timed out */
- edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
- if (val32 & INT_CTLR_DTS) {
- printk(KERN_INFO "Error(s) in interrupt and control register "
- "on %s bridge\n", dev_info->ctl_name);
-		printk(KERN_INFO "DTS: %d\n", (val32 & INT_CTLR_DTS) != 0);
-
- val32 |= INT_CTLR_DTS;
- edac_pci_write_dword(dev, REG_INT_CTLR, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check if CRC error happens on link side A */
- edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
- if (val32 & LNK_CTRL_CRCERR_A) {
- printk(KERN_INFO "Error(s) in link conf and control register "
- "on %s bridge\n", dev_info->ctl_name);
-		printk(KERN_INFO "CRCERR: %d\n", (val32 & LNK_CTRL_CRCERR_A) != 0);
-
- val32 |= LNK_CTRL_CRCERR_A;
- edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check if CRC error happens on link side B */
- edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
- if (val32 & LNK_CTRL_CRCERR_B) {
- printk(KERN_INFO "Error(s) in link conf and control register "
- "on %s bridge\n", dev_info->ctl_name);
-		printk(KERN_INFO "CRCERR: %d\n", (val32 & LNK_CTRL_CRCERR_B) != 0);
-
- val32 |= LNK_CTRL_CRCERR_B;
- edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-}
-
-static struct amd8131_info amd8131_chipset = {
- .err_dev = PCI_DEVICE_ID_AMD_8131_APIC,
- .devices = amd8131_devices,
- .init = amd8131_pcix_init,
- .exit = amd8131_pcix_exit,
- .check = amd8131_pcix_check,
-};
-
-/*
- * There are 4 PCIX Bridges on the ATCA-6101 that share the same PCI Device
- * ID, so amd8131_probe() is called by the kernel 4 times, each time with
- * the address of a different pci_dev.
- */
-static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct amd8131_dev_info *dev_info;
-
- for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
- dev_info++)
- if (dev_info->devfn == dev->devfn)
- break;
-
- if (dev_info->inst == NO_BRIDGE) /* should never happen */
- return -ENODEV;
-
- /*
-	 * We can't look the device up with pci_get_device() as we usually do,
-	 * because there are 4 identical devices; use pci_dev_get() instead.
- */
- dev_info->dev = pci_dev_get(dev);
-
- if (pci_enable_device(dev_info->dev)) {
- pci_dev_put(dev_info->dev);
-		printk(KERN_ERR "failed to enable: "
- "vendor %x, device %x, devfn %x, name %s\n",
- PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
- dev_info->devfn, dev_info->ctl_name);
- return -ENODEV;
- }
-
- /*
-	 * We do not allocate an extra private structure for the
-	 * edac_pci_ctl_info, but make use of the existing one
-	 * instead.
- */
- dev_info->edac_idx = edac_pci_alloc_index();
- dev_info->edac_dev = edac_pci_alloc_ctl_info(0, dev_info->ctl_name);
- if (!dev_info->edac_dev)
- return -ENOMEM;
-
- dev_info->edac_dev->pvt_info = dev_info;
- dev_info->edac_dev->dev = &dev_info->dev->dev;
- dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR;
- dev_info->edac_dev->ctl_name = dev_info->ctl_name;
- dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- dev_info->edac_dev->edac_check = amd8131_chipset.check;
-
- if (amd8131_chipset.init)
- amd8131_chipset.init(dev_info);
-
- if (edac_pci_add_device(dev_info->edac_dev, dev_info->edac_idx) > 0) {
- printk(KERN_ERR "failed edac_pci_add_device() for %s\n",
- dev_info->ctl_name);
- edac_pci_free_ctl_info(dev_info->edac_dev);
- return -ENODEV;
- }
-
- printk(KERN_INFO "added one device on AMD8131 "
- "vendor %x, device %x, devfn %x, name %s\n",
- PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
- dev_info->devfn, dev_info->ctl_name);
-
- return 0;
-}
-
-static void amd8131_remove(struct pci_dev *dev)
-{
- struct amd8131_dev_info *dev_info;
-
- for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
- dev_info++)
- if (dev_info->devfn == dev->devfn)
- break;
-
- if (dev_info->inst == NO_BRIDGE) /* should never happen */
- return;
-
- if (dev_info->edac_dev) {
- edac_pci_del_device(dev_info->edac_dev->dev);
- edac_pci_free_ctl_info(dev_info->edac_dev);
- }
-
- if (amd8131_chipset.exit)
- amd8131_chipset.exit(dev_info);
-
- pci_dev_put(dev_info->dev);
-}
-
-static const struct pci_device_id amd8131_edac_pci_tbl[] = {
- {
- PCI_VEND_DEV(AMD, 8131_BRIDGE),
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .class = 0,
- .class_mask = 0,
- .driver_data = 0,
- },
- {
- 0,
- } /* table is NULL-terminated */
-};
-MODULE_DEVICE_TABLE(pci, amd8131_edac_pci_tbl);
-
-static struct pci_driver amd8131_edac_driver = {
- .name = AMD8131_EDAC_MOD_STR,
- .probe = amd8131_probe,
- .remove = amd8131_remove,
- .id_table = amd8131_edac_pci_tbl,
-};
-
-static int __init amd8131_edac_init(void)
-{
- printk(KERN_INFO "AMD8131 EDAC driver " AMD8131_EDAC_REVISION "\n");
- printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
-
- /* Only POLL mode supported so far */
- edac_op_state = EDAC_OPSTATE_POLL;
-
- return pci_register_driver(&amd8131_edac_driver);
-}
-
-static void __exit amd8131_edac_exit(void)
-{
- pci_unregister_driver(&amd8131_edac_driver);
-}
-
-module_init(amd8131_edac_init);
-module_exit(amd8131_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n");
-MODULE_DESCRIPTION("AMD8131 HyperTransport PCI-X Tunnel EDAC kernel module");
diff --git a/ANDROID_3.4.5/drivers/edac/amd8131_edac.h b/ANDROID_3.4.5/drivers/edac/amd8131_edac.h
deleted file mode 100644
index 6f8b0713..00000000
--- a/ANDROID_3.4.5/drivers/edac/amd8131_edac.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * amd8131_edac.h, EDAC defs for AMD8131 hypertransport chip
- *
- * Copyright (c) 2008 Wind River Systems, Inc.
- *
- * Authors: Cao Qingtao <qingtao.cao@windriver.com>
- * Benjamin Walsh <benjamin.walsh@windriver.com>
- * Hu Yongqi <yongqi.hu@windriver.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _AMD8131_EDAC_H_
-#define _AMD8131_EDAC_H_
-
-#define DEVFN_PCIX_BRIDGE_NORTH_A 8
-#define DEVFN_PCIX_BRIDGE_NORTH_B 16
-#define DEVFN_PCIX_BRIDGE_SOUTH_A 24
-#define DEVFN_PCIX_BRIDGE_SOUTH_B 32
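
/*
 * Illustration (not in the original header): a devfn value packs the PCI
 * slot and function as (slot << 3) | func, so the values above correspond
 * to devices 1, 2, 3 and 4 at function 0, e.g. PCI_DEVFN(1, 0) == 8 and
 * PCI_DEVFN(4, 0) == 32.  amd8131_probe() matches on these to tell the
 * four otherwise identical bridges apart.
 */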
-
-/************************************************************
- * PCI-X Bridge Status and Command Register, DevA:0x04
- ************************************************************/
-#define REG_STS_CMD 0x04
-enum sts_cmd_bits {
- STS_CMD_SSE = BIT(30),
- STS_CMD_SERREN = BIT(8)
-};
-
-/************************************************************
- * PCI-X Bridge Interrupt and Bridge Control Register,
- ************************************************************/
-#define REG_INT_CTLR 0x3c
-enum int_ctlr_bits {
- INT_CTLR_DTSE = BIT(27),
- INT_CTLR_DTS = BIT(26),
- INT_CTLR_SERR = BIT(17),
- INT_CTLR_PERR = BIT(16)
-};
-
-/************************************************************
- * PCI-X Bridge Memory Base-Limit Register, DevA:0x1C
- ************************************************************/
-#define REG_MEM_LIM 0x1c
-enum mem_limit_bits {
- MEM_LIMIT_DPE = BIT(31),
- MEM_LIMIT_RSE = BIT(30),
- MEM_LIMIT_RMA = BIT(29),
- MEM_LIMIT_RTA = BIT(28),
- MEM_LIMIT_STA = BIT(27),
- MEM_LIMIT_MDPE = BIT(24),
- MEM_LIMIT_MASK = MEM_LIMIT_DPE|MEM_LIMIT_RSE|MEM_LIMIT_RMA|
- MEM_LIMIT_RTA|MEM_LIMIT_STA|MEM_LIMIT_MDPE
-};
-
-/************************************************************
- * Link Configuration And Control Register, side A
- ************************************************************/
-#define REG_LNK_CTRL_A 0xc4
-
-/************************************************************
- * Link Configuration And Control Register, side B
- ************************************************************/
-#define REG_LNK_CTRL_B 0xc8
-
-enum lnk_ctrl_bits {
- LNK_CTRL_CRCERR_A = BIT(9),
- LNK_CTRL_CRCERR_B = BIT(8),
- LNK_CTRL_CRCFEN = BIT(1)
-};
-
-enum pcix_bridge_inst {
- NORTH_A = 0,
- NORTH_B = 1,
- SOUTH_A = 2,
- SOUTH_B = 3,
- NO_BRIDGE = 4
-};
-
-struct amd8131_dev_info {
- int devfn;
- enum pcix_bridge_inst inst;
- struct pci_dev *dev;
- int edac_idx; /* pci device index */
- char *ctl_name;
- struct edac_pci_ctl_info *edac_dev;
-};
-
-/*
- * The AMD8131 chipset has two pairs of PCIX Bridge and associated IOAPIC
- * Controller, and the ATCA-6101 carries two AMD8131 chipsets, so there
- * are four PCIX Bridges on the ATCA-6101 altogether.
- *
- * These PCIX Bridges share the same PCI Device ID and all reside at
- * Function Zero; they can be discriminated by their pci_dev->devfn.
- * They share the same set of init/check/exit methods, and their
- * private structures are collected in the devices[] array.
- */
-struct amd8131_info {
- u16 err_dev; /* PCI Device ID for AMD8131 APIC*/
- struct amd8131_dev_info *devices;
- void (*init)(struct amd8131_dev_info *dev_info);
- void (*exit)(struct amd8131_dev_info *dev_info);
- void (*check)(struct edac_pci_ctl_info *edac_dev);
-};
-
-#endif /* _AMD8131_EDAC_H_ */
-
diff --git a/ANDROID_3.4.5/drivers/edac/cell_edac.c b/ANDROID_3.4.5/drivers/edac/cell_edac.c
deleted file mode 100644
index 9a6a274e..00000000
--- a/ANDROID_3.4.5/drivers/edac/cell_edac.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Cell MIC driver for ECC counting
- *
- * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- */
-#undef DEBUG
-
-#include <linux/edac.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/stop_machine.h>
-#include <linux/io.h>
-#include <asm/machdep.h>
-#include <asm/cell-regs.h>
-
-#include "edac_core.h"
-
-struct cell_edac_priv
-{
- struct cbe_mic_tm_regs __iomem *regs;
- int node;
- int chanmask;
-#ifdef DEBUG
- u64 prev_fir;
-#endif
-};
-
-static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- struct csrow_info *csrow = &mci->csrows[0];
- unsigned long address, pfn, offset, syndrome;
-
- dev_dbg(mci->dev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
- priv->node, chan, ar);
-
-	/* Address decoding is likely a bit bogus; needs double-checking */
- address = (ar & 0xffffffffe0000000ul) >> 29;
- if (priv->chanmask == 0x3)
- address = (address << 1) | chan;
- pfn = address >> PAGE_SHIFT;
- offset = address & ~PAGE_MASK;
- syndrome = (ar & 0x000000001fe00000ul) >> 21;
-
- /* TODO: Decoding of the error address */
- edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
- syndrome, 0, chan, "");
-}
-
-static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- struct csrow_info *csrow = &mci->csrows[0];
- unsigned long address, pfn, offset;
-
- dev_dbg(mci->dev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
- priv->node, chan, ar);
-
-	/* Address decoding is likely a bit bogus; needs double-checking */
- address = (ar & 0xffffffffe0000000ul) >> 29;
- if (priv->chanmask == 0x3)
- address = (address << 1) | chan;
- pfn = address >> PAGE_SHIFT;
- offset = address & ~PAGE_MASK;
-
- /* TODO: Decoding of the error address */
- edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
-}
-
-static void cell_edac_check(struct mem_ctl_info *mci)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- u64 fir, addreg, clear = 0;
-
- fir = in_be64(&priv->regs->mic_fir);
-#ifdef DEBUG
- if (fir != priv->prev_fir) {
- dev_dbg(mci->dev, "fir change : 0x%016lx\n", fir);
- priv->prev_fir = fir;
- }
-#endif
- if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_SINGLE_0_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
- clear |= CBE_MIC_FIR_ECC_SINGLE_0_RESET;
- cell_edac_count_ce(mci, 0, addreg);
- }
- if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_SINGLE_1_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
- clear |= CBE_MIC_FIR_ECC_SINGLE_1_RESET;
- cell_edac_count_ce(mci, 1, addreg);
- }
- if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_MULTI_0_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
- clear |= CBE_MIC_FIR_ECC_MULTI_0_RESET;
- cell_edac_count_ue(mci, 0, addreg);
- }
- if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_MULTI_1_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
- clear |= CBE_MIC_FIR_ECC_MULTI_1_RESET;
- cell_edac_count_ue(mci, 1, addreg);
- }
-
- /* The procedure for clearing FIR bits is a bit ... weird */
- if (clear) {
- fir &= ~(CBE_MIC_FIR_ECC_ERR_MASK | CBE_MIC_FIR_ECC_SET_MASK);
- fir |= CBE_MIC_FIR_ECC_RESET_MASK;
- fir &= ~clear;
- out_be64(&priv->regs->mic_fir, fir);
- (void)in_be64(&priv->regs->mic_fir);
-
- mb(); /* sync up */
-#ifdef DEBUG
- fir = in_be64(&priv->regs->mic_fir);
- dev_dbg(mci->dev, "fir clear : 0x%016lx\n", fir);
-#endif
- }
-}
-
-static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
-{
- struct csrow_info *csrow = &mci->csrows[0];
- struct cell_edac_priv *priv = mci->pvt_info;
- struct device_node *np;
-
- for (np = NULL;
- (np = of_find_node_by_name(np, "memory")) != NULL;) {
- struct resource r;
-
- /* We "know" that the Cell firmware only creates one entry
- * in the "memory" nodes. If that changes, this code will
- * need to be adapted.
- */
- if (of_address_to_resource(np, 0, &r))
- continue;
- if (of_node_to_nid(np) != priv->node)
- continue;
- csrow->first_page = r.start >> PAGE_SHIFT;
- csrow->nr_pages = resource_size(&r) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->mtype = MEM_XDR;
- csrow->edac_mode = EDAC_SECDED;
- dev_dbg(mci->dev,
- "Initialized on node %d, chanmask=0x%x,"
- " first_page=0x%lx, nr_pages=0x%x\n",
- priv->node, priv->chanmask,
- csrow->first_page, csrow->nr_pages);
- break;
- }
-}
-
-static int __devinit cell_edac_probe(struct platform_device *pdev)
-{
- struct cbe_mic_tm_regs __iomem *regs;
- struct mem_ctl_info *mci;
- struct cell_edac_priv *priv;
- u64 reg;
- int rc, chanmask;
-
- regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
- if (regs == NULL)
- return -ENODEV;
-
- edac_op_state = EDAC_OPSTATE_POLL;
-
- /* Get channel population */
- reg = in_be64(&regs->mic_mnt_cfg);
- dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016llx\n", reg);
- chanmask = 0;
- if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP)
- chanmask |= 0x1;
- if (reg & CBE_MIC_MNT_CFG_CHAN_1_POP)
- chanmask |= 0x2;
- if (chanmask == 0) {
- dev_warn(&pdev->dev,
- "Yuck ! No channel populated ? Aborting !\n");
- return -ENODEV;
- }
- dev_dbg(&pdev->dev, "Initial FIR = 0x%016llx\n",
- in_be64(&regs->mic_fir));
-
- /* Allocate & init EDAC MC data structure */
- mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1,
- chanmask == 3 ? 2 : 1, pdev->id);
- if (mci == NULL)
- return -ENOMEM;
- priv = mci->pvt_info;
- priv->regs = regs;
- priv->node = pdev->id;
- priv->chanmask = chanmask;
- mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_XDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- mci->mod_name = "cell_edac";
- mci->ctl_name = "MIC";
- mci->dev_name = dev_name(&pdev->dev);
- mci->edac_check = cell_edac_check;
- cell_edac_init_csrows(mci);
-
- /* Register with EDAC core */
- rc = edac_mc_add_mc(mci);
- if (rc) {
- dev_err(&pdev->dev, "failed to register with EDAC core\n");
- edac_mc_free(mci);
- return rc;
- }
-
- return 0;
-}
-
-static int __devexit cell_edac_remove(struct platform_device *pdev)
-{
- struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
- if (mci)
- edac_mc_free(mci);
- return 0;
-}
-
-static struct platform_driver cell_edac_driver = {
- .driver = {
- .name = "cbe-mic",
- .owner = THIS_MODULE,
- },
- .probe = cell_edac_probe,
- .remove = __devexit_p(cell_edac_remove),
-};
-
-static int __init cell_edac_init(void)
-{
- /* Sanity check registers data structure */
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_ecc_address_0) != 0xf8);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_ecc_address_1) != 0x1b8);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_config) != 0x218);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_fir) != 0x230);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_mnt_cfg) != 0x210);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_exc) != 0x208);
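
	/*
	 * BUILD_BUG_ON() fails the build when its condition is true, so the
	 * offsets above are checked against the struct cbe_mic_tm_regs
	 * layout at compile time rather than at runtime.
	 */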
-
- return platform_driver_register(&cell_edac_driver);
-}
-
-static void __exit cell_edac_exit(void)
-{
- platform_driver_unregister(&cell_edac_driver);
-}
-
-module_init(cell_edac_init);
-module_exit(cell_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
-MODULE_DESCRIPTION("ECC counting for Cell MIC");
diff --git a/ANDROID_3.4.5/drivers/edac/cpc925_edac.c b/ANDROID_3.4.5/drivers/edac/cpc925_edac.c
deleted file mode 100644
index a774c0dd..00000000
--- a/ANDROID_3.4.5/drivers/edac/cpc925_edac.c
+++ /dev/null
@@ -1,1080 +0,0 @@
-/*
- * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller.
- *
- * Copyright (c) 2008 Wind River Systems, Inc.
- *
- * Authors: Cao Qingtao <qingtao.cao@windriver.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/edac.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/gfp.h>
-
-#include "edac_core.h"
-#include "edac_module.h"
-
-#define CPC925_EDAC_REVISION " Ver: 1.0.0"
-#define CPC925_EDAC_MOD_STR "cpc925_edac"
-
-#define cpc925_printk(level, fmt, arg...) \
- edac_printk(level, "CPC925", fmt, ##arg)
-
-#define cpc925_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg)
-
-/*
- * CPC925 registers are 32 bits wide, with bit 0 defined as the
- * most significant bit and bit 31 as the least significant.
- */
-#define CPC925_BITS_PER_REG 32
-#define CPC925_BIT(nr) (1UL << (CPC925_BITS_PER_REG - 1 - nr))
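
/*
 * With this numbering, CPC925_BIT(0) evaluates to 0x80000000 and
 * CPC925_BIT(31) to 0x00000001, i.e. the mirror image of the kernel's
 * usual BIT() macro.
 */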
-
-/*
- * EDAC device names for the error detections of
- * CPU Interface and Hypertransport Link.
- */
-#define CPC925_CPU_ERR_DEV "cpu"
-#define CPC925_HT_LINK_DEV "htlink"
-
-/* Assume the DDR refresh cycle is 15.6 microseconds */
-#define CPC925_REF_FREQ 0xFA69
-#define CPC925_SCRUB_BLOCK_SIZE 64 /* bytes */
-#define CPC925_NR_CSROWS 8
-
-/*
- * All registers and bits definitions are taken from
- * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02".
- */
-
-/*
- * CPU and Memory Controller Registers
- */
-/************************************************************
- * Processor Interface Exception Mask Register (APIMASK)
- ************************************************************/
-#define REG_APIMASK_OFFSET 0x30070
-enum apimask_bits {
- APIMASK_DART = CPC925_BIT(0), /* DART Exception */
- APIMASK_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
- APIMASK_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
- APIMASK_STAT = CPC925_BIT(3), /* Status Exception */
- APIMASK_DERR = CPC925_BIT(4), /* Data Error Exception */
- APIMASK_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */
- APIMASK_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */
- /* BIT(7) Reserved */
- APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
- APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
- APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
- APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
-
- CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 |
- APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 |
- APIMASK_ADRS1),
- ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H |
- APIMASK_ECC_UE_L | APIMASK_ECC_CE_L),
-};
-#define APIMASK_ADI(n) CPC925_BIT(((n)+1))
-
-/************************************************************
- * Processor Interface Exception Register (APIEXCP)
- ************************************************************/
-#define REG_APIEXCP_OFFSET 0x30060
-enum apiexcp_bits {
- APIEXCP_DART = CPC925_BIT(0), /* DART Exception */
- APIEXCP_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
- APIEXCP_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
- APIEXCP_STAT = CPC925_BIT(3), /* Status Exception */
- APIEXCP_DERR = CPC925_BIT(4), /* Data Error Exception */
- APIEXCP_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */
- APIEXCP_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */
- /* BIT(7) Reserved */
- APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
- APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
- APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
- APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
-
- CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 |
- APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 |
- APIEXCP_ADRS1),
- UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L),
- CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L),
- ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED),
-};
-
-/************************************************************
- * Memory Bus Configuration Register (MBCR)
-************************************************************/
-#define REG_MBCR_OFFSET 0x2190
-#define MBCR_64BITCFG_SHIFT 23
-#define MBCR_64BITCFG_MASK (1UL << MBCR_64BITCFG_SHIFT)
-#define MBCR_64BITBUS_SHIFT 22
-#define MBCR_64BITBUS_MASK (1UL << MBCR_64BITBUS_SHIFT)
-
-/************************************************************
- * Memory Bank Mode Register (MBMR)
-************************************************************/
-#define REG_MBMR_OFFSET 0x21C0
-#define MBMR_MODE_MAX_VALUE 0xF
-#define MBMR_MODE_SHIFT 25
-#define MBMR_MODE_MASK (MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT)
-#define MBMR_BBA_SHIFT 24
-#define MBMR_BBA_MASK (1UL << MBMR_BBA_SHIFT)
-
-/************************************************************
- * Memory Bank Boundary Address Register (MBBAR)
- ************************************************************/
-#define REG_MBBAR_OFFSET 0x21D0
-#define MBBAR_BBA_MAX_VALUE 0xFF
-#define MBBAR_BBA_SHIFT 24
-#define MBBAR_BBA_MASK (MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT)
-
-/************************************************************
- * Memory Scrub Control Register (MSCR)
- ************************************************************/
-#define REG_MSCR_OFFSET 0x2400
-#define MSCR_SCRUB_MOD_MASK 0xC0000000 /* scrub_mod - bit0:1*/
-#define MSCR_BACKGR_SCRUB 0x40000000 /* 01 */
-#define MSCR_SI_SHIFT 16 /* si - bit8:15*/
-#define MSCR_SI_MAX_VALUE 0xFF
-#define MSCR_SI_MASK (MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT)
-
-/************************************************************
- * Memory Scrub Range Start Register (MSRSR)
- ************************************************************/
-#define REG_MSRSR_OFFSET 0x2410
-
-/************************************************************
- * Memory Scrub Range End Register (MSRER)
- ************************************************************/
-#define REG_MSRER_OFFSET 0x2420
-
-/************************************************************
- * Memory Scrub Pattern Register (MSPR)
- ************************************************************/
-#define REG_MSPR_OFFSET 0x2430
-
-/************************************************************
- * Memory Check Control Register (MCCR)
- ************************************************************/
-#define REG_MCCR_OFFSET 0x2440
-enum mccr_bits {
- MCCR_ECC_EN = CPC925_BIT(0), /* ECC high and low check */
-};
-
-/************************************************************
- * Memory Check Range End Register (MCRER)
- ************************************************************/
-#define REG_MCRER_OFFSET 0x2450
-
-/************************************************************
- * Memory Error Address Register (MEAR)
- ************************************************************/
-#define REG_MEAR_OFFSET 0x2460
-#define MEAR_BCNT_MAX_VALUE 0x3
-#define MEAR_BCNT_SHIFT 30
-#define MEAR_BCNT_MASK (MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT)
-#define MEAR_RANK_MAX_VALUE 0x7
-#define MEAR_RANK_SHIFT 27
-#define MEAR_RANK_MASK (MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT)
-#define MEAR_COL_MAX_VALUE 0x7FF
-#define MEAR_COL_SHIFT 16
-#define MEAR_COL_MASK (MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT)
-#define MEAR_BANK_MAX_VALUE 0x3
-#define MEAR_BANK_SHIFT 14
-#define MEAR_BANK_MASK (MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT)
-#define MEAR_ROW_MASK 0x00003FFF
-
-/************************************************************
- * Memory Error Syndrome Register (MESR)
- ************************************************************/
-#define REG_MESR_OFFSET 0x2470
-#define MESR_ECC_SYN_H_MASK 0xFF00
-#define MESR_ECC_SYN_L_MASK 0x00FF
-
-/************************************************************
- * Memory Mode Control Register (MMCR)
- ************************************************************/
-#define REG_MMCR_OFFSET 0x2500
-enum mmcr_bits {
- MMCR_REG_DIMM_MODE = CPC925_BIT(3),
-};
-
-/*
- * HyperTransport Link Registers
- */
-/************************************************************
- * Error Handling/Enumeration Scratch Pad Register (ERRCTRL)
- ************************************************************/
-#define REG_ERRCTRL_OFFSET 0x70140
-enum errctrl_bits { /* nonfatal interrupts for */
- ERRCTRL_SERR_NF = CPC925_BIT(0), /* system error */
- ERRCTRL_CRC_NF = CPC925_BIT(1), /* CRC error */
- ERRCTRL_RSP_NF = CPC925_BIT(2), /* Response error */
- ERRCTRL_EOC_NF = CPC925_BIT(3), /* End-Of-Chain error */
- ERRCTRL_OVF_NF = CPC925_BIT(4), /* Overflow error */
- ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */
-
- ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */
- ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */
-
- HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF |
- ERRCTRL_RSP_NF | ERRCTRL_EOC_NF |
- ERRCTRL_OVF_NF | ERRCTRL_PROT_NF),
- HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL),
-};
-
-/************************************************************
- * Link Configuration and Link Control Register (LINKCTRL)
- ************************************************************/
-#define REG_LINKCTRL_OFFSET 0x70110
-enum linkctrl_bits {
- LINKCTRL_CRC_ERR = (CPC925_BIT(22) | CPC925_BIT(23)),
- LINKCTRL_LINK_FAIL = CPC925_BIT(27),
-
- HT_LINKCTRL_DETECTED = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL),
-};
-
-/************************************************************
- * Link FreqCap/Error/Freq/Revision ID Register (LINKERR)
- ************************************************************/
-#define REG_LINKERR_OFFSET 0x70120
-enum linkerr_bits {
- LINKERR_EOC_ERR = CPC925_BIT(17), /* End-Of-Chain error */
- LINKERR_OVF_ERR = CPC925_BIT(18), /* Receive Buffer Overflow */
- LINKERR_PROT_ERR = CPC925_BIT(19), /* Protocol error */
-
- HT_LINKERR_DETECTED = (LINKERR_EOC_ERR | LINKERR_OVF_ERR |
- LINKERR_PROT_ERR),
-};
-
-/************************************************************
- * Bridge Control Register (BRGCTRL)
- ************************************************************/
-#define REG_BRGCTRL_OFFSET 0x70300
-enum brgctrl_bits {
- BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */
- BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */
-};
-
-/* Private structure for edac memory controller */
-struct cpc925_mc_pdata {
- void __iomem *vbase;
- unsigned long total_mem;
- const char *name;
- int edac_idx;
-};
-
-/* Private structure for common edac device */
-struct cpc925_dev_info {
- void __iomem *vbase;
- struct platform_device *pdev;
- char *ctl_name;
- int edac_idx;
- struct edac_device_ctl_info *edac_dev;
- void (*init)(struct cpc925_dev_info *dev_info);
- void (*exit)(struct cpc925_dev_info *dev_info);
- void (*check)(struct edac_device_ctl_info *edac_dev);
-};
-
-/* Get total memory size from Open Firmware DTB */
-static void get_total_mem(struct cpc925_mc_pdata *pdata)
-{
- struct device_node *np = NULL;
- const unsigned int *reg, *reg_end;
- int len, sw, aw;
- unsigned long start, size;
-
- np = of_find_node_by_type(NULL, "memory");
- if (!np)
- return;
-
- aw = of_n_addr_cells(np);
- sw = of_n_size_cells(np);
- reg = (const unsigned int *)of_get_property(np, "reg", &len);
- reg_end = reg + len/4;
-
- pdata->total_mem = 0;
- do {
- start = of_read_number(reg, aw);
- reg += aw;
- size = of_read_number(reg, sw);
- reg += sw;
- debugf1("%s: start 0x%lx, size 0x%lx\n", __func__,
- start, size);
- pdata->total_mem += size;
- } while (reg < reg_end);
-
- of_node_put(np);
- debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem);
-}
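As a hypothetical illustration of what get_total_mem() parses (node contents assumed, not taken from a specific board): with #address-cells = <1> and #size-cells = <1> in the parent node, a device tree node such as

    memory {
        device_type = "memory";
        reg = <0x00000000 0x40000000>;   /* start 0, size 1 GiB */
    };

describes a single region, and the loop above accumulates total_mem = 0x40000000.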
-
-static void cpc925_init_csrows(struct mem_ctl_info *mci)
-{
- struct cpc925_mc_pdata *pdata = mci->pvt_info;
- struct csrow_info *csrow;
- int index;
- u32 mbmr, mbbar, bba;
- unsigned long row_size, last_nr_pages = 0;
-
- get_total_mem(pdata);
-
- for (index = 0; index < mci->nr_csrows; index++) {
- mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET +
- 0x20 * index);
- mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET +
-				0x20 * index);
- bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) |
- ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT);
-
- if (bba == 0)
- continue; /* not populated */
-
- csrow = &mci->csrows[index];
-
- row_size = bba * (1UL << 28); /* 256M */
- csrow->first_page = last_nr_pages;
- csrow->nr_pages = row_size >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- last_nr_pages = csrow->last_page + 1;
-
- csrow->mtype = MEM_RDDR;
- csrow->edac_mode = EDAC_SECDED;
-
- switch (csrow->nr_channels) {
- case 1: /* Single channel */
- csrow->grain = 32; /* four-beat burst of 32 bytes */
- break;
- case 2: /* Dual channel */
- default:
- csrow->grain = 64; /* four-beat burst of 64 bytes */
- break;
- }
-
- switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
- case 6: /* 0110, no way to differentiate X8 VS X16 */
- case 5: /* 0101 */
- case 8: /* 1000 */
- csrow->dtype = DEV_X16;
- break;
- case 7: /* 0111 */
- case 9: /* 1001 */
- csrow->dtype = DEV_X8;
- break;
- default:
- csrow->dtype = DEV_UNKNOWN;
- break;
- }
- }
-}
-
-/* Enable memory controller ECC detection */
-static void cpc925_mc_init(struct mem_ctl_info *mci)
-{
- struct cpc925_mc_pdata *pdata = mci->pvt_info;
- u32 apimask;
- u32 mccr;
-
- /* Enable various ECC error exceptions */
- apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET);
- if ((apimask & ECC_MASK_ENABLE) == 0) {
- apimask |= ECC_MASK_ENABLE;
- __raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET);
- }
-
- /* Enable ECC detection */
- mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET);
- if ((mccr & MCCR_ECC_EN) == 0) {
- mccr |= MCCR_ECC_EN;
- __raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET);
- }
-}
-
-/* Disable memory controller ECC detection */
-static void cpc925_mc_exit(struct mem_ctl_info *mci)
-{
- /*
- * WARNING:
-	 * We are supposed to clear the ECC error detection bits
-	 * here, and doing so would be harmless by itself. However,
-	 * once they are cleared, re-installing the CPC925 EDAC
-	 * module later and setting them up again in cpc925_mc_init()
-	 * will trigger a machine check exception.
-	 * It is also fine to leave the ECC error detection bits
-	 * enabled, since they are set to 1 by default or by the
-	 * boot loader.
- */
-
- return;
-}
-
-/*
- * Reverse-map DDR column/row/bank addresses back into a page frame
- * number and an offset within the page.
- *
- * Assuming the memory mode is 0x0111 (128-bit mode, identical DIMM pairs),
- * physical address(PA) bits to column address(CA) bits mappings are:
- * CA 0 1 2 3 4 5 6 7 8 9 10
- * PA 59 58 57 56 55 54 53 52 51 50 49
- *
- * physical address(PA) bits to bank address(BA) bits mappings are:
- * BA 0 1
- * PA 43 44
- *
- * physical address(PA) bits to row address(RA) bits mappings are:
- * RA 0 1 2 3 4 5 6 7 8 9 10 11 12
- * PA 36 35 34 48 47 46 45 40 41 42 39 38 37
- */
-static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
- unsigned long *pfn, unsigned long *offset, int *csrow)
-{
- u32 bcnt, rank, col, bank, row;
- u32 c;
- unsigned long pa;
- int i;
-
- bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT;
- rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT;
- col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT;
- bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT;
- row = mear & MEAR_ROW_MASK;
-
- *csrow = rank;
-
-#ifdef CONFIG_EDAC_DEBUG
- if (mci->csrows[rank].first_page == 0) {
- cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
- "non-populated csrow, broken hardware?\n");
- return;
- }
-#endif
-
- /* Revert csrow number */
- pa = mci->csrows[rank].first_page << PAGE_SHIFT;
-
- /* Revert column address */
- col += bcnt;
- for (i = 0; i < 11; i++) {
- c = col & 0x1;
- col >>= 1;
- pa |= c << (14 - i);
- }
-
- /* Revert bank address */
- pa |= bank << 19;
-
- /* Revert row address, in 4 steps */
- for (i = 0; i < 3; i++) {
- c = row & 0x1;
- row >>= 1;
- pa |= c << (26 - i);
- }
-
- for (i = 0; i < 3; i++) {
- c = row & 0x1;
- row >>= 1;
- pa |= c << (21 + i);
- }
-
- for (i = 0; i < 4; i++) {
- c = row & 0x1;
- row >>= 1;
- pa |= c << (18 - i);
- }
-
- for (i = 0; i < 3; i++) {
- c = row & 0x1;
- row >>= 1;
- pa |= c << (29 - i);
- }
-
- *offset = pa & (PAGE_SIZE - 1);
- *pfn = pa >> PAGE_SHIFT;
-
- debugf0("%s: ECC physical address 0x%lx\n", __func__, pa);
-}
-
-static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome)
-{
- if ((syndrome & MESR_ECC_SYN_H_MASK) == 0)
- return 0;
-
- if ((syndrome & MESR_ECC_SYN_L_MASK) == 0)
- return 1;
-
- cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n",
- syndrome);
- return 1;
-}
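For instance, a syndrome of 0x0041 has a clear upper byte and is attributed to channel 0, while 0x4100 has a clear lower byte and maps to channel 1; a syndrome with both bytes non-zero is unexpected and defaults to channel 1.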
-
-/* Check memory controller registers for ECC errors */
-static void cpc925_mc_check(struct mem_ctl_info *mci)
-{
- struct cpc925_mc_pdata *pdata = mci->pvt_info;
- u32 apiexcp;
- u32 mear;
- u32 mesr;
- u16 syndrome;
- unsigned long pfn = 0, offset = 0;
- int csrow = 0, channel = 0;
-
- /* APIEXCP is cleared when read */
- apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET);
- if ((apiexcp & ECC_EXCP_DETECTED) == 0)
- return;
-
- mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET);
-	syndrome = mesr & (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK);
-
- mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET);
-
- /* Revert column/row addresses into page frame number, etc */
- cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow);
-
- if (apiexcp & CECC_EXCP_DETECTED) {
- cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
- channel = cpc925_mc_find_channel(mci, syndrome);
- edac_mc_handle_ce(mci, pfn, offset, syndrome,
- csrow, channel, mci->ctl_name);
- }
-
- if (apiexcp & UECC_EXCP_DETECTED) {
- cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
- edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name);
- }
-
- cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
- cpc925_mc_printk(mci, KERN_INFO, "APIMASK 0x%08x\n",
- __raw_readl(pdata->vbase + REG_APIMASK_OFFSET));
- cpc925_mc_printk(mci, KERN_INFO, "APIEXCP 0x%08x\n",
- apiexcp);
- cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl 0x%08x\n",
- __raw_readl(pdata->vbase + REG_MSCR_OFFSET));
- cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start 0x%08x\n",
- __raw_readl(pdata->vbase + REG_MSRSR_OFFSET));
- cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End 0x%08x\n",
- __raw_readl(pdata->vbase + REG_MSRER_OFFSET));
- cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern 0x%08x\n",
- __raw_readl(pdata->vbase + REG_MSPR_OFFSET));
- cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl 0x%08x\n",
- __raw_readl(pdata->vbase + REG_MCCR_OFFSET));
- cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End 0x%08x\n",
- __raw_readl(pdata->vbase + REG_MCRER_OFFSET));
- cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address 0x%08x\n",
- mesr);
- cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome 0x%08x\n",
- syndrome);
-}
-
-/******************** CPU err device********************************/
-static u32 cpc925_cpu_mask_disabled(void)
-{
- struct device_node *cpus;
- struct device_node *cpunode = NULL;
- static u32 mask = 0;
-
- /* use cached value if available */
- if (mask != 0)
- return mask;
-
- mask = APIMASK_ADI0 | APIMASK_ADI1;
-
- cpus = of_find_node_by_path("/cpus");
- if (cpus == NULL) {
-		cpc925_printk(KERN_DEBUG, "No /cpus node!\n");
- return 0;
- }
-
- while ((cpunode = of_get_next_child(cpus, cpunode)) != NULL) {
- const u32 *reg = of_get_property(cpunode, "reg", NULL);
-
- if (strcmp(cpunode->type, "cpu")) {
- cpc925_printk(KERN_ERR, "Not a cpu node in /cpus: %s\n", cpunode->name);
- continue;
- }
-
- if (reg == NULL || *reg > 2) {
- cpc925_printk(KERN_ERR, "Bad reg value at %s\n", cpunode->full_name);
- continue;
- }
-
- mask &= ~APIMASK_ADI(*reg);
- }
-
- if (mask != (APIMASK_ADI0 | APIMASK_ADI1)) {
-		/* We assume that each CPU sits on its own PI and that
-		 * for present CPUs the reg property equals the PI
-		 * interface id */
- cpc925_printk(KERN_WARNING,
- "Assuming PI id is equal to CPU MPIC id!\n");
- }
-
- of_node_put(cpunode);
- of_node_put(cpus);
-
- return mask;
-}
-
-/* Enable CPU Errors detection */
-static void cpc925_cpu_init(struct cpc925_dev_info *dev_info)
-{
- u32 apimask;
- u32 cpumask;
-
- apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
-
- cpumask = cpc925_cpu_mask_disabled();
- if (apimask & cpumask) {
- cpc925_printk(KERN_WARNING, "CPU(s) not present, "
- "but enabled in APIMASK, disabling\n");
- apimask &= ~cpumask;
- }
-
- if ((apimask & CPU_MASK_ENABLE) == 0)
- apimask |= CPU_MASK_ENABLE;
-
- __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET);
-}
-
-/* Disable CPU Errors detection */
-static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info)
-{
- /*
- * WARNING:
-	 * We are supposed to clear the CPU error detection bits
-	 * here, and doing so would be harmless by itself. However,
-	 * once they are cleared, re-installing the CPC925 EDAC
-	 * module later and setting them up again in cpc925_cpu_init()
-	 * will trigger a machine check exception.
-	 * It is also fine to leave the CPU error detection bits
-	 * enabled, since they are set to 1 by default.
- */
-
- return;
-}
-
-/* Check for CPU Errors */
-static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev)
-{
- struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
- u32 apiexcp;
- u32 apimask;
-
- /* APIEXCP is cleared when read */
- apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET);
- if ((apiexcp & CPU_EXCP_DETECTED) == 0)
- return;
-
- if ((apiexcp & ~cpc925_cpu_mask_disabled()) == 0)
- return;
-
- apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
- cpc925_printk(KERN_INFO, "Processor Interface Fault\n"
- "Processor Interface register dump:\n");
- cpc925_printk(KERN_INFO, "APIMASK 0x%08x\n", apimask);
- cpc925_printk(KERN_INFO, "APIEXCP 0x%08x\n", apiexcp);
-
- edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
-}
-
-/******************** HT Link err device****************************/
-/* Enable HyperTransport Link Error detection */
-static void cpc925_htlink_init(struct cpc925_dev_info *dev_info)
-{
- u32 ht_errctrl;
-
- ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
- if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) {
- ht_errctrl |= HT_ERRCTRL_ENABLE;
- __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
- }
-}
-
-/* Disable HyperTransport Link Error detection */
-static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info)
-{
- u32 ht_errctrl;
-
- ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
- ht_errctrl &= ~HT_ERRCTRL_ENABLE;
- __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
-}
-
-/* Check for HyperTransport Link errors */
-static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev)
-{
- struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
- u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET);
- u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET);
- u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
- u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET);
-
- if (!((brgctrl & BRGCTRL_DETSERR) ||
- (linkctrl & HT_LINKCTRL_DETECTED) ||
- (errctrl & HT_ERRCTRL_DETECTED) ||
- (linkerr & HT_LINKERR_DETECTED)))
- return;
-
- cpc925_printk(KERN_INFO, "HT Link Fault\n"
- "HT register dump:\n");
- cpc925_printk(KERN_INFO, "Bridge Ctrl 0x%08x\n",
- brgctrl);
- cpc925_printk(KERN_INFO, "Link Config Ctrl 0x%08x\n",
- linkctrl);
- cpc925_printk(KERN_INFO, "Error Enum and Ctrl 0x%08x\n",
- errctrl);
- cpc925_printk(KERN_INFO, "Link Error 0x%08x\n",
- linkerr);
-
-	/* Clear by writing 1 */
- if (brgctrl & BRGCTRL_DETSERR)
- __raw_writel(BRGCTRL_DETSERR,
- dev_info->vbase + REG_BRGCTRL_OFFSET);
-
- if (linkctrl & HT_LINKCTRL_DETECTED)
- __raw_writel(HT_LINKCTRL_DETECTED,
- dev_info->vbase + REG_LINKCTRL_OFFSET);
-
- /* Initiate Secondary Bus Reset to clear the chain failure */
- if (errctrl & ERRCTRL_CHN_FAL)
- __raw_writel(BRGCTRL_SECBUSRESET,
- dev_info->vbase + REG_BRGCTRL_OFFSET);
-
- if (errctrl & ERRCTRL_RSP_ERR)
- __raw_writel(ERRCTRL_RSP_ERR,
- dev_info->vbase + REG_ERRCTRL_OFFSET);
-
- if (linkerr & HT_LINKERR_DETECTED)
- __raw_writel(HT_LINKERR_DETECTED,
- dev_info->vbase + REG_LINKERR_OFFSET);
-
- edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
-}
-
-static struct cpc925_dev_info cpc925_devs[] = {
- {
- .ctl_name = CPC925_CPU_ERR_DEV,
- .init = cpc925_cpu_init,
- .exit = cpc925_cpu_exit,
- .check = cpc925_cpu_check,
- },
- {
- .ctl_name = CPC925_HT_LINK_DEV,
- .init = cpc925_htlink_init,
- .exit = cpc925_htlink_exit,
- .check = cpc925_htlink_check,
- },
- {0}, /* Terminated by NULL */
-};
-
-/*
- * Add CPU error detection and HyperTransport Link error detection
- * as common "edac_device" instances. They have no corresponding
- * device nodes in the Open Firmware DTB, so we have to add
- * platform devices for them. They also share the MMIO range with
- * the memory controller.
- */
-static void cpc925_add_edac_devices(void __iomem *vbase)
-{
- struct cpc925_dev_info *dev_info;
-
- if (!vbase) {
- cpc925_printk(KERN_ERR, "MMIO not established yet\n");
- return;
- }
-
- for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
- dev_info->vbase = vbase;
- dev_info->pdev = platform_device_register_simple(
- dev_info->ctl_name, 0, NULL, 0);
- if (IS_ERR(dev_info->pdev)) {
- cpc925_printk(KERN_ERR,
- "Can't register platform device for %s\n",
- dev_info->ctl_name);
- continue;
- }
-
- /*
-		 * No need to allocate a private structure; reuse
-		 * the entries of cpc925_devs[] instead.
- */
- dev_info->edac_idx = edac_device_alloc_index();
- dev_info->edac_dev =
- edac_device_alloc_ctl_info(0, dev_info->ctl_name,
- 1, NULL, 0, 0, NULL, 0, dev_info->edac_idx);
- if (!dev_info->edac_dev) {
- cpc925_printk(KERN_ERR, "No memory for edac device\n");
- goto err1;
- }
-
- dev_info->edac_dev->pvt_info = dev_info;
- dev_info->edac_dev->dev = &dev_info->pdev->dev;
- dev_info->edac_dev->ctl_name = dev_info->ctl_name;
- dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR;
- dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev);
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- dev_info->edac_dev->edac_check = dev_info->check;
-
- if (dev_info->init)
- dev_info->init(dev_info);
-
- if (edac_device_add_device(dev_info->edac_dev) > 0) {
- cpc925_printk(KERN_ERR,
- "Unable to add edac device for %s\n",
- dev_info->ctl_name);
- goto err2;
- }
-
- debugf0("%s: Successfully added edac device for %s\n",
- __func__, dev_info->ctl_name);
-
- continue;
-
-err2:
- if (dev_info->exit)
- dev_info->exit(dev_info);
- edac_device_free_ctl_info(dev_info->edac_dev);
-err1:
- platform_device_unregister(dev_info->pdev);
- }
-}
-
-/*
- * Delete the common "edac_device" instances for CPU error
- * detection and HyperTransport Link error detection
- */
-static void cpc925_del_edac_devices(void)
-{
- struct cpc925_dev_info *dev_info;
-
- for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
- if (dev_info->edac_dev) {
- edac_device_del_device(dev_info->edac_dev->dev);
- edac_device_free_ctl_info(dev_info->edac_dev);
- platform_device_unregister(dev_info->pdev);
- }
-
- if (dev_info->exit)
- dev_info->exit(dev_info);
-
- debugf0("%s: Successfully deleted edac device for %s\n",
- __func__, dev_info->ctl_name);
- }
-}
-
-/* Convert the current background scrub rate into byte/sec bandwidth */
-static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
-{
- struct cpc925_mc_pdata *pdata = mci->pvt_info;
- int bw;
- u32 mscr;
- u8 si;
-
- mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET);
- si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT;
-
- debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr);
-
- if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
- (si == 0)) {
- cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n");
- bw = 0;
- } else
- bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
-
- return bw;
-}
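As a worked example with the constants above: for a scrub interval field si = 0x10 (16), bw = 64 * 0xFA67 / 16 = 256412 bytes/s. The hard-coded 0xFA67 (64103) corresponds roughly to the number of 15.6 us refresh intervals per second, so larger si values yield proportionally lower scrub bandwidth.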
-
-/* Return 0 for single channel; 1 for dual channel */
-static int cpc925_mc_get_channels(void __iomem *vbase)
-{
- int dual = 0;
- u32 mbcr;
-
- mbcr = __raw_readl(vbase + REG_MBCR_OFFSET);
-
- /*
- * Dual channel only when 128-bit wide physical bus
- * and 128-bit configuration.
- */
- if (((mbcr & MBCR_64BITCFG_MASK) == 0) &&
- ((mbcr & MBCR_64BITBUS_MASK) == 0))
- dual = 1;
-
- debugf0("%s: %s channel\n", __func__,
- (dual > 0) ? "Dual" : "Single");
-
- return dual;
-}
-
-static int __devinit cpc925_probe(struct platform_device *pdev)
-{
- static int edac_mc_idx;
- struct mem_ctl_info *mci;
- void __iomem *vbase;
- struct cpc925_mc_pdata *pdata;
- struct resource *r;
- int res = 0, nr_channels;
-
- debugf0("%s: %s platform device found!\n", __func__, pdev->name);
-
- if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) {
- res = -ENOMEM;
- goto out;
- }
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- cpc925_printk(KERN_ERR, "Unable to get resource\n");
- res = -ENOENT;
- goto err1;
- }
-
- if (!devm_request_mem_region(&pdev->dev,
- r->start,
- resource_size(r),
- pdev->name)) {
- cpc925_printk(KERN_ERR, "Unable to request mem region\n");
- res = -EBUSY;
- goto err1;
- }
-
- vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
- if (!vbase) {
- cpc925_printk(KERN_ERR, "Unable to ioremap device\n");
- res = -ENOMEM;
- goto err2;
- }
-
- nr_channels = cpc925_mc_get_channels(vbase);
- mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata),
- CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx);
- if (!mci) {
- cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
- res = -ENOMEM;
- goto err2;
- }
-
- pdata = mci->pvt_info;
- pdata->vbase = vbase;
- pdata->edac_idx = edac_mc_idx++;
- pdata->name = pdev->name;
-
- mci->dev = &pdev->dev;
- platform_set_drvdata(pdev, mci);
- mci->dev_name = dev_name(&pdev->dev);
- mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_SECDED;
- mci->mod_name = CPC925_EDAC_MOD_STR;
- mci->mod_ver = CPC925_EDAC_REVISION;
- mci->ctl_name = pdev->name;
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- mci->edac_check = cpc925_mc_check;
-
- mci->ctl_page_to_phys = NULL;
- mci->scrub_mode = SCRUB_SW_SRC;
- mci->set_sdram_scrub_rate = NULL;
- mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate;
-
- cpc925_init_csrows(mci);
-
- /* Setup memory controller registers */
- cpc925_mc_init(mci);
-
- if (edac_mc_add_mc(mci) > 0) {
- cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n");
- goto err3;
- }
-
- cpc925_add_edac_devices(vbase);
-
-	/* getting this far means success */
- debugf0("%s: success\n", __func__);
-
- res = 0;
- goto out;
-
-err3:
- cpc925_mc_exit(mci);
- edac_mc_free(mci);
-err2:
- devm_release_mem_region(&pdev->dev, r->start, resource_size(r));
-err1:
- devres_release_group(&pdev->dev, cpc925_probe);
-out:
- return res;
-}
-
-static int cpc925_remove(struct platform_device *pdev)
-{
- struct mem_ctl_info *mci = platform_get_drvdata(pdev);
-
- /*
- * Delete common edac devices before edac mc, because
- * the former share the MMIO of the latter.
- */
- cpc925_del_edac_devices();
- cpc925_mc_exit(mci);
-
- edac_mc_del_mc(&pdev->dev);
- edac_mc_free(mci);
-
- return 0;
-}
-
-static struct platform_driver cpc925_edac_driver = {
- .probe = cpc925_probe,
- .remove = cpc925_remove,
- .driver = {
- .name = "cpc925_edac",
- }
-};
-
-static int __init cpc925_edac_init(void)
-{
- int ret = 0;
-
- printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n");
- printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n");
-
- /* Only support POLL mode so far */
- edac_op_state = EDAC_OPSTATE_POLL;
-
- ret = platform_driver_register(&cpc925_edac_driver);
- if (ret) {
- printk(KERN_WARNING "Failed to register %s\n",
- CPC925_EDAC_MOD_STR);
- }
-
- return ret;
-}
-
-static void __exit cpc925_edac_exit(void)
-{
- platform_driver_unregister(&cpc925_edac_driver);
-}
-
-module_init(cpc925_edac_init);
-module_exit(cpc925_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
-MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module");
diff --git a/ANDROID_3.4.5/drivers/edac/e752x_edac.c b/ANDROID_3.4.5/drivers/edac/e752x_edac.c
deleted file mode 100644
index 41223261..00000000
--- a/ANDROID_3.4.5/drivers/edac/e752x_edac.c
+++ /dev/null
@@ -1,1449 +0,0 @@
-/*
- * Intel e752x Memory Controller kernel module
- * (C) 2004 Linux Networx (http://lnxi.com)
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * See "enum e752x_chips" below for supported chipsets
- *
- * Written by Tom Zimmerman
- *
- * Contributors:
- * Thayne Harbaugh at realmsys.com (?)
- * Wang Zhenyu at intel.com
- * Dave Jiang at mvista.com
- *
- * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/edac.h>
-#include "edac_core.h"
-
-#define E752X_REVISION " Ver: 2.0.2"
-#define EDAC_MOD_STR "e752x_edac"
-
-static int report_non_memory_errors;
-static int force_function_unhide;
-static int sysbus_parity = -1;
-
-static struct edac_pci_ctl_info *e752x_pci;
-
-#define e752x_printk(level, fmt, arg...) \
- edac_printk(level, "e752x", fmt, ##arg)
-
-#define e752x_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
-
-#ifndef PCI_DEVICE_ID_INTEL_7520_0
-#define PCI_DEVICE_ID_INTEL_7520_0 0x3590
-#endif /* PCI_DEVICE_ID_INTEL_7520_0 */
-
-#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
-#define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591
-#endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */
-
-#ifndef PCI_DEVICE_ID_INTEL_7525_0
-#define PCI_DEVICE_ID_INTEL_7525_0 0x359E
-#endif /* PCI_DEVICE_ID_INTEL_7525_0 */
-
-#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
-#define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593
-#endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */
-
-#ifndef PCI_DEVICE_ID_INTEL_7320_0
-#define PCI_DEVICE_ID_INTEL_7320_0 0x3592
-#endif /* PCI_DEVICE_ID_INTEL_7320_0 */
-
-#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
-#define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
-#endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
-
-#ifndef PCI_DEVICE_ID_INTEL_3100_0
-#define PCI_DEVICE_ID_INTEL_3100_0 0x35B0
-#endif /* PCI_DEVICE_ID_INTEL_3100_0 */
-
-#ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
-#define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1
-#endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */
-
-#define E752X_NR_CSROWS 8 /* number of csrows */
-
-/* E752X register addresses - device 0 function 0 */
-#define E752X_MCHSCRB 0x52 /* Memory Scrub register (16b) */
- /*
- * 6:5 Scrub Completion Count
- * 3:2 Scrub Rate (i3100 only)
- * 01=fast 10=normal
- * 1:0 Scrub Mode enable
- * 00=off 10=on
- */
-#define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
-#define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
- /*
- * 31:30 Device width row 7
- * 01=x8 10=x4 11=x8 DDR2
- * 27:26 Device width row 6
- * 23:22 Device width row 5
-					 * 19:18  Device width row 4
- * 15:14 Device width row 3
- * 11:10 Device width row 2
- * 7:6 Device width row 1
- * 3:2 Device width row 0
- */
-#define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */
- /* FIXME:IS THIS RIGHT? */
- /*
- * 22 Number channels 0=1,1=2
- * 19:18 DRB Granularity 32/64MB
- */
-#define E752X_DRM 0x80 /* Dimm mapping register */
-#define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */
- /*
- * 14:12 1 single A, 2 single B, 3 dual
- */
-#define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
-#define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
-#define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
-#define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */
-
-/* E752X register addresses - device 0 function 1 */
-#define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */
-#define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */
-#define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */
-#define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */
-#define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */
-#define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */
-#define E752X_SYSBUS_FERR	0x60	/* System bus first error reg (16b) */
-#define E752X_SYSBUS_NERR	0x62	/* System bus next error reg (16b) */
-#define E752X_SYSBUS_ERRMASK	0x64	/* System bus error mask reg (16b) */
-#define E752X_SYSBUS_SMICMD	0x6A	/* System bus SMI command reg (16b) */
-#define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
-#define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
-#define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
-#define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI cmd reg (8b) */
-#define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
-#define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
-#define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
-#define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */
-#define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */
-#define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */
- /* error address register (32b) */
- /*
- * 31 Reserved
-					 * 30:2  CE address (64 byte block 34:6)
- * 1 Reserved
- * 0 HiLoCS
- */
-#define E752X_DRAM_SEC2_ADD	0xC8	/* DRAM second correctable memory */
- /* error address register (32b) */
- /*
- * 31 Reserved
- * 30:2 CE address (64 byte block 34:6)
- * 1 Reserved
- * 0 HiLoCS
- */
-#define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */
- /* error address register (32b) */
- /*
- * 31 Reserved
- * 30:2 CE address (64 byte block 34:6)
- * 1 Reserved
- * 0 HiLoCS
- */
-#define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM 1st uncorrectable scrub mem */
- /* error address register (32b) */
- /*
- * 31 Reserved
-					 * 30:2  CE address (64 byte block 34:6)
- * 1 Reserved
- * 0 HiLoCS
- */
-#define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */
- /* error syndrome register (16b) */
-#define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */
- /* error syndrome register (16b) */
-#define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
-
-/* 3100 IMCH specific register addresses - device 0 function 1 */
-#define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */
-#define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */
-#define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */
-#define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */
-
-/* ICH5R register addresses - device 30 function 0 */
-#define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
-#define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
-#define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */
-
-enum e752x_chips {
- E7520 = 0,
- E7525 = 1,
- E7320 = 2,
- I3100 = 3
-};
-
-struct e752x_pvt {
- struct pci_dev *bridge_ck;
- struct pci_dev *dev_d0f0;
- struct pci_dev *dev_d0f1;
- u32 tolm;
- u32 remapbase;
- u32 remaplimit;
- int mc_symmetric;
- u8 map[8];
- int map_type;
- const struct e752x_dev_info *dev_info;
-};
-
-struct e752x_dev_info {
- u16 err_dev;
- u16 ctl_dev;
- const char *ctl_name;
-};
-
-struct e752x_error_info {
- u32 ferr_global;
- u32 nerr_global;
- u32 nsi_ferr; /* 3100 only */
- u32 nsi_nerr; /* 3100 only */
- u8 hi_ferr; /* all but 3100 */
- u8 hi_nerr; /* all but 3100 */
- u16 sysbus_ferr;
- u16 sysbus_nerr;
- u8 buf_ferr;
- u8 buf_nerr;
- u16 dram_ferr;
- u16 dram_nerr;
- u32 dram_sec1_add;
- u32 dram_sec2_add;
- u16 dram_sec1_syndrome;
- u16 dram_sec2_syndrome;
- u32 dram_ded_add;
- u32 dram_scrb_add;
- u32 dram_retr_add;
-};
-
-static const struct e752x_dev_info e752x_devs[] = {
- [E7520] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
- .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
- .ctl_name = "E7520"},
- [E7525] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
- .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
- .ctl_name = "E7525"},
- [E7320] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
- .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
- .ctl_name = "E7320"},
- [I3100] = {
- .err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
- .ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
- .ctl_name = "3100"},
-};
-
-/* Valid scrub rates for the e752x/3100 hardware memory scrubber. We
- * map the scrubbing bandwidth to a hardware register value. The 'set'
- * operation finds the 'matching or higher value'. Note that scrubbing
- * on the e752x can only be enabled/disabled. The 3100 supports
- * a normal and fast mode.
- */
-
-#define SDRATE_EOT 0xFFFFFFFF
-
-struct scrubrate {
- u32 bandwidth; /* bandwidth consumed by scrubbing in bytes/sec */
- u16 scrubval; /* register value for scrub rate */
-};
-
-/* Rate below assumes same performance as i3100 using PC3200 DDR2 in
- * normal mode. e752x bridges don't support choosing normal or fast mode,
- * so the scrubbing bandwidth value isn't all that important - scrubbing is
- * either on or off.
- */
-static const struct scrubrate scrubrates_e752x[] = {
- {0, 0x00}, /* Scrubbing Off */
- {500000, 0x02}, /* Scrubbing On */
- {SDRATE_EOT, 0x00} /* End of Table */
-};
-
-/* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s
- * Normal mode: 125 (32000 / 256) times slower than fast mode.
- */
-static const struct scrubrate scrubrates_i3100[] = {
- {0, 0x00}, /* Scrubbing Off */
- {500000, 0x0a}, /* Normal mode - 32k clocks */
- {62500000, 0x06}, /* Fast mode - 256 clocks */
- {SDRATE_EOT, 0x00} /* End of Table */
-};
-
-static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
- unsigned long page)
-{
- u32 remap;
- struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
-
- debugf3("%s()\n", __func__);
-
- if (page < pvt->tolm)
- return page;
-
- if ((page >= 0x100000) && (page < pvt->remapbase))
- return page;
-
- remap = (page - pvt->tolm) + pvt->remapbase;
-
- if (remap < pvt->remaplimit)
- return remap;
-
- e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
- return pvt->tolm - 1;
-}
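With hypothetical values tolm = 0xC0000 (3 GiB expressed in 4 KiB pages), remapbase = 0x100000 and remaplimit = 0x140000: page 0x80000 lies below TOLM and is returned unchanged, while page 0xD0000 falls in the remapped hole and becomes (0xD0000 - 0xC0000) + 0x100000 = 0x110000.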
-
-static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
- u32 sec1_add, u16 sec1_syndrome)
-{
- u32 page;
- int row;
- int channel;
- int i;
- struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
-
- debugf3("%s()\n", __func__);
-
- /* convert the addr to 4k page */
- page = sec1_add >> (PAGE_SHIFT - 4);
-
- /* FIXME - check for -1 */
- if (pvt->mc_symmetric) {
-		/* chip selects are bits 14 & 13 */
- row = ((page >> 1) & 3);
- e752x_printk(KERN_WARNING,
- "Test row %d Table %d %d %d %d %d %d %d %d\n", row,
- pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
- pvt->map[4], pvt->map[5], pvt->map[6],
- pvt->map[7]);
-
- /* test for channel remapping */
- for (i = 0; i < 8; i++) {
- if (pvt->map[i] == row)
- break;
- }
-
- e752x_printk(KERN_WARNING, "Test computed row %d\n", i);
-
- if (i < 8)
- row = i;
- else
- e752x_mc_printk(mci, KERN_WARNING,
- "row %d not found in remap table\n",
- row);
- } else
- row = edac_mc_find_csrow_by_page(mci, page);
-
- /* 0 = channel A, 1 = channel B */
- channel = !(error_one & 1);
-
- /* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
- sec1_syndrome, row, channel, "e752x CE");
-}
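Note on the shifts above: the SEC1 address register stores DRAM address bits 34:6 in register bits 30:2 (see the register definitions earlier), so its value is roughly the byte address shifted right by 4. Shifting it right by a further PAGE_SHIFT - 4 therefore yields the 4 KiB page number, and shifting it left by 4 recovers the byte address (to 64-byte-block resolution) passed to offset_in_page().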
-
-static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
- u32 sec1_add, u16 sec1_syndrome, int *error_found,
- int handle_error)
-{
- *error_found = 1;
-
- if (handle_error)
- do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
-}
-
-static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
- u32 ded_add, u32 scrb_add)
-{
- u32 error_2b, block_page;
- int row;
- struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
-
- debugf3("%s()\n", __func__);
-
- if (error_one & 0x0202) {
- error_2b = ded_add;
-
- /* convert to 4k address */
- block_page = error_2b >> (PAGE_SHIFT - 4);
-
- row = pvt->mc_symmetric ?
-			/* chip selects are bits 14 & 13 */
- ((block_page >> 1) & 3) :
- edac_mc_find_csrow_by_page(mci, block_page);
-
- /* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ue(mci, block_page,
- offset_in_page(error_2b << 4),
- row, "e752x UE from Read");
- }
- if (error_one & 0x0404) {
- error_2b = scrb_add;
-
- /* convert to 4k address */
- block_page = error_2b >> (PAGE_SHIFT - 4);
-
- row = pvt->mc_symmetric ?
-			/* chip selects are bits 14 & 13 */
- ((block_page >> 1) & 3) :
- edac_mc_find_csrow_by_page(mci, block_page);
-
- /* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ue(mci, block_page,
- offset_in_page(error_2b << 4),
-				  row, "e752x UE from Scrubber");
- }
-}
-
-static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
- u32 ded_add, u32 scrb_add, int *error_found,
- int handle_error)
-{
- *error_found = 1;
-
- if (handle_error)
- do_process_ue(mci, error_one, ded_add, scrb_add);
-}
-
-static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
- int *error_found, int handle_error)
-{
- *error_found = 1;
-
- if (!handle_error)
- return;
-
- debugf3("%s()\n", __func__);
- edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
-}
-
-static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
- u32 retry_add)
-{
- u32 error_1b, page;
- int row;
- struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
-
- error_1b = retry_add;
- page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
-
-	/* chip selects are bits 14 & 13 */
- row = pvt->mc_symmetric ? ((page >> 1) & 3) :
- edac_mc_find_csrow_by_page(mci, page);
-
- e752x_mc_printk(mci, KERN_WARNING,
- "CE page 0x%lx, row %d : Memory read retry\n",
- (long unsigned int)page, row);
-}
-
-static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
- u32 retry_add, int *error_found,
- int handle_error)
-{
- *error_found = 1;
-
- if (handle_error)
- do_process_ded_retry(mci, error, retry_add);
-}
-
-static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
- int *error_found, int handle_error)
-{
- *error_found = 1;
-
- if (handle_error)
- e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
-}
-
-static char *global_message[11] = {
- "PCI Express C1",
- "PCI Express C",
- "PCI Express B1",
- "PCI Express B",
- "PCI Express A1",
- "PCI Express A",
- "DMA Controller",
- "HUB or NS Interface",
- "System Bus",
-	"DRAM Controller",	/* entry index 9 == DRAM_ENTRY */
- "Internal Buffer"
-};
-
-#define DRAM_ENTRY 9
-
-static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
-
-static void do_global_error(int fatal, u32 errors)
-{
- int i;
-
- for (i = 0; i < 11; i++) {
- if (errors & (1 << i)) {
- /* If the error is from DRAM Controller OR
- * we are to report ALL errors, then
- * report the error
- */
- if ((i == DRAM_ENTRY) || report_non_memory_errors)
- e752x_printk(KERN_WARNING, "%sError %s\n",
- fatal_message[fatal],
- global_message[i]);
- }
- }
-}
-
-static inline void global_error(int fatal, u32 errors, int *error_found,
- int handle_error)
-{
- *error_found = 1;
-
- if (handle_error)
- do_global_error(fatal, errors);
-}
-
-static char *hub_message[7] = {
- "HI Address or Command Parity", "HI Illegal Access",
- "HI Internal Parity", "Out of Range Access",
- "HI Data Parity", "Enhanced Config Access",
- "Hub Interface Target Abort"
-};
-
-static void do_hub_error(int fatal, u8 errors)
-{
- int i;
-
- for (i = 0; i < 7; i++) {
- if (errors & (1 << i))
- e752x_printk(KERN_WARNING, "%sError %s\n",
- fatal_message[fatal], hub_message[i]);
- }
-}
-
-static inline void hub_error(int fatal, u8 errors, int *error_found,
- int handle_error)
-{
- *error_found = 1;
-
- if (handle_error)
- do_hub_error(fatal, errors);
-}
-
-#define NSI_FATAL_MASK 0x0c080081
-#define NSI_NON_FATAL_MASK 0x23a0ba64
-#define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
-
-static char *nsi_message[30] = {
- "NSI Link Down", /* NSI_FERR/NSI_NERR bit 0, fatal error */
- "", /* reserved */
- "NSI Parity Error", /* bit 2, non-fatal */
- "", /* reserved */
- "", /* reserved */
- "Correctable Error Message", /* bit 5, non-fatal */
- "Non-Fatal Error Message", /* bit 6, non-fatal */
- "Fatal Error Message", /* bit 7, fatal */
- "", /* reserved */
- "Receiver Error", /* bit 9, non-fatal */
- "", /* reserved */
- "Bad TLP", /* bit 11, non-fatal */
- "Bad DLLP", /* bit 12, non-fatal */
- "REPLAY_NUM Rollover", /* bit 13, non-fatal */
- "", /* reserved */
- "Replay Timer Timeout", /* bit 15, non-fatal */
- "", /* reserved */
- "", /* reserved */
- "", /* reserved */
- "Data Link Protocol Error", /* bit 19, fatal */
- "", /* reserved */
- "Poisoned TLP", /* bit 21, non-fatal */
- "", /* reserved */
- "Completion Timeout", /* bit 23, non-fatal */
- "Completer Abort", /* bit 24, non-fatal */
- "Unexpected Completion", /* bit 25, non-fatal */
- "Receiver Overflow", /* bit 26, fatal */
- "Malformed TLP", /* bit 27, fatal */
- "", /* reserved */
- "Unsupported Request" /* bit 29, non-fatal */
-};
-
-static void do_nsi_error(int fatal, u32 errors)
-{
- int i;
-
- for (i = 0; i < 30; i++) {
- if (errors & (1 << i))
- printk(KERN_WARNING "%sError %s\n",
- fatal_message[fatal], nsi_message[i]);
- }
-}
-
-static inline void nsi_error(int fatal, u32 errors, int *error_found,
- int handle_error)
-{
- *error_found = 1;
-
- if (handle_error)
- do_nsi_error(fatal, errors);
-}
-
-static char *membuf_message[4] = {
- "Internal PMWB to DRAM parity",
- "Internal PMWB to System Bus Parity",
- "Internal System Bus or IO to PMWB Parity",
- "Internal DRAM to PMWB Parity"
-};
-
-static void do_membuf_error(u8 errors)
-{
- int i;
-
- for (i = 0; i < 4; i++) {
- if (errors & (1 << i))
- e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
- membuf_message[i]);
- }
-}
-
-static inline void membuf_error(u8 errors, int *error_found, int handle_error)
-{
- *error_found = 1;
-
- if (handle_error)
- do_membuf_error(errors);
-}
-
-static char *sysbus_message[10] = {
- "Addr or Request Parity",
- "Data Strobe Glitch",
- "Addr Strobe Glitch",
- "Data Parity",
- "Addr Above TOM",
- "Non DRAM Lock Error",
- "MCERR", "BINIT",
- "Memory Parity",
- "IO Subsystem Parity"
-};
-
-static void do_sysbus_error(int fatal, u32 errors)
-{
- int i;
-
- for (i = 0; i < 10; i++) {
- if (errors & (1 << i))
- e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
- fatal_message[fatal], sysbus_message[i]);
- }
-}
-
-static inline void sysbus_error(int fatal, u32 errors, int *error_found,
- int handle_error)
-{
- *error_found = 1;
-
- if (handle_error)
- do_sysbus_error(fatal, errors);
-}
-
-static void e752x_check_hub_interface(struct e752x_error_info *info,
- int *error_found, int handle_error)
-{
- u8 stat8;
-
- //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
-
- stat8 = info->hi_ferr;
-
- if (stat8 & 0x7f) { /* Error, so process */
- stat8 &= 0x7f;
-
- if (stat8 & 0x2b)
- hub_error(1, stat8 & 0x2b, error_found, handle_error);
-
- if (stat8 & 0x54)
- hub_error(0, stat8 & 0x54, error_found, handle_error);
- }
- //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
-
- stat8 = info->hi_nerr;
-
- if (stat8 & 0x7f) { /* Error, so process */
- stat8 &= 0x7f;
-
- if (stat8 & 0x2b)
- hub_error(1, stat8 & 0x2b, error_found, handle_error);
-
- if (stat8 & 0x54)
- hub_error(0, stat8 & 0x54, error_found, handle_error);
- }
-}
-
-static void e752x_check_ns_interface(struct e752x_error_info *info,
- int *error_found, int handle_error)
-{
- u32 stat32;
-
- stat32 = info->nsi_ferr;
- if (stat32 & NSI_ERR_MASK) { /* Error, so process */
- if (stat32 & NSI_FATAL_MASK) /* check for fatal errors */
- nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
- handle_error);
- if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
- nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
- handle_error);
- }
- stat32 = info->nsi_nerr;
- if (stat32 & NSI_ERR_MASK) {
- if (stat32 & NSI_FATAL_MASK)
- nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
- handle_error);
- if (stat32 & NSI_NON_FATAL_MASK)
- nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
- handle_error);
- }
-}
-
-static void e752x_check_sysbus(struct e752x_error_info *info,
- int *error_found, int handle_error)
-{
- u32 stat32, error32;
-
- //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
- stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
-
- if (stat32 == 0)
- return; /* no errors */
-
- error32 = (stat32 >> 16) & 0x3ff;
- stat32 = stat32 & 0x3ff;
-
- if (stat32 & 0x087)
- sysbus_error(1, stat32 & 0x087, error_found, handle_error);
-
- if (stat32 & 0x378)
- sysbus_error(0, stat32 & 0x378, error_found, handle_error);
-
- if (error32 & 0x087)
- sysbus_error(1, error32 & 0x087, error_found, handle_error);
-
- if (error32 & 0x378)
- sysbus_error(0, error32 & 0x378, error_found, handle_error);
-}
-
-static void e752x_check_membuf(struct e752x_error_info *info,
- int *error_found, int handle_error)
-{
- u8 stat8;
-
- stat8 = info->buf_ferr;
-
- if (stat8 & 0x0f) { /* Error, so process */
- stat8 &= 0x0f;
- membuf_error(stat8, error_found, handle_error);
- }
-
- stat8 = info->buf_nerr;
-
- if (stat8 & 0x0f) { /* Error, so process */
- stat8 &= 0x0f;
- membuf_error(stat8, error_found, handle_error);
- }
-}
-
-static void e752x_check_dram(struct mem_ctl_info *mci,
- struct e752x_error_info *info, int *error_found,
- int handle_error)
-{
- u16 error_one, error_next;
-
- error_one = info->dram_ferr;
- error_next = info->dram_nerr;
-
- /* decode and report errors */
- if (error_one & 0x0101) /* check first error correctable */
- process_ce(mci, error_one, info->dram_sec1_add,
- info->dram_sec1_syndrome, error_found, handle_error);
-
- if (error_next & 0x0101) /* check next error correctable */
- process_ce(mci, error_next, info->dram_sec2_add,
- info->dram_sec2_syndrome, error_found, handle_error);
-
- if (error_one & 0x4040)
- process_ue_no_info_wr(mci, error_found, handle_error);
-
- if (error_next & 0x4040)
- process_ue_no_info_wr(mci, error_found, handle_error);
-
- if (error_one & 0x2020)
- process_ded_retry(mci, error_one, info->dram_retr_add,
- error_found, handle_error);
-
- if (error_next & 0x2020)
- process_ded_retry(mci, error_next, info->dram_retr_add,
- error_found, handle_error);
-
- if (error_one & 0x0808)
- process_threshold_ce(mci, error_one, error_found, handle_error);
-
- if (error_next & 0x0808)
- process_threshold_ce(mci, error_next, error_found,
- handle_error);
-
- if (error_one & 0x0606)
- process_ue(mci, error_one, info->dram_ded_add,
- info->dram_scrb_add, error_found, handle_error);
-
- if (error_next & 0x0606)
- process_ue(mci, error_next, info->dram_ded_add,
- info->dram_scrb_add, error_found, handle_error);
-}
-
-static void e752x_get_error_info(struct mem_ctl_info *mci,
- struct e752x_error_info *info)
-{
- struct pci_dev *dev;
- struct e752x_pvt *pvt;
-
- memset(info, 0, sizeof(*info));
- pvt = (struct e752x_pvt *)mci->pvt_info;
- dev = pvt->dev_d0f1;
- pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
-
- if (info->ferr_global) {
- if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
- pci_read_config_dword(dev, I3100_NSI_FERR,
- &info->nsi_ferr);
- info->hi_ferr = 0;
- } else {
- pci_read_config_byte(dev, E752X_HI_FERR,
- &info->hi_ferr);
- info->nsi_ferr = 0;
- }
- pci_read_config_word(dev, E752X_SYSBUS_FERR,
- &info->sysbus_ferr);
- pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
- pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
- pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
- &info->dram_sec1_add);
- pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
- &info->dram_sec1_syndrome);
- pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
- &info->dram_ded_add);
- pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
- &info->dram_scrb_add);
- pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
- &info->dram_retr_add);
-
- /* ignore the reserved bits just in case */
- if (info->hi_ferr & 0x7f)
- pci_write_config_byte(dev, E752X_HI_FERR,
- info->hi_ferr);
-
- if (info->nsi_ferr & NSI_ERR_MASK)
- pci_write_config_dword(dev, I3100_NSI_FERR,
- info->nsi_ferr);
-
- if (info->sysbus_ferr)
- pci_write_config_word(dev, E752X_SYSBUS_FERR,
- info->sysbus_ferr);
-
- if (info->buf_ferr & 0x0f)
- pci_write_config_byte(dev, E752X_BUF_FERR,
- info->buf_ferr);
-
- if (info->dram_ferr)
- pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
- info->dram_ferr, info->dram_ferr);
-
- pci_write_config_dword(dev, E752X_FERR_GLOBAL,
- info->ferr_global);
- }
-
- pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
-
- if (info->nerr_global) {
- if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
- pci_read_config_dword(dev, I3100_NSI_NERR,
- &info->nsi_nerr);
- info->hi_nerr = 0;
- } else {
- pci_read_config_byte(dev, E752X_HI_NERR,
- &info->hi_nerr);
- info->nsi_nerr = 0;
- }
- pci_read_config_word(dev, E752X_SYSBUS_NERR,
- &info->sysbus_nerr);
- pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
- pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
- pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
- &info->dram_sec2_add);
- pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
- &info->dram_sec2_syndrome);
-
- if (info->hi_nerr & 0x7f)
- pci_write_config_byte(dev, E752X_HI_NERR,
- info->hi_nerr);
-
- if (info->nsi_nerr & NSI_ERR_MASK)
- pci_write_config_dword(dev, I3100_NSI_NERR,
- info->nsi_nerr);
-
- if (info->sysbus_nerr)
- pci_write_config_word(dev, E752X_SYSBUS_NERR,
- info->sysbus_nerr);
-
- if (info->buf_nerr & 0x0f)
- pci_write_config_byte(dev, E752X_BUF_NERR,
- info->buf_nerr);
-
- if (info->dram_nerr)
- pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
- info->dram_nerr, info->dram_nerr);
-
- pci_write_config_dword(dev, E752X_NERR_GLOBAL,
- info->nerr_global);
- }
-}
-
-static int e752x_process_error_info(struct mem_ctl_info *mci,
- struct e752x_error_info *info,
- int handle_errors)
-{
- u32 error32, stat32;
- int error_found;
-
- error_found = 0;
- error32 = (info->ferr_global >> 18) & 0x3ff;
- stat32 = (info->ferr_global >> 4) & 0x7ff;
-
- if (error32)
- global_error(1, error32, &error_found, handle_errors);
-
- if (stat32)
- global_error(0, stat32, &error_found, handle_errors);
-
- error32 = (info->nerr_global >> 18) & 0x3ff;
- stat32 = (info->nerr_global >> 4) & 0x7ff;
-
- if (error32)
- global_error(1, error32, &error_found, handle_errors);
-
- if (stat32)
- global_error(0, stat32, &error_found, handle_errors);
-
- e752x_check_hub_interface(info, &error_found, handle_errors);
- e752x_check_ns_interface(info, &error_found, handle_errors);
- e752x_check_sysbus(info, &error_found, handle_errors);
- e752x_check_membuf(info, &error_found, handle_errors);
- e752x_check_dram(mci, info, &error_found, handle_errors);
- return error_found;
-}
-
-static void e752x_check(struct mem_ctl_info *mci)
-{
- struct e752x_error_info info;
-
- debugf3("%s()\n", __func__);
- e752x_get_error_info(mci, &info);
- e752x_process_error_info(mci, &info, 1);
-}
-
-/* Program byte/sec bandwidth scrub rate to hardware */
-static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
-{
- const struct scrubrate *scrubrates;
- struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
- struct pci_dev *pdev = pvt->dev_d0f0;
- int i;
-
- if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
- scrubrates = scrubrates_i3100;
- else
- scrubrates = scrubrates_e752x;
-
-	/* Translate the desired scrub rate to an e752x/3100 register value.
-	 * Search for the bandwidth that is equal to or greater than the
-	 * desired rate and program the corresponding register value.
- */
- for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
- if (scrubrates[i].bandwidth >= new_bw)
- break;
-
- if (scrubrates[i].bandwidth == SDRATE_EOT)
- return -1;
-
- pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
-
- return scrubrates[i].bandwidth;
-}
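As a usage sketch: on an i3100, a request of new_bw = 400000 bytes/s matches the first table entry whose bandwidth is greater than or equal to the request, i.e. the 500000 bytes/s normal-mode entry, so 0x0a is written to E752X_MCHSCRB and 500000 is returned; a request larger than 62500000 runs off the table and returns -1.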
-
-/* Convert current scrub rate value into byte/sec bandwidth */
-static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
-{
- const struct scrubrate *scrubrates;
- struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
- struct pci_dev *pdev = pvt->dev_d0f0;
- u16 scrubval;
- int i;
-
- if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
- scrubrates = scrubrates_i3100;
- else
- scrubrates = scrubrates_e752x;
-
- /* Find the bandwidth matching the memory scrubber configuration */
- pci_read_config_word(pdev, E752X_MCHSCRB, &scrubval);
- scrubval = scrubval & 0x0f;
-
- for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
- if (scrubrates[i].scrubval == scrubval)
- break;
-
- if (scrubrates[i].bandwidth == SDRATE_EOT) {
- e752x_printk(KERN_WARNING,
- "Invalid sdram scrub control value: 0x%x\n", scrubval);
- return -1;
- }
- return scrubrates[i].bandwidth;
-
-}
-
-/* Return 1 if dual channel mode is active. Else return 0. */
-static inline int dual_channel_active(u16 ddrcsr)
-{
- return (((ddrcsr >> 12) & 3) == 3);
-}
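For example, ddrcsr = 0x3000 has bits 13:12 equal to 3, so this helper reports dual-channel mode, matching the DDRCSR field description above (1 = single A, 2 = single B, 3 = dual).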
-
-/* Remap csrow index numbers if map_type is "reverse" */
-static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
-{
- struct e752x_pvt *pvt = mci->pvt_info;
-
- if (!pvt->map_type)
- return (7 - index);
-
- return (index);
-}
-
-static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
- u16 ddrcsr)
-{
- struct csrow_info *csrow;
- unsigned long last_cumul_size;
- int index, mem_dev, drc_chan;
- int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
- int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
- u8 value;
- u32 dra, drc, cumul_size;
-
- dra = 0;
- for (index = 0; index < 4; index++) {
- u8 dra_reg;
- pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
- dra |= dra_reg << (index * 8);
- }
- pci_read_config_dword(pdev, E752X_DRC, &drc);
- drc_chan = dual_channel_active(ddrcsr);
- drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
- drc_ddim = (drc >> 20) & 0x3;
-
- /* The dram row boundary (DRB) reg values are boundary address for
- * each DRAM row with a granularity of 64 or 128MB (single/dual
- * channel operation). DRB regs are cumulative; therefore DRB7 will
- * contain the total memory contained in all eight rows.
- */
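	/* For illustration (hypothetical DRB values, not from real hardware):
	 * in dual-channel mode drc_drbg is 2, so each DRB unit below is
	 * 1 << (25 + 2) bytes = 128 MiB.  DRB values of 4, 4, 8 would then
	 * mean row 0 ends at 512 MiB, row 1 is unpopulated (same boundary
	 * as row 0, so it is skipped), and row 2 ends at 1 GiB.
	 */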
- for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
- /* mem_dev 0=x8, 1=x4 */
- mem_dev = (dra >> (index * 4 + 2)) & 0x3;
- csrow = &mci->csrows[remap_csrow_index(mci, index)];
-
- mem_dev = (mem_dev == 2);
- pci_read_config_byte(pdev, E752X_DRB + index, &value);
- /* convert a 128 or 64 MiB DRB to a page size. */
- cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
- debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
- cumul_size);
- if (cumul_size == last_cumul_size)
- continue; /* not populated */
-
- csrow->first_page = last_cumul_size;
- csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
- last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
- csrow->mtype = MEM_RDDR; /* only one type supported */
- csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
-
- /*
- * if single channel or x8 devices then SECDED
- * if dual channel and x4 then S4ECD4ED
- */
- if (drc_ddim) {
- if (drc_chan && mem_dev) {
- csrow->edac_mode = EDAC_S4ECD4ED;
- mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
- } else {
- csrow->edac_mode = EDAC_SECDED;
- mci->edac_cap |= EDAC_FLAG_SECDED;
- }
- } else
- csrow->edac_mode = EDAC_NONE;
- }
-}
-
-static void e752x_init_mem_map_table(struct pci_dev *pdev,
- struct e752x_pvt *pvt)
-{
- int index;
- u8 value, last, row;
-
- last = 0;
- row = 0;
-
- for (index = 0; index < 8; index += 2) {
- pci_read_config_byte(pdev, E752X_DRB + index, &value);
- /* test if there is a dimm in this slot */
- if (value == last) {
- /* no dimm in the slot, so flag it as empty */
- pvt->map[index] = 0xff;
- pvt->map[index + 1] = 0xff;
- } else { /* there is a dimm in the slot */
- pvt->map[index] = row;
- row++;
- last = value;
- /* test the next value to see if the dimm is double
- * sided
- */
- pci_read_config_byte(pdev, E752X_DRB + index + 1,
- &value);
-
-			/* single sided: flag the second half as empty;
-			 * double sided: record the next row number
-			 */
- pvt->map[index + 1] = (value == last) ? 0xff : row;
- row++;
- last = value;
- }
- }
-}
-
-/* Return 0 on success or 1 on failure. */
-static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
- struct e752x_pvt *pvt)
-{
- struct pci_dev *dev;
-
- pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
- pvt->dev_info->err_dev, pvt->bridge_ck);
-
- if (pvt->bridge_ck == NULL)
- pvt->bridge_ck = pci_scan_single_device(pdev->bus,
- PCI_DEVFN(0, 1));
-
- if (pvt->bridge_ck == NULL) {
-		e752x_printk(KERN_ERR, "error reporting device not found: "
- "vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
- return 1;
- }
-
- dev = pci_get_device(PCI_VENDOR_ID_INTEL,
- e752x_devs[dev_idx].ctl_dev,
- NULL);
-
- if (dev == NULL)
- goto fail;
-
- pvt->dev_d0f0 = dev;
- pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
-
- return 0;
-
-fail:
- pci_dev_put(pvt->bridge_ck);
- return 1;
-}
-
-/* Setup system bus parity mask register.
- * Sysbus parity supported on:
- * e7320/e7520/e7525 + Xeon
- */
-static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
-{
- char *cpu_id = cpu_data(0).x86_model_id;
- struct pci_dev *dev = pvt->dev_d0f1;
- int enable = 1;
-
- /* Allow module parameter override, else see if CPU supports parity */
- if (sysbus_parity != -1) {
- enable = sysbus_parity;
- } else if (cpu_id[0] && !strstr(cpu_id, "Xeon")) {
- e752x_printk(KERN_INFO, "System Bus Parity not "
- "supported by CPU, disabling\n");
- enable = 0;
- }
-
- if (enable)
- pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
- else
- pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
-}
-
-static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
-{
- struct pci_dev *dev;
-
- dev = pvt->dev_d0f1;
- /* Turn off error disable & SMI in case the BIOS turned it on */
- if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
- pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
- pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
- } else {
- pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
- pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
- }
-
- e752x_init_sysbus_parity_mask(pvt);
-
- pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
- pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
- pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
- pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
- pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
-}
-
-static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
-{
- u16 pci_data;
- u8 stat8;
- struct mem_ctl_info *mci;
- struct e752x_pvt *pvt;
- u16 ddrcsr;
- int drc_chan; /* Number of channels 0=1chan,1=2chan */
- struct e752x_error_info discard;
-
- debugf0("%s(): mci\n", __func__);
- debugf0("Starting Probe1\n");
-
- /* check to see if device 0 function 1 is enabled; if it isn't, we
- * assume the BIOS has reserved it for a reason and is expecting
-	 * exclusive access, so we take care not to violate that assumption and
- * fail the probe. */
- pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
- if (!force_function_unhide && !(stat8 & (1 << 5))) {
- printk(KERN_INFO "Contact your BIOS vendor to see if the "
- "E752x error registers can be safely un-hidden\n");
- return -ENODEV;
- }
- stat8 |= (1 << 5);
- pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
-
- pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
- /* FIXME: should check >>12 or 0xf, true for all? */
- /* Dual channel = 1, Single channel = 0 */
- drc_chan = dual_channel_active(ddrcsr);
-
- mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
-
- if (mci == NULL) {
- return -ENOMEM;
- }
-
- debugf3("%s(): init mci\n", __func__);
- mci->mtype_cap = MEM_FLAG_RDDR;
-	/* 3100 IMCH supports SECDED only */
- mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
- (EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
- /* FIXME - what if different memory types are in different csrows? */
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = E752X_REVISION;
- mci->dev = &pdev->dev;
-
- debugf3("%s(): init pvt\n", __func__);
- pvt = (struct e752x_pvt *)mci->pvt_info;
- pvt->dev_info = &e752x_devs[dev_idx];
- pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
-
- if (e752x_get_devs(pdev, dev_idx, pvt)) {
- edac_mc_free(mci);
- return -ENODEV;
- }
-
- debugf3("%s(): more mci init\n", __func__);
- mci->ctl_name = pvt->dev_info->ctl_name;
- mci->dev_name = pci_name(pdev);
- mci->edac_check = e752x_check;
- mci->ctl_page_to_phys = ctl_page_to_phys;
- mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
- mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
-
- /* set the map type. 1 = normal, 0 = reversed
- * Must be set before e752x_init_csrows in case csrow mapping
- * is reversed.
- */
- pci_read_config_byte(pdev, E752X_DRM, &stat8);
- pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
-
- e752x_init_csrows(mci, pdev, ddrcsr);
- e752x_init_mem_map_table(pdev, pvt);
-
- if (dev_idx == I3100)
- mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
- else
- mci->edac_cap |= EDAC_FLAG_NONE;
- debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
-
- /* load the top of low memory, remap base, and remap limit vars */
- pci_read_config_word(pdev, E752X_TOLM, &pci_data);
- pvt->tolm = ((u32) pci_data) << 4;
- pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
- pvt->remapbase = ((u32) pci_data) << 14;
- pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
- pvt->remaplimit = ((u32) pci_data) << 14;
- e752x_printk(KERN_INFO,
- "tolm = %x, remapbase = %x, remaplimit = %x\n",
- pvt->tolm, pvt->remapbase, pvt->remaplimit);
-
- /* Here we assume that we will never see multiple instances of this
- * type of memory controller. The ID is therefore hardcoded to 0.
- */
- if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
- goto fail;
- }
-
- e752x_init_error_reporting_regs(pvt);
- e752x_get_error_info(mci, &discard); /* clear other MCH errors */
-
- /* allocating generic PCI control info */
- e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
- if (!e752x_pci) {
- printk(KERN_WARNING
- "%s(): Unable to create PCI control\n", __func__);
- printk(KERN_WARNING
- "%s(): PCI error report via EDAC not setup\n",
- __func__);
- }
-
- /* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
- return 0;
-
-fail:
- pci_dev_put(pvt->dev_d0f0);
- pci_dev_put(pvt->dev_d0f1);
- pci_dev_put(pvt->bridge_ck);
- edac_mc_free(mci);
-
- return -ENODEV;
-}
-
-/* returns count (>= 0), or negative on error */
-static int __devinit e752x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- debugf0("%s()\n", __func__);
-
- /* wake up and enable device */
- if (pci_enable_device(pdev) < 0)
- return -EIO;
-
- return e752x_probe1(pdev, ent->driver_data);
-}
-
-static void __devexit e752x_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
- struct e752x_pvt *pvt;
-
- debugf0("%s()\n", __func__);
-
- if (e752x_pci)
- edac_pci_release_generic_ctl(e752x_pci);
-
- if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
- return;
-
- pvt = (struct e752x_pvt *)mci->pvt_info;
- pci_dev_put(pvt->dev_d0f0);
- pci_dev_put(pvt->dev_d0f1);
- pci_dev_put(pvt->bridge_ck);
- edac_mc_free(mci);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(e752x_pci_tbl) = {
- {
- PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7520},
- {
- PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7525},
- {
- PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7320},
- {
- PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I3100},
- {
- 0,
- } /* 0 terminated list. */
-};
-
-MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
-
-static struct pci_driver e752x_driver = {
- .name = EDAC_MOD_STR,
- .probe = e752x_init_one,
- .remove = __devexit_p(e752x_remove_one),
- .id_table = e752x_pci_tbl,
-};
-
-static int __init e752x_init(void)
-{
- int pci_rc;
-
- debugf3("%s()\n", __func__);
-
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- pci_rc = pci_register_driver(&e752x_driver);
- return (pci_rc < 0) ? pci_rc : 0;
-}
-
-static void __exit e752x_exit(void)
-{
- debugf3("%s()\n", __func__);
- pci_unregister_driver(&e752x_driver);
-}
-
-module_init(e752x_init);
-module_exit(e752x_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
-MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
-
-module_param(force_function_unhide, int, 0444);
-MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
- " 1=force unhide and hope BIOS doesn't fight driver for "
- "Dev0:Fun1 access");
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
-
-module_param(sysbus_parity, int, 0444);
-MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
- " 1=enable system bus parity checking, default=auto-detect");
-module_param(report_non_memory_errors, int, 0644);
-MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
- "reporting, 1=enable non-memory error reporting");
diff --git a/ANDROID_3.4.5/drivers/edac/e7xxx_edac.c b/ANDROID_3.4.5/drivers/edac/e7xxx_edac.c
deleted file mode 100644
index 68dea87b..00000000
--- a/ANDROID_3.4.5/drivers/edac/e7xxx_edac.c
+++ /dev/null
@@ -1,576 +0,0 @@
-/*
- * Intel e7xxx Memory Controller kernel module
- * (C) 2003 Linux Networx (http://lnxi.com)
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * See "enum e7xxx_chips" below for supported chipsets
- *
- * Written by Thayne Harbaugh
- * Based on work by Dan Hollis <goemon at anime dot net> and others.
- * http://www.anime.net/~goemon/linux-ecc/
- *
- * Contributors:
- * Eric Biederman (Linux Networx)
- * Tom Zimmerman (Linux Networx)
- * Jim Garlick (Lawrence Livermore National Labs)
- * Dave Peterson (Lawrence Livermore National Labs)
- * That One Guy (Some other place)
- * Wang Zhenyu (intel.com)
- *
- * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/edac.h>
-#include "edac_core.h"
-
-#define E7XXX_REVISION " Ver: 2.0.2"
-#define EDAC_MOD_STR "e7xxx_edac"
-
-#define e7xxx_printk(level, fmt, arg...) \
- edac_printk(level, "e7xxx", fmt, ##arg)
-
-#define e7xxx_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg)
-
-#ifndef PCI_DEVICE_ID_INTEL_7205_0
-#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
-#endif /* PCI_DEVICE_ID_INTEL_7205_0 */
-
-#ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR
-#define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551
-#endif /* PCI_DEVICE_ID_INTEL_7205_1_ERR */
-
-#ifndef PCI_DEVICE_ID_INTEL_7500_0
-#define PCI_DEVICE_ID_INTEL_7500_0 0x2540
-#endif /* PCI_DEVICE_ID_INTEL_7500_0 */
-
-#ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR
-#define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541
-#endif /* PCI_DEVICE_ID_INTEL_7500_1_ERR */
-
-#ifndef PCI_DEVICE_ID_INTEL_7501_0
-#define PCI_DEVICE_ID_INTEL_7501_0 0x254c
-#endif /* PCI_DEVICE_ID_INTEL_7501_0 */
-
-#ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR
-#define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541
-#endif /* PCI_DEVICE_ID_INTEL_7501_1_ERR */
-
-#ifndef PCI_DEVICE_ID_INTEL_7505_0
-#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
-#endif /* PCI_DEVICE_ID_INTEL_7505_0 */
-
-#ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR
-#define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551
-#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
-
-#define E7XXX_NR_CSROWS 8 /* number of csrows */
-#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */
-
-/* E7XXX register addresses - device 0 function 0 */
-#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
-#define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */
- /*
- * 31 Device width row 7 0=x8 1=x4
- * 27 Device width row 6
- * 23 Device width row 5
- * 19 Device width row 4
- * 15 Device width row 3
- * 11 Device width row 2
- * 7 Device width row 1
- * 3 Device width row 0
- */
-#define E7XXX_DRC 0x7C /* DRAM controller mode reg (32b) */
- /*
- * 22 Number channels 0=1,1=2
- * 19:18 DRB Granularity 32/64MB
- */
-#define E7XXX_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
-#define E7XXX_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
-#define E7XXX_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
-
-/* E7XXX register addresses - device 0 function 1 */
-#define E7XXX_DRAM_FERR 0x80 /* DRAM first error register (8b) */
-#define E7XXX_DRAM_NERR 0x82 /* DRAM next error register (8b) */
-#define E7XXX_DRAM_CELOG_ADD 0xA0 /* DRAM first correctable memory */
- /* error address register (32b) */
- /*
- * 31:28 Reserved
- * 27:6 CE address (4k block 33:12)
- * 5:0 Reserved
- */
-#define E7XXX_DRAM_UELOG_ADD 0xB0 /* DRAM first uncorrectable memory */
- /* error address register (32b) */
- /*
- * 31:28 Reserved
-					 * 27:6  UE address (4k block 33:12)
- * 5:0 Reserved
- */
-#define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */
- /* error syndrome register (16b) */
-
-enum e7xxx_chips {
- E7500 = 0,
- E7501,
- E7505,
- E7205,
-};
-
-struct e7xxx_pvt {
- struct pci_dev *bridge_ck;
- u32 tolm;
- u32 remapbase;
- u32 remaplimit;
- const struct e7xxx_dev_info *dev_info;
-};
-
-struct e7xxx_dev_info {
- u16 err_dev;
- const char *ctl_name;
-};
-
-struct e7xxx_error_info {
- u8 dram_ferr;
- u8 dram_nerr;
- u32 dram_celog_add;
- u16 dram_celog_syndrome;
- u32 dram_uelog_add;
-};
-
-static struct edac_pci_ctl_info *e7xxx_pci;
-
-static const struct e7xxx_dev_info e7xxx_devs[] = {
- [E7500] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
- .ctl_name = "E7500"},
- [E7501] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
- .ctl_name = "E7501"},
- [E7505] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
- .ctl_name = "E7505"},
- [E7205] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
- .ctl_name = "E7205"},
-};
-
-/* FIXME - is this valid for both SECDED and S4ECD4ED? */
-static inline int e7xxx_find_channel(u16 syndrome)
-{
- debugf3("%s()\n", __func__);
-
- if ((syndrome & 0xff00) == 0)
- return 0;
-
- if ((syndrome & 0x00ff) == 0)
- return 1;
-
- if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
- return 0;
-
- return 1;
-}
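/* For illustration of the checks above: a syndrome of 0x0004 (bits only in
 * the low byte) decodes to channel 0, while 0x0400 (bits only in the high
 * byte) decodes to channel 1.
 */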
-
-static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
- unsigned long page)
-{
- u32 remap;
- struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info;
-
- debugf3("%s()\n", __func__);
-
- if ((page < pvt->tolm) ||
- ((page >= 0x100000) && (page < pvt->remapbase)))
- return page;
-
- remap = (page - pvt->tolm) + pvt->remapbase;
-
- if (remap < pvt->remaplimit)
- return remap;
-
- e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
- return pvt->tolm - 1;
-}
-
-static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
-{
- u32 error_1b, page;
- u16 syndrome;
- int row;
- int channel;
-
- debugf3("%s()\n", __func__);
- /* read the error address */
- error_1b = info->dram_celog_add;
- /* FIXME - should use PAGE_SHIFT */
- page = error_1b >> 6; /* convert the address to 4k page */
- /* read the syndrome */
- syndrome = info->dram_celog_syndrome;
- /* FIXME - check for -1 */
- row = edac_mc_find_csrow_by_page(mci, page);
- /* convert syndrome to channel */
- channel = e7xxx_find_channel(syndrome);
- edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE");
-}
-
-static void process_ce_no_info(struct mem_ctl_info *mci)
-{
- debugf3("%s()\n", __func__);
- edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
-}
-
-static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
-{
- u32 error_2b, block_page;
- int row;
-
- debugf3("%s()\n", __func__);
- /* read the error address */
- error_2b = info->dram_uelog_add;
- /* FIXME - should use PAGE_SHIFT */
- block_page = error_2b >> 6; /* convert to 4k address */
- row = edac_mc_find_csrow_by_page(mci, block_page);
- edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
-}
-
-static void process_ue_no_info(struct mem_ctl_info *mci)
-{
- debugf3("%s()\n", __func__);
- edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
-}
-
-static void e7xxx_get_error_info(struct mem_ctl_info *mci,
- struct e7xxx_error_info *info)
-{
- struct e7xxx_pvt *pvt;
-
- pvt = (struct e7xxx_pvt *)mci->pvt_info;
- pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, &info->dram_ferr);
- pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, &info->dram_nerr);
-
- if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
- pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
- &info->dram_celog_add);
- pci_read_config_word(pvt->bridge_ck,
- E7XXX_DRAM_CELOG_SYNDROME,
- &info->dram_celog_syndrome);
- }
-
- if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
- pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
- &info->dram_uelog_add);
-
- if (info->dram_ferr & 3)
- pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
-
- if (info->dram_nerr & 3)
- pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
-}
-
-static int e7xxx_process_error_info(struct mem_ctl_info *mci,
- struct e7xxx_error_info *info,
- int handle_errors)
-{
- int error_found;
-
- error_found = 0;
-
- /* decode and report errors */
- if (info->dram_ferr & 1) { /* check first error correctable */
- error_found = 1;
-
- if (handle_errors)
- process_ce(mci, info);
- }
-
- if (info->dram_ferr & 2) { /* check first error uncorrectable */
- error_found = 1;
-
- if (handle_errors)
- process_ue(mci, info);
- }
-
- if (info->dram_nerr & 1) { /* check next error correctable */
- error_found = 1;
-
- if (handle_errors) {
- if (info->dram_ferr & 1)
- process_ce_no_info(mci);
- else
- process_ce(mci, info);
- }
- }
-
- if (info->dram_nerr & 2) { /* check next error uncorrectable */
- error_found = 1;
-
- if (handle_errors) {
- if (info->dram_ferr & 2)
- process_ue_no_info(mci);
- else
- process_ue(mci, info);
- }
- }
-
- return error_found;
-}
-
-static void e7xxx_check(struct mem_ctl_info *mci)
-{
- struct e7xxx_error_info info;
-
- debugf3("%s()\n", __func__);
- e7xxx_get_error_info(mci, &info);
- e7xxx_process_error_info(mci, &info, 1);
-}
-
-/* Return 1 if dual channel mode is active. Else return 0. */
-static inline int dual_channel_active(u32 drc, int dev_idx)
-{
- return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1;
-}
-
-/* Return DRB granularity (0=32mb, 1=64mb). */
-static inline int drb_granularity(u32 drc, int dev_idx)
-{
- /* only e7501 can be single channel */
- return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1;
-}
-
-static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
- int dev_idx, u32 drc)
-{
- unsigned long last_cumul_size;
- int index;
- u8 value;
- u32 dra, cumul_size;
- int drc_chan, drc_drbg, drc_ddim, mem_dev;
- struct csrow_info *csrow;
-
- pci_read_config_dword(pdev, E7XXX_DRA, &dra);
- drc_chan = dual_channel_active(drc, dev_idx);
- drc_drbg = drb_granularity(drc, dev_idx);
- drc_ddim = (drc >> 20) & 0x3;
- last_cumul_size = 0;
-
- /* The dram row boundary (DRB) reg values are boundary address
- * for each DRAM row with a granularity of 32 or 64MB (single/dual
- * channel operation). DRB regs are cumulative; therefore DRB7 will
- * contain the total memory contained in all eight rows.
- */
- for (index = 0; index < mci->nr_csrows; index++) {
- /* mem_dev 0=x8, 1=x4 */
- mem_dev = (dra >> (index * 4 + 3)) & 0x1;
- csrow = &mci->csrows[index];
-
- pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
- /* convert a 64 or 32 MiB DRB to a page size. */
- cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
- debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
- cumul_size);
- if (cumul_size == last_cumul_size)
- continue; /* not populated */
-
- csrow->first_page = last_cumul_size;
- csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
- last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
- csrow->mtype = MEM_RDDR; /* only one type supported */
- csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
-
- /*
- * if single channel or x8 devices then SECDED
- * if dual channel and x4 then S4ECD4ED
- */
- if (drc_ddim) {
- if (drc_chan && mem_dev) {
- csrow->edac_mode = EDAC_S4ECD4ED;
- mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
- } else {
- csrow->edac_mode = EDAC_SECDED;
- mci->edac_cap |= EDAC_FLAG_SECDED;
- }
- } else
- csrow->edac_mode = EDAC_NONE;
- }
-}
-
-static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
-{
- u16 pci_data;
- struct mem_ctl_info *mci = NULL;
- struct e7xxx_pvt *pvt = NULL;
- u32 drc;
- int drc_chan;
- struct e7xxx_error_info discard;
-
- debugf0("%s(): mci\n", __func__);
-
- pci_read_config_dword(pdev, E7XXX_DRC, &drc);
-
- drc_chan = dual_channel_active(drc, dev_idx);
- mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0);
-
- if (mci == NULL)
- return -ENOMEM;
-
- debugf3("%s(): init mci\n", __func__);
- mci->mtype_cap = MEM_FLAG_RDDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
- EDAC_FLAG_S4ECD4ED;
- /* FIXME - what if different memory types are in different csrows? */
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = E7XXX_REVISION;
- mci->dev = &pdev->dev;
- debugf3("%s(): init pvt\n", __func__);
- pvt = (struct e7xxx_pvt *)mci->pvt_info;
- pvt->dev_info = &e7xxx_devs[dev_idx];
- pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
- pvt->dev_info->err_dev, pvt->bridge_ck);
-
- if (!pvt->bridge_ck) {
-		e7xxx_printk(KERN_ERR, "error reporting device not found: "
- "vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
- goto fail0;
- }
-
- debugf3("%s(): more mci init\n", __func__);
- mci->ctl_name = pvt->dev_info->ctl_name;
- mci->dev_name = pci_name(pdev);
- mci->edac_check = e7xxx_check;
- mci->ctl_page_to_phys = ctl_page_to_phys;
- e7xxx_init_csrows(mci, pdev, dev_idx, drc);
- mci->edac_cap |= EDAC_FLAG_NONE;
- debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
- /* load the top of low memory, remap base, and remap limit vars */
- pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
- pvt->tolm = ((u32) pci_data) << 4;
- pci_read_config_word(pdev, E7XXX_REMAPBASE, &pci_data);
- pvt->remapbase = ((u32) pci_data) << 14;
- pci_read_config_word(pdev, E7XXX_REMAPLIMIT, &pci_data);
- pvt->remaplimit = ((u32) pci_data) << 14;
- e7xxx_printk(KERN_INFO,
- "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
- pvt->remapbase, pvt->remaplimit);
-
- /* clear any pending errors, or initial state bits */
- e7xxx_get_error_info(mci, &discard);
-
- /* Here we assume that we will never see multiple instances of this
- * type of memory controller. The ID is therefore hardcoded to 0.
- */
- if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
- goto fail1;
- }
-
- /* allocating generic PCI control info */
- e7xxx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
- if (!e7xxx_pci) {
- printk(KERN_WARNING
- "%s(): Unable to create PCI control\n",
- __func__);
- printk(KERN_WARNING
- "%s(): PCI error report via EDAC not setup\n",
- __func__);
- }
-
- /* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
- return 0;
-
-fail1:
- pci_dev_put(pvt->bridge_ck);
-
-fail0:
- edac_mc_free(mci);
-
- return -ENODEV;
-}
-
-/* returns count (>= 0), or negative on error */
-static int __devinit e7xxx_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- debugf0("%s()\n", __func__);
-
- /* wake up and enable device */
- return pci_enable_device(pdev) ?
- -EIO : e7xxx_probe1(pdev, ent->driver_data);
-}
-
-static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
- struct e7xxx_pvt *pvt;
-
- debugf0("%s()\n", __func__);
-
- if (e7xxx_pci)
- edac_pci_release_generic_ctl(e7xxx_pci);
-
- if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
- return;
-
- pvt = (struct e7xxx_pvt *)mci->pvt_info;
- pci_dev_put(pvt->bridge_ck);
- edac_mc_free(mci);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(e7xxx_pci_tbl) = {
- {
- PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7205},
- {
- PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7500},
- {
- PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7501},
- {
- PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7505},
- {
- 0,
- } /* 0 terminated list. */
-};
-
-MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
-
-static struct pci_driver e7xxx_driver = {
- .name = EDAC_MOD_STR,
- .probe = e7xxx_init_one,
- .remove = __devexit_p(e7xxx_remove_one),
- .id_table = e7xxx_pci_tbl,
-};
-
-static int __init e7xxx_init(void)
-{
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- return pci_register_driver(&e7xxx_driver);
-}
-
-static void __exit e7xxx_exit(void)
-{
- pci_unregister_driver(&e7xxx_driver);
-}
-
-module_init(e7xxx_init);
-module_exit(e7xxx_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
-	      "Based on work by Dan Hollis et al");
-MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/ANDROID_3.4.5/drivers/edac/edac_core.h b/ANDROID_3.4.5/drivers/edac/edac_core.h
deleted file mode 100644
index e48ab310..00000000
--- a/ANDROID_3.4.5/drivers/edac/edac_core.h
+++ /dev/null
@@ -1,528 +0,0 @@
-/*
- * Defines, structures, APIs for edac_core module
- *
- * (C) 2007 Linux Networx (http://lnxi.com)
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Written by Thayne Harbaugh
- * Based on work by Dan Hollis <goemon at anime dot net> and others.
- * http://www.anime.net/~goemon/linux-ecc/
- *
- * NMI handling support added by
- * Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
- *
- * Refactored for multi-source files:
- * Doug Thompson <norsk5@xmission.com>
- *
- */
-
-#ifndef _EDAC_CORE_H_
-#define _EDAC_CORE_H_
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/pci.h>
-#include <linux/time.h>
-#include <linux/nmi.h>
-#include <linux/rcupdate.h>
-#include <linux/completion.h>
-#include <linux/kobject.h>
-#include <linux/platform_device.h>
-#include <linux/workqueue.h>
-#include <linux/edac.h>
-
-#define EDAC_DEVICE_NAME_LEN 31
-#define EDAC_ATTRIB_VALUE_LEN 15
-
-#if PAGE_SHIFT < 20
-#define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
-#define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
-#else				/* PAGE_SHIFT >= 20 */
-#define PAGES_TO_MiB(pages) ((pages) << (PAGE_SHIFT - 20))
-#define MiB_TO_PAGES(mb) ((mb) >> (PAGE_SHIFT - 20))
-#endif
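/* For example, with the common PAGE_SHIFT of 12 (4 KiB pages):
 * PAGES_TO_MiB(512) == 2 and MiB_TO_PAGES(2) == 512.
 */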
-
-#define edac_printk(level, prefix, fmt, arg...) \
- printk(level "EDAC " prefix ": " fmt, ##arg)
-
-#define edac_mc_printk(mci, level, fmt, arg...) \
- printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
-
-#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
- printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
-
-#define edac_device_printk(ctl, level, fmt, arg...) \
- printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg)
-
-#define edac_pci_printk(ctl, level, fmt, arg...) \
- printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg)
-
-/* prefixes for edac_printk() and edac_mc_printk() */
-#define EDAC_MC "MC"
-#define EDAC_PCI "PCI"
-#define EDAC_DEBUG "DEBUG"
-
-extern const char *edac_mem_types[];
-
-#ifdef CONFIG_EDAC_DEBUG
-extern int edac_debug_level;
-
-#define edac_debug_printk(level, fmt, arg...) \
- do { \
- if (level <= edac_debug_level) \
- edac_printk(KERN_DEBUG, EDAC_DEBUG, \
- "%s: " fmt, __func__, ##arg); \
- } while (0)
-
-#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
-#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
-#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
-#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
-#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
-
-#else /* !CONFIG_EDAC_DEBUG */
-
-#define debugf0( ... )
-#define debugf1( ... )
-#define debugf2( ... )
-#define debugf3( ... )
-#define debugf4( ... )
-
-#endif /* !CONFIG_EDAC_DEBUG */
-
-#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
- PCI_DEVICE_ID_ ## vend ## _ ## dev
-
-#define edac_dev_name(dev) (dev)->dev_name
-
-/*
- * The following are the structures to provide for a generic
- * or abstract 'edac_device'. This set of structures and the
- * code that implements the APIs for the same, provide for
- * registering EDAC type devices which are NOT standard memory.
- *
- * CPU caches (L1 and L2)
- * DMA engines
- *	Core CPU switches
- * Fabric switch units
- * PCIe interface controllers
- * other EDAC/ECC type devices that can be monitored for
- * errors, etc.
- *
- * It allows for a two-level hierarchy. For example:
- *
- * cache could be composed of L1, L2 and L3 levels of cache.
- * Each CPU core would have its own L1 cache, while sharing
- * L2 and maybe L3 caches.
- *
- * View them arranged, via the sysfs presentation:
- * /sys/devices/system/edac/..
- *
- * mc/ <existing memory device directory>
- * cpu/cpu0/.. <L1 and L2 block directory>
- * /L1-cache/ce_count
- * /ue_count
- * /L2-cache/ce_count
- * /ue_count
- * cpu/cpu1/.. <L1 and L2 block directory>
- * /L1-cache/ce_count
- * /ue_count
- * /L2-cache/ce_count
- * /ue_count
- * ...
- *
- * the L1 and L2 directories would be "edac_device_block's"
- */
-
-struct edac_device_counter {
- u32 ue_count;
- u32 ce_count;
-};
-
-/* forward reference */
-struct edac_device_ctl_info;
-struct edac_device_block;
-
-/* edac_dev_sysfs_attribute structure
- * used for driver sysfs attributes in mem_ctl_info
- * for extra controls and attributes:
- * e.g. high-level error injection controls
- */
-struct edac_dev_sysfs_attribute {
- struct attribute attr;
- ssize_t (*show)(struct edac_device_ctl_info *, char *);
- ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t);
-};
-
-/* edac_dev_sysfs_block_attribute structure
- *
- * used in leaf 'block' nodes for adding controls/attributes
- *
- * each block in each instance of the containing control structure
- * can have an array of the following. The show and store functions
- * will be filled in with the show/store function in the
- * low level driver.
- *
- * The 'value' field will be the actual value field used for
- * counting
- */
-struct edac_dev_sysfs_block_attribute {
- struct attribute attr;
- ssize_t (*show)(struct kobject *, struct attribute *, char *);
- ssize_t (*store)(struct kobject *, struct attribute *,
- const char *, size_t);
- struct edac_device_block *block;
-
- unsigned int value;
-};
-
-/* device block control structure */
-struct edac_device_block {
- struct edac_device_instance *instance; /* Up Pointer */
- char name[EDAC_DEVICE_NAME_LEN + 1];
-
- struct edac_device_counter counters; /* basic UE and CE counters */
-
- int nr_attribs; /* how many attributes */
-
- /* this block's attributes, could be NULL */
- struct edac_dev_sysfs_block_attribute *block_attributes;
-
- /* edac sysfs device control */
- struct kobject kobj;
-};
-
-/* device instance control structure */
-struct edac_device_instance {
- struct edac_device_ctl_info *ctl; /* Up pointer */
- char name[EDAC_DEVICE_NAME_LEN + 4];
-
- struct edac_device_counter counters; /* instance counters */
-
- u32 nr_blocks; /* how many blocks */
- struct edac_device_block *blocks; /* block array */
-
- /* edac sysfs device control */
- struct kobject kobj;
-};
-
-
-/*
- * Abstract edac_device control info structure
- *
- */
-struct edac_device_ctl_info {
- /* for global list of edac_device_ctl_info structs */
- struct list_head link;
-
- struct module *owner; /* Module owner of this control struct */
-
- int dev_idx;
-
- /* Per instance controls for this edac_device */
- int log_ue; /* boolean for logging UEs */
- int log_ce; /* boolean for logging CEs */
- int panic_on_ue; /* boolean for panic'ing on an UE */
-	unsigned poll_msec;	/* polling interval in milliseconds */
- unsigned long delay; /* number of jiffies for poll_msec */
-
- /* Additional top controller level attributes, but specified
- * by the low level driver.
- *
- * Set by the low level driver to provide attributes at the
- * controller level, same level as 'ue_count' and 'ce_count' above.
- * An array of structures, NULL terminated
- *
- * If attributes are desired, then set to array of attributes
- * If no attributes are desired, leave NULL
- */
- struct edac_dev_sysfs_attribute *sysfs_attributes;
-
- /* pointer to main 'edac' subsys in sysfs */
- struct bus_type *edac_subsys;
-
- /* the internal state of this controller instance */
- int op_state;
- /* work struct for this instance */
- struct delayed_work work;
-
- /* pointer to edac polling checking routine:
- * If NOT NULL: points to polling check routine
- * If NULL: Then assumes INTERRUPT operation, where
- * MC driver will receive events
- */
- void (*edac_check) (struct edac_device_ctl_info * edac_dev);
-
- struct device *dev; /* pointer to device structure */
-
- const char *mod_name; /* module name */
- const char *ctl_name; /* edac controller name */
- const char *dev_name; /* pci/platform/etc... name */
-
- void *pvt_info; /* pointer to 'private driver' info */
-
- unsigned long start_time; /* edac_device load start time (jiffies) */
-
- struct completion removal_complete;
-
- /* sysfs top name under 'edac' directory
- * and instance name:
- * cpu/cpu0/...
- * cpu/cpu1/...
- * cpu/cpu2/...
- * ...
- */
- char name[EDAC_DEVICE_NAME_LEN + 1];
-
- /* Number of instances supported on this control structure
- * and the array of those instances
- */
- u32 nr_instances;
- struct edac_device_instance *instances;
-
-	/* Event counters for this whole EDAC Device */
- struct edac_device_counter counters;
-
- /* edac sysfs device control for the 'name'
- * device this structure controls
- */
- struct kobject kobj;
-};
-
-/* To get from the instance's wq to the beginning of the ctl structure */
-#define to_edac_mem_ctl_work(w) \
- container_of(w, struct mem_ctl_info, work)
-
-#define to_edac_device_ctl_work(w) \
- container_of(w,struct edac_device_ctl_info,work)
-
-/*
- * The alloc() and free() functions for the 'edac_device' control info
- * structure. A MC driver will allocate one of these for each edac_device
- * it is going to control/register with the EDAC CORE.
- */
-extern struct edac_device_ctl_info *edac_device_alloc_ctl_info(
- unsigned sizeof_private,
- char *edac_device_name, unsigned nr_instances,
- char *edac_block_name, unsigned nr_blocks,
- unsigned offset_value,
- struct edac_dev_sysfs_block_attribute *block_attributes,
- unsigned nr_attribs,
- int device_index);
-
-/* The offset value can be:
- * -1 indicating no offset value
- * 0 for zero-based block numbers
- * 1 for 1-based block number
- * other for other-based block number
- */
-#define BLOCK_OFFSET_VALUE_OFF ((unsigned) -1)
-
-extern void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info);
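/* A minimal, hypothetical usage sketch of the two declarations above (names
 * and parameter values are illustrative only): a driver monitoring two CPU
 * cores, each with an L1 and an L2 cache block, could allocate its control
 * structure as shown, and later release it with edac_device_free_ctl_info().
 */
static inline struct edac_device_ctl_info *example_cpu_cache_alloc(void)
{
	return edac_device_alloc_ctl_info(0,		/* no private data */
					  "cpu", 2,	/* two instances */
					  "cache", 2,	/* two blocks each */
					  1,		/* blocks "cache1", "cache2" */
					  NULL, 0,	/* no extra sysfs attributes */
					  0);		/* device index */
}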
-
-#ifdef CONFIG_PCI
-
-struct edac_pci_counter {
- atomic_t pe_count;
- atomic_t npe_count;
-};
-
-/*
- * Abstract edac_pci control info structure
- *
- */
-struct edac_pci_ctl_info {
- /* for global list of edac_pci_ctl_info structs */
- struct list_head link;
-
- int pci_idx;
-
- struct bus_type *edac_subsys; /* pointer to subsystem */
-
- /* the internal state of this controller instance */
- int op_state;
- /* work struct for this instance */
- struct delayed_work work;
-
- /* pointer to edac polling checking routine:
- * If NOT NULL: points to polling check routine
- * If NULL: Then assumes INTERRUPT operation, where
- * MC driver will receive events
- */
- void (*edac_check) (struct edac_pci_ctl_info * edac_dev);
-
- struct device *dev; /* pointer to device structure */
-
- const char *mod_name; /* module name */
- const char *ctl_name; /* edac controller name */
- const char *dev_name; /* pci/platform/etc... name */
-
- void *pvt_info; /* pointer to 'private driver' info */
-
- unsigned long start_time; /* edac_pci load start time (jiffies) */
-
- struct completion complete;
-
- /* sysfs top name under 'edac' directory
- * and instance name:
- * cpu/cpu0/...
- * cpu/cpu1/...
- * cpu/cpu2/...
- * ...
- */
- char name[EDAC_DEVICE_NAME_LEN + 1];
-
-	/* Event counters for this whole EDAC Device */
- struct edac_pci_counter counters;
-
- /* edac sysfs device control for the 'name'
- * device this structure controls
- */
- struct kobject kobj;
- struct completion kobj_complete;
-};
-
-#define to_edac_pci_ctl_work(w) \
- container_of(w, struct edac_pci_ctl_info,work)
-
-/* write all or some bits in a byte-register*/
-static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
- u8 mask)
-{
- if (mask != 0xff) {
- u8 buf;
-
- pci_read_config_byte(pdev, offset, &buf);
- value &= mask;
- buf &= ~mask;
- value |= buf;
- }
-
- pci_write_config_byte(pdev, offset, value);
-}
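/* For example, the e7xxx driver calls
 * pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
 * only bits 1:0 are written, while the remaining bits of the register are
 * read back and preserved by the mask logic above.
 */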
-
-/* write all or some bits in a word-register*/
-static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
- u16 value, u16 mask)
-{
- if (mask != 0xffff) {
- u16 buf;
-
- pci_read_config_word(pdev, offset, &buf);
- value &= mask;
- buf &= ~mask;
- value |= buf;
- }
-
- pci_write_config_word(pdev, offset, value);
-}
-
-/*
- * pci_write_bits32
- *
- * edac local routine to do pci_write_config_dword, but adds
- * a mask parameter. If mask is all ones, ignore the mask.
- * Otherwise utilize the mask to isolate specified bits
- *
- * write all or some bits in a dword-register
- */
-static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
- u32 value, u32 mask)
-{
- if (mask != 0xffffffff) {
- u32 buf;
-
- pci_read_config_dword(pdev, offset, &buf);
- value &= mask;
- buf &= ~mask;
- value |= buf;
- }
-
- pci_write_config_dword(pdev, offset, value);
-}
-
-#endif /* CONFIG_PCI */
-
-extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
- unsigned nr_chans, int edac_index);
-extern int edac_mc_add_mc(struct mem_ctl_info *mci);
-extern void edac_mc_free(struct mem_ctl_info *mci);
-extern struct mem_ctl_info *edac_mc_find(int idx);
-extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
-extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
-extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
- unsigned long page);
-
-/*
- * The no info errors are used when error overflows are reported.
- * There are a limited number of error logging registers that can
- * be exhausted. When all registers are exhausted and an additional
- * error occurs then an error overflow register records that an
- * error occurred and the type of error, but doesn't have any
- * further information. The ce/ue versions make for cleaner
- * reporting logic and function interface - reduces conditional
- * statement clutter and extra function arguments.
- */
-extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page,
- unsigned long syndrome, int row, int channel,
- const char *msg);
-extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
- const char *msg);
-extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, int row,
- const char *msg);
-extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
- const char *msg);
-extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow,
- unsigned int channel0, unsigned int channel1,
- char *msg);
-extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow,
- unsigned int channel, char *msg);
-
-/*
- * edac_device APIs
- */
-extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev);
-extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev);
-extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
- int inst_nr, int block_nr, const char *msg);
-extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
- int inst_nr, int block_nr, const char *msg);
-extern int edac_device_alloc_index(void);
-
-/*
- * edac_pci APIs
- */
-extern struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
- const char *edac_pci_name);
-
-extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci);
-
-extern void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
- unsigned long value);
-
-extern int edac_pci_alloc_index(void);
-extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx);
-extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev);
-
-extern struct edac_pci_ctl_info *edac_pci_create_generic_ctl(
- struct device *dev,
- const char *mod_name);
-
-extern void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci);
-extern int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci);
-extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
-
-/*
- * edac misc APIs
- */
-extern char *edac_op_state_to_string(int op_state);
-
-#endif /* _EDAC_CORE_H_ */
diff --git a/ANDROID_3.4.5/drivers/edac/edac_device.c b/ANDROID_3.4.5/drivers/edac/edac_device.c
deleted file mode 100644
index 4b154593..00000000
--- a/ANDROID_3.4.5/drivers/edac/edac_device.c
+++ /dev/null
@@ -1,723 +0,0 @@
-
-/*
- * edac_device.c
- * (C) 2007 www.douglaskthompson.com
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Written by Doug Thompson <norsk5@xmission.com>
- *
- * edac_device API implementation
- * 19 Jan 2007
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/sysctl.h>
-#include <linux/highmem.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/ctype.h>
-#include <linux/workqueue.h>
-#include <asm/uaccess.h>
-#include <asm/page.h>
-
-#include "edac_core.h"
-#include "edac_module.h"
-
-/* lock for the list: 'edac_device_list', manipulation of this list
- * is protected by the 'device_ctls_mutex' lock
- */
-static DEFINE_MUTEX(device_ctls_mutex);
-static LIST_HEAD(edac_device_list);
-
-#ifdef CONFIG_EDAC_DEBUG
-static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
-{
- debugf3("\tedac_dev = %p dev_idx=%d \n", edac_dev, edac_dev->dev_idx);
- debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
- debugf3("\tdev = %p\n", edac_dev->dev);
- debugf3("\tmod_name:ctl_name = %s:%s\n",
- edac_dev->mod_name, edac_dev->ctl_name);
- debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info);
-}
-#endif /* CONFIG_EDAC_DEBUG */
-
-
-/*
- * edac_device_alloc_ctl_info()
- * Allocate a new edac device control info structure
- *
- *	The control structure is allocated as one complete chunk
- *	from the OS. It is in turn sub-allocated to the
- *	various objects that compose the structure
- *
- *	The structure has a 'nr_instances' array within itself.
- *	Each instance represents a major component.
- *	Example: L1 cache and L2 cache are 2 instance components
- *
- *	Within each instance is an array of 'nr_blocks' blocks
- */
-struct edac_device_ctl_info *edac_device_alloc_ctl_info(
- unsigned sz_private,
- char *edac_device_name, unsigned nr_instances,
- char *edac_block_name, unsigned nr_blocks,
- unsigned offset_value, /* zero, 1, or other based offset */
- struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
- int device_index)
-{
- struct edac_device_ctl_info *dev_ctl;
- struct edac_device_instance *dev_inst, *inst;
- struct edac_device_block *dev_blk, *blk_p, *blk;
- struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
- unsigned total_size;
- unsigned count;
- unsigned instance, block, attr;
- void *pvt;
- int err;
-
- debugf4("%s() instances=%d blocks=%d\n",
- __func__, nr_instances, nr_blocks);
-
- /* Calculate the size of memory we need to allocate AND
- * determine the offsets of the various item arrays
- * (instance,block,attrib) from the start of an allocated structure.
- * We want the alignment of each item (instance,block,attrib)
- * to be at least as stringent as what the compiler would
- * provide if we could simply hardcode everything into a single struct.
- */
- dev_ctl = (struct edac_device_ctl_info *)NULL;
-
- /* Calc the 'end' offset past end of ONE ctl_info structure
- * which will become the start of the 'instance' array
- */
- dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));
-
- /* Calc the 'end' offset past the instance array within the ctl_info
- * which will become the start of the block array
- */
- dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));
-
- /* Calc the 'end' offset past the dev_blk array
- * which will become the start of the attrib array, if any.
- */
- count = nr_instances * nr_blocks;
- dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));
-
- /* Check for case of when an attribute array is specified */
- if (nr_attrib > 0) {
- /* calc how many nr_attrib we need */
- count *= nr_attrib;
-
- /* Calc the 'end' offset past the attributes array */
- pvt = edac_align_ptr(&dev_attrib[count], sz_private);
- } else {
-		/* no attribute array specified */
- pvt = edac_align_ptr(dev_attrib, sz_private);
- }
-
- /* 'pvt' now points to where the private data area is.
- * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
- * is baselined at ZERO
- */
- total_size = ((unsigned long)pvt) + sz_private;
-
- /* Allocate the amount of memory for the set of control structures */
- dev_ctl = kzalloc(total_size, GFP_KERNEL);
- if (dev_ctl == NULL)
- return NULL;
-
- /* Adjust pointers so they point within the actual memory we
- * just allocated rather than an imaginary chunk of memory
- * located at address 0.
- * 'dev_ctl' points to REAL memory, while the others are
- * ZERO based and thus need to be adjusted to point within
- * the allocated memory.
- */
- dev_inst = (struct edac_device_instance *)
- (((char *)dev_ctl) + ((unsigned long)dev_inst));
- dev_blk = (struct edac_device_block *)
- (((char *)dev_ctl) + ((unsigned long)dev_blk));
- dev_attrib = (struct edac_dev_sysfs_block_attribute *)
- (((char *)dev_ctl) + ((unsigned long)dev_attrib));
- pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;
-
- /* Begin storing the information into the control info structure */
- dev_ctl->dev_idx = device_index;
- dev_ctl->nr_instances = nr_instances;
- dev_ctl->instances = dev_inst;
- dev_ctl->pvt_info = pvt;
-
- /* Default logging of CEs and UEs */
- dev_ctl->log_ce = 1;
- dev_ctl->log_ue = 1;
-
- /* Name of this edac device */
- snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name);
-
- debugf4("%s() edac_dev=%p next after end=%p\n",
- __func__, dev_ctl, pvt + sz_private );
-
- /* Initialize every Instance */
- for (instance = 0; instance < nr_instances; instance++) {
- inst = &dev_inst[instance];
- inst->ctl = dev_ctl;
- inst->nr_blocks = nr_blocks;
- blk_p = &dev_blk[instance * nr_blocks];
- inst->blocks = blk_p;
-
- /* name of this instance */
- snprintf(inst->name, sizeof(inst->name),
- "%s%u", edac_device_name, instance);
-
- /* Initialize every block in each instance */
- for (block = 0; block < nr_blocks; block++) {
- blk = &blk_p[block];
- blk->instance = inst;
- snprintf(blk->name, sizeof(blk->name),
- "%s%d", edac_block_name, block+offset_value);
-
- debugf4("%s() instance=%d inst_p=%p block=#%d "
- "block_p=%p name='%s'\n",
- __func__, instance, inst, block,
- blk, blk->name);
-
- /* if there are NO attributes OR no attribute pointer
- * then continue on to next block iteration
- */
- if ((nr_attrib == 0) || (attrib_spec == NULL))
- continue;
-
- /* setup the attribute array for this block */
- blk->nr_attribs = nr_attrib;
- attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
- blk->block_attributes = attrib_p;
-
- debugf4("%s() THIS BLOCK_ATTRIB=%p\n",
- __func__, blk->block_attributes);
-
- /* Initialize every user specified attribute in this
- * block with the data the caller passed in
- * Each block gets its own copy of pointers,
- * and its unique 'value'
- */
- for (attr = 0; attr < nr_attrib; attr++) {
- attrib = &attrib_p[attr];
-
- /* populate the unique per attrib
- * with the code pointers and info
- */
- attrib->attr = attrib_spec[attr].attr;
- attrib->show = attrib_spec[attr].show;
- attrib->store = attrib_spec[attr].store;
-
- attrib->block = blk; /* up link */
-
- debugf4("%s() alloc-attrib=%p attrib_name='%s' "
- "attrib-spec=%p spec-name=%s\n",
- __func__, attrib, attrib->attr.name,
- &attrib_spec[attr],
- attrib_spec[attr].attr.name
- );
- }
- }
- }
-
- /* Mark this instance as merely ALLOCATED */
- dev_ctl->op_state = OP_ALLOC;
-
- /*
- * Initialize the 'root' kobj for the edac_device controller
- */
- err = edac_device_register_sysfs_main_kobj(dev_ctl);
- if (err) {
- kfree(dev_ctl);
- return NULL;
- }
-
- /* at this point, the root kobj is valid, and in order to
- * 'free' the object, then the function:
- * edac_device_unregister_sysfs_main_kobj() must be called
- * which will perform kobj unregistration and the actual free
- * will occur during the kobject callback operation
- */
-
- return dev_ctl;
-}
-EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
-
-/*
- * edac_device_free_ctl_info()
- * frees the memory allocated by the edac_device_alloc_ctl_info()
- * function
- */
-void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
-{
- edac_device_unregister_sysfs_main_kobj(ctl_info);
-}
-EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);
-
-/*
- * find_edac_device_by_dev
- * scans the edac_device list for a specific 'struct device *'
- *
- * lock to be held prior to call: device_ctls_mutex
- *
- * Return:
- * pointer to control structure managing 'dev'
- * NULL if not found on list
- */
-static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
-{
- struct edac_device_ctl_info *edac_dev;
- struct list_head *item;
-
- debugf0("%s()\n", __func__);
-
- list_for_each(item, &edac_device_list) {
- edac_dev = list_entry(item, struct edac_device_ctl_info, link);
-
- if (edac_dev->dev == dev)
- return edac_dev;
- }
-
- return NULL;
-}
-
-/*
- * add_edac_dev_to_global_list
- * Before calling this function, caller must
- * assign a unique value to edac_dev->dev_idx.
- *
- * lock to be held prior to call: device_ctls_mutex
- *
- * Return:
- * 0 on success
- * 1 on failure.
- */
-static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
-{
- struct list_head *item, *insert_before;
- struct edac_device_ctl_info *rover;
-
- insert_before = &edac_device_list;
-
- /* Determine if already on the list */
- rover = find_edac_device_by_dev(edac_dev->dev);
- if (unlikely(rover != NULL))
- goto fail0;
-
- /* Insert in ascending order by 'dev_idx', so find position */
- list_for_each(item, &edac_device_list) {
- rover = list_entry(item, struct edac_device_ctl_info, link);
-
- if (rover->dev_idx >= edac_dev->dev_idx) {
- if (unlikely(rover->dev_idx == edac_dev->dev_idx))
- goto fail1;
-
- insert_before = item;
- break;
- }
- }
-
- list_add_tail_rcu(&edac_dev->link, insert_before);
- return 0;
-
-fail0:
- edac_printk(KERN_WARNING, EDAC_MC,
- "%s (%s) %s %s already assigned %d\n",
- dev_name(rover->dev), edac_dev_name(rover),
- rover->mod_name, rover->ctl_name, rover->dev_idx);
- return 1;
-
-fail1:
- edac_printk(KERN_WARNING, EDAC_MC,
- "bug in low-level driver: attempt to assign\n"
- " duplicate dev_idx %d in %s()\n", rover->dev_idx,
- __func__);
- return 1;
-}
-
-/*
- * del_edac_device_from_global_list
- */
-static void del_edac_device_from_global_list(struct edac_device_ctl_info
- *edac_device)
-{
- list_del_rcu(&edac_device->link);
-
- /* these are for safe removal of devices from global list while
- * NMI handlers may be traversing list
- */
- synchronize_rcu();
- INIT_LIST_HEAD(&edac_device->link);
-}
-
-/*
- * edac_device_workq_function
- * performs the operation scheduled by a workq request
- *
- * this workq is embedded within an edac_device_ctl_info
- * structure, that needs to be polled for possible error events.
- *
- * This operation is to acquire the list mutex lock
- * (thus preventing insertion or deletion)
- * and then call the device's poll function IFF this device is
- * running polled and there is a poll function defined.
- */
-static void edac_device_workq_function(struct work_struct *work_req)
-{
- struct delayed_work *d_work = to_delayed_work(work_req);
- struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);
-
- mutex_lock(&device_ctls_mutex);
-
- /* If we are being removed, bail out immediately */
- if (edac_dev->op_state == OP_OFFLINE) {
- mutex_unlock(&device_ctls_mutex);
- return;
- }
-
- /* Only poll controllers that are running polled and have a check */
- if ((edac_dev->op_state == OP_RUNNING_POLL) &&
- (edac_dev->edac_check != NULL)) {
- edac_dev->edac_check(edac_dev);
- }
-
- mutex_unlock(&device_ctls_mutex);
-
-	/* Reschedule the workq for the next time period. If the poll
-	 * interval is one second, round to the next whole second so that
-	 * timers are not firing all over the period between integral
-	 * seconds
-	 */
- if (edac_dev->poll_msec == 1000)
- queue_delayed_work(edac_workqueue, &edac_dev->work,
- round_jiffies_relative(edac_dev->delay));
- else
- queue_delayed_work(edac_workqueue, &edac_dev->work,
- edac_dev->delay);
-}
-
-/*
- * edac_device_workq_setup
- * initialize a workq item for this edac_device instance
- * passing in the new delay period in msec
- */
-void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
- unsigned msec)
-{
- debugf0("%s()\n", __func__);
-
-	/* store the arg 'msec' in the control structure for use in the
-	 * time period calculation, then convert it to the equivalent
-	 * number of jiffies
-	 */
- edac_dev->poll_msec = msec;
- edac_dev->delay = msecs_to_jiffies(msec);
-
- INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
-
-	/* optimize for the common 1 second case, firing exactly ON the
-	 * 1 second time event. This helps reduce timers firing at assorted
-	 * sub-second offsets, letting them all fire together on the
-	 * whole second
-	 */
- if (edac_dev->poll_msec == 1000)
- queue_delayed_work(edac_workqueue, &edac_dev->work,
- round_jiffies_relative(edac_dev->delay));
- else
- queue_delayed_work(edac_workqueue, &edac_dev->work,
- edac_dev->delay);
-}
-
-/*
- * edac_device_workq_teardown
- * stop the workq processing on this edac_dev
- */
-void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
-{
- int status;
-
- status = cancel_delayed_work(&edac_dev->work);
- if (status == 0) {
- /* workq instance might be running, wait for it */
- flush_workqueue(edac_workqueue);
- }
-}
-
-/*
- * edac_device_reset_delay_period
- *
- * need to stop any outstanding workq queued up at this time
- * because we will be resetting the sleep time.
- * Then restart the workq on the new delay
- */
-void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
- unsigned long value)
-{
- /* cancel the current workq request, without the mutex lock */
- edac_device_workq_teardown(edac_dev);
-
- /* acquire the mutex before doing the workq setup */
- mutex_lock(&device_ctls_mutex);
-
- /* restart the workq request, with new delay value */
- edac_device_workq_setup(edac_dev, value);
-
- mutex_unlock(&device_ctls_mutex);
-}
-
-/*
- * edac_device_alloc_index: Allocate a unique device index number
- *
- * Return:
- * allocated index number
- */
-int edac_device_alloc_index(void)
-{
- static atomic_t device_indexes = ATOMIC_INIT(0);
-
- return atomic_inc_return(&device_indexes) - 1;
-}
-EXPORT_SYMBOL_GPL(edac_device_alloc_index);
-
-/**
- * edac_device_add_device: Insert the 'edac_dev' structure into the
- * edac_device global list and create sysfs entries associated with
- * edac_device structure.
- * @edac_dev: pointer to the edac_device control structure to be added to
- *	the global list and exported via sysfs.
- *
- * Return:
- * 0 Success
- * !0 Failure
- */
-int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
-{
- debugf0("%s()\n", __func__);
-
-#ifdef CONFIG_EDAC_DEBUG
- if (edac_debug_level >= 3)
- edac_device_dump_device(edac_dev);
-#endif
- mutex_lock(&device_ctls_mutex);
-
- if (add_edac_dev_to_global_list(edac_dev))
- goto fail0;
-
- /* set load time so that error rate can be tracked */
- edac_dev->start_time = jiffies;
-
- /* create this instance's sysfs entries */
- if (edac_device_create_sysfs(edac_dev)) {
- edac_device_printk(edac_dev, KERN_WARNING,
- "failed to create sysfs device\n");
- goto fail1;
- }
-
- /* If there IS a check routine, then we are running POLLED */
- if (edac_dev->edac_check != NULL) {
- /* This instance is NOW RUNNING */
- edac_dev->op_state = OP_RUNNING_POLL;
-
- /*
- * enable workq processing on this instance,
- * default = 1000 msec
- */
- edac_device_workq_setup(edac_dev, 1000);
- } else {
- edac_dev->op_state = OP_RUNNING_INTERRUPT;
- }
-
- /* Report action taken */
- edac_device_printk(edac_dev, KERN_INFO,
- "Giving out device to module '%s' controller "
- "'%s': DEV '%s' (%s)\n",
- edac_dev->mod_name,
- edac_dev->ctl_name,
- edac_dev_name(edac_dev),
- edac_op_state_to_string(edac_dev->op_state));
-
- mutex_unlock(&device_ctls_mutex);
- return 0;
-
-fail1:
-	/* Some error, so remove the entry from the list */
- del_edac_device_from_global_list(edac_dev);
-
-fail0:
- mutex_unlock(&device_ctls_mutex);
- return 1;
-}
-EXPORT_SYMBOL_GPL(edac_device_add_device);
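
For context, a hedged sketch of how a low-level driver typically drives this registration path. The edac_device_alloc_ctl_info()/edac_device_free_ctl_info() calls and the structure fields follow edac_core.h in this tree; the platform device, the "l2" naming and example_edac_check() (sketched further below, after edac_device_handle_ue()) are illustrative assumptions only.

#include <linux/platform_device.h>
#include "edac_core.h"

static void example_edac_check(struct edac_device_ctl_info *edac_dev);

static int example_edac_probe(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev;

	/* one instance ("l2") with one block ("l2cache"), no private data */
	edac_dev = edac_device_alloc_ctl_info(0, "l2", 1, "l2cache", 1, 0,
					      NULL, 0,
					      edac_device_alloc_index());
	if (!edac_dev)
		return -ENOMEM;

	edac_dev->dev = &pdev->dev;
	edac_dev->mod_name = "example_edac";		/* illustrative names */
	edac_dev->ctl_name = "example_l2";
	edac_dev->dev_name = dev_name(&pdev->dev);
	edac_dev->edac_check = example_edac_check;	/* selects polled mode */

	/* edac_device_add_device() returns non-zero on failure */
	if (edac_device_add_device(edac_dev)) {
		edac_device_free_ctl_info(edac_dev);
		return -ENODEV;
	}

	return 0;
}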
-
-/**
- * edac_device_del_device:
- * Remove sysfs entries for specified edac_device structure and
- * then remove edac_device structure from global list
- *
- * @dev:
- *	Pointer to 'struct device' representing the edac_device
- *	structure to remove.
- *
- * Return:
- * Pointer to removed edac_device structure,
- * OR NULL if device not found.
- */
-struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
-{
- struct edac_device_ctl_info *edac_dev;
-
- debugf0("%s()\n", __func__);
-
- mutex_lock(&device_ctls_mutex);
-
- /* Find the structure on the list, if not there, then leave */
- edac_dev = find_edac_device_by_dev(dev);
- if (edac_dev == NULL) {
- mutex_unlock(&device_ctls_mutex);
- return NULL;
- }
-
- /* mark this instance as OFFLINE */
- edac_dev->op_state = OP_OFFLINE;
-
- /* deregister from global list */
- del_edac_device_from_global_list(edac_dev);
-
- mutex_unlock(&device_ctls_mutex);
-
- /* clear workq processing on this instance */
- edac_device_workq_teardown(edac_dev);
-
- /* Tear down the sysfs entries for this instance */
- edac_device_remove_sysfs(edac_dev);
-
- edac_printk(KERN_INFO, EDAC_MC,
- "Removed device %d for %s %s: DEV %s\n",
- edac_dev->dev_idx,
- edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));
-
- return edac_dev;
-}
-EXPORT_SYMBOL_GPL(edac_device_del_device);
-
-static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
-{
- return edac_dev->log_ce;
-}
-
-static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
-{
- return edac_dev->log_ue;
-}
-
-static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
- *edac_dev)
-{
- return edac_dev->panic_on_ue;
-}
-
-/*
- * edac_device_handle_ce
- * perform a common output and handling of an 'edac_dev' CE event
- */
-void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
- int inst_nr, int block_nr, const char *msg)
-{
- struct edac_device_instance *instance;
- struct edac_device_block *block = NULL;
-
- if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
- edac_device_printk(edac_dev, KERN_ERR,
- "INTERNAL ERROR: 'instance' out of range "
- "(%d >= %d)\n", inst_nr,
- edac_dev->nr_instances);
- return;
- }
-
- instance = edac_dev->instances + inst_nr;
-
- if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
- edac_device_printk(edac_dev, KERN_ERR,
- "INTERNAL ERROR: instance %d 'block' "
- "out of range (%d >= %d)\n",
- inst_nr, block_nr,
- instance->nr_blocks);
- return;
- }
-
- if (instance->nr_blocks > 0) {
- block = instance->blocks + block_nr;
- block->counters.ce_count++;
- }
-
- /* Propagate the count up the 'totals' tree */
- instance->counters.ce_count++;
- edac_dev->counters.ce_count++;
-
- if (edac_device_get_log_ce(edac_dev))
- edac_device_printk(edac_dev, KERN_WARNING,
- "CE: %s instance: %s block: %s '%s'\n",
- edac_dev->ctl_name, instance->name,
- block ? block->name : "N/A", msg);
-}
-EXPORT_SYMBOL_GPL(edac_device_handle_ce);
-
-/*
- * edac_device_handle_ue
- * perform a common output and handling of an 'edac_dev' UE event
- */
-void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
- int inst_nr, int block_nr, const char *msg)
-{
- struct edac_device_instance *instance;
- struct edac_device_block *block = NULL;
-
- if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
- edac_device_printk(edac_dev, KERN_ERR,
- "INTERNAL ERROR: 'instance' out of range "
- "(%d >= %d)\n", inst_nr,
- edac_dev->nr_instances);
- return;
- }
-
- instance = edac_dev->instances + inst_nr;
-
- if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
- edac_device_printk(edac_dev, KERN_ERR,
- "INTERNAL ERROR: instance %d 'block' "
- "out of range (%d >= %d)\n",
- inst_nr, block_nr,
- instance->nr_blocks);
- return;
- }
-
- if (instance->nr_blocks > 0) {
- block = instance->blocks + block_nr;
- block->counters.ue_count++;
- }
-
- /* Propagate the count up the 'totals' tree */
- instance->counters.ue_count++;
- edac_dev->counters.ue_count++;
-
- if (edac_device_get_log_ue(edac_dev))
- edac_device_printk(edac_dev, KERN_EMERG,
- "UE: %s instance: %s block: %s '%s'\n",
- edac_dev->ctl_name, instance->name,
- block ? block->name : "N/A", msg);
-
- if (edac_device_get_panic_on_ue(edac_dev))
- panic("EDAC %s: UE instance: %s block %s '%s'\n",
- edac_dev->ctl_name, instance->name,
- block ? block->name : "N/A", msg);
-}
-EXPORT_SYMBOL_GPL(edac_device_handle_ue);
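
To show how the two handlers above are meant to be called, here is a hedged sketch of the polled edac_check callback referenced in the earlier probe sketch. The register offset and CE/UE status bits are hypothetical; only the edac_device_handle_ce()/edac_device_handle_ue() calls reflect this file.

#include <linux/io.h>
#include <linux/bitops.h>
#include "edac_core.h"

#define EXAMPLE_STATUS		0x00	/* hypothetical latched status register */
#define EXAMPLE_STATUS_CE	BIT(0)	/* hypothetical correctable-error bit */
#define EXAMPLE_STATUS_UE	BIT(1)	/* hypothetical uncorrectable-error bit */

static void __iomem *example_base;	/* assumed to be ioremap()ed at probe time */

static void example_edac_check(struct edac_device_ctl_info *edac_dev)
{
	u32 status = readl(example_base + EXAMPLE_STATUS);

	/* instance 0, block 0 for this single-block example */
	if (status & EXAMPLE_STATUS_CE)
		edac_device_handle_ce(edac_dev, 0, 0, "example corrected error");

	if (status & EXAMPLE_STATUS_UE)
		edac_device_handle_ue(edac_dev, 0, 0, "example uncorrected error");

	/* hypothetical write-1-to-clear of the latched bits */
	writel(status, example_base + EXAMPLE_STATUS);
}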
diff --git a/ANDROID_3.4.5/drivers/edac/edac_device_sysfs.c b/ANDROID_3.4.5/drivers/edac/edac_device_sysfs.c
deleted file mode 100644
index b4ea185c..00000000
--- a/ANDROID_3.4.5/drivers/edac/edac_device_sysfs.c
+++ /dev/null
@@ -1,886 +0,0 @@
-/*
- * file for managing the edac_device subsystem of devices for EDAC
- *
- * (C) 2007 SoftwareBitMaker
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Written by Doug Thompson <norsk5@xmission.com>
- *
- */
-
-#include <linux/ctype.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/edac.h>
-
-#include "edac_core.h"
-#include "edac_module.h"
-
-#define EDAC_DEVICE_SYMLINK "device"
-
-#define to_edacdev(k) container_of(k, struct edac_device_ctl_info, kobj)
-#define to_edacdev_attr(a) container_of(a, struct edacdev_attribute, attr)
-
-
-/*
- * Set of edac_device_ctl_info attribute store/show functions
- */
-
-/* 'log_ue' */
-static ssize_t edac_device_ctl_log_ue_show(struct edac_device_ctl_info
- *ctl_info, char *data)
-{
- return sprintf(data, "%u\n", ctl_info->log_ue);
-}
-
-static ssize_t edac_device_ctl_log_ue_store(struct edac_device_ctl_info
- *ctl_info, const char *data,
- size_t count)
-{
- /* if parameter is zero, turn off flag, if non-zero turn on flag */
- ctl_info->log_ue = (simple_strtoul(data, NULL, 0) != 0);
-
- return count;
-}
-
-/* 'log_ce' */
-static ssize_t edac_device_ctl_log_ce_show(struct edac_device_ctl_info
- *ctl_info, char *data)
-{
- return sprintf(data, "%u\n", ctl_info->log_ce);
-}
-
-static ssize_t edac_device_ctl_log_ce_store(struct edac_device_ctl_info
- *ctl_info, const char *data,
- size_t count)
-{
- /* if parameter is zero, turn off flag, if non-zero turn on flag */
- ctl_info->log_ce = (simple_strtoul(data, NULL, 0) != 0);
-
- return count;
-}
-
-/* 'panic_on_ue' */
-static ssize_t edac_device_ctl_panic_on_ue_show(struct edac_device_ctl_info
- *ctl_info, char *data)
-{
- return sprintf(data, "%u\n", ctl_info->panic_on_ue);
-}
-
-static ssize_t edac_device_ctl_panic_on_ue_store(struct edac_device_ctl_info
- *ctl_info, const char *data,
- size_t count)
-{
- /* if parameter is zero, turn off flag, if non-zero turn on flag */
- ctl_info->panic_on_ue = (simple_strtoul(data, NULL, 0) != 0);
-
- return count;
-}
-
-/* 'poll_msec' show and store functions */
-static ssize_t edac_device_ctl_poll_msec_show(struct edac_device_ctl_info
- *ctl_info, char *data)
-{
- return sprintf(data, "%u\n", ctl_info->poll_msec);
-}
-
-static ssize_t edac_device_ctl_poll_msec_store(struct edac_device_ctl_info
- *ctl_info, const char *data,
- size_t count)
-{
- unsigned long value;
-
-	/* get the value; the delay period between scans must be non-zero,
-	 * at least one millisecond.
-	 * Then cancel the last outstanding delay for the work request
-	 * and set a new one.
-	 */
- value = simple_strtoul(data, NULL, 0);
- edac_device_reset_delay_period(ctl_info, value);
-
- return count;
-}
-
-/* edac_device_ctl_info specific attribute structure */
-struct ctl_info_attribute {
- struct attribute attr;
- ssize_t(*show) (struct edac_device_ctl_info *, char *);
- ssize_t(*store) (struct edac_device_ctl_info *, const char *, size_t);
-};
-
-#define to_ctl_info(k) container_of(k, struct edac_device_ctl_info, kobj)
-#define to_ctl_info_attr(a) container_of(a,struct ctl_info_attribute,attr)
-
-/* Function to 'show' fields from the edac_dev 'ctl_info' structure */
-static ssize_t edac_dev_ctl_info_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
-{
- struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
- struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);
-
- if (ctl_info_attr->show)
- return ctl_info_attr->show(edac_dev, buffer);
- return -EIO;
-}
-
-/* Function to 'store' fields into the edac_dev 'ctl_info' structure */
-static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
- struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);
-
- if (ctl_info_attr->store)
- return ctl_info_attr->store(edac_dev, buffer, count);
- return -EIO;
-}
-
-/* edac_dev file operations for an 'ctl_info' */
-static const struct sysfs_ops device_ctl_info_ops = {
- .show = edac_dev_ctl_info_show,
- .store = edac_dev_ctl_info_store
-};
-
-#define CTL_INFO_ATTR(_name,_mode,_show,_store) \
-static struct ctl_info_attribute attr_ctl_info_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
-};
-
-/* Declare the various ctl_info attributes here and their respective ops */
-CTL_INFO_ATTR(log_ue, S_IRUGO | S_IWUSR,
- edac_device_ctl_log_ue_show, edac_device_ctl_log_ue_store);
-CTL_INFO_ATTR(log_ce, S_IRUGO | S_IWUSR,
- edac_device_ctl_log_ce_show, edac_device_ctl_log_ce_store);
-CTL_INFO_ATTR(panic_on_ue, S_IRUGO | S_IWUSR,
- edac_device_ctl_panic_on_ue_show,
- edac_device_ctl_panic_on_ue_store);
-CTL_INFO_ATTR(poll_msec, S_IRUGO | S_IWUSR,
- edac_device_ctl_poll_msec_show, edac_device_ctl_poll_msec_store);
-
-/* Base Attributes of the EDAC_DEVICE ECC object */
-static struct ctl_info_attribute *device_ctrl_attr[] = {
- &attr_ctl_info_panic_on_ue,
- &attr_ctl_info_log_ue,
- &attr_ctl_info_log_ce,
- &attr_ctl_info_poll_msec,
- NULL,
-};
-
-/*
- * edac_device_ctrl_master_release
- *
- * called when the reference count for the 'main' kobj
- * for a edac_device control struct reaches zero
- *
- * Reference count model:
- * One 'main' kobject for each control structure allocated.
- * That main kobj is initially set to one AND
- * the reference count for the EDAC 'core' module is
- *	bumped by one, thus adding a 'keep in memory' dependency.
- *
- * Each new internal kobj (in instances and blocks) then
- * bumps the 'main' kobject.
- *
- * When they are released their release functions decrement
- * the 'main' kobj.
- *
- * When the main kobj reaches zero (0) then THIS function
- * is called which then decrements the EDAC 'core' module.
- *	When the module reference count reaches zero, the
- *	module no longer depends on keeping the release
- *	function code in memory and the module can be unloaded.
- *
- * This will support several control objects as well, each
- * with its own 'main' kobj.
- */
-static void edac_device_ctrl_master_release(struct kobject *kobj)
-{
- struct edac_device_ctl_info *edac_dev = to_edacdev(kobj);
-
- debugf4("%s() control index=%d\n", __func__, edac_dev->dev_idx);
-
- /* decrement the EDAC CORE module ref count */
- module_put(edac_dev->owner);
-
- /* free the control struct containing the 'main' kobj
- * passed in to this routine
- */
- kfree(edac_dev);
-}
-
-/* ktype for the main (master) kobject */
-static struct kobj_type ktype_device_ctrl = {
- .release = edac_device_ctrl_master_release,
- .sysfs_ops = &device_ctl_info_ops,
- .default_attrs = (struct attribute **)device_ctrl_attr,
-};
-
-/*
- * edac_device_register_sysfs_main_kobj
- *
- * perform the high level setup for the new edac_device instance
- *
- * Return: 0 SUCCESS
- * !0 FAILURE
- */
-int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
-{
- struct bus_type *edac_subsys;
- int err;
-
- debugf1("%s()\n", __func__);
-
- /* get the /sys/devices/system/edac reference */
- edac_subsys = edac_get_sysfs_subsys();
- if (edac_subsys == NULL) {
- debugf1("%s() no edac_subsys error\n", __func__);
- err = -ENODEV;
- goto err_out;
- }
-
- /* Point to the 'edac_subsys' this instance 'reports' to */
- edac_dev->edac_subsys = edac_subsys;
-
-	/* Init the device's kobject */
- memset(&edac_dev->kobj, 0, sizeof(struct kobject));
-
- /* Record which module 'owns' this control structure
- * and bump the ref count of the module
- */
- edac_dev->owner = THIS_MODULE;
-
- if (!try_module_get(edac_dev->owner)) {
- err = -ENODEV;
- goto err_mod_get;
- }
-
- /* register */
- err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl,
- &edac_subsys->dev_root->kobj,
- "%s", edac_dev->name);
- if (err) {
- debugf1("%s()Failed to register '.../edac/%s'\n",
- __func__, edac_dev->name);
- goto err_kobj_reg;
- }
- kobject_uevent(&edac_dev->kobj, KOBJ_ADD);
-
- /* At this point, to 'free' the control struct,
- * edac_device_unregister_sysfs_main_kobj() must be used
- */
-
- debugf4("%s() Registered '.../edac/%s' kobject\n",
- __func__, edac_dev->name);
-
- return 0;
-
- /* Error exit stack */
-err_kobj_reg:
- module_put(edac_dev->owner);
-
-err_mod_get:
- edac_put_sysfs_subsys();
-
-err_out:
- return err;
-}
-
-/*
- * edac_device_unregister_sysfs_main_kobj:
- *	unregister the '..../edac/<name>' kobject
- */
-void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev)
-{
- debugf0("%s()\n", __func__);
- debugf4("%s() name of kobject is: %s\n",
- __func__, kobject_name(&dev->kobj));
-
- /*
- * Unregister the edac device's kobject and
- * allow for reference count to reach 0 at which point
- * the callback will be called to:
- * a) module_put() this module
- * b) 'kfree' the memory
- */
- kobject_put(&dev->kobj);
- edac_put_sysfs_subsys();
-}
-
-/* edac_dev -> instance information */
-
-/*
- * Set of low-level instance attribute show functions
- */
-static ssize_t instance_ue_count_show(struct edac_device_instance *instance,
- char *data)
-{
- return sprintf(data, "%u\n", instance->counters.ue_count);
-}
-
-static ssize_t instance_ce_count_show(struct edac_device_instance *instance,
- char *data)
-{
- return sprintf(data, "%u\n", instance->counters.ce_count);
-}
-
-#define to_instance(k) container_of(k, struct edac_device_instance, kobj)
-#define to_instance_attr(a) container_of(a,struct instance_attribute,attr)
-
-/* DEVICE instance kobject release() function */
-static void edac_device_ctrl_instance_release(struct kobject *kobj)
-{
- struct edac_device_instance *instance;
-
- debugf1("%s()\n", __func__);
-
- /* map from this kobj to the main control struct
- * and then dec the main kobj count
- */
- instance = to_instance(kobj);
- kobject_put(&instance->ctl->kobj);
-}
-
-/* instance specific attribute structure */
-struct instance_attribute {
- struct attribute attr;
- ssize_t(*show) (struct edac_device_instance *, char *);
- ssize_t(*store) (struct edac_device_instance *, const char *, size_t);
-};
-
-/* Function to 'show' fields from the edac_dev 'instance' structure */
-static ssize_t edac_dev_instance_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
-{
- struct edac_device_instance *instance = to_instance(kobj);
- struct instance_attribute *instance_attr = to_instance_attr(attr);
-
- if (instance_attr->show)
- return instance_attr->show(instance, buffer);
- return -EIO;
-}
-
-/* Function to 'store' fields into the edac_dev 'instance' structure */
-static ssize_t edac_dev_instance_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct edac_device_instance *instance = to_instance(kobj);
- struct instance_attribute *instance_attr = to_instance_attr(attr);
-
- if (instance_attr->store)
- return instance_attr->store(instance, buffer, count);
- return -EIO;
-}
-
-/* edac_dev file operations for an 'instance' */
-static const struct sysfs_ops device_instance_ops = {
- .show = edac_dev_instance_show,
- .store = edac_dev_instance_store
-};
-
-#define INSTANCE_ATTR(_name,_mode,_show,_store) \
-static struct instance_attribute attr_instance_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
-};
-
-/*
- * Define attributes visible for the edac_device instance object
- * Each contains a pointer to a show and an optional set
- * function pointer that does the low level output/input
- */
-INSTANCE_ATTR(ce_count, S_IRUGO, instance_ce_count_show, NULL);
-INSTANCE_ATTR(ue_count, S_IRUGO, instance_ue_count_show, NULL);
-
-/* list of edac_dev 'instance' attributes */
-static struct instance_attribute *device_instance_attr[] = {
- &attr_instance_ce_count,
- &attr_instance_ue_count,
- NULL,
-};
-
-/* The 'ktype' for each edac_dev 'instance' */
-static struct kobj_type ktype_instance_ctrl = {
- .release = edac_device_ctrl_instance_release,
- .sysfs_ops = &device_instance_ops,
- .default_attrs = (struct attribute **)device_instance_attr,
-};
-
-/* edac_dev -> instance -> block information */
-
-#define to_block(k) container_of(k, struct edac_device_block, kobj)
-#define to_block_attr(a) \
- container_of(a, struct edac_dev_sysfs_block_attribute, attr)
-
-/*
- * Set of low-level block attribute show functions
- */
-static ssize_t block_ue_count_show(struct kobject *kobj,
- struct attribute *attr, char *data)
-{
- struct edac_device_block *block = to_block(kobj);
-
- return sprintf(data, "%u\n", block->counters.ue_count);
-}
-
-static ssize_t block_ce_count_show(struct kobject *kobj,
- struct attribute *attr, char *data)
-{
- struct edac_device_block *block = to_block(kobj);
-
- return sprintf(data, "%u\n", block->counters.ce_count);
-}
-
-/* DEVICE block kobject release() function */
-static void edac_device_ctrl_block_release(struct kobject *kobj)
-{
- struct edac_device_block *block;
-
- debugf1("%s()\n", __func__);
-
- /* get the container of the kobj */
- block = to_block(kobj);
-
- /* map from 'block kobj' to 'block->instance->controller->main_kobj'
- * now 'release' the block kobject
- */
- kobject_put(&block->instance->ctl->kobj);
-}
-
-
-/* Function to 'show' fields from the edac_dev 'block' structure */
-static ssize_t edac_dev_block_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
-{
- struct edac_dev_sysfs_block_attribute *block_attr =
- to_block_attr(attr);
-
- if (block_attr->show)
- return block_attr->show(kobj, attr, buffer);
- return -EIO;
-}
-
-/* Function to 'store' fields into the edac_dev 'block' structure */
-static ssize_t edac_dev_block_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct edac_dev_sysfs_block_attribute *block_attr;
-
- block_attr = to_block_attr(attr);
-
- if (block_attr->store)
- return block_attr->store(kobj, attr, buffer, count);
- return -EIO;
-}
-
-/* edac_dev file operations for a 'block' */
-static const struct sysfs_ops device_block_ops = {
- .show = edac_dev_block_show,
- .store = edac_dev_block_store
-};
-
-#define BLOCK_ATTR(_name,_mode,_show,_store) \
-static struct edac_dev_sysfs_block_attribute attr_block_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
-};
-
-BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL);
-BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL);
-
-/* list of edac_dev 'block' attributes */
-static struct edac_dev_sysfs_block_attribute *device_block_attr[] = {
- &attr_block_ce_count,
- &attr_block_ue_count,
- NULL,
-};
-
-/* The 'ktype' for each edac_dev 'block' */
-static struct kobj_type ktype_block_ctrl = {
- .release = edac_device_ctrl_block_release,
- .sysfs_ops = &device_block_ops,
- .default_attrs = (struct attribute **)device_block_attr,
-};
-
-/* block ctor/dtor code */
-
-/*
- * edac_device_create_block
- */
-static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
- struct edac_device_instance *instance,
- struct edac_device_block *block)
-{
- int i;
- int err;
- struct edac_dev_sysfs_block_attribute *sysfs_attrib;
- struct kobject *main_kobj;
-
- debugf4("%s() Instance '%s' inst_p=%p block '%s' block_p=%p\n",
- __func__, instance->name, instance, block->name, block);
- debugf4("%s() block kobj=%p block kobj->parent=%p\n",
- __func__, &block->kobj, &block->kobj.parent);
-
- /* init this block's kobject */
- memset(&block->kobj, 0, sizeof(struct kobject));
-
-	/* bump the main kobject's reference count for this controller,
-	 * since this block depends on the main
-	 */
- main_kobj = kobject_get(&edac_dev->kobj);
- if (!main_kobj) {
- err = -ENODEV;
- goto err_out;
- }
-
- /* Add this block's kobject */
- err = kobject_init_and_add(&block->kobj, &ktype_block_ctrl,
- &instance->kobj,
- "%s", block->name);
- if (err) {
-		debugf1("%s() Failed to register block '%s'\n",
-			__func__, block->name);
- kobject_put(main_kobj);
- err = -ENODEV;
- goto err_out;
- }
-
-	/* If there are driver level block attributes, then add them
- * to the block kobject
- */
- sysfs_attrib = block->block_attributes;
- if (sysfs_attrib && block->nr_attribs) {
- for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
-
- debugf4("%s() creating block attrib='%s' "
- "attrib->%p to kobj=%p\n",
- __func__,
- sysfs_attrib->attr.name,
- sysfs_attrib, &block->kobj);
-
- /* Create each block_attribute file */
- err = sysfs_create_file(&block->kobj,
- &sysfs_attrib->attr);
- if (err)
- goto err_on_attrib;
- }
- }
- kobject_uevent(&block->kobj, KOBJ_ADD);
-
- return 0;
-
- /* Error unwind stack */
-err_on_attrib:
- kobject_put(&block->kobj);
-
-err_out:
- return err;
-}
-
-/*
- * edac_device_delete_block(edac_dev,block);
- */
-static void edac_device_delete_block(struct edac_device_ctl_info *edac_dev,
- struct edac_device_block *block)
-{
- struct edac_dev_sysfs_block_attribute *sysfs_attrib;
- int i;
-
- /* if this block has 'attributes' then we need to iterate over the list
- * and 'remove' the attributes on this block
- */
- sysfs_attrib = block->block_attributes;
- if (sysfs_attrib && block->nr_attribs) {
- for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
-
- /* remove each block_attrib file */
- sysfs_remove_file(&block->kobj,
- (struct attribute *) sysfs_attrib);
- }
- }
-
- /* unregister this block's kobject, SEE:
- * edac_device_ctrl_block_release() callback operation
- */
- kobject_put(&block->kobj);
-}
-
-/* instance ctor/dtor code */
-
-/*
- * edac_device_create_instance
- * create just one instance of an edac_device 'instance'
- */
-static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
- int idx)
-{
- int i, j;
- int err;
- struct edac_device_instance *instance;
- struct kobject *main_kobj;
-
- instance = &edac_dev->instances[idx];
-
- /* Init the instance's kobject */
- memset(&instance->kobj, 0, sizeof(struct kobject));
-
- instance->ctl = edac_dev;
-
-	/* bump the main kobject's reference count for this controller,
-	 * since this instance depends on the main
-	 */
- main_kobj = kobject_get(&edac_dev->kobj);
- if (!main_kobj) {
- err = -ENODEV;
- goto err_out;
- }
-
- /* Formally register this instance's kobject under the edac_device */
- err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl,
- &edac_dev->kobj, "%s", instance->name);
- if (err != 0) {
- debugf2("%s() Failed to register instance '%s'\n",
- __func__, instance->name);
- kobject_put(main_kobj);
- goto err_out;
- }
-
- debugf4("%s() now register '%d' blocks for instance %d\n",
- __func__, instance->nr_blocks, idx);
-
- /* register all blocks of this instance */
- for (i = 0; i < instance->nr_blocks; i++) {
- err = edac_device_create_block(edac_dev, instance,
- &instance->blocks[i]);
- if (err) {
- /* If any fail, remove all previous ones */
- for (j = 0; j < i; j++)
- edac_device_delete_block(edac_dev,
- &instance->blocks[j]);
- goto err_release_instance_kobj;
- }
- }
- kobject_uevent(&instance->kobj, KOBJ_ADD);
-
- debugf4("%s() Registered instance %d '%s' kobject\n",
- __func__, idx, instance->name);
-
- return 0;
-
- /* error unwind stack */
-err_release_instance_kobj:
- kobject_put(&instance->kobj);
-
-err_out:
- return err;
-}
-
-/*
- * edac_device_delete_instance
- *	delete an edac_device instance
- */
-static void edac_device_delete_instance(struct edac_device_ctl_info *edac_dev,
- int idx)
-{
- struct edac_device_instance *instance;
- int i;
-
- instance = &edac_dev->instances[idx];
-
- /* unregister all blocks in this instance */
- for (i = 0; i < instance->nr_blocks; i++)
- edac_device_delete_block(edac_dev, &instance->blocks[i]);
-
- /* unregister this instance's kobject, SEE:
- * edac_device_ctrl_instance_release() for callback operation
- */
- kobject_put(&instance->kobj);
-}
-
-/*
- * edac_device_create_instances
- * create the first level of 'instances' for this device
- * (e.g. 'cache' might have 'cache0', 'cache1', 'cache2', etc.)
- */
-static int edac_device_create_instances(struct edac_device_ctl_info *edac_dev)
-{
- int i, j;
- int err;
-
- debugf0("%s()\n", __func__);
-
- /* iterate over creation of the instances */
- for (i = 0; i < edac_dev->nr_instances; i++) {
- err = edac_device_create_instance(edac_dev, i);
- if (err) {
- /* unwind previous instances on error */
- for (j = 0; j < i; j++)
- edac_device_delete_instance(edac_dev, j);
- return err;
- }
- }
-
- return 0;
-}
-
-/*
- * edac_device_delete_instances(edac_dev);
- * unregister all the kobjects of the instances
- */
-static void edac_device_delete_instances(struct edac_device_ctl_info *edac_dev)
-{
- int i;
-
-	/* iterate over the instances, deleting each one */
- for (i = 0; i < edac_dev->nr_instances; i++)
- edac_device_delete_instance(edac_dev, i);
-}
-
-/* edac_dev sysfs ctor/dtor code */
-
-/*
- * edac_device_add_main_sysfs_attributes
- * add some attributes to this instance's main kobject
- */
-static int edac_device_add_main_sysfs_attributes(
- struct edac_device_ctl_info *edac_dev)
-{
- struct edac_dev_sysfs_attribute *sysfs_attrib;
- int err = 0;
-
- sysfs_attrib = edac_dev->sysfs_attributes;
- if (sysfs_attrib) {
- /* iterate over the array and create an attribute for each
- * entry in the list
- */
- while (sysfs_attrib->attr.name != NULL) {
- err = sysfs_create_file(&edac_dev->kobj,
- (struct attribute*) sysfs_attrib);
- if (err)
- goto err_out;
-
- sysfs_attrib++;
- }
- }
-
-err_out:
- return err;
-}
-
-/*
- * edac_device_remove_main_sysfs_attributes
- * remove any attributes to this instance's main kobject
- */
-static void edac_device_remove_main_sysfs_attributes(
- struct edac_device_ctl_info *edac_dev)
-{
- struct edac_dev_sysfs_attribute *sysfs_attrib;
-
-	/* if there are main attributes defined, remove them. First,
- * point to the start of the array and iterate over it
- * removing each attribute listed from this device's instance's kobject
- */
- sysfs_attrib = edac_dev->sysfs_attributes;
- if (sysfs_attrib) {
- while (sysfs_attrib->attr.name != NULL) {
- sysfs_remove_file(&edac_dev->kobj,
- (struct attribute *) sysfs_attrib);
- sysfs_attrib++;
- }
- }
-}
-
-/*
- * edac_device_create_sysfs() Constructor
- *
- * accept a created edac_device control structure
- * and 'export' it to sysfs. The 'main' kobj should already have been
- * created. 'instance' and 'block' kobjects should be registered
- * along with any 'block' attributes from the low-level driver. In addition,
- * the main attributes (if any) are connected to the main kobject of
- * the control structure.
- *
- * Return:
- * 0 Success
- * !0 Failure
- */
-int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
-{
- int err;
- struct kobject *edac_kobj = &edac_dev->kobj;
-
- debugf0("%s() idx=%d\n", __func__, edac_dev->dev_idx);
-
-	/* go create any main attributes the caller wants */
- err = edac_device_add_main_sysfs_attributes(edac_dev);
- if (err) {
- debugf0("%s() failed to add sysfs attribs\n", __func__);
- goto err_out;
- }
-
- /* create a symlink from the edac device
- * to the platform 'device' being used for this
- */
- err = sysfs_create_link(edac_kobj,
- &edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK);
- if (err) {
- debugf0("%s() sysfs_create_link() returned err= %d\n",
- __func__, err);
- goto err_remove_main_attribs;
- }
-
- /* Create the first level instance directories
- * In turn, the nested blocks beneath the instances will
- * be registered as well
- */
- err = edac_device_create_instances(edac_dev);
- if (err) {
- debugf0("%s() edac_device_create_instances() "
- "returned err= %d\n", __func__, err);
- goto err_remove_link;
- }
-
-
- debugf4("%s() create-instances done, idx=%d\n",
- __func__, edac_dev->dev_idx);
-
- return 0;
-
- /* Error unwind stack */
-err_remove_link:
- /* remove the sym link */
- sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK);
-
-err_remove_main_attribs:
- edac_device_remove_main_sysfs_attributes(edac_dev);
-
-err_out:
- return err;
-}
-
-/*
- * edac_device_remove_sysfs() destructor
- *
- * given an edac_device struct, tear down the kobject resources
- */
-void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev)
-{
- debugf0("%s()\n", __func__);
-
- /* remove any main attributes for this device */
- edac_device_remove_main_sysfs_attributes(edac_dev);
-
- /* remove the device sym link */
- sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK);
-
- /* walk the instance/block kobject tree, deconstructing it */
- edac_device_delete_instances(edac_dev);
-}
diff --git a/ANDROID_3.4.5/drivers/edac/edac_mc.c b/ANDROID_3.4.5/drivers/edac/edac_mc.c
deleted file mode 100644
index feef7733..00000000
--- a/ANDROID_3.4.5/drivers/edac/edac_mc.c
+++ /dev/null
@@ -1,918 +0,0 @@
-/*
- * edac_mc kernel module
- * (C) 2005, 2006 Linux Networx (http://lnxi.com)
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Written by Thayne Harbaugh
- * Based on work by Dan Hollis <goemon at anime dot net> and others.
- * http://www.anime.net/~goemon/linux-ecc/
- *
- * Modified by Dave Peterson and Doug Thompson
- *
- */
-
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/sysctl.h>
-#include <linux/highmem.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/ctype.h>
-#include <linux/edac.h>
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include <asm/edac.h>
-#include "edac_core.h"
-#include "edac_module.h"
-
-/* lock to memory controller's control array */
-static DEFINE_MUTEX(mem_ctls_mutex);
-static LIST_HEAD(mc_devices);
-
-#ifdef CONFIG_EDAC_DEBUG
-
-static void edac_mc_dump_channel(struct rank_info *chan)
-{
- debugf4("\tchannel = %p\n", chan);
- debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
- debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
- debugf4("\tchannel->label = '%s'\n", chan->label);
- debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
-}
-
-static void edac_mc_dump_csrow(struct csrow_info *csrow)
-{
- debugf4("\tcsrow = %p\n", csrow);
- debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
- debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
- debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
- debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
- debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
- debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
- debugf4("\tcsrow->channels = %p\n", csrow->channels);
- debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
-}
-
-static void edac_mc_dump_mci(struct mem_ctl_info *mci)
-{
- debugf3("\tmci = %p\n", mci);
- debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
- debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
- debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
- debugf4("\tmci->edac_check = %p\n", mci->edac_check);
- debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
- mci->nr_csrows, mci->csrows);
- debugf3("\tdev = %p\n", mci->dev);
- debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
- debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
-}
-
-#endif /* CONFIG_EDAC_DEBUG */
-
-/*
- * keep those in sync with the enum mem_type
- */
-const char *edac_mem_types[] = {
- "Empty csrow",
- "Reserved csrow type",
- "Unknown csrow type",
- "Fast page mode RAM",
- "Extended data out RAM",
- "Burst Extended data out RAM",
- "Single data rate SDRAM",
- "Registered single data rate SDRAM",
- "Double data rate SDRAM",
- "Registered Double data rate SDRAM",
- "Rambus DRAM",
- "Unbuffered DDR2 RAM",
- "Fully buffered DDR2",
- "Registered DDR2 RAM",
- "Rambus XDR",
- "Unbuffered DDR3 RAM",
- "Registered DDR3 RAM",
-};
-EXPORT_SYMBOL_GPL(edac_mem_types);
-
-/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
- * Adjust 'ptr' so that its alignment is at least as stringent as what the
- * compiler would provide for X and return the aligned result.
- *
- * If 'size' is a constant, the compiler will optimize this whole function
- * down to either a no-op or the addition of a constant to the value of 'ptr'.
- */
-void *edac_align_ptr(void *ptr, unsigned size)
-{
- unsigned align, r;
-
- /* Here we assume that the alignment of a "long long" is the most
- * stringent alignment that the compiler will ever provide by default.
- * As far as I know, this is a reasonable assumption.
- */
- if (size > sizeof(long))
- align = sizeof(long long);
- else if (size > sizeof(int))
- align = sizeof(long);
- else if (size > sizeof(short))
- align = sizeof(int);
- else if (size > sizeof(char))
- align = sizeof(short);
- else
- return (char *)ptr;
-
- r = size % align;
-
- if (r == 0)
- return (char *)ptr;
-
- return (void *)(((unsigned long)ptr) + align - r);
-}
-
-/**
- * edac_mc_alloc: Allocate a struct mem_ctl_info structure
- * @sz_pvt: size of private storage needed
- * @nr_csrows: Number of CSROWS needed for this MC
- * @nr_chans: Number of channels for the MC
- * @edac_index: unique index number assigned to this MC by the caller
- *
- * Everything is kmalloc'ed as one big chunk - more efficient.
- * Only can be used if all structures have the same lifetime - otherwise
- * you have to allocate and initialize your own structures.
- *
- * Use edac_mc_free() to free mc structures allocated by this function.
- *
- * Returns:
- * NULL allocation failed
- * struct mem_ctl_info pointer
- */
-struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
- unsigned nr_chans, int edac_index)
-{
- struct mem_ctl_info *mci;
- struct csrow_info *csi, *csrow;
- struct rank_info *chi, *chp, *chan;
- void *pvt;
- unsigned size;
- int row, chn;
- int err;
-
- /* Figure out the offsets of the various items from the start of an mc
- * structure. We want the alignment of each item to be at least as
- * stringent as what the compiler would provide if we could simply
- * hardcode everything into a single struct.
- */
- mci = (struct mem_ctl_info *)0;
- csi = edac_align_ptr(&mci[1], sizeof(*csi));
- chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
- pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
- size = ((unsigned long)pvt) + sz_pvt;
-
- mci = kzalloc(size, GFP_KERNEL);
- if (mci == NULL)
- return NULL;
-
- /* Adjust pointers so they point within the memory we just allocated
- * rather than an imaginary chunk of memory located at address 0.
- */
- csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
- chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
- pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
-
- /* setup index and various internal pointers */
- mci->mc_idx = edac_index;
- mci->csrows = csi;
- mci->pvt_info = pvt;
- mci->nr_csrows = nr_csrows;
-
- for (row = 0; row < nr_csrows; row++) {
- csrow = &csi[row];
- csrow->csrow_idx = row;
- csrow->mci = mci;
- csrow->nr_channels = nr_chans;
- chp = &chi[row * nr_chans];
- csrow->channels = chp;
-
- for (chn = 0; chn < nr_chans; chn++) {
- chan = &chp[chn];
- chan->chan_idx = chn;
- chan->csrow = csrow;
- }
- }
-
- mci->op_state = OP_ALLOC;
- INIT_LIST_HEAD(&mci->grp_kobj_list);
-
- /*
- * Initialize the 'root' kobj for the edac_mc controller
- */
- err = edac_mc_register_sysfs_main_kobj(mci);
- if (err) {
- kfree(mci);
- return NULL;
- }
-
- /* at this point, the root kobj is valid, and in order to
- * 'free' the object, then the function:
- * edac_mc_unregister_sysfs_main_kobj() must be called
- * which will perform kobj unregistration and the actual free
- * will occur during the kobject callback operation
- */
- return mci;
-}
-EXPORT_SYMBOL_GPL(edac_mc_alloc);
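
For orientation, a hedged sketch of how a memory-controller driver might consume edac_mc_alloc(). The csrow/channel field names match the structures used in this file; the private struct, page counts, grain and DIMM labels are purely illustrative assumptions.

#include "edac_core.h"

struct example_mc_pvt {
	void __iomem *regs;			/* hypothetical register window */
};

static struct mem_ctl_info *example_mc_setup(int mc_idx)
{
	struct mem_ctl_info *mci;
	struct csrow_info *csrow;

	/* one csrow with two channels, plus room for the private struct */
	mci = edac_mc_alloc(sizeof(struct example_mc_pvt), 1, 2, mc_idx);
	if (!mci)
		return NULL;

	csrow = &mci->csrows[0];
	csrow->first_page = 0;
	csrow->last_page = 0x7ffff;		/* 2 GiB of 4 KiB pages (example) */
	csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
	csrow->grain = 8;			/* bytes per checked word (example) */
	csrow->mtype = MEM_DDR3;
	csrow->dtype = DEV_X8;
	csrow->edac_mode = EDAC_SECDED;
	snprintf(csrow->channels[0].label, EDAC_MC_LABEL_LEN, "DIMM_A0");
	snprintf(csrow->channels[1].label, EDAC_MC_LABEL_LEN, "DIMM_B0");

	/* caller still fills in mci->dev, names, etc. and registers it */
	return mci;
}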
-
-/**
- * edac_mc_free
- * 'Free' a previously allocated 'mci' structure
- * @mci: pointer to a struct mem_ctl_info structure
- */
-void edac_mc_free(struct mem_ctl_info *mci)
-{
- debugf1("%s()\n", __func__);
-
- edac_mc_unregister_sysfs_main_kobj(mci);
-
- /* free the mci instance memory here */
- kfree(mci);
-}
-EXPORT_SYMBOL_GPL(edac_mc_free);
-
-
-/**
- * find_mci_by_dev
- *
- * scan list of controllers looking for the one that manages
- * the 'dev' device
- * @dev: pointer to a struct device related with the MCI
- */
-struct mem_ctl_info *find_mci_by_dev(struct device *dev)
-{
- struct mem_ctl_info *mci;
- struct list_head *item;
-
- debugf3("%s()\n", __func__);
-
- list_for_each(item, &mc_devices) {
- mci = list_entry(item, struct mem_ctl_info, link);
-
- if (mci->dev == dev)
- return mci;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(find_mci_by_dev);
-
-/*
- * handler for EDAC to check if NMI type handler has asserted interrupt
- */
-static int edac_mc_assert_error_check_and_clear(void)
-{
- int old_state;
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- return 1;
-
- old_state = edac_err_assert;
- edac_err_assert = 0;
-
- return old_state;
-}
-
-/*
- * edac_mc_workq_function
- * performs the operation scheduled by a workq request
- */
-static void edac_mc_workq_function(struct work_struct *work_req)
-{
- struct delayed_work *d_work = to_delayed_work(work_req);
- struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
-
- mutex_lock(&mem_ctls_mutex);
-
-	/* if this control struct has moved to offline state, we are done */
- if (mci->op_state == OP_OFFLINE) {
- mutex_unlock(&mem_ctls_mutex);
- return;
- }
-
- /* Only poll controllers that are running polled and have a check */
- if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
- mci->edac_check(mci);
-
- mutex_unlock(&mem_ctls_mutex);
-
- /* Reschedule */
- queue_delayed_work(edac_workqueue, &mci->work,
- msecs_to_jiffies(edac_mc_get_poll_msec()));
-}
-
-/*
- * edac_mc_workq_setup
- * initialize a workq item for this mci
- * passing in the new delay period in msec
- *
- * locking model:
- *
- * called with the mem_ctls_mutex held
- */
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
-{
- debugf0("%s()\n", __func__);
-
- /* if this instance is not in the POLL state, then simply return */
- if (mci->op_state != OP_RUNNING_POLL)
- return;
-
- INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
- queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
-}
-
-/*
- * edac_mc_workq_teardown
- * stop the workq processing on this mci
- *
- * locking model:
- *
- * called WITHOUT lock held
- */
-static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
-{
- int status;
-
- if (mci->op_state != OP_RUNNING_POLL)
- return;
-
- status = cancel_delayed_work(&mci->work);
- if (status == 0) {
- debugf0("%s() not canceled, flush the queue\n",
- __func__);
-
- /* workq instance might be running, wait for it */
- flush_workqueue(edac_workqueue);
- }
-}
-
-/*
- * edac_mc_reset_delay_period(int value)
- *
- * user space has updated our poll period value, need to
- * reset our workq delays
- */
-void edac_mc_reset_delay_period(int value)
-{
- struct mem_ctl_info *mci;
- struct list_head *item;
-
- mutex_lock(&mem_ctls_mutex);
-
- /* scan the list and turn off all workq timers, doing so under lock
- */
- list_for_each(item, &mc_devices) {
- mci = list_entry(item, struct mem_ctl_info, link);
-
- if (mci->op_state == OP_RUNNING_POLL)
- cancel_delayed_work(&mci->work);
- }
-
- mutex_unlock(&mem_ctls_mutex);
-
-
- /* re-walk the list, and reset the poll delay */
- mutex_lock(&mem_ctls_mutex);
-
- list_for_each(item, &mc_devices) {
- mci = list_entry(item, struct mem_ctl_info, link);
-
- edac_mc_workq_setup(mci, (unsigned long) value);
- }
-
- mutex_unlock(&mem_ctls_mutex);
-}
-
-
-
-/* Return 0 on success, 1 on failure.
- * Before calling this function, caller must
- * assign a unique value to mci->mc_idx.
- *
- * locking model:
- *
- * called with the mem_ctls_mutex lock held
- */
-static int add_mc_to_global_list(struct mem_ctl_info *mci)
-{
- struct list_head *item, *insert_before;
- struct mem_ctl_info *p;
-
- insert_before = &mc_devices;
-
- p = find_mci_by_dev(mci->dev);
- if (unlikely(p != NULL))
- goto fail0;
-
- list_for_each(item, &mc_devices) {
- p = list_entry(item, struct mem_ctl_info, link);
-
- if (p->mc_idx >= mci->mc_idx) {
- if (unlikely(p->mc_idx == mci->mc_idx))
- goto fail1;
-
- insert_before = item;
- break;
- }
- }
-
- list_add_tail_rcu(&mci->link, insert_before);
- atomic_inc(&edac_handlers);
- return 0;
-
-fail0:
- edac_printk(KERN_WARNING, EDAC_MC,
- "%s (%s) %s %s already assigned %d\n", dev_name(p->dev),
- edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
- return 1;
-
-fail1:
- edac_printk(KERN_WARNING, EDAC_MC,
- "bug in low-level driver: attempt to assign\n"
- " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
- return 1;
-}
-
-static void del_mc_from_global_list(struct mem_ctl_info *mci)
-{
- atomic_dec(&edac_handlers);
- list_del_rcu(&mci->link);
-
- /* these are for safe removal of devices from global list while
- * NMI handlers may be traversing list
- */
- synchronize_rcu();
- INIT_LIST_HEAD(&mci->link);
-}
-
-/**
- * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
- *
- * If found, return a pointer to the structure.
- * Else return NULL.
- *
- * Caller must hold mem_ctls_mutex.
- */
-struct mem_ctl_info *edac_mc_find(int idx)
-{
- struct list_head *item;
- struct mem_ctl_info *mci;
-
- list_for_each(item, &mc_devices) {
- mci = list_entry(item, struct mem_ctl_info, link);
-
- if (mci->mc_idx >= idx) {
- if (mci->mc_idx == idx)
- return mci;
-
- break;
- }
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(edac_mc_find);
-
-/**
- * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
- * create sysfs entries associated with mci structure
- * @mci: pointer to the mci structure to be added to the list
- * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
- *
- * Return:
- * 0 Success
- * !0 Failure
- */
-
-/* FIXME - should a warning be printed if no error detection? correction? */
-int edac_mc_add_mc(struct mem_ctl_info *mci)
-{
- debugf0("%s()\n", __func__);
-
-#ifdef CONFIG_EDAC_DEBUG
- if (edac_debug_level >= 3)
- edac_mc_dump_mci(mci);
-
- if (edac_debug_level >= 4) {
- int i;
-
- for (i = 0; i < mci->nr_csrows; i++) {
- int j;
-
- edac_mc_dump_csrow(&mci->csrows[i]);
- for (j = 0; j < mci->csrows[i].nr_channels; j++)
- edac_mc_dump_channel(&mci->csrows[i].
- channels[j]);
- }
- }
-#endif
- mutex_lock(&mem_ctls_mutex);
-
- if (add_mc_to_global_list(mci))
- goto fail0;
-
- /* set load time so that error rate can be tracked */
- mci->start_time = jiffies;
-
- if (edac_create_sysfs_mci_device(mci)) {
- edac_mc_printk(mci, KERN_WARNING,
- "failed to create sysfs device\n");
- goto fail1;
- }
-
- /* If there IS a check routine, then we are running POLLED */
- if (mci->edac_check != NULL) {
- /* This instance is NOW RUNNING */
- mci->op_state = OP_RUNNING_POLL;
-
- edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
- } else {
- mci->op_state = OP_RUNNING_INTERRUPT;
- }
-
- /* Report action taken */
- edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
- " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));
-
- mutex_unlock(&mem_ctls_mutex);
- return 0;
-
-fail1:
- del_mc_from_global_list(mci);
-
-fail0:
- mutex_unlock(&mem_ctls_mutex);
- return 1;
-}
-EXPORT_SYMBOL_GPL(edac_mc_add_mc);
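
Continuing the allocation sketch above, a hedged example of the registration/teardown pairing around edac_mc_add_mc(), edac_mc_del_mc() and edac_mc_free(); the PCI device and name strings are illustrative assumptions.

#include <linux/pci.h>
#include "edac_core.h"

static int example_mc_register(struct pci_dev *pdev, struct mem_ctl_info *mci)
{
	mci->dev = &pdev->dev;
	mci->mod_name = "example_mc_edac";	/* illustrative names */
	mci->ctl_name = "example_mc";
	mci->dev_name = pci_name(pdev);
	mci->edac_check = NULL;			/* no poll routine: interrupt driven */

	/* a non-zero return means the core rejected the registration */
	if (edac_mc_add_mc(mci))
		return -ENODEV;

	return 0;
}

static void example_mc_unregister(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);

	if (mci)
		edac_mc_free(mci);
}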
-
-/**
- * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
- * remove mci structure from global list
- * @dev: Pointer to 'struct device' representing the mci structure to remove.
- *
- * Return pointer to removed mci structure, or NULL if device not found.
- */
-struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
-{
- struct mem_ctl_info *mci;
-
- debugf0("%s()\n", __func__);
-
- mutex_lock(&mem_ctls_mutex);
-
- /* find the requested mci struct in the global list */
- mci = find_mci_by_dev(dev);
- if (mci == NULL) {
- mutex_unlock(&mem_ctls_mutex);
- return NULL;
- }
-
- del_mc_from_global_list(mci);
- mutex_unlock(&mem_ctls_mutex);
-
- /* flush workq processes */
- edac_mc_workq_teardown(mci);
-
- /* marking MCI offline */
- mci->op_state = OP_OFFLINE;
-
- /* remove from sysfs */
- edac_remove_sysfs_mci_device(mci);
-
- edac_printk(KERN_INFO, EDAC_MC,
- "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
- mci->mod_name, mci->ctl_name, edac_dev_name(mci));
-
- return mci;
-}
-EXPORT_SYMBOL_GPL(edac_mc_del_mc);
-
-static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
- u32 size)
-{
- struct page *pg;
- void *virt_addr;
- unsigned long flags = 0;
-
- debugf3("%s()\n", __func__);
-
- /* ECC error page was not in our memory. Ignore it. */
- if (!pfn_valid(page))
- return;
-
- /* Find the actual page structure then map it and fix */
- pg = pfn_to_page(page);
-
- if (PageHighMem(pg))
- local_irq_save(flags);
-
- virt_addr = kmap_atomic(pg);
-
- /* Perform architecture specific atomic scrub operation */
- atomic_scrub(virt_addr + offset, size);
-
- /* Unmap and complete */
- kunmap_atomic(virt_addr);
-
- if (PageHighMem(pg))
- local_irq_restore(flags);
-}
-
-/* FIXME - should return -1 */
-int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
-{
- struct csrow_info *csrows = mci->csrows;
- int row, i;
-
- debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
- row = -1;
-
- for (i = 0; i < mci->nr_csrows; i++) {
- struct csrow_info *csrow = &csrows[i];
-
- if (csrow->nr_pages == 0)
- continue;
-
- debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
- "mask(0x%lx)\n", mci->mc_idx, __func__,
- csrow->first_page, page, csrow->last_page,
- csrow->page_mask);
-
- if ((page >= csrow->first_page) &&
- (page <= csrow->last_page) &&
- ((page & csrow->page_mask) ==
- (csrow->first_page & csrow->page_mask))) {
- row = i;
- break;
- }
- }
-
- if (row == -1)
- edac_mc_printk(mci, KERN_ERR,
- "could not look up page error address %lx\n",
- (unsigned long)page);
-
- return row;
-}
-EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
-
-/* FIXME - settable log (warning/emerg) levels */
-/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
-void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, unsigned long syndrome,
- int row, int channel, const char *msg)
-{
- unsigned long remapped_page;
-
- debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
-
- /* FIXME - maybe make panic on INTERNAL ERROR an option */
- if (row >= mci->nr_csrows || row < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range "
- "(%d >= %d)\n", row, mci->nr_csrows);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
- }
-
- if (channel >= mci->csrows[row].nr_channels || channel < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel out of range "
- "(%d >= %d)\n", channel,
- mci->csrows[row].nr_channels);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
- }
-
- if (edac_mc_get_log_ce())
- /* FIXME - put in DIMM location */
- edac_mc_printk(mci, KERN_WARNING,
- "CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
- "0x%lx, row %d, channel %d, label \"%s\": %s\n",
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, syndrome, row, channel,
- mci->csrows[row].channels[channel].label, msg);
-
- mci->ce_count++;
- mci->csrows[row].ce_count++;
- mci->csrows[row].channels[channel].ce_count++;
-
- if (mci->scrub_mode & SCRUB_SW_SRC) {
- /*
- * Some MC's can remap memory so that it is still available
- * at a different address when PCI devices map into memory.
- * MC's that can't do this lose the memory where PCI devices
- * are mapped. This mapping is MC dependent and so we call
- * back into the MC driver for it to map the MC page to
- * a physical (CPU) page which can then be mapped to a virtual
- * page - which can then be scrubbed.
- */
- remapped_page = mci->ctl_page_to_phys ?
- mci->ctl_page_to_phys(mci, page_frame_number) :
- page_frame_number;
-
- edac_mc_scrub_block(remapped_page, offset_in_page,
- mci->csrows[row].grain);
- }
-}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
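
A hedged sketch of a caller of the handler above: the error address, syndrome and channel are assumed to come from driver-specific decode logic; edac_mc_find_csrow_by_page() and edac_mc_handle_ce() are the functions defined in this file.

#include <asm/page.h>
#include "edac_core.h"

static void example_report_ce(struct mem_ctl_info *mci, u64 err_addr,
			      unsigned long syndrome, int channel)
{
	unsigned long pfn = err_addr >> PAGE_SHIFT;
	unsigned long offset = err_addr & ~PAGE_MASK;
	int row = edac_mc_find_csrow_by_page(mci, pfn);

	if (row < 0)
		return;		/* address did not map to any known csrow */

	edac_mc_handle_ce(mci, pfn, offset, syndrome, row, channel,
			  "example corrected error");
}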
-
-void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
-{
- if (edac_mc_get_log_ce())
- edac_mc_printk(mci, KERN_WARNING,
- "CE - no information available: %s\n", msg);
-
- mci->ce_noinfo_count++;
- mci->ce_count++;
-}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
-
-void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, int row, const char *msg)
-{
- int len = EDAC_MC_LABEL_LEN * 4;
- char labels[len + 1];
- char *pos = labels;
- int chan;
- int chars;
-
- debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
-
- /* FIXME - maybe make panic on INTERNAL ERROR an option */
- if (row >= mci->nr_csrows || row < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range "
- "(%d >= %d)\n", row, mci->nr_csrows);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
- }
-
- chars = snprintf(pos, len + 1, "%s",
- mci->csrows[row].channels[0].label);
- len -= chars;
- pos += chars;
-
- for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
- chan++) {
- chars = snprintf(pos, len + 1, ":%s",
- mci->csrows[row].channels[chan].label);
- len -= chars;
- pos += chars;
- }
-
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_EMERG,
- "UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
- "labels \"%s\": %s\n", page_frame_number,
- offset_in_page, mci->csrows[row].grain, row,
- labels, msg);
-
- if (edac_mc_get_panic_on_ue())
- panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
- "row %d, labels \"%s\": %s\n", mci->mc_idx,
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, row, labels, msg);
-
- mci->ue_count++;
- mci->csrows[row].ue_count++;
-}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
-
-void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
-{
- if (edac_mc_get_panic_on_ue())
- panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
-
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_WARNING,
- "UE - no information available: %s\n", msg);
- mci->ue_noinfo_count++;
- mci->ue_count++;
-}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
-
-/*************************************************************
- * On Fully Buffered DIMM modules, this helper function is
- * called to process UE events
- */
-void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
- unsigned int csrow,
- unsigned int channela,
- unsigned int channelb, char *msg)
-{
- int len = EDAC_MC_LABEL_LEN * 4;
- char labels[len + 1];
- char *pos = labels;
- int chars;
-
- if (csrow >= mci->nr_csrows) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range (%d >= %d)\n",
- csrow, mci->nr_csrows);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
- }
-
- if (channela >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel-a out of range "
- "(%d >= %d)\n",
- channela, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
- }
-
- if (channelb >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel-b out of range "
- "(%d >= %d)\n",
- channelb, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
- }
-
- mci->ue_count++;
- mci->csrows[csrow].ue_count++;
-
- /* Generate the DIMM labels from the specified channels */
- chars = snprintf(pos, len + 1, "%s",
- mci->csrows[csrow].channels[channela].label);
- len -= chars;
- pos += chars;
- chars = snprintf(pos, len + 1, "-%s",
- mci->csrows[csrow].channels[channelb].label);
-
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_EMERG,
- "UE row %d, channel-a= %d channel-b= %d "
- "labels \"%s\": %s\n", csrow, channela, channelb,
- labels, msg);
-
- if (edac_mc_get_panic_on_ue())
- panic("UE row %d, channel-a= %d channel-b= %d "
- "labels \"%s\": %s\n", csrow, channela,
- channelb, labels, msg);
-}
-EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
-
-/*************************************************************
- * On Fully Buffered DIMM modules, this helper function is
- * called to process CE events
- */
-void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
- unsigned int csrow, unsigned int channel, char *msg)
-{
-
- /* Ensure boundary values */
- if (csrow >= mci->nr_csrows) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range (%d >= %d)\n",
- csrow, mci->nr_csrows);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
- }
- if (channel >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel out of range (%d >= %d)\n",
- channel, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
- }
-
- if (edac_mc_get_log_ce())
- /* FIXME - put in DIMM location */
- edac_mc_printk(mci, KERN_WARNING,
- "CE row %d, channel %d, label \"%s\": %s\n",
- csrow, channel,
- mci->csrows[csrow].channels[channel].label, msg);
-
- mci->ce_count++;
- mci->csrows[csrow].ce_count++;
- mci->csrows[csrow].channels[channel].ce_count++;
-}
-EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
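A minimal sketch of how a fully buffered DIMM driver might call the two FBD helpers above from its error-decode path; the function, its arguments and the message string are hypothetical, only the helper signatures are taken from the code above:

	/* Illustrative caller, not part of the deleted file: the driver is
	 * assumed to have already decoded csrow and the two branch channels
	 * from its own error registers.
	 */
	static void example_fbd_report(struct mem_ctl_info *mci, unsigned int csrow,
				       unsigned int chan_a, unsigned int chan_b,
				       int uncorrectable)
	{
		char msg[] = "example FBD error";

		if (uncorrectable)
			edac_mc_handle_fbd_ue(mci, csrow, chan_a, chan_b, msg);
		else
			edac_mc_handle_fbd_ce(mci, csrow, chan_a, msg);
	}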
diff --git a/ANDROID_3.4.5/drivers/edac/edac_mc_sysfs.c b/ANDROID_3.4.5/drivers/edac/edac_mc_sysfs.c
deleted file mode 100644
index e9a28f57..00000000
--- a/ANDROID_3.4.5/drivers/edac/edac_mc_sysfs.c
+++ /dev/null
@@ -1,1064 +0,0 @@
-/*
- * edac_mc kernel module
- * (C) 2005-2007 Linux Networx (http://lnxi.com)
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
- *
- */
-
-#include <linux/ctype.h>
-#include <linux/slab.h>
-#include <linux/edac.h>
-#include <linux/bug.h>
-
-#include "edac_core.h"
-#include "edac_module.h"
-
-
-/* MC EDAC Controls, settable by module parameter and sysfs */
-static int edac_mc_log_ue = 1;
-static int edac_mc_log_ce = 1;
-static int edac_mc_panic_on_ue;
-static int edac_mc_poll_msec = 1000;
-
-/* Getter functions for above */
-int edac_mc_get_log_ue(void)
-{
- return edac_mc_log_ue;
-}
-
-int edac_mc_get_log_ce(void)
-{
- return edac_mc_log_ce;
-}
-
-int edac_mc_get_panic_on_ue(void)
-{
- return edac_mc_panic_on_ue;
-}
-
-/* this is temporary */
-int edac_mc_get_poll_msec(void)
-{
- return edac_mc_poll_msec;
-}
-
-static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
-{
- long l;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = strict_strtol(val, 0, &l);
- if (ret == -EINVAL || ((int)l != l))
- return -EINVAL;
- *((int *)kp->arg) = l;
-
- /* notify edac_mc engine to reset the poll period */
- edac_mc_reset_delay_period(l);
-
- return 0;
-}
-
-/* Parameter declarations for above */
-module_param(edac_mc_panic_on_ue, int, 0644);
-MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
-module_param(edac_mc_log_ue, int, 0644);
-MODULE_PARM_DESC(edac_mc_log_ue,
- "Log uncorrectable error to console: 0=off 1=on");
-module_param(edac_mc_log_ce, int, 0644);
-MODULE_PARM_DESC(edac_mc_log_ce,
- "Log correctable error to console: 0=off 1=on");
-module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
- &edac_mc_poll_msec, 0644);
-MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
-
-/*
- * various constants for Memory Controllers
- */
-static const char *mem_types[] = {
- [MEM_EMPTY] = "Empty",
- [MEM_RESERVED] = "Reserved",
- [MEM_UNKNOWN] = "Unknown",
- [MEM_FPM] = "FPM",
- [MEM_EDO] = "EDO",
- [MEM_BEDO] = "BEDO",
- [MEM_SDR] = "Unbuffered-SDR",
- [MEM_RDR] = "Registered-SDR",
- [MEM_DDR] = "Unbuffered-DDR",
- [MEM_RDDR] = "Registered-DDR",
- [MEM_RMBS] = "RMBS",
- [MEM_DDR2] = "Unbuffered-DDR2",
- [MEM_FB_DDR2] = "FullyBuffered-DDR2",
- [MEM_RDDR2] = "Registered-DDR2",
- [MEM_XDR] = "XDR",
- [MEM_DDR3] = "Unbuffered-DDR3",
- [MEM_RDDR3] = "Registered-DDR3"
-};
-
-static const char *dev_types[] = {
- [DEV_UNKNOWN] = "Unknown",
- [DEV_X1] = "x1",
- [DEV_X2] = "x2",
- [DEV_X4] = "x4",
- [DEV_X8] = "x8",
- [DEV_X16] = "x16",
- [DEV_X32] = "x32",
- [DEV_X64] = "x64"
-};
-
-static const char *edac_caps[] = {
- [EDAC_UNKNOWN] = "Unknown",
- [EDAC_NONE] = "None",
- [EDAC_RESERVED] = "Reserved",
- [EDAC_PARITY] = "PARITY",
- [EDAC_EC] = "EC",
- [EDAC_SECDED] = "SECDED",
- [EDAC_S2ECD2ED] = "S2ECD2ED",
- [EDAC_S4ECD4ED] = "S4ECD4ED",
- [EDAC_S8ECD8ED] = "S8ECD8ED",
- [EDAC_S16ECD16ED] = "S16ECD16ED"
-};
-
-/* EDAC sysfs CSROW data structures and methods
- */
-
-/* Set of more default csrow<id> attribute show/store functions */
-static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data,
- int private)
-{
- return sprintf(data, "%u\n", csrow->ue_count);
-}
-
-static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
- int private)
-{
- return sprintf(data, "%u\n", csrow->ce_count);
-}
-
-static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
- int private)
-{
- return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
-}
-
-static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
- int private)
-{
- return sprintf(data, "%s\n", mem_types[csrow->mtype]);
-}
-
-static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
- int private)
-{
- return sprintf(data, "%s\n", dev_types[csrow->dtype]);
-}
-
-static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
- int private)
-{
- return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]);
-}
-
-/* show/store functions for DIMM Label attributes */
-static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
- char *data, int channel)
-{
- /* if field has not been initialized, there is nothing to send */
- if (!csrow->channels[channel].label[0])
- return 0;
-
- return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
- csrow->channels[channel].label);
-}
-
-static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
- const char *data,
- size_t count, int channel)
-{
- ssize_t max_size = 0;
-
- max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
- strncpy(csrow->channels[channel].label, data, max_size);
- csrow->channels[channel].label[max_size] = '\0';
-
- return max_size;
-}
-
-/* show function for dynamic chX_ce_count attribute */
-static ssize_t channel_ce_count_show(struct csrow_info *csrow,
- char *data, int channel)
-{
- return sprintf(data, "%u\n", csrow->channels[channel].ce_count);
-}
-
-/* csrow specific attribute structure */
-struct csrowdev_attribute {
- struct attribute attr;
- ssize_t(*show) (struct csrow_info *, char *, int);
- ssize_t(*store) (struct csrow_info *, const char *, size_t, int);
- int private;
-};
-
-#define to_csrow(k) container_of(k, struct csrow_info, kobj)
-#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
-
-/* Set of show/store higher level functions for default csrow attributes */
-static ssize_t csrowdev_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
-{
- struct csrow_info *csrow = to_csrow(kobj);
- struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
-
- if (csrowdev_attr->show)
- return csrowdev_attr->show(csrow,
- buffer, csrowdev_attr->private);
- return -EIO;
-}
-
-static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct csrow_info *csrow = to_csrow(kobj);
- struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
-
- if (csrowdev_attr->store)
- return csrowdev_attr->store(csrow,
- buffer,
- count, csrowdev_attr->private);
- return -EIO;
-}
-
-static const struct sysfs_ops csrowfs_ops = {
- .show = csrowdev_show,
- .store = csrowdev_store
-};
-
-#define CSROWDEV_ATTR(_name,_mode,_show,_store,_private) \
-static struct csrowdev_attribute attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
- .private = _private, \
-};
-
-/* default csrow<id>/attribute files */
-CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0);
-CSROWDEV_ATTR(dev_type, S_IRUGO, csrow_dev_type_show, NULL, 0);
-CSROWDEV_ATTR(mem_type, S_IRUGO, csrow_mem_type_show, NULL, 0);
-CSROWDEV_ATTR(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL, 0);
-CSROWDEV_ATTR(ue_count, S_IRUGO, csrow_ue_count_show, NULL, 0);
-CSROWDEV_ATTR(ce_count, S_IRUGO, csrow_ce_count_show, NULL, 0);
-
-/* default attributes of the CSROW<id> object */
-static struct csrowdev_attribute *default_csrow_attr[] = {
- &attr_dev_type,
- &attr_mem_type,
- &attr_edac_mode,
- &attr_size_mb,
- &attr_ue_count,
- &attr_ce_count,
- NULL,
-};
-
-/* possible dynamic channel DIMM Label attribute files */
-CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 0);
-CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 1);
-CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 2);
-CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 3);
-CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 4);
-CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 5);
-
-/* Total possible dynamic DIMM Label attribute file table */
-static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = {
- &attr_ch0_dimm_label,
- &attr_ch1_dimm_label,
- &attr_ch2_dimm_label,
- &attr_ch3_dimm_label,
- &attr_ch4_dimm_label,
- &attr_ch5_dimm_label
-};
-
-/* possible dynamic channel ce_count attribute files */
-CSROWDEV_ATTR(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0);
-CSROWDEV_ATTR(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1);
-CSROWDEV_ATTR(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2);
-CSROWDEV_ATTR(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3);
-CSROWDEV_ATTR(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4);
-CSROWDEV_ATTR(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5);
-
-/* Total possible dynamic ce_count attribute file table */
-static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = {
- &attr_ch0_ce_count,
- &attr_ch1_ce_count,
- &attr_ch2_ce_count,
- &attr_ch3_ce_count,
- &attr_ch4_ce_count,
- &attr_ch5_ce_count
-};
-
-#define EDAC_NR_CHANNELS 6
-
-/* Create dynamic CHANNEL files, indexed by 'chan', under specified CSROW */
-static int edac_create_channel_files(struct kobject *kobj, int chan)
-{
- int err = -ENODEV;
-
- if (chan >= EDAC_NR_CHANNELS)
- return err;
-
- /* create the DIMM label attribute file */
- err = sysfs_create_file(kobj,
- (struct attribute *)
- dynamic_csrow_dimm_attr[chan]);
-
- if (!err) {
- /* create the CE Count attribute file */
- err = sysfs_create_file(kobj,
- (struct attribute *)
- dynamic_csrow_ce_count_attr[chan]);
- } else {
- debugf1("%s() dimm labels and ce_count files created",
- __func__);
- }
-
- return err;
-}
-
-/* No memory to release for this kobj */
-static void edac_csrow_instance_release(struct kobject *kobj)
-{
- struct mem_ctl_info *mci;
- struct csrow_info *cs;
-
- debugf1("%s()\n", __func__);
-
- cs = container_of(kobj, struct csrow_info, kobj);
- mci = cs->mci;
-
- kobject_put(&mci->edac_mci_kobj);
-}
-
-/* the kobj_type instance for a CSROW */
-static struct kobj_type ktype_csrow = {
- .release = edac_csrow_instance_release,
- .sysfs_ops = &csrowfs_ops,
- .default_attrs = (struct attribute **)default_csrow_attr,
-};
-
-/* Create a CSROW object under specified edac_mc_device */
-static int edac_create_csrow_object(struct mem_ctl_info *mci,
- struct csrow_info *csrow, int index)
-{
- struct kobject *kobj_mci = &mci->edac_mci_kobj;
- struct kobject *kobj;
- int chan;
- int err;
-
- /* generate ..../edac/mc/mc<id>/csrow<index> */
- memset(&csrow->kobj, 0, sizeof(csrow->kobj));
- csrow->mci = mci; /* include container up link */
-
- /* bump the mci instance's kobject's ref count */
- kobj = kobject_get(&mci->edac_mci_kobj);
- if (!kobj) {
- err = -ENODEV;
- goto err_out;
- }
-
-	/* Instantiate the csrow object */
- err = kobject_init_and_add(&csrow->kobj, &ktype_csrow, kobj_mci,
- "csrow%d", index);
- if (err)
- goto err_release_top_kobj;
-
-	/* At this point, to release a csrow kobj, one must
-	 * call kobject_put() and let the resulting tear-down
-	 * do the release
-	 */
-
-	/* Create the dynamic attribute files on this csrow,
- * namely, the DIMM labels and the channel ce_count
- */
- for (chan = 0; chan < csrow->nr_channels; chan++) {
- err = edac_create_channel_files(&csrow->kobj, chan);
- if (err) {
- /* special case the unregister here */
- kobject_put(&csrow->kobj);
- goto err_out;
- }
- }
- kobject_uevent(&csrow->kobj, KOBJ_ADD);
- return 0;
-
- /* error unwind stack */
-err_release_top_kobj:
- kobject_put(&mci->edac_mci_kobj);
-
-err_out:
- return err;
-}
-
-/* default sysfs methods and data structures for the main MCI kobject */
-
-static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- int row, chan;
-
- mci->ue_noinfo_count = 0;
- mci->ce_noinfo_count = 0;
- mci->ue_count = 0;
- mci->ce_count = 0;
-
- for (row = 0; row < mci->nr_csrows; row++) {
- struct csrow_info *ri = &mci->csrows[row];
-
- ri->ue_count = 0;
- ri->ce_count = 0;
-
- for (chan = 0; chan < ri->nr_channels; chan++)
- ri->channels[chan].ce_count = 0;
- }
-
- mci->start_time = jiffies;
- return count;
-}
-
-/* Memory scrubbing interface:
- *
- * An MC driver can limit the scrubbing bandwidth based on the CPU type.
- * Therefore, ->set_sdram_scrub_rate should be made to return the actual
- * bandwidth that is accepted or 0 when scrubbing is to be disabled.
- *
- * A negative value still means that an error has occurred while setting
- * the scrub rate.
- */
-static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- unsigned long bandwidth = 0;
- int new_bw = 0;
-
- if (!mci->set_sdram_scrub_rate)
- return -ENODEV;
-
- if (strict_strtoul(data, 10, &bandwidth) < 0)
- return -EINVAL;
-
- new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
- if (new_bw < 0) {
- edac_printk(KERN_WARNING, EDAC_MC,
- "Error setting scrub rate to: %lu\n", bandwidth);
- return -EINVAL;
- }
-
- return count;
-}
-
-/*
- * ->get_sdram_scrub_rate() return value semantics same as above.
- */
-static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
-{
- int bandwidth = 0;
-
- if (!mci->get_sdram_scrub_rate)
- return -ENODEV;
-
- bandwidth = mci->get_sdram_scrub_rate(mci);
- if (bandwidth < 0) {
- edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
- return bandwidth;
- }
-
- return sprintf(data, "%d\n", bandwidth);
-}
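A minimal sketch of the driver-side callback these handlers rely on, following the return-value contract in the comment above (the clamp value and the register-programming helper are hypothetical):

	/* Illustrative only: returns the bandwidth actually programmed,
	 * 0 when scrubbing ends up disabled, or a negative value on error,
	 * as expected by mci_sdram_scrub_rate_store()/_show() above.
	 */
	static int example_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 bw)
	{
		u32 actual = min_t(u32, bw, 100000);	/* made-up hardware limit */

		if (example_program_scrub_reg(mci, actual))	/* hypothetical helper */
			return -EIO;

		return actual;		/* 0 here would mean scrubbing is disabled */
	}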
-
-/* default attribute files for the MCI object */
-static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
-{
- return sprintf(data, "%d\n", mci->ue_count);
-}
-
-static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
-{
- return sprintf(data, "%d\n", mci->ce_count);
-}
-
-static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
-{
- return sprintf(data, "%d\n", mci->ce_noinfo_count);
-}
-
-static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
-{
- return sprintf(data, "%d\n", mci->ue_noinfo_count);
-}
-
-static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
-{
- return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
-}
-
-static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
-{
- return sprintf(data, "%s\n", mci->ctl_name);
-}
-
-static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
-{
- int total_pages, csrow_idx;
-
- for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
- csrow_idx++) {
- struct csrow_info *csrow = &mci->csrows[csrow_idx];
-
- if (!csrow->nr_pages)
- continue;
-
- total_pages += csrow->nr_pages;
- }
-
- return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
-}
-
-#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
-#define to_mcidev_attr(a) container_of(a,struct mcidev_sysfs_attribute,attr)
-
-/* MCI show/store functions for top most object */
-static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
-{
- struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
- struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
-
- debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
-
- if (mcidev_attr->show)
- return mcidev_attr->show(mem_ctl_info, buffer);
-
- return -EIO;
-}
-
-static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
- struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
-
- debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
-
- if (mcidev_attr->store)
- return mcidev_attr->store(mem_ctl_info, buffer, count);
-
- return -EIO;
-}
-
-/* Intermediate show/store table */
-static const struct sysfs_ops mci_ops = {
- .show = mcidev_show,
- .store = mcidev_store
-};
-
-#define MCIDEV_ATTR(_name,_mode,_show,_store) \
-static struct mcidev_sysfs_attribute mci_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
-};
-
-/* default Control file */
-MCIDEV_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
-
-/* default Attribute files */
-MCIDEV_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
-MCIDEV_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
-MCIDEV_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
-MCIDEV_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
-MCIDEV_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
-MCIDEV_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
-MCIDEV_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
-
-/* memory scrubber attribute file */
-MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
- mci_sdram_scrub_rate_store);
-
-static struct mcidev_sysfs_attribute *mci_attr[] = {
- &mci_attr_reset_counters,
- &mci_attr_mc_name,
- &mci_attr_size_mb,
- &mci_attr_seconds_since_reset,
- &mci_attr_ue_noinfo_count,
- &mci_attr_ce_noinfo_count,
- &mci_attr_ue_count,
- &mci_attr_ce_count,
- &mci_attr_sdram_scrub_rate,
- NULL
-};
-
-
-/*
- * Release of an MC controlling instance
- *
- * Each MC control instance has the following resources upon entry:
- *	a) a ref count on the top memctl kobj
- *	b) a ref count on this module
- *
- * This function must decrement those ref counts and then
- * free the instance's memory
- */
-static void edac_mci_control_release(struct kobject *kobj)
-{
- struct mem_ctl_info *mci;
-
- mci = to_mci(kobj);
-
- debugf0("%s() mci instance idx=%d releasing\n", __func__, mci->mc_idx);
-
- /* decrement the module ref count */
- module_put(mci->owner);
-}
-
-static struct kobj_type ktype_mci = {
- .release = edac_mci_control_release,
- .sysfs_ops = &mci_ops,
- .default_attrs = (struct attribute **)mci_attr,
-};
-
-/* EDAC memory controller sysfs kset:
- * /sys/devices/system/edac/mc
- */
-static struct kset *mc_kset;
-
-/*
- * edac_mc_register_sysfs_main_kobj
- *
- * sets up and registers the main kobject for each mci
- */
-int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
-{
- struct kobject *kobj_mci;
- int err;
-
- debugf1("%s()\n", __func__);
-
- kobj_mci = &mci->edac_mci_kobj;
-
- /* Init the mci's kobject */
- memset(kobj_mci, 0, sizeof(*kobj_mci));
-
- /* Record which module 'owns' this control structure
- * and bump the ref count of the module
- */
- mci->owner = THIS_MODULE;
-
- /* bump ref count on this module */
- if (!try_module_get(mci->owner)) {
- err = -ENODEV;
- goto fail_out;
- }
-
-	/* this instance becomes part of the mc_kset */
- kobj_mci->kset = mc_kset;
-
- /* register the mc<id> kobject to the mc_kset */
- err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL,
- "mc%d", mci->mc_idx);
- if (err) {
-		debugf1("%s() Failed to register '.../edac/mc%d'\n",
- __func__, mci->mc_idx);
- goto kobj_reg_fail;
- }
- kobject_uevent(kobj_mci, KOBJ_ADD);
-
- /* At this point, to 'free' the control struct,
- * edac_mc_unregister_sysfs_main_kobj() must be used
- */
-
- debugf1("%s() Registered '.../edac/mc%d' kobject\n",
- __func__, mci->mc_idx);
-
- return 0;
-
- /* Error exit stack */
-
-kobj_reg_fail:
- module_put(mci->owner);
-
-fail_out:
- return err;
-}
-
-/*
- * edac_mc_unregister_sysfs_main_kobj
- *
- * tears down the main mci kobject from the mc_kset
- */
-void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
-{
- debugf1("%s()\n", __func__);
-
- /* delete the kobj from the mc_kset */
- kobject_put(&mci->edac_mci_kobj);
-}
-
-#define EDAC_DEVICE_SYMLINK "device"
-
-#define grp_to_mci(k) (container_of(k, struct mcidev_sysfs_group_kobj, kobj)->mci)
-
-/* MCI show/store functions for top most object */
-static ssize_t inst_grp_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
-{
- struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
- struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
-
- debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
-
- if (mcidev_attr->show)
- return mcidev_attr->show(mem_ctl_info, buffer);
-
- return -EIO;
-}
-
-static ssize_t inst_grp_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
- struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
-
- debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
-
- if (mcidev_attr->store)
- return mcidev_attr->store(mem_ctl_info, buffer, count);
-
- return -EIO;
-}
-
-/* No memory to release for this kobj */
-static void edac_inst_grp_release(struct kobject *kobj)
-{
- struct mcidev_sysfs_group_kobj *grp;
- struct mem_ctl_info *mci;
-
- debugf1("%s()\n", __func__);
-
- grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj);
- mci = grp->mci;
-}
-
-/* Intermediate show/store table */
-static struct sysfs_ops inst_grp_ops = {
- .show = inst_grp_show,
- .store = inst_grp_store
-};
-
-/* the kobj_type instance for an instance group */
-static struct kobj_type ktype_inst_grp = {
- .release = edac_inst_grp_release,
- .sysfs_ops = &inst_grp_ops,
-};
-
-
-/*
- * edac_create_mci_instance_attributes
- * create MC driver-specific attributes below a specified kobj
- * This routine calls itself recursively, in order to create an entire
- * object tree.
- */
-static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
- const struct mcidev_sysfs_attribute *sysfs_attrib,
- struct kobject *kobj)
-{
- int err;
-
- debugf4("%s()\n", __func__);
-
- while (sysfs_attrib) {
- debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
- if (sysfs_attrib->grp) {
- struct mcidev_sysfs_group_kobj *grp_kobj;
-
- grp_kobj = kzalloc(sizeof(*grp_kobj), GFP_KERNEL);
- if (!grp_kobj)
- return -ENOMEM;
-
- grp_kobj->grp = sysfs_attrib->grp;
- grp_kobj->mci = mci;
- list_add_tail(&grp_kobj->list, &mci->grp_kobj_list);
-
- debugf0("%s() grp %s, mci %p\n", __func__,
- sysfs_attrib->grp->name, mci);
-
- err = kobject_init_and_add(&grp_kobj->kobj,
- &ktype_inst_grp,
- &mci->edac_mci_kobj,
- sysfs_attrib->grp->name);
- if (err < 0) {
- printk(KERN_ERR "kobject_init_and_add failed: %d\n", err);
- return err;
- }
- err = edac_create_mci_instance_attributes(mci,
- grp_kobj->grp->mcidev_attr,
- &grp_kobj->kobj);
-
- if (err < 0)
- return err;
- } else if (sysfs_attrib->attr.name) {
- debugf4("%s() file %s\n", __func__,
- sysfs_attrib->attr.name);
-
- err = sysfs_create_file(kobj, &sysfs_attrib->attr);
- if (err < 0) {
- printk(KERN_ERR "sysfs_create_file failed: %d\n", err);
- return err;
- }
- } else
- break;
-
- sysfs_attrib++;
- }
-
- return 0;
-}
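A sketch of the kind of driver-supplied table this routine walks. The traversal rules come from the loop above (an entry with .grp recurses into a sub-directory, an entry with .attr.name becomes a file, an all-empty entry terminates); the attribute names and handlers are hypothetical, and the struct layout is assumed from the member accesses above:

	/* Illustrative only: one leaf attribute plus one group. */
	static struct mcidev_sysfs_attribute example_group_attrs[] = {
		{ .attr = { .name = "inject_enable", .mode = S_IRUGO | S_IWUSR },
		  .show = example_inject_show,		/* hypothetical handlers */
		  .store = example_inject_store },
		{ }	/* terminator: neither .grp nor .attr.name set */
	};

	static struct mcidev_sysfs_group example_group = {
		.name = "inject",			/* becomes a sub-directory */
		.mcidev_attr = example_group_attrs,
	};

	static struct mcidev_sysfs_attribute example_mci_attrs[] = {
		{ .attr = { .name = "chip_revision", .mode = S_IRUGO },
		  .show = example_rev_show },		/* hypothetical handler */
		{ .grp = &example_group },
		{ }	/* terminator */
	};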
-
-/*
- * edac_remove_mci_instance_attributes
- * remove MC driver specific attributes at the topmost level
- * directory of this mci instance.
- */
-static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
- const struct mcidev_sysfs_attribute *sysfs_attrib,
- struct kobject *kobj, int count)
-{
- struct mcidev_sysfs_group_kobj *grp_kobj, *tmp;
-
- debugf1("%s()\n", __func__);
-
- /*
-	 * Loop while there are attributes, until we hit a NULL entry.
-	 * First remove all of the attributes
- */
- while (sysfs_attrib) {
- debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
- if (sysfs_attrib->grp) {
- debugf4("%s() seeking for group %s\n",
- __func__, sysfs_attrib->grp->name);
- list_for_each_entry(grp_kobj,
- &mci->grp_kobj_list, list) {
- debugf4("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
- if (grp_kobj->grp == sysfs_attrib->grp) {
- edac_remove_mci_instance_attributes(mci,
- grp_kobj->grp->mcidev_attr,
- &grp_kobj->kobj, count + 1);
- debugf4("%s() group %s\n", __func__,
- sysfs_attrib->grp->name);
- kobject_put(&grp_kobj->kobj);
- }
- }
- debugf4("%s() end of seeking for group %s\n",
- __func__, sysfs_attrib->grp->name);
- } else if (sysfs_attrib->attr.name) {
- debugf4("%s() file %s\n", __func__,
- sysfs_attrib->attr.name);
- sysfs_remove_file(kobj, &sysfs_attrib->attr);
- } else
- break;
- sysfs_attrib++;
- }
-
- /* Remove the group objects */
- if (count)
- return;
- list_for_each_entry_safe(grp_kobj, tmp,
- &mci->grp_kobj_list, list) {
- list_del(&grp_kobj->list);
- kfree(grp_kobj);
- }
-}
-
-
-/*
- * Create a new Memory Controller kobject instance,
- * mc<id> under the 'mc' directory
- *
- * Return:
- * 0 Success
- * !0 Failure
- */
-int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
-{
- int i;
- int err;
- struct csrow_info *csrow;
- struct kobject *kobj_mci = &mci->edac_mci_kobj;
-
- debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
-
- INIT_LIST_HEAD(&mci->grp_kobj_list);
-
- /* create a symlink for the device */
- err = sysfs_create_link(kobj_mci, &mci->dev->kobj,
- EDAC_DEVICE_SYMLINK);
- if (err) {
- debugf1("%s() failure to create symlink\n", __func__);
- goto fail0;
- }
-
- /* If the low level driver desires some attributes,
- * then create them now for the driver.
- */
- if (mci->mc_driver_sysfs_attributes) {
- err = edac_create_mci_instance_attributes(mci,
- mci->mc_driver_sysfs_attributes,
- &mci->edac_mci_kobj);
- if (err) {
- debugf1("%s() failure to create mci attributes\n",
- __func__);
- goto fail0;
- }
- }
-
- /* Make directories for each CSROW object under the mc<id> kobject
- */
- for (i = 0; i < mci->nr_csrows; i++) {
- csrow = &mci->csrows[i];
-
- /* Only expose populated CSROWs */
- if (csrow->nr_pages > 0) {
- err = edac_create_csrow_object(mci, csrow, i);
- if (err) {
- debugf1("%s() failure: create csrow %d obj\n",
- __func__, i);
- goto fail1;
- }
- }
- }
-
- return 0;
-
-	/* CSROW error: back out what has already been registered */
-fail1:
- for (i--; i >= 0; i--) {
-		if (mci->csrows[i].nr_pages > 0) {
- kobject_put(&mci->csrows[i].kobj);
- }
- }
-
- /* remove the mci instance's attributes, if any */
- edac_remove_mci_instance_attributes(mci,
- mci->mc_driver_sysfs_attributes, &mci->edac_mci_kobj, 0);
-
- /* remove the symlink */
- sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK);
-
-fail0:
- return err;
-}
-
-/*
- * remove a Memory Controller instance
- */
-void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
-{
- int i;
-
- debugf0("%s()\n", __func__);
-
- /* remove all csrow kobjects */
- debugf4("%s() unregister this mci kobj\n", __func__);
- for (i = 0; i < mci->nr_csrows; i++) {
- if (mci->csrows[i].nr_pages > 0) {
- debugf0("%s() unreg csrow-%d\n", __func__, i);
- kobject_put(&mci->csrows[i].kobj);
- }
- }
-
-	/* remove this mci instance's attributes */
- if (mci->mc_driver_sysfs_attributes) {
- debugf4("%s() unregister mci private attributes\n", __func__);
- edac_remove_mci_instance_attributes(mci,
- mci->mc_driver_sysfs_attributes,
- &mci->edac_mci_kobj, 0);
- }
-
- /* remove the symlink */
- debugf4("%s() remove_link\n", __func__);
- sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
-
- /* unregister this instance's kobject */
- debugf4("%s() remove_mci_instance\n", __func__);
- kobject_put(&mci->edac_mci_kobj);
-}
-
-
-
-
-/*
- * edac_sysfs_setup_mc_kset(void)
- *
- * Initialize the mc_kset for the 'mc' entry
- * This requires creating the top 'mc' directory with a kset
- * and its controls/attributes.
- *
- *	The 'mci' instances will be grouped as children of this 'mc' kset.
- *
- * Return: 0 SUCCESS
- * !0 FAILURE error code
- */
-int edac_sysfs_setup_mc_kset(void)
-{
- int err = -EINVAL;
- struct bus_type *edac_subsys;
-
- debugf1("%s()\n", __func__);
-
- /* get the /sys/devices/system/edac subsys reference */
- edac_subsys = edac_get_sysfs_subsys();
- if (edac_subsys == NULL) {
- debugf1("%s() no edac_subsys error=%d\n", __func__, err);
- goto fail_out;
- }
-
- /* Init the MC's kobject */
- mc_kset = kset_create_and_add("mc", NULL, &edac_subsys->dev_root->kobj);
- if (!mc_kset) {
- err = -ENOMEM;
- debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
- goto fail_kset;
- }
-
- debugf1("%s() Registered '.../edac/mc' kobject\n", __func__);
-
- return 0;
-
-fail_kset:
- edac_put_sysfs_subsys();
-
-fail_out:
- return err;
-}
-
-/*
- * edac_sysfs_teardown_mc_kset
- *
- * deconstruct the mc_ket for memory controllers
- */
-void edac_sysfs_teardown_mc_kset(void)
-{
- kset_unregister(mc_kset);
- edac_put_sysfs_subsys();
-}
-
diff --git a/ANDROID_3.4.5/drivers/edac/edac_module.c b/ANDROID_3.4.5/drivers/edac/edac_module.c
deleted file mode 100644
index 5ddaa86d..00000000
--- a/ANDROID_3.4.5/drivers/edac/edac_module.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * edac_module.c
- *
- * (C) 2007 www.softwarebitmaker.com
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
- * Author: Doug Thompson <dougthompson@xmission.com>
- *
- */
-#include <linux/edac.h>
-
-#include "edac_core.h"
-#include "edac_module.h"
-
-#define EDAC_VERSION "Ver: 2.1.0"
-
-#ifdef CONFIG_EDAC_DEBUG
-/* Values of 0 to 4 will generate output */
-int edac_debug_level = 2;
-EXPORT_SYMBOL_GPL(edac_debug_level);
-#endif
-
-/* scope is to module level only */
-struct workqueue_struct *edac_workqueue;
-
-/*
- * edac_op_state_to_string()
- */
-char *edac_op_state_to_string(int opstate)
-{
- if (opstate == OP_RUNNING_POLL)
- return "POLLED";
- else if (opstate == OP_RUNNING_INTERRUPT)
- return "INTERRUPT";
- else if (opstate == OP_RUNNING_POLL_INTR)
- return "POLL-INTR";
- else if (opstate == OP_ALLOC)
- return "ALLOC";
- else if (opstate == OP_OFFLINE)
- return "OFFLINE";
-
- return "UNKNOWN";
-}
-
-/*
- * edac_workqueue_setup
- * initialize the edac work queue for polling operations
- */
-static int edac_workqueue_setup(void)
-{
- edac_workqueue = create_singlethread_workqueue("edac-poller");
- if (edac_workqueue == NULL)
- return -ENODEV;
- else
- return 0;
-}
-
-/*
- * edac_workqueue_teardown
- * teardown the edac workqueue
- */
-static void edac_workqueue_teardown(void)
-{
- if (edac_workqueue) {
- flush_workqueue(edac_workqueue);
- destroy_workqueue(edac_workqueue);
- edac_workqueue = NULL;
- }
-}
-
-/*
- * edac_init
- * module initialization entry point
- */
-static int __init edac_init(void)
-{
- int err = 0;
-
- edac_printk(KERN_INFO, EDAC_MC, EDAC_VERSION "\n");
-
- /*
- * Harvest and clear any boot/initialization PCI parity errors
- *
- * FIXME: This only clears errors logged by devices present at time of
- * module initialization. We should also do an initial clear
- * of each newly hotplugged device.
- */
- edac_pci_clear_parity_errors();
-
- /*
- * now set up the mc_kset under the edac class object
- */
- err = edac_sysfs_setup_mc_kset();
- if (err)
- goto error;
-
- /* Setup/Initialize the workq for this core */
- err = edac_workqueue_setup();
- if (err) {
- edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n");
- goto workq_fail;
- }
-
- return 0;
-
- /* Error teardown stack */
-workq_fail:
- edac_sysfs_teardown_mc_kset();
-
-error:
- return err;
-}
-
-/*
- * edac_exit()
- * module exit/termination function
- */
-static void __exit edac_exit(void)
-{
- debugf0("%s()\n", __func__);
-
- /* tear down the various subsystems */
- edac_workqueue_teardown();
- edac_sysfs_teardown_mc_kset();
-}
-
-/*
- * Inform the kernel of our entry and exit points
- */
-module_init(edac_init);
-module_exit(edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Doug Thompson www.softwarebitmaker.com, et al");
-MODULE_DESCRIPTION("Core library routines for EDAC reporting");
-
-/* refer to *_sysfs.c files for parameters that are exported via sysfs */
-
-#ifdef CONFIG_EDAC_DEBUG
-module_param(edac_debug_level, int, 0644);
-MODULE_PARM_DESC(edac_debug_level, "Debug level");
-#endif
diff --git a/ANDROID_3.4.5/drivers/edac/edac_module.h b/ANDROID_3.4.5/drivers/edac/edac_module.h
deleted file mode 100644
index 00f81b47..00000000
--- a/ANDROID_3.4.5/drivers/edac/edac_module.h
+++ /dev/null
@@ -1,81 +0,0 @@
-
-/*
- * edac_module.h
- *
- * For defining functions/data for within the EDAC_CORE module only
- *
- * Written by Doug Thompson <norsk5@xmission.com>
- */
-
-#ifndef __EDAC_MODULE_H__
-#define __EDAC_MODULE_H__
-
-#include "edac_core.h"
-
-/*
- * INTERNAL EDAC MODULE:
- * EDAC memory controller sysfs create/remove functions
- * and setup/teardown functions
- *
- * edac_mc objects
- */
-extern int edac_sysfs_setup_mc_kset(void);
-extern void edac_sysfs_teardown_mc_kset(void);
-extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci);
-extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci);
-extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci);
-extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci);
-extern int edac_get_log_ue(void);
-extern int edac_get_log_ce(void);
-extern int edac_get_panic_on_ue(void);
-extern int edac_mc_get_log_ue(void);
-extern int edac_mc_get_log_ce(void);
-extern int edac_mc_get_panic_on_ue(void);
-extern int edac_get_poll_msec(void);
-extern int edac_mc_get_poll_msec(void);
-
-extern int edac_device_register_sysfs_main_kobj(
- struct edac_device_ctl_info *edac_dev);
-extern void edac_device_unregister_sysfs_main_kobj(
- struct edac_device_ctl_info *edac_dev);
-extern int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev);
-extern void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev);
-
-/* edac core workqueue: single CPU mode */
-extern struct workqueue_struct *edac_workqueue;
-extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
- unsigned msec);
-extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
-extern void edac_device_reset_delay_period(struct edac_device_ctl_info
- *edac_dev, unsigned long value);
-extern void edac_mc_reset_delay_period(int value);
-
-extern void *edac_align_ptr(void *ptr, unsigned size);
-
-/*
- * EDAC PCI functions
- */
-#ifdef CONFIG_PCI
-extern void edac_pci_do_parity_check(void);
-extern void edac_pci_clear_parity_errors(void);
-extern int edac_sysfs_pci_setup(void);
-extern void edac_sysfs_pci_teardown(void);
-extern int edac_pci_get_check_errors(void);
-extern int edac_pci_get_poll_msec(void);
-extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
-extern void edac_pci_handle_pe(struct edac_pci_ctl_info *pci, const char *msg);
-extern void edac_pci_handle_npe(struct edac_pci_ctl_info *pci,
- const char *msg);
-#else /* CONFIG_PCI */
-/* pre-process these away */
-#define edac_pci_do_parity_check()
-#define edac_pci_clear_parity_errors()
-#define edac_sysfs_pci_setup() (0)
-#define edac_sysfs_pci_teardown()
-#define edac_pci_get_check_errors()
-#define edac_pci_get_poll_msec()
-#define edac_pci_handle_pe()
-#define edac_pci_handle_npe()
-#endif /* CONFIG_PCI */
-
-#endif /* __EDAC_MODULE_H__ */
diff --git a/ANDROID_3.4.5/drivers/edac/edac_pci.c b/ANDROID_3.4.5/drivers/edac/edac_pci.c
deleted file mode 100644
index 63af1c56..00000000
--- a/ANDROID_3.4.5/drivers/edac/edac_pci.c
+++ /dev/null
@@ -1,499 +0,0 @@
-/*
- * EDAC PCI component
- *
- * Author: Dave Jiang <djiang@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/sysctl.h>
-#include <linux/highmem.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/ctype.h>
-#include <linux/workqueue.h>
-#include <asm/uaccess.h>
-#include <asm/page.h>
-
-#include "edac_core.h"
-#include "edac_module.h"
-
-static DEFINE_MUTEX(edac_pci_ctls_mutex);
-static LIST_HEAD(edac_pci_list);
-static atomic_t pci_indexes = ATOMIC_INIT(0);
-
-/*
- * edac_pci_alloc_ctl_info
- *
- * The alloc() function for the 'edac_pci' control info
- * structure. The chip driver will allocate one of these for each
- * edac_pci it is going to control/register with the EDAC CORE.
- */
-struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
- const char *edac_pci_name)
-{
- struct edac_pci_ctl_info *pci;
- void *pvt;
- unsigned int size;
-
- debugf1("%s()\n", __func__);
-
- pci = (struct edac_pci_ctl_info *)0;
- pvt = edac_align_ptr(&pci[1], sz_pvt);
- size = ((unsigned long)pvt) + sz_pvt;
-
- /* Alloc the needed control struct memory */
- pci = kzalloc(size, GFP_KERNEL);
- if (pci == NULL)
- return NULL;
-
-	/* Now set up the private data area, if any was requested */
- pvt = sz_pvt ? ((char *)pci) + ((unsigned long)pvt) : NULL;
-
- pci->pvt_info = pvt;
- pci->op_state = OP_ALLOC;
-
- snprintf(pci->name, strlen(edac_pci_name) + 1, "%s", edac_pci_name);
-
- return pci;
-}
-EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
-
-/*
- * edac_pci_free_ctl_info()
- *
- * Last action on the pci control structure.
- *
- * Calls the sysfs removal routine, which will unregister
- * this control struct's kobj.  When that kobj's ref count
- * goes to zero, its release function will be called, which
- * then kfree()s the memory.
- */
-void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci)
-{
- debugf1("%s()\n", __func__);
-
- edac_pci_remove_sysfs(pci);
-}
-EXPORT_SYMBOL_GPL(edac_pci_free_ctl_info);
-
-/*
- * find_edac_pci_by_dev()
- * scans the edac_pci list for a specific 'struct device *'
- *
- * return NULL if not found, or return control struct pointer
- */
-static struct edac_pci_ctl_info *find_edac_pci_by_dev(struct device *dev)
-{
- struct edac_pci_ctl_info *pci;
- struct list_head *item;
-
- debugf1("%s()\n", __func__);
-
- list_for_each(item, &edac_pci_list) {
- pci = list_entry(item, struct edac_pci_ctl_info, link);
-
- if (pci->dev == dev)
- return pci;
- }
-
- return NULL;
-}
-
-/*
- * add_edac_pci_to_global_list
- * Before calling this function, caller must assign a unique value to
- *	pci->pci_idx.
- * Return:
- * 0 on success
- * 1 on failure
- */
-static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci)
-{
- struct list_head *item, *insert_before;
- struct edac_pci_ctl_info *rover;
-
- debugf1("%s()\n", __func__);
-
- insert_before = &edac_pci_list;
-
- /* Determine if already on the list */
- rover = find_edac_pci_by_dev(pci->dev);
- if (unlikely(rover != NULL))
- goto fail0;
-
- /* Insert in ascending order by 'pci_idx', so find position */
- list_for_each(item, &edac_pci_list) {
- rover = list_entry(item, struct edac_pci_ctl_info, link);
-
- if (rover->pci_idx >= pci->pci_idx) {
- if (unlikely(rover->pci_idx == pci->pci_idx))
- goto fail1;
-
- insert_before = item;
- break;
- }
- }
-
- list_add_tail_rcu(&pci->link, insert_before);
- return 0;
-
-fail0:
- edac_printk(KERN_WARNING, EDAC_PCI,
- "%s (%s) %s %s already assigned %d\n",
- dev_name(rover->dev), edac_dev_name(rover),
- rover->mod_name, rover->ctl_name, rover->pci_idx);
- return 1;
-
-fail1:
- edac_printk(KERN_WARNING, EDAC_PCI,
-		"bug in low-level driver: attempt to assign\n"
- "\tduplicate pci_idx %d in %s()\n", rover->pci_idx,
- __func__);
- return 1;
-}
-
-/*
- * del_edac_pci_from_global_list
- *
- * remove the PCI control struct from the global list
- */
-static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci)
-{
- list_del_rcu(&pci->link);
-
- /* these are for safe removal of devices from global list while
- * NMI handlers may be traversing list
- */
- synchronize_rcu();
- INIT_LIST_HEAD(&pci->link);
-}
-
-#if 0
-/* Older code, but might use in the future */
-
-/*
- * edac_pci_find()
- * Search for an edac_pci_ctl_info structure whose index is 'idx'
- *
- * If found, return a pointer to the structure
- * Else return NULL.
- *
- * Caller must hold pci_ctls_mutex.
- */
-struct edac_pci_ctl_info *edac_pci_find(int idx)
-{
- struct list_head *item;
- struct edac_pci_ctl_info *pci;
-
-	/* Iterate over list, looking for exact match of ID */
- list_for_each(item, &edac_pci_list) {
- pci = list_entry(item, struct edac_pci_ctl_info, link);
-
- if (pci->pci_idx >= idx) {
- if (pci->pci_idx == idx)
- return pci;
-
- /* not on list, so terminate early */
- break;
- }
- }
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(edac_pci_find);
-#endif
-
-/*
- * edac_pci_workq_function()
- *
- * periodic function that performs the operation
- * scheduled by a workq request, for a given PCI control struct
- */
-static void edac_pci_workq_function(struct work_struct *work_req)
-{
- struct delayed_work *d_work = to_delayed_work(work_req);
- struct edac_pci_ctl_info *pci = to_edac_pci_ctl_work(d_work);
- int msec;
- unsigned long delay;
-
- debugf3("%s() checking\n", __func__);
-
- mutex_lock(&edac_pci_ctls_mutex);
-
- if (pci->op_state == OP_RUNNING_POLL) {
- /* we might be in POLL mode, but there may NOT be a poll func
- */
- if ((pci->edac_check != NULL) && edac_pci_get_check_errors())
- pci->edac_check(pci);
-
- /* if we are on a one second period, then use round */
- msec = edac_pci_get_poll_msec();
- if (msec == 1000)
- delay = round_jiffies_relative(msecs_to_jiffies(msec));
- else
- delay = msecs_to_jiffies(msec);
-
- /* Reschedule only if we are in POLL mode */
- queue_delayed_work(edac_workqueue, &pci->work, delay);
- }
-
- mutex_unlock(&edac_pci_ctls_mutex);
-}
-
-/*
- * edac_pci_workq_setup()
- * initialize a workq item for this edac_pci instance
- * passing in the new delay period in msec
- *
- * locking model:
- * called when 'edac_pci_ctls_mutex' is locked
- */
-static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
- unsigned int msec)
-{
- debugf0("%s()\n", __func__);
-
- INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
- queue_delayed_work(edac_workqueue, &pci->work,
- msecs_to_jiffies(edac_pci_get_poll_msec()));
-}
-
-/*
- * edac_pci_workq_teardown()
- * stop the workq processing on this edac_pci instance
- */
-static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
-{
- int status;
-
- debugf0("%s()\n", __func__);
-
- status = cancel_delayed_work(&pci->work);
- if (status == 0)
- flush_workqueue(edac_workqueue);
-}
-
-/*
- * edac_pci_reset_delay_period
- *
- * called with a new period value for the workq period
- * a) stop current workq timer
- * b) restart workq timer with new value
- */
-void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
- unsigned long value)
-{
- debugf0("%s()\n", __func__);
-
- edac_pci_workq_teardown(pci);
-
- /* need to lock for the setup */
- mutex_lock(&edac_pci_ctls_mutex);
-
- edac_pci_workq_setup(pci, value);
-
- mutex_unlock(&edac_pci_ctls_mutex);
-}
-EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
-
-/*
- * edac_pci_alloc_index: Allocate a unique PCI index number
- *
- * Return:
- * allocated index number
- *
- */
-int edac_pci_alloc_index(void)
-{
- return atomic_inc_return(&pci_indexes) - 1;
-}
-EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
-
-/*
- * edac_pci_add_device: Insert the 'edac_pci' structure into the
- * edac_pci global list and create sysfs entries associated with the
- * edac_pci structure.
- * @pci: pointer to the edac_pci structure to be added to the list
- * @edac_idx: A unique numeric identifier to be assigned to the
- * 'edac_pci' structure.
- *
- * Return:
- * 0 Success
- * !0 Failure
- */
-int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
-{
- debugf0("%s()\n", __func__);
-
- pci->pci_idx = edac_idx;
- pci->start_time = jiffies;
-
- mutex_lock(&edac_pci_ctls_mutex);
-
- if (add_edac_pci_to_global_list(pci))
- goto fail0;
-
- if (edac_pci_create_sysfs(pci)) {
- edac_pci_printk(pci, KERN_WARNING,
- "failed to create sysfs pci\n");
- goto fail1;
- }
-
- if (pci->edac_check != NULL) {
- pci->op_state = OP_RUNNING_POLL;
-
- edac_pci_workq_setup(pci, 1000);
- } else {
- pci->op_state = OP_RUNNING_INTERRUPT;
- }
-
- edac_pci_printk(pci, KERN_INFO,
- "Giving out device to module '%s' controller '%s':"
- " DEV '%s' (%s)\n",
- pci->mod_name,
- pci->ctl_name,
- edac_dev_name(pci), edac_op_state_to_string(pci->op_state));
-
- mutex_unlock(&edac_pci_ctls_mutex);
- return 0;
-
- /* error unwind stack */
-fail1:
- del_edac_pci_from_global_list(pci);
-fail0:
- mutex_unlock(&edac_pci_ctls_mutex);
- return 1;
-}
-EXPORT_SYMBOL_GPL(edac_pci_add_device);
-
-/*
- * edac_pci_del_device()
- * Remove sysfs entries for specified edac_pci structure and
- * then remove edac_pci structure from global list
- *
- * @dev:
- * Pointer to 'struct device' representing edac_pci structure
- * to remove
- *
- * Return:
- * Pointer to removed edac_pci structure,
- * or NULL if device not found
- */
-struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
-{
- struct edac_pci_ctl_info *pci;
-
- debugf0("%s()\n", __func__);
-
- mutex_lock(&edac_pci_ctls_mutex);
-
- /* ensure the control struct is on the global list
- * if not, then leave
- */
- pci = find_edac_pci_by_dev(dev);
- if (pci == NULL) {
- mutex_unlock(&edac_pci_ctls_mutex);
- return NULL;
- }
-
- pci->op_state = OP_OFFLINE;
-
- del_edac_pci_from_global_list(pci);
-
- mutex_unlock(&edac_pci_ctls_mutex);
-
- /* stop the workq timer */
- edac_pci_workq_teardown(pci);
-
- edac_printk(KERN_INFO, EDAC_PCI,
- "Removed device %d for %s %s: DEV %s\n",
- pci->pci_idx, pci->mod_name, pci->ctl_name, edac_dev_name(pci));
-
- return pci;
-}
-EXPORT_SYMBOL_GPL(edac_pci_del_device);
-
-/*
- * edac_pci_generic_check
- *
- * a Generic parity check API
- */
-static void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
-{
- debugf4("%s()\n", __func__);
- edac_pci_do_parity_check();
-}
-
-/* free running instance index counter */
-static int edac_pci_idx;
-#define EDAC_PCI_GENCTL_NAME "EDAC PCI controller"
-
-struct edac_pci_gen_data {
- int edac_idx;
-};
-
-/*
- * edac_pci_create_generic_ctl
- *
- * A generic constructor for a PCI parity polling device
- * Some systems have more than one domain of PCI busses.
- * For systems with one domain, this API will
- * provide a generic poller.
- *
- * This routine calls the edac_pci_alloc_ctl_info() for
- * the generic device, with default values
- */
-struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
- const char *mod_name)
-{
- struct edac_pci_ctl_info *pci;
- struct edac_pci_gen_data *pdata;
-
- pci = edac_pci_alloc_ctl_info(sizeof(*pdata), EDAC_PCI_GENCTL_NAME);
- if (!pci)
- return NULL;
-
- pdata = pci->pvt_info;
- pci->dev = dev;
- dev_set_drvdata(pci->dev, pci);
- pci->dev_name = pci_name(to_pci_dev(dev));
-
- pci->mod_name = mod_name;
- pci->ctl_name = EDAC_PCI_GENCTL_NAME;
- pci->edac_check = edac_pci_generic_check;
-
- pdata->edac_idx = edac_pci_idx++;
-
- if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
- debugf3("%s(): failed edac_pci_add_device()\n", __func__);
- edac_pci_free_ctl_info(pci);
- return NULL;
- }
-
- return pci;
-}
-EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl);
-
-/*
- * edac_pci_release_generic_ctl
- *
- * The release function of a generic EDAC PCI polling device
- */
-void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci)
-{
- debugf0("%s() pci mod=%s\n", __func__, pci->mod_name);
-
- edac_pci_del_device(pci->dev);
- edac_pci_free_ctl_info(pci);
-}
-EXPORT_SYMBOL_GPL(edac_pci_release_generic_ctl);
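A minimal sketch of how a chipset driver would typically pair the two generic-control helpers above in its probe/remove path; the variable and function names are hypothetical:

	/* Illustrative only, not part of the deleted file. */
	static struct edac_pci_ctl_info *example_pci_ctl;

	static void example_setup_pci_ctl(struct pci_dev *pdev)
	{
		example_pci_ctl = edac_pci_create_generic_ctl(&pdev->dev,
							      "example_edac");
		if (!example_pci_ctl)
			edac_printk(KERN_WARNING, EDAC_PCI,
				    "could not create generic PCI poller\n");
	}

	static void example_teardown_pci_ctl(void)
	{
		if (example_pci_ctl)
			edac_pci_release_generic_ctl(example_pci_ctl);
	}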
diff --git a/ANDROID_3.4.5/drivers/edac/edac_pci_sysfs.c b/ANDROID_3.4.5/drivers/edac/edac_pci_sysfs.c
deleted file mode 100644
index 97f5064e..00000000
--- a/ANDROID_3.4.5/drivers/edac/edac_pci_sysfs.c
+++ /dev/null
@@ -1,770 +0,0 @@
-/*
- * (C) 2005, 2006 Linux Networx (http://lnxi.com)
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Written Doug Thompson <norsk5@xmission.com>
- *
- */
-#include <linux/module.h>
-#include <linux/edac.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-
-#include "edac_core.h"
-#include "edac_module.h"
-
-/* Turn off this whole feature if PCI is not configured */
-#ifdef CONFIG_PCI
-
-#define EDAC_PCI_SYMLINK "device"
-
-/* data variables exported via sysfs */
-static int check_pci_errors; /* default NO check PCI parity */
-static int edac_pci_panic_on_pe; /* default NO panic on PCI Parity */
-static int edac_pci_log_pe = 1; /* log PCI parity errors */
-static int edac_pci_log_npe = 1;	/* log PCI non-parity errors */
-static int edac_pci_poll_msec = 1000; /* one second workq period */
-
-static atomic_t pci_parity_count = ATOMIC_INIT(0);
-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
-
-static struct kobject *edac_pci_top_main_kobj;
-static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
-
-/* getter functions for the data variables */
-int edac_pci_get_check_errors(void)
-{
- return check_pci_errors;
-}
-
-static int edac_pci_get_log_pe(void)
-{
- return edac_pci_log_pe;
-}
-
-static int edac_pci_get_log_npe(void)
-{
- return edac_pci_log_npe;
-}
-
-static int edac_pci_get_panic_on_pe(void)
-{
- return edac_pci_panic_on_pe;
-}
-
-int edac_pci_get_poll_msec(void)
-{
- return edac_pci_poll_msec;
-}
-
-/**************************** EDAC PCI sysfs instance *******************/
-static ssize_t instance_pe_count_show(struct edac_pci_ctl_info *pci, char *data)
-{
- return sprintf(data, "%u\n", atomic_read(&pci->counters.pe_count));
-}
-
-static ssize_t instance_npe_count_show(struct edac_pci_ctl_info *pci,
- char *data)
-{
- return sprintf(data, "%u\n", atomic_read(&pci->counters.npe_count));
-}
-
-#define to_instance(k) container_of(k, struct edac_pci_ctl_info, kobj)
-#define to_instance_attr(a) container_of(a, struct instance_attribute, attr)
-
-/* DEVICE instance kobject release() function */
-static void edac_pci_instance_release(struct kobject *kobj)
-{
- struct edac_pci_ctl_info *pci;
-
- debugf0("%s()\n", __func__);
-
- /* Form pointer to containing struct, the pci control struct */
- pci = to_instance(kobj);
-
- /* decrement reference count on top main kobj */
- kobject_put(edac_pci_top_main_kobj);
-
- kfree(pci); /* Free the control struct */
-}
-
-/* instance specific attribute structure */
-struct instance_attribute {
- struct attribute attr;
- ssize_t(*show) (struct edac_pci_ctl_info *, char *);
- ssize_t(*store) (struct edac_pci_ctl_info *, const char *, size_t);
-};
-
-/* Function to 'show' fields from the edac_pci 'instance' structure */
-static ssize_t edac_pci_instance_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
-{
- struct edac_pci_ctl_info *pci = to_instance(kobj);
- struct instance_attribute *instance_attr = to_instance_attr(attr);
-
- if (instance_attr->show)
- return instance_attr->show(pci, buffer);
- return -EIO;
-}
-
-/* Function to 'store' fields into the edac_pci 'instance' structure */
-static ssize_t edac_pci_instance_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct edac_pci_ctl_info *pci = to_instance(kobj);
- struct instance_attribute *instance_attr = to_instance_attr(attr);
-
- if (instance_attr->store)
- return instance_attr->store(pci, buffer, count);
- return -EIO;
-}
-
-/* fs_ops table */
-static const struct sysfs_ops pci_instance_ops = {
- .show = edac_pci_instance_show,
- .store = edac_pci_instance_store
-};
-
-#define INSTANCE_ATTR(_name, _mode, _show, _store) \
-static struct instance_attribute attr_instance_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
-};
-
-INSTANCE_ATTR(pe_count, S_IRUGO, instance_pe_count_show, NULL);
-INSTANCE_ATTR(npe_count, S_IRUGO, instance_npe_count_show, NULL);
-
-/* pci instance attributes */
-static struct instance_attribute *pci_instance_attr[] = {
- &attr_instance_pe_count,
- &attr_instance_npe_count,
- NULL
-};
-
-/* the ktype for a pci instance */
-static struct kobj_type ktype_pci_instance = {
- .release = edac_pci_instance_release,
- .sysfs_ops = &pci_instance_ops,
- .default_attrs = (struct attribute **)pci_instance_attr,
-};
-
-/*
- * edac_pci_create_instance_kobj
- *
- * construct one EDAC PCI instance's kobject for use
- */
-static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
-{
- struct kobject *main_kobj;
- int err;
-
- debugf0("%s()\n", __func__);
-
- /* First bump the ref count on the top main kobj, which will
-	 * track the number of PCI instances we have, and thus properly
-	 * nest the references that keep the module loaded
- */
- main_kobj = kobject_get(edac_pci_top_main_kobj);
- if (!main_kobj) {
- err = -ENODEV;
- goto error_out;
- }
-
- /* And now register this new kobject under the main kobj */
- err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance,
- edac_pci_top_main_kobj, "pci%d", idx);
- if (err != 0) {
- debugf2("%s() failed to register instance pci%d\n",
- __func__, idx);
- kobject_put(edac_pci_top_main_kobj);
- goto error_out;
- }
-
- kobject_uevent(&pci->kobj, KOBJ_ADD);
- debugf1("%s() Register instance 'pci%d' kobject\n", __func__, idx);
-
- return 0;
-
-	/* Error unwind stack */
-error_out:
- return err;
-}
-
-/*
- * edac_pci_unregister_sysfs_instance_kobj
- *
- * unregister the kobj for the EDAC PCI instance
- */
-static void edac_pci_unregister_sysfs_instance_kobj(
- struct edac_pci_ctl_info *pci)
-{
- debugf0("%s()\n", __func__);
-
- /* Unregister the instance kobject and allow its release
-	 * function to release the main reference count and then
- * kfree the memory
- */
- kobject_put(&pci->kobj);
-}
-
-/***************************** EDAC PCI sysfs root **********************/
-#define to_edacpci(k) container_of(k, struct edac_pci_ctl_info, kobj)
-#define to_edacpci_attr(a) container_of(a, struct edac_pci_attr, attr)
-
-/* simple show/store functions for attributes */
-static ssize_t edac_pci_int_show(void *ptr, char *buffer)
-{
- int *value = ptr;
- return sprintf(buffer, "%d\n", *value);
-}
-
-static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
-{
- int *value = ptr;
-
- if (isdigit(*buffer))
- *value = simple_strtoul(buffer, NULL, 0);
-
- return count;
-}
-
-struct edac_pci_dev_attribute {
- struct attribute attr;
- void *value;
- ssize_t(*show) (void *, char *);
- ssize_t(*store) (void *, const char *, size_t);
-};
-
-/* Set of show/store abstract level functions for PCI Parity object */
-static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
-{
- struct edac_pci_dev_attribute *edac_pci_dev;
- edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
-
- if (edac_pci_dev->show)
- return edac_pci_dev->show(edac_pci_dev->value, buffer);
- return -EIO;
-}
-
-static ssize_t edac_pci_dev_store(struct kobject *kobj,
- struct attribute *attr, const char *buffer,
- size_t count)
-{
- struct edac_pci_dev_attribute *edac_pci_dev;
- edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
-
-	if (edac_pci_dev->store)
- return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
- return -EIO;
-}
-
-static const struct sysfs_ops edac_pci_sysfs_ops = {
- .show = edac_pci_dev_show,
- .store = edac_pci_dev_store
-};
-
-#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \
-static struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .value = &_name, \
- .show = _show, \
- .store = _store, \
-};
-
-#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store) \
-static struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .value = _data, \
- .show = _show, \
- .store = _store, \
-};
-
-/* PCI Parity control files */
-EDAC_PCI_ATTR(check_pci_errors, S_IRUGO | S_IWUSR, edac_pci_int_show,
- edac_pci_int_store);
-EDAC_PCI_ATTR(edac_pci_log_pe, S_IRUGO | S_IWUSR, edac_pci_int_show,
- edac_pci_int_store);
-EDAC_PCI_ATTR(edac_pci_log_npe, S_IRUGO | S_IWUSR, edac_pci_int_show,
- edac_pci_int_store);
-EDAC_PCI_ATTR(edac_pci_panic_on_pe, S_IRUGO | S_IWUSR, edac_pci_int_show,
- edac_pci_int_store);
-EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
-EDAC_PCI_ATTR(pci_nonparity_count, S_IRUGO, edac_pci_int_show, NULL);
-
-/* Base Attributes of the memory ECC object */
-static struct edac_pci_dev_attribute *edac_pci_attr[] = {
- &edac_pci_attr_check_pci_errors,
- &edac_pci_attr_edac_pci_log_pe,
- &edac_pci_attr_edac_pci_log_npe,
- &edac_pci_attr_edac_pci_panic_on_pe,
- &edac_pci_attr_pci_parity_count,
- &edac_pci_attr_pci_nonparity_count,
- NULL,
-};
-
-/*
- * edac_pci_release_main_kobj
- *
- * This release function is called when the reference count to the
- * passed kobj goes to zero.
- *
- * This kobj is the 'main' kobject that EDAC PCI instances
- * link to, and thus provide for proper nesting counts
- */
-static void edac_pci_release_main_kobj(struct kobject *kobj)
-{
- debugf0("%s() here to module_put(THIS_MODULE)\n", __func__);
-
- kfree(kobj);
-
- /* last reference to top EDAC PCI kobject has been removed,
- * NOW release our ref count on the core module
- */
- module_put(THIS_MODULE);
-}
-
-/* ktype struct for the EDAC PCI main kobj */
-static struct kobj_type ktype_edac_pci_main_kobj = {
- .release = edac_pci_release_main_kobj,
- .sysfs_ops = &edac_pci_sysfs_ops,
- .default_attrs = (struct attribute **)edac_pci_attr,
-};
-
-/**
- * edac_pci_main_kobj_setup()
- *
- * setup the sysfs for EDAC PCI attributes
- * assumes edac_subsys has already been initialized
- */
-static int edac_pci_main_kobj_setup(void)
-{
- int err;
- struct bus_type *edac_subsys;
-
- debugf0("%s()\n", __func__);
-
- /* check and count if we have already created the main kobject */
- if (atomic_inc_return(&edac_pci_sysfs_refcount) != 1)
- return 0;
-
- /* First time, so create the main kobject and its
- * controls and attributes
- */
- edac_subsys = edac_get_sysfs_subsys();
- if (edac_subsys == NULL) {
- debugf1("%s() no edac_subsys\n", __func__);
- err = -ENODEV;
- goto decrement_count_fail;
- }
-
- /* Bump the reference count on this module to ensure the
- * module isn't unloaded until we deconstruct the top
- * level main kobj for EDAC PCI
- */
- if (!try_module_get(THIS_MODULE)) {
- debugf1("%s() try_module_get() failed\n", __func__);
- err = -ENODEV;
- goto mod_get_fail;
- }
-
- edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
- if (!edac_pci_top_main_kobj) {
- debugf1("Failed to allocate\n");
- err = -ENOMEM;
- goto kzalloc_fail;
- }
-
- /* Instantiate the pci object */
- err = kobject_init_and_add(edac_pci_top_main_kobj,
- &ktype_edac_pci_main_kobj,
- &edac_subsys->dev_root->kobj, "pci");
- if (err) {
- debugf1("Failed to register '.../edac/pci'\n");
- goto kobject_init_and_add_fail;
- }
-
- /* From this point on, the top level kobject for EDAC PCI
- * must be released via edac_pci_main_kobj_teardown(),
- * so that its resources are cleaned up properly
- */
- kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD);
- debugf1("Registered '.../edac/pci' kobject\n");
-
- return 0;
-
- /* Error unwind stack */
-kobject_init_and_add_fail:
- kfree(edac_pci_top_main_kobj);
-
-kzalloc_fail:
- module_put(THIS_MODULE);
-
-mod_get_fail:
- edac_put_sysfs_subsys();
-
-decrement_count_fail:
- /* if we are on this error exit path, there is nothing to tear down */
- atomic_dec(&edac_pci_sysfs_refcount);
-
- return err;
-}
-
-/*
- * edac_pci_main_kobj_teardown()
- *
- * if no longer linked (needed) remove the top level EDAC PCI
- * kobject with its controls and attributes
- */
-static void edac_pci_main_kobj_teardown(void)
-{
- debugf0("%s()\n", __func__);
-
- /* Decrement the count and, only if no more controller instances
- * are connected, perform the unregistration of the top level
- * main kobj
- */
- if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
- debugf0("%s() called kobject_put on main kobj\n",
- __func__);
- kobject_put(edac_pci_top_main_kobj);
- }
- edac_put_sysfs_subsys();
-}
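-
-/* Reference counting note: each successful edac_pci_main_kobj_setup()
- * bumps edac_pci_sysfs_refcount and each edac_pci_main_kobj_teardown()
- * drops it.  Only the first setup actually creates the 'pci' kobject
- * and only the last teardown puts it, so the main kobject stays alive
- * exactly as long as at least one EDAC PCI instance is registered.
- */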
-
-/*
- * edac_pci_create_sysfs
- *
- * Create the controls/attributes for the specified EDAC PCI device
- */
-int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
-{
- int err;
- struct kobject *edac_kobj = &pci->kobj;
-
- debugf0("%s() idx=%d\n", __func__, pci->pci_idx);
-
- /* create the top main EDAC PCI kobject, IF needed */
- err = edac_pci_main_kobj_setup();
- if (err)
- return err;
-
- /* Create this instance's kobject under the MAIN kobject */
- err = edac_pci_create_instance_kobj(pci, pci->pci_idx);
- if (err)
- goto unregister_cleanup;
-
- err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK);
- if (err) {
- debugf0("%s() sysfs_create_link() returned err= %d\n",
- __func__, err);
- goto symlink_fail;
- }
-
- return 0;
-
- /* Error unwind stack */
-symlink_fail:
- edac_pci_unregister_sysfs_instance_kobj(pci);
-
-unregister_cleanup:
- edac_pci_main_kobj_teardown();
-
- return err;
-}
-
-/*
- * edac_pci_remove_sysfs
- *
- * remove the controls and attributes for this EDAC PCI device
- */
-void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
-{
- debugf0("%s() index=%d\n", __func__, pci->pci_idx);
-
- /* Remove the symlink */
- sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK);
-
- /* remove this PCI instance's sysfs entries */
- edac_pci_unregister_sysfs_instance_kobj(pci);
-
- /* Call the main unregister function, which will determine
- * if this 'pci' is the last instance.
- * If it is, the main kobject will be unregistered as a result
- */
- debugf0("%s() calling edac_pci_main_kobj_teardown()\n", __func__);
- edac_pci_main_kobj_teardown();
-}
-
-/************************ PCI error handling *************************/
-static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
-{
- int where;
- u16 status;
-
- where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
- pci_read_config_word(dev, where, &status);
-
- /* If we get back 0xFFFF then we must suspect that the card has been
- * pulled but the Linux PCI layer has not yet finished cleaning up.
- * We don't want to report on such devices
- */
-
- if (status == 0xFFFF) {
- u32 sanity;
-
- pci_read_config_dword(dev, 0, &sanity);
-
- if (sanity == 0xFFFFFFFF)
- return 0;
- }
-
- status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_PARITY;
-
- if (status)
- /* reset only the bits we are interested in */
- pci_write_config_word(dev, where, status);
-
- return status;
-}
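-
-/* Note: the bits kept above are the PCI status register's Detected
- * Parity Error, Signaled System Error and Master Data Parity Error
- * flags.  They are write-one-to-clear, which is why the masked value
- * is written straight back; e.g. a status of
- * (PCI_STATUS_DETECTED_PARITY | PCI_STATUS_PARITY) is cleared by
- * writing that same value to the (secondary) status register.
- */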
-
-
-/* Clear any PCI parity errors logged by this device. */
-static void edac_pci_dev_parity_clear(struct pci_dev *dev)
-{
- u8 header_type;
-
- get_pci_parity_status(dev, 0);
-
- /* read the device TYPE, looking for bridges */
- pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
- get_pci_parity_status(dev, 1);
-}
-
-/*
- * PCI Parity polling
- *
- * Function to retrieve the current parity status
- * and decode it
- *
- */
-static void edac_pci_dev_parity_test(struct pci_dev *dev)
-{
- unsigned long flags;
- u16 status;
- u8 header_type;
-
- /* stop any interrupts until we can acquire the status */
- local_irq_save(flags);
-
- /* read the STATUS register on this device */
- status = get_pci_parity_status(dev, 0);
-
- /* read the device TYPE, looking for bridges */
- pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-
- local_irq_restore(flags);
-
- debugf4("PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev));
-
- /* check the status reg for errors on boards NOT marked as broken
- * if broken, we cannot trust any of the status bits
- */
- if (status && !dev->broken_parity_status) {
- if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
- edac_printk(KERN_CRIT, EDAC_PCI,
- "Signaled System Error on %s\n",
- pci_name(dev));
- atomic_inc(&pci_nonparity_count);
- }
-
- if (status & (PCI_STATUS_PARITY)) {
- edac_printk(KERN_CRIT, EDAC_PCI,
- "Master Data Parity Error on %s\n",
- pci_name(dev));
-
- atomic_inc(&pci_parity_count);
- }
-
- if (status & (PCI_STATUS_DETECTED_PARITY)) {
- edac_printk(KERN_CRIT, EDAC_PCI,
- "Detected Parity Error on %s\n",
- pci_name(dev));
-
- atomic_inc(&pci_parity_count);
- }
- }
-
-
- debugf4("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev_name(&dev->dev));
-
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
- /* On bridges, need to examine secondary status register */
- status = get_pci_parity_status(dev, 1);
-
- debugf4("PCI SEC_STATUS= 0x%04x %s\n", status, dev_name(&dev->dev));
-
- /* check the secondary status reg for errors,
- * on NOT broken boards
- */
- if (status && !dev->broken_parity_status) {
- if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
- edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
- "Signaled System Error on %s\n",
- pci_name(dev));
- atomic_inc(&pci_nonparity_count);
- }
-
- if (status & (PCI_STATUS_PARITY)) {
- edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
- "Master Data Parity Error on "
- "%s\n", pci_name(dev));
-
- atomic_inc(&pci_parity_count);
- }
-
- if (status & (PCI_STATUS_DETECTED_PARITY)) {
- edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
- "Detected Parity Error on %s\n",
- pci_name(dev));
-
- atomic_inc(&pci_parity_count);
- }
- }
- }
-}
-
-/* reduce some complexity in definition of the iterator */
-typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
-
-/*
- * pci_dev parity list iterator
- * Scan the PCI device list for one pass, looking for SERRORs,
- * Master Parity ERRORs, or Parity ERRORs on primary or secondary devices
- */
-static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
-{
- struct pci_dev *dev = NULL;
-
- /* Request the next PCI device from the kernel, if any;
- * pci_get_device() keeps its reference count bumped while
- * we are looking at it, until we are done with it
- */
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- fn(dev);
- }
-}
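-
-/* Note: pci_get_device() releases the reference it took on the device
- * passed back in as 'dev', so this single pass walks the whole device
- * list without leaking references.  It may also sleep, which is why
- * the caller must not run this scan with interrupts disabled.
- */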
-
-/*
- * edac_pci_do_parity_check
- *
- * performs the actual PCI parity check operation
- */
-void edac_pci_do_parity_check(void)
-{
- int before_count;
-
- debugf3("%s()\n", __func__);
-
- /* if policy has PCI check off, leave now */
- if (!check_pci_errors)
- return;
-
- before_count = atomic_read(&pci_parity_count);
-
- /* scan all PCI devices looking for a Parity Error on devices and
- * bridges.
- * The iterator calls pci_get_device() which might sleep, thus
- * we cannot disable interrupts in this scan.
- */
- edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
-
- /* Only if operator has selected panic on PCI Error */
- if (edac_pci_get_panic_on_pe()) {
- /* If the count is different 'after' from 'before' */
- if (before_count != atomic_read(&pci_parity_count))
- panic("EDAC: PCI Parity Error");
- }
-}
-
-/*
- * edac_pci_clear_parity_errors
- *
- * function to perform an iteration over the PCI devices
- * and clear their current status
- */
-void edac_pci_clear_parity_errors(void)
-{
- /* Clear any PCI bus parity errors that devices initially have logged
- * in their registers.
- */
- edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
-}
-
-/*
- * edac_pci_handle_pe
- *
- * Called to handle a PARITY ERROR event
- */
-void edac_pci_handle_pe(struct edac_pci_ctl_info *pci, const char *msg)
-{
-
- /* global PE counter incremented by edac_pci_do_parity_check() */
- atomic_inc(&pci->counters.pe_count);
-
- if (edac_pci_get_log_pe())
- edac_pci_printk(pci, KERN_WARNING,
- "Parity Error ctl: %s %d: %s\n",
- pci->ctl_name, pci->pci_idx, msg);
-
- /*
- * poke all PCI devices and see which one is the troublemaker
- * panic() is called if set
- */
- edac_pci_do_parity_check();
-}
-EXPORT_SYMBOL_GPL(edac_pci_handle_pe);
-
-
-/*
- * edac_pci_handle_npe
- *
- * Called to handle a NON-PARITY ERROR event
- */
-void edac_pci_handle_npe(struct edac_pci_ctl_info *pci, const char *msg)
-{
-
- /* global NPE counter incremented by edac_pci_do_parity_check() */
- atomic_inc(&pci->counters.npe_count);
-
- if (edac_pci_get_log_npe())
- edac_pci_printk(pci, KERN_WARNING,
- "Non-Parity Error ctl: %s %d: %s\n",
- pci->ctl_name, pci->pci_idx, msg);
-
- /*
- * poke all PCI devices and see which one is the troublemaker
- * panic() is called if set
- */
- edac_pci_do_parity_check();
-}
-EXPORT_SYMBOL_GPL(edac_pci_handle_npe);
-
-/*
- * Define the PCI parameter to the module
- */
-module_param(check_pci_errors, int, 0644);
-MODULE_PARM_DESC(check_pci_errors,
- "Check for PCI bus parity errors: 0=off 1=on");
-module_param(edac_pci_panic_on_pe, int, 0644);
-MODULE_PARM_DESC(edac_pci_panic_on_pe,
- "Panic on PCI Bus Parity error: 0=off 1=on");
-
-#endif /* CONFIG_PCI */
diff --git a/ANDROID_3.4.5/drivers/edac/edac_stub.c b/ANDROID_3.4.5/drivers/edac/edac_stub.c
deleted file mode 100644
index 6c86f6e5..00000000
--- a/ANDROID_3.4.5/drivers/edac/edac_stub.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * common EDAC components that must be in kernel
- *
- * Author: Dave Jiang <djiang@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc.
- * 2010 (c) Advanced Micro Devices Inc.
- * Borislav Petkov <borislav.petkov@amd.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
- */
-#include <linux/module.h>
-#include <linux/edac.h>
-#include <linux/atomic.h>
-#include <linux/device.h>
-#include <asm/edac.h>
-
-int edac_op_state = EDAC_OPSTATE_INVAL;
-EXPORT_SYMBOL_GPL(edac_op_state);
-
-atomic_t edac_handlers = ATOMIC_INIT(0);
-EXPORT_SYMBOL_GPL(edac_handlers);
-
-int edac_err_assert = 0;
-EXPORT_SYMBOL_GPL(edac_err_assert);
-
-static atomic_t edac_subsys_valid = ATOMIC_INIT(0);
-
-/*
- * called to determine if there is an EDAC driver interested in
- * knowing an event (such as NMI) occurred
- */
-int edac_handler_set(void)
-{
- if (edac_op_state == EDAC_OPSTATE_POLL)
- return 0;
-
- return atomic_read(&edac_handlers);
-}
-EXPORT_SYMBOL_GPL(edac_handler_set);
-
-/*
- * handler for NMI type of interrupts to assert error
- */
-void edac_atomic_assert_error(void)
-{
- edac_err_assert++;
-}
-EXPORT_SYMBOL_GPL(edac_atomic_assert_error);
-
-/*
- * sysfs object: /sys/devices/system/edac
- * need to export to other files
- */
-struct bus_type edac_subsys = {
- .name = "edac",
- .dev_name = "edac",
-};
-EXPORT_SYMBOL_GPL(edac_subsys);
-
-/* return pointer to the 'edac' node in sysfs */
-struct bus_type *edac_get_sysfs_subsys(void)
-{
- int err = 0;
-
- if (atomic_read(&edac_subsys_valid))
- goto out;
-
- /* create the /sys/devices/system/edac directory */
- err = subsys_system_register(&edac_subsys, NULL);
- if (err) {
- printk(KERN_ERR "Error registering toplevel EDAC sysfs dir\n");
- return NULL;
- }
-
-out:
- atomic_inc(&edac_subsys_valid);
- return &edac_subsys;
-}
-EXPORT_SYMBOL_GPL(edac_get_sysfs_subsys);
-
-void edac_put_sysfs_subsys(void)
-{
- /* last user unregisters it */
- if (atomic_dec_and_test(&edac_subsys_valid))
- bus_unregister(&edac_subsys);
-}
-EXPORT_SYMBOL_GPL(edac_put_sysfs_subsys);
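-
-/* Usage note: edac_get_sysfs_subsys()/edac_put_sysfs_subsys() form a
- * get/put pair: the first get registers the 'edac' subsystem, creating
- * /sys/devices/system/edac, and the last put unregisters it again.
- * The EDAC PCI sysfs code parents its 'pci' kobject on this directory.
- */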
diff --git a/ANDROID_3.4.5/drivers/edac/i3000_edac.c b/ANDROID_3.4.5/drivers/edac/i3000_edac.c
deleted file mode 100644
index 277689a6..00000000
--- a/ANDROID_3.4.5/drivers/edac/i3000_edac.c
+++ /dev/null
@@ -1,553 +0,0 @@
-/*
- * Intel 3000/3010 Memory Controller kernel module
- * Copyright (C) 2007 Akamai Technologies, Inc.
- * Shamelessly copied from:
- * Intel D82875P Memory Controller kernel module
- * (C) 2003 Linux Networx (http://lnxi.com)
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/edac.h>
-#include "edac_core.h"
-
-#define I3000_REVISION "1.1"
-
-#define EDAC_MOD_STR "i3000_edac"
-
-#define I3000_RANKS 8
-#define I3000_RANKS_PER_CHANNEL 4
-#define I3000_CHANNELS 2
-
-/* Intel 3000 register addresses - device 0 function 0 - DRAM Controller */
-
-#define I3000_MCHBAR 0x44 /* MCH Memory Mapped Register BAR */
-#define I3000_MCHBAR_MASK 0xffffc000
-#define I3000_MMR_WINDOW_SIZE 16384
-
-#define I3000_EDEAP 0x70 /* Extended DRAM Error Address Pointer (8b)
- *
- * 7:1 reserved
- * 0 bit 32 of address
- */
-#define I3000_DEAP 0x58 /* DRAM Error Address Pointer (32b)
- *
- * 31:7 address
- * 6:1 reserved
- * 0 Error channel 0/1
- */
-#define I3000_DEAP_GRAIN (1 << 7)
-
-/*
- * Helper functions to decode the DEAP/EDEAP hardware registers.
- *
- * The type promotion here is deliberate; we're deriving an
- * unsigned long pfn and offset from hardware regs which are u8/u32.
- */
-
-static inline unsigned long deap_pfn(u8 edeap, u32 deap)
-{
- deap >>= PAGE_SHIFT;
- deap |= (edeap & 1) << (32 - PAGE_SHIFT);
- return deap;
-}
-
-static inline unsigned long deap_offset(u32 deap)
-{
- return deap & ~(I3000_DEAP_GRAIN - 1) & ~PAGE_MASK;
-}
-
-static inline int deap_channel(u32 deap)
-{
- return deap & 1;
-}
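-
-/* Illustrative decode, assuming 4 KiB pages (PAGE_SHIFT == 12):
- * with edeap = 0x01 and deap = 0x12345681,
- *    deap_channel() -> 1        (bit 0 of DEAP)
- *    deap_pfn()     -> 0x112345 (DEAP >> 12, EDEAP bit 0 becoming pfn bit 20)
- *    deap_offset()  -> 0x680    (offset within the page, 128-byte grain)
- */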
-
-#define I3000_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
- *
- * 7:0 DRAM ECC Syndrome
- */
-
-#define I3000_ERRSTS 0xc8 /* Error Status Register (16b)
- *
- * 15:12 reserved
- * 11 MCH Thermal Sensor Event
- * for SMI/SCI/SERR
- * 10 reserved
- * 9 LOCK to non-DRAM Memory Flag (LCKF)
- * 8 Received Refresh Timeout Flag (RRTOF)
- * 7:2 reserved
- * 1 Multi-bit DRAM ECC Error Flag (DMERR)
- * 0 Single-bit DRAM ECC Error Flag (DSERR)
- */
-#define I3000_ERRSTS_BITS 0x0b03 /* bits which indicate errors */
-#define I3000_ERRSTS_UE 0x0002
-#define I3000_ERRSTS_CE 0x0001
-
-#define I3000_ERRCMD 0xca /* Error Command (16b)
- *
- * 15:12 reserved
- * 11 SERR on MCH Thermal Sensor Event
- * (TSESERR)
- * 10 reserved
- * 9 SERR on LOCK to non-DRAM Memory
- * (LCKERR)
- * 8 SERR on DRAM Refresh Timeout
- * (DRTOERR)
- * 7:2 reserved
- * 1 SERR Multi-Bit DRAM ECC Error
- * (DMERR)
- * 0 SERR on Single-Bit ECC Error
- * (DSERR)
- */
-
-/* Intel MMIO register space - device 0 function 0 - MMR space */
-
-#define I3000_DRB_SHIFT 25 /* 32MiB grain */
-
-#define I3000_C0DRB 0x100 /* Channel 0 DRAM Rank Boundary (8b x 4)
- *
- * 7:0 Channel 0 DRAM Rank Boundary Address
- */
-#define I3000_C1DRB 0x180 /* Channel 1 DRAM Rank Boundary (8b x 4)
- *
- * 7:0 Channel 1 DRAM Rank Boundary Address
- */
-
-#define I3000_C0DRA 0x108 /* Channel 0 DRAM Rank Attribute (8b x 2)
- *
- * 7 reserved
- * 6:4 DRAM odd Rank Attribute
- * 3 reserved
- * 2:0 DRAM even Rank Attribute
- *
- * Each attribute defines the page
- * size of the corresponding rank:
- * 000: unpopulated
- * 001: reserved
- * 010: 4 KB
- * 011: 8 KB
- * 100: 16 KB
- * Others: reserved
- */
-#define I3000_C1DRA 0x188 /* Channel 1 DRAM Rank Attribute (8b x 2) */
-
-static inline unsigned char odd_rank_attrib(unsigned char dra)
-{
- return (dra & 0x70) >> 4;
-}
-
-static inline unsigned char even_rank_attrib(unsigned char dra)
-{
- return dra & 0x07;
-}
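-
-/* Illustrative example: a DRA byte of 0x23 decodes as
- *    odd_rank_attrib(0x23)  == 2 -> 4 KB page size for the odd rank
- *    even_rank_attrib(0x23) == 3 -> 8 KB page size for the even rank
- * per the attribute encoding documented above.
- */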
-
-#define I3000_C0DRC0 0x120 /* DRAM Controller Mode 0 (32b)
- *
- * 31:30 reserved
- * 29 Initialization Complete (IC)
- * 28:11 reserved
- * 10:8 Refresh Mode Select (RMS)
- * 7 reserved
- * 6:4 Mode Select (SMS)
- * 3:2 reserved
- * 1:0 DRAM Type (DT)
- */
-
-#define I3000_C0DRC1 0x124 /* DRAM Controller Mode 1 (32b)
- *
- * 31 Enhanced Addressing Enable (ENHADE)
- * 30:0 reserved
- */
-
-enum i3000p_chips {
- I3000 = 0,
-};
-
-struct i3000_dev_info {
- const char *ctl_name;
-};
-
-struct i3000_error_info {
- u16 errsts;
- u8 derrsyn;
- u8 edeap;
- u32 deap;
- u16 errsts2;
-};
-
-static const struct i3000_dev_info i3000_devs[] = {
- [I3000] = {
- .ctl_name = "i3000"},
-};
-
-static struct pci_dev *mci_pdev;
-static int i3000_registered = 1;
-static struct edac_pci_ctl_info *i3000_pci;
-
-static void i3000_get_error_info(struct mem_ctl_info *mci,
- struct i3000_error_info *info)
-{
- struct pci_dev *pdev;
-
- pdev = to_pci_dev(mci->dev);
-
- /*
- * This is a mess because there is no atomic way to read all the
- * registers at once, and a CE can be overwritten by a UE
- * between reads.
- */
- pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts);
- if (!(info->errsts & I3000_ERRSTS_BITS))
- return;
- pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
- pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
- pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
- pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts2);
-
- /*
- * If the error is the same for both reads then the first set
- * of reads is valid. If there is a change then there is a CE
- * with no info and the second set of reads is valid and
- * should be UE info.
- */
- if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
- pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
- pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
- pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
- }
-
- /*
- * Clear any error bits.
- * (Yes, we really clear bits by writing 1 to them.)
- */
- pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
- I3000_ERRSTS_BITS);
-}
-
-static int i3000_process_error_info(struct mem_ctl_info *mci,
- struct i3000_error_info *info,
- int handle_errors)
-{
- int row, multi_chan, channel;
- unsigned long pfn, offset;
-
- multi_chan = mci->csrows[0].nr_channels - 1;
-
- if (!(info->errsts & I3000_ERRSTS_BITS))
- return 0;
-
- if (!handle_errors)
- return 1;
-
- if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
- info->errsts = info->errsts2;
- }
-
- pfn = deap_pfn(info->edeap, info->deap);
- offset = deap_offset(info->deap);
- channel = deap_channel(info->deap);
-
- row = edac_mc_find_csrow_by_page(mci, pfn);
-
- if (info->errsts & I3000_ERRSTS_UE)
- edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
- else
- edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
- multi_chan ? channel : 0, "i3000 CE");
-
- return 1;
-}
-
-static void i3000_check(struct mem_ctl_info *mci)
-{
- struct i3000_error_info info;
-
- debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
- i3000_get_error_info(mci, &info);
- i3000_process_error_info(mci, &info, 1);
-}
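-
-/* i3000_check() is installed as mci->edac_check below; when the EDAC
- * core runs in polling mode it is expected to call this periodically,
- * so each invocation harvests and reports whatever errors were latched
- * since the previous poll.
- */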
-
-static int i3000_is_interleaved(const unsigned char *c0dra,
- const unsigned char *c1dra,
- const unsigned char *c0drb,
- const unsigned char *c1drb)
-{
- int i;
-
- /*
- * If the channels aren't populated identically then
- * we're not interleaved.
- */
- for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++)
- if (odd_rank_attrib(c0dra[i]) != odd_rank_attrib(c1dra[i]) ||
- even_rank_attrib(c0dra[i]) !=
- even_rank_attrib(c1dra[i]))
- return 0;
-
- /*
- * If the rank boundaries for the two channels are different
- * then we're not interleaved.
- */
- for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++)
- if (c0drb[i] != c1drb[i])
- return 0;
-
- return 1;
-}
-
-static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
-{
- int rc;
- int i;
- struct mem_ctl_info *mci = NULL;
- unsigned long last_cumul_size;
- int interleaved, nr_channels;
- unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
- unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
- unsigned char *c0drb = drb, *c1drb = &drb[I3000_RANKS_PER_CHANNEL];
- unsigned long mchbar;
- void __iomem *window;
-
- debugf0("MC: %s()\n", __func__);
-
- pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *)&mchbar);
- mchbar &= I3000_MCHBAR_MASK;
- window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE);
- if (!window) {
- printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
- mchbar);
- return -ENODEV;
- }
-
- c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */
- c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */
- c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */
- c1dra[1] = readb(window + I3000_C1DRA + 1); /* ranks 2,3 */
-
- for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++) {
- c0drb[i] = readb(window + I3000_C0DRB + i);
- c1drb[i] = readb(window + I3000_C1DRB + i);
- }
-
- iounmap(window);
-
- /*
- * Figure out how many channels we have.
- *
- * If we have what the datasheet calls "asymmetric channels"
- * (essentially the same as what was called "virtual single
- * channel mode" in the i82875) then it's a single channel as
- * far as EDAC is concerned.
- */
- interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
- nr_channels = interleaved ? 2 : 1;
- mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0);
- if (!mci)
- return -ENOMEM;
-
- debugf3("MC: %s(): init mci\n", __func__);
-
- mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_DDR2;
-
- mci->edac_ctl_cap = EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_SECDED;
-
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I3000_REVISION;
- mci->ctl_name = i3000_devs[dev_idx].ctl_name;
- mci->dev_name = pci_name(pdev);
- mci->edac_check = i3000_check;
- mci->ctl_page_to_phys = NULL;
-
- /*
- * The dram rank boundary (DRB) reg values are boundary addresses
- * for each DRAM rank with a granularity of 32MB. DRB regs are
- * cumulative; the last one will contain the total memory
- * contained in all ranks.
- *
- * If we're in interleaved mode then we're only walking through
- * the ranks of controller 0, so we double all the values we see.
- */
- for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) {
- u8 value;
- u32 cumul_size;
- struct csrow_info *csrow = &mci->csrows[i];
-
- value = drb[i];
- cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
- if (interleaved)
- cumul_size <<= 1;
- debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
- __func__, i, cumul_size);
- if (cumul_size == last_cumul_size) {
- csrow->mtype = MEM_EMPTY;
- continue;
- }
-
- csrow->first_page = last_cumul_size;
- csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
- last_cumul_size = cumul_size;
- csrow->grain = I3000_DEAP_GRAIN;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
- }
-
- /*
- * Clear any error bits.
- * (Yes, we really clear bits by writing 1 to them.)
- */
- pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
- I3000_ERRSTS_BITS);
-
- rc = -ENODEV;
- if (edac_mc_add_mc(mci)) {
- debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
- goto fail;
- }
-
- /* allocating generic PCI control info */
- i3000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
- if (!i3000_pci) {
- printk(KERN_WARNING
- "%s(): Unable to create PCI control\n",
- __func__);
- printk(KERN_WARNING
- "%s(): PCI error report via EDAC not setup\n",
- __func__);
- }
-
- /* get this far and it's successful */
- debugf3("MC: %s(): success\n", __func__);
- return 0;
-
-fail:
- if (mci)
- edac_mc_free(mci);
-
- return rc;
-}
-
-/* returns 0 on success, or negative on error */
-static int __devinit i3000_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int rc;
-
- debugf0("MC: %s()\n", __func__);
-
- if (pci_enable_device(pdev) < 0)
- return -EIO;
-
- rc = i3000_probe1(pdev, ent->driver_data);
- if (!mci_pdev)
- mci_pdev = pci_dev_get(pdev);
-
- return rc;
-}
-
-static void __devexit i3000_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
-
- debugf0("%s()\n", __func__);
-
- if (i3000_pci)
- edac_pci_release_generic_ctl(i3000_pci);
-
- mci = edac_mc_del_mc(&pdev->dev);
- if (!mci)
- return;
-
- edac_mc_free(mci);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(i3000_pci_tbl) = {
- {
- PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I3000},
- {
- 0,
- } /* 0 terminated list. */
-};
-
-MODULE_DEVICE_TABLE(pci, i3000_pci_tbl);
-
-static struct pci_driver i3000_driver = {
- .name = EDAC_MOD_STR,
- .probe = i3000_init_one,
- .remove = __devexit_p(i3000_remove_one),
- .id_table = i3000_pci_tbl,
-};
-
-static int __init i3000_init(void)
-{
- int pci_rc;
-
- debugf3("MC: %s()\n", __func__);
-
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- pci_rc = pci_register_driver(&i3000_driver);
- if (pci_rc < 0)
- goto fail0;
-
- if (!mci_pdev) {
- i3000_registered = 0;
- mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_3000_HB, NULL);
- if (!mci_pdev) {
- debugf0("i3000 pci_get_device fail\n");
- pci_rc = -ENODEV;
- goto fail1;
- }
-
- pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl);
- if (pci_rc < 0) {
- debugf0("i3000 init fail\n");
- pci_rc = -ENODEV;
- goto fail1;
- }
- }
-
- return 0;
-
-fail1:
- pci_unregister_driver(&i3000_driver);
-
-fail0:
- if (mci_pdev)
- pci_dev_put(mci_pdev);
-
- return pci_rc;
-}
-
-static void __exit i3000_exit(void)
-{
- debugf3("MC: %s()\n", __func__);
-
- pci_unregister_driver(&i3000_driver);
- if (!i3000_registered) {
- i3000_remove_one(mci_pdev);
- pci_dev_put(mci_pdev);
- }
-}
-
-module_init(i3000_init);
-module_exit(i3000_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Akamai Technologies Arthur Ulfeldt/Jason Uhlenkott");
-MODULE_DESCRIPTION("MC support for Intel 3000 memory hub controllers");
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/ANDROID_3.4.5/drivers/edac/i3200_edac.c b/ANDROID_3.4.5/drivers/edac/i3200_edac.c
deleted file mode 100644
index 046808c6..00000000
--- a/ANDROID_3.4.5/drivers/edac/i3200_edac.c
+++ /dev/null
@@ -1,528 +0,0 @@
-/*
- * Intel 3200/3210 Memory Controller kernel module
- * Copyright (C) 2008-2009 Akamai Technologies, Inc.
- * Portions by Hitoshi Mitake <h.mitake@gmail.com>.
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/edac.h>
-#include <linux/io.h>
-#include "edac_core.h"
-
-#include <asm-generic/io-64-nonatomic-lo-hi.h>
-
-#define I3200_REVISION "1.1"
-
-#define EDAC_MOD_STR "i3200_edac"
-
-#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0
-
-#define I3200_RANKS 8
-#define I3200_RANKS_PER_CHANNEL 4
-#define I3200_CHANNELS 2
-
-/* Intel 3200 register addresses - device 0 function 0 - DRAM Controller */
-
-#define I3200_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
-#define I3200_MCHBAR_HIGH 0x4c
-#define I3200_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
-#define I3200_MMR_WINDOW_SIZE 16384
-
-#define I3200_TOM 0xa0 /* Top of Memory (16b)
- *
- * 15:10 reserved
- * 9:0 total populated physical memory
- */
-#define I3200_TOM_MASK 0x3ff /* bits 9:0 */
-#define I3200_TOM_SHIFT 26 /* 64MiB grain */
-
-#define I3200_ERRSTS 0xc8 /* Error Status Register (16b)
- *
- * 15 reserved
- * 14 Isochronous TBWRR Run Behind FIFO Full
- * (ITCV)
- * 13 Isochronous TBWRR Run Behind FIFO Put
- * (ITSTV)
- * 12 reserved
- * 11 MCH Thermal Sensor Event
- * for SMI/SCI/SERR (GTSE)
- * 10 reserved
- * 9 LOCK to non-DRAM Memory Flag (LCKF)
- * 8 reserved
- * 7 DRAM Throttle Flag (DTF)
- * 6:2 reserved
- * 1 Multi-bit DRAM ECC Error Flag (DMERR)
- * 0 Single-bit DRAM ECC Error Flag (DSERR)
- */
-#define I3200_ERRSTS_UE 0x0002
-#define I3200_ERRSTS_CE 0x0001
-#define I3200_ERRSTS_BITS (I3200_ERRSTS_UE | I3200_ERRSTS_CE)
-
-
-/* Intel MMIO register space - device 0 function 0 - MMR space */
-
-#define I3200_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4)
- *
- * 15:10 reserved
- * 9:0 Channel 0 DRAM Rank Boundary Address
- */
-#define I3200_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */
-#define I3200_DRB_MASK 0x3ff /* bits 9:0 */
-#define I3200_DRB_SHIFT 26 /* 64MiB grain */
-
-#define I3200_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b)
- *
- * 63:48 Error Column Address (ERRCOL)
- * 47:32 Error Row Address (ERRROW)
- * 31:29 Error Bank Address (ERRBANK)
- * 28:27 Error Rank Address (ERRRANK)
- * 26:24 reserved
- * 23:16 Error Syndrome (ERRSYND)
- * 15: 2 reserved
- * 1 Multiple Bit Error Status (MERRSTS)
- * 0 Correctable Error Status (CERRSTS)
- */
-#define I3200_C1ECCERRLOG 0x680 /* Chan 1 ECC Error Log (64b) */
-#define I3200_ECCERRLOG_CE 0x1
-#define I3200_ECCERRLOG_UE 0x2
-#define I3200_ECCERRLOG_RANK_BITS 0x18000000
-#define I3200_ECCERRLOG_RANK_SHIFT 27
-#define I3200_ECCERRLOG_SYNDROME_BITS 0xff0000
-#define I3200_ECCERRLOG_SYNDROME_SHIFT 16
-#define I3200_CAPID0 0xe0 /* P.95 of spec for details */
-
-struct i3200_priv {
- void __iomem *window;
-};
-
-static int nr_channels;
-
-static int how_many_channels(struct pci_dev *pdev)
-{
- unsigned char capid0_8b; /* 8th byte of CAPID0 */
-
- pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
- if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
- debugf0("In single channel mode.\n");
- return 1;
- } else {
- debugf0("In dual channel mode.\n");
- return 2;
- }
-}
-
-static unsigned long eccerrlog_syndrome(u64 log)
-{
- return (log & I3200_ECCERRLOG_SYNDROME_BITS) >>
- I3200_ECCERRLOG_SYNDROME_SHIFT;
-}
-
-static int eccerrlog_row(int channel, u64 log)
-{
- u64 rank = ((log & I3200_ECCERRLOG_RANK_BITS) >>
- I3200_ECCERRLOG_RANK_SHIFT);
- return rank | (channel * I3200_RANKS_PER_CHANNEL);
-}
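-
-/* Illustrative example: for channel 1 and an ECCERRLOG value whose
- * rank field (bits 28:27) is 2, eccerrlog_row() returns
- * 2 | (1 * I3200_RANKS_PER_CHANNEL) = 6, the csrow index used when
- * the error is reported below.
- */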
-
-enum i3200_chips {
- I3200 = 0,
-};
-
-struct i3200_dev_info {
- const char *ctl_name;
-};
-
-struct i3200_error_info {
- u16 errsts;
- u16 errsts2;
- u64 eccerrlog[I3200_CHANNELS];
-};
-
-static const struct i3200_dev_info i3200_devs[] = {
- [I3200] = {
- .ctl_name = "i3200"
- },
-};
-
-static struct pci_dev *mci_pdev;
-static int i3200_registered = 1;
-
-
-static void i3200_clear_error_info(struct mem_ctl_info *mci)
-{
- struct pci_dev *pdev;
-
- pdev = to_pci_dev(mci->dev);
-
- /*
- * Clear any error bits.
- * (Yes, we really clear bits by writing 1 to them.)
- */
- pci_write_bits16(pdev, I3200_ERRSTS, I3200_ERRSTS_BITS,
- I3200_ERRSTS_BITS);
-}
-
-static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci,
- struct i3200_error_info *info)
-{
- struct pci_dev *pdev;
- struct i3200_priv *priv = mci->pvt_info;
- void __iomem *window = priv->window;
-
- pdev = to_pci_dev(mci->dev);
-
- /*
- * This is a mess because there is no atomic way to read all the
- * registers at once, and a CE can be overwritten by a UE
- * between reads.
- */
- pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts);
- if (!(info->errsts & I3200_ERRSTS_BITS))
- return;
-
- info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
- if (nr_channels == 2)
- info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
-
- pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts2);
-
- /*
- * If the error is the same for both reads then the first set
- * of reads is valid. If there is a change then there is a CE
- * with no info and the second set of reads is valid and
- * should be UE info.
- */
- if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
- info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
- if (nr_channels == 2)
- info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
- }
-
- i3200_clear_error_info(mci);
-}
-
-static void i3200_process_error_info(struct mem_ctl_info *mci,
- struct i3200_error_info *info)
-{
- int channel;
- u64 log;
-
- if (!(info->errsts & I3200_ERRSTS_BITS))
- return;
-
- if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
- info->errsts = info->errsts2;
- }
-
- for (channel = 0; channel < nr_channels; channel++) {
- log = info->eccerrlog[channel];
- if (log & I3200_ECCERRLOG_UE) {
- edac_mc_handle_ue(mci, 0, 0,
- eccerrlog_row(channel, log),
- "i3200 UE");
- } else if (log & I3200_ECCERRLOG_CE) {
- edac_mc_handle_ce(mci, 0, 0,
- eccerrlog_syndrome(log),
- eccerrlog_row(channel, log), 0,
- "i3200 CE");
- }
- }
-}
-
-static void i3200_check(struct mem_ctl_info *mci)
-{
- struct i3200_error_info info;
-
- debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
- i3200_get_and_clear_error_info(mci, &info);
- i3200_process_error_info(mci, &info);
-}
-
-
-static void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
-{
- union {
- u64 mchbar;
- struct {
- u32 mchbar_low;
- u32 mchbar_high;
- };
- } u;
- void __iomem *window;
-
- pci_read_config_dword(pdev, I3200_MCHBAR_LOW, &u.mchbar_low);
- pci_read_config_dword(pdev, I3200_MCHBAR_HIGH, &u.mchbar_high);
- u.mchbar &= I3200_MCHBAR_MASK;
-
- if (u.mchbar != (resource_size_t)u.mchbar) {
- printk(KERN_ERR
- "i3200: mmio space beyond accessible range (0x%llx)\n",
- (unsigned long long)u.mchbar);
- return NULL;
- }
-
- window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE);
- if (!window)
- printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n",
- (unsigned long long)u.mchbar);
-
- return window;
-}
-
-
-static void i3200_get_drbs(void __iomem *window,
- u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
-{
- int i;
-
- for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) {
- drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK;
- drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK;
- }
-}
-
-static bool i3200_is_stacked(struct pci_dev *pdev,
- u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
-{
- u16 tom;
-
- pci_read_config_word(pdev, I3200_TOM, &tom);
- tom &= I3200_TOM_MASK;
-
- return drbs[I3200_CHANNELS - 1][I3200_RANKS_PER_CHANNEL - 1] == tom;
-}
-
-static unsigned long drb_to_nr_pages(
- u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL], bool stacked,
- int channel, int rank)
-{
- int n;
-
- n = drbs[channel][rank];
- if (rank > 0)
- n -= drbs[channel][rank - 1];
- if (stacked && (channel == 1) &&
- drbs[channel][rank] == drbs[channel][I3200_RANKS_PER_CHANNEL - 1])
- n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1];
-
- n <<= (I3200_DRB_SHIFT - PAGE_SHIFT);
- return n;
-}
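-
-/* Illustrative example (non-stacked case): DRB values are cumulative
- * 64 MiB units, so drbs[0] = { 16, 32, 32, 32 } means rank 0 and
- * rank 1 each hold 16 * 64 MiB = 1 GiB, i.e. 16 << (I3200_DRB_SHIFT -
- * PAGE_SHIFT) = 262144 pages each with 4 KiB pages, while ranks 2 and
- * 3 are empty.
- */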
-
-static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
-{
- int rc;
- int i;
- struct mem_ctl_info *mci = NULL;
- unsigned long last_page;
- u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
- bool stacked;
- void __iomem *window;
- struct i3200_priv *priv;
-
- debugf0("MC: %s()\n", __func__);
-
- window = i3200_map_mchbar(pdev);
- if (!window)
- return -ENODEV;
-
- i3200_get_drbs(window, drbs);
- nr_channels = how_many_channels(pdev);
-
- mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS,
- nr_channels, 0);
- if (!mci)
- return -ENOMEM;
-
- debugf3("MC: %s(): init mci\n", __func__);
-
- mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_DDR2;
-
- mci->edac_ctl_cap = EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_SECDED;
-
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I3200_REVISION;
- mci->ctl_name = i3200_devs[dev_idx].ctl_name;
- mci->dev_name = pci_name(pdev);
- mci->edac_check = i3200_check;
- mci->ctl_page_to_phys = NULL;
- priv = mci->pvt_info;
- priv->window = window;
-
- stacked = i3200_is_stacked(pdev, drbs);
-
- /*
- * The dram rank boundary (DRB) reg values are boundary addresses
- * for each DRAM rank with a granularity of 64MB. DRB regs are
- * cumulative; the last one will contain the total memory
- * contained in all ranks.
- */
- last_page = -1UL;
- for (i = 0; i < mci->nr_csrows; i++) {
- unsigned long nr_pages;
- struct csrow_info *csrow = &mci->csrows[i];
-
- nr_pages = drb_to_nr_pages(drbs, stacked,
- i / I3200_RANKS_PER_CHANNEL,
- i % I3200_RANKS_PER_CHANNEL);
-
- if (nr_pages == 0) {
- csrow->mtype = MEM_EMPTY;
- continue;
- }
-
- csrow->first_page = last_page + 1;
- last_page += nr_pages;
- csrow->last_page = last_page;
- csrow->nr_pages = nr_pages;
-
- csrow->grain = nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
- }
-
- i3200_clear_error_info(mci);
-
- rc = -ENODEV;
- if (edac_mc_add_mc(mci)) {
- debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
- goto fail;
- }
-
- /* get this far and it's successful */
- debugf3("MC: %s(): success\n", __func__);
- return 0;
-
-fail:
- iounmap(window);
- if (mci)
- edac_mc_free(mci);
-
- return rc;
-}
-
-static int __devinit i3200_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int rc;
-
- debugf0("MC: %s()\n", __func__);
-
- if (pci_enable_device(pdev) < 0)
- return -EIO;
-
- rc = i3200_probe1(pdev, ent->driver_data);
- if (!mci_pdev)
- mci_pdev = pci_dev_get(pdev);
-
- return rc;
-}
-
-static void __devexit i3200_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
- struct i3200_priv *priv;
-
- debugf0("%s()\n", __func__);
-
- mci = edac_mc_del_mc(&pdev->dev);
- if (!mci)
- return;
-
- priv = mci->pvt_info;
- iounmap(priv->window);
-
- edac_mc_free(mci);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(i3200_pci_tbl) = {
- {
- PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I3200},
- {
- 0,
- } /* 0 terminated list. */
-};
-
-MODULE_DEVICE_TABLE(pci, i3200_pci_tbl);
-
-static struct pci_driver i3200_driver = {
- .name = EDAC_MOD_STR,
- .probe = i3200_init_one,
- .remove = __devexit_p(i3200_remove_one),
- .id_table = i3200_pci_tbl,
-};
-
-static int __init i3200_init(void)
-{
- int pci_rc;
-
- debugf3("MC: %s()\n", __func__);
-
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- pci_rc = pci_register_driver(&i3200_driver);
- if (pci_rc < 0)
- goto fail0;
-
- if (!mci_pdev) {
- i3200_registered = 0;
- mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_3200_HB, NULL);
- if (!mci_pdev) {
- debugf0("i3200 pci_get_device fail\n");
- pci_rc = -ENODEV;
- goto fail1;
- }
-
- pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl);
- if (pci_rc < 0) {
- debugf0("i3200 init fail\n");
- pci_rc = -ENODEV;
- goto fail1;
- }
- }
-
- return 0;
-
-fail1:
- pci_unregister_driver(&i3200_driver);
-
-fail0:
- if (mci_pdev)
- pci_dev_put(mci_pdev);
-
- return pci_rc;
-}
-
-static void __exit i3200_exit(void)
-{
- debugf3("MC: %s()\n", __func__);
-
- pci_unregister_driver(&i3200_driver);
- if (!i3200_registered) {
- i3200_remove_one(mci_pdev);
- pci_dev_put(mci_pdev);
- }
-}
-
-module_init(i3200_init);
-module_exit(i3200_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Akamai Technologies, Inc.");
-MODULE_DESCRIPTION("MC support for Intel 3200 memory hub controllers");
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/ANDROID_3.4.5/drivers/edac/i5000_edac.c b/ANDROID_3.4.5/drivers/edac/i5000_edac.c
deleted file mode 100644
index a2680d8e..00000000
--- a/ANDROID_3.4.5/drivers/edac/i5000_edac.c
+++ /dev/null
@@ -1,1580 +0,0 @@
-/*
- * Intel 5000(P/V/X) class Memory Controllers kernel module
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Written by Douglas Thompson Linux Networx (http://lnxi.com)
- * norsk5@xmission.com
- *
- * This module is based on the following document:
- *
- * Intel 5000X Chipset Memory Controller Hub (MCH) - Datasheet
- * http://developer.intel.com/design/chipsets/datashts/313070.htm
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/slab.h>
-#include <linux/edac.h>
-#include <asm/mmzone.h>
-
-#include "edac_core.h"
-
-/*
- * Alter this version for the I5000 module when modifications are made
- */
-#define I5000_REVISION " Ver: 2.0.12"
-#define EDAC_MOD_STR "i5000_edac"
-
-#define i5000_printk(level, fmt, arg...) \
- edac_printk(level, "i5000", fmt, ##arg)
-
-#define i5000_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "i5000", fmt, ##arg)
-
-#ifndef PCI_DEVICE_ID_INTEL_FBD_0
-#define PCI_DEVICE_ID_INTEL_FBD_0 0x25F5
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_FBD_1
-#define PCI_DEVICE_ID_INTEL_FBD_1 0x25F6
-#endif
-
-/* Device 16,
- * Function 0: System Address
- * Function 1: Memory Branch Map, Control, Errors Register
- * Function 2: FSB Error Registers
- *
- * All 3 functions of Device 16 (0,1,2) share the SAME DID
- */
-#define PCI_DEVICE_ID_INTEL_I5000_DEV16 0x25F0
-
-/* OFFSETS for Function 0 */
-
-/* OFFSETS for Function 1 */
-#define AMBASE 0x48
-#define MAXCH 0x56
-#define MAXDIMMPERCH 0x57
-#define TOLM 0x6C
-#define REDMEMB 0x7C
-#define RED_ECC_LOCATOR(x) ((x) & 0x3FFFF)
-#define REC_ECC_LOCATOR_EVEN(x) ((x) & 0x001FF)
-#define REC_ECC_LOCATOR_ODD(x) ((x) & 0x3FE00)
-#define MIR0 0x80
-#define MIR1 0x84
-#define MIR2 0x88
-#define AMIR0 0x8C
-#define AMIR1 0x90
-#define AMIR2 0x94
-
-#define FERR_FAT_FBD 0x98
-#define NERR_FAT_FBD 0x9C
-#define EXTRACT_FBDCHAN_INDX(x) (((x)>>28) & 0x3)
-#define FERR_FAT_FBDCHAN 0x30000000
-#define FERR_FAT_M3ERR 0x00000004
-#define FERR_FAT_M2ERR 0x00000002
-#define FERR_FAT_M1ERR 0x00000001
-#define FERR_FAT_MASK (FERR_FAT_M1ERR | \
- FERR_FAT_M2ERR | \
- FERR_FAT_M3ERR)
-
-#define FERR_NF_FBD 0xA0
-
-/* Thermal and SPD or FBD errors */
-#define FERR_NF_M28ERR 0x01000000
-#define FERR_NF_M27ERR 0x00800000
-#define FERR_NF_M26ERR 0x00400000
-#define FERR_NF_M25ERR 0x00200000
-#define FERR_NF_M24ERR 0x00100000
-#define FERR_NF_M23ERR 0x00080000
-#define FERR_NF_M22ERR 0x00040000
-#define FERR_NF_M21ERR 0x00020000
-
-/* Correctable errors */
-#define FERR_NF_M20ERR 0x00010000
-#define FERR_NF_M19ERR 0x00008000
-#define FERR_NF_M18ERR 0x00004000
-#define FERR_NF_M17ERR 0x00002000
-
-/* Non-Retry or redundant Retry errors */
-#define FERR_NF_M16ERR 0x00001000
-#define FERR_NF_M15ERR 0x00000800
-#define FERR_NF_M14ERR 0x00000400
-#define FERR_NF_M13ERR 0x00000200
-
-/* Uncorrectable errors */
-#define FERR_NF_M12ERR 0x00000100
-#define FERR_NF_M11ERR 0x00000080
-#define FERR_NF_M10ERR 0x00000040
-#define FERR_NF_M9ERR 0x00000020
-#define FERR_NF_M8ERR 0x00000010
-#define FERR_NF_M7ERR 0x00000008
-#define FERR_NF_M6ERR 0x00000004
-#define FERR_NF_M5ERR 0x00000002
-#define FERR_NF_M4ERR 0x00000001
-
-#define FERR_NF_UNCORRECTABLE (FERR_NF_M12ERR | \
- FERR_NF_M11ERR | \
- FERR_NF_M10ERR | \
- FERR_NF_M9ERR | \
- FERR_NF_M8ERR | \
- FERR_NF_M7ERR | \
- FERR_NF_M6ERR | \
- FERR_NF_M5ERR | \
- FERR_NF_M4ERR)
-#define FERR_NF_CORRECTABLE (FERR_NF_M20ERR | \
- FERR_NF_M19ERR | \
- FERR_NF_M18ERR | \
- FERR_NF_M17ERR)
-#define FERR_NF_DIMM_SPARE (FERR_NF_M27ERR | \
- FERR_NF_M28ERR)
-#define FERR_NF_THERMAL (FERR_NF_M26ERR | \
- FERR_NF_M25ERR | \
- FERR_NF_M24ERR | \
- FERR_NF_M23ERR)
-#define FERR_NF_SPD_PROTOCOL (FERR_NF_M22ERR)
-#define FERR_NF_NORTH_CRC (FERR_NF_M21ERR)
-#define FERR_NF_NON_RETRY (FERR_NF_M13ERR | \
- FERR_NF_M14ERR | \
- FERR_NF_M15ERR)
-
-#define NERR_NF_FBD 0xA4
-#define FERR_NF_MASK (FERR_NF_UNCORRECTABLE | \
- FERR_NF_CORRECTABLE | \
- FERR_NF_DIMM_SPARE | \
- FERR_NF_THERMAL | \
- FERR_NF_SPD_PROTOCOL | \
- FERR_NF_NORTH_CRC | \
- FERR_NF_NON_RETRY)
-
-#define EMASK_FBD 0xA8
-#define EMASK_FBD_M28ERR 0x08000000
-#define EMASK_FBD_M27ERR 0x04000000
-#define EMASK_FBD_M26ERR 0x02000000
-#define EMASK_FBD_M25ERR 0x01000000
-#define EMASK_FBD_M24ERR 0x00800000
-#define EMASK_FBD_M23ERR 0x00400000
-#define EMASK_FBD_M22ERR 0x00200000
-#define EMASK_FBD_M21ERR 0x00100000
-#define EMASK_FBD_M20ERR 0x00080000
-#define EMASK_FBD_M19ERR 0x00040000
-#define EMASK_FBD_M18ERR 0x00020000
-#define EMASK_FBD_M17ERR 0x00010000
-
-#define EMASK_FBD_M15ERR 0x00004000
-#define EMASK_FBD_M14ERR 0x00002000
-#define EMASK_FBD_M13ERR 0x00001000
-#define EMASK_FBD_M12ERR 0x00000800
-#define EMASK_FBD_M11ERR 0x00000400
-#define EMASK_FBD_M10ERR 0x00000200
-#define EMASK_FBD_M9ERR 0x00000100
-#define EMASK_FBD_M8ERR 0x00000080
-#define EMASK_FBD_M7ERR 0x00000040
-#define EMASK_FBD_M6ERR 0x00000020
-#define EMASK_FBD_M5ERR 0x00000010
-#define EMASK_FBD_M4ERR 0x00000008
-#define EMASK_FBD_M3ERR 0x00000004
-#define EMASK_FBD_M2ERR 0x00000002
-#define EMASK_FBD_M1ERR 0x00000001
-
-#define ENABLE_EMASK_FBD_FATAL_ERRORS (EMASK_FBD_M1ERR | \
- EMASK_FBD_M2ERR | \
- EMASK_FBD_M3ERR)
-
-#define ENABLE_EMASK_FBD_UNCORRECTABLE (EMASK_FBD_M4ERR | \
- EMASK_FBD_M5ERR | \
- EMASK_FBD_M6ERR | \
- EMASK_FBD_M7ERR | \
- EMASK_FBD_M8ERR | \
- EMASK_FBD_M9ERR | \
- EMASK_FBD_M10ERR | \
- EMASK_FBD_M11ERR | \
- EMASK_FBD_M12ERR)
-#define ENABLE_EMASK_FBD_CORRECTABLE (EMASK_FBD_M17ERR | \
- EMASK_FBD_M18ERR | \
- EMASK_FBD_M19ERR | \
- EMASK_FBD_M20ERR)
-#define ENABLE_EMASK_FBD_DIMM_SPARE (EMASK_FBD_M27ERR | \
- EMASK_FBD_M28ERR)
-#define ENABLE_EMASK_FBD_THERMALS (EMASK_FBD_M26ERR | \
- EMASK_FBD_M25ERR | \
- EMASK_FBD_M24ERR | \
- EMASK_FBD_M23ERR)
-#define ENABLE_EMASK_FBD_SPD_PROTOCOL (EMASK_FBD_M22ERR)
-#define ENABLE_EMASK_FBD_NORTH_CRC (EMASK_FBD_M21ERR)
-#define ENABLE_EMASK_FBD_NON_RETRY (EMASK_FBD_M15ERR | \
- EMASK_FBD_M14ERR | \
- EMASK_FBD_M13ERR)
-
-#define ENABLE_EMASK_ALL (ENABLE_EMASK_FBD_NON_RETRY | \
- ENABLE_EMASK_FBD_NORTH_CRC | \
- ENABLE_EMASK_FBD_SPD_PROTOCOL | \
- ENABLE_EMASK_FBD_THERMALS | \
- ENABLE_EMASK_FBD_DIMM_SPARE | \
- ENABLE_EMASK_FBD_FATAL_ERRORS | \
- ENABLE_EMASK_FBD_CORRECTABLE | \
- ENABLE_EMASK_FBD_UNCORRECTABLE)
-
-#define ERR0_FBD 0xAC
-#define ERR1_FBD 0xB0
-#define ERR2_FBD 0xB4
-#define MCERR_FBD 0xB8
-#define NRECMEMA 0xBE
-#define NREC_BANK(x) (((x)>>12) & 0x7)
-#define NREC_RDWR(x) (((x)>>11) & 1)
-#define NREC_RANK(x) (((x)>>8) & 0x7)
-#define NRECMEMB 0xC0
-#define NREC_CAS(x) (((x)>>16) & 0xFFFFFF)
-#define NREC_RAS(x) ((x) & 0x7FFF)
-#define NRECFGLOG 0xC4
-#define NREEECFBDA 0xC8
-#define NREEECFBDB 0xCC
-#define NREEECFBDC 0xD0
-#define NREEECFBDD 0xD4
-#define NREEECFBDE 0xD8
-#define REDMEMA 0xDC
-#define RECMEMA 0xE2
-#define REC_BANK(x) (((x)>>12) & 0x7)
-#define REC_RDWR(x) (((x)>>11) & 1)
-#define REC_RANK(x) (((x)>>8) & 0x7)
-#define RECMEMB 0xE4
-#define REC_CAS(x) (((x)>>16) & 0xFFFFFF)
-#define REC_RAS(x) ((x) & 0x7FFF)
-#define RECFGLOG 0xE8
-#define RECFBDA 0xEC
-#define RECFBDB 0xF0
-#define RECFBDC 0xF4
-#define RECFBDD 0xF8
-#define RECFBDE 0xFC
-
-/* OFFSETS for Function 2 */
-
-/*
- * Device 21,
- * Function 0: Memory Map Branch 0
- *
- * Device 22,
- * Function 0: Memory Map Branch 1
- */
-#define PCI_DEVICE_ID_I5000_BRANCH_0 0x25F5
-#define PCI_DEVICE_ID_I5000_BRANCH_1 0x25F6
-
-#define AMB_PRESENT_0 0x64
-#define AMB_PRESENT_1 0x66
-#define MTR0 0x80
-#define MTR1 0x84
-#define MTR2 0x88
-#define MTR3 0x8C
-
-#define NUM_MTRS 4
-#define CHANNELS_PER_BRANCH (2)
-
-/* Defines to extract the various fields from the
- * MTRx - Memory Technology Registers
- */
-#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (0x1 << 8))
-#define MTR_DRAM_WIDTH(mtr) ((((mtr) >> 6) & 0x1) ? 8 : 4)
-#define MTR_DRAM_BANKS(mtr) ((((mtr) >> 5) & 0x1) ? 8 : 4)
-#define MTR_DRAM_BANKS_ADDR_BITS(mtr) ((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
-#define MTR_DIMM_RANK(mtr) (((mtr) >> 4) & 0x1)
-#define MTR_DIMM_RANK_ADDR_BITS(mtr) (MTR_DIMM_RANK(mtr) ? 2 : 1)
-#define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3)
-#define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13)
-#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
-#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
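-
-/* Illustrative decode: an MTR value of 0x156 reports a populated DIMM
- * (bit 8 set), x8 DRAM width, 4 banks, dual rank, 14 row address bits
- * (16,384 rows) and 12 column address bits (4,096 columns) when run
- * through the extractor macros above.
- */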
-
-#ifdef CONFIG_EDAC_DEBUG
-static char *numrow_toString[] = {
- "8,192 - 13 rows",
- "16,384 - 14 rows",
- "32,768 - 15 rows",
- "reserved"
-};
-
-static char *numcol_toString[] = {
- "1,024 - 10 columns",
- "2,048 - 11 columns",
- "4,096 - 12 columns",
- "reserved"
-};
-#endif
-
-/* enables the report of miscellaneous messages as CE errors - default off */
-static int misc_messages;
-
-/* Enumeration of supported devices */
-enum i5000_chips {
- I5000P = 0,
- I5000V = 1, /* future */
- I5000X = 2 /* future */
-};
-
-/* Device name and register DID (Device ID) */
-struct i5000_dev_info {
- const char *ctl_name; /* name for this device */
- u16 fsb_mapping_errors; /* DID for the branchmap,control */
-};
-
-/* Table of devices attributes supported by this driver */
-static const struct i5000_dev_info i5000_devs[] = {
- [I5000P] = {
- .ctl_name = "I5000",
- .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I5000_DEV16,
- },
-};
-
-struct i5000_dimm_info {
- int megabytes; /* size, 0 means not present */
- int dual_rank;
-};
-
-#define MAX_CHANNELS 6 /* max possible channels */
-#define MAX_CSROWS (8*2) /* max possible csrows per channel */
-
-/* driver private data structure */
-struct i5000_pvt {
- struct pci_dev *system_address; /* 16.0 */
- struct pci_dev *branchmap_werrors; /* 16.1 */
- struct pci_dev *fsb_error_regs; /* 16.2 */
- struct pci_dev *branch_0; /* 21.0 */
- struct pci_dev *branch_1; /* 22.0 */
-
- u16 tolm; /* top of low memory */
- u64 ambase; /* AMB BAR */
-
- u16 mir0, mir1, mir2;
-
- u16 b0_mtr[NUM_MTRS]; /* Memory Technology Reg */
- u16 b0_ambpresent0; /* Branch 0, Channel 0 */
- u16 b0_ambpresent1; /* Branch 0, Channel 1 */
-
- u16 b1_mtr[NUM_MTRS]; /* Memory Technology Reg */
- u16 b1_ambpresent0; /* Branch 1, Channel 0 */
- u16 b1_ambpresent1; /* Branch 1, Channel 1 */
-
- /* DIMM information matrix, allocating architecture maximums */
- struct i5000_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
-
- /* Actual values for this controller */
- int maxch; /* Max channels */
- int maxdimmperch; /* Max DIMMs per channel */
-};
-
-/* I5000 MCH error information retrieved from Hardware */
-struct i5000_error_info {
-
- /* These registers are always read from the MC */
- u32 ferr_fat_fbd; /* First Errors Fatal */
- u32 nerr_fat_fbd; /* Next Errors Fatal */
- u32 ferr_nf_fbd; /* First Errors Non-Fatal */
- u32 nerr_nf_fbd; /* Next Errors Non-Fatal */
-
- /* These registers are input ONLY if there was a Recoverable Error */
- u32 redmemb; /* Recoverable Mem Data Error log B */
- u16 recmema; /* Recoverable Mem Error log A */
- u32 recmemb; /* Recoverable Mem Error log B */
-
- /* These registers are input ONLY if there was a
- * Non-Recoverable Error */
- u16 nrecmema; /* Non-Recoverable Mem log A */
- u16 nrecmemb; /* Non-Recoverable Mem log B */
-
-};
-
-static struct edac_pci_ctl_info *i5000_pci;
-
-/*
- * i5000_get_error_info Retrieve the hardware error information from
- * the hardware and cache it in the 'info'
- * structure
- */
-static void i5000_get_error_info(struct mem_ctl_info *mci,
- struct i5000_error_info *info)
-{
- struct i5000_pvt *pvt;
- u32 value;
-
- pvt = mci->pvt_info;
-
- /* read in the 1st FATAL error register */
- pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);
-
- /* Mask only the bits that the doc says are valid
- */
- value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);
-
- /* If there is an error, then read in the */
- /* NEXT FATAL error register and the Memory Error Log Register A */
- if (value & FERR_FAT_MASK) {
- info->ferr_fat_fbd = value;
-
- /* harvest the various error data we need */
- pci_read_config_dword(pvt->branchmap_werrors,
- NERR_FAT_FBD, &info->nerr_fat_fbd);
- pci_read_config_word(pvt->branchmap_werrors,
- NRECMEMA, &info->nrecmema);
- pci_read_config_word(pvt->branchmap_werrors,
- NRECMEMB, &info->nrecmemb);
-
- /* Clear the error bits, by writing them back */
- pci_write_config_dword(pvt->branchmap_werrors,
- FERR_FAT_FBD, value);
- } else {
- info->ferr_fat_fbd = 0;
- info->nerr_fat_fbd = 0;
- info->nrecmema = 0;
- info->nrecmemb = 0;
- }
-
- /* read in the 1st NON-FATAL error register */
- pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);
-
- /* If there is an error, then read in the 1st NON-FATAL error
- * register as well */
- if (value & FERR_NF_MASK) {
- info->ferr_nf_fbd = value;
-
- /* harvest the various error data we need */
- pci_read_config_dword(pvt->branchmap_werrors,
- NERR_NF_FBD, &info->nerr_nf_fbd);
- pci_read_config_word(pvt->branchmap_werrors,
- RECMEMA, &info->recmema);
- pci_read_config_dword(pvt->branchmap_werrors,
- RECMEMB, &info->recmemb);
- pci_read_config_dword(pvt->branchmap_werrors,
- REDMEMB, &info->redmemb);
-
- /* Clear the error bits, by writing them back */
- pci_write_config_dword(pvt->branchmap_werrors,
- FERR_NF_FBD, value);
- } else {
- info->ferr_nf_fbd = 0;
- info->nerr_nf_fbd = 0;
- info->recmema = 0;
- info->recmemb = 0;
- info->redmemb = 0;
- }
-}
-
-/*
- * i5000_process_fatal_error_info(struct mem_ctl_info *mci,
- * struct i5000_error_info *info,
- * int handle_errors);
- *
- * handle the Intel FATAL errors, if any
- */
-static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
- struct i5000_error_info *info,
- int handle_errors)
-{
- char msg[EDAC_MC_LABEL_LEN + 1 + 160];
- char *specific = NULL;
- u32 allErrors;
- int branch;
- int channel;
- int bank;
- int rank;
- int rdwr;
- int ras, cas;
-
- /* mask off the Error bits that are possible */
- allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK);
- if (!allErrors)
- return; /* if no error, return now */
-
- branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
- channel = branch;
-
- /* Use the NON-Recoverable macros to extract data */
- bank = NREC_BANK(info->nrecmema);
- rank = NREC_RANK(info->nrecmema);
- rdwr = NREC_RDWR(info->nrecmema);
- ras = NREC_RAS(info->nrecmemb);
- cas = NREC_CAS(info->nrecmemb);
-
- debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, channel + 1, branch >> 1, bank,
- rdwr ? "Write" : "Read", ras, cas);
-
- /* Only 1 bit will be on */
- switch (allErrors) {
- case FERR_FAT_M1ERR:
- specific = "Alert on non-redundant retry or fast "
- "reset timeout";
- break;
- case FERR_FAT_M2ERR:
- specific = "Northbound CRC error on non-redundant "
- "retry";
- break;
- case FERR_FAT_M3ERR:
- {
- static int done;
-
- /*
- * This error is generated to inform that the intelligent
- * throttling is disabled and the temperature passed the
- * specified middle point. Since this is something the BIOS
- * should take care of, we'll warn only once to avoid
- * worthlessly flooding the log.
- */
- if (done)
- return;
- done++;
-
- specific = ">Tmid Thermal event with intelligent "
- "throttling disabled";
- }
- break;
- }
-
- /* Format the output message */
- snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d "
- "FATAL Err=0x%x (%s))",
- branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
- allErrors, specific);
-
- /* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
-}
-
-/*
- * i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
- * struct i5000_error_info *info,
- * int handle_errors);
- *
- * handle the Intel NON-FATAL errors, if any
- */
-static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
- struct i5000_error_info *info,
- int handle_errors)
-{
- char msg[EDAC_MC_LABEL_LEN + 1 + 170];
- char *specific = NULL;
- u32 allErrors;
- u32 ue_errors;
- u32 ce_errors;
- u32 misc_errors;
- int branch;
- int channel;
- int bank;
- int rank;
- int rdwr;
- int ras, cas;
-
- /* mask off the Error bits that are possible */
- allErrors = (info->ferr_nf_fbd & FERR_NF_MASK);
- if (!allErrors)
- return; /* if no error, return now */
-
- /* ONLY ONE of the possible error bits will be set, as per the docs */
- ue_errors = allErrors & FERR_NF_UNCORRECTABLE;
- if (ue_errors) {
- debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
-
- branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
-
- /*
-		 * According to the i5000 datasheet, bit 28 has no significance
- * for errors M4Err-M12Err and M17Err-M21Err, on FERR_NF_FBD
- */
- channel = branch & 2;
-
- bank = NREC_BANK(info->nrecmema);
- rank = NREC_RANK(info->nrecmema);
- rdwr = NREC_RDWR(info->nrecmema);
- ras = NREC_RAS(info->nrecmemb);
- cas = NREC_CAS(info->nrecmemb);
-
- debugf0
- ("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, channel + 1, branch >> 1, bank,
- rdwr ? "Write" : "Read", ras, cas);
-
- switch (ue_errors) {
- case FERR_NF_M12ERR:
- specific = "Non-Aliased Uncorrectable Patrol Data ECC";
- break;
- case FERR_NF_M11ERR:
- specific = "Non-Aliased Uncorrectable Spare-Copy "
- "Data ECC";
- break;
- case FERR_NF_M10ERR:
- specific = "Non-Aliased Uncorrectable Mirrored Demand "
- "Data ECC";
- break;
- case FERR_NF_M9ERR:
- specific = "Non-Aliased Uncorrectable Non-Mirrored "
- "Demand Data ECC";
- break;
- case FERR_NF_M8ERR:
- specific = "Aliased Uncorrectable Patrol Data ECC";
- break;
- case FERR_NF_M7ERR:
- specific = "Aliased Uncorrectable Spare-Copy Data ECC";
- break;
- case FERR_NF_M6ERR:
- specific = "Aliased Uncorrectable Mirrored Demand "
- "Data ECC";
- break;
- case FERR_NF_M5ERR:
- specific = "Aliased Uncorrectable Non-Mirrored Demand "
- "Data ECC";
- break;
- case FERR_NF_M4ERR:
- specific = "Uncorrectable Data ECC on Replay";
- break;
- }
-
- /* Form out message */
- snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
- "CAS=%d, UE Err=0x%x (%s))",
- branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
- ue_errors, specific);
-
- /* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
- }
-
- /* Check correctable errors */
- ce_errors = allErrors & FERR_NF_CORRECTABLE;
- if (ce_errors) {
- debugf0("\tCorrected bits= 0x%x\n", ce_errors);
-
- branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
-
- channel = 0;
- if (REC_ECC_LOCATOR_ODD(info->redmemb))
- channel = 1;
-
-		/* Convert the per-branch channel number into an absolute
-		 * channel number by adding the branch's base channel */
- channel += branch;
-
- bank = REC_BANK(info->recmema);
- rank = REC_RANK(info->recmema);
- rdwr = REC_RDWR(info->recmema);
- ras = REC_RAS(info->recmemb);
- cas = REC_CAS(info->recmemb);
-
- debugf0("\t\tCSROW= %d Channel= %d (Branch %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, branch >> 1, bank,
- rdwr ? "Write" : "Read", ras, cas);
-
- switch (ce_errors) {
- case FERR_NF_M17ERR:
- specific = "Correctable Non-Mirrored Demand Data ECC";
- break;
- case FERR_NF_M18ERR:
- specific = "Correctable Mirrored Demand Data ECC";
- break;
- case FERR_NF_M19ERR:
- specific = "Correctable Spare-Copy Data ECC";
- break;
- case FERR_NF_M20ERR:
- specific = "Correctable Patrol Data ECC";
- break;
- }
-
- /* Form out message */
- snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
- "CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank,
- rdwr ? "Write" : "Read", ras, cas, ce_errors,
- specific);
-
- /* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, rank, channel, msg);
- }
-
- if (!misc_messages)
- return;
-
- misc_errors = allErrors & (FERR_NF_NON_RETRY | FERR_NF_NORTH_CRC |
- FERR_NF_SPD_PROTOCOL | FERR_NF_DIMM_SPARE);
- if (misc_errors) {
- switch (misc_errors) {
- case FERR_NF_M13ERR:
- specific = "Non-Retry or Redundant Retry FBD Memory "
- "Alert or Redundant Fast Reset Timeout";
- break;
- case FERR_NF_M14ERR:
- specific = "Non-Retry or Redundant Retry FBD "
- "Configuration Alert";
- break;
- case FERR_NF_M15ERR:
- specific = "Non-Retry or Redundant Retry FBD "
- "Northbound CRC error on read data";
- break;
- case FERR_NF_M21ERR:
- specific = "FBD Northbound CRC error on "
- "FBD Sync Status";
- break;
- case FERR_NF_M22ERR:
- specific = "SPD protocol error";
- break;
- case FERR_NF_M27ERR:
- specific = "DIMM-spare copy started";
- break;
- case FERR_NF_M28ERR:
- specific = "DIMM-spare copy completed";
- break;
- }
- branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
-
- /* Form out message */
- snprintf(msg, sizeof(msg),
- "(Branch=%d Err=%#x (%s))", branch >> 1,
- misc_errors, specific);
-
- /* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, 0, 0, msg);
- }
-}
-
-/*
- * i5000_process_error_info Process the error info that is
- * in the 'info' structure, previously retrieved from hardware
- */
-static void i5000_process_error_info(struct mem_ctl_info *mci,
- struct i5000_error_info *info,
- int handle_errors)
-{
- /* First handle any fatal errors that occurred */
- i5000_process_fatal_error_info(mci, info, handle_errors);
-
- /* now handle any non-fatal errors that occurred */
- i5000_process_nonfatal_error_info(mci, info, handle_errors);
-}
-
-/*
- * i5000_clear_error Retrieve any error from the hardware
- * but do NOT process that error.
- * Used for 'clearing' out of previous errors
- * Called by the Core module.
- */
-static void i5000_clear_error(struct mem_ctl_info *mci)
-{
- struct i5000_error_info info;
-
- i5000_get_error_info(mci, &info);
-}
-
-/*
- * i5000_check_error Retrieve and process errors reported by the
- * hardware. Called by the Core module.
- */
-static void i5000_check_error(struct mem_ctl_info *mci)
-{
- struct i5000_error_info info;
- debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
- i5000_get_error_info(mci, &info);
- i5000_process_error_info(mci, &info, 1);
-}
-
-/*
- * i5000_get_devices Find and perform 'get' operation on the MCH's
- * device/functions we want to reference for this driver
- *
- * Need to 'get' device 16 func 1 and func 2
- */
-static int i5000_get_devices(struct mem_ctl_info *mci, int dev_idx)
-{
- //const struct i5000_dev_info *i5000_dev = &i5000_devs[dev_idx];
- struct i5000_pvt *pvt;
- struct pci_dev *pdev;
-
- pvt = mci->pvt_info;
-
- /* Attempt to 'get' the MCH register we want */
- pdev = NULL;
- while (1) {
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
-
- /* End of list, leave */
- if (pdev == NULL) {
- i5000_printk(KERN_ERR,
- "'system address,Process Bus' "
- "device not found:"
- "vendor 0x%x device 0x%x FUNC 1 "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I5000_DEV16);
-
- return 1;
- }
-
- /* Scan for device 16 func 1 */
- if (PCI_FUNC(pdev->devfn) == 1)
- break;
- }
-
- pvt->branchmap_werrors = pdev;
-
- /* Attempt to 'get' the MCH register we want */
- pdev = NULL;
- while (1) {
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
-
- if (pdev == NULL) {
- i5000_printk(KERN_ERR,
- "MC: 'branchmap,control,errors' "
- "device not found:"
- "vendor 0x%x device 0x%x Func 2 "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I5000_DEV16);
-
- pci_dev_put(pvt->branchmap_werrors);
- return 1;
- }
-
-		/* Scan for device 16 func 2 */
- if (PCI_FUNC(pdev->devfn) == 2)
- break;
- }
-
- pvt->fsb_error_regs = pdev;
-
- debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->system_address),
- pvt->system_address->vendor, pvt->system_address->device);
- debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->branchmap_werrors),
- pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
- debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->fsb_error_regs),
- pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
-
- pdev = NULL;
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_I5000_BRANCH_0, pdev);
-
- if (pdev == NULL) {
- i5000_printk(KERN_ERR,
- "MC: 'BRANCH 0' device not found:"
- "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_I5000_BRANCH_0);
-
- pci_dev_put(pvt->branchmap_werrors);
- pci_dev_put(pvt->fsb_error_regs);
- return 1;
- }
-
- pvt->branch_0 = pdev;
-
- /* If this device claims to have more than 2 channels then
- * fetch Branch 1's information
- */
- if (pvt->maxch >= CHANNELS_PER_BRANCH) {
- pdev = NULL;
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_I5000_BRANCH_1, pdev);
-
- if (pdev == NULL) {
- i5000_printk(KERN_ERR,
- "MC: 'BRANCH 1' device not found:"
- "vendor 0x%x device 0x%x Func 0 "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_I5000_BRANCH_1);
-
- pci_dev_put(pvt->branchmap_werrors);
- pci_dev_put(pvt->fsb_error_regs);
- pci_dev_put(pvt->branch_0);
- return 1;
- }
-
- pvt->branch_1 = pdev;
- }
-
- return 0;
-}
-
-/*
- * i5000_put_devices 'put' all the devices that we have
- * reserved via 'get'
- */
-static void i5000_put_devices(struct mem_ctl_info *mci)
-{
- struct i5000_pvt *pvt;
-
- pvt = mci->pvt_info;
-
- pci_dev_put(pvt->branchmap_werrors); /* FUNC 1 */
- pci_dev_put(pvt->fsb_error_regs); /* FUNC 2 */
- pci_dev_put(pvt->branch_0); /* DEV 21 */
-
- /* Only if more than 2 channels do we release the second branch */
- if (pvt->maxch >= CHANNELS_PER_BRANCH)
- pci_dev_put(pvt->branch_1); /* DEV 22 */
-}
-
-/*
- * determine_amb_present_reg
- *
- * The information is contained in NUM_MTRS different registers;
- * determining which of the NUM_MTRS to use requires knowing
- * which channel is in question
- *
- * 2 branches, each with 2 channels
- * b0_ambpresent0 for channel '0'
- * b0_ambpresent1 for channel '1'
- * b1_ambpresent0 for channel '2'
- * b1_ambpresent1 for channel '3'
- */
-static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel)
-{
- int amb_present;
-
- if (channel < CHANNELS_PER_BRANCH) {
- if (channel & 0x1)
- amb_present = pvt->b0_ambpresent1;
- else
- amb_present = pvt->b0_ambpresent0;
- } else {
- if (channel & 0x1)
- amb_present = pvt->b1_ambpresent1;
- else
- amb_present = pvt->b1_ambpresent0;
- }
-
- return amb_present;
-}
-
-/*
- * determine_mtr(pvt, csrow, channel)
- *
- * return the proper MTR register as determined by the csrow and channel desired
- */
-static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel)
-{
- int mtr;
-
- if (channel < CHANNELS_PER_BRANCH)
- mtr = pvt->b0_mtr[csrow >> 1];
- else
- mtr = pvt->b1_mtr[csrow >> 1];
-
- return mtr;
-}
-
-/*
- * decode_mtr(slot_row, mtr)	decode one MTR and debug-print the
- *				DIMM geometry it describes
- */
-static void decode_mtr(int slot_row, u16 mtr)
-{
- int ans;
-
- ans = MTR_DIMMS_PRESENT(mtr);
-
- debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr,
- ans ? "Present" : "NOT Present");
- if (!ans)
- return;
-
- debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
- debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
- debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
- debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
- debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
-}
-
-static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
- struct i5000_dimm_info *dinfo)
-{
- int mtr;
- int amb_present_reg;
- int addrBits;
-
- mtr = determine_mtr(pvt, csrow, channel);
- if (MTR_DIMMS_PRESENT(mtr)) {
- amb_present_reg = determine_amb_present_reg(pvt, channel);
-
- /* Determine if there is a DIMM present in this DIMM slot */
- if (amb_present_reg & (1 << (csrow >> 1))) {
- dinfo->dual_rank = MTR_DIMM_RANK(mtr);
-
- if (!((dinfo->dual_rank == 0) &&
- ((csrow & 0x1) == 0x1))) {
- /* Start with the number of bits for a Bank
- * on the DRAM */
- addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
-				/* Add the number of ROW bits */
- addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
- /* add the number of COLUMN bits */
- addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
-
- addrBits += 6; /* add 64 bits per DIMM */
-				addrBits -= 20;	/* divide by 2^20 */
-				addrBits -= 3;	/* 8 bits per byte */
-
- dinfo->megabytes = 1 << addrBits;
- }
- }
- }
-}
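-
-/*
- * A worked example of the size arithmetic in handle_channel() above,
- * using hypothetical MTR values (not read from any real board): a rank
- * decoding to 2 bank-address bits, 14 row bits and 11 column bits gives
- *
- *	addrBits = 2 + 14 + 11 = 27		64-bit locations
- *	addrBits + 6 - 20 - 3  = 10		bits -> bytes -> MiB
- *	dinfo->megabytes = 1 << 10 = 1024	i.e. a 1 GiB rank
- */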
-
-/*
- * calculate_dimm_size
- *
- * also will output a DIMM matrix map, if debug is enabled, for viewing
- * how the DIMMs are populated
- */
-static void calculate_dimm_size(struct i5000_pvt *pvt)
-{
- struct i5000_dimm_info *dinfo;
- int csrow, max_csrows;
- char *p, *mem_buffer;
- int space, n;
- int channel;
-
- /* ================= Generate some debug output ================= */
- space = PAGE_SIZE;
- mem_buffer = p = kmalloc(space, GFP_KERNEL);
- if (p == NULL) {
- i5000_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
- __FILE__, __func__);
- return;
- }
-
- n = snprintf(p, space, "\n");
- p += n;
- space -= n;
-
- /* Scan all the actual CSROWS (which is # of DIMMS * 2)
- * and calculate the information for each DIMM
- * Start with the highest csrow first, to display it first
- * and work toward the 0th csrow
- */
- max_csrows = pvt->maxdimmperch * 2;
- for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
-
- /* on an odd csrow, first output a 'boundary' marker,
- * then reset the message buffer */
- if (csrow & 0x1) {
- n = snprintf(p, space, "---------------------------"
- "--------------------------------");
- p += n;
- space -= n;
- debugf2("%s\n", mem_buffer);
- p = mem_buffer;
- space = PAGE_SIZE;
- }
- n = snprintf(p, space, "csrow %2d ", csrow);
- p += n;
- space -= n;
-
- for (channel = 0; channel < pvt->maxch; channel++) {
- dinfo = &pvt->dimm_info[csrow][channel];
- handle_channel(pvt, csrow, channel, dinfo);
- n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
- p += n;
- space -= n;
- }
- n = snprintf(p, space, "\n");
- p += n;
- space -= n;
- }
-
- /* Output the last bottom 'boundary' marker */
- n = snprintf(p, space, "---------------------------"
- "--------------------------------\n");
- p += n;
- space -= n;
-
- /* now output the 'channel' labels */
- n = snprintf(p, space, " ");
- p += n;
- space -= n;
- for (channel = 0; channel < pvt->maxch; channel++) {
- n = snprintf(p, space, "channel %d | ", channel);
- p += n;
- space -= n;
- }
- n = snprintf(p, space, "\n");
- p += n;
- space -= n;
-
- /* output the last message and free buffer */
- debugf2("%s\n", mem_buffer);
- kfree(mem_buffer);
-}
-
-/*
- * i5000_get_mc_regs read in the necessary registers and
- * cache locally
- *
- * Fills in the private data members
- */
-static void i5000_get_mc_regs(struct mem_ctl_info *mci)
-{
- struct i5000_pvt *pvt;
- u32 actual_tolm;
- u16 limit;
- int slot_row;
- int maxch;
- int maxdimmperch;
- int way0, way1;
-
- pvt = mci->pvt_info;
-
- pci_read_config_dword(pvt->system_address, AMBASE,
- (u32 *) & pvt->ambase);
- pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
- ((u32 *) & pvt->ambase) + sizeof(u32));
-
- maxdimmperch = pvt->maxdimmperch;
- maxch = pvt->maxch;
-
- debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
- (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
-
- /* Get the Branch Map regs */
- pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
- pvt->tolm >>= 12;
- debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
- pvt->tolm);
-
- actual_tolm = pvt->tolm << 28;
- debugf2("Actual TOLM byte addr=%u (0x%x)\n", actual_tolm, actual_tolm);
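-
-	/*
-	 * Sketch of the TOLM decode above, with a made-up register value:
-	 * if the raw TOLM word read 0x8000, the shift by 12 leaves 8,
-	 * i.e. eight 256M regions, so actual_tolm = 8 << 28 = 0x80000000
-	 * (a 2 GiB top of low memory).
-	 */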
-
- pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
- pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
- pci_read_config_word(pvt->branchmap_werrors, MIR2, &pvt->mir2);
-
- /* Get the MIR[0-2] regs */
- limit = (pvt->mir0 >> 4) & 0x0FFF;
- way0 = pvt->mir0 & 0x1;
- way1 = pvt->mir0 & 0x2;
- debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
- limit = (pvt->mir1 >> 4) & 0x0FFF;
- way0 = pvt->mir1 & 0x1;
- way1 = pvt->mir1 & 0x2;
- debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
- limit = (pvt->mir2 >> 4) & 0x0FFF;
- way0 = pvt->mir2 & 0x1;
- way1 = pvt->mir2 & 0x2;
- debugf2("MIR2: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
-
- /* Get the MTR[0-3] regs */
- for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
- int where = MTR0 + (slot_row * sizeof(u32));
-
- pci_read_config_word(pvt->branch_0, where,
- &pvt->b0_mtr[slot_row]);
-
- debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
- pvt->b0_mtr[slot_row]);
-
- if (pvt->maxch >= CHANNELS_PER_BRANCH) {
- pci_read_config_word(pvt->branch_1, where,
- &pvt->b1_mtr[slot_row]);
- debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row,
- where, pvt->b1_mtr[slot_row]);
- } else {
- pvt->b1_mtr[slot_row] = 0;
- }
- }
-
- /* Read and dump branch 0's MTRs */
- debugf2("\nMemory Technology Registers:\n");
- debugf2(" Branch 0:\n");
- for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
- decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
- }
- pci_read_config_word(pvt->branch_0, AMB_PRESENT_0,
- &pvt->b0_ambpresent0);
- debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
- pci_read_config_word(pvt->branch_0, AMB_PRESENT_1,
- &pvt->b0_ambpresent1);
- debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
-
-	/* Only if we have 2 branches (4 channels) */
- if (pvt->maxch < CHANNELS_PER_BRANCH) {
- pvt->b1_ambpresent0 = 0;
- pvt->b1_ambpresent1 = 0;
- } else {
- /* Read and dump branch 1's MTRs */
- debugf2(" Branch 1:\n");
- for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
- decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
- }
- pci_read_config_word(pvt->branch_1, AMB_PRESENT_0,
- &pvt->b1_ambpresent0);
- debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
- pvt->b1_ambpresent0);
- pci_read_config_word(pvt->branch_1, AMB_PRESENT_1,
- &pvt->b1_ambpresent1);
- debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
- pvt->b1_ambpresent1);
- }
-
- /* Go and determine the size of each DIMM and place in an
- * orderly matrix */
- calculate_dimm_size(pvt);
-}
-
-/*
- * i5000_init_csrows Initialize the 'csrows' table within
- * the mci control structure with the
- * addressing of memory.
- *
- * return:
- * 0 success
- * 1 no actual memory found on this MC
- */
-static int i5000_init_csrows(struct mem_ctl_info *mci)
-{
- struct i5000_pvt *pvt;
- struct csrow_info *p_csrow;
- int empty, channel_count;
- int max_csrows;
- int mtr, mtr1;
- int csrow_megs;
- int channel;
- int csrow;
-
- pvt = mci->pvt_info;
-
- channel_count = pvt->maxch;
- max_csrows = pvt->maxdimmperch * 2;
-
- empty = 1; /* Assume NO memory */
-
- for (csrow = 0; csrow < max_csrows; csrow++) {
- p_csrow = &mci->csrows[csrow];
-
- p_csrow->csrow_idx = csrow;
-
- /* use branch 0 for the basis */
- mtr = pvt->b0_mtr[csrow >> 1];
- mtr1 = pvt->b1_mtr[csrow >> 1];
-
- /* if no DIMMS on this row, continue */
- if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1))
- continue;
-
- /* FAKE OUT VALUES, FIXME */
- p_csrow->first_page = 0 + csrow * 20;
- p_csrow->last_page = 9 + csrow * 20;
- p_csrow->page_mask = 0xFFF;
-
- p_csrow->grain = 8;
-
- csrow_megs = 0;
- for (channel = 0; channel < pvt->maxch; channel++) {
- csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
- }
-
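-		/* one MiB is 256 pages, assuming the usual 4 KiB PAGE_SIZE,
-		 * hence the shift by 8 below */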
- p_csrow->nr_pages = csrow_megs << 8;
-
- /* Assume DDR2 for now */
- p_csrow->mtype = MEM_FB_DDR2;
-
- /* ask what device type on this row */
- if (MTR_DRAM_WIDTH(mtr))
- p_csrow->dtype = DEV_X8;
- else
- p_csrow->dtype = DEV_X4;
-
- p_csrow->edac_mode = EDAC_S8ECD8ED;
-
- empty = 0;
- }
-
- return empty;
-}
-
-/*
- * i5000_enable_error_reporting
- * Turn on the memory reporting features of the hardware
- */
-static void i5000_enable_error_reporting(struct mem_ctl_info *mci)
-{
- struct i5000_pvt *pvt;
- u32 fbd_error_mask;
-
- pvt = mci->pvt_info;
-
- /* Read the FBD Error Mask Register */
- pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
- &fbd_error_mask);
-
- /* Enable with a '0' */
- fbd_error_mask &= ~(ENABLE_EMASK_ALL);
-
- pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
- fbd_error_mask);
-}
-
-/*
- * i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
- *					&num_channels)
- *
- * ask the device how many channels and how many DIMMs per channel
- * are present
- */
-static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
- int *num_dimms_per_channel,
- int *num_channels)
-{
- u8 value;
-
- /* Need to retrieve just how many channels and dimms per channel are
- * supported on this memory controller
- */
- pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
-	*num_dimms_per_channel = (int)value * 2;
-
- pci_read_config_byte(pdev, MAXCH, &value);
- *num_channels = (int)value;
-}
-
-/*
- * i5000_probe1 Probe for ONE instance of device to see if it is
- * present.
- * return:
- * 0 for FOUND a device
- * < 0 for error code
- */
-static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
-{
- struct mem_ctl_info *mci;
- struct i5000_pvt *pvt;
- int num_channels;
- int num_dimms_per_channel;
- int num_csrows;
-
- debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
- __FILE__, __func__,
- pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
-
- /* We only are looking for func 0 of the set */
- if (PCI_FUNC(pdev->devfn) != 0)
- return -ENODEV;
-
- /* Ask the devices for the number of CSROWS and CHANNELS so
- * that we can calculate the memory resources, etc
- *
- * The Chipset will report what it can handle which will be greater
- * or equal to what the motherboard manufacturer will implement.
- *
-	 * As we don't have a motherboard identification routine to determine
-	 * the actual number of slots/DIMMs per channel, we use the
-	 * resources reported by the chipset. Thus, we might report more
-	 * DIMMs per channel than are actually on the mobo, but this
-	 * allows the driver to support up to the chipset max, without
-	 * needing fancy mobo determination.
- */
- i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
- &num_channels);
- num_csrows = num_dimms_per_channel * 2;
-
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
-
- /* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
- if (mci == NULL)
- return -ENOMEM;
-
- kobject_get(&mci->edac_mci_kobj);
- debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
-
- mci->dev = &pdev->dev; /* record ptr to the generic device */
-
- pvt = mci->pvt_info;
- pvt->system_address = pdev; /* Record this device in our private */
- pvt->maxch = num_channels;
- pvt->maxdimmperch = num_dimms_per_channel;
-
- /* 'get' the pci devices we want to reserve for our use */
- if (i5000_get_devices(mci, dev_idx))
- goto fail0;
-
- /* Time to get serious */
- i5000_get_mc_regs(mci); /* retrieve the hardware registers */
-
- mci->mc_idx = 0;
- mci->mtype_cap = MEM_FLAG_FB_DDR2;
- mci->edac_ctl_cap = EDAC_FLAG_NONE;
- mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = "i5000_edac.c";
- mci->mod_ver = I5000_REVISION;
- mci->ctl_name = i5000_devs[dev_idx].ctl_name;
- mci->dev_name = pci_name(pdev);
- mci->ctl_page_to_phys = NULL;
-
- /* Set the function pointer to an actual operation function */
- mci->edac_check = i5000_check_error;
-
- /* initialize the MC control structure 'csrows' table
- * with the mapping and control information */
- if (i5000_init_csrows(mci)) {
- debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
- " because i5000_init_csrows() returned nonzero "
- "value\n");
- mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
- } else {
- debugf1("MC: Enable error reporting now\n");
- i5000_enable_error_reporting(mci);
- }
-
- /* add this new MC control structure to EDAC's list of MCs */
- if (edac_mc_add_mc(mci)) {
- debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
- __FILE__, __func__);
- /* FIXME: perhaps some code should go here that disables error
- * reporting if we just enabled it
- */
- goto fail1;
- }
-
- i5000_clear_error(mci);
-
- /* allocating generic PCI control info */
- i5000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
- if (!i5000_pci) {
- printk(KERN_WARNING
- "%s(): Unable to create PCI control\n",
- __func__);
- printk(KERN_WARNING
- "%s(): PCI error report via EDAC not setup\n",
- __func__);
- }
-
- return 0;
-
- /* Error exit unwinding stack */
-fail1:
-
- i5000_put_devices(mci);
-
-fail0:
- kobject_put(&mci->edac_mci_kobj);
- edac_mc_free(mci);
- return -ENODEV;
-}
-
-/*
- * i5000_init_one constructor for one instance of device
- *
- * returns:
- * negative on error
- * count (>= 0)
- */
-static int __devinit i5000_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- int rc;
-
- debugf0("MC: %s: %s()\n", __FILE__, __func__);
-
- /* wake up device */
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
-
- /* now probe and enable the device */
- return i5000_probe1(pdev, id->driver_data);
-}
-
-/*
- * i5000_remove_one destructor for one instance of device
- *
- */
-static void __devexit i5000_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
-
- debugf0("%s: %s()\n", __FILE__, __func__);
-
- if (i5000_pci)
- edac_pci_release_generic_ctl(i5000_pci);
-
- if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
- return;
-
- /* retrieve references to resources, and free those resources */
- i5000_put_devices(mci);
- kobject_put(&mci->edac_mci_kobj);
- edac_mc_free(mci);
-}
-
-/*
- * pci_device_id table for which devices we are looking for
- *
- * The "E500P" device is the first device supported.
- */
-static DEFINE_PCI_DEVICE_TABLE(i5000_pci_tbl) = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
- .driver_data = I5000P},
-
- {0,} /* 0 terminated list. */
-};
-
-MODULE_DEVICE_TABLE(pci, i5000_pci_tbl);
-
-/*
- * i5000_driver pci_driver structure for this module
- *
- */
-static struct pci_driver i5000_driver = {
- .name = KBUILD_BASENAME,
- .probe = i5000_init_one,
- .remove = __devexit_p(i5000_remove_one),
- .id_table = i5000_pci_tbl,
-};
-
-/*
- * i5000_init Module entry function
- * Try to initialize this module for its devices
- */
-static int __init i5000_init(void)
-{
- int pci_rc;
-
- debugf2("MC: %s: %s()\n", __FILE__, __func__);
-
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- pci_rc = pci_register_driver(&i5000_driver);
-
- return (pci_rc < 0) ? pci_rc : 0;
-}
-
-/*
- * i5000_exit() Module exit function
- * Unregister the driver
- */
-static void __exit i5000_exit(void)
-{
- debugf2("MC: %s: %s()\n", __FILE__, __func__);
- pci_unregister_driver(&i5000_driver);
-}
-
-module_init(i5000_init);
-module_exit(i5000_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR
- ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
-MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - "
- I5000_REVISION);
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
-module_param(misc_messages, int, 0444);
-MODULE_PARM_DESC(misc_messages, "Log miscellaneous non fatal messages");
-
diff --git a/ANDROID_3.4.5/drivers/edac/i5100_edac.c b/ANDROID_3.4.5/drivers/edac/i5100_edac.c
deleted file mode 100644
index d5007494..00000000
--- a/ANDROID_3.4.5/drivers/edac/i5100_edac.c
+++ /dev/null
@@ -1,1085 +0,0 @@
-/*
- * Intel 5100 Memory Controllers kernel module
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * This module is based on the following document:
- *
- * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
- * http://download.intel.com/design/chipsets/datashts/318378.pdf
- *
- * The Intel 5100 has two independent channels. The EDAC core currently
- * cannot reflect this configuration, so instead the chip-select
- * rows for each respective channel are laid out one after another,
- * the first half belonging to channel 0, the second half belonging
- * to channel 1.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/edac.h>
-#include <linux/delay.h>
-#include <linux/mmzone.h>
-
-#include "edac_core.h"
-
-/* register addresses */
-
-/* device 16, func 1 */
-#define I5100_MC 0x40 /* Memory Control Register */
-#define I5100_MC_SCRBEN_MASK (1 << 7)
-#define I5100_MC_SCRBDONE_MASK (1 << 4)
-#define I5100_MS 0x44 /* Memory Status Register */
-#define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */
-#define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */
-#define I5100_TOLM 0x6c /* Top of Low Memory */
-#define I5100_MIR0 0x80 /* Memory Interleave Range 0 */
-#define I5100_MIR1 0x84 /* Memory Interleave Range 1 */
-#define I5100_AMIR_0 0x8c /* Adjusted Memory Interleave Range 0 */
-#define I5100_AMIR_1 0x90 /* Adjusted Memory Interleave Range 1 */
-#define I5100_FERR_NF_MEM 0xa0 /* MC First Non Fatal Errors */
-#define I5100_FERR_NF_MEM_M16ERR_MASK (1 << 16)
-#define I5100_FERR_NF_MEM_M15ERR_MASK (1 << 15)
-#define I5100_FERR_NF_MEM_M14ERR_MASK (1 << 14)
-#define I5100_FERR_NF_MEM_M12ERR_MASK (1 << 12)
-#define I5100_FERR_NF_MEM_M11ERR_MASK (1 << 11)
-#define I5100_FERR_NF_MEM_M10ERR_MASK (1 << 10)
-#define I5100_FERR_NF_MEM_M6ERR_MASK (1 << 6)
-#define I5100_FERR_NF_MEM_M5ERR_MASK (1 << 5)
-#define I5100_FERR_NF_MEM_M4ERR_MASK (1 << 4)
-#define I5100_FERR_NF_MEM_M1ERR_MASK (1 << 1)
-#define I5100_FERR_NF_MEM_ANY_MASK \
- (I5100_FERR_NF_MEM_M16ERR_MASK | \
- I5100_FERR_NF_MEM_M15ERR_MASK | \
- I5100_FERR_NF_MEM_M14ERR_MASK | \
- I5100_FERR_NF_MEM_M12ERR_MASK | \
- I5100_FERR_NF_MEM_M11ERR_MASK | \
- I5100_FERR_NF_MEM_M10ERR_MASK | \
- I5100_FERR_NF_MEM_M6ERR_MASK | \
- I5100_FERR_NF_MEM_M5ERR_MASK | \
- I5100_FERR_NF_MEM_M4ERR_MASK | \
- I5100_FERR_NF_MEM_M1ERR_MASK)
-#define I5100_NERR_NF_MEM 0xa4 /* MC Next Non-Fatal Errors */
-#define I5100_EMASK_MEM 0xa8 /* MC Error Mask Register */
-
-/* device 21 and 22, func 0 */
-#define I5100_MTR_0 0x154 /* Memory Technology Registers 0-3 */
-#define I5100_DMIR 0x15c /* DIMM Interleave Range */
-#define I5100_VALIDLOG 0x18c /* Valid Log Markers */
-#define I5100_NRECMEMA 0x190 /* Non-Recoverable Memory Error Log Reg A */
-#define I5100_NRECMEMB 0x194 /* Non-Recoverable Memory Error Log Reg B */
-#define I5100_REDMEMA 0x198 /* Recoverable Memory Data Error Log Reg A */
-#define I5100_REDMEMB 0x19c /* Recoverable Memory Data Error Log Reg B */
-#define I5100_RECMEMA 0x1a0 /* Recoverable Memory Error Log Reg A */
-#define I5100_RECMEMB 0x1a4 /* Recoverable Memory Error Log Reg B */
-#define I5100_MTR_4 0x1b0 /* Memory Technology Registers 4,5 */
-
-/* bit field accessors */
-
-static inline u32 i5100_mc_scrben(u32 mc)
-{
- return mc >> 7 & 1;
-}
-
-static inline u32 i5100_mc_errdeten(u32 mc)
-{
- return mc >> 5 & 1;
-}
-
-static inline u32 i5100_mc_scrbdone(u32 mc)
-{
- return mc >> 4 & 1;
-}
-
-static inline u16 i5100_spddata_rdo(u16 a)
-{
- return a >> 15 & 1;
-}
-
-static inline u16 i5100_spddata_sbe(u16 a)
-{
- return a >> 13 & 1;
-}
-
-static inline u16 i5100_spddata_busy(u16 a)
-{
- return a >> 12 & 1;
-}
-
-static inline u16 i5100_spddata_data(u16 a)
-{
- return a & ((1 << 8) - 1);
-}
-
-static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba,
- u32 data, u32 cmd)
-{
- return ((dti & ((1 << 4) - 1)) << 28) |
- ((ckovrd & 1) << 27) |
- ((sa & ((1 << 3) - 1)) << 24) |
- ((ba & ((1 << 8) - 1)) << 16) |
- ((data & ((1 << 8) - 1)) << 8) |
- (cmd & 1);
-}
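-
-/*
- * Illustrative reading of the packing above: the SPD read issued later
- * by i5100_read_spd_byte() calls
- *
- *	i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr, 0, 0)
- *
- * i.e. dti = 0xa, ckovrd = 1, sa = DIMM select, ba = byte address,
- * data = 0 and cmd = 0, all packed into one 32-bit SPDCMD write.
- */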
-
-static inline u16 i5100_tolm_tolm(u16 a)
-{
- return a >> 12 & ((1 << 4) - 1);
-}
-
-static inline u16 i5100_mir_limit(u16 a)
-{
- return a >> 4 & ((1 << 12) - 1);
-}
-
-static inline u16 i5100_mir_way1(u16 a)
-{
- return a >> 1 & 1;
-}
-
-static inline u16 i5100_mir_way0(u16 a)
-{
- return a & 1;
-}
-
-static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a)
-{
- return a >> 28 & 1;
-}
-
-static inline u32 i5100_ferr_nf_mem_any(u32 a)
-{
- return a & I5100_FERR_NF_MEM_ANY_MASK;
-}
-
-static inline u32 i5100_nerr_nf_mem_any(u32 a)
-{
- return i5100_ferr_nf_mem_any(a);
-}
-
-static inline u32 i5100_dmir_limit(u32 a)
-{
- return a >> 16 & ((1 << 11) - 1);
-}
-
-static inline u32 i5100_dmir_rank(u32 a, u32 i)
-{
- return a >> (4 * i) & ((1 << 2) - 1);
-}
-
-static inline u16 i5100_mtr_present(u16 a)
-{
- return a >> 10 & 1;
-}
-
-static inline u16 i5100_mtr_ethrottle(u16 a)
-{
- return a >> 9 & 1;
-}
-
-static inline u16 i5100_mtr_width(u16 a)
-{
- return a >> 8 & 1;
-}
-
-static inline u16 i5100_mtr_numbank(u16 a)
-{
- return a >> 6 & 1;
-}
-
-static inline u16 i5100_mtr_numrow(u16 a)
-{
- return a >> 2 & ((1 << 2) - 1);
-}
-
-static inline u16 i5100_mtr_numcol(u16 a)
-{
- return a & ((1 << 2) - 1);
-}
-
-
-static inline u32 i5100_validlog_redmemvalid(u32 a)
-{
- return a >> 2 & 1;
-}
-
-static inline u32 i5100_validlog_recmemvalid(u32 a)
-{
- return a >> 1 & 1;
-}
-
-static inline u32 i5100_validlog_nrecmemvalid(u32 a)
-{
- return a & 1;
-}
-
-static inline u32 i5100_nrecmema_merr(u32 a)
-{
- return a >> 15 & ((1 << 5) - 1);
-}
-
-static inline u32 i5100_nrecmema_bank(u32 a)
-{
- return a >> 12 & ((1 << 3) - 1);
-}
-
-static inline u32 i5100_nrecmema_rank(u32 a)
-{
- return a >> 8 & ((1 << 3) - 1);
-}
-
-static inline u32 i5100_nrecmema_dm_buf_id(u32 a)
-{
- return a & ((1 << 8) - 1);
-}
-
-static inline u32 i5100_nrecmemb_cas(u32 a)
-{
- return a >> 16 & ((1 << 13) - 1);
-}
-
-static inline u32 i5100_nrecmemb_ras(u32 a)
-{
- return a & ((1 << 16) - 1);
-}
-
-static inline u32 i5100_redmemb_ecc_locator(u32 a)
-{
- return a & ((1 << 18) - 1);
-}
-
-static inline u32 i5100_recmema_merr(u32 a)
-{
- return i5100_nrecmema_merr(a);
-}
-
-static inline u32 i5100_recmema_bank(u32 a)
-{
- return i5100_nrecmema_bank(a);
-}
-
-static inline u32 i5100_recmema_rank(u32 a)
-{
- return i5100_nrecmema_rank(a);
-}
-
-static inline u32 i5100_recmema_dm_buf_id(u32 a)
-{
- return i5100_nrecmema_dm_buf_id(a);
-}
-
-static inline u32 i5100_recmemb_cas(u32 a)
-{
- return i5100_nrecmemb_cas(a);
-}
-
-static inline u32 i5100_recmemb_ras(u32 a)
-{
- return i5100_nrecmemb_ras(a);
-}
-
-/* some generic limits */
-#define I5100_MAX_RANKS_PER_CHAN 6
-#define I5100_CHANNELS 2
-#define I5100_MAX_RANKS_PER_DIMM 4
-#define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */
-#define I5100_MAX_DIMM_SLOTS_PER_CHAN 4
-#define I5100_MAX_RANK_INTERLEAVE 4
-#define I5100_MAX_DMIRS 5
-#define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ)
-
-struct i5100_priv {
- /* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
- int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN];
-
- /*
- * mainboard chip select map -- maps i5100 chip selects to
- * DIMM slot chip selects. In the case of only 4 ranks per
- * channel, the mapping is fairly obvious but not unique.
- * we map -1 -> NC and assume both channels use the same
- * map...
- *
- */
- int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM];
-
- /* memory interleave range */
- struct {
- u64 limit;
- unsigned way[2];
- } mir[I5100_CHANNELS];
-
- /* adjusted memory interleave range register */
- unsigned amir[I5100_CHANNELS];
-
- /* dimm interleave range */
- struct {
- unsigned rank[I5100_MAX_RANK_INTERLEAVE];
- u64 limit;
- } dmir[I5100_CHANNELS][I5100_MAX_DMIRS];
-
- /* memory technology registers... */
- struct {
- unsigned present; /* 0 or 1 */
- unsigned ethrottle; /* 0 or 1 */
- unsigned width; /* 4 or 8 bits */
- unsigned numbank; /* 2 or 3 lines */
- unsigned numrow; /* 13 .. 16 lines */
- unsigned numcol; /* 11 .. 12 lines */
- } mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN];
-
- u64 tolm; /* top of low memory in bytes */
- unsigned ranksperchan; /* number of ranks per channel */
-
- struct pci_dev *mc; /* device 16 func 1 */
- struct pci_dev *ch0mm; /* device 21 func 0 */
- struct pci_dev *ch1mm; /* device 22 func 0 */
-
- struct delayed_work i5100_scrubbing;
- int scrub_enable;
-};
-
-/* map a rank/chan to a slot number on the mainboard */
-static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
- int chan, int rank)
-{
- const struct i5100_priv *priv = mci->pvt_info;
- int i;
-
- for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
- int j;
- const int numrank = priv->dimm_numrank[chan][i];
-
- for (j = 0; j < numrank; j++)
- if (priv->dimm_csmap[i][j] == rank)
- return i * 2 + chan;
- }
-
- return -1;
-}
-
-static const char *i5100_err_msg(unsigned err)
-{
- static const char *merrs[] = {
- "unknown", /* 0 */
- "uncorrectable data ECC on replay", /* 1 */
- "unknown", /* 2 */
- "unknown", /* 3 */
- "aliased uncorrectable demand data ECC", /* 4 */
- "aliased uncorrectable spare-copy data ECC", /* 5 */
- "aliased uncorrectable patrol data ECC", /* 6 */
- "unknown", /* 7 */
- "unknown", /* 8 */
- "unknown", /* 9 */
- "non-aliased uncorrectable demand data ECC", /* 10 */
- "non-aliased uncorrectable spare-copy data ECC", /* 11 */
- "non-aliased uncorrectable patrol data ECC", /* 12 */
- "unknown", /* 13 */
- "correctable demand data ECC", /* 14 */
- "correctable spare-copy data ECC", /* 15 */
- "correctable patrol data ECC", /* 16 */
- "unknown", /* 17 */
- "SPD protocol error", /* 18 */
- "unknown", /* 19 */
- "spare copy initiated", /* 20 */
- "spare copy completed", /* 21 */
- };
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(merrs); i++)
- if (1 << i & err)
- return merrs[i];
-
- return "none";
-}
-
-/* convert csrow index into a rank (per channel -- 0..5) */
-static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
-{
- const struct i5100_priv *priv = mci->pvt_info;
-
- return csrow % priv->ranksperchan;
-}
-
-/* convert csrow index into a channel (0..1) */
-static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
-{
- const struct i5100_priv *priv = mci->pvt_info;
-
- return csrow / priv->ranksperchan;
-}
-
-static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
- int chan, int rank)
-{
- const struct i5100_priv *priv = mci->pvt_info;
-
- return chan * priv->ranksperchan + rank;
-}
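-
-/*
- * Quick sanity example of the csrow <-> (chan, rank) mapping above,
- * assuming a hypothetical ranksperchan of 6: csrow 8 decodes to
- * chan = 8 / 6 = 1 and rank = 8 % 6 = 2, and (chan 1, rank 2) maps
- * back to csrow 1 * 6 + 2 = 8.
- */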
-
-static void i5100_handle_ce(struct mem_ctl_info *mci,
- int chan,
- unsigned bank,
- unsigned rank,
- unsigned long syndrome,
- unsigned cas,
- unsigned ras,
- const char *msg)
-{
- const int csrow = i5100_rank_to_csrow(mci, chan, rank);
-
- printk(KERN_ERR
- "CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
- "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- chan, bank, rank, syndrome, cas, ras,
- csrow, mci->csrows[csrow].channels[0].label, msg);
-
- mci->ce_count++;
- mci->csrows[csrow].ce_count++;
- mci->csrows[csrow].channels[0].ce_count++;
-}
-
-static void i5100_handle_ue(struct mem_ctl_info *mci,
- int chan,
- unsigned bank,
- unsigned rank,
- unsigned long syndrome,
- unsigned cas,
- unsigned ras,
- const char *msg)
-{
- const int csrow = i5100_rank_to_csrow(mci, chan, rank);
-
- printk(KERN_ERR
- "UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
- "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- chan, bank, rank, syndrome, cas, ras,
- csrow, mci->csrows[csrow].channels[0].label, msg);
-
- mci->ue_count++;
- mci->csrows[csrow].ue_count++;
-}
-
-static void i5100_read_log(struct mem_ctl_info *mci, int chan,
- u32 ferr, u32 nerr)
-{
- struct i5100_priv *priv = mci->pvt_info;
- struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm;
- u32 dw;
- u32 dw2;
- unsigned syndrome = 0;
- unsigned ecc_loc = 0;
- unsigned merr;
- unsigned bank;
- unsigned rank;
- unsigned cas;
- unsigned ras;
-
- pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);
-
- if (i5100_validlog_redmemvalid(dw)) {
- pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
- syndrome = dw2;
- pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
- ecc_loc = i5100_redmemb_ecc_locator(dw2);
- }
-
- if (i5100_validlog_recmemvalid(dw)) {
- const char *msg;
-
- pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
- merr = i5100_recmema_merr(dw2);
- bank = i5100_recmema_bank(dw2);
- rank = i5100_recmema_rank(dw2);
-
- pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
- cas = i5100_recmemb_cas(dw2);
- ras = i5100_recmemb_ras(dw2);
-
- /* FIXME: not really sure if this is what merr is...
- */
- if (!merr)
- msg = i5100_err_msg(ferr);
- else
- msg = i5100_err_msg(nerr);
-
- i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
- }
-
- if (i5100_validlog_nrecmemvalid(dw)) {
- const char *msg;
-
- pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
- merr = i5100_nrecmema_merr(dw2);
- bank = i5100_nrecmema_bank(dw2);
- rank = i5100_nrecmema_rank(dw2);
-
- pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
- cas = i5100_nrecmemb_cas(dw2);
- ras = i5100_nrecmemb_ras(dw2);
-
- /* FIXME: not really sure if this is what merr is...
- */
- if (!merr)
- msg = i5100_err_msg(ferr);
- else
- msg = i5100_err_msg(nerr);
-
- i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
- }
-
- pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
-}
-
-static void i5100_check_error(struct mem_ctl_info *mci)
-{
- struct i5100_priv *priv = mci->pvt_info;
- u32 dw, dw2;
-
- pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
- if (i5100_ferr_nf_mem_any(dw)) {
-
- pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);
-
- i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw),
- i5100_ferr_nf_mem_any(dw),
- i5100_nerr_nf_mem_any(dw2));
-
- pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM, dw2);
- }
- pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);
-}
-
-/* The i5100 chipset will scrub the entire memory once, then
- * set a done bit. Continuous scrubbing is achieved by enqueuing
- * delayed work to a workqueue, checking every few minutes if
- * the scrubbing has completed and if so reinitiating it.
- */
-
-static void i5100_refresh_scrubbing(struct work_struct *work)
-{
- struct delayed_work *i5100_scrubbing = container_of(work,
- struct delayed_work,
- work);
- struct i5100_priv *priv = container_of(i5100_scrubbing,
- struct i5100_priv,
- i5100_scrubbing);
- u32 dw;
-
- pci_read_config_dword(priv->mc, I5100_MC, &dw);
-
- if (priv->scrub_enable) {
-
- pci_read_config_dword(priv->mc, I5100_MC, &dw);
-
- if (i5100_mc_scrbdone(dw)) {
- dw |= I5100_MC_SCRBEN_MASK;
- pci_write_config_dword(priv->mc, I5100_MC, dw);
- pci_read_config_dword(priv->mc, I5100_MC, &dw);
- }
-
- schedule_delayed_work(&(priv->i5100_scrubbing),
- I5100_SCRUB_REFRESH_RATE);
- }
-}
-/*
- * The bandwidth is based on experimentation; feel free to refine it.
- */
-static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
-{
- struct i5100_priv *priv = mci->pvt_info;
- u32 dw;
-
- pci_read_config_dword(priv->mc, I5100_MC, &dw);
- if (bandwidth) {
- priv->scrub_enable = 1;
- dw |= I5100_MC_SCRBEN_MASK;
- schedule_delayed_work(&(priv->i5100_scrubbing),
- I5100_SCRUB_REFRESH_RATE);
- } else {
- priv->scrub_enable = 0;
- dw &= ~I5100_MC_SCRBEN_MASK;
- cancel_delayed_work(&(priv->i5100_scrubbing));
- }
- pci_write_config_dword(priv->mc, I5100_MC, dw);
-
- pci_read_config_dword(priv->mc, I5100_MC, &dw);
-
- bandwidth = 5900000 * i5100_mc_scrben(dw);
-
- return bandwidth;
-}
-
-static int i5100_get_scrub_rate(struct mem_ctl_info *mci)
-{
- struct i5100_priv *priv = mci->pvt_info;
- u32 dw;
-
- pci_read_config_dword(priv->mc, I5100_MC, &dw);
-
- return 5900000 * i5100_mc_scrben(dw);
-}
-
-static struct pci_dev *pci_get_device_func(unsigned vendor,
- unsigned device,
- unsigned func)
-{
- struct pci_dev *ret = NULL;
-
- while (1) {
- ret = pci_get_device(vendor, device, ret);
-
- if (!ret)
- break;
-
- if (PCI_FUNC(ret->devfn) == func)
- break;
- }
-
- return ret;
-}
-
-static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
- int csrow)
-{
- struct i5100_priv *priv = mci->pvt_info;
- const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
- const unsigned chan = i5100_csrow_to_chan(mci, csrow);
- unsigned addr_lines;
-
- /* dimm present? */
- if (!priv->mtr[chan][chan_rank].present)
- return 0ULL;
-
- addr_lines =
- I5100_DIMM_ADDR_LINES +
- priv->mtr[chan][chan_rank].numcol +
- priv->mtr[chan][chan_rank].numrow +
- priv->mtr[chan][chan_rank].numbank;
-
- return (unsigned long)
- ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
-}
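-
-/*
- * For illustration, with made-up MTR geometry of numbank = 2,
- * numrow = 14 and numcol = 10, addr_lines = 3 + 10 + 14 + 2 = 29, so
- * the rank spans 2^29 bytes (512 MiB) and i5100_npages() reports
- * 131072 pages with a 4 KiB PAGE_SIZE.
- */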
-
-static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
-{
- struct i5100_priv *priv = mci->pvt_info;
- struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
- int i;
-
- for (i = 0; i < I5100_CHANNELS; i++) {
- int j;
- struct pci_dev *pdev = mms[i];
-
- for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
- const unsigned addr =
- (j < 4) ? I5100_MTR_0 + j * 2 :
- I5100_MTR_4 + (j - 4) * 2;
- u16 w;
-
- pci_read_config_word(pdev, addr, &w);
-
- priv->mtr[i][j].present = i5100_mtr_present(w);
- priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w);
- priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w);
- priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w);
- priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w);
- priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w);
- }
- }
-}
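-
-/*
- * Example of the decode above with a hypothetical raw MTR word: if the
- * register reports width bit = 1, numbank bit = 1, numrow field = 1 and
- * numcol field = 0, the rank is recorded as x8 with 3 bank lines,
- * 14 row lines and 10 column lines.
- */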
-
-/*
- * FIXME: make this into a real i2c adapter (so that dimm-decode
- * will work)?
- */
-static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
- u8 ch, u8 slot, u8 addr, u8 *byte)
-{
- struct i5100_priv *priv = mci->pvt_info;
- u16 w;
- unsigned long et;
-
- pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
- if (i5100_spddata_busy(w))
- return -1;
-
- pci_write_config_dword(priv->mc, I5100_SPDCMD,
- i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr,
- 0, 0));
-
- /* wait up to 100ms */
- et = jiffies + HZ / 10;
- udelay(100);
- while (1) {
- pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
- if (!i5100_spddata_busy(w))
- break;
- udelay(100);
- }
-
- if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w))
- return -1;
-
- *byte = i5100_spddata_data(w);
-
- return 0;
-}
-
-/*
- * fill dimm chip select map
- *
- * FIXME:
- * o not the only way to map chip selects to dimm slots
- * o investigate if there is some way to obtain this map from the bios
- */
-static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
-{
- struct i5100_priv *priv = mci->pvt_info;
- int i;
-
- for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
- int j;
-
- for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
- priv->dimm_csmap[i][j] = -1; /* default NC */
- }
-
- /* only 2 chip selects per slot... */
- if (priv->ranksperchan == 4) {
- priv->dimm_csmap[0][0] = 0;
- priv->dimm_csmap[0][1] = 3;
- priv->dimm_csmap[1][0] = 1;
- priv->dimm_csmap[1][1] = 2;
- priv->dimm_csmap[2][0] = 2;
- priv->dimm_csmap[3][0] = 3;
- } else {
- priv->dimm_csmap[0][0] = 0;
- priv->dimm_csmap[0][1] = 1;
- priv->dimm_csmap[1][0] = 2;
- priv->dimm_csmap[1][1] = 3;
- priv->dimm_csmap[2][0] = 4;
- priv->dimm_csmap[2][1] = 5;
- }
-}
-
-static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
- struct mem_ctl_info *mci)
-{
- struct i5100_priv *priv = mci->pvt_info;
- int i;
-
- for (i = 0; i < I5100_CHANNELS; i++) {
- int j;
-
- for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) {
- u8 rank;
-
- if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
- priv->dimm_numrank[i][j] = 0;
- else
- priv->dimm_numrank[i][j] = (rank & 3) + 1;
- }
- }
-
- i5100_init_dimm_csmap(mci);
-}
-
-static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
- struct mem_ctl_info *mci)
-{
- u16 w;
- u32 dw;
- struct i5100_priv *priv = mci->pvt_info;
- struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
- int i;
-
- pci_read_config_word(pdev, I5100_TOLM, &w);
- priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024;
-
- pci_read_config_word(pdev, I5100_MIR0, &w);
- priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28;
- priv->mir[0].way[1] = i5100_mir_way1(w);
- priv->mir[0].way[0] = i5100_mir_way0(w);
-
- pci_read_config_word(pdev, I5100_MIR1, &w);
- priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28;
- priv->mir[1].way[1] = i5100_mir_way1(w);
- priv->mir[1].way[0] = i5100_mir_way0(w);
-
- pci_read_config_word(pdev, I5100_AMIR_0, &w);
- priv->amir[0] = w;
- pci_read_config_word(pdev, I5100_AMIR_1, &w);
- priv->amir[1] = w;
-
- for (i = 0; i < I5100_CHANNELS; i++) {
- int j;
-
- for (j = 0; j < 5; j++) {
- int k;
-
- pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);
-
- priv->dmir[i][j].limit =
- (u64) i5100_dmir_limit(dw) << 28;
- for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++)
- priv->dmir[i][j].rank[k] =
- i5100_dmir_rank(dw, k);
- }
- }
-
- i5100_init_mtr(mci);
-}
-
-static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
-{
- int i;
- unsigned long total_pages = 0UL;
- struct i5100_priv *priv = mci->pvt_info;
-
- for (i = 0; i < mci->nr_csrows; i++) {
- const unsigned long npages = i5100_npages(mci, i);
- const unsigned chan = i5100_csrow_to_chan(mci, i);
- const unsigned rank = i5100_csrow_to_rank(mci, i);
-
- if (!npages)
- continue;
-
- /*
- * FIXME: these two are totally bogus -- I don't see how to
- * map them correctly to this structure...
- */
- mci->csrows[i].first_page = total_pages;
- mci->csrows[i].last_page = total_pages + npages - 1;
- mci->csrows[i].page_mask = 0UL;
-
- mci->csrows[i].nr_pages = npages;
- mci->csrows[i].grain = 32;
- mci->csrows[i].csrow_idx = i;
- mci->csrows[i].dtype =
- (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
- mci->csrows[i].ue_count = 0;
- mci->csrows[i].ce_count = 0;
- mci->csrows[i].mtype = MEM_RDDR2;
- mci->csrows[i].edac_mode = EDAC_SECDED;
- mci->csrows[i].mci = mci;
- mci->csrows[i].nr_channels = 1;
- mci->csrows[i].channels[0].chan_idx = 0;
- mci->csrows[i].channels[0].ce_count = 0;
- mci->csrows[i].channels[0].csrow = mci->csrows + i;
- snprintf(mci->csrows[i].channels[0].label,
- sizeof(mci->csrows[i].channels[0].label),
- "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
-
- total_pages += npages;
- }
-}
-
-static int __devinit i5100_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- int rc;
- struct mem_ctl_info *mci;
- struct i5100_priv *priv;
- struct pci_dev *ch0mm, *ch1mm;
- int ret = 0;
- u32 dw;
- int ranksperch;
-
- if (PCI_FUNC(pdev->devfn) != 1)
- return -ENODEV;
-
- rc = pci_enable_device(pdev);
- if (rc < 0) {
- ret = rc;
- goto bail;
- }
-
- /* ECC enabled? */
- pci_read_config_dword(pdev, I5100_MC, &dw);
- if (!i5100_mc_errdeten(dw)) {
- printk(KERN_INFO "i5100_edac: ECC not enabled.\n");
- ret = -ENODEV;
- goto bail_pdev;
- }
-
- /* figure out how many ranks, from strapped state of 48GB_Mode input */
- pci_read_config_dword(pdev, I5100_MS, &dw);
- ranksperch = !!(dw & (1 << 8)) * 2 + 4;
-
- /* enable error reporting... */
- pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
- dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
- pci_write_config_dword(pdev, I5100_EMASK_MEM, dw);
-
- /* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */
- ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_5100_21, 0);
- if (!ch0mm) {
- ret = -ENODEV;
- goto bail_pdev;
- }
-
- rc = pci_enable_device(ch0mm);
- if (rc < 0) {
- ret = rc;
- goto bail_ch0;
- }
-
- /* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */
- ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_5100_22, 0);
- if (!ch1mm) {
- ret = -ENODEV;
- goto bail_disable_ch0;
- }
-
- rc = pci_enable_device(ch1mm);
- if (rc < 0) {
- ret = rc;
- goto bail_ch1;
- }
-
- mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
- if (!mci) {
- ret = -ENOMEM;
- goto bail_disable_ch1;
- }
-
- mci->dev = &pdev->dev;
-
- priv = mci->pvt_info;
- priv->ranksperchan = ranksperch;
- priv->mc = pdev;
- priv->ch0mm = ch0mm;
- priv->ch1mm = ch1mm;
-
- INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
-
- /* If scrubbing was already enabled by the bios, start maintaining it */
- pci_read_config_dword(pdev, I5100_MC, &dw);
- if (i5100_mc_scrben(dw)) {
- priv->scrub_enable = 1;
- schedule_delayed_work(&(priv->i5100_scrubbing),
- I5100_SCRUB_REFRESH_RATE);
- }
-
- i5100_init_dimm_layout(pdev, mci);
- i5100_init_interleaving(pdev, mci);
-
- mci->mtype_cap = MEM_FLAG_FB_DDR2;
- mci->edac_ctl_cap = EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_SECDED;
- mci->mod_name = "i5100_edac.c";
- mci->mod_ver = "not versioned";
- mci->ctl_name = "i5100";
- mci->dev_name = pci_name(pdev);
- mci->ctl_page_to_phys = NULL;
-
- mci->edac_check = i5100_check_error;
- mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
- mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
-
- i5100_init_csrows(mci);
-
- /* this strange construction seems to be in every driver, dunno why */
- switch (edac_op_state) {
- case EDAC_OPSTATE_POLL:
- case EDAC_OPSTATE_NMI:
- break;
- default:
- edac_op_state = EDAC_OPSTATE_POLL;
- break;
- }
-
- if (edac_mc_add_mc(mci)) {
- ret = -ENODEV;
- goto bail_scrub;
- }
-
- return ret;
-
-bail_scrub:
- priv->scrub_enable = 0;
- cancel_delayed_work_sync(&(priv->i5100_scrubbing));
- edac_mc_free(mci);
-
-bail_disable_ch1:
- pci_disable_device(ch1mm);
-
-bail_ch1:
- pci_dev_put(ch1mm);
-
-bail_disable_ch0:
- pci_disable_device(ch0mm);
-
-bail_ch0:
- pci_dev_put(ch0mm);
-
-bail_pdev:
- pci_disable_device(pdev);
-
-bail:
- return ret;
-}
-
-static void __devexit i5100_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
- struct i5100_priv *priv;
-
- mci = edac_mc_del_mc(&pdev->dev);
-
- if (!mci)
- return;
-
- priv = mci->pvt_info;
-
- priv->scrub_enable = 0;
- cancel_delayed_work_sync(&(priv->i5100_scrubbing));
-
- pci_disable_device(pdev);
- pci_disable_device(priv->ch0mm);
- pci_disable_device(priv->ch1mm);
- pci_dev_put(priv->ch0mm);
- pci_dev_put(priv->ch1mm);
-
- edac_mc_free(mci);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(i5100_pci_tbl) = {
- /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);
-
-static struct pci_driver i5100_driver = {
- .name = KBUILD_BASENAME,
- .probe = i5100_init_one,
- .remove = __devexit_p(i5100_remove_one),
- .id_table = i5100_pci_tbl,
-};
-
-static int __init i5100_init(void)
-{
- int pci_rc;
-
- pci_rc = pci_register_driver(&i5100_driver);
-
- return (pci_rc < 0) ? pci_rc : 0;
-}
-
-static void __exit i5100_exit(void)
-{
- pci_unregister_driver(&i5100_driver);
-}
-
-module_init(i5100_init);
-module_exit(i5100_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR
- ("Arthur Jones <ajones@riverbed.com>");
-MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");
diff --git a/ANDROID_3.4.5/drivers/edac/i5400_edac.c b/ANDROID_3.4.5/drivers/edac/i5400_edac.c
deleted file mode 100644
index 1869a101..00000000
--- a/ANDROID_3.4.5/drivers/edac/i5400_edac.c
+++ /dev/null
@@ -1,1465 +0,0 @@
-/*
- * Intel 5400 class Memory Controllers kernel module (Seaburg)
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Copyright (c) 2008 by:
- * Ben Woodard <woodard@redhat.com>
- * Mauro Carvalho Chehab <mchehab@redhat.com>
- *
- * Red Hat Inc. http://www.redhat.com
- *
- * Forked and adapted from the i5000_edac driver which was
- * written by Douglas Thompson Linux Networx <norsk5@xmission.com>
- *
- * This module is based on the following document:
- *
- * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
- * http://developer.intel.com/design/chipsets/datashts/313070.htm
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/slab.h>
-#include <linux/edac.h>
-#include <linux/mmzone.h>
-
-#include "edac_core.h"
-
-/*
- * Alter this version for the I5400 module when modifications are made
- */
-#define I5400_REVISION " Ver: 1.0.0"
-
-#define EDAC_MOD_STR "i5400_edac"
-
-#define i5400_printk(level, fmt, arg...) \
- edac_printk(level, "i5400", fmt, ##arg)
-
-#define i5400_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)
-
-/* Limits for i5400 */
-#define NUM_MTRS_PER_BRANCH 4
-#define CHANNELS_PER_BRANCH 2
-#define MAX_DIMMS_PER_CHANNEL NUM_MTRS_PER_BRANCH
-#define MAX_CHANNELS 4
-/* max possible csrows per channel */
-#define MAX_CSROWS (MAX_DIMMS_PER_CHANNEL)
-
-/* Device 16,
- * Function 0: System Address
- * Function 1: Memory Branch Map, Control, Errors Register
- * Function 2: FSB Error Registers
- *
- * All 3 functions of Device 16 (0,1,2) share the SAME DID and
- * uses PCI_DEVICE_ID_INTEL_5400_ERR for device 16 (0,1,2),
- * PCI_DEVICE_ID_INTEL_5400_FBD0 and PCI_DEVICE_ID_INTEL_5400_FBD1
- * for device 21 (0,1).
- */
-
- /* OFFSETS for Function 0 */
-#define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */
-#define MAXCH 0x56 /* Max Channel Number */
-#define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */
-
- /* OFFSETS for Function 1 */
-#define TOLM 0x6C
-#define REDMEMB 0x7C
-#define REC_ECC_LOCATOR_ODD(x) ((x) & 0x3fe00) /* bits [17:9] indicate ODD, [8:0] indicate EVEN */
-#define MIR0 0x80
-#define MIR1 0x84
-#define AMIR0 0x8c
-#define AMIR1 0x90
-
- /* Fatal error registers */
-#define FERR_FAT_FBD 0x98 /* also called as FERR_FAT_FB_DIMM at datasheet */
-#define FERR_FAT_FBDCHAN (3<<28) /* channel index where the highest-order error occurred */
-
-#define NERR_FAT_FBD 0x9c
-#define FERR_NF_FBD 0xa0 /* also called FERR_NFAT_FB_DIMM in the datasheet */
-
- /* Non-fatal error register */
-#define NERR_NF_FBD 0xa4
-
- /* Enable error mask */
-#define EMASK_FBD 0xa8
-
-#define ERR0_FBD 0xac
-#define ERR1_FBD 0xb0
-#define ERR2_FBD 0xb4
-#define MCERR_FBD 0xb8
-
- /* No OFFSETS for Device 16 Function 2 */
-
-/*
- * Device 21,
- * Function 0: Memory Map Branch 0
- *
- * Device 22,
- * Function 0: Memory Map Branch 1
- */
-
- /* OFFSETS for Function 0 */
-#define AMBPRESENT_0 0x64
-#define AMBPRESENT_1 0x66
-#define MTR0 0x80
-#define MTR1 0x82
-#define MTR2 0x84
-#define MTR3 0x86
-
- /* OFFSETS for Function 1 */
-#define NRECFGLOG 0x74
-#define RECFGLOG 0x78
-#define NRECMEMA 0xbe
-#define NRECMEMB 0xc0
-#define NRECFB_DIMMA 0xc4
-#define NRECFB_DIMMB 0xc8
-#define NRECFB_DIMMC 0xcc
-#define NRECFB_DIMMD 0xd0
-#define NRECFB_DIMME 0xd4
-#define NRECFB_DIMMF 0xd8
-#define REDMEMA 0xdC
-#define RECMEMA 0xf0
-#define RECMEMB 0xf4
-#define RECFB_DIMMA 0xf8
-#define RECFB_DIMMB 0xec
-#define RECFB_DIMMC 0xf0
-#define RECFB_DIMMD 0xf4
-#define RECFB_DIMME 0xf8
-#define RECFB_DIMMF 0xfC
-
-/*
- * Error indicator bits and masks
- * Error masks are according with Table 5-17 of i5400 datasheet
- */
-
-enum error_mask {
- EMASK_M1 = 1<<0, /* Memory Write error on non-redundant retry */
- EMASK_M2 = 1<<1, /* Memory or FB-DIMM configuration CRC read error */
- EMASK_M3 = 1<<2, /* Reserved */
- EMASK_M4 = 1<<3, /* Uncorrectable Data ECC on Replay */
- EMASK_M5 = 1<<4, /* Aliased Uncorrectable Non-Mirrored Demand Data ECC */
- EMASK_M6 = 1<<5, /* Unsupported on i5400 */
- EMASK_M7 = 1<<6, /* Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */
- EMASK_M8 = 1<<7, /* Aliased Uncorrectable Patrol Data ECC */
- EMASK_M9 = 1<<8, /* Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC */
- EMASK_M10 = 1<<9, /* Unsupported on i5400 */
- EMASK_M11 = 1<<10, /* Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */
- EMASK_M12 = 1<<11, /* Non-Aliased Uncorrectable Patrol Data ECC */
- EMASK_M13 = 1<<12, /* Memory Write error on first attempt */
- EMASK_M14 = 1<<13, /* FB-DIMM Configuration Write error on first attempt */
- EMASK_M15 = 1<<14, /* Memory or FB-DIMM configuration CRC read error */
- EMASK_M16 = 1<<15, /* Channel Failed-Over Occurred */
- EMASK_M17 = 1<<16, /* Correctable Non-Mirrored Demand Data ECC */
- EMASK_M18 = 1<<17, /* Unsupported on i5400 */
- EMASK_M19 = 1<<18, /* Correctable Resilver- or Spare-Copy Data ECC */
- EMASK_M20 = 1<<19, /* Correctable Patrol Data ECC */
- EMASK_M21 = 1<<20, /* FB-DIMM Northbound parity error on FB-DIMM Sync Status */
- EMASK_M22 = 1<<21, /* SPD protocol Error */
- EMASK_M23 = 1<<22, /* Non-Redundant Fast Reset Timeout */
- EMASK_M24 = 1<<23, /* Refresh error */
- EMASK_M25 = 1<<24, /* Memory Write error on redundant retry */
- EMASK_M26 = 1<<25, /* Redundant Fast Reset Timeout */
- EMASK_M27 = 1<<26, /* Correctable Counter Threshold Exceeded */
- EMASK_M28 = 1<<27, /* DIMM-Spare Copy Completed */
- EMASK_M29 = 1<<28, /* DIMM-Isolation Completed */
-};
-
-/*
- * Names to translate bit error into something useful
- */
-static const char *error_name[] = {
- [0] = "Memory Write error on non-redundant retry",
- [1] = "Memory or FB-DIMM configuration CRC read error",
- /* Reserved */
- [3] = "Uncorrectable Data ECC on Replay",
- [4] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
- /* M6 Unsupported on i5400 */
- [6] = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
- [7] = "Aliased Uncorrectable Patrol Data ECC",
- [8] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
- /* M10 Unsupported on i5400 */
- [10] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
- [11] = "Non-Aliased Uncorrectable Patrol Data ECC",
- [12] = "Memory Write error on first attempt",
- [13] = "FB-DIMM Configuration Write error on first attempt",
- [14] = "Memory or FB-DIMM configuration CRC read error",
- [15] = "Channel Failed-Over Occurred",
- [16] = "Correctable Non-Mirrored Demand Data ECC",
- /* M18 Unsupported on i5400 */
- [18] = "Correctable Resilver- or Spare-Copy Data ECC",
- [19] = "Correctable Patrol Data ECC",
- [20] = "FB-DIMM Northbound parity error on FB-DIMM Sync Status",
- [21] = "SPD protocol Error",
- [22] = "Non-Redundant Fast Reset Timeout",
- [23] = "Refresh error",
- [24] = "Memory Write error on redundant retry",
- [25] = "Redundant Fast Reset Timeout",
- [26] = "Correctable Counter Threshold Exceeded",
- [27] = "DIMM-Spare Copy Completed",
- [28] = "DIMM-Isolation Completed",
-};
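
Each EMASK_Mn flag occupies bit n-1, so a set bit in an already-masked error word indexes error_name[] directly; the processing routines below rely on this via find_first_bit(). A minimal sketch under that assumption (illustrative only, not part of the original driver):

static const char *example_error_string(unsigned long allErrors)
{
	/* find_first_bit() returns ARRAY_SIZE(error_name) when no bit is set */
	unsigned int errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));

	if (errnum >= ARRAY_SIZE(error_name) || !error_name[errnum])
		return "Reserved";

	/* e.g. EMASK_M17 (bit 16) -> "Correctable Non-Mirrored Demand Data ECC" */
	return error_name[errnum];
}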
-
-/* Fatal errors */
-#define ERROR_FAT_MASK (EMASK_M1 | \
- EMASK_M2 | \
- EMASK_M23)
-
-/* Correctable errors */
-#define ERROR_NF_CORRECTABLE (EMASK_M27 | \
- EMASK_M20 | \
- EMASK_M19 | \
- EMASK_M18 | \
- EMASK_M17 | \
- EMASK_M16)
-#define ERROR_NF_DIMM_SPARE (EMASK_M29 | \
- EMASK_M28)
-#define ERROR_NF_SPD_PROTOCOL (EMASK_M22)
-#define ERROR_NF_NORTH_CRC (EMASK_M21)
-
-/* Recoverable errors */
-#define ERROR_NF_RECOVERABLE (EMASK_M26 | \
- EMASK_M25 | \
- EMASK_M24 | \
- EMASK_M15 | \
- EMASK_M14 | \
- EMASK_M13 | \
- EMASK_M12 | \
- EMASK_M11 | \
- EMASK_M9 | \
- EMASK_M8 | \
- EMASK_M7 | \
- EMASK_M5)
-
-/* uncorrectable errors */
-#define ERROR_NF_UNCORRECTABLE (EMASK_M4)
-
-/* mask to all non-fatal errors */
-#define ERROR_NF_MASK (ERROR_NF_CORRECTABLE | \
- ERROR_NF_UNCORRECTABLE | \
- ERROR_NF_RECOVERABLE | \
- ERROR_NF_DIMM_SPARE | \
- ERROR_NF_SPD_PROTOCOL | \
- ERROR_NF_NORTH_CRC)
-
-/*
- * Define error masks for the several registers
- */
-
-/* Enable all fatal and non fatal errors */
-#define ENABLE_EMASK_ALL (ERROR_FAT_MASK | ERROR_NF_MASK)
-
-/* mask for fatal error registers */
-#define FERR_FAT_MASK ERROR_FAT_MASK
-
-/* masks for non-fatal error register */
-static inline int to_nf_mask(unsigned int mask)
-{
- return (mask & EMASK_M29) | (mask >> 3);
-};
-
-static inline int from_nf_ferr(unsigned int mask)
-{
- return (mask & EMASK_M29) | /* Bit 28 */
- (mask & ((1 << 28) - 1) << 3); /* Bits 0 to 27 */
-};
-
-#define FERR_NF_MASK to_nf_mask(ERROR_NF_MASK)
-#define FERR_NF_CORRECTABLE to_nf_mask(ERROR_NF_CORRECTABLE)
-#define FERR_NF_DIMM_SPARE to_nf_mask(ERROR_NF_DIMM_SPARE)
-#define FERR_NF_SPD_PROTOCOL to_nf_mask(ERROR_NF_SPD_PROTOCOL)
-#define FERR_NF_NORTH_CRC to_nf_mask(ERROR_NF_NORTH_CRC)
-#define FERR_NF_RECOVERABLE to_nf_mask(ERROR_NF_RECOVERABLE)
-#define FERR_NF_UNCORRECTABLE to_nf_mask(ERROR_NF_UNCORRECTABLE)
-
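The non-fatal FERR/NERR registers use a different bit layout from EMASK_FBD, which is why the FERR_NF_* masks above are derived with to_nf_mask(). A hedged sketch (not from the original source) of classifying a raw FERR_NF_FBD value with those derived masks, mirroring the priority used later in i5400_process_nonfatal_error_info():

/* Hypothetical helper, for illustration only */
static const char *classify_ferr_nf(u32 ferr_nf_fbd)
{
	u32 err = ferr_nf_fbd & FERR_NF_MASK;

	if (err & FERR_NF_UNCORRECTABLE)
		return "non-fatal uncorrectable";
	if (err & FERR_NF_RECOVERABLE)
		return "non-fatal recoverable";
	if (err & FERR_NF_CORRECTABLE)
		return "correctable";
	if (err & FERR_NF_DIMM_SPARE)
		return "DIMM-spare event";
	return "other (SPD/CRC) or none";
}
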
-/* Defines to extract the various fields from the
- * MTRx - Memory Technology Registers
- */
-#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (1 << 10))
-#define MTR_DIMMS_ETHROTTLE(mtr) ((mtr) & (1 << 9))
-#define MTR_DRAM_WIDTH(mtr) (((mtr) & (1 << 8)) ? 8 : 4)
-#define MTR_DRAM_BANKS(mtr) (((mtr) & (1 << 6)) ? 8 : 4)
-#define MTR_DRAM_BANKS_ADDR_BITS(mtr) ((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
-#define MTR_DIMM_RANK(mtr) (((mtr) >> 5) & 0x1)
-#define MTR_DIMM_RANK_ADDR_BITS(mtr) (MTR_DIMM_RANK(mtr) ? 2 : 1)
-#define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3)
-#define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13)
-#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
-#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
-
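As a worked illustration of these field extractors (hypothetical register value, not a hardware capture):

/*
 * Example: mtr = 0x0465
 *   MTR_DIMMS_PRESENT(0x0465)       -> non-zero (DIMM present)
 *   MTR_DRAM_WIDTH(0x0465)          -> 4   (x4 devices)
 *   MTR_DRAM_BANKS(0x0465)          -> 8   (3 bank address bits)
 *   MTR_DIMM_RANK(0x0465)           -> 1   (dual rank)
 *   MTR_DIMM_ROWS_ADDR_BITS(0x0465) -> 14
 *   MTR_DIMM_COLS_ADDR_BITS(0x0465) -> 11
 */
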
-/* This applies to FERR_NF_FB-DIMM as well as FERR_FAT_FB-DIMM */
-static inline int extract_fbdchan_indx(u32 x)
-{
- return (x>>28) & 0x3;
-}
-
-#ifdef CONFIG_EDAC_DEBUG
-/* MTR NUMROW */
-static const char *numrow_toString[] = {
- "8,192 - 13 rows",
- "16,384 - 14 rows",
- "32,768 - 15 rows",
- "65,536 - 16 rows"
-};
-
-/* MTR NUMCOL */
-static const char *numcol_toString[] = {
- "1,024 - 10 columns",
- "2,048 - 11 columns",
- "4,096 - 12 columns",
- "reserved"
-};
-#endif
-
-/* Device name and register DID (Device ID) */
-struct i5400_dev_info {
- const char *ctl_name; /* name for this device */
- u16 fsb_mapping_errors; /* DID for the branchmap,control */
-};
-
-/* Table of devices attributes supported by this driver */
-static const struct i5400_dev_info i5400_devs[] = {
- {
- .ctl_name = "I5400",
- .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_5400_ERR,
- },
-};
-
-struct i5400_dimm_info {
- int megabytes; /* size, 0 means not present */
-};
-
-/* driver private data structure */
-struct i5400_pvt {
- struct pci_dev *system_address; /* 16.0 */
- struct pci_dev *branchmap_werrors; /* 16.1 */
- struct pci_dev *fsb_error_regs; /* 16.2 */
- struct pci_dev *branch_0; /* 21.0 */
- struct pci_dev *branch_1; /* 22.0 */
-
- u16 tolm; /* top of low memory */
- u64 ambase; /* AMB BAR */
-
- u16 mir0, mir1;
-
- u16 b0_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technology Reg */
- u16 b0_ambpresent0; /* Branch 0, Channel 0 */
- u16 b0_ambpresent1; /* Branch 0, Channel 1 */
-
- u16 b1_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technology Reg */
- u16 b1_ambpresent0; /* Branch 1, Channel 0 */
- u16 b1_ambpresent1; /* Branch 1, Channel 1 */
-
- /* DIMM information matrix, allocating architecture maximums */
- struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
-
- /* Actual values for this controller */
- int maxch; /* Max channels */
- int maxdimmperch; /* Max DIMMs per channel */
-};
-
-/* I5400 MCH error information retrieved from Hardware */
-struct i5400_error_info {
- /* These registers are always read from the MC */
- u32 ferr_fat_fbd; /* First Errors Fatal */
- u32 nerr_fat_fbd; /* Next Errors Fatal */
- u32 ferr_nf_fbd; /* First Errors Non-Fatal */
- u32 nerr_nf_fbd; /* Next Errors Non-Fatal */
-
- /* These registers are input ONLY if there was a Recoverable Error */
- u32 redmemb; /* Recoverable Mem Data Error log B */
- u16 recmema; /* Recoverable Mem Error log A */
- u32 recmemb; /* Recoverable Mem Error log B */
-
- /* These registers are input ONLY if there was a Non-Rec Error */
- u16 nrecmema; /* Non-Recoverable Mem log A */
- u16 nrecmemb; /* Non-Recoverable Mem log B */
-
-};
-
-/* Note that nrec_rdwr changed from NRECMEMA to NRECMEMB between the 5000 and
-   the 5400; it is better to use an inline function than a macro in this case */
-static inline int nrec_bank(struct i5400_error_info *info)
-{
- return ((info->nrecmema) >> 12) & 0x7;
-}
-static inline int nrec_rank(struct i5400_error_info *info)
-{
- return ((info->nrecmema) >> 8) & 0xf;
-}
-static inline int nrec_buf_id(struct i5400_error_info *info)
-{
- return ((info->nrecmema)) & 0xff;
-}
-static inline int nrec_rdwr(struct i5400_error_info *info)
-{
- return (info->nrecmemb) >> 31;
-}
-/* This applies to both the NREC and REC strings, so it can be used with
-   nrec_rdwr and rec_rdwr */
-static inline const char *rdwr_str(int rdwr)
-{
- return rdwr ? "Write" : "Read";
-}
-static inline int nrec_cas(struct i5400_error_info *info)
-{
- return ((info->nrecmemb) >> 16) & 0x1fff;
-}
-static inline int nrec_ras(struct i5400_error_info *info)
-{
- return (info->nrecmemb) & 0xffff;
-}
-static inline int rec_bank(struct i5400_error_info *info)
-{
- return ((info->recmema) >> 12) & 0x7;
-}
-static inline int rec_rank(struct i5400_error_info *info)
-{
- return ((info->recmema) >> 8) & 0xf;
-}
-static inline int rec_rdwr(struct i5400_error_info *info)
-{
- return (info->recmemb) >> 31;
-}
-static inline int rec_cas(struct i5400_error_info *info)
-{
- return ((info->recmemb) >> 16) & 0x1fff;
-}
-static inline int rec_ras(struct i5400_error_info *info)
-{
- return (info->recmemb) & 0xffff;
-}
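
To make the bit layout these accessors assume concrete, a worked example with hypothetical register contents (not real captures):

/*
 * Example: recmema = 0x3a12, recmemb = 0x80150042
 *   rec_bank() -> (0x3a12 >> 12) & 0x7        = 3
 *   rec_rank() -> (0x3a12 >> 8)  & 0xf        = 10
 *   rec_rdwr() ->  0x80150042 >> 31           = 1   (rdwr_str() -> "Write")
 *   rec_cas()  -> (0x80150042 >> 16) & 0x1fff = 21
 *   rec_ras()  ->  0x80150042 & 0xffff        = 66
 */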
-
-static struct edac_pci_ctl_info *i5400_pci;
-
-/*
- * i5400_get_error_info Retrieve the hardware error information from
- * the hardware and cache it in the 'info'
- * structure
- */
-static void i5400_get_error_info(struct mem_ctl_info *mci,
- struct i5400_error_info *info)
-{
- struct i5400_pvt *pvt;
- u32 value;
-
- pvt = mci->pvt_info;
-
- /* read in the 1st FATAL error register */
- pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);
-
- /* Mask only the bits that the doc says are valid */
- value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);
-
- /* If there is an error, then read in the
- NEXT FATAL error register and the Memory Error Log Register A
- */
- if (value & FERR_FAT_MASK) {
- info->ferr_fat_fbd = value;
-
- /* harvest the various error data we need */
- pci_read_config_dword(pvt->branchmap_werrors,
- NERR_FAT_FBD, &info->nerr_fat_fbd);
- pci_read_config_word(pvt->branchmap_werrors,
- NRECMEMA, &info->nrecmema);
- pci_read_config_word(pvt->branchmap_werrors,
- NRECMEMB, &info->nrecmemb);
-
- /* Clear the error bits, by writing them back */
- pci_write_config_dword(pvt->branchmap_werrors,
- FERR_FAT_FBD, value);
- } else {
- info->ferr_fat_fbd = 0;
- info->nerr_fat_fbd = 0;
- info->nrecmema = 0;
- info->nrecmemb = 0;
- }
-
- /* read in the 1st NON-FATAL error register */
- pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);
-
- /* If there is an error, then read in the 1st NON-FATAL error
- * register as well */
- if (value & FERR_NF_MASK) {
- info->ferr_nf_fbd = value;
-
- /* harvest the various error data we need */
- pci_read_config_dword(pvt->branchmap_werrors,
- NERR_NF_FBD, &info->nerr_nf_fbd);
- pci_read_config_word(pvt->branchmap_werrors,
- RECMEMA, &info->recmema);
- pci_read_config_dword(pvt->branchmap_werrors,
- RECMEMB, &info->recmemb);
- pci_read_config_dword(pvt->branchmap_werrors,
- REDMEMB, &info->redmemb);
-
- /* Clear the error bits, by writing them back */
- pci_write_config_dword(pvt->branchmap_werrors,
- FERR_NF_FBD, value);
- } else {
- info->ferr_nf_fbd = 0;
- info->nerr_nf_fbd = 0;
- info->recmema = 0;
- info->recmemb = 0;
- info->redmemb = 0;
- }
-}
-
-/*
- * i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
- * struct i5400_error_info *info,
- * unsigned long allErrors);
- *
- * handle the Intel FATAL and unrecoverable errors, if any
- */
-static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
- struct i5400_error_info *info,
- unsigned long allErrors)
-{
- char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
- int branch;
- int channel;
- int bank;
- int buf_id;
- int rank;
- int rdwr;
- int ras, cas;
- int errnum;
- char *type = NULL;
-
- if (!allErrors)
- return; /* if no error, return now */
-
- if (allErrors & ERROR_FAT_MASK)
- type = "FATAL";
- else if (allErrors & FERR_NF_UNCORRECTABLE)
- type = "NON-FATAL uncorrected";
- else
- type = "NON-FATAL recoverable";
-
- /* ONLY ONE of the possible error bits will be set, as per the docs */
-
- branch = extract_fbdchan_indx(info->ferr_fat_fbd);
- channel = branch;
-
- /* Use the NON-Recoverable macros to extract data */
- bank = nrec_bank(info);
- rank = nrec_rank(info);
- buf_id = nrec_buf_id(info);
- rdwr = nrec_rdwr(info);
- ras = nrec_ras(info);
- cas = nrec_cas(info);
-
- debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
- "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, channel + 1, branch >> 1, bank,
- buf_id, rdwr_str(rdwr), ras, cas);
-
- /* Only 1 bit will be on */
- errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
-
- /* Form out message */
- snprintf(msg, sizeof(msg),
- "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s "
- "RAS=%d CAS=%d %s Err=0x%lx (%s))",
- type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas,
- type, allErrors, error_name[errnum]);
-
- /* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
-}
-
-/*
- * i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
- * struct i5400_error_info *info);
- *
- * handle the Intel NON-FATAL errors, if any
- */
-static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
- struct i5400_error_info *info)
-{
- char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
- unsigned long allErrors;
- int branch;
- int channel;
- int bank;
- int rank;
- int rdwr;
- int ras, cas;
- int errnum;
-
- /* mask off the Error bits that are possible */
- allErrors = from_nf_ferr(info->ferr_nf_fbd & FERR_NF_MASK);
- if (!allErrors)
- return; /* if no error, return now */
-
- /* ONLY ONE of the possible error bits will be set, as per the docs */
-
- if (allErrors & (ERROR_NF_UNCORRECTABLE | ERROR_NF_RECOVERABLE)) {
- i5400_proccess_non_recoverable_info(mci, info, allErrors);
- return;
- }
-
- /* Correctable errors */
- if (allErrors & ERROR_NF_CORRECTABLE) {
- debugf0("\tCorrected bits= 0x%lx\n", allErrors);
-
- branch = extract_fbdchan_indx(info->ferr_nf_fbd);
-
- channel = 0;
- if (REC_ECC_LOCATOR_ODD(info->redmemb))
- channel = 1;
-
- /* Convert channel to be based from zero, instead of
- * from branch base of 0 */
- channel += branch;
-
- bank = rec_bank(info);
- rank = rec_rank(info);
- rdwr = rec_rdwr(info);
- ras = rec_ras(info);
- cas = rec_cas(info);
-
- /* Only 1 bit will be on */
- errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
-
- debugf0("\t\tCSROW= %d Channel= %d (Branch %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, branch >> 1, bank,
- rdwr_str(rdwr), ras, cas);
-
- /* Form out message */
- snprintf(msg, sizeof(msg),
- "Corrected error (Branch=%d DRAM-Bank=%d RDWR=%s "
- "RAS=%d CAS=%d, CE Err=0x%lx (%s))",
- branch >> 1, bank, rdwr_str(rdwr), ras, cas,
- allErrors, error_name[errnum]);
-
- /* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, rank, channel, msg);
-
- return;
- }
-
- /* Miscellaneous errors */
- errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
-
- branch = extract_fbdchan_indx(info->ferr_nf_fbd);
-
- i5400_mc_printk(mci, KERN_EMERG,
- "Non-Fatal misc error (Branch=%d Err=%#lx (%s))",
- branch >> 1, allErrors, error_name[errnum]);
-}
-
-/*
- * i5400_process_error_info Process the error info that is
- * in the 'info' structure, previously retrieved from hardware
- */
-static void i5400_process_error_info(struct mem_ctl_info *mci,
- struct i5400_error_info *info)
-{
- u32 allErrors;
-
- /* First handle any fatal errors that occurred */
- allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK);
- i5400_proccess_non_recoverable_info(mci, info, allErrors);
-
- /* now handle any non-fatal errors that occurred */
- i5400_process_nonfatal_error_info(mci, info);
-}
-
-/*
- * i5400_clear_error Retrieve any error from the hardware
- * but do NOT process that error.
- * Used for 'clearing' out of previous errors
- * Called by the Core module.
- */
-static void i5400_clear_error(struct mem_ctl_info *mci)
-{
- struct i5400_error_info info;
-
- i5400_get_error_info(mci, &info);
-}
-
-/*
- * i5400_check_error Retrieve and process errors reported by the
- * hardware. Called by the Core module.
- */
-static void i5400_check_error(struct mem_ctl_info *mci)
-{
- struct i5400_error_info info;
- debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
- i5400_get_error_info(mci, &info);
- i5400_process_error_info(mci, &info);
-}
-
-/*
- * i5400_put_devices 'put' all the devices that we have
- * reserved via 'get'
- */
-static void i5400_put_devices(struct mem_ctl_info *mci)
-{
- struct i5400_pvt *pvt;
-
- pvt = mci->pvt_info;
-
- /* Decrement usage count for devices */
- pci_dev_put(pvt->branch_1);
- pci_dev_put(pvt->branch_0);
- pci_dev_put(pvt->fsb_error_regs);
- pci_dev_put(pvt->branchmap_werrors);
-}
-
-/*
- * i5400_get_devices Find and perform 'get' operation on the MCH's
- * device/functions we want to reference for this driver
- *
- * Need to 'get' device 16 func 1 and func 2
- */
-static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
-{
- struct i5400_pvt *pvt;
- struct pci_dev *pdev;
-
- pvt = mci->pvt_info;
- pvt->branchmap_werrors = NULL;
- pvt->fsb_error_regs = NULL;
- pvt->branch_0 = NULL;
- pvt->branch_1 = NULL;
-
- /* Attempt to 'get' the MCH register we want */
- pdev = NULL;
- while (1) {
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_5400_ERR, pdev);
- if (!pdev) {
- /* End of list, leave */
- i5400_printk(KERN_ERR,
- "'system address,Process Bus' "
- "device not found:"
- "vendor 0x%x device 0x%x ERR func 1 "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_5400_ERR);
- return -ENODEV;
- }
-
- /* Store device 16 func 1 */
- if (PCI_FUNC(pdev->devfn) == 1)
- break;
- }
- pvt->branchmap_werrors = pdev;
-
- pdev = NULL;
- while (1) {
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_5400_ERR, pdev);
- if (!pdev) {
- /* End of list, leave */
- i5400_printk(KERN_ERR,
- "'system address,Process Bus' "
- "device not found:"
- "vendor 0x%x device 0x%x ERR func 2 "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_5400_ERR);
-
- pci_dev_put(pvt->branchmap_werrors);
- return -ENODEV;
- }
-
- /* Store device 16 func 2 */
- if (PCI_FUNC(pdev->devfn) == 2)
- break;
- }
- pvt->fsb_error_regs = pdev;
-
- debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->system_address),
- pvt->system_address->vendor, pvt->system_address->device);
- debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->branchmap_werrors),
- pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
- debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->fsb_error_regs),
- pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
-
- pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_5400_FBD0, NULL);
- if (!pvt->branch_0) {
- i5400_printk(KERN_ERR,
- "MC: 'BRANCH 0' device not found:"
- "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0);
-
- pci_dev_put(pvt->fsb_error_regs);
- pci_dev_put(pvt->branchmap_werrors);
- return -ENODEV;
- }
-
- /* If this device claims to have more than 2 channels then
- * fetch Branch 1's information
- */
- if (pvt->maxch < CHANNELS_PER_BRANCH)
- return 0;
-
- pvt->branch_1 = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_5400_FBD1, NULL);
- if (!pvt->branch_1) {
- i5400_printk(KERN_ERR,
- "MC: 'BRANCH 1' device not found:"
- "vendor 0x%x device 0x%x Func 0 "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_5400_FBD1);
-
- pci_dev_put(pvt->branch_0);
- pci_dev_put(pvt->fsb_error_regs);
- pci_dev_put(pvt->branchmap_werrors);
- return -ENODEV;
- }
-
- return 0;
-}
-
-/*
- * determine_amb_present
- *
- * the AMB-present information is spread across several registers;
- * determining which register to use requires knowing which
- * channel is in question
- *
- * 2 branches, each with 2 channels
- * b0_ambpresent0 for channel '0'
- * b0_ambpresent1 for channel '1'
- * b1_ambpresent0 for channel '2'
- * b1_ambpresent1 for channel '3'
- */
-static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
-{
- int amb_present;
-
- if (channel < CHANNELS_PER_BRANCH) {
- if (channel & 0x1)
- amb_present = pvt->b0_ambpresent1;
- else
- amb_present = pvt->b0_ambpresent0;
- } else {
- if (channel & 0x1)
- amb_present = pvt->b1_ambpresent1;
- else
- amb_present = pvt->b1_ambpresent0;
- }
-
- return amb_present;
-}
-
-/*
- * determine_mtr(pvt, csrow, channel)
- *
- * return the proper MTR register as determine by the csrow and desired channel
- */
-static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
-{
- int mtr;
- int n;
-
- /* There is one MTR for each slot pair of FB-DIMMs.
- Each slot pair may be at branch 0 or branch 1.
- */
- n = csrow;
-
- if (n >= NUM_MTRS_PER_BRANCH) {
- debugf0("ERROR: trying to access an invalid csrow: %d\n",
- csrow);
- return 0;
- }
-
- if (channel < CHANNELS_PER_BRANCH)
- mtr = pvt->b0_mtr[n];
- else
- mtr = pvt->b1_mtr[n];
-
- return mtr;
-}
-
-/*
- */
-static void decode_mtr(int slot_row, u16 mtr)
-{
- int ans;
-
- ans = MTR_DIMMS_PRESENT(mtr);
-
- debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr,
- ans ? "Present" : "NOT Present");
- if (!ans)
- return;
-
- debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
-
- debugf2("\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
-
- debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
- debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
- debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
- debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
-}
-
-static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
- struct i5400_dimm_info *dinfo)
-{
- int mtr;
- int amb_present_reg;
- int addrBits;
-
- mtr = determine_mtr(pvt, csrow, channel);
- if (MTR_DIMMS_PRESENT(mtr)) {
- amb_present_reg = determine_amb_present_reg(pvt, channel);
-
- /* Determine if there is a DIMM present in this DIMM slot */
- if (amb_present_reg & (1 << csrow)) {
- /* Start with the number of bits for a Bank
- * on the DRAM */
- addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
- /* Add the number of ROW bits */
- addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
- /* add the number of COLUMN bits */
- addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
- /* add the number of RANK bits */
- addrBits += MTR_DIMM_RANK(mtr);
-
- addrBits += 6; /* add 64 bits per DIMM */
- addrBits -= 20; /* divide by 2^20 */
- addrBits -= 3; /* 8 bits per byte */
-
- dinfo->megabytes = 1 << addrBits;
- }
- }
-}
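
Continuing the hypothetical mtr = 0x0465 used earlier, the size arithmetic above works out as follows (illustrative only):

/*
 * addrBits = 3 (banks) + 14 (rows) + 11 (cols) + 1 (rank) = 29
 *   + 6  (64 data bits per DIMM access) -> 35, i.e. 2^35 bits
 *   - 20 (scale to mega-)               -> 15
 *   - 3  (bits to bytes)                -> 12
 * dinfo->megabytes = 1 << 12 = 4096 MB
 */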
-
-/*
- * calculate_dimm_size
- *
- * also will output a DIMM matrix map, if debug is enabled, for viewing
- * how the DIMMs are populated
- */
-static void calculate_dimm_size(struct i5400_pvt *pvt)
-{
- struct i5400_dimm_info *dinfo;
- int csrow, max_csrows;
- char *p, *mem_buffer;
- int space, n;
- int channel;
-
- /* ================= Generate some debug output ================= */
- space = PAGE_SIZE;
- mem_buffer = p = kmalloc(space, GFP_KERNEL);
- if (p == NULL) {
- i5400_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
- __FILE__, __func__);
- return;
- }
-
- /* Scan all the actual CSROWS
- * and calculate the information for each DIMM
- * Start with the highest csrow first, to display it first
- * and work toward the 0th csrow
- */
- max_csrows = pvt->maxdimmperch;
- for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
-
- /* on an odd csrow, first output a 'boundary' marker,
- * then reset the message buffer */
- if (csrow & 0x1) {
- n = snprintf(p, space, "---------------------------"
- "--------------------------------");
- p += n;
- space -= n;
- debugf2("%s\n", mem_buffer);
- p = mem_buffer;
- space = PAGE_SIZE;
- }
- n = snprintf(p, space, "csrow %2d ", csrow);
- p += n;
- space -= n;
-
- for (channel = 0; channel < pvt->maxch; channel++) {
- dinfo = &pvt->dimm_info[csrow][channel];
- handle_channel(pvt, csrow, channel, dinfo);
- n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
- p += n;
- space -= n;
- }
- debugf2("%s\n", mem_buffer);
- p = mem_buffer;
- space = PAGE_SIZE;
- }
-
- /* Output the last bottom 'boundary' marker */
- n = snprintf(p, space, "---------------------------"
- "--------------------------------");
- p += n;
- space -= n;
- debugf2("%s\n", mem_buffer);
- p = mem_buffer;
- space = PAGE_SIZE;
-
- /* now output the 'channel' labels */
- n = snprintf(p, space, " ");
- p += n;
- space -= n;
- for (channel = 0; channel < pvt->maxch; channel++) {
- n = snprintf(p, space, "channel %d | ", channel);
- p += n;
- space -= n;
- }
-
- /* output the last message and free buffer */
- debugf2("%s\n", mem_buffer);
- kfree(mem_buffer);
-}
-
-/*
- * i5400_get_mc_regs read in the necessary registers and
- * cache locally
- *
- * Fills in the private data members
- */
-static void i5400_get_mc_regs(struct mem_ctl_info *mci)
-{
- struct i5400_pvt *pvt;
- u32 actual_tolm;
- u16 limit;
- int slot_row;
- int maxch;
- int maxdimmperch;
- int way0, way1;
-
- pvt = mci->pvt_info;
-
- pci_read_config_dword(pvt->system_address, AMBASE,
- (u32 *) &pvt->ambase);
- pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
- ((u32 *) &pvt->ambase) + sizeof(u32));
-
- maxdimmperch = pvt->maxdimmperch;
- maxch = pvt->maxch;
-
- debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
- (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
-
- /* Get the Branch Map regs */
- pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
- pvt->tolm >>= 12;
- debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
- pvt->tolm);
-
- actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
- debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
- actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
-
- pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
- pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
-
- /* Get the MIR[0-1] regs */
- limit = (pvt->mir0 >> 4) & 0x0fff;
- way0 = pvt->mir0 & 0x1;
- way1 = pvt->mir0 & 0x2;
- debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
- limit = (pvt->mir1 >> 4) & 0xfff;
- way0 = pvt->mir1 & 0x1;
- way1 = pvt->mir1 & 0x2;
- debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
-
- /* Get the set of MTR[0-3] regs by each branch */
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) {
- int where = MTR0 + (slot_row * sizeof(u16));
-
- /* Branch 0 set of MTR registers */
- pci_read_config_word(pvt->branch_0, where,
- &pvt->b0_mtr[slot_row]);
-
- debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
- pvt->b0_mtr[slot_row]);
-
- if (pvt->maxch < CHANNELS_PER_BRANCH) {
- pvt->b1_mtr[slot_row] = 0;
- continue;
- }
-
- /* Branch 1 set of MTR registers */
- pci_read_config_word(pvt->branch_1, where,
- &pvt->b1_mtr[slot_row]);
- debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where,
- pvt->b1_mtr[slot_row]);
- }
-
- /* Read and dump branch 0's MTRs */
- debugf2("\nMemory Technology Registers:\n");
- debugf2(" Branch 0:\n");
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
- decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
-
- pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
- &pvt->b0_ambpresent0);
- debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
- pci_read_config_word(pvt->branch_0, AMBPRESENT_1,
- &pvt->b0_ambpresent1);
- debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
-
- /* Only if we have 2 branches (4 channels) */
- if (pvt->maxch < CHANNELS_PER_BRANCH) {
- pvt->b1_ambpresent0 = 0;
- pvt->b1_ambpresent1 = 0;
- } else {
- /* Read and dump branch 1's MTRs */
- debugf2(" Branch 1:\n");
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
- decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
-
- pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
- &pvt->b1_ambpresent0);
- debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
- pvt->b1_ambpresent0);
- pci_read_config_word(pvt->branch_1, AMBPRESENT_1,
- &pvt->b1_ambpresent1);
- debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
- pvt->b1_ambpresent1);
- }
-
- /* Go and determine the size of each DIMM and place in an
- * orderly matrix */
- calculate_dimm_size(pvt);
-}
-
-/*
- * i5400_init_csrows Initialize the 'csrows' table within
- * the mci control structure with the
- * addressing of memory.
- *
- * return:
- * 0 success
- * 1 no actual memory found on this MC
- */
-static int i5400_init_csrows(struct mem_ctl_info *mci)
-{
- struct i5400_pvt *pvt;
- struct csrow_info *p_csrow;
- int empty, channel_count;
- int max_csrows;
- int mtr;
- int csrow_megs;
- int channel;
- int csrow;
-
- pvt = mci->pvt_info;
-
- channel_count = pvt->maxch;
- max_csrows = pvt->maxdimmperch;
-
- empty = 1; /* Assume NO memory */
-
- for (csrow = 0; csrow < max_csrows; csrow++) {
- p_csrow = &mci->csrows[csrow];
-
- p_csrow->csrow_idx = csrow;
-
- /* use branch 0 for the basis */
- mtr = determine_mtr(pvt, csrow, 0);
-
- /* if no DIMMS on this row, continue */
- if (!MTR_DIMMS_PRESENT(mtr))
- continue;
-
- /* FAKE OUT VALUES, FIXME */
- p_csrow->first_page = 0 + csrow * 20;
- p_csrow->last_page = 9 + csrow * 20;
- p_csrow->page_mask = 0xFFF;
-
- p_csrow->grain = 8;
-
- csrow_megs = 0;
- for (channel = 0; channel < pvt->maxch; channel++)
- csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
-
- p_csrow->nr_pages = csrow_megs << 8;
-
- /* Assume DDR2 for now */
- p_csrow->mtype = MEM_FB_DDR2;
-
- /* ask what device type on this row */
- if (MTR_DRAM_WIDTH(mtr))
- p_csrow->dtype = DEV_X8;
- else
- p_csrow->dtype = DEV_X4;
-
- p_csrow->edac_mode = EDAC_S8ECD8ED;
-
- empty = 0;
- }
-
- return empty;
-}
-
-/*
- * i5400_enable_error_reporting
- * Turn on the memory reporting features of the hardware
- */
-static void i5400_enable_error_reporting(struct mem_ctl_info *mci)
-{
- struct i5400_pvt *pvt;
- u32 fbd_error_mask;
-
- pvt = mci->pvt_info;
-
- /* Read the FBD Error Mask Register */
- pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
- &fbd_error_mask);
-
- /* Enable with a '0' */
- fbd_error_mask &= ~(ENABLE_EMASK_ALL);
-
- pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
- fbd_error_mask);
-}
-
-/*
- * i5400_probe1 Probe for ONE instance of device to see if it is
- * present.
- * return:
- * 0 for FOUND a device
- * < 0 for error code
- */
-static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
-{
- struct mem_ctl_info *mci;
- struct i5400_pvt *pvt;
- int num_channels;
- int num_dimms_per_channel;
- int num_csrows;
-
- if (dev_idx >= ARRAY_SIZE(i5400_devs))
- return -EINVAL;
-
- debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
- __FILE__, __func__,
- pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
-
- /* We only are looking for func 0 of the set */
- if (PCI_FUNC(pdev->devfn) != 0)
- return -ENODEV;
-
- /* As we don't have a motherboard identification routine to determine
- * the actual number of slots/dimms per channel, we utilize the
- * resource as specified by the chipset. Thus, we might allow for
- * more DIMMs per channel than are actually on the mobo, but this
- * allows the driver to support up to the chipset max, without
- * some fancy mobo determination.
- */
- num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL;
- num_channels = MAX_CHANNELS;
- num_csrows = num_dimms_per_channel;
-
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
-
- /* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
- if (mci == NULL)
- return -ENOMEM;
-
- debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
-
- mci->dev = &pdev->dev; /* record ptr to the generic device */
-
- pvt = mci->pvt_info;
- pvt->system_address = pdev; /* Record this device in our private */
- pvt->maxch = num_channels;
- pvt->maxdimmperch = num_dimms_per_channel;
-
- /* 'get' the pci devices we want to reserve for our use */
- if (i5400_get_devices(mci, dev_idx))
- goto fail0;
-
- /* Time to get serious */
- i5400_get_mc_regs(mci); /* retrieve the hardware registers */
-
- mci->mc_idx = 0;
- mci->mtype_cap = MEM_FLAG_FB_DDR2;
- mci->edac_ctl_cap = EDAC_FLAG_NONE;
- mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = "i5400_edac.c";
- mci->mod_ver = I5400_REVISION;
- mci->ctl_name = i5400_devs[dev_idx].ctl_name;
- mci->dev_name = pci_name(pdev);
- mci->ctl_page_to_phys = NULL;
-
- /* Set the function pointer to an actual operation function */
- mci->edac_check = i5400_check_error;
-
- /* initialize the MC control structure 'csrows' table
- * with the mapping and control information */
- if (i5400_init_csrows(mci)) {
- debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
- " because i5400_init_csrows() returned nonzero "
- "value\n");
- mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
- } else {
- debugf1("MC: Enable error reporting now\n");
- i5400_enable_error_reporting(mci);
- }
-
- /* add this new MC control structure to EDAC's list of MCs */
- if (edac_mc_add_mc(mci)) {
- debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
- __FILE__, __func__);
- /* FIXME: perhaps some code should go here that disables error
- * reporting if we just enabled it
- */
- goto fail1;
- }
-
- i5400_clear_error(mci);
-
- /* allocating generic PCI control info */
- i5400_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
- if (!i5400_pci) {
- printk(KERN_WARNING
- "%s(): Unable to create PCI control\n",
- __func__);
- printk(KERN_WARNING
- "%s(): PCI error report via EDAC not setup\n",
- __func__);
- }
-
- return 0;
-
- /* Error exit unwinding stack */
-fail1:
-
- i5400_put_devices(mci);
-
-fail0:
- edac_mc_free(mci);
- return -ENODEV;
-}
-
-/*
- * i5400_init_one constructor for one instance of device
- *
- * returns:
- * negative on error
- * count (>= 0)
- */
-static int __devinit i5400_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- int rc;
-
- debugf0("MC: %s: %s()\n", __FILE__, __func__);
-
- /* wake up device */
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
-
- /* now probe and enable the device */
- return i5400_probe1(pdev, id->driver_data);
-}
-
-/*
- * i5400_remove_one destructor for one instance of device
- *
- */
-static void __devexit i5400_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
-
- debugf0("%s: %s()\n", __FILE__, __func__);
-
- if (i5400_pci)
- edac_pci_release_generic_ctl(i5400_pci);
-
- mci = edac_mc_del_mc(&pdev->dev);
- if (!mci)
- return;
-
- /* retrieve references to resources, and free those resources */
- i5400_put_devices(mci);
-
- edac_mc_free(mci);
-}
-
-/*
- * pci_device_id table for which devices we are looking for
- *
- * The "E500P" device is the first device supported.
- */
-static DEFINE_PCI_DEVICE_TABLE(i5400_pci_tbl) = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
- {0,} /* 0 terminated list. */
-};
-
-MODULE_DEVICE_TABLE(pci, i5400_pci_tbl);
-
-/*
- * i5400_driver pci_driver structure for this module
- *
- */
-static struct pci_driver i5400_driver = {
- .name = "i5400_edac",
- .probe = i5400_init_one,
- .remove = __devexit_p(i5400_remove_one),
- .id_table = i5400_pci_tbl,
-};
-
-/*
- * i5400_init Module entry function
- * Try to initialize this module for its devices
- */
-static int __init i5400_init(void)
-{
- int pci_rc;
-
- debugf2("MC: %s: %s()\n", __FILE__, __func__);
-
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- pci_rc = pci_register_driver(&i5400_driver);
-
- return (pci_rc < 0) ? pci_rc : 0;
-}
-
-/*
- * i5400_exit() Module exit function
- * Unregister the driver
- */
-static void __exit i5400_exit(void)
-{
- debugf2("MC: %s: %s()\n", __FILE__, __func__);
- pci_unregister_driver(&i5400_driver);
-}
-
-module_init(i5400_init);
-module_exit(i5400_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ben Woodard <woodard@redhat.com>");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
-MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
-MODULE_DESCRIPTION("MC Driver for Intel I5400 memory controllers - "
- I5400_REVISION);
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/ANDROID_3.4.5/drivers/edac/i7300_edac.c b/ANDROID_3.4.5/drivers/edac/i7300_edac.c
deleted file mode 100644
index 3bafa3bc..00000000
--- a/ANDROID_3.4.5/drivers/edac/i7300_edac.c
+++ /dev/null
@@ -1,1248 +0,0 @@
-/*
- * Intel 7300 class Memory Controllers kernel module (Clarksboro)
- *
- * This file may be distributed under the terms of the
- * GNU General Public License version 2 only.
- *
- * Copyright (c) 2010 by:
- * Mauro Carvalho Chehab <mchehab@redhat.com>
- *
- * Red Hat Inc. http://www.redhat.com
- *
- * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet
- * http://www.intel.com/Assets/PDF/datasheet/318082.pdf
- *
- * TODO: The chipset also allows checking for PCI Express errors. Currently,
- * the driver covers only memory errors
- *
- * This driver uses "csrows" EDAC attribute to represent DIMM slot#
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/slab.h>
-#include <linux/edac.h>
-#include <linux/mmzone.h>
-
-#include "edac_core.h"
-
-/*
- * Alter this version for the I7300 module when modifications are made
- */
-#define I7300_REVISION " Ver: 1.0.0"
-
-#define EDAC_MOD_STR "i7300_edac"
-
-#define i7300_printk(level, fmt, arg...) \
- edac_printk(level, "i7300", fmt, ##arg)
-
-#define i7300_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg)
-
-/***********************************************
- * i7300 Limit constants Structs and static vars
- ***********************************************/
-
-/*
- * Memory topology is organized as:
- * Branch 0 - 2 channels: channels 0 and 1 (FDB0 PCI dev 21.0)
- * Branch 1 - 2 channels: channels 2 and 3 (FDB1 PCI dev 22.0)
- * Each channel can have up to 8 DIMM sets (called SLOTS)
- * Slots should generally be filled in pairs,
- * except in Single Channel mode of operation,
- * where just slot 0/channel 0 is filled
- * In normal operation mode, the two channels on a branch should be
- * filled together for the same SLOT#
- * When in mirrored mode, Branch 1 replicates the memory of Branch 0, so the
- * four channels on both branches should be filled
- */
-
-/* Limits for i7300 */
-#define MAX_SLOTS 8
-#define MAX_BRANCHES 2
-#define MAX_CH_PER_BRANCH 2
-#define MAX_CHANNELS (MAX_CH_PER_BRANCH * MAX_BRANCHES)
-#define MAX_MIR 3
-
-#define to_channel(ch, branch) ((((branch)) << 1) | (ch))
-
-#define to_csrow(slot, ch, branch) \
- (to_channel(ch, branch) | ((slot) << 2))
-
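A worked example of this packing (hypothetical slot/channel/branch numbers):

/*
 *   to_channel(1, 1)  = (1 << 1) | 1                = 3  (branch 1, channel 1)
 *   to_csrow(5, 1, 1) = to_channel(1, 1) | (5 << 2) = 23
 * i.e. the csrow number keeps the slot in bits [4:2] and the global
 * channel number in bits [1:0].
 */
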
-/* Device name and register DID (Device ID) */
-struct i7300_dev_info {
- const char *ctl_name; /* name for this device */
- u16 fsb_mapping_errors; /* DID for the branchmap,control */
-};
-
-/* Table of devices attributes supported by this driver */
-static const struct i7300_dev_info i7300_devs[] = {
- {
- .ctl_name = "I7300",
- .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
- },
-};
-
-struct i7300_dimm_info {
- int megabytes; /* size, 0 means not present */
-};
-
-/* driver private data structure */
-struct i7300_pvt {
- struct pci_dev *pci_dev_16_0_fsb_ctlr; /* 16.0 */
- struct pci_dev *pci_dev_16_1_fsb_addr_map; /* 16.1 */
- struct pci_dev *pci_dev_16_2_fsb_err_regs; /* 16.2 */
- struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES]; /* 21.0 and 22.0 */
-
- u16 tolm; /* top of low memory */
- u64 ambase; /* AMB BAR */
-
- u32 mc_settings; /* Report several settings */
- u32 mc_settings_a;
-
- u16 mir[MAX_MIR]; /* Memory Interleave Reg*/
-
- u16 mtr[MAX_SLOTS][MAX_BRANCHES]; /* Memory Technology Reg */
- u16 ambpresent[MAX_CHANNELS]; /* AMB present regs */
-
- /* DIMM information matrix, allocating architecture maximums */
- struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS];
-
- /* Temporary buffer for use when preparing error messages */
- char *tmp_prt_buffer;
-};
-
-/* FIXME: Why do we need to have this static? */
-static struct edac_pci_ctl_info *i7300_pci;
-
-/***************************************************
- * i7300 Register definitions for memory enumeration
- ***************************************************/
-
-/*
- * Device 16,
- * Function 0: System Address (not documented)
- * Function 1: Memory Branch Map, Control, Errors Register
- */
-
- /* OFFSETS for Function 0 */
-#define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */
-#define MAXCH 0x56 /* Max Channel Number */
-#define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */
-
- /* OFFSETS for Function 1 */
-#define MC_SETTINGS 0x40
- #define IS_MIRRORED(mc) ((mc) & (1 << 16))
- #define IS_ECC_ENABLED(mc) ((mc) & (1 << 5))
- #define IS_RETRY_ENABLED(mc) ((mc) & (1 << 31))
- #define IS_SCRBALGO_ENHANCED(mc) ((mc) & (1 << 8))
-
-#define MC_SETTINGS_A 0x58
- #define IS_SINGLE_MODE(mca) ((mca) & (1 << 14))
-
-#define TOLM 0x6C
-
-#define MIR0 0x80
-#define MIR1 0x84
-#define MIR2 0x88
-
-/*
- * Note: Other Intel EDAC drivers use AMBPRESENT to identify if the available
- * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it
- * seems that we cannot use this information directly for the same usage.
- * Each memory slot may have up to 2 AMB interfaces, one for income and another
- * for outcome interface to the next slot.
- * For now, the driver just stores the AMB present registers, but rely only at
- * the MTR info to detect memory.
- * Datasheet is also not clear about how to map each AMBPRESENT registers to
- * one of the 4 available channels.
- */
-#define AMBPRESENT_0 0x64
-#define AMBPRESENT_1 0x66
-
-static const u16 mtr_regs[MAX_SLOTS] = {
- 0x80, 0x84, 0x88, 0x8c,
- 0x82, 0x86, 0x8a, 0x8e
-};
-
-/*
- * Defines to extract the various fields from the
- * MTRx - Memory Technology Registers
- */
-#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (1 << 8))
-#define MTR_DIMMS_ETHROTTLE(mtr) ((mtr) & (1 << 7))
-#define MTR_DRAM_WIDTH(mtr) (((mtr) & (1 << 6)) ? 8 : 4)
-#define MTR_DRAM_BANKS(mtr) (((mtr) & (1 << 5)) ? 8 : 4)
-#define MTR_DIMM_RANKS(mtr) (((mtr) & (1 << 4)) ? 1 : 0)
-#define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3)
-#define MTR_DRAM_BANKS_ADDR_BITS 2
-#define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13)
-#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
-#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
-
-#ifdef CONFIG_EDAC_DEBUG
-/* MTR NUMROW */
-static const char *numrow_toString[] = {
- "8,192 - 13 rows",
- "16,384 - 14 rows",
- "32,768 - 15 rows",
- "65,536 - 16 rows"
-};
-
-/* MTR NUMCOL */
-static const char *numcol_toString[] = {
- "1,024 - 10 columns",
- "2,048 - 11 columns",
- "4,096 - 12 columns",
- "reserved"
-};
-#endif
-
-/************************************************
- * i7300 Register definitions for error detection
- ************************************************/
-
-/*
- * Device 16.1: FBD Error Registers
- */
-#define FERR_FAT_FBD 0x98
-static const char *ferr_fat_fbd_name[] = {
- [22] = "Non-Redundant Fast Reset Timeout",
- [2] = ">Tmid Thermal event with intelligent throttling disabled",
- [1] = "Memory or FBD configuration CRC read error",
- [0] = "Memory Write error on non-redundant retry or "
- "FBD configuration Write error on retry",
-};
-#define GET_FBD_FAT_IDX(fbderr) (fbderr & (3 << 28))
-#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3))
-
-#define FERR_NF_FBD 0xa0
-static const char *ferr_nf_fbd_name[] = {
- [24] = "DIMM-Spare Copy Completed",
- [23] = "DIMM-Spare Copy Initiated",
- [22] = "Redundant Fast Reset Timeout",
- [21] = "Memory Write error on redundant retry",
- [18] = "SPD protocol Error",
- [17] = "FBD Northbound parity error on FBD Sync Status",
- [16] = "Correctable Patrol Data ECC",
- [15] = "Correctable Resilver- or Spare-Copy Data ECC",
- [14] = "Correctable Mirrored Demand Data ECC",
- [13] = "Correctable Non-Mirrored Demand Data ECC",
- [11] = "Memory or FBD configuration CRC read error",
- [10] = "FBD Configuration Write error on first attempt",
- [9] = "Memory Write error on first attempt",
- [8] = "Non-Aliased Uncorrectable Patrol Data ECC",
- [7] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
- [6] = "Non-Aliased Uncorrectable Mirrored Demand Data ECC",
- [5] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
- [4] = "Aliased Uncorrectable Patrol Data ECC",
- [3] = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
- [2] = "Aliased Uncorrectable Mirrored Demand Data ECC",
- [1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
- [0] = "Uncorrectable Data ECC on Replay",
-};
-#define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28))
-#define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
- (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
- (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
- (1 << 9) | (1 << 8) | (1 << 7) | (1 << 6) |\
- (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) |\
- (1 << 1) | (1 << 0))
-
-#define EMASK_FBD 0xa8
-#define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\
- (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\
- (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\
- (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\
- (1 << 9) | (1 << 8) | (1 << 7) | (1 << 6) |\
- (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) |\
- (1 << 1) | (1 << 0))
-
-/*
- * Device 16.2: Global Error Registers
- */
-
-#define FERR_GLOBAL_HI 0x48
-static const char *ferr_global_hi_name[] = {
- [3] = "FSB 3 Fatal Error",
- [2] = "FSB 2 Fatal Error",
- [1] = "FSB 1 Fatal Error",
- [0] = "FSB 0 Fatal Error",
-};
-#define ferr_global_hi_is_fatal(errno) 1
-
-#define FERR_GLOBAL_LO 0x40
-static const char *ferr_global_lo_name[] = {
- [31] = "Internal MCH Fatal Error",
- [30] = "Intel QuickData Technology Device Fatal Error",
- [29] = "FSB1 Fatal Error",
- [28] = "FSB0 Fatal Error",
- [27] = "FBD Channel 3 Fatal Error",
- [26] = "FBD Channel 2 Fatal Error",
- [25] = "FBD Channel 1 Fatal Error",
- [24] = "FBD Channel 0 Fatal Error",
- [23] = "PCI Express Device 7Fatal Error",
- [22] = "PCI Express Device 6 Fatal Error",
- [21] = "PCI Express Device 5 Fatal Error",
- [20] = "PCI Express Device 4 Fatal Error",
- [19] = "PCI Express Device 3 Fatal Error",
- [18] = "PCI Express Device 2 Fatal Error",
- [17] = "PCI Express Device 1 Fatal Error",
- [16] = "ESI Fatal Error",
- [15] = "Internal MCH Non-Fatal Error",
- [14] = "Intel QuickData Technology Device Non Fatal Error",
- [13] = "FSB1 Non-Fatal Error",
- [12] = "FSB 0 Non-Fatal Error",
- [11] = "FBD Channel 3 Non-Fatal Error",
- [10] = "FBD Channel 2 Non-Fatal Error",
- [9] = "FBD Channel 1 Non-Fatal Error",
- [8] = "FBD Channel 0 Non-Fatal Error",
- [7] = "PCI Express Device 7 Non-Fatal Error",
- [6] = "PCI Express Device 6 Non-Fatal Error",
- [5] = "PCI Express Device 5 Non-Fatal Error",
- [4] = "PCI Express Device 4 Non-Fatal Error",
- [3] = "PCI Express Device 3 Non-Fatal Error",
- [2] = "PCI Express Device 2 Non-Fatal Error",
- [1] = "PCI Express Device 1 Non-Fatal Error",
- [0] = "ESI Non-Fatal Error",
-};
-#define ferr_global_lo_is_fatal(errno) ((errno < 16) ? 0 : 1)
-
-#define NRECMEMA 0xbe
- #define NRECMEMA_BANK(v) (((v) >> 12) & 7)
- #define NRECMEMA_RANK(v) (((v) >> 8) & 15)
-
-#define NRECMEMB 0xc0
- #define NRECMEMB_IS_WR(v) ((v) & (1 << 31))
- #define NRECMEMB_CAS(v) (((v) >> 16) & 0x1fff)
- #define NRECMEMB_RAS(v) ((v) & 0xffff)
-
-#define REDMEMA 0xdc
-
-#define REDMEMB 0x7c
- #define IS_SECOND_CH(v) ((v) & (1 << 17))
-
-#define RECMEMA 0xe0
- #define RECMEMA_BANK(v) (((v) >> 12) & 7)
- #define RECMEMA_RANK(v) (((v) >> 8) & 15)
-
-#define RECMEMB 0xe4
- #define RECMEMB_IS_WR(v) ((v) & (1 << 31))
- #define RECMEMB_CAS(v) (((v) >> 16) & 0x1fff)
- #define RECMEMB_RAS(v) ((v) & 0xffff)
-
-/********************************************
- * i7300 Functions related to error detection
- ********************************************/
-
-/**
- * get_err_from_table() - Gets the error message from a table
- * @table: table name (array of char *)
- * @size: number of elements at the table
- * @pos: position of the element to be returned
- *
- * This is a small routine that gets the pos-th element of a table. If the
- * element doesn't exist (or it is empty), it returns "reserved".
- * Instead of calling it directly, the better is to call via the macro
- * GET_ERR_FROM_TABLE(), that automatically checks the table size via
- * ARRAY_SIZE() macro
- */
-static const char *get_err_from_table(const char *table[], int size, int pos)
-{
- if (unlikely(pos >= size))
- return "Reserved";
-
- if (unlikely(!table[pos]))
- return "Reserved";
-
- return table[pos];
-}
-
-#define GET_ERR_FROM_TABLE(table, pos) \
- get_err_from_table(table, ARRAY_SIZE(table), pos)
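
A hedged usage sketch (not part of the original source): with the tables above, the lowest set bit of a global error register can be turned into its message like this, mirroring what i7300_process_error_global() does below:

/* Illustrative helper only */
static const char *example_decode_global_hi(u32 error_reg)
{
	unsigned long errors = error_reg;
	unsigned int errnum = find_first_bit(&errors,
					     ARRAY_SIZE(ferr_global_hi_name));

	/* returns "Reserved" when error_reg is zero or the bit has no name */
	return GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
}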
-
-/**
- * i7300_process_error_global() - Retrieve the hardware error information from
- * the hardware global error registers and
- * sends it to dmesg
- * @mci: struct mem_ctl_info pointer
- */
-static void i7300_process_error_global(struct mem_ctl_info *mci)
-{
- struct i7300_pvt *pvt;
- u32 errnum, error_reg;
- unsigned long errors;
- const char *specific;
- bool is_fatal;
-
- pvt = mci->pvt_info;
-
- /* read in the 1st FATAL error register */
- pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
- FERR_GLOBAL_HI, &error_reg);
- if (unlikely(error_reg)) {
- errors = error_reg;
- errnum = find_first_bit(&errors,
- ARRAY_SIZE(ferr_global_hi_name));
- specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
- is_fatal = ferr_global_hi_is_fatal(errnum);
-
- /* Clear the error bit */
- pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
- FERR_GLOBAL_HI, error_reg);
-
- goto error_global;
- }
-
- pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
- FERR_GLOBAL_LO, &error_reg);
- if (unlikely(error_reg)) {
- errors = error_reg;
- errnum = find_first_bit(&errors,
- ARRAY_SIZE(ferr_global_lo_name));
- specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum);
- is_fatal = ferr_global_lo_is_fatal(errnum);
-
- /* Clear the error bit */
- pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
- FERR_GLOBAL_LO, error_reg);
-
- goto error_global;
- }
- return;
-
-error_global:
- i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n",
- is_fatal ? "Fatal" : "NOT fatal", specific);
-}
-
-/**
- * i7300_process_fbd_error() - Retrieve the hardware error information from
- * the FBD error registers and sends it via
- * EDAC error API calls
- * @mci: struct mem_ctl_info pointer
- */
-static void i7300_process_fbd_error(struct mem_ctl_info *mci)
-{
- struct i7300_pvt *pvt;
- u32 errnum, value, error_reg;
- u16 val16;
- unsigned branch, channel, bank, rank, cas, ras;
- u32 syndrome;
-
- unsigned long errors;
- const char *specific;
- bool is_wr;
-
- pvt = mci->pvt_info;
-
- /* read in the 1st FATAL error register */
- pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- FERR_FAT_FBD, &error_reg);
- if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) {
- errors = error_reg & FERR_FAT_FBD_ERR_MASK ;
- errnum = find_first_bit(&errors,
- ARRAY_SIZE(ferr_fat_fbd_name));
- specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum);
- branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
-
- pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
- NRECMEMA, &val16);
- bank = NRECMEMA_BANK(val16);
- rank = NRECMEMA_RANK(val16);
-
- pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- NRECMEMB, &value);
- is_wr = NRECMEMB_IS_WR(value);
- cas = NRECMEMB_CAS(value);
- ras = NRECMEMB_RAS(value);
-
- /* Clean the error register */
- pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- FERR_FAT_FBD, error_reg);
-
- snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
- "FATAL (Branch=%d DRAM-Bank=%d %s "
- "RAS=%d CAS=%d Err=0x%lx (%s))",
- branch, bank,
- is_wr ? "RDWR" : "RD",
- ras, cas,
- errors, specific);
-
- /* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, branch << 1,
- (branch << 1) + 1,
- pvt->tmp_prt_buffer);
- }
-
- /* read in the 1st NON-FATAL error register */
- pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- FERR_NF_FBD, &error_reg);
- if (unlikely(error_reg & FERR_NF_FBD_ERR_MASK)) {
- errors = error_reg & FERR_NF_FBD_ERR_MASK;
- errnum = find_first_bit(&errors,
- ARRAY_SIZE(ferr_nf_fbd_name));
- specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
- branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
-
- pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- REDMEMA, &syndrome);
-
- pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
- RECMEMA, &val16);
- bank = RECMEMA_BANK(val16);
- rank = RECMEMA_RANK(val16);
-
- pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- RECMEMB, &value);
- is_wr = RECMEMB_IS_WR(value);
- cas = RECMEMB_CAS(value);
- ras = RECMEMB_RAS(value);
-
- pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- REDMEMB, &value);
- channel = (branch << 1);
- if (IS_SECOND_CH(value))
- channel++;
-
- /* Clear the error bit */
- pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- FERR_NF_FBD, error_reg);
-
- /* Form out message */
- snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
- "Corrected error (Branch=%d, Channel %d), "
- " DRAM-Bank=%d %s "
- "RAS=%d CAS=%d, CE Err=0x%lx, Syndrome=0x%08x(%s))",
- branch, channel,
- bank,
- is_wr ? "RDWR" : "RD",
- ras, cas,
- errors, syndrome, specific);
-
- /*
- * Call the helper to output message
- * NOTE: Errors are reported per-branch, and not per-channel.
- * Currently, we don't know how to identify the right
- * channel.
- */
- edac_mc_handle_fbd_ce(mci, rank, channel,
- pvt->tmp_prt_buffer);
- }
- return;
-}
-
-/**
- * i7300_check_error() - Calls the error checking subroutines
- * @mci: struct mem_ctl_info pointer
- */
-static void i7300_check_error(struct mem_ctl_info *mci)
-{
- i7300_process_error_global(mci);
- i7300_process_fbd_error(mci);
-};
-
-/**
- * i7300_clear_error() - Clears the error registers
- * @mci: struct mem_ctl_info pointer
- */
-static void i7300_clear_error(struct mem_ctl_info *mci)
-{
- struct i7300_pvt *pvt = mci->pvt_info;
- u32 value;
- /*
- * All error values are RWC - we need to read and write 1 to the
- * bit that we want to cleanup
- */
-
- /* Clear global error registers */
- pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
- FERR_GLOBAL_HI, &value);
- pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
- FERR_GLOBAL_HI, value);
-
- pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
- FERR_GLOBAL_LO, &value);
- pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
- FERR_GLOBAL_LO, value);
-
- /* Clear FBD error registers */
- pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- FERR_FAT_FBD, &value);
- pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- FERR_FAT_FBD, value);
-
- pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- FERR_NF_FBD, &value);
- pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- FERR_NF_FBD, value);
-}
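
The clear routine relies on the registers being RWC (read, write 1 to clear): writing back the value that was just read clears exactly the bits that were set. Below is a minimal standalone sketch of that idiom, using a hypothetical in-memory register in place of the PCI config-space accesses:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical RWC status register kept in memory for illustration only. */
static uint32_t fake_status = 0x00000005;	/* two error bits pending */

static uint32_t reg_read(void)
{
	return fake_status;
}

static void reg_write(uint32_t val)
{
	fake_status &= ~val;	/* write-1-to-clear semantics */
}

int main(void)
{
	uint32_t value = reg_read();	/* read the pending error bits */

	reg_write(value);		/* write them back to clear them */
	printf("status after clear: 0x%08x\n", reg_read());	/* prints 0 */
	return 0;
}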
-
-/**
- * i7300_enable_error_reporting() - Enable the memory reporting logic at the
- * hardware
- * @mci: struct mem_ctl_info pointer
- */
-static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
-{
- struct i7300_pvt *pvt = mci->pvt_info;
- u32 fbd_error_mask;
-
- /* Read the FBD Error Mask Register */
- pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- EMASK_FBD, &fbd_error_mask);
-
- /* Enable with a '0' */
- fbd_error_mask &= ~(EMASK_FBD_ERR_MASK);
-
- pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- EMASK_FBD, fbd_error_mask);
-}
-
-/************************************************
- * i7300 Functions related to memory enumeration
- ************************************************/
-
-/**
- * decode_mtr() - Decodes the MTR descriptor, filling the edac structs
- * @pvt: pointer to the private data struct used by i7300 driver
- * @slot: DIMM slot (0 to 7)
- * @ch: Channel number within the branch (0 or 1)
- * @branch: Branch number (0 or 1)
- * @dinfo: Pointer to DIMM info where dimm size is stored
- * @p_csrow: Pointer to the struct csrow_info that corresponds to that element
- */
-static int decode_mtr(struct i7300_pvt *pvt,
- int slot, int ch, int branch,
- struct i7300_dimm_info *dinfo,
- struct csrow_info *p_csrow,
- u32 *nr_pages)
-{
- int mtr, ans, addrBits, channel;
-
- channel = to_channel(ch, branch);
-
- mtr = pvt->mtr[slot][branch];
- ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;
-
- debugf2("\tMTR%d CH%d: DIMMs are %s (mtr)\n",
- slot, channel,
- ans ? "Present" : "NOT Present");
-
- /* Determine if there is a DIMM present in this DIMM slot */
- if (!ans)
- return 0;
-
- /* Start with the number of bits for a Bank
- * on the DRAM */
- addrBits = MTR_DRAM_BANKS_ADDR_BITS;
-	/* Add the number of ROW bits */
- addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
- /* add the number of COLUMN bits */
- addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
- /* add the number of RANK bits */
- addrBits += MTR_DIMM_RANKS(mtr);
-
- addrBits += 6; /* add 64 bits per DIMM */
-	addrBits -= 20;	/* divide by 2^20 */
-	addrBits -= 3;	/* 8 bits per byte */
-
- dinfo->megabytes = 1 << addrBits;
- *nr_pages = dinfo->megabytes << 8;
-
- debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
-
- debugf2("\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
-
- debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
- debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANKS(mtr) ? "double" : "single");
- debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
- debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
- debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes);
-
- p_csrow->grain = 8;
- p_csrow->mtype = MEM_FB_DDR2;
- p_csrow->csrow_idx = slot;
- p_csrow->page_mask = 0;
-
- /*
-	 * The type of error detection actually depends on the
-	 * mode of operation. When there is just a single memory chip, at
- * socket 0, channel 0, it uses 8-byte-over-32-byte SECDED+ code.
- * In normal or mirrored mode, it uses Lockstep mode,
- * with the possibility of using an extended algorithm for x8 memories
- * See datasheet Sections 7.3.6 to 7.3.8
- */
-
- if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
- p_csrow->edac_mode = EDAC_SECDED;
- debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
- } else {
- debugf2("\t\tECC code is on Lockstep mode\n");
- if (MTR_DRAM_WIDTH(mtr) == 8)
- p_csrow->edac_mode = EDAC_S8ECD8ED;
- else
- p_csrow->edac_mode = EDAC_S4ECD4ED;
- }
-
- /* ask what device type on this row */
- if (MTR_DRAM_WIDTH(mtr) == 8) {
- debugf2("\t\tScrub algorithm for x8 is on %s mode\n",
- IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
- "enhanced" : "normal");
-
- p_csrow->dtype = DEV_X8;
- } else
- p_csrow->dtype = DEV_X4;
-
- return mtr;
-}
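
The size calculation in decode_mtr() works entirely in powers of two: the bank, row, column and rank address bits are summed, 6 bits are added for the 64-bit data width, and 23 bits are subtracted to convert from bits to mebibytes. A standalone sketch of that arithmetic, where the MTR_* accessors are replaced by hypothetical geometry values (2 bank bits, 14 row bits, 10 column bits, 1 rank bit):

#include <stdio.h>

int main(void)
{
	/* Hypothetical DIMM geometry, not read from real MTR registers. */
	int bank_bits = 2;	/* 4 banks */
	int row_bits  = 14;	/* 16384 rows */
	int col_bits  = 10;	/* 1024 columns */
	int rank_bits = 1;	/* dual rank */

	int addr_bits = bank_bits + row_bits + col_bits + rank_bits;

	addr_bits += 6;		/* 64 data bits per address */
	addr_bits -= 20;	/* express the result in units of 2^20 */
	addr_bits -= 3;		/* 8 bits per byte */

	unsigned int megabytes = 1u << addr_bits;
	unsigned int nr_pages  = megabytes << 8;	/* 256 4-KiB pages per MiB */

	printf("%u MiB, %u pages\n", megabytes, nr_pages);	/* 1024 MiB, 262144 pages */
	return 0;
}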
-
-/**
- * print_dimm_size() - Prints dump of the memory organization
- * @pvt: pointer to the private data struct used by i7300 driver
- *
- * Useful for debugging. If debug is disabled, this routine does nothing
- */
-static void print_dimm_size(struct i7300_pvt *pvt)
-{
-#ifdef CONFIG_EDAC_DEBUG
- struct i7300_dimm_info *dinfo;
- char *p;
- int space, n;
- int channel, slot;
-
- space = PAGE_SIZE;
- p = pvt->tmp_prt_buffer;
-
- n = snprintf(p, space, " ");
- p += n;
- space -= n;
- for (channel = 0; channel < MAX_CHANNELS; channel++) {
- n = snprintf(p, space, "channel %d | ", channel);
- p += n;
- space -= n;
- }
- debugf2("%s\n", pvt->tmp_prt_buffer);
- p = pvt->tmp_prt_buffer;
- space = PAGE_SIZE;
- n = snprintf(p, space, "-------------------------------"
- "------------------------------");
- p += n;
- space -= n;
- debugf2("%s\n", pvt->tmp_prt_buffer);
- p = pvt->tmp_prt_buffer;
- space = PAGE_SIZE;
-
- for (slot = 0; slot < MAX_SLOTS; slot++) {
- n = snprintf(p, space, "csrow/SLOT %d ", slot);
- p += n;
- space -= n;
-
- for (channel = 0; channel < MAX_CHANNELS; channel++) {
- dinfo = &pvt->dimm_info[slot][channel];
- n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
- p += n;
- space -= n;
- }
-
- debugf2("%s\n", pvt->tmp_prt_buffer);
- p = pvt->tmp_prt_buffer;
- space = PAGE_SIZE;
- }
-
- n = snprintf(p, space, "-------------------------------"
- "------------------------------");
- p += n;
- space -= n;
- debugf2("%s\n", pvt->tmp_prt_buffer);
- p = pvt->tmp_prt_buffer;
- space = PAGE_SIZE;
-#endif
-}
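
print_dimm_size() builds each debug line by walking a scratch buffer with snprintf(), advancing the write pointer and shrinking the remaining space after every call. A minimal standalone sketch of that pattern, with a hypothetical channel count:

#include <stdio.h>

int main(void)
{
	char buf[64];
	char *p = buf;
	int space = sizeof(buf);
	int n, channel;

	n = snprintf(p, space, "            ");	/* leading column header */
	p += n;
	space -= n;

	for (channel = 0; channel < 4; channel++) {
		n = snprintf(p, space, "channel %d | ", channel);
		p += n;
		space -= n;
	}

	printf("%s\n", buf);
	return 0;
}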
-
-/**
- * i7300_init_csrows() - Initialize the 'csrows' table within
- * the mci control structure with the
- * addressing of memory.
- * @mci: struct mem_ctl_info pointer
- */
-static int i7300_init_csrows(struct mem_ctl_info *mci)
-{
- struct i7300_pvt *pvt;
- struct i7300_dimm_info *dinfo;
- struct csrow_info *p_csrow;
- int rc = -ENODEV;
- int mtr;
- int ch, branch, slot, channel;
- u32 last_page = 0, nr_pages;
-
- pvt = mci->pvt_info;
-
- debugf2("Memory Technology Registers:\n");
-
- /* Get the AMB present registers for the four channels */
- for (branch = 0; branch < MAX_BRANCHES; branch++) {
- /* Read and dump branch 0's MTRs */
- channel = to_channel(0, branch);
- pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
- AMBPRESENT_0,
- &pvt->ambpresent[channel]);
- debugf2("\t\tAMB-present CH%d = 0x%x:\n",
- channel, pvt->ambpresent[channel]);
-
- channel = to_channel(1, branch);
- pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
- AMBPRESENT_1,
- &pvt->ambpresent[channel]);
- debugf2("\t\tAMB-present CH%d = 0x%x:\n",
- channel, pvt->ambpresent[channel]);
- }
-
- /* Get the set of MTR[0-7] regs by each branch */
- for (slot = 0; slot < MAX_SLOTS; slot++) {
- int where = mtr_regs[slot];
- for (branch = 0; branch < MAX_BRANCHES; branch++) {
- pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
- where,
- &pvt->mtr[slot][branch]);
- for (ch = 0; ch < MAX_BRANCHES; ch++) {
- int channel = to_channel(ch, branch);
-
- dinfo = &pvt->dimm_info[slot][channel];
- p_csrow = &mci->csrows[slot];
-
- mtr = decode_mtr(pvt, slot, ch, branch,
- dinfo, p_csrow, &nr_pages);
- /* if no DIMMS on this row, continue */
- if (!MTR_DIMMS_PRESENT(mtr))
- continue;
-
- /* Update per_csrow memory count */
- p_csrow->nr_pages += nr_pages;
- p_csrow->first_page = last_page;
- last_page += nr_pages;
- p_csrow->last_page = last_page;
-
- rc = 0;
- }
- }
- }
-
- return rc;
-}
-
-/**
- * decode_mir() - Decodes Memory Interleave Register (MIR) info
- * @mir_no: number of the MIR register to decode
- * @mir: array with the MIR data cached on the driver
- */
-static void decode_mir(int mir_no, u16 mir[MAX_MIR])
-{
- if (mir[mir_no] & 3)
- debugf2("MIR%d: limit= 0x%x Branch(es) that participate:"
- " %s %s\n",
- mir_no,
- (mir[mir_no] >> 4) & 0xfff,
- (mir[mir_no] & 1) ? "B0" : "",
- (mir[mir_no] & 2) ? "B1" : "");
-}
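
decode_mir() treats each 16-bit MIR value as a packed record: bits 0 and 1 flag whether branch 0 and branch 1 participate, and bits 4-15 hold the interleave limit. A small sketch that decodes a hypothetical MIR value the same way:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t mir = 0x0203;	/* hypothetical value, not read from hardware */

	unsigned int limit = (mir >> 4) & 0xfff;
	int b0 = mir & 1;
	int b1 = (mir & 2) ? 1 : 0;

	if (mir & 3)
		printf("limit=0x%x branches:%s%s\n",
		       limit, b0 ? " B0" : "", b1 ? " B1" : "");
	return 0;
}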
-
-/**
- * i7300_get_mc_regs() - Get the contents of the MC enumeration registers
- * @mci: struct mem_ctl_info pointer
- *
- * The data read is cached internally for later use
- */
-static int i7300_get_mc_regs(struct mem_ctl_info *mci)
-{
- struct i7300_pvt *pvt;
- u32 actual_tolm;
- int i, rc;
-
- pvt = mci->pvt_info;
-
- pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
- (u32 *) &pvt->ambase);
-
- debugf2("AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);
-
- /* Get the Branch Map regs */
- pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
- pvt->tolm >>= 12;
- debugf2("TOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
- pvt->tolm);
-
- actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
- debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
- actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
-
- /* Get memory controller settings */
- pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
- &pvt->mc_settings);
- pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A,
- &pvt->mc_settings_a);
-
- if (IS_SINGLE_MODE(pvt->mc_settings_a))
- debugf0("Memory controller operating on single mode\n");
- else
- debugf0("Memory controller operating on %s mode\n",
- IS_MIRRORED(pvt->mc_settings) ? "mirrored" : "non-mirrored");
-
- debugf0("Error detection is %s\n",
- IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
- debugf0("Retry is %s\n",
- IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
-
- /* Get Memory Interleave Range registers */
- pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
- &pvt->mir[0]);
- pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1,
- &pvt->mir[1]);
- pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2,
- &pvt->mir[2]);
-
- /* Decode the MIR regs */
- for (i = 0; i < MAX_MIR; i++)
- decode_mir(i, pvt->mir);
-
- rc = i7300_init_csrows(mci);
- if (rc < 0)
- return rc;
-
- /* Go and determine the size of each DIMM and place in an
- * orderly matrix */
- print_dimm_size(pvt);
-
- return 0;
-}
-
-/*************************************************
- * i7300 Functions related to device probe/release
- *************************************************/
-
-/**
- * i7300_put_devices() - Release the PCI devices
- * @mci: struct mem_ctl_info pointer
- */
-static void i7300_put_devices(struct mem_ctl_info *mci)
-{
- struct i7300_pvt *pvt;
- int branch;
-
- pvt = mci->pvt_info;
-
- /* Decrement usage count for devices */
- for (branch = 0; branch < MAX_CH_PER_BRANCH; branch++)
- pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]);
- pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs);
- pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map);
-}
-
-/**
- * i7300_get_devices() - Find and perform 'get' operation on the MCH's
- * device/functions we want to reference for this driver
- * @mci: struct mem_ctl_info pointer
- *
- * Access and prepare the several devices for usage:
- * I7300 devices used by this driver:
- * Device 16, functions 0,1 and 2: PCI_DEVICE_ID_INTEL_I7300_MCH_ERR
- * Device 21 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB0
- * Device 22 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB1
- */
-static int __devinit i7300_get_devices(struct mem_ctl_info *mci)
-{
- struct i7300_pvt *pvt;
- struct pci_dev *pdev;
-
- pvt = mci->pvt_info;
-
- /* Attempt to 'get' the MCH register we want */
- pdev = NULL;
- while (!pvt->pci_dev_16_1_fsb_addr_map ||
- !pvt->pci_dev_16_2_fsb_err_regs) {
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev);
- if (!pdev) {
- /* End of list, leave */
- i7300_printk(KERN_ERR,
- "'system address,Process Bus' "
- "device not found:"
- "vendor 0x%x device 0x%x ERR funcs "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
- goto error;
- }
-
- /* Store device 16 funcs 1 and 2 */
- switch (PCI_FUNC(pdev->devfn)) {
- case 1:
- pvt->pci_dev_16_1_fsb_addr_map = pdev;
- break;
- case 2:
- pvt->pci_dev_16_2_fsb_err_regs = pdev;
- break;
- }
- }
-
- debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->pci_dev_16_0_fsb_ctlr),
- pvt->pci_dev_16_0_fsb_ctlr->vendor,
- pvt->pci_dev_16_0_fsb_ctlr->device);
- debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->pci_dev_16_1_fsb_addr_map),
- pvt->pci_dev_16_1_fsb_addr_map->vendor,
- pvt->pci_dev_16_1_fsb_addr_map->device);
- debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->pci_dev_16_2_fsb_err_regs),
- pvt->pci_dev_16_2_fsb_err_regs->vendor,
- pvt->pci_dev_16_2_fsb_err_regs->device);
-
- pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
- NULL);
- if (!pvt->pci_dev_2x_0_fbd_branch[0]) {
- i7300_printk(KERN_ERR,
- "MC: 'BRANCH 0' device not found:"
- "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0);
- goto error;
- }
-
- pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_FB1,
- NULL);
- if (!pvt->pci_dev_2x_0_fbd_branch[1]) {
- i7300_printk(KERN_ERR,
- "MC: 'BRANCH 1' device not found:"
- "vendor 0x%x device 0x%x Func 0 "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_FB1);
- goto error;
- }
-
- return 0;
-
-error:
- i7300_put_devices(mci);
- return -ENODEV;
-}
-
-/**
- * i7300_init_one() - Probe for one instance of the device
- * @pdev: struct pci_dev pointer
- * @id: struct pci_device_id pointer - currently unused
- */
-static int __devinit i7300_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- struct mem_ctl_info *mci;
- struct i7300_pvt *pvt;
- int num_channels;
- int num_dimms_per_channel;
- int num_csrows;
- int rc;
-
- /* wake up device */
- rc = pci_enable_device(pdev);
- if (rc == -EIO)
- return rc;
-
- debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
- __func__,
- pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
-
- /* We only are looking for func 0 of the set */
- if (PCI_FUNC(pdev->devfn) != 0)
- return -ENODEV;
-
-	/* As we don't have a motherboard identification routine to determine
-	 * the actual number of slots/dimms per channel, we use the maximum
-	 * specified by the chipset. Thus, we might report more DIMMs per
-	 * channel than are actually on the motherboard, but this allows the
-	 * driver to support up to the chipset maximum without any fancy
-	 * motherboard detection.
-	 */
- num_dimms_per_channel = MAX_SLOTS;
- num_channels = MAX_CHANNELS;
- num_csrows = MAX_SLOTS * MAX_CHANNELS;
-
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
-
- /* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
- if (mci == NULL)
- return -ENOMEM;
-
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
-
- mci->dev = &pdev->dev; /* record ptr to the generic device */
-
- pvt = mci->pvt_info;
- pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */
-
- pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!pvt->tmp_prt_buffer) {
- edac_mc_free(mci);
- return -ENOMEM;
- }
-
- /* 'get' the pci devices we want to reserve for our use */
- if (i7300_get_devices(mci))
- goto fail0;
-
- mci->mc_idx = 0;
- mci->mtype_cap = MEM_FLAG_FB_DDR2;
- mci->edac_ctl_cap = EDAC_FLAG_NONE;
- mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = "i7300_edac.c";
- mci->mod_ver = I7300_REVISION;
- mci->ctl_name = i7300_devs[0].ctl_name;
- mci->dev_name = pci_name(pdev);
- mci->ctl_page_to_phys = NULL;
-
- /* Set the function pointer to an actual operation function */
- mci->edac_check = i7300_check_error;
-
- /* initialize the MC control structure 'csrows' table
- * with the mapping and control information */
- if (i7300_get_mc_regs(mci)) {
- debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
- " because i7300_init_csrows() returned nonzero "
- "value\n");
- mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
- } else {
- debugf1("MC: Enable error reporting now\n");
- i7300_enable_error_reporting(mci);
- }
-
- /* add this new MC control structure to EDAC's list of MCs */
- if (edac_mc_add_mc(mci)) {
- debugf0("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
- /* FIXME: perhaps some code should go here that disables error
- * reporting if we just enabled it
- */
- goto fail1;
- }
-
- i7300_clear_error(mci);
-
- /* allocating generic PCI control info */
- i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
- if (!i7300_pci) {
- printk(KERN_WARNING
- "%s(): Unable to create PCI control\n",
- __func__);
- printk(KERN_WARNING
- "%s(): PCI error report via EDAC not setup\n",
- __func__);
- }
-
- return 0;
-
- /* Error exit unwinding stack */
-fail1:
-
- i7300_put_devices(mci);
-
-fail0:
- kfree(pvt->tmp_prt_buffer);
- edac_mc_free(mci);
- return -ENODEV;
-}
-
-/**
- * i7300_remove_one() - Remove the driver
- * @pdev: struct pci_dev pointer
- */
-static void __devexit i7300_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
- char *tmp;
-
- debugf0(__FILE__ ": %s()\n", __func__);
-
- if (i7300_pci)
- edac_pci_release_generic_ctl(i7300_pci);
-
- mci = edac_mc_del_mc(&pdev->dev);
- if (!mci)
- return;
-
- tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer;
-
- /* retrieve references to resources, and free those resources */
- i7300_put_devices(mci);
-
- kfree(tmp);
- edac_mc_free(mci);
-}
-
-/*
- * pci_device_id: table for which devices we are looking for
- *
- * Has only 8086:360c PCI ID
- */
-static DEFINE_PCI_DEVICE_TABLE(i7300_pci_tbl) = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
- {0,} /* 0 terminated list. */
-};
-
-MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);
-
-/*
- * i7300_driver: pci_driver structure for this module
- */
-static struct pci_driver i7300_driver = {
- .name = "i7300_edac",
- .probe = i7300_init_one,
- .remove = __devexit_p(i7300_remove_one),
- .id_table = i7300_pci_tbl,
-};
-
-/**
- * i7300_init() - Registers the driver
- */
-static int __init i7300_init(void)
-{
- int pci_rc;
-
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
-
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- pci_rc = pci_register_driver(&i7300_driver);
-
- return (pci_rc < 0) ? pci_rc : 0;
-}
-
-/**
- * i7300_exit() - Unregisters the driver
- */
-static void __exit i7300_exit(void)
-{
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
- pci_unregister_driver(&i7300_driver);
-}
-
-module_init(i7300_init);
-module_exit(i7300_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
-MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
-MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
- I7300_REVISION);
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/ANDROID_3.4.5/drivers/edac/i7core_edac.c b/ANDROID_3.4.5/drivers/edac/i7core_edac.c
deleted file mode 100644
index 0fe2277d..00000000
--- a/ANDROID_3.4.5/drivers/edac/i7core_edac.c
+++ /dev/null
@@ -1,2506 +0,0 @@
-/* Intel i7 core/Nehalem Memory Controller kernel module
- *
- * This driver supports the memory controllers found on the Intel
- * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
- * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
- * and Westmere-EP.
- *
- * This file may be distributed under the terms of the
- * GNU General Public License version 2 only.
- *
- * Copyright (c) 2009-2010 by:
- * Mauro Carvalho Chehab <mchehab@redhat.com>
- *
- * Red Hat Inc. http://www.redhat.com
- *
- * Forked and adapted from the i5400_edac driver
- *
- * Based on the following public Intel datasheets:
- * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
- * Datasheet, Volume 2:
- * http://download.intel.com/design/processor/datashts/320835.pdf
- * Intel Xeon Processor 5500 Series Datasheet Volume 2
- * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
- * also available at:
- * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/dmi.h>
-#include <linux/edac.h>
-#include <linux/mmzone.h>
-#include <linux/smp.h>
-#include <asm/mce.h>
-#include <asm/processor.h>
-#include <asm/div64.h>
-
-#include "edac_core.h"
-
-/* Static vars */
-static LIST_HEAD(i7core_edac_list);
-static DEFINE_MUTEX(i7core_edac_lock);
-static int probed;
-
-static int use_pci_fixup;
-module_param(use_pci_fixup, int, 0444);
-MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
-/*
- * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
- * registers start at bus 255, and are not reported by BIOS.
- * We currently find devices with only 2 sockets. In order to support more
- * QPI (Quick Path Interconnect) sockets, just increment this number.
- */
-#define MAX_SOCKET_BUSES 2
-
-
-/*
- * Alter this version for the module when modifications are made
- */
-#define I7CORE_REVISION " Ver: 1.0.0"
-#define EDAC_MOD_STR "i7core_edac"
-
-/*
- * Debug macros
- */
-#define i7core_printk(level, fmt, arg...) \
- edac_printk(level, "i7core", fmt, ##arg)
-
-#define i7core_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
-
-/*
- * i7core Memory Controller Registers
- */
-
- /* OFFSETS for Device 0 Function 0 */
-
-#define MC_CFG_CONTROL 0x90
- #define MC_CFG_UNLOCK 0x02
- #define MC_CFG_LOCK 0x00
-
- /* OFFSETS for Device 3 Function 0 */
-
-#define MC_CONTROL 0x48
-#define MC_STATUS 0x4c
-#define MC_MAX_DOD 0x64
-
-/*
- * OFFSETS for Device 3 Function 4, as indicated on the Xeon 5500 datasheet:
- * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
- */
-
-#define MC_TEST_ERR_RCV1 0x60
- #define DIMM2_COR_ERR(r) ((r) & 0x7fff)
-
-#define MC_TEST_ERR_RCV0 0x64
- #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
- #define DIMM0_COR_ERR(r) ((r) & 0x7fff)
-
-/* OFFSETS for Device 3 Function 2, as indicated on the Xeon 5500 datasheet */
-#define MC_SSRCONTROL 0x48
- #define SSR_MODE_DISABLE 0x00
- #define SSR_MODE_ENABLE 0x01
- #define SSR_MODE_MASK 0x03
-
-#define MC_SCRUB_CONTROL 0x4c
- #define STARTSCRUB (1 << 24)
- #define SCRUBINTERVAL_MASK 0xffffff
-
-#define MC_COR_ECC_CNT_0 0x80
-#define MC_COR_ECC_CNT_1 0x84
-#define MC_COR_ECC_CNT_2 0x88
-#define MC_COR_ECC_CNT_3 0x8c
-#define MC_COR_ECC_CNT_4 0x90
-#define MC_COR_ECC_CNT_5 0x94
-
-#define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff)
-#define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff)
-
-
- /* OFFSETS for Devices 4,5 and 6 Function 0 */
-
-#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
- #define THREE_DIMMS_PRESENT (1 << 24)
- #define SINGLE_QUAD_RANK_PRESENT (1 << 23)
- #define QUAD_RANK_PRESENT (1 << 22)
- #define REGISTERED_DIMM (1 << 15)
-
-#define MC_CHANNEL_MAPPER 0x60
- #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1)
- #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1)
-
-#define MC_CHANNEL_RANK_PRESENT 0x7c
- #define RANK_PRESENT_MASK 0xffff
-
-#define MC_CHANNEL_ADDR_MATCH 0xf0
-#define MC_CHANNEL_ERROR_MASK 0xf8
-#define MC_CHANNEL_ERROR_INJECT 0xfc
- #define INJECT_ADDR_PARITY 0x10
- #define INJECT_ECC 0x08
- #define MASK_CACHELINE 0x06
- #define MASK_FULL_CACHELINE 0x06
- #define MASK_MSB32_CACHELINE 0x04
- #define MASK_LSB32_CACHELINE 0x02
- #define NO_MASK_CACHELINE 0x00
- #define REPEAT_EN 0x01
-
- /* OFFSETS for Devices 4,5 and 6 Function 1 */
-
-#define MC_DOD_CH_DIMM0 0x48
-#define MC_DOD_CH_DIMM1 0x4c
-#define MC_DOD_CH_DIMM2 0x50
- #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10))
- #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10)
- #define DIMM_PRESENT_MASK (1 << 9)
- #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
- #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
- #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
- #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
- #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
- #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
- #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
- #define MC_DOD_NUMCOL_MASK 3
- #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
-
-#define MC_RANK_PRESENT 0x7c
-
-#define MC_SAG_CH_0 0x80
-#define MC_SAG_CH_1 0x84
-#define MC_SAG_CH_2 0x88
-#define MC_SAG_CH_3 0x8c
-#define MC_SAG_CH_4 0x90
-#define MC_SAG_CH_5 0x94
-#define MC_SAG_CH_6 0x98
-#define MC_SAG_CH_7 0x9c
-
-#define MC_RIR_LIMIT_CH_0 0x40
-#define MC_RIR_LIMIT_CH_1 0x44
-#define MC_RIR_LIMIT_CH_2 0x48
-#define MC_RIR_LIMIT_CH_3 0x4C
-#define MC_RIR_LIMIT_CH_4 0x50
-#define MC_RIR_LIMIT_CH_5 0x54
-#define MC_RIR_LIMIT_CH_6 0x58
-#define MC_RIR_LIMIT_CH_7 0x5C
-#define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
-
-#define MC_RIR_WAY_CH 0x80
- #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
- #define MC_RIR_WAY_RANK_MASK 0x7
-
-/*
- * i7core structs
- */
-
-#define NUM_CHANS 3
-#define MAX_DIMMS 3 /* Max DIMMS per channel */
-#define MAX_MCR_FUNC 4
-#define MAX_CHAN_FUNC 3
-
-struct i7core_info {
- u32 mc_control;
- u32 mc_status;
- u32 max_dod;
- u32 ch_map;
-};
-
-
-struct i7core_inject {
- int enable;
-
- u32 section;
- u32 type;
- u32 eccmask;
-
- /* Error address mask */
- int channel, dimm, rank, bank, page, col;
-};
-
-struct i7core_channel {
- u32 ranks;
- u32 dimms;
-};
-
-struct pci_id_descr {
- int dev;
- int func;
- int dev_id;
- int optional;
-};
-
-struct pci_id_table {
- const struct pci_id_descr *descr;
- int n_devs;
-};
-
-struct i7core_dev {
- struct list_head list;
- u8 socket;
- struct pci_dev **pdev;
- int n_devs;
- struct mem_ctl_info *mci;
-};
-
-struct i7core_pvt {
- struct pci_dev *pci_noncore;
- struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
- struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
-
- struct i7core_dev *i7core_dev;
-
- struct i7core_info info;
- struct i7core_inject inject;
- struct i7core_channel channel[NUM_CHANS];
-
- int ce_count_available;
- int csrow_map[NUM_CHANS][MAX_DIMMS];
-
- /* ECC corrected errors counts per udimm */
- unsigned long udimm_ce_count[MAX_DIMMS];
- int udimm_last_ce_count[MAX_DIMMS];
- /* ECC corrected errors counts per rdimm */
- unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
- int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
-
- bool is_registered, enable_scrub;
-
- /* Fifo double buffers */
- struct mce mce_entry[MCE_LOG_LEN];
- struct mce mce_outentry[MCE_LOG_LEN];
-
- /* Fifo in/out counters */
- unsigned mce_in, mce_out;
-
-	/* Count of errors that could not be retrieved */
- unsigned mce_overrun;
-
- /* DCLK Frequency used for computing scrub rate */
- int dclk_freq;
-
- /* Struct to control EDAC polling */
- struct edac_pci_ctl_info *i7core_pci;
-};
-
-#define PCI_DESCR(device, function, device_id) \
- .dev = (device), \
- .func = (function), \
- .dev_id = (device_id)
-
-static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
- /* Memory controller */
- { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
- { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
- /* Exists only for RDIMM */
- { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
- { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
-
- /* Channel 0 */
- { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
- { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
- { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
- { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },
-
- /* Channel 1 */
- { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
- { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
- { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
- { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },
-
- /* Channel 2 */
- { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
- { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
- { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
- { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
-
- /* Generic Non-core registers */
- /*
- * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
- * On Xeon 55xx, however, it has a different id (8086:2c40). So,
- * the probing code needs to test for the other address in case of
- * failure of this one
- */
- { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
-
-};
-
-static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
- { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
- { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
- { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },
-
- { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
- { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
- { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
- { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) },
-
- { PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
- { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
- { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
- { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
-
- /*
-	 * This PCI device has an alternate address on some
-	 * processors, such as the Core i7 860
- */
- { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
-};
-
-static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
- /* Memory controller */
- { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
- { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
- /* Exists only for RDIMM */
- { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 },
- { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },
-
- /* Channel 0 */
- { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
- { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
- { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
- { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) },
-
- /* Channel 1 */
- { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
- { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
- { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
- { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) },
-
- /* Channel 2 */
- { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
- { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
- { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
- { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },
-
- /* Generic Non-core registers */
- { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },
-
-};
-
-#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
-static const struct pci_id_table pci_dev_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
- PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
- PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
- {0,} /* 0 terminated list. */
-};
-
-/*
- * pci_device_id table for which devices we are looking for
- */
-static DEFINE_PCI_DEVICE_TABLE(i7core_pci_tbl) = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
- {0,} /* 0 terminated list. */
-};
-
-/****************************************************************************
- Ancillary status routines
- ****************************************************************************/
-
- /* MC_CONTROL bits */
-#define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch)))
-#define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1))
-
- /* MC_STATUS bits */
-#define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4))
-#define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch))
-
- /* MC_MAX_DOD read functions */
-static inline int numdimms(u32 dimms)
-{
- return (dimms & 0x3) + 1;
-}
-
-static inline int numrank(u32 rank)
-{
- static int ranks[4] = { 1, 2, 4, -EINVAL };
-
- return ranks[rank & 0x3];
-}
-
-static inline int numbank(u32 bank)
-{
- static int banks[4] = { 4, 8, 16, -EINVAL };
-
- return banks[bank & 0x3];
-}
-
-static inline int numrow(u32 row)
-{
- static int rows[8] = {
- 1 << 12, 1 << 13, 1 << 14, 1 << 15,
- 1 << 16, -EINVAL, -EINVAL, -EINVAL,
- };
-
- return rows[row & 0x7];
-}
-
-static inline int numcol(u32 col)
-{
- static int cols[8] = {
- 1 << 10, 1 << 11, 1 << 12, -EINVAL,
- };
- return cols[col & 0x3];
-}
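
These helpers turn the 2- and 3-bit fields of MC_MAX_DOD into DIMM counts and geometry, exactly as used later by get_dimm_config(). A standalone sketch that applies the same lookup tables to a hypothetical register value (-1 stands in for -EINVAL here):

#include <stdio.h>
#include <stdint.h>

static int numdimms(uint32_t v) { return (v & 0x3) + 1; }

static int numrank(uint32_t v)
{
	static const int ranks[4] = { 1, 2, 4, -1 };
	return ranks[v & 0x3];
}

static int numbank(uint32_t v)
{
	static const int banks[4] = { 4, 8, 16, -1 };
	return banks[v & 0x3];
}

static int numrow(uint32_t v)
{
	static const int rows[8] = { 1 << 12, 1 << 13, 1 << 14, 1 << 15,
				     1 << 16, -1, -1, -1 };
	return rows[v & 0x7];
}

static int numcol(uint32_t v)
{
	static const int cols[4] = { 1 << 10, 1 << 11, 1 << 12, -1 };
	return cols[v & 0x3];
}

int main(void)
{
	/* Hypothetical MC_MAX_DOD contents: 3 DIMMs, 2 ranks, 8 banks,
	 * 2^14 rows, 2^11 columns. */
	uint32_t max_dod = (2u << 0) | (1u << 2) | (1u << 4) | (2u << 6) | (1u << 9);

	printf("DIMMs=%d ranks=%d banks=%d rows=%d cols=%d\n",
	       numdimms(max_dod), numrank(max_dod >> 2), numbank(max_dod >> 4),
	       numrow(max_dod >> 6), numcol(max_dod >> 9));
	return 0;
}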
-
-static struct i7core_dev *get_i7core_dev(u8 socket)
-{
- struct i7core_dev *i7core_dev;
-
- list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
- if (i7core_dev->socket == socket)
- return i7core_dev;
- }
-
- return NULL;
-}
-
-static struct i7core_dev *alloc_i7core_dev(u8 socket,
- const struct pci_id_table *table)
-{
- struct i7core_dev *i7core_dev;
-
- i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
- if (!i7core_dev)
- return NULL;
-
- i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
- GFP_KERNEL);
- if (!i7core_dev->pdev) {
- kfree(i7core_dev);
- return NULL;
- }
-
- i7core_dev->socket = socket;
- i7core_dev->n_devs = table->n_devs;
- list_add_tail(&i7core_dev->list, &i7core_edac_list);
-
- return i7core_dev;
-}
-
-static void free_i7core_dev(struct i7core_dev *i7core_dev)
-{
- list_del(&i7core_dev->list);
- kfree(i7core_dev->pdev);
- kfree(i7core_dev);
-}
-
-/****************************************************************************
- Memory check routines
- ****************************************************************************/
-static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
- unsigned func)
-{
- struct i7core_dev *i7core_dev = get_i7core_dev(socket);
- int i;
-
- if (!i7core_dev)
- return NULL;
-
- for (i = 0; i < i7core_dev->n_devs; i++) {
- if (!i7core_dev->pdev[i])
- continue;
-
- if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
- PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
- return i7core_dev->pdev[i];
- }
- }
-
- return NULL;
-}
-
-/**
- * i7core_get_active_channels() - gets the number of channels and csrows
- * @socket: Quick Path Interconnect socket
- * @channels: Number of channels that will be returned
- * @csrows: Number of csrows found
- *
- * Since the EDAC core needs to know in advance the number of available
- * channels and csrows in order to allocate memory for them, two similar
- * steps are needed. The first step, implemented in this function, counts
- * the csrows/channels present on one socket; the result is used to size
- * the mci components properly.
- *
- * It should be noted that none of the currently available datasheets
- * explain, or even mention, how csrows are seen by the memory controller.
- * So we need to add a fake description for csrows: this driver attributes
- * one DIMM to one csrow.
- */
-static int i7core_get_active_channels(const u8 socket, unsigned *channels,
- unsigned *csrows)
-{
- struct pci_dev *pdev = NULL;
- int i, j;
- u32 status, control;
-
- *channels = 0;
- *csrows = 0;
-
- pdev = get_pdev_slot_func(socket, 3, 0);
- if (!pdev) {
- i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
- socket);
- return -ENODEV;
- }
-
- /* Device 3 function 0 reads */
- pci_read_config_dword(pdev, MC_STATUS, &status);
- pci_read_config_dword(pdev, MC_CONTROL, &control);
-
- for (i = 0; i < NUM_CHANS; i++) {
- u32 dimm_dod[3];
- /* Check if the channel is active */
- if (!(control & (1 << (8 + i))))
- continue;
-
- /* Check if the channel is disabled */
- if (status & (1 << i))
- continue;
-
- pdev = get_pdev_slot_func(socket, i + 4, 1);
- if (!pdev) {
- i7core_printk(KERN_ERR, "Couldn't find socket %d "
- "fn %d.%d!!!\n",
- socket, i + 4, 1);
- return -ENODEV;
- }
- /* Devices 4-6 function 1 */
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM0, &dimm_dod[0]);
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM1, &dimm_dod[1]);
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM2, &dimm_dod[2]);
-
- (*channels)++;
-
- for (j = 0; j < 3; j++) {
- if (!DIMM_PRESENT(dimm_dod[j]))
- continue;
- (*csrows)++;
- }
- }
-
- debugf0("Number of active channels on socket %d: %d\n",
- socket, *channels);
-
- return 0;
-}
-
-static int get_dimm_config(const struct mem_ctl_info *mci)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- struct csrow_info *csr;
- struct pci_dev *pdev;
- int i, j;
- int csrow = 0;
- unsigned long last_page = 0;
- enum edac_type mode;
- enum mem_type mtype;
-
- /* Get data from the MC register, function 0 */
- pdev = pvt->pci_mcr[0];
- if (!pdev)
- return -ENODEV;
-
- /* Device 3 function 0 reads */
- pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
- pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
- pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
- pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
-
- debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
- pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
- pvt->info.max_dod, pvt->info.ch_map);
-
- if (ECC_ENABLED(pvt)) {
- debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
- if (ECCx8(pvt))
- mode = EDAC_S8ECD8ED;
- else
- mode = EDAC_S4ECD4ED;
- } else {
- debugf0("ECC disabled\n");
- mode = EDAC_NONE;
- }
-
- /* FIXME: need to handle the error codes */
- debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
- "x%x x 0x%x\n",
- numdimms(pvt->info.max_dod),
- numrank(pvt->info.max_dod >> 2),
- numbank(pvt->info.max_dod >> 4),
- numrow(pvt->info.max_dod >> 6),
- numcol(pvt->info.max_dod >> 9));
-
- for (i = 0; i < NUM_CHANS; i++) {
- u32 data, dimm_dod[3], value[8];
-
- if (!pvt->pci_ch[i][0])
- continue;
-
- if (!CH_ACTIVE(pvt, i)) {
- debugf0("Channel %i is not active\n", i);
- continue;
- }
- if (CH_DISABLED(pvt, i)) {
- debugf0("Channel %i is disabled\n", i);
- continue;
- }
-
- /* Devices 4-6 function 0 */
- pci_read_config_dword(pvt->pci_ch[i][0],
- MC_CHANNEL_DIMM_INIT_PARAMS, &data);
-
- pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
- 4 : 2;
-
- if (data & REGISTERED_DIMM)
- mtype = MEM_RDDR3;
- else
- mtype = MEM_DDR3;
-#if 0
- if (data & THREE_DIMMS_PRESENT)
- pvt->channel[i].dimms = 3;
- else if (data & SINGLE_QUAD_RANK_PRESENT)
- pvt->channel[i].dimms = 1;
- else
- pvt->channel[i].dimms = 2;
-#endif
-
- /* Devices 4-6 function 1 */
- pci_read_config_dword(pvt->pci_ch[i][1],
- MC_DOD_CH_DIMM0, &dimm_dod[0]);
- pci_read_config_dword(pvt->pci_ch[i][1],
- MC_DOD_CH_DIMM1, &dimm_dod[1]);
- pci_read_config_dword(pvt->pci_ch[i][1],
- MC_DOD_CH_DIMM2, &dimm_dod[2]);
-
- debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
- "%d ranks, %cDIMMs\n",
- i,
- RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
- data,
- pvt->channel[i].ranks,
- (data & REGISTERED_DIMM) ? 'R' : 'U');
-
- for (j = 0; j < 3; j++) {
- u32 banks, ranks, rows, cols;
- u32 size, npages;
-
- if (!DIMM_PRESENT(dimm_dod[j]))
- continue;
-
- banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
- ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
- rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
- cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
-
- /* DDR3 has 8 I/O banks */
- size = (rows * cols * banks * ranks) >> (20 - 3);
-
- pvt->channel[i].dimms++;
-
- debugf0("\tdimm %d %d Mb offset: %x, "
- "bank: %d, rank: %d, row: %#x, col: %#x\n",
- j, size,
- RANKOFFSET(dimm_dod[j]),
- banks, ranks, rows, cols);
-
- npages = MiB_TO_PAGES(size);
-
- csr = &mci->csrows[csrow];
- csr->first_page = last_page + 1;
- last_page += npages;
- csr->last_page = last_page;
- csr->nr_pages = npages;
-
- csr->page_mask = 0;
- csr->grain = 8;
- csr->csrow_idx = csrow;
- csr->nr_channels = 1;
-
- csr->channels[0].chan_idx = i;
- csr->channels[0].ce_count = 0;
-
- pvt->csrow_map[i][j] = csrow;
-
- switch (banks) {
- case 4:
- csr->dtype = DEV_X4;
- break;
- case 8:
- csr->dtype = DEV_X8;
- break;
- case 16:
- csr->dtype = DEV_X16;
- break;
- default:
- csr->dtype = DEV_UNKNOWN;
- }
-
- csr->edac_mode = mode;
- csr->mtype = mtype;
- snprintf(csr->channels[0].label,
- sizeof(csr->channels[0].label),
- "CPU#%uChannel#%u_DIMM#%u",
- pvt->i7core_dev->socket, i, j);
-
- csrow++;
- }
-
- pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
- pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
- pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
- pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
- pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
- pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
- pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
- pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
- debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
- for (j = 0; j < 8; j++)
- debugf1("\t\t%#x\t%#x\t%#x\n",
- (value[j] >> 27) & 0x1,
- (value[j] >> 24) & 0x7,
- (value[j] & ((1 << 24) - 1)));
- }
-
- return 0;
-}
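
The per-DIMM size computation in get_dimm_config() is again pure shift arithmetic: rows * cols * banks * ranks gives the number of 64-bit (8-byte) locations, shifting right by (20 - 3) expresses that in MiB, and the page conversion assumes 4-KiB pages. A worked example with hypothetical DDR3 geometry (the macro below is a local stand-in for the driver's MiB_TO_PAGES()):

#include <stdio.h>
#include <stdint.h>

#define MiB_TO_PAGES(mib)	((mib) << (20 - 12))	/* 4-KiB pages per MiB */

int main(void)
{
	/* Hypothetical DDR3 DIMM geometry for illustration only. */
	uint64_t rows  = 1 << 14;
	uint64_t cols  = 1 << 10;
	uint64_t banks = 8;
	uint64_t ranks = 2;

	/* addresses * 8 bytes each, expressed in MiB: >> (20 - 3) */
	uint64_t size_mib = (rows * cols * banks * ranks) >> (20 - 3);
	uint64_t npages   = MiB_TO_PAGES(size_mib);

	printf("%llu MiB -> %llu pages\n",
	       (unsigned long long)size_mib,
	       (unsigned long long)npages);	/* 2048 MiB -> 524288 pages */
	return 0;
}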
-
-/****************************************************************************
- Error insertion routines
- ****************************************************************************/
-
-/* The i7core has independent error injection features per channel.
-   However, to keep the code simpler, we don't allow enabling error injection
-   on more than one channel.
-   Also, since a change to an inject parameter is only applied at enable time,
-   we disable error injection on every write to the sysfs nodes that control
-   the error code injection.
- */
-static int disable_inject(const struct mem_ctl_info *mci)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
-
- pvt->inject.enable = 0;
-
- if (!pvt->pci_ch[pvt->inject.channel][0])
- return -ENODEV;
-
- pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
- MC_CHANNEL_ERROR_INJECT, 0);
-
- return 0;
-}
-
-/*
- * i7core inject inject.section
- *
- * accept and store error injection inject.section value
- * bit 0 - refers to the lower 32-byte half cacheline
- * bit 1 - refers to the upper 32-byte half cacheline
- */
-static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- unsigned long value;
- int rc;
-
- if (pvt->inject.enable)
- disable_inject(mci);
-
- rc = strict_strtoul(data, 10, &value);
- if ((rc < 0) || (value > 3))
- return -EIO;
-
- pvt->inject.section = (u32) value;
- return count;
-}
-
-static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
- char *data)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- return sprintf(data, "0x%08x\n", pvt->inject.section);
-}
-
-/*
- * i7core inject.type
- *
- * accept and store error injection inject.type value
- * bit 0 - repeat enable - Enable error repetition
- * bit 1 - inject ECC error
- * bit 2 - inject parity error
- */
-static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- unsigned long value;
- int rc;
-
- if (pvt->inject.enable)
- disable_inject(mci);
-
- rc = strict_strtoul(data, 10, &value);
- if ((rc < 0) || (value > 7))
- return -EIO;
-
- pvt->inject.type = (u32) value;
- return count;
-}
-
-static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
- char *data)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- return sprintf(data, "0x%08x\n", pvt->inject.type);
-}
-
-/*
- * i7core_inject_eccmask_store
- *
- * The type of error (UE/CE) will depend on the inject.eccmask value:
- * Any bits set to a 1 will flip the corresponding ECC bit
- * Correctable errors can be injected by flipping 1 bit or the bits within
- * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
- * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
- * uncorrectable error to be injected.
- */
-static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- unsigned long value;
- int rc;
-
- if (pvt->inject.enable)
- disable_inject(mci);
-
- rc = strict_strtoul(data, 10, &value);
- if (rc < 0)
- return -EIO;
-
- pvt->inject.eccmask = (u32) value;
- return count;
-}
-
-static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
- char *data)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
-}
-
-/*
- * i7core_addrmatch
- *
- * Address-match criteria (channel, dimm, rank, bank, page, col) used to
- * select where the error will be injected. Writing "any" (stored as -1)
- * makes the injection logic ignore that particular field.
- */
-
-#define DECLARE_ADDR_MATCH(param, limit) \
-static ssize_t i7core_inject_store_##param( \
- struct mem_ctl_info *mci, \
- const char *data, size_t count) \
-{ \
- struct i7core_pvt *pvt; \
- long value; \
- int rc; \
- \
- debugf1("%s()\n", __func__); \
- pvt = mci->pvt_info; \
- \
- if (pvt->inject.enable) \
- disable_inject(mci); \
- \
- if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
- value = -1; \
- else { \
- rc = strict_strtoul(data, 10, &value); \
- if ((rc < 0) || (value >= limit)) \
- return -EIO; \
- } \
- \
- pvt->inject.param = value; \
- \
- return count; \
-} \
- \
-static ssize_t i7core_inject_show_##param( \
- struct mem_ctl_info *mci, \
- char *data) \
-{ \
- struct i7core_pvt *pvt; \
- \
- pvt = mci->pvt_info; \
- debugf1("%s() pvt=%p\n", __func__, pvt); \
- if (pvt->inject.param < 0) \
- return sprintf(data, "any\n"); \
- else \
- return sprintf(data, "%d\n", pvt->inject.param);\
-}
-
-#define ATTR_ADDR_MATCH(param) \
- { \
- .attr = { \
- .name = #param, \
- .mode = (S_IRUGO | S_IWUSR) \
- }, \
- .show = i7core_inject_show_##param, \
- .store = i7core_inject_store_##param, \
- }
-
-DECLARE_ADDR_MATCH(channel, 3);
-DECLARE_ADDR_MATCH(dimm, 3);
-DECLARE_ADDR_MATCH(rank, 4);
-DECLARE_ADDR_MATCH(bank, 32);
-DECLARE_ADDR_MATCH(page, 0x10000);
-DECLARE_ADDR_MATCH(col, 0x4000);
-
-static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
-{
- u32 read;
- int count;
-
- debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
- dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
- where, val);
-
- for (count = 0; count < 10; count++) {
- if (count)
- msleep(100);
- pci_write_config_dword(dev, where, val);
- pci_read_config_dword(dev, where, &read);
-
- if (read == val)
- return 0;
- }
-
- i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
- "write=%08x. Read=%08x\n",
- dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
- where, val, read);
-
- return -EINVAL;
-}
-
-/*
- * This routine prepares the Memory Controller for error injection.
- * The error will be injected when some process tries to write to the
- * memory that matches the given criteria.
- * The criteria can be set in terms of a mask where dimm, rank, bank, page
- * and col can be specified.
- * A -1 value for any of the mask items will make the MCU ignore
- * that matching criterion for error injection.
- *
- * It should be noted that the error will only happen after a write operation
- * to memory that matches the condition. If REPEAT_EN is not set in the
- * inject mask, just one error will be produced. Otherwise, errors will repeat
- * until the inject mask is cleared.
- *
- * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
- * is reliable enough to check if the MC is using the
- * three channels. However, this is not clear at the datasheet.
- */
-static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- u32 injectmask;
- u64 mask = 0;
- int rc;
- long enable;
-
- if (!pvt->pci_ch[pvt->inject.channel][0])
- return 0;
-
- rc = strict_strtoul(data, 10, &enable);
- if ((rc < 0))
- return 0;
-
- if (enable) {
- pvt->inject.enable = 1;
- } else {
- disable_inject(mci);
- return count;
- }
-
- /* Sets pvt->inject.dimm mask */
- if (pvt->inject.dimm < 0)
- mask |= 1LL << 41;
- else {
- if (pvt->channel[pvt->inject.channel].dimms > 2)
- mask |= (pvt->inject.dimm & 0x3LL) << 35;
- else
- mask |= (pvt->inject.dimm & 0x1LL) << 36;
- }
-
- /* Sets pvt->inject.rank mask */
- if (pvt->inject.rank < 0)
- mask |= 1LL << 40;
- else {
- if (pvt->channel[pvt->inject.channel].dimms > 2)
- mask |= (pvt->inject.rank & 0x1LL) << 34;
- else
- mask |= (pvt->inject.rank & 0x3LL) << 34;
- }
-
- /* Sets pvt->inject.bank mask */
- if (pvt->inject.bank < 0)
- mask |= 1LL << 39;
- else
- mask |= (pvt->inject.bank & 0x15LL) << 30;
-
- /* Sets pvt->inject.page mask */
- if (pvt->inject.page < 0)
- mask |= 1LL << 38;
- else
- mask |= (pvt->inject.page & 0xffff) << 14;
-
- /* Sets pvt->inject.column mask */
- if (pvt->inject.col < 0)
- mask |= 1LL << 37;
- else
- mask |= (pvt->inject.col & 0x3fff);
-
- /*
- * bit 0: REPEAT_EN
- * bits 1-2: MASK_HALF_CACHELINE
- * bit 3: INJECT_ECC
- * bit 4: INJECT_ADDR_PARITY
- */
-
- injectmask = (pvt->inject.type & 1) |
- (pvt->inject.section & 0x3) << 1 |
- (pvt->inject.type & 0x6) << (3 - 1);
-
- /* Unlock writes to registers - this register is write only */
- pci_write_config_dword(pvt->pci_noncore,
- MC_CFG_CONTROL, 0x2);
-
- write_and_test(pvt->pci_ch[pvt->inject.channel][0],
- MC_CHANNEL_ADDR_MATCH, mask);
- write_and_test(pvt->pci_ch[pvt->inject.channel][0],
- MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
-
- write_and_test(pvt->pci_ch[pvt->inject.channel][0],
- MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
-
- write_and_test(pvt->pci_ch[pvt->inject.channel][0],
- MC_CHANNEL_ERROR_INJECT, injectmask);
-
- /*
- * This is something undocumented, based on my tests
- * Without writing 8 to this register, errors aren't injected. Not sure
- * why.
- */
- pci_write_config_dword(pvt->pci_noncore,
- MC_CFG_CONTROL, 8);
-
- debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
- " inject 0x%08x\n",
- mask, pvt->inject.eccmask, injectmask);
-
-
- return count;
-}
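
The inject mask written to MC_CHANNEL_ERROR_INJECT packs the user-supplied type and section values into the layout described in the comment above (bit 0 REPEAT_EN, bits 1-2 half-cacheline mask, bit 3 INJECT_ECC, bit 4 INJECT_ADDR_PARITY). A small sketch of just that packing step, with hypothetical values standing in for the inject_type/inject_section sysfs inputs:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical sysfs inputs: type bit 0 = repeat, bit 1 = ECC,
	 * bit 2 = address parity; section bit 0 = lower half cacheline,
	 * bit 1 = upper half. */
	uint32_t type    = 0x3;	/* repeat + inject ECC */
	uint32_t section = 0x1;	/* lower 32-byte half cacheline */

	uint32_t injectmask = (type & 1)
			    | (section & 0x3) << 1
			    | (type & 0x6) << (3 - 1);

	/* 0x0b: REPEAT_EN | MASK_LSB32_CACHELINE | INJECT_ECC */
	printf("injectmask = 0x%02x\n", injectmask);
	return 0;
}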
-
-static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
- char *data)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- u32 injectmask;
-
- if (!pvt->pci_ch[pvt->inject.channel][0])
- return 0;
-
- pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
- MC_CHANNEL_ERROR_INJECT, &injectmask);
-
- debugf0("Inject error read: 0x%018x\n", injectmask);
-
- if (injectmask & 0x0c)
- pvt->inject.enable = 1;
-
- return sprintf(data, "%d\n", pvt->inject.enable);
-}
-
-#define DECLARE_COUNTER(param) \
-static ssize_t i7core_show_counter_##param( \
- struct mem_ctl_info *mci, \
- char *data) \
-{ \
- struct i7core_pvt *pvt = mci->pvt_info; \
- \
- debugf1("%s() \n", __func__); \
- if (!pvt->ce_count_available || (pvt->is_registered)) \
- return sprintf(data, "data unavailable\n"); \
- return sprintf(data, "%lu\n", \
- pvt->udimm_ce_count[param]); \
-}
-
-#define ATTR_COUNTER(param) \
- { \
- .attr = { \
- .name = __stringify(udimm##param), \
- .mode = (S_IRUGO | S_IWUSR) \
- }, \
- .show = i7core_show_counter_##param \
- }
-
-DECLARE_COUNTER(0);
-DECLARE_COUNTER(1);
-DECLARE_COUNTER(2);
-
-/*
- * Sysfs struct
- */
-
-static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
- ATTR_ADDR_MATCH(channel),
- ATTR_ADDR_MATCH(dimm),
- ATTR_ADDR_MATCH(rank),
- ATTR_ADDR_MATCH(bank),
- ATTR_ADDR_MATCH(page),
- ATTR_ADDR_MATCH(col),
- { } /* End of list */
-};
-
-static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
- .name = "inject_addrmatch",
- .mcidev_attr = i7core_addrmatch_attrs,
-};
-
-static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
- ATTR_COUNTER(0),
- ATTR_COUNTER(1),
- ATTR_COUNTER(2),
- { .attr = { .name = NULL } }
-};
-
-static const struct mcidev_sysfs_group i7core_udimm_counters = {
- .name = "all_channel_counts",
- .mcidev_attr = i7core_udimm_counters_attrs,
-};
-
-static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
- {
- .attr = {
- .name = "inject_section",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_section_show,
- .store = i7core_inject_section_store,
- }, {
- .attr = {
- .name = "inject_type",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_type_show,
- .store = i7core_inject_type_store,
- }, {
- .attr = {
- .name = "inject_eccmask",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_eccmask_show,
- .store = i7core_inject_eccmask_store,
- }, {
- .grp = &i7core_inject_addrmatch,
- }, {
- .attr = {
- .name = "inject_enable",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_enable_show,
- .store = i7core_inject_enable_store,
- },
- { } /* End of list */
-};
-
-static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
- {
- .attr = {
- .name = "inject_section",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_section_show,
- .store = i7core_inject_section_store,
- }, {
- .attr = {
- .name = "inject_type",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_type_show,
- .store = i7core_inject_type_store,
- }, {
- .attr = {
- .name = "inject_eccmask",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_eccmask_show,
- .store = i7core_inject_eccmask_store,
- }, {
- .grp = &i7core_inject_addrmatch,
- }, {
- .attr = {
- .name = "inject_enable",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_enable_show,
- .store = i7core_inject_enable_store,
- }, {
- .grp = &i7core_udimm_counters,
- },
- { } /* End of list */
-};
-
-/****************************************************************************
- Device initialization routines: put/get, init/exit
- ****************************************************************************/
-
-/*
- * i7core_put_all_devices 'put' all the devices that we have
- * reserved via 'get'
- */
-static void i7core_put_devices(struct i7core_dev *i7core_dev)
-{
- int i;
-
- debugf0(__FILE__ ": %s()\n", __func__);
- for (i = 0; i < i7core_dev->n_devs; i++) {
- struct pci_dev *pdev = i7core_dev->pdev[i];
- if (!pdev)
- continue;
- debugf0("Removing dev %02x:%02x.%d\n",
- pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
- pci_dev_put(pdev);
- }
-}
-
-static void i7core_put_all_devices(void)
-{
- struct i7core_dev *i7core_dev, *tmp;
-
- list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
- i7core_put_devices(i7core_dev);
- free_i7core_dev(i7core_dev);
- }
-}
-
-static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
-{
- struct pci_dev *pdev = NULL;
- int i;
-
- /*
-	 * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core PCI buses
-	 * aren't announced by ACPI. So, we need to use a legacy scan probe
-	 * to detect them
- */
- while (table && table->descr) {
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
- if (unlikely(!pdev)) {
- for (i = 0; i < MAX_SOCKET_BUSES; i++)
- pcibios_scan_specific_bus(255-i);
- }
- pci_dev_put(pdev);
- table++;
- }
-}
-
-static unsigned i7core_pci_lastbus(void)
-{
- int last_bus = 0, bus;
- struct pci_bus *b = NULL;
-
- while ((b = pci_find_next_bus(b)) != NULL) {
- bus = b->number;
- debugf0("Found bus %d\n", bus);
- if (bus > last_bus)
- last_bus = bus;
- }
-
- debugf0("Last bus %d\n", last_bus);
-
- return last_bus;
-}
-
-/*
- * i7core_get_all_devices Find and perform 'get' operation on the MCH's
- * device/functions we want to reference for this driver
- *
- * Need to 'get' every device/function listed in the pci_dev_descr tables
- */
-static int i7core_get_onedevice(struct pci_dev **prev,
- const struct pci_id_table *table,
- const unsigned devno,
- const unsigned last_bus)
-{
- struct i7core_dev *i7core_dev;
- const struct pci_id_descr *dev_descr = &table->descr[devno];
-
- struct pci_dev *pdev = NULL;
- u8 bus = 0;
- u8 socket = 0;
-
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- dev_descr->dev_id, *prev);
-
- /*
-	 * On Xeon 55xx, the Intel Quickpath Arch Generic Non-core regs
-	 * device is at 8086:2c40, instead of 8086:2c41. So, we need
-	 * to probe for the alternate address in case of failure
- */
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
-
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
- *prev);
-
- if (!pdev) {
- if (*prev) {
- *prev = pdev;
- return 0;
- }
-
- if (dev_descr->optional)
- return 0;
-
- if (devno == 0)
- return -ENODEV;
-
- i7core_printk(KERN_INFO,
- "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
- dev_descr->dev, dev_descr->func,
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
-
- /* End of list, leave */
- return -ENODEV;
- }
- bus = pdev->bus->number;
-
- socket = last_bus - bus;
-
- i7core_dev = get_i7core_dev(socket);
- if (!i7core_dev) {
- i7core_dev = alloc_i7core_dev(socket, table);
- if (!i7core_dev) {
- pci_dev_put(pdev);
- return -ENOMEM;
- }
- }
-
- if (i7core_dev->pdev[devno]) {
- i7core_printk(KERN_ERR,
- "Duplicated device for "
- "dev %02x:%02x.%d PCI ID %04x:%04x\n",
- bus, dev_descr->dev, dev_descr->func,
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
- pci_dev_put(pdev);
- return -ENODEV;
- }
-
- i7core_dev->pdev[devno] = pdev;
-
- /* Sanity check */
- if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
- PCI_FUNC(pdev->devfn) != dev_descr->func)) {
- i7core_printk(KERN_ERR,
- "Device PCI ID %04x:%04x "
- "has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
- bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
- bus, dev_descr->dev, dev_descr->func);
- return -ENODEV;
- }
-
- /* Be sure that the device is enabled */
- if (unlikely(pci_enable_device(pdev) < 0)) {
- i7core_printk(KERN_ERR,
- "Couldn't enable "
- "dev %02x:%02x.%d PCI ID %04x:%04x\n",
- bus, dev_descr->dev, dev_descr->func,
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
- return -ENODEV;
- }
-
- debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
- socket, bus, dev_descr->dev,
- dev_descr->func,
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
-
-	/*
-	 * As stated in drivers/pci/search.c, the reference count for
-	 * @from is always decremented if it is not %NULL. Since we keep
-	 * iterating until pci_get_device() returns NULL, take an extra
-	 * reference on the device here.
-	 */
- pci_dev_get(pdev);
-
- *prev = pdev;
-
- return 0;
-}
-
-static int i7core_get_all_devices(void)
-{
- int i, rc, last_bus;
- struct pci_dev *pdev = NULL;
- const struct pci_id_table *table = pci_dev_table;
-
- last_bus = i7core_pci_lastbus();
-
- while (table && table->descr) {
- for (i = 0; i < table->n_devs; i++) {
- pdev = NULL;
- do {
- rc = i7core_get_onedevice(&pdev, table, i,
- last_bus);
- if (rc < 0) {
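-					/*
-					 * If the first descriptor of a table
-					 * can't be found, this PCI ID table
-					 * does not match the system: move on
-					 * to the next table instead of
-					 * aborting.
-					 */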
- if (i == 0) {
- i = table->n_devs;
- break;
- }
- i7core_put_all_devices();
- return -ENODEV;
- }
- } while (pdev);
- }
- table++;
- }
-
- return 0;
-}
-
-static int mci_bind_devs(struct mem_ctl_info *mci,
- struct i7core_dev *i7core_dev)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- struct pci_dev *pdev;
- int i, func, slot;
- char *family;
-
- pvt->is_registered = false;
- pvt->enable_scrub = false;
- for (i = 0; i < i7core_dev->n_devs; i++) {
- pdev = i7core_dev->pdev[i];
- if (!pdev)
- continue;
-
- func = PCI_FUNC(pdev->devfn);
- slot = PCI_SLOT(pdev->devfn);
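-		/*
-		 * Per-socket device layout: slot 3 carries the memory
-		 * controller register functions, slots 4..4+NUM_CHANS-1 carry
-		 * the per-channel functions, and 0.0 is the non-core device.
-		 */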
- if (slot == 3) {
- if (unlikely(func > MAX_MCR_FUNC))
- goto error;
- pvt->pci_mcr[func] = pdev;
- } else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
- if (unlikely(func > MAX_CHAN_FUNC))
- goto error;
- pvt->pci_ch[slot - 4][func] = pdev;
- } else if (!slot && !func) {
- pvt->pci_noncore = pdev;
-
- /* Detect the processor family */
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_I7_NONCORE:
- family = "Xeon 35xx/ i7core";
- pvt->enable_scrub = false;
- break;
- case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT:
- family = "i7-800/i5-700";
- pvt->enable_scrub = false;
- break;
- case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE:
- family = "Xeon 34xx";
- pvt->enable_scrub = false;
- break;
- case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT:
- family = "Xeon 55xx";
- pvt->enable_scrub = true;
- break;
- case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2:
- family = "Xeon 56xx / i7-900";
- pvt->enable_scrub = true;
- break;
- default:
- family = "unknown";
- pvt->enable_scrub = false;
- }
- debugf0("Detected a processor type %s\n", family);
- } else
- goto error;
-
- debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
- pdev, i7core_dev->socket);
-
- if (PCI_SLOT(pdev->devfn) == 3 &&
- PCI_FUNC(pdev->devfn) == 2)
- pvt->is_registered = true;
- }
-
- return 0;
-
-error:
- i7core_printk(KERN_ERR, "Device %d, function %d "
- "is out of the expected range\n",
- slot, func);
- return -EINVAL;
-}
-
-/****************************************************************************
- Error check routines
- ****************************************************************************/
-static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
- const int chan,
- const int dimm,
- const int add)
-{
- char *msg;
- struct i7core_pvt *pvt = mci->pvt_info;
- int row = pvt->csrow_map[chan][dimm], i;
-
- for (i = 0; i < add; i++) {
- msg = kasprintf(GFP_KERNEL, "Corrected error "
- "(Socket=%d channel=%d dimm=%d)",
- pvt->i7core_dev->socket, chan, dimm);
-
- edac_mc_handle_fbd_ce(mci, row, 0, msg);
- kfree (msg);
- }
-}
-
-static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
- const int chan,
- const int new0,
- const int new1,
- const int new2)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- int add0 = 0, add1 = 0, add2 = 0;
- /* Updates CE counters if it is not the first time here */
- if (pvt->ce_count_available) {
- /* Updates CE counters */
-
- add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
- add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
- add0 = new0 - pvt->rdimm_last_ce_count[chan][0];
-
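-		/*
-		 * The hardware counters wrap around; compensate when the new
-		 * reading is below the previous one.
-		 */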
- if (add2 < 0)
- add2 += 0x7fff;
- pvt->rdimm_ce_count[chan][2] += add2;
-
- if (add1 < 0)
- add1 += 0x7fff;
- pvt->rdimm_ce_count[chan][1] += add1;
-
- if (add0 < 0)
- add0 += 0x7fff;
- pvt->rdimm_ce_count[chan][0] += add0;
- } else
- pvt->ce_count_available = 1;
-
- /* Store the new values */
- pvt->rdimm_last_ce_count[chan][2] = new2;
- pvt->rdimm_last_ce_count[chan][1] = new1;
- pvt->rdimm_last_ce_count[chan][0] = new0;
-
-	/* update the EDAC core */
- if (add0 != 0)
- i7core_rdimm_update_csrow(mci, chan, 0, add0);
- if (add1 != 0)
- i7core_rdimm_update_csrow(mci, chan, 1, add1);
- if (add2 != 0)
- i7core_rdimm_update_csrow(mci, chan, 2, add2);
-
-}
-
-static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- u32 rcv[3][2];
- int i, new0, new1, new2;
-
-	/* Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly */
- pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
- &rcv[0][0]);
- pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
- &rcv[0][1]);
- pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
- &rcv[1][0]);
- pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
- &rcv[1][1]);
- pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
- &rcv[2][0]);
- pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
- &rcv[2][1]);
- for (i = 0 ; i < 3; i++) {
- debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
- (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
-		/* if the channel has 3 DIMMs */
- if (pvt->channel[i].dimms > 2) {
- new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
- new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
- new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
- } else {
- new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
- DIMM_BOT_COR_ERR(rcv[i][0]);
- new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
- DIMM_BOT_COR_ERR(rcv[i][1]);
- new2 = 0;
- }
-
- i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
- }
-}
-
-/* This function is based on the device 3 function 4 registers as described in:
- * Intel Xeon Processor 5500 Series Datasheet Volume 2
- * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
- * also available at:
- * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
- */
-static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- u32 rcv1, rcv0;
- int new0, new1, new2;
-
- if (!pvt->pci_mcr[4]) {
- debugf0("%s MCR registers not found\n", __func__);
- return;
- }
-
- /* Corrected test errors */
- pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
- pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
-
- /* Store the new values */
- new2 = DIMM2_COR_ERR(rcv1);
- new1 = DIMM1_COR_ERR(rcv0);
- new0 = DIMM0_COR_ERR(rcv0);
-
- /* Updates CE counters if it is not the first time here */
- if (pvt->ce_count_available) {
- /* Updates CE counters */
- int add0, add1, add2;
-
- add2 = new2 - pvt->udimm_last_ce_count[2];
- add1 = new1 - pvt->udimm_last_ce_count[1];
- add0 = new0 - pvt->udimm_last_ce_count[0];
-
- if (add2 < 0)
- add2 += 0x7fff;
- pvt->udimm_ce_count[2] += add2;
-
- if (add1 < 0)
- add1 += 0x7fff;
- pvt->udimm_ce_count[1] += add1;
-
- if (add0 < 0)
- add0 += 0x7fff;
- pvt->udimm_ce_count[0] += add0;
-
- if (add0 | add1 | add2)
- i7core_printk(KERN_ERR, "New Corrected error(s): "
-				"dimm0: +%d, dimm1: +%d, dimm2: +%d\n",
- add0, add1, add2);
- } else
- pvt->ce_count_available = 1;
-
- /* Store the new values */
- pvt->udimm_last_ce_count[2] = new2;
- pvt->udimm_last_ce_count[1] = new1;
- pvt->udimm_last_ce_count[0] = new0;
-}
-
-/*
- * According to tables E-11 and E-12 of chapter E.3.3 of the Intel 64 and IA-32
- * Architectures Software Developer’s Manual Volume 3B,
- * Nehalem is defined as family 0x06, model 0x1a.
- *
- * The MCA registers used here are the following ones:
- * struct mce field MCA Register
- * m->status MSR_IA32_MC8_STATUS
- * m->addr MSR_IA32_MC8_ADDR
- * m->misc MSR_IA32_MC8_MISC
- * In the case of Nehalem, the error information is extracted by masking the
- * .status and .misc fields.
- */
-static void i7core_mce_output_error(struct mem_ctl_info *mci,
- const struct mce *m)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- char *type, *optype, *err, *msg;
- unsigned long error = m->status & 0x1ff0000l;
- u32 optypenum = (m->status >> 4) & 0x07;
- u32 core_err_cnt = (m->status >> 38) & 0x7fff;
- u32 dimm = (m->misc >> 16) & 0x3;
- u32 channel = (m->misc >> 18) & 0x3;
- u32 syndrome = m->misc >> 32;
- u32 errnum = find_first_bit(&error, 32);
- int csrow;
-
- if (m->mcgstatus & 1)
- type = "FATAL";
- else
- type = "NON_FATAL";
-
- switch (optypenum) {
- case 0:
- optype = "generic undef request";
- break;
- case 1:
- optype = "read error";
- break;
- case 2:
- optype = "write error";
- break;
- case 3:
- optype = "addr/cmd error";
- break;
- case 4:
- optype = "scrubbing error";
- break;
- default:
- optype = "reserved";
- break;
- }
-
- switch (errnum) {
- case 16:
- err = "read ECC error";
- break;
- case 17:
- err = "RAS ECC error";
- break;
- case 18:
- err = "write parity error";
- break;
- case 19:
-		err = "redundancy loss";
- break;
- case 20:
- err = "reserved";
- break;
- case 21:
- err = "memory range error";
- break;
- case 22:
- err = "RTID out of range";
- break;
- case 23:
- err = "address parity error";
- break;
- case 24:
- err = "byte enable parity error";
- break;
- default:
- err = "unknown";
- }
-
- /* FIXME: should convert addr into bank and rank information */
- msg = kasprintf(GFP_ATOMIC,
- "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
- "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
- type, (long long) m->addr, m->cpu, dimm, channel,
- syndrome, core_err_cnt, (long long)m->status,
- (long long)m->misc, optype, err);
-
- debugf0("%s", msg);
-
- csrow = pvt->csrow_map[channel][dimm];
-
- /* Call the helper to output message */
- if (m->mcgstatus & 1)
- edac_mc_handle_fbd_ue(mci, csrow, 0,
- 0 /* FIXME: should be channel here */, msg);
- else if (!pvt->is_registered)
- edac_mc_handle_fbd_ce(mci, csrow,
- 0 /* FIXME: should be channel here */, msg);
-
- kfree(msg);
-}
-
-/*
- * i7core_check_error Retrieve and process errors reported by the
- * hardware. Called by the Core module.
- */
-static void i7core_check_error(struct mem_ctl_info *mci)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- int i;
- unsigned count = 0;
- struct mce *m;
-
- /*
-	 * MCE first step: copy all MCE errors into a temporary buffer.
-	 * We use double buffering here to reduce the risk of
-	 * losing an error.
- */
- smp_rmb();
- count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
- % MCE_LOG_LEN;
- if (!count)
- goto check_ce_error;
-
- m = pvt->mce_outentry;
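-	/*
-	 * The ring buffer may wrap around: copy the tail of the buffer
-	 * first, then the remaining entries from the start.
-	 */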
- if (pvt->mce_in + count > MCE_LOG_LEN) {
- unsigned l = MCE_LOG_LEN - pvt->mce_in;
-
- memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
- smp_wmb();
- pvt->mce_in = 0;
- count -= l;
- m += l;
- }
- memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
- smp_wmb();
- pvt->mce_in += count;
-
- smp_rmb();
- if (pvt->mce_overrun) {
- i7core_printk(KERN_ERR, "Lost %d memory errors\n",
- pvt->mce_overrun);
- smp_wmb();
- pvt->mce_overrun = 0;
- }
-
- /*
- * MCE second step: parse errors and display
- */
- for (i = 0; i < count; i++)
- i7core_mce_output_error(mci, &pvt->mce_outentry[i]);
-
- /*
- * Now, let's increment CE error counts
- */
-check_ce_error:
- if (!pvt->is_registered)
- i7core_udimm_check_mc_ecc_err(mci);
- else
- i7core_rdimm_check_mc_ecc_err(mci);
-}
-
-/*
- * i7core_mce_check_error Replicates mcelog routine to get errors
- *				This routine simply queues mcelog errors and
- *				returns. The error itself should be handled
- *				later by i7core_check_error.
- * WARNING: As this routine should be called at NMI time, extra care should
- * be taken to avoid deadlocks, and to be as fast as possible.
- */
-static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- struct mce *mce = (struct mce *)data;
- struct i7core_dev *i7_dev;
- struct mem_ctl_info *mci;
- struct i7core_pvt *pvt;
-
- i7_dev = get_i7core_dev(mce->socketid);
- if (!i7_dev)
- return NOTIFY_BAD;
-
- mci = i7_dev->mci;
- pvt = mci->pvt_info;
-
- /*
- * Just let mcelog handle it if the error is
- * outside the memory controller
- */
- if (((mce->status & 0xffff) >> 7) != 1)
- return NOTIFY_DONE;
-
- /* Bank 8 registers are the only ones that we know how to handle */
- if (mce->bank != 8)
- return NOTIFY_DONE;
-
- smp_rmb();
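-	/*
-	 * Ring buffer full: record the overrun and drop the entry; the lost
-	 * count is reported later by i7core_check_error().
-	 */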
- if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
- smp_wmb();
- pvt->mce_overrun++;
- return NOTIFY_DONE;
- }
-
- /* Copy memory error at the ringbuffer */
- memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
- smp_wmb();
- pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
-
- /* Handle fatal errors immediately */
- if (mce->mcgstatus & 1)
- i7core_check_error(mci);
-
- /* Advise mcelog that the errors were handled */
- return NOTIFY_STOP;
-}
-
-static struct notifier_block i7_mce_dec = {
- .notifier_call = i7core_mce_check_error,
-};
-
-struct memdev_dmi_entry {
- u8 type;
- u8 length;
- u16 handle;
- u16 phys_mem_array_handle;
- u16 mem_err_info_handle;
- u16 total_width;
- u16 data_width;
- u16 size;
- u8 form;
- u8 device_set;
- u8 device_locator;
- u8 bank_locator;
- u8 memory_type;
- u16 type_detail;
- u16 speed;
- u8 manufacturer;
- u8 serial_number;
- u8 asset_tag;
- u8 part_number;
- u8 attributes;
- u32 extended_size;
- u16 conf_mem_clk_speed;
-} __attribute__((__packed__));
-
-
-/*
- * Decode the DRAM Clock Frequency. Be paranoid: make sure that all
- * memory devices report the same speed, and if they don't, consider
- * all speeds to be invalid.
- */
-static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
-{
- int *dclk_freq = _dclk_freq;
- u16 dmi_mem_clk_speed;
-
- if (*dclk_freq == -1)
- return;
-
- if (dh->type == DMI_ENTRY_MEM_DEVICE) {
- struct memdev_dmi_entry *memdev_dmi_entry =
- (struct memdev_dmi_entry *)dh;
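-		/*
-		 * Compute the byte offsets of the two speed fields within the
-		 * DMI entry, so that the entry length tells us whether the
-		 * BIOS actually provides them.
-		 */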
- unsigned long conf_mem_clk_speed_offset =
- (unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
- (unsigned long)&memdev_dmi_entry->type;
- unsigned long speed_offset =
- (unsigned long)&memdev_dmi_entry->speed -
- (unsigned long)&memdev_dmi_entry->type;
-
- /* Check that a DIMM is present */
- if (memdev_dmi_entry->size == 0)
- return;
-
- /*
- * Pick the configured speed if it's available, otherwise
- * pick the DIMM speed, or we don't have a speed.
- */
- if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
- dmi_mem_clk_speed =
- memdev_dmi_entry->conf_mem_clk_speed;
- } else if (memdev_dmi_entry->length > speed_offset) {
- dmi_mem_clk_speed = memdev_dmi_entry->speed;
- } else {
- *dclk_freq = -1;
- return;
- }
-
- if (*dclk_freq == 0) {
- /* First pass, speed was 0 */
- if (dmi_mem_clk_speed > 0) {
- /* Set speed if a valid speed is read */
- *dclk_freq = dmi_mem_clk_speed;
- } else {
- /* Otherwise we don't have a valid speed */
- *dclk_freq = -1;
- }
- } else if (*dclk_freq > 0 &&
- *dclk_freq != dmi_mem_clk_speed) {
- /*
- * If we have a speed, check that all DIMMS are the same
- * speed, otherwise set the speed as invalid.
- */
- *dclk_freq = -1;
- }
- }
-}
-
-/*
- * The default DCLK frequency is used as a fallback if we
- * fail to find anything reliable in the DMI. The value
- * is taken straight from the datasheet.
- */
-#define DEFAULT_DCLK_FREQ 800
-
-static int get_dclk_freq(void)
-{
- int dclk_freq = 0;
-
- dmi_walk(decode_dclk, (void *)&dclk_freq);
-
- if (dclk_freq < 1)
- return DEFAULT_DCLK_FREQ;
-
- return dclk_freq;
-}
-
-/*
- * set_sdram_scrub_rate		This routine sets the byte/sec bandwidth scrub
- *				rate in hardware according to the SCRUBINTERVAL
- *				formula found in the datasheet.
- */
-static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- struct pci_dev *pdev;
- u32 dw_scrub;
- u32 dw_ssr;
-
- /* Get data from the MC register, function 2 */
- pdev = pvt->pci_mcr[2];
- if (!pdev)
- return -ENODEV;
-
- pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);
-
- if (new_bw == 0) {
-		/* Prepare to disable patrol scrub */
- dw_scrub &= ~STARTSCRUB;
- /* Stop the patrol scrub engine */
- write_and_test(pdev, MC_SCRUB_CONTROL,
- dw_scrub & ~SCRUBINTERVAL_MASK);
-
- /* Get current status of scrub rate and set bit to disable */
- pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
- dw_ssr &= ~SSR_MODE_MASK;
- dw_ssr |= SSR_MODE_DISABLE;
- } else {
- const int cache_line_size = 64;
- const u32 freq_dclk_mhz = pvt->dclk_freq;
- unsigned long long scrub_interval;
- /*
- * Translate the desired scrub rate to a register value and
- * program the corresponding register value.
- */
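-		/*
-		 * Illustrative example (hypothetical numbers, not from the
-		 * datasheet): with freq_dclk_mhz = 800 and new_bw = 5120
-		 * bytes/s, scrub_interval = 800 * 64 * 1000000 / 5120 =
-		 * 10000000 DCLK cycles between scrubbed cache lines.
-		 */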
- scrub_interval = (unsigned long long)freq_dclk_mhz *
- cache_line_size * 1000000;
- do_div(scrub_interval, new_bw);
-
- if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
- return -EINVAL;
-
- dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;
-
- /* Start the patrol scrub engine */
- pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
- STARTSCRUB | dw_scrub);
-
- /* Get current status of scrub rate and set bit to enable */
- pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
- dw_ssr &= ~SSR_MODE_MASK;
- dw_ssr |= SSR_MODE_ENABLE;
- }
- /* Disable or enable scrubbing */
- pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);
-
- return new_bw;
-}
-
-/*
- * get_sdram_scrub_rate		This routine converts the current scrub rate
- *				value into byte/sec bandwidth according to the
- *				SCRUBINTERVAL formula found in the datasheet.
- */
-static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- struct pci_dev *pdev;
- const u32 cache_line_size = 64;
- const u32 freq_dclk_mhz = pvt->dclk_freq;
- unsigned long long scrub_rate;
- u32 scrubval;
-
- /* Get data from the MC register, function 2 */
- pdev = pvt->pci_mcr[2];
- if (!pdev)
- return -ENODEV;
-
- /* Get current scrub control data */
- pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);
-
- /* Mask highest 8-bits to 0 */
- scrubval &= SCRUBINTERVAL_MASK;
- if (!scrubval)
- return 0;
-
- /* Calculate scrub rate value into byte/sec bandwidth */
- scrub_rate = (unsigned long long)freq_dclk_mhz *
- 1000000 * cache_line_size;
- do_div(scrub_rate, scrubval);
- return (int)scrub_rate;
-}
-
-static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- u32 pci_lock;
-
- /* Unlock writes to pci registers */
- pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
- pci_lock &= ~0x3;
- pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
- pci_lock | MC_CFG_UNLOCK);
-
- mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
- mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
-}
-
-static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
-{
- struct i7core_pvt *pvt = mci->pvt_info;
- u32 pci_lock;
-
- /* Lock writes to pci registers */
- pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
- pci_lock &= ~0x3;
- pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
- pci_lock | MC_CFG_LOCK);
-}
-
-static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
-{
- pvt->i7core_pci = edac_pci_create_generic_ctl(
- &pvt->i7core_dev->pdev[0]->dev,
- EDAC_MOD_STR);
- if (unlikely(!pvt->i7core_pci))
- i7core_printk(KERN_WARNING,
- "Unable to setup PCI error report via EDAC\n");
-}
-
-static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
-{
- if (likely(pvt->i7core_pci))
- edac_pci_release_generic_ctl(pvt->i7core_pci);
- else
- i7core_printk(KERN_ERR,
- "Couldn't find mem_ctl_info for socket %d\n",
- pvt->i7core_dev->socket);
- pvt->i7core_pci = NULL;
-}
-
-static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
-{
- struct mem_ctl_info *mci = i7core_dev->mci;
- struct i7core_pvt *pvt;
-
- if (unlikely(!mci || !mci->pvt_info)) {
- debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
- __func__, &i7core_dev->pdev[0]->dev);
-
- i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
- return;
- }
-
- pvt = mci->pvt_info;
-
- debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
- __func__, mci, &i7core_dev->pdev[0]->dev);
-
- /* Disable scrubrate setting */
- if (pvt->enable_scrub)
- disable_sdram_scrub_setting(mci);
-
- /* Disable EDAC polling */
- i7core_pci_ctl_release(pvt);
-
- /* Remove MC sysfs nodes */
- edac_mc_del_mc(mci->dev);
-
- debugf1("%s: free mci struct\n", mci->ctl_name);
- kfree(mci->ctl_name);
- edac_mc_free(mci);
- i7core_dev->mci = NULL;
-}
-
-static int i7core_register_mci(struct i7core_dev *i7core_dev)
-{
- struct mem_ctl_info *mci;
- struct i7core_pvt *pvt;
- int rc, channels, csrows;
-
- /* Check the number of active and not disabled channels */
- rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
- if (unlikely(rc < 0))
- return rc;
-
- /* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
- if (unlikely(!mci))
- return -ENOMEM;
-
- debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
- __func__, mci, &i7core_dev->pdev[0]->dev);
-
- pvt = mci->pvt_info;
- memset(pvt, 0, sizeof(*pvt));
-
- /* Associates i7core_dev and mci for future usage */
- pvt->i7core_dev = i7core_dev;
- i7core_dev->mci = mci;
-
- /*
- * FIXME: how to handle RDDR3 at MCI level? It is possible to have
- * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
- * memory channels
- */
- mci->mtype_cap = MEM_FLAG_DDR3;
- mci->edac_ctl_cap = EDAC_FLAG_NONE;
- mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = "i7core_edac.c";
- mci->mod_ver = I7CORE_REVISION;
- mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
- i7core_dev->socket);
- mci->dev_name = pci_name(i7core_dev->pdev[0]);
- mci->ctl_page_to_phys = NULL;
-
- /* Store pci devices at mci for faster access */
- rc = mci_bind_devs(mci, i7core_dev);
- if (unlikely(rc < 0))
- goto fail0;
-
- if (pvt->is_registered)
- mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
- else
- mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
-
- /* Get dimm basic config */
- get_dimm_config(mci);
- /* record ptr to the generic device */
- mci->dev = &i7core_dev->pdev[0]->dev;
- /* Set the function pointer to an actual operation function */
- mci->edac_check = i7core_check_error;
-
- /* Enable scrubrate setting */
- if (pvt->enable_scrub)
- enable_sdram_scrub_setting(mci);
-
- /* add this new MC control structure to EDAC's list of MCs */
- if (unlikely(edac_mc_add_mc(mci))) {
- debugf0("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
- /* FIXME: perhaps some code should go here that disables error
- * reporting if we just enabled it
- */
-
- rc = -EINVAL;
- goto fail0;
- }
-
- /* Default error mask is any memory */
- pvt->inject.channel = 0;
- pvt->inject.dimm = -1;
- pvt->inject.rank = -1;
- pvt->inject.bank = -1;
- pvt->inject.page = -1;
- pvt->inject.col = -1;
-
- /* allocating generic PCI control info */
- i7core_pci_ctl_create(pvt);
-
- /* DCLK for scrub rate setting */
- pvt->dclk_freq = get_dclk_freq();
-
- return 0;
-
-fail0:
- kfree(mci->ctl_name);
- edac_mc_free(mci);
- i7core_dev->mci = NULL;
- return rc;
-}
-
-/*
- * i7core_probe Probe for ONE instance of device to see if it is
- * present.
- * return:
- * 0 for FOUND a device
- * < 0 for error code
- */
-
-static int __devinit i7core_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- int rc, count = 0;
- struct i7core_dev *i7core_dev;
-
- /* get the pci devices we want to reserve for our use */
- mutex_lock(&i7core_edac_lock);
-
- /*
- * All memory controllers are allocated at the first pass.
- */
- if (unlikely(probed >= 1)) {
- mutex_unlock(&i7core_edac_lock);
- return -ENODEV;
- }
- probed++;
-
- rc = i7core_get_all_devices();
- if (unlikely(rc < 0))
- goto fail0;
-
- list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
- count++;
- rc = i7core_register_mci(i7core_dev);
- if (unlikely(rc < 0))
- goto fail1;
- }
-
- /*
- * Nehalem-EX uses a different memory controller. However, as the
- * memory controller is not visible on some Nehalem/Nehalem-EP, we
-	 * need to indirectly probe via an X58 PCI device. The same devices
- * are found on (some) Nehalem-EX. So, on those machines, the
- * probe routine needs to return -ENODEV, as the actual Memory
- * Controller registers won't be detected.
- */
- if (!count) {
- rc = -ENODEV;
- goto fail1;
- }
-
- i7core_printk(KERN_INFO,
- "Driver loaded, %d memory controller(s) found.\n",
- count);
-
- mutex_unlock(&i7core_edac_lock);
- return 0;
-
-fail1:
- list_for_each_entry(i7core_dev, &i7core_edac_list, list)
- i7core_unregister_mci(i7core_dev);
-
- i7core_put_all_devices();
-fail0:
- mutex_unlock(&i7core_edac_lock);
- return rc;
-}
-
-/*
- * i7core_remove destructor for one instance of device
- *
- */
-static void __devexit i7core_remove(struct pci_dev *pdev)
-{
- struct i7core_dev *i7core_dev;
-
- debugf0(__FILE__ ": %s()\n", __func__);
-
- /*
-	 * We have a problem here: the pdev value for removal will be wrong,
-	 * since it will point to the X58 register used to detect that the
-	 * machine is a Nehalem or newer design. However, due to the way
-	 * several PCI devices are grouped together to provide MC
-	 * functionality, we need to use a different method for releasing
-	 * the devices.
- */
-
- mutex_lock(&i7core_edac_lock);
-
- if (unlikely(!probed)) {
- mutex_unlock(&i7core_edac_lock);
- return;
- }
-
- list_for_each_entry(i7core_dev, &i7core_edac_list, list)
- i7core_unregister_mci(i7core_dev);
-
- /* Release PCI resources */
- i7core_put_all_devices();
-
- probed--;
-
- mutex_unlock(&i7core_edac_lock);
-}
-
-MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
-
-/*
- * i7core_driver pci_driver structure for this module
- *
- */
-static struct pci_driver i7core_driver = {
- .name = "i7core_edac",
- .probe = i7core_probe,
- .remove = __devexit_p(i7core_remove),
- .id_table = i7core_pci_tbl,
-};
-
-/*
- * i7core_init Module entry function
- * Try to initialize this module for its devices
- */
-static int __init i7core_init(void)
-{
- int pci_rc;
-
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
-
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- if (use_pci_fixup)
- i7core_xeon_pci_fixup(pci_dev_table);
-
- pci_rc = pci_register_driver(&i7core_driver);
-
- if (pci_rc >= 0) {
- mce_register_decode_chain(&i7_mce_dec);
- return 0;
- }
-
- i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
- pci_rc);
-
- return pci_rc;
-}
-
-/*
- * i7core_exit() Module exit function
- * Unregister the driver
- */
-static void __exit i7core_exit(void)
-{
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
- pci_unregister_driver(&i7core_driver);
- mce_unregister_decode_chain(&i7_mce_dec);
-}
-
-module_init(i7core_init);
-module_exit(i7core_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
-MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
-MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
- I7CORE_REVISION);
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/ANDROID_3.4.5/drivers/edac/i82443bxgx_edac.c b/ANDROID_3.4.5/drivers/edac/i82443bxgx_edac.c
deleted file mode 100644
index 3bf2b2f4..00000000
--- a/ANDROID_3.4.5/drivers/edac/i82443bxgx_edac.c
+++ /dev/null
@@ -1,467 +0,0 @@
-/*
- * Intel 82443BX/GX (440BX/GX chipset) Memory Controller EDAC kernel
- * module (C) 2006 Tim Small
- *
- * This file may be distributed under the terms of the GNU General
- * Public License.
- *
- * Written by Tim Small <tim@buttersideup.com>, based on work by Linux
- * Networx, Thayne Harbaugh, Dan Hollis <goemon at anime dot net> and
- * others.
- *
- * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
- *
- * Written with reference to 82443BX Host Bridge Datasheet:
- * http://download.intel.com/design/chipsets/datashts/29063301.pdf
- * references to this document given in [].
- *
- * This module doesn't support the 440LX, but it may be possible to
- * make it do so (the 440LX's register definitions are different, but
- * not completely so - I haven't studied them in enough detail to know
- * how easy this would be).
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-
-
-#include <linux/edac.h>
-#include "edac_core.h"
-
-#define I82443_REVISION "0.1"
-
-#define EDAC_MOD_STR "i82443bxgx_edac"
-
-/* The 82443BX supports SDRAM, or EDO (EDO for mobile only), "Memory
- * Size: 8 MB to 512 MB (1GB with Registered DIMMs) with eight memory
- * rows" "The 82443BX supports multiple-bit error detection and
- * single-bit error correction when ECC mode is enabled and
- * single/multi-bit error detection when correction is disabled.
- * During writes to the DRAM, the 82443BX generates ECC for the data
- * on a QWord basis. Partial QWord writes require a read-modify-write
- * cycle when ECC is enabled."
-*/
-
-/* "Additionally, the 82443BX ensures that the data is corrected in
- * main memory so that accumulation of errors is prevented. Another
- * error within the same QWord would result in a double-bit error
- * which is unrecoverable. This is known as hardware scrubbing since
- * it requires no software intervention to correct the data in memory."
- */
-
-/* [Also see page 100 (section 4.3), "DRAM Interface"]
- * [Also see page 112 (section 4.6.1.4), ECC]
- */
-
-#define I82443BXGX_NR_CSROWS 8
-#define I82443BXGX_NR_CHANS 1
-#define I82443BXGX_NR_DIMMS 4
-
-/* 82443 PCI Device 0 */
-#define I82443BXGX_NBXCFG 0x50 /* 32bit register starting at this PCI
- * config space offset */
-#define I82443BXGX_NBXCFG_OFFSET_NON_ECCROW 24 /* Array of bits, zero if
- * row is non-ECC */
-#define I82443BXGX_NBXCFG_OFFSET_DRAM_FREQ 12 /* 2 bits,00=100MHz,10=66 MHz */
-
-#define I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY 7 /* 2 bits: */
-#define I82443BXGX_NBXCFG_INTEGRITY_NONE 0x0 /* 00 = Non-ECC */
-#define I82443BXGX_NBXCFG_INTEGRITY_EC 0x1 /* 01 = EC (only) */
-#define I82443BXGX_NBXCFG_INTEGRITY_ECC 0x2 /* 10 = ECC */
-#define I82443BXGX_NBXCFG_INTEGRITY_SCRUB 0x3 /* 11 = ECC + HW Scrub */
-
-#define I82443BXGX_NBXCFG_OFFSET_ECC_DIAG_ENABLE 6
-
-/* 82443 PCI Device 0 */
-#define I82443BXGX_EAP 0x80 /* 32bit register starting at this PCI
- * config space offset, Error Address
- * Pointer Register */
-#define I82443BXGX_EAP_OFFSET_EAP 12 /* High 20 bits of error address */
-#define I82443BXGX_EAP_OFFSET_MBE BIT(1) /* Err at EAP was multi-bit (W1TC) */
-#define I82443BXGX_EAP_OFFSET_SBE BIT(0) /* Err at EAP was single-bit (W1TC) */
-
-#define I82443BXGX_ERRCMD 0x90 /* 8bit register starting at this PCI
- * config space offset. */
-#define I82443BXGX_ERRCMD_OFFSET_SERR_ON_MBE BIT(1) /* 1 = enable */
-#define I82443BXGX_ERRCMD_OFFSET_SERR_ON_SBE BIT(0) /* 1 = enable */
-
-#define I82443BXGX_ERRSTS 0x91 /* 16bit register starting at this PCI
- * config space offset. */
-#define I82443BXGX_ERRSTS_OFFSET_MBFRE 5 /* 3 bits - first err row multibit */
-#define I82443BXGX_ERRSTS_OFFSET_MEF BIT(4) /* 1 = MBE occurred */
-#define I82443BXGX_ERRSTS_OFFSET_SBFRE 1 /* 3 bits - first err row singlebit */
-#define I82443BXGX_ERRSTS_OFFSET_SEF BIT(0) /* 1 = SBE occurred */
-
-#define I82443BXGX_DRAMC 0x57 /* 8bit register starting at this PCI
- * config space offset. */
-#define I82443BXGX_DRAMC_OFFSET_DT 3 /* 2 bits, DRAM Type */
-#define I82443BXGX_DRAMC_DRAM_IS_EDO 0 /* 00 = EDO */
-#define I82443BXGX_DRAMC_DRAM_IS_SDRAM 1 /* 01 = SDRAM */
-#define I82443BXGX_DRAMC_DRAM_IS_RSDRAM 2 /* 10 = Registered SDRAM */
-
-#define I82443BXGX_DRB 0x60 /* 8x 8bit registers starting at this PCI
- * config space offset. */
-
-/* FIXME - don't poll when ECC disabled? */
-
-struct i82443bxgx_edacmc_error_info {
- u32 eap;
-};
-
-static struct edac_pci_ctl_info *i82443bxgx_pci;
-
-static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has
- * already registered driver
- */
-
-static int i82443bxgx_registered = 1;
-
-static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci,
- struct i82443bxgx_edacmc_error_info
- *info)
-{
- struct pci_dev *pdev;
- pdev = to_pci_dev(mci->dev);
- pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap);
- if (info->eap & I82443BXGX_EAP_OFFSET_SBE)
- /* Clear error to allow next error to be reported [p.61] */
- pci_write_bits32(pdev, I82443BXGX_EAP,
- I82443BXGX_EAP_OFFSET_SBE,
- I82443BXGX_EAP_OFFSET_SBE);
-
- if (info->eap & I82443BXGX_EAP_OFFSET_MBE)
- /* Clear error to allow next error to be reported [p.61] */
- pci_write_bits32(pdev, I82443BXGX_EAP,
- I82443BXGX_EAP_OFFSET_MBE,
- I82443BXGX_EAP_OFFSET_MBE);
-}
-
-static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
- struct
- i82443bxgx_edacmc_error_info
- *info, int handle_errors)
-{
- int error_found = 0;
- u32 eapaddr, page, pageoffset;
-
- /* bits 30:12 hold the 4kb block in which the error occurred
- * [p.61] */
- eapaddr = (info->eap & 0xfffff000);
- page = eapaddr >> PAGE_SHIFT;
- pageoffset = eapaddr - (page << PAGE_SHIFT);
-
- if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
- error_found = 1;
- if (handle_errors)
- edac_mc_handle_ce(mci, page, pageoffset,
- /* 440BX/GX don't make syndrome information
- * available */
- 0, edac_mc_find_csrow_by_page(mci, page), 0,
- mci->ctl_name);
- }
-
- if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
- error_found = 1;
- if (handle_errors)
- edac_mc_handle_ue(mci, page, pageoffset,
- edac_mc_find_csrow_by_page(mci, page),
- mci->ctl_name);
- }
-
- return error_found;
-}
-
-static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
-{
- struct i82443bxgx_edacmc_error_info info;
-
- debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
- i82443bxgx_edacmc_get_error_info(mci, &info);
- i82443bxgx_edacmc_process_error_info(mci, &info, 1);
-}
-
-static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
- struct pci_dev *pdev,
- enum edac_type edac_mode,
- enum mem_type mtype)
-{
- struct csrow_info *csrow;
- int index;
- u8 drbar, dramc;
- u32 row_base, row_high_limit, row_high_limit_last;
-
- pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
- row_high_limit_last = 0;
- for (index = 0; index < mci->nr_csrows; index++) {
- csrow = &mci->csrows[index];
- pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
- debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
- mci->mc_idx, __FILE__, __func__, index, drbar);
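-		/*
-		 * The DRB value gives the row boundary in 8 MiB units, so
-		 * shifting left by 23 yields the byte address of the limit.
-		 */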
- row_high_limit = ((u32) drbar << 23);
- /* find the DRAM Chip Select Base address and mask */
- debugf1("MC%d: %s: %s() Row=%d, "
- "Boundary Address=%#0x, Last = %#0x\n",
- mci->mc_idx, __FILE__, __func__, index, row_high_limit,
- row_high_limit_last);
-
- /* 440GX goes to 2GB, represented with a DRB of 0. */
- if (row_high_limit_last && !row_high_limit)
- row_high_limit = 1UL << 31;
-
- /* This row is empty [p.49] */
- if (row_high_limit == row_high_limit_last)
- continue;
- row_base = row_high_limit_last;
- csrow->first_page = row_base >> PAGE_SHIFT;
- csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
- csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
- /* EAP reports in 4kilobyte granularity [61] */
- csrow->grain = 1 << 12;
- csrow->mtype = mtype;
- /* I don't think 440BX can tell you device type? FIXME? */
- csrow->dtype = DEV_UNKNOWN;
- /* Mode is global to all rows on 440BX */
- csrow->edac_mode = edac_mode;
- row_high_limit_last = row_high_limit;
- }
-}
-
-static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
-{
- struct mem_ctl_info *mci;
- u8 dramc;
- u32 nbxcfg, ecc_mode;
- enum mem_type mtype;
- enum edac_type edac_mode;
-
- debugf0("MC: %s: %s()\n", __FILE__, __func__);
-
- /* Something is really hosed if PCI config space reads from
- * the MC aren't working.
- */
- if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
- return -EIO;
-
- mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0);
-
- if (mci == NULL)
- return -ENOMEM;
-
- debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
- mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
- switch ((dramc >> I82443BXGX_DRAMC_OFFSET_DT) & (BIT(0) | BIT(1))) {
- case I82443BXGX_DRAMC_DRAM_IS_EDO:
- mtype = MEM_EDO;
- break;
- case I82443BXGX_DRAMC_DRAM_IS_SDRAM:
- mtype = MEM_SDR;
- break;
- case I82443BXGX_DRAMC_DRAM_IS_RSDRAM:
- mtype = MEM_RDR;
- break;
- default:
- debugf0("Unknown/reserved DRAM type value "
- "in DRAMC register!\n");
-		mtype = MEM_UNKNOWN;
- }
-
- if ((mtype == MEM_SDR) || (mtype == MEM_RDR))
- mci->edac_cap = mci->edac_ctl_cap;
- else
- mci->edac_cap = EDAC_FLAG_NONE;
-
- mci->scrub_cap = SCRUB_FLAG_HW_SRC;
- pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg);
- ecc_mode = ((nbxcfg >> I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY) &
- (BIT(0) | BIT(1)));
-
- mci->scrub_mode = (ecc_mode == I82443BXGX_NBXCFG_INTEGRITY_SCRUB)
- ? SCRUB_HW_SRC : SCRUB_NONE;
-
- switch (ecc_mode) {
- case I82443BXGX_NBXCFG_INTEGRITY_NONE:
- edac_mode = EDAC_NONE;
- break;
- case I82443BXGX_NBXCFG_INTEGRITY_EC:
- edac_mode = EDAC_EC;
- break;
- case I82443BXGX_NBXCFG_INTEGRITY_ECC:
- case I82443BXGX_NBXCFG_INTEGRITY_SCRUB:
- edac_mode = EDAC_SECDED;
- break;
- default:
- debugf0("%s(): Unknown/reserved ECC state "
- "in NBXCFG register!\n", __func__);
- edac_mode = EDAC_UNKNOWN;
- break;
- }
-
- i82443bxgx_init_csrows(mci, pdev, edac_mode, mtype);
-
- /* Many BIOSes don't clear error flags on boot, so do this
- * here, or we get "phantom" errors occurring at module-load
- * time. */
- pci_write_bits32(pdev, I82443BXGX_EAP,
- (I82443BXGX_EAP_OFFSET_SBE |
- I82443BXGX_EAP_OFFSET_MBE),
- (I82443BXGX_EAP_OFFSET_SBE |
- I82443BXGX_EAP_OFFSET_MBE));
-
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I82443_REVISION;
- mci->ctl_name = "I82443BXGX";
- mci->dev_name = pci_name(pdev);
- mci->edac_check = i82443bxgx_edacmc_check;
- mci->ctl_page_to_phys = NULL;
-
- if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
- goto fail;
- }
-
- /* allocating generic PCI control info */
- i82443bxgx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
- if (!i82443bxgx_pci) {
- printk(KERN_WARNING
- "%s(): Unable to create PCI control\n",
- __func__);
- printk(KERN_WARNING
- "%s(): PCI error report via EDAC not setup\n",
- __func__);
- }
-
- debugf3("MC: %s: %s(): success\n", __FILE__, __func__);
- return 0;
-
-fail:
- edac_mc_free(mci);
- return -ENODEV;
-}
-
-EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_probe1);
-
-/* returns count (>= 0), or negative on error */
-static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int rc;
-
- debugf0("MC: %s: %s()\n", __FILE__, __func__);
-
- /* don't need to call pci_enable_device() */
- rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
-
- if (mci_pdev == NULL)
- mci_pdev = pci_dev_get(pdev);
-
- return rc;
-}
-
-static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
-
- debugf0("%s: %s()\n", __FILE__, __func__);
-
- if (i82443bxgx_pci)
- edac_pci_release_generic_ctl(i82443bxgx_pci);
-
- if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
- return;
-
- edac_mc_free(mci);
-}
-
-EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
-
-static DEFINE_PCI_DEVICE_TABLE(i82443bxgx_pci_tbl) = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2)},
- {0,} /* 0 terminated list. */
-};
-
-MODULE_DEVICE_TABLE(pci, i82443bxgx_pci_tbl);
-
-static struct pci_driver i82443bxgx_edacmc_driver = {
- .name = EDAC_MOD_STR,
- .probe = i82443bxgx_edacmc_init_one,
- .remove = __devexit_p(i82443bxgx_edacmc_remove_one),
- .id_table = i82443bxgx_pci_tbl,
-};
-
-static int __init i82443bxgx_edacmc_init(void)
-{
- int pci_rc;
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- pci_rc = pci_register_driver(&i82443bxgx_edacmc_driver);
- if (pci_rc < 0)
- goto fail0;
-
- if (mci_pdev == NULL) {
- const struct pci_device_id *id = &i82443bxgx_pci_tbl[0];
- int i = 0;
- i82443bxgx_registered = 0;
-
- while (mci_pdev == NULL && id->vendor != 0) {
- mci_pdev = pci_get_device(id->vendor,
- id->device, NULL);
- i++;
- id = &i82443bxgx_pci_tbl[i];
- }
- if (!mci_pdev) {
- debugf0("i82443bxgx pci_get_device fail\n");
- pci_rc = -ENODEV;
- goto fail1;
- }
-
- pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl);
-
- if (pci_rc < 0) {
- debugf0("i82443bxgx init fail\n");
- pci_rc = -ENODEV;
- goto fail1;
- }
- }
-
- return 0;
-
-fail1:
- pci_unregister_driver(&i82443bxgx_edacmc_driver);
-
-fail0:
- if (mci_pdev != NULL)
- pci_dev_put(mci_pdev);
-
- return pci_rc;
-}
-
-static void __exit i82443bxgx_edacmc_exit(void)
-{
- pci_unregister_driver(&i82443bxgx_edacmc_driver);
-
- if (!i82443bxgx_registered)
- i82443bxgx_edacmc_remove_one(mci_pdev);
-
- if (mci_pdev)
- pci_dev_put(mci_pdev);
-}
-
-module_init(i82443bxgx_edacmc_init);
-module_exit(i82443bxgx_edacmc_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD");
-MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers");
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/ANDROID_3.4.5/drivers/edac/i82860_edac.c b/ANDROID_3.4.5/drivers/edac/i82860_edac.c
deleted file mode 100644
index c779092d..00000000
--- a/ANDROID_3.4.5/drivers/edac/i82860_edac.c
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Intel 82860 Memory Controller kernel module
- * (C) 2005 Red Hat (http://www.redhat.com)
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Written by Ben Woodard <woodard@redhat.com>
- * shamelessly copied from and based upon the edac_i82875 driver
- * by Thayne Harbaugh of Linux Networx. (http://lnxi.com)
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/edac.h>
-#include "edac_core.h"
-
-#define I82860_REVISION " Ver: 2.0.2"
-#define EDAC_MOD_STR "i82860_edac"
-
-#define i82860_printk(level, fmt, arg...) \
- edac_printk(level, "i82860", fmt, ##arg)
-
-#define i82860_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "i82860", fmt, ##arg)
-
-#ifndef PCI_DEVICE_ID_INTEL_82860_0
-#define PCI_DEVICE_ID_INTEL_82860_0 0x2531
-#endif /* PCI_DEVICE_ID_INTEL_82860_0 */
-
-#define I82860_MCHCFG 0x50
-#define I82860_GBA 0x60
-#define I82860_GBA_MASK 0x7FF
-#define I82860_GBA_SHIFT 24
-#define I82860_ERRSTS 0xC8
-#define I82860_EAP 0xE4
-#define I82860_DERRCTL_STS 0xE2
-
-enum i82860_chips {
- I82860 = 0,
-};
-
-struct i82860_dev_info {
- const char *ctl_name;
-};
-
-struct i82860_error_info {
- u16 errsts;
- u32 eap;
- u16 derrsyn;
- u16 errsts2;
-};
-
-static const struct i82860_dev_info i82860_devs[] = {
- [I82860] = {
- .ctl_name = "i82860"},
-};
-
-static struct pci_dev *mci_pdev; /* init dev: in case that AGP code
- * has already registered driver
- */
-static struct edac_pci_ctl_info *i82860_pci;
-
-static void i82860_get_error_info(struct mem_ctl_info *mci,
- struct i82860_error_info *info)
-{
- struct pci_dev *pdev;
-
- pdev = to_pci_dev(mci->dev);
-
- /*
- * This is a mess because there is no atomic way to read all the
-	 * registers at once, and a CE can be overwritten by a UE while we
-	 * read them.
- */
- pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts);
- pci_read_config_dword(pdev, I82860_EAP, &info->eap);
- pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn);
- pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts2);
-
- pci_write_bits16(pdev, I82860_ERRSTS, 0x0003, 0x0003);
-
- /*
- * If the error is the same for both reads then the first set of reads
-	 * is valid. If there is a change then there is a CE with no info and
-	 * the second set of reads is valid and should be the UE info.
- */
- if (!(info->errsts2 & 0x0003))
- return;
-
- if ((info->errsts ^ info->errsts2) & 0x0003) {
- pci_read_config_dword(pdev, I82860_EAP, &info->eap);
- pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn);
- }
-}
-
-static int i82860_process_error_info(struct mem_ctl_info *mci,
- struct i82860_error_info *info,
- int handle_errors)
-{
- int row;
-
- if (!(info->errsts2 & 0x0003))
- return 0;
-
- if (!handle_errors)
- return 1;
-
- if ((info->errsts ^ info->errsts2) & 0x0003) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
- info->errsts = info->errsts2;
- }
-
- info->eap >>= PAGE_SHIFT;
- row = edac_mc_find_csrow_by_page(mci, info->eap);
-
- if (info->errsts & 0x0002)
- edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
- else
- edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0,
-				"i82860 CE");
-
- return 1;
-}
-
-static void i82860_check(struct mem_ctl_info *mci)
-{
- struct i82860_error_info info;
-
- debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
- i82860_get_error_info(mci, &info);
- i82860_process_error_info(mci, &info, 1);
-}
-
-static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
-{
- unsigned long last_cumul_size;
- u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
- u16 value;
- u32 cumul_size;
- struct csrow_info *csrow;
- int index;
-
- pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
- mchcfg_ddim = mchcfg_ddim & 0x180;
- last_cumul_size = 0;
-
-	/* The group row boundary (GRA) reg values are the boundary addresses
-	 * for each DRAM row, with a granularity of 16MB. GRA regs are
-	 * cumulative; therefore GRA15 will contain the total memory contained
-	 * in all 16 rows.
- */
- for (index = 0; index < mci->nr_csrows; index++) {
- csrow = &mci->csrows[index];
- pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
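-		/*
-		 * Convert the 16 MiB-granularity boundary (shift of 24) into
-		 * a cumulative page count.
-		 */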
- cumul_size = (value & I82860_GBA_MASK) <<
- (I82860_GBA_SHIFT - PAGE_SHIFT);
- debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
- cumul_size);
-
- if (cumul_size == last_cumul_size)
- continue; /* not populated */
-
- csrow->first_page = last_cumul_size;
- csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
- last_cumul_size = cumul_size;
-		csrow->grain = 1 << 12;	/* I82860_EAP has 4KiB resolution */
- csrow->mtype = MEM_RMBS;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
- }
-}
-
-static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
-{
- struct mem_ctl_info *mci;
- struct i82860_error_info discard;
-
- /* RDRAM has channels but these don't map onto the abstractions that
- edac uses.
- The device groups from the GRA registers seem to map reasonably
- well onto the notion of a chip select row.
-	   There are 16 GRA registers; since the name is associated with
-	   the channel and the GRA registers map to physical devices, we
-	   make one channel per group.
- */
- mci = edac_mc_alloc(0, 16, 1, 0);
-
- if (!mci)
- return -ENOMEM;
-
- debugf3("%s(): init mci\n", __func__);
- mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_DDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
-	/* I'm not sure about this, but I think that all RDRAM is SECDED */
- mci->edac_cap = EDAC_FLAG_SECDED;
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I82860_REVISION;
- mci->ctl_name = i82860_devs[dev_idx].ctl_name;
- mci->dev_name = pci_name(pdev);
- mci->edac_check = i82860_check;
- mci->ctl_page_to_phys = NULL;
- i82860_init_csrows(mci, pdev);
- i82860_get_error_info(mci, &discard); /* clear counters */
-
- /* Here we assume that we will never see multiple instances of this
- * type of memory controller. The ID is therefore hardcoded to 0.
- */
- if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
- goto fail;
- }
-
- /* allocating generic PCI control info */
- i82860_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
- if (!i82860_pci) {
- printk(KERN_WARNING
- "%s(): Unable to create PCI control\n",
- __func__);
- printk(KERN_WARNING
- "%s(): PCI error report via EDAC not setup\n",
- __func__);
- }
-
- /* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
-
- return 0;
-
-fail:
- edac_mc_free(mci);
- return -ENODEV;
-}
-
-/* returns count (>= 0), or negative on error */
-static int __devinit i82860_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int rc;
-
- debugf0("%s()\n", __func__);
- i82860_printk(KERN_INFO, "i82860 init one\n");
-
- if (pci_enable_device(pdev) < 0)
- return -EIO;
-
- rc = i82860_probe1(pdev, ent->driver_data);
-
- if (rc == 0)
- mci_pdev = pci_dev_get(pdev);
-
- return rc;
-}
-
-static void __devexit i82860_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
-
- debugf0("%s()\n", __func__);
-
- if (i82860_pci)
- edac_pci_release_generic_ctl(i82860_pci);
-
- if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
- return;
-
- edac_mc_free(mci);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(i82860_pci_tbl) = {
- {
- PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I82860},
- {
- 0,
- } /* 0 terminated list. */
-};
-
-MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
-
-static struct pci_driver i82860_driver = {
- .name = EDAC_MOD_STR,
- .probe = i82860_init_one,
- .remove = __devexit_p(i82860_remove_one),
- .id_table = i82860_pci_tbl,
-};
-
-static int __init i82860_init(void)
-{
- int pci_rc;
-
- debugf3("%s()\n", __func__);
-
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
- goto fail0;
-
- if (!mci_pdev) {
- mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82860_0, NULL);
-
- if (mci_pdev == NULL) {
- debugf0("860 pci_get_device fail\n");
- pci_rc = -ENODEV;
- goto fail1;
- }
-
- pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
-
- if (pci_rc < 0) {
- debugf0("860 init fail\n");
- pci_rc = -ENODEV;
- goto fail1;
- }
- }
-
- return 0;
-
-fail1:
- pci_unregister_driver(&i82860_driver);
-
-fail0:
- if (mci_pdev != NULL)
- pci_dev_put(mci_pdev);
-
- return pci_rc;
-}
-
-static void __exit i82860_exit(void)
-{
- debugf3("%s()\n", __func__);
-
- pci_unregister_driver(&i82860_driver);
-
- if (mci_pdev != NULL)
- pci_dev_put(mci_pdev);
-}
-
-module_init(i82860_init);
-module_exit(i82860_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
- "Ben Woodard <woodard@redhat.com>");
-MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/ANDROID_3.4.5/drivers/edac/i82875p_edac.c b/ANDROID_3.4.5/drivers/edac/i82875p_edac.c
deleted file mode 100644
index 10f15d85..00000000
--- a/ANDROID_3.4.5/drivers/edac/i82875p_edac.c
+++ /dev/null
@@ -1,596 +0,0 @@
-/*
- * Intel D82875P Memory Controller kernel module
- * (C) 2003 Linux Networx (http://lnxi.com)
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Written by Thayne Harbaugh
- * Contributors:
- * Wang Zhenyu at intel.com
- *
- * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
- *
- * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/edac.h>
-#include "edac_core.h"
-
-#define I82875P_REVISION " Ver: 2.0.2"
-#define EDAC_MOD_STR "i82875p_edac"
-
-#define i82875p_printk(level, fmt, arg...) \
- edac_printk(level, "i82875p", fmt, ##arg)
-
-#define i82875p_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg)
-
-#ifndef PCI_DEVICE_ID_INTEL_82875_0
-#define PCI_DEVICE_ID_INTEL_82875_0 0x2578
-#endif /* PCI_DEVICE_ID_INTEL_82875_0 */
-
-#ifndef PCI_DEVICE_ID_INTEL_82875_6
-#define PCI_DEVICE_ID_INTEL_82875_6 0x257e
-#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
-
-/* four csrows in dual channel, eight in single channel */
-#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
-
-/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
-#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
- *
- * 31:12 block address
- * 11:0 reserved
- */
-
-#define I82875P_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
- *
- * 7:0 DRAM ECC Syndrome
- */
-
-#define I82875P_DES 0x5d /* DRAM Error Status (8b)
- *
- * 7:1 reserved
- * 0 Error channel 0/1
- */
-
-#define I82875P_ERRSTS 0xc8 /* Error Status Register (16b)
- *
- * 15:10 reserved
- * 9 non-DRAM lock error (ndlock)
- * 8 Sftwr Generated SMI
- * 7 ECC UE
- * 6 reserved
- * 5 MCH detects unimplemented cycle
- * 4 AGP access outside GA
- * 3 Invalid AGP access
- * 2 Invalid GA translation table
- * 1 Unsupported AGP command
- * 0 ECC CE
- */
-
-#define I82875P_ERRCMD 0xca /* Error Command (16b)
- *
- * 15:10 reserved
- * 9 SERR on non-DRAM lock
- * 8 SERR on ECC UE
- * 7 SERR on ECC CE
- * 6 target abort on high exception
- * 5 detect unimplemented cyc
- * 4 AGP access outside of GA
- * 3 SERR on invalid AGP access
- * 2 invalid translation table
- * 1 SERR on unsupported AGP command
- * 0 reserved
- */
-
-/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
-#define I82875P_PCICMD6 0x04 /* PCI Command Register (16b)
- *
- * 15:10 reserved
- * 9 fast back-to-back - ro 0
- * 8 SERR enable - ro 0
- * 7 addr/data stepping - ro 0
- * 6 parity err enable - ro 0
- * 5 VGA palette snoop - ro 0
- * 4 mem wr & invalidate - ro 0
- * 3 special cycle - ro 0
- * 2 bus master - ro 0
- * 1 mem access dev6 - 0(dis),1(en)
- * 0 IO access dev3 - 0(dis),1(en)
- */
-
-#define I82875P_BAR6 0x10 /* Mem Delays Base ADDR Reg (32b)
- *
- * 31:12 mem base addr [31:12]
- * 11:4 address mask - ro 0
- * 3 prefetchable - ro 0(non),1(pre)
- * 2:1 mem type - ro 0
- * 0 mem space - ro 0
- */
-
-/* Intel 82875p MMIO register space - device 0 function 0 - MMR space */
-
-#define I82875P_DRB_SHIFT 26 /* 64MiB grain */
-#define I82875P_DRB 0x00 /* DRAM Row Boundary (8b x 8)
- *
- * 7 reserved
- * 6:0 64MiB row boundary addr
- */
-
-#define I82875P_DRA 0x10 /* DRAM Row Attribute (4b x 8)
- *
- * 7 reserved
- * 6:4 row attr row 1
- * 3 reserved
- * 2:0 row attr row 0
- *
- * 000 = 4KiB
- * 001 = 8KiB
- * 010 = 16KiB
- * 011 = 32KiB
- */
-
-#define I82875P_DRC 0x68 /* DRAM Controller Mode (32b)
- *
- * 31:30 reserved
- * 29 init complete
- * 28:23 reserved
- * 22:21 nr chan 00=1,01=2
- * 20 reserved
- * 19:18 Data Integ Mode 00=none,01=ecc
- * 17:11 reserved
- * 10:8 refresh mode
- * 7 reserved
- * 6:4 mode select
- * 3:2 reserved
- * 1:0 DRAM type 01=DDR
- */
-
-enum i82875p_chips {
- I82875P = 0,
-};
-
-struct i82875p_pvt {
- struct pci_dev *ovrfl_pdev;
- void __iomem *ovrfl_window;
-};
-
-struct i82875p_dev_info {
- const char *ctl_name;
-};
-
-struct i82875p_error_info {
- u16 errsts;
- u32 eap;
- u8 des;
- u8 derrsyn;
- u16 errsts2;
-};
-
-static const struct i82875p_dev_info i82875p_devs[] = {
- [I82875P] = {
- .ctl_name = "i82875p"},
-};
-
-static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has
- * already registered driver
- */
-
-static struct edac_pci_ctl_info *i82875p_pci;
-
-static void i82875p_get_error_info(struct mem_ctl_info *mci,
- struct i82875p_error_info *info)
-{
- struct pci_dev *pdev;
-
- pdev = to_pci_dev(mci->dev);
-
- /*
- * This is a mess because there is no atomic way to read all the
-	 * registers at once, and a CE can be overwritten by a UE while we
-	 * read them.
- */
- pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts);
-
- if (!(info->errsts & 0x0081))
- return;
-
- pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
- pci_read_config_byte(pdev, I82875P_DES, &info->des);
- pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
- pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2);
-
- /*
-	 * If the error bits are the same across both reads then the
-	 * first set of reads is valid.  If they differ, a UE overwrote
-	 * the CE: report a CE with no info, and the second set of reads
-	 * is valid and describes the UE.
- */
- if ((info->errsts ^ info->errsts2) & 0x0081) {
- pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
- pci_read_config_byte(pdev, I82875P_DES, &info->des);
- pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
- }
-
- pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081);
-}
-
-static int i82875p_process_error_info(struct mem_ctl_info *mci,
- struct i82875p_error_info *info,
- int handle_errors)
-{
- int row, multi_chan;
-
- multi_chan = mci->csrows[0].nr_channels - 1;
-
- if (!(info->errsts & 0x0081))
- return 0;
-
- if (!handle_errors)
- return 1;
-
- if ((info->errsts ^ info->errsts2) & 0x0081) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
- info->errsts = info->errsts2;
- }
-
- info->eap >>= PAGE_SHIFT;
- row = edac_mc_find_csrow_by_page(mci, info->eap);
-
- if (info->errsts & 0x0080)
- edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
- else
- edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
- multi_chan ? (info->des & 0x1) : 0,
- "i82875p CE");
-
- return 1;
-}
-
-static void i82875p_check(struct mem_ctl_info *mci)
-{
- struct i82875p_error_info info;
-
- debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
- i82875p_get_error_info(mci, &info);
- i82875p_process_error_info(mci, &info, 1);
-}
-
-/* Return 0 on success or 1 on failure. */
-static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
- struct pci_dev **ovrfl_pdev,
- void __iomem **ovrfl_window)
-{
- struct pci_dev *dev;
- void __iomem *window;
- int err;
-
- *ovrfl_pdev = NULL;
- *ovrfl_window = NULL;
- dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
-
- if (dev == NULL) {
- /* Intel tells BIOS developers to hide device 6 which
- * configures the overflow device access containing
- * the DRBs - this is where we expose device 6.
- * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
- */
- pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
- dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
-
- if (dev == NULL)
- return 1;
-
- err = pci_bus_add_device(dev);
- if (err) {
- i82875p_printk(KERN_ERR,
- "%s(): pci_bus_add_device() Failed\n",
- __func__);
- }
- pci_bus_assign_resources(dev->bus);
- }
-
- *ovrfl_pdev = dev;
-
- if (pci_enable_device(dev)) {
- i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow "
- "device\n", __func__);
- return 1;
- }
-
- if (pci_request_regions(dev, pci_name(dev))) {
-#ifdef CORRECT_BIOS
- goto fail0;
-#endif
- }
-
- /* cache is irrelevant for PCI bus reads/writes */
- window = pci_ioremap_bar(dev, 0);
- if (window == NULL) {
- i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
- __func__);
- goto fail1;
- }
-
- *ovrfl_window = window;
- return 0;
-
-fail1:
- pci_release_regions(dev);
-
-#ifdef CORRECT_BIOS
-fail0:
- pci_disable_device(dev);
-#endif
- /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
- return 1;
-}
-
-/* Return 1 if dual channel mode is active. Else return 0. */
-static inline int dual_channel_active(u32 drc)
-{
- return (drc >> 21) & 0x1;
-}
-
-static void i82875p_init_csrows(struct mem_ctl_info *mci,
- struct pci_dev *pdev,
- void __iomem * ovrfl_window, u32 drc)
-{
- struct csrow_info *csrow;
- unsigned long last_cumul_size;
- u8 value;
- u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
- u32 cumul_size;
- int index;
-
- drc_ddim = (drc >> 18) & 0x1;
- last_cumul_size = 0;
-
- /* The dram row boundary (DRB) reg values are boundary address
- * for each DRAM row with a granularity of 32 or 64MB (single/dual
- * channel operation). DRB regs are cumulative; therefore DRB7 will
- * contain the total memory contained in all eight rows.
- */
-
- for (index = 0; index < mci->nr_csrows; index++) {
- csrow = &mci->csrows[index];
-
- value = readb(ovrfl_window + I82875P_DRB + index);
- cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
- debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
- cumul_size);
- if (cumul_size == last_cumul_size)
- continue; /* not populated */
-
- csrow->first_page = last_cumul_size;
- csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
- last_cumul_size = cumul_size;
-		csrow->grain = 1 << 12;	/* I82875P_EAP has 4KiB resolution */
- csrow->mtype = MEM_DDR;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
- }
-}
-
-static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
-{
- int rc = -ENODEV;
- struct mem_ctl_info *mci;
- struct i82875p_pvt *pvt;
- struct pci_dev *ovrfl_pdev;
- void __iomem *ovrfl_window;
- u32 drc;
- u32 nr_chans;
- struct i82875p_error_info discard;
-
- debugf0("%s()\n", __func__);
-
- ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
-
- if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
- return -ENODEV;
- drc = readl(ovrfl_window + I82875P_DRC);
- nr_chans = dual_channel_active(drc) + 1;
- mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
- nr_chans, 0);
-
- if (!mci) {
- rc = -ENOMEM;
- goto fail0;
- }
-
- /* Keeps mci available after edac_mc_del_mc() till edac_mc_free() */
- kobject_get(&mci->edac_mci_kobj);
-
- debugf3("%s(): init mci\n", __func__);
- mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_DDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_UNKNOWN;
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I82875P_REVISION;
- mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
- mci->dev_name = pci_name(pdev);
- mci->edac_check = i82875p_check;
- mci->ctl_page_to_phys = NULL;
- debugf3("%s(): init pvt\n", __func__);
- pvt = (struct i82875p_pvt *)mci->pvt_info;
- pvt->ovrfl_pdev = ovrfl_pdev;
- pvt->ovrfl_window = ovrfl_window;
- i82875p_init_csrows(mci, pdev, ovrfl_window, drc);
- i82875p_get_error_info(mci, &discard); /* clear counters */
-
- /* Here we assume that we will never see multiple instances of this
- * type of memory controller. The ID is therefore hardcoded to 0.
- */
- if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
- goto fail1;
- }
-
- /* allocating generic PCI control info */
- i82875p_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
- if (!i82875p_pci) {
- printk(KERN_WARNING
- "%s(): Unable to create PCI control\n",
- __func__);
- printk(KERN_WARNING
- "%s(): PCI error report via EDAC not setup\n",
- __func__);
- }
-
- /* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
- return 0;
-
-fail1:
- kobject_put(&mci->edac_mci_kobj);
- edac_mc_free(mci);
-
-fail0:
- iounmap(ovrfl_window);
- pci_release_regions(ovrfl_pdev);
-
- pci_disable_device(ovrfl_pdev);
- /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
- return rc;
-}
-
-/* returns count (>= 0), or negative on error */
-static int __devinit i82875p_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int rc;
-
- debugf0("%s()\n", __func__);
- i82875p_printk(KERN_INFO, "i82875p init one\n");
-
- if (pci_enable_device(pdev) < 0)
- return -EIO;
-
- rc = i82875p_probe1(pdev, ent->driver_data);
-
- if (mci_pdev == NULL)
- mci_pdev = pci_dev_get(pdev);
-
- return rc;
-}
-
-static void __devexit i82875p_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
- struct i82875p_pvt *pvt = NULL;
-
- debugf0("%s()\n", __func__);
-
- if (i82875p_pci)
- edac_pci_release_generic_ctl(i82875p_pci);
-
- if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
- return;
-
- pvt = (struct i82875p_pvt *)mci->pvt_info;
-
- if (pvt->ovrfl_window)
- iounmap(pvt->ovrfl_window);
-
- if (pvt->ovrfl_pdev) {
-#ifdef CORRECT_BIOS
- pci_release_regions(pvt->ovrfl_pdev);
-#endif /*CORRECT_BIOS */
- pci_disable_device(pvt->ovrfl_pdev);
- pci_dev_put(pvt->ovrfl_pdev);
- }
-
- edac_mc_free(mci);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(i82875p_pci_tbl) = {
- {
- PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I82875P},
- {
- 0,
- } /* 0 terminated list. */
-};
-
-MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
-
-static struct pci_driver i82875p_driver = {
- .name = EDAC_MOD_STR,
- .probe = i82875p_init_one,
- .remove = __devexit_p(i82875p_remove_one),
- .id_table = i82875p_pci_tbl,
-};
-
-static int __init i82875p_init(void)
-{
- int pci_rc;
-
- debugf3("%s()\n", __func__);
-
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- pci_rc = pci_register_driver(&i82875p_driver);
-
- if (pci_rc < 0)
- goto fail0;
-
- if (mci_pdev == NULL) {
- mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82875_0, NULL);
-
- if (!mci_pdev) {
- debugf0("875p pci_get_device fail\n");
- pci_rc = -ENODEV;
- goto fail1;
- }
-
- pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
-
- if (pci_rc < 0) {
- debugf0("875p init fail\n");
- pci_rc = -ENODEV;
- goto fail1;
- }
- }
-
- return 0;
-
-fail1:
- pci_unregister_driver(&i82875p_driver);
-
-fail0:
- if (mci_pdev != NULL)
- pci_dev_put(mci_pdev);
-
- return pci_rc;
-}
-
-static void __exit i82875p_exit(void)
-{
- debugf3("%s()\n", __func__);
-
- i82875p_remove_one(mci_pdev);
- pci_dev_put(mci_pdev);
-
- pci_unregister_driver(&i82875p_driver);
-
-}
-
-module_init(i82875p_init);
-module_exit(i82875p_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
-MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
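The error path above leans on one recurring EDAC idiom: ERRSTS, EAP and DERRSYN cannot be read atomically, so the status word is sampled twice and a change between the two samples means a UE landed while a CE was being read out. Below is a minimal standalone sketch of that double-read pattern; the accessor names and ERR_MASK value are hypothetical stand-ins, not part of the deleted driver.

#include <stdint.h>
#include <stdbool.h>

#define ERR_MASK 0x0081u               /* UE | CE bits, matching I82875P_ERRSTS */

extern uint16_t read_errsts(void);     /* hypothetical: read the status register */
extern void read_detail_regs(void);    /* hypothetical: sample EAP/DES/DERRSYN   */
extern void report_ce_no_info(void);   /* hypothetical: log the overwritten CE   */

static bool collect_error(uint16_t *errsts)
{
	uint16_t first, second;

	first = read_errsts();
	if (!(first & ERR_MASK))
		return false;                  /* nothing pending */

	read_detail_regs();                    /* address, channel, syndrome */
	second = read_errsts();

	if ((first ^ second) & ERR_MASK) {
		/* A UE overwrote the CE between the two reads: the detail
		 * registers now describe the UE, so sample them again and
		 * note the CE that was lost. */
		read_detail_regs();
		report_ce_no_info();
		first = second;
	}

	*errsts = first;
	return true;
}

A caller would then branch on the UE/CE bits in *errsts exactly as i82875p_process_error_info() does above.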
diff --git a/ANDROID_3.4.5/drivers/edac/i82975x_edac.c b/ANDROID_3.4.5/drivers/edac/i82975x_edac.c
deleted file mode 100644
index 0cd8368f..00000000
--- a/ANDROID_3.4.5/drivers/edac/i82975x_edac.c
+++ /dev/null
@@ -1,698 +0,0 @@
-/*
- * Intel 82975X Memory Controller kernel module
- * (C) 2007 aCarLab (India) Pvt. Ltd. (http://acarlab.com)
- * (C) 2007 jetzbroadband (http://jetzbroadband.com)
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Written by Arvind R.
- * Copied from i82875p_edac.c source:
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/edac.h>
-#include "edac_core.h"
-
-#define I82975X_REVISION " Ver: 1.0.0"
-#define EDAC_MOD_STR "i82975x_edac"
-
-#define i82975x_printk(level, fmt, arg...) \
- edac_printk(level, "i82975x", fmt, ##arg)
-
-#define i82975x_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "i82975x", fmt, ##arg)
-
-#ifndef PCI_DEVICE_ID_INTEL_82975_0
-#define PCI_DEVICE_ID_INTEL_82975_0 0x277c
-#endif /* PCI_DEVICE_ID_INTEL_82975_0 */
-
-#define I82975X_NR_CSROWS(nr_chans) (8/(nr_chans))
-
-/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
-#define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b)
- *
- * 31:7 128 byte cache-line address
- * 6:1 reserved
- * 0 0: CH0; 1: CH1
- */
-
-#define I82975X_DERRSYN 0x5c /* Dram Error SYNdrome (8b)
- *
- * 7:0 DRAM ECC Syndrome
- */
-
-#define I82975X_DES 0x5d /* Dram ERRor DeSTination (8b)
- * 0h: Processor Memory Reads
- * 1h:7h reserved
- * More - See Page 65 of Intel DocSheet.
- */
-
-#define I82975X_ERRSTS 0xc8 /* Error Status Register (16b)
- *
- * 15:12 reserved
- * 11 Thermal Sensor Event
- * 10 reserved
- * 9 non-DRAM lock error (ndlock)
- * 8 Refresh Timeout
- * 7:2 reserved
- * 1 ECC UE (multibit DRAM error)
- * 0 ECC CE (singlebit DRAM error)
- */
-
-/* Error Reporting is supported by 3 mechanisms:
- 1. DMI SERR generation ( ERRCMD )
- 2. SMI DMI generation ( SMICMD )
- 3. SCI DMI generation ( SCICMD )
-NOTE: Only ONE of the three should be enabled at a time
-*/
-#define I82975X_ERRCMD 0xca /* Error Command (16b)
- *
- * 15:12 reserved
- * 11 Thermal Sensor Event
- * 10 reserved
- * 9 non-DRAM lock error (ndlock)
- * 8 Refresh Timeout
- * 7:2 reserved
- * 1 ECC UE (multibit DRAM error)
- * 0 ECC CE (singlebit DRAM error)
- */
-
-#define I82975X_SMICMD 0xcc /* Error Command (16b)
- *
- * 15:2 reserved
- * 1 ECC UE (multibit DRAM error)
- * 0 ECC CE (singlebit DRAM error)
- */
-
-#define I82975X_SCICMD 0xce /* Error Command (16b)
- *
- * 15:2 reserved
- * 1 ECC UE (multibit DRAM error)
- * 0 ECC CE (singlebit DRAM error)
- */
-
-#define I82975X_XEAP 0xfc /* Extended Dram Error Address Pointer (8b)
- *
- * 7:1 reserved
- * 0 Bit32 of the Dram Error Address
- */
-
-#define I82975X_MCHBAR 0x44 /*
- *
- * 31:14 Base Addr of 16K memory-mapped
- * configuration space
-					 * 13:1  reserved
- * 0 mem-mapped config space enable
- */
-
-/* NOTE: Following addresses have to indexed using MCHBAR offset (44h, 32b) */
-/* Intel 82975x memory mapped register space */
-
-#define I82975X_DRB_SHIFT 25 /* fixed 32MiB grain */
-
-#define I82975X_DRB 0x100 /* DRAM Row Boundary (8b x 8)
- *
- * 7 set to 1 in highest DRB of
- * channel if 4GB in ch.
- * 6:2 upper boundary of rank in
- * 32MB grains
- * 1:0 set to 0
- */
-#define I82975X_DRB_CH0R0 0x100
-#define I82975X_DRB_CH0R1 0x101
-#define I82975X_DRB_CH0R2 0x102
-#define I82975X_DRB_CH0R3 0x103
-#define I82975X_DRB_CH1R0 0x180
-#define I82975X_DRB_CH1R1 0x181
-#define I82975X_DRB_CH1R2 0x182
-#define I82975X_DRB_CH1R3 0x183
-
-
-#define I82975X_DRA 0x108 /* DRAM Row Attribute (4b x 8)
- * defines the PAGE SIZE to be used
- * for the rank
- * 7 reserved
- * 6:4 row attr of odd rank, i.e. 1
- * 3 reserved
- * 2:0 row attr of even rank, i.e. 0
- *
- * 000 = unpopulated
- * 001 = reserved
- * 010 = 4KiB
- * 011 = 8KiB
- * 100 = 16KiB
- * others = reserved
- */
-#define I82975X_DRA_CH0R01 0x108
-#define I82975X_DRA_CH0R23 0x109
-#define I82975X_DRA_CH1R01 0x188
-#define I82975X_DRA_CH1R23 0x189
-
-
-#define I82975X_BNKARC 0x10e /* Type of device in each rank - Bank Arch (16b)
- *
- * 15:8 reserved
- * 7:6 Rank 3 architecture
- * 5:4 Rank 2 architecture
- * 3:2 Rank 1 architecture
- * 1:0 Rank 0 architecture
- *
- * 00 => 4 banks
- * 01 => 8 banks
- */
-#define I82975X_C0BNKARC 0x10e
-#define I82975X_C1BNKARC 0x18e
-
-
-
-#define I82975X_DRC 0x120 /* DRAM Controller Mode0 (32b)
- *
- * 31:30 reserved
- * 29 init complete
- * 28:11 reserved, according to Intel
- * 22:21 number of channels
- * 00=1 01=2 in 82875
- * seems to be ECC mode
- * bits in 82975 in Asus
- * P5W
- * 19:18 Data Integ Mode
- * 00=none 01=ECC in 82875
- * 10:8 refresh mode
- * 7 reserved
- * 6:4 mode select
- * 3:2 reserved
- * 1:0 DRAM type 10=Second Revision
- * DDR2 SDRAM
- * 00, 01, 11 reserved
- */
-#define I82975X_DRC_CH0M0 0x120
-#define I82975X_DRC_CH1M0 0x1A0
-
-
-#define I82975X_DRC_M1 0x124 /* DRAM Controller Mode1 (32b)
- * 31 0=Standard Address Map
- * 1=Enhanced Address Map
- * 30:0 reserved
- */
-
-#define I82975X_DRC_CH0M1 0x124
-#define I82975X_DRC_CH1M1 0x1A4
-
-enum i82975x_chips {
- I82975X = 0,
-};
-
-struct i82975x_pvt {
- void __iomem *mch_window;
-};
-
-struct i82975x_dev_info {
- const char *ctl_name;
-};
-
-struct i82975x_error_info {
- u16 errsts;
- u32 eap;
- u8 des;
- u8 derrsyn;
- u16 errsts2;
- u8 chan; /* the channel is bit 0 of EAP */
- u8 xeap; /* extended eap bit */
-};
-
-static const struct i82975x_dev_info i82975x_devs[] = {
- [I82975X] = {
- .ctl_name = "i82975x"
- },
-};
-
-static struct pci_dev *mci_pdev;	/* init dev: in case the AGP code has
-					 * already registered a driver
- */
-
-static int i82975x_registered = 1;
-
-static void i82975x_get_error_info(struct mem_ctl_info *mci,
- struct i82975x_error_info *info)
-{
- struct pci_dev *pdev;
-
- pdev = to_pci_dev(mci->dev);
-
- /*
-	 * This is a mess because there is no atomic way to read all the
-	 * registers at once, and a CE can be overwritten by a UE while
-	 * we are reading them.
- */
- pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts);
- pci_read_config_dword(pdev, I82975X_EAP, &info->eap);
- pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap);
- pci_read_config_byte(pdev, I82975X_DES, &info->des);
- pci_read_config_byte(pdev, I82975X_DERRSYN, &info->derrsyn);
- pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts2);
-
- pci_write_bits16(pdev, I82975X_ERRSTS, 0x0003, 0x0003);
-
- /*
-	 * If the error bits are the same across both reads then the
-	 * first set of reads is valid.  If they differ, a UE overwrote
-	 * the CE: report a CE with no info, and the second set of reads
-	 * is valid and describes the UE.
- */
- if (!(info->errsts2 & 0x0003))
- return;
-
- if ((info->errsts ^ info->errsts2) & 0x0003) {
- pci_read_config_dword(pdev, I82975X_EAP, &info->eap);
- pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap);
- pci_read_config_byte(pdev, I82975X_DES, &info->des);
- pci_read_config_byte(pdev, I82975X_DERRSYN,
- &info->derrsyn);
- }
-}
-
-static int i82975x_process_error_info(struct mem_ctl_info *mci,
- struct i82975x_error_info *info, int handle_errors)
-{
- int row, chan;
- unsigned long offst, page;
-
- if (!(info->errsts2 & 0x0003))
- return 0;
-
- if (!handle_errors)
- return 1;
-
- if ((info->errsts ^ info->errsts2) & 0x0003) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
- info->errsts = info->errsts2;
- }
-
- page = (unsigned long) info->eap;
- page >>= 1;
- if (info->xeap & 1)
- page |= 0x80000000;
- page >>= (PAGE_SHIFT - 1);
- row = edac_mc_find_csrow_by_page(mci, page);
-
- if (row == -1) {
- i82975x_mc_printk(mci, KERN_ERR, "error processing EAP:\n"
- "\tXEAP=%u\n"
- "\t EAP=0x%08x\n"
- "\tPAGE=0x%08x\n",
- (info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page);
- return 0;
- }
- chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
- offst = info->eap
- & ((1 << PAGE_SHIFT) -
- (1 << mci->csrows[row].grain));
-
- if (info->errsts & 0x0002)
- edac_mc_handle_ue(mci, page, offst , row, "i82975x UE");
- else
- edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
- chan, "i82975x CE");
-
- return 1;
-}
-
-static void i82975x_check(struct mem_ctl_info *mci)
-{
- struct i82975x_error_info info;
-
- debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
- i82975x_get_error_info(mci, &info);
- i82975x_process_error_info(mci, &info, 1);
-}
-
-/* Return 1 if dual channel mode is active. Else return 0. */
-static int dual_channel_active(void __iomem *mch_window)
-{
- /*
- * We treat interleaved-symmetric configuration as dual-channel - EAP's
- * bit-0 giving the channel of the error location.
- *
- * All other configurations are treated as single channel - the EAP's
- * bit-0 will resolve ok in symmetric area of mixed
- * (symmetric/asymmetric) configurations
- */
- u8 drb[4][2];
- int row;
- int dualch;
-
- for (dualch = 1, row = 0; dualch && (row < 4); row++) {
- drb[row][0] = readb(mch_window + I82975X_DRB + row);
- drb[row][1] = readb(mch_window + I82975X_DRB + row + 0x80);
- dualch = dualch && (drb[row][0] == drb[row][1]);
- }
- return dualch;
-}
-
-static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
-{
- /*
-	 * ECC is possible on i82975x ONLY with DEV_X8
- */
- return DEV_X8;
-}
-
-static void i82975x_init_csrows(struct mem_ctl_info *mci,
- struct pci_dev *pdev, void __iomem *mch_window)
-{
- static const char *labels[4] = {
- "DIMM A1", "DIMM A2",
- "DIMM B1", "DIMM B2"
- };
- struct csrow_info *csrow;
- unsigned long last_cumul_size;
- u8 value;
- u32 cumul_size;
- int index, chan;
-
- last_cumul_size = 0;
-
- /*
- * 82875 comment:
- * The dram row boundary (DRB) reg values are boundary address
- * for each DRAM row with a granularity of 32 or 64MB (single/dual
- * channel operation). DRB regs are cumulative; therefore DRB7 will
- * contain the total memory contained in all rows.
- *
- */
-
- for (index = 0; index < mci->nr_csrows; index++) {
- csrow = &mci->csrows[index];
-
- value = readb(mch_window + I82975X_DRB + index +
- ((index >= 4) ? 0x80 : 0));
- cumul_size = value;
- cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT);
- /*
- * Adjust cumul_size w.r.t number of channels
- *
- */
- if (csrow->nr_channels > 1)
- cumul_size <<= 1;
- debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
- cumul_size);
-
- /*
- * Initialise dram labels
- * index values:
- * [0-7] for single-channel; i.e. csrow->nr_channels = 1
- * [0-3] for dual-channel; i.e. csrow->nr_channels = 2
- */
- for (chan = 0; chan < csrow->nr_channels; chan++)
- strncpy(csrow->channels[chan].label,
- labels[(index >> 1) + (chan * 2)],
- EDAC_MC_LABEL_LEN);
-
- if (cumul_size == last_cumul_size)
- continue; /* not populated */
-
- csrow->first_page = last_cumul_size;
- csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
- last_cumul_size = cumul_size;
- csrow->grain = 1 << 7; /* 128Byte cache-line resolution */
- csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
- csrow->dtype = i82975x_dram_type(mch_window, index);
- csrow->edac_mode = EDAC_SECDED; /* only supported */
- }
-}
-
-/* #define i82975x_DEBUG_IOMEM */
-
-#ifdef i82975x_DEBUG_IOMEM
-static void i82975x_print_dram_timings(void __iomem *mch_window)
-{
- /*
- * The register meanings are from Intel specs;
- * (shows 13-5-5-5 for 800-DDR2)
- * Asus P5W Bios reports 15-5-4-4
- * What's your religion?
- */
- static const int caslats[4] = { 5, 4, 3, 6 };
- u32 dtreg[2];
-
- dtreg[0] = readl(mch_window + 0x114);
- dtreg[1] = readl(mch_window + 0x194);
- i82975x_printk(KERN_INFO, "DRAM Timings : Ch0 Ch1\n"
- " RAS Active Min = %d %d\n"
- " CAS latency = %d %d\n"
- " RAS to CAS = %d %d\n"
- " RAS precharge = %d %d\n",
- (dtreg[0] >> 19 ) & 0x0f,
- (dtreg[1] >> 19) & 0x0f,
- caslats[(dtreg[0] >> 8) & 0x03],
- caslats[(dtreg[1] >> 8) & 0x03],
- ((dtreg[0] >> 4) & 0x07) + 2,
- ((dtreg[1] >> 4) & 0x07) + 2,
- (dtreg[0] & 0x07) + 2,
- (dtreg[1] & 0x07) + 2
- );
-
-}
-#endif
-
-static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
-{
- int rc = -ENODEV;
- struct mem_ctl_info *mci;
- struct i82975x_pvt *pvt;
- void __iomem *mch_window;
- u32 mchbar;
- u32 drc[2];
- struct i82975x_error_info discard;
- int chans;
-#ifdef i82975x_DEBUG_IOMEM
- u8 c0drb[4];
- u8 c1drb[4];
-#endif
-
- debugf0("%s()\n", __func__);
-
- pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar);
- if (!(mchbar & 1)) {
- debugf3("%s(): failed, MCHBAR disabled!\n", __func__);
- goto fail0;
- }
- mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */
- mch_window = ioremap_nocache(mchbar, 0x1000);
-
-#ifdef i82975x_DEBUG_IOMEM
- i82975x_printk(KERN_INFO, "MCHBAR real = %0x, remapped = %p\n",
- mchbar, mch_window);
-
- c0drb[0] = readb(mch_window + I82975X_DRB_CH0R0);
- c0drb[1] = readb(mch_window + I82975X_DRB_CH0R1);
- c0drb[2] = readb(mch_window + I82975X_DRB_CH0R2);
- c0drb[3] = readb(mch_window + I82975X_DRB_CH0R3);
- c1drb[0] = readb(mch_window + I82975X_DRB_CH1R0);
- c1drb[1] = readb(mch_window + I82975X_DRB_CH1R1);
- c1drb[2] = readb(mch_window + I82975X_DRB_CH1R2);
- c1drb[3] = readb(mch_window + I82975X_DRB_CH1R3);
- i82975x_printk(KERN_INFO, "DRBCH0R0 = 0x%02x\n", c0drb[0]);
- i82975x_printk(KERN_INFO, "DRBCH0R1 = 0x%02x\n", c0drb[1]);
- i82975x_printk(KERN_INFO, "DRBCH0R2 = 0x%02x\n", c0drb[2]);
- i82975x_printk(KERN_INFO, "DRBCH0R3 = 0x%02x\n", c0drb[3]);
- i82975x_printk(KERN_INFO, "DRBCH1R0 = 0x%02x\n", c1drb[0]);
- i82975x_printk(KERN_INFO, "DRBCH1R1 = 0x%02x\n", c1drb[1]);
- i82975x_printk(KERN_INFO, "DRBCH1R2 = 0x%02x\n", c1drb[2]);
- i82975x_printk(KERN_INFO, "DRBCH1R3 = 0x%02x\n", c1drb[3]);
-#endif
-
- drc[0] = readl(mch_window + I82975X_DRC_CH0M0);
- drc[1] = readl(mch_window + I82975X_DRC_CH1M0);
-#ifdef i82975x_DEBUG_IOMEM
- i82975x_printk(KERN_INFO, "DRC_CH0 = %0x, %s\n", drc[0],
- ((drc[0] >> 21) & 3) == 1 ?
- "ECC enabled" : "ECC disabled");
- i82975x_printk(KERN_INFO, "DRC_CH1 = %0x, %s\n", drc[1],
- ((drc[1] >> 21) & 3) == 1 ?
- "ECC enabled" : "ECC disabled");
-
- i82975x_printk(KERN_INFO, "C0 BNKARC = %0x\n",
- readw(mch_window + I82975X_C0BNKARC));
- i82975x_printk(KERN_INFO, "C1 BNKARC = %0x\n",
- readw(mch_window + I82975X_C1BNKARC));
- i82975x_print_dram_timings(mch_window);
- goto fail1;
-#endif
- if (!(((drc[0] >> 21) & 3) == 1 || ((drc[1] >> 21) & 3) == 1)) {
- i82975x_printk(KERN_INFO, "ECC disabled on both channels.\n");
- goto fail1;
- }
-
- chans = dual_channel_active(mch_window) + 1;
-
- /* assuming only one controller, index thus is 0 */
- mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans),
- chans, 0);
- if (!mci) {
- rc = -ENOMEM;
- goto fail1;
- }
-
- debugf3("%s(): init mci\n", __func__);
- mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_DDR2;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I82975X_REVISION;
- mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
- mci->dev_name = pci_name(pdev);
- mci->edac_check = i82975x_check;
- mci->ctl_page_to_phys = NULL;
- debugf3("%s(): init pvt\n", __func__);
- pvt = (struct i82975x_pvt *) mci->pvt_info;
- pvt->mch_window = mch_window;
- i82975x_init_csrows(mci, pdev, mch_window);
- mci->scrub_mode = SCRUB_HW_SRC;
- i82975x_get_error_info(mci, &discard); /* clear counters */
-
- /* finalize this instance of memory controller with edac core */
- if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
- goto fail2;
- }
-
- /* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
- return 0;
-
-fail2:
- edac_mc_free(mci);
-
-fail1:
- iounmap(mch_window);
-fail0:
- return rc;
-}
-
-/* returns count (>= 0), or negative on error */
-static int __devinit i82975x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int rc;
-
- debugf0("%s()\n", __func__);
-
- if (pci_enable_device(pdev) < 0)
- return -EIO;
-
- rc = i82975x_probe1(pdev, ent->driver_data);
-
- if (mci_pdev == NULL)
- mci_pdev = pci_dev_get(pdev);
-
- return rc;
-}
-
-static void __devexit i82975x_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
- struct i82975x_pvt *pvt;
-
- debugf0("%s()\n", __func__);
-
- mci = edac_mc_del_mc(&pdev->dev);
- if (mci == NULL)
- return;
-
- pvt = mci->pvt_info;
- if (pvt->mch_window)
- iounmap( pvt->mch_window );
-
- edac_mc_free(mci);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(i82975x_pci_tbl) = {
- {
- PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I82975X
- },
- {
- 0,
- } /* 0 terminated list. */
-};
-
-MODULE_DEVICE_TABLE(pci, i82975x_pci_tbl);
-
-static struct pci_driver i82975x_driver = {
- .name = EDAC_MOD_STR,
- .probe = i82975x_init_one,
- .remove = __devexit_p(i82975x_remove_one),
- .id_table = i82975x_pci_tbl,
-};
-
-static int __init i82975x_init(void)
-{
- int pci_rc;
-
- debugf3("%s()\n", __func__);
-
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- pci_rc = pci_register_driver(&i82975x_driver);
- if (pci_rc < 0)
- goto fail0;
-
- if (mci_pdev == NULL) {
- mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82975_0, NULL);
-
- if (!mci_pdev) {
- debugf0("i82975x pci_get_device fail\n");
- pci_rc = -ENODEV;
- goto fail1;
- }
-
- pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl);
-
- if (pci_rc < 0) {
- debugf0("i82975x init fail\n");
- pci_rc = -ENODEV;
- goto fail1;
- }
- }
-
- return 0;
-
-fail1:
- pci_unregister_driver(&i82975x_driver);
-
-fail0:
- if (mci_pdev != NULL)
- pci_dev_put(mci_pdev);
-
- return pci_rc;
-}
-
-static void __exit i82975x_exit(void)
-{
- debugf3("%s()\n", __func__);
-
- pci_unregister_driver(&i82975x_driver);
-
- if (!i82975x_registered) {
- i82975x_remove_one(mci_pdev);
- pci_dev_put(mci_pdev);
- }
-}
-
-module_init(i82975x_init);
-module_exit(i82975x_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arvind R. <arvino55@gmail.com>");
-MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
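dual_channel_active() above decides the channel count by comparing the first four DRAM row boundary (DRB) registers of both channels: identical boundaries mean an interleaved-symmetric population, which the driver treats as dual-channel. A small sketch of that comparison follows, with the MCHBAR readb() accesses replaced by a plain array; the array and function names are illustrative only.

#include <stdint.h>
#include <stdbool.h>

/* drb[channel][rank] stands in for readb(mch_window + I82975X_DRB + ...) */
static bool drb_symmetric(const uint8_t drb[2][4])
{
	int rank;

	for (rank = 0; rank < 4; rank++) {
		/* any per-rank mismatch means an asymmetric population,
		 * which is handled as single-channel */
		if (drb[0][rank] != drb[1][rank])
			return false;
	}

	return true;
}

A probe routine would then size its csrows with chans = drb_symmetric(drb) + 1, mirroring i82975x_probe1() above.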
diff --git a/ANDROID_3.4.5/drivers/edac/mce_amd.c b/ANDROID_3.4.5/drivers/edac/mce_amd.c
deleted file mode 100644
index d0c372e3..00000000
--- a/ANDROID_3.4.5/drivers/edac/mce_amd.c
+++ /dev/null
@@ -1,822 +0,0 @@
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include "mce_amd.h"
-
-static struct amd_decoder_ops *fam_ops;
-
-static u8 xec_mask = 0xf;
-static u8 nb_err_cpumask = 0xf;
-
-static bool report_gart_errors;
-static void (*nb_bus_decoder)(int node_id, struct mce *m);
-
-void amd_report_gart_errors(bool v)
-{
- report_gart_errors = v;
-}
-EXPORT_SYMBOL_GPL(amd_report_gart_errors);
-
-void amd_register_ecc_decoder(void (*f)(int, struct mce *))
-{
- nb_bus_decoder = f;
-}
-EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);
-
-void amd_unregister_ecc_decoder(void (*f)(int, struct mce *))
-{
- if (nb_bus_decoder) {
- WARN_ON(nb_bus_decoder != f);
-
- nb_bus_decoder = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
-
-/*
- * string representation for the different MCA reported error types, see F3x48
- * or MSR0000_0411.
- */
-
-/* transaction type */
-const char * const tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
-EXPORT_SYMBOL_GPL(tt_msgs);
-
-/* cache level */
-const char * const ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
-EXPORT_SYMBOL_GPL(ll_msgs);
-
-/* memory transaction type */
-const char * const rrrr_msgs[] = {
- "GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
-};
-EXPORT_SYMBOL_GPL(rrrr_msgs);
-
-/* participating processor */
-const char * const pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
-EXPORT_SYMBOL_GPL(pp_msgs);
-
-/* request timeout */
-const char * const to_msgs[] = { "no timeout", "timed out" };
-EXPORT_SYMBOL_GPL(to_msgs);
-
-/* memory or i/o */
-const char * const ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
-EXPORT_SYMBOL_GPL(ii_msgs);
-
-static const char * const f15h_ic_mce_desc[] = {
- "UC during a demand linefill from L2",
- "Parity error during data load from IC",
- "Parity error for IC valid bit",
- "Main tag parity error",
- "Parity error in prediction queue",
- "PFB data/address parity error",
- "Parity error in the branch status reg",
- "PFB promotion address error",
- "Tag error during probe/victimization",
- "Parity error for IC probe tag valid bit",
- "PFB non-cacheable bit parity error",
- "PFB valid bit parity error", /* xec = 0xd */
-	"Microcode Patch Buffer",	/* xec = 0x10 */
- "uop queue",
- "insn buffer",
- "predecode buffer",
- "fetch address FIFO"
-};
-
-static const char * const f15h_cu_mce_desc[] = {
- "Fill ECC error on data fills", /* xec = 0x4 */
- "Fill parity error on insn fills",
- "Prefetcher request FIFO parity error",
- "PRQ address parity error",
- "PRQ data parity error",
- "WCC Tag ECC error",
- "WCC Data ECC error",
- "WCB Data parity error",
- "VB Data ECC or parity error",
- "L2 Tag ECC error", /* xec = 0x10 */
- "Hard L2 Tag ECC error",
- "Multiple hits on L2 tag",
- "XAB parity error",
- "PRB address parity error"
-};
-
-static const char * const nb_mce_desc[] = {
- "DRAM ECC error detected on the NB",
- "CRC error detected on HT link",
- "Link-defined sync error packets detected on HT link",
- "HT Master abort",
- "HT Target abort",
- "Invalid GART PTE entry during GART table walk",
- "Unsupported atomic RMW received from an IO link",
- "Watchdog timeout due to lack of progress",
- "DRAM ECC error detected on the NB",
- "SVM DMA Exclusion Vector error",
- "HT data error detected on link",
- "Protocol error (link, L3, probe filter)",
- "NB internal arrays parity error",
- "DRAM addr/ctl signals parity error",
- "IO link transmission error",
- "L3 data cache ECC error", /* xec = 0x1c */
- "L3 cache tag error",
- "L3 LRU parity bits error",
- "ECC Error in the Probe Filter directory"
-};
-
-static const char * const fr_ex_mce_desc[] = {
- "CPU Watchdog timer expire",
- "Wakeup array dest tag",
- "AG payload array",
- "EX payload array",
- "IDRF array",
- "Retire dispatch queue",
- "Mapper checkpoint array",
- "Physical register file EX0 port",
- "Physical register file EX1 port",
- "Physical register file AG0 port",
- "Physical register file AG1 port",
- "Flag register file",
- "DE error occurred"
-};
-
-static bool f12h_dc_mce(u16 ec, u8 xec)
-{
- bool ret = false;
-
- if (MEM_ERROR(ec)) {
- u8 ll = LL(ec);
- ret = true;
-
- if (ll == LL_L2)
- pr_cont("during L1 linefill from L2.\n");
- else if (ll == LL_L1)
- pr_cont("Data/Tag %s error.\n", R4_MSG(ec));
- else
- ret = false;
- }
- return ret;
-}
-
-static bool f10h_dc_mce(u16 ec, u8 xec)
-{
- if (R4(ec) == R4_GEN && LL(ec) == LL_L1) {
- pr_cont("during data scrub.\n");
- return true;
- }
- return f12h_dc_mce(ec, xec);
-}
-
-static bool k8_dc_mce(u16 ec, u8 xec)
-{
- if (BUS_ERROR(ec)) {
- pr_cont("during system linefill.\n");
- return true;
- }
-
- return f10h_dc_mce(ec, xec);
-}
-
-static bool f14h_dc_mce(u16 ec, u8 xec)
-{
- u8 r4 = R4(ec);
- bool ret = true;
-
- if (MEM_ERROR(ec)) {
-
- if (TT(ec) != TT_DATA || LL(ec) != LL_L1)
- return false;
-
- switch (r4) {
- case R4_DRD:
- case R4_DWR:
- pr_cont("Data/Tag parity error due to %s.\n",
- (r4 == R4_DRD ? "load/hw prf" : "store"));
- break;
- case R4_EVICT:
- pr_cont("Copyback parity error on a tag miss.\n");
- break;
- case R4_SNOOP:
- pr_cont("Tag parity error during snoop.\n");
- break;
- default:
- ret = false;
- }
- } else if (BUS_ERROR(ec)) {
-
- if ((II(ec) != II_MEM && II(ec) != II_IO) || LL(ec) != LL_LG)
- return false;
-
- pr_cont("System read data error on a ");
-
- switch (r4) {
- case R4_RD:
- pr_cont("TLB reload.\n");
- break;
- case R4_DWR:
- pr_cont("store.\n");
- break;
- case R4_DRD:
- pr_cont("load.\n");
- break;
- default:
- ret = false;
- }
- } else {
- ret = false;
- }
-
- return ret;
-}
-
-static bool f15h_dc_mce(u16 ec, u8 xec)
-{
- bool ret = true;
-
- if (MEM_ERROR(ec)) {
-
- switch (xec) {
- case 0x0:
- pr_cont("Data Array access error.\n");
- break;
-
- case 0x1:
- pr_cont("UC error during a linefill from L2/NB.\n");
- break;
-
- case 0x2:
- case 0x11:
- pr_cont("STQ access error.\n");
- break;
-
- case 0x3:
- pr_cont("SCB access error.\n");
- break;
-
- case 0x10:
- pr_cont("Tag error.\n");
- break;
-
- case 0x12:
- pr_cont("LDQ access error.\n");
- break;
-
- default:
- ret = false;
- }
- } else if (BUS_ERROR(ec)) {
-
- if (!xec)
- pr_cont("System Read Data Error.\n");
- else
- pr_cont(" Internal error condition type %d.\n", xec);
- } else
- ret = false;
-
- return ret;
-}
-
-static void amd_decode_dc_mce(struct mce *m)
-{
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, xec_mask);
-
- pr_emerg(HW_ERR "Data Cache Error: ");
-
- /* TLB error signatures are the same across families */
- if (TLB_ERROR(ec)) {
- if (TT(ec) == TT_DATA) {
- pr_cont("%s TLB %s.\n", LL_MSG(ec),
- ((xec == 2) ? "locked miss"
- : (xec ? "multimatch" : "parity")));
- return;
- }
- } else if (fam_ops->dc_mce(ec, xec))
- ;
- else
- pr_emerg(HW_ERR "Corrupted DC MCE info?\n");
-}
-
-static bool k8_ic_mce(u16 ec, u8 xec)
-{
- u8 ll = LL(ec);
- bool ret = true;
-
- if (!MEM_ERROR(ec))
- return false;
-
- if (ll == 0x2)
- pr_cont("during a linefill from L2.\n");
- else if (ll == 0x1) {
- switch (R4(ec)) {
- case R4_IRD:
- pr_cont("Parity error during data load.\n");
- break;
-
- case R4_EVICT:
- pr_cont("Copyback Parity/Victim error.\n");
- break;
-
- case R4_SNOOP:
- pr_cont("Tag Snoop error.\n");
- break;
-
- default:
- ret = false;
- break;
- }
- } else
- ret = false;
-
- return ret;
-}
-
-static bool f14h_ic_mce(u16 ec, u8 xec)
-{
- u8 r4 = R4(ec);
- bool ret = true;
-
- if (MEM_ERROR(ec)) {
- if (TT(ec) != 0 || LL(ec) != 1)
- ret = false;
-
- if (r4 == R4_IRD)
- pr_cont("Data/tag array parity error for a tag hit.\n");
- else if (r4 == R4_SNOOP)
- pr_cont("Tag error during snoop/victimization.\n");
- else
- ret = false;
- }
- return ret;
-}
-
-static bool f15h_ic_mce(u16 ec, u8 xec)
-{
- bool ret = true;
-
- if (!MEM_ERROR(ec))
- return false;
-
- switch (xec) {
- case 0x0 ... 0xa:
- pr_cont("%s.\n", f15h_ic_mce_desc[xec]);
- break;
-
- case 0xd:
- pr_cont("%s.\n", f15h_ic_mce_desc[xec-2]);
- break;
-
- case 0x10:
- pr_cont("%s.\n", f15h_ic_mce_desc[xec-4]);
- break;
-
- case 0x11 ... 0x14:
- pr_cont("Decoder %s parity error.\n", f15h_ic_mce_desc[xec-4]);
- break;
-
- default:
- ret = false;
- }
- return ret;
-}
-
-static void amd_decode_ic_mce(struct mce *m)
-{
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, xec_mask);
-
- pr_emerg(HW_ERR "Instruction Cache Error: ");
-
- if (TLB_ERROR(ec))
- pr_cont("%s TLB %s.\n", LL_MSG(ec),
- (xec ? "multimatch" : "parity error"));
- else if (BUS_ERROR(ec)) {
- bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58)));
-
- pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read"));
- } else if (fam_ops->ic_mce(ec, xec))
- ;
- else
- pr_emerg(HW_ERR "Corrupted IC MCE info?\n");
-}
-
-static void amd_decode_bu_mce(struct mce *m)
-{
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, xec_mask);
-
- pr_emerg(HW_ERR "Bus Unit Error");
-
- if (xec == 0x1)
- pr_cont(" in the write data buffers.\n");
- else if (xec == 0x3)
- pr_cont(" in the victim data buffers.\n");
- else if (xec == 0x2 && MEM_ERROR(ec))
- pr_cont(": %s error in the L2 cache tags.\n", R4_MSG(ec));
- else if (xec == 0x0) {
- if (TLB_ERROR(ec))
- pr_cont(": %s error in a Page Descriptor Cache or "
- "Guest TLB.\n", TT_MSG(ec));
- else if (BUS_ERROR(ec))
- pr_cont(": %s/ECC error in data read from NB: %s.\n",
- R4_MSG(ec), PP_MSG(ec));
- else if (MEM_ERROR(ec)) {
- u8 r4 = R4(ec);
-
- if (r4 >= 0x7)
- pr_cont(": %s error during data copyback.\n",
- R4_MSG(ec));
- else if (r4 <= 0x1)
- pr_cont(": %s parity/ECC error during data "
- "access from L2.\n", R4_MSG(ec));
- else
- goto wrong_bu_mce;
- } else
- goto wrong_bu_mce;
- } else
- goto wrong_bu_mce;
-
- return;
-
-wrong_bu_mce:
- pr_emerg(HW_ERR "Corrupted BU MCE info?\n");
-}
-
-static void amd_decode_cu_mce(struct mce *m)
-{
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, xec_mask);
-
- pr_emerg(HW_ERR "Combined Unit Error: ");
-
- if (TLB_ERROR(ec)) {
- if (xec == 0x0)
- pr_cont("Data parity TLB read error.\n");
- else if (xec == 0x1)
- pr_cont("Poison data provided for TLB fill.\n");
- else
- goto wrong_cu_mce;
- } else if (BUS_ERROR(ec)) {
- if (xec > 2)
- goto wrong_cu_mce;
-
- pr_cont("Error during attempted NB data read.\n");
- } else if (MEM_ERROR(ec)) {
- switch (xec) {
- case 0x4 ... 0xc:
- pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x4]);
- break;
-
- case 0x10 ... 0x14:
- pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x7]);
- break;
-
- default:
- goto wrong_cu_mce;
- }
- }
-
- return;
-
-wrong_cu_mce:
- pr_emerg(HW_ERR "Corrupted CU MCE info?\n");
-}
-
-static void amd_decode_ls_mce(struct mce *m)
-{
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, xec_mask);
-
- if (boot_cpu_data.x86 >= 0x14) {
- pr_emerg("You shouldn't be seeing an LS MCE on this cpu family,"
- " please report on LKML.\n");
- return;
- }
-
- pr_emerg(HW_ERR "Load Store Error");
-
- if (xec == 0x0) {
- u8 r4 = R4(ec);
-
- if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR))
- goto wrong_ls_mce;
-
- pr_cont(" during %s.\n", R4_MSG(ec));
- } else
- goto wrong_ls_mce;
-
- return;
-
-wrong_ls_mce:
- pr_emerg(HW_ERR "Corrupted LS MCE info?\n");
-}
-
-void amd_decode_nb_mce(struct mce *m)
-{
- struct cpuinfo_x86 *c = &boot_cpu_data;
- int node_id = amd_get_nb_id(m->extcpu);
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, 0x1f);
- u8 offset = 0;
-
- pr_emerg(HW_ERR "Northbridge Error (node %d): ", node_id);
-
- switch (xec) {
- case 0x0 ... 0xe:
-
- /* special handling for DRAM ECCs */
- if (xec == 0x0 || xec == 0x8) {
- /* no ECCs on F11h */
- if (c->x86 == 0x11)
- goto wrong_nb_mce;
-
- pr_cont("%s.\n", nb_mce_desc[xec]);
-
- if (nb_bus_decoder)
- nb_bus_decoder(node_id, m);
- return;
- }
- break;
-
- case 0xf:
- if (TLB_ERROR(ec))
- pr_cont("GART Table Walk data error.\n");
- else if (BUS_ERROR(ec))
- pr_cont("DMA Exclusion Vector Table Walk error.\n");
- else
- goto wrong_nb_mce;
- return;
-
- case 0x19:
- if (boot_cpu_data.x86 == 0x15)
- pr_cont("Compute Unit Data Error.\n");
- else
- goto wrong_nb_mce;
- return;
-
- case 0x1c ... 0x1f:
- offset = 13;
- break;
-
- default:
- goto wrong_nb_mce;
- }
-
- pr_cont("%s.\n", nb_mce_desc[xec - offset]);
- return;
-
-wrong_nb_mce:
- pr_emerg(HW_ERR "Corrupted NB MCE info?\n");
-}
-EXPORT_SYMBOL_GPL(amd_decode_nb_mce);
-
-static void amd_decode_fr_mce(struct mce *m)
-{
- struct cpuinfo_x86 *c = &boot_cpu_data;
- u8 xec = XEC(m->status, xec_mask);
-
- if (c->x86 == 0xf || c->x86 == 0x11)
- goto wrong_fr_mce;
-
- pr_emerg(HW_ERR "%s Error: ",
- (c->x86 == 0x15 ? "Execution Unit" : "FIROB"));
-
- if (xec == 0x0 || xec == 0xc)
- pr_cont("%s.\n", fr_ex_mce_desc[xec]);
- else if (xec < 0xd)
- pr_cont("%s parity error.\n", fr_ex_mce_desc[xec]);
- else
- goto wrong_fr_mce;
-
- return;
-
-wrong_fr_mce:
- pr_emerg(HW_ERR "Corrupted FR MCE info?\n");
-}
-
-static void amd_decode_fp_mce(struct mce *m)
-{
- u8 xec = XEC(m->status, xec_mask);
-
- pr_emerg(HW_ERR "Floating Point Unit Error: ");
-
- switch (xec) {
- case 0x1:
- pr_cont("Free List");
- break;
-
- case 0x2:
- pr_cont("Physical Register File");
- break;
-
- case 0x3:
- pr_cont("Retire Queue");
- break;
-
- case 0x4:
- pr_cont("Scheduler table");
- break;
-
- case 0x5:
- pr_cont("Status Register File");
- break;
-
- default:
- goto wrong_fp_mce;
- break;
- }
-
- pr_cont(" parity error.\n");
-
- return;
-
-wrong_fp_mce:
- pr_emerg(HW_ERR "Corrupted FP MCE info?\n");
-}
-
-static inline void amd_decode_err_code(u16 ec)
-{
-
- pr_emerg(HW_ERR "cache level: %s", LL_MSG(ec));
-
- if (BUS_ERROR(ec))
- pr_cont(", mem/io: %s", II_MSG(ec));
- else
- pr_cont(", tx: %s", TT_MSG(ec));
-
- if (MEM_ERROR(ec) || BUS_ERROR(ec)) {
- pr_cont(", mem-tx: %s", R4_MSG(ec));
-
- if (BUS_ERROR(ec))
- pr_cont(", part-proc: %s (%s)", PP_MSG(ec), TO_MSG(ec));
- }
-
- pr_cont("\n");
-}
-
-/*
- * Filter out unwanted MCE signatures here.
- */
-static bool amd_filter_mce(struct mce *m)
-{
- u8 xec = (m->status >> 16) & 0x1f;
-
- /*
- * NB GART TLB error reporting is disabled by default.
- */
- if (m->bank == 4 && xec == 0x5 && !report_gart_errors)
- return true;
-
- return false;
-}
-
-int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
-{
- struct mce *m = (struct mce *)data;
- struct cpuinfo_x86 *c = &boot_cpu_data;
- int ecc;
-
- if (amd_filter_mce(m))
- return NOTIFY_STOP;
-
- pr_emerg(HW_ERR "CPU:%d\tMC%d_STATUS[%s|%s|%s|%s|%s",
- m->extcpu, m->bank,
- ((m->status & MCI_STATUS_OVER) ? "Over" : "-"),
- ((m->status & MCI_STATUS_UC) ? "UE" : "CE"),
- ((m->status & MCI_STATUS_MISCV) ? "MiscV" : "-"),
- ((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
- ((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
-
- if (c->x86 == 0x15)
- pr_cont("|%s|%s",
- ((m->status & BIT_64(44)) ? "Deferred" : "-"),
- ((m->status & BIT_64(43)) ? "Poison" : "-"));
-
- /* do the two bits[14:13] together */
- ecc = (m->status >> 45) & 0x3;
- if (ecc)
- pr_cont("|%sECC", ((ecc == 2) ? "C" : "U"));
-
- pr_cont("]: 0x%016llx\n", m->status);
-
- if (m->status & MCI_STATUS_ADDRV)
- pr_emerg(HW_ERR "\tMC%d_ADDR: 0x%016llx\n", m->bank, m->addr);
-
- switch (m->bank) {
- case 0:
- amd_decode_dc_mce(m);
- break;
-
- case 1:
- amd_decode_ic_mce(m);
- break;
-
- case 2:
- if (c->x86 == 0x15)
- amd_decode_cu_mce(m);
- else
- amd_decode_bu_mce(m);
- break;
-
- case 3:
- amd_decode_ls_mce(m);
- break;
-
- case 4:
- amd_decode_nb_mce(m);
- break;
-
- case 5:
- amd_decode_fr_mce(m);
- break;
-
- case 6:
- amd_decode_fp_mce(m);
- break;
-
- default:
- break;
- }
-
- amd_decode_err_code(m->status & 0xffff);
-
- return NOTIFY_STOP;
-}
-EXPORT_SYMBOL_GPL(amd_decode_mce);
-
-static struct notifier_block amd_mce_dec_nb = {
- .notifier_call = amd_decode_mce,
-};
-
-static int __init mce_amd_init(void)
-{
- struct cpuinfo_x86 *c = &boot_cpu_data;
-
- if (c->x86_vendor != X86_VENDOR_AMD)
- return 0;
-
- if (c->x86 < 0xf || c->x86 > 0x15)
- return 0;
-
- fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
- if (!fam_ops)
- return -ENOMEM;
-
- switch (c->x86) {
- case 0xf:
- fam_ops->dc_mce = k8_dc_mce;
- fam_ops->ic_mce = k8_ic_mce;
- break;
-
- case 0x10:
- fam_ops->dc_mce = f10h_dc_mce;
- fam_ops->ic_mce = k8_ic_mce;
- break;
-
- case 0x11:
- fam_ops->dc_mce = k8_dc_mce;
- fam_ops->ic_mce = k8_ic_mce;
- break;
-
- case 0x12:
- fam_ops->dc_mce = f12h_dc_mce;
- fam_ops->ic_mce = k8_ic_mce;
- break;
-
- case 0x14:
- nb_err_cpumask = 0x3;
- fam_ops->dc_mce = f14h_dc_mce;
- fam_ops->ic_mce = f14h_ic_mce;
- break;
-
- case 0x15:
- xec_mask = 0x1f;
- fam_ops->dc_mce = f15h_dc_mce;
- fam_ops->ic_mce = f15h_ic_mce;
- break;
-
- default:
- printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86);
- kfree(fam_ops);
- return -EINVAL;
- }
-
- pr_info("MCE: In-kernel MCE decoding enabled.\n");
-
- mce_register_decode_chain(&amd_mce_dec_nb);
-
- return 0;
-}
-early_initcall(mce_amd_init);
-
-#ifdef MODULE
-static void __exit mce_amd_exit(void)
-{
- mce_unregister_decode_chain(&amd_mce_dec_nb);
- kfree(fam_ops);
-}
-
-MODULE_DESCRIPTION("AMD MCE decoder");
-MODULE_ALIAS("edac-mce-amd");
-MODULE_LICENSE("GPL");
-module_exit(mce_amd_exit);
-#endif
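The only external hook the decoder offers for DRAM ECC errors is the nb_bus_decoder callback: a memory-controller driver installs it with amd_register_ecc_decoder() and amd_decode_nb_mce() above invokes it for the DRAM ECC extended error codes. A minimal consumer might look like the sketch below; the registration calls and callback signature are declared in mce_amd.h, while the module and function names and the callback body are purely illustrative.

#include <linux/module.h>
#include <asm/mce.h>

#include "mce_amd.h"

static void example_ecc_decoder(int node_id, struct mce *m)
{
	/* illustrative only: a real driver would translate m->addr into a
	 * csrow/channel and feed it to the EDAC core */
	pr_info("DRAM ECC error on node %d, MC%d_ADDR 0x%016llx\n",
		node_id, m->bank, m->addr);
}

static int __init example_init(void)
{
	amd_register_ecc_decoder(example_ecc_decoder);
	return 0;
}

static void __exit example_exit(void)
{
	amd_unregister_ecc_decoder(example_ecc_decoder);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");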
diff --git a/ANDROID_3.4.5/drivers/edac/mce_amd.h b/ANDROID_3.4.5/drivers/edac/mce_amd.h
deleted file mode 100644
index c6074c5c..00000000
--- a/ANDROID_3.4.5/drivers/edac/mce_amd.h
+++ /dev/null
@@ -1,93 +0,0 @@
-#ifndef _EDAC_MCE_AMD_H
-#define _EDAC_MCE_AMD_H
-
-#include <linux/notifier.h>
-
-#include <asm/mce.h>
-
-#define BIT_64(n) (U64_C(1) << (n))
-
-#define EC(x) ((x) & 0xffff)
-#define XEC(x, mask) (((x) >> 16) & mask)
-
-#define LOW_SYNDROME(x) (((x) >> 15) & 0xff)
-#define HIGH_SYNDROME(x) (((x) >> 24) & 0xff)
-
-#define TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010)
-#define MEM_ERROR(x) (((x) & 0xFF00) == 0x0100)
-#define BUS_ERROR(x) (((x) & 0xF800) == 0x0800)
-
-#define TT(x) (((x) >> 2) & 0x3)
-#define TT_MSG(x) tt_msgs[TT(x)]
-#define II(x) (((x) >> 2) & 0x3)
-#define II_MSG(x) ii_msgs[II(x)]
-#define LL(x) ((x) & 0x3)
-#define LL_MSG(x) ll_msgs[LL(x)]
-#define TO(x) (((x) >> 8) & 0x1)
-#define TO_MSG(x) to_msgs[TO(x)]
-#define PP(x) (((x) >> 9) & 0x3)
-#define PP_MSG(x) pp_msgs[PP(x)]
-
-#define R4(x) (((x) >> 4) & 0xf)
-#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!")
-
-/*
- * F3x4C bits (MCi_STATUS' high half)
- */
-#define NBSH_ERR_CPU_VAL BIT(24)
-
-enum tt_ids {
- TT_INSTR = 0,
- TT_DATA,
- TT_GEN,
- TT_RESV,
-};
-
-enum ll_ids {
- LL_RESV = 0,
- LL_L1,
- LL_L2,
- LL_LG,
-};
-
-enum ii_ids {
- II_MEM = 0,
- II_RESV,
- II_IO,
- II_GEN,
-};
-
-enum rrrr_ids {
- R4_GEN = 0,
- R4_RD,
- R4_WR,
- R4_DRD,
- R4_DWR,
- R4_IRD,
- R4_PREF,
- R4_EVICT,
- R4_SNOOP,
-};
-
-extern const char * const tt_msgs[];
-extern const char * const ll_msgs[];
-extern const char * const rrrr_msgs[];
-extern const char * const pp_msgs[];
-extern const char * const to_msgs[];
-extern const char * const ii_msgs[];
-
-/*
- * per-family decoder ops
- */
-struct amd_decoder_ops {
- bool (*dc_mce)(u16, u8);
- bool (*ic_mce)(u16, u8);
-};
-
-void amd_report_gart_errors(bool);
-void amd_register_ecc_decoder(void (*f)(int, struct mce *));
-void amd_unregister_ecc_decoder(void (*f)(int, struct mce *));
-void amd_decode_nb_mce(struct mce *);
-int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data);
-
-#endif /* _EDAC_MCE_AMD_H */
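The TLB_ERROR/MEM_ERROR/BUS_ERROR tests and the TT/LL/R4/PP extractors above encode the MCA composite error-code layout held in MCi_STATUS[15:0]. A standalone sketch of the same bit tests is shown below, handy for checking a raw error code by hand; the sample values in main() are arbitrary illustrations, not taken from real logs.

#include <stdint.h>
#include <stdio.h>

static const char * const ll_str[] = { "RESV", "L1", "L2", "L3/GEN" };

static void classify(uint16_t ec)
{
	if ((ec & 0xfff0) == 0x0010)            /* TLB_ERROR() */
		printf("TLB error, level %s\n", ll_str[ec & 0x3]);
	else if ((ec & 0xff00) == 0x0100)       /* MEM_ERROR() */
		printf("memory error, level %s, R4 %d\n",
		       ll_str[ec & 0x3], (ec >> 4) & 0xf);
	else if ((ec & 0xf800) == 0x0800)       /* BUS_ERROR() */
		printf("bus error, PP %d, timeout %d, level %s\n",
		       (ec >> 9) & 0x3, (ec >> 8) & 0x1, ll_str[ec & 0x3]);
	else
		printf("unrecognized error code 0x%04x\n", ec);
}

int main(void)
{
	classify(0x0011);       /* TLB error, L1          */
	classify(0x0145);       /* memory error, DWR, L1  */
	classify(0x0813);       /* bus error, L3/GEN      */
	return 0;
}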
diff --git a/ANDROID_3.4.5/drivers/edac/mce_amd_inj.c b/ANDROID_3.4.5/drivers/edac/mce_amd_inj.c
deleted file mode 100644
index 66b5151c..00000000
--- a/ANDROID_3.4.5/drivers/edac/mce_amd_inj.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * A simple MCE injection facility for testing the MCE decoding code. This
- * driver should be built as module so that it can be loaded on production
- * kernels for testing purposes.
- *
- * This file may be distributed under the terms of the GNU General Public
- * License version 2.
- *
- * Copyright (c) 2010: Borislav Petkov <borislav.petkov@amd.com>
- * Advanced Micro Devices Inc.
- */
-
-#include <linux/kobject.h>
-#include <linux/device.h>
-#include <linux/edac.h>
-#include <linux/module.h>
-#include <asm/mce.h>
-
-#include "mce_amd.h"
-
-struct edac_mce_attr {
- struct attribute attr;
- ssize_t (*show) (struct kobject *kobj, struct edac_mce_attr *attr, char *buf);
- ssize_t (*store)(struct kobject *kobj, struct edac_mce_attr *attr,
- const char *buf, size_t count);
-};
-
-#define EDAC_MCE_ATTR(_name, _mode, _show, _store) \
-static struct edac_mce_attr mce_attr_##_name = __ATTR(_name, _mode, _show, _store)
-
-static struct kobject *mce_kobj;
-
-/*
- * Collect all the MCi_XXX settings
- */
-static struct mce i_mce;
-
-#define MCE_INJECT_STORE(reg) \
-static ssize_t edac_inject_##reg##_store(struct kobject *kobj, \
- struct edac_mce_attr *attr, \
- const char *data, size_t count)\
-{ \
- int ret = 0; \
- unsigned long value; \
- \
- ret = strict_strtoul(data, 16, &value); \
- if (ret < 0) \
- printk(KERN_ERR "Error writing MCE " #reg " field.\n"); \
- \
- i_mce.reg = value; \
- \
- return count; \
-}
-
-MCE_INJECT_STORE(status);
-MCE_INJECT_STORE(misc);
-MCE_INJECT_STORE(addr);
-
-#define MCE_INJECT_SHOW(reg) \
-static ssize_t edac_inject_##reg##_show(struct kobject *kobj, \
- struct edac_mce_attr *attr, \
- char *buf) \
-{ \
- return sprintf(buf, "0x%016llx\n", i_mce.reg); \
-}
-
-MCE_INJECT_SHOW(status);
-MCE_INJECT_SHOW(misc);
-MCE_INJECT_SHOW(addr);
-
-EDAC_MCE_ATTR(status, 0644, edac_inject_status_show, edac_inject_status_store);
-EDAC_MCE_ATTR(misc, 0644, edac_inject_misc_show, edac_inject_misc_store);
-EDAC_MCE_ATTR(addr, 0644, edac_inject_addr_show, edac_inject_addr_store);
-
-/*
- * This denotes into which bank we're injecting and triggers
- * the injection, at the same time.
- */
-static ssize_t edac_inject_bank_store(struct kobject *kobj,
- struct edac_mce_attr *attr,
- const char *data, size_t count)
-{
- int ret = 0;
- unsigned long value;
-
- ret = strict_strtoul(data, 10, &value);
- if (ret < 0) {
- printk(KERN_ERR "Invalid bank value!\n");
- return -EINVAL;
- }
-
- if (value > 5)
- if (boot_cpu_data.x86 != 0x15 || value > 6) {
- printk(KERN_ERR "Non-existent MCE bank: %lu\n", value);
- return -EINVAL;
- }
-
- i_mce.bank = value;
-
- amd_decode_mce(NULL, 0, &i_mce);
-
- return count;
-}
-
-static ssize_t edac_inject_bank_show(struct kobject *kobj,
- struct edac_mce_attr *attr, char *buf)
-{
- return sprintf(buf, "%d\n", i_mce.bank);
-}
-
-EDAC_MCE_ATTR(bank, 0644, edac_inject_bank_show, edac_inject_bank_store);
-
-static struct edac_mce_attr *sysfs_attrs[] = { &mce_attr_status, &mce_attr_misc,
- &mce_attr_addr, &mce_attr_bank
-};
-
-static int __init edac_init_mce_inject(void)
-{
- struct bus_type *edac_subsys = NULL;
- int i, err = 0;
-
- edac_subsys = edac_get_sysfs_subsys();
- if (!edac_subsys)
- return -EINVAL;
-
- mce_kobj = kobject_create_and_add("mce", &edac_subsys->dev_root->kobj);
- if (!mce_kobj) {
- printk(KERN_ERR "Error creating a mce kset.\n");
- err = -ENOMEM;
- goto err_mce_kobj;
- }
-
- for (i = 0; i < ARRAY_SIZE(sysfs_attrs); i++) {
- err = sysfs_create_file(mce_kobj, &sysfs_attrs[i]->attr);
- if (err) {
- printk(KERN_ERR "Error creating %s in sysfs.\n",
- sysfs_attrs[i]->attr.name);
- goto err_sysfs_create;
- }
- }
- return 0;
-
-err_sysfs_create:
- while (--i >= 0)
- sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr);
-
- kobject_del(mce_kobj);
-
-err_mce_kobj:
- edac_put_sysfs_subsys();
-
- return err;
-}
-
-static void __exit edac_exit_mce_inject(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(sysfs_attrs); i++)
- sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr);
-
- kobject_del(mce_kobj);
-
- edac_put_sysfs_subsys();
-}
-
-module_init(edac_init_mce_inject);
-module_exit(edac_exit_mce_inject);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Borislav Petkov <borislav.petkov@amd.com>");
-MODULE_AUTHOR("AMD Inc.");
-MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding");
diff --git a/ANDROID_3.4.5/drivers/edac/mpc85xx_edac.c b/ANDROID_3.4.5/drivers/edac/mpc85xx_edac.c
deleted file mode 100644
index 73464a62..00000000
--- a/ANDROID_3.4.5/drivers/edac/mpc85xx_edac.c
+++ /dev/null
@@ -1,1235 +0,0 @@
-/*
- * Freescale MPC85xx Memory Controller kernel module
- *
- * Author: Dave Jiang <djiang@mvista.com>
- *
- * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ctype.h>
-#include <linux/io.h>
-#include <linux/mod_devicetable.h>
-#include <linux/edac.h>
-#include <linux/smp.h>
-#include <linux/gfp.h>
-
-#include <linux/of_platform.h>
-#include <linux/of_device.h>
-#include "edac_module.h"
-#include "edac_core.h"
-#include "mpc85xx_edac.h"
-
-static int edac_dev_idx;
-#ifdef CONFIG_PCI
-static int edac_pci_idx;
-#endif
-static int edac_mc_idx;
-
-static u32 orig_ddr_err_disable;
-static u32 orig_ddr_err_sbe;
-
-/*
- * PCI Err defines
- */
-#ifdef CONFIG_PCI
-static u32 orig_pci_err_cap_dr;
-static u32 orig_pci_err_en;
-#endif
-
-static u32 orig_l2_err_disable;
-#ifdef CONFIG_FSL_SOC_BOOKE
-static u32 orig_hid1[2];
-#endif
-
-/************************ MC SYSFS parts ***********************************/
-
-static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci,
- char *data)
-{
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- return sprintf(data, "0x%08x",
- in_be32(pdata->mc_vbase +
- MPC85XX_MC_DATA_ERR_INJECT_HI));
-}
-
-static ssize_t mpc85xx_mc_inject_data_lo_show(struct mem_ctl_info *mci,
- char *data)
-{
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- return sprintf(data, "0x%08x",
- in_be32(pdata->mc_vbase +
- MPC85XX_MC_DATA_ERR_INJECT_LO));
-}
-
-static ssize_t mpc85xx_mc_inject_ctrl_show(struct mem_ctl_info *mci, char *data)
-{
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- return sprintf(data, "0x%08x",
- in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
-}
-
-static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- if (isdigit(*data)) {
- out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
- simple_strtoul(data, NULL, 0));
- return count;
- }
- return 0;
-}
-
-static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- if (isdigit(*data)) {
- out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
- simple_strtoul(data, NULL, 0));
- return count;
- }
- return 0;
-}
-
-static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- if (isdigit(*data)) {
- out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
- simple_strtoul(data, NULL, 0));
- return count;
- }
- return 0;
-}
-
-static struct mcidev_sysfs_attribute mpc85xx_mc_sysfs_attributes[] = {
- {
- .attr = {
- .name = "inject_data_hi",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = mpc85xx_mc_inject_data_hi_show,
- .store = mpc85xx_mc_inject_data_hi_store},
- {
- .attr = {
- .name = "inject_data_lo",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = mpc85xx_mc_inject_data_lo_show,
- .store = mpc85xx_mc_inject_data_lo_store},
- {
- .attr = {
- .name = "inject_ctrl",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = mpc85xx_mc_inject_ctrl_show,
- .store = mpc85xx_mc_inject_ctrl_store},
-
- /* End of list */
- {
- .attr = {.name = NULL}
- }
-};
-
-static void mpc85xx_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
-{
- mci->mc_driver_sysfs_attributes = mpc85xx_mc_sysfs_attributes;
-}
-
-/**************************** PCI Err device ***************************/
-#ifdef CONFIG_PCI
-
-static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
-{
- struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
- u32 err_detect;
-
- err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
-
- /* master aborts can happen during PCI config cycles */
- if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
- out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
- return;
- }
-
- printk(KERN_ERR "PCI error(s) detected\n");
- printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);
-
- printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
- in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
- printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
- in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
- printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
- in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
- printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
- in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
- printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
- in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));
-
- /* clear error bits */
- out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
-
- if (err_detect & PCI_EDE_PERR_MASK)
- edac_pci_handle_pe(pci, pci->ctl_name);
-
- if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
- edac_pci_handle_npe(pci, pci->ctl_name);
-}
-
-static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
-{
- struct edac_pci_ctl_info *pci = dev_id;
- struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
- u32 err_detect;
-
- err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
-
- if (!err_detect)
- return IRQ_NONE;
-
- mpc85xx_pci_check(pci);
-
- return IRQ_HANDLED;
-}
-
-static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
-{
- struct edac_pci_ctl_info *pci;
- struct mpc85xx_pci_pdata *pdata;
- struct resource r;
- int res = 0;
-
- if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
- return -ENOMEM;
-
- pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
- if (!pci)
- return -ENOMEM;
-
- pdata = pci->pvt_info;
- pdata->name = "mpc85xx_pci_err";
- pdata->irq = NO_IRQ;
- dev_set_drvdata(&op->dev, pci);
- pci->dev = &op->dev;
- pci->mod_name = EDAC_MOD_STR;
- pci->ctl_name = pdata->name;
- pci->dev_name = dev_name(&op->dev);
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- pci->edac_check = mpc85xx_pci_check;
-
- pdata->edac_idx = edac_pci_idx++;
-
- res = of_address_to_resource(op->dev.of_node, 0, &r);
- if (res) {
- printk(KERN_ERR "%s: Unable to get resource for "
- "PCI err regs\n", __func__);
- goto err;
- }
-
- /* we only need the error registers */
- r.start += 0xe00;
-
- if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
- pdata->name)) {
- printk(KERN_ERR "%s: Error while requesting mem region\n",
- __func__);
- res = -EBUSY;
- goto err;
- }
-
- pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
- if (!pdata->pci_vbase) {
- printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
- res = -ENOMEM;
- goto err;
- }
-
- orig_pci_err_cap_dr =
- in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
-
- /* PCI master abort is expected during config cycles */
- out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
-
- orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
-
- /* disable master abort reporting */
- out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
-
- /* clear error bits */
- out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
-
- if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
- debugf3("%s(): failed edac_pci_add_device()\n", __func__);
- goto err;
- }
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
- res = devm_request_irq(&op->dev, pdata->irq,
- mpc85xx_pci_isr, IRQF_DISABLED,
- "[EDAC] PCI err", pci);
- if (res < 0) {
- printk(KERN_ERR
-			       "%s: Unable to request irq %d for "
- "MPC85xx PCI err\n", __func__, pdata->irq);
- irq_dispose_mapping(pdata->irq);
- res = -ENODEV;
- goto err2;
- }
-
- printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
- pdata->irq);
- }
-
- devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
- debugf3("%s(): success\n", __func__);
- printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
-
- return 0;
-
-err2:
- edac_pci_del_device(&op->dev);
-err:
- edac_pci_free_ctl_info(pci);
- devres_release_group(&op->dev, mpc85xx_pci_err_probe);
- return res;
-}
-
-static int mpc85xx_pci_err_remove(struct platform_device *op)
-{
- struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
- struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
-
- debugf0("%s()\n", __func__);
-
- out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
- orig_pci_err_cap_dr);
-
- out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);
-
- edac_pci_del_device(pci->dev);
-
- if (edac_op_state == EDAC_OPSTATE_INT)
- irq_dispose_mapping(pdata->irq);
-
- edac_pci_free_ctl_info(pci);
-
- return 0;
-}
-
-static struct of_device_id mpc85xx_pci_err_of_match[] = {
- {
- .compatible = "fsl,mpc8540-pcix",
- },
- {
- .compatible = "fsl,mpc8540-pci",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match);
-
-static struct platform_driver mpc85xx_pci_err_driver = {
- .probe = mpc85xx_pci_err_probe,
- .remove = __devexit_p(mpc85xx_pci_err_remove),
- .driver = {
- .name = "mpc85xx_pci_err",
- .owner = THIS_MODULE,
- .of_match_table = mpc85xx_pci_err_of_match,
- },
-};
-
-#endif /* CONFIG_PCI */
-
-/**************************** L2 Err device ***************************/
-
-/************************ L2 SYSFS parts ***********************************/
-
-static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
- *edac_dev, char *data)
-{
- struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
- return sprintf(data, "0x%08x",
- in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
-}
-
-static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
- *edac_dev, char *data)
-{
- struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
- return sprintf(data, "0x%08x",
- in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
-}
-
-static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
- *edac_dev, char *data)
-{
- struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
- return sprintf(data, "0x%08x",
- in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
-}
-
-static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
- *edac_dev, const char *data,
- size_t count)
-{
- struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
- if (isdigit(*data)) {
- out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
- simple_strtoul(data, NULL, 0));
- return count;
- }
- return 0;
-}
-
-static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
- *edac_dev, const char *data,
- size_t count)
-{
- struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
- if (isdigit(*data)) {
- out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
- simple_strtoul(data, NULL, 0));
- return count;
- }
- return 0;
-}
-
-static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
- *edac_dev, const char *data,
- size_t count)
-{
- struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
- if (isdigit(*data)) {
- out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
- simple_strtoul(data, NULL, 0));
- return count;
- }
- return 0;
-}
-
-static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
- {
- .attr = {
- .name = "inject_data_hi",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = mpc85xx_l2_inject_data_hi_show,
- .store = mpc85xx_l2_inject_data_hi_store},
- {
- .attr = {
- .name = "inject_data_lo",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = mpc85xx_l2_inject_data_lo_show,
- .store = mpc85xx_l2_inject_data_lo_store},
- {
- .attr = {
- .name = "inject_ctrl",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = mpc85xx_l2_inject_ctrl_show,
- .store = mpc85xx_l2_inject_ctrl_store},
-
- /* End of list */
- {
- .attr = {.name = NULL}
- }
-};
-
-static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
- *edac_dev)
-{
- edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
-}
-
-/***************************** L2 ops ***********************************/
-
-static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
-{
- struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
- u32 err_detect;
-
- err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
-
- if (!(err_detect & L2_EDE_MASK))
- return;
-
- printk(KERN_ERR "ECC Error in CPU L2 cache\n");
- printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
- printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
- in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
-	printk(KERN_ERR "L2 Error Capture Data Low Register: 0x%08x\n",
- in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
- printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
- in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
- printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
- in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
- printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
- in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));
-
- /* clear error detect register */
- out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);
-
- if (err_detect & L2_EDE_CE_MASK)
- edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
-
- if (err_detect & L2_EDE_UE_MASK)
- edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
-}
-
-static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
-{
- struct edac_device_ctl_info *edac_dev = dev_id;
- struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
- u32 err_detect;
-
- err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
-
- if (!(err_detect & L2_EDE_MASK))
- return IRQ_NONE;
-
- mpc85xx_l2_check(edac_dev);
-
- return IRQ_HANDLED;
-}
-
-static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
-{
- struct edac_device_ctl_info *edac_dev;
- struct mpc85xx_l2_pdata *pdata;
- struct resource r;
- int res;
-
- if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
- return -ENOMEM;
-
- edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
- "cpu", 1, "L", 1, 2, NULL, 0,
- edac_dev_idx);
- if (!edac_dev) {
- devres_release_group(&op->dev, mpc85xx_l2_err_probe);
- return -ENOMEM;
- }
-
- pdata = edac_dev->pvt_info;
- pdata->name = "mpc85xx_l2_err";
- pdata->irq = NO_IRQ;
- edac_dev->dev = &op->dev;
- dev_set_drvdata(edac_dev->dev, edac_dev);
- edac_dev->ctl_name = pdata->name;
- edac_dev->dev_name = pdata->name;
-
- res = of_address_to_resource(op->dev.of_node, 0, &r);
- if (res) {
- printk(KERN_ERR "%s: Unable to get resource for "
- "L2 err regs\n", __func__);
- goto err;
- }
-
- /* we only need the error registers */
- r.start += 0xe00;
-
- if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
- pdata->name)) {
- printk(KERN_ERR "%s: Error while requesting mem region\n",
- __func__);
- res = -EBUSY;
- goto err;
- }
-
- pdata->l2_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
- if (!pdata->l2_vbase) {
- printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
- res = -ENOMEM;
- goto err;
- }
-
- out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);
-
- orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);
-
- /* clear the err_dis */
- out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);
-
- edac_dev->mod_name = EDAC_MOD_STR;
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- edac_dev->edac_check = mpc85xx_l2_check;
-
- mpc85xx_set_l2_sysfs_attributes(edac_dev);
-
- pdata->edac_idx = edac_dev_idx++;
-
- if (edac_device_add_device(edac_dev) > 0) {
- debugf3("%s(): failed edac_device_add_device()\n", __func__);
- goto err;
- }
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
- res = devm_request_irq(&op->dev, pdata->irq,
- mpc85xx_l2_isr, IRQF_DISABLED,
- "[EDAC] L2 err", edac_dev);
- if (res < 0) {
- printk(KERN_ERR
-			       "%s: Unable to request irq %d for "
- "MPC85xx L2 err\n", __func__, pdata->irq);
- irq_dispose_mapping(pdata->irq);
- res = -ENODEV;
- goto err2;
- }
-
- printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
- pdata->irq);
-
- edac_dev->op_state = OP_RUNNING_INTERRUPT;
-
- out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
- }
-
- devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
-
- debugf3("%s(): success\n", __func__);
- printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
-
- return 0;
-
-err2:
- edac_device_del_device(&op->dev);
-err:
- devres_release_group(&op->dev, mpc85xx_l2_err_probe);
- edac_device_free_ctl_info(edac_dev);
- return res;
-}
-
-static int mpc85xx_l2_err_remove(struct platform_device *op)
-{
- struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
- struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
-
- debugf0("%s()\n", __func__);
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
- irq_dispose_mapping(pdata->irq);
- }
-
- out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
- edac_device_del_device(&op->dev);
- edac_device_free_ctl_info(edac_dev);
- return 0;
-}
-
-static struct of_device_id mpc85xx_l2_err_of_match[] = {
-/* deprecate the fsl,85.. forms in the future, 2.6.30? */
- { .compatible = "fsl,8540-l2-cache-controller", },
- { .compatible = "fsl,8541-l2-cache-controller", },
- { .compatible = "fsl,8544-l2-cache-controller", },
- { .compatible = "fsl,8548-l2-cache-controller", },
- { .compatible = "fsl,8555-l2-cache-controller", },
- { .compatible = "fsl,8568-l2-cache-controller", },
- { .compatible = "fsl,mpc8536-l2-cache-controller", },
- { .compatible = "fsl,mpc8540-l2-cache-controller", },
- { .compatible = "fsl,mpc8541-l2-cache-controller", },
- { .compatible = "fsl,mpc8544-l2-cache-controller", },
- { .compatible = "fsl,mpc8548-l2-cache-controller", },
- { .compatible = "fsl,mpc8555-l2-cache-controller", },
- { .compatible = "fsl,mpc8560-l2-cache-controller", },
- { .compatible = "fsl,mpc8568-l2-cache-controller", },
- { .compatible = "fsl,mpc8569-l2-cache-controller", },
- { .compatible = "fsl,mpc8572-l2-cache-controller", },
- { .compatible = "fsl,p1020-l2-cache-controller", },
- { .compatible = "fsl,p1021-l2-cache-controller", },
- { .compatible = "fsl,p2020-l2-cache-controller", },
- {},
-};
-MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
-
-static struct platform_driver mpc85xx_l2_err_driver = {
- .probe = mpc85xx_l2_err_probe,
- .remove = mpc85xx_l2_err_remove,
- .driver = {
- .name = "mpc85xx_l2_err",
- .owner = THIS_MODULE,
- .of_match_table = mpc85xx_l2_err_of_match,
- },
-};
-
-/**************************** MC Err device ***************************/
-
-/*
- * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
- * MPC8572 User's Manual. Each line represents a syndrome bit column as a
- * 64-bit value, but split into an upper and lower 32-bit chunk. The labels
- * below correspond to Freescale's manuals.
- */
-static unsigned int ecc_table[16] = {
- /* MSB LSB */
- /* [0:31] [32:63] */
- 0xf00fe11e, 0xc33c0ff7, /* Syndrome bit 7 */
- 0x00ff00ff, 0x00fff0ff,
- 0x0f0f0f0f, 0x0f0fff00,
- 0x11113333, 0x7777000f,
- 0x22224444, 0x8888222f,
- 0x44448888, 0xffff4441,
- 0x8888ffff, 0x11118882,
- 0xffff1111, 0x22221114, /* Syndrome bit 0 */
-};
-
-/*
- * Calculate the correct ECC value for a 64-bit value specified by high:low
- */
-static u8 calculate_ecc(u32 high, u32 low)
-{
- u32 mask_low;
- u32 mask_high;
- int bit_cnt;
- u8 ecc = 0;
- int i;
- int j;
-
- for (i = 0; i < 8; i++) {
- mask_high = ecc_table[i * 2];
- mask_low = ecc_table[i * 2 + 1];
- bit_cnt = 0;
-
- for (j = 0; j < 32; j++) {
- if ((mask_high >> j) & 1)
- bit_cnt ^= (high >> j) & 1;
- if ((mask_low >> j) & 1)
- bit_cnt ^= (low >> j) & 1;
- }
-
- ecc |= bit_cnt << i;
- }
-
- return ecc;
-}
-
-/*
- * Create the syndrome code which is generated if the data line specified by
- * 'bit' failed, e.g. one of the 8-bit codes seen in Table 8-55 in the MPC8641
- * User's Manual and 9-61 in the MPC8572 User's Manual.
- */
-static u8 syndrome_from_bit(unsigned int bit)
-{
- int i;
- u8 syndrome = 0;
-
- /*
- * Cycle through the upper or lower 32-bit portion of each value in
- * ecc_table depending on if 'bit' is in the upper or lower half of
- * 64-bit data.
- */
- for (i = bit < 32; i < 16; i += 2)
- syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
-
- return syndrome;
-}
-
-/*
- * Decode data and ecc syndrome to determine what went wrong
- * Note: This can only decode single-bit errors
- */
-static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
- int *bad_data_bit, int *bad_ecc_bit)
-{
- int i;
- u8 syndrome;
-
- *bad_data_bit = -1;
- *bad_ecc_bit = -1;
-
- /*
- * Calculate the ECC of the captured data and XOR it with the captured
- * ECC to find an ECC syndrome value we can search for
- */
- syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
-
- /* Check if a data line is stuck... */
- for (i = 0; i < 64; i++) {
- if (syndrome == syndrome_from_bit(i)) {
- *bad_data_bit = i;
- return;
- }
- }
-
- /* If data is correct, check ECC bits for errors... */
- for (i = 0; i < 8; i++) {
- if ((syndrome >> i) & 0x1) {
- *bad_ecc_bit = i;
- return;
- }
- }
-}
-
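The three helpers above implement a purely XOR-based (linear) code, so the syndrome left by a single flipped data bit is exactly the column that syndrome_from_bit() reconstructs, which is what sbe_ecc_decode() searches for. The user-space harness below is an illustrative sketch only, not part of the driver: it assumes calculate_ecc(), syndrome_from_bit() and sbe_ecc_decode() from this file are pasted in after the typedefs, and that u8/u32 map to the C99 fixed-width types.

/* sbe_decode_demo.c - illustrative only, not part of the driver */
#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;
typedef uint32_t u32;

/* paste ecc_table[], calculate_ecc(), syndrome_from_bit() and
 * sbe_ecc_decode() from above here */

int main(void)
{
	u32 hi = 0x01234567, lo = 0x89abcdef;
	u8 ecc = calculate_ecc(hi, lo);		/* ECC stored with the data */
	u32 cap_lo = lo ^ (1u << 5);		/* simulate data bit 5 flipping */
	int bad_data, bad_ecc;

	sbe_ecc_decode(hi, cap_lo, ecc, &bad_data, &bad_ecc);
	printf("bad data bit %d, bad ecc bit %d\n", bad_data, bad_ecc);
	return 0;
}

With the assumptions above this should report data bit 5 as faulty and no faulty ECC bit, mirroring what mpc85xx_mc_check() logs for a real single-bit error.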
-static void mpc85xx_mc_check(struct mem_ctl_info *mci)
-{
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- struct csrow_info *csrow;
- u32 bus_width;
- u32 err_detect;
- u32 syndrome;
- u32 err_addr;
- u32 pfn;
- int row_index;
- u32 cap_high;
- u32 cap_low;
- int bad_data_bit;
- int bad_ecc_bit;
-
- err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
- if (!err_detect)
- return;
-
- mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
- err_detect);
-
- /* no more processing if not ECC bit errors */
- if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
- return;
- }
-
- syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);
-
- /* Mask off appropriate bits of syndrome based on bus width */
- bus_width = (in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG) &
- DSC_DBW_MASK) ? 32 : 64;
- if (bus_width == 64)
- syndrome &= 0xff;
- else
- syndrome &= 0xffff;
-
- err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS);
- pfn = err_addr >> PAGE_SHIFT;
-
- for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
- csrow = &mci->csrows[row_index];
- if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
- break;
- }
-
- cap_high = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_HI);
- cap_low = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_LO);
-
- /*
- * Analyze single-bit errors on 64-bit wide buses
- * TODO: Add support for 32-bit wide buses
- */
- if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
- sbe_ecc_decode(cap_high, cap_low, syndrome,
- &bad_data_bit, &bad_ecc_bit);
-
- if (bad_data_bit != -1)
- mpc85xx_mc_printk(mci, KERN_ERR,
- "Faulty Data bit: %d\n", bad_data_bit);
- if (bad_ecc_bit != -1)
- mpc85xx_mc_printk(mci, KERN_ERR,
- "Faulty ECC bit: %d\n", bad_ecc_bit);
-
- mpc85xx_mc_printk(mci, KERN_ERR,
- "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
- cap_high ^ (1 << (bad_data_bit - 32)),
- cap_low ^ (1 << bad_data_bit),
- syndrome ^ (1 << bad_ecc_bit));
- }
-
- mpc85xx_mc_printk(mci, KERN_ERR,
- "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
- cap_high, cap_low, syndrome);
- mpc85xx_mc_printk(mci, KERN_ERR, "Err addr: %#8.8x\n", err_addr);
- mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
-
- /* we are out of range */
- if (row_index == mci->nr_csrows)
- mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
-
- if (err_detect & DDR_EDE_SBE)
- edac_mc_handle_ce(mci, pfn, err_addr & ~PAGE_MASK,
- syndrome, row_index, 0, mci->ctl_name);
-
- if (err_detect & DDR_EDE_MBE)
- edac_mc_handle_ue(mci, pfn, err_addr & ~PAGE_MASK,
- row_index, mci->ctl_name);
-
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
-}
-
-static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
-{
- struct mem_ctl_info *mci = dev_id;
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- u32 err_detect;
-
- err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
- if (!err_detect)
- return IRQ_NONE;
-
- mpc85xx_mc_check(mci);
-
- return IRQ_HANDLED;
-}
-
-static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
-{
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- struct csrow_info *csrow;
- u32 sdram_ctl;
- u32 sdtype;
- enum mem_type mtype;
- u32 cs_bnds;
- int index;
-
- sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
-
- sdtype = sdram_ctl & DSC_SDTYPE_MASK;
- if (sdram_ctl & DSC_RD_EN) {
- switch (sdtype) {
- case DSC_SDTYPE_DDR:
- mtype = MEM_RDDR;
- break;
- case DSC_SDTYPE_DDR2:
- mtype = MEM_RDDR2;
- break;
- case DSC_SDTYPE_DDR3:
- mtype = MEM_RDDR3;
- break;
- default:
- mtype = MEM_UNKNOWN;
- break;
- }
- } else {
- switch (sdtype) {
- case DSC_SDTYPE_DDR:
- mtype = MEM_DDR;
- break;
- case DSC_SDTYPE_DDR2:
- mtype = MEM_DDR2;
- break;
- case DSC_SDTYPE_DDR3:
- mtype = MEM_DDR3;
- break;
- default:
- mtype = MEM_UNKNOWN;
- break;
- }
- }
-
- for (index = 0; index < mci->nr_csrows; index++) {
- u32 start;
- u32 end;
-
- csrow = &mci->csrows[index];
- cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
- (index * MPC85XX_MC_CS_BNDS_OFS));
-
- start = (cs_bnds & 0xffff0000) >> 16;
- end = (cs_bnds & 0x0000ffff);
-
- if (start == end)
- continue; /* not populated */
-
- start <<= (24 - PAGE_SHIFT);
- end <<= (24 - PAGE_SHIFT);
- end |= (1 << (24 - PAGE_SHIFT)) - 1;
-
- csrow->first_page = start;
- csrow->last_page = end;
- csrow->nr_pages = end + 1 - start;
- csrow->grain = 8;
- csrow->mtype = mtype;
- csrow->dtype = DEV_UNKNOWN;
- if (sdram_ctl & DSC_X32_EN)
- csrow->dtype = DEV_X32;
- csrow->edac_mode = EDAC_SECDED;
- }
-}
-
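For reference, the loop above turns the 16 MiB-granular SAn/EAn fields of each CS_BNDS register into page numbers. The following stand-alone sketch of the same arithmetic is illustrative only; it assumes 4 KiB pages (PAGE_SHIFT == 12) and a hypothetical CS_BNDS value.

/* cs_bnds_demo.c - illustrative only, not part of the driver */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned page_shift = 12;		/* assume 4 KiB pages */
	uint32_t cs_bnds = 0x0000003f;		/* hypothetical: SA = 0x00, EA = 0x3f */
	uint32_t start = (cs_bnds & 0xffff0000) >> 16;
	uint32_t end = cs_bnds & 0x0000ffff;

	start <<= (24 - page_shift);
	end <<= (24 - page_shift);
	end |= (1u << (24 - page_shift)) - 1;

	/* prints first_page=0x0 last_page=0x3ffff nr_pages=0x40000 (a 1 GiB row) */
	printf("first_page=0x%x last_page=0x%x nr_pages=0x%x\n",
	       (unsigned)start, (unsigned)end, (unsigned)(end + 1 - start));
	return 0;
}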
-static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
-{
- struct mem_ctl_info *mci;
- struct mpc85xx_mc_pdata *pdata;
- struct resource r;
- u32 sdram_ctl;
- int res;
-
- if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
- return -ENOMEM;
-
- mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx);
- if (!mci) {
- devres_release_group(&op->dev, mpc85xx_mc_err_probe);
- return -ENOMEM;
- }
-
- pdata = mci->pvt_info;
- pdata->name = "mpc85xx_mc_err";
- pdata->irq = NO_IRQ;
- mci->dev = &op->dev;
- pdata->edac_idx = edac_mc_idx++;
- dev_set_drvdata(mci->dev, mci);
- mci->ctl_name = pdata->name;
- mci->dev_name = pdata->name;
-
- res = of_address_to_resource(op->dev.of_node, 0, &r);
- if (res) {
- printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
- __func__);
- goto err;
- }
-
- if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
- pdata->name)) {
- printk(KERN_ERR "%s: Error while requesting mem region\n",
- __func__);
- res = -EBUSY;
- goto err;
- }
-
- pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
- if (!pdata->mc_vbase) {
- printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
- res = -ENOMEM;
- goto err;
- }
-
- sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
- if (!(sdram_ctl & DSC_ECC_EN)) {
- /* no ECC */
- printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
- res = -ENODEV;
- goto err;
- }
-
- debugf3("%s(): init mci\n", __func__);
- mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
- MEM_FLAG_DDR | MEM_FLAG_DDR2;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_SECDED;
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = MPC85XX_REVISION;
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- mci->edac_check = mpc85xx_mc_check;
-
- mci->ctl_page_to_phys = NULL;
-
- mci->scrub_mode = SCRUB_SW_SRC;
-
- mpc85xx_set_mc_sysfs_attributes(mci);
-
- mpc85xx_init_csrows(mci);
-
- /* store the original error disable bits */
- orig_ddr_err_disable =
- in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);
-
- /* clear all error bits */
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
-
- if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
- goto err;
- }
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
- DDR_EIE_MBEE | DDR_EIE_SBEE);
-
- /* store the original error management threshold */
- orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
- MPC85XX_MC_ERR_SBE) & 0xff0000;
-
- /* set threshold to 1 error per interrupt */
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);
-
- /* register interrupts */
- pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
- res = devm_request_irq(&op->dev, pdata->irq,
- mpc85xx_mc_isr,
- IRQF_DISABLED | IRQF_SHARED,
- "[EDAC] MC err", mci);
- if (res < 0) {
- printk(KERN_ERR "%s: Unable to request irq %d for "
- "MPC85xx DRAM ERR\n", __func__, pdata->irq);
- irq_dispose_mapping(pdata->irq);
- res = -ENODEV;
- goto err2;
- }
-
- printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
- pdata->irq);
- }
-
- devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
- debugf3("%s(): success\n", __func__);
- printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
-
- return 0;
-
-err2:
- edac_mc_del_mc(&op->dev);
-err:
- devres_release_group(&op->dev, mpc85xx_mc_err_probe);
- edac_mc_free(mci);
- return res;
-}
-
-static int mpc85xx_mc_err_remove(struct platform_device *op)
-{
- struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
-
- debugf0("%s()\n", __func__);
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
- irq_dispose_mapping(pdata->irq);
- }
-
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
- orig_ddr_err_disable);
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);
-
- edac_mc_del_mc(&op->dev);
- edac_mc_free(mci);
- return 0;
-}
-
-static struct of_device_id mpc85xx_mc_err_of_match[] = {
-/* deprecate the fsl,85.. forms in the future, 2.6.30? */
- { .compatible = "fsl,8540-memory-controller", },
- { .compatible = "fsl,8541-memory-controller", },
- { .compatible = "fsl,8544-memory-controller", },
- { .compatible = "fsl,8548-memory-controller", },
- { .compatible = "fsl,8555-memory-controller", },
- { .compatible = "fsl,8568-memory-controller", },
- { .compatible = "fsl,mpc8536-memory-controller", },
- { .compatible = "fsl,mpc8540-memory-controller", },
- { .compatible = "fsl,mpc8541-memory-controller", },
- { .compatible = "fsl,mpc8544-memory-controller", },
- { .compatible = "fsl,mpc8548-memory-controller", },
- { .compatible = "fsl,mpc8555-memory-controller", },
- { .compatible = "fsl,mpc8560-memory-controller", },
- { .compatible = "fsl,mpc8568-memory-controller", },
- { .compatible = "fsl,mpc8569-memory-controller", },
- { .compatible = "fsl,mpc8572-memory-controller", },
- { .compatible = "fsl,mpc8349-memory-controller", },
- { .compatible = "fsl,p1020-memory-controller", },
- { .compatible = "fsl,p1021-memory-controller", },
- { .compatible = "fsl,p2020-memory-controller", },
- { .compatible = "fsl,qoriq-memory-controller", },
- {},
-};
-MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
-
-static struct platform_driver mpc85xx_mc_err_driver = {
- .probe = mpc85xx_mc_err_probe,
- .remove = mpc85xx_mc_err_remove,
- .driver = {
- .name = "mpc85xx_mc_err",
- .owner = THIS_MODULE,
- .of_match_table = mpc85xx_mc_err_of_match,
- },
-};
-
-#ifdef CONFIG_FSL_SOC_BOOKE
-static void __init mpc85xx_mc_clear_rfxe(void *data)
-{
- orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
- mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~HID1_RFXE));
-}
-#endif
-
-static int __init mpc85xx_mc_init(void)
-{
- int res = 0;
- u32 pvr = 0;
-
- printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
-	       "(C) 2006 MontaVista Software\n");
-
- /* make sure error reporting method is sane */
- switch (edac_op_state) {
- case EDAC_OPSTATE_POLL:
- case EDAC_OPSTATE_INT:
- break;
- default:
- edac_op_state = EDAC_OPSTATE_INT;
- break;
- }
-
- res = platform_driver_register(&mpc85xx_mc_err_driver);
- if (res)
-		printk(KERN_WARNING EDAC_MOD_STR " MC fails to register\n");
-
- res = platform_driver_register(&mpc85xx_l2_err_driver);
- if (res)
-		printk(KERN_WARNING EDAC_MOD_STR " L2 fails to register\n");
-
-#ifdef CONFIG_PCI
- res = platform_driver_register(&mpc85xx_pci_err_driver);
- if (res)
-		printk(KERN_WARNING EDAC_MOD_STR " PCI fails to register\n");
-#endif
-
-#ifdef CONFIG_FSL_SOC_BOOKE
- pvr = mfspr(SPRN_PVR);
-
- if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
- (PVR_VER(pvr) == PVR_VER_E500V2)) {
- /*
- * need to clear HID1[RFXE] to disable machine check int
- * so we can catch it
- */
- if (edac_op_state == EDAC_OPSTATE_INT)
- on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
- }
-#endif
-
- return 0;
-}
-
-module_init(mpc85xx_mc_init);
-
-#ifdef CONFIG_FSL_SOC_BOOKE
-static void __exit mpc85xx_mc_restore_hid1(void *data)
-{
- mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
-}
-#endif
-
-static void __exit mpc85xx_mc_exit(void)
-{
-#ifdef CONFIG_FSL_SOC_BOOKE
- u32 pvr = mfspr(SPRN_PVR);
-
- if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
- (PVR_VER(pvr) == PVR_VER_E500V2)) {
- on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
- }
-#endif
-#ifdef CONFIG_PCI
- platform_driver_unregister(&mpc85xx_pci_err_driver);
-#endif
- platform_driver_unregister(&mpc85xx_l2_err_driver);
- platform_driver_unregister(&mpc85xx_mc_err_driver);
-}
-
-module_exit(mpc85xx_mc_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Montavista Software, Inc.");
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state,
- "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
diff --git a/ANDROID_3.4.5/drivers/edac/mpc85xx_edac.h b/ANDROID_3.4.5/drivers/edac/mpc85xx_edac.h
deleted file mode 100644
index 932016f2..00000000
--- a/ANDROID_3.4.5/drivers/edac/mpc85xx_edac.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Freescale MPC85xx Memory Controller kernel module
- * Author: Dave Jiang <djiang@mvista.com>
- *
- * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- */
-#ifndef _MPC85XX_EDAC_H_
-#define _MPC85XX_EDAC_H_
-
-#define MPC85XX_REVISION " Ver: 2.0.0"
-#define EDAC_MOD_STR "MPC85xx_edac"
-
-#define mpc85xx_printk(level, fmt, arg...) \
- edac_printk(level, "MPC85xx", fmt, ##arg)
-
-#define mpc85xx_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "MPC85xx", fmt, ##arg)
-
-/*
- * DRAM error defines
- */
-
-/* DDR_SDRAM_CFG */
-#define MPC85XX_MC_DDR_SDRAM_CFG 0x0110
-#define MPC85XX_MC_CS_BNDS_0 0x0000
-#define MPC85XX_MC_CS_BNDS_1 0x0008
-#define MPC85XX_MC_CS_BNDS_2 0x0010
-#define MPC85XX_MC_CS_BNDS_3 0x0018
-#define MPC85XX_MC_CS_BNDS_OFS 0x0008
-
-#define MPC85XX_MC_DATA_ERR_INJECT_HI 0x0e00
-#define MPC85XX_MC_DATA_ERR_INJECT_LO 0x0e04
-#define MPC85XX_MC_ECC_ERR_INJECT 0x0e08
-#define MPC85XX_MC_CAPTURE_DATA_HI 0x0e20
-#define MPC85XX_MC_CAPTURE_DATA_LO 0x0e24
-#define MPC85XX_MC_CAPTURE_ECC 0x0e28
-#define MPC85XX_MC_ERR_DETECT 0x0e40
-#define MPC85XX_MC_ERR_DISABLE 0x0e44
-#define MPC85XX_MC_ERR_INT_EN 0x0e48
-#define MPC85XX_MC_CAPTURE_ATRIBUTES 0x0e4c
-#define MPC85XX_MC_CAPTURE_ADDRESS 0x0e50
-#define MPC85XX_MC_ERR_SBE 0x0e58
-
-#define DSC_MEM_EN 0x80000000
-#define DSC_ECC_EN 0x20000000
-#define DSC_RD_EN 0x10000000
-#define DSC_DBW_MASK 0x00180000
-#define DSC_DBW_32 0x00080000
-#define DSC_DBW_64 0x00000000
-
-#define DSC_SDTYPE_MASK 0x07000000
-
-#define DSC_SDTYPE_DDR 0x02000000
-#define DSC_SDTYPE_DDR2 0x03000000
-#define DSC_SDTYPE_DDR3 0x07000000
-#define DSC_X32_EN 0x00000020
-
-/* Err_Int_En */
-#define DDR_EIE_MSEE 0x1 /* memory select */
-#define DDR_EIE_SBEE 0x4 /* single-bit ECC error */
-#define DDR_EIE_MBEE 0x8 /* multi-bit ECC error */
-
-/* Err_Detect */
-#define DDR_EDE_MSE 0x1 /* memory select */
-#define DDR_EDE_SBE 0x4 /* single-bit ECC error */
-#define DDR_EDE_MBE 0x8 /* multi-bit ECC error */
-#define DDR_EDE_MME 0x80000000 /* multiple memory errors */
-
-/* Err_Disable */
-#define DDR_EDI_MSED 0x1 /* memory select disable */
-#define DDR_EDI_SBED 0x4 /* single-bit ECC error disable */
-#define DDR_EDI_MBED 0x8 /* multi-bit ECC error disable */
-
-/*
- * L2 Err defines
- */
-#define MPC85XX_L2_ERRINJHI 0x0000
-#define MPC85XX_L2_ERRINJLO 0x0004
-#define MPC85XX_L2_ERRINJCTL 0x0008
-#define MPC85XX_L2_CAPTDATAHI 0x0020
-#define MPC85XX_L2_CAPTDATALO 0x0024
-#define MPC85XX_L2_CAPTECC 0x0028
-#define MPC85XX_L2_ERRDET 0x0040
-#define MPC85XX_L2_ERRDIS 0x0044
-#define MPC85XX_L2_ERRINTEN 0x0048
-#define MPC85XX_L2_ERRATTR 0x004c
-#define MPC85XX_L2_ERRADDR 0x0050
-#define MPC85XX_L2_ERRCTL 0x0058
-
-/* Error Interrupt Enable */
-#define L2_EIE_L2CFGINTEN 0x1
-#define L2_EIE_SBECCINTEN 0x4
-#define L2_EIE_MBECCINTEN 0x8
-#define L2_EIE_TPARINTEN 0x10
-#define L2_EIE_MASK (L2_EIE_L2CFGINTEN | L2_EIE_SBECCINTEN | \
- L2_EIE_MBECCINTEN | L2_EIE_TPARINTEN)
-
-/* Error Detect */
-#define L2_EDE_L2CFGERR 0x1
-#define L2_EDE_SBECCERR 0x4
-#define L2_EDE_MBECCERR 0x8
-#define L2_EDE_TPARERR 0x10
-#define L2_EDE_MULL2ERR 0x80000000
-
-#define L2_EDE_CE_MASK L2_EDE_SBECCERR
-#define L2_EDE_UE_MASK (L2_EDE_L2CFGERR | L2_EDE_MBECCERR | \
- L2_EDE_TPARERR)
-#define L2_EDE_MASK (L2_EDE_L2CFGERR | L2_EDE_SBECCERR | \
- L2_EDE_MBECCERR | L2_EDE_TPARERR | L2_EDE_MULL2ERR)
-
-/*
- * PCI Err defines
- */
-#define PCI_EDE_TOE 0x00000001
-#define PCI_EDE_SCM 0x00000002
-#define PCI_EDE_IRMSV 0x00000004
-#define PCI_EDE_ORMSV 0x00000008
-#define PCI_EDE_OWMSV 0x00000010
-#define PCI_EDE_TGT_ABRT 0x00000020
-#define PCI_EDE_MST_ABRT 0x00000040
-#define PCI_EDE_TGT_PERR 0x00000080
-#define PCI_EDE_MST_PERR 0x00000100
-#define PCI_EDE_RCVD_SERR 0x00000200
-#define PCI_EDE_ADDR_PERR 0x00000400
-#define PCI_EDE_MULTI_ERR 0x80000000
-
-#define PCI_EDE_PERR_MASK (PCI_EDE_TGT_PERR | PCI_EDE_MST_PERR | \
- PCI_EDE_ADDR_PERR)
-
-#define MPC85XX_PCI_ERR_DR 0x0000
-#define MPC85XX_PCI_ERR_CAP_DR 0x0004
-#define MPC85XX_PCI_ERR_EN 0x0008
-#define MPC85XX_PCI_ERR_ATTRIB 0x000c
-#define MPC85XX_PCI_ERR_ADDR 0x0010
-#define MPC85XX_PCI_ERR_EXT_ADDR 0x0014
-#define MPC85XX_PCI_ERR_DL 0x0018
-#define MPC85XX_PCI_ERR_DH 0x001c
-#define MPC85XX_PCI_GAS_TIMR 0x0020
-#define MPC85XX_PCI_PCIX_TIMR 0x0024
-
-struct mpc85xx_mc_pdata {
- char *name;
- int edac_idx;
- void __iomem *mc_vbase;
- int irq;
-};
-
-struct mpc85xx_l2_pdata {
- char *name;
- int edac_idx;
- void __iomem *l2_vbase;
- int irq;
-};
-
-struct mpc85xx_pci_pdata {
- char *name;
- int edac_idx;
- void __iomem *pci_vbase;
- int irq;
-};
-
-#endif
diff --git a/ANDROID_3.4.5/drivers/edac/mv64x60_edac.c b/ANDROID_3.4.5/drivers/edac/mv64x60_edac.c
deleted file mode 100644
index 7e5ff367..00000000
--- a/ANDROID_3.4.5/drivers/edac/mv64x60_edac.c
+++ /dev/null
@@ -1,890 +0,0 @@
-/*
- * Marvell MV64x60 Memory Controller kernel module for PPC platforms
- *
- * Author: Dave Jiang <djiang@mvista.com>
- *
- * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/edac.h>
-#include <linux/gfp.h>
-
-#include "edac_core.h"
-#include "edac_module.h"
-#include "mv64x60_edac.h"
-
-static const char *mv64x60_ctl_name = "MV64x60";
-static int edac_dev_idx;
-static int edac_pci_idx;
-static int edac_mc_idx;
-
-/*********************** PCI err device **********************************/
-#ifdef CONFIG_PCI
-static void mv64x60_pci_check(struct edac_pci_ctl_info *pci)
-{
- struct mv64x60_pci_pdata *pdata = pci->pvt_info;
- u32 cause;
-
- cause = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
- if (!cause)
- return;
-
- printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose);
- printk(KERN_ERR "Cause register: 0x%08x\n", cause);
- printk(KERN_ERR "Address Low: 0x%08x\n",
- in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
- printk(KERN_ERR "Address High: 0x%08x\n",
- in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
- printk(KERN_ERR "Attribute: 0x%08x\n",
- in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
- printk(KERN_ERR "Command: 0x%08x\n",
- in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));
- out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, ~cause);
-
- if (cause & MV64X60_PCI_PE_MASK)
- edac_pci_handle_pe(pci, pci->ctl_name);
-
- if (!(cause & MV64X60_PCI_PE_MASK))
- edac_pci_handle_npe(pci, pci->ctl_name);
-}
-
-static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
-{
- struct edac_pci_ctl_info *pci = dev_id;
- struct mv64x60_pci_pdata *pdata = pci->pvt_info;
- u32 val;
-
- val = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
- if (!val)
- return IRQ_NONE;
-
- mv64x60_pci_check(pci);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
- * errata FEr-#11 and FEr-#16 for the 64460, it should be 0 on that chip as
- * well. IOW, don't set bit 0.
- */
-
-/* Erratum FEr PCI-#16: clear bit 0 of PCI SERRn Mask reg. */
-static int __init mv64x60_pci_fixup(struct platform_device *pdev)
-{
- struct resource *r;
- void __iomem *pci_serr;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!r) {
- printk(KERN_ERR "%s: Unable to get resource for "
- "PCI err regs\n", __func__);
- return -ENOENT;
- }
-
- pci_serr = ioremap(r->start, resource_size(r));
- if (!pci_serr)
- return -ENOMEM;
-
- out_le32(pci_serr, in_le32(pci_serr) & ~0x1);
- iounmap(pci_serr);
-
- return 0;
-}
-
-static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
-{
- struct edac_pci_ctl_info *pci;
- struct mv64x60_pci_pdata *pdata;
- struct resource *r;
- int res = 0;
-
- if (!devres_open_group(&pdev->dev, mv64x60_pci_err_probe, GFP_KERNEL))
- return -ENOMEM;
-
- pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mv64x60_pci_err");
- if (!pci)
- return -ENOMEM;
-
- pdata = pci->pvt_info;
-
- pdata->pci_hose = pdev->id;
-	pdata->name = "mv64x60_pci_err";
- pdata->irq = NO_IRQ;
- platform_set_drvdata(pdev, pci);
- pci->dev = &pdev->dev;
- pci->dev_name = dev_name(&pdev->dev);
- pci->mod_name = EDAC_MOD_STR;
- pci->ctl_name = pdata->name;
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- pci->edac_check = mv64x60_pci_check;
-
- pdata->edac_idx = edac_pci_idx++;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- printk(KERN_ERR "%s: Unable to get resource for "
- "PCI err regs\n", __func__);
- res = -ENOENT;
- goto err;
- }
-
- if (!devm_request_mem_region(&pdev->dev,
- r->start,
- resource_size(r),
- pdata->name)) {
- printk(KERN_ERR "%s: Error while requesting mem region\n",
- __func__);
- res = -EBUSY;
- goto err;
- }
-
- pdata->pci_vbase = devm_ioremap(&pdev->dev,
- r->start,
- resource_size(r));
- if (!pdata->pci_vbase) {
- printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
- res = -ENOMEM;
- goto err;
- }
-
- res = mv64x60_pci_fixup(pdev);
- if (res < 0) {
- printk(KERN_ERR "%s: PCI fixup failed\n", __func__);
- goto err;
- }
-
- out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0);
- out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0);
- out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK,
- MV64X60_PCIx_ERR_MASK_VAL);
-
- if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
- debugf3("%s(): failed edac_pci_add_device()\n", __func__);
- goto err;
- }
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- pdata->irq = platform_get_irq(pdev, 0);
- res = devm_request_irq(&pdev->dev,
- pdata->irq,
- mv64x60_pci_isr,
- IRQF_DISABLED,
- "[EDAC] PCI err",
- pci);
- if (res < 0) {
- printk(KERN_ERR "%s: Unable to request irq %d for "
- "MV64x60 PCI ERR\n", __func__, pdata->irq);
- res = -ENODEV;
- goto err2;
- }
- printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
- pdata->irq);
- }
-
- devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);
-
- /* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
-
- return 0;
-
-err2:
- edac_pci_del_device(&pdev->dev);
-err:
- edac_pci_free_ctl_info(pci);
- devres_release_group(&pdev->dev, mv64x60_pci_err_probe);
- return res;
-}
-
-static int mv64x60_pci_err_remove(struct platform_device *pdev)
-{
- struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
-
- debugf0("%s()\n", __func__);
-
- edac_pci_del_device(&pdev->dev);
-
- edac_pci_free_ctl_info(pci);
-
- return 0;
-}
-
-static struct platform_driver mv64x60_pci_err_driver = {
- .probe = mv64x60_pci_err_probe,
- .remove = __devexit_p(mv64x60_pci_err_remove),
- .driver = {
- .name = "mv64x60_pci_err",
- }
-};
-
-#endif /* CONFIG_PCI */
-
-/*********************** SRAM err device **********************************/
-static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev)
-{
- struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
- u32 cause;
-
- cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
- if (!cause)
- return;
-
- printk(KERN_ERR "Error in internal SRAM\n");
- printk(KERN_ERR "Cause register: 0x%08x\n", cause);
- printk(KERN_ERR "Address Low: 0x%08x\n",
- in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
- printk(KERN_ERR "Address High: 0x%08x\n",
- in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
- printk(KERN_ERR "Data Low: 0x%08x\n",
- in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
- printk(KERN_ERR "Data High: 0x%08x\n",
- in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
- printk(KERN_ERR "Parity: 0x%08x\n",
- in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));
- out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
-
- edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
-}
-
-static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
-{
- struct edac_device_ctl_info *edac_dev = dev_id;
- struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
- u32 cause;
-
- cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
- if (!cause)
- return IRQ_NONE;
-
- mv64x60_sram_check(edac_dev);
-
- return IRQ_HANDLED;
-}
-
-static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
-{
- struct edac_device_ctl_info *edac_dev;
- struct mv64x60_sram_pdata *pdata;
- struct resource *r;
- int res = 0;
-
- if (!devres_open_group(&pdev->dev, mv64x60_sram_err_probe, GFP_KERNEL))
- return -ENOMEM;
-
- edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
- "sram", 1, NULL, 0, 0, NULL, 0,
- edac_dev_idx);
- if (!edac_dev) {
- devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
- return -ENOMEM;
- }
-
- pdata = edac_dev->pvt_info;
- pdata->name = "mv64x60_sram_err";
- pdata->irq = NO_IRQ;
- edac_dev->dev = &pdev->dev;
- platform_set_drvdata(pdev, edac_dev);
- edac_dev->dev_name = dev_name(&pdev->dev);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- printk(KERN_ERR "%s: Unable to get resource for "
- "SRAM err regs\n", __func__);
- res = -ENOENT;
- goto err;
- }
-
- if (!devm_request_mem_region(&pdev->dev,
- r->start,
- resource_size(r),
- pdata->name)) {
-		printk(KERN_ERR "%s: Error while requesting mem region\n",
- __func__);
- res = -EBUSY;
- goto err;
- }
-
- pdata->sram_vbase = devm_ioremap(&pdev->dev,
- r->start,
- resource_size(r));
- if (!pdata->sram_vbase) {
- printk(KERN_ERR "%s: Unable to setup SRAM err regs\n",
- __func__);
- res = -ENOMEM;
- goto err;
- }
-
- /* setup SRAM err registers */
- out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
-
- edac_dev->mod_name = EDAC_MOD_STR;
- edac_dev->ctl_name = pdata->name;
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- edac_dev->edac_check = mv64x60_sram_check;
-
- pdata->edac_idx = edac_dev_idx++;
-
- if (edac_device_add_device(edac_dev) > 0) {
- debugf3("%s(): failed edac_device_add_device()\n", __func__);
- goto err;
- }
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- pdata->irq = platform_get_irq(pdev, 0);
- res = devm_request_irq(&pdev->dev,
- pdata->irq,
- mv64x60_sram_isr,
- IRQF_DISABLED,
- "[EDAC] SRAM err",
- edac_dev);
- if (res < 0) {
- printk(KERN_ERR
- "%s: Unable to request irq %d for "
- "MV64x60 SRAM ERR\n", __func__, pdata->irq);
- res = -ENODEV;
- goto err2;
- }
-
- printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for SRAM Err\n",
- pdata->irq);
- }
-
- devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);
-
- /* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
-
- return 0;
-
-err2:
- edac_device_del_device(&pdev->dev);
-err:
- devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
- edac_device_free_ctl_info(edac_dev);
- return res;
-}
-
-static int mv64x60_sram_err_remove(struct platform_device *pdev)
-{
- struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
-
- debugf0("%s()\n", __func__);
-
- edac_device_del_device(&pdev->dev);
- edac_device_free_ctl_info(edac_dev);
-
- return 0;
-}
-
-static struct platform_driver mv64x60_sram_err_driver = {
- .probe = mv64x60_sram_err_probe,
- .remove = mv64x60_sram_err_remove,
- .driver = {
- .name = "mv64x60_sram_err",
- }
-};
-
-/*********************** CPU err device **********************************/
-static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
-{
- struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
- u32 cause;
-
- cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
- MV64x60_CPU_CAUSE_MASK;
- if (!cause)
- return;
-
- printk(KERN_ERR "Error on CPU interface\n");
- printk(KERN_ERR "Cause register: 0x%08x\n", cause);
- printk(KERN_ERR "Address Low: 0x%08x\n",
- in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
- printk(KERN_ERR "Address High: 0x%08x\n",
- in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
- printk(KERN_ERR "Data Low: 0x%08x\n",
- in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
- printk(KERN_ERR "Data High: 0x%08x\n",
- in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
- printk(KERN_ERR "Parity: 0x%08x\n",
- in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
- out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
-
- edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
-}
-
-static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
-{
- struct edac_device_ctl_info *edac_dev = dev_id;
- struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
- u32 cause;
-
- cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
- MV64x60_CPU_CAUSE_MASK;
- if (!cause)
- return IRQ_NONE;
-
- mv64x60_cpu_check(edac_dev);
-
- return IRQ_HANDLED;
-}
-
-static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
-{
- struct edac_device_ctl_info *edac_dev;
- struct resource *r;
- struct mv64x60_cpu_pdata *pdata;
- int res = 0;
-
- if (!devres_open_group(&pdev->dev, mv64x60_cpu_err_probe, GFP_KERNEL))
- return -ENOMEM;
-
- edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
- "cpu", 1, NULL, 0, 0, NULL, 0,
- edac_dev_idx);
- if (!edac_dev) {
- devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
- return -ENOMEM;
- }
-
- pdata = edac_dev->pvt_info;
- pdata->name = "mv64x60_cpu_err";
- pdata->irq = NO_IRQ;
- edac_dev->dev = &pdev->dev;
- platform_set_drvdata(pdev, edac_dev);
- edac_dev->dev_name = dev_name(&pdev->dev);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- printk(KERN_ERR "%s: Unable to get resource for "
- "CPU err regs\n", __func__);
- res = -ENOENT;
- goto err;
- }
-
- if (!devm_request_mem_region(&pdev->dev,
- r->start,
- resource_size(r),
- pdata->name)) {
- printk(KERN_ERR "%s: Error while requesting mem region\n",
- __func__);
- res = -EBUSY;
- goto err;
- }
-
- pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev,
- r->start,
- resource_size(r));
- if (!pdata->cpu_vbase[0]) {
- printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
- res = -ENOMEM;
- goto err;
- }
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!r) {
- printk(KERN_ERR "%s: Unable to get resource for "
- "CPU err regs\n", __func__);
- res = -ENOENT;
- goto err;
- }
-
- if (!devm_request_mem_region(&pdev->dev,
- r->start,
- resource_size(r),
- pdata->name)) {
- printk(KERN_ERR "%s: Error while requesting mem region\n",
- __func__);
- res = -EBUSY;
- goto err;
- }
-
- pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev,
- r->start,
- resource_size(r));
- if (!pdata->cpu_vbase[1]) {
- printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
- res = -ENOMEM;
- goto err;
- }
-
- /* setup CPU err registers */
- out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
- out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0);
- out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0x000000ff);
-
- edac_dev->mod_name = EDAC_MOD_STR;
- edac_dev->ctl_name = pdata->name;
- if (edac_op_state == EDAC_OPSTATE_POLL)
- edac_dev->edac_check = mv64x60_cpu_check;
-
- pdata->edac_idx = edac_dev_idx++;
-
- if (edac_device_add_device(edac_dev) > 0) {
- debugf3("%s(): failed edac_device_add_device()\n", __func__);
- goto err;
- }
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- pdata->irq = platform_get_irq(pdev, 0);
- res = devm_request_irq(&pdev->dev,
- pdata->irq,
- mv64x60_cpu_isr,
- IRQF_DISABLED,
- "[EDAC] CPU err",
- edac_dev);
- if (res < 0) {
- printk(KERN_ERR
- "%s: Unable to request irq %d for MV64x60 "
- "CPU ERR\n", __func__, pdata->irq);
- res = -ENODEV;
- goto err2;
- }
-
- printk(KERN_INFO EDAC_MOD_STR
- " acquired irq %d for CPU Err\n", pdata->irq);
- }
-
- devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);
-
- /* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
-
- return 0;
-
-err2:
- edac_device_del_device(&pdev->dev);
-err:
- devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
- edac_device_free_ctl_info(edac_dev);
- return res;
-}
-
-static int mv64x60_cpu_err_remove(struct platform_device *pdev)
-{
- struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
-
- debugf0("%s()\n", __func__);
-
- edac_device_del_device(&pdev->dev);
- edac_device_free_ctl_info(edac_dev);
- return 0;
-}
-
-static struct platform_driver mv64x60_cpu_err_driver = {
- .probe = mv64x60_cpu_err_probe,
- .remove = mv64x60_cpu_err_remove,
- .driver = {
- .name = "mv64x60_cpu_err",
- }
-};
-
-/*********************** DRAM err device **********************************/
-
-static void mv64x60_mc_check(struct mem_ctl_info *mci)
-{
- struct mv64x60_mc_pdata *pdata = mci->pvt_info;
- u32 reg;
- u32 err_addr;
- u32 sdram_ecc;
- u32 comp_ecc;
- u32 syndrome;
-
- reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
- if (!reg)
- return;
-
- err_addr = reg & ~0x3;
- sdram_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
- comp_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
- syndrome = sdram_ecc ^ comp_ecc;
-
- /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
- if (!(reg & 0x1))
- edac_mc_handle_ce(mci, err_addr >> PAGE_SHIFT,
- err_addr & PAGE_MASK, syndrome, 0, 0,
- mci->ctl_name);
- else /* 2 bit error, UE */
- edac_mc_handle_ue(mci, err_addr >> PAGE_SHIFT,
- err_addr & PAGE_MASK, 0, mci->ctl_name);
-
- /* clear the error */
- out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
-}
-
-static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
-{
- struct mem_ctl_info *mci = dev_id;
- struct mv64x60_mc_pdata *pdata = mci->pvt_info;
- u32 reg;
-
- reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
- if (!reg)
- return IRQ_NONE;
-
- /* writing 0's to the ECC err addr in check function clears irq */
- mv64x60_mc_check(mci);
-
- return IRQ_HANDLED;
-}
-
-static void get_total_mem(struct mv64x60_mc_pdata *pdata)
-{
- struct device_node *np = NULL;
- const unsigned int *reg;
-
- np = of_find_node_by_type(NULL, "memory");
- if (!np)
- return;
-
- reg = of_get_property(np, "reg", NULL);
-
- pdata->total_mem = reg[1];
-}
-
-static void mv64x60_init_csrows(struct mem_ctl_info *mci,
- struct mv64x60_mc_pdata *pdata)
-{
- struct csrow_info *csrow;
- u32 devtype;
- u32 ctl;
-
- get_total_mem(pdata);
-
- ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
-
- csrow = &mci->csrows[0];
- csrow->first_page = 0;
- csrow->nr_pages = pdata->total_mem >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->grain = 8;
-
- csrow->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
-
- devtype = (ctl >> 20) & 0x3;
- switch (devtype) {
- case 0x0:
- csrow->dtype = DEV_X32;
- break;
- case 0x2: /* could be X8 too, but no way to tell */
- csrow->dtype = DEV_X16;
- break;
- case 0x3:
- csrow->dtype = DEV_X4;
- break;
- default:
- csrow->dtype = DEV_UNKNOWN;
- break;
- }
-
- csrow->edac_mode = EDAC_SECDED;
-}
-
-static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
-{
- struct mem_ctl_info *mci;
- struct mv64x60_mc_pdata *pdata;
- struct resource *r;
- u32 ctl;
- int res = 0;
-
- if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
- return -ENOMEM;
-
- mci = edac_mc_alloc(sizeof(struct mv64x60_mc_pdata), 1, 1, edac_mc_idx);
- if (!mci) {
-		printk(KERN_ERR "%s: No memory for MC err\n", __func__);
- devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
- return -ENOMEM;
- }
-
- pdata = mci->pvt_info;
- mci->dev = &pdev->dev;
- platform_set_drvdata(pdev, mci);
- pdata->name = "mv64x60_mc_err";
- pdata->irq = NO_IRQ;
- mci->dev_name = dev_name(&pdev->dev);
- pdata->edac_idx = edac_mc_idx++;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- printk(KERN_ERR "%s: Unable to get resource for "
- "MC err regs\n", __func__);
- res = -ENOENT;
- goto err;
- }
-
- if (!devm_request_mem_region(&pdev->dev,
- r->start,
- resource_size(r),
- pdata->name)) {
- printk(KERN_ERR "%s: Error while requesting mem region\n",
- __func__);
- res = -EBUSY;
- goto err;
- }
-
- pdata->mc_vbase = devm_ioremap(&pdev->dev,
- r->start,
- resource_size(r));
- if (!pdata->mc_vbase) {
- printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
- res = -ENOMEM;
- goto err;
- }
-
- ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
- if (!(ctl & MV64X60_SDRAM_ECC)) {
- /* Non-ECC RAM? */
- printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
- res = -ENODEV;
- goto err2;
- }
-
- debugf3("%s(): init mci\n", __func__);
- mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_SECDED;
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = MV64x60_REVISION;
- mci->ctl_name = mv64x60_ctl_name;
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- mci->edac_check = mv64x60_mc_check;
-
- mci->ctl_page_to_phys = NULL;
-
- mci->scrub_mode = SCRUB_SW_SRC;
-
- mv64x60_init_csrows(mci, pdata);
-
- /* setup MC registers */
- out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
- ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
- ctl = (ctl & 0xff00ffff) | 0x10000;
- out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);
-
- if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
- goto err;
- }
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- /* acquire interrupt that reports errors */
- pdata->irq = platform_get_irq(pdev, 0);
- res = devm_request_irq(&pdev->dev,
- pdata->irq,
- mv64x60_mc_isr,
- IRQF_DISABLED,
- "[EDAC] MC err",
- mci);
- if (res < 0) {
- printk(KERN_ERR "%s: Unable to request irq %d for "
- "MV64x60 DRAM ERR\n", __func__, pdata->irq);
- res = -ENODEV;
- goto err2;
- }
-
- printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC Err\n",
- pdata->irq);
- }
-
- /* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
-
- return 0;
-
-err2:
- edac_mc_del_mc(&pdev->dev);
-err:
- devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
- edac_mc_free(mci);
- return res;
-}
-
-static int mv64x60_mc_err_remove(struct platform_device *pdev)
-{
- struct mem_ctl_info *mci = platform_get_drvdata(pdev);
-
- debugf0("%s()\n", __func__);
-
- edac_mc_del_mc(&pdev->dev);
- edac_mc_free(mci);
- return 0;
-}
-
-static struct platform_driver mv64x60_mc_err_driver = {
- .probe = mv64x60_mc_err_probe,
- .remove = mv64x60_mc_err_remove,
- .driver = {
- .name = "mv64x60_mc_err",
- }
-};
-
-static int __init mv64x60_edac_init(void)
-{
- int ret = 0;
-
- printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n");
- printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n");
- /* make sure error reporting method is sane */
- switch (edac_op_state) {
- case EDAC_OPSTATE_POLL:
- case EDAC_OPSTATE_INT:
- break;
- default:
- edac_op_state = EDAC_OPSTATE_INT;
- break;
- }
-
- ret = platform_driver_register(&mv64x60_mc_err_driver);
- if (ret)
-		printk(KERN_WARNING EDAC_MOD_STR " MC err failed to register\n");
-
- ret = platform_driver_register(&mv64x60_cpu_err_driver);
- if (ret)
- printk(KERN_WARNING EDAC_MOD_STR
-		       " CPU err failed to register\n");
-
- ret = platform_driver_register(&mv64x60_sram_err_driver);
- if (ret)
- printk(KERN_WARNING EDAC_MOD_STR
-		       " SRAM err failed to register\n");
-
-#ifdef CONFIG_PCI
- ret = platform_driver_register(&mv64x60_pci_err_driver);
- if (ret)
- printk(KERN_WARNING EDAC_MOD_STR
-		       " PCI err failed to register\n");
-#endif
-
- return ret;
-}
-module_init(mv64x60_edac_init);
-
-static void __exit mv64x60_edac_exit(void)
-{
-#ifdef CONFIG_PCI
- platform_driver_unregister(&mv64x60_pci_err_driver);
-#endif
- platform_driver_unregister(&mv64x60_sram_err_driver);
- platform_driver_unregister(&mv64x60_cpu_err_driver);
- platform_driver_unregister(&mv64x60_mc_err_driver);
-}
-module_exit(mv64x60_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Montavista Software, Inc.");
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state,
- "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
diff --git a/ANDROID_3.4.5/drivers/edac/mv64x60_edac.h b/ANDROID_3.4.5/drivers/edac/mv64x60_edac.h
deleted file mode 100644
index c7f209c9..00000000
--- a/ANDROID_3.4.5/drivers/edac/mv64x60_edac.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * EDAC defs for Marvell MV64x60 bridge chip
- *
- * Author: Dave Jiang <djiang@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- */
-#ifndef _MV64X60_EDAC_H_
-#define _MV64X60_EDAC_H_
-
-#define MV64x60_REVISION " Ver: 2.0.0"
-#define EDAC_MOD_STR "MV64x60_edac"
-
-#define mv64x60_printk(level, fmt, arg...) \
- edac_printk(level, "MV64x60", fmt, ##arg)
-
-#define mv64x60_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "MV64x60", fmt, ##arg)
-
-/* CPU Error Report Registers */
-#define MV64x60_CPU_ERR_ADDR_LO 0x00 /* 0x0070 */
-#define MV64x60_CPU_ERR_ADDR_HI 0x08 /* 0x0078 */
-#define MV64x60_CPU_ERR_DATA_LO 0x00 /* 0x0128 */
-#define MV64x60_CPU_ERR_DATA_HI 0x08 /* 0x0130 */
-#define MV64x60_CPU_ERR_PARITY 0x10 /* 0x0138 */
-#define MV64x60_CPU_ERR_CAUSE 0x18 /* 0x0140 */
-#define MV64x60_CPU_ERR_MASK 0x20 /* 0x0148 */
-
-#define MV64x60_CPU_CAUSE_MASK 0x07ffffff
-
-/* SRAM Error Report Registers */
-#define MV64X60_SRAM_ERR_CAUSE 0x08 /* 0x0388 */
-#define MV64X60_SRAM_ERR_ADDR_LO 0x10 /* 0x0390 */
-#define MV64X60_SRAM_ERR_ADDR_HI 0x78 /* 0x03f8 */
-#define MV64X60_SRAM_ERR_DATA_LO 0x18 /* 0x0398 */
-#define MV64X60_SRAM_ERR_DATA_HI 0x20 /* 0x03a0 */
-#define MV64X60_SRAM_ERR_PARITY 0x28 /* 0x03a8 */
-
-/* SDRAM Controller Registers */
-#define MV64X60_SDRAM_CONFIG 0x00 /* 0x1400 */
-#define MV64X60_SDRAM_ERR_DATA_HI 0x40 /* 0x1440 */
-#define MV64X60_SDRAM_ERR_DATA_LO 0x44 /* 0x1444 */
-#define MV64X60_SDRAM_ERR_ECC_RCVD 0x48 /* 0x1448 */
-#define MV64X60_SDRAM_ERR_ECC_CALC 0x4c /* 0x144c */
-#define MV64X60_SDRAM_ERR_ADDR 0x50 /* 0x1450 */
-#define MV64X60_SDRAM_ERR_ECC_CNTL 0x54 /* 0x1454 */
-#define MV64X60_SDRAM_ERR_ECC_ERR_CNT 0x58 /* 0x1458 */
-
-#define MV64X60_SDRAM_REGISTERED 0x20000
-#define MV64X60_SDRAM_ECC 0x40000
-
-#ifdef CONFIG_PCI
-/*
- * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
- * errata FEr-#11 and FEr-##16 for the 64460, it should be 0 on that chip as
- * well. IOW, don't set bit 0.
- */
-#define MV64X60_PCIx_ERR_MASK_VAL 0x00a50c24
-
-/* Register offsets from PCIx error address low register */
-#define MV64X60_PCI_ERROR_ADDR_LO 0x00
-#define MV64X60_PCI_ERROR_ADDR_HI 0x04
-#define MV64X60_PCI_ERROR_ATTR 0x08
-#define MV64X60_PCI_ERROR_CMD 0x10
-#define MV64X60_PCI_ERROR_CAUSE 0x18
-#define MV64X60_PCI_ERROR_MASK 0x1c
-
-#define MV64X60_PCI_ERR_SWrPerr 0x0002
-#define MV64X60_PCI_ERR_SRdPerr 0x0004
-#define MV64X60_PCI_ERR_MWrPerr 0x0020
-#define MV64X60_PCI_ERR_MRdPerr 0x0040
-
-#define MV64X60_PCI_PE_MASK (MV64X60_PCI_ERR_SWrPerr | \
- MV64X60_PCI_ERR_SRdPerr | \
- MV64X60_PCI_ERR_MWrPerr | \
- MV64X60_PCI_ERR_MRdPerr)
-
-struct mv64x60_pci_pdata {
- int pci_hose;
- void __iomem *pci_vbase;
- char *name;
- int irq;
- int edac_idx;
-};
-
-#endif /* CONFIG_PCI */
-
-struct mv64x60_mc_pdata {
- void __iomem *mc_vbase;
- int total_mem;
- char *name;
- int irq;
- int edac_idx;
-};
-
-struct mv64x60_cpu_pdata {
- void __iomem *cpu_vbase[2];
- char *name;
- int irq;
- int edac_idx;
-};
-
-struct mv64x60_sram_pdata {
- void __iomem *sram_vbase;
- char *name;
- int irq;
- int edac_idx;
-};
-
-#endif
diff --git a/ANDROID_3.4.5/drivers/edac/pasemi_edac.c b/ANDROID_3.4.5/drivers/edac/pasemi_edac.c
deleted file mode 100644
index 7f71ee43..00000000
--- a/ANDROID_3.4.5/drivers/edac/pasemi_edac.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright (C) 2006-2007 PA Semi, Inc
- *
- * Author: Egor Martovetsky <egor@pasemi.com>
- * Maintained by: Olof Johansson <olof@lixom.net>
- *
- * Driver for the PWRficient onchip memory controllers
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/edac.h>
-#include "edac_core.h"
-
-#define MODULE_NAME "pasemi_edac"
-
-#define MCCFG_MCEN 0x300
-#define MCCFG_MCEN_MMC_EN 0x00000001
-#define MCCFG_ERRCOR 0x388
-#define MCCFG_ERRCOR_RNK_FAIL_DET_EN 0x00000100
-#define MCCFG_ERRCOR_ECC_GEN_EN 0x00000010
-#define MCCFG_ERRCOR_ECC_CRR_EN 0x00000001
-#define MCCFG_SCRUB 0x384
-#define MCCFG_SCRUB_RGLR_SCRB_EN 0x00000001
-#define MCDEBUG_ERRCTL1 0x728
-#define MCDEBUG_ERRCTL1_RFL_LOG_EN 0x00080000
-#define MCDEBUG_ERRCTL1_MBE_LOG_EN 0x00040000
-#define MCDEBUG_ERRCTL1_SBE_LOG_EN 0x00020000
-#define MCDEBUG_ERRSTA 0x730
-#define MCDEBUG_ERRSTA_RFL_STATUS 0x00000004
-#define MCDEBUG_ERRSTA_MBE_STATUS 0x00000002
-#define MCDEBUG_ERRSTA_SBE_STATUS 0x00000001
-#define MCDEBUG_ERRCNT1 0x734
-#define MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO 0x00000080
-#define MCDEBUG_ERRLOG1A 0x738
-#define MCDEBUG_ERRLOG1A_MERR_TYPE_M 0x30000000
-#define MCDEBUG_ERRLOG1A_MERR_TYPE_NONE 0x00000000
-#define MCDEBUG_ERRLOG1A_MERR_TYPE_SBE 0x10000000
-#define MCDEBUG_ERRLOG1A_MERR_TYPE_MBE 0x20000000
-#define MCDEBUG_ERRLOG1A_MERR_TYPE_RFL 0x30000000
-#define MCDEBUG_ERRLOG1A_MERR_BA_M 0x00700000
-#define MCDEBUG_ERRLOG1A_MERR_BA_S 20
-#define MCDEBUG_ERRLOG1A_MERR_CS_M 0x00070000
-#define MCDEBUG_ERRLOG1A_MERR_CS_S 16
-#define MCDEBUG_ERRLOG1A_SYNDROME_M 0x0000ffff
-#define MCDRAM_RANKCFG 0x114
-#define MCDRAM_RANKCFG_EN 0x00000001
-#define MCDRAM_RANKCFG_TYPE_SIZE_M 0x000001c0
-#define MCDRAM_RANKCFG_TYPE_SIZE_S 6
-
-#define PASEMI_EDAC_NR_CSROWS 8
-#define PASEMI_EDAC_NR_CHANS 1
-#define PASEMI_EDAC_ERROR_GRAIN 64
-
-static int last_page_in_mmc;
-static int system_mmc_id;
-
-
-static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci)
-{
- struct pci_dev *pdev = to_pci_dev(mci->dev);
- u32 tmp;
-
- pci_read_config_dword(pdev, MCDEBUG_ERRSTA,
- &tmp);
-
- tmp &= (MCDEBUG_ERRSTA_RFL_STATUS | MCDEBUG_ERRSTA_MBE_STATUS
- | MCDEBUG_ERRSTA_SBE_STATUS);
-
- if (tmp) {
- if (tmp & MCDEBUG_ERRSTA_SBE_STATUS)
- pci_write_config_dword(pdev, MCDEBUG_ERRCNT1,
- MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO);
- pci_write_config_dword(pdev, MCDEBUG_ERRSTA, tmp);
- }
-
- return tmp;
-}
-
-static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
-{
- struct pci_dev *pdev = to_pci_dev(mci->dev);
- u32 errlog1a;
- u32 cs;
-
- if (!errsta)
- return;
-
- pci_read_config_dword(pdev, MCDEBUG_ERRLOG1A, &errlog1a);
-
- cs = (errlog1a & MCDEBUG_ERRLOG1A_MERR_CS_M) >>
- MCDEBUG_ERRLOG1A_MERR_CS_S;
-
- /* uncorrectable/multi-bit errors */
- if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
- MCDEBUG_ERRSTA_RFL_STATUS)) {
- edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0,
- cs, mci->ctl_name);
- }
-
- /* correctable/single-bit errors */
- if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) {
- edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0,
- 0, cs, 0, mci->ctl_name);
- }
-}
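
As an example of the field extraction above, a hypothetical MCDEBUG_ERRLOG1A value of 0x10230000 (hypothetical, for illustration only) would decode as:

    /* 0x10230000 & MCDEBUG_ERRLOG1A_MERR_TYPE_M = 0x10000000 -> single-bit error (SBE) */
    /* 0x10230000 & MCDEBUG_ERRLOG1A_MERR_BA_M   = 0x00200000 -> bank 2        (>> 20)  */
    /* 0x10230000 & MCDEBUG_ERRLOG1A_MERR_CS_M   = 0x00030000 -> chip select 3 (>> 16)  */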
-
-static void pasemi_edac_check(struct mem_ctl_info *mci)
-{
- u32 errsta;
-
- errsta = pasemi_edac_get_error_info(mci);
- if (errsta)
- pasemi_edac_process_error_info(mci, errsta);
-}
-
-static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
- struct pci_dev *pdev,
- enum edac_type edac_mode)
-{
- struct csrow_info *csrow;
- u32 rankcfg;
- int index;
-
- for (index = 0; index < mci->nr_csrows; index++) {
- csrow = &mci->csrows[index];
-
- pci_read_config_dword(pdev,
- MCDRAM_RANKCFG + (index * 12),
- &rankcfg);
-
- if (!(rankcfg & MCDRAM_RANKCFG_EN))
- continue;
-
- switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
- MCDRAM_RANKCFG_TYPE_SIZE_S) {
- case 0:
- csrow->nr_pages = 128 << (20 - PAGE_SHIFT);
- break;
- case 1:
- csrow->nr_pages = 256 << (20 - PAGE_SHIFT);
- break;
- case 2:
- case 3:
- csrow->nr_pages = 512 << (20 - PAGE_SHIFT);
- break;
- case 4:
- csrow->nr_pages = 1024 << (20 - PAGE_SHIFT);
- break;
- case 5:
- csrow->nr_pages = 2048 << (20 - PAGE_SHIFT);
- break;
- default:
- edac_mc_printk(mci, KERN_ERR,
- "Unrecognized Rank Config. rankcfg=%u\n",
- rankcfg);
- return -EINVAL;
- }
-
- csrow->first_page = last_page_in_mmc;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- last_page_in_mmc += csrow->nr_pages;
- csrow->page_mask = 0;
- csrow->grain = PASEMI_EDAC_ERROR_GRAIN;
- csrow->mtype = MEM_DDR;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = edac_mode;
- }
- return 0;
-}
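
The rank-size decode above turns each MCDRAM_RANKCFG size code into a page count via N << (20 - PAGE_SHIFT), i.e. N MiB expressed in kernel pages. A standalone sketch of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12), for two of the cases:

    /* Standalone check of the MiB-to-pages math used above. */
    #include <assert.h>

    #define PAGE_SHIFT 12	/* assumed: 4 KiB pages */

    int main(void)
    {
    	assert((128UL  << (20 - PAGE_SHIFT)) == 32768);		/* 128 MiB  -> 32768 pages  */
    	assert((2048UL << (20 - PAGE_SHIFT)) == 524288);	/* 2048 MiB -> 524288 pages */
    	return 0;
    }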
-
-static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct mem_ctl_info *mci = NULL;
- u32 errctl1, errcor, scrub, mcen;
-
- pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
- if (!(mcen & MCCFG_MCEN_MMC_EN))
- return -ENODEV;
-
- /*
- * We should think about enabling other error detection later on
- */
-
- pci_read_config_dword(pdev, MCDEBUG_ERRCTL1, &errctl1);
- errctl1 |= MCDEBUG_ERRCTL1_SBE_LOG_EN |
- MCDEBUG_ERRCTL1_MBE_LOG_EN |
- MCDEBUG_ERRCTL1_RFL_LOG_EN;
- pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
-
- mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS,
- system_mmc_id++);
-
- if (mci == NULL)
- return -ENOMEM;
-
- pci_read_config_dword(pdev, MCCFG_ERRCOR, &errcor);
- errcor |= MCCFG_ERRCOR_RNK_FAIL_DET_EN |
- MCCFG_ERRCOR_ECC_GEN_EN |
- MCCFG_ERRCOR_ECC_CRR_EN;
-
- mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ?
- ((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ?
- (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_EC) :
- EDAC_FLAG_NONE;
- mci->mod_name = MODULE_NAME;
- mci->dev_name = pci_name(pdev);
- mci->ctl_name = "pasemi,pwrficient-mc";
- mci->edac_check = pasemi_edac_check;
- mci->ctl_page_to_phys = NULL;
- pci_read_config_dword(pdev, MCCFG_SCRUB, &scrub);
- mci->scrub_cap = SCRUB_FLAG_HW_PROG | SCRUB_FLAG_HW_SRC;
- mci->scrub_mode =
- ((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ? SCRUB_FLAG_HW_SRC : 0) |
- ((scrub & MCCFG_SCRUB_RGLR_SCRB_EN) ? SCRUB_FLAG_HW_PROG : 0);
-
- if (pasemi_edac_init_csrows(mci, pdev,
- (mci->edac_cap & EDAC_FLAG_SECDED) ?
- EDAC_SECDED :
- ((mci->edac_cap & EDAC_FLAG_EC) ?
- EDAC_EC : EDAC_NONE)))
- goto fail;
-
- /*
- * Clear status
- */
- pasemi_edac_get_error_info(mci);
-
- if (edac_mc_add_mc(mci))
- goto fail;
-
- /* get this far and it's successful */
- return 0;
-
-fail:
- edac_mc_free(mci);
- return -ENODEV;
-}
-
-static void __devexit pasemi_edac_remove(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
-
- if (!mci)
- return;
-
- edac_mc_free(mci);
-}
-
-
-static const struct pci_device_id pasemi_edac_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa00a) },
- { }
-};
-
-MODULE_DEVICE_TABLE(pci, pasemi_edac_pci_tbl);
-
-static struct pci_driver pasemi_edac_driver = {
- .name = MODULE_NAME,
- .probe = pasemi_edac_probe,
- .remove = __devexit_p(pasemi_edac_remove),
- .id_table = pasemi_edac_pci_tbl,
-};
-
-static int __init pasemi_edac_init(void)
-{
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- return pci_register_driver(&pasemi_edac_driver);
-}
-
-static void __exit pasemi_edac_exit(void)
-{
- pci_unregister_driver(&pasemi_edac_driver);
-}
-
-module_init(pasemi_edac_init);
-module_exit(pasemi_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
-MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller");
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
-
diff --git a/ANDROID_3.4.5/drivers/edac/ppc4xx_edac.c b/ANDROID_3.4.5/drivers/edac/ppc4xx_edac.c
deleted file mode 100644
index d427c69b..00000000
--- a/ANDROID_3.4.5/drivers/edac/ppc4xx_edac.c
+++ /dev/null
@@ -1,1439 +0,0 @@
-/*
- * Copyright (c) 2008 Nuovation System Designs, LLC
- * Grant Erickson <gerickson@nuovations.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- */
-
-#include <linux/edac.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/types.h>
-
-#include <asm/dcr.h>
-
-#include "edac_core.h"
-#include "ppc4xx_edac.h"
-
-/*
- * This file implements a driver for monitoring and handling events
- * associated with the IBM DDR2 ECC controller found in the AMCC/IBM
- * 405EX[r], 440SP, 440SPe, 460EX, 460GT and 460SX.
- *
- * As realized in the 405EX[r], this controller features:
- *
- * - Support for registered- and non-registered DDR1 and DDR2 memory.
- * - 32-bit or 16-bit memory interface with optional ECC.
- *
- * o ECC support includes:
- *
- * - 4-bit SEC/DED
- * - Aligned-nibble error detect
- * - Bypass mode
- *
- * - Two (2) memory banks/ranks.
- * - Up to 1 GiB per bank/rank in 32-bit mode and up to 512 MiB per
- * bank/rank in 16-bit mode.
- *
- * As realized in the 440SP and 440SPe, this controller changes/adds:
- *
- * - 64-bit or 32-bit memory interface with optional ECC.
- *
- * o ECC support includes:
- *
- * - 8-bit SEC/DED
- * - Aligned-nibble error detect
- * - Bypass mode
- *
- * - Up to 4 GiB per bank/rank in 64-bit mode and up to 2 GiB
- * per bank/rank in 32-bit mode.
- *
- * As realized in the 460EX and 460GT, this controller changes/adds:
- *
- * - 64-bit or 32-bit memory interface with optional ECC.
- *
- * o ECC support includes:
- *
- * - 8-bit SEC/DED
- * - Aligned-nibble error detect
- * - Bypass mode
- *
- * - Four (4) memory banks/ranks.
- * - Up to 16 GiB per bank/rank in 64-bit mode and up to 8 GiB
- * per bank/rank in 32-bit mode.
- *
- * At present, this driver has ONLY been tested against the controller
- * realization in the 405EX[r] on the AMCC Kilauea and Haleakala
- * boards (256 MiB w/o ECC memory soldered onto the board) and a
- * proprietary board based on those designs (128 MiB ECC memory, also
- * soldered onto the board).
- *
- * Dynamic feature detection and handling needs to be added for the
- * other realizations of this controller listed above.
- *
- * Eventually, this driver will likely be adapted to the above variant
- * realizations of this controller as well as broken apart to handle
- * the other known ECC-capable controllers prevalent in other 4xx
- * processors:
- *
- * - IBM SDRAM (405GP, 405CR and 405EP) "ibm,sdram-4xx"
- * - IBM DDR1 (440GP, 440GX, 440EP and 440GR) "ibm,sdram-4xx-ddr"
- * - Denali DDR1/DDR2 (440EPX and 440GRX) "denali,sdram-4xx-ddr2"
- *
- * For this controller, unfortunately, correctable errors report
- * nothing more than the beat/cycle and byte/lane the correction
- * occurred on and the check bit group that covered the error.
- *
- * In contrast, uncorrectable errors also report the failing address,
- * the bus master and the transaction direction (i.e. read or write).
- *
- * Regardless of whether the error is a CE or a UE, we report the
- * following pieces of information in the driver-unique message to the
- * EDAC subsystem:
- *
- * - Device tree path
- * - Bank(s)
- * - Check bit error group
- * - Beat(s)/lane(s)
- */
-
-/* Preprocessor Definitions */
-
-#define EDAC_OPSTATE_INT_STR "interrupt"
-#define EDAC_OPSTATE_POLL_STR "polled"
-#define EDAC_OPSTATE_UNKNOWN_STR "unknown"
-
-#define PPC4XX_EDAC_MODULE_NAME "ppc4xx_edac"
-#define PPC4XX_EDAC_MODULE_REVISION "v1.0.0"
-
-#define PPC4XX_EDAC_MESSAGE_SIZE 256
-
-/*
- * Kernel logging without an EDAC instance
- */
-#define ppc4xx_edac_printk(level, fmt, arg...) \
- edac_printk(level, "PPC4xx MC", fmt, ##arg)
-
-/*
- * Kernel logging with an EDAC instance
- */
-#define ppc4xx_edac_mc_printk(level, mci, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "PPC4xx", fmt, ##arg)
-
-/*
- * Macros to convert bank configuration size enumerations into MiB and
- * page values.
- */
-#define SDRAM_MBCF_SZ_MiB_MIN 4
-#define SDRAM_MBCF_SZ_TO_MiB(n) (SDRAM_MBCF_SZ_MiB_MIN \
- << (SDRAM_MBCF_SZ_DECODE(n)))
-#define SDRAM_MBCF_SZ_TO_PAGES(n) (SDRAM_MBCF_SZ_MiB_MIN \
- << (20 - PAGE_SHIFT + \
- SDRAM_MBCF_SZ_DECODE(n)))
-
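
A worked example of these conversions, assuming 4 KiB pages (PAGE_SHIFT == 12): the SDRAM_MBCF_SZ_256MB field value decodes to 6, so the MiB conversion gives 4 << 6 = 256 MiB and the page conversion gives 4 << (20 - 12 + 6) = 65536 pages. The same check as a small standalone program:

    /* Worked check of the size conversions; the page size is an assumption. */
    #include <stdio.h>

    int main(void)
    {
    	unsigned int decoded = 6;	/* SDRAM_MBCF_SZ_DECODE(SDRAM_MBCF_SZ_256MB) */
    	unsigned int page_shift = 12;	/* assumed: 4 KiB pages */

    	printf("MiB   = %u\n", 4u << decoded);				/* 256   */
    	printf("pages = %u\n", 4u << (20 - page_shift + decoded));	/* 65536 */
    	return 0;
    }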
-/*
- * The ibm,sdram-4xx-ddr2 Device Control Registers (DCRs) are
- * indirectly accessed and have a base and length defined by the
- * device tree. The base can be anything; however, we expect the
- * length to be precisely two registers, the first for the address
- * window and the second for the data window.
- */
-#define SDRAM_DCR_RESOURCE_LEN 2
-#define SDRAM_DCR_ADDR_OFFSET 0
-#define SDRAM_DCR_DATA_OFFSET 1
-
-/*
- * Device tree interrupt indices
- */
-#define INTMAP_ECCDED_INDEX 0 /* Double-bit Error Detect */
-#define INTMAP_ECCSEC_INDEX 1 /* Single-bit Error Correct */
-
-/* Type Definitions */
-
-/*
- * PPC4xx SDRAM memory controller private instance data
- */
-struct ppc4xx_edac_pdata {
- dcr_host_t dcr_host; /* Indirect DCR address/data window mapping */
- struct {
- int sec; /* Single-bit correctable error IRQ assigned */
- int ded; /* Double-bit detectable error IRQ assigned */
- } irqs;
-};
-
-/*
- * Various status data gathered and manipulated when checking and
- * reporting ECC status.
- */
-struct ppc4xx_ecc_status {
- u32 ecces;
- u32 besr;
- u32 bearh;
- u32 bearl;
- u32 wmirq;
-};
-
-/* Function Prototypes */
-
-static int ppc4xx_edac_probe(struct platform_device *device);
-static int ppc4xx_edac_remove(struct platform_device *device);
-
-/* Global Variables */
-
-/*
- * Device tree node type and compatible tuples this driver can match
- * on.
- */
-static struct of_device_id ppc4xx_edac_match[] = {
- {
- .compatible = "ibm,sdram-4xx-ddr2"
- },
- { }
-};
-
-static struct platform_driver ppc4xx_edac_driver = {
- .probe = ppc4xx_edac_probe,
- .remove = ppc4xx_edac_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = PPC4XX_EDAC_MODULE_NAME,
- .of_match_table = ppc4xx_edac_match,
- },
-};
-
-/*
- * TODO: The row and channel parameters likely need to be dynamically
- * set based on the aforementioned variant controller realizations.
- */
-static const unsigned ppc4xx_edac_nr_csrows = 2;
-static const unsigned ppc4xx_edac_nr_chans = 1;
-
-/*
- * Strings associated with PLB master IDs capable of being posted in
- * SDRAM_BESR or SDRAM_WMIRQ on uncorrectable ECC errors.
- */
-static const char * const ppc4xx_plb_masters[9] = {
- [SDRAM_PLB_M0ID_ICU] = "ICU",
- [SDRAM_PLB_M0ID_PCIE0] = "PCI-E 0",
- [SDRAM_PLB_M0ID_PCIE1] = "PCI-E 1",
- [SDRAM_PLB_M0ID_DMA] = "DMA",
- [SDRAM_PLB_M0ID_DCU] = "DCU",
- [SDRAM_PLB_M0ID_OPB] = "OPB",
- [SDRAM_PLB_M0ID_MAL] = "MAL",
- [SDRAM_PLB_M0ID_SEC] = "SEC",
- [SDRAM_PLB_M0ID_AHB] = "AHB"
-};
-
-/**
- * mfsdram - read and return controller register data
- * @dcr_host: A pointer to the DCR mapping.
- * @idcr_n: The indirect DCR register to read.
- *
- * This routine reads and returns the data associated with the
- * controller's specified indirect DCR register.
- *
- * Returns the read data.
- */
-static inline u32
-mfsdram(const dcr_host_t *dcr_host, unsigned int idcr_n)
-{
- return __mfdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET,
- dcr_host->base + SDRAM_DCR_DATA_OFFSET,
- idcr_n);
-}
-
-/**
- * mtsdram - write controller register data
- * @dcr_host: A pointer to the DCR mapping.
- * @idcr_n: The indirect DCR register to write.
- * @value: The data to write.
- *
- * This routine writes the provided data to the controller's specified
- * indirect DCR register.
- */
-static inline void
-mtsdram(const dcr_host_t *dcr_host, unsigned int idcr_n, u32 value)
-{
- return __mtdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET,
- dcr_host->base + SDRAM_DCR_DATA_OFFSET,
- idcr_n,
- value);
-}
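
The status and probe paths below route all controller register traffic through these two accessors. A minimal fragment of that pattern, borrowed from the get/clear status routines further down and assuming the driver's pdata context:

    u32 ecces;

    /* Read the latched ECC status through the indirect window... */
    ecces = mfsdram(&pdata->dcr_host, SDRAM_ECCES) & SDRAM_ECCES_MASK;

    /* ...and write the same bits back to clear them. */
    mtsdram(&pdata->dcr_host, SDRAM_ECCES, ecces);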
-
-/**
- * ppc4xx_edac_check_bank_error - check a bank for an ECC bank error
- * @status: A pointer to the ECC status structure to check for an
- * ECC bank error.
- * @bank: The bank to check for an ECC error.
- *
- * This routine determines whether the specified bank has an ECC
- * error.
- *
- * Returns true if the specified bank has an ECC error; otherwise,
- * false.
- */
-static bool
-ppc4xx_edac_check_bank_error(const struct ppc4xx_ecc_status *status,
- unsigned int bank)
-{
- switch (bank) {
- case 0:
- return status->ecces & SDRAM_ECCES_BK0ER;
- case 1:
- return status->ecces & SDRAM_ECCES_BK1ER;
- default:
- return false;
- }
-}
-
-/**
- * ppc4xx_edac_generate_bank_message - generate interpreted bank status message
- * @mci: A pointer to the EDAC memory controller instance associated
- * with the bank message being generated.
- * @status: A pointer to the ECC status structure to generate the
- * message from.
- * @buffer: A pointer to the buffer in which to generate the
- * message.
- * @size: The size, in bytes, of space available in buffer.
- *
- * This routine generates to the provided buffer the portion of the
- * driver-unique report message associated with the ECCES[BKNER]
- * field of the specified ECC status.
- *
- * Returns the number of characters generated on success; otherwise, <
- * 0 on error.
- */
-static int
-ppc4xx_edac_generate_bank_message(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status,
- char *buffer,
- size_t size)
-{
- int n, total = 0;
- unsigned int row, rows;
-
- n = snprintf(buffer, size, "%s: Banks: ", mci->dev_name);
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
-
- for (rows = 0, row = 0; row < mci->nr_csrows; row++) {
- if (ppc4xx_edac_check_bank_error(status, row)) {
- n = snprintf(buffer, size, "%s%u",
- (rows++ ? ", " : ""), row);
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
- }
- }
-
- n = snprintf(buffer, size, "%s; ", rows ? "" : "None");
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
-
- fail:
- return total;
-}
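
Each of these generator routines repeats the same bounded-append idiom: snprintf() into the remaining space, bail out on error or truncation, otherwise advance the cursor and shrink the budget. Factored into a hypothetical standalone helper (not part of the driver), the idiom looks like:

    #include <stdarg.h>
    #include <stdio.h>

    /* Append to *buffer, advancing it; returns -1 once the space is exhausted. */
    static int append(char **buffer, size_t *size, const char *fmt, ...)
    {
    	va_list args;
    	int n;

    	va_start(args, fmt);
    	n = vsnprintf(*buffer, *size, fmt, args);
    	va_end(args);

    	if (n < 0 || (size_t)n >= *size)
    		return -1;	/* error or truncated: stop appending */

    	*buffer += n;
    	*size -= n;
    	return 0;
    }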
-
-/**
- * ppc4xx_edac_generate_checkbit_message - generate interpreted checkbit message
- * @mci: A pointer to the EDAC memory controller instance associated
- * with the checkbit message being generated.
- * @status: A pointer to the ECC status structure to generate the
- * message from.
- * @buffer: A pointer to the buffer in which to generate the
- * message.
- * @size: The size, in bytes, of space available in buffer.
- *
- * This routine generates to the provided buffer the portion of the
- * driver-unique report message associated with the ECCES[CKBER]
- * field of the specified ECC status.
- *
- * Returns the number of characters generated on success; otherwise, <
- * 0 on error.
- */
-static int
-ppc4xx_edac_generate_checkbit_message(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status,
- char *buffer,
- size_t size)
-{
- const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
- const char *ckber = NULL;
-
- switch (status->ecces & SDRAM_ECCES_CKBER_MASK) {
- case SDRAM_ECCES_CKBER_NONE:
- ckber = "None";
- break;
- case SDRAM_ECCES_CKBER_32_ECC_0_3:
- ckber = "ECC0:3";
- break;
- case SDRAM_ECCES_CKBER_32_ECC_4_8:
- switch (mfsdram(&pdata->dcr_host, SDRAM_MCOPT1) &
- SDRAM_MCOPT1_WDTH_MASK) {
- case SDRAM_MCOPT1_WDTH_16:
- ckber = "ECC0:3";
- break;
- case SDRAM_MCOPT1_WDTH_32:
- ckber = "ECC4:8";
- break;
- default:
- ckber = "Unknown";
- break;
- }
- break;
- case SDRAM_ECCES_CKBER_32_ECC_0_8:
- ckber = "ECC0:8";
- break;
- default:
- ckber = "Unknown";
- break;
- }
-
- return snprintf(buffer, size, "Checkbit Error: %s", ckber);
-}
-
-/**
- * ppc4xx_edac_generate_lane_message - generate interpreted byte lane message
- * @mci: A pointer to the EDAC memory controller instance associated
- * with the byte lane message being generated.
- * @status: A pointer to the ECC status structure to generate the
- * message from.
- * @buffer: A pointer to the buffer in which to generate the
- * message.
- * @size: The size, in bytes, of space available in buffer.
- *
- * This routine generates to the provided buffer the portion of the
- * driver-unique report message associated with the ECCES[BNCE]
- * field of the specified ECC status.
- *
- * Returns the number of characters generated on success; otherwise, <
- * 0 on error.
- */
-static int
-ppc4xx_edac_generate_lane_message(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status,
- char *buffer,
- size_t size)
-{
- int n, total = 0;
- unsigned int lane, lanes;
- const unsigned int first_lane = 0;
- const unsigned int lane_count = 16;
-
- n = snprintf(buffer, size, "; Byte Lane Errors: ");
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
-
- for (lanes = 0, lane = first_lane; lane < lane_count; lane++) {
- if ((status->ecces & SDRAM_ECCES_BNCE_ENCODE(lane)) != 0) {
- n = snprintf(buffer, size,
- "%s%u",
- (lanes++ ? ", " : ""), lane);
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
- }
- }
-
- n = snprintf(buffer, size, "%s; ", lanes ? "" : "None");
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
-
- fail:
- return total;
-}
-
-/**
- * ppc4xx_edac_generate_ecc_message - generate interpreted ECC status message
- * @mci: A pointer to the EDAC memory controller instance associated
- * with the ECCES message being generated.
- * @status: A pointer to the ECC status structure to generate the
- * message from.
- * @buffer: A pointer to the buffer in which to generate the
- * message.
- * @size: The size, in bytes, of space available in buffer.
- *
- * This routine generates to the provided buffer the portion of the
- * driver-unique report message associated with the ECCES register of
- * the specified ECC status.
- *
- * Returns the number of characters generated on success; otherwise, <
- * 0 on error.
- */
-static int
-ppc4xx_edac_generate_ecc_message(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status,
- char *buffer,
- size_t size)
-{
- int n, total = 0;
-
- n = ppc4xx_edac_generate_bank_message(mci, status, buffer, size);
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
-
- n = ppc4xx_edac_generate_checkbit_message(mci, status, buffer, size);
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
-
- n = ppc4xx_edac_generate_lane_message(mci, status, buffer, size);
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
-
- fail:
- return total;
-}
-
-/**
- * ppc4xx_edac_generate_plb_message - generate interpreted PLB status message
- * @mci: A pointer to the EDAC memory controller instance associated
- * with the PLB message being generated.
- * @status: A pointer to the ECC status structure to generate the
- * message from.
- * @buffer: A pointer to the buffer in which to generate the
- * message.
- * @size: The size, in bytes, of space available in buffer.
- *
- * This routine generates to the provided buffer the portion of the
- * driver-unique report message associated with the PLB-related BESR
- * and/or WMIRQ registers of the specified ECC status.
- *
- * Returns the number of characters generated on success; otherwise, <
- * 0 on error.
- */
-static int
-ppc4xx_edac_generate_plb_message(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status,
- char *buffer,
- size_t size)
-{
- unsigned int master;
- bool read;
-
- if ((status->besr & SDRAM_BESR_MASK) == 0)
- return 0;
-
- if ((status->besr & SDRAM_BESR_M0ET_MASK) == SDRAM_BESR_M0ET_NONE)
- return 0;
-
- read = ((status->besr & SDRAM_BESR_M0RW_MASK) == SDRAM_BESR_M0RW_READ);
-
- master = SDRAM_BESR_M0ID_DECODE(status->besr);
-
- return snprintf(buffer, size,
- "%s error w/ PLB master %u \"%s\"; ",
- (read ? "Read" : "Write"),
- master,
- (((master >= SDRAM_PLB_M0ID_FIRST) &&
- (master <= SDRAM_PLB_M0ID_LAST)) ?
- ppc4xx_plb_masters[master] : "UNKNOWN"));
-}
-
-/**
- * ppc4xx_edac_generate_message - generate interpreted status message
- * @mci: A pointer to the EDAC memory controller instance associated
- * with the driver-unique message being generated.
- * @status: A pointer to the ECC status structure to generate the
- * message from.
- * @buffer: A pointer to the buffer in which to generate the
- * message.
- * @size: The size, in bytes, of space available in buffer.
- *
- * This routine generates to the provided buffer the driver-unique
- * EDAC report message from the specified ECC status.
- */
-static void
-ppc4xx_edac_generate_message(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status,
- char *buffer,
- size_t size)
-{
- int n;
-
- if (buffer == NULL || size == 0)
- return;
-
- n = ppc4xx_edac_generate_ecc_message(mci, status, buffer, size);
-
- if (n < 0 || n >= size)
- return;
-
- buffer += n;
- size -= n;
-
- ppc4xx_edac_generate_plb_message(mci, status, buffer, size);
-}
-
-#ifdef DEBUG
-/**
- * ppc4xx_ecc_dump_status - dump controller ECC status registers
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the status being dumped.
- * @status: A pointer to the ECC status structure to generate the
- * dump from.
- *
- * This routine dumps the raw and interpreted forms of the specified
- * ECC status to the kernel log buffer.
- */
-static void
-ppc4xx_ecc_dump_status(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status)
-{
- char message[PPC4XX_EDAC_MESSAGE_SIZE];
-
- ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
-
- ppc4xx_edac_mc_printk(KERN_INFO, mci,
- "\n"
- "\tECCES: 0x%08x\n"
- "\tWMIRQ: 0x%08x\n"
- "\tBESR: 0x%08x\n"
- "\tBEAR: 0x%08x%08x\n"
- "\t%s\n",
- status->ecces,
- status->wmirq,
- status->besr,
- status->bearh,
- status->bearl,
- message);
-}
-#endif /* DEBUG */
-
-/**
- * ppc4xx_ecc_get_status - get controller ECC status
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the status being retrieved.
- * @status: A pointer to the ECC status structure to populate the
- * ECC status with.
- *
- * This routine reads and masks, as appropriate, all the relevant
- * status registers that deal with ibm,sdram-4xx-ddr2 ECC errors.
- * While we read all of them, for correctable errors, we only expect
- * to deal with ECCES. For uncorrectable errors, we expect to deal
- * with all of them.
- */
-static void
-ppc4xx_ecc_get_status(const struct mem_ctl_info *mci,
- struct ppc4xx_ecc_status *status)
-{
- const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
- const dcr_host_t *dcr_host = &pdata->dcr_host;
-
- status->ecces = mfsdram(dcr_host, SDRAM_ECCES) & SDRAM_ECCES_MASK;
- status->wmirq = mfsdram(dcr_host, SDRAM_WMIRQ) & SDRAM_WMIRQ_MASK;
- status->besr = mfsdram(dcr_host, SDRAM_BESR) & SDRAM_BESR_MASK;
- status->bearl = mfsdram(dcr_host, SDRAM_BEARL);
- status->bearh = mfsdram(dcr_host, SDRAM_BEARH);
-}
-
-/**
- * ppc4xx_ecc_clear_status - clear controller ECC status
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the status being cleared.
- * @status: A pointer to the ECC status structure containing the
- * values to write to clear the ECC status.
- *
- * This routine clears--by writing the masked (as appropriate) status
- * values back to--the status registers that deal with
- * ibm,sdram-4xx-ddr2 ECC errors.
- */
-static void
-ppc4xx_ecc_clear_status(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status)
-{
- const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
- const dcr_host_t *dcr_host = &pdata->dcr_host;
-
- mtsdram(dcr_host, SDRAM_ECCES, status->ecces & SDRAM_ECCES_MASK);
- mtsdram(dcr_host, SDRAM_WMIRQ, status->wmirq & SDRAM_WMIRQ_MASK);
- mtsdram(dcr_host, SDRAM_BESR, status->besr & SDRAM_BESR_MASK);
- mtsdram(dcr_host, SDRAM_BEARL, 0);
- mtsdram(dcr_host, SDRAM_BEARH, 0);
-}
-
-/**
- * ppc4xx_edac_handle_ce - handle controller correctable ECC error (CE)
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the correctable error being handled and reported.
- * @status: A pointer to the ECC status structure associated with
- * the correctable error being handled and reported.
- *
- * This routine handles an ibm,sdram-4xx-ddr2 controller ECC
- * correctable error. Per the aforementioned discussion, there's not
- * enough status available to use the full EDAC correctable error
- * interface, so we just pass driver-unique message to the "no info"
- * interface.
- */
-static void
-ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status)
-{
- int row;
- char message[PPC4XX_EDAC_MESSAGE_SIZE];
-
- ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
-
- for (row = 0; row < mci->nr_csrows; row++)
- if (ppc4xx_edac_check_bank_error(status, row))
- edac_mc_handle_ce_no_info(mci, message);
-}
-
-/**
- * ppc4xx_edac_handle_ue - handle controller uncorrectable ECC error (UE)
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the uncorrectable error being handled and
- * reported.
- * @status: A pointer to the ECC status structure associated with
- * the uncorrectable error being handled and reported.
- *
- * This routine handles an ibm,sdram-4xx-ddr2 controller ECC
- * uncorrectable error.
- */
-static void
-ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status)
-{
- const u64 bear = ((u64)status->bearh << 32 | status->bearl);
- const unsigned long page = bear >> PAGE_SHIFT;
- const unsigned long offset = bear & ~PAGE_MASK;
- int row;
- char message[PPC4XX_EDAC_MESSAGE_SIZE];
-
- ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
-
- for (row = 0; row < mci->nr_csrows; row++)
- if (ppc4xx_edac_check_bank_error(status, row))
- edac_mc_handle_ue(mci, page, offset, row, message);
-}
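
For a concrete feel of the address split above, with 4 KiB pages (PAGE_SHIFT == 12) and a hypothetical bus error address:

    /* BEARH:BEARL = 0x00000000:0x12345678 (hypothetical)  */
    /* bear   = 0x0000000012345678                          */
    /* page   = bear >> PAGE_SHIFT  = 0x12345               */
    /* offset = bear & ~PAGE_MASK   = 0x678                 */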
-
-/**
- * ppc4xx_edac_check - check controller for ECC errors
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the ibm,sdram-4xx-ddr2 controller being
- * checked.
- *
- * This routine is used to check and post ECC errors and is called by
- * both the EDAC polling thread and this driver's CE and UE interrupt
- * handler.
- */
-static void
-ppc4xx_edac_check(struct mem_ctl_info *mci)
-{
-#ifdef DEBUG
- static unsigned int count;
-#endif
- struct ppc4xx_ecc_status status;
-
- ppc4xx_ecc_get_status(mci, &status);
-
-#ifdef DEBUG
- if (count++ % 30 == 0)
- ppc4xx_ecc_dump_status(mci, &status);
-#endif
-
- if (status.ecces & SDRAM_ECCES_UE)
- ppc4xx_edac_handle_ue(mci, &status);
-
- if (status.ecces & SDRAM_ECCES_CE)
- ppc4xx_edac_handle_ce(mci, &status);
-
- ppc4xx_ecc_clear_status(mci, &status);
-}
-
-/**
- * ppc4xx_edac_isr - SEC (CE) and DED (UE) interrupt service routine
- * @irq: The virtual interrupt number being serviced.
- * @dev_id: A pointer to the EDAC memory controller instance
- * associated with the interrupt being handled.
- *
- * This routine implements the interrupt handler for both correctable
- * (CE) and uncorrectable (UE) ECC errors for the ibm,sdram-4xx-ddr2
- * controller. It simply calls through to the same routine used during
- * polling to check, report and clear the ECC status.
- *
- * Unconditionally returns IRQ_HANDLED.
- */
-static irqreturn_t
-ppc4xx_edac_isr(int irq, void *dev_id)
-{
- struct mem_ctl_info *mci = dev_id;
-
- ppc4xx_edac_check(mci);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ppc4xx_edac_get_dtype - return the controller memory width
- * @mcopt1: The 32-bit Memory Controller Option 1 register value
- * currently set for the controller, from which the width
- * is derived.
- *
- * This routine returns the EDAC device type width appropriate for the
- * current controller configuration.
- *
- * TODO: This needs to be conditioned dynamically through feature
- * flags or some such when other controller variants are supported as
- * the 405EX[r] is 16-/32-bit and the others are 32-/64-bit with the
- * 16- and 64-bit field definition/value/enumeration (b1) overloaded
- * among them.
- *
- * Returns a device type width enumeration.
- */
-static enum dev_type __devinit
-ppc4xx_edac_get_dtype(u32 mcopt1)
-{
- switch (mcopt1 & SDRAM_MCOPT1_WDTH_MASK) {
- case SDRAM_MCOPT1_WDTH_16:
- return DEV_X2;
- case SDRAM_MCOPT1_WDTH_32:
- return DEV_X4;
- default:
- return DEV_UNKNOWN;
- }
-}
-
-/**
- * ppc4xx_edac_get_mtype - return controller memory type
- * @mcopt1: The 32-bit Memory Controller Option 1 register value
- * currently set for the controller, from which the memory type
- * is derived.
- *
- * This routine returns the EDAC memory type appropriate for the
- * current controller configuration.
- *
- * Returns a memory type enumeration.
- */
-static enum mem_type __devinit
-ppc4xx_edac_get_mtype(u32 mcopt1)
-{
- bool rden = ((mcopt1 & SDRAM_MCOPT1_RDEN_MASK) == SDRAM_MCOPT1_RDEN);
-
- switch (mcopt1 & SDRAM_MCOPT1_DDR_TYPE_MASK) {
- case SDRAM_MCOPT1_DDR2_TYPE:
- return rden ? MEM_RDDR2 : MEM_DDR2;
- case SDRAM_MCOPT1_DDR1_TYPE:
- return rden ? MEM_RDDR : MEM_DDR;
- default:
- return MEM_UNKNOWN;
- }
-}
-
-/**
- * ppc4xx_edac_init_csrows - initialize driver instance rows
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the ibm,sdram-4xx-ddr2 controller for which
- * the csrows (i.e. banks/ranks) are being initialized.
- * @mcopt1: The 32-bit Memory Controller Option 1 register value
- * currently set for the controller, from which bank width
- *           and memory type information are derived.
- *
- * This routine initializes the virtual "chip select rows" associated
- * with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2
- * controller bank/rank is mapped to a row.
- *
- * Returns 0 if OK; otherwise, -EINVAL if the memory bank size
- * configuration cannot be determined.
- */
-static int __devinit
-ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
-{
- const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
- int status = 0;
- enum mem_type mtype;
- enum dev_type dtype;
- enum edac_type edac_mode;
- int row;
- u32 mbxcf, size;
- static u32 ppc4xx_last_page;
-
- /* Establish the memory type and width */
-
- mtype = ppc4xx_edac_get_mtype(mcopt1);
- dtype = ppc4xx_edac_get_dtype(mcopt1);
-
- /* Establish EDAC mode */
-
- if (mci->edac_cap & EDAC_FLAG_SECDED)
- edac_mode = EDAC_SECDED;
- else if (mci->edac_cap & EDAC_FLAG_EC)
- edac_mode = EDAC_EC;
- else
- edac_mode = EDAC_NONE;
-
- /*
-	 * Initialize the chip select row structures, each of which
-	 * corresponds 1:1 to a controller bank/rank.
- */
-
- for (row = 0; row < mci->nr_csrows; row++) {
- struct csrow_info *csi = &mci->csrows[row];
-
- /*
- * Get the configuration settings for this
- * row/bank/rank and skip disabled banks.
- */
-
- mbxcf = mfsdram(&pdata->dcr_host, SDRAM_MBXCF(row));
-
- if ((mbxcf & SDRAM_MBCF_BE_MASK) != SDRAM_MBCF_BE_ENABLE)
- continue;
-
- /* Map the bank configuration size setting to pages. */
-
- size = mbxcf & SDRAM_MBCF_SZ_MASK;
-
- switch (size) {
- case SDRAM_MBCF_SZ_4MB:
- case SDRAM_MBCF_SZ_8MB:
- case SDRAM_MBCF_SZ_16MB:
- case SDRAM_MBCF_SZ_32MB:
- case SDRAM_MBCF_SZ_64MB:
- case SDRAM_MBCF_SZ_128MB:
- case SDRAM_MBCF_SZ_256MB:
- case SDRAM_MBCF_SZ_512MB:
- case SDRAM_MBCF_SZ_1GB:
- case SDRAM_MBCF_SZ_2GB:
- case SDRAM_MBCF_SZ_4GB:
- case SDRAM_MBCF_SZ_8GB:
- csi->nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
- break;
- default:
- ppc4xx_edac_mc_printk(KERN_ERR, mci,
- "Unrecognized memory bank %d "
- "size 0x%08x\n",
- row, SDRAM_MBCF_SZ_DECODE(size));
- status = -EINVAL;
- goto done;
- }
-
- csi->first_page = ppc4xx_last_page;
- csi->last_page = csi->first_page + csi->nr_pages - 1;
- csi->page_mask = 0;
-
- /*
- * It's unclear exactly what grain should be set to
- * here. The SDRAM_ECCES register allows resolution of
- * an error down to a nibble which would potentially
- * argue for a grain of '1' byte, even though we only
- * know the associated address for uncorrectable
- * errors. This value is not used at present for
- * anything other than error reporting so getting it
- * wrong should be of little consequence. Other
- * possible values would be the PLB width (16), the
- * page size (PAGE_SIZE) or the memory width (2 or 4).
- */
-
- csi->grain = 1;
-
- csi->mtype = mtype;
- csi->dtype = dtype;
-
- csi->edac_mode = edac_mode;
-
- ppc4xx_last_page += csi->nr_pages;
- }
-
- done:
- return status;
-}
-
-/**
- * ppc4xx_edac_mc_init - initialize driver instance
- * @mci: A pointer to the EDAC memory controller instance being
- * initialized.
- * @op: A pointer to the OpenFirmware device tree node associated
- * with the controller this EDAC instance is bound to.
- * @dcr_host: A pointer to the DCR data containing the DCR mapping
- * for this controller instance.
- * @mcopt1: The 32-bit Memory Controller Option 1 register value
- * currently set for the controller, from which ECC capabilities
- * and scrub mode are derived.
- *
- * This routine performs initialization of the EDAC memory controller
- * instance and related driver-private data associated with the
- * ibm,sdram-4xx-ddr2 memory controller the instance is bound to.
- *
- * Returns 0 if OK; otherwise, < 0 on error.
- */
-static int __devinit
-ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
- struct platform_device *op,
- const dcr_host_t *dcr_host,
- u32 mcopt1)
-{
- int status = 0;
- const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
- struct ppc4xx_edac_pdata *pdata = NULL;
- const struct device_node *np = op->dev.of_node;
-
- if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL)
- return -EINVAL;
-
- /* Initial driver pointers and private data */
-
- mci->dev = &op->dev;
-
- dev_set_drvdata(mci->dev, mci);
-
- pdata = mci->pvt_info;
-
- pdata->dcr_host = *dcr_host;
- pdata->irqs.sec = NO_IRQ;
- pdata->irqs.ded = NO_IRQ;
-
- /* Initialize controller capabilities and configuration */
-
- mci->mtype_cap = (MEM_FLAG_DDR | MEM_FLAG_RDDR |
- MEM_FLAG_DDR2 | MEM_FLAG_RDDR2);
-
- mci->edac_ctl_cap = (EDAC_FLAG_NONE |
- EDAC_FLAG_EC |
- EDAC_FLAG_SECDED);
-
- mci->scrub_cap = SCRUB_NONE;
- mci->scrub_mode = SCRUB_NONE;
-
- /*
-	 * Update the actual capabilities based on the MCOPT1[MCHK]
- * settings. Scrubbing is only useful if reporting is enabled.
- */
-
- switch (memcheck) {
- case SDRAM_MCOPT1_MCHK_CHK:
- mci->edac_cap = EDAC_FLAG_EC;
- break;
- case SDRAM_MCOPT1_MCHK_CHK_REP:
- mci->edac_cap = (EDAC_FLAG_EC | EDAC_FLAG_SECDED);
- mci->scrub_mode = SCRUB_SW_SRC;
- break;
- default:
- mci->edac_cap = EDAC_FLAG_NONE;
- break;
- }
-
- /* Initialize strings */
-
- mci->mod_name = PPC4XX_EDAC_MODULE_NAME;
- mci->mod_ver = PPC4XX_EDAC_MODULE_REVISION;
- mci->ctl_name = ppc4xx_edac_match->compatible,
- mci->dev_name = np->full_name;
-
- /* Initialize callbacks */
-
- mci->edac_check = ppc4xx_edac_check;
- mci->ctl_page_to_phys = NULL;
-
- /* Initialize chip select rows */
-
- status = ppc4xx_edac_init_csrows(mci, mcopt1);
-
- if (status)
- ppc4xx_edac_mc_printk(KERN_ERR, mci,
- "Failed to initialize rows!\n");
-
- return status;
-}
-
-/**
- * ppc4xx_edac_register_irq - setup and register controller interrupts
- * @op: A pointer to the OpenFirmware device tree node associated
- * with the controller this EDAC instance is bound to.
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the ibm,sdram-4xx-ddr2 controller for which
- * interrupts are being registered.
- *
- * This routine parses the correctable (CE) and uncorrectable error (UE)
- * interrupts from the device tree node and maps and assigns them to
- * the associated EDAC memory controller instance.
- *
- * Returns 0 if OK; otherwise, -ENODEV if the interrupts could not be
- * mapped and assigned.
- */
-static int __devinit
-ppc4xx_edac_register_irq(struct platform_device *op, struct mem_ctl_info *mci)
-{
- int status = 0;
- int ded_irq, sec_irq;
- struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
- struct device_node *np = op->dev.of_node;
-
- ded_irq = irq_of_parse_and_map(np, INTMAP_ECCDED_INDEX);
- sec_irq = irq_of_parse_and_map(np, INTMAP_ECCSEC_INDEX);
-
- if (ded_irq == NO_IRQ || sec_irq == NO_IRQ) {
- ppc4xx_edac_mc_printk(KERN_ERR, mci,
- "Unable to map interrupts.\n");
- status = -ENODEV;
- goto fail;
- }
-
- status = request_irq(ded_irq,
- ppc4xx_edac_isr,
- IRQF_DISABLED,
- "[EDAC] MC ECCDED",
- mci);
-
- if (status < 0) {
- ppc4xx_edac_mc_printk(KERN_ERR, mci,
- "Unable to request irq %d for ECC DED",
- ded_irq);
- status = -ENODEV;
- goto fail1;
- }
-
- status = request_irq(sec_irq,
- ppc4xx_edac_isr,
- IRQF_DISABLED,
- "[EDAC] MC ECCSEC",
- mci);
-
- if (status < 0) {
- ppc4xx_edac_mc_printk(KERN_ERR, mci,
- "Unable to request irq %d for ECC SEC",
- sec_irq);
- status = -ENODEV;
- goto fail2;
- }
-
- ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCDED irq is %d\n", ded_irq);
- ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCSEC irq is %d\n", sec_irq);
-
- pdata->irqs.ded = ded_irq;
- pdata->irqs.sec = sec_irq;
-
- return 0;
-
- fail2:
- free_irq(sec_irq, mci);
-
- fail1:
- free_irq(ded_irq, mci);
-
- fail:
- return status;
-}
-
-/**
- * ppc4xx_edac_map_dcrs - locate and map controller registers
- * @np: A pointer to the device tree node containing the DCR
- * resources to map.
- * @dcr_host: A pointer to the DCR data to populate with the
- * DCR mapping.
- *
- * This routine attempts to locate in the device tree and map the DCR
- * register resources associated with the controller's indirect DCR
- * address and data windows.
- *
- * Returns 0 if the DCRs were successfully mapped; otherwise, < 0 on
- * error.
- */
-static int __devinit
-ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host)
-{
- unsigned int dcr_base, dcr_len;
-
- if (np == NULL || dcr_host == NULL)
- return -EINVAL;
-
- /* Get the DCR resource extent and sanity check the values. */
-
- dcr_base = dcr_resource_start(np, 0);
- dcr_len = dcr_resource_len(np, 0);
-
- if (dcr_base == 0 || dcr_len == 0) {
- ppc4xx_edac_printk(KERN_ERR,
- "Failed to obtain DCR property.\n");
- return -ENODEV;
- }
-
- if (dcr_len != SDRAM_DCR_RESOURCE_LEN) {
- ppc4xx_edac_printk(KERN_ERR,
- "Unexpected DCR length %d, expected %d.\n",
- dcr_len, SDRAM_DCR_RESOURCE_LEN);
- return -ENODEV;
- }
-
- /* Attempt to map the DCR extent. */
-
- *dcr_host = dcr_map(np, dcr_base, dcr_len);
-
- if (!DCR_MAP_OK(*dcr_host)) {
- ppc4xx_edac_printk(KERN_INFO, "Failed to map DCRs.\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-/**
- * ppc4xx_edac_probe - check controller and bind driver
- * @op: A pointer to the OpenFirmware device tree node associated
- * with the controller being probed for driver binding.
- *
- * This routine probes a specific ibm,sdram-4xx-ddr2 controller
- * instance for binding with the driver.
- *
- * Returns 0 if the controller instance was successfully bound to the
- * driver; otherwise, < 0 on error.
- */
-static int __devinit ppc4xx_edac_probe(struct platform_device *op)
-{
- int status = 0;
- u32 mcopt1, memcheck;
- dcr_host_t dcr_host;
- const struct device_node *np = op->dev.of_node;
- struct mem_ctl_info *mci = NULL;
- static int ppc4xx_edac_instance;
-
- /*
- * At this point, we only support the controller realized on
- * the AMCC PPC 405EX[r]. Reject anything else.
- */
-
- if (!of_device_is_compatible(np, "ibm,sdram-405ex") &&
- !of_device_is_compatible(np, "ibm,sdram-405exr")) {
- ppc4xx_edac_printk(KERN_NOTICE,
- "Only the PPC405EX[r] is supported.\n");
- return -ENODEV;
- }
-
- /*
- * Next, get the DCR property and attempt to map it so that we
- * can probe the controller.
- */
-
- status = ppc4xx_edac_map_dcrs(np, &dcr_host);
-
- if (status)
- return status;
-
- /*
- * First determine whether ECC is enabled at all. If not,
- * there is no useful checking or monitoring that can be done
- * for this controller.
- */
-
- mcopt1 = mfsdram(&dcr_host, SDRAM_MCOPT1);
- memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
-
- if (memcheck == SDRAM_MCOPT1_MCHK_NON) {
- ppc4xx_edac_printk(KERN_INFO, "%s: No ECC memory detected or "
- "ECC is disabled.\n", np->full_name);
- status = -ENODEV;
- goto done;
- }
-
- /*
- * At this point, we know ECC is enabled, allocate an EDAC
- * controller instance and perform the appropriate
- * initialization.
- */
-
- mci = edac_mc_alloc(sizeof(struct ppc4xx_edac_pdata),
- ppc4xx_edac_nr_csrows,
- ppc4xx_edac_nr_chans,
- ppc4xx_edac_instance);
-
- if (mci == NULL) {
- ppc4xx_edac_printk(KERN_ERR, "%s: "
- "Failed to allocate EDAC MC instance!\n",
- np->full_name);
- status = -ENOMEM;
- goto done;
- }
-
- status = ppc4xx_edac_mc_init(mci, op, &dcr_host, mcopt1);
-
- if (status) {
- ppc4xx_edac_mc_printk(KERN_ERR, mci,
- "Failed to initialize instance!\n");
- goto fail;
- }
-
- /*
- * We have a valid, initialized EDAC instance bound to the
- * controller. Attempt to register it with the EDAC subsystem
- * and, if necessary, register interrupts.
- */
-
- if (edac_mc_add_mc(mci)) {
- ppc4xx_edac_mc_printk(KERN_ERR, mci,
- "Failed to add instance!\n");
- status = -ENODEV;
- goto fail;
- }
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- status = ppc4xx_edac_register_irq(op, mci);
-
- if (status)
- goto fail1;
- }
-
- ppc4xx_edac_instance++;
-
- return 0;
-
- fail1:
- edac_mc_del_mc(mci->dev);
-
- fail:
- edac_mc_free(mci);
-
- done:
- return status;
-}
-
-/**
- * ppc4xx_edac_remove - unbind driver from controller
- * @op: A pointer to the OpenFirmware device tree node associated
- * with the controller this EDAC instance is to be unbound/removed
- * from.
- *
- * This routine unbinds the EDAC memory controller instance associated
- * with the specified ibm,sdram-4xx-ddr2 controller described by the
- * OpenFirmware device tree node passed as a parameter.
- *
- * Unconditionally returns 0.
- */
-static int
-ppc4xx_edac_remove(struct platform_device *op)
-{
- struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
- struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- free_irq(pdata->irqs.sec, mci);
- free_irq(pdata->irqs.ded, mci);
- }
-
- dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN);
-
- edac_mc_del_mc(mci->dev);
- edac_mc_free(mci);
-
- return 0;
-}
-
-/**
- * ppc4xx_edac_opstate_init - initialize EDAC reporting method
- *
- * This routine ensures that the EDAC memory controller reporting
- * method is mapped to a sane value as the EDAC core defines the value
- * to EDAC_OPSTATE_INVAL by default. We don't call the global
- * opstate_init as that defaults to polling and we want interrupt as
- * the default.
- */
-static inline void __init
-ppc4xx_edac_opstate_init(void)
-{
- switch (edac_op_state) {
- case EDAC_OPSTATE_POLL:
- case EDAC_OPSTATE_INT:
- break;
- default:
- edac_op_state = EDAC_OPSTATE_INT;
- break;
- }
-
- ppc4xx_edac_printk(KERN_INFO, "Reporting type: %s\n",
- ((edac_op_state == EDAC_OPSTATE_POLL) ?
- EDAC_OPSTATE_POLL_STR :
- ((edac_op_state == EDAC_OPSTATE_INT) ?
- EDAC_OPSTATE_INT_STR :
- EDAC_OPSTATE_UNKNOWN_STR)));
-}
-
-/**
- * ppc4xx_edac_init - driver/module insertion entry point
- *
- * This routine is the driver/module insertion entry point. It
- * initializes the EDAC memory controller reporting state and
- * registers the driver as an OpenFirmware device tree platform
- * driver.
- */
-static int __init
-ppc4xx_edac_init(void)
-{
- ppc4xx_edac_printk(KERN_INFO, PPC4XX_EDAC_MODULE_REVISION "\n");
-
- ppc4xx_edac_opstate_init();
-
- return platform_driver_register(&ppc4xx_edac_driver);
-}
-
-/**
- * ppc4xx_edac_exit - driver/module removal entry point
- *
- * This routine is the driver/module removal entry point. It
- * unregisters the driver as an OpenFirmware device tree platform
- * driver.
- */
-static void __exit
-ppc4xx_edac_exit(void)
-{
- platform_driver_unregister(&ppc4xx_edac_driver);
-}
-
-module_init(ppc4xx_edac_init);
-module_exit(ppc4xx_edac_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Grant Erickson <gerickson@nuovations.com>");
-MODULE_DESCRIPTION("EDAC MC Driver for the PPC4xx IBM DDR2 Memory Controller");
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting State: "
- "0=" EDAC_OPSTATE_POLL_STR ", 2=" EDAC_OPSTATE_INT_STR);
diff --git a/ANDROID_3.4.5/drivers/edac/ppc4xx_edac.h b/ANDROID_3.4.5/drivers/edac/ppc4xx_edac.h
deleted file mode 100644
index d3154764..00000000
--- a/ANDROID_3.4.5/drivers/edac/ppc4xx_edac.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (c) 2008 Nuovation System Designs, LLC
- * Grant Erickson <gerickson@nuovations.com>
- *
- * This file defines processor mnemonics for accessing and managing
- * the IBM DDR1/DDR2 ECC controller found in the 405EX[r], 440SP,
- * 440SPe, 460EX, 460GT and 460SX.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- */
-
-#ifndef __PPC4XX_EDAC_H
-#define __PPC4XX_EDAC_H
-
-#include <linux/types.h>
-
-/*
- * Macro for generating register field mnemonics
- */
-#define PPC_REG_BITS 32
-#define PPC_REG_VAL(bit, val) ((val) << ((PPC_REG_BITS - 1) - (bit)))
-#define PPC_REG_DECODE(bit, val) ((val) >> ((PPC_REG_BITS - 1) - (bit)))
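
These helpers use PowerPC big-endian bit numbering, where bit 0 is the most-significant bit of the 32-bit register; a few evaluations make the convention concrete:

    /* PPC_REG_VAL(31, 0x1) == 0x1 << (31 - 31) == 0x00000001             */
    /* PPC_REG_VAL(3,  0xF) == 0xF << (31 - 3)  == 0xF0000000             */
    /* PPC_REG_VAL(18, 1)   == 0x00002000       == SDRAM_ECCES_CE (below) */
    /* PPC_REG_DECODE() simply undoes the shift for field extraction.     */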
-
-/*
- * IBM 4xx DDR1/DDR2 SDRAM memory controller registers (at least those
- * relevant to ECC)
- */
-#define SDRAM_BESR 0x00 /* Error status (read/clear) */
-#define SDRAM_BESRT			0x01	/* Error status (test/set) */
-#define SDRAM_BEARL 0x02 /* Error address low */
-#define SDRAM_BEARH 0x03 /* Error address high */
-#define SDRAM_WMIRQ 0x06 /* Write master (read/clear) */
-#define SDRAM_WMIRQT 0x07 /* Write master (test/set) */
-#define SDRAM_MCOPT1 0x20 /* Controller options 1 */
-#define SDRAM_MBXCF_BASE 0x40 /* Bank n configuration base */
-#define SDRAM_MBXCF(n) (SDRAM_MBXCF_BASE + (4 * (n)))
-#define SDRAM_MB0CF SDRAM_MBXCF(0)
-#define SDRAM_MB1CF SDRAM_MBXCF(1)
-#define SDRAM_MB2CF SDRAM_MBXCF(2)
-#define SDRAM_MB3CF SDRAM_MBXCF(3)
-#define SDRAM_ECCCR 0x98 /* ECC error status */
-#define SDRAM_ECCES SDRAM_ECCCR
-
-/*
- * PLB Master IDs
- */
-#define SDRAM_PLB_M0ID_FIRST 0
-#define SDRAM_PLB_M0ID_ICU SDRAM_PLB_M0ID_FIRST
-#define SDRAM_PLB_M0ID_PCIE0 1
-#define SDRAM_PLB_M0ID_PCIE1 2
-#define SDRAM_PLB_M0ID_DMA 3
-#define SDRAM_PLB_M0ID_DCU 4
-#define SDRAM_PLB_M0ID_OPB 5
-#define SDRAM_PLB_M0ID_MAL 6
-#define SDRAM_PLB_M0ID_SEC 7
-#define SDRAM_PLB_M0ID_AHB 8
-#define SDRAM_PLB_M0ID_LAST SDRAM_PLB_M0ID_AHB
-#define SDRAM_PLB_M0ID_COUNT (SDRAM_PLB_M0ID_LAST - \
- SDRAM_PLB_M0ID_FIRST + 1)
-
-/*
- * Memory Controller Bus Error Status Register
- */
-#define SDRAM_BESR_MASK PPC_REG_VAL(7, 0xFF)
-#define SDRAM_BESR_M0ID_MASK PPC_REG_VAL(3, 0xF)
-#define SDRAM_BESR_M0ID_DECODE(n) PPC_REG_DECODE(3, n)
-#define SDRAM_BESR_M0ID_ICU PPC_REG_VAL(3, SDRAM_PLB_M0ID_ICU)
-#define SDRAM_BESR_M0ID_PCIE0 PPC_REG_VAL(3, SDRAM_PLB_M0ID_PCIE0)
-#define SDRAM_BESR_M0ID_PCIE1 PPC_REG_VAL(3, SDRAM_PLB_M0ID_PCIE1)
-#define SDRAM_BESR_M0ID_DMA PPC_REG_VAL(3, SDRAM_PLB_M0ID_DMA)
-#define SDRAM_BESR_M0ID_DCU PPC_REG_VAL(3, SDRAM_PLB_M0ID_DCU)
-#define SDRAM_BESR_M0ID_OPB PPC_REG_VAL(3, SDRAM_PLB_M0ID_OPB)
-#define SDRAM_BESR_M0ID_MAL PPC_REG_VAL(3, SDRAM_PLB_M0ID_MAL)
-#define SDRAM_BESR_M0ID_SEC PPC_REG_VAL(3, SDRAM_PLB_M0ID_SEC)
-#define SDRAM_BESR_M0ID_AHB PPC_REG_VAL(3, SDRAM_PLB_M0ID_AHB)
-#define SDRAM_BESR_M0ET_MASK PPC_REG_VAL(6, 0x7)
-#define SDRAM_BESR_M0ET_NONE PPC_REG_VAL(6, 0)
-#define SDRAM_BESR_M0ET_ECC PPC_REG_VAL(6, 1)
-#define SDRAM_BESR_M0RW_MASK PPC_REG_VAL(7, 1)
-#define SDRAM_BESR_M0RW_WRITE PPC_REG_VAL(7, 0)
-#define SDRAM_BESR_M0RW_READ PPC_REG_VAL(7, 1)
-
-/*
- * Memory Controller PLB Write Master Interrupt Register
- */
-#define SDRAM_WMIRQ_MASK PPC_REG_VAL(8, 0x1FF)
-#define SDRAM_WMIRQ_ENCODE(id) PPC_REG_VAL((id % \
- SDRAM_PLB_M0ID_COUNT), 1)
-#define SDRAM_WMIRQ_ICU PPC_REG_VAL(SDRAM_PLB_M0ID_ICU, 1)
-#define SDRAM_WMIRQ_PCIE0 PPC_REG_VAL(SDRAM_PLB_M0ID_PCIE0, 1)
-#define SDRAM_WMIRQ_PCIE1 PPC_REG_VAL(SDRAM_PLB_M0ID_PCIE1, 1)
-#define SDRAM_WMIRQ_DMA PPC_REG_VAL(SDRAM_PLB_M0ID_DMA, 1)
-#define SDRAM_WMIRQ_DCU PPC_REG_VAL(SDRAM_PLB_M0ID_DCU, 1)
-#define SDRAM_WMIRQ_OPB PPC_REG_VAL(SDRAM_PLB_M0ID_OPB, 1)
-#define SDRAM_WMIRQ_MAL PPC_REG_VAL(SDRAM_PLB_M0ID_MAL, 1)
-#define SDRAM_WMIRQ_SEC PPC_REG_VAL(SDRAM_PLB_M0ID_SEC, 1)
-#define SDRAM_WMIRQ_AHB PPC_REG_VAL(SDRAM_PLB_M0ID_AHB, 1)
-
-/*
- * Memory Controller Options 1 Register
- */
-#define SDRAM_MCOPT1_MCHK_MASK PPC_REG_VAL(3, 0x3) /* ECC mask */
-#define SDRAM_MCOPT1_MCHK_NON PPC_REG_VAL(3, 0x0) /* No ECC gen */
-#define SDRAM_MCOPT1_MCHK_GEN PPC_REG_VAL(3, 0x2) /* ECC gen */
-#define SDRAM_MCOPT1_MCHK_CHK PPC_REG_VAL(3, 0x1) /* ECC gen and chk */
-#define SDRAM_MCOPT1_MCHK_CHK_REP PPC_REG_VAL(3, 0x3) /* ECC gen/chk/rpt */
-#define SDRAM_MCOPT1_MCHK_DECODE(n) ((((u32)(n)) >> 28) & 0x3)
-#define SDRAM_MCOPT1_RDEN_MASK PPC_REG_VAL(4, 0x1) /* Rgstrd DIMM mask */
-#define SDRAM_MCOPT1_RDEN PPC_REG_VAL(4, 0x1) /* Rgstrd DIMM enbl */
-#define SDRAM_MCOPT1_WDTH_MASK PPC_REG_VAL(7, 0x1) /* Width mask */
-#define SDRAM_MCOPT1_WDTH_32 PPC_REG_VAL(7, 0x0) /* 32 bits */
-#define SDRAM_MCOPT1_WDTH_16 PPC_REG_VAL(7, 0x1) /* 16 bits */
-#define SDRAM_MCOPT1_DDR_TYPE_MASK PPC_REG_VAL(11, 0x1) /* DDR type mask */
-#define SDRAM_MCOPT1_DDR1_TYPE PPC_REG_VAL(11, 0x0) /* DDR1 type */
-#define SDRAM_MCOPT1_DDR2_TYPE PPC_REG_VAL(11, 0x1) /* DDR2 type */
-
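
The PPC_REG_VAL()/PPC_REG_DECODE() helpers use IBM (big-endian) bit numbering, with bit 0 as the most significant bit of the 32-bit register; that is why SDRAM_MCOPT1_MCHK_DECODE() above shifts right by 28 to reach a two-bit field anchored at bit 3. A minimal userspace sketch, assuming the PPC_REG_VAL definition that appears earlier in this header:

	#include <stdio.h>
	#include <stdint.h>

	#define PPC_REG_BITS		32
	#define PPC_REG_VAL(bit, val)	((val) << ((PPC_REG_BITS - 1) - (bit)))

	int main(void)
	{
		uint32_t mcopt1 = PPC_REG_VAL(3, 0x3);	/* ECC gen/chk/report */

		/* Same extraction as SDRAM_MCOPT1_MCHK_DECODE() above. */
		printf("MCOPT1 = %#010x, MCHK = %#x\n", mcopt1, (mcopt1 >> 28) & 0x3);
		return 0;
	}
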
-/*
- * Memory Bank 0 - n Configuration Register
- */
-#define SDRAM_MBCF_BA_MASK PPC_REG_VAL(12, 0x1FFF)
-#define SDRAM_MBCF_SZ_MASK PPC_REG_VAL(19, 0xF)
-#define SDRAM_MBCF_SZ_DECODE(mbxcf) PPC_REG_DECODE(19, mbxcf)
-#define SDRAM_MBCF_SZ_4MB PPC_REG_VAL(19, 0x0)
-#define SDRAM_MBCF_SZ_8MB PPC_REG_VAL(19, 0x1)
-#define SDRAM_MBCF_SZ_16MB PPC_REG_VAL(19, 0x2)
-#define SDRAM_MBCF_SZ_32MB PPC_REG_VAL(19, 0x3)
-#define SDRAM_MBCF_SZ_64MB PPC_REG_VAL(19, 0x4)
-#define SDRAM_MBCF_SZ_128MB PPC_REG_VAL(19, 0x5)
-#define SDRAM_MBCF_SZ_256MB PPC_REG_VAL(19, 0x6)
-#define SDRAM_MBCF_SZ_512MB PPC_REG_VAL(19, 0x7)
-#define SDRAM_MBCF_SZ_1GB PPC_REG_VAL(19, 0x8)
-#define SDRAM_MBCF_SZ_2GB PPC_REG_VAL(19, 0x9)
-#define SDRAM_MBCF_SZ_4GB PPC_REG_VAL(19, 0xA)
-#define SDRAM_MBCF_SZ_8GB PPC_REG_VAL(19, 0xB)
-#define SDRAM_MBCF_AM_MASK PPC_REG_VAL(23, 0xF)
-#define SDRAM_MBCF_AM_MODE0 PPC_REG_VAL(23, 0x0)
-#define SDRAM_MBCF_AM_MODE1 PPC_REG_VAL(23, 0x1)
-#define SDRAM_MBCF_AM_MODE2 PPC_REG_VAL(23, 0x2)
-#define SDRAM_MBCF_AM_MODE3 PPC_REG_VAL(23, 0x3)
-#define SDRAM_MBCF_AM_MODE4 PPC_REG_VAL(23, 0x4)
-#define SDRAM_MBCF_AM_MODE5 PPC_REG_VAL(23, 0x5)
-#define SDRAM_MBCF_AM_MODE6 PPC_REG_VAL(23, 0x6)
-#define SDRAM_MBCF_AM_MODE7 PPC_REG_VAL(23, 0x7)
-#define SDRAM_MBCF_AM_MODE8 PPC_REG_VAL(23, 0x8)
-#define SDRAM_MBCF_AM_MODE9 PPC_REG_VAL(23, 0x9)
-#define SDRAM_MBCF_BE_MASK PPC_REG_VAL(31, 0x1)
-#define SDRAM_MBCF_BE_DISABLE PPC_REG_VAL(31, 0x0)
-#define SDRAM_MBCF_BE_ENABLE PPC_REG_VAL(31, 0x1)
-
-/*
- * ECC Error Status
- */
-#define SDRAM_ECCES_MASK PPC_REG_VAL(21, 0x3FFFFF)
-#define SDRAM_ECCES_BNCE_MASK PPC_REG_VAL(15, 0xFFFF)
-#define SDRAM_ECCES_BNCE_ENCODE(lane) PPC_REG_VAL(((lane) & 0xF), 1)
-#define SDRAM_ECCES_CKBER_MASK PPC_REG_VAL(17, 0x3)
-#define SDRAM_ECCES_CKBER_NONE PPC_REG_VAL(17, 0)
-#define SDRAM_ECCES_CKBER_16_ECC_0_3 PPC_REG_VAL(17, 2)
-#define SDRAM_ECCES_CKBER_32_ECC_0_3 PPC_REG_VAL(17, 1)
-#define SDRAM_ECCES_CKBER_32_ECC_4_8 PPC_REG_VAL(17, 2)
-#define SDRAM_ECCES_CKBER_32_ECC_0_8 PPC_REG_VAL(17, 3)
-#define SDRAM_ECCES_CE PPC_REG_VAL(18, 1)
-#define SDRAM_ECCES_UE PPC_REG_VAL(19, 1)
-#define SDRAM_ECCES_BKNER_MASK PPC_REG_VAL(21, 0x3)
-#define SDRAM_ECCES_BK0ER PPC_REG_VAL(20, 1)
-#define SDRAM_ECCES_BK1ER PPC_REG_VAL(21, 1)
-
-#endif /* __PPC4XX_EDAC_H */
diff --git a/ANDROID_3.4.5/drivers/edac/r82600_edac.c b/ANDROID_3.4.5/drivers/edac/r82600_edac.c
deleted file mode 100644
index 6d908ad7..00000000
--- a/ANDROID_3.4.5/drivers/edac/r82600_edac.c
+++ /dev/null
@@ -1,420 +0,0 @@
-/*
- * Radisys 82600 Embedded chipset Memory Controller kernel module
- * (C) 2005 EADS Astrium
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Written by Tim Small <tim@buttersideup.com>, based on work by Thayne
- * Harbaugh, Dan Hollis <goemon at anime dot net> and others.
- *
- * $Id: edac_r82600.c,v 1.1.2.6 2005/10/05 00:43:44 dsp_llnl Exp $
- *
- * Written with reference to 82600 High Integration Dual PCI System
- * Controller Data Book:
- * www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
- * references to this document given in []
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/edac.h>
-#include "edac_core.h"
-
-#define R82600_REVISION " Ver: 2.0.2"
-#define EDAC_MOD_STR "r82600_edac"
-
-#define r82600_printk(level, fmt, arg...) \
- edac_printk(level, "r82600", fmt, ##arg)
-
-#define r82600_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "r82600", fmt, ##arg)
-
-/* Radisys say "The 82600 integrates a main memory SDRAM controller that
- * supports up to four banks of memory. The four banks can support a mix of
- * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs,
- * each of which can be any size from 16MB to 512MB. Both registered (control
- * signals buffered) and unbuffered DIMM types are supported. Mixing of
- * registered and unbuffered DIMMs as well as mixing of ECC and non-ECC DIMMs
- * is not allowed. The 82600 SDRAM interface operates at the same frequency as
- * the CPU bus, 66MHz, 100MHz or 133MHz."
- */
-
-#define R82600_NR_CSROWS 4
-#define R82600_NR_CHANS 1
-#define R82600_NR_DIMMS 4
-
-#define R82600_BRIDGE_ID 0x8200
-
-/* Radisys 82600 register addresses - device 0 function 0 - PCI bridge */
-#define R82600_DRAMC 0x57 /* Various SDRAM related control bits
- * all bits are R/W
- *
- * 7 SDRAM ISA Hole Enable
- * 6 Flash Page Mode Enable
- * 5 ECC Enable: 1=ECC 0=noECC
- * 4 DRAM DIMM Type: 1=
- * 3 BIOS Alias Disable
- * 2 SDRAM BIOS Flash Write Enable
- * 1:0 SDRAM Refresh Rate: 00=Disabled
- * 01=7.8usec (256Mbit SDRAMs)
- * 10=15.6us 11=125usec
- */
-
-#define R82600_SDRAMC 0x76 /* "SDRAM Control Register"
- * More SDRAM related control bits
- * all bits are R/W
- *
- * 15:8 Reserved.
- *
- * 7:5 Special SDRAM Mode Select
- *
- * 4 Force ECC
- *
- * 1=Drive ECC bits to 0 during
- * write cycles (i.e. ECC test mode)
- *
- * 0=Normal ECC functioning
- *
- * 3 Enhanced Paging Enable
- *
- * 2 CAS# Latency 0=3clks 1=2clks
- *
- * 1 RAS# to CAS# Delay 0=3 1=2
- *
- * 0 RAS# Precharge 0=3 1=2
- */
-
-#define R82600_EAP 0x80 /* ECC Error Address Pointer Register
- *
- * 31 Disable Hardware Scrubbing (RW)
- * 0=Scrub on corrected read
- * 1=Don't scrub on corrected read
- *
- * 30:12 Error Address Pointer (RO)
- * Upper 19 bits of error address
- *
- * 11:4 Syndrome Bits (RO)
- *
- * 3 BSERR# on multibit error (RW)
- * 1=enable 0=disable
- *
- * 2 NMI on Single Bit Error (RW)
- * 1=NMI triggered by SBE n.b. other
- * prerequisites
- * 0=NMI not triggered
- *
- * 1 MBE (R/WC)
- * read 1=MBE at EAP (see above)
- * read 0=no MBE, or SBE occurred first
- * write 1=Clear MBE status (must also
- * clear SBE)
- * write 0=NOP
- *
- * 0 SBE (R/WC)
- * read 1=SBE at EAP (see above)
- * read 0=no SBE, or MBE occurred first
- * write 1=Clear SBE status (must also
- * clear MBE)
- * write 0=NOP
- */
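
To make the field layout above concrete, here is a minimal userspace decode of a hypothetical EAP value (the value is invented for illustration; the driver performs the real decode in r82600_get_error_info() and r82600_process_error_info() further down):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t eap = 0x01234561;	/* made-up register contents */

		printf("scrub disabled : %u\n", (eap >> 31) & 1);
		printf("error address  : 0x%08x\n", eap & 0x7ffff000);	/* bits 30:12 */
		printf("syndrome       : 0x%02x\n", (eap >> 4) & 0xff);	/* bits 11:4 */
		printf("MBE            : %u\n", (eap >> 1) & 1);
		printf("SBE            : %u\n", eap & 1);
		return 0;
	}
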
-
-#define R82600_DRBA 0x60 /* + 0x60..0x63 SDRAM Row Boundary Address
- * Registers
- *
- * 7:0 Address lines 30:24 - upper limit of
- * each row [p57]
- */
-
-struct r82600_error_info {
- u32 eapr;
-};
-
-static bool disable_hardware_scrub;
-
-static struct edac_pci_ctl_info *r82600_pci;
-
-static void r82600_get_error_info(struct mem_ctl_info *mci,
- struct r82600_error_info *info)
-{
- struct pci_dev *pdev;
-
- pdev = to_pci_dev(mci->dev);
- pci_read_config_dword(pdev, R82600_EAP, &info->eapr);
-
- if (info->eapr & BIT(0))
- /* Clear error to allow next error to be reported [p.62] */
- pci_write_bits32(pdev, R82600_EAP,
- ((u32) BIT(0) | (u32) BIT(1)),
- ((u32) BIT(0) | (u32) BIT(1)));
-
- if (info->eapr & BIT(1))
- /* Clear error to allow next error to be reported [p.62] */
- pci_write_bits32(pdev, R82600_EAP,
- ((u32) BIT(0) | (u32) BIT(1)),
- ((u32) BIT(0) | (u32) BIT(1)));
-}
-
-static int r82600_process_error_info(struct mem_ctl_info *mci,
- struct r82600_error_info *info,
- int handle_errors)
-{
- int error_found;
- u32 eapaddr, page;
- u32 syndrome;
-
- error_found = 0;
-
- /* bits 30:12 store the upper 19 bits of the 32 bit error address */
- eapaddr = ((info->eapr >> 12) & 0x7FFF) << 13;
- /* Syndrome in bits 11:4 [p.62] */
- syndrome = (info->eapr >> 4) & 0xFF;
-
- /* the R82600 reports at coarser than page *
- * granularity (upper 19 bits only) */
- page = eapaddr >> PAGE_SHIFT;
-
- if (info->eapr & BIT(0)) { /* CE? */
- error_found = 1;
-
- if (handle_errors)
- edac_mc_handle_ce(mci, page, 0, /* not avail */
- syndrome,
- edac_mc_find_csrow_by_page(mci, page),
- 0, mci->ctl_name);
- }
-
- if (info->eapr & BIT(1)) { /* UE? */
- error_found = 1;
-
- if (handle_errors)
- /* 82600 doesn't give enough info */
- edac_mc_handle_ue(mci, page, 0,
- edac_mc_find_csrow_by_page(mci, page),
- mci->ctl_name);
- }
-
- return error_found;
-}
-
-static void r82600_check(struct mem_ctl_info *mci)
-{
- struct r82600_error_info info;
-
- debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
- r82600_get_error_info(mci, &info);
- r82600_process_error_info(mci, &info, 1);
-}
-
-static inline int ecc_enabled(u8 dramcr)
-{
- return dramcr & BIT(5);
-}
-
-static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
- u8 dramcr)
-{
- struct csrow_info *csrow;
- int index;
- u8 drbar; /* SDRAM Row Boundary Address Register */
- u32 row_high_limit, row_high_limit_last;
- u32 reg_sdram, ecc_on, row_base;
-
- ecc_on = ecc_enabled(dramcr);
- reg_sdram = dramcr & BIT(4);
- row_high_limit_last = 0;
-
- for (index = 0; index < mci->nr_csrows; index++) {
- csrow = &mci->csrows[index];
-
- /* find the DRAM Chip Select Base address and mask */
- pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
-
- debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar);
-
- row_high_limit = ((u32) drbar << 24);
-/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
-
- debugf1("%s() Row=%d, Boundary Address=%#0x, Last = %#0x\n",
- __func__, index, row_high_limit, row_high_limit_last);
-
- /* Empty row [p.57] */
- if (row_high_limit == row_high_limit_last)
- continue;
-
- row_base = row_high_limit_last;
-
- csrow->first_page = row_base >> PAGE_SHIFT;
- csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
- csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
- /* Error address is top 19 bits - so granularity is *
- * 14 bits */
- csrow->grain = 1 << 14;
- csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
- /* FIXME - check that this is unknowable with this chipset */
- csrow->dtype = DEV_UNKNOWN;
-
- /* Mode is global on 82600 */
- csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
- row_high_limit_last = row_high_limit;
- }
-}
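
A worked example of the row-boundary arithmetic above, with invented DRBA values and 4 KiB pages (PAGE_SHIFT == 12). A row whose boundary equals the previous one is empty and is skipped, exactly as in the loop above:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		uint8_t drba[4] = { 0x08, 0x10, 0x10, 0x20 };	/* invented values */
		uint32_t limit, last = 0;
		int i;

		for (i = 0; i < 4; i++) {
			limit = (uint32_t)drba[i] << 24;	/* row upper boundary */
			if (limit == last)			/* empty row */
				continue;
			printf("csrow %d: pages 0x%lx..0x%lx\n", i,
			       (unsigned long)(last >> PAGE_SHIFT),
			       (unsigned long)((limit >> PAGE_SHIFT) - 1));
			last = limit;
		}
		return 0;
	}
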
-
-static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
-{
- struct mem_ctl_info *mci;
- u8 dramcr;
- u32 eapr;
- u32 scrub_disabled;
- u32 sdram_refresh_rate;
- struct r82600_error_info discard;
-
- debugf0("%s()\n", __func__);
- pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
- pci_read_config_dword(pdev, R82600_EAP, &eapr);
- scrub_disabled = eapr & BIT(31);
- sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
- debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
- sdram_refresh_rate);
- debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
- mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0);
-
- if (mci == NULL)
- return -ENOMEM;
-
- debugf0("%s(): mci = %p\n", __func__, mci);
- mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- /* FIXME try to work out if the chip leads have been used for COM2
- * instead on this board? [MA6?] MAYBE:
- */
-
- /* On the R82600, the pins for memory bits 72:65 - i.e. the *
- * EC bits are shared with the pins for COM2 (!), so if COM2 *
- * is enabled, we assume COM2 is wired up, and thus no EDAC *
- * is possible. */
- mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
-
- if (ecc_enabled(dramcr)) {
- if (scrub_disabled)
- debugf3("%s(): mci = %p - Scrubbing disabled! EAP: "
- "%#0x\n", __func__, mci, eapr);
- } else
- mci->edac_cap = EDAC_FLAG_NONE;
-
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = R82600_REVISION;
- mci->ctl_name = "R82600";
- mci->dev_name = pci_name(pdev);
- mci->edac_check = r82600_check;
- mci->ctl_page_to_phys = NULL;
- r82600_init_csrows(mci, pdev, dramcr);
- r82600_get_error_info(mci, &discard); /* clear counters */
-
- /* Here we assume that we will never see multiple instances of this
- * type of memory controller. The ID is therefore hardcoded to 0.
- */
- if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
- goto fail;
- }
-
- /* get this far and it's successful */
-
- if (disable_hardware_scrub) {
- debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n",
- __func__);
- pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31));
- }
-
- /* allocating generic PCI control info */
- r82600_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
- if (!r82600_pci) {
- printk(KERN_WARNING
- "%s(): Unable to create PCI control\n",
- __func__);
- printk(KERN_WARNING
- "%s(): PCI error report via EDAC not setup\n",
- __func__);
- }
-
- debugf3("%s(): success\n", __func__);
- return 0;
-
-fail:
- edac_mc_free(mci);
- return -ENODEV;
-}
-
-/* returns count (>= 0), or negative on error */
-static int __devinit r82600_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- debugf0("%s()\n", __func__);
-
- /* don't need to call pci_enable_device() */
- return r82600_probe1(pdev, ent->driver_data);
-}
-
-static void __devexit r82600_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
-
- debugf0("%s()\n", __func__);
-
- if (r82600_pci)
- edac_pci_release_generic_ctl(r82600_pci);
-
- if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
- return;
-
- edac_mc_free(mci);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(r82600_pci_tbl) = {
- {
- PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
- },
- {
- 0,
- } /* 0 terminated list. */
-};
-
-MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
-
-static struct pci_driver r82600_driver = {
- .name = EDAC_MOD_STR,
- .probe = r82600_init_one,
- .remove = __devexit_p(r82600_remove_one),
- .id_table = r82600_pci_tbl,
-};
-
-static int __init r82600_init(void)
-{
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- return pci_register_driver(&r82600_driver);
-}
-
-static void __exit r82600_exit(void)
-{
- pci_unregister_driver(&r82600_driver);
-}
-
-module_init(r82600_init);
-module_exit(r82600_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
- "on behalf of EADS Astrium");
-MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
-
-module_param(disable_hardware_scrub, bool, 0644);
-MODULE_PARM_DESC(disable_hardware_scrub,
- "If set, disable the chipset's automatic scrub for CEs");
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/ANDROID_3.4.5/drivers/edac/sb_edac.c b/ANDROID_3.4.5/drivers/edac/sb_edac.c
deleted file mode 100644
index 0f9552d6..00000000
--- a/ANDROID_3.4.5/drivers/edac/sb_edac.c
+++ /dev/null
@@ -1,1899 +0,0 @@
-/* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
- *
- * This driver supports the memory controllers found on the Intel
- * processor family Sandy Bridge.
- *
- * This file may be distributed under the terms of the
- * GNU General Public License version 2 only.
- *
- * Copyright (c) 2011 by:
- * Mauro Carvalho Chehab <mchehab@redhat.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/edac.h>
-#include <linux/mmzone.h>
-#include <linux/smp.h>
-#include <linux/bitmap.h>
-#include <linux/math64.h>
-#include <asm/processor.h>
-#include <asm/mce.h>
-
-#include "edac_core.h"
-
-/* Static vars */
-static LIST_HEAD(sbridge_edac_list);
-static DEFINE_MUTEX(sbridge_edac_lock);
-static int probed;
-
-/*
- * Alter this version for the module when modifications are made
- */
-#define SBRIDGE_REVISION " Ver: 1.0.0 "
-#define EDAC_MOD_STR "sbridge_edac"
-
-/*
- * Debug macros
- */
-#define sbridge_printk(level, fmt, arg...) \
- edac_printk(level, "sbridge", fmt, ##arg)
-
-#define sbridge_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
-
-/*
- * Get a bit field at register value <v>, from bit <lo> to bit <hi>
- */
-#define GET_BITFIELD(v, lo, hi) \
- (((v) & ((1ULL << ((hi) - (lo) + 1)) - 1) << (lo)) >> (lo))
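
A quick userspace check of the helper above (the register value is made up; both bit bounds are inclusive, counted from bit 0 at the LSB):

	#include <stdio.h>
	#include <stdint.h>

	#define GET_BITFIELD(v, lo, hi)	\
		(((v) & ((1ULL << ((hi) - (lo) + 1)) - 1) << (lo)) >> (lo))

	int main(void)
	{
		uint64_t v = 0x6d;	/* 0110 1101b, invented */

		/* bits 3:2 are 11b, so this prints 3 */
		printf("field [3:2] = %llu\n",
		       (unsigned long long)GET_BITFIELD(v, 2, 3));
		return 0;
	}
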
-
-/*
- * sbridge Memory Controller Registers
- */
-
-/*
- * FIXME: For now, let's order by device function, as it makes the
- * driver's development process easier. This table should be
- * moved to pci_id.h when submitted upstream
- */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
-
- /*
-  * Currently unused, but will be needed by future
-  * implementations, as they hold the error counters
-  */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */
-
-/* Device 12, Function 6, Offsets 0x80 to 0xcc */
-static const u32 dram_rule[] = {
- 0x80, 0x88, 0x90, 0x98, 0xa0,
- 0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
-};
-#define MAX_SAD ARRAY_SIZE(dram_rule)
-
-#define SAD_LIMIT(reg) ((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff)
-#define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3)
-#define INTERLEAVE_MODE(reg) GET_BITFIELD(reg, 1, 1)
-#define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
-
-static char *get_dram_attr(u32 reg)
-{
- switch(DRAM_ATTR(reg)) {
- case 0:
- return "DRAM";
- case 1:
- return "MMCFG";
- case 2:
- return "NXM";
- default:
- return "unknown";
- }
-}
-
-static const u32 interleave_list[] = {
- 0x84, 0x8c, 0x94, 0x9c, 0xa4,
- 0xac, 0xb4, 0xbc, 0xc4, 0xcc,
-};
-#define MAX_INTERLEAVE ARRAY_SIZE(interleave_list)
-
-#define SAD_PKG0(reg) GET_BITFIELD(reg, 0, 2)
-#define SAD_PKG1(reg) GET_BITFIELD(reg, 3, 5)
-#define SAD_PKG2(reg) GET_BITFIELD(reg, 8, 10)
-#define SAD_PKG3(reg) GET_BITFIELD(reg, 11, 13)
-#define SAD_PKG4(reg) GET_BITFIELD(reg, 16, 18)
-#define SAD_PKG5(reg) GET_BITFIELD(reg, 19, 21)
-#define SAD_PKG6(reg) GET_BITFIELD(reg, 24, 26)
-#define SAD_PKG7(reg) GET_BITFIELD(reg, 27, 29)
-
-static inline int sad_pkg(u32 reg, int interleave)
-{
- switch (interleave) {
- case 0:
- return SAD_PKG0(reg);
- case 1:
- return SAD_PKG1(reg);
- case 2:
- return SAD_PKG2(reg);
- case 3:
- return SAD_PKG3(reg);
- case 4:
- return SAD_PKG4(reg);
- case 5:
- return SAD_PKG5(reg);
- case 6:
- return SAD_PKG6(reg);
- case 7:
- return SAD_PKG7(reg);
- default:
- return -EINVAL;
- }
-}
-
-/* Device 12, Function 7 */
-
-#define TOLM 0x80
-#define TOHM 0x84
-
-#define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff)
-#define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
-
-/* Device 13 Function 6 */
-
-#define SAD_TARGET 0xf0
-
-#define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11)
-
-#define SAD_CONTROL 0xf4
-
-#define NODE_ID(reg) GET_BITFIELD(reg, 0, 2)
-
-/* Device 14 function 0 */
-
-static const u32 tad_dram_rule[] = {
- 0x40, 0x44, 0x48, 0x4c,
- 0x50, 0x54, 0x58, 0x5c,
- 0x60, 0x64, 0x68, 0x6c,
-};
-#define MAX_TAD ARRAY_SIZE(tad_dram_rule)
-
-#define TAD_LIMIT(reg) ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
-#define TAD_SOCK(reg) GET_BITFIELD(reg, 10, 11)
-#define TAD_CH(reg) GET_BITFIELD(reg, 8, 9)
-#define TAD_TGT3(reg) GET_BITFIELD(reg, 6, 7)
-#define TAD_TGT2(reg) GET_BITFIELD(reg, 4, 5)
-#define TAD_TGT1(reg) GET_BITFIELD(reg, 2, 3)
-#define TAD_TGT0(reg) GET_BITFIELD(reg, 0, 1)
-
-/* Device 15, function 0 */
-
-#define MCMTR 0x7c
-
-#define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2)
-#define IS_LOCKSTEP_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 1, 1)
-#define IS_CLOSE_PG(mcmtr) GET_BITFIELD(mcmtr, 0, 0)
-
-/* Device 15, function 1 */
-
-#define RASENABLES 0xac
-#define IS_MIRROR_ENABLED(reg) GET_BITFIELD(reg, 0, 0)
-
-/* Device 15, functions 2-5 */
-
-static const int mtr_regs[] = {
- 0x80, 0x84, 0x88,
-};
-
-#define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19)
-#define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14)
-#define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13)
-#define RANK_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 2, 4)
-#define COL_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 0, 1)
-
-static const u32 tad_ch_nilv_offset[] = {
- 0x90, 0x94, 0x98, 0x9c,
- 0xa0, 0xa4, 0xa8, 0xac,
- 0xb0, 0xb4, 0xb8, 0xbc,
-};
-#define CHN_IDX_OFFSET(reg) GET_BITFIELD(reg, 28, 29)
-#define TAD_OFFSET(reg) (GET_BITFIELD(reg, 6, 25) << 26)
-
-static const u32 rir_way_limit[] = {
- 0x108, 0x10c, 0x110, 0x114, 0x118,
-};
-#define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
-
-#define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31)
-#define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29)
-#define RIR_LIMIT(reg) ((GET_BITFIELD(reg, 1, 10) << 29)| 0x1fffffff)
-
-#define MAX_RIR_WAY 8
-
-static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
- { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
- { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
- { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
- { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
- { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
-};
-
-#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
-#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
-
-/* Device 16, functions 2-7 */
-
-/*
- * FIXME: Implement the error count reads directly
- */
-
-static const u32 correrrcnt[] = {
- 0x104, 0x108, 0x10c, 0x110,
-};
-
-#define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31)
-#define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30)
-#define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15)
-#define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14)
-
-static const u32 correrrthrsld[] = {
- 0x11c, 0x120, 0x124, 0x128,
-};
-
-#define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30)
-#define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14)
-
-
-/* Device 17, function 0 */
-
-#define RANK_CFG_A 0x0328
-
-#define IS_RDIMM_ENABLED(reg) GET_BITFIELD(reg, 11, 11)
-
-/*
- * sbridge structs
- */
-
-#define NUM_CHANNELS 4
-#define MAX_DIMMS 3 /* Max DIMMS per channel */
-
-struct sbridge_info {
- u32 mcmtr;
-};
-
-struct sbridge_channel {
- u32 ranks;
- u32 dimms;
-};
-
-struct pci_id_descr {
- int dev;
- int func;
- int dev_id;
- int optional;
-};
-
-struct pci_id_table {
- const struct pci_id_descr *descr;
- int n_devs;
-};
-
-struct sbridge_dev {
- struct list_head list;
- u8 bus, mc;
- u8 node_id, source_id;
- struct pci_dev **pdev;
- int n_devs;
- struct mem_ctl_info *mci;
-};
-
-struct sbridge_pvt {
- struct pci_dev *pci_ta, *pci_ddrio, *pci_ras;
- struct pci_dev *pci_sad0, *pci_sad1, *pci_ha0;
- struct pci_dev *pci_br;
- struct pci_dev *pci_tad[NUM_CHANNELS];
-
- struct sbridge_dev *sbridge_dev;
-
- struct sbridge_info info;
- struct sbridge_channel channel[NUM_CHANNELS];
-
- int csrow_map[NUM_CHANNELS][MAX_DIMMS];
-
- /* Memory type detection */
- bool is_mirrored, is_lockstep, is_close_pg;
-
- /* Fifo double buffers */
- struct mce mce_entry[MCE_LOG_LEN];
- struct mce mce_outentry[MCE_LOG_LEN];
-
- /* Fifo in/out counters */
- unsigned mce_in, mce_out;
-
- /* Count indicator to show errors that were not retrieved */
- unsigned mce_overrun;
-
- /* Memory description */
- u64 tolm, tohm;
-};
-
-#define PCI_DESCR(device, function, device_id) \
- .dev = (device), \
- .func = (function), \
- .dev_id = (device_id)
-
-static const struct pci_id_descr pci_dev_descr_sbridge[] = {
- /* Processor Home Agent */
- { PCI_DESCR(14, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0) },
-
- /* Memory controller */
- { PCI_DESCR(15, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA) },
- { PCI_DESCR(15, 1, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS) },
- { PCI_DESCR(15, 2, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0) },
- { PCI_DESCR(15, 3, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1) },
- { PCI_DESCR(15, 4, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2) },
- { PCI_DESCR(15, 5, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3) },
- { PCI_DESCR(17, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO) },
-
- /* System Address Decoder */
- { PCI_DESCR(12, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0) },
- { PCI_DESCR(12, 7, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1) },
-
- /* Broadcast Registers */
- { PCI_DESCR(13, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_BR) },
-};
-
-#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
-static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
- {0,} /* 0 terminated list. */
-};
-
-/*
- * pci_device_id table for which devices we are looking for
- */
-static DEFINE_PCI_DEVICE_TABLE(sbridge_pci_tbl) = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
- {0,} /* 0 terminated list. */
-};
-
-
-/****************************************************************************
-			Ancillary status routines
- ****************************************************************************/
-
-static inline int numrank(u32 mtr)
-{
- int ranks = (1 << RANK_CNT_BITS(mtr));
-
- if (ranks > 4) {
- debugf0("Invalid number of ranks: %d (max = 4) raw value = %x (%04x)",
- ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr);
- return -EINVAL;
- }
-
- return ranks;
-}
-
-static inline int numrow(u32 mtr)
-{
- int rows = (RANK_WIDTH_BITS(mtr) + 12);
-
- if (rows < 13 || rows > 18) {
- debugf0("Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)",
- rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
- return -EINVAL;
- }
-
- return 1 << rows;
-}
-
-static inline int numcol(u32 mtr)
-{
- int cols = (COL_WIDTH_BITS(mtr) + 10);
-
- if (cols > 12) {
- debugf0("Invalid number of cols: %d (max = 12) raw value = %x (%04x)",
- cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
- return -EINVAL;
- }
-
- return 1 << cols;
-}
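
The three helpers above feed the per-DIMM size computation done later in get_dimm_config(). A worked example with an invented geometry shows why the shift there is (20 - 3): the data bus is 64 bits wide, so every row/column/bank/rank combination accounts for 8 bytes:

	#include <stdio.h>

	int main(void)
	{
		int ranks = 2;		/* dual-rank (invented) */
		int rows  = 1 << 15;	/* 15 row address bits */
		int cols  = 1 << 10;	/* 10 column address bits */
		int banks = 8;		/* DDR3 always has 8 banks */
		int size_mb = (rows * cols * banks * ranks) >> (20 - 3);

		printf("DIMM size: %d MiB\n", size_mb);	/* prints 4096 */
		return 0;
	}
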
-
-static struct sbridge_dev *get_sbridge_dev(u8 bus)
-{
- struct sbridge_dev *sbridge_dev;
-
- list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
- if (sbridge_dev->bus == bus)
- return sbridge_dev;
- }
-
- return NULL;
-}
-
-static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
- const struct pci_id_table *table)
-{
- struct sbridge_dev *sbridge_dev;
-
- sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
- if (!sbridge_dev)
- return NULL;
-
- sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs,
- GFP_KERNEL);
- if (!sbridge_dev->pdev) {
- kfree(sbridge_dev);
- return NULL;
- }
-
- sbridge_dev->bus = bus;
- sbridge_dev->n_devs = table->n_devs;
- list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
-
- return sbridge_dev;
-}
-
-static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
-{
- list_del(&sbridge_dev->list);
- kfree(sbridge_dev->pdev);
- kfree(sbridge_dev);
-}
-
-/****************************************************************************
- Memory check routines
- ****************************************************************************/
-static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
- unsigned func)
-{
- struct sbridge_dev *sbridge_dev = get_sbridge_dev(bus);
- int i;
-
- if (!sbridge_dev)
- return NULL;
-
- for (i = 0; i < sbridge_dev->n_devs; i++) {
- if (!sbridge_dev->pdev[i])
- continue;
-
- if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot &&
- PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) {
- debugf1("Associated %02x.%02x.%d with %p\n",
- bus, slot, func, sbridge_dev->pdev[i]);
- return sbridge_dev->pdev[i];
- }
- }
-
- return NULL;
-}
-
-/**
- * sbridge_get_active_channels() - gets the number of channels and csrows
- * @bus: Device bus
- * @channels: Number of channels that will be returned
- * @csrows: Number of csrows found
- *
- * Since the EDAC core needs to know the number of available channels and
- * csrows in advance, in order to allocate memory for them, two similar
- * passes are needed. The first pass, implemented in this function, counts
- * the csrows/channels present on one socket, identified by the associated
- * PCI bus.
- * This is used to properly size the mci components.
- * Note: one csrow is one dimm.
- */
-static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
- unsigned *csrows)
-{
- struct pci_dev *pdev = NULL;
- int i, j;
- u32 mcmtr;
-
- *channels = 0;
- *csrows = 0;
-
- pdev = get_pdev_slot_func(bus, 15, 0);
- if (!pdev) {
- sbridge_printk(KERN_ERR, "Couldn't find PCI device "
- "%2x.%02d.%d!!!\n",
- bus, 15, 0);
- return -ENODEV;
- }
-
- pci_read_config_dword(pdev, MCMTR, &mcmtr);
- if (!IS_ECC_ENABLED(mcmtr)) {
- sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
- return -ENODEV;
- }
-
- for (i = 0; i < NUM_CHANNELS; i++) {
- u32 mtr;
-
- /* Device 15 functions 2 - 5 */
- pdev = get_pdev_slot_func(bus, 15, 2 + i);
- if (!pdev) {
- sbridge_printk(KERN_ERR, "Couldn't find PCI device "
- "%2x.%02d.%d!!!\n",
- bus, 15, 2 + i);
- return -ENODEV;
- }
- (*channels)++;
-
- for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
- pci_read_config_dword(pdev, mtr_regs[j], &mtr);
- debugf1("Bus#%02x channel #%d MTR%d = %x\n", bus, i, j, mtr);
- if (IS_DIMM_PRESENT(mtr))
- (*csrows)++;
- }
- }
-
- debugf0("Number of active channels: %d, number of active dimms: %d\n",
- *channels, *csrows);
-
- return 0;
-}
-
-static int get_dimm_config(const struct mem_ctl_info *mci)
-{
- struct sbridge_pvt *pvt = mci->pvt_info;
- struct csrow_info *csr;
- int i, j, banks, ranks, rows, cols, size, npages;
- int csrow = 0;
- unsigned long last_page = 0;
- u32 reg;
- enum edac_type mode;
- enum mem_type mtype;
-
- pci_read_config_dword(pvt->pci_br, SAD_TARGET, &reg);
- pvt->sbridge_dev->source_id = SOURCE_ID(reg);
-
- pci_read_config_dword(pvt->pci_br, SAD_CONTROL, &reg);
- pvt->sbridge_dev->node_id = NODE_ID(reg);
- debugf0("mc#%d: Node ID: %d, source ID: %d\n",
- pvt->sbridge_dev->mc,
- pvt->sbridge_dev->node_id,
- pvt->sbridge_dev->source_id);
-
- pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
- if (IS_MIRROR_ENABLED(reg)) {
- debugf0("Memory mirror is enabled\n");
- pvt->is_mirrored = true;
- } else {
- debugf0("Memory mirror is disabled\n");
- pvt->is_mirrored = false;
- }
-
- pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
- if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
- debugf0("Lockstep is enabled\n");
- mode = EDAC_S8ECD8ED;
- pvt->is_lockstep = true;
- } else {
- debugf0("Lockstep is disabled\n");
- mode = EDAC_S4ECD4ED;
- pvt->is_lockstep = false;
- }
- if (IS_CLOSE_PG(pvt->info.mcmtr)) {
- debugf0("address map is on closed page mode\n");
- pvt->is_close_pg = true;
- } else {
- debugf0("address map is on open page mode\n");
- pvt->is_close_pg = false;
- }
-
- pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
- if (IS_RDIMM_ENABLED(reg)) {
- /* FIXME: Can also be LRDIMM */
- debugf0("Memory is registered\n");
- mtype = MEM_RDDR3;
- } else {
- debugf0("Memory is unregistered\n");
- mtype = MEM_DDR3;
- }
-
- /* On all supported DDR3 DIMM types, there are 8 banks available */
- banks = 8;
-
- for (i = 0; i < NUM_CHANNELS; i++) {
- u32 mtr;
-
- for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
- pci_read_config_dword(pvt->pci_tad[i],
- mtr_regs[j], &mtr);
- debugf4("Channel #%d MTR%d = %x\n", i, j, mtr);
- if (IS_DIMM_PRESENT(mtr)) {
- pvt->channel[i].dimms++;
-
- ranks = numrank(mtr);
- rows = numrow(mtr);
- cols = numcol(mtr);
-
- /* DDR3 has 8 I/O banks */
- size = (rows * cols * banks * ranks) >> (20 - 3);
- npages = MiB_TO_PAGES(size);
-
- debugf0("mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
- pvt->sbridge_dev->mc, i, j,
- size, npages,
- banks, ranks, rows, cols);
- csr = &mci->csrows[csrow];
-
- csr->first_page = last_page;
- csr->last_page = last_page + npages - 1;
- csr->page_mask = 0UL; /* Unused */
- csr->nr_pages = npages;
- csr->grain = 32;
- csr->csrow_idx = csrow;
- csr->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
- csr->ce_count = 0;
- csr->ue_count = 0;
- csr->mtype = mtype;
- csr->edac_mode = mode;
- csr->nr_channels = 1;
- csr->channels[0].chan_idx = i;
- csr->channels[0].ce_count = 0;
- pvt->csrow_map[i][j] = csrow;
- snprintf(csr->channels[0].label,
- sizeof(csr->channels[0].label),
- "CPU_SrcID#%u_Channel#%u_DIMM#%u",
- pvt->sbridge_dev->source_id, i, j);
- last_page += npages;
- csrow++;
- }
- }
- }
-
- return 0;
-}
-
-static void get_memory_layout(const struct mem_ctl_info *mci)
-{
- struct sbridge_pvt *pvt = mci->pvt_info;
- int i, j, k, n_sads, n_tads, sad_interl;
- u32 reg;
- u64 limit, prv = 0;
- u64 tmp_mb;
- u32 mb, kb;
- u32 rir_way;
-
- /*
- * Step 1) Get TOLM/TOHM ranges
- */
-
- /* Address range is 32:28 */
- pci_read_config_dword(pvt->pci_sad1, TOLM,
- &reg);
- pvt->tolm = GET_TOLM(reg);
- tmp_mb = (1 + pvt->tolm) >> 20;
-
- mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("TOLM: %u.%03u GB (0x%016Lx)\n",
- mb, kb, (u64)pvt->tolm);
-
- /* Address range is already 45:25 */
- pci_read_config_dword(pvt->pci_sad1, TOHM,
- &reg);
- pvt->tohm = GET_TOHM(reg);
- tmp_mb = (1 + pvt->tohm) >> 20;
-
- mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("TOHM: %u.%03u GB (0x%016Lx)",
- mb, kb, (u64)pvt->tohm);
-
- /*
- * Step 2) Get SAD range and SAD Interleave list
- * TAD registers contain the interleave wayness. However, it
- * seems simpler to just discover it indirectly, with the
- * algorithm below.
- */
- prv = 0;
- for (n_sads = 0; n_sads < MAX_SAD; n_sads++) {
- /* SAD_LIMIT Address range is 45:26 */
- pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads],
- &reg);
- limit = SAD_LIMIT(reg);
-
- if (!DRAM_RULE_ENABLE(reg))
- continue;
-
- if (limit <= prv)
- break;
-
- tmp_mb = (limit + 1) >> 20;
- mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("SAD#%d %s up to %u.%03u GB (0x%016Lx) %s reg=0x%08x\n",
- n_sads,
- get_dram_attr(reg),
- mb, kb,
- ((u64)tmp_mb) << 20L,
- INTERLEAVE_MODE(reg) ? "Interleave: 8:6" : "Interleave: [8:6]XOR[18:16]",
- reg);
- prv = limit;
-
- pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
- &reg);
- sad_interl = sad_pkg(reg, 0);
- for (j = 0; j < 8; j++) {
- if (j > 0 && sad_interl == sad_pkg(reg, j))
- break;
-
- debugf0("SAD#%d, interleave #%d: %d\n",
- n_sads, j, sad_pkg(reg, j));
- }
- }
-
- /*
- * Step 3) Get TAD range
- */
- prv = 0;
- for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
- pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
- &reg);
- limit = TAD_LIMIT(reg);
- if (limit <= prv)
- break;
- tmp_mb = (limit + 1) >> 20;
-
- mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
- n_tads, mb, kb,
- ((u64)tmp_mb) << 20L,
- (u32)TAD_SOCK(reg),
- (u32)TAD_CH(reg),
- (u32)TAD_TGT0(reg),
- (u32)TAD_TGT1(reg),
- (u32)TAD_TGT2(reg),
- (u32)TAD_TGT3(reg),
- reg);
- prv = limit;
- }
-
- /*
- * Step 4) Get TAD offsets, per each channel
- */
- for (i = 0; i < NUM_CHANNELS; i++) {
- if (!pvt->channel[i].dimms)
- continue;
- for (j = 0; j < n_tads; j++) {
- pci_read_config_dword(pvt->pci_tad[i],
- tad_ch_nilv_offset[j],
- &reg);
- tmp_mb = TAD_OFFSET(reg) >> 20;
- mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
- i, j,
- mb, kb,
- ((u64)tmp_mb) << 20L,
- reg);
- }
- }
-
- /*
- * Step 6) Get RIR Wayness/Limit, per each channel
- */
- for (i = 0; i < NUM_CHANNELS; i++) {
- if (!pvt->channel[i].dimms)
- continue;
- for (j = 0; j < MAX_RIR_RANGES; j++) {
- pci_read_config_dword(pvt->pci_tad[i],
- rir_way_limit[j],
- &reg);
-
- if (!IS_RIR_VALID(reg))
- continue;
-
- tmp_mb = RIR_LIMIT(reg) >> 20;
- rir_way = 1 << RIR_WAY(reg);
- mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
- i, j,
- mb, kb,
- ((u64)tmp_mb) << 20L,
- rir_way,
- reg);
-
- for (k = 0; k < rir_way; k++) {
- pci_read_config_dword(pvt->pci_tad[i],
- rir_offset[j][k],
- &reg);
- tmp_mb = RIR_OFFSET(reg) << 6;
-
- mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
- i, j, k,
- mb, kb,
- ((u64)tmp_mb) << 20L,
- (u32)RIR_RNK_TGT(reg),
- reg);
- }
- }
- }
-}
-
-struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
-{
- struct sbridge_dev *sbridge_dev;
-
- list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
- if (sbridge_dev->node_id == node_id)
- return sbridge_dev->mci;
- }
- return NULL;
-}
-
-static int get_memory_error_data(struct mem_ctl_info *mci,
- u64 addr,
- u8 *socket,
- long *channel_mask,
- u8 *rank,
- char *area_type)
-{
- struct mem_ctl_info *new_mci;
- struct sbridge_pvt *pvt = mci->pvt_info;
- char msg[256];
- int n_rir, n_sads, n_tads, sad_way, sck_xch;
- int sad_interl, idx, base_ch;
- int interleave_mode;
- unsigned sad_interleave[MAX_INTERLEAVE];
- u32 reg;
- u8 ch_way,sck_way;
- u32 tad_offset;
- u32 rir_way;
- u32 mb, kb;
- u64 ch_addr, offset, limit, prv = 0;
-
-
- /*
- * Step 0) Check if the address is in a special memory range
- * The check below is probably enough to cover all cases where
- * the error is not inside memory, except for the legacy
- * range (e.g. VGA addresses). It is unlikely, however, that the
- * memory controller would generate an error on that range.
- */
- if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
- sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
- edac_mc_handle_ce_no_info(mci, msg);
- return -EINVAL;
- }
- if (addr >= (u64)pvt->tohm) {
- sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
- edac_mc_handle_ce_no_info(mci, msg);
- return -EINVAL;
- }
-
- /*
- * Step 1) Get socket
- */
- for (n_sads = 0; n_sads < MAX_SAD; n_sads++) {
- pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads],
- &reg);
-
- if (!DRAM_RULE_ENABLE(reg))
- continue;
-
- limit = SAD_LIMIT(reg);
- if (limit <= prv) {
- sprintf(msg, "Can't discover the memory socket");
- edac_mc_handle_ce_no_info(mci, msg);
- return -EINVAL;
- }
- if (addr <= limit)
- break;
- prv = limit;
- }
- if (n_sads == MAX_SAD) {
- sprintf(msg, "Can't discover the memory socket");
- edac_mc_handle_ce_no_info(mci, msg);
- return -EINVAL;
- }
- area_type = get_dram_attr(reg);
- interleave_mode = INTERLEAVE_MODE(reg);
-
- pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
- &reg);
- sad_interl = sad_pkg(reg, 0);
- for (sad_way = 0; sad_way < 8; sad_way++) {
- if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way))
- break;
- sad_interleave[sad_way] = sad_pkg(reg, sad_way);
- debugf0("SAD interleave #%d: %d\n",
- sad_way, sad_interleave[sad_way]);
- }
- debugf0("mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
- pvt->sbridge_dev->mc,
- n_sads,
- addr,
- limit,
- sad_way + 7,
- interleave_mode ? "" : "XOR[18:16]");
- if (interleave_mode)
- idx = ((addr >> 6) ^ (addr >> 16)) & 7;
- else
- idx = (addr >> 6) & 7;
- switch (sad_way) {
- case 1:
- idx = 0;
- break;
- case 2:
- idx = idx & 1;
- break;
- case 4:
- idx = idx & 3;
- break;
- case 8:
- break;
- default:
- sprintf(msg, "Can't discover socket interleave");
- edac_mc_handle_ce_no_info(mci, msg);
- return -EINVAL;
- }
- *socket = sad_interleave[idx];
- debugf0("SAD interleave index: %d (wayness %d) = CPU socket %d\n",
- idx, sad_way, *socket);
-
- /*
- * Move to the proper node structure, in order to access the
- * right PCI registers
- */
- new_mci = get_mci_for_node_id(*socket);
- if (!new_mci) {
- sprintf(msg, "Struct for socket #%u wasn't initialized",
- *socket);
- edac_mc_handle_ce_no_info(mci, msg);
- return -EINVAL;
- }
- mci = new_mci;
- pvt = mci->pvt_info;
-
- /*
- * Step 2) Get memory channel
- */
- prv = 0;
- for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
- pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
- &reg);
- limit = TAD_LIMIT(reg);
- if (limit <= prv) {
- sprintf(msg, "Can't discover the memory channel");
- edac_mc_handle_ce_no_info(mci, msg);
- return -EINVAL;
- }
- if (addr <= limit)
- break;
- prv = limit;
- }
- ch_way = TAD_CH(reg) + 1;
- sck_way = TAD_SOCK(reg) + 1;
- /*
- * FIXME: Is it right to always use channel 0 for offsets?
- */
- pci_read_config_dword(pvt->pci_tad[0],
- tad_ch_nilv_offset[n_tads],
- &tad_offset);
-
- if (ch_way == 3)
- idx = addr >> 6;
- else
- idx = addr >> (6 + sck_way);
- idx = idx % ch_way;
-
- /*
- * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
- */
- switch (idx) {
- case 0:
- base_ch = TAD_TGT0(reg);
- break;
- case 1:
- base_ch = TAD_TGT1(reg);
- break;
- case 2:
- base_ch = TAD_TGT2(reg);
- break;
- case 3:
- base_ch = TAD_TGT3(reg);
- break;
- default:
- sprintf(msg, "Can't discover the TAD target");
- edac_mc_handle_ce_no_info(mci, msg);
- return -EINVAL;
- }
- *channel_mask = 1 << base_ch;
-
- if (pvt->is_mirrored) {
- *channel_mask |= 1 << ((base_ch + 2) % 4);
- switch(ch_way) {
- case 2:
- case 4:
- sck_xch = 1 << sck_way * (ch_way >> 1);
- break;
- default:
- sprintf(msg, "Invalid mirror set. Can't decode addr");
- edac_mc_handle_ce_no_info(mci, msg);
- return -EINVAL;
- }
- } else
- sck_xch = (1 << sck_way) * ch_way;
-
- if (pvt->is_lockstep)
- *channel_mask |= 1 << ((base_ch + 1) % 4);
-
- offset = TAD_OFFSET(tad_offset);
-
- debugf0("TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
- n_tads,
- addr,
- limit,
- (u32)TAD_SOCK(reg),
- ch_way,
- offset,
- idx,
- base_ch,
- *channel_mask);
-
- /* Calculate channel address */
- /* Remove the TAD offset */
-
- if (offset > addr) {
- sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
- offset, addr);
- edac_mc_handle_ce_no_info(mci, msg);
- return -EINVAL;
- }
- addr -= offset;
- /* Store the low bits [0:6] of the addr */
- ch_addr = addr & 0x7f;
- /* Remove socket wayness and remove 6 bits */
- addr >>= 6;
- addr = div_u64(addr, sck_xch);
-#if 0
- /* Divide by channel way */
- addr = addr / ch_way;
-#endif
- /* Recover the last 6 bits */
- ch_addr |= addr << 6;
-
- /*
- * Step 3) Decode rank
- */
- for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
- pci_read_config_dword(pvt->pci_tad[base_ch],
- rir_way_limit[n_rir],
- &reg);
-
- if (!IS_RIR_VALID(reg))
- continue;
-
- limit = RIR_LIMIT(reg);
- mb = div_u64_rem(limit >> 20, 1000, &kb);
- debugf0("RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
- n_rir,
- mb, kb,
- limit,
- 1 << RIR_WAY(reg));
- if (ch_addr <= limit)
- break;
- }
- if (n_rir == MAX_RIR_RANGES) {
- sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
- ch_addr);
- edac_mc_handle_ce_no_info(mci, msg);
- return -EINVAL;
- }
- rir_way = RIR_WAY(reg);
- if (pvt->is_close_pg)
- idx = (ch_addr >> 6);
- else
- idx = (ch_addr >> 13); /* FIXME: Datasheet says to shift by 15 */
- idx %= 1 << rir_way;
-
- pci_read_config_dword(pvt->pci_tad[base_ch],
- rir_offset[n_rir][idx],
- &reg);
- *rank = RIR_RNK_TGT(reg);
-
- debugf0("RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
- n_rir,
- ch_addr,
- limit,
- rir_way,
- idx);
-
- return 0;
-}
-
-/****************************************************************************
- Device initialization routines: put/get, init/exit
- ****************************************************************************/
-
-/*
- * sbridge_put_all_devices 'put' all the devices that we have
- * reserved via 'get'
- */
-static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
-{
- int i;
-
- debugf0(__FILE__ ": %s()\n", __func__);
- for (i = 0; i < sbridge_dev->n_devs; i++) {
- struct pci_dev *pdev = sbridge_dev->pdev[i];
- if (!pdev)
- continue;
- debugf0("Removing dev %02x:%02x.%d\n",
- pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
- pci_dev_put(pdev);
- }
-}
-
-static void sbridge_put_all_devices(void)
-{
- struct sbridge_dev *sbridge_dev, *tmp;
-
- list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
- sbridge_put_devices(sbridge_dev);
- free_sbridge_dev(sbridge_dev);
- }
-}
-
-/*
- * sbridge_get_all_devices Find and perform 'get' operation on the MCH's
- * device/functions we want to reference for this driver
- *
- * Need to 'get' device 16 func 1 and func 2
- */
-static int sbridge_get_onedevice(struct pci_dev **prev,
- u8 *num_mc,
- const struct pci_id_table *table,
- const unsigned devno)
-{
- struct sbridge_dev *sbridge_dev;
- const struct pci_id_descr *dev_descr = &table->descr[devno];
-
- struct pci_dev *pdev = NULL;
- u8 bus = 0;
-
- sbridge_printk(KERN_INFO,
- "Seeking for: dev %02x.%d PCI ID %04x:%04x\n",
- dev_descr->dev, dev_descr->func,
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
-
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- dev_descr->dev_id, *prev);
-
- if (!pdev) {
- if (*prev) {
- *prev = pdev;
- return 0;
- }
-
- if (dev_descr->optional)
- return 0;
-
- if (devno == 0)
- return -ENODEV;
-
- sbridge_printk(KERN_INFO,
- "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
- dev_descr->dev, dev_descr->func,
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
-
- /* End of list, leave */
- return -ENODEV;
- }
- bus = pdev->bus->number;
-
- sbridge_dev = get_sbridge_dev(bus);
- if (!sbridge_dev) {
- sbridge_dev = alloc_sbridge_dev(bus, table);
- if (!sbridge_dev) {
- pci_dev_put(pdev);
- return -ENOMEM;
- }
- (*num_mc)++;
- }
-
- if (sbridge_dev->pdev[devno]) {
- sbridge_printk(KERN_ERR,
- "Duplicated device for "
- "dev %02x:%d.%d PCI ID %04x:%04x\n",
- bus, dev_descr->dev, dev_descr->func,
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
- pci_dev_put(pdev);
- return -ENODEV;
- }
-
- sbridge_dev->pdev[devno] = pdev;
-
- /* Sanity check */
- if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
- PCI_FUNC(pdev->devfn) != dev_descr->func)) {
- sbridge_printk(KERN_ERR,
- "Device PCI ID %04x:%04x "
- "has dev %02x:%d.%d instead of dev %02x:%02x.%d\n",
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
- bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
- bus, dev_descr->dev, dev_descr->func);
- return -ENODEV;
- }
-
- /* Be sure that the device is enabled */
- if (unlikely(pci_enable_device(pdev) < 0)) {
- sbridge_printk(KERN_ERR,
- "Couldn't enable "
- "dev %02x:%d.%d PCI ID %04x:%04x\n",
- bus, dev_descr->dev, dev_descr->func,
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
- return -ENODEV;
- }
-
- debugf0("Detected dev %02x:%d.%d PCI ID %04x:%04x\n",
- bus, dev_descr->dev,
- dev_descr->func,
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
-
- /*
- * As stated in drivers/pci/search.c, the reference count for
- * @from is always decremented if it is not %NULL. So, since we need
- * to walk all devices up to NULL, we need to do a get on each device.
- */
- pci_dev_get(pdev);
-
- *prev = pdev;
-
- return 0;
-}
-
-static int sbridge_get_all_devices(u8 *num_mc)
-{
- int i, rc;
- struct pci_dev *pdev = NULL;
- const struct pci_id_table *table = pci_dev_descr_sbridge_table;
-
- while (table && table->descr) {
- for (i = 0; i < table->n_devs; i++) {
- pdev = NULL;
- do {
- rc = sbridge_get_onedevice(&pdev, num_mc,
- table, i);
- if (rc < 0) {
- if (i == 0) {
- i = table->n_devs;
- break;
- }
- sbridge_put_all_devices();
- return -ENODEV;
- }
- } while (pdev);
- }
- table++;
- }
-
- return 0;
-}
-
-static int mci_bind_devs(struct mem_ctl_info *mci,
- struct sbridge_dev *sbridge_dev)
-{
- struct sbridge_pvt *pvt = mci->pvt_info;
- struct pci_dev *pdev;
- int i, func, slot;
-
- for (i = 0; i < sbridge_dev->n_devs; i++) {
- pdev = sbridge_dev->pdev[i];
- if (!pdev)
- continue;
- slot = PCI_SLOT(pdev->devfn);
- func = PCI_FUNC(pdev->devfn);
- switch (slot) {
- case 12:
- switch (func) {
- case 6:
- pvt->pci_sad0 = pdev;
- break;
- case 7:
- pvt->pci_sad1 = pdev;
- break;
- default:
- goto error;
- }
- break;
- case 13:
- switch (func) {
- case 6:
- pvt->pci_br = pdev;
- break;
- default:
- goto error;
- }
- break;
- case 14:
- switch (func) {
- case 0:
- pvt->pci_ha0 = pdev;
- break;
- default:
- goto error;
- }
- break;
- case 15:
- switch (func) {
- case 0:
- pvt->pci_ta = pdev;
- break;
- case 1:
- pvt->pci_ras = pdev;
- break;
- case 2:
- case 3:
- case 4:
- case 5:
- pvt->pci_tad[func - 2] = pdev;
- break;
- default:
- goto error;
- }
- break;
- case 17:
- switch (func) {
- case 0:
- pvt->pci_ddrio = pdev;
- break;
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
-
- debugf0("Associated PCI %02x.%02d.%d with dev = %p\n",
- sbridge_dev->bus,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
- pdev);
- }
-
- /* Check if everything was registered */
- if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 ||
- !pvt->pci_tad || !pvt->pci_ras || !pvt->pci_ta ||
- !pvt->pci_ddrio)
- goto enodev;
-
- for (i = 0; i < NUM_CHANNELS; i++) {
- if (!pvt->pci_tad[i])
- goto enodev;
- }
- return 0;
-
-enodev:
- sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
- return -ENODEV;
-
-error:
- sbridge_printk(KERN_ERR, "Device %d, function %d "
- "is out of the expected range\n",
- slot, func);
- return -EINVAL;
-}
-
-/****************************************************************************
- Error check routines
- ****************************************************************************/
-
-/*
- * While Sandy Bridge has error count registers, the SMI BIOS reads values
- * from them and resets the counters, so they are not reliable for the OS
- * to read. We therefore have no option but to trust whatever the MCE is
- * telling us about the errors.
- */
-static void sbridge_mce_output_error(struct mem_ctl_info *mci,
- const struct mce *m)
-{
- struct mem_ctl_info *new_mci;
- struct sbridge_pvt *pvt = mci->pvt_info;
- char *type, *optype, *msg, *recoverable_msg;
- bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
- bool overflow = GET_BITFIELD(m->status, 62, 62);
- bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
- bool recoverable = GET_BITFIELD(m->status, 56, 56);
- u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
- u32 mscod = GET_BITFIELD(m->status, 16, 31);
- u32 errcode = GET_BITFIELD(m->status, 0, 15);
- u32 channel = GET_BITFIELD(m->status, 0, 3);
- u32 optypenum = GET_BITFIELD(m->status, 4, 6);
- long channel_mask, first_channel;
- u8 rank, socket;
- int csrow, rc, dimm;
- char *area_type = "Unknown";
-
- if (ripv)
- type = "NON_FATAL";
- else
- type = "FATAL";
-
- /*
- * According to Table 15-9 of the Intel Architecture spec, vol 3A,
- * memory errors should fit in this mask:
- * 000f 0000 1mmm cccc (binary)
- * where:
- * f = Correction Report Filtering Bit. If 1, subsequent errors
- * won't be shown
- * mmm = error type
- * cccc = channel
- * If the mask doesn't match, report an error to the parsing logic
- */
- if (! ((errcode & 0xef80) == 0x80)) {
- optype = "Can't parse: it is not a mem";
- } else {
- switch (optypenum) {
- case 0:
- optype = "generic undef request";
- break;
- case 1:
- optype = "memory read";
- break;
- case 2:
- optype = "memory write";
- break;
- case 3:
- optype = "addr/cmd";
- break;
- case 4:
- optype = "memory scrubbing";
- break;
- default:
- optype = "reserved";
- break;
- }
- }
-
- rc = get_memory_error_data(mci, m->addr, &socket,
- &channel_mask, &rank, area_type);
- if (rc < 0)
- return;
- new_mci = get_mci_for_node_id(socket);
- if (!new_mci) {
- edac_mc_handle_ce_no_info(mci, "Error: socket got corrupted!");
- return;
- }
- mci = new_mci;
- pvt = mci->pvt_info;
-
- first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
-
- if (rank < 4)
- dimm = 0;
- else if (rank < 8)
- dimm = 1;
- else
- dimm = 2;
-
- csrow = pvt->csrow_map[first_channel][dimm];
-
- if (uncorrected_error && recoverable)
- recoverable_msg = " recoverable";
- else
- recoverable_msg = "";
-
- /*
- * FIXME: What should we do with "channel" information on mcelog?
- * Probably, we can just discard it, as the channel information
- * comes from the get_memory_error_data() address decoding
- */
- msg = kasprintf(GFP_ATOMIC,
- "%d %s error(s): %s on %s area %s%s: cpu=%d Err=%04x:%04x (ch=%d), "
- "addr = 0x%08llx => socket=%d, Channel=%ld(mask=%ld), rank=%d\n",
- core_err_cnt,
- area_type,
- optype,
- type,
- recoverable_msg,
- overflow ? "OVERFLOW" : "",
- m->cpu,
- mscod, errcode,
- channel, /* 1111b means not specified */
- (long long) m->addr,
- socket,
- first_channel, /* This is the real channel on SB */
- channel_mask,
- rank);
-
- debugf0("%s", msg);
-
- /* Call the helper to output message */
- if (uncorrected_error)
- edac_mc_handle_fbd_ue(mci, csrow, 0, 0, msg);
- else
- edac_mc_handle_fbd_ce(mci, csrow, 0, msg);
-
- kfree(msg);
-}
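
A small userspace check of the error-code filter described in the comment near the top of the function (sample codes are invented): the 0xef80 mask tests bit 7 and bits 8-11,13-15 while ignoring the filtering bit 12 and the mmm/cccc fields:

	#include <stdio.h>
	#include <stdint.h>

	static int is_mem_errcode(uint16_t errcode)
	{
		return (errcode & 0xef80) == 0x80;	/* same test as above */
	}

	int main(void)
	{
		printf("0x0091 -> %d\n", is_mem_errcode(0x0091));	/* memory read, channel 1: yes */
		printf("0x1091 -> %d\n", is_mem_errcode(0x1091));	/* same, filtering bit set: yes */
		printf("0x0135 -> %d\n", is_mem_errcode(0x0135));	/* not a memory error: no */
		return 0;
	}
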
-
-/*
- * sbridge_check_error Retrieve and process errors reported by the
- * hardware. Called by the Core module.
- */
-static void sbridge_check_error(struct mem_ctl_info *mci)
-{
- struct sbridge_pvt *pvt = mci->pvt_info;
- int i;
- unsigned count = 0;
- struct mce *m;
-
- /*
- * MCE first step: Copy all mce errors into a temporary buffer
- * We use double buffering here to reduce the risk of
- * losing an error.
- */
- smp_rmb();
- count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
- % MCE_LOG_LEN;
- if (!count)
- return;
-
- m = pvt->mce_outentry;
- if (pvt->mce_in + count > MCE_LOG_LEN) {
- unsigned l = MCE_LOG_LEN - pvt->mce_in;
-
- memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
- smp_wmb();
- pvt->mce_in = 0;
- count -= l;
- m += l;
- }
- memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
- smp_wmb();
- pvt->mce_in += count;
-
- smp_rmb();
- if (pvt->mce_overrun) {
- sbridge_printk(KERN_ERR, "Lost %d memory errors\n",
- pvt->mce_overrun);
- smp_wmb();
- pvt->mce_overrun = 0;
- }
-
- /*
- * MCE second step: parse errors and display
- */
- for (i = 0; i < count; i++)
- sbridge_mce_output_error(mci, &pvt->mce_outentry[i]);
-}
-
-/*
- * sbridge_mce_check_error Replicates mcelog routine to get errors
- * This routine simply queues mcelog errors, and
- * returns. The error itself should be handled later
- * by sbridge_check_error.
- * WARNING: As this routine should be called at NMI time, extra care should
- * be taken to avoid deadlocks, and to be as fast as possible.
- */
-static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- struct mce *mce = (struct mce *)data;
- struct mem_ctl_info *mci;
- struct sbridge_pvt *pvt;
-
- mci = get_mci_for_node_id(mce->socketid);
- if (!mci)
- return NOTIFY_BAD;
- pvt = mci->pvt_info;
-
- /*
- * Just let mcelog handle it if the error is
- * outside the memory controller. A memory error
- * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0;
- * bit 12 has a special meaning.
- */
- if ((mce->status & 0xefff) >> 7 != 1)
- return NOTIFY_DONE;
-
- printk("sbridge: HANDLING MCE MEMORY ERROR\n");
-
- printk("CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
- mce->extcpu, mce->mcgstatus, mce->bank, mce->status);
- printk("TSC %llx ", mce->tsc);
- printk("ADDR %llx ", mce->addr);
- printk("MISC %llx ", mce->misc);
-
- printk("PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
- mce->cpuvendor, mce->cpuid, mce->time,
- mce->socketid, mce->apicid);
-
- /* Only handle if it is the right mc controller */
- if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc)
- return NOTIFY_DONE;
-
- smp_rmb();
- if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
- smp_wmb();
- pvt->mce_overrun++;
- return NOTIFY_DONE;
- }
-
- /* Copy memory error at the ringbuffer */
- memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
- smp_wmb();
- pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
-
- /* Handle fatal errors immediately */
- if (mce->mcgstatus & 1)
- sbridge_check_error(mci);
-
- /* Advise mcelog that the error was handled */
- return NOTIFY_STOP;
-}
-
-static struct notifier_block sbridge_mce_dec = {
- .notifier_call = sbridge_mce_check_error,
-};
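
For reference, a simplified userspace model of the FIFO shared by the two routines above (names shortened, and without the smp_rmb()/smp_wmb() barriers the real driver needs): the NMI-side producer only advances the out index, the polling consumer only advances the in index, and a full ring is accounted as an overrun instead of blocking:

	#include <stdio.h>

	#define LOG_LEN 4

	static int ring[LOG_LEN];
	static unsigned int in, out, overrun;

	static void produce(int err)	/* models sbridge_mce_check_error() */
	{
		if ((out + 1) % LOG_LEN == in) {
			overrun++;		/* ring full: count the loss */
			return;
		}
		ring[out] = err;
		out = (out + 1) % LOG_LEN;
	}

	static void consume(void)	/* models sbridge_check_error() */
	{
		while (in != out) {
			printf("handling error %d\n", ring[in]);
			in = (in + 1) % LOG_LEN;
		}
		if (overrun) {
			printf("lost %u memory errors\n", overrun);
			overrun = 0;
		}
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 6; i++)
			produce(i);
		consume();	/* handles 0..2, reports 3 lost */
		return 0;
	}
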
-
-/****************************************************************************
- EDAC register/unregister logic
- ****************************************************************************/
-
-static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
-{
- struct mem_ctl_info *mci = sbridge_dev->mci;
- struct sbridge_pvt *pvt;
-
- if (unlikely(!mci || !mci->pvt_info)) {
- debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
- __func__, &sbridge_dev->pdev[0]->dev);
-
- sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
- return;
- }
-
- pvt = mci->pvt_info;
-
- debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
- __func__, mci, &sbridge_dev->pdev[0]->dev);
-
- /* Remove MC sysfs nodes */
- edac_mc_del_mc(mci->dev);
-
- debugf1("%s: free mci struct\n", mci->ctl_name);
- kfree(mci->ctl_name);
- edac_mc_free(mci);
- sbridge_dev->mci = NULL;
-}
-
-static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
-{
- struct mem_ctl_info *mci;
- struct sbridge_pvt *pvt;
- int rc, channels, csrows;
-
- /* Check the number of active and not disabled channels */
- rc = sbridge_get_active_channels(sbridge_dev->bus, &channels, &csrows);
- if (unlikely(rc < 0))
- return rc;
-
- /* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, sbridge_dev->mc);
- if (unlikely(!mci))
- return -ENOMEM;
-
- debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
- __func__, mci, &sbridge_dev->pdev[0]->dev);
-
- pvt = mci->pvt_info;
- memset(pvt, 0, sizeof(*pvt));
-
- /* Associate sbridge_dev and mci for future usage */
- pvt->sbridge_dev = sbridge_dev;
- sbridge_dev->mci = mci;
-
- mci->mtype_cap = MEM_FLAG_DDR3;
- mci->edac_ctl_cap = EDAC_FLAG_NONE;
- mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = "sbridge_edac.c";
- mci->mod_ver = SBRIDGE_REVISION;
- mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
- mci->dev_name = pci_name(sbridge_dev->pdev[0]);
- mci->ctl_page_to_phys = NULL;
-
- /* Set the function pointer to an actual operation function */
- mci->edac_check = sbridge_check_error;
-
- /* Store pci devices at mci for faster access */
- rc = mci_bind_devs(mci, sbridge_dev);
- if (unlikely(rc < 0))
- goto fail0;
-
- /* Get dimm basic config and the memory layout */
- get_dimm_config(mci);
- get_memory_layout(mci);
-
- /* record ptr to the generic device */
- mci->dev = &sbridge_dev->pdev[0]->dev;
-
- /* add this new MC control structure to EDAC's list of MCs */
- if (unlikely(edac_mc_add_mc(mci))) {
- debugf0("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
- rc = -EINVAL;
- goto fail0;
- }
-
- return 0;
-
-fail0:
- kfree(mci->ctl_name);
- edac_mc_free(mci);
- sbridge_dev->mci = NULL;
- return rc;
-}
-
-/*
- * sbridge_probe	Probe for ONE instance of the device to see if it is
- *			present.
- *	return:
- *		0 if a device was found
- *		< 0 on error
- */
-
-static int __devinit sbridge_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- int rc;
- u8 mc, num_mc = 0;
- struct sbridge_dev *sbridge_dev;
-
- /* get the pci devices we want to reserve for our use */
- mutex_lock(&sbridge_edac_lock);
-
- /*
- * All memory controllers are allocated at the first pass.
- */
- if (unlikely(probed >= 1)) {
- mutex_unlock(&sbridge_edac_lock);
- return -ENODEV;
- }
- probed++;
-
- rc = sbridge_get_all_devices(&num_mc);
- if (unlikely(rc < 0))
- goto fail0;
- mc = 0;
-
- list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
- debugf0("Registering MC#%d (%d of %d)\n", mc, mc + 1, num_mc);
- sbridge_dev->mc = mc++;
- rc = sbridge_register_mci(sbridge_dev);
- if (unlikely(rc < 0))
- goto fail1;
- }
-
- sbridge_printk(KERN_INFO, "Driver loaded.\n");
-
- mutex_unlock(&sbridge_edac_lock);
- return 0;
-
-fail1:
- list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
- sbridge_unregister_mci(sbridge_dev);
-
- sbridge_put_all_devices();
-fail0:
- mutex_unlock(&sbridge_edac_lock);
- return rc;
-}
-
-/*
- * sbridge_remove destructor for one instance of device
- *
- */
-static void __devexit sbridge_remove(struct pci_dev *pdev)
-{
- struct sbridge_dev *sbridge_dev;
-
- debugf0(__FILE__ ": %s()\n", __func__);
-
-	/*
-	 * There is a problem here: the pdev value passed for removal will be
-	 * wrong, since it points to the X58 register used to detect that the
-	 * machine is a Nehalem or later design. However, because several PCI
-	 * devices are grouped together to provide the MC functionality, we
-	 * need a different method for releasing the devices.
-	 */
-
- mutex_lock(&sbridge_edac_lock);
-
- if (unlikely(!probed)) {
- mutex_unlock(&sbridge_edac_lock);
- return;
- }
-
- list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
- sbridge_unregister_mci(sbridge_dev);
-
- /* Release PCI resources */
- sbridge_put_all_devices();
-
- probed--;
-
- mutex_unlock(&sbridge_edac_lock);
-}
-
-MODULE_DEVICE_TABLE(pci, sbridge_pci_tbl);
-
-/*
- * sbridge_driver pci_driver structure for this module
- *
- */
-static struct pci_driver sbridge_driver = {
- .name = "sbridge_edac",
- .probe = sbridge_probe,
- .remove = __devexit_p(sbridge_remove),
- .id_table = sbridge_pci_tbl,
-};
-
-/*
- * sbridge_init Module entry function
- * Try to initialize this module for its devices
- */
-static int __init sbridge_init(void)
-{
- int pci_rc;
-
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
-
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- pci_rc = pci_register_driver(&sbridge_driver);
-
- if (pci_rc >= 0) {
- mce_register_decode_chain(&sbridge_mce_dec);
- return 0;
- }
-
- sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
- pci_rc);
-
- return pci_rc;
-}
-
-/*
- * sbridge_exit() Module exit function
- * Unregister the driver
- */
-static void __exit sbridge_exit(void)
-{
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
- pci_unregister_driver(&sbridge_driver);
- mce_unregister_decode_chain(&sbridge_mce_dec);
-}
-
-module_init(sbridge_init);
-module_exit(sbridge_exit);
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
-MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
-MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge memory controllers - "
- SBRIDGE_REVISION);
diff --git a/ANDROID_3.4.5/drivers/edac/tile_edac.c b/ANDROID_3.4.5/drivers/edac/tile_edac.c
deleted file mode 100644
index e99d0097..00000000
--- a/ANDROID_3.4.5/drivers/edac/tile_edac.c
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- * Tilera-specific EDAC driver.
- *
- * This source code is derived from the following driver:
- *
- * Cell MIC driver for ECC counting
- *
- * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/edac.h>
-#include <hv/hypervisor.h>
-#include <hv/drv_mshim_intf.h>
-
-#include "edac_core.h"
-
-#define DRV_NAME "tile-edac"
-
-/* Number of cs_rows needed per memory controller on TILEPro. */
-#define TILE_EDAC_NR_CSROWS 1
-
-/* Number of channels per memory controller on TILEPro. */
-#define TILE_EDAC_NR_CHANS 1
-
-/* Granularity of reported error in bytes on TILEPro. */
-#define TILE_EDAC_ERROR_GRAIN 8
-
-/* The TILE processor has multiple independent memory controllers. */
-struct platform_device *mshim_pdev[TILE_MAX_MSHIMS];
-
-struct tile_edac_priv {
- int hv_devhdl; /* Hypervisor device handle. */
- int node; /* Memory controller instance #. */
- unsigned int ce_count; /*
- * Correctable-error counter
- * kept by the driver.
- */
-};
-
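-/*
- * Polled check routine: read the error counters from the hypervisor's
- * mshim device and report any newly seen correctable errors to the
- * EDAC core.
- */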
-static void tile_edac_check(struct mem_ctl_info *mci)
-{
- struct tile_edac_priv *priv = mci->pvt_info;
- struct mshim_mem_error mem_error;
-
- if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_error,
- sizeof(struct mshim_mem_error), MSHIM_MEM_ERROR_OFF) !=
- sizeof(struct mshim_mem_error)) {
- pr_err(DRV_NAME ": MSHIM_MEM_ERROR_OFF pread failure.\n");
- return;
- }
-
- /* Check if the current error count is different from the saved one. */
- if (mem_error.sbe_count != priv->ce_count) {
- dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node);
- priv->ce_count = mem_error.sbe_count;
- edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name);
- }
-}
-
-/*
- * Initialize the 'csrows' table within the mci control structure with the
- * addressing of memory.
- */
-static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
-{
- struct csrow_info *csrow = &mci->csrows[0];
- struct tile_edac_priv *priv = mci->pvt_info;
- struct mshim_mem_info mem_info;
-
- if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
- sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
- sizeof(struct mshim_mem_info)) {
- pr_err(DRV_NAME ": MSHIM_MEM_INFO_OFF pread failure.\n");
- return -1;
- }
-
- if (mem_info.mem_ecc)
- csrow->edac_mode = EDAC_SECDED;
- else
- csrow->edac_mode = EDAC_NONE;
- switch (mem_info.mem_type) {
- case DDR2:
- csrow->mtype = MEM_DDR2;
- break;
-
- case DDR3:
- csrow->mtype = MEM_DDR3;
- break;
-
- default:
- return -1;
- }
-
- csrow->first_page = 0;
- csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->grain = TILE_EDAC_ERROR_GRAIN;
- csrow->dtype = DEV_UNKNOWN;
-
- return 0;
-}
-
-static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
-{
- char hv_file[32];
- int hv_devhdl;
- struct mem_ctl_info *mci;
- struct tile_edac_priv *priv;
- int rc;
-
- sprintf(hv_file, "mshim/%d", pdev->id);
- hv_devhdl = hv_dev_open((HV_VirtAddr)hv_file, 0);
- if (hv_devhdl < 0)
- return -EINVAL;
-
- /* A TILE MC has a single channel and one chip-select row. */
- mci = edac_mc_alloc(sizeof(struct tile_edac_priv),
- TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id);
- if (mci == NULL)
- return -ENOMEM;
- priv = mci->pvt_info;
- priv->node = pdev->id;
- priv->hv_devhdl = hv_devhdl;
-
- mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_DDR2;
- mci->edac_ctl_cap = EDAC_FLAG_SECDED;
-
- mci->mod_name = DRV_NAME;
-#ifdef __tilegx__
- mci->ctl_name = "TILEGx_Memory_Controller";
-#else
- mci->ctl_name = "TILEPro_Memory_Controller";
-#endif
- mci->dev_name = dev_name(&pdev->dev);
- mci->edac_check = tile_edac_check;
-
- /*
- * Initialize the MC control structure 'csrows' table
- * with the mapping and control information.
- */
- if (tile_edac_init_csrows(mci)) {
- /* No csrows found. */
- mci->edac_cap = EDAC_FLAG_NONE;
- } else {
- mci->edac_cap = EDAC_FLAG_SECDED;
- }
-
- platform_set_drvdata(pdev, mci);
-
- /* Register with EDAC core */
- rc = edac_mc_add_mc(mci);
- if (rc) {
- dev_err(&pdev->dev, "failed to register with EDAC core\n");
- edac_mc_free(mci);
- return rc;
- }
-
- return 0;
-}
-
-static int __devexit tile_edac_mc_remove(struct platform_device *pdev)
-{
- struct mem_ctl_info *mci = platform_get_drvdata(pdev);
-
- edac_mc_del_mc(&pdev->dev);
- if (mci)
- edac_mc_free(mci);
- return 0;
-}
-
-static struct platform_driver tile_edac_mc_driver = {
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- },
- .probe = tile_edac_mc_probe,
- .remove = __devexit_p(tile_edac_mc_remove),
-};
-
-/*
- * Driver init routine.
- */
-static int __init tile_edac_init(void)
-{
- char hv_file[32];
- struct platform_device *pdev;
- int i, err, num = 0;
-
- /* Only support POLL mode. */
- edac_op_state = EDAC_OPSTATE_POLL;
-
- err = platform_driver_register(&tile_edac_mc_driver);
- if (err)
- return err;
-
- for (i = 0; i < TILE_MAX_MSHIMS; i++) {
-		/*
-		 * Not all memory controllers are configured, e.g. when
-		 * running under a simulator, so register only those mshims
-		 * that the hypervisor has configured.
-		 */
- sprintf(hv_file, "mshim/%d", i);
- if (hv_dev_open((HV_VirtAddr)hv_file, 0) < 0)
- continue;
-
- pdev = platform_device_register_simple(DRV_NAME, i, NULL, 0);
- if (IS_ERR(pdev))
- continue;
- mshim_pdev[i] = pdev;
- num++;
- }
-
- if (num == 0) {
- platform_driver_unregister(&tile_edac_mc_driver);
- return -ENODEV;
- }
- return 0;
-}
-
-/*
- * Driver cleanup routine.
- */
-static void __exit tile_edac_exit(void)
-{
- int i;
-
- for (i = 0; i < TILE_MAX_MSHIMS; i++) {
- struct platform_device *pdev = mshim_pdev[i];
- if (!pdev)
- continue;
-
- platform_set_drvdata(pdev, NULL);
- platform_device_unregister(pdev);
- }
- platform_driver_unregister(&tile_edac_mc_driver);
-}
-
-module_init(tile_edac_init);
-module_exit(tile_edac_exit);
diff --git a/ANDROID_3.4.5/drivers/edac/x38_edac.c b/ANDROID_3.4.5/drivers/edac/x38_edac.c
deleted file mode 100644
index a4382973..00000000
--- a/ANDROID_3.4.5/drivers/edac/x38_edac.c
+++ /dev/null
@@ -1,523 +0,0 @@
-/*
- * Intel X38 Memory Controller kernel module
- * Copyright (C) 2008 Cluster Computing, Inc.
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * This file is based on i3200_edac.c
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/edac.h>
-#include "edac_core.h"
-
-#define X38_REVISION "1.1"
-
-#define EDAC_MOD_STR "x38_edac"
-
-#define PCI_DEVICE_ID_INTEL_X38_HB 0x29e0
-
-#define X38_RANKS 8
-#define X38_RANKS_PER_CHANNEL 4
-#define X38_CHANNELS 2
-
-/* Intel X38 register addresses - device 0 function 0 - DRAM Controller */
-
-#define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
-#define X38_MCHBAR_HIGH 0x4c
-#define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
-#define X38_MMR_WINDOW_SIZE 16384
-
-#define X38_TOM 0xa0 /* Top of Memory (16b)
- *
- * 15:10 reserved
- * 9:0 total populated physical memory
- */
-#define X38_TOM_MASK 0x3ff /* bits 9:0 */
-#define X38_TOM_SHIFT 26 /* 64MiB grain */
-
-#define X38_ERRSTS 0xc8 /* Error Status Register (16b)
- *
- * 15 reserved
- * 14 Isochronous TBWRR Run Behind FIFO Full
- * (ITCV)
- * 13 Isochronous TBWRR Run Behind FIFO Put
- * (ITSTV)
- * 12 reserved
- * 11 MCH Thermal Sensor Event
- * for SMI/SCI/SERR (GTSE)
- * 10 reserved
- * 9 LOCK to non-DRAM Memory Flag (LCKF)
- * 8 reserved
- * 7 DRAM Throttle Flag (DTF)
- * 6:2 reserved
- * 1 Multi-bit DRAM ECC Error Flag (DMERR)
- * 0 Single-bit DRAM ECC Error Flag (DSERR)
- */
-#define X38_ERRSTS_UE 0x0002
-#define X38_ERRSTS_CE 0x0001
-#define X38_ERRSTS_BITS (X38_ERRSTS_UE | X38_ERRSTS_CE)
-
-
-/* Intel MMIO register space - device 0 function 0 - MMR space */
-
-#define X38_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4)
- *
- * 15:10 reserved
- * 9:0 Channel 0 DRAM Rank Boundary Address
- */
-#define X38_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */
-#define X38_DRB_MASK 0x3ff /* bits 9:0 */
-#define X38_DRB_SHIFT 26 /* 64MiB grain */
-
-#define X38_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b)
- *
- * 63:48 Error Column Address (ERRCOL)
- * 47:32 Error Row Address (ERRROW)
- * 31:29 Error Bank Address (ERRBANK)
- * 28:27 Error Rank Address (ERRRANK)
- * 26:24 reserved
- * 23:16 Error Syndrome (ERRSYND)
- * 15: 2 reserved
- * 1 Multiple Bit Error Status (MERRSTS)
- * 0 Correctable Error Status (CERRSTS)
- */
-#define X38_C1ECCERRLOG 0x680 /* Channel 1 ECC Error Log (64b) */
-#define X38_ECCERRLOG_CE 0x1
-#define X38_ECCERRLOG_UE 0x2
-#define X38_ECCERRLOG_RANK_BITS 0x18000000
-#define X38_ECCERRLOG_SYNDROME_BITS 0xff0000
-
-#define X38_CAPID0 0xe0 /* see P.94 of spec for details */
-
-static int x38_channel_num;
-
-static int how_many_channel(struct pci_dev *pdev)
-{
- unsigned char capid0_8b; /* 8th byte of CAPID0 */
-
- pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
- if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
- debugf0("In single channel mode.\n");
- x38_channel_num = 1;
- } else {
- debugf0("In dual channel mode.\n");
- x38_channel_num = 2;
- }
-
- return x38_channel_num;
-}
-
-static unsigned long eccerrlog_syndrome(u64 log)
-{
- return (log & X38_ECCERRLOG_SYNDROME_BITS) >> 16;
-}
-
-static int eccerrlog_row(int channel, u64 log)
-{
- return ((log & X38_ECCERRLOG_RANK_BITS) >> 27) |
- (channel * X38_RANKS_PER_CHANNEL);
-}
-
-enum x38_chips {
- X38 = 0,
-};
-
-struct x38_dev_info {
- const char *ctl_name;
-};
-
-struct x38_error_info {
- u16 errsts;
- u16 errsts2;
- u64 eccerrlog[X38_CHANNELS];
-};
-
-static const struct x38_dev_info x38_devs[] = {
- [X38] = {
- .ctl_name = "x38"},
-};
-
-static struct pci_dev *mci_pdev;
-static int x38_registered = 1;
-
-
-static void x38_clear_error_info(struct mem_ctl_info *mci)
-{
- struct pci_dev *pdev;
-
- pdev = to_pci_dev(mci->dev);
-
- /*
- * Clear any error bits.
- * (Yes, we really clear bits by writing 1 to them.)
- */
- pci_write_bits16(pdev, X38_ERRSTS, X38_ERRSTS_BITS,
- X38_ERRSTS_BITS);
-}
-
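-/* Read a 64-bit MMIO register as two 32-bit reads, low dword first. */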
-static u64 x38_readq(const void __iomem *addr)
-{
- return readl(addr) | (((u64)readl(addr + 4)) << 32);
-}
-
-static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
- struct x38_error_info *info)
-{
- struct pci_dev *pdev;
- void __iomem *window = mci->pvt_info;
-
- pdev = to_pci_dev(mci->dev);
-
-	/*
-	 * This is a mess because there is no atomic way to read all the
-	 * registers at once, and a logged CE can be overwritten by a
-	 * subsequent UE between the reads.
-	 */
- pci_read_config_word(pdev, X38_ERRSTS, &info->errsts);
- if (!(info->errsts & X38_ERRSTS_BITS))
- return;
-
- info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
- if (x38_channel_num == 2)
- info->eccerrlog[1] = x38_readq(window + X38_C1ECCERRLOG);
-
- pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);
-
-	/*
-	 * If the error status is the same for both reads, the first set
-	 * of reads is valid.  If it changed, a CE occurred with no logged
-	 * info and the second set of reads is valid and should contain
-	 * the UE info.
-	 */
- if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
- info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
- if (x38_channel_num == 2)
- info->eccerrlog[1] =
- x38_readq(window + X38_C1ECCERRLOG);
- }
-
- x38_clear_error_info(mci);
-}
-
-static void x38_process_error_info(struct mem_ctl_info *mci,
- struct x38_error_info *info)
-{
- int channel;
- u64 log;
-
- if (!(info->errsts & X38_ERRSTS_BITS))
- return;
-
- if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
- info->errsts = info->errsts2;
- }
-
- for (channel = 0; channel < x38_channel_num; channel++) {
- log = info->eccerrlog[channel];
- if (log & X38_ECCERRLOG_UE) {
- edac_mc_handle_ue(mci, 0, 0,
- eccerrlog_row(channel, log), "x38 UE");
- } else if (log & X38_ECCERRLOG_CE) {
- edac_mc_handle_ce(mci, 0, 0,
- eccerrlog_syndrome(log),
- eccerrlog_row(channel, log), 0, "x38 CE");
- }
- }
-}
-
-static void x38_check(struct mem_ctl_info *mci)
-{
- struct x38_error_info info;
-
- debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
- x38_get_and_clear_error_info(mci, &info);
- x38_process_error_info(mci, &info);
-}
-
-
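-/*
- * Map the MCH memory-mapped register window: read the 64-bit MCHBAR from
- * config space, enable it by setting bit 0 of the low dword, mask off the
- * control bits and ioremap the 16 KiB register window.
- */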
-void __iomem *x38_map_mchbar(struct pci_dev *pdev)
-{
- union {
- u64 mchbar;
- struct {
- u32 mchbar_low;
- u32 mchbar_high;
- };
- } u;
- void __iomem *window;
-
- pci_read_config_dword(pdev, X38_MCHBAR_LOW, &u.mchbar_low);
- pci_write_config_dword(pdev, X38_MCHBAR_LOW, u.mchbar_low | 0x1);
- pci_read_config_dword(pdev, X38_MCHBAR_HIGH, &u.mchbar_high);
- u.mchbar &= X38_MCHBAR_MASK;
-
- if (u.mchbar != (resource_size_t)u.mchbar) {
- printk(KERN_ERR
- "x38: mmio space beyond accessible range (0x%llx)\n",
- (unsigned long long)u.mchbar);
- return NULL;
- }
-
- window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
- if (!window)
- printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
- (unsigned long long)u.mchbar);
-
- return window;
-}
-
-
-static void x38_get_drbs(void __iomem *window,
- u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
-{
- int i;
-
- for (i = 0; i < X38_RANKS_PER_CHANNEL; i++) {
- drbs[0][i] = readw(window + X38_C0DRB + 2*i) & X38_DRB_MASK;
- drbs[1][i] = readw(window + X38_C1DRB + 2*i) & X38_DRB_MASK;
- }
-}
-
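-/*
- * Memory is "stacked" (channel 1 mapped above channel 0 rather than
- * interleaved) when the last rank boundary of channel 1 equals the
- * Top Of Memory value.
- */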
-static bool x38_is_stacked(struct pci_dev *pdev,
- u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
-{
- u16 tom;
-
- pci_read_config_word(pdev, X38_TOM, &tom);
- tom &= X38_TOM_MASK;
-
- return drbs[X38_CHANNELS - 1][X38_RANKS_PER_CHANNEL - 1] == tom;
-}
-
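-/*
- * Convert a cumulative DRAM rank boundary value into a page count for a
- * single rank: subtract the previous rank's boundary, adjust for stacked
- * mode on channel 1, then scale from 64 MiB units to pages.
- */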
-static unsigned long drb_to_nr_pages(
- u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL],
- bool stacked, int channel, int rank)
-{
- int n;
-
- n = drbs[channel][rank];
- if (rank > 0)
- n -= drbs[channel][rank - 1];
- if (stacked && (channel == 1) && drbs[channel][rank] ==
- drbs[channel][X38_RANKS_PER_CHANNEL - 1]) {
- n -= drbs[0][X38_RANKS_PER_CHANNEL - 1];
- }
-
- n <<= (X38_DRB_SHIFT - PAGE_SHIFT);
- return n;
-}
-
-static int x38_probe1(struct pci_dev *pdev, int dev_idx)
-{
- int rc;
- int i;
- struct mem_ctl_info *mci = NULL;
- unsigned long last_page;
- u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
- bool stacked;
- void __iomem *window;
-
- debugf0("MC: %s()\n", __func__);
-
- window = x38_map_mchbar(pdev);
- if (!window)
- return -ENODEV;
-
- x38_get_drbs(window, drbs);
-
- how_many_channel(pdev);
-
- /* FIXME: unconventional pvt_info usage */
- mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0);
- if (!mci)
- return -ENOMEM;
-
- debugf3("MC: %s(): init mci\n", __func__);
-
- mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_DDR2;
-
- mci->edac_ctl_cap = EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_SECDED;
-
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = X38_REVISION;
- mci->ctl_name = x38_devs[dev_idx].ctl_name;
- mci->dev_name = pci_name(pdev);
- mci->edac_check = x38_check;
- mci->ctl_page_to_phys = NULL;
- mci->pvt_info = window;
-
- stacked = x38_is_stacked(pdev, drbs);
-
-	/*
-	 * The DRAM rank boundary (DRB) reg values are boundary addresses
-	 * for each DRAM rank with a granularity of 64 MiB.  DRB regs are
-	 * cumulative; the last one contains the total memory contained
-	 * in all ranks.
-	 */
- last_page = -1UL;
- for (i = 0; i < mci->nr_csrows; i++) {
- unsigned long nr_pages;
- struct csrow_info *csrow = &mci->csrows[i];
-
- nr_pages = drb_to_nr_pages(drbs, stacked,
- i / X38_RANKS_PER_CHANNEL,
- i % X38_RANKS_PER_CHANNEL);
-
- if (nr_pages == 0) {
- csrow->mtype = MEM_EMPTY;
- continue;
- }
-
- csrow->first_page = last_page + 1;
- last_page += nr_pages;
- csrow->last_page = last_page;
- csrow->nr_pages = nr_pages;
-
- csrow->grain = nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
- }
-
- x38_clear_error_info(mci);
-
- rc = -ENODEV;
- if (edac_mc_add_mc(mci)) {
- debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
- goto fail;
- }
-
-	/* If we got this far, the probe succeeded */
- debugf3("MC: %s(): success\n", __func__);
- return 0;
-
-fail:
- iounmap(window);
- if (mci)
- edac_mc_free(mci);
-
- return rc;
-}
-
-static int __devinit x38_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int rc;
-
- debugf0("MC: %s()\n", __func__);
-
- if (pci_enable_device(pdev) < 0)
- return -EIO;
-
- rc = x38_probe1(pdev, ent->driver_data);
- if (!mci_pdev)
- mci_pdev = pci_dev_get(pdev);
-
- return rc;
-}
-
-static void __devexit x38_remove_one(struct pci_dev *pdev)
-{
- struct mem_ctl_info *mci;
-
- debugf0("%s()\n", __func__);
-
- mci = edac_mc_del_mc(&pdev->dev);
- if (!mci)
- return;
-
- iounmap(mci->pvt_info);
-
- edac_mc_free(mci);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(x38_pci_tbl) = {
- {
- PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- X38},
- {
- 0,
- } /* 0 terminated list. */
-};
-
-MODULE_DEVICE_TABLE(pci, x38_pci_tbl);
-
-static struct pci_driver x38_driver = {
- .name = EDAC_MOD_STR,
- .probe = x38_init_one,
- .remove = __devexit_p(x38_remove_one),
- .id_table = x38_pci_tbl,
-};
-
-static int __init x38_init(void)
-{
- int pci_rc;
-
- debugf3("MC: %s()\n", __func__);
-
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
-
- pci_rc = pci_register_driver(&x38_driver);
- if (pci_rc < 0)
- goto fail0;
-
- if (!mci_pdev) {
- x38_registered = 0;
- mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_X38_HB, NULL);
- if (!mci_pdev) {
- debugf0("x38 pci_get_device fail\n");
- pci_rc = -ENODEV;
- goto fail1;
- }
-
- pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
- if (pci_rc < 0) {
- debugf0("x38 init fail\n");
- pci_rc = -ENODEV;
- goto fail1;
- }
- }
-
- return 0;
-
-fail1:
- pci_unregister_driver(&x38_driver);
-
-fail0:
- if (mci_pdev)
- pci_dev_put(mci_pdev);
-
- return pci_rc;
-}
-
-static void __exit x38_exit(void)
-{
- debugf3("MC: %s()\n", __func__);
-
- pci_unregister_driver(&x38_driver);
- if (!x38_registered) {
- x38_remove_one(mci_pdev);
- pci_dev_put(mci_pdev);
- }
-}
-
-module_init(x38_init);
-module_exit(x38_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cluster Computing, Inc. Hitoshi Mitake");
-MODULE_DESCRIPTION("MC support for Intel X38 memory hub controllers");
-
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");