Diffstat (limited to 'ANDROID_3.4.5/arch/x86/kernel/cpu')
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/Makefile | 50
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/amd.c | 802
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/bugs.c | 174
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/bugs_64.c | 33
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/centaur.c | 500
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/common.c | 1318
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/cpu.h | 37
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/cyrix.c | 461
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/hypervisor.c | 78
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/intel.c | 555
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/intel_cacheinfo.c | 1216
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/match.c | 91
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/Makefile | 11
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-apei.c | 149
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-inject.c | 248
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-internal.h | 53
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-severity.c | 285
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce.c | 2364
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce_amd.c | 775
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce_intel.c | 229
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/p5.c | 67
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/therm_throt.c | 508
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/threshold.c | 29
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/winchip.c | 39
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mkcapflags.pl | 32
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mshyperv.c | 79
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/Makefile | 3
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/amd.c | 124
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/centaur.c | 126
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/cleanup.c | 980
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/cyrix.c | 282
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/generic.c | 846
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/if.c | 451
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/main.c | 764
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/mtrr.h | 78
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event.c | 1886
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event.h | 605
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_amd.c | 686
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_amd_ibs.c | 301
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_intel.c | 1886
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_intel_ds.c | 725
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_intel_lbr.c | 704
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_p4.c | 1345
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_p6.c | 162
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/perfctr-watchdog.c | 156
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/powerflags.c | 21
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/proc.c | 167
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/rdrand.c | 73
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/scattered.c | 71
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/sched.c | 55
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/topology.c | 99
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/transmeta.c | 109
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/umc.c | 26
-rw-r--r--  ANDROID_3.4.5/arch/x86/kernel/cpu/vmware.c | 134
54 files changed, 0 insertions, 23048 deletions
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/Makefile b/ANDROID_3.4.5/arch/x86/kernel/cpu/Makefile
deleted file mode 100644
index 6ab6aa2f..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/Makefile
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-# Makefile for x86-compatible CPU details, features and quirks
-#
-
-# Don't trace early stages of a secondary CPU boot
-ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_common.o = -pg
-CFLAGS_REMOVE_perf_event.o = -pg
-endif
-
-# Make sure load_percpu_segment has no stackprotector
-nostackp := $(call cc-option, -fno-stack-protector)
-CFLAGS_common.o := $(nostackp)
-
-obj-y := intel_cacheinfo.o scattered.o topology.o
-obj-y += proc.o capflags.o powerflags.o common.o
-obj-y += vmware.o hypervisor.o sched.o mshyperv.o
-obj-y += rdrand.o
-obj-y += match.o
-
-obj-$(CONFIG_X86_32) += bugs.o
-obj-$(CONFIG_X86_64) += bugs_64.o
-
-obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
-obj-$(CONFIG_CPU_SUP_AMD) += amd.o
-obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
-obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
-obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
-obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
-
-obj-$(CONFIG_PERF_EVENTS) += perf_event.o
-
-ifdef CONFIG_PERF_EVENTS
-obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_p4.o perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
-endif
-
-obj-$(CONFIG_X86_MCE) += mcheck/
-obj-$(CONFIG_MTRR) += mtrr/
-
-obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o
-
-quiet_cmd_mkcapflags = MKCAP $@
- cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@
-
-cpufeature = $(src)/../../include/asm/cpufeature.h
-
-targets += capflags.c
-$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.pl FORCE
- $(call if_changed,mkcapflags)
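For context on the capflags.c rule above: mkcapflags.pl scans the X86_FEATURE_* definitions in cpufeature.h and emits the x86_cap_flags[] string table that proc.c and common.c print from. A minimal sketch of what the generated file looks like; the exact entries are assumed for illustration, not copied from this tree:

#include <asm/cpufeature.h>

const char * const x86_cap_flags[NCAPINTS*32] = {
	[X86_FEATURE_FPU] = "fpu",
	[X86_FEATURE_TSC] = "tsc",
	[X86_FEATURE_MSR] = "msr",
	/* ... one entry per feature bit that has a printable name ... */
};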
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/amd.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/amd.c
deleted file mode 100644
index 146bb621..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/amd.c
+++ /dev/null
@@ -1,802 +0,0 @@
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/elf.h>
-#include <linux/mm.h>
-
-#include <linux/io.h>
-#include <linux/sched.h>
-#include <asm/processor.h>
-#include <asm/apic.h>
-#include <asm/cpu.h>
-#include <asm/pci-direct.h>
-
-#ifdef CONFIG_X86_64
-# include <asm/numa_64.h>
-# include <asm/mmconfig.h>
-# include <asm/cacheflush.h>
-#endif
-
-#include "cpu.h"
-
-#ifdef CONFIG_X86_32
-/*
- * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
- * misexecution of code under Linux. Owners of such processors should
- * contact AMD for precise details and a CPU swap.
- *
- * See http://www.multimania.com/poulot/k6bug.html
- * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
- * (Publication # 21266 Issue Date: August 1998)
- *
- * The following test is erm.. interesting. AMD neglected to up
- * the chip setting when fixing the bug but they also tweaked some
- * performance at the same time..
- */
-
-extern void vide(void);
-__asm__(".align 4\nvide: ret");
-
-static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
-{
-/*
- * General Systems BIOSen alias the cpu frequency registers
- * of the Elan at 0x000df000. Unfortunately, one of the Linux
- * drivers subsequently pokes it, and changes the CPU speed.
- * Workaround : Remove the unneeded alias.
- */
-#define CBAR (0xfffc) /* Configuration Base Address (32-bit) */
-#define CBAR_ENB (0x80000000)
-#define CBAR_KEY (0X000000CB)
- if (c->x86_model == 9 || c->x86_model == 10) {
- if (inl(CBAR) & CBAR_ENB)
- outl(0 | CBAR_KEY, CBAR);
- }
-}
-
-
-static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
-{
- u32 l, h;
- int mbytes = num_physpages >> (20-PAGE_SHIFT);
-
- if (c->x86_model < 6) {
- /* Based on AMD doc 20734R - June 2000 */
- if (c->x86_model == 0) {
- clear_cpu_cap(c, X86_FEATURE_APIC);
- set_cpu_cap(c, X86_FEATURE_PGE);
- }
- return;
- }
-
- if (c->x86_model == 6 && c->x86_mask == 1) {
- const int K6_BUG_LOOP = 1000000;
- int n;
- void (*f_vide)(void);
- unsigned long d, d2;
-
- printk(KERN_INFO "AMD K6 stepping B detected - ");
-
- /*
- * It looks like AMD fixed the 2.6.2 bug and improved indirect
- * calls at the same time.
- */
-
- n = K6_BUG_LOOP;
- f_vide = vide;
- rdtscl(d);
- while (n--)
- f_vide();
- rdtscl(d2);
- d = d2-d;
-
- if (d > 20*K6_BUG_LOOP)
- printk(KERN_CONT
- "system stability may be impaired when more than 32 MB are used.\n");
- else
- printk(KERN_CONT "probably OK (after B9730xxxx).\n");
- }
-
- /* K6 with old style WHCR */
- if (c->x86_model < 8 ||
- (c->x86_model == 8 && c->x86_mask < 8)) {
- /* We can only write allocate on the low 508Mb */
- if (mbytes > 508)
- mbytes = 508;
-
- rdmsr(MSR_K6_WHCR, l, h);
- if ((l&0x0000FFFF) == 0) {
- unsigned long flags;
- l = (1<<0)|((mbytes/4)<<1);
- local_irq_save(flags);
- wbinvd();
- wrmsr(MSR_K6_WHCR, l, h);
- local_irq_restore(flags);
- printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
- mbytes);
- }
- return;
- }
-
- if ((c->x86_model == 8 && c->x86_mask > 7) ||
- c->x86_model == 9 || c->x86_model == 13) {
- /* The more serious chips .. */
-
- if (mbytes > 4092)
- mbytes = 4092;
-
- rdmsr(MSR_K6_WHCR, l, h);
- if ((l&0xFFFF0000) == 0) {
- unsigned long flags;
- l = ((mbytes>>2)<<22)|(1<<16);
- local_irq_save(flags);
- wbinvd();
- wrmsr(MSR_K6_WHCR, l, h);
- local_irq_restore(flags);
- printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
- mbytes);
- }
-
- return;
- }
-
- if (c->x86_model == 10) {
- /* AMD Geode LX is model 10 */
- /* placeholder for any needed mods */
- return;
- }
-}
-
-static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
-{
- /* is the call from identify_secondary_cpu()? */
- if (!c->cpu_index)
- return;
-
- /*
- * Certain Athlons might work (for various values of 'work') in SMP
- * but they are not certified as MP capable.
- */
- /* Athlon 660/661 is valid. */
- if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
- (c->x86_mask == 1)))
- goto valid_k7;
-
- /* Duron 670 is valid */
- if ((c->x86_model == 7) && (c->x86_mask == 0))
- goto valid_k7;
-
- /*
- * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
- * bit. It's worth noting that some A5 stepping (662) Athlon XPs
- * have the MP bit set.
- * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
- * more.
- */
- if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
- ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
- (c->x86_model > 7))
- if (cpu_has_mp)
- goto valid_k7;
-
- /* If we get here, not a certified SMP capable AMD system. */
-
- /*
- * Don't taint if we are running SMP kernel on a single non-MP
- * approved Athlon
- */
- WARN_ONCE(1, "WARNING: This combination of AMD"
- " processors is not suitable for SMP.\n");
- if (!test_taint(TAINT_UNSAFE_SMP))
- add_taint(TAINT_UNSAFE_SMP);
-
-valid_k7:
- ;
-}
-
-static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
-{
- u32 l, h;
-
- /*
- * Bit 15 of Athlon specific MSR 15, needs to be 0
- * to enable SSE on Palomino/Morgan/Barton CPU's.
- * If the BIOS didn't enable it already, enable it here.
- */
- if (c->x86_model >= 6 && c->x86_model <= 10) {
- if (!cpu_has(c, X86_FEATURE_XMM)) {
- printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
- rdmsr(MSR_K7_HWCR, l, h);
- l &= ~0x00008000;
- wrmsr(MSR_K7_HWCR, l, h);
- set_cpu_cap(c, X86_FEATURE_XMM);
- }
- }
-
- /*
- * It's been determined by AMD that Athlons since model 8 stepping 1
- * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
- * As per AMD technical note 27212 0.2
- */
- if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
- rdmsr(MSR_K7_CLK_CTL, l, h);
- if ((l & 0xfff00000) != 0x20000000) {
- printk(KERN_INFO
- "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
- l, ((l & 0x000fffff)|0x20000000));
- wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
- }
- }
-
- set_cpu_cap(c, X86_FEATURE_K7);
-
- amd_k7_smp_check(c);
-}
-#endif
-
-#ifdef CONFIG_NUMA
-/*
- * To workaround broken NUMA config. Read the comment in
- * srat_detect_node().
- */
-static int __cpuinit nearby_node(int apicid)
-{
- int i, node;
-
- for (i = apicid - 1; i >= 0; i--) {
- node = __apicid_to_node[i];
- if (node != NUMA_NO_NODE && node_online(node))
- return node;
- }
- for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
- node = __apicid_to_node[i];
- if (node != NUMA_NO_NODE && node_online(node))
- return node;
- }
- return first_node(node_online_map); /* Shouldn't happen */
-}
-#endif
-
-/*
- * Fixup core topology information for
- * (1) AMD multi-node processors
- * Assumption: Number of cores in each internal node is the same.
- * (2) AMD processors supporting compute units
- */
-#ifdef CONFIG_X86_HT
-static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
-{
- u32 nodes, cores_per_cu = 1;
- u8 node_id;
- int cpu = smp_processor_id();
-
- /* get information required for multi-node processors */
- if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
- u32 eax, ebx, ecx, edx;
-
- cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
- nodes = ((ecx >> 8) & 7) + 1;
- node_id = ecx & 7;
-
- /* get compute unit information */
- smp_num_siblings = ((ebx >> 8) & 3) + 1;
- c->compute_unit_id = ebx & 0xff;
- cores_per_cu += ((ebx >> 8) & 3);
- } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
- u64 value;
-
- rdmsrl(MSR_FAM10H_NODE_ID, value);
- nodes = ((value >> 3) & 7) + 1;
- node_id = value & 7;
- } else
- return;
-
- /* fixup multi-node processor information */
- if (nodes > 1) {
- u32 cores_per_node;
- u32 cus_per_node;
-
- set_cpu_cap(c, X86_FEATURE_AMD_DCM);
- cores_per_node = c->x86_max_cores / nodes;
- cus_per_node = cores_per_node / cores_per_cu;
-
- /* store NodeID, use llc_shared_map to store sibling info */
- per_cpu(cpu_llc_id, cpu) = node_id;
-
- /* core id has to be in the [0 .. cores_per_node - 1] range */
- c->cpu_core_id %= cores_per_node;
- c->compute_unit_id %= cus_per_node;
- }
-}
-#endif
-
-/*
- * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
- * Assumes number of cores is a power of two.
- */
-static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_HT
- unsigned bits;
- int cpu = smp_processor_id();
-
- bits = c->x86_coreid_bits;
- /* Low order bits define the core id (index of core in socket) */
- c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
- /* Convert the initial APIC ID into the socket ID */
- c->phys_proc_id = c->initial_apicid >> bits;
- /* use socket ID also for last level cache */
- per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
- amd_get_topology(c);
-#endif
-}
-
-int amd_get_nb_id(int cpu)
-{
- int id = 0;
-#ifdef CONFIG_SMP
- id = per_cpu(cpu_llc_id, cpu);
-#endif
- return id;
-}
-EXPORT_SYMBOL_GPL(amd_get_nb_id);
-
-static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_NUMA
- int cpu = smp_processor_id();
- int node;
- unsigned apicid = c->apicid;
-
- node = numa_cpu_node(cpu);
- if (node == NUMA_NO_NODE)
- node = per_cpu(cpu_llc_id, cpu);
-
- /*
- * On multi-fabric platform (e.g. Numascale NumaChip) a
- * platform-specific handler needs to be called to fixup some
- * IDs of the CPU.
- */
- if (x86_cpuinit.fixup_cpu_id)
- x86_cpuinit.fixup_cpu_id(c, node);
-
- if (!node_online(node)) {
- /*
- * Two possibilities here:
- *
- * - The CPU is missing memory and no node was created. In
- * that case try picking one from a nearby CPU.
- *
- * - The APIC IDs differ from the HyperTransport node IDs
- * which the K8 northbridge parsing fills in. Assume
- * they are all increased by a constant offset, but in
- * the same order as the HT nodeids. If that doesn't
- * result in a usable node fall back to the path for the
- * previous case.
- *
- * This workaround operates directly on the mapping between
- * APIC ID and NUMA node, assuming certain relationship
- * between APIC ID, HT node ID and NUMA topology. As going
- * through CPU mapping may alter the outcome, directly
- * access __apicid_to_node[].
- */
- int ht_nodeid = c->initial_apicid;
-
- if (ht_nodeid >= 0 &&
- __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
- node = __apicid_to_node[ht_nodeid];
- /* Pick a nearby node */
- if (!node_online(node))
- node = nearby_node(apicid);
- }
- numa_set_node(cpu, node);
-#endif
-}
-
-static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_HT
- unsigned bits, ecx;
-
- /* Multi core CPU? */
- if (c->extended_cpuid_level < 0x80000008)
- return;
-
- ecx = cpuid_ecx(0x80000008);
-
- c->x86_max_cores = (ecx & 0xff) + 1;
-
- /* CPU telling us the core id bits shift? */
- bits = (ecx >> 12) & 0xF;
-
- /* Otherwise recompute */
- if (bits == 0) {
- while ((1 << bits) < c->x86_max_cores)
- bits++;
- }
-
- c->x86_coreid_bits = bits;
-#endif
-}
-
-static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
-{
- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
-
- if (c->x86 > 0x10 ||
- (c->x86 == 0x10 && c->x86_model >= 0x2)) {
- u64 val;
-
- rdmsrl(MSR_K7_HWCR, val);
- if (!(val & BIT(24)))
- printk(KERN_WARNING FW_BUG "TSC doesn't count "
- "with P0 frequency!\n");
- }
- }
-
- if (c->x86 == 0x15) {
- unsigned long upperbit;
- u32 cpuid, assoc;
-
- cpuid = cpuid_edx(0x80000005);
- assoc = cpuid >> 16 & 0xff;
- upperbit = ((cpuid >> 24) << 10) / assoc;
-
- va_align.mask = (upperbit - 1) & PAGE_MASK;
- va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
- }
-}
-
-static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
-{
- early_init_amd_mc(c);
-
- /*
- * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
- * with P/T states and does not stop in deep C-states
- */
- if (c->x86_power & (1 << 8)) {
- set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
- set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
- if (!check_tsc_unstable())
- sched_clock_stable = 1;
- }
-
-#ifdef CONFIG_X86_64
- set_cpu_cap(c, X86_FEATURE_SYSCALL32);
-#else
- /* Set MTRR capability flag if appropriate */
- if (c->x86 == 5)
- if (c->x86_model == 13 || c->x86_model == 9 ||
- (c->x86_model == 8 && c->x86_mask >= 8))
- set_cpu_cap(c, X86_FEATURE_K6_MTRR);
-#endif
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
- /* check CPU config space for extended APIC ID */
- if (cpu_has_apic && c->x86 >= 0xf) {
- unsigned int val;
- val = read_pci_config(0, 24, 0, 0x68);
- if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
- set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
- }
-#endif
-}
-
-static void __cpuinit init_amd(struct cpuinfo_x86 *c)
-{
- u32 dummy;
-
-#ifdef CONFIG_SMP
- unsigned long long value;
-
- /*
- * Disable TLB flush filter by setting HWCR.FFDIS on K8
- * bit 6 of msr C001_0015
- *
- * Errata 63 for SH-B3 steppings
- * Errata 122 for all steppings (F+ have it disabled by default)
- */
- if (c->x86 == 0xf) {
- rdmsrl(MSR_K7_HWCR, value);
- value |= 1 << 6;
- wrmsrl(MSR_K7_HWCR, value);
- }
-#endif
-
- early_init_amd(c);
-
- /*
- * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
- * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
- */
- clear_cpu_cap(c, 0*32+31);
-
-#ifdef CONFIG_X86_64
- /* On C+ stepping K8 rep microcode works well for copy/memset */
- if (c->x86 == 0xf) {
- u32 level;
-
- level = cpuid_eax(1);
- if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
- set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-
- /*
- * Some BIOSes incorrectly force this feature, but only K8
- * revision D (model = 0x14) and later actually support it.
- * (AMD Erratum #110, docId: 25759).
- */
- if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
- u64 val;
-
- clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
- if (!rdmsrl_amd_safe(0xc001100d, &val)) {
- val &= ~(1ULL << 32);
- wrmsrl_amd_safe(0xc001100d, val);
- }
- }
-
- }
- if (c->x86 >= 0x10)
- set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-
- /* get apicid instead of initial apic id from cpuid */
- c->apicid = hard_smp_processor_id();
-#else
-
- /*
- * FIXME: We should handle the K5 here. Set up the write
- * range and also turn on MSR 83 bits 4 and 31 (write alloc,
- * no bus pipeline)
- */
-
- switch (c->x86) {
- case 4:
- init_amd_k5(c);
- break;
- case 5:
- init_amd_k6(c);
- break;
- case 6: /* An Athlon/Duron */
- init_amd_k7(c);
- break;
- }
-
- /* K6s report MCEs but don't actually have all the MSRs */
- if (c->x86 < 6)
- clear_cpu_cap(c, X86_FEATURE_MCE);
-#endif
-
- /* Enable workaround for FXSAVE leak */
- if (c->x86 >= 6)
- set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
-
- if (!c->x86_model_id[0]) {
- switch (c->x86) {
- case 0xf:
- /* Should distinguish Models here, but this is only
- a fallback anyways. */
- strcpy(c->x86_model_id, "Hammer");
- break;
- }
- }
-
- /* re-enable TopologyExtensions if switched off by BIOS */
- if ((c->x86 == 0x15) &&
- (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
- !cpu_has(c, X86_FEATURE_TOPOEXT)) {
- u64 val;
-
- if (!rdmsrl_amd_safe(0xc0011005, &val)) {
- val |= 1ULL << 54;
- wrmsrl_amd_safe(0xc0011005, val);
- rdmsrl(0xc0011005, val);
- if (val & (1ULL << 54)) {
- set_cpu_cap(c, X86_FEATURE_TOPOEXT);
- printk(KERN_INFO FW_INFO "CPU: Re-enabling "
- "disabled Topology Extensions Support\n");
- }
- }
- }
-
- cpu_detect_cache_sizes(c);
-
- /* Multi core CPU? */
- if (c->extended_cpuid_level >= 0x80000008) {
- amd_detect_cmp(c);
- srat_detect_node(c);
- }
-
-#ifdef CONFIG_X86_32
- detect_ht(c);
-#endif
-
- if (c->extended_cpuid_level >= 0x80000006) {
- if (cpuid_edx(0x80000006) & 0xf000)
- num_cache_leaves = 4;
- else
- num_cache_leaves = 3;
- }
-
- if (c->x86 >= 0xf)
- set_cpu_cap(c, X86_FEATURE_K8);
-
- if (cpu_has_xmm2) {
- /* MFENCE stops RDTSC speculation */
- set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
- }
-
-#ifdef CONFIG_X86_64
- if (c->x86 == 0x10) {
- /* do this for boot cpu */
- if (c == &boot_cpu_data)
- check_enable_amd_mmconf_dmi();
-
- fam10h_check_enable_mmcfg();
- }
-
- if (c == &boot_cpu_data && c->x86 >= 0xf) {
- unsigned long long tseg;
-
- /*
- * Split up direct mapping around the TSEG SMM area.
- * Don't do it for gbpages because there seems very little
- * benefit in doing so.
- */
- if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
- printk(KERN_DEBUG "tseg: %010llx\n", tseg);
- if ((tseg>>PMD_SHIFT) <
- (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
- ((tseg>>PMD_SHIFT) <
- (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
- (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
- set_memory_4k((unsigned long)__va(tseg), 1);
- }
- }
-#endif
-
- /*
- * Family 0x12 and above processors have APIC timer
- * running in deep C states.
- */
- if (c->x86 > 0x11)
- set_cpu_cap(c, X86_FEATURE_ARAT);
-
- /*
- * Disable GART TLB Walk Errors on Fam10h. We do this here
- * because this is always needed when GART is enabled, even in a
- * kernel which has no MCE support built in.
- */
- if (c->x86 == 0x10) {
- /*
- * The BIOS should disable GartTlbWlk errors itself. If it
- * doesn't, do it here, as suggested by the BKDG.
- *
- * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
- */
- u64 mask;
- int err;
-
- err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
- if (err == 0) {
- mask |= (1 << 10);
- checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
- }
- }
-
- rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
-}
-
-#ifdef CONFIG_X86_32
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
- unsigned int size)
-{
- /* AMD errata T13 (order #21922) */
- if ((c->x86 == 6)) {
- /* Duron Rev A0 */
- if (c->x86_model == 3 && c->x86_mask == 0)
- size = 64;
- /* Tbird rev A1/A2 */
- if (c->x86_model == 4 &&
- (c->x86_mask == 0 || c->x86_mask == 1))
- size = 256;
- }
- return size;
-}
-#endif
-
-static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
- .c_vendor = "AMD",
- .c_ident = { "AuthenticAMD" },
-#ifdef CONFIG_X86_32
- .c_models = {
- { .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
- {
- [3] = "486 DX/2",
- [7] = "486 DX/2-WB",
- [8] = "486 DX/4",
- [9] = "486 DX/4-WB",
- [14] = "Am5x86-WT",
- [15] = "Am5x86-WB"
- }
- },
- },
- .c_size_cache = amd_size_cache,
-#endif
- .c_early_init = early_init_amd,
- .c_bsp_init = bsp_init_amd,
- .c_init = init_amd,
- .c_x86_vendor = X86_VENDOR_AMD,
-};
-
-cpu_dev_register(amd_cpu_dev);
-
-/*
- * AMD errata checking
- *
- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
- * have an OSVW id assigned, which it takes as first argument. Both take a
- * variable number of family-specific model-stepping ranges created by
- * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
- * int[] in arch/x86/include/asm/processor.h.
- *
- * Example:
- *
- * const int amd_erratum_319[] =
- * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
- * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
- * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
- */
-
-const int amd_erratum_400[] =
- AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
- AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
-EXPORT_SYMBOL_GPL(amd_erratum_400);
-
-const int amd_erratum_383[] =
- AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
-EXPORT_SYMBOL_GPL(amd_erratum_383);
-
-bool cpu_has_amd_erratum(const int *erratum)
-{
- struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
- int osvw_id = *erratum++;
- u32 range;
- u32 ms;
-
- /*
- * If called early enough that current_cpu_data hasn't been initialized
- * yet, fall back to boot_cpu_data.
- */
- if (cpu->x86 == 0)
- cpu = &boot_cpu_data;
-
- if (cpu->x86_vendor != X86_VENDOR_AMD)
- return false;
-
- if (osvw_id >= 0 && osvw_id < 65536 &&
- cpu_has(cpu, X86_FEATURE_OSVW)) {
- u64 osvw_len;
-
- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
- if (osvw_id < osvw_len) {
- u64 osvw_bits;
-
- rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
- osvw_bits);
- return osvw_bits & (1ULL << (osvw_id & 0x3f));
- }
- }
-
- /* OSVW unavailable or ID unknown, match family-model-stepping range */
- ms = (cpu->x86_model << 4) | cpu->x86_mask;
- while ((range = *erratum++))
- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
- (ms >= AMD_MODEL_RANGE_START(range)) &&
- (ms <= AMD_MODEL_RANGE_END(range)))
- return true;
-
- return false;
-}
-
-EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
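Usage note for the erratum table above: cpu_has_amd_erratum() consults the OSVW MSRs first when an OSVW id is given and only then falls back to the family/model/stepping ranges, so callers simply test against the exported table. A hedged sketch of a caller; the workaround hook is hypothetical, not taken from this tree:

	/* e.g. in idle-selection code: handle erratum 400 (C1E) parts */
	if (cpu_has_amd_erratum(amd_erratum_400))
		use_amd_e400_aware_idle();	/* hypothetical workaround hook */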
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/bugs.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/bugs.c
deleted file mode 100644
index 46674fbb..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/bugs.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (C) 1994 Linus Torvalds
- *
- * Cyrix stuff, June 1998 by:
- * - Rafael R. Reilova (moved everything from head.S),
- * <rreilova@ececs.uc.edu>
- * - Channing Corn (tests & fixes),
- * - Andrew D. Balsa (code cleanup).
- */
-#include <linux/init.h>
-#include <linux/utsname.h>
-#include <asm/bugs.h>
-#include <asm/processor.h>
-#include <asm/processor-flags.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/paravirt.h>
-#include <asm/alternative.h>
-
-static int __init no_halt(char *s)
-{
- WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
- boot_cpu_data.hlt_works_ok = 0;
- return 1;
-}
-
-__setup("no-hlt", no_halt);
-
-static int __init no_387(char *s)
-{
- boot_cpu_data.hard_math = 0;
- write_cr0(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | read_cr0());
- return 1;
-}
-
-__setup("no387", no_387);
-
-static double __initdata x = 4195835.0;
-static double __initdata y = 3145727.0;
-
-/*
- * This used to check for exceptions..
- * However, it turns out that to support that,
- * the XMM trap handlers basically had to
- * be buggy. So let's have a correct XMM trap
- * handler, and forget about printing out
- * some status at boot.
- *
- * We should really only care about bugs here
- * anyway. Not features.
- */
-static void __init check_fpu(void)
-{
- s32 fdiv_bug;
-
- if (!boot_cpu_data.hard_math) {
-#ifndef CONFIG_MATH_EMULATION
- printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
- printk(KERN_EMERG "Giving up.\n");
- for (;;) ;
-#endif
- return;
- }
-
- kernel_fpu_begin();
-
- /*
- * trap_init() enabled FXSR and company _before_ testing for FP
- * problems here.
- *
- * Test for the divl bug..
- */
- __asm__("fninit\n\t"
- "fldl %1\n\t"
- "fdivl %2\n\t"
- "fmull %2\n\t"
- "fldl %1\n\t"
- "fsubp %%st,%%st(1)\n\t"
- "fistpl %0\n\t"
- "fwait\n\t"
- "fninit"
- : "=m" (*&fdiv_bug)
- : "m" (*&x), "m" (*&y));
-
- kernel_fpu_end();
-
- boot_cpu_data.fdiv_bug = fdiv_bug;
- if (boot_cpu_data.fdiv_bug)
- printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
-}
-
-static void __init check_hlt(void)
-{
- if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
- return;
-
- printk(KERN_INFO "Checking 'hlt' instruction... ");
- if (!boot_cpu_data.hlt_works_ok) {
- printk("disabled\n");
- return;
- }
- halt();
- halt();
- halt();
- halt();
- printk(KERN_CONT "OK.\n");
-}
-
-/*
- * Most 386 processors have a bug where a POPAD can lock the
- * machine even from user space.
- */
-
-static void __init check_popad(void)
-{
-#ifndef CONFIG_X86_POPAD_OK
- int res, inp = (int) &res;
-
- printk(KERN_INFO "Checking for popad bug... ");
- __asm__ __volatile__(
- "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
- : "=&a" (res)
- : "d" (inp)
- : "ecx", "edi");
- /*
- * If this fails, it means that any user program may lock the
- * CPU hard. Too bad.
- */
- if (res != 12345678)
- printk(KERN_CONT "Buggy.\n");
- else
- printk(KERN_CONT "OK.\n");
-#endif
-}
-
-/*
- * Check whether we are able to run this kernel safely on SMP.
- *
- * - In order to run on an i386, we need to be compiled for i386
- *   (due to the lack of "invlpg" and working WP on an i386)
- * - In order to run on anything without a TSC, we need to be
- *   compiled for an i486.
- */
-
-static void __init check_config(void)
-{
-/*
- * We'd better not be a i386 if we're configured to use some
- * i486+ only features! (WP works in supervisor mode and the
- * new "invlpg" and "bswap" instructions)
- */
-#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || \
- defined(CONFIG_X86_BSWAP)
- if (boot_cpu_data.x86 == 3)
- panic("Kernel requires i486+ for 'invlpg' and other features");
-#endif
-}
-
-
-void __init check_bugs(void)
-{
- identify_boot_cpu();
-#ifndef CONFIG_SMP
- printk(KERN_INFO "CPU: ");
- print_cpu_info(&boot_cpu_data);
-#endif
- check_config();
- check_fpu();
- check_hlt();
- check_popad();
- init_utsname()->machine[1] =
- '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
- alternative_instructions();
-}
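A note on the check_fpu() divl test above: it evaluates x - (x / y) * y with x = 4195835 and y = 3145727 in x87 extended precision and stores the rounded integer result in fdiv_bug. A correct FPU yields 0, while a Pentium with the FDIV flaw returns a wrong quotient and the expression comes out non-zero (classically about 256). Below is a standalone user-space sketch of the same check, using plain C doubles instead of the kernel's fninit/fistpl sequence; it is illustrative only:

#include <stdio.h>

int main(void)
{
	volatile double x = 4195835.0, y = 3145727.0;
	double r = x - (x / y) * y;	/* ~0 on a correct FPU, ~256 on a flawed Pentium */

	printf("fdiv check: %f\n", r);
	return 0;
}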
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/bugs_64.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/bugs_64.c
deleted file mode 100644
index 04f0fe5a..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/bugs_64.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 1994 Linus Torvalds
- * Copyright (C) 2000 SuSE
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/alternative.h>
-#include <asm/bugs.h>
-#include <asm/processor.h>
-#include <asm/mtrr.h>
-#include <asm/cacheflush.h>
-
-void __init check_bugs(void)
-{
- identify_boot_cpu();
-#if !defined(CONFIG_SMP)
- printk(KERN_INFO "CPU: ");
- print_cpu_info(&boot_cpu_data);
-#endif
- alternative_instructions();
-
- /*
- * Make sure the first 2MB area is not mapped by huge pages
- * There are typically fixed size MTRRs in there and overlapping
- * MTRRs into large pages causes slow downs.
- *
- * Right now we don't do that with gbpages because there seems
- * very little benefit for that case.
- */
- if (!direct_gbpages)
- set_memory_4k((unsigned long)__va(0), 1);
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/centaur.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/centaur.c
deleted file mode 100644
index 159103c0..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/centaur.c
+++ /dev/null
@@ -1,500 +0,0 @@
-#include <linux/bitops.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/processor.h>
-#include <asm/e820.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-
-#include "cpu.h"
-
-#ifdef CONFIG_X86_OOSTORE
-
-static u32 __cpuinit power2(u32 x)
-{
- u32 s = 1;
-
- while (s <= x)
- s <<= 1;
-
- return s >>= 1;
-}
-
-
-/*
- * Set up an actual MCR
- */
-static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
-{
- u32 lo, hi;
-
- hi = base & ~0xFFF;
- lo = ~(size-1); /* Size is a power of 2 so this makes a mask */
- lo &= ~0xFFF; /* Remove the ctrl value bits */
- lo |= key; /* Attribute we wish to set */
- wrmsr(reg+MSR_IDT_MCR0, lo, hi);
- mtrr_centaur_report_mcr(reg, lo, hi); /* Tell the mtrr driver */
-}
-
-/*
- * Figure what we can cover with MCR's
- *
- * Shortcut: We know you can't put 4Gig of RAM on a winchip
- */
-static u32 __cpuinit ramtop(void)
-{
- u32 clip = 0xFFFFFFFFUL;
- u32 top = 0;
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- unsigned long start, end;
-
- if (e820.map[i].addr > 0xFFFFFFFFUL)
- continue;
- /*
- * Don't MCR over reserved space. Ignore the ISA hole;
- * we frob around that catastrophe already.
- */
- if (e820.map[i].type == E820_RESERVED) {
- if (e820.map[i].addr >= 0x100000UL &&
- e820.map[i].addr < clip)
- clip = e820.map[i].addr;
- continue;
- }
- start = e820.map[i].addr;
- end = e820.map[i].addr + e820.map[i].size;
- if (start >= end)
- continue;
- if (end > top)
- top = end;
- }
- /*
- * Everything below 'top' should be RAM except for the ISA hole.
- * Because of the limited MCR's we want to map NV/ACPI into our
- * MCR range for gunk in RAM
- *
- * Clip might cause us to MCR insufficient RAM but that is an
- * acceptable failure mode and should only bite obscure boxes with
- * a VESA hole at 15Mb
- *
- * The second case Clip sometimes kicks in is when the EBDA is marked
- * as reserved. Again we fail safe with reasonable results
- */
- if (top > clip)
- top = clip;
-
- return top;
-}
-
-/*
- * Compute a set of MCR's to give maximum coverage
- */
-static int __cpuinit centaur_mcr_compute(int nr, int key)
-{
- u32 mem = ramtop();
- u32 root = power2(mem);
- u32 base = root;
- u32 top = root;
- u32 floor = 0;
- int ct = 0;
-
- while (ct < nr) {
- u32 fspace = 0;
- u32 high;
- u32 low;
-
- /*
- * Find the largest block we will fill going upwards
- */
- high = power2(mem-top);
-
- /*
- * Find the largest block we will fill going downwards
- */
- low = base/2;
-
- /*
- * Don't fill below 1Mb going downwards as there
- * is an ISA hole in the way.
- */
- if (base <= 1024*1024)
- low = 0;
-
- /*
- * See how much space we could cover by filling below
- * the ISA hole
- */
-
- if (floor == 0)
- fspace = 512*1024;
- else if (floor == 512*1024)
- fspace = 128*1024;
-
- /* And forget ROM space */
-
- /*
- * Now install the largest coverage we get
- */
- if (fspace > high && fspace > low) {
- centaur_mcr_insert(ct, floor, fspace, key);
- floor += fspace;
- } else if (high > low) {
- centaur_mcr_insert(ct, top, high, key);
- top += high;
- } else if (low > 0) {
- base -= low;
- centaur_mcr_insert(ct, base, low, key);
- } else
- break;
- ct++;
- }
- /*
- * We loaded ct values. We now need to set the mask. The caller
- * must do this bit.
- */
- return ct;
-}
-
-static void __cpuinit centaur_create_optimal_mcr(void)
-{
- int used;
- int i;
-
- /*
- * Allocate up to 6 mcrs to mark as much of ram as possible
- * as write combining and weak write ordered.
- *
- * To experiment with: Linux never uses stack operations for
- * mmio spaces so we could globally enable stack operation wc
- *
- * Load the registers with type 31 - full write combining, all
- * writes weakly ordered.
- */
- used = centaur_mcr_compute(6, 31);
-
- /*
- * Wipe unused MCRs
- */
- for (i = used; i < 8; i++)
- wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-static void __cpuinit winchip2_create_optimal_mcr(void)
-{
- u32 lo, hi;
- int used;
- int i;
-
- /*
- * Allocate up to 6 mcrs to mark as much of ram as possible
- * as write combining, weak store ordered.
- *
- * Load the registers with type 25
- * 8 - weak write ordering
- * 16 - weak read ordering
- * 1 - write combining
- */
- used = centaur_mcr_compute(6, 25);
-
- /*
- * Mark the registers we are using.
- */
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- for (i = 0; i < used; i++)
- lo |= 1<<(9+i);
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-
- /*
- * Wipe unused MCRs
- */
-
- for (i = used; i < 8; i++)
- wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-/*
- * Handle the MCR key on the Winchip 2.
- */
-static void __cpuinit winchip2_unprotect_mcr(void)
-{
- u32 lo, hi;
- u32 key;
-
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- lo &= ~0x1C0; /* blank bits 8-6 */
- key = (lo>>17) & 7;
- lo |= key<<6; /* replace with unlock key */
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-
-static void __cpuinit winchip2_protect_mcr(void)
-{
- u32 lo, hi;
-
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- lo &= ~0x1C0; /* blank bits 8-6 */
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-#endif /* CONFIG_X86_OOSTORE */
-
-#define ACE_PRESENT (1 << 6)
-#define ACE_ENABLED (1 << 7)
-#define ACE_FCR (1 << 28) /* MSR_VIA_FCR */
-
-#define RNG_PRESENT (1 << 2)
-#define RNG_ENABLED (1 << 3)
-#define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */
-
-static void __cpuinit init_c3(struct cpuinfo_x86 *c)
-{
- u32 lo, hi;
-
- /* Test for Centaur Extended Feature Flags presence */
- if (cpuid_eax(0xC0000000) >= 0xC0000001) {
- u32 tmp = cpuid_edx(0xC0000001);
-
- /* enable ACE unit, if present and disabled */
- if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
- rdmsr(MSR_VIA_FCR, lo, hi);
- lo |= ACE_FCR; /* enable ACE unit */
- wrmsr(MSR_VIA_FCR, lo, hi);
- printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
- }
-
- /* enable RNG unit, if present and disabled */
- if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
- rdmsr(MSR_VIA_RNG, lo, hi);
- lo |= RNG_ENABLE; /* enable RNG unit */
- wrmsr(MSR_VIA_RNG, lo, hi);
- printk(KERN_INFO "CPU: Enabled h/w RNG\n");
- }
-
- /* store Centaur Extended Feature Flags as
- * word 5 of the CPU capability bit array
- */
- c->x86_capability[5] = cpuid_edx(0xC0000001);
- }
-#ifdef CONFIG_X86_32
- /* Cyrix III family needs CX8 & PGE explicitly enabled. */
- if (c->x86_model >= 6 && c->x86_model <= 13) {
- rdmsr(MSR_VIA_FCR, lo, hi);
- lo |= (1<<1 | 1<<7);
- wrmsr(MSR_VIA_FCR, lo, hi);
- set_cpu_cap(c, X86_FEATURE_CX8);
- }
-
- /* Before Nehemiah, the C3's had 3dNOW! */
- if (c->x86_model >= 6 && c->x86_model < 9)
- set_cpu_cap(c, X86_FEATURE_3DNOW);
-#endif
- if (c->x86 == 0x6 && c->x86_model >= 0xf) {
- c->x86_cache_alignment = c->x86_clflush_size * 2;
- set_cpu_cap(c, X86_FEATURE_REP_GOOD);
- }
-
- cpu_detect_cache_sizes(c);
-}
-
-enum {
- ECX8 = 1<<1,
- EIERRINT = 1<<2,
- DPM = 1<<3,
- DMCE = 1<<4,
- DSTPCLK = 1<<5,
- ELINEAR = 1<<6,
- DSMC = 1<<7,
- DTLOCK = 1<<8,
- EDCTLB = 1<<8,
- EMMX = 1<<9,
- DPDC = 1<<11,
- EBRPRED = 1<<12,
- DIC = 1<<13,
- DDC = 1<<14,
- DNA = 1<<15,
- ERETSTK = 1<<16,
- E2MMX = 1<<19,
- EAMD3D = 1<<20,
-};
-
-static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
-{
- switch (c->x86) {
-#ifdef CONFIG_X86_32
- case 5:
- /* Emulate MTRRs using Centaur's MCR. */
- set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
- break;
-#endif
- case 6:
- if (c->x86_model >= 0xf)
- set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
- break;
- }
-#ifdef CONFIG_X86_64
- set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-#endif
-}
-
-static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_32
- char *name;
- u32 fcr_set = 0;
- u32 fcr_clr = 0;
- u32 lo, hi, newlo;
- u32 aa, bb, cc, dd;
-
- /*
- * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
- * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
- */
- clear_cpu_cap(c, 0*32+31);
-#endif
- early_init_centaur(c);
- switch (c->x86) {
-#ifdef CONFIG_X86_32
- case 5:
- switch (c->x86_model) {
- case 4:
- name = "C6";
- fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
- fcr_clr = DPDC;
- printk(KERN_NOTICE "Disabling bugged TSC.\n");
- clear_cpu_cap(c, X86_FEATURE_TSC);
-#ifdef CONFIG_X86_OOSTORE
- centaur_create_optimal_mcr();
- /*
- * Enable:
- * write combining on non-stack, non-string
- * write combining on string, all types
- * weak write ordering
- *
- * The C6 original lacks weak read order
- *
- * Note 0x120 is write only on Winchip 1
- */
- wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
-#endif
- break;
- case 8:
- switch (c->x86_mask) {
- default:
- name = "2";
- break;
- case 7 ... 9:
- name = "2A";
- break;
- case 10 ... 15:
- name = "2B";
- break;
- }
- fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
- E2MMX|EAMD3D;
- fcr_clr = DPDC;
-#ifdef CONFIG_X86_OOSTORE
- winchip2_unprotect_mcr();
- winchip2_create_optimal_mcr();
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- /*
- * Enable:
- * write combining on non-stack, non-string
- * write combining on string, all types
- * weak write ordering
- */
- lo |= 31;
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
- winchip2_protect_mcr();
-#endif
- break;
- case 9:
- name = "3";
- fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
- E2MMX|EAMD3D;
- fcr_clr = DPDC;
-#ifdef CONFIG_X86_OOSTORE
- winchip2_unprotect_mcr();
- winchip2_create_optimal_mcr();
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- /*
- * Enable:
- * write combining on non-stack, non-string
- * write combining on string, all types
- * weak write ordering
- */
- lo |= 31;
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
- winchip2_protect_mcr();
-#endif
- break;
- default:
- name = "??";
- }
-
- rdmsr(MSR_IDT_FCR1, lo, hi);
- newlo = (lo|fcr_set) & (~fcr_clr);
-
- if (newlo != lo) {
- printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n",
- lo, newlo);
- wrmsr(MSR_IDT_FCR1, newlo, hi);
- } else {
- printk(KERN_INFO "Centaur FCR is 0x%X\n", lo);
- }
- /* Emulate MTRRs using Centaur's MCR. */
- set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
- /* Report CX8 */
- set_cpu_cap(c, X86_FEATURE_CX8);
- /* Set 3DNow! on Winchip 2 and above. */
- if (c->x86_model >= 8)
- set_cpu_cap(c, X86_FEATURE_3DNOW);
- /* See if we can find out some more. */
- if (cpuid_eax(0x80000000) >= 0x80000005) {
- /* Yes, we can. */
- cpuid(0x80000005, &aa, &bb, &cc, &dd);
- /* Add L1 data and code cache sizes. */
- c->x86_cache_size = (cc>>24)+(dd>>24);
- }
- sprintf(c->x86_model_id, "WinChip %s", name);
- break;
-#endif
- case 6:
- init_c3(c);
- break;
- }
-#ifdef CONFIG_X86_64
- set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-#endif
-}
-
-static unsigned int __cpuinit
-centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
-{
-#ifdef CONFIG_X86_32
- /* VIA C3 CPUs (670-68F) need further shifting. */
- if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
- size >>= 8;
-
- /*
- * There's also an erratum in Nehemiah stepping 1, which
- * returns '65KB' instead of '64KB'
- * - Note, it seems this may only be in engineering samples.
- */
- if ((c->x86 == 6) && (c->x86_model == 9) &&
- (c->x86_mask == 1) && (size == 65))
- size -= 1;
-#endif
- return size;
-}
-
-static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
- .c_vendor = "Centaur",
- .c_ident = { "CentaurHauls" },
- .c_early_init = early_init_centaur,
- .c_init = init_centaur,
- .c_size_cache = centaur_size_cache,
- .c_x86_vendor = X86_VENDOR_CENTAUR,
-};
-
-cpu_dev_register(centaur_cpu_dev);
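For reference, a worked example of the MCR encoding used by centaur_mcr_insert() above, with values chosen only for illustration: marking 512 KB at base 0 with key 31 (full write combining, writes weakly ordered) gives hi = 0 & ~0xFFF = 0x00000000 and lo = (~(0x80000 - 1) & ~0xFFF) | 31 = 0xFFF80000 | 0x1F = 0xFFF8001F, i.e. the upper bits of lo form the mask implied by the power-of-two size and the low 12 bits carry the attribute key.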
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/common.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/common.c
deleted file mode 100644
index cf793021..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/common.c
+++ /dev/null
@@ -1,1318 +0,0 @@
-#include <linux/bootmem.h>
-#include <linux/linkage.h>
-#include <linux/bitops.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/kgdb.h>
-#include <linux/smp.h>
-#include <linux/io.h>
-
-#include <asm/stackprotector.h>
-#include <asm/perf_event.h>
-#include <asm/mmu_context.h>
-#include <asm/archrandom.h>
-#include <asm/hypervisor.h>
-#include <asm/processor.h>
-#include <asm/debugreg.h>
-#include <asm/sections.h>
-#include <linux/topology.h>
-#include <linux/cpumask.h>
-#include <asm/pgtable.h>
-#include <linux/atomic.h>
-#include <asm/proto.h>
-#include <asm/setup.h>
-#include <asm/apic.h>
-#include <asm/desc.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
-#include <asm/mtrr.h>
-#include <linux/numa.h>
-#include <asm/asm.h>
-#include <asm/cpu.h>
-#include <asm/mce.h>
-#include <asm/msr.h>
-#include <asm/pat.h>
-
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/uv/uv.h>
-#endif
-
-#include "cpu.h"
-
-/* all of these masks are initialized in setup_cpu_local_masks() */
-cpumask_var_t cpu_initialized_mask;
-cpumask_var_t cpu_callout_mask;
-cpumask_var_t cpu_callin_mask;
-
-/* representing cpus for which sibling maps can be computed */
-cpumask_var_t cpu_sibling_setup_mask;
-
-/* correctly size the local cpu masks */
-void __init setup_cpu_local_masks(void)
-{
- alloc_bootmem_cpumask_var(&cpu_initialized_mask);
- alloc_bootmem_cpumask_var(&cpu_callin_mask);
- alloc_bootmem_cpumask_var(&cpu_callout_mask);
- alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
-}
-
-static void __cpuinit default_init(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_64
- cpu_detect_cache_sizes(c);
-#else
- /* Not much we can do here... */
- /* Check if at least it has cpuid */
- if (c->cpuid_level == -1) {
- /* No cpuid. It must be an ancient CPU */
- if (c->x86 == 4)
- strcpy(c->x86_model_id, "486");
- else if (c->x86 == 3)
- strcpy(c->x86_model_id, "386");
- }
-#endif
-}
-
-static const struct cpu_dev __cpuinitconst default_cpu = {
- .c_init = default_init,
- .c_vendor = "Unknown",
- .c_x86_vendor = X86_VENDOR_UNKNOWN,
-};
-
-static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
-
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
-#ifdef CONFIG_X86_64
- /*
- * We need valid kernel segments for data and code in long mode too
- * IRET will check the segment types kkeil 2000/10/28
- * Also sysret mandates a special GDT layout
- *
- * TLS descriptors are currently at a different place compared to i386.
- * Hopefully nobody expects them at a fixed place (Wine?)
- */
- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
-#else
- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
- /*
- * Segments used for calling PnP BIOS have byte granularity.
- * The code and data segments have fixed 64k limits,
- * the transfer segment sizes are set at run time.
- */
- /* 32-bit code */
- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
- /* 16-bit code */
- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
- /* 16-bit data */
- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
- /* 16-bit data */
- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
- /* 16-bit data */
- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
- /*
- * The APM segments have byte granularity and their bases
- * are set at run time. All have 64k limits.
- */
- /* 32-bit code */
- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
- /* 16-bit code */
- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
- /* data */
- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
-
- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
- GDT_STACK_CANARY_INIT
-#endif
-} };
-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
-
-static int __init x86_xsave_setup(char *s)
-{
- setup_clear_cpu_cap(X86_FEATURE_XSAVE);
- setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
- return 1;
-}
-__setup("noxsave", x86_xsave_setup);
-
-static int __init x86_xsaveopt_setup(char *s)
-{
- setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
- return 1;
-}
-__setup("noxsaveopt", x86_xsaveopt_setup);
-
-#ifdef CONFIG_X86_32
-static int cachesize_override __cpuinitdata = -1;
-static int disable_x86_serial_nr __cpuinitdata = 1;
-
-static int __init cachesize_setup(char *str)
-{
- get_option(&str, &cachesize_override);
- return 1;
-}
-__setup("cachesize=", cachesize_setup);
-
-static int __init x86_fxsr_setup(char *s)
-{
- setup_clear_cpu_cap(X86_FEATURE_FXSR);
- setup_clear_cpu_cap(X86_FEATURE_XMM);
- return 1;
-}
-__setup("nofxsr", x86_fxsr_setup);
-
-static int __init x86_sep_setup(char *s)
-{
- setup_clear_cpu_cap(X86_FEATURE_SEP);
- return 1;
-}
-__setup("nosep", x86_sep_setup);
-
-/* Standard macro to see if a specific flag is changeable */
-static inline int flag_is_changeable_p(u32 flag)
-{
- u32 f1, f2;
-
- /*
- * Cyrix and IDT cpus allow disabling of CPUID
- * so the code below may return different results
- * when it is executed before and after enabling
- * the CPUID. Add "volatile" to not allow gcc to
- * optimize the subsequent calls to this function.
- */
- asm volatile ("pushfl \n\t"
- "pushfl \n\t"
- "popl %0 \n\t"
- "movl %0, %1 \n\t"
- "xorl %2, %0 \n\t"
- "pushl %0 \n\t"
- "popfl \n\t"
- "pushfl \n\t"
- "popl %0 \n\t"
- "popfl \n\t"
-
- : "=&r" (f1), "=&r" (f2)
- : "ir" (flag));
-
- return ((f1^f2) & flag) != 0;
-}
-
-/* Probe for the CPUID instruction */
-static int __cpuinit have_cpuid_p(void)
-{
- return flag_is_changeable_p(X86_EFLAGS_ID);
-}
-
-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
-{
- unsigned long lo, hi;
-
- if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
- return;
-
- /* Disable processor serial number: */
-
- rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
- lo |= 0x200000;
- wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-
- printk(KERN_NOTICE "CPU serial number disabled.\n");
- clear_cpu_cap(c, X86_FEATURE_PN);
-
- /* Disabling the serial number may affect the cpuid level */
- c->cpuid_level = cpuid_eax(0);
-}
-
-static int __init x86_serial_nr_setup(char *s)
-{
- disable_x86_serial_nr = 0;
- return 1;
-}
-__setup("serialnumber", x86_serial_nr_setup);
-#else
-static inline int flag_is_changeable_p(u32 flag)
-{
- return 1;
-}
-/* Probe for the CPUID instruction */
-static inline int have_cpuid_p(void)
-{
- return 1;
-}
-static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
-{
-}
-#endif
-
-static int disable_smep __cpuinitdata;
-static __init int setup_disable_smep(char *arg)
-{
- disable_smep = 1;
- return 1;
-}
-__setup("nosmep", setup_disable_smep);
-
-static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
-{
- if (cpu_has(c, X86_FEATURE_SMEP)) {
- if (unlikely(disable_smep)) {
- setup_clear_cpu_cap(X86_FEATURE_SMEP);
- clear_in_cr4(X86_CR4_SMEP);
- } else
- set_in_cr4(X86_CR4_SMEP);
- }
-}
-
-/*
- * Some CPU features depend on higher CPUID levels, which may not always
- * be available due to CPUID level capping or broken virtualization
- * software. Add those features to this table to auto-disable them.
- */
-struct cpuid_dependent_feature {
- u32 feature;
- u32 level;
-};
-
-static const struct cpuid_dependent_feature __cpuinitconst
-cpuid_dependent_features[] = {
- { X86_FEATURE_MWAIT, 0x00000005 },
- { X86_FEATURE_DCA, 0x00000009 },
- { X86_FEATURE_XSAVE, 0x0000000d },
- { 0, 0 }
-};
-
-static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
-{
- const struct cpuid_dependent_feature *df;
-
- for (df = cpuid_dependent_features; df->feature; df++) {
-
- if (!cpu_has(c, df->feature))
- continue;
- /*
- * Note: cpuid_level is set to -1 if unavailable, but
- * extended_cpuid_level is set to 0 if unavailable
- * and the legitimate extended levels are all negative
- * when signed; hence the weird messing around with
- * signs here...
- */
- if (!((s32)df->level < 0 ?
- (u32)df->level > (u32)c->extended_cpuid_level :
- (s32)df->level > (s32)c->cpuid_level))
- continue;
-
- clear_cpu_cap(c, df->feature);
- if (!warn)
- continue;
-
- printk(KERN_WARNING
- "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
- x86_cap_flags[df->feature], df->level);
- }
-}
-
-/*
- * Naming convention should be: <Name> [(<Codename>)]
- * This table is only used if init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this
- * isn't used.
- */
-
-/* Look up CPU names by table lookup. */
-static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
-{
- const struct cpu_model_info *info;
-
- if (c->x86_model >= 16)
- return NULL; /* Range check */
-
- if (!this_cpu)
- return NULL;
-
- info = this_cpu->c_models;
-
- while (info && info->family) {
- if (info->family == c->x86)
- return info->model_names[c->x86_model];
- info++;
- }
- return NULL; /* Not found */
-}
-
-__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
-__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;
-
-void load_percpu_segment(int cpu)
-{
-#ifdef CONFIG_X86_32
- loadsegment(fs, __KERNEL_PERCPU);
-#else
- loadsegment(gs, 0);
- wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
-#endif
- load_stack_canary_segment();
-}
-
-/*
- * Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one.
- */
-void switch_to_new_gdt(int cpu)
-{
- struct desc_ptr gdt_descr;
-
- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
- gdt_descr.size = GDT_SIZE - 1;
- load_gdt(&gdt_descr);
- /* Reload the per-cpu base */
-
- load_percpu_segment(cpu);
-}
-
-static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
-
-static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
-{
- unsigned int *v;
- char *p, *q;
-
- if (c->extended_cpuid_level < 0x80000004)
- return;
-
- v = (unsigned int *)c->x86_model_id;
- cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
- cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
- cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
- c->x86_model_id[48] = 0;
-
- /*
- * Intel chips right-justify this string for some dumb reason;
- * undo that brain damage:
- */
- p = q = &c->x86_model_id[0];
- while (*p == ' ')
- p++;
- if (p != q) {
- while (*p)
- *q++ = *p++;
- while (q <= &c->x86_model_id[48])
- *q++ = '\0'; /* Zero-pad the rest */
- }
-}
-
-void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
-{
- unsigned int n, dummy, ebx, ecx, edx, l2size;
-
- n = c->extended_cpuid_level;
-
- if (n >= 0x80000005) {
- cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
- c->x86_cache_size = (ecx>>24) + (edx>>24);
-#ifdef CONFIG_X86_64
- /* On K8 L1 TLB is inclusive, so don't count it */
- c->x86_tlbsize = 0;
-#endif
- }
-
- if (n < 0x80000006) /* Some chips just have a large L1. */
- return;
-
- cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
- l2size = ecx >> 16;
-
-#ifdef CONFIG_X86_64
- c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
-#else
- /* do processor-specific cache resizing */
- if (this_cpu->c_size_cache)
- l2size = this_cpu->c_size_cache(c, l2size);
-
- /* Allow user to override all this if necessary. */
- if (cachesize_override != -1)
- l2size = cachesize_override;
-
- if (l2size == 0)
- return; /* Again, no L2 cache is possible */
-#endif
-
- c->x86_cache_size = l2size;
-}
-
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_HT
- u32 eax, ebx, ecx, edx;
- int index_msb, core_bits;
- static bool printed;
-
- if (!cpu_has(c, X86_FEATURE_HT))
- return;
-
- if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
- goto out;
-
- if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
- return;
-
- cpuid(1, &eax, &ebx, &ecx, &edx);
-
- smp_num_siblings = (ebx & 0xff0000) >> 16;
-
- if (smp_num_siblings == 1) {
- printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
- goto out;
- }
-
- if (smp_num_siblings <= 1)
- goto out;
-
- index_msb = get_count_order(smp_num_siblings);
- c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
-
- smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-
- index_msb = get_count_order(smp_num_siblings);
-
- core_bits = get_count_order(c->x86_max_cores);
-
- c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
- ((1 << core_bits) - 1);
-
-out:
- if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
- c->phys_proc_id);
- printk(KERN_INFO "CPU: Processor Core ID: %d\n",
- c->cpu_core_id);
- printed = 1;
- }
-#endif
-}
-
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
-{
- char *v = c->x86_vendor_id;
- int i;
-
- for (i = 0; i < X86_VENDOR_NUM; i++) {
- if (!cpu_devs[i])
- break;
-
- if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
- (cpu_devs[i]->c_ident[1] &&
- !strcmp(v, cpu_devs[i]->c_ident[1]))) {
-
- this_cpu = cpu_devs[i];
- c->x86_vendor = this_cpu->c_x86_vendor;
- return;
- }
- }
-
- printk_once(KERN_ERR
- "CPU: vendor_id '%s' unknown, using generic init.\n" \
- "CPU: Your system may be unstable.\n", v);
-
- c->x86_vendor = X86_VENDOR_UNKNOWN;
- this_cpu = &default_cpu;
-}
-
-void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
-{
- /* Get vendor name */
- cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
- (unsigned int *)&c->x86_vendor_id[0],
- (unsigned int *)&c->x86_vendor_id[8],
- (unsigned int *)&c->x86_vendor_id[4]);
-
- c->x86 = 4;
- /* Intel-defined flags: level 0x00000001 */
- if (c->cpuid_level >= 0x00000001) {
- u32 junk, tfms, cap0, misc;
-
- cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
- c->x86 = (tfms >> 8) & 0xf;
- c->x86_model = (tfms >> 4) & 0xf;
- c->x86_mask = tfms & 0xf;
-
- if (c->x86 == 0xf)
- c->x86 += (tfms >> 20) & 0xff;
- if (c->x86 >= 0x6)
- c->x86_model += ((tfms >> 16) & 0xf) << 4;
-
- if (cap0 & (1<<19)) {
- c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
- c->x86_cache_alignment = c->x86_clflush_size;
- }
- }
-}
-
-void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
-{
- u32 tfms, xlvl;
- u32 ebx;
-
- /* Intel-defined flags: level 0x00000001 */
- if (c->cpuid_level >= 0x00000001) {
- u32 capability, excap;
-
- cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
- c->x86_capability[0] = capability;
- c->x86_capability[4] = excap;
- }
-
- /* Additional Intel-defined flags: level 0x00000007 */
- if (c->cpuid_level >= 0x00000007) {
- u32 eax, ebx, ecx, edx;
-
- cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
-
- c->x86_capability[9] = ebx;
- }
-
- /* AMD-defined flags: level 0x80000001 */
- xlvl = cpuid_eax(0x80000000);
- c->extended_cpuid_level = xlvl;
-
- if ((xlvl & 0xffff0000) == 0x80000000) {
- if (xlvl >= 0x80000001) {
- c->x86_capability[1] = cpuid_edx(0x80000001);
- c->x86_capability[6] = cpuid_ecx(0x80000001);
- }
- }
-
- if (c->extended_cpuid_level >= 0x80000008) {
- u32 eax = cpuid_eax(0x80000008);
-
- c->x86_virt_bits = (eax >> 8) & 0xff;
- c->x86_phys_bits = eax & 0xff;
- }
-#ifdef CONFIG_X86_32
- else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
- c->x86_phys_bits = 36;
-#endif
-
- if (c->extended_cpuid_level >= 0x80000007)
- c->x86_power = cpuid_edx(0x80000007);
-
- init_scattered_cpuid_features(c);
-}
-
-static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_32
- int i;
-
- /*
- * First of all, decide if this is a 486 or higher
- * It's a 486 if we can modify the AC flag
- */
- if (flag_is_changeable_p(X86_EFLAGS_AC))
- c->x86 = 4;
- else
- c->x86 = 3;
-
- for (i = 0; i < X86_VENDOR_NUM; i++)
- if (cpu_devs[i] && cpu_devs[i]->c_identify) {
- c->x86_vendor_id[0] = 0;
- cpu_devs[i]->c_identify(c);
- if (c->x86_vendor_id[0]) {
- get_cpu_vendor(c);
- break;
- }
- }
-#endif
-}
-
-/*
- * Do minimum CPU detection early.
- * Fields really needed: vendor, cpuid_level, family, model, mask,
- * cache alignment.
- * The others are not touched to avoid unwanted side effects.
- *
- * WARNING: this function is only called on the BP. Don't add code here
- * that is supposed to run on all CPUs.
- */
-static void __init early_identify_cpu(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_64
- c->x86_clflush_size = 64;
- c->x86_phys_bits = 36;
- c->x86_virt_bits = 48;
-#else
- c->x86_clflush_size = 32;
- c->x86_phys_bits = 32;
- c->x86_virt_bits = 32;
-#endif
- c->x86_cache_alignment = c->x86_clflush_size;
-
- memset(&c->x86_capability, 0, sizeof c->x86_capability);
- c->extended_cpuid_level = 0;
-
- if (!have_cpuid_p())
- identify_cpu_without_cpuid(c);
-
-	/* Cyrix could have cpuid enabled via c_identify() */
- if (!have_cpuid_p())
- return;
-
- cpu_detect(c);
-
- get_cpu_vendor(c);
-
- get_cpu_cap(c);
-
- if (this_cpu->c_early_init)
- this_cpu->c_early_init(c);
-
- c->cpu_index = 0;
- filter_cpuid_features(c, false);
-
- setup_smep(c);
-
- if (this_cpu->c_bsp_init)
- this_cpu->c_bsp_init(c);
-}
-
-void __init early_cpu_init(void)
-{
- const struct cpu_dev *const *cdev;
- int count = 0;
-
-#ifdef CONFIG_PROCESSOR_SELECT
- printk(KERN_INFO "KERNEL supported cpus:\n");
-#endif
-
- for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
- const struct cpu_dev *cpudev = *cdev;
-
- if (count >= X86_VENDOR_NUM)
- break;
- cpu_devs[count] = cpudev;
- count++;
-
-#ifdef CONFIG_PROCESSOR_SELECT
- {
- unsigned int j;
-
- for (j = 0; j < 2; j++) {
- if (!cpudev->c_ident[j])
- continue;
- printk(KERN_INFO " %s %s\n", cpudev->c_vendor,
- cpudev->c_ident[j]);
- }
- }
-#endif
- }
- early_identify_cpu(&boot_cpu_data);
-}
-
-/*
- * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
- * unfortunately, that's not true in practice because of early VIA
- * chips and (more importantly) broken virtualizers that are not easy
- * to detect. In the latter case it doesn't even *fail* reliably, so
- * probing for it doesn't even work. Disable it completely on 32-bit
- * unless we can find a reliable way to detect all the broken cases.
- * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
- */
-static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_32
- clear_cpu_cap(c, X86_FEATURE_NOPL);
-#else
- set_cpu_cap(c, X86_FEATURE_NOPL);
-#endif
-}
-
-static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
-{
- c->extended_cpuid_level = 0;
-
- if (!have_cpuid_p())
- identify_cpu_without_cpuid(c);
-
-	/* Cyrix could have cpuid enabled via c_identify() */
- if (!have_cpuid_p())
- return;
-
- cpu_detect(c);
-
- get_cpu_vendor(c);
-
- get_cpu_cap(c);
-
- if (c->cpuid_level >= 0x00000001) {
- c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
-#ifdef CONFIG_X86_32
-# ifdef CONFIG_X86_HT
- c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
-# else
- c->apicid = c->initial_apicid;
-# endif
-#endif
- c->phys_proc_id = c->initial_apicid;
- }
-
- setup_smep(c);
-
- get_model_name(c); /* Default name */
-
- detect_nopl(c);
-}
-
-/*
- * This does the hard work of actually picking apart the CPU stuff...
- */
-static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
-{
- int i;
-
- c->loops_per_jiffy = loops_per_jiffy;
- c->x86_cache_size = -1;
- c->x86_vendor = X86_VENDOR_UNKNOWN;
- c->x86_model = c->x86_mask = 0; /* So far unknown... */
- c->x86_vendor_id[0] = '\0'; /* Unset */
- c->x86_model_id[0] = '\0'; /* Unset */
- c->x86_max_cores = 1;
- c->x86_coreid_bits = 0;
-#ifdef CONFIG_X86_64
- c->x86_clflush_size = 64;
- c->x86_phys_bits = 36;
- c->x86_virt_bits = 48;
-#else
- c->cpuid_level = -1; /* CPUID not detected */
- c->x86_clflush_size = 32;
- c->x86_phys_bits = 32;
- c->x86_virt_bits = 32;
-#endif
- c->x86_cache_alignment = c->x86_clflush_size;
- memset(&c->x86_capability, 0, sizeof c->x86_capability);
-
- generic_identify(c);
-
- if (this_cpu->c_identify)
- this_cpu->c_identify(c);
-
-	/* Clear/Set all flags overridden by options, after probe */
- for (i = 0; i < NCAPINTS; i++) {
- c->x86_capability[i] &= ~cpu_caps_cleared[i];
- c->x86_capability[i] |= cpu_caps_set[i];
- }
-
-#ifdef CONFIG_X86_64
- c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
-#endif
-
- /*
- * Vendor-specific initialization. In this section we
-	 * canonicalize the feature flags, meaning that if there are
-	 * features a certain CPU supports which CPUID doesn't report,
-	 * cases where CPUID claims incorrect flags, or other bugs,
-	 * we handle them here.
- *
- * At the end of this section, c->x86_capability better
- * indicate the features this CPU genuinely supports!
- */
- if (this_cpu->c_init)
- this_cpu->c_init(c);
-
- /* Disable the PN if appropriate */
- squash_the_stupid_serial_number(c);
-
- /*
- * The vendor-specific functions might have changed features.
- * Now we do "generic changes."
- */
-
- /* Filter out anything that depends on CPUID levels we don't have */
- filter_cpuid_features(c, true);
-
- /* If the model name is still unset, do table lookup. */
- if (!c->x86_model_id[0]) {
- const char *p;
- p = table_lookup_model(c);
- if (p)
- strcpy(c->x86_model_id, p);
- else
- /* Last resort... */
- sprintf(c->x86_model_id, "%02x/%02x",
- c->x86, c->x86_model);
- }
-
-#ifdef CONFIG_X86_64
- detect_ht(c);
-#endif
-
- init_hypervisor(c);
- x86_init_rdrand(c);
-
- /*
-	 * Clear/Set all flags overridden by options; we need to do this
-	 * before the SMP all-CPUs capability AND below.
- */
- for (i = 0; i < NCAPINTS; i++) {
- c->x86_capability[i] &= ~cpu_caps_cleared[i];
- c->x86_capability[i] |= cpu_caps_set[i];
- }
-
- /*
- * On SMP, boot_cpu_data holds the common feature set between
- * all CPUs; so make sure that we indicate which features are
- * common between the CPUs. The first time this routine gets
- * executed, c == &boot_cpu_data.
- */
- if (c != &boot_cpu_data) {
- /* AND the already accumulated flags with these */
- for (i = 0; i < NCAPINTS; i++)
- boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
- }
-
- /* Init Machine Check Exception if available. */
- mcheck_cpu_init(c);
-
- select_idle_routine(c);
-
-#ifdef CONFIG_NUMA
- numa_add_cpu(smp_processor_id());
-#endif
-}
-
-#ifdef CONFIG_X86_64
-static void vgetcpu_set_mode(void)
-{
- if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
- vgetcpu_mode = VGETCPU_RDTSCP;
- else
- vgetcpu_mode = VGETCPU_LSL;
-}
-#endif
-
-void __init identify_boot_cpu(void)
-{
- identify_cpu(&boot_cpu_data);
- init_amd_e400_c1e_mask();
-#ifdef CONFIG_X86_32
- sysenter_setup();
- enable_sep_cpu();
-#else
- vgetcpu_set_mode();
-#endif
-}
-
-void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
-{
- BUG_ON(c == &boot_cpu_data);
- identify_cpu(c);
-#ifdef CONFIG_X86_32
- enable_sep_cpu();
-#endif
- mtrr_ap_init();
-}
-
-struct msr_range {
- unsigned min;
- unsigned max;
-};
-
-static const struct msr_range msr_range_array[] __cpuinitconst = {
- { 0x00000000, 0x00000418},
- { 0xc0000000, 0xc000040b},
- { 0xc0010000, 0xc0010142},
- { 0xc0011000, 0xc001103b},
-};
-
-static void __cpuinit __print_cpu_msr(void)
-{
- unsigned index_min, index_max;
- unsigned index;
- u64 val;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
- index_min = msr_range_array[i].min;
- index_max = msr_range_array[i].max;
-
- for (index = index_min; index < index_max; index++) {
- if (rdmsrl_amd_safe(index, &val))
- continue;
- printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
- }
- }
-}
-
-static int show_msr __cpuinitdata;
-
-static __init int setup_show_msr(char *arg)
-{
- int num;
-
- get_option(&arg, &num);
-
- if (num > 0)
- show_msr = num;
- return 1;
-}
-__setup("show_msr=", setup_show_msr);
-
-static __init int setup_noclflush(char *arg)
-{
- setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
- return 1;
-}
-__setup("noclflush", setup_noclflush);
-
-void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
-{
- const char *vendor = NULL;
-
- if (c->x86_vendor < X86_VENDOR_NUM) {
- vendor = this_cpu->c_vendor;
- } else {
- if (c->cpuid_level >= 0)
- vendor = c->x86_vendor_id;
- }
-
- if (vendor && !strstr(c->x86_model_id, vendor))
- printk(KERN_CONT "%s ", vendor);
-
- if (c->x86_model_id[0])
- printk(KERN_CONT "%s", c->x86_model_id);
- else
- printk(KERN_CONT "%d86", c->x86);
-
- if (c->x86_mask || c->cpuid_level >= 0)
- printk(KERN_CONT " stepping %02x\n", c->x86_mask);
- else
- printk(KERN_CONT "\n");
-
- print_cpu_msr(c);
-}
-
-void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c)
-{
- if (c->cpu_index < show_msr)
- __print_cpu_msr();
-}
-
-static __init int setup_disablecpuid(char *arg)
-{
- int bit;
-
- if (get_option(&arg, &bit) && bit < NCAPINTS*32)
- setup_clear_cpu_cap(bit);
- else
- return 0;
-
- return 1;
-}
-__setup("clearcpuid=", setup_disablecpuid);
-
-#ifdef CONFIG_X86_64
-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
- (unsigned long) nmi_idt_table };
-
-DEFINE_PER_CPU_FIRST(union irq_stack_union,
- irq_stack_union) __aligned(PAGE_SIZE);
-
-/*
- * The following four percpu variables are hot. Align current_task to
- * cacheline size such that all four fall in the same cacheline.
- */
-DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
- &init_task;
-EXPORT_PER_CPU_SYMBOL(current_task);
-
-DEFINE_PER_CPU(unsigned long, kernel_stack) =
- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
-EXPORT_PER_CPU_SYMBOL(kernel_stack);
-
-DEFINE_PER_CPU(char *, irq_stack_ptr) =
- init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
-
-DEFINE_PER_CPU(unsigned int, irq_count) = -1;
-
-DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
-
-/*
- * Special IST stacks which the CPU switches to when it calls
- * an IST-marked descriptor entry. Up to 7 stacks (hardware
- * limit), all of them are 4K, except the debug stack which
- * is 8K.
- */
-static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
- [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
- [DEBUG_STACK - 1] = DEBUG_STKSZ
-};
-
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
- [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
-
-/* May not be marked __init: used by software suspend */
-void syscall_init(void)
-{
- /*
-	 * LSTAR and STAR live in a somewhat strange symbiosis.
-	 * They both write to the same internal register. STAR allows setting
-	 * CS/DS, but only a 32-bit target; LSTAR sets the 64-bit RIP.
- */
- wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
- wrmsrl(MSR_LSTAR, system_call);
- wrmsrl(MSR_CSTAR, ignore_sysret);
-
-#ifdef CONFIG_IA32_EMULATION
- syscall32_cpu_init();
-#endif
-
- /* Flags to clear on syscall */
- wrmsrl(MSR_SYSCALL_MASK,
- X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
-}
-
-unsigned long kernel_eflags;
-
-/*
- * Copies of the original ist values from the tss are only accessed during
- * debugging, no special alignment required.
- */
-DEFINE_PER_CPU(struct orig_ist, orig_ist);
-
-static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
-DEFINE_PER_CPU(int, debug_stack_usage);
-
-int is_debug_stack(unsigned long addr)
-{
- return __get_cpu_var(debug_stack_usage) ||
- (addr <= __get_cpu_var(debug_stack_addr) &&
- addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
-}
-
-void debug_stack_set_zero(void)
-{
- load_idt((const struct desc_ptr *)&nmi_idt_descr);
-}
-
-void debug_stack_reset(void)
-{
- load_idt((const struct desc_ptr *)&idt_descr);
-}
-
-#else /* CONFIG_X86_64 */
-
-DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
-EXPORT_PER_CPU_SYMBOL(current_task);
-DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
-
-#ifdef CONFIG_CC_STACKPROTECTOR
-DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
-#endif
-
-/* Make sure %fs and %gs are initialized properly in idle threads */
-struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
-{
- memset(regs, 0, sizeof(struct pt_regs));
- regs->fs = __KERNEL_PERCPU;
- regs->gs = __KERNEL_STACK_CANARY;
-
- return regs;
-}
-#endif /* CONFIG_X86_64 */
-
-/*
- * Clear all 6 debug registers:
- */
-static void clear_all_debug_regs(void)
-{
- int i;
-
- for (i = 0; i < 8; i++) {
- /* Ignore db4, db5 */
- if ((i == 4) || (i == 5))
- continue;
-
- set_debugreg(0, i);
- }
-}
-
-#ifdef CONFIG_KGDB
-/*
- * Restore debug regs if using kgdbwait and you have a kernel debugger
- * connection established.
- */
-static void dbg_restore_debug_regs(void)
-{
- if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
- arch_kgdb_ops.correct_hw_break();
-}
-#else /* ! CONFIG_KGDB */
-#define dbg_restore_debug_regs()
-#endif /* ! CONFIG_KGDB */
-
-/*
- * cpu_init() initializes state that is per-CPU. Some data is already
- * initialized (naturally) in the bootstrap process, such as the GDT
- * and IDT. We reload them nevertheless, this function acts as a
- * 'CPU state barrier', nothing should get across.
- * A lot of state is already set up in PDA init for 64-bit.
- */
-#ifdef CONFIG_X86_64
-
-void __cpuinit cpu_init(void)
-{
- struct orig_ist *oist;
- struct task_struct *me;
- struct tss_struct *t;
- unsigned long v;
- int cpu;
- int i;
-
- cpu = stack_smp_processor_id();
- t = &per_cpu(init_tss, cpu);
- oist = &per_cpu(orig_ist, cpu);
-
-#ifdef CONFIG_NUMA
- if (cpu != 0 && percpu_read(numa_node) == 0 &&
- early_cpu_to_node(cpu) != NUMA_NO_NODE)
- set_numa_node(early_cpu_to_node(cpu));
-#endif
-
- me = current;
-
- if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
- panic("CPU#%d already initialized!\n", cpu);
-
- pr_debug("Initializing CPU#%d\n", cpu);
-
- clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-
- /*
- * Initialize the per-CPU GDT with the boot GDT,
- * and set up the GDT descriptor:
- */
-
- switch_to_new_gdt(cpu);
- loadsegment(fs, 0);
-
- load_idt((const struct desc_ptr *)&idt_descr);
-
- memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
- syscall_init();
-
- wrmsrl(MSR_FS_BASE, 0);
- wrmsrl(MSR_KERNEL_GS_BASE, 0);
- barrier();
-
- x86_configure_nx();
- if (cpu != 0)
- enable_x2apic();
-
- /*
- * set up and load the per-CPU TSS
- */
- if (!oist->ist[0]) {
- char *estacks = per_cpu(exception_stacks, cpu);
-
- for (v = 0; v < N_EXCEPTION_STACKS; v++) {
- estacks += exception_stack_sizes[v];
- oist->ist[v] = t->x86_tss.ist[v] =
- (unsigned long)estacks;
- if (v == DEBUG_STACK-1)
- per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
- }
- }
-
- t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
-
- /*
- * <= is required because the CPU will access up to
- * 8 bits beyond the end of the IO permission bitmap.
- */
- for (i = 0; i <= IO_BITMAP_LONGS; i++)
- t->io_bitmap[i] = ~0UL;
-
- atomic_inc(&init_mm.mm_count);
- me->active_mm = &init_mm;
- BUG_ON(me->mm);
- enter_lazy_tlb(&init_mm, me);
-
- load_sp0(t, &current->thread);
- set_tss_desc(cpu, t);
- load_TR_desc();
- load_LDT(&init_mm.context);
-
- clear_all_debug_regs();
- dbg_restore_debug_regs();
-
- fpu_init();
- xsave_init();
-
- raw_local_save_flags(kernel_eflags);
-
- if (is_uv_system())
- uv_cpu_init();
-}
-
-#else
-
-void __cpuinit cpu_init(void)
-{
- int cpu = smp_processor_id();
- struct task_struct *curr = current;
- struct tss_struct *t = &per_cpu(init_tss, cpu);
- struct thread_struct *thread = &curr->thread;
-
- if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
- printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
- for (;;)
- local_irq_enable();
- }
-
- printk(KERN_INFO "Initializing CPU#%d\n", cpu);
-
- if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
- clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-
- load_idt(&idt_descr);
- switch_to_new_gdt(cpu);
-
- /*
- * Set up and load the per-CPU TSS and LDT
- */
- atomic_inc(&init_mm.mm_count);
- curr->active_mm = &init_mm;
- BUG_ON(curr->mm);
- enter_lazy_tlb(&init_mm, curr);
-
- load_sp0(t, thread);
- set_tss_desc(cpu, t);
- load_TR_desc();
- load_LDT(&init_mm.context);
-
- t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
-
-#ifdef CONFIG_DOUBLEFAULT
- /* Set up doublefault TSS pointer in the GDT */
- __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
-#endif
-
- clear_all_debug_regs();
- dbg_restore_debug_regs();
-
- fpu_init();
- xsave_init();
-}
-#endif
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/cpu.h b/ANDROID_3.4.5/arch/x86/kernel/cpu/cpu.h
deleted file mode 100644
index 8bacc782..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/cpu.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef ARCH_X86_CPU_H
-#define ARCH_X86_CPU_H
-
-struct cpu_model_info {
- int vendor;
- int family;
- const char *model_names[16];
-};
-
-/* attempt to consolidate cpu attributes */
-struct cpu_dev {
- const char *c_vendor;
-
- /* some have two possibilities for cpuid string */
- const char *c_ident[2];
-
- struct cpu_model_info c_models[4];
-
- void (*c_early_init)(struct cpuinfo_x86 *);
- void (*c_bsp_init)(struct cpuinfo_x86 *);
- void (*c_init)(struct cpuinfo_x86 *);
- void (*c_identify)(struct cpuinfo_x86 *);
- unsigned int (*c_size_cache)(struct cpuinfo_x86 *, unsigned int);
- int c_x86_vendor;
-};
-
-#define cpu_dev_register(cpu_devX) \
- static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
- __attribute__((__section__(".x86_cpu_dev.init"))) = \
- &cpu_devX;
-
-extern const struct cpu_dev *const __x86_cpu_dev_start[],
- *const __x86_cpu_dev_end[];
-
-extern void get_cpu_cap(struct cpuinfo_x86 *c);
-extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
-#endif /* ARCH_X86_CPU_H */
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/cyrix.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/cyrix.c
deleted file mode 100644
index 4fbd384f..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/cyrix.c
+++ /dev/null
@@ -1,461 +0,0 @@
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <asm/dma.h>
-#include <linux/io.h>
-#include <asm/processor-cyrix.h>
-#include <asm/processor-flags.h>
-#include <linux/timer.h>
-#include <asm/pci-direct.h>
-#include <asm/tsc.h>
-
-#include "cpu.h"
-
-/*
- * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info about the CPU.
- */
-static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
-{
- unsigned char ccr2, ccr3;
-
- /* we test for DEVID by checking whether CCR3 is writable */
- ccr3 = getCx86(CX86_CCR3);
- setCx86(CX86_CCR3, ccr3 ^ 0x80);
- getCx86(0xc0); /* dummy to change bus */
-
- if (getCx86(CX86_CCR3) == ccr3) { /* no DEVID regs. */
- ccr2 = getCx86(CX86_CCR2);
- setCx86(CX86_CCR2, ccr2 ^ 0x04);
- getCx86(0xc0); /* dummy */
-
- if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
- *dir0 = 0xfd;
- else { /* Cx486S A step */
- setCx86(CX86_CCR2, ccr2);
- *dir0 = 0xfe;
- }
- } else {
- setCx86(CX86_CCR3, ccr3); /* restore CCR3 */
-
- /* read DIR0 and DIR1 CPU registers */
- *dir0 = getCx86(CX86_DIR0);
- *dir1 = getCx86(CX86_DIR1);
- }
-}
-
-static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __do_cyrix_devid(dir0, dir1);
- local_irq_restore(flags);
-}
-/*
- * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
- * order to identify the Cyrix CPU model after we're out of setup.c
- *
- * Actually, since bugs.h doesn't even reference this, perhaps someone
- * should fix the documentation???
- */
-static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
-
-static const char __cpuinitconst Cx86_model[][9] = {
- "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
- "M II ", "Unknown"
-};
-static const char __cpuinitconst Cx486_name[][5] = {
- "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
- "SRx2", "DRx2"
-};
-static const char __cpuinitconst Cx486S_name[][4] = {
- "S", "S2", "Se", "S2e"
-};
-static const char __cpuinitconst Cx486D_name[][4] = {
- "DX", "DX2", "?", "?", "?", "DX4"
-};
-static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
-static const char __cpuinitconst cyrix_model_mult1[] = "12??43";
-static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
-
-/*
- * Reset the slow-loop (SLOP) bit on the 686(L), which some old BIOSes set
- * for compatibility with DOS games. This makes the udelay loop
- * work correctly, and improves performance.
- *
- * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
- */
-
-static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
-{
- unsigned long flags;
-
- if (Cx86_dir0_msb == 3) {
- unsigned char ccr3, ccr5;
-
- local_irq_save(flags);
- ccr3 = getCx86(CX86_CCR3);
- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
- ccr5 = getCx86(CX86_CCR5);
- if (ccr5 & 2)
- setCx86(CX86_CCR5, ccr5 & 0xfd); /* reset SLOP */
- setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
- local_irq_restore(flags);
-
- if (ccr5 & 2) { /* possible wrong calibration done */
- printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
- calibrate_delay();
- c->loops_per_jiffy = loops_per_jiffy;
- }
- }
-}
-
-
-static void __cpuinit set_cx86_reorder(void)
-{
- u8 ccr3;
-
- printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
- ccr3 = getCx86(CX86_CCR3);
- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
-
- /* Load/Store Serialize to mem access disable (=reorder it) */
- setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
- /* set load/store serialize from 1GB to 4GB */
- ccr3 |= 0xe0;
- setCx86(CX86_CCR3, ccr3);
-}
-
-static void __cpuinit set_cx86_memwb(void)
-{
- printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
-
- /* CCR2 bit 2: unlock NW bit */
- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
- /* set 'Not Write-through' */
- write_cr0(read_cr0() | X86_CR0_NW);
- /* CCR2 bit 2: lock NW bit and set WT1 */
- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
-}
-
-/*
- * Configure later MediaGX and/or Geode processor.
- */
-
-static void __cpuinit geode_configure(void)
-{
- unsigned long flags;
- u8 ccr3;
- local_irq_save(flags);
-
- /* Suspend on halt power saving and enable #SUSP pin */
- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
-
- ccr3 = getCx86(CX86_CCR3);
- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
-
-
- /* FPU fast, DTE cache, Mem bypass */
- setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
- setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
-
- set_cx86_memwb();
- set_cx86_reorder();
-
- local_irq_restore(flags);
-}
-
-static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
-{
- unsigned char dir0, dir0_msn, dir1 = 0;
-
- __do_cyrix_devid(&dir0, &dir1);
- dir0_msn = dir0 >> 4; /* identifies CPU "family" */
-
- switch (dir0_msn) {
- case 3: /* 6x86/6x86L */
- /* Emulate MTRRs using Cyrix's ARRs. */
- set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
- break;
- case 5: /* 6x86MX/M II */
- /* Emulate MTRRs using Cyrix's ARRs. */
- set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
- break;
- }
-}
-
-static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
-{
- unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
- char *buf = c->x86_model_id;
- const char *p = NULL;
-
- /*
-	 * Bit 31 in normal CPUID is used for the nonstandard 3DNow! ID;
-	 * 3DNow! is identified by bit 31 in extended CPUID (1*32+31) anyway.
- */
- clear_cpu_cap(c, 0*32+31);
-
- /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
- if (test_cpu_cap(c, 1*32+24)) {
- clear_cpu_cap(c, 1*32+24);
- set_cpu_cap(c, X86_FEATURE_CXMMX);
- }
-
- do_cyrix_devid(&dir0, &dir1);
-
- check_cx686_slop(c);
-
- Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family" */
- dir0_lsn = dir0 & 0xf; /* model or clock multiplier */
-
- /* common case step number/rev -- exceptions handled below */
- c->x86_model = (dir1 >> 4) + 1;
- c->x86_mask = dir1 & 0xf;
-
- /* Now cook; the original recipe is by Channing Corn, from Cyrix.
- * We do the same thing for each generation: we work out
- * the model, multiplier and stepping. Black magic included,
- * to make the silicon step/rev numbers match the printed ones.
- */
-
- switch (dir0_msn) {
- unsigned char tmp;
-
- case 0: /* Cx486SLC/DLC/SRx/DRx */
- p = Cx486_name[dir0_lsn & 7];
- break;
-
- case 1: /* Cx486S/DX/DX2/DX4 */
- p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
- : Cx486S_name[dir0_lsn & 3];
- break;
-
- case 2: /* 5x86 */
- Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
- p = Cx86_cb+2;
- break;
-
- case 3: /* 6x86/6x86L */
- Cx86_cb[1] = ' ';
- Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
- if (dir1 > 0x21) { /* 686L */
- Cx86_cb[0] = 'L';
- p = Cx86_cb;
- (c->x86_model)++;
- } else /* 686 */
- p = Cx86_cb+1;
- /* Emulate MTRRs using Cyrix's ARRs. */
- set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
- /* 6x86's contain this bug */
- c->coma_bug = 1;
- break;
-
- case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
-#ifdef CONFIG_PCI
- {
- u32 vendor, device;
- /*
- * It isn't really a PCI quirk directly, but the cure is the
- * same. The MediaGX has deep magic SMM stuff that handles the
- * SB emulation. It throws away the fifo on disable_dma() which
- * is wrong and ruins the audio.
- *
- * Bug2: VSA1 has a wrap bug so that using maximum sized DMA
- * causes bad things. According to NatSemi VSA2 has another
- * bug to do with 'hlt'. I've not seen any boards using VSA2
- * and X doesn't seem to support it either so who cares 8).
- * VSA1 we work around however.
- */
-
- printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
- isa_dma_bridge_buggy = 2;
-
- /* We do this before the PCI layer is running. However we
- are safe here as we know the bridge must be a Cyrix
- companion and must be present */
- vendor = read_pci_config_16(0, 0, 0x12, PCI_VENDOR_ID);
- device = read_pci_config_16(0, 0, 0x12, PCI_DEVICE_ID);
-
- /*
- * The 5510/5520 companion chips have a funky PIT.
- */
- if (vendor == PCI_VENDOR_ID_CYRIX &&
- (device == PCI_DEVICE_ID_CYRIX_5510 ||
- device == PCI_DEVICE_ID_CYRIX_5520))
- mark_tsc_unstable("cyrix 5510/5520 detected");
- }
-#endif
-		c->x86_cache_size = 16;	/* Yep, 16K integrated cache, that's it */
-
-		/* GXm supports extended cpuid levels a la AMD */
- if (c->cpuid_level == 2) {
- /* Enable cxMMX extensions (GX1 Datasheet 54) */
- setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
-
- /*
- * GXm : 0x30 ... 0x5f GXm datasheet 51
- * GXlv: 0x6x GXlv datasheet 54
- * ? : 0x7x
- * GX1 : 0x8x GX1 datasheet 56
- */
- if ((0x30 <= dir1 && dir1 <= 0x6f) ||
- (0x80 <= dir1 && dir1 <= 0x8f))
- geode_configure();
- return;
- } else { /* MediaGX */
- Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
- p = Cx86_cb+2;
- c->x86_model = (dir1 & 0x20) ? 1 : 2;
- }
- break;
-
- case 5: /* 6x86MX/M II */
- if (dir1 > 7) {
- dir0_msn++; /* M II */
- /* Enable MMX extensions (App note 108) */
- setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
- } else {
- c->coma_bug = 1; /* 6x86MX, it has the bug. */
- }
- tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
- Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
- p = Cx86_cb+tmp;
- if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
- (c->x86_model)++;
- /* Emulate MTRRs using Cyrix's ARRs. */
- set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
- break;
-
- case 0xf: /* Cyrix 486 without DEVID registers */
- switch (dir0_lsn) {
- case 0xd: /* either a 486SLC or DLC w/o DEVID */
- dir0_msn = 0;
- p = Cx486_name[(c->hard_math) ? 1 : 0];
- break;
-
- case 0xe: /* a 486S A step */
- dir0_msn = 0;
- p = Cx486S_name[0];
- break;
- }
- break;
-
- default: /* unknown (shouldn't happen, we know everyone ;-) */
- dir0_msn = 7;
- break;
- }
- strcpy(buf, Cx86_model[dir0_msn & 7]);
- if (p)
- strcat(buf, p);
- return;
-}
-
-/*
- * Handle National Semiconductor branded processors
- */
-static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
-{
- /*
- * There may be GX1 processors in the wild that are branded
- * NSC and not Cyrix.
- *
- * This function only handles the GX processor, and kicks everything
- * else to the Cyrix init function above - that should
- * cover any processors that might have been branded differently
- * after NSC acquired Cyrix.
- *
- * If this breaks your GX1 horribly, please e-mail
- * info-linux@ldcmail.amd.com to tell us.
- */
-
-	/* Handle the GX (Formerly known as the GX2) */
-
- if (c->x86 == 5 && c->x86_model == 5)
- cpu_detect_cache_sizes(c);
- else
- init_cyrix(c);
-}
-
-/*
- * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
- * by the fact that they preserve the flags across the division of 5/2.
- * PII and PPro exhibit this behavior too, but they have cpuid available.
- */
-
-/*
- * Perform the Cyrix 5/2 test. A Cyrix won't change
- * the flags, while other 486 chips will.
- */
-static inline int test_cyrix_52div(void)
-{
- unsigned int test;
-
- __asm__ __volatile__(
- "sahf\n\t" /* clear flags (%eax = 0x0005) */
- "div %b2\n\t" /* divide 5 by 2 */
- "lahf" /* store flags into %ah */
- : "=a" (test)
- : "0" (5), "q" (2)
- : "cc");
-
-	/* AH is 0x02 on Cyrix after the divide. */
- return (unsigned char) (test >> 8) == 0x02;
-}
-
-static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
-{
- /* Detect Cyrix with disabled CPUID */
- if (c->x86 == 4 && test_cyrix_52div()) {
- unsigned char dir0, dir1;
-
- strcpy(c->x86_vendor_id, "CyrixInstead");
- c->x86_vendor = X86_VENDOR_CYRIX;
-
- /* Actually enable cpuid on the older cyrix */
-
- /* Retrieve CPU revisions */
-
- do_cyrix_devid(&dir0, &dir1);
-
- dir0 >>= 4;
-
- /* Check it is an affected model */
-
- if (dir0 == 5 || dir0 == 3) {
- unsigned char ccr3;
- unsigned long flags;
- printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
- local_irq_save(flags);
- ccr3 = getCx86(CX86_CCR3);
- /* enable MAPEN */
- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
- /* enable cpuid */
- setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80);
- /* disable MAPEN */
- setCx86(CX86_CCR3, ccr3);
- local_irq_restore(flags);
- }
- }
-}
-
-static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
- .c_vendor = "Cyrix",
- .c_ident = { "CyrixInstead" },
- .c_early_init = early_init_cyrix,
- .c_init = init_cyrix,
- .c_identify = cyrix_identify,
- .c_x86_vendor = X86_VENDOR_CYRIX,
-};
-
-cpu_dev_register(cyrix_cpu_dev);
-
-static const struct cpu_dev __cpuinitconst nsc_cpu_dev = {
- .c_vendor = "NSC",
- .c_ident = { "Geode by NSC" },
- .c_init = init_nsc,
- .c_x86_vendor = X86_VENDOR_NSC,
-};
-
-cpu_dev_register(nsc_cpu_dev);
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/hypervisor.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/hypervisor.c
deleted file mode 100644
index 755f64fb..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/hypervisor.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Common hypervisor code
- *
- * Copyright (C) 2008, VMware, Inc.
- * Author : Alok N Kataria <akataria@vmware.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/module.h>
-#include <asm/processor.h>
-#include <asm/hypervisor.h>
-
-/*
- * Hypervisor detect order. This is specified explicitly here because
- * some hypervisors might implement compatibility modes for other
- * hypervisors and therefore need to be detected in a specific sequence.
- */
-static const __initconst struct hypervisor_x86 * const hypervisors[] =
-{
-#ifdef CONFIG_XEN_PVHVM
- &x86_hyper_xen_hvm,
-#endif
- &x86_hyper_vmware,
- &x86_hyper_ms_hyperv,
-};
-
-const struct hypervisor_x86 *x86_hyper;
-EXPORT_SYMBOL(x86_hyper);
-
-static inline void __init
-detect_hypervisor_vendor(void)
-{
- const struct hypervisor_x86 *h, * const *p;
-
- for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) {
- h = *p;
- if (h->detect()) {
- x86_hyper = h;
- printk(KERN_INFO "Hypervisor detected: %s\n", h->name);
- break;
- }
- }
-}
-
-void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
-{
- if (x86_hyper && x86_hyper->set_cpu_features)
- x86_hyper->set_cpu_features(c);
-}
-
-void __init init_hypervisor_platform(void)
-{
-
- detect_hypervisor_vendor();
-
- if (!x86_hyper)
- return;
-
- init_hypervisor(&boot_cpu_data);
-
- if (x86_hyper->init_platform)
- x86_hyper->init_platform();
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/intel.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/intel.c
deleted file mode 100644
index 3e6ff6cb..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/intel.c
+++ /dev/null
@@ -1,555 +0,0 @@
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <linux/string.h>
-#include <linux/bitops.h>
-#include <linux/smp.h>
-#include <linux/sched.h>
-#include <linux/thread_info.h>
-#include <linux/module.h>
-#include <linux/uaccess.h>
-
-#include <asm/processor.h>
-#include <asm/pgtable.h>
-#include <asm/msr.h>
-#include <asm/bugs.h>
-#include <asm/cpu.h>
-
-#ifdef CONFIG_X86_64
-#include <linux/topology.h>
-#include <asm/numa_64.h>
-#endif
-
-#include "cpu.h"
-
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/mpspec.h>
-#include <asm/apic.h>
-#endif
-
-static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
-{
- u64 misc_enable;
-
- /* Unmask CPUID levels if masked: */
- if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
- rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-
- if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
- misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
- wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
- c->cpuid_level = cpuid_eax(0);
- get_cpu_cap(c);
- }
- }
-
- if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
- (c->x86 == 0x6 && c->x86_model >= 0x0e))
- set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-
- if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
- unsigned lower_word;
-
- wrmsr(MSR_IA32_UCODE_REV, 0, 0);
- /* Required by the SDM */
- sync_core();
- rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
- }
-
- /*
- * Atom erratum AAE44/AAF40/AAG38/AAH41:
- *
- * A race condition between speculative fetches and invalidating
- * a large page. This is worked around in microcode, but we
- * need the microcode to have already been loaded... so if it is
- * not, recommend a BIOS update and disable large pages.
- */
- if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
- c->microcode < 0x20e) {
- printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
- clear_cpu_cap(c, X86_FEATURE_PSE);
- }
-
-#ifdef CONFIG_X86_64
- set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-#else
- /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
- if (c->x86 == 15 && c->x86_cache_alignment == 64)
- c->x86_cache_alignment = 128;
-#endif
-
- /* CPUID workaround for 0F33/0F34 CPU */
- if (c->x86 == 0xF && c->x86_model == 0x3
- && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
- c->x86_phys_bits = 36;
-
- /*
-	 * c->x86_power is 8000_0007 edx. Bit 8 means the TSC runs at a constant
-	 * rate with P/T states and does not stop in deep C-states.
-	 *
-	 * It is also reliable across cores and sockets (but not across
-	 * cabinets; we turn it off explicitly in that case).
- */
- if (c->x86_power & (1 << 8)) {
- set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
- set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
- if (!check_tsc_unstable())
- sched_clock_stable = 1;
- }
-
- /*
- * There is a known erratum on Pentium III and Core Solo
- * and Core Duo CPUs.
- * " Page with PAT set to WC while associated MTRR is UC
- * may consolidate to UC "
- * Because of this erratum, it is better to stick with
- * setting WC in MTRR rather than using PAT on these CPUs.
- *
- * Enable PAT WC only on P4, Core 2 or later CPUs.
- */
- if (c->x86 == 6 && c->x86_model < 15)
- clear_cpu_cap(c, X86_FEATURE_PAT);
-
-#ifdef CONFIG_KMEMCHECK
- /*
- * P4s have a "fast strings" feature which causes single-
- * stepping REP instructions to only generate a #DB on
- * cache-line boundaries.
- *
- * Ingo Molnar reported a Pentium D (model 6) and a Xeon
- * (model 2) with the same problem.
- */
- if (c->x86 == 15) {
- rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-
- if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
- printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
-
- misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
- wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
- }
- }
-#endif
-
- /*
- * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
- * clear the fast string and enhanced fast string CPU capabilities.
- */
- if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
- rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
- if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
- printk(KERN_INFO "Disabled fast string operations\n");
- setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
- setup_clear_cpu_cap(X86_FEATURE_ERMS);
- }
- }
-}
-
-#ifdef CONFIG_X86_32
-/*
- * Early probe support logic for ppro memory erratum #50
- *
- * This is called before we do cpu ident work
- */
-
-int __cpuinit ppro_with_ram_bug(void)
-{
- /* Uses data from early_cpu_detect now */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model == 1 &&
- boot_cpu_data.x86_mask < 8) {
- printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
- return 1;
- }
- return 0;
-}
-
-#ifdef CONFIG_X86_F00F_BUG
-static void __cpuinit trap_init_f00f_bug(void)
-{
- __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
-
- /*
- * Update the IDT descriptor and reload the IDT so that
- * it uses the read-only mapped virtual address.
- */
- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
- load_idt(&idt_descr);
-}
-#endif
-
-static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
-{
-	/* Is this call from identify_secondary_cpu()? */
- if (!c->cpu_index)
- return;
-
- /*
- * Mask B, Pentium, but not Pentium MMX
- */
- if (c->x86 == 5 &&
- c->x86_mask >= 1 && c->x86_mask <= 4 &&
- c->x86_model <= 3) {
- /*
- * Remember we have B step Pentia with bugs
- */
-		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
-			"with B stepping processors.\n");
- }
-}
-
-static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
-{
- unsigned long lo, hi;
-
-#ifdef CONFIG_X86_F00F_BUG
- /*
- * All current models of Pentium and Pentium with MMX technology CPUs
- * have the F0 0F bug, which lets nonprivileged users lock up the
- * system.
-	 * Note that the workaround should only be initialized once...
- */
- c->f00f_bug = 0;
- if (!paravirt_enabled() && c->x86 == 5) {
- static int f00f_workaround_enabled;
-
- c->f00f_bug = 1;
- if (!f00f_workaround_enabled) {
- trap_init_f00f_bug();
- printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
- f00f_workaround_enabled = 1;
- }
- }
-#endif
-
- /*
- * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
- * model 3 mask 3
- */
- if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
- clear_cpu_cap(c, X86_FEATURE_SEP);
-
- /*
- * P4 Xeon errata 037 workaround.
- * Hardware prefetcher may cause stale data to be loaded into the cache.
- */
- if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
- rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
- if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
- printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
- printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
- lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
- wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
- }
- }
-
- /*
- * See if we have a good local APIC by checking for buggy Pentia,
- * i.e. all B steppings and the C2 stepping of P54C when using their
- * integrated APIC (see 11AP erratum in "Pentium Processor
- * Specification Update").
- */
- if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
- (c->x86_mask < 0x6 || c->x86_mask == 0xb))
- set_cpu_cap(c, X86_FEATURE_11AP);
-
-
-#ifdef CONFIG_X86_INTEL_USERCOPY
- /*
- * Set up the preferred alignment for movsl bulk memory moves
- */
- switch (c->x86) {
- case 4: /* 486: untested */
- break;
- case 5: /* Old Pentia: untested */
- break;
- case 6: /* PII/PIII only like movsl with 8-byte alignment */
- movsl_mask.mask = 7;
- break;
- case 15: /* P4 is OK down to 8-byte alignment */
- movsl_mask.mask = 7;
- break;
- }
-#endif
-
-#ifdef CONFIG_X86_NUMAQ
- numaq_tsc_disable();
-#endif
-
- intel_smp_check(c);
-}
-#else
-static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
-{
-}
-#endif
-
-static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_NUMA
- unsigned node;
- int cpu = smp_processor_id();
-
-	/* For now, don't do the funky fallback heuristics that the AMD
-	   version employs. */
- node = numa_cpu_node(cpu);
- if (node == NUMA_NO_NODE || !node_online(node)) {
- /* reuse the value from init_cpu_to_node() */
- node = cpu_to_node(cpu);
- }
- numa_set_node(cpu, node);
-#endif
-}
-
-/*
- * find out the number of processor cores on the die
- */
-static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
-{
- unsigned int eax, ebx, ecx, edx;
-
- if (c->cpuid_level < 4)
- return 1;
-
- /* Intel has a non-standard dependency on %ecx for this CPUID level. */
- cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
- if (eax & 0x1f)
- return (eax >> 26) + 1;
- else
- return 1;
-}
-
-static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
-{
- /* Intel VMX MSR indicated features */
-#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000
-#define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000
-#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000
-#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001
-#define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002
-#define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020
-
- u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
-
- clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
- clear_cpu_cap(c, X86_FEATURE_VNMI);
- clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
- clear_cpu_cap(c, X86_FEATURE_EPT);
- clear_cpu_cap(c, X86_FEATURE_VPID);
-
- rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
- msr_ctl = vmx_msr_high | vmx_msr_low;
- if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
- set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
- if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
- set_cpu_cap(c, X86_FEATURE_VNMI);
- if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
- rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
- vmx_msr_low, vmx_msr_high);
- msr_ctl2 = vmx_msr_high | vmx_msr_low;
- if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
- (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
- set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
- if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
- set_cpu_cap(c, X86_FEATURE_EPT);
- if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
- set_cpu_cap(c, X86_FEATURE_VPID);
- }
-}
-
-static void __cpuinit init_intel(struct cpuinfo_x86 *c)
-{
- unsigned int l2 = 0;
-
- early_init_intel(c);
-
- intel_workarounds(c);
-
- /*
- * Detect the extended topology information if available. This
- * will reinitialise the initial_apicid which will be used
- * in init_intel_cacheinfo()
- */
- detect_extended_topology(c);
-
- l2 = init_intel_cacheinfo(c);
- if (c->cpuid_level > 9) {
- unsigned eax = cpuid_eax(10);
- /* Check for version and the number of counters */
- if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
- set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
- }
-
- if (cpu_has_xmm2)
- set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
- if (cpu_has_ds) {
- unsigned int l1;
- rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
- if (!(l1 & (1<<11)))
- set_cpu_cap(c, X86_FEATURE_BTS);
- if (!(l1 & (1<<12)))
- set_cpu_cap(c, X86_FEATURE_PEBS);
- }
-
- if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
- set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
-
-#ifdef CONFIG_X86_64
- if (c->x86 == 15)
- c->x86_cache_alignment = c->x86_clflush_size * 2;
- if (c->x86 == 6)
- set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-#else
- /*
- * Names for the Pentium II/Celeron processors
- * detectable only by also checking the cache size.
- * Dixon is NOT a Celeron.
- */
- if (c->x86 == 6) {
- char *p = NULL;
-
- switch (c->x86_model) {
- case 5:
- if (l2 == 0)
- p = "Celeron (Covington)";
- else if (l2 == 256)
- p = "Mobile Pentium II (Dixon)";
- break;
-
- case 6:
- if (l2 == 128)
- p = "Celeron (Mendocino)";
- else if (c->x86_mask == 0 || c->x86_mask == 5)
- p = "Celeron-A";
- break;
-
- case 8:
- if (l2 == 128)
- p = "Celeron (Coppermine)";
- break;
- }
-
- if (p)
- strcpy(c->x86_model_id, p);
- }
-
- if (c->x86 == 15)
- set_cpu_cap(c, X86_FEATURE_P4);
- if (c->x86 == 6)
- set_cpu_cap(c, X86_FEATURE_P3);
-#endif
-
- if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
- /*
-		 * let's use the legacy cpuid leaves 0x1 and 0x4 for topology
- * detection.
- */
- c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
- detect_ht(c);
-#endif
- }
-
- /* Work around errata */
- srat_detect_node(c);
-
- if (cpu_has(c, X86_FEATURE_VMX))
- detect_vmx_virtcap(c);
-
- /*
- * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not.
- * x86_energy_perf_policy(8) is available to change it at run-time
- */
- if (cpu_has(c, X86_FEATURE_EPB)) {
- u64 epb;
-
- rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
- if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
- printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
- " Set to 'normal', was 'performance'\n"
- "ENERGY_PERF_BIAS: View and update with"
- " x86_energy_perf_policy(8)\n");
- epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
- wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
- }
- }
-}
-
-#ifdef CONFIG_X86_32
-static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
-{
- /*
- * Intel PIII Tualatin. This comes in two flavours.
-	 * One has 256 KB of cache, the other 512 KB. We have no way
-	 * to determine which, so we use a boot-time override
-	 * for the 512 KB model, and assume 256 KB otherwise.
- */
- if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
- size = 256;
- return size;
-}
-#endif
-
-static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
- .c_vendor = "Intel",
- .c_ident = { "GenuineIntel" },
-#ifdef CONFIG_X86_32
- .c_models = {
- { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
- {
- [0] = "486 DX-25/33",
- [1] = "486 DX-50",
- [2] = "486 SX",
- [3] = "486 DX/2",
- [4] = "486 SL",
- [5] = "486 SX/2",
- [7] = "486 DX/2-WB",
- [8] = "486 DX/4",
- [9] = "486 DX/4-WB"
- }
- },
- { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
- {
- [0] = "Pentium 60/66 A-step",
- [1] = "Pentium 60/66",
- [2] = "Pentium 75 - 200",
- [3] = "OverDrive PODP5V83",
- [4] = "Pentium MMX",
- [7] = "Mobile Pentium 75 - 200",
- [8] = "Mobile Pentium MMX"
- }
- },
- { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
- {
- [0] = "Pentium Pro A-step",
- [1] = "Pentium Pro",
- [3] = "Pentium II (Klamath)",
- [4] = "Pentium II (Deschutes)",
- [5] = "Pentium II (Deschutes)",
- [6] = "Mobile Pentium II",
- [7] = "Pentium III (Katmai)",
- [8] = "Pentium III (Coppermine)",
- [10] = "Pentium III (Cascades)",
- [11] = "Pentium III (Tualatin)",
- }
- },
- { .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
- {
- [0] = "Pentium 4 (Unknown)",
- [1] = "Pentium 4 (Willamette)",
- [2] = "Pentium 4 (Northwood)",
- [4] = "Pentium 4 (Foster)",
- [5] = "Pentium 4 (Foster)",
- }
- },
- },
- .c_size_cache = intel_size_cache,
-#endif
- .c_early_init = early_init_intel,
- .c_init = init_intel,
- .c_x86_vendor = X86_VENDOR_INTEL,
-};
-
-cpu_dev_register(intel_cpu_dev);
-
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/intel_cacheinfo.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/intel_cacheinfo.c
deleted file mode 100644
index b8f3653d..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ /dev/null
@@ -1,1216 +0,0 @@
-/*
- * Routines to identify caches on Intel CPUs.
- *
- * Changes:
- * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
- * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
- * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/compiler.h>
-#include <linux/cpu.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
-
-#include <asm/processor.h>
-#include <linux/smp.h>
-#include <asm/amd_nb.h>
-#include <asm/smp.h>
-
-#define LVL_1_INST 1
-#define LVL_1_DATA 2
-#define LVL_2 3
-#define LVL_3 4
-#define LVL_TRACE 5
-
-struct _cache_table {
- unsigned char descriptor;
- char cache_type;
- short size;
-};
-
-#define MB(x) ((x) * 1024)
-
-/* All the cache descriptor types we care about
-   (no TLB entries) */
-
-static const struct _cache_table __cpuinitconst cache_table[] =
-{
- { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
- { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
- { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
- { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
- { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
- { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
- { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */
- { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
- { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
- { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
- { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
- { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
- { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
- { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
- { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
- { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
- { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
- { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */
- { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */
- { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */
- { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */
- { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */
- { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
- { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */
- { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
- { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */
- { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */
- { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */
- { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
- { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
- { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
- { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
- { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */
- { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */
- { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
- { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */
- { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
- { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
- { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */
- { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */
- { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
- { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */
- { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */
- { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */
- { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */
- { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */
- { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */
- { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
- { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */
- { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
- { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */
- { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */
- { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
- { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
- { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */
- { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */
- { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */
- { 0x00, 0, 0}
-};
-
-
-enum _cache_type {
- CACHE_TYPE_NULL = 0,
- CACHE_TYPE_DATA = 1,
- CACHE_TYPE_INST = 2,
- CACHE_TYPE_UNIFIED = 3
-};
-
-union _cpuid4_leaf_eax {
- struct {
- enum _cache_type type:5;
- unsigned int level:3;
- unsigned int is_self_initializing:1;
- unsigned int is_fully_associative:1;
- unsigned int reserved:4;
- unsigned int num_threads_sharing:12;
- unsigned int num_cores_on_die:6;
- } split;
- u32 full;
-};
-
-union _cpuid4_leaf_ebx {
- struct {
- unsigned int coherency_line_size:12;
- unsigned int physical_line_partition:10;
- unsigned int ways_of_associativity:10;
- } split;
- u32 full;
-};
-
-union _cpuid4_leaf_ecx {
- struct {
- unsigned int number_of_sets:32;
- } split;
- u32 full;
-};
-
-struct _cpuid4_info_regs {
- union _cpuid4_leaf_eax eax;
- union _cpuid4_leaf_ebx ebx;
- union _cpuid4_leaf_ecx ecx;
- unsigned long size;
- struct amd_northbridge *nb;
-};
-
-struct _cpuid4_info {
- struct _cpuid4_info_regs base;
- DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
-};
-
-unsigned short num_cache_leaves;
-
-/* AMD doesn't have CPUID4. Emulate it here to report the same
-   information to the user. This makes some assumptions about the machine:
-   L2 not shared, no SMT, etc., which is currently true on AMD CPUs.
-
-   In theory the TLBs could be reported as a fake type (they are in "dummy").
-   Maybe later. */
-union l1_cache {
- struct {
- unsigned line_size:8;
- unsigned lines_per_tag:8;
- unsigned assoc:8;
- unsigned size_in_kb:8;
- };
- unsigned val;
-};
-
-union l2_cache {
- struct {
- unsigned line_size:8;
- unsigned lines_per_tag:4;
- unsigned assoc:4;
- unsigned size_in_kb:16;
- };
- unsigned val;
-};
-
-union l3_cache {
- struct {
- unsigned line_size:8;
- unsigned lines_per_tag:4;
- unsigned assoc:4;
- unsigned res:2;
- unsigned size_encoded:14;
- };
- unsigned val;
-};
-
-static const unsigned short __cpuinitconst assocs[] = {
- [1] = 1,
- [2] = 2,
- [4] = 4,
- [6] = 8,
- [8] = 16,
- [0xa] = 32,
- [0xb] = 48,
- [0xc] = 64,
- [0xd] = 96,
- [0xe] = 128,
- [0xf] = 0xffff /* fully associative - no way to show this currently */
-};
-
-static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
-static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
-
-static void __cpuinit
-amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
- union _cpuid4_leaf_ebx *ebx,
- union _cpuid4_leaf_ecx *ecx)
-{
- unsigned dummy;
- unsigned line_size, lines_per_tag, assoc, size_in_kb;
- union l1_cache l1i, l1d;
- union l2_cache l2;
- union l3_cache l3;
- union l1_cache *l1 = &l1d;
-
- eax->full = 0;
- ebx->full = 0;
- ecx->full = 0;
-
- cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
- cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
-
- switch (leaf) {
- case 1:
- l1 = &l1i;
- case 0:
- if (!l1->val)
- return;
- assoc = assocs[l1->assoc];
- line_size = l1->line_size;
- lines_per_tag = l1->lines_per_tag;
- size_in_kb = l1->size_in_kb;
- break;
- case 2:
- if (!l2.val)
- return;
- assoc = assocs[l2.assoc];
- line_size = l2.line_size;
- lines_per_tag = l2.lines_per_tag;
- /* cpu_data has errata corrections for K7 applied */
- size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
- break;
- case 3:
- if (!l3.val)
- return;
- assoc = assocs[l3.assoc];
- line_size = l3.line_size;
- lines_per_tag = l3.lines_per_tag;
- size_in_kb = l3.size_encoded * 512;
- if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
- size_in_kb = size_in_kb >> 1;
- assoc = assoc >> 1;
- }
- break;
- default:
- return;
- }
-
- eax->split.is_self_initializing = 1;
- eax->split.type = types[leaf];
- eax->split.level = levels[leaf];
- eax->split.num_threads_sharing = 0;
- eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
-
-
- if (assoc == 0xffff)
- eax->split.is_fully_associative = 1;
- ebx->split.coherency_line_size = line_size - 1;
- ebx->split.ways_of_associativity = assoc - 1;
- ebx->split.physical_line_partition = lines_per_tag - 1;
- ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
- (ebx->split.ways_of_associativity + 1) - 1;
-}
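The number_of_sets arithmetic above is just size = sets * line_size * ways rearranged; below is a minimal standalone sketch (a hypothetical 512 KB, 16-way, 64-byte-line L2, with the physical line partition treated as 1 — values invented for illustration) that round-trips the "minus one" encoding used by the leaf-4 fields.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int size_in_kb = 512;	/* assumed L2 size */
	unsigned int assoc      = 16;	/* assumed ways of associativity */
	unsigned int line_size  = 64;	/* assumed coherency line size */

	/* "Minus one" encodings, as amd_cpuid4() stores them. */
	unsigned int ways_m1 = assoc - 1;
	unsigned int line_m1 = line_size - 1;
	unsigned int sets_m1 = (size_in_kb * 1024) / line_size / (ways_m1 + 1) - 1;

	/* Reversing the encoding must give the original size back. */
	unsigned long size = (unsigned long)(sets_m1 + 1) * (line_m1 + 1) * (ways_m1 + 1);

	assert(size == size_in_kb * 1024UL);
	printf("sets=%u, size=%lu bytes\n", sets_m1 + 1, size);
	return 0;
}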
-
-struct _cache_attr {
- struct attribute attr;
- ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
- ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
- unsigned int);
-};
-
-#ifdef CONFIG_AMD_NB
-
-/*
- * L3 cache descriptors
- */
-static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
-{
- struct amd_l3_cache *l3 = &nb->l3_cache;
- unsigned int sc0, sc1, sc2, sc3;
- u32 val = 0;
-
- pci_read_config_dword(nb->misc, 0x1C4, &val);
-
- /* calculate subcache sizes */
- l3->subcaches[0] = sc0 = !(val & BIT(0));
- l3->subcaches[1] = sc1 = !(val & BIT(4));
-
- if (boot_cpu_data.x86 == 0x15) {
- l3->subcaches[0] = sc0 += !(val & BIT(1));
- l3->subcaches[1] = sc1 += !(val & BIT(5));
- }
-
- l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
- l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
-
- l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
-}
-
-static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
-{
- int node;
-
- /* only for L3, and not in virtualized environments */
- if (index < 3)
- return;
-
- node = amd_get_nb_id(smp_processor_id());
- this_leaf->nb = node_to_amd_nb(node);
- if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
- amd_calc_l3_indices(this_leaf->nb);
-}
-
-/*
- * check whether a slot used for disabling an L3 index is occupied.
- * @nb: northbridge descriptor containing the L3 cache
- * @slot: slot number (0..1)
- *
- * @returns: the disabled index if the slot is in use, or a negative value
- * if the slot is free.
- */
-int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
-{
- unsigned int reg = 0;
-
- pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
-
- /* check whether this slot is activated already */
- if (reg & (3UL << 30))
- return reg & 0xfff;
-
- return -1;
-}
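The register format relied on above (bits 30/31 mark an occupied slot, the low 12 bits hold the disabled index) can be exercised in isolation; a small sketch, using invented register values in place of real PCI config reads.

#include <stdint.h>
#include <stdio.h>

/*
 * Mirrors the decode in amd_get_l3_disable_slot(): a slot is in use when
 * either of bits 30/31 is set, and the disabled index sits in bits 0-11.
 */
static int decode_l3_disable_slot(uint32_t reg)
{
	if (reg & (3UL << 30))
		return reg & 0xfff;
	return -1;			/* slot free */
}

int main(void)
{
	uint32_t free_slot = 0;				/* nothing disabled */
	uint32_t used_slot = (1UL << 31) | 0x02a;	/* index 42 disabled */

	printf("%d %d\n", decode_l3_disable_slot(free_slot),	/* -1 */
			  decode_l3_disable_slot(used_slot));	/* 42 */
	return 0;
}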
-
-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
- unsigned int slot)
-{
- int index;
-
- if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
- return -EINVAL;
-
- index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
- if (index >= 0)
- return sprintf(buf, "%d\n", index);
-
- return sprintf(buf, "FREE\n");
-}
-
-#define SHOW_CACHE_DISABLE(slot) \
-static ssize_t \
-show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \
- unsigned int cpu) \
-{ \
- return show_cache_disable(this_leaf, buf, slot); \
-}
-SHOW_CACHE_DISABLE(0)
-SHOW_CACHE_DISABLE(1)
-
-static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
- unsigned slot, unsigned long idx)
-{
- int i;
-
- idx |= BIT(30);
-
- /*
- * disable index in all 4 subcaches
- */
- for (i = 0; i < 4; i++) {
- u32 reg = idx | (i << 20);
-
- if (!nb->l3_cache.subcaches[i])
- continue;
-
- pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
-
-		/*
-		 * We need to WBINVD on a core on the node containing the L3
-		 * cache whose indices we disable; a simple wbinvd() is
-		 * therefore not sufficient.
-		 */
- wbinvd_on_cpu(cpu);
-
- reg |= BIT(31);
- pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
- }
-}
-
-/*
- * disable an L3 cache index by using a disable-slot
- *
- * @nb: northbridge descriptor containing the L3 cache
- * @cpu: A CPU on the node containing the L3 cache
- * @slot: slot number (0..1)
- * @index: index to disable
- *
- * @return: 0 on success, error status on failure
- */
-int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
- unsigned long index)
-{
- int ret = 0;
-
- /* check if @slot is already used or the index is already disabled */
- ret = amd_get_l3_disable_slot(nb, slot);
- if (ret >= 0)
- return -EEXIST;
-
- if (index > nb->l3_cache.indices)
- return -EINVAL;
-
- /* check whether the other slot has disabled the same index already */
- if (index == amd_get_l3_disable_slot(nb, !slot))
- return -EEXIST;
-
- amd_l3_disable_index(nb, cpu, slot, index);
-
- return 0;
-}
-
-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
- const char *buf, size_t count,
- unsigned int slot)
-{
- unsigned long val = 0;
- int cpu, err = 0;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
- return -EINVAL;
-
- cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
-
- if (strict_strtoul(buf, 10, &val) < 0)
- return -EINVAL;
-
- err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
- if (err) {
- if (err == -EEXIST)
- pr_warning("L3 slot %d in use/index already disabled!\n",
- slot);
- return err;
- }
- return count;
-}
-
-#define STORE_CACHE_DISABLE(slot) \
-static ssize_t \
-store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
- const char *buf, size_t count, \
- unsigned int cpu) \
-{ \
- return store_cache_disable(this_leaf, buf, count, slot); \
-}
-STORE_CACHE_DISABLE(0)
-STORE_CACHE_DISABLE(1)
-
-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
- show_cache_disable_0, store_cache_disable_0);
-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
- show_cache_disable_1, store_cache_disable_1);
-
-static ssize_t
-show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
-{
- if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- return -EINVAL;
-
- return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
-}
-
-static ssize_t
-store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
- unsigned int cpu)
-{
- unsigned long val;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- return -EINVAL;
-
- if (strict_strtoul(buf, 16, &val) < 0)
- return -EINVAL;
-
- if (amd_set_subcaches(cpu, val))
- return -EINVAL;
-
- return count;
-}
-
-static struct _cache_attr subcaches =
- __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
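Taken together, cache_disable_0/1 and subcaches show up as plain files under each CPU's cache directory in sysfs; a hedged userspace sketch follows (the path is inferred from the "cache"/"index%lu" kobjects created in cache_add_dev() further down, and the cpu/index numbers are arbitrary).

#include <stdio.h>

int main(void)
{
	/* Assumed path; adjust the cpu and index numbers for the machine. */
	const char *path =
		"/sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("slot 0: %s", buf);	/* "FREE" or a disabled index */
	fclose(f);
	return 0;
}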
-
-#else /* CONFIG_AMD_NB */
-#define amd_init_l3_cache(x, y)
-#endif /* CONFIG_AMD_NB */
-
-static int
-__cpuinit cpuid4_cache_lookup_regs(int index,
- struct _cpuid4_info_regs *this_leaf)
-{
- union _cpuid4_leaf_eax eax;
- union _cpuid4_leaf_ebx ebx;
- union _cpuid4_leaf_ecx ecx;
- unsigned edx;
-
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
- amd_cpuid4(index, &eax, &ebx, &ecx);
- amd_init_l3_cache(this_leaf, index);
- } else {
- cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
- }
-
- if (eax.split.type == CACHE_TYPE_NULL)
- return -EIO; /* better error ? */
-
- this_leaf->eax = eax;
- this_leaf->ebx = ebx;
- this_leaf->ecx = ecx;
- this_leaf->size = (ecx.split.number_of_sets + 1) *
- (ebx.split.coherency_line_size + 1) *
- (ebx.split.physical_line_partition + 1) *
- (ebx.split.ways_of_associativity + 1);
- return 0;
-}
-
-static int __cpuinit find_num_cache_leaves(void)
-{
- unsigned int eax, ebx, ecx, edx;
- union _cpuid4_leaf_eax cache_eax;
- int i = -1;
-
- do {
- ++i;
- /* Do cpuid(4) loop to find out num_cache_leaves */
- cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
- cache_eax.full = eax;
- } while (cache_eax.split.type != CACHE_TYPE_NULL);
- return i;
-}
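The same leaf-4 walk can be reproduced from userspace with the cpuid.h helpers shipped by current GCC/Clang; a sketch that assumes a CPU implementing the deterministic cache parameters leaf and simply prints each cache's size.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int i;

	for (i = 0; ; i++) {
		if (!__get_cpuid_count(4, i, &eax, &ebx, &ecx, &edx))
			break;
		if ((eax & 0x1f) == 0)		/* CACHE_TYPE_NULL: done */
			break;

		/* Same bitfields as the _cpuid4_leaf_* unions above. */
		unsigned int level     = (eax >> 5) & 0x7;
		unsigned int line_size = (ebx & 0xfff) + 1;
		unsigned int parts     = ((ebx >> 12) & 0x3ff) + 1;
		unsigned int ways      = ((ebx >> 22) & 0x3ff) + 1;
		unsigned int sets      = ecx + 1;

		printf("leaf %u: L%u, %u KB\n", i, level,
		       line_size * parts * ways * sets / 1024);
	}
	return 0;
}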
-
-unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
-{
- /* Cache sizes */
- unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
- unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
- unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
- unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
-#ifdef CONFIG_X86_HT
- unsigned int cpu = c->cpu_index;
-#endif
-
- if (c->cpuid_level > 3) {
- static int is_initialized;
-
- if (is_initialized == 0) {
- /* Init num_cache_leaves from boot CPU */
- num_cache_leaves = find_num_cache_leaves();
- is_initialized++;
- }
-
-		/*
-		 * Whenever possible use cpuid(4), the deterministic cache
-		 * parameters leaf, to find the cache details.
-		 */
- for (i = 0; i < num_cache_leaves; i++) {
- struct _cpuid4_info_regs this_leaf;
- int retval;
-
- retval = cpuid4_cache_lookup_regs(i, &this_leaf);
- if (retval >= 0) {
- switch (this_leaf.eax.split.level) {
- case 1:
- if (this_leaf.eax.split.type ==
- CACHE_TYPE_DATA)
- new_l1d = this_leaf.size/1024;
- else if (this_leaf.eax.split.type ==
- CACHE_TYPE_INST)
- new_l1i = this_leaf.size/1024;
- break;
- case 2:
- new_l2 = this_leaf.size/1024;
- num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
- index_msb = get_count_order(num_threads_sharing);
- l2_id = c->apicid >> index_msb;
- break;
- case 3:
- new_l3 = this_leaf.size/1024;
- num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
- index_msb = get_count_order(
- num_threads_sharing);
- l3_id = c->apicid >> index_msb;
- break;
- default:
- break;
- }
- }
- }
- }
-	/*
-	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
-	 * the trace cache.
-	 */
- if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
- /* supports eax=2 call */
- int j, n;
- unsigned int regs[4];
- unsigned char *dp = (unsigned char *)regs;
- int only_trace = 0;
-
- if (num_cache_leaves != 0 && c->x86 == 15)
- only_trace = 1;
-
- /* Number of times to iterate */
- n = cpuid_eax(2) & 0xFF;
-
- for (i = 0 ; i < n ; i++) {
- cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
-
- /* If bit 31 is set, this is an unknown format */
- for (j = 0 ; j < 3 ; j++)
- if (regs[j] & (1 << 31))
- regs[j] = 0;
-
- /* Byte 0 is level count, not a descriptor */
- for (j = 1 ; j < 16 ; j++) {
- unsigned char des = dp[j];
- unsigned char k = 0;
-
- /* look up this descriptor in the table */
- while (cache_table[k].descriptor != 0) {
- if (cache_table[k].descriptor == des) {
- if (only_trace && cache_table[k].cache_type != LVL_TRACE)
- break;
- switch (cache_table[k].cache_type) {
- case LVL_1_INST:
- l1i += cache_table[k].size;
- break;
- case LVL_1_DATA:
- l1d += cache_table[k].size;
- break;
- case LVL_2:
- l2 += cache_table[k].size;
- break;
- case LVL_3:
- l3 += cache_table[k].size;
- break;
- case LVL_TRACE:
- trace += cache_table[k].size;
- break;
- }
-
- break;
- }
-
- k++;
- }
- }
- }
- }
-
- if (new_l1d)
- l1d = new_l1d;
-
- if (new_l1i)
- l1i = new_l1i;
-
- if (new_l2) {
- l2 = new_l2;
-#ifdef CONFIG_X86_HT
- per_cpu(cpu_llc_id, cpu) = l2_id;
-#endif
- }
-
- if (new_l3) {
- l3 = new_l3;
-#ifdef CONFIG_X86_HT
- per_cpu(cpu_llc_id, cpu) = l3_id;
-#endif
- }
-
- c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
-
- return l2;
-}
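The cpuid(2) descriptor walk above can also be reproduced from userspace; a sketch that only dumps the raw descriptor bytes (the lookup against a table like cache_table[] is left out), assuming the common case where EAX[7:0], the iteration count, is 1.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int regs[4];
	int i, j;

	__cpuid(2, regs[0], regs[1], regs[2], regs[3]);

	for (i = 0; i < 4; i++) {
		if (regs[i] & (1u << 31))	/* bit 31 set: unknown format */
			continue;
		/* Byte 0 of EAX is the iteration count, not a descriptor. */
		for (j = (i == 0); j < 4; j++)
			printf("descriptor 0x%02x\n", (regs[i] >> (8 * j)) & 0xff);
	}
	return 0;
}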
-
-#ifdef CONFIG_SYSFS
-
-/* pointer to _cpuid4_info array (for each cache leaf) */
-static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
-#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
-
-#ifdef CONFIG_SMP
-
-static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
-{
- struct _cpuid4_info *this_leaf;
- int ret, i, sibling;
- struct cpuinfo_x86 *c = &cpu_data(cpu);
-
- ret = 0;
- if (index == 3) {
- ret = 1;
- for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
- if (!per_cpu(ici_cpuid4_info, i))
- continue;
- this_leaf = CPUID4_INFO_IDX(i, index);
- for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
- if (!cpu_online(sibling))
- continue;
- set_bit(sibling, this_leaf->shared_cpu_map);
- }
- }
- } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
- ret = 1;
- for_each_cpu(i, cpu_sibling_mask(cpu)) {
- if (!per_cpu(ici_cpuid4_info, i))
- continue;
- this_leaf = CPUID4_INFO_IDX(i, index);
- for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
- if (!cpu_online(sibling))
- continue;
- set_bit(sibling, this_leaf->shared_cpu_map);
- }
- }
- }
-
- return ret;
-}
-
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
-{
- struct _cpuid4_info *this_leaf, *sibling_leaf;
- unsigned long num_threads_sharing;
- int index_msb, i;
- struct cpuinfo_x86 *c = &cpu_data(cpu);
-
- if (c->x86_vendor == X86_VENDOR_AMD) {
- if (cache_shared_amd_cpu_map_setup(cpu, index))
- return;
- }
-
- this_leaf = CPUID4_INFO_IDX(cpu, index);
- num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
-
- if (num_threads_sharing == 1)
- cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
- else {
- index_msb = get_count_order(num_threads_sharing);
-
- for_each_online_cpu(i) {
- if (cpu_data(i).apicid >> index_msb ==
- c->apicid >> index_msb) {
- cpumask_set_cpu(i,
- to_cpumask(this_leaf->shared_cpu_map));
- if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
- sibling_leaf =
- CPUID4_INFO_IDX(i, index);
- cpumask_set_cpu(cpu, to_cpumask(
- sibling_leaf->shared_cpu_map));
- }
- }
- }
- }
-}
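The sharing test above boils down to: CPUs whose APIC IDs agree once the low index_msb bits are dropped share the leaf. A tiny standalone illustration with invented APIC IDs; count_order() stands in for the kernel's get_count_order() over the values used here.

#include <stdio.h>

/* Smallest n with 2^n >= count; matches get_count_order() for count >= 1. */
static int count_order(unsigned int count)
{
	int order = 0;

	while ((1u << order) < count)
		order++;
	return order;
}

int main(void)
{
	unsigned int num_threads_sharing = 2;	/* hypothetical: 2 HT siblings */
	int index_msb = count_order(num_threads_sharing);	/* 1 */

	/*
	 * APIC IDs 4 and 5 collapse to the same value >> 1, so they share
	 * the leaf; APIC ID 6 does not. IDs are invented for illustration.
	 */
	printf("%d %d %d\n", 4 >> index_msb, 5 >> index_msb, 6 >> index_msb);
	return 0;
}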
-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
-{
- struct _cpuid4_info *this_leaf, *sibling_leaf;
- int sibling;
-
- this_leaf = CPUID4_INFO_IDX(cpu, index);
- for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
- sibling_leaf = CPUID4_INFO_IDX(sibling, index);
- cpumask_clear_cpu(cpu,
- to_cpumask(sibling_leaf->shared_cpu_map));
- }
-}
-#else
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
-{
-}
-
-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
-{
-}
-#endif
-
-static void __cpuinit free_cache_attributes(unsigned int cpu)
-{
- int i;
-
- for (i = 0; i < num_cache_leaves; i++)
- cache_remove_shared_cpu_map(cpu, i);
-
- kfree(per_cpu(ici_cpuid4_info, cpu));
- per_cpu(ici_cpuid4_info, cpu) = NULL;
-}
-
-static void __cpuinit get_cpu_leaves(void *_retval)
-{
- int j, *retval = _retval, cpu = smp_processor_id();
-
- /* Do cpuid and store the results */
- for (j = 0; j < num_cache_leaves; j++) {
- struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
-
- *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
- if (unlikely(*retval < 0)) {
- int i;
-
- for (i = 0; i < j; i++)
- cache_remove_shared_cpu_map(cpu, i);
- break;
- }
- cache_shared_cpu_map_setup(cpu, j);
- }
-}
-
-static int __cpuinit detect_cache_attributes(unsigned int cpu)
-{
- int retval;
-
- if (num_cache_leaves == 0)
- return -ENOENT;
-
- per_cpu(ici_cpuid4_info, cpu) = kzalloc(
- sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
- if (per_cpu(ici_cpuid4_info, cpu) == NULL)
- return -ENOMEM;
-
- smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
- if (retval) {
- kfree(per_cpu(ici_cpuid4_info, cpu));
- per_cpu(ici_cpuid4_info, cpu) = NULL;
- }
-
- return retval;
-}
-
-#include <linux/kobject.h>
-#include <linux/sysfs.h>
-#include <linux/cpu.h>
-
-/* pointer to kobject for cpuX/cache */
-static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
-
-struct _index_kobject {
- struct kobject kobj;
- unsigned int cpu;
- unsigned short index;
-};
-
-/* pointer to array of kobjects for cpuX/cache/indexY */
-static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
-#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
-
-#define show_one_plus(file_name, object, val) \
-static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
- unsigned int cpu) \
-{ \
- return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
-}
-
-show_one_plus(level, base.eax.split.level, 0);
-show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
-show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
-show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
-show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
-
-static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
- unsigned int cpu)
-{
- return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
-}
-
-static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
- int type, char *buf)
-{
- ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
- int n = 0;
-
- if (len > 1) {
- const struct cpumask *mask;
-
- mask = to_cpumask(this_leaf->shared_cpu_map);
- n = type ?
- cpulist_scnprintf(buf, len-2, mask) :
- cpumask_scnprintf(buf, len-2, mask);
- buf[n++] = '\n';
- buf[n] = '\0';
- }
- return n;
-}
-
-static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
- unsigned int cpu)
-{
- return show_shared_cpu_map_func(leaf, 0, buf);
-}
-
-static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
- unsigned int cpu)
-{
- return show_shared_cpu_map_func(leaf, 1, buf);
-}
-
-static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
- unsigned int cpu)
-{
- switch (this_leaf->base.eax.split.type) {
- case CACHE_TYPE_DATA:
- return sprintf(buf, "Data\n");
- case CACHE_TYPE_INST:
- return sprintf(buf, "Instruction\n");
- case CACHE_TYPE_UNIFIED:
- return sprintf(buf, "Unified\n");
- default:
- return sprintf(buf, "Unknown\n");
- }
-}
-
-#define to_object(k) container_of(k, struct _index_kobject, kobj)
-#define to_attr(a) container_of(a, struct _cache_attr, attr)
-
-#define define_one_ro(_name) \
-static struct _cache_attr _name = \
- __ATTR(_name, 0444, show_##_name, NULL)
-
-define_one_ro(level);
-define_one_ro(type);
-define_one_ro(coherency_line_size);
-define_one_ro(physical_line_partition);
-define_one_ro(ways_of_associativity);
-define_one_ro(number_of_sets);
-define_one_ro(size);
-define_one_ro(shared_cpu_map);
-define_one_ro(shared_cpu_list);
-
-static struct attribute *default_attrs[] = {
- &type.attr,
- &level.attr,
- &coherency_line_size.attr,
- &physical_line_partition.attr,
- &ways_of_associativity.attr,
- &number_of_sets.attr,
- &size.attr,
- &shared_cpu_map.attr,
- &shared_cpu_list.attr,
- NULL
-};
-
-#ifdef CONFIG_AMD_NB
-static struct attribute ** __cpuinit amd_l3_attrs(void)
-{
- static struct attribute **attrs;
- int n;
-
- if (attrs)
- return attrs;
-
- n = sizeof (default_attrs) / sizeof (struct attribute *);
-
- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
- n += 2;
-
- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- n += 1;
-
- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
- if (attrs == NULL)
- return attrs = default_attrs;
-
- for (n = 0; default_attrs[n]; n++)
- attrs[n] = default_attrs[n];
-
- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
- attrs[n++] = &cache_disable_0.attr;
- attrs[n++] = &cache_disable_1.attr;
- }
-
- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- attrs[n++] = &subcaches.attr;
-
- return attrs;
-}
-#endif
-
-static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
- struct _cache_attr *fattr = to_attr(attr);
- struct _index_kobject *this_leaf = to_object(kobj);
- ssize_t ret;
-
- ret = fattr->show ?
- fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
- buf, this_leaf->cpu) :
- 0;
- return ret;
-}
-
-static ssize_t store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct _cache_attr *fattr = to_attr(attr);
- struct _index_kobject *this_leaf = to_object(kobj);
- ssize_t ret;
-
- ret = fattr->store ?
- fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
- buf, count, this_leaf->cpu) :
- 0;
- return ret;
-}
-
-static const struct sysfs_ops sysfs_ops = {
- .show = show,
- .store = store,
-};
-
-static struct kobj_type ktype_cache = {
- .sysfs_ops = &sysfs_ops,
- .default_attrs = default_attrs,
-};
-
-static struct kobj_type ktype_percpu_entry = {
- .sysfs_ops = &sysfs_ops,
-};
-
-static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
-{
- kfree(per_cpu(ici_cache_kobject, cpu));
- kfree(per_cpu(ici_index_kobject, cpu));
- per_cpu(ici_cache_kobject, cpu) = NULL;
- per_cpu(ici_index_kobject, cpu) = NULL;
- free_cache_attributes(cpu);
-}
-
-static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
-{
- int err;
-
- if (num_cache_leaves == 0)
- return -ENOENT;
-
- err = detect_cache_attributes(cpu);
- if (err)
- return err;
-
- /* Allocate all required memory */
- per_cpu(ici_cache_kobject, cpu) =
- kzalloc(sizeof(struct kobject), GFP_KERNEL);
- if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
- goto err_out;
-
- per_cpu(ici_index_kobject, cpu) = kzalloc(
- sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
- if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
- goto err_out;
-
- return 0;
-
-err_out:
- cpuid4_cache_sysfs_exit(cpu);
- return -ENOMEM;
-}
-
-static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
-
-/* Add/Remove cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct device *dev)
-{
- unsigned int cpu = dev->id;
- unsigned long i, j;
- struct _index_kobject *this_object;
- struct _cpuid4_info *this_leaf;
- int retval;
-
- retval = cpuid4_cache_sysfs_init(cpu);
- if (unlikely(retval < 0))
- return retval;
-
- retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
- &ktype_percpu_entry,
- &dev->kobj, "%s", "cache");
- if (retval < 0) {
- cpuid4_cache_sysfs_exit(cpu);
- return retval;
- }
-
- for (i = 0; i < num_cache_leaves; i++) {
- this_object = INDEX_KOBJECT_PTR(cpu, i);
- this_object->cpu = cpu;
- this_object->index = i;
-
- this_leaf = CPUID4_INFO_IDX(cpu, i);
-
- ktype_cache.default_attrs = default_attrs;
-#ifdef CONFIG_AMD_NB
- if (this_leaf->base.nb)
- ktype_cache.default_attrs = amd_l3_attrs();
-#endif
- retval = kobject_init_and_add(&(this_object->kobj),
- &ktype_cache,
- per_cpu(ici_cache_kobject, cpu),
- "index%1lu", i);
- if (unlikely(retval)) {
- for (j = 0; j < i; j++)
- kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
- kobject_put(per_cpu(ici_cache_kobject, cpu));
- cpuid4_cache_sysfs_exit(cpu);
- return retval;
- }
- kobject_uevent(&(this_object->kobj), KOBJ_ADD);
- }
- cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
-
- kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
- return 0;
-}
-
-static void __cpuinit cache_remove_dev(struct device *dev)
-{
- unsigned int cpu = dev->id;
- unsigned long i;
-
- if (per_cpu(ici_cpuid4_info, cpu) == NULL)
- return;
- if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
- return;
- cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
-
- for (i = 0; i < num_cache_leaves; i++)
- kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
- kobject_put(per_cpu(ici_cache_kobject, cpu));
- cpuid4_cache_sysfs_exit(cpu);
-}
-
-static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- struct device *dev;
-
- dev = get_cpu_device(cpu);
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- cache_add_dev(dev);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- cache_remove_dev(dev);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
- .notifier_call = cacheinfo_cpu_callback,
-};
-
-static int __cpuinit cache_sysfs_init(void)
-{
- int i;
-
- if (num_cache_leaves == 0)
- return 0;
-
- for_each_online_cpu(i) {
- int err;
- struct device *dev = get_cpu_device(i);
-
- err = cache_add_dev(dev);
- if (err)
- return err;
- }
- register_hotcpu_notifier(&cacheinfo_cpu_notifier);
- return 0;
-}
-
-device_initcall(cache_sysfs_init);
-
-#endif
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/match.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/match.c
deleted file mode 100644
index 5502b289..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/match.c
+++ /dev/null
@@ -1,91 +0,0 @@
-#include <asm/cpu_device_id.h>
-#include <asm/processor.h>
-#include <linux/cpu.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-/**
- * x86_match_cpu - match current CPU against an array of x86_cpu_ids
- * @match: Pointer to array of x86_cpu_ids. Last entry terminated with
- * {}.
- *
- * Return the entry if the current CPU matches the entries in the
- * passed x86_cpu_id match table. Otherwise NULL. The match table
- * contains vendor (X86_VENDOR_*), family, model and feature bits or
- * respective wildcard entries.
- *
- * A typical table entry would be to match a specific CPU
- * { X86_VENDOR_INTEL, 6, 0x12 }
- * or to match a specific CPU feature
- * { X86_FEATURE_MATCH(X86_FEATURE_FOOBAR) }
- *
- * Fields can be wildcarded with %X86_VENDOR_ANY, %X86_FAMILY_ANY,
- * %X86_MODEL_ANY, %X86_FEATURE_ANY or 0 (except for vendor)
- *
- * Arrays used to match for this should also be declared using
- * MODULE_DEVICE_TABLE(x86_cpu, ...)
- *
- * This always matches against the boot cpu, assuming models and features are
- * consistent over all CPUs.
- */
-const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
-{
- const struct x86_cpu_id *m;
- struct cpuinfo_x86 *c = &boot_cpu_data;
-
- for (m = match; m->vendor | m->family | m->model | m->feature; m++) {
- if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor)
- continue;
- if (m->family != X86_FAMILY_ANY && c->x86 != m->family)
- continue;
- if (m->model != X86_MODEL_ANY && c->x86_model != m->model)
- continue;
- if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature))
- continue;
- return m;
- }
- return NULL;
-}
-EXPORT_SYMBOL(x86_match_cpu);
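A minimal sketch of a caller, modeled on the kernel-doc above; the table contents are invented, and a real driver would also feed the table to MODULE_DEVICE_TABLE as the comment notes.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

/* Match any Intel family-6 part, or any CPU advertising XSAVE. */
static const struct x86_cpu_id demo_cpu_ids[] = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_ANY },
	{ X86_FEATURE_MATCH(X86_FEATURE_XSAVE) },
	{}
};

static int __init demo_init(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(demo_cpu_ids);

	if (!id)
		return -ENODEV;		/* no matching CPU; refuse to load */
	pr_info("demo: matched vendor %d family %d\n", id->vendor, id->family);
	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");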
-
-ssize_t arch_print_cpu_modalias(struct device *dev,
- struct device_attribute *attr,
- char *bufptr)
-{
- int size = PAGE_SIZE;
- int i, n;
- char *buf = bufptr;
-
- n = snprintf(buf, size, "x86cpu:vendor:%04X:family:%04X:"
- "model:%04X:feature:",
- boot_cpu_data.x86_vendor,
- boot_cpu_data.x86,
- boot_cpu_data.x86_model);
- size -= n;
- buf += n;
- size -= 1;
- for (i = 0; i < NCAPINTS*32; i++) {
- if (boot_cpu_has(i)) {
- n = snprintf(buf, size, ",%04X", i);
- if (n >= size) {
- WARN(1, "x86 features overflow page\n");
- break;
- }
- size -= n;
- buf += n;
- }
- }
- *buf++ = '\n';
- return buf - bufptr;
-}
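For reference, the string built above looks like the output of this small userspace reconstruction; the vendor, family, model and feature-bit numbers are invented purely for illustration.

#include <stdio.h>

int main(void)
{
	/* Same format string as arch_print_cpu_modalias() uses. */
	printf("x86cpu:vendor:%04X:family:%04X:model:%04X:feature:", 0, 6, 0x2A);
	printf(",%04X", 0);	/* a feature bit number, e.g. bit 0 */
	printf(",%04X", 0x59);	/* another feature bit number */
	printf("\n");
	return 0;
}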
-
-int arch_cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (buf) {
- arch_print_cpu_modalias(NULL, NULL, buf);
- add_uevent_var(env, "MODALIAS=%s", buf);
- kfree(buf);
- }
- return 0;
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/Makefile b/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/Makefile
deleted file mode 100644
index bb34b03a..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-obj-y = mce.o mce-severity.o
-
-obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o
-obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
-obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o
-obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
-obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o
-
-obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
-
-obj-$(CONFIG_ACPI_APEI) += mce-apei.o
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-apei.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-apei.c
deleted file mode 100644
index 507ea586..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Bridge between MCE and APEI
- *
- * On some machines, corrected memory errors are reported via the APEI
- * generic hardware error source (GHES) instead of a corrected Machine
- * Check. These corrected memory errors can be reported to user space
- * through /dev/mcelog by faking a corrected Machine Check, so that the
- * affected memory page can be offlined by /sbin/mcelog if the error
- * count for one page is beyond the threshold.
- *
- * For fatal MCE, save MCE record into persistent storage via ERST, so
- * that the MCE record can be logged after reboot via ERST.
- *
- * Copyright 2010 Intel Corp.
- * Author: Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/acpi.h>
-#include <linux/cper.h>
-#include <acpi/apei.h>
-#include <asm/mce.h>
-
-#include "mce-internal.h"
-
-void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
-{
- struct mce m;
-
- /* Only corrected MC is reported */
- if (!corrected)
- return;
-
- mce_setup(&m);
- m.bank = 1;
- /* Fake a memory read corrected error with unknown channel */
- m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
- m.addr = mem_err->physical_addr;
- mce_log(&m);
- mce_notify_irq();
-}
-EXPORT_SYMBOL_GPL(apei_mce_report_mem_error);
-
-#define CPER_CREATOR_MCE \
- UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \
- 0x64, 0x90, 0xb8, 0x9d)
-#define CPER_SECTION_TYPE_MCE \
- UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \
- 0x04, 0x4a, 0x38, 0xfc)
-
-/*
- * The CPER specification (UEFI specification 2.3, appendix N) requires
- * these records to be byte-packed.
- */
-struct cper_mce_record {
- struct cper_record_header hdr;
- struct cper_section_descriptor sec_hdr;
- struct mce mce;
-} __packed;
-
-int apei_write_mce(struct mce *m)
-{
- struct cper_mce_record rcd;
-
- memset(&rcd, 0, sizeof(rcd));
- memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
- rcd.hdr.revision = CPER_RECORD_REV;
- rcd.hdr.signature_end = CPER_SIG_END;
- rcd.hdr.section_count = 1;
- rcd.hdr.error_severity = CPER_SEV_FATAL;
- /* timestamp, platform_id, partition_id are all invalid */
- rcd.hdr.validation_bits = 0;
- rcd.hdr.record_length = sizeof(rcd);
- rcd.hdr.creator_id = CPER_CREATOR_MCE;
- rcd.hdr.notification_type = CPER_NOTIFY_MCE;
- rcd.hdr.record_id = cper_next_record_id();
- rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;
-
- rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd;
- rcd.sec_hdr.section_length = sizeof(rcd.mce);
- rcd.sec_hdr.revision = CPER_SEC_REV;
-	/* fru_id and fru_text are invalid */
- rcd.sec_hdr.validation_bits = 0;
- rcd.sec_hdr.flags = CPER_SEC_PRIMARY;
- rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
- rcd.sec_hdr.section_severity = CPER_SEV_FATAL;
-
- memcpy(&rcd.mce, m, sizeof(*m));
-
- return erst_write(&rcd.hdr);
-}
-
-ssize_t apei_read_mce(struct mce *m, u64 *record_id)
-{
- struct cper_mce_record rcd;
- int rc, pos;
-
- rc = erst_get_record_id_begin(&pos);
- if (rc)
- return rc;
-retry:
- rc = erst_get_record_id_next(&pos, record_id);
- if (rc)
- goto out;
-	/* no more records */
- if (*record_id == APEI_ERST_INVALID_RECORD_ID)
- goto out;
- rc = erst_read(*record_id, &rcd.hdr, sizeof(rcd));
- /* someone else has cleared the record, try next one */
- if (rc == -ENOENT)
- goto retry;
- else if (rc < 0)
- goto out;
- /* try to skip other type records in storage */
- else if (rc != sizeof(rcd) ||
- uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE))
- goto retry;
- memcpy(m, &rcd.mce, sizeof(*m));
- rc = sizeof(*m);
-out:
- erst_get_record_id_end();
-
- return rc;
-}
-
-/* Check whether there is record in ERST */
-int apei_check_mce(void)
-{
- return erst_get_record_count();
-}
-
-int apei_clear_mce(u64 record_id)
-{
- return erst_clear(record_id);
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-inject.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-inject.c
deleted file mode 100644
index fc4beb39..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Machine check injection support.
- * Copyright 2008 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- *
- * Authors:
- * Andi Kleen
- * Ying Huang
- */
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/timer.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/preempt.h>
-#include <linux/smp.h>
-#include <linux/notifier.h>
-#include <linux/kdebug.h>
-#include <linux/cpu.h>
-#include <linux/sched.h>
-#include <linux/gfp.h>
-#include <asm/mce.h>
-#include <asm/apic.h>
-#include <asm/nmi.h>
-
-/* Update fake mce registers on current CPU. */
-static void inject_mce(struct mce *m)
-{
- struct mce *i = &per_cpu(injectm, m->extcpu);
-
- /* Make sure no one reads partially written injectm */
- i->finished = 0;
- mb();
- m->finished = 0;
- /* First set the fields after finished */
- i->extcpu = m->extcpu;
- mb();
- /* Now write record in order, finished last (except above) */
- memcpy(i, m, sizeof(struct mce));
- /* Finally activate it */
- mb();
- i->finished = 1;
-}
-
-static void raise_poll(struct mce *m)
-{
- unsigned long flags;
- mce_banks_t b;
-
- memset(&b, 0xff, sizeof(mce_banks_t));
- local_irq_save(flags);
- machine_check_poll(0, &b);
- local_irq_restore(flags);
- m->finished = 0;
-}
-
-static void raise_exception(struct mce *m, struct pt_regs *pregs)
-{
- struct pt_regs regs;
- unsigned long flags;
-
- if (!pregs) {
- memset(&regs, 0, sizeof(struct pt_regs));
- regs.ip = m->ip;
- regs.cs = m->cs;
- pregs = &regs;
- }
-	/* in the mcheck exception handler, irqs will be disabled */
- local_irq_save(flags);
- do_machine_check(pregs, 0);
- local_irq_restore(flags);
- m->finished = 0;
-}
-
-static cpumask_var_t mce_inject_cpumask;
-
-static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
-{
- int cpu = smp_processor_id();
- struct mce *m = &__get_cpu_var(injectm);
- if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
- return NMI_DONE;
- cpumask_clear_cpu(cpu, mce_inject_cpumask);
- if (m->inject_flags & MCJ_EXCEPTION)
- raise_exception(m, regs);
- else if (m->status)
- raise_poll(m);
- return NMI_HANDLED;
-}
-
-static void mce_irq_ipi(void *info)
-{
- int cpu = smp_processor_id();
- struct mce *m = &__get_cpu_var(injectm);
-
- if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
- m->inject_flags & MCJ_EXCEPTION) {
- cpumask_clear_cpu(cpu, mce_inject_cpumask);
- raise_exception(m, NULL);
- }
-}
-
-/* Inject mce on current CPU */
-static int raise_local(void)
-{
- struct mce *m = &__get_cpu_var(injectm);
- int context = MCJ_CTX(m->inject_flags);
- int ret = 0;
- int cpu = m->extcpu;
-
- if (m->inject_flags & MCJ_EXCEPTION) {
- printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu);
- switch (context) {
- case MCJ_CTX_IRQ:
- /*
- * Could do more to fake interrupts like
- * calling irq_enter, but the necessary
- * machinery isn't exported currently.
- */
- /*FALL THROUGH*/
- case MCJ_CTX_PROCESS:
- raise_exception(m, NULL);
- break;
- default:
- printk(KERN_INFO "Invalid MCE context\n");
- ret = -EINVAL;
- }
- printk(KERN_INFO "MCE exception done on CPU %d\n", cpu);
- } else if (m->status) {
- printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu);
- raise_poll(m);
- mce_notify_irq();
- printk(KERN_INFO "Machine check poll done on CPU %d\n", cpu);
- } else
- m->finished = 0;
-
- return ret;
-}
-
-static void raise_mce(struct mce *m)
-{
- int context = MCJ_CTX(m->inject_flags);
-
- inject_mce(m);
-
- if (context == MCJ_CTX_RANDOM)
- return;
-
-#ifdef CONFIG_X86_LOCAL_APIC
- if (m->inject_flags & (MCJ_IRQ_BRAODCAST | MCJ_NMI_BROADCAST)) {
- unsigned long start;
- int cpu;
-
- get_online_cpus();
- cpumask_copy(mce_inject_cpumask, cpu_online_mask);
- cpumask_clear_cpu(get_cpu(), mce_inject_cpumask);
- for_each_online_cpu(cpu) {
- struct mce *mcpu = &per_cpu(injectm, cpu);
- if (!mcpu->finished ||
- MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
- cpumask_clear_cpu(cpu, mce_inject_cpumask);
- }
- if (!cpumask_empty(mce_inject_cpumask)) {
- if (m->inject_flags & MCJ_IRQ_BRAODCAST) {
-				/*
-				 * don't wait, because mce_irq_ipi needs to be
-				 * synchronous with the following raise_local()
-				 */
- preempt_disable();
- smp_call_function_many(mce_inject_cpumask,
- mce_irq_ipi, NULL, 0);
- preempt_enable();
- } else if (m->inject_flags & MCJ_NMI_BROADCAST)
- apic->send_IPI_mask(mce_inject_cpumask,
- NMI_VECTOR);
- }
- start = jiffies;
- while (!cpumask_empty(mce_inject_cpumask)) {
- if (!time_before(jiffies, start + 2*HZ)) {
- printk(KERN_ERR
- "Timeout waiting for mce inject %lx\n",
- *cpumask_bits(mce_inject_cpumask));
- break;
- }
- cpu_relax();
- }
- raise_local();
- put_cpu();
- put_online_cpus();
- } else
-#endif
- raise_local();
-}
-
-/* Error injection interface */
-static ssize_t mce_write(struct file *filp, const char __user *ubuf,
- size_t usize, loff_t *off)
-{
- struct mce m;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- /*
- * There are some cases where real MSR reads could slip
- * through.
- */
- if (!boot_cpu_has(X86_FEATURE_MCE) || !boot_cpu_has(X86_FEATURE_MCA))
- return -EIO;
-
- if ((unsigned long)usize > sizeof(struct mce))
- usize = sizeof(struct mce);
- if (copy_from_user(&m, ubuf, usize))
- return -EFAULT;
-
- if (m.extcpu >= num_possible_cpus() || !cpu_online(m.extcpu))
- return -EINVAL;
-
- /*
- * Need to give user space some time to set everything up,
-	 * so do it a jiffy or two later everywhere.
- */
- schedule_timeout(2);
- raise_mce(&m);
- return usize;
-}
-
-static int inject_init(void)
-{
- if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
- return -ENOMEM;
- printk(KERN_INFO "Machine check injector initialized\n");
- register_mce_write_callback(mce_write);
- register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
- "mce_notify");
- return 0;
-}
-
-module_init(inject_init);
-/*
- * Cannot tolerate unloading currently because we cannot
- * guarantee all openers of mce_chrdev will get a reference to us.
- */
-MODULE_LICENSE("GPL");
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-internal.h b/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-internal.h
deleted file mode 100644
index ed44c8a6..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#include <linux/device.h>
-#include <asm/mce.h>
-
-enum severity_level {
- MCE_NO_SEVERITY,
- MCE_KEEP_SEVERITY,
- MCE_SOME_SEVERITY,
- MCE_AO_SEVERITY,
- MCE_UC_SEVERITY,
- MCE_AR_SEVERITY,
- MCE_PANIC_SEVERITY,
-};
-
-#define ATTR_LEN 16
-
-/* One object for each MCE bank, shared by all CPUs */
-struct mce_bank {
- u64 ctl; /* subevents to enable */
- unsigned char init; /* initialise bank? */
- struct device_attribute attr; /* device attribute */
- char attrname[ATTR_LEN]; /* attribute name */
-};
-
-int mce_severity(struct mce *a, int tolerant, char **msg);
-struct dentry *mce_get_debugfs_dir(void);
-
-extern int mce_ser;
-
-extern struct mce_bank *mce_banks;
-
-#ifdef CONFIG_ACPI_APEI
-int apei_write_mce(struct mce *m);
-ssize_t apei_read_mce(struct mce *m, u64 *record_id);
-int apei_check_mce(void);
-int apei_clear_mce(u64 record_id);
-#else
-static inline int apei_write_mce(struct mce *m)
-{
- return -EINVAL;
-}
-static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
-{
- return 0;
-}
-static inline int apei_check_mce(void)
-{
- return 0;
-}
-static inline int apei_clear_mce(u64 record_id)
-{
- return -EINVAL;
-}
-#endif
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-severity.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-severity.c
deleted file mode 100644
index 1ccd4539..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * MCE grading rules.
- * Copyright 2008, 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- *
- * Author: Andi Kleen
- */
-#include <linux/kernel.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <asm/mce.h>
-
-#include "mce-internal.h"
-
-/*
- * Grade an mce by severity. In general the most severe ones are processed
- * first. Since there are quite a lot of combinations, test the bits in a
- * table-driven way. The rules are simply processed in order; the first
- * match wins.
- *
- * Note this is only used for machine check exceptions; the corrected
- * errors use much simpler rules. The exceptions still check for corrected
- * errors, but only to leave them alone for the CMCI handler (except in
- * panic situations).
- */
-
-enum context { IN_KERNEL = 1, IN_USER = 2 };
-enum ser { SER_REQUIRED = 1, NO_SER = 2 };
-
-static struct severity {
- u64 mask;
- u64 result;
- unsigned char sev;
- unsigned char mcgmask;
- unsigned char mcgres;
- unsigned char ser;
- unsigned char context;
- unsigned char covered;
- char *msg;
-} severities[] = {
-#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
-#define KERNEL .context = IN_KERNEL
-#define USER .context = IN_USER
-#define SER .ser = SER_REQUIRED
-#define NOSER .ser = NO_SER
-#define BITCLR(x) .mask = x, .result = 0
-#define BITSET(x) .mask = x, .result = x
-#define MCGMASK(x, y) .mcgmask = x, .mcgres = y
-#define MASK(x, y) .mask = x, .result = y
-#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
-#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
-#define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)
-#define MCACOD 0xffff
-/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
-#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK 0xfff0
-#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
-#define MCACOD_DATA 0x0134 /* Data Load */
-#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
-
- MCESEV(
- NO, "Invalid",
- BITCLR(MCI_STATUS_VAL)
- ),
- MCESEV(
- NO, "Not enabled",
- BITCLR(MCI_STATUS_EN)
- ),
- MCESEV(
- PANIC, "Processor context corrupt",
- BITSET(MCI_STATUS_PCC)
- ),
- /* When MCIP is not set something is very confused */
- MCESEV(
- PANIC, "MCIP not set in MCA handler",
- MCGMASK(MCG_STATUS_MCIP, 0)
- ),
-	/* Neither restart nor error IP -- no chance to recover -> PANIC */
- MCESEV(
- PANIC, "Neither restart nor error IP",
- MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0)
- ),
- MCESEV(
- PANIC, "In kernel and no restart IP",
- KERNEL, MCGMASK(MCG_STATUS_RIPV, 0)
- ),
- MCESEV(
- KEEP, "Corrected error",
- NOSER, BITCLR(MCI_STATUS_UC)
- ),
-
- /* ignore OVER for UCNA */
- MCESEV(
- KEEP, "Uncorrected no action required",
- SER, MASK(MCI_UC_SAR, MCI_STATUS_UC)
- ),
- MCESEV(
- PANIC, "Illegal combination (UCNA with AR=1)",
- SER,
- MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
- ),
- MCESEV(
- KEEP, "Non signalled machine check",
- SER, BITCLR(MCI_STATUS_S)
- ),
-
- MCESEV(
- PANIC, "Action required with lost events",
- SER, BITSET(MCI_STATUS_OVER|MCI_UC_SAR)
- ),
-
- /* known AR MCACODs: */
-#ifdef CONFIG_MEMORY_FAILURE
- MCESEV(
- KEEP, "HT thread notices Action required: data load error",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
- MCGMASK(MCG_STATUS_EIPV, 0)
- ),
- MCESEV(
- AR, "Action required: data load error",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
- USER
- ),
-#endif
- MCESEV(
- PANIC, "Action required: unknown MCACOD",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_SAR)
- ),
-
- /* known AO MCACODs: */
- MCESEV(
- AO, "Action optional: memory scrubbing error",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD_SCRUBMSK, MCI_UC_S|MCACOD_SCRUB)
- ),
- MCESEV(
- AO, "Action optional: last level cache writeback error",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD, MCI_UC_S|MCACOD_L3WB)
- ),
- MCESEV(
- SOME, "Action optional: unknown MCACOD",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S)
- ),
- MCESEV(
- SOME, "Action optional with lost events",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_S)
- ),
-
- MCESEV(
- PANIC, "Overflowed uncorrected",
- BITSET(MCI_STATUS_OVER|MCI_STATUS_UC)
- ),
- MCESEV(
- UC, "Uncorrected",
- BITSET(MCI_STATUS_UC)
- ),
- MCESEV(
- SOME, "No match",
- BITSET(0)
- ) /* always matches. keep at end */
-};
-
-/*
- * If mcgstatus indicated that ip/cs on the stack were
- * no good, then "m->cs" will be zero and we will have
- * to assume the worst case (IN_KERNEL) as we actually
- * have no idea what we were executing when the machine
- * check hit.
- * If we do have a good "m->cs" (or a faked one in the
- * case we were executing in VM86 mode) we can use it to
- * distinguish an exception taken in user mode from one
- * taken in the kernel.
- */
-static int error_context(struct mce *m)
-{
- return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
-}
-
-int mce_severity(struct mce *m, int tolerant, char **msg)
-{
- enum context ctx = error_context(m);
- struct severity *s;
-
- for (s = severities;; s++) {
- if ((m->status & s->mask) != s->result)
- continue;
- if ((m->mcgstatus & s->mcgmask) != s->mcgres)
- continue;
- if (s->ser == SER_REQUIRED && !mce_ser)
- continue;
- if (s->ser == NO_SER && mce_ser)
- continue;
- if (s->context && ctx != s->context)
- continue;
- if (msg)
- *msg = s->msg;
- s->covered = 1;
- if (s->sev >= MCE_UC_SEVERITY && ctx == IN_KERNEL) {
- if (panic_on_oops || tolerant < 1)
- return MCE_PANIC_SEVERITY;
- }
- return s->sev;
- }
-}
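As a worked run of the first-match walk implemented above, here is a cut-down userspace re-creation using only three of the rules; the status word is invented, and the MCI_STATUS_* bit positions mirror the architectural layout in asm/mce.h.

#include <stdint.h>
#include <stdio.h>

#define S_VAL	(1ULL << 63)
#define S_UC	(1ULL << 61)
#define S_EN	(1ULL << 60)
#define S_PCC	(1ULL << 57)

struct rule { uint64_t mask, result; const char *msg; };

static const struct rule rules[] = {
	{ S_VAL, 0,     "Invalid" },			/* BITCLR(VAL)  */
	{ S_EN,  0,     "Not enabled" },		/* BITCLR(EN)   */
	{ S_PCC, S_PCC, "Processor context corrupt" },	/* BITSET(PCC)  */
	{ 0,     0,     "No match" },			/* always matches */
};

int main(void)
{
	uint64_t status = S_VAL | S_EN | S_UC | S_PCC;	/* invented example */
	const struct rule *r;

	/* First rule whose masked bits equal the expected result wins. */
	for (r = rules; ; r++)
		if ((status & r->mask) == r->result)
			break;
	printf("%s\n", r->msg);	/* "Processor context corrupt" */
	return 0;
}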
-
-#ifdef CONFIG_DEBUG_FS
-static void *s_start(struct seq_file *f, loff_t *pos)
-{
- if (*pos >= ARRAY_SIZE(severities))
- return NULL;
- return &severities[*pos];
-}
-
-static void *s_next(struct seq_file *f, void *data, loff_t *pos)
-{
- if (++(*pos) >= ARRAY_SIZE(severities))
- return NULL;
- return &severities[*pos];
-}
-
-static void s_stop(struct seq_file *f, void *data)
-{
-}
-
-static int s_show(struct seq_file *f, void *data)
-{
- struct severity *ser = data;
- seq_printf(f, "%d\t%s\n", ser->covered, ser->msg);
- return 0;
-}
-
-static const struct seq_operations severities_seq_ops = {
- .start = s_start,
- .next = s_next,
- .stop = s_stop,
- .show = s_show,
-};
-
-static int severities_coverage_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &severities_seq_ops);
-}
-
-static ssize_t severities_coverage_write(struct file *file,
- const char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(severities); i++)
- severities[i].covered = 0;
- return count;
-}
-
-static const struct file_operations severities_coverage_fops = {
- .open = severities_coverage_open,
- .release = seq_release,
- .read = seq_read,
- .write = severities_coverage_write,
- .llseek = seq_lseek,
-};
-
-static int __init severities_debugfs_init(void)
-{
- struct dentry *dmce, *fsev;
-
- dmce = mce_get_debugfs_dir();
- if (!dmce)
- goto err_out;
-
- fsev = debugfs_create_file("severities-coverage", 0444, dmce, NULL,
- &severities_coverage_fops);
- if (!fsev)
- goto err_out;
-
- return 0;
-
-err_out:
- return -ENOMEM;
-}
-late_initcall(severities_debugfs_init);
-#endif /* CONFIG_DEBUG_FS */
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce.c
deleted file mode 100644
index 61604aef..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce.c
+++ /dev/null
@@ -1,2364 +0,0 @@
-/*
- * Machine check handler.
- *
- * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
- * Rest from unknown author(s).
- * 2004 Andi Kleen. Rewrote most of it.
- * Copyright 2008 Intel Corporation
- * Author: Andi Kleen
- */
-#include <linux/thread_info.h>
-#include <linux/capability.h>
-#include <linux/miscdevice.h>
-#include <linux/ratelimit.h>
-#include <linux/kallsyms.h>
-#include <linux/rcupdate.h>
-#include <linux/kobject.h>
-#include <linux/uaccess.h>
-#include <linux/kdebug.h>
-#include <linux/kernel.h>
-#include <linux/percpu.h>
-#include <linux/string.h>
-#include <linux/device.h>
-#include <linux/syscore_ops.h>
-#include <linux/delay.h>
-#include <linux/ctype.h>
-#include <linux/sched.h>
-#include <linux/sysfs.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/kmod.h>
-#include <linux/poll.h>
-#include <linux/nmi.h>
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/debugfs.h>
-#include <linux/irq_work.h>
-#include <linux/export.h>
-
-#include <asm/processor.h>
-#include <asm/mce.h>
-#include <asm/msr.h>
-
-#include "mce-internal.h"
-
-static DEFINE_MUTEX(mce_chrdev_read_mutex);
-
-#define rcu_dereference_check_mce(p) \
- rcu_dereference_index_check((p), \
- rcu_read_lock_sched_held() || \
- lockdep_is_held(&mce_chrdev_read_mutex))
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/mce.h>
-
-int mce_disabled __read_mostly;
-
-#define MISC_MCELOG_MINOR 227
-
-#define SPINUNIT 100 /* 100ns */
-
-atomic_t mce_entry;
-
-DEFINE_PER_CPU(unsigned, mce_exception_count);
-
-/*
- * Tolerant levels:
- * 0: always panic on uncorrected errors, log corrected errors
- * 1: panic or SIGBUS on uncorrected errors, log corrected errors
- * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
- * 3: never panic or SIGBUS, log all errors (for testing only)
- */
-static int tolerant __read_mostly = 1;
-static int banks __read_mostly;
-static int rip_msr __read_mostly;
-static int mce_bootlog __read_mostly = -1;
-static int monarch_timeout __read_mostly = -1;
-static int mce_panic_timeout __read_mostly;
-static int mce_dont_log_ce __read_mostly;
-int mce_cmci_disabled __read_mostly;
-int mce_ignore_ce __read_mostly;
-int mce_ser __read_mostly;
-
-struct mce_bank *mce_banks __read_mostly;
-
-/* User mode helper program triggered by machine check event */
-static unsigned long mce_need_notify;
-static char mce_helper[128];
-static char *mce_helper_argv[2] = { mce_helper, NULL };
-
-static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
-
-static DEFINE_PER_CPU(struct mce, mces_seen);
-static int cpu_missing;
-
-/* MCA banks polled by the periodic polling timer for corrected events */
-DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
- [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
-};
-
-static DEFINE_PER_CPU(struct work_struct, mce_work);
-
-/*
- * CPU/chipset specific EDAC code can register a notifier call here to print
- * MCE errors in a human-readable form.
- */
-ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
-
-/* Do initial initialization of a struct mce */
-void mce_setup(struct mce *m)
-{
- memset(m, 0, sizeof(struct mce));
- m->cpu = m->extcpu = smp_processor_id();
- rdtscll(m->tsc);
- /* We hope get_seconds stays lockless */
- m->time = get_seconds();
- m->cpuvendor = boot_cpu_data.x86_vendor;
- m->cpuid = cpuid_eax(1);
- m->socketid = cpu_data(m->extcpu).phys_proc_id;
- m->apicid = cpu_data(m->extcpu).initial_apicid;
- rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
-}
-
-DEFINE_PER_CPU(struct mce, injectm);
-EXPORT_PER_CPU_SYMBOL_GPL(injectm);
-
-/*
- * Lockless MCE logging infrastructure.
- * This avoids deadlocks on printk locks without having to break locks. It
- * also separates MCEs from kernel messages to avoid bogus bug reports.
- */
-
-static struct mce_log mcelog = {
- .signature = MCE_LOG_SIGNATURE,
- .len = MCE_LOG_LEN,
- .recordlen = sizeof(struct mce),
-};
-
-void mce_log(struct mce *mce)
-{
- unsigned next, entry;
- int ret = 0;
-
- /* Emit the trace record: */
- trace_mce_record(mce);
-
- ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
- if (ret == NOTIFY_STOP)
- return;
-
- mce->finished = 0;
- wmb();
- for (;;) {
- entry = rcu_dereference_check_mce(mcelog.next);
- for (;;) {
-
- /*
-			 * When the buffer fills up, discard new entries.
- * Assume that the earlier errors are the more
- * interesting ones:
- */
- if (entry >= MCE_LOG_LEN) {
- set_bit(MCE_OVERFLOW,
- (unsigned long *)&mcelog.flags);
- return;
- }
- /* Old left over entry. Skip: */
- if (mcelog.entry[entry].finished) {
- entry++;
- continue;
- }
- break;
- }
- smp_rmb();
- next = entry + 1;
- if (cmpxchg(&mcelog.next, entry, next) == entry)
- break;
- }
- memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
- wmb();
- mcelog.entry[entry].finished = 1;
- wmb();
-
- mce->finished = 1;
- set_bit(0, &mce_need_notify);
-}
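The lock-free claim step in mce_log() above can be looked at in isolation; a userspace sketch with GCC's __sync_val_compare_and_swap() standing in for the kernel's cmpxchg() (the skip-over-unfinished-entries detail is left out).

#include <stdio.h>

#define LOG_LEN 32

static unsigned int next_slot;

/* Race-free slot claim: whoever wins the compare-and-swap owns the entry. */
static int claim_slot(void)
{
	for (;;) {
		unsigned int entry = next_slot;

		if (entry >= LOG_LEN)
			return -1;	/* buffer full: drop the record */
		if (__sync_val_compare_and_swap(&next_slot, entry, entry + 1) == entry)
			return entry;	/* this slot is now ours to fill */
	}
}

int main(void)
{
	printf("%d %d\n", claim_slot(), claim_slot());	/* 0 1 */
	return 0;
}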
-
-static void drain_mcelog_buffer(void)
-{
- unsigned int next, i, prev = 0;
-
- next = ACCESS_ONCE(mcelog.next);
-
- do {
- struct mce *m;
-
- /* drain what was logged during boot */
- for (i = prev; i < next; i++) {
- unsigned long start = jiffies;
- unsigned retries = 1;
-
- m = &mcelog.entry[i];
-
- while (!m->finished) {
- if (time_after_eq(jiffies, start + 2*retries))
- retries++;
-
- cpu_relax();
-
- if (!m->finished && retries >= 4) {
- pr_err("MCE: skipping error being logged currently!\n");
- break;
- }
- }
- smp_rmb();
- atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
- }
-
- memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m));
- prev = next;
- next = cmpxchg(&mcelog.next, prev, 0);
- } while (next != prev);
-}
-
-
-void mce_register_decode_chain(struct notifier_block *nb)
-{
- atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
- drain_mcelog_buffer();
-}
-EXPORT_SYMBOL_GPL(mce_register_decode_chain);
-
-void mce_unregister_decode_chain(struct notifier_block *nb)
-{
- atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
-}
-EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
-
-static void print_mce(struct mce *m)
-{
- int ret = 0;
-
- pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
- m->extcpu, m->mcgstatus, m->bank, m->status);
-
- if (m->ip) {
- pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
- !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
- m->cs, m->ip);
-
- if (m->cs == __KERNEL_CS)
- print_symbol("{%s}", m->ip);
- pr_cont("\n");
- }
-
- pr_emerg(HW_ERR "TSC %llx ", m->tsc);
- if (m->addr)
- pr_cont("ADDR %llx ", m->addr);
- if (m->misc)
- pr_cont("MISC %llx ", m->misc);
-
- pr_cont("\n");
- /*
- * Note this output is parsed by external tools and old fields
- * should not be changed.
- */
- pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
- m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
- cpu_data(m->extcpu).microcode);
-
- /*
-	 * Print out human-readable details about the MCE error
-	 * (if the CPU has an implementation for that).
- */
- ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
- if (ret == NOTIFY_STOP)
- return;
-
- pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
-}
-
-#define PANIC_TIMEOUT 5 /* 5 seconds */
-
-static atomic_t mce_paniced;
-
-static int fake_panic;
-static atomic_t mce_fake_paniced;
-
-/* Panic in progress. Enable interrupts and wait for final IPI */
-static void wait_for_panic(void)
-{
- long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
-
- preempt_disable();
- local_irq_enable();
- while (timeout-- > 0)
- udelay(1);
- if (panic_timeout == 0)
- panic_timeout = mce_panic_timeout;
-	panic("Panicking machine check CPU died");
-}
-
-static void mce_panic(char *msg, struct mce *final, char *exp)
-{
- int i, apei_err = 0;
-
- if (!fake_panic) {
- /*
- * Make sure only one CPU runs in machine check panic
- */
- if (atomic_inc_return(&mce_paniced) > 1)
- wait_for_panic();
- barrier();
-
- bust_spinlocks(1);
- console_verbose();
- } else {
- /* Don't log too much for fake panic */
- if (atomic_inc_return(&mce_fake_paniced) > 1)
- return;
- }
- /* First print corrected ones that are still unlogged */
- for (i = 0; i < MCE_LOG_LEN; i++) {
- struct mce *m = &mcelog.entry[i];
- if (!(m->status & MCI_STATUS_VAL))
- continue;
- if (!(m->status & MCI_STATUS_UC)) {
- print_mce(m);
- if (!apei_err)
- apei_err = apei_write_mce(m);
- }
- }
- /* Now print uncorrected but with the final one last */
- for (i = 0; i < MCE_LOG_LEN; i++) {
- struct mce *m = &mcelog.entry[i];
- if (!(m->status & MCI_STATUS_VAL))
- continue;
- if (!(m->status & MCI_STATUS_UC))
- continue;
- if (!final || memcmp(m, final, sizeof(struct mce))) {
- print_mce(m);
- if (!apei_err)
- apei_err = apei_write_mce(m);
- }
- }
- if (final) {
- print_mce(final);
- if (!apei_err)
- apei_err = apei_write_mce(final);
- }
- if (cpu_missing)
- pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
- if (exp)
- pr_emerg(HW_ERR "Machine check: %s\n", exp);
- if (!fake_panic) {
- if (panic_timeout == 0)
- panic_timeout = mce_panic_timeout;
- panic(msg);
- } else
- pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
-}
-
-/* Support code for software error injection */
-
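-/*
- * Map an MSR number to the corresponding field of the per-CPU
- * injected struct mce.
- */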
-static int msr_to_offset(u32 msr)
-{
- unsigned bank = __this_cpu_read(injectm.bank);
-
- if (msr == rip_msr)
- return offsetof(struct mce, ip);
- if (msr == MSR_IA32_MCx_STATUS(bank))
- return offsetof(struct mce, status);
- if (msr == MSR_IA32_MCx_ADDR(bank))
- return offsetof(struct mce, addr);
- if (msr == MSR_IA32_MCx_MISC(bank))
- return offsetof(struct mce, misc);
- if (msr == MSR_IA32_MCG_STATUS)
- return offsetof(struct mce, mcgstatus);
- return -1;
-}
-
-/* MSR access wrappers used for error injection */
-static u64 mce_rdmsrl(u32 msr)
-{
- u64 v;
-
- if (__this_cpu_read(injectm.finished)) {
- int offset = msr_to_offset(msr);
-
- if (offset < 0)
- return 0;
- return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
- }
-
- if (rdmsrl_safe(msr, &v)) {
- WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
- /*
- * Return zero in case the access faulted. This should
- * not happen normally but can happen if the CPU does
- * something weird, or if the code is buggy.
- */
- v = 0;
- }
-
- return v;
-}
-
-static void mce_wrmsrl(u32 msr, u64 v)
-{
- if (__this_cpu_read(injectm.finished)) {
- int offset = msr_to_offset(msr);
-
- if (offset >= 0)
- *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
- return;
- }
- wrmsrl(msr, v);
-}
-
-/*
- * Collect all global (w.r.t. this processor) status about this machine
- * check into our "mce" struct so that we can use it later to assess
- * the severity of the problem as we read per-bank specific details.
- */
-static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
-{
- mce_setup(m);
-
- m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
- if (regs) {
- /*
- * Get the address of the instruction at the time of
- * the machine check error.
- */
- if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
- m->ip = regs->ip;
- m->cs = regs->cs;
-
- /*
- * When in VM86 mode make the cs look like ring 3
- * always. This is a lie, but it's better than passing
- * the additional vm86 bit around everywhere.
- */
- if (v8086_mode(regs))
- m->cs |= 3;
- }
- /* Use accurate RIP reporting if available. */
- if (rip_msr)
- m->ip = mce_rdmsrl(rip_msr);
- }
-}
-
-/*
- * Simple lockless ring to communicate PFNs from the exception handler to the
- * process context work function. This is vastly simplified because there's
- * only a single reader and a single writer.
- */
-#define MCE_RING_SIZE 16 /* we use one entry less */
-
-struct mce_ring {
- unsigned short start;
- unsigned short end;
- unsigned long ring[MCE_RING_SIZE];
-};
-static DEFINE_PER_CPU(struct mce_ring, mce_ring);
-
-/* Runs with CPU affinity in workqueue */
-static int mce_ring_empty(void)
-{
- struct mce_ring *r = &__get_cpu_var(mce_ring);
-
- return r->start == r->end;
-}
-
-static int mce_ring_get(unsigned long *pfn)
-{
- struct mce_ring *r;
- int ret = 0;
-
- *pfn = 0;
- get_cpu();
- r = &__get_cpu_var(mce_ring);
- if (r->start == r->end)
- goto out;
- *pfn = r->ring[r->start];
- r->start = (r->start + 1) % MCE_RING_SIZE;
- ret = 1;
-out:
- put_cpu();
- return ret;
-}
-
-/* Always runs in MCE context with preempt off */
-static int mce_ring_add(unsigned long pfn)
-{
- struct mce_ring *r = &__get_cpu_var(mce_ring);
- unsigned next;
-
- next = (r->end + 1) % MCE_RING_SIZE;
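-	/* Keep one slot free: the ring is full when 'next' would hit 'start'. */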
- if (next == r->start)
- return -1;
- r->ring[r->end] = pfn;
- wmb();
- r->end = next;
- return 0;
-}
-
-int mce_available(struct cpuinfo_x86 *c)
-{
- if (mce_disabled)
- return 0;
- return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
-}
-
-static void mce_schedule_work(void)
-{
- if (!mce_ring_empty()) {
- struct work_struct *work = &__get_cpu_var(mce_work);
- if (!work_pending(work))
- schedule_work(work);
- }
-}
-
-DEFINE_PER_CPU(struct irq_work, mce_irq_work);
-
-static void mce_irq_work_cb(struct irq_work *entry)
-{
- mce_notify_irq();
- mce_schedule_work();
-}
-
-static void mce_report_event(struct pt_regs *regs)
-{
- if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
- mce_notify_irq();
- /*
- * Triggering the work queue here is just an insurance
- * policy in case the syscall exit notify handler
- * doesn't run soon enough or ends up running on the
- * wrong CPU (can happen when audit sleeps)
- */
- mce_schedule_work();
- return;
- }
-
- irq_work_queue(&__get_cpu_var(mce_irq_work));
-}
-
-/*
- * Read ADDR and MISC registers.
- */
-static void mce_read_aux(struct mce *m, int i)
-{
- if (m->status & MCI_STATUS_MISCV)
- m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
- if (m->status & MCI_STATUS_ADDRV) {
- m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));
-
- /*
- * Mask the reported address by the reported granularity.
- */
- if (mce_ser && (m->status & MCI_STATUS_MISCV)) {
- u8 shift = MCI_MISC_ADDR_LSB(m->misc);
- m->addr >>= shift;
- m->addr <<= shift;
- }
- }
-}
-
-DEFINE_PER_CPU(unsigned, mce_poll_count);
-
-/*
- * Poll for corrected events or events that happened before reset.
- * Those are just logged through /dev/mcelog.
- *
- * This is executed in standard interrupt context.
- *
- * Note: the spec recommends panicking for fatal unsignalled
- * errors here. However this would be quite problematic --
- * we would need to reimplement the Monarch handling and
- * it would mess up the exclusion between the exception handler
- * and the poll handler -- so we skip this for now.
- * These cases should not happen anyway, or only when the CPU
- * is already totally confused. In this case it's likely it will
- * not fully execute the machine check handler either.
- */
-void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
-{
- struct mce m;
- int i;
-
- percpu_inc(mce_poll_count);
-
- mce_gather_info(&m, NULL);
-
- for (i = 0; i < banks; i++) {
- if (!mce_banks[i].ctl || !test_bit(i, *b))
- continue;
-
- m.misc = 0;
- m.addr = 0;
- m.bank = i;
- m.tsc = 0;
-
- barrier();
- m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
- if (!(m.status & MCI_STATUS_VAL))
- continue;
-
- /*
- * Uncorrected or signalled events are handled by the exception
- * handler when it is enabled, so don't process those here.
- *
- * TBD do the same check for MCI_STATUS_EN here?
- */
- if (!(flags & MCP_UC) &&
- (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)))
- continue;
-
- mce_read_aux(&m, i);
-
- if (!(flags & MCP_TIMESTAMP))
- m.tsc = 0;
- /*
- * Don't get the IP here because it's unlikely to
- * have anything to do with the actual error location.
- */
- if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce)
- mce_log(&m);
-
- /*
- * Clear state for this bank.
- */
- mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
- }
-
- /*
- * Don't clear MCG_STATUS here because it's only defined for
- * exceptions.
- */
-
- sync_core();
-}
-EXPORT_SYMBOL_GPL(machine_check_poll);
-
-/*
- * Do a quick check if any of the events requires a panic.
- * This decides if we keep the events around or clear them.
- */
-static int mce_no_way_out(struct mce *m, char **msg)
-{
- int i;
-
- for (i = 0; i < banks; i++) {
- m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
- if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
- return 1;
- }
- return 0;
-}
-
-/*
- * Variable to establish order between CPUs while scanning.
- * Each CPU spins initially until executing is equal its number.
- */
-static atomic_t mce_executing;
-
-/*
- * Defines order of CPUs on entry. First CPU becomes Monarch.
- */
-static atomic_t mce_callin;
-
-/*
- * Check if a timeout waiting for other CPUs happened.
- */
-static int mce_timed_out(u64 *t)
-{
- /*
- * The others already did panic for some reason.
- * Bail out like in a timeout.
- * rmb() to tell the compiler that system_state
- * might have been modified by someone else.
- */
- rmb();
- if (atomic_read(&mce_paniced))
- wait_for_panic();
- if (!monarch_timeout)
- goto out;
- if ((s64)*t < SPINUNIT) {
- /* CHECKME: Make panic default for 1 too? */
- if (tolerant < 1)
- mce_panic("Timeout synchronizing machine check over CPUs",
- NULL, NULL);
- cpu_missing = 1;
- return 1;
- }
- *t -= SPINUNIT;
-out:
- touch_nmi_watchdog();
- return 0;
-}
-
-/*
- * The Monarch's reign. The Monarch is the CPU who entered
- * the machine check handler first. It waits for the others to
- * raise the exception too and then grades them. When any
- * error is fatal, panic. Only then let the others continue.
- *
- * The other CPUs entering the MCE handler will be controlled by the
- * Monarch. They are called Subjects.
- *
- * This way we prevent any potential data corruption in an unrecoverable case
- * and also make sure that all CPUs' errors are always examined.
- *
- * This also detects the case of a machine check event coming from outer
- * space (not detected by any CPU). In this case some external agent wants
- * us to shut down, so panic too.
- *
- * The other CPUs might still decide to panic if the handler happens
- * in an unrecoverable place, but in this case the system is in a semi-stable
- * state and won't corrupt anything by itself. It's ok to let the others
- * continue for a bit first.
- *
- * All the spin loops have timeouts; when a timeout happens a CPU
- * typically elects itself to be Monarch.
- */
-static void mce_reign(void)
-{
- int cpu;
- struct mce *m = NULL;
- int global_worst = 0;
- char *msg = NULL;
- char *nmsg = NULL;
-
- /*
- * This CPU is the Monarch and the other CPUs have run
- * through their handlers.
- * Grade the severity of the errors of all the CPUs.
- */
- for_each_possible_cpu(cpu) {
- int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant,
- &nmsg);
- if (severity > global_worst) {
- msg = nmsg;
- global_worst = severity;
- m = &per_cpu(mces_seen, cpu);
- }
- }
-
- /*
- * Cannot recover? Panic here then.
- * This dumps all the mces in the log buffer and stops the
- * other CPUs.
- */
- if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3)
- mce_panic("Fatal Machine check", m, msg);
-
- /*
- * For UC somewhere we let the CPU who detects it handle it.
- * Also must let continue the others, otherwise the handling
- * CPU could deadlock on a lock.
- */
-
- /*
- * No machine check event found. Must be some external
- * source or one CPU is hung. Panic.
- */
- if (global_worst <= MCE_KEEP_SEVERITY && tolerant < 3)
- mce_panic("Machine check from unknown source", NULL, NULL);
-
- /*
- * Now clear all the mces_seen so that they don't reappear on
- * the next mce.
- */
- for_each_possible_cpu(cpu)
- memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
-}
-
-static atomic_t global_nwo;
-
-/*
- * Start of Monarch synchronization. This waits until all CPUs have
- * entered the exception handler and then determines if any of them
- * saw a fatal event that requires panic. Then it executes them
- * in the entry order.
- * TBD double check parallel CPU hotunplug
- */
-static int mce_start(int *no_way_out)
-{
- int order;
- int cpus = num_online_cpus();
- u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;
-
- if (!timeout)
- return -1;
-
- atomic_add(*no_way_out, &global_nwo);
- /*
- * global_nwo should be updated before mce_callin
- */
- smp_wmb();
- order = atomic_inc_return(&mce_callin);
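-	/* The first CPU to call in gets order == 1 and becomes the Monarch. */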
-
- /*
- * Wait for everyone.
- */
- while (atomic_read(&mce_callin) != cpus) {
- if (mce_timed_out(&timeout)) {
- atomic_set(&global_nwo, 0);
- return -1;
- }
- ndelay(SPINUNIT);
- }
-
- /*
- * mce_callin should be read before global_nwo
- */
- smp_rmb();
-
- if (order == 1) {
- /*
- * Monarch: Starts executing now, the others wait.
- */
- atomic_set(&mce_executing, 1);
- } else {
- /*
- * Subject: Now start the scanning loop one by one in
- * the original callin order.
-		 * This way, when there are any shared banks, an event is
-		 * only seen by one CPU before being cleared, avoiding duplicates.
- */
- while (atomic_read(&mce_executing) < order) {
- if (mce_timed_out(&timeout)) {
- atomic_set(&global_nwo, 0);
- return -1;
- }
- ndelay(SPINUNIT);
- }
- }
-
- /*
- * Cache the global no_way_out state.
- */
- *no_way_out = atomic_read(&global_nwo);
-
- return order;
-}
-
-/*
- * Synchronize between CPUs after main scanning loop.
- * This invokes the bulk of the Monarch processing.
- */
-static int mce_end(int order)
-{
- int ret = -1;
- u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;
-
- if (!timeout)
- goto reset;
- if (order < 0)
- goto reset;
-
- /*
- * Allow others to run.
- */
- atomic_inc(&mce_executing);
-
- if (order == 1) {
- /* CHECKME: Can this race with a parallel hotplug? */
- int cpus = num_online_cpus();
-
- /*
- * Monarch: Wait for everyone to go through their scanning
- * loops.
- */
- while (atomic_read(&mce_executing) <= cpus) {
- if (mce_timed_out(&timeout))
- goto reset;
- ndelay(SPINUNIT);
- }
-
- mce_reign();
- barrier();
- ret = 0;
- } else {
- /*
- * Subject: Wait for Monarch to finish.
- */
- while (atomic_read(&mce_executing) != 0) {
- if (mce_timed_out(&timeout))
- goto reset;
- ndelay(SPINUNIT);
- }
-
- /*
- * Don't reset anything. That's done by the Monarch.
- */
- return 0;
- }
-
- /*
- * Reset all global state.
- */
-reset:
- atomic_set(&global_nwo, 0);
- atomic_set(&mce_callin, 0);
- barrier();
-
- /*
- * Let others run again.
- */
- atomic_set(&mce_executing, 0);
- return ret;
-}
-
-/*
- * Check if the address reported by the CPU is in a format we can parse.
- * It would be possible to add code for most other cases, but all would
- * be somewhat complicated (e.g. segment offset would require an instruction
- * parser). So only support physical addresses up to page granularity for now.
- */
-static int mce_usable_address(struct mce *m)
-{
- if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
- return 0;
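-	/* The reported address granularity must be no coarser than a page. */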
- if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
- return 0;
- if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
- return 0;
- return 1;
-}
-
-static void mce_clear_state(unsigned long *toclear)
-{
- int i;
-
- for (i = 0; i < banks; i++) {
- if (test_bit(i, toclear))
- mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
- }
-}
-
-/*
- * We need to save the faulting physical address associated with a process
- * in the machine check handler some place where we can grab it back
- * later in mce_notify_process().
- */
-#define MCE_INFO_MAX 16
-
-struct mce_info {
- atomic_t inuse;
- struct task_struct *t;
- __u64 paddr;
- int restartable;
-} mce_info[MCE_INFO_MAX];
-
-static void mce_save_info(__u64 addr, int c)
-{
- struct mce_info *mi;
-
- for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++) {
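-		/* Atomically claim the first free slot (inuse: 0 -> 1). */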
- if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
- mi->t = current;
- mi->paddr = addr;
- mi->restartable = c;
- return;
- }
- }
-
- mce_panic("Too many concurrent recoverable errors", NULL, NULL);
-}
-
-static struct mce_info *mce_find_info(void)
-{
- struct mce_info *mi;
-
- for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++)
- if (atomic_read(&mi->inuse) && mi->t == current)
- return mi;
- return NULL;
-}
-
-static void mce_clear_info(struct mce_info *mi)
-{
- atomic_set(&mi->inuse, 0);
-}
-
-/*
- * The actual machine check handler. This only handles real
- * exceptions when something got corrupted coming in through int 18.
- *
- * This is executed in NMI context not subject to normal locking rules. This
- * implies that most kernel services cannot be safely used. Don't even
- * think about putting a printk in there!
- *
- * On Intel systems this is entered on all CPUs in parallel through
- * MCE broadcast. However some CPUs might be broken beyond repair,
- * so be always careful when synchronizing with others.
- */
-void do_machine_check(struct pt_regs *regs, long error_code)
-{
- struct mce m, *final;
- int i;
- int worst = 0;
- int severity;
- /*
- * Establish sequential order between the CPUs entering the machine
- * check handler.
- */
- int order;
- /*
- * If no_way_out gets set, there is no safe way to recover from this
- * MCE. If tolerant is cranked up, we'll try anyway.
- */
- int no_way_out = 0;
- /*
-	 * If kill_it gets set, we may have to kill the current process to
-	 * contain the error.
- */
- int kill_it = 0;
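-	/* Banks whose status registers we will clear on the way out. */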
- DECLARE_BITMAP(toclear, MAX_NR_BANKS);
- char *msg = "Unknown";
-
- atomic_inc(&mce_entry);
-
- percpu_inc(mce_exception_count);
-
- if (!banks)
- goto out;
-
- mce_gather_info(&m, regs);
-
- final = &__get_cpu_var(mces_seen);
- *final = m;
-
- no_way_out = mce_no_way_out(&m, &msg);
-
- barrier();
-
- /*
-	 * When there is no restart IP we might need to kill or panic.
-	 * Assume the worst for now, but if we find the
-	 * severity is MCE_AR_SEVERITY we have other options.
- */
- if (!(m.mcgstatus & MCG_STATUS_RIPV))
- kill_it = 1;
-
- /*
- * Go through all the banks in exclusion of the other CPUs.
- * This way we don't report duplicated events on shared banks
- * because the first one to see it will clear it.
- */
- order = mce_start(&no_way_out);
- for (i = 0; i < banks; i++) {
- __clear_bit(i, toclear);
- if (!mce_banks[i].ctl)
- continue;
-
- m.misc = 0;
- m.addr = 0;
- m.bank = i;
-
- m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
- if ((m.status & MCI_STATUS_VAL) == 0)
- continue;
-
- /*
-		 * Errors that are neither uncorrected nor signalled are
-		 * handled by machine_check_poll(). Leave them alone, unless
-		 * this panics.
- */
- if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
- !no_way_out)
- continue;
-
- /*
- * Set taint even when machine check was not enabled.
- */
- add_taint(TAINT_MACHINE_CHECK);
-
- severity = mce_severity(&m, tolerant, NULL);
-
- /*
-		 * When the machine check was for a corrected error, leave it
-		 * to the corrected-error handler and don't touch it here,
-		 * unless we're panicking.
- */
- if (severity == MCE_KEEP_SEVERITY && !no_way_out)
- continue;
- __set_bit(i, toclear);
- if (severity == MCE_NO_SEVERITY) {
- /*
- * Machine check event was not enabled. Clear, but
- * ignore.
- */
- continue;
- }
-
- mce_read_aux(&m, i);
-
- /*
- * Action optional error. Queue address for later processing.
- * When the ring overflows we just ignore the AO error.
- * RED-PEN add some logging mechanism when
-		 * mce_usable_address() or mce_ring_add() fails.
- * RED-PEN don't ignore overflow for tolerant == 0
- */
- if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
- mce_ring_add(m.addr >> PAGE_SHIFT);
-
- mce_log(&m);
-
- if (severity > worst) {
- *final = m;
- worst = severity;
- }
- }
-
- /* mce_clear_state will clear *final, save locally for use later */
- m = *final;
-
- if (!no_way_out)
- mce_clear_state(toclear);
-
- /*
- * Do most of the synchronization with other CPUs.
- * When there's any problem use only local no_way_out state.
- */
- if (mce_end(order) < 0)
- no_way_out = worst >= MCE_PANIC_SEVERITY;
-
- /*
- * At insane "tolerant" levels we take no action. Otherwise
- * we only die if we have no other choice. For less serious
- * issues we try to recover, or limit damage to the current
- * process.
- */
- if (tolerant < 3) {
- if (no_way_out)
- mce_panic("Fatal machine check on current CPU", &m, msg);
- if (worst == MCE_AR_SEVERITY) {
- /* schedule action before return to userland */
- mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV);
- set_thread_flag(TIF_MCE_NOTIFY);
- } else if (kill_it) {
- force_sig(SIGBUS, current);
- }
- }
-
- if (worst > 0)
- mce_report_event(regs);
- mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
-out:
- atomic_dec(&mce_entry);
- sync_core();
-}
-EXPORT_SYMBOL_GPL(do_machine_check);
-
-#ifndef CONFIG_MEMORY_FAILURE
-int memory_failure(unsigned long pfn, int vector, int flags)
-{
- /* mce_severity() should not hand us an ACTION_REQUIRED error */
- BUG_ON(flags & MF_ACTION_REQUIRED);
- printk(KERN_ERR "Uncorrected memory error in page 0x%lx ignored\n"
- "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n", pfn);
-
- return 0;
-}
-#endif
-
-/*
- * Called in the process context that was interrupted by the MCE and marked
- * with TIF_MCE_NOTIFY, just before returning to erroneous userland.
- * This code is allowed to sleep.
- * Attempt possible recovery such as calling the high level VM handler to
- * process any corrupted pages, and kill/signal current process if required.
- * Action required errors are handled here.
- */
-void mce_notify_process(void)
-{
- unsigned long pfn;
- struct mce_info *mi = mce_find_info();
-
- if (!mi)
- mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
- pfn = mi->paddr >> PAGE_SHIFT;
-
- clear_thread_flag(TIF_MCE_NOTIFY);
-
-	pr_err("Uncorrected hardware memory error in user-access at %llx\n",
- mi->paddr);
- /*
- * We must call memory_failure() here even if the current process is
- * doomed. We still need to mark the page as poisoned and alert any
- * other users of the page.
- */
- if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 ||
- mi->restartable == 0) {
-		pr_err("Memory error not recovered\n");
- force_sig(SIGBUS, current);
- }
- mce_clear_info(mi);
-}
-
-/*
- * Action optional processing happens here (picking up
- * from the list of faulting pages that do_machine_check()
- * placed into the "ring").
- */
-static void mce_process_work(struct work_struct *dummy)
-{
- unsigned long pfn;
-
- while (mce_ring_get(&pfn))
- memory_failure(pfn, MCE_VECTOR, 0);
-}
-
-#ifdef CONFIG_X86_MCE_INTEL
-/**
- * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
- * @cpu: The CPU on which the event occurred.
- * @status: Event status information
- *
- * This function should be called by the thermal interrupt after the
- * event has been processed and the decision was made to log the event
- * further.
- *
- * The status parameter will be saved to the 'status' field of 'struct mce'
- * and historically has been the register value of the
- * MSR_IA32_THERMAL_STATUS (Intel) msr.
- */
-void mce_log_therm_throt_event(__u64 status)
-{
- struct mce m;
-
- mce_setup(&m);
- m.bank = MCE_THERMAL_BANK;
- m.status = status;
- mce_log(&m);
-}
-#endif /* CONFIG_X86_MCE_INTEL */
-
-/*
- * Periodic polling timer for "silent" machine check errors. If the
- * poller finds an MCE, poll 2x faster. When the poller finds no more
- * errors, poll 2x slower (up to check_interval seconds).
- */
-static int check_interval = 5 * 60; /* 5 minutes */
-
-static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
-static DEFINE_PER_CPU(struct timer_list, mce_timer);
-
-static void mce_start_timer(unsigned long data)
-{
- struct timer_list *t = &per_cpu(mce_timer, data);
- int *n;
-
- WARN_ON(smp_processor_id() != data);
-
- if (mce_available(__this_cpu_ptr(&cpu_info))) {
- machine_check_poll(MCP_TIMESTAMP,
- &__get_cpu_var(mce_poll_banks));
- }
-
- /*
- * Alert userspace if needed. If we logged an MCE, reduce the
- * polling interval, otherwise increase the polling interval.
- */
- n = &__get_cpu_var(mce_next_interval);
- if (mce_notify_irq())
- *n = max(*n/2, HZ/100);
- else
- *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
-
- t->expires = jiffies + *n;
- add_timer_on(t, smp_processor_id());
-}
-
-/* Must not be called in IRQ context where del_timer_sync() can deadlock */
-static void mce_timer_delete_all(void)
-{
- int cpu;
-
- for_each_online_cpu(cpu)
- del_timer_sync(&per_cpu(mce_timer, cpu));
-}
-
-static void mce_do_trigger(struct work_struct *work)
-{
- call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
-}
-
-static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
-
-/*
- * Notify the user(s) about new machine check events.
- * Can be called from interrupt context, but not from machine check/NMI
- * context.
- */
-int mce_notify_irq(void)
-{
- /* Not more than two messages every minute */
- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
-
- if (test_and_clear_bit(0, &mce_need_notify)) {
- /* wake processes polling /dev/mcelog */
- wake_up_interruptible(&mce_chrdev_wait);
-
- /*
- * There is no risk of missing notifications because
- * work_pending is always cleared before the function is
- * executed.
- */
- if (mce_helper[0] && !work_pending(&mce_trigger_work))
- schedule_work(&mce_trigger_work);
-
- if (__ratelimit(&ratelimit))
- pr_info(HW_ERR "Machine check events logged\n");
-
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(mce_notify_irq);
-
-static int __cpuinit __mcheck_cpu_mce_banks_init(void)
-{
- int i;
-
- mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL);
- if (!mce_banks)
- return -ENOMEM;
- for (i = 0; i < banks; i++) {
- struct mce_bank *b = &mce_banks[i];
-
- b->ctl = -1ULL;
- b->init = 1;
- }
- return 0;
-}
-
-/*
- * Initialize Machine Checks for a CPU.
- */
-static int __cpuinit __mcheck_cpu_cap_init(void)
-{
- unsigned b;
- u64 cap;
-
- rdmsrl(MSR_IA32_MCG_CAP, cap);
-
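-	/* MCG_CAP[7:0] is the bank count; report it only on the first CPU. */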
- b = cap & MCG_BANKCNT_MASK;
- if (!banks)
- printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);
-
- if (b > MAX_NR_BANKS) {
- printk(KERN_WARNING
- "MCE: Using only %u machine check banks out of %u\n",
- MAX_NR_BANKS, b);
- b = MAX_NR_BANKS;
- }
-
- /* Don't support asymmetric configurations today */
- WARN_ON(banks != 0 && b != banks);
- banks = b;
- if (!mce_banks) {
- int err = __mcheck_cpu_mce_banks_init();
-
- if (err)
- return err;
- }
-
- /* Use accurate RIP reporting if available. */
- if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
- rip_msr = MSR_IA32_MCG_EIP;
-
- if (cap & MCG_SER_P)
- mce_ser = 1;
-
- return 0;
-}
-
-static void __mcheck_cpu_init_generic(void)
-{
- mce_banks_t all_banks;
- u64 cap;
- int i;
-
- /*
- * Log the machine checks left over from the previous reset.
- */
- bitmap_fill(all_banks, MAX_NR_BANKS);
- machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);
-
- set_in_cr4(X86_CR4_MCE);
-
- rdmsrl(MSR_IA32_MCG_CAP, cap);
- if (cap & MCG_CTL_P)
- wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
-
- for (i = 0; i < banks; i++) {
- struct mce_bank *b = &mce_banks[i];
-
- if (!b->init)
- continue;
- wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
- wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
- }
-}
-
-/* Add per CPU specific workarounds here */
-static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
-{
- if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
- pr_info("MCE: unknown CPU type - not enabling MCE support.\n");
- return -EOPNOTSUPP;
- }
-
- /* This should be disabled by the BIOS, but isn't always */
- if (c->x86_vendor == X86_VENDOR_AMD) {
- if (c->x86 == 15 && banks > 4) {
- /*
- * disable GART TBL walk error reporting, which
- * trips off incorrectly with the IOMMU & 3ware
- * & Cerberus:
- */
- clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
- }
- if (c->x86 <= 17 && mce_bootlog < 0) {
- /*
-			 * Lots of broken BIOSes around that don't clear them
- * by default and leave crap in there. Don't log:
- */
- mce_bootlog = 0;
- }
- /*
- * Various K7s with broken bank 0 around. Always disable
- * by default.
- */
- if (c->x86 == 6 && banks > 0)
- mce_banks[0].ctl = 0;
- }
-
- if (c->x86_vendor == X86_VENDOR_INTEL) {
- /*
- * SDM documents that on family 6 bank 0 should not be written
- * because it aliases to another special BIOS controlled
- * register.
-		 * But it's not aliased anymore on model 0x1a+.
- * Don't ignore bank 0 completely because there could be a
- * valid event later, merely don't write CTL0.
- */
-
- if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0)
- mce_banks[0].init = 0;
-
- /*
- * All newer Intel systems support MCE broadcasting. Enable
- * synchronization with a one second timeout.
- */
- if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
- monarch_timeout < 0)
- monarch_timeout = USEC_PER_SEC;
-
- /*
- * There are also broken BIOSes on some Pentium M and
- * earlier systems:
- */
- if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
- mce_bootlog = 0;
- }
- if (monarch_timeout < 0)
- monarch_timeout = 0;
- if (mce_bootlog != 0)
- mce_panic_timeout = 30;
-
- return 0;
-}
-
-static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
-{
- if (c->x86 != 5)
- return 0;
-
- switch (c->x86_vendor) {
- case X86_VENDOR_INTEL:
- intel_p5_mcheck_init(c);
-		return 1;
- case X86_VENDOR_CENTAUR:
- winchip_mcheck_init(c);
-		return 1;
- }
-
- return 0;
-}
-
-static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
-{
- switch (c->x86_vendor) {
- case X86_VENDOR_INTEL:
- mce_intel_feature_init(c);
- break;
- case X86_VENDOR_AMD:
- mce_amd_feature_init(c);
- break;
- default:
- break;
- }
-}
-
-static void __mcheck_cpu_init_timer(void)
-{
- struct timer_list *t = &__get_cpu_var(mce_timer);
- int *n = &__get_cpu_var(mce_next_interval);
-
- setup_timer(t, mce_start_timer, smp_processor_id());
-
- if (mce_ignore_ce)
- return;
-
- *n = check_interval * HZ;
- if (!*n)
- return;
- t->expires = round_jiffies(jiffies + *n);
- add_timer_on(t, smp_processor_id());
-}
-
-/* Handle unconfigured int18 (should never happen) */
-static void unexpected_machine_check(struct pt_regs *regs, long error_code)
-{
- printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
- smp_processor_id());
-}
-
-/* Call the installed machine check handler for this CPU setup. */
-void (*machine_check_vector)(struct pt_regs *, long error_code) =
- unexpected_machine_check;
-
-/*
- * Called for each booted CPU to set up machine checks.
- * Must be called with preempt off:
- */
-void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
-{
- if (mce_disabled)
- return;
-
- if (__mcheck_cpu_ancient_init(c))
- return;
-
- if (!mce_available(c))
- return;
-
- if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
- mce_disabled = 1;
- return;
- }
-
- machine_check_vector = do_machine_check;
-
- __mcheck_cpu_init_generic();
- __mcheck_cpu_init_vendor(c);
- __mcheck_cpu_init_timer();
- INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
- init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
-}
-
-/*
- * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
- */
-
-static DEFINE_SPINLOCK(mce_chrdev_state_lock);
-static int mce_chrdev_open_count; /* #times opened */
-static int mce_chrdev_open_exclu; /* already open exclusive? */
-
-static int mce_chrdev_open(struct inode *inode, struct file *file)
-{
- spin_lock(&mce_chrdev_state_lock);
-
- if (mce_chrdev_open_exclu ||
- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
- spin_unlock(&mce_chrdev_state_lock);
-
- return -EBUSY;
- }
-
- if (file->f_flags & O_EXCL)
- mce_chrdev_open_exclu = 1;
- mce_chrdev_open_count++;
-
- spin_unlock(&mce_chrdev_state_lock);
-
- return nonseekable_open(inode, file);
-}
-
-static int mce_chrdev_release(struct inode *inode, struct file *file)
-{
- spin_lock(&mce_chrdev_state_lock);
-
- mce_chrdev_open_count--;
- mce_chrdev_open_exclu = 0;
-
- spin_unlock(&mce_chrdev_state_lock);
-
- return 0;
-}
-
-static void collect_tscs(void *data)
-{
- unsigned long *cpu_tsc = (unsigned long *)data;
-
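-	/* Snapshot this CPU's TSC so the reader can skip entries logged afterwards. */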
- rdtscll(cpu_tsc[smp_processor_id()]);
-}
-
-static int mce_apei_read_done;
-
-/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
-static int __mce_read_apei(char __user **ubuf, size_t usize)
-{
- int rc;
- u64 record_id;
- struct mce m;
-
- if (usize < sizeof(struct mce))
- return -EINVAL;
-
- rc = apei_read_mce(&m, &record_id);
- /* Error or no more MCE record */
- if (rc <= 0) {
- mce_apei_read_done = 1;
- /*
- * When ERST is disabled, mce_chrdev_read() should return
- * "no record" instead of "no device."
- */
- if (rc == -ENODEV)
- return 0;
- return rc;
- }
- rc = -EFAULT;
- if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
- return rc;
- /*
-	 * In fact, we should clear the record only after it has been
-	 * flushed to disk or sent over the network by /sbin/mcelog, but
-	 * we have no interface to support that now, so just clear it
-	 * here to avoid duplication.
- */
- rc = apei_clear_mce(record_id);
- if (rc) {
- mce_apei_read_done = 1;
- return rc;
- }
- *ubuf += sizeof(struct mce);
-
- return 0;
-}
-
-static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
- size_t usize, loff_t *off)
-{
- char __user *buf = ubuf;
- unsigned long *cpu_tsc;
- unsigned prev, next;
- int i, err;
-
- cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
- if (!cpu_tsc)
- return -ENOMEM;
-
- mutex_lock(&mce_chrdev_read_mutex);
-
- if (!mce_apei_read_done) {
- err = __mce_read_apei(&buf, usize);
- if (err || buf != ubuf)
- goto out;
- }
-
- next = rcu_dereference_check_mce(mcelog.next);
-
- /* Only supports full reads right now */
- err = -EINVAL;
- if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
- goto out;
-
- err = 0;
- prev = 0;
- do {
- for (i = prev; i < next; i++) {
- unsigned long start = jiffies;
- struct mce *m = &mcelog.entry[i];
-
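-			/* If the writer doesn't mark the entry finished within ~2 jiffies, drop it. */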
- while (!m->finished) {
- if (time_after_eq(jiffies, start + 2)) {
- memset(m, 0, sizeof(*m));
- goto timeout;
- }
- cpu_relax();
- }
- smp_rmb();
- err |= copy_to_user(buf, m, sizeof(*m));
- buf += sizeof(*m);
-timeout:
- ;
- }
-
- memset(mcelog.entry + prev, 0,
- (next - prev) * sizeof(struct mce));
- prev = next;
- next = cmpxchg(&mcelog.next, prev, 0);
- } while (next != prev);
-
- synchronize_sched();
-
- /*
- * Collect entries that were still getting written before the
- * synchronize.
- */
- on_each_cpu(collect_tscs, cpu_tsc, 1);
-
- for (i = next; i < MCE_LOG_LEN; i++) {
- struct mce *m = &mcelog.entry[i];
-
- if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
- err |= copy_to_user(buf, m, sizeof(*m));
- smp_rmb();
- buf += sizeof(*m);
- memset(m, 0, sizeof(*m));
- }
- }
-
- if (err)
- err = -EFAULT;
-
-out:
- mutex_unlock(&mce_chrdev_read_mutex);
- kfree(cpu_tsc);
-
- return err ? err : buf - ubuf;
-}
-
-static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
-{
- poll_wait(file, &mce_chrdev_wait, wait);
- if (rcu_access_index(mcelog.next))
- return POLLIN | POLLRDNORM;
- if (!mce_apei_read_done && apei_check_mce())
- return POLLIN | POLLRDNORM;
- return 0;
-}
-
-static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
- unsigned long arg)
-{
- int __user *p = (int __user *)arg;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- switch (cmd) {
- case MCE_GET_RECORD_LEN:
- return put_user(sizeof(struct mce), p);
- case MCE_GET_LOG_LEN:
- return put_user(MCE_LOG_LEN, p);
- case MCE_GETCLEAR_FLAGS: {
- unsigned flags;
-
- do {
- flags = mcelog.flags;
- } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
-
- return put_user(flags, p);
- }
- default:
- return -ENOTTY;
- }
-}
-
-static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
- size_t usize, loff_t *off);
-
-void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
- const char __user *ubuf,
- size_t usize, loff_t *off))
-{
- mce_write = fn;
-}
-EXPORT_SYMBOL_GPL(register_mce_write_callback);
-
-ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
- size_t usize, loff_t *off)
-{
- if (mce_write)
- return mce_write(filp, ubuf, usize, off);
- else
- return -EINVAL;
-}
-
-static const struct file_operations mce_chrdev_ops = {
- .open = mce_chrdev_open,
- .release = mce_chrdev_release,
- .read = mce_chrdev_read,
- .write = mce_chrdev_write,
- .poll = mce_chrdev_poll,
- .unlocked_ioctl = mce_chrdev_ioctl,
- .llseek = no_llseek,
-};
-
-static struct miscdevice mce_chrdev_device = {
- MISC_MCELOG_MINOR,
- "mcelog",
- &mce_chrdev_ops,
-};
-
-/*
- * mce=off Disables machine check
- * mce=no_cmci Disables CMCI
- * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
- * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
- * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
- * monarchtimeout is how long to wait for other CPUs on machine
- * check, or 0 to not wait
- * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
- * mce=nobootlog Don't log MCEs from before booting.
- */
-static int __init mcheck_enable(char *str)
-{
- if (*str == 0) {
- enable_p5_mce();
- return 1;
- }
- if (*str == '=')
- str++;
- if (!strcmp(str, "off"))
- mce_disabled = 1;
- else if (!strcmp(str, "no_cmci"))
- mce_cmci_disabled = 1;
- else if (!strcmp(str, "dont_log_ce"))
- mce_dont_log_ce = 1;
- else if (!strcmp(str, "ignore_ce"))
- mce_ignore_ce = 1;
- else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
- mce_bootlog = (str[0] == 'b');
- else if (isdigit(str[0])) {
- get_option(&str, &tolerant);
- if (*str == ',') {
- ++str;
- get_option(&str, &monarch_timeout);
- }
- } else {
- printk(KERN_INFO "mce argument %s ignored. Please use /sys\n",
- str);
- return 0;
- }
- return 1;
-}
-__setup("mce", mcheck_enable);
-
-int __init mcheck_init(void)
-{
- mcheck_intel_therm_init();
-
- return 0;
-}
-
-/*
- * mce_syscore: PM support
- */
-
-/*
- * Disable machine checks on suspend and shutdown. We can't really handle
- * them later.
- */
-static int mce_disable_error_reporting(void)
-{
- int i;
-
- for (i = 0; i < banks; i++) {
- struct mce_bank *b = &mce_banks[i];
-
- if (b->init)
- wrmsrl(MSR_IA32_MCx_CTL(i), 0);
- }
- return 0;
-}
-
-static int mce_syscore_suspend(void)
-{
- return mce_disable_error_reporting();
-}
-
-static void mce_syscore_shutdown(void)
-{
- mce_disable_error_reporting();
-}
-
-/*
- * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
- * Only one CPU is active at this time, the others get re-added later using
- * CPU hotplug:
- */
-static void mce_syscore_resume(void)
-{
- __mcheck_cpu_init_generic();
- __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
-}
-
-static struct syscore_ops mce_syscore_ops = {
- .suspend = mce_syscore_suspend,
- .shutdown = mce_syscore_shutdown,
- .resume = mce_syscore_resume,
-};
-
-/*
- * mce_device: Sysfs support
- */
-
-static void mce_cpu_restart(void *data)
-{
- if (!mce_available(__this_cpu_ptr(&cpu_info)))
- return;
- __mcheck_cpu_init_generic();
- __mcheck_cpu_init_timer();
-}
-
-/* Reinit MCEs after user configuration changes */
-static void mce_restart(void)
-{
- mce_timer_delete_all();
- on_each_cpu(mce_cpu_restart, NULL, 1);
-}
-
-/* Toggle features for corrected errors */
-static void mce_disable_cmci(void *data)
-{
- if (!mce_available(__this_cpu_ptr(&cpu_info)))
- return;
- cmci_clear();
-}
-
-static void mce_enable_ce(void *all)
-{
- if (!mce_available(__this_cpu_ptr(&cpu_info)))
- return;
- cmci_reenable();
- cmci_recheck();
- if (all)
- __mcheck_cpu_init_timer();
-}
-
-static struct bus_type mce_subsys = {
- .name = "machinecheck",
- .dev_name = "machinecheck",
-};
-
-DEFINE_PER_CPU(struct device *, mce_device);
-
-__cpuinitdata
-void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
-
-static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
-{
- return container_of(attr, struct mce_bank, attr);
-}
-
-static ssize_t show_bank(struct device *s, struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
-}
-
-static ssize_t set_bank(struct device *s, struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u64 new;
-
- if (strict_strtoull(buf, 0, &new) < 0)
- return -EINVAL;
-
- attr_to_bank(attr)->ctl = new;
- mce_restart();
-
- return size;
-}
-
-static ssize_t
-show_trigger(struct device *s, struct device_attribute *attr, char *buf)
-{
- strcpy(buf, mce_helper);
- strcat(buf, "\n");
- return strlen(mce_helper) + 1;
-}
-
-static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
- const char *buf, size_t siz)
-{
- char *p;
-
- strncpy(mce_helper, buf, sizeof(mce_helper));
- mce_helper[sizeof(mce_helper)-1] = 0;
- p = strchr(mce_helper, '\n');
-
- if (p)
- *p = 0;
-
- return strlen(mce_helper) + !!p;
-}
-
-static ssize_t set_ignore_ce(struct device *s,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u64 new;
-
- if (strict_strtoull(buf, 0, &new) < 0)
- return -EINVAL;
-
- if (mce_ignore_ce ^ !!new) {
- if (new) {
- /* disable ce features */
- mce_timer_delete_all();
- on_each_cpu(mce_disable_cmci, NULL, 1);
- mce_ignore_ce = 1;
- } else {
- /* enable ce features */
- mce_ignore_ce = 0;
- on_each_cpu(mce_enable_ce, (void *)1, 1);
- }
- }
- return size;
-}
-
-static ssize_t set_cmci_disabled(struct device *s,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u64 new;
-
- if (strict_strtoull(buf, 0, &new) < 0)
- return -EINVAL;
-
- if (mce_cmci_disabled ^ !!new) {
- if (new) {
- /* disable cmci */
- on_each_cpu(mce_disable_cmci, NULL, 1);
- mce_cmci_disabled = 1;
- } else {
- /* enable cmci */
- mce_cmci_disabled = 0;
- on_each_cpu(mce_enable_ce, NULL, 1);
- }
- }
- return size;
-}
-
-static ssize_t store_int_with_restart(struct device *s,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- ssize_t ret = device_store_int(s, attr, buf, size);
- mce_restart();
- return ret;
-}
-
-static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
-static DEVICE_INT_ATTR(tolerant, 0644, tolerant);
-static DEVICE_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
-static DEVICE_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);
-
-static struct dev_ext_attribute dev_attr_check_interval = {
- __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
- &check_interval
-};
-
-static struct dev_ext_attribute dev_attr_ignore_ce = {
- __ATTR(ignore_ce, 0644, device_show_int, set_ignore_ce),
- &mce_ignore_ce
-};
-
-static struct dev_ext_attribute dev_attr_cmci_disabled = {
- __ATTR(cmci_disabled, 0644, device_show_int, set_cmci_disabled),
- &mce_cmci_disabled
-};
-
-static struct device_attribute *mce_device_attrs[] = {
- &dev_attr_tolerant.attr,
- &dev_attr_check_interval.attr,
- &dev_attr_trigger,
- &dev_attr_monarch_timeout.attr,
- &dev_attr_dont_log_ce.attr,
- &dev_attr_ignore_ce.attr,
- &dev_attr_cmci_disabled.attr,
- NULL
-};
-
-static cpumask_var_t mce_device_initialized;
-
-static void mce_device_release(struct device *dev)
-{
- kfree(dev);
-}
-
-/* Per cpu device init. All of the cpus still share the same ctrl bank: */
-static __cpuinit int mce_device_create(unsigned int cpu)
-{
- struct device *dev;
- int err;
- int i, j;
-
- if (!mce_available(&boot_cpu_data))
- return -EIO;
-
- dev = kzalloc(sizeof *dev, GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
- dev->id = cpu;
- dev->bus = &mce_subsys;
- dev->release = &mce_device_release;
-
- err = device_register(dev);
- if (err)
- return err;
-
- for (i = 0; mce_device_attrs[i]; i++) {
- err = device_create_file(dev, mce_device_attrs[i]);
- if (err)
- goto error;
- }
- for (j = 0; j < banks; j++) {
- err = device_create_file(dev, &mce_banks[j].attr);
- if (err)
- goto error2;
- }
- cpumask_set_cpu(cpu, mce_device_initialized);
- per_cpu(mce_device, cpu) = dev;
-
- return 0;
-error2:
- while (--j >= 0)
- device_remove_file(dev, &mce_banks[j].attr);
-error:
- while (--i >= 0)
- device_remove_file(dev, mce_device_attrs[i]);
-
- device_unregister(dev);
-
- return err;
-}
-
-static __cpuinit void mce_device_remove(unsigned int cpu)
-{
- struct device *dev = per_cpu(mce_device, cpu);
- int i;
-
- if (!cpumask_test_cpu(cpu, mce_device_initialized))
- return;
-
- for (i = 0; mce_device_attrs[i]; i++)
- device_remove_file(dev, mce_device_attrs[i]);
-
- for (i = 0; i < banks; i++)
- device_remove_file(dev, &mce_banks[i].attr);
-
- device_unregister(dev);
- cpumask_clear_cpu(cpu, mce_device_initialized);
- per_cpu(mce_device, cpu) = NULL;
-}
-
-/* Make sure there are no machine checks on offlined CPUs. */
-static void __cpuinit mce_disable_cpu(void *h)
-{
- unsigned long action = *(unsigned long *)h;
- int i;
-
- if (!mce_available(__this_cpu_ptr(&cpu_info)))
- return;
-
- if (!(action & CPU_TASKS_FROZEN))
- cmci_clear();
- for (i = 0; i < banks; i++) {
- struct mce_bank *b = &mce_banks[i];
-
- if (b->init)
- wrmsrl(MSR_IA32_MCx_CTL(i), 0);
- }
-}
-
-static void __cpuinit mce_reenable_cpu(void *h)
-{
- unsigned long action = *(unsigned long *)h;
- int i;
-
- if (!mce_available(__this_cpu_ptr(&cpu_info)))
- return;
-
- if (!(action & CPU_TASKS_FROZEN))
- cmci_reenable();
- for (i = 0; i < banks; i++) {
- struct mce_bank *b = &mce_banks[i];
-
- if (b->init)
- wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
- }
-}
-
-/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int __cpuinit
-mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- struct timer_list *t = &per_cpu(mce_timer, cpu);
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- mce_device_create(cpu);
- if (threshold_cpu_callback)
- threshold_cpu_callback(action, cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- if (threshold_cpu_callback)
- threshold_cpu_callback(action, cpu);
- mce_device_remove(cpu);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- del_timer_sync(t);
- smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
- break;
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- if (!mce_ignore_ce && check_interval) {
- t->expires = round_jiffies(jiffies +
- __get_cpu_var(mce_next_interval));
- add_timer_on(t, cpu);
- }
- smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
- break;
- case CPU_POST_DEAD:
- /* intentionally ignoring frozen here */
- cmci_rediscover(cpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
- .notifier_call = mce_cpu_callback,
-};
-
-static __init void mce_init_banks(void)
-{
- int i;
-
- for (i = 0; i < banks; i++) {
- struct mce_bank *b = &mce_banks[i];
- struct device_attribute *a = &b->attr;
-
- sysfs_attr_init(&a->attr);
- a->attr.name = b->attrname;
- snprintf(b->attrname, ATTR_LEN, "bank%d", i);
-
- a->attr.mode = 0644;
- a->show = show_bank;
- a->store = set_bank;
- }
-}
-
-static __init int mcheck_init_device(void)
-{
- int err;
- int i = 0;
-
- if (!mce_available(&boot_cpu_data))
- return -EIO;
-
- zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
-
- mce_init_banks();
-
- err = subsys_system_register(&mce_subsys, NULL);
- if (err)
- return err;
-
- for_each_online_cpu(i) {
- err = mce_device_create(i);
- if (err)
- return err;
- }
-
- register_syscore_ops(&mce_syscore_ops);
- register_hotcpu_notifier(&mce_cpu_notifier);
-
- /* register character device /dev/mcelog */
- misc_register(&mce_chrdev_device);
-
- return err;
-}
-device_initcall(mcheck_init_device);
-
-/*
- * Old style boot options parsing. Only for compatibility.
- */
-static int __init mcheck_disable(char *str)
-{
- mce_disabled = 1;
- return 1;
-}
-__setup("nomce", mcheck_disable);
-
-#ifdef CONFIG_DEBUG_FS
-struct dentry *mce_get_debugfs_dir(void)
-{
- static struct dentry *dmce;
-
- if (!dmce)
- dmce = debugfs_create_dir("mce", NULL);
-
- return dmce;
-}
-
-static void mce_reset(void)
-{
- cpu_missing = 0;
- atomic_set(&mce_fake_paniced, 0);
- atomic_set(&mce_executing, 0);
- atomic_set(&mce_callin, 0);
- atomic_set(&global_nwo, 0);
-}
-
-static int fake_panic_get(void *data, u64 *val)
-{
- *val = fake_panic;
- return 0;
-}
-
-static int fake_panic_set(void *data, u64 val)
-{
- mce_reset();
- fake_panic = val;
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
- fake_panic_set, "%llu\n");
-
-static int __init mcheck_debugfs_init(void)
-{
- struct dentry *dmce, *ffake_panic;
-
- dmce = mce_get_debugfs_dir();
- if (!dmce)
- return -ENOMEM;
- ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
- &fake_panic_fops);
- if (!ffake_panic)
- return -ENOMEM;
-
- return 0;
-}
-late_initcall(mcheck_debugfs_init);
-#endif
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce_amd.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce_amd.c
deleted file mode 100644
index 2c1d178b..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ /dev/null
@@ -1,775 +0,0 @@
-/*
- * (c) 2005, 2006 Advanced Micro Devices, Inc.
- * Your use of this code is subject to the terms and conditions of the
- * GNU general public license version 2. See "COPYING" or
- * http://www.gnu.org/licenses/gpl.html
- *
- * Written by Jacob Shin - AMD, Inc.
- *
- * Support : jacob.shin@amd.com
- *
- * April 2006
- * - added support for AMD Family 0x10 processors
- *
- * All MC4_MISCi registers are shared between the cores of a multi-core package
- */
-#include <linux/interrupt.h>
-#include <linux/notifier.h>
-#include <linux/kobject.h>
-#include <linux/percpu.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/sysfs.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/cpu.h>
-#include <linux/smp.h>
-
-#include <asm/apic.h>
-#include <asm/idle.h>
-#include <asm/mce.h>
-#include <asm/msr.h>
-
-#define NR_BANKS 6
-#define NR_BLOCKS 9
-#define THRESHOLD_MAX 0xFFF
-#define INT_TYPE_APIC 0x00020000
-#define MASK_VALID_HI 0x80000000
-#define MASK_CNTP_HI 0x40000000
-#define MASK_LOCKED_HI 0x20000000
-#define MASK_LVTOFF_HI 0x00F00000
-#define MASK_COUNT_EN_HI 0x00080000
-#define MASK_INT_TYPE_HI 0x00060000
-#define MASK_OVERFLOW_HI 0x00010000
-#define MASK_ERR_COUNT_HI 0x00000FFF
-#define MASK_BLKPTR_LO 0xFF000000
-#define MCG_XBLK_ADDR 0xC0000400
-
-struct threshold_block {
- unsigned int block;
- unsigned int bank;
- unsigned int cpu;
- u32 address;
- u16 interrupt_enable;
- bool interrupt_capable;
- u16 threshold_limit;
- struct kobject kobj;
- struct list_head miscj;
-};
-
-struct threshold_bank {
- struct kobject *kobj;
- struct threshold_block *blocks;
- cpumask_var_t cpus;
-};
-static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
-
-static unsigned char shared_bank[NR_BANKS] = {
- 0, 0, 0, 0, 1
-};
-
-static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
-
-static void amd_threshold_interrupt(void);
-
-/*
- * CPU Initialization
- */
-
-struct thresh_restart {
- struct threshold_block *b;
- int reset;
- int set_lvt_off;
- int lvt_off;
- u16 old_limit;
-};
-
-static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
-{
- /*
- * bank 4 supports APIC LVT interrupts implicitly since forever.
- */
- if (bank == 4)
- return true;
-
- /*
- * IntP: interrupt present; if this bit is set, the thresholding
- * bank can generate APIC LVT interrupts
- */
- return msr_high_bits & BIT(28);
-}
-
-static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
-{
- int msr = (hi & MASK_LVTOFF_HI) >> 20;
-
- if (apic < 0) {
- pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
- "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
- b->bank, b->block, b->address, hi, lo);
- return 0;
- }
-
- if (apic != msr) {
- pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
- "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
- b->cpu, apic, b->bank, b->block, b->address, hi, lo);
- return 0;
- }
-
- return 1;
-};
-
-/*
- * Called via smp_call_function_single(), must be called with correct
- * cpu affinity.
- */
-static void threshold_restart_bank(void *_tr)
-{
- struct thresh_restart *tr = _tr;
- u32 hi, lo;
-
- rdmsr(tr->b->address, lo, hi);
-
- if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
- tr->reset = 1; /* limit cannot be lower than err count */
-
- if (tr->reset) { /* reset err count and overflow bit */
- hi =
- (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
- (THRESHOLD_MAX - tr->b->threshold_limit);
- } else if (tr->old_limit) { /* change limit w/o reset */
- int new_count = (hi & THRESHOLD_MAX) +
- (tr->old_limit - tr->b->threshold_limit);
-
- hi = (hi & ~MASK_ERR_COUNT_HI) |
- (new_count & THRESHOLD_MAX);
- }
-
- /* clear IntType */
- hi &= ~MASK_INT_TYPE_HI;
-
- if (!tr->b->interrupt_capable)
- goto done;
-
- if (tr->set_lvt_off) {
- if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
- /* set new lvt offset */
- hi &= ~MASK_LVTOFF_HI;
- hi |= tr->lvt_off << 20;
- }
- }
-
- if (tr->b->interrupt_enable)
- hi |= INT_TYPE_APIC;
-
- done:
-
- hi |= MASK_COUNT_EN_HI;
- wrmsr(tr->b->address, lo, hi);
-}
-
-static void mce_threshold_block_init(struct threshold_block *b, int offset)
-{
- struct thresh_restart tr = {
- .b = b,
- .set_lvt_off = 1,
- .lvt_off = offset,
- };
-
- b->threshold_limit = THRESHOLD_MAX;
- threshold_restart_bank(&tr);
-};
-
-static int setup_APIC_mce(int reserved, int new)
-{
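-	/* Reserve the LVT offset the first time; after that, keep the reserved one. */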
- if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
- APIC_EILVT_MSG_FIX, 0))
- return new;
-
- return reserved;
-}
-
-/* cpu init entry point, called from mce.c with preempt off */
-void mce_amd_feature_init(struct cpuinfo_x86 *c)
-{
- struct threshold_block b;
- unsigned int cpu = smp_processor_id();
- u32 low = 0, high = 0, address = 0;
- unsigned int bank, block;
- int offset = -1;
-
- for (bank = 0; bank < NR_BANKS; ++bank) {
- for (block = 0; block < NR_BLOCKS; ++block) {
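-			/*
-			 * Block 0 is MCx_MISC itself.  Block 1 follows the
-			 * block pointer into the extended MSR space; later
-			 * blocks are contiguous after that.
-			 */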
- if (block == 0)
- address = MSR_IA32_MC0_MISC + bank * 4;
- else if (block == 1) {
- address = (low & MASK_BLKPTR_LO) >> 21;
- if (!address)
- break;
-
- address += MCG_XBLK_ADDR;
- } else
- ++address;
-
- if (rdmsr_safe(address, &low, &high))
- break;
-
- if (!(high & MASK_VALID_HI))
- continue;
-
- if (!(high & MASK_CNTP_HI) ||
- (high & MASK_LOCKED_HI))
- continue;
-
- if (!block)
- per_cpu(bank_map, cpu) |= (1 << bank);
- if (shared_bank[bank] && c->cpu_core_id)
- break;
-
- memset(&b, 0, sizeof(b));
- b.cpu = cpu;
- b.bank = bank;
- b.block = block;
- b.address = address;
- b.interrupt_capable = lvt_interrupt_supported(bank, high);
-
- if (b.interrupt_capable) {
- int new = (high & MASK_LVTOFF_HI) >> 20;
- offset = setup_APIC_mce(offset, new);
- }
-
- mce_threshold_block_init(&b, offset);
- mce_threshold_vector = amd_threshold_interrupt;
- }
- }
-}
-
-/*
- * APIC Interrupt Handler
- */
-
-/*
- * The threshold interrupt handler services THRESHOLD_APIC_VECTOR.
- * The interrupt goes off when error_count reaches threshold_limit.
- * The handler simply logs to mcelog with a software-defined bank number.
- */
-static void amd_threshold_interrupt(void)
-{
- u32 low = 0, high = 0, address = 0;
- unsigned int bank, block;
- struct mce m;
-
- mce_setup(&m);
-
- /* assume first bank caused it */
- for (bank = 0; bank < NR_BANKS; ++bank) {
- if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
- continue;
- for (block = 0; block < NR_BLOCKS; ++block) {
- if (block == 0) {
- address = MSR_IA32_MC0_MISC + bank * 4;
- } else if (block == 1) {
- address = (low & MASK_BLKPTR_LO) >> 21;
- if (!address)
- break;
- address += MCG_XBLK_ADDR;
- } else {
- ++address;
- }
-
- if (rdmsr_safe(address, &low, &high))
- break;
-
- if (!(high & MASK_VALID_HI)) {
- if (block)
- continue;
- else
- break;
- }
-
- if (!(high & MASK_CNTP_HI) ||
- (high & MASK_LOCKED_HI))
- continue;
-
- /*
- * Log the machine check that caused the threshold
- * event.
- */
- machine_check_poll(MCP_TIMESTAMP,
- &__get_cpu_var(mce_poll_banks));
-
- if (high & MASK_OVERFLOW_HI) {
- rdmsrl(address, m.misc);
- rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
- m.status);
- m.bank = K8_MCE_THRESHOLD_BASE
- + bank * NR_BLOCKS
- + block;
- mce_log(&m);
- return;
- }
- }
- }
-}
-
-/*
- * Sysfs Interface
- */
-
-struct threshold_attr {
- struct attribute attr;
- ssize_t (*show) (struct threshold_block *, char *);
- ssize_t (*store) (struct threshold_block *, const char *, size_t count);
-};
-
-#define SHOW_FIELDS(name) \
-static ssize_t show_ ## name(struct threshold_block *b, char *buf) \
-{ \
- return sprintf(buf, "%lx\n", (unsigned long) b->name); \
-}
-SHOW_FIELDS(interrupt_enable)
-SHOW_FIELDS(threshold_limit)
-
-static ssize_t
-store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
-{
- struct thresh_restart tr;
- unsigned long new;
-
- if (!b->interrupt_capable)
- return -EINVAL;
-
- if (strict_strtoul(buf, 0, &new) < 0)
- return -EINVAL;
-
- b->interrupt_enable = !!new;
-
- memset(&tr, 0, sizeof(tr));
- tr.b = b;
-
- smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
-
- return size;
-}
-
-static ssize_t
-store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
-{
- struct thresh_restart tr;
- unsigned long new;
-
- if (strict_strtoul(buf, 0, &new) < 0)
- return -EINVAL;
-
- if (new > THRESHOLD_MAX)
- new = THRESHOLD_MAX;
- if (new < 1)
- new = 1;
-
- memset(&tr, 0, sizeof(tr));
- tr.old_limit = b->threshold_limit;
- b->threshold_limit = new;
- tr.b = b;
-
- smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
-
- return size;
-}
-
-struct threshold_block_cross_cpu {
- struct threshold_block *tb;
- long retval;
-};
-
-static void local_error_count_handler(void *_tbcc)
-{
- struct threshold_block_cross_cpu *tbcc = _tbcc;
- struct threshold_block *b = tbcc->tb;
- u32 low, high;
-
- rdmsr(b->address, low, high);
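-	/*
-	 * Bits 43:32 of the MISC MSR (high & 0xFFF) hold the error counter,
-	 * which is armed to THRESHOLD_MAX - threshold_limit so that it
-	 * overflows after threshold_limit errors; subtracting that bias gives
-	 * the number of errors counted so far.
-	 */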
- tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
-}
-
-static ssize_t show_error_count(struct threshold_block *b, char *buf)
-{
- struct threshold_block_cross_cpu tbcc = { .tb = b, };
-
- smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
- return sprintf(buf, "%lx\n", tbcc.retval);
-}
-
-static ssize_t store_error_count(struct threshold_block *b,
- const char *buf, size_t count)
-{
- struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };
-
- smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
- return 1;
-}
-
-#define RW_ATTR(val) \
-static struct threshold_attr val = { \
- .attr = {.name = __stringify(val), .mode = 0644 }, \
- .show = show_## val, \
- .store = store_## val, \
-};
-
-RW_ATTR(interrupt_enable);
-RW_ATTR(threshold_limit);
-RW_ATTR(error_count);
-
-static struct attribute *default_attrs[] = {
- &interrupt_enable.attr,
- &threshold_limit.attr,
- &error_count.attr,
- NULL
-};
-
-#define to_block(k) container_of(k, struct threshold_block, kobj)
-#define to_attr(a) container_of(a, struct threshold_attr, attr)
-
-static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
- struct threshold_block *b = to_block(kobj);
- struct threshold_attr *a = to_attr(attr);
- ssize_t ret;
-
- ret = a->show ? a->show(b, buf) : -EIO;
-
- return ret;
-}
-
-static ssize_t store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct threshold_block *b = to_block(kobj);
- struct threshold_attr *a = to_attr(attr);
- ssize_t ret;
-
- ret = a->store ? a->store(b, buf, count) : -EIO;
-
- return ret;
-}
-
-static const struct sysfs_ops threshold_ops = {
- .show = show,
- .store = store,
-};
-
-static struct kobj_type threshold_ktype = {
- .sysfs_ops = &threshold_ops,
- .default_attrs = default_attrs,
-};
-
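-/*
- * Recursively allocate one threshold_block per valid MISC register in this
- * bank and hook it into sysfs: the first block becomes the bank's ->blocks
- * head, later blocks are chained onto its miscj list, and each block gets
- * a "misc%i" kobject under the bank directory.
- */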
-static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
- unsigned int bank,
- unsigned int block,
- u32 address)
-{
- struct threshold_block *b = NULL;
- u32 low, high;
- int err;
-
- if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
- return 0;
-
- if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
- return 0;
-
- if (!(high & MASK_VALID_HI)) {
- if (block)
- goto recurse;
- else
- return 0;
- }
-
- if (!(high & MASK_CNTP_HI) ||
- (high & MASK_LOCKED_HI))
- goto recurse;
-
- b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
- if (!b)
- return -ENOMEM;
-
- b->block = block;
- b->bank = bank;
- b->cpu = cpu;
- b->address = address;
- b->interrupt_enable = 0;
- b->interrupt_capable = lvt_interrupt_supported(bank, high);
- b->threshold_limit = THRESHOLD_MAX;
-
- INIT_LIST_HEAD(&b->miscj);
-
- if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
- list_add(&b->miscj,
- &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
- } else {
- per_cpu(threshold_banks, cpu)[bank]->blocks = b;
- }
-
- err = kobject_init_and_add(&b->kobj, &threshold_ktype,
- per_cpu(threshold_banks, cpu)[bank]->kobj,
- "misc%i", block);
- if (err)
- goto out_free;
-recurse:
- if (!block) {
- address = (low & MASK_BLKPTR_LO) >> 21;
- if (!address)
- return 0;
- address += MCG_XBLK_ADDR;
- } else {
- ++address;
- }
-
- err = allocate_threshold_blocks(cpu, bank, ++block, address);
- if (err)
- goto out_free;
-
- if (b)
- kobject_uevent(&b->kobj, KOBJ_ADD);
-
- return err;
-
-out_free:
- if (b) {
- kobject_put(&b->kobj);
- list_del(&b->miscj);
- kfree(b);
- }
- return err;
-}
-
-static __cpuinit long
-local_allocate_threshold_blocks(int cpu, unsigned int bank)
-{
- return allocate_threshold_blocks(cpu, bank, 0,
- MSR_IA32_MC0_MISC + bank * 4);
-}
-
-/* symlinks sibling shared banks to first core. first core owns dir/files. */
-static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
-{
- int i, err = 0;
- struct threshold_bank *b = NULL;
- struct device *dev = per_cpu(mce_device, cpu);
- char name[32];
-
- sprintf(name, "threshold_bank%i", bank);
-
-#ifdef CONFIG_SMP
- if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
- i = cpumask_first(cpu_llc_shared_mask(cpu));
-
- /* first core not up yet */
- if (cpu_data(i).cpu_core_id)
- goto out;
-
- /* already linked */
- if (per_cpu(threshold_banks, cpu)[bank])
- goto out;
-
- b = per_cpu(threshold_banks, i)[bank];
-
- if (!b)
- goto out;
-
- err = sysfs_create_link(&dev->kobj, b->kobj, name);
- if (err)
- goto out;
-
- cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu));
- per_cpu(threshold_banks, cpu)[bank] = b;
-
- goto out;
- }
-#endif
-
- b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
- if (!b) {
- err = -ENOMEM;
- goto out;
- }
- if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
- kfree(b);
- err = -ENOMEM;
- goto out;
- }
-
- b->kobj = kobject_create_and_add(name, &dev->kobj);
- if (!b->kobj)
- goto out_free;
-
-#ifndef CONFIG_SMP
- cpumask_setall(b->cpus);
-#else
- cpumask_set_cpu(cpu, b->cpus);
-#endif
-
- per_cpu(threshold_banks, cpu)[bank] = b;
-
- err = local_allocate_threshold_blocks(cpu, bank);
- if (err)
- goto out_free;
-
- for_each_cpu(i, b->cpus) {
- if (i == cpu)
- continue;
-
- dev = per_cpu(mce_device, i);
- if (dev)
- err = sysfs_create_link(&dev->kobj, b->kobj, name);
- if (err)
- goto out;
-
- per_cpu(threshold_banks, i)[bank] = b;
- }
-
- goto out;
-
-out_free:
- per_cpu(threshold_banks, cpu)[bank] = NULL;
- free_cpumask_var(b->cpus);
- kfree(b);
-out:
- return err;
-}
-
-/* create dir/files for all valid threshold banks */
-static __cpuinit int threshold_create_device(unsigned int cpu)
-{
- unsigned int bank;
- int err = 0;
-
- for (bank = 0; bank < NR_BANKS; ++bank) {
- if (!(per_cpu(bank_map, cpu) & (1 << bank)))
- continue;
- err = threshold_create_bank(cpu, bank);
- if (err)
- return err;
- }
-
- return err;
-}
-
-/*
- * Let's be hotplug friendly.
- * On multi-core processors, the first core always takes ownership of the
- * shared sysfs dir/files, and the rest of the cores are symlinked to it.
- */
-
-static void deallocate_threshold_block(unsigned int cpu,
- unsigned int bank)
-{
- struct threshold_block *pos = NULL;
- struct threshold_block *tmp = NULL;
- struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
-
- if (!head)
- return;
-
- list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
- kobject_put(&pos->kobj);
- list_del(&pos->miscj);
- kfree(pos);
- }
-
- kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
- per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
-}
-
-static void threshold_remove_bank(unsigned int cpu, int bank)
-{
- struct threshold_bank *b;
- struct device *dev;
- char name[32];
- int i = 0;
-
- b = per_cpu(threshold_banks, cpu)[bank];
- if (!b)
- return;
- if (!b->blocks)
- goto free_out;
-
- sprintf(name, "threshold_bank%i", bank);
-
-#ifdef CONFIG_SMP
- /* sibling symlink */
- if (shared_bank[bank] && b->blocks->cpu != cpu) {
- dev = per_cpu(mce_device, cpu);
- sysfs_remove_link(&dev->kobj, name);
- per_cpu(threshold_banks, cpu)[bank] = NULL;
-
- return;
- }
-#endif
-
- /* remove all sibling symlinks before unregistering */
- for_each_cpu(i, b->cpus) {
- if (i == cpu)
- continue;
-
- dev = per_cpu(mce_device, i);
- if (dev)
- sysfs_remove_link(&dev->kobj, name);
- per_cpu(threshold_banks, i)[bank] = NULL;
- }
-
- deallocate_threshold_block(cpu, bank);
-
-free_out:
- kobject_del(b->kobj);
- kobject_put(b->kobj);
- free_cpumask_var(b->cpus);
- kfree(b);
- per_cpu(threshold_banks, cpu)[bank] = NULL;
-}
-
-static void threshold_remove_device(unsigned int cpu)
-{
- unsigned int bank;
-
- for (bank = 0; bank < NR_BANKS; ++bank) {
- if (!(per_cpu(bank_map, cpu) & (1 << bank)))
- continue;
- threshold_remove_bank(cpu, bank);
- }
-}
-
-/* get notified when a cpu comes on/off */
-static void __cpuinit
-amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
-{
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- threshold_create_device(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- threshold_remove_device(cpu);
- break;
- default:
- break;
- }
-}
-
-static __init int threshold_init_device(void)
-{
- unsigned lcpu = 0;
-
- /* to hit CPUs online before the notifier is up */
- for_each_online_cpu(lcpu) {
- int err = threshold_create_device(lcpu);
-
- if (err)
- return err;
- }
- threshold_cpu_callback = amd_64_threshold_cpu_callback;
-
- return 0;
-}
-device_initcall(threshold_init_device);
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce_intel.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce_intel.c
deleted file mode 100644
index 38e49bc9..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Intel specific MCE features.
- * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
- * Copyright (C) 2008, 2009 Intel Corporation
- * Author: Andi Kleen
- */
-
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/percpu.h>
-#include <linux/sched.h>
-#include <asm/apic.h>
-#include <asm/processor.h>
-#include <asm/msr.h>
-#include <asm/mce.h>
-
-/*
- * Support for Intel Corrected Machine Check Interrupts (CMCI). This allows
- * the CPU to raise an interrupt when a corrected machine check happens.
- * Normally we pick those up using a regular polling timer.
- * Also supports reliable discovery of shared banks.
- */
-
-static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
-
-/*
- * cmci_discover_lock protects against parallel discovery attempts
- * which could race against each other.
- */
-static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
-
-#define CMCI_THRESHOLD 1
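-/*
- * The threshold programmed into MCi_CTL2 is the corrected-error count at
- * which a CMCI is generated; 1 means an interrupt as soon as a single
- * corrected error is counted.
- */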
-
-static int cmci_supported(int *banks)
-{
- u64 cap;
-
- if (mce_cmci_disabled || mce_ignore_ce)
- return 0;
-
- /*
- * The vendor check is not strictly needed, but the initial
- * setup is vendor keyed, and this makes sure none of the
- * backdoors are entered otherwise.
- */
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
- return 0;
- if (!cpu_has_apic || lapic_get_maxlvt() < 6)
- return 0;
- rdmsrl(MSR_IA32_MCG_CAP, cap);
- *banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
- return !!(cap & MCG_CMCI_P);
-}
-
-/*
- * The interrupt handler. This is called on every event.
- * Just call the poller directly to log any events.
- * This could in theory increase the threshold under high load,
- * but doesn't for now.
- */
-static void intel_threshold_interrupt(void)
-{
- machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
- mce_notify_irq();
-}
-
-static void print_update(char *type, int *hdr, int num)
-{
- if (*hdr == 0)
- printk(KERN_INFO "CPU %d MCA banks", smp_processor_id());
- *hdr = 1;
- printk(KERN_CONT " %s:%d", type, num);
-}
-
-/*
- * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
- * on this CPU. Use the algorithm recommended in the SDM to discover shared
- * banks.
- */
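-/*
- * For banks shared between CPUs the MCi_CTL2 register itself is shared, so
- * the first CPU to set CMCI_EN ends up owning the bank; the other CPUs see
- * the bit already set and leave the bank alone, while a bank with no CMCI
- * support will not latch the bit for anyone.
- */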
-static void cmci_discover(int banks, int boot)
-{
- unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
- unsigned long flags;
- int hdr = 0;
- int i;
-
- raw_spin_lock_irqsave(&cmci_discover_lock, flags);
- for (i = 0; i < banks; i++) {
- u64 val;
-
- if (test_bit(i, owned))
- continue;
-
- rdmsrl(MSR_IA32_MCx_CTL2(i), val);
-
- /* Already owned by someone else? */
- if (val & MCI_CTL2_CMCI_EN) {
- if (test_and_clear_bit(i, owned) && !boot)
- print_update("SHD", &hdr, i);
- __clear_bit(i, __get_cpu_var(mce_poll_banks));
- continue;
- }
-
- val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
- val |= MCI_CTL2_CMCI_EN | CMCI_THRESHOLD;
- wrmsrl(MSR_IA32_MCx_CTL2(i), val);
- rdmsrl(MSR_IA32_MCx_CTL2(i), val);
-
- /* Did the enable bit stick? -- the bank supports CMCI */
- if (val & MCI_CTL2_CMCI_EN) {
- if (!test_and_set_bit(i, owned) && !boot)
- print_update("CMCI", &hdr, i);
- __clear_bit(i, __get_cpu_var(mce_poll_banks));
- } else {
- WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
- }
- }
- raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
- if (hdr)
- printk(KERN_CONT "\n");
-}
-
-/*
- * Just in case we missed an event during initialization, check
- * all the CMCI-owned banks.
- */
-void cmci_recheck(void)
-{
- unsigned long flags;
- int banks;
-
- if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
- return;
- local_irq_save(flags);
- machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
- local_irq_restore(flags);
-}
-
-/*
- * Disable CMCI on this CPU for all banks it owns when it goes down.
- * This allows other CPUs to claim the banks on rediscovery.
- */
-void cmci_clear(void)
-{
- unsigned long flags;
- int i;
- int banks;
- u64 val;
-
- if (!cmci_supported(&banks))
- return;
- raw_spin_lock_irqsave(&cmci_discover_lock, flags);
- for (i = 0; i < banks; i++) {
- if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
- continue;
- /* Disable CMCI */
- rdmsrl(MSR_IA32_MCx_CTL2(i), val);
- val &= ~(MCI_CTL2_CMCI_EN|MCI_CTL2_CMCI_THRESHOLD_MASK);
- wrmsrl(MSR_IA32_MCx_CTL2(i), val);
- __clear_bit(i, __get_cpu_var(mce_banks_owned));
- }
- raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
-}
-
-/*
- * After a CPU went down cycle through all the others and rediscover
- * Must run in process context.
- */
-void cmci_rediscover(int dying)
-{
- int banks;
- int cpu;
- cpumask_var_t old;
-
- if (!cmci_supported(&banks))
- return;
- if (!alloc_cpumask_var(&old, GFP_KERNEL))
- return;
- cpumask_copy(old, &current->cpus_allowed);
-
- for_each_online_cpu(cpu) {
- if (cpu == dying)
- continue;
- if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
- continue;
- /* Recheck banks in case CPUs don't all have the same */
- if (cmci_supported(&banks))
- cmci_discover(banks, 0);
- }
-
- set_cpus_allowed_ptr(current, old);
- free_cpumask_var(old);
-}
-
-/*
- * Reenable CMCI on this CPU in case a CPU down failed.
- */
-void cmci_reenable(void)
-{
- int banks;
- if (cmci_supported(&banks))
- cmci_discover(banks, 0);
-}
-
-static void intel_init_cmci(void)
-{
- int banks;
-
- if (!cmci_supported(&banks))
- return;
-
- mce_threshold_vector = intel_threshold_interrupt;
- cmci_discover(banks, 1);
- /*
- * For CPU #0 this runs with the APIC still disabled, but that's
- * OK because only the vector is set up. We still do another
- * check of the banks later for CPU #0 just to make sure we
- * don't miss any events.
- */
- apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
- cmci_recheck();
-}
-
-void mce_intel_feature_init(struct cpuinfo_x86 *c)
-{
- intel_init_thermal(c);
- intel_init_cmci();
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/p5.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/p5.c
deleted file mode 100644
index 2d5454cd..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/p5.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * P5 specific Machine Check Exception Reporting
- * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk>
- */
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-
-#include <asm/processor.h>
-#include <asm/mce.h>
-#include <asm/msr.h>
-
-/* By default disabled */
-int mce_p5_enabled __read_mostly;
-
-/* Machine check handler for Pentium class Intel CPUs: */
-static void pentium_machine_check(struct pt_regs *regs, long error_code)
-{
- u32 loaddr, hi, lotype;
-
- rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
- rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);
-
- printk(KERN_EMERG
- "CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n",
- smp_processor_id(), loaddr, lotype);
-
- if (lotype & (1<<5)) {
- printk(KERN_EMERG
- "CPU#%d: Possible thermal failure (CPU on fire ?).\n",
- smp_processor_id());
- }
-
- add_taint(TAINT_MACHINE_CHECK);
-}
-
-/* Set up machine check reporting for processors with Intel style MCE: */
-void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
-{
- u32 l, h;
-
- /* Default P5 to off as it's often misconnected: */
- if (!mce_p5_enabled)
- return;
-
- /* Check for MCE support: */
- if (!cpu_has(c, X86_FEATURE_MCE))
- return;
-
- machine_check_vector = pentium_machine_check;
- /* Make sure the vector pointer is visible before we enable MCEs: */
- wmb();
-
- /* Read registers before enabling: */
- rdmsr(MSR_IA32_P5_MC_ADDR, l, h);
- rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
- printk(KERN_INFO
- "Intel old style machine check architecture supported.\n");
-
- /* Enable MCE: */
- set_in_cr4(X86_CR4_MCE);
- printk(KERN_INFO
- "Intel old style machine check reporting enabled on CPU#%d.\n",
- smp_processor_id());
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/therm_throt.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/therm_throt.c
deleted file mode 100644
index 47a18702..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- * Thermal throttle event support code (such as syslog messaging and rate
- * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
- *
- * This allows consistent reporting of CPU thermal throttle events.
- *
- * Maintains a counter in /sys that keeps track of the number of thermal
- * events, such that the user knows how bad the thermal problem might be
- * (since the logging to syslog and mcelog is rate limited).
- *
- * Author: Dmitriy Zavin (dmitriyz@google.com)
- *
- * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
- * Inspired by Ross Biro's and Al Borchers' counter code.
- */
-#include <linux/interrupt.h>
-#include <linux/notifier.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/percpu.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/cpu.h>
-
-#include <asm/processor.h>
-#include <asm/apic.h>
-#include <asm/idle.h>
-#include <asm/mce.h>
-#include <asm/msr.h>
-
-/* How long to wait between reporting thermal events */
-#define CHECK_INTERVAL (300 * HZ)
-
-#define THERMAL_THROTTLING_EVENT 0
-#define POWER_LIMIT_EVENT 1
-
-/*
- * Current thermal event state:
- */
-struct _thermal_state {
- bool new_event;
- int event;
- u64 next_check;
- unsigned long count;
- unsigned long last_count;
-};
-
-struct thermal_state {
- struct _thermal_state core_throttle;
- struct _thermal_state core_power_limit;
- struct _thermal_state package_throttle;
- struct _thermal_state package_power_limit;
- struct _thermal_state core_thresh0;
- struct _thermal_state core_thresh1;
-};
-
-/* Callback to handle core threshold interrupts */
-int (*platform_thermal_notify)(__u64 msr_val);
-EXPORT_SYMBOL(platform_thermal_notify);
-
-static DEFINE_PER_CPU(struct thermal_state, thermal_state);
-
-static atomic_t therm_throt_en = ATOMIC_INIT(0);
-
-static u32 lvtthmr_init __read_mostly;
-
-#ifdef CONFIG_SYSFS
-#define define_therm_throt_device_one_ro(_name) \
- static DEVICE_ATTR(_name, 0444, \
- therm_throt_device_show_##_name, \
- NULL) \
-
-#define define_therm_throt_device_show_func(event, name) \
- \
-static ssize_t therm_throt_device_show_##event##_##name( \
- struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- unsigned int cpu = dev->id; \
- ssize_t ret; \
- \
- preempt_disable(); /* CPU hotplug */ \
- if (cpu_online(cpu)) { \
- ret = sprintf(buf, "%lu\n", \
- per_cpu(thermal_state, cpu).event.name); \
- } else \
- ret = 0; \
- preempt_enable(); \
- \
- return ret; \
-}
-
-define_therm_throt_device_show_func(core_throttle, count);
-define_therm_throt_device_one_ro(core_throttle_count);
-
-define_therm_throt_device_show_func(core_power_limit, count);
-define_therm_throt_device_one_ro(core_power_limit_count);
-
-define_therm_throt_device_show_func(package_throttle, count);
-define_therm_throt_device_one_ro(package_throttle_count);
-
-define_therm_throt_device_show_func(package_power_limit, count);
-define_therm_throt_device_one_ro(package_power_limit_count);
-
-static struct attribute *thermal_throttle_attrs[] = {
- &dev_attr_core_throttle_count.attr,
- NULL
-};
-
-static struct attribute_group thermal_attr_group = {
- .attrs = thermal_throttle_attrs,
- .name = "thermal_throttle"
-};
-#endif /* CONFIG_SYSFS */
-
-#define CORE_LEVEL 0
-#define PACKAGE_LEVEL 1
-
-/***
- * therm_throt_process - Process thermal throttling event from interrupt
- * @new_event: Whether the condition is currently asserted (boolean), since
- *             the thermal interrupt normally gets called both when the
- *             thermal event begins and once the event has ended.
- * @event:     THERMAL_THROTTLING_EVENT or POWER_LIMIT_EVENT.
- * @level:     CORE_LEVEL or PACKAGE_LEVEL.
- *
- * This function is called by the thermal interrupt after the
- * IRQ has been acknowledged.
- *
- * It will take care of rate limiting and printing messages to the syslog.
- *
- * Returns: 0 : Event should NOT be further logged, i.e. still in
- * "timeout" from previous log message.
- * 1 : Event should be logged further, and a message has been
- * printed to the syslog.
- */
-static int therm_throt_process(bool new_event, int event, int level)
-{
- struct _thermal_state *state;
- unsigned int this_cpu = smp_processor_id();
- bool old_event;
- u64 now;
- struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
-
- now = get_jiffies_64();
- if (level == CORE_LEVEL) {
- if (event == THERMAL_THROTTLING_EVENT)
- state = &pstate->core_throttle;
- else if (event == POWER_LIMIT_EVENT)
- state = &pstate->core_power_limit;
- else
- return 0;
- } else if (level == PACKAGE_LEVEL) {
- if (event == THERMAL_THROTTLING_EVENT)
- state = &pstate->package_throttle;
- else if (event == POWER_LIMIT_EVENT)
- state = &pstate->package_power_limit;
- else
- return 0;
- } else
- return 0;
-
- old_event = state->new_event;
- state->new_event = new_event;
-
- if (new_event)
- state->count++;
-
- if (time_before64(now, state->next_check) &&
- state->count != state->last_count)
- return 0;
-
- state->next_check = now + CHECK_INTERVAL;
- state->last_count = state->count;
-
- /* if we just entered the thermal event */
- if (new_event) {
- if (event == THERMAL_THROTTLING_EVENT)
- printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
- this_cpu,
- level == CORE_LEVEL ? "Core" : "Package",
- state->count);
- else
- printk(KERN_CRIT "CPU%d: %s power limit notification (total events = %lu)\n",
- this_cpu,
- level == CORE_LEVEL ? "Core" : "Package",
- state->count);
- return 1;
- }
- if (old_event) {
- if (event == THERMAL_THROTTLING_EVENT)
- printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
- this_cpu,
- level == CORE_LEVEL ? "Core" : "Package");
- else
- printk(KERN_INFO "CPU%d: %s power limit normal\n",
- this_cpu,
- level == CORE_LEVEL ? "Core" : "Package");
- return 1;
- }
-
- return 0;
-}
-
-static int thresh_event_valid(int event)
-{
- struct _thermal_state *state;
- unsigned int this_cpu = smp_processor_id();
- struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
- u64 now = get_jiffies_64();
-
- state = (event == 0) ? &pstate->core_thresh0 : &pstate->core_thresh1;
-
- if (time_before64(now, state->next_check))
- return 0;
-
- state->next_check = now + CHECK_INTERVAL;
- return 1;
-}
-
-#ifdef CONFIG_SYSFS
-/* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct device *dev,
- unsigned int cpu)
-{
- int err;
- struct cpuinfo_x86 *c = &cpu_data(cpu);
-
- err = sysfs_create_group(&dev->kobj, &thermal_attr_group);
- if (err)
- return err;
-
- if (cpu_has(c, X86_FEATURE_PLN))
- err = sysfs_add_file_to_group(&dev->kobj,
- &dev_attr_core_power_limit_count.attr,
- thermal_attr_group.name);
- if (cpu_has(c, X86_FEATURE_PTS)) {
- err = sysfs_add_file_to_group(&dev->kobj,
- &dev_attr_package_throttle_count.attr,
- thermal_attr_group.name);
- if (cpu_has(c, X86_FEATURE_PLN))
- err = sysfs_add_file_to_group(&dev->kobj,
- &dev_attr_package_power_limit_count.attr,
- thermal_attr_group.name);
- }
-
- return err;
-}
-
-static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
-{
- sysfs_remove_group(&dev->kobj, &thermal_attr_group);
-}
-
-/* Mutex protecting device creation against CPU hotplug: */
-static DEFINE_MUTEX(therm_cpu_lock);
-
-/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static __cpuinit int
-thermal_throttle_cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- struct device *dev;
- int err = 0;
-
- dev = get_cpu_device(cpu);
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- mutex_lock(&therm_cpu_lock);
- err = thermal_throttle_add_dev(dev, cpu);
- mutex_unlock(&therm_cpu_lock);
- WARN_ON(err);
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- mutex_lock(&therm_cpu_lock);
- thermal_throttle_remove_dev(dev);
- mutex_unlock(&therm_cpu_lock);
- break;
- }
- return notifier_from_errno(err);
-}
-
-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
-{
- .notifier_call = thermal_throttle_cpu_callback,
-};
-
-static __init int thermal_throttle_init_device(void)
-{
- unsigned int cpu = 0;
- int err;
-
- if (!atomic_read(&therm_throt_en))
- return 0;
-
- register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
-
-#ifdef CONFIG_HOTPLUG_CPU
- mutex_lock(&therm_cpu_lock);
-#endif
- /* connect live CPUs to sysfs */
- for_each_online_cpu(cpu) {
- err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
- WARN_ON(err);
- }
-#ifdef CONFIG_HOTPLUG_CPU
- mutex_unlock(&therm_cpu_lock);
-#endif
-
- return 0;
-}
-device_initcall(thermal_throttle_init_device);
-
-#endif /* CONFIG_SYSFS */
-
-static void notify_thresholds(__u64 msr_val)
-{
- /* check whether the interrupt handler is defined;
- * otherwise simply return
- */
- if (!platform_thermal_notify)
- return;
-
- /* lower threshold reached */
- if ((msr_val & THERM_LOG_THRESHOLD0) && thresh_event_valid(0))
- platform_thermal_notify(msr_val);
- /* higher threshold reached */
- if ((msr_val & THERM_LOG_THRESHOLD1) && thresh_event_valid(1))
- platform_thermal_notify(msr_val);
-}
-
-/* Thermal transition interrupt handler */
-static void intel_thermal_interrupt(void)
-{
- __u64 msr_val;
-
- rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
-
- /* Check for violation of core thermal thresholds */
- notify_thresholds(msr_val);
-
- if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
- THERMAL_THROTTLING_EVENT,
- CORE_LEVEL) != 0)
- mce_log_therm_throt_event(msr_val);
-
- if (this_cpu_has(X86_FEATURE_PLN))
- therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
- POWER_LIMIT_EVENT,
- CORE_LEVEL);
-
- if (this_cpu_has(X86_FEATURE_PTS)) {
- rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
- therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
- THERMAL_THROTTLING_EVENT,
- PACKAGE_LEVEL);
- if (this_cpu_has(X86_FEATURE_PLN))
- therm_throt_process(msr_val &
- PACKAGE_THERM_STATUS_POWER_LIMIT,
- POWER_LIMIT_EVENT,
- PACKAGE_LEVEL);
- }
-}
-
-static void unexpected_thermal_interrupt(void)
-{
- printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
- smp_processor_id());
-}
-
-static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
-
-asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
-{
- irq_enter();
- exit_idle();
- inc_irq_stat(irq_thermal_count);
- smp_thermal_vector();
- irq_exit();
- /* Ack only at the end to avoid potential reentry */
- ack_APIC_irq();
-}
-
-/* Thermal monitoring depends on APIC, ACPI and clock modulation */
-static int intel_thermal_supported(struct cpuinfo_x86 *c)
-{
- if (!cpu_has_apic)
- return 0;
- if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
- return 0;
- return 1;
-}
-
-void __init mcheck_intel_therm_init(void)
-{
- /*
- * This function is only called on the boot CPU. Save the initial
- * thermal LVT value on the BSP and use it later to restore the
- * thermal LVT entry that the BIOS programmed on the APs.
- */
- if (intel_thermal_supported(&boot_cpu_data))
- lvtthmr_init = apic_read(APIC_LVTTHMR);
-}
-
-void intel_init_thermal(struct cpuinfo_x86 *c)
-{
- unsigned int cpu = smp_processor_id();
- int tm2 = 0;
- u32 l, h;
-
- if (!intel_thermal_supported(c))
- return;
-
- /*
- * First check if it's enabled already, in which case there might
- * be some SMM goo which handles it, so we can't even put a handler
- * since it might be delivered via SMI already:
- */
- rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-
- h = lvtthmr_init;
- /*
- * The initial value of the thermal LVT entry on all APs always reads
- * 0x10000 because the APs are woken up by the BSP issuing the
- * INIT-SIPI-SIPI sequence to them, and LVT registers are reset to 0
- * except for the mask bits, which are set to 1 when an AP receives the
- * INIT IPI.
- * If the BIOS has taken over the thermal interrupt and set its delivery
- * mode to SMI (not fixed), the value the BIOS programmed is restored on
- * the AP from the BSP value saved above, since the BIOS always sets the
- * same value for all threads/cores.
- */
- if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
- apic_write(APIC_LVTTHMR, lvtthmr_init);
-
-
- if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
- printk(KERN_DEBUG
- "CPU%d: Thermal monitoring handled by SMI\n", cpu);
- return;
- }
-
- /* Check whether a vector already exists */
- if (h & APIC_VECTOR_MASK) {
- printk(KERN_DEBUG
- "CPU%d: Thermal LVT vector (%#x) already installed\n",
- cpu, (h & APIC_VECTOR_MASK));
- return;
- }
-
- /* early Pentium M models use different method for enabling TM2 */
- if (cpu_has(c, X86_FEATURE_TM2)) {
- if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
- rdmsr(MSR_THERM2_CTL, l, h);
- if (l & MSR_THERM2_CTL_TM_SELECT)
- tm2 = 1;
- } else if (l & MSR_IA32_MISC_ENABLE_TM2)
- tm2 = 1;
- }
-
- /* We'll mask the thermal vector in the lapic till we're ready: */
- h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
- apic_write(APIC_LVTTHMR, h);
-
- rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
- if (cpu_has(c, X86_FEATURE_PLN))
- wrmsr(MSR_IA32_THERM_INTERRUPT,
- l | (THERM_INT_LOW_ENABLE
- | THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
- else
- wrmsr(MSR_IA32_THERM_INTERRUPT,
- l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);
-
- if (cpu_has(c, X86_FEATURE_PTS)) {
- rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
- if (cpu_has(c, X86_FEATURE_PLN))
- wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
- l | (PACKAGE_THERM_INT_LOW_ENABLE
- | PACKAGE_THERM_INT_HIGH_ENABLE
- | PACKAGE_THERM_INT_PLN_ENABLE), h);
- else
- wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
- l | (PACKAGE_THERM_INT_LOW_ENABLE
- | PACKAGE_THERM_INT_HIGH_ENABLE), h);
- }
-
- smp_thermal_vector = intel_thermal_interrupt;
-
- rdmsr(MSR_IA32_MISC_ENABLE, l, h);
- wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
-
- /* Unmask the thermal vector: */
- l = apic_read(APIC_LVTTHMR);
- apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
-
- printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
- tm2 ? "TM2" : "TM1");
-
- /* enable thermal throttle processing */
- atomic_set(&therm_throt_en, 1);
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/threshold.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/threshold.c
deleted file mode 100644
index aa578cad..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/threshold.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Common corrected MCE threshold handler code:
- */
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-
-#include <asm/irq_vectors.h>
-#include <asm/apic.h>
-#include <asm/idle.h>
-#include <asm/mce.h>
-
-static void default_threshold_interrupt(void)
-{
- printk(KERN_ERR "Unexpected threshold interrupt at vector %x\n",
- THRESHOLD_APIC_VECTOR);
-}
-
-void (*mce_threshold_vector)(void) = default_threshold_interrupt;
-
-asmlinkage void smp_threshold_interrupt(void)
-{
- irq_enter();
- exit_idle();
- inc_irq_stat(irq_threshold_count);
- mce_threshold_vector();
- irq_exit();
- /* Ack only at the end to avoid potential reentry */
- ack_APIC_irq();
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/winchip.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/winchip.c
deleted file mode 100644
index 2d7998fb..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mcheck/winchip.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * IDT Winchip specific Machine Check Exception Reporting
- * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk>
- */
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <asm/processor.h>
-#include <asm/mce.h>
-#include <asm/msr.h>
-
-/* Machine check handler for WinChip C6: */
-static void winchip_machine_check(struct pt_regs *regs, long error_code)
-{
- printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
- add_taint(TAINT_MACHINE_CHECK);
-}
-
-/* Set up machine check reporting on the Winchip C6 series */
-void winchip_mcheck_init(struct cpuinfo_x86 *c)
-{
- u32 lo, hi;
-
- machine_check_vector = winchip_machine_check;
- /* Make sure the vector pointer is visible before we enable MCEs: */
- wmb();
-
- rdmsr(MSR_IDT_FCR1, lo, hi);
- lo |= (1<<2); /* Enable EIERRINT (int 18 MCE) */
- lo &= ~(1<<4); /* Enable MCE */
- wrmsr(MSR_IDT_FCR1, lo, hi);
-
- set_in_cr4(X86_CR4_MCE);
-
- printk(KERN_INFO
- "Winchip machine check reporting enabled on CPU#0.\n");
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mkcapflags.pl b/ANDROID_3.4.5/arch/x86/kernel/cpu/mkcapflags.pl
deleted file mode 100644
index dfea390e..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mkcapflags.pl
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/perl
-#
-# Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h
-#
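-# For example, a cpufeature.h entry of the form
-#	#define X86_FEATURE_FOO	(3*32+ 5) /* "foo_flag" Some feature */
-# (hypothetical names) produces the table entry
-#	[X86_FEATURE_FOO] = "foo_flag",
-# while an entry whose comment carries no quoted name is emitted with the
-# macro suffix lower-cased instead.
-#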
-
-($in, $out) = @ARGV;
-
-open(IN, "< $in\0") or die "$0: cannot open: $in: $!\n";
-open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n";
-
-print OUT "#include <asm/cpufeature.h>\n\n";
-print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n";
-
-while (defined($line = <IN>)) {
- if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) {
- $macro = $1;
- $feature = $2;
- $tail = $3;
- if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) {
- $feature = $1;
- }
-
- if ($feature ne '') {
- printf OUT "\t%-32s = \"%s\",\n",
- "[$macro]", "\L$feature";
- }
- }
-}
-print OUT "};\n";
-
-close(IN);
-close(OUT);
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mshyperv.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mshyperv.c
deleted file mode 100644
index 0a630dd4..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mshyperv.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * HyperV Detection code.
- *
- * Copyright (C) 2010, Novell, Inc.
- * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- */
-
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/clocksource.h>
-#include <linux/module.h>
-#include <asm/processor.h>
-#include <asm/hypervisor.h>
-#include <asm/hyperv.h>
-#include <asm/mshyperv.h>
-
-struct ms_hyperv_info ms_hyperv;
-EXPORT_SYMBOL_GPL(ms_hyperv);
-
-static bool __init ms_hyperv_platform(void)
-{
- u32 eax;
- u32 hyp_signature[3];
-
- if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
- return false;
-
- cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
- &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
-
- return eax >= HYPERV_CPUID_MIN &&
- eax <= HYPERV_CPUID_MAX &&
- !memcmp("Microsoft Hv", hyp_signature, 12);
-}
-
-static cycle_t read_hv_clock(struct clocksource *arg)
-{
- cycle_t current_tick;
- /*
- * Read the partition counter to get the current tick count. This count
- * is set to 0 when the partition is created and is incremented in
- * 100 nanosecond units.
- */
- rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
- return current_tick;
-}
-
-static struct clocksource hyperv_cs = {
- .name = "hyperv_clocksource",
- .rating = 400, /* use this when running on Hyperv */
- .read = read_hv_clock,
- .mask = CLOCKSOURCE_MASK(64),
-};
-
-static void __init ms_hyperv_init_platform(void)
-{
- /*
- * Extract the features and hints
- */
- ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
- ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
-
- printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
- ms_hyperv.features, ms_hyperv.hints);
-
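-	/*
-	 * The partition reference counter ticks every 100ns, i.e. at
-	 * NSEC_PER_SEC / 100 = 10 MHz, which is the frequency registered here.
-	 */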
- clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
-}
-
-const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
- .name = "Microsoft HyperV",
- .detect = ms_hyperv_platform,
- .init_platform = ms_hyperv_init_platform,
-};
-EXPORT_SYMBOL(x86_hyper_ms_hyperv);
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/Makefile b/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/Makefile
deleted file mode 100644
index ad9e5ed8..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-y := main.o if.o generic.o cleanup.o
-obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
-
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/amd.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/amd.c
deleted file mode 100644
index 92ba9cd3..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/amd.c
+++ /dev/null
@@ -1,124 +0,0 @@
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-
-#include "mtrr.h"
-
-static void
-amd_get_mtrr(unsigned int reg, unsigned long *base,
- unsigned long *size, mtrr_type *type)
-{
- unsigned long low, high;
-
- rdmsr(MSR_K6_UWCCR, low, high);
- /* Upper dword is region 1, lower is region 0 */
- if (reg == 1)
- low = high;
- /* The base is masked off to the right alignment */
- *base = (low & 0xFFFE0000) >> PAGE_SHIFT;
- *type = 0;
- if (low & 1)
- *type = MTRR_TYPE_UNCACHABLE;
- if (low & 2)
- *type = MTRR_TYPE_WRCOMB;
- if (!(low & 3)) {
- *size = 0;
- return;
- }
- /*
- * This needs a little explaining. The size is stored as an
- * inverted mask of bits, of 128K granularity, 15 bits long and offset
- * by 2 bits.
- *
- * So to get a size we invert the mask and add 1 at the lowest
- * mask bit (i.e. add 4, as it's 2 bits in). This gives us a size we then
- * shift to turn into 128K blocks.
- *
- * e.g. 111 1111 1111 1100 is 512K
- *
- * invert 000 0000 0000 0011
- * +1 000 0000 0000 0100
- * *128K ...
- */
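-	/*
-	 * In register terms, with 4K pages (PAGE_SHIFT == 12): for the 512K
-	 * example, (~low) & 0x1FFFC is 0xC, and (0xC + 4) << 3 is 128 pages,
-	 * i.e. 512K.
-	 */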
- low = (~low) & 0x1FFFC;
- *size = (low + 4) << (15 - PAGE_SHIFT);
-}
-
-/**
- * amd_set_mtrr - Set variable MTRR register on the local CPU.
- *
- * @reg: The register to set.
- * @base: The base address of the region.
- * @size: The size of the region. If this is 0 the region is disabled.
- * @type: The type of the region.
- *
- * Returns nothing.
- */
-static void
-amd_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
-{
- u32 regs[2];
-
- /*
- * Low is MTRR0, High MTRR 1
- */
- rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
- /*
- * Blank to disable
- */
- if (size == 0) {
- regs[reg] = 0;
- } else {
- /*
- * Set the register to the base, the type (off by one) and an
- * inverted bitmask of the size. The size is the only odd
- * bit. We are fed, say, 512K. We invert this and we get 111 1111
- * 1111 1011, but if you subtract one and invert you get the
- * desired 111 1111 1111 1100 mask.
- *
- * But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!
- */
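-		/*
-		 * With 4K pages (PAGE_SHIFT == 12), a 512K region is 128
-		 * pages, so (-size >> 3) & 0x0001FFFC works out to 0x1FFF0,
-		 * i.e. the 111 1111 1111 1100 mask sitting in bits 16:2.
-		 */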
- regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
- | (base << PAGE_SHIFT) | (type + 1);
- }
-
- /*
- * The writeback rule is quite specific. See the manual. It is:
- * disable local interrupts, write back the cache, set the MTRR.
- */
- wbinvd();
- wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
-}
-
-static int
-amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
-{
- /*
- * Apply the K6 block alignment and size rules
- * In order
- * o Uncached or gathering only
- * o 128K or bigger block
- * o Power of 2 block
- * o base suitably aligned to the power
- */
- if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
- || (size & ~(size - 1)) - size || (base & (size - 1)))
- return -EINVAL;
- return 0;
-}
-
-static const struct mtrr_ops amd_mtrr_ops = {
- .vendor = X86_VENDOR_AMD,
- .set = amd_set_mtrr,
- .get = amd_get_mtrr,
- .get_free_region = generic_get_free_region,
- .validate_add_page = amd_validate_add_page,
- .have_wrcomb = positive_have_wrcomb,
-};
-
-int __init amd_init_mtrr(void)
-{
- set_mtrr_ops(&amd_mtrr_ops);
- return 0;
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/centaur.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/centaur.c
deleted file mode 100644
index 316fe3e6..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/centaur.c
+++ /dev/null
@@ -1,126 +0,0 @@
-#include <linux/init.h>
-#include <linux/mm.h>
-
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-
-#include "mtrr.h"
-
-static struct {
- unsigned long high;
- unsigned long low;
-} centaur_mcr[8];
-
-static u8 centaur_mcr_reserved;
-static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */
-
-/**
- * centaur_get_free_region - Get a free MTRR.
- *
- * @base: The starting (base) address of the region.
- * @size: The size (in bytes) of the region.
- *
- * Returns: the index of the region on success, else -ENOSPC on error.
- */
-static int
-centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg)
-{
- unsigned long lbase, lsize;
- mtrr_type ltype;
- int i, max;
-
- max = num_var_ranges;
- if (replace_reg >= 0 && replace_reg < max)
- return replace_reg;
-
- for (i = 0; i < max; ++i) {
- if (centaur_mcr_reserved & (1 << i))
- continue;
- mtrr_if->get(i, &lbase, &lsize, &ltype);
- if (lsize == 0)
- return i;
- }
-
- return -ENOSPC;
-}
-
-/*
- * Report boot time MCR setups
- */
-void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
-{
- centaur_mcr[mcr].low = lo;
- centaur_mcr[mcr].high = hi;
-}
-
-static void
-centaur_get_mcr(unsigned int reg, unsigned long *base,
- unsigned long *size, mtrr_type * type)
-{
- *base = centaur_mcr[reg].high >> PAGE_SHIFT;
- *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
- *type = MTRR_TYPE_WRCOMB; /* write-combining */
-
- if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2))
- *type = MTRR_TYPE_UNCACHABLE;
- if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25)
- *type = MTRR_TYPE_WRBACK;
- if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31)
- *type = MTRR_TYPE_WRBACK;
-}
-
-static void
-centaur_set_mcr(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type)
-{
- unsigned long low, high;
-
- if (size == 0) {
- /* Disable */
- high = low = 0;
- } else {
- high = base << PAGE_SHIFT;
- if (centaur_mcr_type == 0) {
- /* Only support write-combining... */
- low = -size << PAGE_SHIFT | 0x1f;
- } else {
- if (type == MTRR_TYPE_UNCACHABLE)
- low = -size << PAGE_SHIFT | 0x02; /* NC */
- else
- low = -size << PAGE_SHIFT | 0x09; /* WWO, WC */
- }
- }
- centaur_mcr[reg].high = high;
- centaur_mcr[reg].low = low;
- wrmsr(MSR_IDT_MCR0 + reg, low, high);
-}
-
-static int
-centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
-{
- /*
- * FIXME: Winchip2 supports uncached
- */
- if (type != MTRR_TYPE_WRCOMB &&
- (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
- pr_warning("mtrr: only write-combining%s supported\n",
- centaur_mcr_type ? " and uncacheable are" : " is");
- return -EINVAL;
- }
- return 0;
-}
-
-static const struct mtrr_ops centaur_mtrr_ops = {
- .vendor = X86_VENDOR_CENTAUR,
- .set = centaur_set_mcr,
- .get = centaur_get_mcr,
- .get_free_region = centaur_get_free_region,
- .validate_add_page = centaur_validate_add_page,
- .have_wrcomb = positive_have_wrcomb,
-};
-
-int __init centaur_init_mtrr(void)
-{
- set_mtrr_ops(&centaur_mtrr_ops);
- return 0;
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/cleanup.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/cleanup.c
deleted file mode 100644
index ac140c7b..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ /dev/null
@@ -1,980 +0,0 @@
-/*
- * MTRR (Memory Type Range Register) cleanup
- *
- * Copyright (C) 2009 Yinghai Lu
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/smp.h>
-#include <linux/cpu.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-#include <linux/kvm_para.h>
-#include <linux/range.h>
-
-#include <asm/processor.h>
-#include <asm/e820.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-
-#include "mtrr.h"
-
-struct var_mtrr_range_state {
- unsigned long base_pfn;
- unsigned long size_pfn;
- mtrr_type type;
-};
-
-struct var_mtrr_state {
- unsigned long range_startk;
- unsigned long range_sizek;
- unsigned long chunk_sizek;
- unsigned long gran_sizek;
- unsigned int reg;
-};
-
-/* Should be related to MTRR_VAR_RANGES nums */
-#define RANGE_NUM 256
-
-static struct range __initdata range[RANGE_NUM];
-static int __initdata nr_range;
-
-static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
-
-static int __initdata debug_print;
-#define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0)
-
-#define BIOS_BUG_MSG KERN_WARNING \
- "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
-
-static int __init
-x86_get_mtrr_mem_range(struct range *range, int nr_range,
- unsigned long extra_remove_base,
- unsigned long extra_remove_size)
-{
- unsigned long base, size;
- mtrr_type type;
- int i;
-
- for (i = 0; i < num_var_ranges; i++) {
- type = range_state[i].type;
- if (type != MTRR_TYPE_WRBACK)
- continue;
- base = range_state[i].base_pfn;
- size = range_state[i].size_pfn;
- nr_range = add_range_with_merge(range, RANGE_NUM, nr_range,
- base, base + size);
- }
- if (debug_print) {
- printk(KERN_DEBUG "After WB checking\n");
- for (i = 0; i < nr_range; i++)
- printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
- range[i].start, range[i].end);
- }
-
- /* Take out UC ranges: */
- for (i = 0; i < num_var_ranges; i++) {
- type = range_state[i].type;
- if (type != MTRR_TYPE_UNCACHABLE &&
- type != MTRR_TYPE_WRPROT)
- continue;
- size = range_state[i].size_pfn;
- if (!size)
- continue;
- base = range_state[i].base_pfn;
- if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed &&
- (mtrr_state.enabled & 1)) {
- /* Var MTRR contains UC entry below 1M? Skip it: */
- printk(BIOS_BUG_MSG, i);
- if (base + size <= (1<<(20-PAGE_SHIFT)))
- continue;
- size -= (1<<(20-PAGE_SHIFT)) - base;
- base = 1<<(20-PAGE_SHIFT);
- }
- subtract_range(range, RANGE_NUM, base, base + size);
- }
- if (extra_remove_size)
- subtract_range(range, RANGE_NUM, extra_remove_base,
- extra_remove_base + extra_remove_size);
-
- if (debug_print) {
- printk(KERN_DEBUG "After UC checking\n");
- for (i = 0; i < RANGE_NUM; i++) {
- if (!range[i].end)
- continue;
- printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
- range[i].start, range[i].end);
- }
- }
-
- /* sort the ranges */
- nr_range = clean_sort_range(range, RANGE_NUM);
- if (debug_print) {
- printk(KERN_DEBUG "After sorting\n");
- for (i = 0; i < nr_range; i++)
- printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
- range[i].start, range[i].end);
- }
-
- return nr_range;
-}
-
-#ifdef CONFIG_MTRR_SANITIZER
-
-static unsigned long __init sum_ranges(struct range *range, int nr_range)
-{
- unsigned long sum = 0;
- int i;
-
- for (i = 0; i < nr_range; i++)
- sum += range[i].end - range[i].start;
-
- return sum;
-}
-
-static int enable_mtrr_cleanup __initdata =
- CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT;
-
-static int __init disable_mtrr_cleanup_setup(char *str)
-{
- enable_mtrr_cleanup = 0;
- return 0;
-}
-early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup);
-
-static int __init enable_mtrr_cleanup_setup(char *str)
-{
- enable_mtrr_cleanup = 1;
- return 0;
-}
-early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);
-
-static int __init mtrr_cleanup_debug_setup(char *str)
-{
- debug_print = 1;
- return 0;
-}
-early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
-
-static void __init
-set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
- unsigned char type, unsigned int address_bits)
-{
- u32 base_lo, base_hi, mask_lo, mask_hi;
- u64 base, mask;
-
- if (!sizek) {
- fill_mtrr_var_range(reg, 0, 0, 0, 0);
- return;
- }
-
- mask = (1ULL << address_bits) - 1;
- mask &= ~((((u64)sizek) << 10) - 1);
-
- base = ((u64)basek) << 10;
-
- base |= type;
- mask |= 0x800;
-
- base_lo = base & ((1ULL<<32) - 1);
- base_hi = base >> 32;
-
- mask_lo = mask & ((1ULL<<32) - 1);
- mask_hi = mask >> 32;
-
- fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi);
-}
-
-static void __init
-save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
- unsigned char type)
-{
- range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10);
- range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10);
- range_state[reg].type = type;
-}
-
-static void __init set_var_mtrr_all(unsigned int address_bits)
-{
- unsigned long basek, sizek;
- unsigned char type;
- unsigned int reg;
-
- for (reg = 0; reg < num_var_ranges; reg++) {
- basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10);
- sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10);
- type = range_state[reg].type;
-
- set_var_mtrr(reg, basek, sizek, type, address_bits);
- }
-}
-
-static unsigned long to_size_factor(unsigned long sizek, char *factorp)
-{
- unsigned long base = sizek;
- char factor;
-
- if (base & ((1<<10) - 1)) {
- /* Not MB-aligned: */
- factor = 'K';
- } else if (base & ((1<<20) - 1)) {
- factor = 'M';
- base >>= 10;
- } else {
- factor = 'G';
- base >>= 20;
- }
-
- *factorp = factor;
-
- return base;
-}
-
-static unsigned int __init
-range_to_mtrr(unsigned int reg, unsigned long range_startk,
- unsigned long range_sizek, unsigned char type)
-{
- if (!range_sizek || (reg >= num_var_ranges))
- return reg;
-
- while (range_sizek) {
- unsigned long max_align, align;
- unsigned long sizek;
-
- /* Compute the maximum size with which we can make a range: */
- if (range_startk)
- max_align = ffs(range_startk) - 1;
- else
- max_align = 32;
-
- align = fls(range_sizek) - 1;
- if (align > max_align)
- align = max_align;
-
- sizek = 1 << align;
- if (debug_print) {
- char start_factor = 'K', size_factor = 'K';
- unsigned long start_base, size_base;
-
- start_base = to_size_factor(range_startk, &start_factor);
- size_base = to_size_factor(sizek, &size_factor);
-
- Dprintk("Setting variable MTRR %d, "
- "base: %ld%cB, range: %ld%cB, type %s\n",
- reg, start_base, start_factor,
- size_base, size_factor,
- (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
- ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other")
- );
- }
- save_var_mtrr(reg++, range_startk, sizek, type);
- range_startk += sizek;
- range_sizek -= sizek;
- if (reg >= num_var_ranges)
- break;
- }
- return reg;
-}
-
-static unsigned __init
-range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
- unsigned long sizek)
-{
- unsigned long hole_basek, hole_sizek;
- unsigned long second_basek, second_sizek;
- unsigned long range0_basek, range0_sizek;
- unsigned long range_basek, range_sizek;
- unsigned long chunk_sizek;
- unsigned long gran_sizek;
-
- hole_basek = 0;
- hole_sizek = 0;
- second_basek = 0;
- second_sizek = 0;
- chunk_sizek = state->chunk_sizek;
- gran_sizek = state->gran_sizek;
-
- /* Align to gran size; prevent small blocks from using up MTRRs: */
- range_basek = ALIGN(state->range_startk, gran_sizek);
- if ((range_basek > basek) && basek)
- return second_sizek;
-
- state->range_sizek -= (range_basek - state->range_startk);
- range_sizek = ALIGN(state->range_sizek, gran_sizek);
-
- while (range_sizek > state->range_sizek) {
- range_sizek -= gran_sizek;
- if (!range_sizek)
- return 0;
- }
- state->range_sizek = range_sizek;
-
- /* Try to append some small hole: */
- range0_basek = state->range_startk;
- range0_sizek = ALIGN(state->range_sizek, chunk_sizek);
-
- /* No increase: */
- if (range0_sizek == state->range_sizek) {
- Dprintk("rangeX: %016lx - %016lx\n",
- range0_basek<<10,
- (range0_basek + state->range_sizek)<<10);
- state->reg = range_to_mtrr(state->reg, range0_basek,
- state->range_sizek, MTRR_TYPE_WRBACK);
- return 0;
- }
-
- /* Only cut back when it is not the last: */
- if (sizek) {
- while (range0_basek + range0_sizek > (basek + sizek)) {
- if (range0_sizek >= chunk_sizek)
- range0_sizek -= chunk_sizek;
- else
- range0_sizek = 0;
-
- if (!range0_sizek)
- break;
- }
- }
-
-second_try:
- range_basek = range0_basek + range0_sizek;
-
- /* One hole in the middle: */
- if (range_basek > basek && range_basek <= (basek + sizek))
- second_sizek = range_basek - basek;
-
- if (range0_sizek > state->range_sizek) {
-
- /* One hole in middle or at the end: */
- hole_sizek = range0_sizek - state->range_sizek - second_sizek;
-
- /* Hole size should be less than half of range0 size: */
- if (hole_sizek >= (range0_sizek >> 1) &&
- range0_sizek >= chunk_sizek) {
- range0_sizek -= chunk_sizek;
- second_sizek = 0;
- hole_sizek = 0;
-
- goto second_try;
- }
- }
-
- if (range0_sizek) {
- Dprintk("range0: %016lx - %016lx\n",
- range0_basek<<10,
- (range0_basek + range0_sizek)<<10);
- state->reg = range_to_mtrr(state->reg, range0_basek,
- range0_sizek, MTRR_TYPE_WRBACK);
- }
-
- if (range0_sizek < state->range_sizek) {
- /* Need to handle left over range: */
- range_sizek = state->range_sizek - range0_sizek;
-
- Dprintk("range: %016lx - %016lx\n",
- range_basek<<10,
- (range_basek + range_sizek)<<10);
-
- state->reg = range_to_mtrr(state->reg, range_basek,
- range_sizek, MTRR_TYPE_WRBACK);
- }
-
- if (hole_sizek) {
- hole_basek = range_basek - hole_sizek - second_sizek;
- Dprintk("hole: %016lx - %016lx\n",
- hole_basek<<10,
- (hole_basek + hole_sizek)<<10);
- state->reg = range_to_mtrr(state->reg, hole_basek,
- hole_sizek, MTRR_TYPE_UNCACHABLE);
- }
-
- return second_sizek;
-}
-
-static void __init
-set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
- unsigned long size_pfn)
-{
- unsigned long basek, sizek;
- unsigned long second_sizek = 0;
-
- if (state->reg >= num_var_ranges)
- return;
-
- basek = base_pfn << (PAGE_SHIFT - 10);
- sizek = size_pfn << (PAGE_SHIFT - 10);
-
- /* See if I can merge with the last range: */
- if ((basek <= 1024) ||
- (state->range_startk + state->range_sizek == basek)) {
- unsigned long endk = basek + sizek;
- state->range_sizek = endk - state->range_startk;
- return;
- }
- /* Write the range mtrrs: */
- if (state->range_sizek != 0)
- second_sizek = range_to_mtrr_with_hole(state, basek, sizek);
-
- /* Allocate an msr: */
- state->range_startk = basek + second_sizek;
- state->range_sizek = sizek - second_sizek;
-}
-
-/* Minimum size of mtrr block that can take a hole: */
-static u64 mtrr_chunk_size __initdata = (256ULL<<20);
-
-static int __init parse_mtrr_chunk_size_opt(char *p)
-{
- if (!p)
- return -EINVAL;
- mtrr_chunk_size = memparse(p, &p);
- return 0;
-}
-early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt);
-
-/* Granularity of mtrr block: */
-static u64 mtrr_gran_size __initdata;
-
-static int __init parse_mtrr_gran_size_opt(char *p)
-{
- if (!p)
- return -EINVAL;
- mtrr_gran_size = memparse(p, &p);
- return 0;
-}
-early_param("mtrr_gran_size", parse_mtrr_gran_size_opt);
-
-static unsigned long nr_mtrr_spare_reg __initdata =
- CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT;
-
-static int __init parse_mtrr_spare_reg(char *arg)
-{
- if (arg)
- nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0);
- return 0;
-}
-early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);
-
-static int __init
-x86_setup_var_mtrrs(struct range *range, int nr_range,
- u64 chunk_size, u64 gran_size)
-{
- struct var_mtrr_state var_state;
- int num_reg;
- int i;
-
- var_state.range_startk = 0;
- var_state.range_sizek = 0;
- var_state.reg = 0;
- var_state.chunk_sizek = chunk_size >> 10;
- var_state.gran_sizek = gran_size >> 10;
-
- memset(range_state, 0, sizeof(range_state));
-
- /* Write the range: */
- for (i = 0; i < nr_range; i++) {
- set_var_mtrr_range(&var_state, range[i].start,
- range[i].end - range[i].start);
- }
-
- /* Write the last range: */
- if (var_state.range_sizek != 0)
- range_to_mtrr_with_hole(&var_state, 0, 0);
-
- num_reg = var_state.reg;
- /* Clear out the extra MTRR's: */
- while (var_state.reg < num_var_ranges) {
- save_var_mtrr(var_state.reg, 0, 0, 0);
- var_state.reg++;
- }
-
- return num_reg;
-}
-
-struct mtrr_cleanup_result {
- unsigned long gran_sizek;
- unsigned long chunk_sizek;
- unsigned long lose_cover_sizek;
- unsigned int num_reg;
- int bad;
-};
-
-/*
- * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G (16 values)
- * chunk_size: gran_size, ..., 2G
- * so we need 16 + 15 + ... + 1 = (1 + 16) * 8 = 136 result slots
- */
-#define NUM_RESULT 136
-#define PSHIFT (PAGE_SHIFT - 10)
-
-static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
-static unsigned long __initdata min_loss_pfn[RANGE_NUM];
-
-static void __init print_out_mtrr_range_state(void)
-{
- char start_factor = 'K', size_factor = 'K';
- unsigned long start_base, size_base;
- mtrr_type type;
- int i;
-
- for (i = 0; i < num_var_ranges; i++) {
-
- size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
- if (!size_base)
- continue;
-
- size_base = to_size_factor(size_base, &size_factor),
- start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
- start_base = to_size_factor(start_base, &start_factor),
- type = range_state[i].type;
-
- printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
- i, start_base, start_factor,
- size_base, size_factor,
- (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
- ((type == MTRR_TYPE_WRPROT) ? "WP" :
- ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
- );
- }
-}
-
-static int __init mtrr_need_cleanup(void)
-{
- int i;
- mtrr_type type;
- unsigned long size;
- /* Extra one for all 0: */
- int num[MTRR_NUM_TYPES + 1];
-
-	/* Count the number of entries of each type: */
- memset(num, 0, sizeof(num));
- for (i = 0; i < num_var_ranges; i++) {
- type = range_state[i].type;
- size = range_state[i].size_pfn;
- if (type >= MTRR_NUM_TYPES)
- continue;
- if (!size)
- type = MTRR_NUM_TYPES;
- num[type]++;
- }
-
- /* Check if we got UC entries: */
- if (!num[MTRR_TYPE_UNCACHABLE])
- return 0;
-
- /* Check if we only had WB and UC */
- if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
- num_var_ranges - num[MTRR_NUM_TYPES])
- return 0;
-
- return 1;
-}
-
-static unsigned long __initdata range_sums;
-
-static void __init
-mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
- unsigned long x_remove_base,
- unsigned long x_remove_size, int i)
-{
- static struct range range_new[RANGE_NUM];
- unsigned long range_sums_new;
- static int nr_range_new;
- int num_reg;
-
- /* Convert ranges to var ranges state: */
- num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
-
- /* We got new setting in range_state, check it: */
- memset(range_new, 0, sizeof(range_new));
- nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
- x_remove_base, x_remove_size);
- range_sums_new = sum_ranges(range_new, nr_range_new);
-
- result[i].chunk_sizek = chunk_size >> 10;
- result[i].gran_sizek = gran_size >> 10;
- result[i].num_reg = num_reg;
-
- if (range_sums < range_sums_new) {
- result[i].lose_cover_sizek = (range_sums_new - range_sums) << PSHIFT;
- result[i].bad = 1;
- } else {
- result[i].lose_cover_sizek = (range_sums - range_sums_new) << PSHIFT;
- }
-
- /* Double check it: */
- if (!result[i].bad && !result[i].lose_cover_sizek) {
- if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range)))
- result[i].bad = 1;
- }
-
- if (!result[i].bad && (range_sums - range_sums_new < min_loss_pfn[num_reg]))
- min_loss_pfn[num_reg] = range_sums - range_sums_new;
-}
-
-static void __init mtrr_print_out_one_result(int i)
-{
- unsigned long gran_base, chunk_base, lose_base;
- char gran_factor, chunk_factor, lose_factor;
-
- gran_base = to_size_factor(result[i].gran_sizek, &gran_factor);
- chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor);
- lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor);
-
- pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t",
- result[i].bad ? "*BAD*" : " ",
- gran_base, gran_factor, chunk_base, chunk_factor);
- pr_cont("num_reg: %d \tlose cover RAM: %s%ld%c\n",
- result[i].num_reg, result[i].bad ? "-" : "",
- lose_base, lose_factor);
-}
-
-static int __init mtrr_search_optimal_index(void)
-{
- int num_reg_good;
- int index_good;
- int i;
-
- if (nr_mtrr_spare_reg >= num_var_ranges)
- nr_mtrr_spare_reg = num_var_ranges - 1;
-
- num_reg_good = -1;
- for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
- if (!min_loss_pfn[i])
- num_reg_good = i;
- }
-
- index_good = -1;
- if (num_reg_good != -1) {
- for (i = 0; i < NUM_RESULT; i++) {
- if (!result[i].bad &&
- result[i].num_reg == num_reg_good &&
- !result[i].lose_cover_sizek) {
- index_good = i;
- break;
- }
- }
- }
-
- return index_good;
-}
-
-int __init mtrr_cleanup(unsigned address_bits)
-{
- unsigned long x_remove_base, x_remove_size;
- unsigned long base, size, def, dummy;
- u64 chunk_size, gran_size;
- mtrr_type type;
- int index_good;
- int i;
-
- if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
- return 0;
-
- rdmsr(MSR_MTRRdefType, def, dummy);
- def &= 0xff;
- if (def != MTRR_TYPE_UNCACHABLE)
- return 0;
-
- /* Get it and store it aside: */
- memset(range_state, 0, sizeof(range_state));
- for (i = 0; i < num_var_ranges; i++) {
- mtrr_if->get(i, &base, &size, &type);
- range_state[i].base_pfn = base;
- range_state[i].size_pfn = size;
- range_state[i].type = type;
- }
-
-	/* Check if we need to handle it and can handle it: */
- if (!mtrr_need_cleanup())
- return 0;
-
- /* Print original var MTRRs at first, for debugging: */
- printk(KERN_DEBUG "original variable MTRRs\n");
- print_out_mtrr_range_state();
-
- memset(range, 0, sizeof(range));
- x_remove_size = 0;
- x_remove_base = 1 << (32 - PAGE_SHIFT);
- if (mtrr_tom2)
- x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;
-
- nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
- /*
- * [0, 1M) should always be covered by var mtrr with WB
- * and fixed mtrrs should take effect before var mtrr for it:
- */
- nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0,
- 1ULL<<(20 - PAGE_SHIFT));
- /* Sort the ranges: */
- sort_range(range, nr_range);
-
- range_sums = sum_ranges(range, nr_range);
- printk(KERN_INFO "total RAM covered: %ldM\n",
- range_sums >> (20 - PAGE_SHIFT));
-
- if (mtrr_chunk_size && mtrr_gran_size) {
- i = 0;
- mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size,
- x_remove_base, x_remove_size, i);
-
- mtrr_print_out_one_result(i);
-
- if (!result[i].bad) {
- set_var_mtrr_all(address_bits);
- printk(KERN_DEBUG "New variable MTRRs\n");
- print_out_mtrr_range_state();
- return 1;
- }
- printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
- "will find optimal one\n");
- }
-
- i = 0;
- memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
- memset(result, 0, sizeof(result));
- for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {
-
- for (chunk_size = gran_size; chunk_size < (1ULL<<32);
- chunk_size <<= 1) {
-
- if (i >= NUM_RESULT)
- continue;
-
- mtrr_calc_range_state(chunk_size, gran_size,
- x_remove_base, x_remove_size, i);
- if (debug_print) {
- mtrr_print_out_one_result(i);
- printk(KERN_INFO "\n");
- }
-
- i++;
- }
- }
-
- /* Try to find the optimal index: */
- index_good = mtrr_search_optimal_index();
-
- if (index_good != -1) {
- printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
- i = index_good;
- mtrr_print_out_one_result(i);
-
- /* Convert ranges to var ranges state: */
- chunk_size = result[i].chunk_sizek;
- chunk_size <<= 10;
- gran_size = result[i].gran_sizek;
- gran_size <<= 10;
- x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
- set_var_mtrr_all(address_bits);
- printk(KERN_DEBUG "New variable MTRRs\n");
- print_out_mtrr_range_state();
- return 1;
- } else {
- /* print out all */
- for (i = 0; i < NUM_RESULT; i++)
- mtrr_print_out_one_result(i);
- }
-
- printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n");
- printk(KERN_INFO "please specify mtrr_gran_size/mtrr_chunk_size\n");
-
- return 0;
-}
-#else
-int __init mtrr_cleanup(unsigned address_bits)
-{
- return 0;
-}
-#endif
-
-static int disable_mtrr_trim;
-
-static int __init disable_mtrr_trim_setup(char *str)
-{
- disable_mtrr_trim = 1;
- return 0;
-}
-early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
-
-/*
- * Newer AMD K8s and later CPUs have a special magic MSR way to force WB
- * for memory >4GB. Check for that here.
- * Note this won't check whether the MTRRs below 4GB (where the magic bit
- * doesn't apply) are wrong, but so far we don't know of any such case in
- * the wild.
- */
-#define Tom2Enabled (1U << 21)
-#define Tom2ForceMemTypeWB (1U << 22)
-
-int __init amd_special_default_mtrr(void)
-{
- u32 l, h;
-
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
- return 0;
- if (boot_cpu_data.x86 < 0xf)
- return 0;
- /* In case some hypervisor doesn't pass SYSCFG through: */
- if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
- return 0;
- /*
- * Memory between 4GB and top of mem is forced WB by this magic bit.
- * Reserved before K8RevF, but should be zero there.
- */
- if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
- (Tom2Enabled | Tom2ForceMemTypeWB))
- return 1;
- return 0;
-}
-
-static u64 __init
-real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn)
-{
- u64 trim_start, trim_size;
-
- trim_start = start_pfn;
- trim_start <<= PAGE_SHIFT;
-
- trim_size = limit_pfn;
- trim_size <<= PAGE_SHIFT;
- trim_size -= trim_start;
-
- return e820_update_range(trim_start, trim_size, E820_RAM, E820_RESERVED);
-}
-
-/**
- * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
- * @end_pfn: ending page frame number
- *
- * Some buggy BIOSes don't set up the MTRRs properly for systems with certain
- * memory configurations. This routine checks that the highest MTRR matches
- * the end of memory, to make sure the MTRRs with a write-back type cover
- * all of the memory the kernel intends to use. If not, it trims any
- * memory off the end, removing it from the kernel's allocation pools and
- * warning the user with an obnoxious message.
- */
-int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
-{
- unsigned long i, base, size, highest_pfn = 0, def, dummy;
- mtrr_type type;
- u64 total_trim_size;
- /* extra one for all 0 */
- int num[MTRR_NUM_TYPES + 1];
-
- /*
- * Make sure we only trim uncachable memory on machines that
- * support the Intel MTRR architecture:
- */
- if (!is_cpu(INTEL) || disable_mtrr_trim)
- return 0;
-
- rdmsr(MSR_MTRRdefType, def, dummy);
- def &= 0xff;
- if (def != MTRR_TYPE_UNCACHABLE)
- return 0;
-
- /* Get it and store it aside: */
- memset(range_state, 0, sizeof(range_state));
- for (i = 0; i < num_var_ranges; i++) {
- mtrr_if->get(i, &base, &size, &type);
- range_state[i].base_pfn = base;
- range_state[i].size_pfn = size;
- range_state[i].type = type;
- }
-
- /* Find highest cached pfn: */
- for (i = 0; i < num_var_ranges; i++) {
- type = range_state[i].type;
- if (type != MTRR_TYPE_WRBACK)
- continue;
- base = range_state[i].base_pfn;
- size = range_state[i].size_pfn;
- if (highest_pfn < base + size)
- highest_pfn = base + size;
- }
-
-	/* kvm/qemu don't set the MTRRs up right; don't trim all of memory: */
- if (!highest_pfn) {
- printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
- return 0;
- }
-
-	/* Count the number of entries of each type: */
- memset(num, 0, sizeof(num));
- for (i = 0; i < num_var_ranges; i++) {
- type = range_state[i].type;
- if (type >= MTRR_NUM_TYPES)
- continue;
- size = range_state[i].size_pfn;
- if (!size)
- type = MTRR_NUM_TYPES;
- num[type]++;
- }
-
- /* No entry for WB? */
- if (!num[MTRR_TYPE_WRBACK])
- return 0;
-
- /* Check if we only had WB and UC: */
- if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
- num_var_ranges - num[MTRR_NUM_TYPES])
- return 0;
-
- memset(range, 0, sizeof(range));
- nr_range = 0;
- if (mtrr_tom2) {
- range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
- range[nr_range].end = mtrr_tom2 >> PAGE_SHIFT;
- if (highest_pfn < range[nr_range].end)
- highest_pfn = range[nr_range].end;
- nr_range++;
- }
- nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);
-
- /* Check the head: */
- total_trim_size = 0;
- if (range[0].start)
- total_trim_size += real_trim_memory(0, range[0].start);
-
- /* Check the holes: */
- for (i = 0; i < nr_range - 1; i++) {
- if (range[i].end < range[i+1].start)
- total_trim_size += real_trim_memory(range[i].end,
- range[i+1].start);
- }
-
- /* Check the top: */
- i = nr_range - 1;
- if (range[i].end < end_pfn)
- total_trim_size += real_trim_memory(range[i].end,
- end_pfn);
-
- if (total_trim_size) {
- pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20);
-
- if (!changed_by_mtrr_cleanup)
- WARN_ON(1);
-
- pr_info("update e820 for mtrr\n");
- update_e820();
-
- return 1;
- }
-
- return 0;
-}
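
For reference, the gran_size/chunk_size search loop in mtrr_cleanup() above enumerates every power-of-two pair with 64K <= gran_size <= chunk_size <= 2G, which is where the NUM_RESULT value of 136 comes from. A standalone sketch of that enumeration (illustrative only; it mirrors the loop bounds above):

#include <stdio.h>

int main(void)
{
	unsigned long long gran, chunk;
	int i = 0;

	for (gran = 1ULL << 16; gran < (1ULL << 32); gran <<= 1)
		for (chunk = gran; chunk < (1ULL << 32); chunk <<= 1)
			i++;

	printf("combinations: %d\n", i);	/* prints 136 */
	return 0;
}
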
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/cyrix.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/cyrix.c
deleted file mode 100644
index 68a3343e..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ /dev/null
@@ -1,282 +0,0 @@
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-
-#include <asm/processor-cyrix.h>
-#include <asm/processor-flags.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-
-#include "mtrr.h"
-
-static void
-cyrix_get_arr(unsigned int reg, unsigned long *base,
- unsigned long *size, mtrr_type * type)
-{
- unsigned char arr, ccr3, rcr, shift;
- unsigned long flags;
-
- arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
-
- local_irq_save(flags);
-
- ccr3 = getCx86(CX86_CCR3);
- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
- ((unsigned char *)base)[3] = getCx86(arr);
- ((unsigned char *)base)[2] = getCx86(arr + 1);
- ((unsigned char *)base)[1] = getCx86(arr + 2);
- rcr = getCx86(CX86_RCR_BASE + reg);
- setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
-
- local_irq_restore(flags);
-
- shift = ((unsigned char *) base)[1] & 0x0f;
- *base >>= PAGE_SHIFT;
-
- /*
- * Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
- * Note: shift==0xf means 4G, this is unsupported.
- */
- if (shift)
- *size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
- else
- *size = 0;
-
- /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
- if (reg < 7) {
- switch (rcr) {
- case 1:
- *type = MTRR_TYPE_UNCACHABLE;
- break;
- case 8:
- *type = MTRR_TYPE_WRBACK;
- break;
- case 9:
- *type = MTRR_TYPE_WRCOMB;
- break;
- case 24:
- default:
- *type = MTRR_TYPE_WRTHROUGH;
- break;
- }
- } else {
- switch (rcr) {
- case 0:
- *type = MTRR_TYPE_UNCACHABLE;
- break;
- case 8:
- *type = MTRR_TYPE_WRCOMB;
- break;
- case 9:
- *type = MTRR_TYPE_WRBACK;
- break;
- case 25:
- default:
- *type = MTRR_TYPE_WRTHROUGH;
- break;
- }
- }
-}
-
-/*
- * cyrix_get_free_region - get a free ARR.
- *
- * @base: the starting (base) address of the region.
- * @size: the size (in bytes) of the region.
- *
- * Returns: the index of the region on success, else a negative error code.
- */
-static int
-cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
-{
- unsigned long lbase, lsize;
- mtrr_type ltype;
- int i;
-
- switch (replace_reg) {
- case 7:
- if (size < 0x40)
- break;
- case 6:
- case 5:
- case 4:
- return replace_reg;
- case 3:
- case 2:
- case 1:
- case 0:
- return replace_reg;
- }
- /* If we are to set up a region >32M then look at ARR7 immediately */
- if (size > 0x2000) {
- cyrix_get_arr(7, &lbase, &lsize, &ltype);
- if (lsize == 0)
- return 7;
- /* Else try ARR0-ARR6 first */
- } else {
- for (i = 0; i < 7; i++) {
- cyrix_get_arr(i, &lbase, &lsize, &ltype);
- if (lsize == 0)
- return i;
- }
-		/*
-		 * ARR0-ARR6 aren't free;
-		 * try ARR7, but its size must be at least 256K.
-		 */
- cyrix_get_arr(i, &lbase, &lsize, &ltype);
- if ((lsize == 0) && (size >= 0x40))
- return i;
- }
- return -ENOSPC;
-}
-
-static u32 cr4, ccr3;
-
-static void prepare_set(void)
-{
- u32 cr0;
-
- /* Save value of CR4 and clear Page Global Enable (bit 7) */
- if (cpu_has_pge) {
- cr4 = read_cr4();
- write_cr4(cr4 & ~X86_CR4_PGE);
- }
-
- /*
- * Disable and flush caches.
- * Note that wbinvd flushes the TLBs as a side-effect
- */
- cr0 = read_cr0() | X86_CR0_CD;
- wbinvd();
- write_cr0(cr0);
- wbinvd();
-
- /* Cyrix ARRs - everything else was excluded at the top */
- ccr3 = getCx86(CX86_CCR3);
-
-	/* Enable MAPEN */
- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
-}
-
-static void post_set(void)
-{
- /* Flush caches and TLBs */
- wbinvd();
-
- /* Cyrix ARRs - everything else was excluded at the top */
- setCx86(CX86_CCR3, ccr3);
-
- /* Enable caches */
- write_cr0(read_cr0() & 0xbfffffff);
-
- /* Restore value of CR4 */
- if (cpu_has_pge)
- write_cr4(cr4);
-}
-
-static void cyrix_set_arr(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type)
-{
- unsigned char arr, arr_type, arr_size;
-
- arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
-
- /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
- if (reg >= 7)
- size >>= 6;
-
- size &= 0x7fff; /* make sure arr_size <= 14 */
- for (arr_size = 0; size; arr_size++, size >>= 1)
- ;
-
- if (reg < 7) {
- switch (type) {
- case MTRR_TYPE_UNCACHABLE:
- arr_type = 1;
- break;
- case MTRR_TYPE_WRCOMB:
- arr_type = 9;
- break;
- case MTRR_TYPE_WRTHROUGH:
- arr_type = 24;
- break;
- default:
- arr_type = 8;
- break;
- }
- } else {
- switch (type) {
- case MTRR_TYPE_UNCACHABLE:
- arr_type = 0;
- break;
- case MTRR_TYPE_WRCOMB:
- arr_type = 8;
- break;
- case MTRR_TYPE_WRTHROUGH:
- arr_type = 25;
- break;
- default:
- arr_type = 9;
- break;
- }
- }
-
- prepare_set();
-
- base <<= PAGE_SHIFT;
- setCx86(arr + 0, ((unsigned char *)&base)[3]);
- setCx86(arr + 1, ((unsigned char *)&base)[2]);
- setCx86(arr + 2, (((unsigned char *)&base)[1]) | arr_size);
- setCx86(CX86_RCR_BASE + reg, arr_type);
-
- post_set();
-}
-
-typedef struct {
- unsigned long base;
- unsigned long size;
- mtrr_type type;
-} arr_state_t;
-
-static arr_state_t arr_state[8] = {
- {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
- {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
-};
-
-static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 };
-
-static void cyrix_set_all(void)
-{
- int i;
-
- prepare_set();
-
- /* the CCRs are not contiguous */
- for (i = 0; i < 4; i++)
- setCx86(CX86_CCR0 + i, ccr_state[i]);
- for (; i < 7; i++)
- setCx86(CX86_CCR4 + i, ccr_state[i]);
-
- for (i = 0; i < 8; i++) {
- cyrix_set_arr(i, arr_state[i].base,
- arr_state[i].size, arr_state[i].type);
- }
-
- post_set();
-}
-
-static const struct mtrr_ops cyrix_mtrr_ops = {
- .vendor = X86_VENDOR_CYRIX,
- .set_all = cyrix_set_all,
- .set = cyrix_set_arr,
- .get = cyrix_get_arr,
- .get_free_region = cyrix_get_free_region,
- .validate_add_page = generic_validate_add_page,
- .have_wrcomb = positive_have_wrcomb,
-};
-
-int __init cyrix_init_mtrr(void)
-{
- set_mtrr_ops(&cyrix_mtrr_ops);
- return 0;
-}
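
The ARR size field handled by cyrix_set_arr()/cyrix_get_arr() above is essentially a power-of-two exponent: encoding counts the significant bits of the page count (pre-shifted by 6 for ARR7, which works in 256K units), and decoding shifts 4K (or 256K for ARR7) back up by shift - 1. A small round-trip sketch of that arithmetic, assuming the size is a power of two as the hardware requires:

#include <stdio.h>

/* Mirror of the encode/decode arithmetic; sizes are in 4K pages. */
static unsigned char encode(unsigned long pages, int reg)
{
	unsigned char shift;

	if (reg >= 7)
		pages >>= 6;		/* ARR7 counts in 256K units */
	pages &= 0x7fff;
	for (shift = 0; pages; shift++, pages >>= 1)
		;
	return shift;
}

static unsigned long decode(unsigned char shift, int reg)
{
	if (!shift)
		return 0;
	return (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
}

int main(void)
{
	unsigned long pages = 1UL << 13;	/* 32 MB in 4K pages */

	printf("ARR0: %lu -> %d -> %lu pages\n",
	       pages, encode(pages, 0), decode(encode(pages, 0), 0));
	printf("ARR7: %lu -> %d -> %lu pages\n",
	       pages, encode(pages, 7), decode(encode(pages, 7), 7));
	return 0;
}

Both lines print the original 8192 pages back, showing the two encodings agree for power-of-two sizes.
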
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/generic.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/generic.c
deleted file mode 100644
index 75772ae6..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/generic.c
+++ /dev/null
@@ -1,846 +0,0 @@
-/*
- * This only handles 32-bit MTRR on 32-bit hosts. This is strictly wrong
- * because MTRRs can span up to 40 bits (36 bits on most modern x86).
- */
-#define DEBUG
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-
-#include <asm/processor-flags.h>
-#include <asm/cpufeature.h>
-#include <asm/tlbflush.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-#include <asm/pat.h>
-
-#include "mtrr.h"
-
-struct fixed_range_block {
- int base_msr; /* start address of an MTRR block */
- int ranges; /* number of MTRRs in this block */
-};
-
-static struct fixed_range_block fixed_range_blocks[] = {
- { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */
- { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */
- { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */
- {}
-};
-
-static unsigned long smp_changes_mask;
-static int mtrr_state_set;
-u64 mtrr_tom2;
-
-struct mtrr_state_type mtrr_state;
-EXPORT_SYMBOL_GPL(mtrr_state);
-
-/*
- * BIOS is expected to clear MtrrFixDramModEn bit, see for example
- * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
- * Opteron Processors" (26094 Rev. 3.30 February 2006), section
- * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
- * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
- * 0 for operation."
- */
-static inline void k8_check_syscfg_dram_mod_en(void)
-{
- u32 lo, hi;
-
- if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
- (boot_cpu_data.x86 >= 0x0f)))
- return;
-
- rdmsr(MSR_K8_SYSCFG, lo, hi);
- if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
- printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
- " not cleared by BIOS, clearing this bit\n",
- smp_processor_id());
- lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
- mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
- }
-}
-
-/* Get the size of contiguous MTRR range */
-static u64 get_mtrr_size(u64 mask)
-{
- u64 size;
-
- mask >>= PAGE_SHIFT;
- mask |= size_or_mask;
- size = -mask;
- size <<= PAGE_SHIFT;
- return size;
-}
-
-/*
- * Check and return the effective type for MTRR-MTRR type overlap.
- * Returns 1 if the effective type is UNCACHEABLE, else returns 0
- */
-static int check_type_overlap(u8 *prev, u8 *curr)
-{
- if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
- *prev = MTRR_TYPE_UNCACHABLE;
- *curr = MTRR_TYPE_UNCACHABLE;
- return 1;
- }
-
- if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
- (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
- *prev = MTRR_TYPE_WRTHROUGH;
- *curr = MTRR_TYPE_WRTHROUGH;
- }
-
- if (*prev != *curr) {
- *prev = MTRR_TYPE_UNCACHABLE;
- *curr = MTRR_TYPE_UNCACHABLE;
- return 1;
- }
-
- return 0;
-}
-
-/*
- * Error/Semi-error returns:
- * 0xFF - when MTRR is not enabled
- * *repeat == 1 implies [start:end] spanned across an MTRR range and the type
- * returned corresponds only to [start:*partial_end].
- * The caller has to look up [*partial_end:end] again.
- */
-static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
-{
- int i;
- u64 base, mask;
- u8 prev_match, curr_match;
-
- *repeat = 0;
- if (!mtrr_state_set)
- return 0xFF;
-
- if (!mtrr_state.enabled)
- return 0xFF;
-
-	/* Make end inclusive instead of exclusive */
- end--;
-
- /* Look in fixed ranges. Just return the type as per start */
- if (mtrr_state.have_fixed && (start < 0x100000)) {
- int idx;
-
- if (start < 0x80000) {
- idx = 0;
- idx += (start >> 16);
- return mtrr_state.fixed_ranges[idx];
- } else if (start < 0xC0000) {
- idx = 1 * 8;
- idx += ((start - 0x80000) >> 14);
- return mtrr_state.fixed_ranges[idx];
- } else if (start < 0x1000000) {
- idx = 3 * 8;
- idx += ((start - 0xC0000) >> 12);
- return mtrr_state.fixed_ranges[idx];
- }
- }
-
-	/*
-	 * Look in variable ranges:
-	 * look for multiple ranges matching this address and pick the type
-	 * as per MTRR precedence.
-	 */
- if (!(mtrr_state.enabled & 2))
- return mtrr_state.def_type;
-
- prev_match = 0xFF;
- for (i = 0; i < num_var_ranges; ++i) {
- unsigned short start_state, end_state;
-
- if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
- continue;
-
- base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
- (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
- mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
- (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
-
- start_state = ((start & mask) == (base & mask));
- end_state = ((end & mask) == (base & mask));
-
- if (start_state != end_state) {
- /*
- * We have start:end spanning across an MTRR.
- * We split the region into
- * either
- * (start:mtrr_end) (mtrr_end:end)
- * or
- * (start:mtrr_start) (mtrr_start:end)
- * depending on kind of overlap.
- * Return the type for first region and a pointer to
- * the start of second region so that caller will
- * lookup again on the second region.
- * Note: This way we handle multiple overlaps as well.
- */
- if (start_state)
- *partial_end = base + get_mtrr_size(mask);
- else
- *partial_end = base;
-
- if (unlikely(*partial_end <= start)) {
- WARN_ON(1);
- *partial_end = start + PAGE_SIZE;
- }
-
- end = *partial_end - 1; /* end is inclusive */
- *repeat = 1;
- }
-
- if ((start & mask) != (base & mask))
- continue;
-
- curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
- if (prev_match == 0xFF) {
- prev_match = curr_match;
- continue;
- }
-
- if (check_type_overlap(&prev_match, &curr_match))
- return curr_match;
- }
-
- if (mtrr_tom2) {
- if (start >= (1ULL<<32) && (end < mtrr_tom2))
- return MTRR_TYPE_WRBACK;
- }
-
- if (prev_match != 0xFF)
- return prev_match;
-
- return mtrr_state.def_type;
-}
-
-/*
- * Returns the effective MTRR type for the region
- * Error return:
- * 0xFF - when MTRR is not enabled
- */
-u8 mtrr_type_lookup(u64 start, u64 end)
-{
- u8 type, prev_type;
- int repeat;
- u64 partial_end;
-
- type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
-
- /*
- * Common path is with repeat = 0.
- * However, we can have cases where [start:end] spans across some
- * MTRR range. Do repeated lookups for that case here.
- */
- while (repeat) {
- prev_type = type;
- start = partial_end;
- type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
-
- if (check_type_overlap(&prev_type, &type))
- return type;
- }
-
- return type;
-}
-
-/* Get the MSR pair relating to a var range */
-static void
-get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
-{
- rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
- rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
-}
-
-/* Fill the MSR pair relating to a var range */
-void fill_mtrr_var_range(unsigned int index,
- u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
-{
- struct mtrr_var_range *vr;
-
- vr = mtrr_state.var_ranges;
-
- vr[index].base_lo = base_lo;
- vr[index].base_hi = base_hi;
- vr[index].mask_lo = mask_lo;
- vr[index].mask_hi = mask_hi;
-}
-
-static void get_fixed_ranges(mtrr_type *frs)
-{
- unsigned int *p = (unsigned int *)frs;
- int i;
-
- k8_check_syscfg_dram_mod_en();
-
- rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);
-
- for (i = 0; i < 2; i++)
- rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
- for (i = 0; i < 8; i++)
- rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
-}
-
-void mtrr_save_fixed_ranges(void *info)
-{
- if (cpu_has_mtrr)
- get_fixed_ranges(mtrr_state.fixed_ranges);
-}
-
-static unsigned __initdata last_fixed_start;
-static unsigned __initdata last_fixed_end;
-static mtrr_type __initdata last_fixed_type;
-
-static void __init print_fixed_last(void)
-{
- if (!last_fixed_end)
- return;
-
- pr_debug(" %05X-%05X %s\n", last_fixed_start,
- last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
-
- last_fixed_end = 0;
-}
-
-static void __init update_fixed_last(unsigned base, unsigned end,
- mtrr_type type)
-{
- last_fixed_start = base;
- last_fixed_end = end;
- last_fixed_type = type;
-}
-
-static void __init
-print_fixed(unsigned base, unsigned step, const mtrr_type *types)
-{
- unsigned i;
-
- for (i = 0; i < 8; ++i, ++types, base += step) {
- if (last_fixed_end == 0) {
- update_fixed_last(base, base + step, *types);
- continue;
- }
- if (last_fixed_end == base && last_fixed_type == *types) {
- last_fixed_end = base + step;
- continue;
- }
- /* new segments: gap or different type */
- print_fixed_last();
- update_fixed_last(base, base + step, *types);
- }
-}
-
-static void prepare_set(void);
-static void post_set(void);
-
-static void __init print_mtrr_state(void)
-{
- unsigned int i;
- int high_width;
-
- pr_debug("MTRR default type: %s\n",
- mtrr_attrib_to_str(mtrr_state.def_type));
- if (mtrr_state.have_fixed) {
- pr_debug("MTRR fixed ranges %sabled:\n",
- mtrr_state.enabled & 1 ? "en" : "dis");
- print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
- for (i = 0; i < 2; ++i)
- print_fixed(0x80000 + i * 0x20000, 0x04000,
- mtrr_state.fixed_ranges + (i + 1) * 8);
- for (i = 0; i < 8; ++i)
- print_fixed(0xC0000 + i * 0x08000, 0x01000,
- mtrr_state.fixed_ranges + (i + 3) * 8);
-
- /* tail */
- print_fixed_last();
- }
- pr_debug("MTRR variable ranges %sabled:\n",
- mtrr_state.enabled & 2 ? "en" : "dis");
- if (size_or_mask & 0xffffffffUL)
- high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
- else
- high_width = ffs(size_or_mask>>32) + 32 - 1;
- high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;
-
- for (i = 0; i < num_var_ranges; ++i) {
- if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
- pr_debug(" %u base %0*X%05X000 mask %0*X%05X000 %s\n",
- i,
- high_width,
- mtrr_state.var_ranges[i].base_hi,
- mtrr_state.var_ranges[i].base_lo >> 12,
- high_width,
- mtrr_state.var_ranges[i].mask_hi,
- mtrr_state.var_ranges[i].mask_lo >> 12,
- mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
- else
- pr_debug(" %u disabled\n", i);
- }
- if (mtrr_tom2)
- pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
-}
-
-/* Grab all of the MTRR state for this CPU into *state */
-void __init get_mtrr_state(void)
-{
- struct mtrr_var_range *vrs;
- unsigned long flags;
- unsigned lo, dummy;
- unsigned int i;
-
- vrs = mtrr_state.var_ranges;
-
- rdmsr(MSR_MTRRcap, lo, dummy);
- mtrr_state.have_fixed = (lo >> 8) & 1;
-
- for (i = 0; i < num_var_ranges; i++)
- get_mtrr_var_range(i, &vrs[i]);
- if (mtrr_state.have_fixed)
- get_fixed_ranges(mtrr_state.fixed_ranges);
-
- rdmsr(MSR_MTRRdefType, lo, dummy);
- mtrr_state.def_type = (lo & 0xff);
- mtrr_state.enabled = (lo & 0xc00) >> 10;
-
- if (amd_special_default_mtrr()) {
- unsigned low, high;
-
- /* TOP_MEM2 */
- rdmsr(MSR_K8_TOP_MEM2, low, high);
- mtrr_tom2 = high;
- mtrr_tom2 <<= 32;
- mtrr_tom2 |= low;
- mtrr_tom2 &= 0xffffff800000ULL;
- }
-
- print_mtrr_state();
-
- mtrr_state_set = 1;
-
- /* PAT setup for BP. We need to go through sync steps here */
- local_irq_save(flags);
- prepare_set();
-
- pat_init();
-
- post_set();
- local_irq_restore(flags);
-}
-
-/* Some BIOS's are messed up and don't set all MTRRs the same! */
-void __init mtrr_state_warn(void)
-{
- unsigned long mask = smp_changes_mask;
-
- if (!mask)
- return;
- if (mask & MTRR_CHANGE_MASK_FIXED)
- pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
- if (mask & MTRR_CHANGE_MASK_VARIABLE)
- pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n");
- if (mask & MTRR_CHANGE_MASK_DEFTYPE)
- pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
-
- printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
- printk(KERN_INFO "mtrr: corrected configuration.\n");
-}
-
-/*
- * Doesn't attempt to pass an error out to MTRR users because it's quite
- * complicated in some cases and probably not worth it: the best error
- * handling is to ignore it.
- */
-void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
-{
- if (wrmsr_safe(msr, a, b) < 0) {
- printk(KERN_ERR
- "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
- smp_processor_id(), msr, a, b);
- }
-}
-
-/**
- * set_fixed_range - checks & updates a fixed-range MTRR if it
- * differs from the value it should have
- * @msr: MSR address of the MTRR which should be checked and updated
- * @changed: pointer which indicates whether the MTRR needed to be changed
- * @msrwords: pointer to the MSR values which the MSR should have
- */
-static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
-{
- unsigned lo, hi;
-
- rdmsr(msr, lo, hi);
-
- if (lo != msrwords[0] || hi != msrwords[1]) {
- mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
- *changed = true;
- }
-}
-
-/**
- * generic_get_free_region - Get a free MTRR.
- * @base: The starting (base) address of the region.
- * @size: The size (in bytes) of the region.
- * @replace_reg: mtrr index to be replaced; set to invalid value if none.
- *
- * Returns: The index of the region on success, else negative on error.
- */
-int
-generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
-{
- unsigned long lbase, lsize;
- mtrr_type ltype;
- int i, max;
-
- max = num_var_ranges;
- if (replace_reg >= 0 && replace_reg < max)
- return replace_reg;
-
- for (i = 0; i < max; ++i) {
- mtrr_if->get(i, &lbase, &lsize, &ltype);
- if (lsize == 0)
- return i;
- }
-
- return -ENOSPC;
-}
-
-static void generic_get_mtrr(unsigned int reg, unsigned long *base,
- unsigned long *size, mtrr_type *type)
-{
- unsigned int mask_lo, mask_hi, base_lo, base_hi;
- unsigned int tmp, hi;
-
- /*
- * get_mtrr doesn't need to update mtrr_state, also it could be called
- * from any cpu, so try to print it out directly.
- */
- get_cpu();
-
- rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
-
- if ((mask_lo & 0x800) == 0) {
- /* Invalid (i.e. free) range */
- *base = 0;
- *size = 0;
- *type = 0;
- goto out_put_cpu;
- }
-
- rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
-
- /* Work out the shifted address mask: */
- tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
- mask_lo = size_or_mask | tmp;
-
- /* Expand tmp with high bits to all 1s: */
- hi = fls(tmp);
- if (hi > 0) {
- tmp |= ~((1<<(hi - 1)) - 1);
-
- if (tmp != mask_lo) {
- printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
- add_taint(TAINT_FIRMWARE_WORKAROUND);
- mask_lo = tmp;
- }
- }
-
- /*
- * This works correctly if size is a power of two, i.e. a
- * contiguous range:
- */
- *size = -mask_lo;
- *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
- *type = base_lo & 0xff;
-
-out_put_cpu:
- put_cpu();
-}
-
-/**
- * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
- * differ from the saved set
- * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
- */
-static int set_fixed_ranges(mtrr_type *frs)
-{
- unsigned long long *saved = (unsigned long long *)frs;
- bool changed = false;
- int block = -1, range;
-
- k8_check_syscfg_dram_mod_en();
-
- while (fixed_range_blocks[++block].ranges) {
- for (range = 0; range < fixed_range_blocks[block].ranges; range++)
- set_fixed_range(fixed_range_blocks[block].base_msr + range,
- &changed, (unsigned int *)saved++);
- }
-
- return changed;
-}
-
-/*
- * Set the MSR pair relating to a var range.
- * Returns true if changes are made.
- */
-static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
-{
- unsigned int lo, hi;
- bool changed = false;
-
- rdmsr(MTRRphysBase_MSR(index), lo, hi);
- if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
- || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
- (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
-
- mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
- changed = true;
- }
-
- rdmsr(MTRRphysMask_MSR(index), lo, hi);
-
- if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
- || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
- (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
- mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
- changed = true;
- }
- return changed;
-}
-
-static u32 deftype_lo, deftype_hi;
-
-/**
- * set_mtrr_state - Set the MTRR state for this CPU.
- *
- * NOTE: The CPU must already be in a safe state for MTRR changes.
- * RETURNS: 0 if no changes made, else a mask indicating what was changed.
- */
-static unsigned long set_mtrr_state(void)
-{
- unsigned long change_mask = 0;
- unsigned int i;
-
- for (i = 0; i < num_var_ranges; i++) {
- if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
- change_mask |= MTRR_CHANGE_MASK_VARIABLE;
- }
-
- if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
- change_mask |= MTRR_CHANGE_MASK_FIXED;
-
- /*
- * Set_mtrr_restore restores the old value of MTRRdefType,
- * so to set it we fiddle with the saved value:
- */
- if ((deftype_lo & 0xff) != mtrr_state.def_type
- || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
-
- deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
- (mtrr_state.enabled << 10);
- change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
- }
-
- return change_mask;
-}
-
-
-static unsigned long cr4;
-static DEFINE_RAW_SPINLOCK(set_atomicity_lock);
-
-/*
- * Since we are disabling the cache don't allow any interrupts,
- * they would run extremely slow and would only increase the pain.
- *
- * The caller must ensure that local interrupts are disabled and
- * are reenabled after post_set() has been called.
- */
-static void prepare_set(void) __acquires(set_atomicity_lock)
-{
- unsigned long cr0;
-
- /*
- * Note that this is not ideal
- * since the cache is only flushed/disabled for this CPU while the
- * MTRRs are changed, but changing this requires more invasive
- * changes to the way the kernel boots
- */
-
- raw_spin_lock(&set_atomicity_lock);
-
- /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
- cr0 = read_cr0() | X86_CR0_CD;
- write_cr0(cr0);
- wbinvd();
-
- /* Save value of CR4 and clear Page Global Enable (bit 7) */
- if (cpu_has_pge) {
- cr4 = read_cr4();
- write_cr4(cr4 & ~X86_CR4_PGE);
- }
-
- /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
- __flush_tlb();
-
- /* Save MTRR state */
- rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
-
- /* Disable MTRRs, and set the default type to uncached */
- mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
- wbinvd();
-}
-
-static void post_set(void) __releases(set_atomicity_lock)
-{
- /* Flush TLBs (no need to flush caches - they are disabled) */
- __flush_tlb();
-
- /* Intel (P6) standard MTRRs */
- mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
-
- /* Enable caches */
- write_cr0(read_cr0() & 0xbfffffff);
-
- /* Restore value of CR4 */
- if (cpu_has_pge)
- write_cr4(cr4);
- raw_spin_unlock(&set_atomicity_lock);
-}
-
-static void generic_set_all(void)
-{
- unsigned long mask, count;
- unsigned long flags;
-
- local_irq_save(flags);
- prepare_set();
-
- /* Actually set the state */
- mask = set_mtrr_state();
-
- /* also set PAT */
- pat_init();
-
- post_set();
- local_irq_restore(flags);
-
- /* Use the atomic bitops to update the global mask */
- for (count = 0; count < sizeof mask * 8; ++count) {
- if (mask & 0x01)
- set_bit(count, &smp_changes_mask);
- mask >>= 1;
- }
-
-}
-
-/**
- * generic_set_mtrr - set variable MTRR register on the local CPU.
- *
- * @reg: The register to set.
- * @base: The base address of the region.
- * @size: The size of the region. If this is 0 the region is disabled.
- * @type: The type of the region.
- *
- * Returns nothing.
- */
-static void generic_set_mtrr(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type)
-{
- unsigned long flags;
- struct mtrr_var_range *vr;
-
- vr = &mtrr_state.var_ranges[reg];
-
- local_irq_save(flags);
- prepare_set();
-
- if (size == 0) {
- /*
- * The invalid bit is kept in the mask, so we simply
- * clear the relevant mask register to disable a range.
- */
- mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
- memset(vr, 0, sizeof(struct mtrr_var_range));
- } else {
- vr->base_lo = base << PAGE_SHIFT | type;
- vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
- vr->mask_lo = -size << PAGE_SHIFT | 0x800;
- vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
-
- mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
- mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
- }
-
- post_set();
- local_irq_restore(flags);
-}
-
-int generic_validate_add_page(unsigned long base, unsigned long size,
- unsigned int type)
-{
- unsigned long lbase, last;
-
-	/*
-	 * For Intel PPro stepping <= 7, the region must be 4 MiB aligned
-	 * and must not touch 0x70000000 -> 0x7003FFFF.
-	 */
- if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model == 1 &&
- boot_cpu_data.x86_mask <= 7) {
- if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
- pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
- return -EINVAL;
- }
- if (!(base + size < 0x70000 || base > 0x7003F) &&
- (type == MTRR_TYPE_WRCOMB
- || type == MTRR_TYPE_WRBACK)) {
- pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
- return -EINVAL;
- }
- }
-
-	/*
-	 * Check that the upper bits of base and last are equal and that the
-	 * lower bits are 0 for base and 1 for last:
-	 */
- last = base + size - 1;
- for (lbase = base; !(lbase & 1) && (last & 1);
- lbase = lbase >> 1, last = last >> 1)
- ;
- if (lbase != last) {
- pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
- return -EINVAL;
- }
- return 0;
-}
-
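
The alignment test in generic_validate_add_page() above strips trailing zero bits from base and trailing one bits from last in lockstep; the two values collapse to the same number only when size is a power of two and base is aligned to it. A standalone re-run of that loop with a few illustrative values (in 4K pages):

#include <stdio.h>
#include <stdbool.h>

/* Same bit-stripping loop as the check above; values are in 4K pages. */
static bool aligned(unsigned long base, unsigned long size)
{
	unsigned long lbase, last = base + size - 1;

	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase >>= 1, last >>= 1)
		;
	return lbase == last;
}

int main(void)
{
	printf("%d\n", aligned(0x100, 0x100));	/* 1: 1MB at 1MB          */
	printf("%d\n", aligned(0x180, 0x100));	/* 0: 1MB at 1.5MB        */
	printf("%d\n", aligned(0x100, 0x180));	/* 0: size not power of 2 */
	return 0;
}
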
-static int generic_have_wrcomb(void)
-{
- unsigned long config, dummy;
- rdmsr(MSR_MTRRcap, config, dummy);
- return config & (1 << 10);
-}
-
-int positive_have_wrcomb(void)
-{
- return 1;
-}
-
-/*
- * Generic structure...
- */
-const struct mtrr_ops generic_mtrr_ops = {
- .use_intel_if = 1,
- .set_all = generic_set_all,
- .get = generic_get_mtrr,
- .get_free_region = generic_get_free_region,
- .set = generic_set_mtrr,
- .validate_add_page = generic_validate_add_page,
- .have_wrcomb = generic_have_wrcomb,
-};
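
One detail worth spelling out from get_mtrr_size() and generic_get_mtrr() above: once the physmask has been padded with ones above the CPU's physical-address width (via size_or_mask), it has the shape 0b11...1100...00, so its two's-complement negation is exactly the block size. A standalone illustration in page units, assuming a 36-bit physical address space and 4K pages:

#include <stdio.h>

int main(void)
{
	int page_shift = 12, phys_bits = 36;

	/* Bits above the physical-address width are forced to 1,
	 * mirroring size_or_mask for a 36-bit machine. */
	unsigned long long size_or_mask =
		~((1ULL << (phys_bits - page_shift)) - 1);

	/* Page-granular MTRR mask of a 256 MB region: ones from bit 16
	 * (64K pages) up to the physical-address limit. */
	unsigned long long mask = ((1ULL << (phys_bits - page_shift)) - 1)
				  & ~((1ULL << 16) - 1);

	unsigned long long size_pages = -(mask | size_or_mask);

	/* 65536 pages * 4 KB = 256 MB */
	printf("size: %llu pages = %llu MB\n",
	       size_pages, (size_pages << page_shift) >> 20);
	return 0;
}
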
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/if.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/if.c
deleted file mode 100644
index a041e094..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/if.c
+++ /dev/null
@@ -1,451 +0,0 @@
-#include <linux/capability.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#define LINE_SIZE 80
-
-#include <asm/mtrr.h>
-
-#include "mtrr.h"
-
-#define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private)
-
-static const char *const mtrr_strings[MTRR_NUM_TYPES] =
-{
- "uncachable", /* 0 */
- "write-combining", /* 1 */
- "?", /* 2 */
- "?", /* 3 */
- "write-through", /* 4 */
- "write-protect", /* 5 */
- "write-back", /* 6 */
-};
-
-const char *mtrr_attrib_to_str(int x)
-{
- return (x <= 6) ? mtrr_strings[x] : "?";
-}
-
-#ifdef CONFIG_PROC_FS
-
-static int
-mtrr_file_add(unsigned long base, unsigned long size,
- unsigned int type, bool increment, struct file *file, int page)
-{
- unsigned int *fcount = FILE_FCOUNT(file);
- int reg, max;
-
- max = num_var_ranges;
- if (fcount == NULL) {
- fcount = kzalloc(max * sizeof *fcount, GFP_KERNEL);
- if (!fcount)
- return -ENOMEM;
- FILE_FCOUNT(file) = fcount;
- }
- if (!page) {
- if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)))
- return -EINVAL;
- base >>= PAGE_SHIFT;
- size >>= PAGE_SHIFT;
- }
- reg = mtrr_add_page(base, size, type, true);
- if (reg >= 0)
- ++fcount[reg];
- return reg;
-}
-
-static int
-mtrr_file_del(unsigned long base, unsigned long size,
- struct file *file, int page)
-{
- unsigned int *fcount = FILE_FCOUNT(file);
- int reg;
-
- if (!page) {
- if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)))
- return -EINVAL;
- base >>= PAGE_SHIFT;
- size >>= PAGE_SHIFT;
- }
- reg = mtrr_del_page(-1, base, size);
- if (reg < 0)
- return reg;
- if (fcount == NULL)
- return reg;
- if (fcount[reg] < 1)
- return -EINVAL;
- --fcount[reg];
- return reg;
-}
-
-/*
- * seq_file can seek but we ignore it.
- *
- * Format of control line:
- * "base=%Lx size=%Lx type=%s" or "disable=%d"
- */
-static ssize_t
-mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
-{
- int i, err;
- unsigned long reg;
- unsigned long long base, size;
- char *ptr;
- char line[LINE_SIZE];
- int length;
- size_t linelen;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- memset(line, 0, LINE_SIZE);
-
- length = len;
- length--;
-
- if (length > LINE_SIZE - 1)
- length = LINE_SIZE - 1;
-
- if (length < 0)
- return -EINVAL;
-
- if (copy_from_user(line, buf, length))
- return -EFAULT;
-
- linelen = strlen(line);
- ptr = line + linelen - 1;
- if (linelen && *ptr == '\n')
- *ptr = '\0';
-
- if (!strncmp(line, "disable=", 8)) {
- reg = simple_strtoul(line + 8, &ptr, 0);
- err = mtrr_del_page(reg, 0, 0);
- if (err < 0)
- return err;
- return len;
- }
-
- if (strncmp(line, "base=", 5))
- return -EINVAL;
-
- base = simple_strtoull(line + 5, &ptr, 0);
- ptr = skip_spaces(ptr);
-
- if (strncmp(ptr, "size=", 5))
- return -EINVAL;
-
- size = simple_strtoull(ptr + 5, &ptr, 0);
- if ((base & 0xfff) || (size & 0xfff))
- return -EINVAL;
- ptr = skip_spaces(ptr);
-
- if (strncmp(ptr, "type=", 5))
- return -EINVAL;
- ptr = skip_spaces(ptr + 5);
-
- for (i = 0; i < MTRR_NUM_TYPES; ++i) {
- if (strcmp(ptr, mtrr_strings[i]))
- continue;
- base >>= PAGE_SHIFT;
- size >>= PAGE_SHIFT;
- err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true);
- if (err < 0)
- return err;
- return len;
- }
- return -EINVAL;
-}
-
-static long
-mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
-{
- int err = 0;
- mtrr_type type;
- unsigned long base;
- unsigned long size;
- struct mtrr_sentry sentry;
- struct mtrr_gentry gentry;
- void __user *arg = (void __user *) __arg;
-
- switch (cmd) {
- case MTRRIOC_ADD_ENTRY:
- case MTRRIOC_SET_ENTRY:
- case MTRRIOC_DEL_ENTRY:
- case MTRRIOC_KILL_ENTRY:
- case MTRRIOC_ADD_PAGE_ENTRY:
- case MTRRIOC_SET_PAGE_ENTRY:
- case MTRRIOC_DEL_PAGE_ENTRY:
- case MTRRIOC_KILL_PAGE_ENTRY:
- if (copy_from_user(&sentry, arg, sizeof sentry))
- return -EFAULT;
- break;
- case MTRRIOC_GET_ENTRY:
- case MTRRIOC_GET_PAGE_ENTRY:
- if (copy_from_user(&gentry, arg, sizeof gentry))
- return -EFAULT;
- break;
-#ifdef CONFIG_COMPAT
- case MTRRIOC32_ADD_ENTRY:
- case MTRRIOC32_SET_ENTRY:
- case MTRRIOC32_DEL_ENTRY:
- case MTRRIOC32_KILL_ENTRY:
- case MTRRIOC32_ADD_PAGE_ENTRY:
- case MTRRIOC32_SET_PAGE_ENTRY:
- case MTRRIOC32_DEL_PAGE_ENTRY:
- case MTRRIOC32_KILL_PAGE_ENTRY: {
- struct mtrr_sentry32 __user *s32;
-
- s32 = (struct mtrr_sentry32 __user *)__arg;
- err = get_user(sentry.base, &s32->base);
- err |= get_user(sentry.size, &s32->size);
- err |= get_user(sentry.type, &s32->type);
- if (err)
- return err;
- break;
- }
- case MTRRIOC32_GET_ENTRY:
- case MTRRIOC32_GET_PAGE_ENTRY: {
- struct mtrr_gentry32 __user *g32;
-
- g32 = (struct mtrr_gentry32 __user *)__arg;
- err = get_user(gentry.regnum, &g32->regnum);
- err |= get_user(gentry.base, &g32->base);
- err |= get_user(gentry.size, &g32->size);
- err |= get_user(gentry.type, &g32->type);
- if (err)
- return err;
- break;
- }
-#endif
- }
-
- switch (cmd) {
- default:
- return -ENOTTY;
- case MTRRIOC_ADD_ENTRY:
-#ifdef CONFIG_COMPAT
- case MTRRIOC32_ADD_ENTRY:
-#endif
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err =
- mtrr_file_add(sentry.base, sentry.size, sentry.type, true,
- file, 0);
- break;
- case MTRRIOC_SET_ENTRY:
-#ifdef CONFIG_COMPAT
- case MTRRIOC32_SET_ENTRY:
-#endif
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err = mtrr_add(sentry.base, sentry.size, sentry.type, false);
- break;
- case MTRRIOC_DEL_ENTRY:
-#ifdef CONFIG_COMPAT
- case MTRRIOC32_DEL_ENTRY:
-#endif
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err = mtrr_file_del(sentry.base, sentry.size, file, 0);
- break;
- case MTRRIOC_KILL_ENTRY:
-#ifdef CONFIG_COMPAT
- case MTRRIOC32_KILL_ENTRY:
-#endif
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err = mtrr_del(-1, sentry.base, sentry.size);
- break;
- case MTRRIOC_GET_ENTRY:
-#ifdef CONFIG_COMPAT
- case MTRRIOC32_GET_ENTRY:
-#endif
- if (gentry.regnum >= num_var_ranges)
- return -EINVAL;
- mtrr_if->get(gentry.regnum, &base, &size, &type);
-
- /* Hide entries that go above 4GB */
- if (base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
- || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)))
- gentry.base = gentry.size = gentry.type = 0;
- else {
- gentry.base = base << PAGE_SHIFT;
- gentry.size = size << PAGE_SHIFT;
- gentry.type = type;
- }
-
- break;
- case MTRRIOC_ADD_PAGE_ENTRY:
-#ifdef CONFIG_COMPAT
- case MTRRIOC32_ADD_PAGE_ENTRY:
-#endif
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err =
- mtrr_file_add(sentry.base, sentry.size, sentry.type, true,
- file, 1);
- break;
- case MTRRIOC_SET_PAGE_ENTRY:
-#ifdef CONFIG_COMPAT
- case MTRRIOC32_SET_PAGE_ENTRY:
-#endif
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err =
- mtrr_add_page(sentry.base, sentry.size, sentry.type, false);
- break;
- case MTRRIOC_DEL_PAGE_ENTRY:
-#ifdef CONFIG_COMPAT
- case MTRRIOC32_DEL_PAGE_ENTRY:
-#endif
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err = mtrr_file_del(sentry.base, sentry.size, file, 1);
- break;
- case MTRRIOC_KILL_PAGE_ENTRY:
-#ifdef CONFIG_COMPAT
- case MTRRIOC32_KILL_PAGE_ENTRY:
-#endif
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err = mtrr_del_page(-1, sentry.base, sentry.size);
- break;
- case MTRRIOC_GET_PAGE_ENTRY:
-#ifdef CONFIG_COMPAT
- case MTRRIOC32_GET_PAGE_ENTRY:
-#endif
- if (gentry.regnum >= num_var_ranges)
- return -EINVAL;
- mtrr_if->get(gentry.regnum, &base, &size, &type);
- /* Hide entries that would overflow */
- if (size != (__typeof__(gentry.size))size)
- gentry.base = gentry.size = gentry.type = 0;
- else {
- gentry.base = base;
- gentry.size = size;
- gentry.type = type;
- }
- break;
- }
-
- if (err)
- return err;
-
- switch (cmd) {
- case MTRRIOC_GET_ENTRY:
- case MTRRIOC_GET_PAGE_ENTRY:
- if (copy_to_user(arg, &gentry, sizeof gentry))
- err = -EFAULT;
- break;
-#ifdef CONFIG_COMPAT
- case MTRRIOC32_GET_ENTRY:
- case MTRRIOC32_GET_PAGE_ENTRY: {
- struct mtrr_gentry32 __user *g32;
-
- g32 = (struct mtrr_gentry32 __user *)__arg;
- err = put_user(gentry.base, &g32->base);
- err |= put_user(gentry.size, &g32->size);
- err |= put_user(gentry.regnum, &g32->regnum);
- err |= put_user(gentry.type, &g32->type);
- break;
- }
-#endif
- }
- return err;
-}
-
-static int mtrr_close(struct inode *ino, struct file *file)
-{
- unsigned int *fcount = FILE_FCOUNT(file);
- int i, max;
-
- if (fcount != NULL) {
- max = num_var_ranges;
- for (i = 0; i < max; ++i) {
- while (fcount[i] > 0) {
- mtrr_del(i, 0, 0);
- --fcount[i];
- }
- }
- kfree(fcount);
- FILE_FCOUNT(file) = NULL;
- }
- return single_release(ino, file);
-}
-
-static int mtrr_seq_show(struct seq_file *seq, void *offset);
-
-static int mtrr_open(struct inode *inode, struct file *file)
-{
- if (!mtrr_if)
- return -EIO;
- if (!mtrr_if->get)
- return -ENXIO;
- return single_open(file, mtrr_seq_show, NULL);
-}
-
-static const struct file_operations mtrr_fops = {
- .owner = THIS_MODULE,
- .open = mtrr_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = mtrr_write,
- .unlocked_ioctl = mtrr_ioctl,
- .compat_ioctl = mtrr_ioctl,
- .release = mtrr_close,
-};
-
-static int mtrr_seq_show(struct seq_file *seq, void *offset)
-{
- char factor;
- int i, max, len;
- mtrr_type type;
- unsigned long base, size;
-
- len = 0;
- max = num_var_ranges;
- for (i = 0; i < max; i++) {
- mtrr_if->get(i, &base, &size, &type);
- if (size == 0) {
- mtrr_usage_table[i] = 0;
- continue;
- }
- if (size < (0x100000 >> PAGE_SHIFT)) {
- /* less than 1MB */
- factor = 'K';
- size <<= PAGE_SHIFT - 10;
- } else {
- factor = 'M';
- size >>= 20 - PAGE_SHIFT;
- }
- /* Base can be > 32bit */
- len += seq_printf(seq, "reg%02i: base=0x%06lx000 "
- "(%5luMB), size=%5lu%cB, count=%d: %s\n",
- i, base, base >> (20 - PAGE_SHIFT), size,
- factor, mtrr_usage_table[i],
- mtrr_attrib_to_str(type));
- }
- return 0;
-}
-
-static int __init mtrr_if_init(void)
-{
- struct cpuinfo_x86 *c = &boot_cpu_data;
-
- if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
- (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
- (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
- (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
- return -ENODEV;
-
- proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops);
- return 0;
-}
-arch_initcall(mtrr_if_init);
-#endif /* CONFIG_PROC_FS */
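
As a usage note for the control-line parser in mtrr_write() above: from userspace, adding a region is just a matter of writing one "base=... size=... type=..." line to /proc/mtrr. A minimal sketch (the base, size, and the framebuffer it stands for are hypothetical placeholders; CAP_SYS_ADMIN is required):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Request a write-combining mapping for a hypothetical 16 MB
	 * framebuffer aperture at 0xd0000000. */
	const char *line =
		"base=0xd0000000 size=0x1000000 type=write-combining\n";
	int fd = open("/proc/mtrr", O_WRONLY);

	if (fd < 0 || write(fd, line, strlen(line)) < 0)
		perror("/proc/mtrr");
	if (fd >= 0)
		close(fd);
	return 0;
}

Writing "disable=N" instead removes register N, matching the other branch of the parser above.
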
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/main.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/main.c
deleted file mode 100644
index 6b96110b..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/main.c
+++ /dev/null
@@ -1,764 +0,0 @@
-/* Generic MTRR (Memory Type Range Register) driver.
-
- Copyright (C) 1997-2000 Richard Gooch
- Copyright (c) 2002 Patrick Mochel
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with this library; if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Richard Gooch may be reached by email at rgooch@atnf.csiro.au
- The postal address is:
- Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
-
- Source: "Pentium Pro Family Developer's Manual, Volume 3:
- Operating System Writer's Guide" (Intel document number 242692),
- section 11.11.7
-
- This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
- on 6-7 March 2002.
- Source: Intel Architecture Software Developers Manual, Volume 3:
- System Programming Guide; Section 9.11. (1997 edition - PPro).
-*/
-
-#define DEBUG
-
-#include <linux/types.h> /* FIXME: kvm_para.h needs this */
-
-#include <linux/stop_machine.h>
-#include <linux/kvm_para.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/init.h>
-#include <linux/sort.h>
-#include <linux/cpu.h>
-#include <linux/pci.h>
-#include <linux/smp.h>
-#include <linux/syscore_ops.h>
-
-#include <asm/processor.h>
-#include <asm/e820.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-
-#include "mtrr.h"
-
-u32 num_var_ranges;
-
-unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
-static DEFINE_MUTEX(mtrr_mutex);
-
-u64 size_or_mask, size_and_mask;
-static bool mtrr_aps_delayed_init;
-
-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
-
-const struct mtrr_ops *mtrr_if;
-
-static void set_mtrr(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type);
-
-void set_mtrr_ops(const struct mtrr_ops *ops)
-{
- if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
- mtrr_ops[ops->vendor] = ops;
-}
-
-/* Returns non-zero if we have the write-combining memory type */
-static int have_wrcomb(void)
-{
- struct pci_dev *dev;
-
- dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
- if (dev != NULL) {
- /*
- * ServerWorks LE chipsets < rev 6 have problems with
- * write-combining. Don't allow it and leave room for other
- * chipsets to be tagged
- */
- if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
- dev->device == PCI_DEVICE_ID_SERVERWORKS_LE &&
- dev->revision <= 5) {
- pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
- pci_dev_put(dev);
- return 0;
- }
-		/*
-		 * Intel 450NX errata #23: non-ascending cacheline evictions to
-		 * write-combining memory may result in data corruption.
-		 */
- if (dev->vendor == PCI_VENDOR_ID_INTEL &&
- dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
- pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
- pci_dev_put(dev);
- return 0;
- }
- pci_dev_put(dev);
- }
- return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
-}
-
-/* This function sets num_var_ranges to the number of variable MTRRs */
-static void __init set_num_var_ranges(void)
-{
- unsigned long config = 0, dummy;
-
- if (use_intel())
- rdmsr(MSR_MTRRcap, config, dummy);
- else if (is_cpu(AMD))
- config = 2;
- else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
- config = 8;
-
- num_var_ranges = config & 0xff;
-}
-
-static void __init init_table(void)
-{
- int i, max;
-
- max = num_var_ranges;
- for (i = 0; i < max; i++)
- mtrr_usage_table[i] = 1;
-}
-
-struct set_mtrr_data {
- unsigned long smp_base;
- unsigned long smp_size;
- unsigned int smp_reg;
- mtrr_type smp_type;
-};
-
-/**
- * mtrr_rendezvous_handler - Work done in the synchronization handler. Executed
- * by all the CPUs.
- * @info: pointer to mtrr configuration data
- *
- * Returns nothing.
- */
-static int mtrr_rendezvous_handler(void *info)
-{
- struct set_mtrr_data *data = info;
-
- /*
- * We use this same function to initialize the mtrrs during boot,
- * resume, runtime cpu online and on an explicit request to set a
- * specific MTRR.
- *
- * During boot or suspend, the state of the boot cpu's mtrrs has been
- * saved, and we want to replicate that across all the cpus that come
- * online (either at the end of boot or resume or during a runtime cpu
- * online). If we're doing that, @reg is set to something special and on
- * all the cpu's we do mtrr_if->set_all() (On the logical cpu that
- * started the boot/resume sequence, this might be a duplicate
- * set_all()).
- */
- if (data->smp_reg != ~0U) {
- mtrr_if->set(data->smp_reg, data->smp_base,
- data->smp_size, data->smp_type);
- } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
- mtrr_if->set_all();
- }
- return 0;
-}
-
-static inline int types_compatible(mtrr_type type1, mtrr_type type2)
-{
- return type1 == MTRR_TYPE_UNCACHABLE ||
- type2 == MTRR_TYPE_UNCACHABLE ||
- (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
- (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
-}
-
-/**
- * set_mtrr - update mtrrs on all processors
- * @reg: mtrr in question
- * @base: mtrr base
- * @size: mtrr size
- * @type: mtrr type
- *
- * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
- *
- * 1. Queue work to do the following on all processors:
- * 2. Disable Interrupts
- * 3. Wait for all procs to do so
- * 4. Enter no-fill cache mode
- * 5. Flush caches
- * 6. Clear PGE bit
- * 7. Flush all TLBs
- * 8. Disable all range registers
- * 9. Update the MTRRs
- * 10. Enable all range registers
- * 11. Flush all TLBs and caches again
- * 12. Enter normal cache mode and reenable caching
- * 13. Set PGE
- * 14. Wait for buddies to catch up
- * 15. Enable interrupts.
- *
- * What does that mean for us? Well, stop_machine() will ensure that
- * the rendezvous handler is started on each CPU. And in lockstep they
- * do the state transition of disabling interrupts, updating MTRR's
- * (the CPU vendors may each do it differently, so we call mtrr_if->set()
- * callback and let them take care of it.) and enabling interrupts.
- *
- * Note that the mechanism is the same for UP systems, too; all the SMP stuff
- * becomes nops.
- */
-static void
-set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
-{
- struct set_mtrr_data data = { .smp_reg = reg,
- .smp_base = base,
- .smp_size = size,
- .smp_type = type
- };
-
- stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
-}
-
-static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type)
-{
- struct set_mtrr_data data = { .smp_reg = reg,
- .smp_base = base,
- .smp_size = size,
- .smp_type = type
- };
-
- stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data,
- cpu_callout_mask);
-}
-
-/**
- * mtrr_add_page - Add a memory type region
- * @base: Physical base address of region in pages (in units of 4 kB!)
- * @size: Physical size of region in pages (4 kB)
- * @type: Type of MTRR desired
- * @increment: If this is true do usage counting on the region
- *
- * Memory type region registers control the caching on newer Intel and
- * non-Intel processors. This function allows drivers to request that an
- * MTRR be added. The details and hardware specifics of each processor's
- * implementation are hidden from the caller, but nevertheless the
- * caller should expect to need to provide a power-of-two size on an
- * equivalent power-of-two boundary.
- *
- * If the region cannot be added either because all regions are in use
- * or the CPU cannot support it a negative value is returned. On success
- * the register number for this entry is returned, but should be treated
- * as a cookie only.
- *
- * On a multiprocessor machine the changes are made to all processors.
- * This is required on x86 by the Intel processors.
- *
- * The available types are
- *
- * %MTRR_TYPE_UNCACHABLE - No caching
- *
- * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
- *
- * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
- *
- * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
- *
- * BUGS: Needs a quiet flag for the cases where drivers do not mind
- * failures and do not wish system log messages to be sent.
- */
-int mtrr_add_page(unsigned long base, unsigned long size,
- unsigned int type, bool increment)
-{
- unsigned long lbase, lsize;
- int i, replace, error;
- mtrr_type ltype;
-
- if (!mtrr_if)
- return -ENXIO;
-
- error = mtrr_if->validate_add_page(base, size, type);
- if (error)
- return error;
-
- if (type >= MTRR_NUM_TYPES) {
- pr_warning("mtrr: type: %u invalid\n", type);
- return -EINVAL;
- }
-
- /* If the type is WC, check that this processor supports it */
- if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
- pr_warning("mtrr: your processor doesn't support write-combining\n");
- return -ENOSYS;
- }
-
- if (!size) {
- pr_warning("mtrr: zero sized request\n");
- return -EINVAL;
- }
-
- if (base & size_or_mask || size & size_or_mask) {
- pr_warning("mtrr: base or size exceeds the MTRR width\n");
- return -EINVAL;
- }
-
- error = -EINVAL;
- replace = -1;
-
- /* No CPU hotplug when we change MTRR entries */
- get_online_cpus();
-
- /* Search for existing MTRR */
- mutex_lock(&mtrr_mutex);
- for (i = 0; i < num_var_ranges; ++i) {
- mtrr_if->get(i, &lbase, &lsize, &ltype);
- if (!lsize || base > lbase + lsize - 1 ||
- base + size - 1 < lbase)
- continue;
- /*
- * At this point we know there is some kind of
- * overlap/enclosure
- */
- if (base < lbase || base + size - 1 > lbase + lsize - 1) {
- if (base <= lbase &&
- base + size - 1 >= lbase + lsize - 1) {
- /* New region encloses an existing region */
- if (type == ltype) {
- replace = replace == -1 ? i : -2;
- continue;
- } else if (types_compatible(type, ltype))
- continue;
- }
- pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing"
- " 0x%lx000,0x%lx000\n", base, size, lbase,
- lsize);
- goto out;
- }
- /* New region is enclosed by an existing region */
- if (ltype != type) {
- if (types_compatible(type, ltype))
- continue;
- pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
- base, size, mtrr_attrib_to_str(ltype),
- mtrr_attrib_to_str(type));
- goto out;
- }
- if (increment)
- ++mtrr_usage_table[i];
- error = i;
- goto out;
- }
- /* Search for an empty MTRR */
- i = mtrr_if->get_free_region(base, size, replace);
- if (i >= 0) {
- set_mtrr(i, base, size, type);
- if (likely(replace < 0)) {
- mtrr_usage_table[i] = 1;
- } else {
- mtrr_usage_table[i] = mtrr_usage_table[replace];
- if (increment)
- mtrr_usage_table[i]++;
- if (unlikely(replace != i)) {
- set_mtrr(replace, 0, 0, 0);
- mtrr_usage_table[replace] = 0;
- }
- }
- } else {
- pr_info("mtrr: no more MTRRs available\n");
- }
- error = i;
- out:
- mutex_unlock(&mtrr_mutex);
- put_online_cpus();
- return error;
-}
-
-static int mtrr_check(unsigned long base, unsigned long size)
-{
- if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
- pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
- pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
- dump_stack();
- return -1;
- }
- return 0;
-}
-
-/**
- * mtrr_add - Add a memory type region
- * @base: Physical base address of region
- * @size: Physical size of region
- * @type: Type of MTRR desired
- * @increment: If this is true do usage counting on the region
- *
- * Memory type region registers control the caching on newer Intel and
- * non-Intel processors. This function allows drivers to request that an
- * MTRR be added. The details and hardware specifics of each processor's
- * implementation are hidden from the caller, but nevertheless the
- * caller should expect to need to provide a power-of-two size on an
- * equivalent power-of-two boundary.
- *
- * If the region cannot be added either because all regions are in use
- * or the CPU cannot support it a negative value is returned. On success
- * the register number for this entry is returned, but should be treated
- * as a cookie only.
- *
- * On a multiprocessor machine the changes are made to all processors.
- * This is required on x86 by the Intel processors.
- *
- * The available types are
- *
- * %MTRR_TYPE_UNCACHABLE - No caching
- *
- * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
- *
- * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
- *
- * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
- *
- * BUGS: Needs a quiet flag for the cases where drivers do not mind
- * failures and do not wish system log messages to be sent.
- */
-int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
- bool increment)
-{
- if (mtrr_check(base, size))
- return -EINVAL;
- return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
- increment);
-}
-EXPORT_SYMBOL(mtrr_add);
-
-/**
- * mtrr_del_page - delete a memory type region
- * @reg: Register returned by mtrr_add
- * @base: Physical base address
- * @size: Size of region
- *
- * If register is supplied then base and size are ignored. This is
- * how drivers should call it.
- *
- * Releases an MTRR region. If the usage count drops to zero the
- * register is freed and the region returns to default state.
- * On success the register is returned, on failure a negative error
- * code.
- */
-int mtrr_del_page(int reg, unsigned long base, unsigned long size)
-{
- int i, max;
- mtrr_type ltype;
- unsigned long lbase, lsize;
- int error = -EINVAL;
-
- if (!mtrr_if)
- return -ENXIO;
-
- max = num_var_ranges;
- /* No CPU hotplug when we change MTRR entries */
- get_online_cpus();
- mutex_lock(&mtrr_mutex);
- if (reg < 0) {
- /* Search for existing MTRR */
- for (i = 0; i < max; ++i) {
- mtrr_if->get(i, &lbase, &lsize, &ltype);
- if (lbase == base && lsize == size) {
- reg = i;
- break;
- }
- }
- if (reg < 0) {
- pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n",
- base, size);
- goto out;
- }
- }
- if (reg >= max) {
- pr_warning("mtrr: register: %d too big\n", reg);
- goto out;
- }
- mtrr_if->get(reg, &lbase, &lsize, &ltype);
- if (lsize < 1) {
- pr_warning("mtrr: MTRR %d not used\n", reg);
- goto out;
- }
- if (mtrr_usage_table[reg] < 1) {
- pr_warning("mtrr: reg: %d has count=0\n", reg);
- goto out;
- }
- if (--mtrr_usage_table[reg] < 1)
- set_mtrr(reg, 0, 0, 0);
- error = reg;
- out:
- mutex_unlock(&mtrr_mutex);
- put_online_cpus();
- return error;
-}
-
-/**
- * mtrr_del - delete a memory type region
- * @reg: Register returned by mtrr_add
- * @base: Physical base address
- * @size: Size of region
- *
- * If register is supplied then base and size are ignored. This is
- * how drivers should call it.
- *
- * Releases an MTRR region. If the usage count drops to zero the
- * register is freed and the region returns to default state.
- * On success the register is returned, on failure a negative error
- * code.
- */
-int mtrr_del(int reg, unsigned long base, unsigned long size)
-{
- if (mtrr_check(base, size))
- return -EINVAL;
- return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(mtrr_del);
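/*
 * Editor's note: an illustrative sketch, not part of the original file, of
 * how a driver would typically pair the two exported calls above to request
 * write-combining for a prefetchable framebuffer aperture. The fb_base,
 * fb_size and my_drv_* names are hypothetical; only mtrr_add(), mtrr_del()
 * and MTRR_TYPE_WRCOMB are taken from this file. Per the kernel-doc above,
 * base/size should describe a power-of-two sized, power-of-two aligned
 * region, and the value returned by mtrr_add() is an opaque cookie.
 */
static int my_drv_wc_cookie = -1;

static void my_drv_enable_wc(unsigned long fb_base, unsigned long fb_size)
{
	my_drv_wc_cookie = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, true);
	if (my_drv_wc_cookie < 0)
		pr_info("my_drv: write-combining unavailable, running uncached\n");
}

static void my_drv_disable_wc(unsigned long fb_base, unsigned long fb_size)
{
	/* pass back the cookie returned by mtrr_add() */
	if (my_drv_wc_cookie >= 0)
		mtrr_del(my_drv_wc_cookie, fb_base, fb_size);
}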
-
-/*
- * HACK ALERT!
- * These should be called implicitly, but we can't yet until all the initcall
- * stuff is done...
- */
-static void __init init_ifs(void)
-{
-#ifndef CONFIG_X86_64
- amd_init_mtrr();
- cyrix_init_mtrr();
- centaur_init_mtrr();
-#endif
-}
-
-/*
- * The suspend/resume methods are only for CPUs that don't use the generic
- * MTRR driver; CPUs using the generic MTRR driver don't require this.
- */
-struct mtrr_value {
- mtrr_type ltype;
- unsigned long lbase;
- unsigned long lsize;
-};
-
-static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
-
-static int mtrr_save(void)
-{
- int i;
-
- for (i = 0; i < num_var_ranges; i++) {
- mtrr_if->get(i, &mtrr_value[i].lbase,
- &mtrr_value[i].lsize,
- &mtrr_value[i].ltype);
- }
- return 0;
-}
-
-static void mtrr_restore(void)
-{
- int i;
-
- for (i = 0; i < num_var_ranges; i++) {
- if (mtrr_value[i].lsize) {
- set_mtrr(i, mtrr_value[i].lbase,
- mtrr_value[i].lsize,
- mtrr_value[i].ltype);
- }
- }
-}
-
-
-static struct syscore_ops mtrr_syscore_ops = {
- .suspend = mtrr_save,
- .resume = mtrr_restore,
-};
-
-int __initdata changed_by_mtrr_cleanup;
-
-/**
- * mtrr_bp_init - initialize mtrrs on the boot CPU
- *
- * This needs to be called early; before any of the other CPUs are
- * initialized (i.e. before smp_init()).
- *
- */
-void __init mtrr_bp_init(void)
-{
- u32 phys_addr;
-
- init_ifs();
-
- phys_addr = 32;
-
- if (cpu_has_mtrr) {
- mtrr_if = &generic_mtrr_ops;
- size_or_mask = 0xff000000; /* 36 bits */
- size_and_mask = 0x00f00000;
- phys_addr = 36;
-
- /*
- * This is an AMD-specific MSR, but we assume (hope?) that
- * Intel will implement it too when they extend the address
- * bus of the Xeon.
- */
- if (cpuid_eax(0x80000000) >= 0x80000008) {
- phys_addr = cpuid_eax(0x80000008) & 0xff;
- /* CPUID workaround for Intel 0F33/0F34 CPU */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 0xF &&
- boot_cpu_data.x86_model == 0x3 &&
- (boot_cpu_data.x86_mask == 0x3 ||
- boot_cpu_data.x86_mask == 0x4))
- phys_addr = 36;
-
- size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
- size_and_mask = ~size_or_mask & 0xfffff00000ULL;
- } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
- boot_cpu_data.x86 == 6) {
- /*
- * The VIA C* family has Intel-style MTRRs,
- * but doesn't support PAE.
- */
- size_or_mask = 0xfff00000; /* 32 bits */
- size_and_mask = 0;
- phys_addr = 32;
- }
- } else {
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_AMD:
- if (cpu_has_k6_mtrr) {
- /* Pre-Athlon (K6) AMD CPU MTRRs */
- mtrr_if = mtrr_ops[X86_VENDOR_AMD];
- size_or_mask = 0xfff00000; /* 32 bits */
- size_and_mask = 0;
- }
- break;
- case X86_VENDOR_CENTAUR:
- if (cpu_has_centaur_mcr) {
- mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
- size_or_mask = 0xfff00000; /* 32 bits */
- size_and_mask = 0;
- }
- break;
- case X86_VENDOR_CYRIX:
- if (cpu_has_cyrix_arr) {
- mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
- size_or_mask = 0xfff00000; /* 32 bits */
- size_and_mask = 0;
- }
- break;
- default:
- break;
- }
- }
-
- if (mtrr_if) {
- set_num_var_ranges();
- init_table();
- if (use_intel()) {
- get_mtrr_state();
-
- if (mtrr_cleanup(phys_addr)) {
- changed_by_mtrr_cleanup = 1;
- mtrr_if->set_all();
- }
- }
- }
-}
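/*
 * Editor's note: a stand-alone sketch, not part of the original file, of the
 * mask arithmetic used in mtrr_bp_init() above. Assuming 4 kB pages and a CPU
 * whose CPUID leaf 0x80000008 reports 36 physical address bits, the computed
 * masks match the hard-coded "36 bits" defaults (0xff000000 / 0x00f00000) in
 * their low 32 bits: page numbers then occupy 36 - 12 = 24 bits.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* 4 kB pages, as on x86 */

int main(void)
{
	unsigned int phys_addr = 36;	/* physical address width in bits */
	uint64_t size_or_mask  = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
	uint64_t size_and_mask = ~size_or_mask & 0xfffff00000ULL;

	/* prints 0xffffffffff000000 and 0x0000000000f00000 */
	printf("size_or_mask  = %#018llx\n", (unsigned long long)size_or_mask);
	printf("size_and_mask = %#018llx\n", (unsigned long long)size_and_mask);
	return 0;
}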
-
-void mtrr_ap_init(void)
-{
- if (!use_intel() || mtrr_aps_delayed_init)
- return;
- /*
- * Ideally we should hold mtrr_mutex here to prevent MTRR entries
- * from being changed, but this routine is called at CPU boot time
- * and holding the lock would break that.
- *
- * This routine is called in two cases:
- *
- * 1. very early during software resume, when there are absolutely
- *    no MTRR entry changes;
- *
- * 2. at CPU hot-add time; we let mtrr_add/del_page hold the CPU hotplug
- *    lock to prevent MTRR entry changes.
- */
- set_mtrr_from_inactive_cpu(~0U, 0, 0, 0);
-}
-
-/**
- * Save current fixed-range MTRR state of the BSP
- */
-void mtrr_save_state(void)
-{
- smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
-}
-
-void set_mtrr_aps_delayed_init(void)
-{
- if (!use_intel())
- return;
-
- mtrr_aps_delayed_init = true;
-}
-
-/*
- * Delayed MTRR initialization for all APs
- */
-void mtrr_aps_init(void)
-{
- if (!use_intel())
- return;
-
- /*
- * Check if someone has requested the delay of AP MTRR initialization,
- * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
- * then we are done.
- */
- if (!mtrr_aps_delayed_init)
- return;
-
- set_mtrr(~0U, 0, 0, 0);
- mtrr_aps_delayed_init = false;
-}
-
-void mtrr_bp_restore(void)
-{
- if (!use_intel())
- return;
-
- mtrr_if->set_all();
-}
-
-static int __init mtrr_init_finialize(void)
-{
- if (!mtrr_if)
- return 0;
-
- if (use_intel()) {
- if (!changed_by_mtrr_cleanup)
- mtrr_state_warn();
- return 0;
- }
-
- /*
- * The CPU doesn't use the generic MTRR driver and seems not to
- * support SMP. Such CPUs have vendor-specific drivers, so we use
- * a tricky method to support suspend/resume for them.
- *
- * TBD: is there any system with such a CPU that supports
- * suspend/resume? If not, we should remove this code.
- */
- register_syscore_ops(&mtrr_syscore_ops);
-
- return 0;
-}
-subsys_initcall(mtrr_init_finialize);
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/mtrr.h b/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/mtrr.h
deleted file mode 100644
index df5e41f3..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * local MTRR defines.
- */
-
-#include <linux/types.h>
-#include <linux/stddef.h>
-
-#define MTRR_CHANGE_MASK_FIXED 0x01
-#define MTRR_CHANGE_MASK_VARIABLE 0x02
-#define MTRR_CHANGE_MASK_DEFTYPE 0x04
-
-extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
-
-struct mtrr_ops {
- u32 vendor;
- u32 use_intel_if;
- void (*set)(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type);
- void (*set_all)(void);
-
- void (*get)(unsigned int reg, unsigned long *base,
- unsigned long *size, mtrr_type *type);
- int (*get_free_region)(unsigned long base, unsigned long size,
- int replace_reg);
- int (*validate_add_page)(unsigned long base, unsigned long size,
- unsigned int type);
- int (*have_wrcomb)(void);
-};
-
-extern int generic_get_free_region(unsigned long base, unsigned long size,
- int replace_reg);
-extern int generic_validate_add_page(unsigned long base, unsigned long size,
- unsigned int type);
-
-extern const struct mtrr_ops generic_mtrr_ops;
-
-extern int positive_have_wrcomb(void);
-
-/* library functions for processor-specific routines */
-struct set_mtrr_context {
- unsigned long flags;
- unsigned long cr4val;
- u32 deftype_lo;
- u32 deftype_hi;
- u32 ccr3;
-};
-
-void set_mtrr_done(struct set_mtrr_context *ctxt);
-void set_mtrr_cache_disable(struct set_mtrr_context *ctxt);
-void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
-
-void fill_mtrr_var_range(unsigned int index,
- u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
-void get_mtrr_state(void);
-
-extern void set_mtrr_ops(const struct mtrr_ops *ops);
-
-extern u64 size_or_mask, size_and_mask;
-extern const struct mtrr_ops *mtrr_if;
-
-#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
-#define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
-
-extern unsigned int num_var_ranges;
-extern u64 mtrr_tom2;
-extern struct mtrr_state_type mtrr_state;
-
-void mtrr_state_warn(void);
-const char *mtrr_attrib_to_str(int x);
-void mtrr_wrmsr(unsigned, unsigned, unsigned);
-
-/* CPU specific mtrr init functions */
-int amd_init_mtrr(void);
-int cyrix_init_mtrr(void);
-int centaur_init_mtrr(void);
-
-extern int changed_by_mtrr_cleanup;
-extern int mtrr_cleanup(unsigned address_bits);
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event.c
deleted file mode 100644
index bb8e0340..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event.c
+++ /dev/null
@@ -1,1886 +0,0 @@
-/*
- * Performance events x86 architecture code
- *
- * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
- * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2009 Jaswinder Singh Rajput
- * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
- * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
- * Copyright (C) 2009 Google, Inc., Stephane Eranian
- *
- * For licencing details see kernel-base/COPYING
- */
-
-#include <linux/perf_event.h>
-#include <linux/capability.h>
-#include <linux/notifier.h>
-#include <linux/hardirq.h>
-#include <linux/kprobes.h>
-#include <linux/module.h>
-#include <linux/kdebug.h>
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-#include <linux/cpu.h>
-#include <linux/bitops.h>
-#include <linux/device.h>
-
-#include <asm/apic.h>
-#include <asm/stacktrace.h>
-#include <asm/nmi.h>
-#include <asm/smp.h>
-#include <asm/alternative.h>
-#include <asm/timer.h>
-
-#include "perf_event.h"
-
-#if 0
-#undef wrmsrl
-#define wrmsrl(msr, val) \
-do { \
- trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
- (unsigned long)(val)); \
- native_write_msr((msr), (u32)((u64)(val)), \
- (u32)((u64)(val) >> 32)); \
-} while (0)
-#endif
-
-struct x86_pmu x86_pmu __read_mostly;
-
-DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
- .enabled = 1,
-};
-
-u64 __read_mostly hw_cache_event_ids
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX];
-u64 __read_mostly hw_cache_extra_regs
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX];
-
-/*
- * Propagate event elapsed time into the generic event.
- * Can only be executed on the CPU where the event is active.
- * Returns the delta events processed.
- */
-u64 x86_perf_event_update(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- int shift = 64 - x86_pmu.cntval_bits;
- u64 prev_raw_count, new_raw_count;
- int idx = hwc->idx;
- s64 delta;
-
- if (idx == X86_PMC_IDX_FIXED_BTS)
- return 0;
-
- /*
- * Careful: an NMI might modify the previous event value.
- *
- * Our tactic to handle this is to first atomically read and
- * exchange a new raw count - then add that new-prev delta
- * count to the generic event atomically:
- */
-again:
- prev_raw_count = local64_read(&hwc->prev_count);
- rdmsrl(hwc->event_base, new_raw_count);
-
- if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
- new_raw_count) != prev_raw_count)
- goto again;
-
- /*
- * Now we have the new raw value and have updated the prev
- * timestamp already. We can now calculate the elapsed delta
- * (event-)time and add that to the generic event.
- *
- * Careful, not all hw sign-extends above the physical width
- * of the count.
- */
- delta = (new_raw_count << shift) - (prev_raw_count << shift);
- delta >>= shift;
-
- local64_add(delta, &event->count);
- local64_sub(delta, &hwc->period_left);
-
- return new_raw_count;
-}
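/*
 * Editor's note: a stand-alone sketch, not part of the original file, of the
 * shift trick used in x86_perf_event_update() above. With a 48-bit counter
 * (cntval_bits = 48), shifting both raw values into the top of a 64-bit word
 * before subtracting makes a counter wrap come out as a small positive delta
 * instead of a huge negative one.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int shift = 64 - 48;				/* 48-bit counter */
	uint64_t prev_raw = 0xFFFFFFFFFFF0ULL;		/* near the 48-bit wrap */
	uint64_t new_raw  = 0x000000000010ULL;		/* counter has wrapped */
	int64_t delta;

	delta = (new_raw << shift) - (prev_raw << shift);
	delta >>= shift;				/* arithmetic shift back */

	printf("delta = %lld\n", (long long)delta);	/* prints 32 */
	return 0;
}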
-
-/*
- * Find and validate any extra registers to set up.
- */
-static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
-{
- struct hw_perf_event_extra *reg;
- struct extra_reg *er;
-
- reg = &event->hw.extra_reg;
-
- if (!x86_pmu.extra_regs)
- return 0;
-
- for (er = x86_pmu.extra_regs; er->msr; er++) {
- if (er->event != (config & er->config_mask))
- continue;
- if (event->attr.config1 & ~er->valid_mask)
- return -EINVAL;
-
- reg->idx = er->idx;
- reg->config = event->attr.config1;
- reg->reg = er->msr;
- break;
- }
- return 0;
-}
-
-static atomic_t active_events;
-static DEFINE_MUTEX(pmc_reserve_mutex);
-
-#ifdef CONFIG_X86_LOCAL_APIC
-
-static bool reserve_pmc_hardware(void)
-{
- int i;
-
- for (i = 0; i < x86_pmu.num_counters; i++) {
- if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
- goto perfctr_fail;
- }
-
- for (i = 0; i < x86_pmu.num_counters; i++) {
- if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
- goto eventsel_fail;
- }
-
- return true;
-
-eventsel_fail:
- for (i--; i >= 0; i--)
- release_evntsel_nmi(x86_pmu_config_addr(i));
-
- i = x86_pmu.num_counters;
-
-perfctr_fail:
- for (i--; i >= 0; i--)
- release_perfctr_nmi(x86_pmu_event_addr(i));
-
- return false;
-}
-
-static void release_pmc_hardware(void)
-{
- int i;
-
- for (i = 0; i < x86_pmu.num_counters; i++) {
- release_perfctr_nmi(x86_pmu_event_addr(i));
- release_evntsel_nmi(x86_pmu_config_addr(i));
- }
-}
-
-#else
-
-static bool reserve_pmc_hardware(void) { return true; }
-static void release_pmc_hardware(void) {}
-
-#endif
-
-static bool check_hw_exists(void)
-{
- u64 val, val_new = 0;
- int i, reg, ret = 0;
-
- /*
- * Check to see if the BIOS enabled any of the counters, if so
- * complain and bail.
- */
- for (i = 0; i < x86_pmu.num_counters; i++) {
- reg = x86_pmu_config_addr(i);
- ret = rdmsrl_safe(reg, &val);
- if (ret)
- goto msr_fail;
- if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
- goto bios_fail;
- }
-
- if (x86_pmu.num_counters_fixed) {
- reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
- ret = rdmsrl_safe(reg, &val);
- if (ret)
- goto msr_fail;
- for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
- if (val & (0x03 << i*4))
- goto bios_fail;
- }
- }
-
- /*
- * Now write a value and read it back to see if it matches; this is
- * this is needed to detect certain hardware emulators (qemu/kvm)
- * that don't trap on the MSR access and always return 0s.
- */
- val = 0xabcdUL;
- ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
- ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
- if (ret || val != val_new)
- goto msr_fail;
-
- return true;
-
-bios_fail:
- /*
- * We still allow the PMU driver to operate:
- */
- printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
- printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
-
- return true;
-
-msr_fail:
- printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
-
- return false;
-}
-
-static void hw_perf_event_destroy(struct perf_event *event)
-{
- if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
- release_pmc_hardware();
- release_ds_buffers();
- mutex_unlock(&pmc_reserve_mutex);
- }
-}
-
-static inline int x86_pmu_initialized(void)
-{
- return x86_pmu.handle_irq != NULL;
-}
-
-static inline int
-set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
-{
- struct perf_event_attr *attr = &event->attr;
- unsigned int cache_type, cache_op, cache_result;
- u64 config, val;
-
- config = attr->config;
-
- cache_type = (config >> 0) & 0xff;
- if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
- return -EINVAL;
-
- cache_op = (config >> 8) & 0xff;
- if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
- return -EINVAL;
-
- cache_result = (config >> 16) & 0xff;
- if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
- return -EINVAL;
-
- val = hw_cache_event_ids[cache_type][cache_op][cache_result];
-
- if (val == 0)
- return -ENOENT;
-
- if (val == -1)
- return -EINVAL;
-
- hwc->config |= val;
- attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
- return x86_pmu_extra_regs(val, event);
-}
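/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * set_ext_hw_attr() above decodes attr->config as three byte fields:
 * bits 0-7 cache type, bits 8-15 cache op, bits 16-23 cache result.
 * A PERF_TYPE_HW_CACHE event counting L1-data read misses would therefore
 * be configured roughly as below (enum values from <linux/perf_event.h>):
 */
static struct perf_event_attr l1d_read_miss_attr = {
	.type   = PERF_TYPE_HW_CACHE,
	.config = PERF_COUNT_HW_CACHE_L1D |
		  (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
		  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
};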
-
-int x86_setup_perfctr(struct perf_event *event)
-{
- struct perf_event_attr *attr = &event->attr;
- struct hw_perf_event *hwc = &event->hw;
- u64 config;
-
- if (!is_sampling_event(event)) {
- hwc->sample_period = x86_pmu.max_period;
- hwc->last_period = hwc->sample_period;
- local64_set(&hwc->period_left, hwc->sample_period);
- } else {
- /*
- * If we have a PMU initialized but no APIC
- * interrupts, we cannot sample hardware
- * events (user-space has to fall back and
- * sample via a hrtimer based software event):
- */
- if (!x86_pmu.apic)
- return -EOPNOTSUPP;
- }
-
- if (attr->type == PERF_TYPE_RAW)
- return x86_pmu_extra_regs(event->attr.config, event);
-
- if (attr->type == PERF_TYPE_HW_CACHE)
- return set_ext_hw_attr(hwc, event);
-
- if (attr->config >= x86_pmu.max_events)
- return -EINVAL;
-
- /*
- * The generic map:
- */
- config = x86_pmu.event_map(attr->config);
-
- if (config == 0)
- return -ENOENT;
-
- if (config == -1LL)
- return -EINVAL;
-
- /*
- * Branch tracing:
- */
- if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
- !attr->freq && hwc->sample_period == 1) {
- /* BTS is not supported by this architecture. */
- if (!x86_pmu.bts_active)
- return -EOPNOTSUPP;
-
- /* BTS is currently only allowed for user-mode. */
- if (!attr->exclude_kernel)
- return -EOPNOTSUPP;
- }
-
- hwc->config |= config;
-
- return 0;
-}
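/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * Per the branch-tracing check above, an event is steered to BTS only when
 * it counts branch instructions with a fixed sample period of 1 (attr->freq
 * clear) and is restricted to user mode. Such an attribute would look
 * roughly like this:
 */
static struct perf_event_attr bts_like_attr = {
	.type           = PERF_TYPE_HARDWARE,
	.config         = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
	.sample_period  = 1,	/* every taken branch; .freq left at 0 */
	.exclude_kernel = 1,	/* BTS is user-mode only here */
};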
-
-/*
- * check that branch_sample_type is compatible with
- * settings needed for precise_ip > 1 which implies
- * using the LBR to capture ALL taken branches at the
- * priv levels of the measurement
- */
-static inline int precise_br_compat(struct perf_event *event)
-{
- u64 m = event->attr.branch_sample_type;
- u64 b = 0;
-
- /* must capture all branches */
- if (!(m & PERF_SAMPLE_BRANCH_ANY))
- return 0;
-
- m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;
-
- if (!event->attr.exclude_user)
- b |= PERF_SAMPLE_BRANCH_USER;
-
- if (!event->attr.exclude_kernel)
- b |= PERF_SAMPLE_BRANCH_KERNEL;
-
- /*
- * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
- */
-
- return m == b;
-}
-
-int x86_pmu_hw_config(struct perf_event *event)
-{
- if (event->attr.precise_ip) {
- int precise = 0;
-
- /* Support for constant skid */
- if (x86_pmu.pebs_active) {
- precise++;
-
- /* Support for IP fixup */
- if (x86_pmu.lbr_nr)
- precise++;
- }
-
- if (event->attr.precise_ip > precise)
- return -EOPNOTSUPP;
- /*
- * check that PEBS LBR correction does not conflict with
- * whatever the user is asking with attr->branch_sample_type
- */
- if (event->attr.precise_ip > 1) {
- u64 *br_type = &event->attr.branch_sample_type;
-
- if (has_branch_stack(event)) {
- if (!precise_br_compat(event))
- return -EOPNOTSUPP;
-
- /* branch_sample_type is compatible */
-
- } else {
- /*
- * user did not specify branch_sample_type
- *
- * For PEBS fixups, we capture all
- * the branches at the priv level of the
- * event.
- */
- *br_type = PERF_SAMPLE_BRANCH_ANY;
-
- if (!event->attr.exclude_user)
- *br_type |= PERF_SAMPLE_BRANCH_USER;
-
- if (!event->attr.exclude_kernel)
- *br_type |= PERF_SAMPLE_BRANCH_KERNEL;
- }
- }
- }
-
- /*
- * Generate PMC IRQs:
- * (keep 'enabled' bit clear for now)
- */
- event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
-
- /*
- * Count user and OS events unless requested not to
- */
- if (!event->attr.exclude_user)
- event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
- if (!event->attr.exclude_kernel)
- event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
-
- if (event->attr.type == PERF_TYPE_RAW)
- event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
-
- return x86_setup_perfctr(event);
-}
-
-/*
- * Setup the hardware configuration for a given attr_type
- */
-static int __x86_pmu_event_init(struct perf_event *event)
-{
- int err;
-
- if (!x86_pmu_initialized())
- return -ENODEV;
-
- err = 0;
- if (!atomic_inc_not_zero(&active_events)) {
- mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&active_events) == 0) {
- if (!reserve_pmc_hardware())
- err = -EBUSY;
- else
- reserve_ds_buffers();
- }
- if (!err)
- atomic_inc(&active_events);
- mutex_unlock(&pmc_reserve_mutex);
- }
- if (err)
- return err;
-
- event->destroy = hw_perf_event_destroy;
-
- event->hw.idx = -1;
- event->hw.last_cpu = -1;
- event->hw.last_tag = ~0ULL;
-
-	/* mark not used */
-	event->hw.extra_reg.idx = EXTRA_REG_NONE;
-	event->hw.branch_reg.idx = EXTRA_REG_NONE;
-
- return x86_pmu.hw_config(event);
-}
-
-void x86_pmu_disable_all(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- int idx;
-
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- u64 val;
-
- if (!test_bit(idx, cpuc->active_mask))
- continue;
- rdmsrl(x86_pmu_config_addr(idx), val);
- if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
- continue;
- val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(x86_pmu_config_addr(idx), val);
- }
-}
-
-static void x86_pmu_disable(struct pmu *pmu)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- if (!x86_pmu_initialized())
- return;
-
- if (!cpuc->enabled)
- return;
-
- cpuc->n_added = 0;
- cpuc->enabled = 0;
- barrier();
-
- x86_pmu.disable_all();
-}
-
-void x86_pmu_enable_all(int added)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- int idx;
-
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
-
- if (!test_bit(idx, cpuc->active_mask))
- continue;
-
- __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
- }
-}
-
-static struct pmu pmu;
-
-static inline int is_x86_event(struct perf_event *event)
-{
- return event->pmu == &pmu;
-}
-
-/*
- * Event scheduler state:
- *
- * Assign events iterating over all events and counters, beginning
- * with events with least weights first. Keep the current iterator
- * state in struct sched_state.
- */
-struct sched_state {
- int weight;
- int event; /* event index */
- int counter; /* counter index */
- int unassigned; /* number of events to be assigned left */
- unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-};
-
-/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
-#define SCHED_STATES_MAX 2
-
-struct perf_sched {
- int max_weight;
- int max_events;
- struct event_constraint **constraints;
- struct sched_state state;
- int saved_states;
- struct sched_state saved[SCHED_STATES_MAX];
-};
-
-/*
- * Initialize the iterator that runs through all events and counters.
- */
-static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c,
- int num, int wmin, int wmax)
-{
- int idx;
-
- memset(sched, 0, sizeof(*sched));
- sched->max_events = num;
- sched->max_weight = wmax;
- sched->constraints = c;
-
- for (idx = 0; idx < num; idx++) {
- if (c[idx]->weight == wmin)
- break;
- }
-
- sched->state.event = idx; /* start with min weight */
- sched->state.weight = wmin;
- sched->state.unassigned = num;
-}
-
-static void perf_sched_save_state(struct perf_sched *sched)
-{
- if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
- return;
-
- sched->saved[sched->saved_states] = sched->state;
- sched->saved_states++;
-}
-
-static bool perf_sched_restore_state(struct perf_sched *sched)
-{
- if (!sched->saved_states)
- return false;
-
- sched->saved_states--;
- sched->state = sched->saved[sched->saved_states];
-
- /* continue with next counter: */
- clear_bit(sched->state.counter++, sched->state.used);
-
- return true;
-}
-
-/*
- * Select a counter for the current event to schedule. Return true on
- * success.
- */
-static bool __perf_sched_find_counter(struct perf_sched *sched)
-{
- struct event_constraint *c;
- int idx;
-
- if (!sched->state.unassigned)
- return false;
-
- if (sched->state.event >= sched->max_events)
- return false;
-
- c = sched->constraints[sched->state.event];
-
- /* Prefer fixed purpose counters */
- if (x86_pmu.num_counters_fixed) {
- idx = X86_PMC_IDX_FIXED;
- for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
- if (!__test_and_set_bit(idx, sched->state.used))
- goto done;
- }
- }
- /* Grab the first unused counter starting with idx */
- idx = sched->state.counter;
- for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
- if (!__test_and_set_bit(idx, sched->state.used))
- goto done;
- }
-
- return false;
-
-done:
- sched->state.counter = idx;
-
- if (c->overlap)
- perf_sched_save_state(sched);
-
- return true;
-}
-
-static bool perf_sched_find_counter(struct perf_sched *sched)
-{
- while (!__perf_sched_find_counter(sched)) {
- if (!perf_sched_restore_state(sched))
- return false;
- }
-
- return true;
-}
-
-/*
- * Go through all unassigned events and find the next one to schedule.
- * Take events with the least weight first. Return true on success.
- */
-static bool perf_sched_next_event(struct perf_sched *sched)
-{
- struct event_constraint *c;
-
- if (!sched->state.unassigned || !--sched->state.unassigned)
- return false;
-
- do {
- /* next event */
- sched->state.event++;
- if (sched->state.event >= sched->max_events) {
- /* next weight */
- sched->state.event = 0;
- sched->state.weight++;
- if (sched->state.weight > sched->max_weight)
- return false;
- }
- c = sched->constraints[sched->state.event];
- } while (c->weight != sched->state.weight);
-
- sched->state.counter = 0; /* start with first counter */
-
- return true;
-}
-
-/*
- * Assign a counter for each event.
- */
-static int perf_assign_events(struct event_constraint **constraints, int n,
- int wmin, int wmax, int *assign)
-{
- struct perf_sched sched;
-
- perf_sched_init(&sched, constraints, n, wmin, wmax);
-
- do {
- if (!perf_sched_find_counter(&sched))
- break; /* failed */
- if (assign)
- assign[sched.state.event] = sched.state.counter;
- } while (perf_sched_next_event(&sched));
-
- return sched.state.unassigned;
-}
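/*
 * Editor's note: a stand-alone toy sketch, not part of the original file, of
 * the least-weight-first idea behind perf_assign_events() above: schedule the
 * most constrained events (fewest allowed counters) first, then give each one
 * the first free counter in its mask. The real scheduler also backtracks via
 * the saved sched_state and prefers fixed-purpose counters.
 */
#include <stdio.h>

int main(void)
{
	/* event A may only use counter 0; event B may use any of 0-3 */
	unsigned int mask[2]   = { 0x1, 0xf };
	int          weight[2] = { 1, 4 };	/* popcount of each mask */
	int          assign[2] = { -1, -1 };
	unsigned int used = 0;
	int w, e, c;

	for (w = 1; w <= 4; w++)		/* lowest weight first */
		for (e = 0; e < 2; e++) {
			if (weight[e] != w)
				continue;
			for (c = 0; c < 4; c++) {
				if ((mask[e] >> c & 1) && !(used >> c & 1)) {
					used |= 1u << c;
					assign[e] = c;
					break;
				}
			}
		}

	/* A -> counter 0, B -> counter 1; scheduling B first could have
	 * taken counter 0 and left A with nowhere to go */
	printf("A -> %d, B -> %d\n", assign[0], assign[1]);
	return 0;
}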
-
-int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
-{
- struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
- unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- int i, wmin, wmax, num = 0;
- struct hw_perf_event *hwc;
-
- bitmap_zero(used_mask, X86_PMC_IDX_MAX);
-
- for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
- c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
- constraints[i] = c;
- wmin = min(wmin, c->weight);
- wmax = max(wmax, c->weight);
- }
-
- /*
- * fastpath, try to reuse previous register
- */
- for (i = 0; i < n; i++) {
- hwc = &cpuc->event_list[i]->hw;
- c = constraints[i];
-
- /* never assigned */
- if (hwc->idx == -1)
- break;
-
- /* constraint still honored */
- if (!test_bit(hwc->idx, c->idxmsk))
- break;
-
- /* not already used */
- if (test_bit(hwc->idx, used_mask))
- break;
-
- __set_bit(hwc->idx, used_mask);
- if (assign)
- assign[i] = hwc->idx;
- }
-
- /* slow path */
- if (i != n)
- num = perf_assign_events(constraints, n, wmin, wmax, assign);
-
- /*
- * scheduling failed or this is just a simulation;
- * free resources if necessary
- */
- if (!assign || num) {
- for (i = 0; i < n; i++) {
- if (x86_pmu.put_event_constraints)
- x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
- }
- }
- return num ? -EINVAL : 0;
-}
-
-/*
- * dogrp: true if we must collect sibling events (group)
- * Returns the total number of events, or a negative error code.
- */
-static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
-{
- struct perf_event *event;
- int n, max_count;
-
- max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
-
- /* current number of events already accepted */
- n = cpuc->n_events;
-
- if (is_x86_event(leader)) {
- if (n >= max_count)
- return -EINVAL;
- cpuc->event_list[n] = leader;
- n++;
- }
- if (!dogrp)
- return n;
-
- list_for_each_entry(event, &leader->sibling_list, group_entry) {
- if (!is_x86_event(event) ||
- event->state <= PERF_EVENT_STATE_OFF)
- continue;
-
- if (n >= max_count)
- return -EINVAL;
-
- cpuc->event_list[n] = event;
- n++;
- }
- return n;
-}
-
-static inline void x86_assign_hw_event(struct perf_event *event,
- struct cpu_hw_events *cpuc, int i)
-{
- struct hw_perf_event *hwc = &event->hw;
-
- hwc->idx = cpuc->assign[i];
- hwc->last_cpu = smp_processor_id();
- hwc->last_tag = ++cpuc->tags[i];
-
- if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
- hwc->config_base = 0;
- hwc->event_base = 0;
- } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
- hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
- hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
- } else {
- hwc->config_base = x86_pmu_config_addr(hwc->idx);
- hwc->event_base = x86_pmu_event_addr(hwc->idx);
- }
-}
-
-static inline int match_prev_assignment(struct hw_perf_event *hwc,
- struct cpu_hw_events *cpuc,
- int i)
-{
- return hwc->idx == cpuc->assign[i] &&
- hwc->last_cpu == smp_processor_id() &&
- hwc->last_tag == cpuc->tags[i];
-}
-
-static void x86_pmu_start(struct perf_event *event, int flags);
-
-static void x86_pmu_enable(struct pmu *pmu)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct perf_event *event;
- struct hw_perf_event *hwc;
- int i, added = cpuc->n_added;
-
- if (!x86_pmu_initialized())
- return;
-
- if (cpuc->enabled)
- return;
-
- if (cpuc->n_added) {
- int n_running = cpuc->n_events - cpuc->n_added;
- /*
- * apply assignment obtained either from
- * hw_perf_group_sched_in() or x86_pmu_enable()
- *
- * step1: save events moving to new counters
- * step2: reprogram moved events into new counters
- */
- for (i = 0; i < n_running; i++) {
- event = cpuc->event_list[i];
- hwc = &event->hw;
-
- /*
- * we can avoid reprogramming counter if:
- * - assigned same counter as last time
- * - running on same CPU as last time
- * - no other event has used the counter since
- */
- if (hwc->idx == -1 ||
- match_prev_assignment(hwc, cpuc, i))
- continue;
-
- /*
- * Ensure we don't accidentally enable a stopped
- * counter simply because we rescheduled.
- */
- if (hwc->state & PERF_HES_STOPPED)
- hwc->state |= PERF_HES_ARCH;
-
- x86_pmu_stop(event, PERF_EF_UPDATE);
- }
-
- for (i = 0; i < cpuc->n_events; i++) {
- event = cpuc->event_list[i];
- hwc = &event->hw;
-
- if (!match_prev_assignment(hwc, cpuc, i))
- x86_assign_hw_event(event, cpuc, i);
- else if (i < n_running)
- continue;
-
- if (hwc->state & PERF_HES_ARCH)
- continue;
-
- x86_pmu_start(event, PERF_EF_RELOAD);
- }
- cpuc->n_added = 0;
- perf_events_lapic_init();
- }
-
- cpuc->enabled = 1;
- barrier();
-
- x86_pmu.enable_all(added);
-}
-
-static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
-
-/*
- * Set the next IRQ period, based on the hwc->period_left value.
- * To be called with the event disabled in hw:
- */
-int x86_perf_event_set_period(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- s64 left = local64_read(&hwc->period_left);
- s64 period = hwc->sample_period;
- int ret = 0, idx = hwc->idx;
-
- if (idx == X86_PMC_IDX_FIXED_BTS)
- return 0;
-
- /*
- * If we are way outside a reasonable range then just skip forward:
- */
- if (unlikely(left <= -period)) {
- left = period;
- local64_set(&hwc->period_left, left);
- hwc->last_period = period;
- ret = 1;
- }
-
- if (unlikely(left <= 0)) {
- left += period;
- local64_set(&hwc->period_left, left);
- hwc->last_period = period;
- ret = 1;
- }
- /*
- * Quirk: certain CPUs don't like it if just 1 hw_event is left:
- */
- if (unlikely(left < 2))
- left = 2;
-
- if (left > x86_pmu.max_period)
- left = x86_pmu.max_period;
-
- per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
-
- /*
- * The hw event starts counting from this event offset;
- * record it so that future deltas can be computed:
- */
- local64_set(&hwc->prev_count, (u64)-left);
-
- wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
-
- /*
- * Due to an erratum on certain CPUs we need
- * a second write to be sure the register
- * is updated properly.
- */
- if (x86_pmu.perfctr_second_write) {
- wrmsrl(hwc->event_base,
- (u64)(-left) & x86_pmu.cntval_mask);
- }
-
- perf_event_update_userpage(event);
-
- return ret;
-}
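/*
 * Editor's note: a stand-alone sketch, not part of the original file, of why
 * x86_perf_event_set_period() above programs (u64)(-left) into the counter:
 * the PMC counts upward, so loading the two's complement of 'left' (truncated
 * to the counter width) makes it overflow, and raise the PMI, after exactly
 * 'left' more events. Assumes a 48-bit counter.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int      cntval_bits = 48;
	uint64_t cntval_mask = (1ULL << cntval_bits) - 1;
	int64_t  left = 1000;			/* events until the next PMI */

	uint64_t programmed = (uint64_t)(-left) & cntval_mask;

	printf("programmed value   = %#llx\n",
	       (unsigned long long)programmed);
	printf("events to overflow = %llu\n",	/* prints 1000 */
	       (unsigned long long)((cntval_mask + 1) - programmed));
	return 0;
}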
-
-void x86_pmu_enable_event(struct perf_event *event)
-{
- if (__this_cpu_read(cpu_hw_events.enabled))
- __x86_pmu_enable_event(&event->hw,
- ARCH_PERFMON_EVENTSEL_ENABLE);
-}
-
-/*
- * Add a single event to the PMU.
- *
- * The event is added to the group of enabled events
- * but only if it can be scheduled with existing events.
- */
-static int x86_pmu_add(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct hw_perf_event *hwc;
- int assign[X86_PMC_IDX_MAX];
- int n, n0, ret;
-
- hwc = &event->hw;
-
- perf_pmu_disable(event->pmu);
- n0 = cpuc->n_events;
- ret = n = collect_events(cpuc, event, false);
- if (ret < 0)
- goto out;
-
- hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
- if (!(flags & PERF_EF_START))
- hwc->state |= PERF_HES_ARCH;
-
- /*
- * If group events scheduling transaction was started,
- * skip the schedulability test here, it will be performed
- * at commit time (->commit_txn) as a whole
- */
- if (cpuc->group_flag & PERF_EVENT_TXN)
- goto done_collect;
-
- ret = x86_pmu.schedule_events(cpuc, n, assign);
- if (ret)
- goto out;
- /*
- * copy the new assignment now that we know it is possible;
- * it will be used by hw_perf_enable()
- */
- memcpy(cpuc->assign, assign, n*sizeof(int));
-
-done_collect:
- cpuc->n_events = n;
- cpuc->n_added += n - n0;
- cpuc->n_txn += n - n0;
-
- ret = 0;
-out:
- perf_pmu_enable(event->pmu);
- return ret;
-}
-
-static void x86_pmu_start(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- int idx = event->hw.idx;
-
- if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
- return;
-
- if (WARN_ON_ONCE(idx == -1))
- return;
-
- if (flags & PERF_EF_RELOAD) {
- WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
- x86_perf_event_set_period(event);
- }
-
- event->hw.state = 0;
-
- cpuc->events[idx] = event;
- __set_bit(idx, cpuc->active_mask);
- __set_bit(idx, cpuc->running);
- x86_pmu.enable(event);
- perf_event_update_userpage(event);
-}
-
-void perf_event_print_debug(void)
-{
- u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
- u64 pebs;
- struct cpu_hw_events *cpuc;
- unsigned long flags;
- int cpu, idx;
-
- if (!x86_pmu.num_counters)
- return;
-
- local_irq_save(flags);
-
- cpu = smp_processor_id();
- cpuc = &per_cpu(cpu_hw_events, cpu);
-
- if (x86_pmu.version >= 2) {
- rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
- rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
- rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
- rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
- rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
-
- pr_info("\n");
- pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
- pr_info("CPU#%d: status: %016llx\n", cpu, status);
- pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
- pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
- pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
- }
- pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
-
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
- rdmsrl(x86_pmu_event_addr(idx), pmc_count);
-
- prev_left = per_cpu(pmc_prev_left[idx], cpu);
-
- pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
- cpu, idx, pmc_ctrl);
- pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
- cpu, idx, pmc_count);
- pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
- cpu, idx, prev_left);
- }
- for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
- rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
-
- pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
- cpu, idx, pmc_count);
- }
- local_irq_restore(flags);
-}
-
-void x86_pmu_stop(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
-
- if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
- x86_pmu.disable(event);
- cpuc->events[hwc->idx] = NULL;
- WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
- hwc->state |= PERF_HES_STOPPED;
- }
-
- if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
- /*
- * Drain the remaining delta count out of an event
- * that we are disabling:
- */
- x86_perf_event_update(event);
- hwc->state |= PERF_HES_UPTODATE;
- }
-}
-
-static void x86_pmu_del(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- int i;
-
- /*
- * If we're called during a txn, we don't need to do anything.
- * The events never got scheduled and ->cancel_txn will truncate
- * the event_list.
- */
- if (cpuc->group_flag & PERF_EVENT_TXN)
- return;
-
- x86_pmu_stop(event, PERF_EF_UPDATE);
-
- for (i = 0; i < cpuc->n_events; i++) {
- if (event == cpuc->event_list[i]) {
-
- if (x86_pmu.put_event_constraints)
- x86_pmu.put_event_constraints(cpuc, event);
-
- while (++i < cpuc->n_events)
- cpuc->event_list[i-1] = cpuc->event_list[i];
-
- --cpuc->n_events;
- break;
- }
- }
- perf_event_update_userpage(event);
-}
-
-int x86_pmu_handle_irq(struct pt_regs *regs)
-{
- struct perf_sample_data data;
- struct cpu_hw_events *cpuc;
- struct perf_event *event;
- int idx, handled = 0;
- u64 val;
-
- perf_sample_data_init(&data, 0);
-
- cpuc = &__get_cpu_var(cpu_hw_events);
-
- /*
- * Some chipsets need to unmask the LVTPC in a particular spot
- * inside the nmi handler. As a result, the unmasking was pushed
- * into all the nmi handlers.
- *
- * This generic handler doesn't seem to have any issues where the
- * unmasking occurs so it was left at the top.
- */
- apic_write(APIC_LVTPC, APIC_DM_NMI);
-
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- if (!test_bit(idx, cpuc->active_mask)) {
- /*
- * Though we deactivated the counter, some CPUs
- * might still deliver spurious interrupts that are
- * still in flight. Catch them:
- */
- if (__test_and_clear_bit(idx, cpuc->running))
- handled++;
- continue;
- }
-
- event = cpuc->events[idx];
-
- val = x86_perf_event_update(event);
- if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
- continue;
-
- /*
- * event overflow
- */
- handled++;
- data.period = event->hw.last_period;
-
- if (!x86_perf_event_set_period(event))
- continue;
-
- if (perf_event_overflow(event, &data, regs))
- x86_pmu_stop(event, 0);
- }
-
- if (handled)
- inc_irq_stat(apic_perf_irqs);
-
- return handled;
-}
-
-void perf_events_lapic_init(void)
-{
- if (!x86_pmu.apic || !x86_pmu_initialized())
- return;
-
- /*
- * Always use NMI for PMU
- */
- apic_write(APIC_LVTPC, APIC_DM_NMI);
-}
-
-static int __kprobes
-perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
-{
- if (!atomic_read(&active_events))
- return NMI_DONE;
-
- return x86_pmu.handle_irq(regs);
-}
-
-struct event_constraint emptyconstraint;
-struct event_constraint unconstrained;
-
-static int __cpuinit
-x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
- unsigned int cpu = (long)hcpu;
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- int ret = NOTIFY_OK;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- cpuc->kfree_on_online = NULL;
- if (x86_pmu.cpu_prepare)
- ret = x86_pmu.cpu_prepare(cpu);
- break;
-
- case CPU_STARTING:
- if (x86_pmu.attr_rdpmc)
- set_in_cr4(X86_CR4_PCE);
- if (x86_pmu.cpu_starting)
- x86_pmu.cpu_starting(cpu);
- break;
-
- case CPU_ONLINE:
- kfree(cpuc->kfree_on_online);
- break;
-
- case CPU_DYING:
- if (x86_pmu.cpu_dying)
- x86_pmu.cpu_dying(cpu);
- break;
-
- case CPU_UP_CANCELED:
- case CPU_DEAD:
- if (x86_pmu.cpu_dead)
- x86_pmu.cpu_dead(cpu);
- break;
-
- default:
- break;
- }
-
- return ret;
-}
-
-static void __init pmu_check_apic(void)
-{
- if (cpu_has_apic)
- return;
-
- x86_pmu.apic = 0;
- pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
- pr_info("no hardware sampling interrupt available.\n");
-}
-
-static struct attribute_group x86_pmu_format_group = {
- .name = "format",
- .attrs = NULL,
-};
-
-static int __init init_hw_perf_events(void)
-{
- struct x86_pmu_quirk *quirk;
- struct event_constraint *c;
- int err;
-
- pr_info("Performance Events: ");
-
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_INTEL:
- err = intel_pmu_init();
- break;
- case X86_VENDOR_AMD:
- err = amd_pmu_init();
- break;
- default:
- return 0;
- }
- if (err != 0) {
- pr_cont("no PMU driver, software events only.\n");
- return 0;
- }
-
- pmu_check_apic();
-
- /* sanity check that the hardware exists or is emulated */
- if (!check_hw_exists())
- return 0;
-
- pr_cont("%s PMU driver.\n", x86_pmu.name);
-
- for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
- quirk->func();
-
- if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
- WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
- x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
- x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
- }
- x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
-
- if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
- WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
- x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
- x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
- }
-
- x86_pmu.intel_ctrl |=
- ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
-
- perf_events_lapic_init();
- register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
-
- unconstrained = (struct event_constraint)
- __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
- 0, x86_pmu.num_counters, 0);
-
- if (x86_pmu.event_constraints) {
- /*
- * event on fixed counter2 (REF_CYCLES) only works on this
- * counter, so do not extend mask to generic counters
- */
- for_each_event_constraint(c, x86_pmu.event_constraints) {
- if (c->cmask != X86_RAW_EVENT_MASK
- || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
- continue;
- }
-
- c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
- c->weight += x86_pmu.num_counters;
- }
- }
-
- x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
- x86_pmu_format_group.attrs = x86_pmu.format_attrs;
-
- pr_info("... version: %d\n", x86_pmu.version);
- pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
- pr_info("... generic registers: %d\n", x86_pmu.num_counters);
- pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
- pr_info("... max period: %016Lx\n", x86_pmu.max_period);
- pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
- pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
-
- perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
- perf_cpu_notifier(x86_pmu_notifier);
-
- return 0;
-}
-early_initcall(init_hw_perf_events);
-
-static inline void x86_pmu_read(struct perf_event *event)
-{
- x86_perf_event_update(event);
-}
-
-/*
- * Start group events scheduling transaction
- * Set the flag to make pmu::enable() not perform the
- * schedulability test, it will be performed at commit time
- */
-static void x86_pmu_start_txn(struct pmu *pmu)
-{
- perf_pmu_disable(pmu);
- __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
- __this_cpu_write(cpu_hw_events.n_txn, 0);
-}
-
-/*
- * Stop group events scheduling transaction
- * Clear the flag and pmu::enable() will perform the
- * schedulability test.
- */
-static void x86_pmu_cancel_txn(struct pmu *pmu)
-{
- __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
- /*
- * Truncate the collected events.
- */
- __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
- __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
- perf_pmu_enable(pmu);
-}
-
-/*
- * Commit group events scheduling transaction
- * Perform the group schedulability test as a whole
- * Return 0 if success
- */
-static int x86_pmu_commit_txn(struct pmu *pmu)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- int assign[X86_PMC_IDX_MAX];
- int n, ret;
-
- n = cpuc->n_events;
-
- if (!x86_pmu_initialized())
- return -EAGAIN;
-
- ret = x86_pmu.schedule_events(cpuc, n, assign);
- if (ret)
- return ret;
-
- /*
- * copy the new assignment now that we know it is possible;
- * it will be used by hw_perf_enable()
- */
- memcpy(cpuc->assign, assign, n*sizeof(int));
-
- cpuc->group_flag &= ~PERF_EVENT_TXN;
- perf_pmu_enable(pmu);
- return 0;
-}
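/*
 * Editor's note: an illustrative sketch, not part of the original file, of
 * the calling pattern the three transaction hooks above are written for.
 * The generic perf core brackets the ->add() of every member of an event
 * group between start_txn and commit_txn, and falls back to cancel_txn when
 * the group cannot be scheduled as a whole. Roughly (this helper and its
 * group array are hypothetical, not a real kernel interface):
 */
static int add_whole_group_sketch(struct pmu *pmu, struct perf_event **group,
				  int nr)
{
	int i;

	pmu->start_txn(pmu);

	for (i = 0; i < nr; i++)
		if (pmu->add(group[i], PERF_EF_START))
			goto fail;

	if (!pmu->commit_txn(pmu))
		return 0;		/* the whole group is now scheduled */
fail:
	pmu->cancel_txn(pmu);
	return -EAGAIN;
}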
-/*
- * a fake_cpuc is used to validate event groups. Due to
- * the extra reg logic, we need to also allocate a fake
- * per_core and per_cpu structure. Otherwise, group events
- * using extra reg may conflict without the kernel being
- * able to catch this when the last event gets added to
- * the group.
- */
-static void free_fake_cpuc(struct cpu_hw_events *cpuc)
-{
- kfree(cpuc->shared_regs);
- kfree(cpuc);
-}
-
-static struct cpu_hw_events *allocate_fake_cpuc(void)
-{
- struct cpu_hw_events *cpuc;
- int cpu = raw_smp_processor_id();
-
- cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
- if (!cpuc)
- return ERR_PTR(-ENOMEM);
-
- /* only needed, if we have extra_regs */
- if (x86_pmu.extra_regs) {
- cpuc->shared_regs = allocate_shared_regs(cpu);
- if (!cpuc->shared_regs)
- goto error;
- }
- return cpuc;
-error:
- free_fake_cpuc(cpuc);
- return ERR_PTR(-ENOMEM);
-}
-
-/*
- * validate that we can schedule this event
- */
-static int validate_event(struct perf_event *event)
-{
- struct cpu_hw_events *fake_cpuc;
- struct event_constraint *c;
- int ret = 0;
-
- fake_cpuc = allocate_fake_cpuc();
- if (IS_ERR(fake_cpuc))
- return PTR_ERR(fake_cpuc);
-
- c = x86_pmu.get_event_constraints(fake_cpuc, event);
-
- if (!c || !c->weight)
- ret = -EINVAL;
-
- if (x86_pmu.put_event_constraints)
- x86_pmu.put_event_constraints(fake_cpuc, event);
-
- free_fake_cpuc(fake_cpuc);
-
- return ret;
-}
-
-/*
- * validate a single event group
- *
- * validation includes:
- * - check that events are compatible with each other
- * - events do not compete for the same counter
- * - number of events <= number of counters
- *
- * validation ensures the group can be loaded onto the
- * PMU if it was the only group available.
- */
-static int validate_group(struct perf_event *event)
-{
- struct perf_event *leader = event->group_leader;
- struct cpu_hw_events *fake_cpuc;
- int ret = -EINVAL, n;
-
- fake_cpuc = allocate_fake_cpuc();
- if (IS_ERR(fake_cpuc))
- return PTR_ERR(fake_cpuc);
- /*
- * the event is not yet connected with its
- * siblings therefore we must first collect
- * existing siblings, then add the new event
- * before we can simulate the scheduling
- */
- n = collect_events(fake_cpuc, leader, true);
- if (n < 0)
- goto out;
-
- fake_cpuc->n_events = n;
- n = collect_events(fake_cpuc, event, false);
- if (n < 0)
- goto out;
-
- fake_cpuc->n_events = n;
-
- ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
-
-out:
- free_fake_cpuc(fake_cpuc);
- return ret;
-}
-
-static int x86_pmu_event_init(struct perf_event *event)
-{
- struct pmu *tmp;
- int err;
-
- switch (event->attr.type) {
- case PERF_TYPE_RAW:
- case PERF_TYPE_HARDWARE:
- case PERF_TYPE_HW_CACHE:
- break;
-
- default:
- return -ENOENT;
- }
-
- err = __x86_pmu_event_init(event);
- if (!err) {
- /*
- * we temporarily connect event to its pmu
- * such that validate_group() can classify
- * it as an x86 event using is_x86_event()
- */
- tmp = event->pmu;
- event->pmu = &pmu;
-
- if (event->group_leader != event)
- err = validate_group(event);
- else
- err = validate_event(event);
-
- event->pmu = tmp;
- }
- if (err) {
- if (event->destroy)
- event->destroy(event);
- }
-
- return err;
-}
-
-static int x86_pmu_event_idx(struct perf_event *event)
-{
- int idx = event->hw.idx;
-
- if (!x86_pmu.attr_rdpmc)
- return 0;
-
- if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
- idx -= X86_PMC_IDX_FIXED;
- idx |= 1 << 30;
- }
-
- return idx + 1;
-}
-
-static ssize_t get_attr_rdpmc(struct device *cdev,
- struct device_attribute *attr,
- char *buf)
-{
- return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
-}
-
-static void change_rdpmc(void *info)
-{
- bool enable = !!(unsigned long)info;
-
- if (enable)
- set_in_cr4(X86_CR4_PCE);
- else
- clear_in_cr4(X86_CR4_PCE);
-}
-
-static ssize_t set_attr_rdpmc(struct device *cdev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned long val = simple_strtoul(buf, NULL, 0);
-
- if (!!val != !!x86_pmu.attr_rdpmc) {
- x86_pmu.attr_rdpmc = !!val;
- smp_call_function(change_rdpmc, (void *)val, 1);
- }
-
- return count;
-}
-
-static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
-
-static struct attribute *x86_pmu_attrs[] = {
- &dev_attr_rdpmc.attr,
- NULL,
-};
-
-static struct attribute_group x86_pmu_attr_group = {
- .attrs = x86_pmu_attrs,
-};
-
-static const struct attribute_group *x86_pmu_attr_groups[] = {
- &x86_pmu_attr_group,
- &x86_pmu_format_group,
- NULL,
-};
-
-static void x86_pmu_flush_branch_stack(void)
-{
- if (x86_pmu.flush_branch_stack)
- x86_pmu.flush_branch_stack();
-}
-
-static struct pmu pmu = {
- .pmu_enable = x86_pmu_enable,
- .pmu_disable = x86_pmu_disable,
-
- .attr_groups = x86_pmu_attr_groups,
-
- .event_init = x86_pmu_event_init,
-
- .add = x86_pmu_add,
- .del = x86_pmu_del,
- .start = x86_pmu_start,
- .stop = x86_pmu_stop,
- .read = x86_pmu_read,
-
- .start_txn = x86_pmu_start_txn,
- .cancel_txn = x86_pmu_cancel_txn,
- .commit_txn = x86_pmu_commit_txn,
-
- .event_idx = x86_pmu_event_idx,
- .flush_branch_stack = x86_pmu_flush_branch_stack,
-};
-
-void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
-{
- userpg->cap_usr_time = 0;
- userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
- userpg->pmc_width = x86_pmu.cntval_bits;
-
- if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
- return;
-
- if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
- return;
-
- userpg->cap_usr_time = 1;
- userpg->time_mult = this_cpu_read(cyc2ns);
- userpg->time_shift = CYC2NS_SCALE_FACTOR;
- userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
-}
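A rough sketch of the conversion these fields enable in userspace (the simple, non-overflow-safe form; types follow the kernel's u64/u32 spelling and the scaling matches the cyc2ns convention used above):

/* Illustrative only: convert a TSC delta to nanoseconds using the mmap page fields. */
static inline u64 user_cyc_to_ns(u64 cyc, u32 time_mult, u16 time_shift, u64 time_offset)
{
	/* ns = time_offset + (cyc * time_mult) >> time_shift */
	return time_offset + ((cyc * time_mult) >> time_shift);
}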
-
-/*
- * callchain support
- */
-
-static int backtrace_stack(void *data, char *name)
-{
- return 0;
-}
-
-static void backtrace_address(void *data, unsigned long addr, int reliable)
-{
- struct perf_callchain_entry *entry = data;
-
- perf_callchain_store(entry, addr);
-}
-
-static const struct stacktrace_ops backtrace_ops = {
- .stack = backtrace_stack,
- .address = backtrace_address,
- .walk_stack = print_context_stack_bp,
-};
-
-void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
-{
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- /* TODO: We don't support guest os callchain now */
- return;
- }
-
- perf_callchain_store(entry, regs->ip);
-
- dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
-}
-
-#ifdef CONFIG_COMPAT
-
-#include <asm/compat.h>
-
-static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
- /* 32-bit process in 64-bit kernel. */
- struct stack_frame_ia32 frame;
- const void __user *fp;
-
- if (!test_thread_flag(TIF_IA32))
- return 0;
-
- fp = compat_ptr(regs->bp);
- while (entry->nr < PERF_MAX_STACK_DEPTH) {
- unsigned long bytes;
- frame.next_frame = 0;
- frame.return_address = 0;
-
- bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
- if (bytes != sizeof(frame))
- break;
-
- if (fp < compat_ptr(regs->sp))
- break;
-
- perf_callchain_store(entry, frame.return_address);
- fp = compat_ptr(frame.next_frame);
- }
- return 1;
-}
-#else
-static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
- return 0;
-}
-#endif
-
-void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
-{
- struct stack_frame frame;
- const void __user *fp;
-
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- /* TODO: We don't support guest os callchain now */
- return;
- }
-
- fp = (void __user *)regs->bp;
-
- perf_callchain_store(entry, regs->ip);
-
- if (!current->mm)
- return;
-
- if (perf_callchain_user32(regs, entry))
- return;
-
- while (entry->nr < PERF_MAX_STACK_DEPTH) {
- unsigned long bytes;
- frame.next_frame = NULL;
- frame.return_address = 0;
-
- bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
- if (bytes != sizeof(frame))
- break;
-
- if ((unsigned long)fp < regs->sp)
- break;
-
- perf_callchain_store(entry, frame.return_address);
- fp = frame.next_frame;
- }
-}
-
-unsigned long perf_instruction_pointer(struct pt_regs *regs)
-{
- unsigned long ip;
-
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- ip = perf_guest_cbs->get_guest_ip();
- else
- ip = instruction_pointer(regs);
-
- return ip;
-}
-
-unsigned long perf_misc_flags(struct pt_regs *regs)
-{
- int misc = 0;
-
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- if (perf_guest_cbs->is_user_mode())
- misc |= PERF_RECORD_MISC_GUEST_USER;
- else
- misc |= PERF_RECORD_MISC_GUEST_KERNEL;
- } else {
- if (user_mode(regs))
- misc |= PERF_RECORD_MISC_USER;
- else
- misc |= PERF_RECORD_MISC_KERNEL;
- }
-
- if (regs->flags & PERF_EFLAGS_EXACT)
- misc |= PERF_RECORD_MISC_EXACT_IP;
-
- return misc;
-}
-
-void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
-{
- cap->version = x86_pmu.version;
- cap->num_counters_gp = x86_pmu.num_counters;
- cap->num_counters_fixed = x86_pmu.num_counters_fixed;
- cap->bit_width_gp = x86_pmu.cntval_bits;
- cap->bit_width_fixed = x86_pmu.cntval_bits;
- cap->events_mask = (unsigned int)x86_pmu.events_maskl;
- cap->events_mask_len = x86_pmu.events_mask_len;
-}
-EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event.h b/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event.h
deleted file mode 100644
index 6638aaf5..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event.h
+++ /dev/null
@@ -1,605 +0,0 @@
-/*
- * Performance events x86 architecture header
- *
- * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
- * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2009 Jaswinder Singh Rajput
- * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
- * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
- * Copyright (C) 2009 Google, Inc., Stephane Eranian
- *
- * For licencing details see kernel-base/COPYING
- */
-
-#include <linux/perf_event.h>
-
-/*
- * | NHM/WSM | SNB |
- * register -------------------------------
- * | HT | no HT | HT | no HT |
- *-----------------------------------------
- * offcore | core | core | cpu | core |
- * lbr_sel | core | core | cpu | core |
- * ld_lat | cpu | core | cpu | core |
- *-----------------------------------------
- *
- * Given that there is a small number of shared regs,
- * we can pre-allocate their slot in the per-cpu
- * per-core reg tables.
- */
-enum extra_reg_type {
- EXTRA_REG_NONE = -1, /* not used */
-
- EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
- EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
- EXTRA_REG_LBR = 2, /* lbr_select */
-
- EXTRA_REG_MAX /* number of entries needed */
-};
-
-struct event_constraint {
- union {
- unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- u64 idxmsk64;
- };
- u64 code;
- u64 cmask;
- int weight;
- int overlap;
-};
-
-struct amd_nb {
- int nb_id; /* NorthBridge id */
- int refcnt; /* reference count */
- struct perf_event *owners[X86_PMC_IDX_MAX];
- struct event_constraint event_constraints[X86_PMC_IDX_MAX];
-};
-
-/* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS 4
-
-/*
- * A debug store configuration.
- *
- * We only support architectures that use 64-bit fields.
- */
-struct debug_store {
- u64 bts_buffer_base;
- u64 bts_index;
- u64 bts_absolute_maximum;
- u64 bts_interrupt_threshold;
- u64 pebs_buffer_base;
- u64 pebs_index;
- u64 pebs_absolute_maximum;
- u64 pebs_interrupt_threshold;
- u64 pebs_event_reset[MAX_PEBS_EVENTS];
-};
-
-/*
- * Per register state.
- */
-struct er_account {
- raw_spinlock_t lock; /* per-core: protect structure */
- u64 config; /* extra MSR config */
- u64 reg; /* extra MSR number */
- atomic_t ref; /* reference count */
-};
-
-/*
- * Per core/cpu state
- *
- * Used to coordinate shared registers between HT threads or
- * among events on a single PMU.
- */
-struct intel_shared_regs {
- struct er_account regs[EXTRA_REG_MAX];
- int refcnt; /* per-core: #HT threads */
- unsigned core_id; /* per-core: core id */
-};
-
-#define MAX_LBR_ENTRIES 16
-
-struct cpu_hw_events {
- /*
- * Generic x86 PMC bits
- */
- struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
- unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- int enabled;
-
- int n_events;
- int n_added;
- int n_txn;
- int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
- u64 tags[X86_PMC_IDX_MAX];
- struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
-
- unsigned int group_flag;
-
- /*
- * Intel DebugStore bits
- */
- struct debug_store *ds;
- u64 pebs_enabled;
-
- /*
- * Intel LBR bits
- */
- int lbr_users;
- void *lbr_context;
- struct perf_branch_stack lbr_stack;
- struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
- struct er_account *lbr_sel;
- u64 br_sel;
-
- /*
- * Intel host/guest exclude bits
- */
- u64 intel_ctrl_guest_mask;
- u64 intel_ctrl_host_mask;
- struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX];
-
- /*
- * manage shared (per-core, per-cpu) registers
- * used on Intel NHM/WSM/SNB
- */
- struct intel_shared_regs *shared_regs;
-
- /*
- * AMD specific bits
- */
- struct amd_nb *amd_nb;
- /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
- u64 perf_ctr_virt_mask;
-
- void *kfree_on_online;
-};
-
-#define __EVENT_CONSTRAINT(c, n, m, w, o) {\
- { .idxmsk64 = (n) }, \
- .code = (c), \
- .cmask = (m), \
- .weight = (w), \
- .overlap = (o), \
-}
-
-#define EVENT_CONSTRAINT(c, n, m) \
- __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0)
-
-/*
- * The overlap flag marks event constraints with overlapping counter
- * masks. This is the case if the counter mask of such an event is not
- * a subset of any other counter mask of a constraint with an equal or
- * higher weight, e.g.:
- *
- * c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
- * c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
- * c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
- *
- * The event scheduler may not select the correct counter in the first
- * cycle because it needs to know which subsequent events will be
- * scheduled. It may fail to schedule the events then. So we set the
- * overlap flag for such constraints to give the scheduler a hint about
- * which events to select for counter rescheduling.
- *
- * Care must be taken as the rescheduling algorithm is O(n!), which
- * will increase scheduling cycles for an over-committed system
- * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
- * and their counter masks must be kept at a minimum.
- */
-#define EVENT_CONSTRAINT_OVERLAP(c, n, m) \
- __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1)
-
-/*
- * Constraint on the Event code.
- */
-#define INTEL_EVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
-
-/*
- * Constraint on the Event code + UMask + fixed-mask
- *
- * Filter mask to validate fixed counter events.
- * The following filters disqualify an event for fixed counters:
- * - inv
- * - edge
- * - cnt-mask
- * The other filters are supported by fixed counters.
- * The any-thread option is supported starting with v3.
- */
-#define FIXED_EVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
-
-/*
- * Constraint on the Event code + UMask
- */
-#define INTEL_UEVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
-
-#define EVENT_CONSTRAINT_END \
- EVENT_CONSTRAINT(0, 0, 0)
-
-#define for_each_event_constraint(e, c) \
- for ((e) = (c); (e)->weight; (e)++)
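As an illustration, a constraint table terminated by EVENT_CONSTRAINT_END (weight 0) is typically walked like this; the sketch below is modeled on x86_get_event_constraints() and is not part of the original header:

/* Sketch: return the first constraint whose masked code matches the config. */
static inline struct event_constraint *
find_event_constraint(struct event_constraint *table, u64 config)
{
	struct event_constraint *c;

	for_each_event_constraint(c, table) {
		if ((config & c->cmask) == c->code)
			return c;
	}
	return NULL;	/* caller falls back to the unconstrained case */
}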
-
-/*
- * Extra registers for specific events.
- *
- * Some events need large masks and require external MSRs.
- * Those extra MSRs end up being shared by all events on
- * a PMU and sometimes between the PMUs of sibling HT threads.
- * In either case, the kernel needs to handle conflicting
- * accesses to those extra, shared, regs. The data structure
- * to manage those registers is stored in cpu_hw_event.
- */
-struct extra_reg {
- unsigned int event;
- unsigned int msr;
- u64 config_mask;
- u64 valid_mask;
- int idx; /* per_xxx->regs[] reg index */
-};
-
-#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
- .event = (e), \
- .msr = (ms), \
- .config_mask = (m), \
- .valid_mask = (vm), \
- .idx = EXTRA_REG_##i \
- }
-
-#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
- EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
-
-#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
-
-union perf_capabilities {
- struct {
- u64 lbr_format:6;
- u64 pebs_trap:1;
- u64 pebs_arch_reg:1;
- u64 pebs_format:4;
- u64 smm_freeze:1;
- };
- u64 capabilities;
-};
-
-struct x86_pmu_quirk {
- struct x86_pmu_quirk *next;
- void (*func)(void);
-};
-
-union x86_pmu_config {
- struct {
- u64 event:8,
- umask:8,
- usr:1,
- os:1,
- edge:1,
- pc:1,
- interrupt:1,
- __reserved1:1,
- en:1,
- inv:1,
- cmask:8,
- event2:4,
- __reserved2:4,
- go:1,
- ho:1;
- } bits;
- u64 value;
-};
-
-#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
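For example, the macro composes a raw config value from designated initializers (illustrative values, not taken from this header):

/* event 0xc0 (instructions retired), umask 0x01, inverted cmask of 16 */
u64 cfg = X86_CONFIG(.event = 0xc0, .umask = 0x01, .inv = 1, .cmask = 16);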
-
-/*
- * struct x86_pmu - generic x86 pmu
- */
-struct x86_pmu {
- /*
- * Generic x86 PMC bits
- */
- const char *name;
- int version;
- int (*handle_irq)(struct pt_regs *);
- void (*disable_all)(void);
- void (*enable_all)(int added);
- void (*enable)(struct perf_event *);
- void (*disable)(struct perf_event *);
- int (*hw_config)(struct perf_event *event);
- int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
- unsigned eventsel;
- unsigned perfctr;
- u64 (*event_map)(int);
- int max_events;
- int num_counters;
- int num_counters_fixed;
- int cntval_bits;
- u64 cntval_mask;
- union {
- unsigned long events_maskl;
- unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
- };
- int events_mask_len;
- int apic;
- u64 max_period;
- struct event_constraint *
- (*get_event_constraints)(struct cpu_hw_events *cpuc,
- struct perf_event *event);
-
- void (*put_event_constraints)(struct cpu_hw_events *cpuc,
- struct perf_event *event);
- struct event_constraint *event_constraints;
- struct x86_pmu_quirk *quirks;
- int perfctr_second_write;
-
- /*
- * sysfs attrs
- */
- int attr_rdpmc;
- struct attribute **format_attrs;
-
- /*
- * CPU Hotplug hooks
- */
- int (*cpu_prepare)(int cpu);
- void (*cpu_starting)(int cpu);
- void (*cpu_dying)(int cpu);
- void (*cpu_dead)(int cpu);
- void (*flush_branch_stack)(void);
-
- /*
- * Intel Arch Perfmon v2+
- */
- u64 intel_ctrl;
- union perf_capabilities intel_cap;
-
- /*
- * Intel DebugStore bits
- */
- int bts, pebs;
- int bts_active, pebs_active;
- int pebs_record_size;
- void (*drain_pebs)(struct pt_regs *regs);
- struct event_constraint *pebs_constraints;
-
- /*
- * Intel LBR
- */
- unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
- int lbr_nr; /* hardware stack size */
- u64 lbr_sel_mask; /* LBR_SELECT valid bits */
- const int *lbr_sel_map; /* lbr_select mappings */
-
- /*
- * Extra registers for events
- */
- struct extra_reg *extra_regs;
- unsigned int er_flags;
-
- /*
- * Intel host/guest support (KVM)
- */
- struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
-};
-
-#define x86_add_quirk(func_) \
-do { \
- static struct x86_pmu_quirk __quirk __initdata = { \
- .func = func_, \
- }; \
- __quirk.next = x86_pmu.quirks; \
- x86_pmu.quirks = &__quirk; \
-} while (0)
-
-#define ERF_NO_HT_SHARING 1
-#define ERF_HAS_RSP_1 2
-
-extern struct x86_pmu x86_pmu __read_mostly;
-
-DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
-
-int x86_perf_event_set_period(struct perf_event *event);
-
-/*
- * Generalized hw caching related hw_event table, filled
- * in on a per-model basis. A value of 0 means
- * 'not supported', -1 means 'hw_event makes no sense on
- * this CPU', any other value means the raw hw_event
- * ID.
- */
-
-#define C(x) PERF_COUNT_HW_CACHE_##x
-
-extern u64 __read_mostly hw_cache_event_ids
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX];
-extern u64 __read_mostly hw_cache_extra_regs
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX];
-
-u64 x86_perf_event_update(struct perf_event *event);
-
-static inline int x86_pmu_addr_offset(int index)
-{
- int offset;
-
- /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
- alternative_io(ASM_NOP2,
- "shll $1, %%eax",
- X86_FEATURE_PERFCTR_CORE,
- "=a" (offset),
- "a" (index));
-
- return offset;
-}
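The alternative_io() above is patched at boot; a plain-C equivalent, shown purely as a readability aid, would be:

static inline int x86_pmu_addr_offset_plain(int index)
{
	/*
	 * With the core perfctr extensions the control/counter MSRs are
	 * interleaved (CTL0, CTR0, CTL1, CTR1, ...), so consecutive
	 * counters sit two MSRs apart.
	 */
	if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		return index << 1;
	return index;
}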
-
-static inline unsigned int x86_pmu_config_addr(int index)
-{
- return x86_pmu.eventsel + x86_pmu_addr_offset(index);
-}
-
-static inline unsigned int x86_pmu_event_addr(int index)
-{
- return x86_pmu.perfctr + x86_pmu_addr_offset(index);
-}
-
-int x86_setup_perfctr(struct perf_event *event);
-
-int x86_pmu_hw_config(struct perf_event *event);
-
-void x86_pmu_disable_all(void);
-
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
- u64 enable_mask)
-{
- u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
-
- if (hwc->extra_reg.reg)
- wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
- wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
-}
-
-void x86_pmu_enable_all(int added);
-
-int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
-
-void x86_pmu_stop(struct perf_event *event, int flags);
-
-static inline void x86_pmu_disable_event(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
-
- wrmsrl(hwc->config_base, hwc->config);
-}
-
-void x86_pmu_enable_event(struct perf_event *event);
-
-int x86_pmu_handle_irq(struct pt_regs *regs);
-
-extern struct event_constraint emptyconstraint;
-
-extern struct event_constraint unconstrained;
-
-static inline bool kernel_ip(unsigned long ip)
-{
-#ifdef CONFIG_X86_32
- return ip > PAGE_OFFSET;
-#else
- return (long)ip < 0;
-#endif
-}
-
-#ifdef CONFIG_CPU_SUP_AMD
-
-int amd_pmu_init(void);
-
-#else /* CONFIG_CPU_SUP_AMD */
-
-static inline int amd_pmu_init(void)
-{
- return 0;
-}
-
-#endif /* CONFIG_CPU_SUP_AMD */
-
-#ifdef CONFIG_CPU_SUP_INTEL
-
-int intel_pmu_save_and_restart(struct perf_event *event);
-
-struct event_constraint *
-x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event);
-
-struct intel_shared_regs *allocate_shared_regs(int cpu);
-
-int intel_pmu_init(void);
-
-void init_debug_store_on_cpu(int cpu);
-
-void fini_debug_store_on_cpu(int cpu);
-
-void release_ds_buffers(void);
-
-void reserve_ds_buffers(void);
-
-extern struct event_constraint bts_constraint;
-
-void intel_pmu_enable_bts(u64 config);
-
-void intel_pmu_disable_bts(void);
-
-int intel_pmu_drain_bts_buffer(void);
-
-extern struct event_constraint intel_core2_pebs_event_constraints[];
-
-extern struct event_constraint intel_atom_pebs_event_constraints[];
-
-extern struct event_constraint intel_nehalem_pebs_event_constraints[];
-
-extern struct event_constraint intel_westmere_pebs_event_constraints[];
-
-extern struct event_constraint intel_snb_pebs_event_constraints[];
-
-struct event_constraint *intel_pebs_constraints(struct perf_event *event);
-
-void intel_pmu_pebs_enable(struct perf_event *event);
-
-void intel_pmu_pebs_disable(struct perf_event *event);
-
-void intel_pmu_pebs_enable_all(void);
-
-void intel_pmu_pebs_disable_all(void);
-
-void intel_ds_init(void);
-
-void intel_pmu_lbr_reset(void);
-
-void intel_pmu_lbr_enable(struct perf_event *event);
-
-void intel_pmu_lbr_disable(struct perf_event *event);
-
-void intel_pmu_lbr_enable_all(void);
-
-void intel_pmu_lbr_disable_all(void);
-
-void intel_pmu_lbr_read(void);
-
-void intel_pmu_lbr_init_core(void);
-
-void intel_pmu_lbr_init_nhm(void);
-
-void intel_pmu_lbr_init_atom(void);
-
-void intel_pmu_lbr_init_snb(void);
-
-int intel_pmu_setup_lbr_filter(struct perf_event *event);
-
-int p4_pmu_init(void);
-
-int p6_pmu_init(void);
-
-#else /* CONFIG_CPU_SUP_INTEL */
-
-static inline void reserve_ds_buffers(void)
-{
-}
-
-static inline void release_ds_buffers(void)
-{
-}
-
-static inline int intel_pmu_init(void)
-{
- return 0;
-}
-
-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
-{
- return NULL;
-}
-
-#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_amd.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_amd.c
deleted file mode 100644
index 9edc786a..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_amd.c
+++ /dev/null
@@ -1,686 +0,0 @@
-#include <linux/perf_event.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <asm/apicdef.h>
-
-#include "perf_event.h"
-
-static __initconst const u64 amd_hw_cache_event_ids
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
- [ C(RESULT_MISS) ] = 0x0141, /* Data Cache Misses */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
- [ C(RESULT_MISS) ] = 0,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
- [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
- },
- },
- [ C(L1I ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
- [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(LL ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
- [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
- [ C(RESULT_MISS) ] = 0,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(DTLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
- [ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DLTB_MISS.ALL */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(ITLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
- [ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(BPU ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
- [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(NODE) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
- [ C(RESULT_MISS) ] = 0x98e9, /* CPU Request to Memory, r */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
-};
-
-/*
- * AMD Performance Monitor K7 and later.
- */
-static const u64 amd_perfmon_event_map[] =
-{
- [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
- [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
-};
-
-static u64 amd_pmu_event_map(int hw_event)
-{
- return amd_perfmon_event_map[hw_event];
-}
-
-static int amd_pmu_hw_config(struct perf_event *event)
-{
- int ret = x86_pmu_hw_config(event);
-
- if (ret)
- return ret;
-
- if (has_branch_stack(event))
- return -EOPNOTSUPP;
-
- if (event->attr.exclude_host && event->attr.exclude_guest)
- /*
- * When HO == GO == 1 the hardware treats that as GO == HO == 0
- * and will count in both modes. We don't want to count in that
- * case so we emulate no-counting by setting US = OS = 0.
- */
- event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
- ARCH_PERFMON_EVENTSEL_OS);
- else if (event->attr.exclude_host)
- event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
- else if (event->attr.exclude_guest)
- event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;
-
- if (event->attr.type != PERF_TYPE_RAW)
- return 0;
-
- event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
-
- return 0;
-}
-
-/*
- * AMD64 events are detected based on their event codes.
- */
-static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
-{
- return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
-}
-
-static inline int amd_is_nb_event(struct hw_perf_event *hwc)
-{
- return (hwc->config & 0xe0) == 0xe0;
-}
-
-static inline int amd_has_nb(struct cpu_hw_events *cpuc)
-{
- struct amd_nb *nb = cpuc->amd_nb;
-
- return nb && nb->nb_id != -1;
-}
-
-static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
- struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct amd_nb *nb = cpuc->amd_nb;
- int i;
-
- /*
- * only care about NB events
- */
- if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
- return;
-
- /*
- * need to scan the whole list because the event may not have
- * been assigned during scheduling
- *
- * no race condition is possible because an event can only
- * be removed on one CPU at a time AND the PMU is disabled
- * when we come here
- */
- for (i = 0; i < x86_pmu.num_counters; i++) {
- if (nb->owners[i] == event) {
- cmpxchg(nb->owners+i, event, NULL);
- break;
- }
- }
-}
-
- /*
- * AMD64 NorthBridge events need special treatment because
- * counter access needs to be synchronized across all cores
- * of a package. Refer to BKDG section 3.12
- *
- * NB events are events measuring L3 cache and HyperTransport
- * traffic. They are identified by an event code >= 0xe00.
- * They measure events on the NorthBridge, which is shared
- * by all cores on a package. NB events are counted on a
- * shared set of counters. When a NB event is programmed
- * in a counter, the data actually comes from a shared
- * counter. Thus, access to those counters needs to be
- * synchronized.
- *
- * We implement the synchronization such that no two cores
- * can be measuring NB events using the same counters. Thus,
- * we maintain a per-NB allocation table. The available slot
- * is propagated using the event_constraint structure.
- *
- * We provide only one choice for each NB event based on
- * the fact that only NB events have restrictions. Consequently,
- * if a counter is available, there is a guarantee the NB event
- * will be assigned to it. If no slot is available, an empty
- * constraint is returned and scheduling will eventually fail
- * for this event.
- *
- * Note that all cores attached to the same NB compete for the same
- * counters to host NB events; this is why we use atomic ops. Some
- * multi-chip CPUs may have more than one NB.
- *
- * Given that resources are allocated (cmpxchg), they must be
- * eventually freed for others to use. This is accomplished by
- * calling amd_put_event_constraints().
- *
- * Non NB events are not impacted by this restriction.
- */
-static struct event_constraint *
-amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct amd_nb *nb = cpuc->amd_nb;
- struct perf_event *old = NULL;
- int max = x86_pmu.num_counters;
- int i, j, k = -1;
-
- /*
- * if not NB event or no NB, then no constraints
- */
- if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
- return &unconstrained;
-
- /*
- * detect if already present; if so, reuse
- *
- * cannot merge with actual allocation
- * because of possible holes
- *
- * event can already be present yet not assigned (in hwc->idx)
- * because of successive calls to x86_schedule_events() from
- * hw_perf_group_sched_in() without hw_perf_enable()
- */
- for (i = 0; i < max; i++) {
- /*
- * keep track of first free slot
- */
- if (k == -1 && !nb->owners[i])
- k = i;
-
- /* already present, reuse */
- if (nb->owners[i] == event)
- goto done;
- }
- /*
- * not present, so grab a new slot
- * starting either at:
- */
- if (hwc->idx != -1) {
- /* previous assignment */
- i = hwc->idx;
- } else if (k != -1) {
- /* start from free slot found */
- i = k;
- } else {
- /*
- * event not found, no slot found in
- * first pass, try again from the
- * beginning
- */
- i = 0;
- }
- j = i;
- do {
- old = cmpxchg(nb->owners+i, NULL, event);
- if (!old)
- break;
- if (++i == max)
- i = 0;
- } while (i != j);
-done:
- if (!old)
- return &nb->event_constraints[i];
-
- return &emptyconstraint;
-}
-
-static struct amd_nb *amd_alloc_nb(int cpu)
-{
- struct amd_nb *nb;
- int i;
-
- nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
- cpu_to_node(cpu));
- if (!nb)
- return NULL;
-
- nb->nb_id = -1;
-
- /*
- * initialize all possible NB constraints
- */
- for (i = 0; i < x86_pmu.num_counters; i++) {
- __set_bit(i, nb->event_constraints[i].idxmsk);
- nb->event_constraints[i].weight = 1;
- }
- return nb;
-}
-
-static int amd_pmu_cpu_prepare(int cpu)
-{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
-
- WARN_ON_ONCE(cpuc->amd_nb);
-
- if (boot_cpu_data.x86_max_cores < 2)
- return NOTIFY_OK;
-
- cpuc->amd_nb = amd_alloc_nb(cpu);
- if (!cpuc->amd_nb)
- return NOTIFY_BAD;
-
- return NOTIFY_OK;
-}
-
-static void amd_pmu_cpu_starting(int cpu)
-{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- struct amd_nb *nb;
- int i, nb_id;
-
- cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
-
- if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
- return;
-
- nb_id = amd_get_nb_id(cpu);
- WARN_ON_ONCE(nb_id == BAD_APICID);
-
- for_each_online_cpu(i) {
- nb = per_cpu(cpu_hw_events, i).amd_nb;
- if (WARN_ON_ONCE(!nb))
- continue;
-
- if (nb->nb_id == nb_id) {
- cpuc->kfree_on_online = cpuc->amd_nb;
- cpuc->amd_nb = nb;
- break;
- }
- }
-
- cpuc->amd_nb->nb_id = nb_id;
- cpuc->amd_nb->refcnt++;
-}
-
-static void amd_pmu_cpu_dead(int cpu)
-{
- struct cpu_hw_events *cpuhw;
-
- if (boot_cpu_data.x86_max_cores < 2)
- return;
-
- cpuhw = &per_cpu(cpu_hw_events, cpu);
-
- if (cpuhw->amd_nb) {
- struct amd_nb *nb = cpuhw->amd_nb;
-
- if (nb->nb_id == -1 || --nb->refcnt == 0)
- kfree(nb);
-
- cpuhw->amd_nb = NULL;
- }
-}
-
-PMU_FORMAT_ATTR(event, "config:0-7,32-35");
-PMU_FORMAT_ATTR(umask, "config:8-15" );
-PMU_FORMAT_ATTR(edge, "config:18" );
-PMU_FORMAT_ATTR(inv, "config:23" );
-PMU_FORMAT_ATTR(cmask, "config:24-31" );
-
-static struct attribute *amd_format_attr[] = {
- &format_attr_event.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_inv.attr,
- &format_attr_cmask.attr,
- NULL,
-};
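These format strings describe to the perf tool how user-supplied fields are scattered into the raw config word. A worked example, assuming the sysfs format interface splits values exactly as the bit ranges above state:

/*
 * Illustrative: event=0x1d6,umask=0x01 with event mapped to config:0-7,32-35
 *   event[7:0]  = 0xd6 -> config bits  7:0
 *   event[11:8] = 0x1  -> config bits 35:32
 *   umask       = 0x01 -> config bits 15:8
 *   => config = (1ULL << 32) | (0x01 << 8) | 0xd6
 */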
-
-static __initconst const struct x86_pmu amd_pmu = {
- .name = "AMD",
- .handle_irq = x86_pmu_handle_irq,
- .disable_all = x86_pmu_disable_all,
- .enable_all = x86_pmu_enable_all,
- .enable = x86_pmu_enable_event,
- .disable = x86_pmu_disable_event,
- .hw_config = amd_pmu_hw_config,
- .schedule_events = x86_schedule_events,
- .eventsel = MSR_K7_EVNTSEL0,
- .perfctr = MSR_K7_PERFCTR0,
- .event_map = amd_pmu_event_map,
- .max_events = ARRAY_SIZE(amd_perfmon_event_map),
- .num_counters = AMD64_NUM_COUNTERS,
- .cntval_bits = 48,
- .cntval_mask = (1ULL << 48) - 1,
- .apic = 1,
- /* use highest bit to detect overflow */
- .max_period = (1ULL << 47) - 1,
- .get_event_constraints = amd_get_event_constraints,
- .put_event_constraints = amd_put_event_constraints,
-
- .format_attrs = amd_format_attr,
-
- .cpu_prepare = amd_pmu_cpu_prepare,
- .cpu_starting = amd_pmu_cpu_starting,
- .cpu_dead = amd_pmu_cpu_dead,
-};
-
-/* AMD Family 15h */
-
-#define AMD_EVENT_TYPE_MASK 0x000000F0ULL
-
-#define AMD_EVENT_FP 0x00000000ULL ... 0x00000010ULL
-#define AMD_EVENT_LS 0x00000020ULL ... 0x00000030ULL
-#define AMD_EVENT_DC 0x00000040ULL ... 0x00000050ULL
-#define AMD_EVENT_CU 0x00000060ULL ... 0x00000070ULL
-#define AMD_EVENT_IC_DE 0x00000080ULL ... 0x00000090ULL
-#define AMD_EVENT_EX_LS 0x000000C0ULL
-#define AMD_EVENT_DE 0x000000D0ULL
-#define AMD_EVENT_NB 0x000000E0ULL ... 0x000000F0ULL
-
-/*
- * AMD family 15h event code/PMC mappings:
- *
- * type = event_code & 0x0F0:
- *
- * 0x000 FP PERF_CTL[5:3]
- * 0x010 FP PERF_CTL[5:3]
- * 0x020 LS PERF_CTL[5:0]
- * 0x030 LS PERF_CTL[5:0]
- * 0x040 DC PERF_CTL[5:0]
- * 0x050 DC PERF_CTL[5:0]
- * 0x060 CU PERF_CTL[2:0]
- * 0x070 CU PERF_CTL[2:0]
- * 0x080 IC/DE PERF_CTL[2:0]
- * 0x090 IC/DE PERF_CTL[2:0]
- * 0x0A0 ---
- * 0x0B0 ---
- * 0x0C0 EX/LS PERF_CTL[5:0]
- * 0x0D0 DE PERF_CTL[2:0]
- * 0x0E0 NB NB_PERF_CTL[3:0]
- * 0x0F0 NB NB_PERF_CTL[3:0]
- *
- * Exceptions:
- *
- * 0x000 FP PERF_CTL[3], PERF_CTL[5:3] (*)
- * 0x003 FP PERF_CTL[3]
- * 0x004 FP PERF_CTL[3], PERF_CTL[5:3] (*)
- * 0x00B FP PERF_CTL[3]
- * 0x00D FP PERF_CTL[3]
- * 0x023 DE PERF_CTL[2:0]
- * 0x02D LS PERF_CTL[3]
- * 0x02E LS PERF_CTL[3,0]
- * 0x031 LS PERF_CTL[2:0] (**)
- * 0x043 CU PERF_CTL[2:0]
- * 0x045 CU PERF_CTL[2:0]
- * 0x046 CU PERF_CTL[2:0]
- * 0x054 CU PERF_CTL[2:0]
- * 0x055 CU PERF_CTL[2:0]
- * 0x08F IC PERF_CTL[0]
- * 0x187 DE PERF_CTL[0]
- * 0x188 DE PERF_CTL[0]
- * 0x0DB EX PERF_CTL[5:0]
- * 0x0DC LS PERF_CTL[5:0]
- * 0x0DD LS PERF_CTL[5:0]
- * 0x0DE LS PERF_CTL[5:0]
- * 0x0DF LS PERF_CTL[5:0]
- * 0x1C0 EX PERF_CTL[5:3]
- * 0x1D6 EX PERF_CTL[5:0]
- * 0x1D8 EX PERF_CTL[5:0]
- *
- * (*) depending on the umask all FPU counters may be used
- * (**) only one unitmask enabled at a time
- */
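To connect the table above to the scheduling code that follows, a worked example of how a constraint is picked (values chosen for illustration):

/*
 * Example: event select 0x0d1 ("Dispatch stalls").
 *   event_code = ((config >> 24) & 0x0f00) | (config & 0x00ff) = 0x0d1
 *   type       = event_code & AMD_EVENT_TYPE_MASK              = 0x0d0 (DE)
 *   0x0d1 is none of the listed exceptions, so the default for the
 *   CU/IC-DE/DE group applies: amd_f15_PMC20, i.e. PERF_CTL[2:0].
 */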
-
-static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
-static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
-static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0);
-static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
-static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
-static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
-
-static struct event_constraint *
-amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- unsigned int event_code = amd_get_event_code(hwc);
-
- switch (event_code & AMD_EVENT_TYPE_MASK) {
- case AMD_EVENT_FP:
- switch (event_code) {
- case 0x000:
- if (!(hwc->config & 0x0000F000ULL))
- break;
- if (!(hwc->config & 0x00000F00ULL))
- break;
- return &amd_f15_PMC3;
- case 0x004:
- if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
- break;
- return &amd_f15_PMC3;
- case 0x003:
- case 0x00B:
- case 0x00D:
- return &amd_f15_PMC3;
- }
- return &amd_f15_PMC53;
- case AMD_EVENT_LS:
- case AMD_EVENT_DC:
- case AMD_EVENT_EX_LS:
- switch (event_code) {
- case 0x023:
- case 0x043:
- case 0x045:
- case 0x046:
- case 0x054:
- case 0x055:
- return &amd_f15_PMC20;
- case 0x02D:
- return &amd_f15_PMC3;
- case 0x02E:
- return &amd_f15_PMC30;
- case 0x031:
- if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
- return &amd_f15_PMC20;
- return &emptyconstraint;
- case 0x1C0:
- return &amd_f15_PMC53;
- default:
- return &amd_f15_PMC50;
- }
- case AMD_EVENT_CU:
- case AMD_EVENT_IC_DE:
- case AMD_EVENT_DE:
- switch (event_code) {
- case 0x08F:
- case 0x187:
- case 0x188:
- return &amd_f15_PMC0;
- case 0x0DB ... 0x0DF:
- case 0x1D6:
- case 0x1D8:
- return &amd_f15_PMC50;
- default:
- return &amd_f15_PMC20;
- }
- case AMD_EVENT_NB:
- /* not yet implemented */
- return &emptyconstraint;
- default:
- return &emptyconstraint;
- }
-}
-
-static __initconst const struct x86_pmu amd_pmu_f15h = {
- .name = "AMD Family 15h",
- .handle_irq = x86_pmu_handle_irq,
- .disable_all = x86_pmu_disable_all,
- .enable_all = x86_pmu_enable_all,
- .enable = x86_pmu_enable_event,
- .disable = x86_pmu_disable_event,
- .hw_config = amd_pmu_hw_config,
- .schedule_events = x86_schedule_events,
- .eventsel = MSR_F15H_PERF_CTL,
- .perfctr = MSR_F15H_PERF_CTR,
- .event_map = amd_pmu_event_map,
- .max_events = ARRAY_SIZE(amd_perfmon_event_map),
- .num_counters = AMD64_NUM_COUNTERS_F15H,
- .cntval_bits = 48,
- .cntval_mask = (1ULL << 48) - 1,
- .apic = 1,
- /* use highest bit to detect overflow */
- .max_period = (1ULL << 47) - 1,
- .get_event_constraints = amd_get_event_constraints_f15h,
- /* northbridge counters not yet implemented: */
-#if 0
- .put_event_constraints = amd_put_event_constraints,
-
- .cpu_prepare = amd_pmu_cpu_prepare,
- .cpu_dead = amd_pmu_cpu_dead,
-#endif
- .cpu_starting = amd_pmu_cpu_starting,
- .format_attrs = amd_format_attr,
-};
-
-__init int amd_pmu_init(void)
-{
- /* Performance-monitoring supported from K7 and later: */
- if (boot_cpu_data.x86 < 6)
- return -ENODEV;
-
- /*
- * If the core performance counter extensions exist, it must be
- * family 15h; otherwise fail. See x86_pmu_addr_offset().
- */
- switch (boot_cpu_data.x86) {
- case 0x15:
- if (!cpu_has_perfctr_core)
- return -ENODEV;
- x86_pmu = amd_pmu_f15h;
- break;
- default:
- if (cpu_has_perfctr_core)
- return -ENODEV;
- x86_pmu = amd_pmu;
- break;
- }
-
- /* Events are common for all AMDs */
- memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
-
- return 0;
-}
-
-void amd_pmu_enable_virt(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- cpuc->perf_ctr_virt_mask = 0;
-
- /* Reload all events */
- x86_pmu_disable_all();
- x86_pmu_enable_all(0);
-}
-EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
-
-void amd_pmu_disable_virt(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- /*
- * We only mask out the Host-only bit so that host-only counting works
- * when SVM is disabled. If someone sets up a guest-only counter when
- * SVM is disabled, the Guest-only bit still gets set and the counter
- * will not count anything.
- */
- cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
-
- /* Reload all events */
- x86_pmu_disable_all();
- x86_pmu_enable_all(0);
-}
-EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_amd_ibs.c
deleted file mode 100644
index 3b8a2d30..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Performance events - AMD IBS
- *
- * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
- *
- * For licencing details see kernel-base/COPYING
- */
-
-#include <linux/perf_event.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <asm/apic.h>
-
-static u32 ibs_caps;
-
-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
-
-static struct pmu perf_ibs;
-
-static int perf_ibs_init(struct perf_event *event)
-{
- if (perf_ibs.type != event->attr.type)
- return -ENOENT;
- return 0;
-}
-
-static int perf_ibs_add(struct perf_event *event, int flags)
-{
- return 0;
-}
-
-static void perf_ibs_del(struct perf_event *event, int flags)
-{
-}
-
-static struct pmu perf_ibs = {
- .event_init= perf_ibs_init,
- .add= perf_ibs_add,
- .del= perf_ibs_del,
-};
-
-static __init int perf_event_ibs_init(void)
-{
- if (!ibs_caps)
- return -ENODEV; /* ibs not supported by the cpu */
-
- perf_pmu_register(&perf_ibs, "ibs", -1);
- printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);
-
- return 0;
-}
-
-#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
-
-static __init int perf_event_ibs_init(void) { return 0; }
-
-#endif
-
-/* IBS - apic initialization, for perf and oprofile */
-
-static __init u32 __get_ibs_caps(void)
-{
- u32 caps;
- unsigned int max_level;
-
- if (!boot_cpu_has(X86_FEATURE_IBS))
- return 0;
-
- /* check IBS cpuid feature flags */
- max_level = cpuid_eax(0x80000000);
- if (max_level < IBS_CPUID_FEATURES)
- return IBS_CAPS_DEFAULT;
-
- caps = cpuid_eax(IBS_CPUID_FEATURES);
- if (!(caps & IBS_CAPS_AVAIL))
- /* cpuid flags not valid */
- return IBS_CAPS_DEFAULT;
-
- return caps;
-}
-
-u32 get_ibs_caps(void)
-{
- return ibs_caps;
-}
-
-EXPORT_SYMBOL(get_ibs_caps);
-
-static inline int get_eilvt(int offset)
-{
- return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
-}
-
-static inline int put_eilvt(int offset)
-{
- return !setup_APIC_eilvt(offset, 0, 0, 1);
-}
-
-/*
- * Check and reserve APIC extended interrupt LVT offset for IBS if available.
- */
-static inline int ibs_eilvt_valid(void)
-{
- int offset;
- u64 val;
- int valid = 0;
-
- preempt_disable();
-
- rdmsrl(MSR_AMD64_IBSCTL, val);
- offset = val & IBSCTL_LVT_OFFSET_MASK;
-
- if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
- pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
- smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
- goto out;
- }
-
- if (!get_eilvt(offset)) {
- pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
- smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
- goto out;
- }
-
- valid = 1;
-out:
- preempt_enable();
-
- return valid;
-}
-
-static int setup_ibs_ctl(int ibs_eilvt_off)
-{
- struct pci_dev *cpu_cfg;
- int nodes;
- u32 value = 0;
-
- nodes = 0;
- cpu_cfg = NULL;
- do {
- cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_10H_NB_MISC,
- cpu_cfg);
- if (!cpu_cfg)
- break;
- ++nodes;
- pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
- | IBSCTL_LVT_OFFSET_VALID);
- pci_read_config_dword(cpu_cfg, IBSCTL, &value);
- if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
- pci_dev_put(cpu_cfg);
- printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
- "IBSCTL = 0x%08x\n", value);
- return -EINVAL;
- }
- } while (1);
-
- if (!nodes) {
- printk(KERN_DEBUG "No CPU node configured for IBS\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-/*
- * This runs only on the current cpu. We try to find an LVT offset and
- * set up the local APIC. For this we must disable preemption. On
- * success we initialize all nodes with this offset. This then updates
- * the offset in the per-node IBS_CTL MSR. The per-core APIC setup of
- * the IBS interrupt vector is handled by perf_ibs_cpu_notifier, which
- * uses the new offset.
- */
-static int force_ibs_eilvt_setup(void)
-{
- int offset;
- int ret;
-
- preempt_disable();
- /* find the next free available EILVT entry, skip offset 0 */
- for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
- if (get_eilvt(offset))
- break;
- }
- preempt_enable();
-
- if (offset == APIC_EILVT_NR_MAX) {
- printk(KERN_DEBUG "No EILVT entry available\n");
- return -EBUSY;
- }
-
- ret = setup_ibs_ctl(offset);
- if (ret)
- goto out;
-
- if (!ibs_eilvt_valid()) {
- ret = -EFAULT;
- goto out;
- }
-
- pr_info("IBS: LVT offset %d assigned\n", offset);
-
- return 0;
-out:
- preempt_disable();
- put_eilvt(offset);
- preempt_enable();
- return ret;
-}
-
-static inline int get_ibs_lvt_offset(void)
-{
- u64 val;
-
- rdmsrl(MSR_AMD64_IBSCTL, val);
- if (!(val & IBSCTL_LVT_OFFSET_VALID))
- return -EINVAL;
-
- return val & IBSCTL_LVT_OFFSET_MASK;
-}
-
-static void setup_APIC_ibs(void *dummy)
-{
- int offset;
-
- offset = get_ibs_lvt_offset();
- if (offset < 0)
- goto failed;
-
- if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
- return;
-failed:
- pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
- smp_processor_id());
-}
-
-static void clear_APIC_ibs(void *dummy)
-{
- int offset;
-
- offset = get_ibs_lvt_offset();
- if (offset >= 0)
- setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
-}
-
-static int __cpuinit
-perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- setup_APIC_ibs(NULL);
- break;
- case CPU_DYING:
- clear_APIC_ibs(NULL);
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static __init int amd_ibs_init(void)
-{
- u32 caps;
- int ret = -EINVAL;
-
- caps = __get_ibs_caps();
- if (!caps)
- return -ENODEV; /* ibs not supported by the cpu */
-
- /*
- * Force LVT offset assignment for family 10h: The offsets are
- * not assigned by the BIOS for this family, so the OS is
- * responsible for doing it. If the OS assignment fails, fall
- * back to BIOS settings and try to setup this.
- */
- if (boot_cpu_data.x86 == 0x10)
- force_ibs_eilvt_setup();
-
- if (!ibs_eilvt_valid())
- goto out;
-
- get_online_cpus();
- ibs_caps = caps;
- /* make ibs_caps visible to other cpus: */
- smp_mb();
- perf_cpu_notifier(perf_ibs_cpu_notifier);
- smp_call_function(setup_APIC_ibs, NULL, 1);
- put_online_cpus();
-
- ret = perf_event_ibs_init();
-out:
- if (ret)
- pr_err("Failed to setup IBS, %d\n", ret);
- return ret;
-}
-
-/* Since we need the pci subsystem to init ibs, we can't do this earlier: */
-device_initcall(amd_ibs_init);
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_intel.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_intel.c
deleted file mode 100644
index 26b3e2fe..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_intel.c
+++ /dev/null
@@ -1,1886 +0,0 @@
-/*
- * Per core/cpu state
- *
- * Used to coordinate shared registers between HT threads or
- * among events on a single PMU.
- */
-
-#include <linux/stddef.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include <asm/hardirq.h>
-#include <asm/apic.h>
-
-#include "perf_event.h"
-
-/*
- * Intel PerfMon, used on Core and later.
- */
-static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
-{
- [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
- [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
- [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
- [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
-};
-
-static struct event_constraint intel_core_event_constraints[] __read_mostly =
-{
- INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
- INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
- INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
- INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
- INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
- INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
- EVENT_CONSTRAINT_END
-};
-
-static struct event_constraint intel_core2_event_constraints[] __read_mostly =
-{
- FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
- FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
- INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
- INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
- INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
- INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
- INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
- INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
- INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
- INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
- INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
- INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
- EVENT_CONSTRAINT_END
-};
-
-static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
-{
- FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
- FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
- INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
- INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
- INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
- INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
- INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
- INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
- INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
- INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
- EVENT_CONSTRAINT_END
-};
-
-static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
-{
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
- EVENT_EXTRA_END
-};
-
-static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
-{
- FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
- FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
- INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
- INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
- INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
- INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
- EVENT_CONSTRAINT_END
-};
-
-static struct event_constraint intel_snb_event_constraints[] __read_mostly =
-{
- FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
- FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
- INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
- INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
- INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
- EVENT_CONSTRAINT_END
-};
-
-static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
-{
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
- INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
- EVENT_EXTRA_END
-};
-
-static struct event_constraint intel_v1_event_constraints[] __read_mostly =
-{
- EVENT_CONSTRAINT_END
-};
-
-static struct event_constraint intel_gen_event_constraints[] __read_mostly =
-{
- FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
- FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
- EVENT_CONSTRAINT_END
-};
-
-static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
- INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
- EVENT_EXTRA_END
-};
-
-static u64 intel_pmu_event_map(int hw_event)
-{
- return intel_perfmon_event_map[hw_event];
-}
-
-static __initconst const u64 snb_hw_cache_event_ids
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
- [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
- [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
- },
- },
- [ C(L1I ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0x0,
- },
- },
- [ C(LL ) ] = {
- [ C(OP_READ) ] = {
- /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- [ C(OP_WRITE) ] = {
- /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- [ C(OP_PREFETCH) ] = {
- /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- },
- [ C(DTLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
- [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
- [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0x0,
- },
- },
- [ C(ITLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
- [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(BPU ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
- [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(NODE) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
-
-};
-
-static __initconst const u64 westmere_hw_cache_event_ids
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
- [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
- [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
- [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
- },
- },
- [ C(L1I ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
- [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0x0,
- },
- },
- [ C(LL ) ] = {
- [ C(OP_READ) ] = {
- /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- /*
- * Use RFO, not WRITEBACK, because a write miss would typically occur
- * on RFO.
- */
- [ C(OP_WRITE) ] = {
- /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- [ C(OP_PREFETCH) ] = {
- /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- },
- [ C(DTLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
- [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
- [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0x0,
- },
- },
- [ C(ITLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
- [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(BPU ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
- [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(NODE) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x01b7,
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x01b7,
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x01b7,
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- },
-};
-
-/*
- * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
- * See IA32 SDM Vol 3B 30.6.1.3
- */
-
-#define NHM_DMND_DATA_RD (1 << 0)
-#define NHM_DMND_RFO (1 << 1)
-#define NHM_DMND_IFETCH (1 << 2)
-#define NHM_DMND_WB (1 << 3)
-#define NHM_PF_DATA_RD (1 << 4)
-#define NHM_PF_DATA_RFO (1 << 5)
-#define NHM_PF_IFETCH (1 << 6)
-#define NHM_OFFCORE_OTHER (1 << 7)
-#define NHM_UNCORE_HIT (1 << 8)
-#define NHM_OTHER_CORE_HIT_SNP (1 << 9)
-#define NHM_OTHER_CORE_HITM (1 << 10)
- /* reserved */
-#define NHM_REMOTE_CACHE_FWD (1 << 12)
-#define NHM_REMOTE_DRAM (1 << 13)
-#define NHM_LOCAL_DRAM (1 << 14)
-#define NHM_NON_DRAM (1 << 15)
-
-#define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
-#define NHM_REMOTE (NHM_REMOTE_DRAM)
-
-#define NHM_DMND_READ (NHM_DMND_DATA_RD)
-#define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
-#define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
-
-#define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
-#define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
-#define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
-
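
The NHM_* composites above are plain OR-combinations of request-type and response-type bits. A small standalone sketch (ordinary userspace C, not kernel code; it only mirrors the defines above) makes visible the raw offcore-response values that the LL read entries in the table below resolve to:

#include <stdio.h>

/* request-type and response-type bits, mirrored from the defines above */
#define NHM_DMND_DATA_RD        (1 << 0)
#define NHM_UNCORE_HIT          (1 << 8)
#define NHM_OTHER_CORE_HIT_SNP  (1 << 9)
#define NHM_OTHER_CORE_HITM     (1 << 10)
#define NHM_REMOTE_CACHE_FWD    (1 << 12)
#define NHM_REMOTE_DRAM         (1 << 13)
#define NHM_LOCAL_DRAM          (1 << 14)
#define NHM_NON_DRAM            (1 << 15)

#define NHM_DMND_READ   (NHM_DMND_DATA_RD)
#define NHM_L3_HIT      (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS     (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS   (NHM_L3_HIT|NHM_L3_MISS)

int main(void)
{
        /* values programmed into the offcore response register for LL demand reads */
        printf("LL read access: 0x%04x\n", NHM_DMND_READ | NHM_L3_ACCESS); /* 0xf701 */
        printf("LL read miss:   0x%04x\n", NHM_DMND_READ | NHM_L3_MISS);   /* 0xf001 */
        return 0;
}

The other cells in the extra-regs table follow the same pattern with NHM_DMND_WRITE and NHM_DMND_PREFETCH in place of NHM_DMND_READ.
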
-static __initconst const u64 nehalem_hw_cache_extra_regs
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(LL ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
- [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
- [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
- [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
- },
- },
- [ C(NODE) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
- [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
- [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
- [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
- },
- },
-};
-
-static __initconst const u64 nehalem_hw_cache_event_ids
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
- [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
- [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
- [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
- },
- },
- [ C(L1I ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
- [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0x0,
- },
- },
- [ C(LL ) ] = {
- [ C(OP_READ) ] = {
- /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- /*
- * Use RFO, not WRITEBACK, because a write miss would typically occur
- * on RFO.
- */
- [ C(OP_WRITE) ] = {
- /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- [ C(OP_PREFETCH) ] = {
- /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- },
- [ C(DTLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
- [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
- [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0x0,
- },
- },
- [ C(ITLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
- [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(BPU ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
- [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(NODE) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x01b7,
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x01b7,
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x01b7,
- [ C(RESULT_MISS) ] = 0x01b7,
- },
- },
-};
-
-static __initconst const u64 core2_hw_cache_event_ids
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
- [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
- [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(L1I ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
- [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(LL ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
- [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
- [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(DTLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
- [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
- [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(ITLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
- [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(BPU ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
- [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
-};
-
-static __initconst const u64 atom_hw_cache_event_ids
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
- [ C(RESULT_MISS) ] = 0,
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
- [ C(RESULT_MISS) ] = 0,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(L1I ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
- [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(LL ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
- [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
- [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(DTLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
- [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
- [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(ITLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
- [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(BPU ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
- [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
-};
-
-static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
-{
- /* user explicitly requested branch sampling */
- if (has_branch_stack(event))
- return true;
-
- /* implicit branch sampling to correct PEBS skid */
- if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
- return true;
-
- return false;
-}
-
-static void intel_pmu_disable_all(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
-
- if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
- intel_pmu_disable_bts();
-
- intel_pmu_pebs_disable_all();
- intel_pmu_lbr_disable_all();
-}
-
-static void intel_pmu_enable_all(int added)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- intel_pmu_pebs_enable_all();
- intel_pmu_lbr_enable_all();
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
- x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
-
- if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
- struct perf_event *event =
- cpuc->events[X86_PMC_IDX_FIXED_BTS];
-
- if (WARN_ON_ONCE(!event))
- return;
-
- intel_pmu_enable_bts(event->hw.config);
- }
-}
-
-/*
- * Workaround for:
- * Intel Errata AAK100 (model 26)
- * Intel Errata AAP53 (model 30)
- * Intel Errata BD53 (model 44)
- *
- * The official story:
- * These chips need to be 'reset' when adding counters by programming the
- * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
- * in sequence on the same PMC or on different PMCs.
- *
- * In practice it appears some of these events do in fact count, and
- * we need to program all 4 events.
- */
-static void intel_pmu_nhm_workaround(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- static const unsigned long nhm_magic[4] = {
- 0x4300B5,
- 0x4300D2,
- 0x4300B1,
- 0x4300B1
- };
- struct perf_event *event;
- int i;
-
- /*
- * The erratum requires the steps below:
- * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
- * 2) Configure 4 PERFEVTSELx with the magic events and clear
- * the corresponding PMCx;
- * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
- * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
- * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
- */
-
- /*
- * The real steps we choose are a little different from above.
- * A) To reduce MSR operations, we don't run step 1) as those MSRs
- * are already cleared before this function is called;
- * B) Call x86_perf_event_update to save PMCx before configuring
- * PERFEVTSELx with magic number;
- * C) With step 5), we do clear only when the PERFEVTSELx is
- * not used currently.
- * D) Call x86_perf_event_set_period to restore PMCx;
- */
-
- /* We always operate 4 pairs of PERF Counters */
- for (i = 0; i < 4; i++) {
- event = cpuc->events[i];
- if (event)
- x86_perf_event_update(event);
- }
-
- for (i = 0; i < 4; i++) {
- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
- wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
- }
-
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
-
- for (i = 0; i < 4; i++) {
- event = cpuc->events[i];
-
- if (event) {
- x86_perf_event_set_period(event);
- __x86_pmu_enable_event(&event->hw,
- ARCH_PERFMON_EVENTSEL_ENABLE);
- } else
- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
- }
-}
-
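
The three "magic" values in the workaround above are ordinary PERFEVTSEL encodings: an event-select byte plus the USR, OS and ENABLE bits. A quick decoding sketch (userspace C, not kernel code; the event/umask positions match the format attributes exported later in this file, the USR/OS/ENABLE positions are the standard architectural PERFEVTSEL layout):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint64_t magic[] = { 0x4300B5, 0x4300D2, 0x4300B1 };
        unsigned int i;

        for (i = 0; i < sizeof(magic) / sizeof(magic[0]); i++) {
                uint64_t m = magic[i];

                /* event select bits 0-7, umask 8-15, USR bit 16, OS bit 17, ENABLE bit 22 */
                printf("0x%06llx: event=0x%02llx umask=0x%02llx usr=%llu os=%llu enable=%llu\n",
                       (unsigned long long)m,
                       (unsigned long long)(m & 0xff),
                       (unsigned long long)((m >> 8) & 0xff),
                       (unsigned long long)((m >> 16) & 1),
                       (unsigned long long)((m >> 17) & 1),
                       (unsigned long long)((m >> 22) & 1));
        }
        return 0;
}
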
-static void intel_pmu_nhm_enable_all(int added)
-{
- if (added)
- intel_pmu_nhm_workaround();
- intel_pmu_enable_all(added);
-}
-
-static inline u64 intel_pmu_get_status(void)
-{
- u64 status;
-
- rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
-
- return status;
-}
-
-static inline void intel_pmu_ack_status(u64 ack)
-{
- wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
-}
-
-static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
-{
- int idx = hwc->idx - X86_PMC_IDX_FIXED;
- u64 ctrl_val, mask;
-
- mask = 0xfULL << (idx * 4);
-
- rdmsrl(hwc->config_base, ctrl_val);
- ctrl_val &= ~mask;
- wrmsrl(hwc->config_base, ctrl_val);
-}
-
-static void intel_pmu_disable_event(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
- intel_pmu_disable_bts();
- intel_pmu_drain_bts_buffer();
- return;
- }
-
- cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
- cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
-
- /*
- * the LBR must be disabled before any actual event,
- * because any event may be combined with LBR
- */
- if (intel_pmu_needs_lbr_smpl(event))
- intel_pmu_lbr_disable(event);
-
- if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
- intel_pmu_disable_fixed(hwc);
- return;
- }
-
- x86_pmu_disable_event(event);
-
- if (unlikely(event->attr.precise_ip))
- intel_pmu_pebs_disable(event);
-}
-
-static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
-{
- int idx = hwc->idx - X86_PMC_IDX_FIXED;
- u64 ctrl_val, bits, mask;
-
- /*
- * Enable IRQ generation (0x8),
- * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
- * if requested:
- */
- bits = 0x8ULL;
- if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
- bits |= 0x2;
- if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
- bits |= 0x1;
-
- /*
- * ANY bit is supported in v3 and up
- */
- if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
- bits |= 0x4;
-
- bits <<= (idx * 4);
- mask = 0xfULL << (idx * 4);
-
- rdmsrl(hwc->config_base, ctrl_val);
- ctrl_val &= ~mask;
- ctrl_val |= bits;
- wrmsrl(hwc->config_base, ctrl_val);
-}
-
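
MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs one 4-bit control field per fixed counter, which is what the shift-and-mask logic in intel_pmu_enable_fixed()/intel_pmu_disable_fixed() manipulates. A standalone sketch of the same arithmetic (userspace C, not kernel code; the example counter index is arbitrary):

#include <stdio.h>
#include <stdint.h>

/* 0x1 = count ring 0, 0x2 = count ring 3, 0x4 = ANY (v3+), 0x8 = enable PMI */
static uint64_t fixed_ctrl_bits(int idx, int os, int usr, int any, int pmi)
{
        uint64_t bits = 0;

        if (pmi) bits |= 0x8;
        if (any) bits |= 0x4;
        if (usr) bits |= 0x2;
        if (os)  bits |= 0x1;

        return bits << (idx * 4);
}

int main(void)
{
        uint64_t ctrl = 0;
        int idx = 1;                            /* hypothetical fixed counter 1 */

        ctrl &= ~(0xfULL << (idx * 4));         /* clear the old 4-bit field */
        ctrl |= fixed_ctrl_bits(idx, 1, 1, 0, 1); /* count os+usr, PMI on */

        printf("FIXED_CTR_CTRL = 0x%llx\n", (unsigned long long)ctrl); /* 0xb0 */
        return 0;
}
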
-static void intel_pmu_enable_event(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
- if (!__this_cpu_read(cpu_hw_events.enabled))
- return;
-
- intel_pmu_enable_bts(hwc->config);
- return;
- }
- /*
- * the LBR must be enabled before any actual event,
- * because any event may be combined with LBR
- */
- if (intel_pmu_needs_lbr_smpl(event))
- intel_pmu_lbr_enable(event);
-
- if (event->attr.exclude_host)
- cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
- if (event->attr.exclude_guest)
- cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
-
- if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
- intel_pmu_enable_fixed(hwc);
- return;
- }
-
- if (unlikely(event->attr.precise_ip))
- intel_pmu_pebs_enable(event);
-
- __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
-}
-
-/*
- * Save and restart an expired event. Called by NMI contexts,
- * so it has to be careful about preempting normal event ops:
- */
-int intel_pmu_save_and_restart(struct perf_event *event)
-{
- x86_perf_event_update(event);
- return x86_perf_event_set_period(event);
-}
-
-static void intel_pmu_reset(void)
-{
- struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
- unsigned long flags;
- int idx;
-
- if (!x86_pmu.num_counters)
- return;
-
- local_irq_save(flags);
-
- printk("clearing PMU state on CPU#%d\n", smp_processor_id());
-
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
- checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
- }
- for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
- checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
-
- if (ds)
- ds->bts_index = ds->bts_buffer_base;
-
- local_irq_restore(flags);
-}
-
-/*
- * This handler is triggered by the local APIC, so the APIC IRQ handling
- * rules apply:
- */
-static int intel_pmu_handle_irq(struct pt_regs *regs)
-{
- struct perf_sample_data data;
- struct cpu_hw_events *cpuc;
- int bit, loops;
- u64 status;
- int handled;
-
- perf_sample_data_init(&data, 0);
-
- cpuc = &__get_cpu_var(cpu_hw_events);
-
- /*
- * Some chipsets need to unmask the LVTPC in a particular spot
- * inside the nmi handler. As a result, the unmasking was pushed
- * into all the nmi handlers.
- *
- * This handler doesn't seem to have any issues with the unmasking
- * so it was left at the top.
- */
- apic_write(APIC_LVTPC, APIC_DM_NMI);
-
- intel_pmu_disable_all();
- handled = intel_pmu_drain_bts_buffer();
- status = intel_pmu_get_status();
- if (!status) {
- intel_pmu_enable_all(0);
- return handled;
- }
-
- loops = 0;
-again:
- intel_pmu_ack_status(status);
- if (++loops > 100) {
- WARN_ONCE(1, "perfevents: irq loop stuck!\n");
- perf_event_print_debug();
- intel_pmu_reset();
- goto done;
- }
-
- inc_irq_stat(apic_perf_irqs);
-
- intel_pmu_lbr_read();
-
- /*
- * PEBS overflow sets bit 62 in the global status register
- */
- if (__test_and_clear_bit(62, (unsigned long *)&status)) {
- handled++;
- x86_pmu.drain_pebs(regs);
- }
-
- for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
- struct perf_event *event = cpuc->events[bit];
-
- handled++;
-
- if (!test_bit(bit, cpuc->active_mask))
- continue;
-
- if (!intel_pmu_save_and_restart(event))
- continue;
-
- data.period = event->hw.last_period;
-
- if (has_branch_stack(event))
- data.br_stack = &cpuc->lbr_stack;
-
- if (perf_event_overflow(event, &data, regs))
- x86_pmu_stop(event, 0);
- }
-
- /*
- * Repeat if there is more work to be done:
- */
- status = intel_pmu_get_status();
- if (status)
- goto again;
-
-done:
- intel_pmu_enable_all(0);
- return handled;
-}
-
-static struct event_constraint *
-intel_bts_constraints(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- unsigned int hw_event, bts_event;
-
- if (event->attr.freq)
- return NULL;
-
- hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
- bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
-
- if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
- return &bts_constraint;
-
- return NULL;
-}
-
-static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
-{
- if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
- return false;
-
- if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
- event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
- event->hw.config |= 0x01bb;
- event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
- event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
- } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
- event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
- event->hw.config |= 0x01b7;
- event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
- event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
- }
-
- if (event->hw.extra_reg.idx == orig_idx)
- return false;
-
- return true;
-}
-
-/*
- * manage allocation of shared extra msr for certain events
- *
- * sharing can be:
- * per-cpu: to be shared between the various events on a single PMU
- * per-core: per-cpu + shared by HT threads
- */
-static struct event_constraint *
-__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
- struct perf_event *event,
- struct hw_perf_event_extra *reg)
-{
- struct event_constraint *c = &emptyconstraint;
- struct er_account *era;
- unsigned long flags;
- int orig_idx = reg->idx;
-
- /* already allocated shared msr */
- if (reg->alloc)
- return NULL; /* call x86_get_event_constraint() */
-
-again:
- era = &cpuc->shared_regs->regs[reg->idx];
- /*
- * we use spin_lock_irqsave() to avoid lockdep issues when
- * passing a fake cpuc
- */
- raw_spin_lock_irqsave(&era->lock, flags);
-
- if (!atomic_read(&era->ref) || era->config == reg->config) {
-
- /* lock in msr value */
- era->config = reg->config;
- era->reg = reg->reg;
-
- /* one more user */
- atomic_inc(&era->ref);
-
- /* no need to reallocate during incremental event scheduling */
- reg->alloc = 1;
-
- /*
- * need to call x86_get_event_constraint()
- * to check if associated event has constraints
- */
- c = NULL;
- } else if (intel_try_alt_er(event, orig_idx)) {
- raw_spin_unlock_irqrestore(&era->lock, flags);
- goto again;
- }
- raw_spin_unlock_irqrestore(&era->lock, flags);
-
- return c;
-}
-
-static void
-__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
- struct hw_perf_event_extra *reg)
-{
- struct er_account *era;
-
- /*
- * only put the constraint if the extra reg was actually
- * allocated. Also takes care of events which do
- * not use an extra shared reg
- */
- if (!reg->alloc)
- return;
-
- era = &cpuc->shared_regs->regs[reg->idx];
-
- /* one fewer user */
- atomic_dec(&era->ref);
-
- /* allocate again next time */
- reg->alloc = 0;
-}
-
-static struct event_constraint *
-intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
- struct perf_event *event)
-{
- struct event_constraint *c = NULL, *d;
- struct hw_perf_event_extra *xreg, *breg;
-
- xreg = &event->hw.extra_reg;
- if (xreg->idx != EXTRA_REG_NONE) {
- c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
- if (c == &emptyconstraint)
- return c;
- }
- breg = &event->hw.branch_reg;
- if (breg->idx != EXTRA_REG_NONE) {
- d = __intel_shared_reg_get_constraints(cpuc, event, breg);
- if (d == &emptyconstraint) {
- __intel_shared_reg_put_constraints(cpuc, xreg);
- c = d;
- }
- }
- return c;
-}
-
-struct event_constraint *
-x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
-{
- struct event_constraint *c;
-
- if (x86_pmu.event_constraints) {
- for_each_event_constraint(c, x86_pmu.event_constraints) {
- if ((event->hw.config & c->cmask) == c->code)
- return c;
- }
- }
-
- return &unconstrained;
-}
-
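
The (config & cmask) == code test above is how every constraint table in this file is matched against an event. A minimal sketch of that test with illustrative values (userspace C, not kernel code; the constraint numbers are made up for the example):

#include <stdio.h>
#include <stdint.h>

struct constraint {
        uint64_t code;          /* value the masked config must equal */
        uint64_t cmask;         /* which config bits participate in the match */
};

int main(void)
{
        /* hypothetical constraint on event select 0xc0, any umask */
        struct constraint c = { .code = 0x00c0, .cmask = 0x00ff };
        uint64_t config = 0x01c0;       /* event 0xc0, umask 0x01 */

        printf("matches: %s\n", (config & c.cmask) == c.code ? "yes" : "no");
        return 0;
}
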
-static struct event_constraint *
-intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
-{
- struct event_constraint *c;
-
- c = intel_bts_constraints(event);
- if (c)
- return c;
-
- c = intel_pebs_constraints(event);
- if (c)
- return c;
-
- c = intel_shared_regs_constraints(cpuc, event);
- if (c)
- return c;
-
- return x86_get_event_constraints(cpuc, event);
-}
-
-static void
-intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
- struct perf_event *event)
-{
- struct hw_perf_event_extra *reg;
-
- reg = &event->hw.extra_reg;
- if (reg->idx != EXTRA_REG_NONE)
- __intel_shared_reg_put_constraints(cpuc, reg);
-
- reg = &event->hw.branch_reg;
- if (reg->idx != EXTRA_REG_NONE)
- __intel_shared_reg_put_constraints(cpuc, reg);
-}
-
-static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
- struct perf_event *event)
-{
- intel_put_shared_regs_event_constraints(cpuc, event);
-}
-
-static int intel_pmu_hw_config(struct perf_event *event)
-{
- int ret = x86_pmu_hw_config(event);
-
- if (ret)
- return ret;
-
- if (event->attr.precise_ip &&
- (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
- /*
- * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
- * (0x003c) so that we can use it with PEBS.
- *
- * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
- * PEBS capable. However we can use INST_RETIRED.ANY_P
- * (0x00c0), which is a PEBS capable event, to get the same
- * count.
- *
- * INST_RETIRED.ANY_P counts the number of cycles that retire
- * CNTMASK instructions. By setting CNTMASK to a value (16)
- * larger than the maximum number of instructions that can be
- * retired per cycle (4) and then inverting the condition, we
- * count all cycles that retire 16 or less instructions, which
- * is every cycle.
- *
- * Thereby we gain a PEBS capable cycle counter.
- */
- u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
-
-
- alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
- event->hw.config = alt_config;
- }
-
- if (intel_pmu_needs_lbr_smpl(event)) {
- ret = intel_pmu_setup_lbr_filter(event);
- if (ret)
- return ret;
- }
-
- if (event->attr.type != PERF_TYPE_RAW)
- return 0;
-
- if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
- return 0;
-
- if (x86_pmu.version < 3)
- return -EINVAL;
-
- if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
-
- return 0;
-}
-
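
The alternative cycles encoding built above is just INST_RETIRED.ANY_P with cmask=16 and inv=1 packed into the raw config layout (event in bits 0-7, inv in bit 23, cmask in bits 24-31, matching the format attributes defined later in this file). A quick sketch of the resulting value (userspace C, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t event = 0xc0, inv = 1, cmask = 16;

        /* equivalent of X86_CONFIG(.event=0xc0, .inv=1, .cmask=16) */
        uint64_t alt_config = (event & 0xff) | (inv << 23) | (cmask << 24);

        printf("alt_config = 0x%llx\n", (unsigned long long)alt_config); /* 0x108000c0 */
        return 0;
}
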
-struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
-{
- if (x86_pmu.guest_get_msrs)
- return x86_pmu.guest_get_msrs(nr);
- *nr = 0;
- return NULL;
-}
-EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
-
-static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
-
- arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
- arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
- arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
-
- *nr = 1;
- return arr;
-}
-
-static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
- int idx;
-
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- struct perf_event *event = cpuc->events[idx];
-
- arr[idx].msr = x86_pmu_config_addr(idx);
- arr[idx].host = arr[idx].guest = 0;
-
- if (!test_bit(idx, cpuc->active_mask))
- continue;
-
- arr[idx].host = arr[idx].guest =
- event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
-
- if (event->attr.exclude_host)
- arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
- else if (event->attr.exclude_guest)
- arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
- }
-
- *nr = x86_pmu.num_counters;
- return arr;
-}
-
-static void core_pmu_enable_event(struct perf_event *event)
-{
- if (!event->attr.exclude_host)
- x86_pmu_enable_event(event);
-}
-
-static void core_pmu_enable_all(int added)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- int idx;
-
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
-
- if (!test_bit(idx, cpuc->active_mask) ||
- cpuc->events[idx]->attr.exclude_host)
- continue;
-
- __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
- }
-}
-
-PMU_FORMAT_ATTR(event, "config:0-7" );
-PMU_FORMAT_ATTR(umask, "config:8-15" );
-PMU_FORMAT_ATTR(edge, "config:18" );
-PMU_FORMAT_ATTR(pc, "config:19" );
-PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
-PMU_FORMAT_ATTR(inv, "config:23" );
-PMU_FORMAT_ATTR(cmask, "config:24-31" );
-
-static struct attribute *intel_arch_formats_attr[] = {
- &format_attr_event.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_pc.attr,
- &format_attr_inv.attr,
- &format_attr_cmask.attr,
- NULL,
-};
-
-static __initconst const struct x86_pmu core_pmu = {
- .name = "core",
- .handle_irq = x86_pmu_handle_irq,
- .disable_all = x86_pmu_disable_all,
- .enable_all = core_pmu_enable_all,
- .enable = core_pmu_enable_event,
- .disable = x86_pmu_disable_event,
- .hw_config = x86_pmu_hw_config,
- .schedule_events = x86_schedule_events,
- .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
- .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
- .event_map = intel_pmu_event_map,
- .max_events = ARRAY_SIZE(intel_perfmon_event_map),
- .apic = 1,
- /*
- * Intel PMCs cannot be accessed sanely above 32 bit width,
- * so we install an artificial 1<<31 period regardless of
- * the generic event period:
- */
- .max_period = (1ULL << 31) - 1,
- .get_event_constraints = intel_get_event_constraints,
- .put_event_constraints = intel_put_event_constraints,
- .event_constraints = intel_core_event_constraints,
- .guest_get_msrs = core_guest_get_msrs,
- .format_attrs = intel_arch_formats_attr,
-};
-
-struct intel_shared_regs *allocate_shared_regs(int cpu)
-{
- struct intel_shared_regs *regs;
- int i;
-
- regs = kzalloc_node(sizeof(struct intel_shared_regs),
- GFP_KERNEL, cpu_to_node(cpu));
- if (regs) {
- /*
- * initialize the locks to keep lockdep happy
- */
- for (i = 0; i < EXTRA_REG_MAX; i++)
- raw_spin_lock_init(&regs->regs[i].lock);
-
- regs->core_id = -1;
- }
- return regs;
-}
-
-static int intel_pmu_cpu_prepare(int cpu)
-{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
-
- if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
- return NOTIFY_OK;
-
- cpuc->shared_regs = allocate_shared_regs(cpu);
- if (!cpuc->shared_regs)
- return NOTIFY_BAD;
-
- return NOTIFY_OK;
-}
-
-static void intel_pmu_cpu_starting(int cpu)
-{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- int core_id = topology_core_id(cpu);
- int i;
-
- init_debug_store_on_cpu(cpu);
- /*
- * Deal with CPUs that don't clear their LBRs on power-up.
- */
- intel_pmu_lbr_reset();
-
- cpuc->lbr_sel = NULL;
-
- if (!cpuc->shared_regs)
- return;
-
- if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
- for_each_cpu(i, topology_thread_cpumask(cpu)) {
- struct intel_shared_regs *pc;
-
- pc = per_cpu(cpu_hw_events, i).shared_regs;
- if (pc && pc->core_id == core_id) {
- cpuc->kfree_on_online = cpuc->shared_regs;
- cpuc->shared_regs = pc;
- break;
- }
- }
- cpuc->shared_regs->core_id = core_id;
- cpuc->shared_regs->refcnt++;
- }
-
- if (x86_pmu.lbr_sel_map)
- cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
-}
-
-static void intel_pmu_cpu_dying(int cpu)
-{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- struct intel_shared_regs *pc;
-
- pc = cpuc->shared_regs;
- if (pc) {
- if (pc->core_id == -1 || --pc->refcnt == 0)
- kfree(pc);
- cpuc->shared_regs = NULL;
- }
-
- fini_debug_store_on_cpu(cpu);
-}
-
-static void intel_pmu_flush_branch_stack(void)
-{
- /*
- * Intel LBR does not tag entries with the
- * PID of the current task, so we need to
- * flush it on context switch.
- * For now, we simply reset it
- */
- if (x86_pmu.lbr_nr)
- intel_pmu_lbr_reset();
-}
-
-PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
-
-static struct attribute *intel_arch3_formats_attr[] = {
- &format_attr_event.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_pc.attr,
- &format_attr_any.attr,
- &format_attr_inv.attr,
- &format_attr_cmask.attr,
-
- &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
- NULL,
-};
-
-static __initconst const struct x86_pmu intel_pmu = {
- .name = "Intel",
- .handle_irq = intel_pmu_handle_irq,
- .disable_all = intel_pmu_disable_all,
- .enable_all = intel_pmu_enable_all,
- .enable = intel_pmu_enable_event,
- .disable = intel_pmu_disable_event,
- .hw_config = intel_pmu_hw_config,
- .schedule_events = x86_schedule_events,
- .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
- .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
- .event_map = intel_pmu_event_map,
- .max_events = ARRAY_SIZE(intel_perfmon_event_map),
- .apic = 1,
- /*
- * Intel PMCs cannot be accessed sanely above 32 bit width,
- * so we install an artificial 1<<31 period regardless of
- * the generic event period:
- */
- .max_period = (1ULL << 31) - 1,
- .get_event_constraints = intel_get_event_constraints,
- .put_event_constraints = intel_put_event_constraints,
-
- .format_attrs = intel_arch3_formats_attr,
-
- .cpu_prepare = intel_pmu_cpu_prepare,
- .cpu_starting = intel_pmu_cpu_starting,
- .cpu_dying = intel_pmu_cpu_dying,
- .guest_get_msrs = intel_guest_get_msrs,
- .flush_branch_stack = intel_pmu_flush_branch_stack,
-};
-
-static __init void intel_clovertown_quirk(void)
-{
- /*
- * PEBS is unreliable due to:
- *
- * AJ67 - PEBS may experience CPL leaks
- * AJ68 - PEBS PMI may be delayed by one event
- * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
- * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
- *
- * AJ67 could be worked around by restricting the OS/USR flags.
- * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
- *
- * AJ106 could possibly be worked around by not allowing LBR
- * usage from PEBS, including the fixup.
- * AJ68 could possibly be worked around by always programming
- * a pebs_event_reset[0] value and coping with the lost events.
- *
- * But taken together it might just make sense to not enable PEBS on
- * these chips.
- */
- printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
- x86_pmu.pebs = 0;
- x86_pmu.pebs_constraints = NULL;
-}
-
-static __init void intel_sandybridge_quirk(void)
-{
- printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
- x86_pmu.pebs = 0;
- x86_pmu.pebs_constraints = NULL;
-}
-
-static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
- { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
- { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
- { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
- { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
- { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
- { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
- { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
-};
-
-static __init void intel_arch_events_quirk(void)
-{
- int bit;
-
- /* disable events that are reported as not present by cpuid */
- for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
- intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
- printk(KERN_WARNING "CPUID marked event: \'%s\' unavailable\n",
- intel_arch_events_map[bit].name);
- }
-}
-
-static __init void intel_nehalem_quirk(void)
-{
- union cpuid10_ebx ebx;
-
- ebx.full = x86_pmu.events_maskl;
- if (ebx.split.no_branch_misses_retired) {
- /*
- * Erratum AAJ80 detected, we work it around by using
- * the BR_MISP_EXEC.ANY event. This will over-count
- * branch-misses, but it's still much better than the
- * architectural event which is often completely bogus:
- */
- intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
- ebx.split.no_branch_misses_retired = 0;
- x86_pmu.events_maskl = ebx.full;
- printk(KERN_INFO "CPU erratum AAJ80 worked around\n");
- }
-}
-
-__init int intel_pmu_init(void)
-{
- union cpuid10_edx edx;
- union cpuid10_eax eax;
- union cpuid10_ebx ebx;
- unsigned int unused;
- int version;
-
- if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
- switch (boot_cpu_data.x86) {
- case 0x6:
- return p6_pmu_init();
- case 0xf:
- return p4_pmu_init();
- }
- return -ENODEV;
- }
-
- /*
- * Check whether the Architectural PerfMon supports
- * Branch Misses Retired hw_event or not.
- */
- cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
- if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
- return -ENODEV;
-
- version = eax.split.version_id;
- if (version < 2)
- x86_pmu = core_pmu;
- else
- x86_pmu = intel_pmu;
-
- x86_pmu.version = version;
- x86_pmu.num_counters = eax.split.num_counters;
- x86_pmu.cntval_bits = eax.split.bit_width;
- x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
-
- x86_pmu.events_maskl = ebx.full;
- x86_pmu.events_mask_len = eax.split.mask_length;
-
- /*
- * Quirk: v2 perfmon does not report fixed-purpose events, so
- * assume at least 3 events:
- */
- if (version > 1)
- x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
-
- /*
- * v2 and above have a perf capabilities MSR
- */
- if (version > 1) {
- u64 capabilities;
-
- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
- x86_pmu.intel_cap.capabilities = capabilities;
- }
-
- intel_ds_init();
-
- x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
-
- /*
- * Install the hw-cache-events table:
- */
- switch (boot_cpu_data.x86_model) {
- case 14: /* 65 nm core solo/duo, "Yonah" */
- pr_cont("Core events, ");
- break;
-
- case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
- x86_add_quirk(intel_clovertown_quirk);
- case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
- case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
- case 29: /* six-core 45 nm xeon "Dunnington" */
- memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
-
- intel_pmu_lbr_init_core();
-
- x86_pmu.event_constraints = intel_core2_event_constraints;
- x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
- pr_cont("Core2 events, ");
- break;
-
- case 26: /* 45 nm nehalem, "Bloomfield" */
- case 30: /* 45 nm nehalem, "Lynnfield" */
- case 46: /* 45 nm nehalem-ex, "Beckton" */
- memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
- memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
- sizeof(hw_cache_extra_regs));
-
- intel_pmu_lbr_init_nhm();
-
- x86_pmu.event_constraints = intel_nehalem_event_constraints;
- x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
- x86_pmu.enable_all = intel_pmu_nhm_enable_all;
- x86_pmu.extra_regs = intel_nehalem_extra_regs;
-
- /* UOPS_ISSUED.STALLED_CYCLES */
- intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
- X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
- /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
- intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
- X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
-
- x86_add_quirk(intel_nehalem_quirk);
-
- pr_cont("Nehalem events, ");
- break;
-
- case 28: /* Atom */
- memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
-
- intel_pmu_lbr_init_atom();
-
- x86_pmu.event_constraints = intel_gen_event_constraints;
- x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
- pr_cont("Atom events, ");
- break;
-
- case 37: /* 32 nm nehalem, "Clarkdale" */
- case 44: /* 32 nm nehalem, "Gulftown" */
- case 47: /* 32 nm Xeon E7 */
- memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
- memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
- sizeof(hw_cache_extra_regs));
-
- intel_pmu_lbr_init_nhm();
-
- x86_pmu.event_constraints = intel_westmere_event_constraints;
- x86_pmu.enable_all = intel_pmu_nhm_enable_all;
- x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
- x86_pmu.extra_regs = intel_westmere_extra_regs;
- x86_pmu.er_flags |= ERF_HAS_RSP_1;
-
- /* UOPS_ISSUED.STALLED_CYCLES */
- intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
- X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
- /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
- intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
- X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
-
- pr_cont("Westmere events, ");
- break;
-
- case 42: /* SandyBridge */
- x86_add_quirk(intel_sandybridge_quirk);
- case 45: /* SandyBridge, "Romley-EP" */
- memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
-
- intel_pmu_lbr_init_snb();
-
- x86_pmu.event_constraints = intel_snb_event_constraints;
- x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
- x86_pmu.extra_regs = intel_snb_extra_regs;
- /* all extra regs are per-cpu when HT is on */
- x86_pmu.er_flags |= ERF_HAS_RSP_1;
- x86_pmu.er_flags |= ERF_NO_HT_SHARING;
-
- /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
- intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
- X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
- /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
- intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
- X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
-
- pr_cont("SandyBridge events, ");
- break;
-
- default:
- switch (x86_pmu.version) {
- case 1:
- x86_pmu.event_constraints = intel_v1_event_constraints;
- pr_cont("generic architected perfmon v1, ");
- break;
- default:
- /*
- * default constraints for v2 and up
- */
- x86_pmu.event_constraints = intel_gen_event_constraints;
- pr_cont("generic architected perfmon, ");
- break;
- }
- }
-
- return 0;
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_intel_ds.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_intel_ds.c
deleted file mode 100644
index 7f64df19..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ /dev/null
@@ -1,725 +0,0 @@
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-
-#include <asm/perf_event.h>
-#include <asm/insn.h>
-
-#include "perf_event.h"
-
-/* The size of a BTS record in bytes: */
-#define BTS_RECORD_SIZE 24
-
-#define BTS_BUFFER_SIZE (PAGE_SIZE << 4)
-#define PEBS_BUFFER_SIZE PAGE_SIZE
-
-/*
- * pebs_record_32 for p4 and core not supported
-
-struct pebs_record_32 {
- u32 flags, ip;
- u32 ax, bx, cx, dx;
- u32 si, di, bp, sp;
-};
-
- */
-
-struct pebs_record_core {
- u64 flags, ip;
- u64 ax, bx, cx, dx;
- u64 si, di, bp, sp;
- u64 r8, r9, r10, r11;
- u64 r12, r13, r14, r15;
-};
-
-struct pebs_record_nhm {
- u64 flags, ip;
- u64 ax, bx, cx, dx;
- u64 si, di, bp, sp;
- u64 r8, r9, r10, r11;
- u64 r12, r13, r14, r15;
- u64 status, dla, dse, lat;
-};
-
-void init_debug_store_on_cpu(int cpu)
-{
- struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
- if (!ds)
- return;
-
- wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
- (u32)((u64)(unsigned long)ds),
- (u32)((u64)(unsigned long)ds >> 32));
-}
-
-void fini_debug_store_on_cpu(int cpu)
-{
- if (!per_cpu(cpu_hw_events, cpu).ds)
- return;
-
- wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
-}
-
-static int alloc_pebs_buffer(int cpu)
-{
- struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
- int node = cpu_to_node(cpu);
- int max, thresh = 1; /* always use a single PEBS record */
- void *buffer;
-
- if (!x86_pmu.pebs)
- return 0;
-
- buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
- if (unlikely(!buffer))
- return -ENOMEM;
-
- max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
-
- ds->pebs_buffer_base = (u64)(unsigned long)buffer;
- ds->pebs_index = ds->pebs_buffer_base;
- ds->pebs_absolute_maximum = ds->pebs_buffer_base +
- max * x86_pmu.pebs_record_size;
-
- ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
- thresh * x86_pmu.pebs_record_size;
-
- return 0;
-}
-
-static void release_pebs_buffer(int cpu)
-{
- struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
- if (!ds || !x86_pmu.pebs)
- return;
-
- kfree((void *)(unsigned long)ds->pebs_buffer_base);
- ds->pebs_buffer_base = 0;
-}
-
-static int alloc_bts_buffer(int cpu)
-{
- struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
- int node = cpu_to_node(cpu);
- int max, thresh;
- void *buffer;
-
- if (!x86_pmu.bts)
- return 0;
-
- buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
- if (unlikely(!buffer))
- return -ENOMEM;
-
- max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
- thresh = max / 16;
-
- ds->bts_buffer_base = (u64)(unsigned long)buffer;
- ds->bts_index = ds->bts_buffer_base;
- ds->bts_absolute_maximum = ds->bts_buffer_base +
- max * BTS_RECORD_SIZE;
- ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
- thresh * BTS_RECORD_SIZE;
-
- return 0;
-}
-
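
The two allocators above size their buffers from PAGE_SIZE and the per-record sizes. A back-of-the-envelope sketch of the resulting geometry (userspace C, not kernel code; it assumes 4 KiB pages and the pebs_record_nhm layout, so the numbers are illustrative only):

#include <stdio.h>

int main(void)
{
        const long page_size = 4096;            /* assumption: 4 KiB pages */
        const long pebs_buf = page_size;        /* PEBS_BUFFER_SIZE */
        const long bts_buf = page_size << 4;    /* BTS_BUFFER_SIZE */
        const long pebs_rec = 22 * 8;           /* sizeof(struct pebs_record_nhm) */
        const long bts_rec = 24;                /* BTS_RECORD_SIZE */
        long bts_max = bts_buf / bts_rec;

        printf("PEBS: %ld records, interrupt threshold after 1 record\n",
               pebs_buf / pebs_rec);
        printf("BTS:  %ld records, interrupt threshold %ld records before the end\n",
               bts_max, bts_max / 16);
        return 0;
}
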
-static void release_bts_buffer(int cpu)
-{
- struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
- if (!ds || !x86_pmu.bts)
- return;
-
- kfree((void *)(unsigned long)ds->bts_buffer_base);
- ds->bts_buffer_base = 0;
-}
-
-static int alloc_ds_buffer(int cpu)
-{
- int node = cpu_to_node(cpu);
- struct debug_store *ds;
-
- ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
- if (unlikely(!ds))
- return -ENOMEM;
-
- per_cpu(cpu_hw_events, cpu).ds = ds;
-
- return 0;
-}
-
-static void release_ds_buffer(int cpu)
-{
- struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
- if (!ds)
- return;
-
- per_cpu(cpu_hw_events, cpu).ds = NULL;
- kfree(ds);
-}
-
-void release_ds_buffers(void)
-{
- int cpu;
-
- if (!x86_pmu.bts && !x86_pmu.pebs)
- return;
-
- get_online_cpus();
- for_each_online_cpu(cpu)
- fini_debug_store_on_cpu(cpu);
-
- for_each_possible_cpu(cpu) {
- release_pebs_buffer(cpu);
- release_bts_buffer(cpu);
- release_ds_buffer(cpu);
- }
- put_online_cpus();
-}
-
-void reserve_ds_buffers(void)
-{
- int bts_err = 0, pebs_err = 0;
- int cpu;
-
- x86_pmu.bts_active = 0;
- x86_pmu.pebs_active = 0;
-
- if (!x86_pmu.bts && !x86_pmu.pebs)
- return;
-
- if (!x86_pmu.bts)
- bts_err = 1;
-
- if (!x86_pmu.pebs)
- pebs_err = 1;
-
- get_online_cpus();
-
- for_each_possible_cpu(cpu) {
- if (alloc_ds_buffer(cpu)) {
- bts_err = 1;
- pebs_err = 1;
- }
-
- if (!bts_err && alloc_bts_buffer(cpu))
- bts_err = 1;
-
- if (!pebs_err && alloc_pebs_buffer(cpu))
- pebs_err = 1;
-
- if (bts_err && pebs_err)
- break;
- }
-
- if (bts_err) {
- for_each_possible_cpu(cpu)
- release_bts_buffer(cpu);
- }
-
- if (pebs_err) {
- for_each_possible_cpu(cpu)
- release_pebs_buffer(cpu);
- }
-
- if (bts_err && pebs_err) {
- for_each_possible_cpu(cpu)
- release_ds_buffer(cpu);
- } else {
- if (x86_pmu.bts && !bts_err)
- x86_pmu.bts_active = 1;
-
- if (x86_pmu.pebs && !pebs_err)
- x86_pmu.pebs_active = 1;
-
- for_each_online_cpu(cpu)
- init_debug_store_on_cpu(cpu);
- }
-
- put_online_cpus();
-}
-
-/*
- * BTS
- */
-
-struct event_constraint bts_constraint =
- EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
-
-void intel_pmu_enable_bts(u64 config)
-{
- unsigned long debugctlmsr;
-
- debugctlmsr = get_debugctlmsr();
-
- debugctlmsr |= DEBUGCTLMSR_TR;
- debugctlmsr |= DEBUGCTLMSR_BTS;
- debugctlmsr |= DEBUGCTLMSR_BTINT;
-
- if (!(config & ARCH_PERFMON_EVENTSEL_OS))
- debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;
-
- if (!(config & ARCH_PERFMON_EVENTSEL_USR))
- debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;
-
- update_debugctlmsr(debugctlmsr);
-}
-
-void intel_pmu_disable_bts(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- unsigned long debugctlmsr;
-
- if (!cpuc->ds)
- return;
-
- debugctlmsr = get_debugctlmsr();
-
- debugctlmsr &=
- ~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
- DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);
-
- update_debugctlmsr(debugctlmsr);
-}
-
-int intel_pmu_drain_bts_buffer(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct debug_store *ds = cpuc->ds;
- struct bts_record {
- u64 from;
- u64 to;
- u64 flags;
- };
- struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
- struct bts_record *at, *top;
- struct perf_output_handle handle;
- struct perf_event_header header;
- struct perf_sample_data data;
- struct pt_regs regs;
-
- if (!event)
- return 0;
-
- if (!x86_pmu.bts_active)
- return 0;
-
- at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
- top = (struct bts_record *)(unsigned long)ds->bts_index;
-
- if (top <= at)
- return 0;
-
- ds->bts_index = ds->bts_buffer_base;
-
- perf_sample_data_init(&data, 0);
- data.period = event->hw.last_period;
- regs.ip = 0;
-
- /*
- * Prepare a generic sample, i.e. fill in the invariant fields.
- * We will overwrite the from and to address before we output
- * the sample.
- */
- perf_prepare_sample(&header, &data, event, &regs);
-
- if (perf_output_begin(&handle, event, header.size * (top - at)))
- return 1;
-
- for (; at < top; at++) {
- data.ip = at->from;
- data.addr = at->to;
-
- perf_output_sample(&handle, &header, &data, event);
- }
-
- perf_output_end(&handle);
-
- /* There's new data available. */
- event->hw.interrupts++;
- event->pending_kill = POLL_IN;
- return 1;
-}
-
-/*
- * PEBS
- */
-struct event_constraint intel_core2_pebs_event_constraints[] = {
- INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
- INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
- INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
- INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
- INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
- EVENT_CONSTRAINT_END
-};
-
-struct event_constraint intel_atom_pebs_event_constraints[] = {
- INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
- INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
- INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
- EVENT_CONSTRAINT_END
-};
-
-struct event_constraint intel_nehalem_pebs_event_constraints[] = {
- INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
- INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
- INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */
- INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
- INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
- INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
- INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
- INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
- EVENT_CONSTRAINT_END
-};
-
-struct event_constraint intel_westmere_pebs_event_constraints[] = {
- INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
- INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
- INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
- INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
- INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
- EVENT_CONSTRAINT_END
-};
-
-struct event_constraint intel_snb_pebs_event_constraints[] = {
- INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
- INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
- INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
- INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */
- INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
- INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
- INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
- INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
- INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
- INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
- INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
- INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
- INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
- INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
- EVENT_CONSTRAINT_END
-};
-
-struct event_constraint *intel_pebs_constraints(struct perf_event *event)
-{
- struct event_constraint *c;
-
- if (!event->attr.precise_ip)
- return NULL;
-
- if (x86_pmu.pebs_constraints) {
- for_each_event_constraint(c, x86_pmu.pebs_constraints) {
- if ((event->hw.config & c->cmask) == c->code)
- return c;
- }
- }
-
- return &emptyconstraint;
-}
-
-void intel_pmu_pebs_enable(struct perf_event *event)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
-
- hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
-
- cpuc->pebs_enabled |= 1ULL << hwc->idx;
-}
-
-void intel_pmu_pebs_disable(struct perf_event *event)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
-
- cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
- if (cpuc->enabled)
- wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
-
- hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
-}
-
-void intel_pmu_pebs_enable_all(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- if (cpuc->pebs_enabled)
- wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
-}
-
-void intel_pmu_pebs_disable_all(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- if (cpuc->pebs_enabled)
- wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
-}
-
-static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- unsigned long from = cpuc->lbr_entries[0].from;
- unsigned long old_to, to = cpuc->lbr_entries[0].to;
- unsigned long ip = regs->ip;
- int is_64bit = 0;
-
- /*
- * We don't need to fix up if the PEBS assist is fault-like
- */
- if (!x86_pmu.intel_cap.pebs_trap)
- return 1;
-
- /*
- * No LBR entry, no basic block, no rewinding
- */
- if (!cpuc->lbr_stack.nr || !from || !to)
- return 0;
-
- /*
- * Basic blocks should never cross user/kernel boundaries
- */
- if (kernel_ip(ip) != kernel_ip(to))
- return 0;
-
- /*
- * unsigned math, either ip is before the start (impossible) or
- * the basic block is larger than 1 page (sanity)
- */
- if ((ip - to) > PAGE_SIZE)
- return 0;
-
- /*
- * We sampled a branch insn, rewind using the LBR stack
- */
- if (ip == to) {
- regs->ip = from;
- return 1;
- }
-
- do {
- struct insn insn;
- u8 buf[MAX_INSN_SIZE];
- void *kaddr;
-
- old_to = to;
- if (!kernel_ip(ip)) {
- int bytes, size = MAX_INSN_SIZE;
-
- bytes = copy_from_user_nmi(buf, (void __user *)to, size);
- if (bytes != size)
- return 0;
-
- kaddr = buf;
- } else
- kaddr = (void *)to;
-
-#ifdef CONFIG_X86_64
- is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
-#endif
- insn_init(&insn, kaddr, is_64bit);
- insn_get_length(&insn);
- to += insn.length;
- } while (to < ip);
-
- if (to == ip) {
- regs->ip = old_to;
- return 1;
- }
-
- /*
- * Even though we decoded the basic block, the instruction stream
- * never matched the given IP; either the TO or the IP got corrupted.
- */
- return 0;
-}
-
-static void __intel_pmu_pebs_event(struct perf_event *event,
- struct pt_regs *iregs, void *__pebs)
-{
- /*
- * We cast to pebs_record_core since that is a subset of
- * both formats and we don't use the other fields in this
- * routine.
- */
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct pebs_record_core *pebs = __pebs;
- struct perf_sample_data data;
- struct pt_regs regs;
-
- if (!intel_pmu_save_and_restart(event))
- return;
-
- perf_sample_data_init(&data, 0);
- data.period = event->hw.last_period;
-
- /*
- * We use the interrupt regs as a base because the PEBS record
- * does not contain a full regs set, specifically it seems to
- * lack segment descriptors, which get used by things like
- * user_mode().
- *
- * In the simple case fix up only the IP and BP,SP regs, for
- * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
- * A possible PERF_SAMPLE_REGS will have to transfer all regs.
- */
- regs = *iregs;
- regs.ip = pebs->ip;
- regs.bp = pebs->bp;
- regs.sp = pebs->sp;
-
- if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
- regs.flags |= PERF_EFLAGS_EXACT;
- else
- regs.flags &= ~PERF_EFLAGS_EXACT;
-
- if (has_branch_stack(event))
- data.br_stack = &cpuc->lbr_stack;
-
- if (perf_event_overflow(event, &data, &regs))
- x86_pmu_stop(event, 0);
-}
-
-static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct debug_store *ds = cpuc->ds;
- struct perf_event *event = cpuc->events[0]; /* PMC0 only */
- struct pebs_record_core *at, *top;
- int n;
-
- if (!x86_pmu.pebs_active)
- return;
-
- at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
- top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
-
- /*
- * Whatever else happens, drain the thing
- */
- ds->pebs_index = ds->pebs_buffer_base;
-
- if (!test_bit(0, cpuc->active_mask))
- return;
-
- WARN_ON_ONCE(!event);
-
- if (!event->attr.precise_ip)
- return;
-
- n = top - at;
- if (n <= 0)
- return;
-
- /*
- * Should not happen, we program the threshold at 1 and do not
- * set a reset value.
- */
- WARN_ON_ONCE(n > 1);
- at += n - 1;
-
- __intel_pmu_pebs_event(event, iregs, at);
-}
-
-static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct debug_store *ds = cpuc->ds;
- struct pebs_record_nhm *at, *top;
- struct perf_event *event = NULL;
- u64 status = 0;
- int bit, n;
-
- if (!x86_pmu.pebs_active)
- return;
-
- at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
- top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
-
- ds->pebs_index = ds->pebs_buffer_base;
-
- n = top - at;
- if (n <= 0)
- return;
-
- /*
- * Should not happen, we program the threshold at 1 and do not
- * set a reset value.
- */
- WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
-
- for ( ; at < top; at++) {
- for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
- event = cpuc->events[bit];
- if (!test_bit(bit, cpuc->active_mask))
- continue;
-
- WARN_ON_ONCE(!event);
-
- if (!event->attr.precise_ip)
- continue;
-
- if (__test_and_set_bit(bit, (unsigned long *)&status))
- continue;
-
- break;
- }
-
- if (!event || bit >= MAX_PEBS_EVENTS)
- continue;
-
- __intel_pmu_pebs_event(event, iregs, at);
- }
-}
-
-/*
- * BTS, PEBS probe and setup
- */
-
-void intel_ds_init(void)
-{
- /*
- * No support for 32bit formats
- */
- if (!boot_cpu_has(X86_FEATURE_DTES64))
- return;
-
- x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
- x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
- if (x86_pmu.pebs) {
- char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
- int format = x86_pmu.intel_cap.pebs_format;
-
- switch (format) {
- case 0:
- printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
- x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
- x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
- break;
-
- case 1:
- printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
- x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
- x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
- break;
-
- default:
- printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
- x86_pmu.pebs = 0;
- }
- }
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_intel_lbr.c
deleted file mode 100644
index 520b4265..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ /dev/null
@@ -1,704 +0,0 @@
-#include <linux/perf_event.h>
-#include <linux/types.h>
-
-#include <asm/perf_event.h>
-#include <asm/msr.h>
-#include <asm/insn.h>
-
-#include "perf_event.h"
-
-enum {
- LBR_FORMAT_32 = 0x00,
- LBR_FORMAT_LIP = 0x01,
- LBR_FORMAT_EIP = 0x02,
- LBR_FORMAT_EIP_FLAGS = 0x03,
-};
-
-/*
- * Intel LBR_SELECT bits
- * Intel Vol3a, April 2011, Section 16.7 Table 16-10
- *
- * Hardware branch filter (not available on all CPUs)
- */
-#define LBR_KERNEL_BIT 0 /* do not capture at ring0 */
-#define LBR_USER_BIT 1 /* do not capture at ring > 0 */
-#define LBR_JCC_BIT 2 /* do not capture conditional branches */
-#define LBR_REL_CALL_BIT 3 /* do not capture relative calls */
-#define LBR_IND_CALL_BIT 4 /* do not capture indirect calls */
-#define LBR_RETURN_BIT 5 /* do not capture near returns */
-#define LBR_IND_JMP_BIT 6 /* do not capture indirect jumps */
-#define LBR_REL_JMP_BIT 7 /* do not capture relative jumps */
-#define LBR_FAR_BIT 8 /* do not capture far branches */
-
-#define LBR_KERNEL (1 << LBR_KERNEL_BIT)
-#define LBR_USER (1 << LBR_USER_BIT)
-#define LBR_JCC (1 << LBR_JCC_BIT)
-#define LBR_REL_CALL (1 << LBR_REL_CALL_BIT)
-#define LBR_IND_CALL (1 << LBR_IND_CALL_BIT)
-#define LBR_RETURN (1 << LBR_RETURN_BIT)
-#define LBR_REL_JMP (1 << LBR_REL_JMP_BIT)
-#define LBR_IND_JMP (1 << LBR_IND_JMP_BIT)
-#define LBR_FAR (1 << LBR_FAR_BIT)
-
-#define LBR_PLM (LBR_KERNEL | LBR_USER)
-
-#define LBR_SEL_MASK 0x1ff /* valid bits in LBR_SELECT */
-#define LBR_NOT_SUPP -1 /* LBR filter not supported */
-#define LBR_IGN 0 /* ignored */
-
-#define LBR_ANY \
- (LBR_JCC |\
- LBR_REL_CALL |\
- LBR_IND_CALL |\
- LBR_RETURN |\
- LBR_REL_JMP |\
- LBR_IND_JMP |\
- LBR_FAR)
-
-#define LBR_FROM_FLAG_MISPRED (1ULL << 63)
-
-#define for_each_branch_sample_type(x) \
- for ((x) = PERF_SAMPLE_BRANCH_USER; \
- (x) < PERF_SAMPLE_BRANCH_MAX; (x) <<= 1)
-
-/*
- * x86 control flow change classification
- * x86 control flow changes include branches, interrupts, traps, faults
- */
-enum {
- X86_BR_NONE = 0, /* unknown */
-
- X86_BR_USER = 1 << 0, /* branch target is user */
- X86_BR_KERNEL = 1 << 1, /* branch target is kernel */
-
- X86_BR_CALL = 1 << 2, /* call */
- X86_BR_RET = 1 << 3, /* return */
- X86_BR_SYSCALL = 1 << 4, /* syscall */
- X86_BR_SYSRET = 1 << 5, /* syscall return */
- X86_BR_INT = 1 << 6, /* sw interrupt */
- X86_BR_IRET = 1 << 7, /* return from interrupt */
- X86_BR_JCC = 1 << 8, /* conditional */
- X86_BR_JMP = 1 << 9, /* jump */
- X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */
- X86_BR_IND_CALL = 1 << 11,/* indirect calls */
-};
-
-#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
-
-#define X86_BR_ANY \
- (X86_BR_CALL |\
- X86_BR_RET |\
- X86_BR_SYSCALL |\
- X86_BR_SYSRET |\
- X86_BR_INT |\
- X86_BR_IRET |\
- X86_BR_JCC |\
- X86_BR_JMP |\
- X86_BR_IRQ |\
- X86_BR_IND_CALL)
-
-#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
-
-#define X86_BR_ANY_CALL \
- (X86_BR_CALL |\
- X86_BR_IND_CALL |\
- X86_BR_SYSCALL |\
- X86_BR_IRQ |\
- X86_BR_INT)
-
-static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
-
-/*
- * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
- * otherwise it becomes nearly impossible to get a reliable stack.
- */
-
-static void __intel_pmu_lbr_enable(void)
-{
- u64 debugctl;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- if (cpuc->lbr_sel)
- wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);
-
- rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
- debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
- wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
-}
-
-static void __intel_pmu_lbr_disable(void)
-{
- u64 debugctl;
-
- rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
- debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
- wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
-}
-
-static void intel_pmu_lbr_reset_32(void)
-{
- int i;
-
- for (i = 0; i < x86_pmu.lbr_nr; i++)
- wrmsrl(x86_pmu.lbr_from + i, 0);
-}
-
-static void intel_pmu_lbr_reset_64(void)
-{
- int i;
-
- for (i = 0; i < x86_pmu.lbr_nr; i++) {
- wrmsrl(x86_pmu.lbr_from + i, 0);
- wrmsrl(x86_pmu.lbr_to + i, 0);
- }
-}
-
-void intel_pmu_lbr_reset(void)
-{
- if (!x86_pmu.lbr_nr)
- return;
-
- if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
- intel_pmu_lbr_reset_32();
- else
- intel_pmu_lbr_reset_64();
-}
-
-void intel_pmu_lbr_enable(struct perf_event *event)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- if (!x86_pmu.lbr_nr)
- return;
-
- /*
- * Reset the LBR stack if we changed task context to
- * avoid data leaks.
- */
- if (event->ctx->task && cpuc->lbr_context != event->ctx) {
- intel_pmu_lbr_reset();
- cpuc->lbr_context = event->ctx;
- }
- cpuc->br_sel = event->hw.branch_reg.reg;
-
- cpuc->lbr_users++;
-}
-
-void intel_pmu_lbr_disable(struct perf_event *event)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- if (!x86_pmu.lbr_nr)
- return;
-
- cpuc->lbr_users--;
- WARN_ON_ONCE(cpuc->lbr_users < 0);
-
- if (cpuc->enabled && !cpuc->lbr_users) {
- __intel_pmu_lbr_disable();
- /* avoid stale pointer */
- cpuc->lbr_context = NULL;
- }
-}
-
-void intel_pmu_lbr_enable_all(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- if (cpuc->lbr_users)
- __intel_pmu_lbr_enable();
-}
-
-void intel_pmu_lbr_disable_all(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- if (cpuc->lbr_users)
- __intel_pmu_lbr_disable();
-}
-
-/*
- * TOS = most recently recorded branch
- */
-static inline u64 intel_pmu_lbr_tos(void)
-{
- u64 tos;
-
- rdmsrl(x86_pmu.lbr_tos, tos);
-
- return tos;
-}
-
-static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
-{
- unsigned long mask = x86_pmu.lbr_nr - 1;
- u64 tos = intel_pmu_lbr_tos();
- int i;
-
- for (i = 0; i < x86_pmu.lbr_nr; i++) {
- unsigned long lbr_idx = (tos - i) & mask;
- union {
- struct {
- u32 from;
- u32 to;
- };
- u64 lbr;
- } msr_lastbranch;
-
- rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
-
- cpuc->lbr_entries[i].from = msr_lastbranch.from;
- cpuc->lbr_entries[i].to = msr_lastbranch.to;
- cpuc->lbr_entries[i].mispred = 0;
- cpuc->lbr_entries[i].predicted = 0;
- cpuc->lbr_entries[i].reserved = 0;
- }
- cpuc->lbr_stack.nr = i;
-}
-
-/*
- * Due to lack of segmentation in Linux the effective address (offset)
- * is the same as the linear address, allowing us to merge the LIP and EIP
- * LBR formats.
- */
-static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
-{
- unsigned long mask = x86_pmu.lbr_nr - 1;
- int lbr_format = x86_pmu.intel_cap.lbr_format;
- u64 tos = intel_pmu_lbr_tos();
- int i;
-
- for (i = 0; i < x86_pmu.lbr_nr; i++) {
- unsigned long lbr_idx = (tos - i) & mask;
- u64 from, to, mis = 0, pred = 0;
-
- rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
- rdmsrl(x86_pmu.lbr_to + lbr_idx, to);
-
- if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
- mis = !!(from & LBR_FROM_FLAG_MISPRED);
- pred = !mis;
- from = (u64)((((s64)from) << 1) >> 1);
- }
-
- cpuc->lbr_entries[i].from = from;
- cpuc->lbr_entries[i].to = to;
- cpuc->lbr_entries[i].mispred = mis;
- cpuc->lbr_entries[i].predicted = pred;
- cpuc->lbr_entries[i].reserved = 0;
- }
- cpuc->lbr_stack.nr = i;
-}
-
-void intel_pmu_lbr_read(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- if (!cpuc->lbr_users)
- return;
-
- if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
- intel_pmu_lbr_read_32(cpuc);
- else
- intel_pmu_lbr_read_64(cpuc);
-
- intel_pmu_lbr_filter(cpuc);
-}
-
-/*
- * SW filter is used:
- * - in case there is no HW filter
- * - in case the HW filter has errata or limitations
- */
-static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
-{
- u64 br_type = event->attr.branch_sample_type;
- int mask = 0;
-
- if (br_type & PERF_SAMPLE_BRANCH_USER)
- mask |= X86_BR_USER;
-
- if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
- mask |= X86_BR_KERNEL;
-
- /* we ignore BRANCH_HV here */
-
- if (br_type & PERF_SAMPLE_BRANCH_ANY)
- mask |= X86_BR_ANY;
-
- if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
- mask |= X86_BR_ANY_CALL;
-
- if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
- mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;
-
- if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
- mask |= X86_BR_IND_CALL;
- /*
-	 * stash the actual user request into reg; it may
-	 * be used by the fixup code for some CPUs
- */
- event->hw.branch_reg.reg = mask;
-}
-
-/*
- * Set up the HW LBR filter.
- * Used only when available; it may not be enough to disambiguate
- * all branches and may need the help of the SW filter
- */
-static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
-{
- struct hw_perf_event_extra *reg;
- u64 br_type = event->attr.branch_sample_type;
- u64 mask = 0, m;
- u64 v;
-
- for_each_branch_sample_type(m) {
- if (!(br_type & m))
- continue;
-
- v = x86_pmu.lbr_sel_map[m];
- if (v == LBR_NOT_SUPP)
- return -EOPNOTSUPP;
-
- if (v != LBR_IGN)
- mask |= v;
- }
- reg = &event->hw.branch_reg;
- reg->idx = EXTRA_REG_LBR;
-
- /* LBR_SELECT operates in suppress mode so invert mask */
- reg->config = ~mask & x86_pmu.lbr_sel_mask;
-
- return 0;
-}
-
-int intel_pmu_setup_lbr_filter(struct perf_event *event)
-{
- int ret = 0;
-
- /*
- * no LBR on this PMU
- */
- if (!x86_pmu.lbr_nr)
- return -EOPNOTSUPP;
-
- /*
- * setup SW LBR filter
- */
- intel_pmu_setup_sw_lbr_filter(event);
-
- /*
- * setup HW LBR filter, if any
- */
- if (x86_pmu.lbr_sel_map)
- ret = intel_pmu_setup_hw_lbr_filter(event);
-
- return ret;
-}
-
-/*
- * return the type of control flow change at address "from".
- * The instruction is not necessarily a branch (e.g., in case of an interrupt).
- *
- * The branch type returned also includes the priv level of the
- * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
- *
- * If a branch type is unknown OR the instruction cannot be
- * decoded (e.g., text page not present), then X86_BR_NONE is
- * returned.
- */
-static int branch_type(unsigned long from, unsigned long to)
-{
- struct insn insn;
- void *addr;
- int bytes, size = MAX_INSN_SIZE;
- int ret = X86_BR_NONE;
- int ext, to_plm, from_plm;
- u8 buf[MAX_INSN_SIZE];
- int is64 = 0;
-
- to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
- from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;
-
- /*
-	 * may be zero if the lbr did not fill up after a reset by the time
- * we get a PMU interrupt
- */
- if (from == 0 || to == 0)
- return X86_BR_NONE;
-
- if (from_plm == X86_BR_USER) {
- /*
- * can happen if measuring at the user level only
- * and we interrupt in a kernel thread, e.g., idle.
- */
- if (!current->mm)
- return X86_BR_NONE;
-
- /* may fail if text not present */
- bytes = copy_from_user_nmi(buf, (void __user *)from, size);
- if (bytes != size)
- return X86_BR_NONE;
-
- addr = buf;
- } else
- addr = (void *)from;
-
- /*
- * decoder needs to know the ABI especially
- * on 64-bit systems running 32-bit apps
- */
-#ifdef CONFIG_X86_64
- is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
-#endif
- insn_init(&insn, addr, is64);
- insn_get_opcode(&insn);
-
- switch (insn.opcode.bytes[0]) {
- case 0xf:
- switch (insn.opcode.bytes[1]) {
- case 0x05: /* syscall */
- case 0x34: /* sysenter */
- ret = X86_BR_SYSCALL;
- break;
- case 0x07: /* sysret */
- case 0x35: /* sysexit */
- ret = X86_BR_SYSRET;
- break;
- case 0x80 ... 0x8f: /* conditional */
- ret = X86_BR_JCC;
- break;
- default:
- ret = X86_BR_NONE;
- }
- break;
- case 0x70 ... 0x7f: /* conditional */
- ret = X86_BR_JCC;
- break;
- case 0xc2: /* near ret */
- case 0xc3: /* near ret */
- case 0xca: /* far ret */
- case 0xcb: /* far ret */
- ret = X86_BR_RET;
- break;
- case 0xcf: /* iret */
- ret = X86_BR_IRET;
- break;
- case 0xcc ... 0xce: /* int */
- ret = X86_BR_INT;
- break;
- case 0xe8: /* call near rel */
- case 0x9a: /* call far absolute */
- ret = X86_BR_CALL;
- break;
- case 0xe0 ... 0xe3: /* loop jmp */
- ret = X86_BR_JCC;
- break;
- case 0xe9 ... 0xeb: /* jmp */
- ret = X86_BR_JMP;
- break;
- case 0xff: /* call near absolute, call far absolute ind */
- insn_get_modrm(&insn);
- ext = (insn.modrm.bytes[0] >> 3) & 0x7;
- switch (ext) {
- case 2: /* near ind call */
- case 3: /* far ind call */
- ret = X86_BR_IND_CALL;
- break;
- case 4:
- case 5:
- ret = X86_BR_JMP;
- break;
- }
- break;
- default:
- ret = X86_BR_NONE;
- }
- /*
-	 * interrupts, traps, faults (and thus ring transitions) may
-	 * occur on any instruction. Thus, to classify them correctly,
-	 * we need to first look at the from and to priv levels. If they
-	 * are different and to is in the kernel, then it indicates
-	 * a ring transition. If the from instruction is not a ring
-	 * transition instr (syscall, sysenter, int), then it means
-	 * it was an irq, trap or fault.
- *
- * we have no way of detecting kernel to kernel faults.
- */
- if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
- && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
- ret = X86_BR_IRQ;
-
- /*
- * branch priv level determined by target as
- * is done by HW when LBR_SELECT is implemented
- */
- if (ret != X86_BR_NONE)
- ret |= to_plm;
-
- return ret;
-}
-
-/*
- * implement actual branch filter based on user demand.
- * Hardware may not exactly satisfy that request, thus
- * we need to inspect opcodes. Mismatched branches are
- * discarded. Therefore, the number of branches returned
- * in a PERF_SAMPLE_BRANCH_STACK sample may vary.
- */
-static void
-intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
-{
- u64 from, to;
- int br_sel = cpuc->br_sel;
- int i, j, type;
- bool compress = false;
-
- /* if sampling all branches, then nothing to filter */
- if ((br_sel & X86_BR_ALL) == X86_BR_ALL)
- return;
-
- for (i = 0; i < cpuc->lbr_stack.nr; i++) {
-
- from = cpuc->lbr_entries[i].from;
- to = cpuc->lbr_entries[i].to;
-
- type = branch_type(from, to);
-
- /* if type does not correspond, then discard */
- if (type == X86_BR_NONE || (br_sel & type) != type) {
- cpuc->lbr_entries[i].from = 0;
- compress = true;
- }
- }
-
- if (!compress)
- return;
-
- /* remove all entries with from=0 */
- for (i = 0; i < cpuc->lbr_stack.nr; ) {
- if (!cpuc->lbr_entries[i].from) {
- j = i;
- while (++j < cpuc->lbr_stack.nr)
- cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
- cpuc->lbr_stack.nr--;
- if (!cpuc->lbr_entries[i].from)
- continue;
- }
- i++;
- }
-}
-
-/*
- * Map interface branch filters onto LBR filters
- */
-static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
- [PERF_SAMPLE_BRANCH_ANY] = LBR_ANY,
- [PERF_SAMPLE_BRANCH_USER] = LBR_USER,
- [PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL,
- [PERF_SAMPLE_BRANCH_HV] = LBR_IGN,
- [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_REL_JMP
- | LBR_IND_JMP | LBR_FAR,
- /*
- * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
- */
- [PERF_SAMPLE_BRANCH_ANY_CALL] =
- LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
- /*
- * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
- */
- [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL | LBR_IND_JMP,
-};
-
-static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
- [PERF_SAMPLE_BRANCH_ANY] = LBR_ANY,
- [PERF_SAMPLE_BRANCH_USER] = LBR_USER,
- [PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL,
- [PERF_SAMPLE_BRANCH_HV] = LBR_IGN,
- [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_FAR,
- [PERF_SAMPLE_BRANCH_ANY_CALL] = LBR_REL_CALL | LBR_IND_CALL
- | LBR_FAR,
- [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL,
-};
-
-/* core */
-void intel_pmu_lbr_init_core(void)
-{
- x86_pmu.lbr_nr = 4;
- x86_pmu.lbr_tos = MSR_LBR_TOS;
- x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
- x86_pmu.lbr_to = MSR_LBR_CORE_TO;
-
- /*
- * SW branch filter usage:
- * - compensate for lack of HW filter
- */
- pr_cont("4-deep LBR, ");
-}
-
-/* nehalem/westmere */
-void intel_pmu_lbr_init_nhm(void)
-{
- x86_pmu.lbr_nr = 16;
- x86_pmu.lbr_tos = MSR_LBR_TOS;
- x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
- x86_pmu.lbr_to = MSR_LBR_NHM_TO;
-
- x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
- x86_pmu.lbr_sel_map = nhm_lbr_sel_map;
-
- /*
- * SW branch filter usage:
-	 *   - work around the LBR_SEL errata (see above)
-	 *   - support syscall, sysret capture.
-	 *     That requires LBR_FAR but that means far
-	 *     jmps need to be filtered out
- */
- pr_cont("16-deep LBR, ");
-}
-
-/* sandy bridge */
-void intel_pmu_lbr_init_snb(void)
-{
- x86_pmu.lbr_nr = 16;
- x86_pmu.lbr_tos = MSR_LBR_TOS;
- x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
- x86_pmu.lbr_to = MSR_LBR_NHM_TO;
-
- x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
- x86_pmu.lbr_sel_map = snb_lbr_sel_map;
-
- /*
- * SW branch filter usage:
- * - support syscall, sysret capture.
- * That requires LBR_FAR but that means far
-	 *     jmps need to be filtered out
- */
- pr_cont("16-deep LBR, ");
-}
-
-/* atom */
-void intel_pmu_lbr_init_atom(void)
-{
- /*
-	 * only models starting at stepping 10 seem
- * to have an operational LBR which can freeze
- * on PMU interrupt
- */
- if (boot_cpu_data.x86_mask < 10) {
- pr_cont("LBR disabled due to erratum");
- return;
- }
-
- x86_pmu.lbr_nr = 8;
- x86_pmu.lbr_tos = MSR_LBR_TOS;
- x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
- x86_pmu.lbr_to = MSR_LBR_CORE_TO;
-
- /*
- * SW branch filter usage:
- * - compensate for lack of HW filter
- */
- pr_cont("8-deep LBR, ");
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_p4.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_p4.c
deleted file mode 100644
index a2dfacfd..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_p4.c
+++ /dev/null
@@ -1,1345 +0,0 @@
-/*
- * Netburst Performance Events (P4, old Xeon)
- *
- * Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
- * Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
- *
- * For licencing details see kernel-base/COPYING
- */
-
-#include <linux/perf_event.h>
-
-#include <asm/perf_event_p4.h>
-#include <asm/hardirq.h>
-#include <asm/apic.h>
-
-#include "perf_event.h"
-
-#define P4_CNTR_LIMIT 3
-/*
- * array indices: 0,1 - HT threads, used with an HT-enabled cpu
- */
-struct p4_event_bind {
- unsigned int opcode; /* Event code and ESCR selector */
- unsigned int escr_msr[2]; /* ESCR MSR for this event */
- unsigned int escr_emask; /* valid ESCR EventMask bits */
- unsigned int shared; /* event is shared across threads */
-	char cntr[2][P4_CNTR_LIMIT];	/* counter index (offset), -1 on absence */
-};
-
-struct p4_pebs_bind {
- unsigned int metric_pebs;
- unsigned int metric_vert;
-};
-
-/* it sets P4_PEBS_ENABLE_UOP_TAG as well */
-#define P4_GEN_PEBS_BIND(name, pebs, vert) \
- [P4_PEBS_METRIC__##name] = { \
- .metric_pebs = pebs | P4_PEBS_ENABLE_UOP_TAG, \
- .metric_vert = vert, \
- }
-
-/*
- * note we have P4_PEBS_ENABLE_UOP_TAG always set here
- *
- * it's needed for mapping P4_PEBS_CONFIG_METRIC_MASK bits of
- * event configuration to find out which values are to be
- * written into MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT
- * registers
- */
-static struct p4_pebs_bind p4_pebs_bind_map[] = {
- P4_GEN_PEBS_BIND(1stl_cache_load_miss_retired, 0x0000001, 0x0000001),
- P4_GEN_PEBS_BIND(2ndl_cache_load_miss_retired, 0x0000002, 0x0000001),
- P4_GEN_PEBS_BIND(dtlb_load_miss_retired, 0x0000004, 0x0000001),
- P4_GEN_PEBS_BIND(dtlb_store_miss_retired, 0x0000004, 0x0000002),
- P4_GEN_PEBS_BIND(dtlb_all_miss_retired, 0x0000004, 0x0000003),
- P4_GEN_PEBS_BIND(tagged_mispred_branch, 0x0018000, 0x0000010),
- P4_GEN_PEBS_BIND(mob_load_replay_retired, 0x0000200, 0x0000001),
- P4_GEN_PEBS_BIND(split_load_retired, 0x0000400, 0x0000001),
- P4_GEN_PEBS_BIND(split_store_retired, 0x0000400, 0x0000002),
-};
-
-/*
- * Note that we don't use CCCR1 here; there is an
- * exception for P4_BSQ_ALLOCATION but we just have
- * no workaround.
- *
- * Consider this binding as the resources which a particular
- * event may borrow; it doesn't contain the EventMask,
- * Tags and friends -- they are left to the caller.
- */
-static struct p4_event_bind p4_event_bind_map[] = {
- [P4_EVENT_TC_DELIVER_MODE] = {
- .opcode = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
- .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD) |
- P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DB) |
- P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DI) |
- P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BD) |
- P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BB) |
- P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BI) |
- P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, ID),
- .shared = 1,
- .cntr = { {4, 5, -1}, {6, 7, -1} },
- },
- [P4_EVENT_BPU_FETCH_REQUEST] = {
- .opcode = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
- .escr_msr = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_BPU_FETCH_REQUEST, TCMISS),
- .cntr = { {0, -1, -1}, {2, -1, -1} },
- },
- [P4_EVENT_ITLB_REFERENCE] = {
- .opcode = P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
- .escr_msr = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT) |
- P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, MISS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT_UK),
- .cntr = { {0, -1, -1}, {2, -1, -1} },
- },
- [P4_EVENT_MEMORY_CANCEL] = {
- .opcode = P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
- .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL) |
- P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, 64K_CONF),
- .cntr = { {8, 9, -1}, {10, 11, -1} },
- },
- [P4_EVENT_MEMORY_COMPLETE] = {
- .opcode = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
- .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, LSC) |
- P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, SSC),
- .cntr = { {8, 9, -1}, {10, 11, -1} },
- },
- [P4_EVENT_LOAD_PORT_REPLAY] = {
- .opcode = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
- .escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD),
- .cntr = { {8, 9, -1}, {10, 11, -1} },
- },
- [P4_EVENT_STORE_PORT_REPLAY] = {
- .opcode = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
- .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST),
- .cntr = { {8, 9, -1}, {10, 11, -1} },
- },
- [P4_EVENT_MOB_LOAD_REPLAY] = {
- .opcode = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
- .escr_msr = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STA) |
- P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STD) |
- P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA) |
- P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR),
- .cntr = { {0, -1, -1}, {2, -1, -1} },
- },
- [P4_EVENT_PAGE_WALK_TYPE] = {
- .opcode = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
- .escr_msr = { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, DTMISS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, ITMISS),
- .shared = 1,
- .cntr = { {0, -1, -1}, {2, -1, -1} },
- },
- [P4_EVENT_BSQ_CACHE_REFERENCE] = {
- .opcode = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
- .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS),
- .cntr = { {0, -1, -1}, {2, -1, -1} },
- },
- [P4_EVENT_IOQ_ALLOCATION] = {
- .opcode = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
- .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, DEFAULT) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_READ) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_UC) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WC) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WT) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WP) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WB) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OWN) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OTHER) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, PREFETCH),
- .cntr = { {0, -1, -1}, {2, -1, -1} },
- },
- [P4_EVENT_IOQ_ACTIVE_ENTRIES] = { /* shared ESCR */
- .opcode = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
- .escr_msr = { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER) |
- P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH),
- .cntr = { {2, -1, -1}, {3, -1, -1} },
- },
- [P4_EVENT_FSB_DATA_ACTIVITY] = {
- .opcode = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
- .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) |
- P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN) |
- P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER) |
- P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV) |
- P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN) |
- P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER),
- .shared = 1,
- .cntr = { {0, -1, -1}, {2, -1, -1} },
- },
- [P4_EVENT_BSQ_ALLOCATION] = { /* shared ESCR, broken CCCR1 */
- .opcode = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
- .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2),
- .cntr = { {0, -1, -1}, {1, -1, -1} },
- },
- [P4_EVENT_BSQ_ACTIVE_ENTRIES] = { /* shared ESCR */
- .opcode = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
- .escr_msr = { MSR_P4_BSU_ESCR1 , MSR_P4_BSU_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2),
- .cntr = { {2, -1, -1}, {3, -1, -1} },
- },
- [P4_EVENT_SSE_INPUT_ASSIST] = {
- .opcode = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
- .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_SSE_INPUT_ASSIST, ALL),
- .shared = 1,
- .cntr = { {8, 9, -1}, {10, 11, -1} },
- },
- [P4_EVENT_PACKED_SP_UOP] = {
- .opcode = P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
- .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_SP_UOP, ALL),
- .shared = 1,
- .cntr = { {8, 9, -1}, {10, 11, -1} },
- },
- [P4_EVENT_PACKED_DP_UOP] = {
- .opcode = P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
- .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_DP_UOP, ALL),
- .shared = 1,
- .cntr = { {8, 9, -1}, {10, 11, -1} },
- },
- [P4_EVENT_SCALAR_SP_UOP] = {
- .opcode = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
- .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_SP_UOP, ALL),
- .shared = 1,
- .cntr = { {8, 9, -1}, {10, 11, -1} },
- },
- [P4_EVENT_SCALAR_DP_UOP] = {
- .opcode = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
- .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_DP_UOP, ALL),
- .shared = 1,
- .cntr = { {8, 9, -1}, {10, 11, -1} },
- },
- [P4_EVENT_64BIT_MMX_UOP] = {
- .opcode = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
- .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_64BIT_MMX_UOP, ALL),
- .shared = 1,
- .cntr = { {8, 9, -1}, {10, 11, -1} },
- },
- [P4_EVENT_128BIT_MMX_UOP] = {
- .opcode = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
- .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_128BIT_MMX_UOP, ALL),
- .shared = 1,
- .cntr = { {8, 9, -1}, {10, 11, -1} },
- },
- [P4_EVENT_X87_FP_UOP] = {
- .opcode = P4_OPCODE(P4_EVENT_X87_FP_UOP),
- .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_X87_FP_UOP, ALL),
- .shared = 1,
- .cntr = { {8, 9, -1}, {10, 11, -1} },
- },
- [P4_EVENT_TC_MISC] = {
- .opcode = P4_OPCODE(P4_EVENT_TC_MISC),
- .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_TC_MISC, FLUSH),
- .cntr = { {4, 5, -1}, {6, 7, -1} },
- },
- [P4_EVENT_GLOBAL_POWER_EVENTS] = {
- .opcode = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
- .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING),
- .cntr = { {0, -1, -1}, {2, -1, -1} },
- },
- [P4_EVENT_TC_MS_XFER] = {
- .opcode = P4_OPCODE(P4_EVENT_TC_MS_XFER),
- .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_TC_MS_XFER, CISC),
- .cntr = { {4, 5, -1}, {6, 7, -1} },
- },
- [P4_EVENT_UOP_QUEUE_WRITES] = {
- .opcode = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
- .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD) |
- P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER) |
- P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM),
- .cntr = { {4, 5, -1}, {6, 7, -1} },
- },
- [P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
- .opcode = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
- .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR0 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL) |
- P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL) |
- P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN) |
- P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT),
- .cntr = { {4, 5, -1}, {6, 7, -1} },
- },
- [P4_EVENT_RETIRED_BRANCH_TYPE] = {
- .opcode = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
- .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) |
- P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) |
- P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) |
- P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT),
- .cntr = { {4, 5, -1}, {6, 7, -1} },
- },
- [P4_EVENT_RESOURCE_STALL] = {
- .opcode = P4_OPCODE(P4_EVENT_RESOURCE_STALL),
- .escr_msr = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_RESOURCE_STALL, SBFULL),
- .cntr = { {12, 13, 16}, {14, 15, 17} },
- },
- [P4_EVENT_WC_BUFFER] = {
- .opcode = P4_OPCODE(P4_EVENT_WC_BUFFER),
- .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_EVICTS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS),
- .shared = 1,
- .cntr = { {8, 9, -1}, {10, 11, -1} },
- },
- [P4_EVENT_B2B_CYCLES] = {
- .opcode = P4_OPCODE(P4_EVENT_B2B_CYCLES),
- .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
- .escr_emask = 0,
- .cntr = { {0, -1, -1}, {2, -1, -1} },
- },
- [P4_EVENT_BNR] = {
- .opcode = P4_OPCODE(P4_EVENT_BNR),
- .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
- .escr_emask = 0,
- .cntr = { {0, -1, -1}, {2, -1, -1} },
- },
- [P4_EVENT_SNOOP] = {
- .opcode = P4_OPCODE(P4_EVENT_SNOOP),
- .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
- .escr_emask = 0,
- .cntr = { {0, -1, -1}, {2, -1, -1} },
- },
- [P4_EVENT_RESPONSE] = {
- .opcode = P4_OPCODE(P4_EVENT_RESPONSE),
- .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
- .escr_emask = 0,
- .cntr = { {0, -1, -1}, {2, -1, -1} },
- },
- [P4_EVENT_FRONT_END_EVENT] = {
- .opcode = P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
- .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, NBOGUS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, BOGUS),
- .cntr = { {12, 13, 16}, {14, 15, 17} },
- },
- [P4_EVENT_EXECUTION_EVENT] = {
- .opcode = P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
- .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0) |
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1) |
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2) |
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3) |
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) |
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) |
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) |
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3),
- .cntr = { {12, 13, 16}, {14, 15, 17} },
- },
- [P4_EVENT_REPLAY_EVENT] = {
- .opcode = P4_OPCODE(P4_EVENT_REPLAY_EVENT),
- .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, NBOGUS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, BOGUS),
- .cntr = { {12, 13, 16}, {14, 15, 17} },
- },
- [P4_EVENT_INSTR_RETIRED] = {
- .opcode = P4_OPCODE(P4_EVENT_INSTR_RETIRED),
- .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) |
- P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSTAG) |
- P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG) |
- P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSTAG),
- .cntr = { {12, 13, 16}, {14, 15, 17} },
- },
- [P4_EVENT_UOPS_RETIRED] = {
- .opcode = P4_OPCODE(P4_EVENT_UOPS_RETIRED),
- .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, NBOGUS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, BOGUS),
- .cntr = { {12, 13, 16}, {14, 15, 17} },
- },
- [P4_EVENT_UOP_TYPE] = {
- .opcode = P4_OPCODE(P4_EVENT_UOP_TYPE),
- .escr_msr = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGLOADS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGSTORES),
- .cntr = { {12, 13, 16}, {14, 15, 17} },
- },
- [P4_EVENT_BRANCH_RETIRED] = {
- .opcode = P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
- .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNP) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNM) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTP) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTM),
- .cntr = { {12, 13, 16}, {14, 15, 17} },
- },
- [P4_EVENT_MISPRED_BRANCH_RETIRED] = {
- .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
- .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS),
- .cntr = { {12, 13, 16}, {14, 15, 17} },
- },
- [P4_EVENT_X87_ASSIST] = {
- .opcode = P4_OPCODE(P4_EVENT_X87_ASSIST),
- .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSU) |
- P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSO) |
- P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAO) |
- P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAU) |
- P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, PREA),
- .cntr = { {12, 13, 16}, {14, 15, 17} },
- },
- [P4_EVENT_MACHINE_CLEAR] = {
- .opcode = P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
- .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, CLEAR) |
- P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, MOCLEAR) |
- P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, SMCLEAR),
- .cntr = { {12, 13, 16}, {14, 15, 17} },
- },
- [P4_EVENT_INSTR_COMPLETED] = {
- .opcode = P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
- .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
- .escr_emask =
- P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, NBOGUS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, BOGUS),
- .cntr = { {12, 13, 16}, {14, 15, 17} },
- },
-};
-
-#define P4_GEN_CACHE_EVENT(event, bit, metric) \
- p4_config_pack_escr(P4_ESCR_EVENT(event) | \
- P4_ESCR_EMASK_BIT(event, bit)) | \
- p4_config_pack_cccr(metric | \
- P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(event))))
-
-static __initconst const u64 p4_hw_cache_event_ids
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
- P4_PEBS_METRIC__1stl_cache_load_miss_retired),
- },
- },
- [ C(LL ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
- P4_PEBS_METRIC__2ndl_cache_load_miss_retired),
- },
-},
- [ C(DTLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
- P4_PEBS_METRIC__dtlb_load_miss_retired),
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
- P4_PEBS_METRIC__dtlb_store_miss_retired),
- },
- },
- [ C(ITLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, HIT,
- P4_PEBS_METRIC__none),
- [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, MISS,
- P4_PEBS_METRIC__none),
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(NODE) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
-};
-
-/*
- * Because Netburst is quite restricted in how many identical
- * events may run simultaneously, we introduce event aliases,
- * i.e. different events which have the same functionality but
- * utilize non-intersecting resources (ESCR/CCCR/counter registers).
- *
- * This allows us to relax the restrictions a bit and run two or more
- * identical events together.
- *
- * Never set any custom internal bits such as P4_CONFIG_HT,
- * P4_CONFIG_ALIASABLE or bits for P4_PEBS_METRIC; they are
- * either kept up to date automatically or not applicable at all.
- */
-struct p4_event_alias {
- u64 original;
- u64 alternative;
-} p4_event_aliases[] = {
- {
- /*
- * Non-halted cycles can be substituted with non-sleeping cycles (see
- * Intel SDM Vol3b for details). We need this alias to be able
- * to run nmi-watchdog and 'perf top' (or any other user space tool
- * which is interested in running PERF_COUNT_HW_CPU_CYCLES)
- * simultaneously.
- */
- .original =
- p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),
- .alternative =
- p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_EXECUTION_EVENT) |
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0)|
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1)|
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2)|
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3)|
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) |
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) |
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) |
- P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3))|
- p4_config_pack_cccr(P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT |
- P4_CCCR_COMPARE),
- },
-};
-
-static u64 p4_get_alias_event(u64 config)
-{
- u64 config_match;
- int i;
-
- /*
-	 * Only events with the special mark are allowed;
-	 * this makes sure the config didn't come in as a
-	 * malformed RAW event.
- */
- if (!(config & P4_CONFIG_ALIASABLE))
- return 0;
-
- config_match = config & P4_CONFIG_EVENT_ALIAS_MASK;
-
- for (i = 0; i < ARRAY_SIZE(p4_event_aliases); i++) {
- if (config_match == p4_event_aliases[i].original) {
- config_match = p4_event_aliases[i].alternative;
- break;
- } else if (config_match == p4_event_aliases[i].alternative) {
- config_match = p4_event_aliases[i].original;
- break;
- }
- }
-
- if (i >= ARRAY_SIZE(p4_event_aliases))
- return 0;
-
- return config_match | (config & P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS);
-}
-
-static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
- /* non-halted CPU clocks */
- [PERF_COUNT_HW_CPU_CYCLES] =
- p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)) |
- P4_CONFIG_ALIASABLE,
-
- /*
- * retired instructions
-	 * for the sake of simplicity we don't use the FSB tagging
- */
- [PERF_COUNT_HW_INSTRUCTIONS] =
- p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_INSTR_RETIRED) |
- P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) |
- P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)),
-
- /* cache hits */
- [PERF_COUNT_HW_CACHE_REFERENCES] =
- p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)),
-
- /* cache misses */
- [PERF_COUNT_HW_CACHE_MISSES] =
- p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS)),
-
- /* branch instructions retired */
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =
- p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_RETIRED_BRANCH_TYPE) |
- P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) |
- P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) |
- P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) |
- P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT)),
-
- /* mispredicted branches retired */
- [PERF_COUNT_HW_BRANCH_MISSES] =
- p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_MISPRED_BRANCH_RETIRED) |
- P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS)),
-
- /* bus ready clocks (cpu is driving #DRDY_DRV\#DRDY_OWN): */
- [PERF_COUNT_HW_BUS_CYCLES] =
- p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_FSB_DATA_ACTIVITY) |
- P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) |
- P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN)) |
- p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
-};
-
-static struct p4_event_bind *p4_config_get_bind(u64 config)
-{
- unsigned int evnt = p4_config_unpack_event(config);
- struct p4_event_bind *bind = NULL;
-
- if (evnt < ARRAY_SIZE(p4_event_bind_map))
- bind = &p4_event_bind_map[evnt];
-
- return bind;
-}
-
-static u64 p4_pmu_event_map(int hw_event)
-{
- struct p4_event_bind *bind;
- unsigned int esel;
- u64 config;
-
- config = p4_general_events[hw_event];
- bind = p4_config_get_bind(config);
- esel = P4_OPCODE_ESEL(bind->opcode);
- config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
-
- return config;
-}
-
-/* check cpu model specifics */
-static bool p4_event_match_cpu_model(unsigned int event_idx)
-{
-	/* the INSTR_COMPLETED event only exists for models 3, 4 and 6 (Prescott) */
- if (event_idx == P4_EVENT_INSTR_COMPLETED) {
- if (boot_cpu_data.x86_model != 3 &&
- boot_cpu_data.x86_model != 4 &&
- boot_cpu_data.x86_model != 6)
- return false;
- }
-
- /*
- * For info
- * - IQ_ESCR0, IQ_ESCR1 only for models 1 and 2
- */
-
- return true;
-}
-
-static int p4_validate_raw_event(struct perf_event *event)
-{
- unsigned int v, emask;
-
-	/* User data may have an out-of-bounds event index */
- v = p4_config_unpack_event(event->attr.config);
- if (v >= ARRAY_SIZE(p4_event_bind_map))
- return -EINVAL;
-
- /* It may be unsupported: */
- if (!p4_event_match_cpu_model(v))
- return -EINVAL;
-
- /*
-	 * NOTE: P4_CCCR_THREAD_ANY does not have the same meaning as
-	 * in Architectural Performance Monitoring; it selects not
-	 * on _which_ logical cpu to count but rather _when_, i.e. it
-	 * depends on the logical cpu state -- count the event if one cpu is
-	 * active, none, both or any, so we just allow the user to pass any
-	 * desired value.
-	 *
-	 * In turn we always set the Tx_OS/Tx_USR bits bound to the logical
-	 * cpu without propagating them to another cpu
- */
-
- /*
- * if an event is shared across the logical threads
- * the user needs special permissions to be able to use it
- */
- if (p4_ht_active() && p4_event_bind_map[v].shared) {
- if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
- return -EACCES;
- }
-
- /* ESCR EventMask bits may be invalid */
- emask = p4_config_unpack_escr(event->attr.config) & P4_ESCR_EVENTMASK_MASK;
- if (emask & ~p4_event_bind_map[v].escr_emask)
- return -EINVAL;
-
- /*
- * it may have some invalid PEBS bits
- */
- if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE))
- return -EINVAL;
-
- v = p4_config_unpack_metric(event->attr.config);
- if (v >= ARRAY_SIZE(p4_pebs_bind_map))
- return -EINVAL;
-
- return 0;
-}
-
-static int p4_hw_config(struct perf_event *event)
-{
- int cpu = get_cpu();
- int rc = 0;
- u32 escr, cccr;
-
- /*
-	 * the reason we grab the cpu this early is that if we get scheduled
-	 * for the first time on the same cpu -- we will not need to swap the
-	 * thread specific flags in config (and will save some cpu cycles)
- */
-
- cccr = p4_default_cccr_conf(cpu);
- escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
- event->attr.exclude_user);
- event->hw.config = p4_config_pack_escr(escr) |
- p4_config_pack_cccr(cccr);
-
- if (p4_ht_active() && p4_ht_thread(cpu))
- event->hw.config = p4_set_ht_bit(event->hw.config);
-
- if (event->attr.type == PERF_TYPE_RAW) {
- struct p4_event_bind *bind;
- unsigned int esel;
- /*
-		 * Clear bits we reserve to be managed by the kernel itself
-		 * and never allow them to be set from user space
- */
- event->attr.config &= P4_CONFIG_MASK;
-
- rc = p4_validate_raw_event(event);
- if (rc)
- goto out;
-
- /*
-		 * Note that for RAW events we allow the user to use P4_CCCR_RESERVED
-		 * bits since we keep additional info here (for cache events etc.)
- */
- event->hw.config |= event->attr.config;
- bind = p4_config_get_bind(event->attr.config);
- if (!bind) {
- rc = -EINVAL;
- goto out;
- }
- esel = P4_OPCODE_ESEL(bind->opcode);
- event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
- }
-
- rc = x86_setup_perfctr(event);
-out:
- put_cpu();
- return rc;
-}
-
-static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
-{
- u64 v;
-
-	/* the official way to indicate an overflow */
- rdmsrl(hwc->config_base, v);
- if (v & P4_CCCR_OVF) {
- wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF);
- return 1;
- }
-
- /*
-	 * In some circumstances the overflow might issue an NMI but not
-	 * set the P4_CCCR_OVF bit. Because a counter holds a negative value
-	 * we simply check for the high bit being set; if it's cleared it means
-	 * the counter has reached zero and continued counting before the
-	 * real NMI signal was received:
- */
- rdmsrl(hwc->event_base, v);
- if (!(v & ARCH_P4_UNFLAGGED_BIT))
- return 1;
-
- return 0;
-}
-
-static void p4_pmu_disable_pebs(void)
-{
- /*
- * FIXME
- *
-	 * Two threads are still allowed to set up the same cache
-	 * events, so we can't simply clear the metrics until we know
-	 * no one is depending on us; we would need some kind of counter
-	 * for "ReplayEvent" users.
-	 *
-	 * What is more complex -- RAW events: if the user (for some
-	 * reason) passes a cache event metric with an improper
-	 * event opcode -- it's fine from the hardware point of view
-	 * but complete nonsense as far as the "meaning" of such an action goes.
-	 *
-	 * So for the moment leave the metrics turned on forever -- it's
-	 * ok for now but needs to be revisited!
- *
- * (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)0);
- * (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
- */
-}
-
-static inline void p4_pmu_disable_event(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
-
- /*
-	 * If the event gets disabled while the counter is in the overflowed
-	 * state we need to clear P4_CCCR_OVF, otherwise the interrupt gets
-	 * asserted again and again
- */
- (void)checking_wrmsrl(hwc->config_base,
- (u64)(p4_config_unpack_cccr(hwc->config)) &
- ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
-}
-
-static void p4_pmu_disable_all(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- int idx;
-
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- struct perf_event *event = cpuc->events[idx];
- if (!test_bit(idx, cpuc->active_mask))
- continue;
- p4_pmu_disable_event(event);
- }
-
- p4_pmu_disable_pebs();
-}
-
-/* configuration must be valid */
-static void p4_pmu_enable_pebs(u64 config)
-{
- struct p4_pebs_bind *bind;
- unsigned int idx;
-
- BUILD_BUG_ON(P4_PEBS_METRIC__max > P4_PEBS_CONFIG_METRIC_MASK);
-
- idx = p4_config_unpack_metric(config);
- if (idx == P4_PEBS_METRIC__none)
- return;
-
- bind = &p4_pebs_bind_map[idx];
-
- (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
- (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
-}
-
-static void p4_pmu_enable_event(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- int thread = p4_ht_config_thread(hwc->config);
- u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
- unsigned int idx = p4_config_unpack_event(hwc->config);
- struct p4_event_bind *bind;
- u64 escr_addr, cccr;
-
- bind = &p4_event_bind_map[idx];
- escr_addr = (u64)bind->escr_msr[thread];
-
- /*
-	 * - we don't support cascaded counters yet
- * - and counter 1 is broken (erratum)
- */
- WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
- WARN_ON_ONCE(hwc->idx == 1);
-
- /* we need a real Event value */
- escr_conf &= ~P4_ESCR_EVENT_MASK;
- escr_conf |= P4_ESCR_EVENT(P4_OPCODE_EVNT(bind->opcode));
-
- cccr = p4_config_unpack_cccr(hwc->config);
-
- /*
-	 * it could be a cache event so we need to write the metrics
- * into additional MSRs
- */
- p4_pmu_enable_pebs(hwc->config);
-
- (void)checking_wrmsrl(escr_addr, escr_conf);
- (void)checking_wrmsrl(hwc->config_base,
- (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
-}
-
-static void p4_pmu_enable_all(int added)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- int idx;
-
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- struct perf_event *event = cpuc->events[idx];
- if (!test_bit(idx, cpuc->active_mask))
- continue;
- p4_pmu_enable_event(event);
- }
-}
-
-static int p4_pmu_handle_irq(struct pt_regs *regs)
-{
- struct perf_sample_data data;
- struct cpu_hw_events *cpuc;
- struct perf_event *event;
- struct hw_perf_event *hwc;
- int idx, handled = 0;
- u64 val;
-
- perf_sample_data_init(&data, 0);
-
- cpuc = &__get_cpu_var(cpu_hw_events);
-
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- int overflow;
-
- if (!test_bit(idx, cpuc->active_mask)) {
- /* catch in-flight IRQs */
- if (__test_and_clear_bit(idx, cpuc->running))
- handled++;
- continue;
- }
-
- event = cpuc->events[idx];
- hwc = &event->hw;
-
- WARN_ON_ONCE(hwc->idx != idx);
-
-		/* it might be an unflagged overflow */
- overflow = p4_pmu_clear_cccr_ovf(hwc);
-
- val = x86_perf_event_update(event);
- if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
- continue;
-
- handled += overflow;
-
- /* event overflow for sure */
- data.period = event->hw.last_period;
-
- if (!x86_perf_event_set_period(event))
- continue;
- if (perf_event_overflow(event, &data, regs))
- x86_pmu_stop(event, 0);
- }
-
- if (handled)
- inc_irq_stat(apic_perf_irqs);
-
- /*
- * When dealing with the unmasking of the LVTPC on P4 perf hw, it has
- * been observed that the OVF bit flag has to be cleared first _before_
- * the LVTPC can be unmasked.
- *
- * The reason is the NMI line will continue to be asserted while the OVF
- * bit is set. This causes a second NMI to be generated if the LVTPC is
- * unmasked before the OVF bit is cleared, leading to unknown NMI
- * messages.
- */
- apic_write(APIC_LVTPC, APIC_DM_NMI);
-
- return handled;
-}
-
-/*
- * swap thread specific fields according to the thread
- * we are going to run on
- */
-static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
-{
- u32 escr, cccr;
-
- /*
-	 * either we are lucky and continue on the same cpu, or there is no HT support
- */
- if (!p4_should_swap_ts(hwc->config, cpu))
- return;
-
- /*
-	 * the event is migrated from another logical
- * cpu, so we need to swap thread specific flags
- */
-
- escr = p4_config_unpack_escr(hwc->config);
- cccr = p4_config_unpack_cccr(hwc->config);
-
- if (p4_ht_thread(cpu)) {
- cccr &= ~P4_CCCR_OVF_PMI_T0;
- cccr |= P4_CCCR_OVF_PMI_T1;
- if (escr & P4_ESCR_T0_OS) {
- escr &= ~P4_ESCR_T0_OS;
- escr |= P4_ESCR_T1_OS;
- }
- if (escr & P4_ESCR_T0_USR) {
- escr &= ~P4_ESCR_T0_USR;
- escr |= P4_ESCR_T1_USR;
- }
- hwc->config = p4_config_pack_escr(escr);
- hwc->config |= p4_config_pack_cccr(cccr);
- hwc->config |= P4_CONFIG_HT;
- } else {
- cccr &= ~P4_CCCR_OVF_PMI_T1;
- cccr |= P4_CCCR_OVF_PMI_T0;
- if (escr & P4_ESCR_T1_OS) {
- escr &= ~P4_ESCR_T1_OS;
- escr |= P4_ESCR_T0_OS;
- }
- if (escr & P4_ESCR_T1_USR) {
- escr &= ~P4_ESCR_T1_USR;
- escr |= P4_ESCR_T0_USR;
- }
- hwc->config = p4_config_pack_escr(escr);
- hwc->config |= p4_config_pack_cccr(cccr);
- hwc->config &= ~P4_CONFIG_HT;
- }
-}
-
-/*
- * ESCR address hashing is tricky: ESCRs are not sequential
- * in memory, but they all start from MSR_P4_BSU_ESCR0 (0x03a0) and
- * the addresses of all ESCRs lie in the range [0x3a0,0x3e1],
- *
- * so we end up with a ~70% filled hashtable
- */
-
-#define P4_ESCR_MSR_BASE 0x000003a0
-#define P4_ESCR_MSR_MAX 0x000003e1
-#define P4_ESCR_MSR_TABLE_SIZE (P4_ESCR_MSR_MAX - P4_ESCR_MSR_BASE + 1)
-#define P4_ESCR_MSR_IDX(msr) (msr - P4_ESCR_MSR_BASE)
-#define P4_ESCR_MSR_TABLE_ENTRY(msr) [P4_ESCR_MSR_IDX(msr)] = msr
-
-static const unsigned int p4_escr_table[P4_ESCR_MSR_TABLE_SIZE] = {
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR2),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR3),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR4),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR5),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR1),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR0),
- P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR1),
-};
-
-static int p4_get_escr_idx(unsigned int addr)
-{
- unsigned int idx = P4_ESCR_MSR_IDX(addr);
-
- if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE ||
- !p4_escr_table[idx] ||
- p4_escr_table[idx] != addr)) {
- WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
- return -1;
- }
-
- return idx;
-}
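
The ESCR lookup above is a direct-indexed table rather than a real hash: the slot is simply the MSR address minus MSR_P4_BSU_ESCR0, with empty slots left for the unimplemented addresses in between. A minimal user-space sketch of that mapping, assuming only the raw MSR values (0x3a0 base, 0x3e1 max) taken from the defines above plus a few hard-coded example addresses:

#include <stdio.h>

#define P4_ESCR_MSR_BASE	0x000003a0
#define P4_ESCR_MSR_MAX		0x000003e1
#define P4_ESCR_MSR_TABLE_SIZE	(P4_ESCR_MSR_MAX - P4_ESCR_MSR_BASE + 1)
#define P4_ESCR_MSR_IDX(msr)	((msr) - P4_ESCR_MSR_BASE)

int main(void)
{
	/* a few example ESCR addresses; 0x3a0 is MSR_P4_BSU_ESCR0 */
	unsigned int msrs[] = { 0x3a0, 0x3a1, 0x3b4, 0x3e1 };
	unsigned int i;

	printf("table size: %u slots\n", (unsigned int)P4_ESCR_MSR_TABLE_SIZE);
	for (i = 0; i < sizeof(msrs) / sizeof(msrs[0]); i++)
		printf("MSR 0x%03x -> slot %u\n", msrs[i], P4_ESCR_MSR_IDX(msrs[i]));
	return 0;
}

With 46 ESCR MSRs spread over 66 slots the table is roughly 70% full; unused slots stay zero, which is why p4_get_escr_idx() also checks that the slot actually holds the requested address.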
-
-static int p4_next_cntr(int thread, unsigned long *used_mask,
- struct p4_event_bind *bind)
-{
- int i, j;
-
- for (i = 0; i < P4_CNTR_LIMIT; i++) {
- j = bind->cntr[thread][i];
- if (j != -1 && !test_bit(j, used_mask))
- return j;
- }
-
- return -1;
-}
-
-static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
-{
- unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)];
- int cpu = smp_processor_id();
- struct hw_perf_event *hwc;
- struct p4_event_bind *bind;
- unsigned int i, thread, num;
- int cntr_idx, escr_idx;
- u64 config_alias;
- int pass;
-
- bitmap_zero(used_mask, X86_PMC_IDX_MAX);
- bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE);
-
- for (i = 0, num = n; i < n; i++, num--) {
-
- hwc = &cpuc->event_list[i]->hw;
- thread = p4_ht_thread(cpu);
- pass = 0;
-
-again:
- /*
- * It's possible to hit a circular lock
- * between original and alternative events
- * if both are scheduled already.
- */
- if (pass > 2)
- goto done;
-
- bind = p4_config_get_bind(hwc->config);
- escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
- if (unlikely(escr_idx == -1))
- goto done;
-
- if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
- cntr_idx = hwc->idx;
- if (assign)
- assign[i] = hwc->idx;
- goto reserve;
- }
-
- cntr_idx = p4_next_cntr(thread, used_mask, bind);
- if (cntr_idx == -1 || test_bit(escr_idx, escr_mask)) {
- /*
- * Check whether an event alias is still available.
- */
- config_alias = p4_get_alias_event(hwc->config);
- if (!config_alias)
- goto done;
- hwc->config = config_alias;
- pass++;
- goto again;
- }
-
- p4_pmu_swap_config_ts(hwc, cpu);
- if (assign)
- assign[i] = cntr_idx;
-reserve:
- set_bit(cntr_idx, used_mask);
- set_bit(escr_idx, escr_mask);
- }
-
-done:
- return num ? -EINVAL : 0;
-}
-
-PMU_FORMAT_ATTR(cccr, "config:0-31" );
-PMU_FORMAT_ATTR(escr, "config:32-62");
-PMU_FORMAT_ATTR(ht, "config:63" );
-
-static struct attribute *intel_p4_formats_attr[] = {
- &format_attr_cccr.attr,
- &format_attr_escr.attr,
- &format_attr_ht.attr,
- NULL,
-};
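
The three format attributes above describe how a raw perf config value is laid out for this PMU: the CCCR bits live in config:0-31, the ESCR bits in config:32-62, and the HT flag in bit 63. Below is a hedged sketch of packing such a value from user space; pack_p4_config() is an illustrative helper, not the kernel's p4_config_pack_escr()/p4_config_pack_cccr() macros, and the field values passed to it are arbitrary examples.

#include <stdint.h>
#include <stdio.h>

/* illustrative only: mirrors the cccr/escr/ht split advertised via sysfs */
static uint64_t pack_p4_config(uint32_t cccr, uint32_t escr, int ht)
{
	uint64_t config = 0;

	config |= (uint64_t)cccr;				/* config:0-31  */
	config |= ((uint64_t)escr & 0x7fffffffULL) << 32;	/* config:32-62 */
	if (ht)
		config |= 1ULL << 63;				/* config:63    */
	return config;
}

int main(void)
{
	printf("config = 0x%016llx\n",
	       (unsigned long long)pack_p4_config(0x00039000, 0x0000c900, 1));
	return 0;
}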
-
-static __initconst const struct x86_pmu p4_pmu = {
- .name = "Netburst P4/Xeon",
- .handle_irq = p4_pmu_handle_irq,
- .disable_all = p4_pmu_disable_all,
- .enable_all = p4_pmu_enable_all,
- .enable = p4_pmu_enable_event,
- .disable = p4_pmu_disable_event,
- .eventsel = MSR_P4_BPU_CCCR0,
- .perfctr = MSR_P4_BPU_PERFCTR0,
- .event_map = p4_pmu_event_map,
- .max_events = ARRAY_SIZE(p4_general_events),
- .get_event_constraints = x86_get_event_constraints,
-	/*
-	 * If HT is disabled we may need to use all
-	 * ARCH_P4_MAX_CCCR counters simultaneously,
-	 * though for the moment leave it restricted,
-	 * assuming HT is on
-	 */
- .num_counters = ARCH_P4_MAX_CCCR,
- .apic = 1,
- .cntval_bits = ARCH_P4_CNTRVAL_BITS,
- .cntval_mask = ARCH_P4_CNTRVAL_MASK,
- .max_period = (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
- .hw_config = p4_hw_config,
- .schedule_events = p4_pmu_schedule_events,
-	/*
-	 * This handles erratum N15 in Intel doc 249199-029:
-	 * the counter may not be updated correctly on write,
-	 * so we need a second write operation to do the trick
-	 * (the official workaround didn't work).
-	 *
-	 * The idea is taken from the OProfile code (see the sketch below).
-	 */
- .perfctr_second_write = 1,
-
- .format_attrs = intel_p4_formats_attr,
-};
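
The perfctr_second_write flag above encodes the erratum N15 workaround from the comment: the counter value is simply written twice so that the second write takes effect. A minimal sketch of that pattern; msr_write() is a hypothetical stand-in for wrmsrl(), and the 40-bit mask is only an example counter width.

#include <stdint.h>
#include <stdio.h>

/* stand-in for wrmsrl(); here it just records the last value written */
static uint64_t fake_msr;
static void msr_write(uint32_t msr, uint64_t val)
{
	(void)msr;
	fake_msr = val;
}

/* program -left (so the counter overflows after "left" events), twice */
static void write_counter_twice(uint32_t msr, uint64_t left, uint64_t mask)
{
	msr_write(msr, (uint64_t)(-(int64_t)left) & mask);
	msr_write(msr, (uint64_t)(-(int64_t)left) & mask);	/* second write */
}

int main(void)
{
	write_counter_twice(0x300, 100000, (1ULL << 40) - 1);
	printf("counter programmed to 0x%llx\n", (unsigned long long)fake_msr);
	return 0;
}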
-
-__init int p4_pmu_init(void)
-{
- unsigned int low, high;
-
- /* If we get stripped -- indexing fails */
- BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);
-
- rdmsr(MSR_IA32_MISC_ENABLE, low, high);
- if (!(low & (1 << 7))) {
- pr_cont("unsupported Netburst CPU model %d ",
- boot_cpu_data.x86_model);
- return -ENODEV;
- }
-
- memcpy(hw_cache_event_ids, p4_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
-
- pr_cont("Netburst events, ");
-
- x86_pmu = p4_pmu;
-
- return 0;
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_p6.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_p6.c
deleted file mode 100644
index 32bcfc7d..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/perf_event_p6.c
+++ /dev/null
@@ -1,162 +0,0 @@
-#include <linux/perf_event.h>
-#include <linux/types.h>
-
-#include "perf_event.h"
-
-/*
- * Not sure about some of these
- */
-static const u64 p6_perfmon_event_map[] =
-{
- [PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
- [PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
- [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
-};
-
-static u64 p6_pmu_event_map(int hw_event)
-{
- return p6_perfmon_event_map[hw_event];
-}
-
-/*
- * An event encoding that is specified not to count anything.
- * We use it to effectively disable a counter.
- *
- * L2_RQSTS with a 0 MESI unit mask.
- */
-#define P6_NOP_EVENT 0x0000002EULL
-
-static struct event_constraint p6_event_constraints[] =
-{
- INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
- INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
- INTEL_EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
- INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
- INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
- INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
- EVENT_CONSTRAINT_END
-};
-
-static void p6_pmu_disable_all(void)
-{
- u64 val;
-
- /* p6 only has one enable register */
- rdmsrl(MSR_P6_EVNTSEL0, val);
- val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(MSR_P6_EVNTSEL0, val);
-}
-
-static void p6_pmu_enable_all(int added)
-{
- unsigned long val;
-
- /* p6 only has one enable register */
- rdmsrl(MSR_P6_EVNTSEL0, val);
- val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(MSR_P6_EVNTSEL0, val);
-}
-
-static inline void
-p6_pmu_disable_event(struct perf_event *event)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
- u64 val = P6_NOP_EVENT;
-
- if (cpuc->enabled)
- val |= ARCH_PERFMON_EVENTSEL_ENABLE;
-
- (void)checking_wrmsrl(hwc->config_base, val);
-}
-
-static void p6_pmu_enable_event(struct perf_event *event)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
- u64 val;
-
- val = hwc->config;
- if (cpuc->enabled)
- val |= ARCH_PERFMON_EVENTSEL_ENABLE;
-
- (void)checking_wrmsrl(hwc->config_base, val);
-}
-
-PMU_FORMAT_ATTR(event, "config:0-7" );
-PMU_FORMAT_ATTR(umask, "config:8-15" );
-PMU_FORMAT_ATTR(edge, "config:18" );
-PMU_FORMAT_ATTR(pc, "config:19" );
-PMU_FORMAT_ATTR(inv, "config:23" );
-PMU_FORMAT_ATTR(cmask, "config:24-31" );
-
-static struct attribute *intel_p6_formats_attr[] = {
- &format_attr_event.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_pc.attr,
- &format_attr_inv.attr,
- &format_attr_cmask.attr,
- NULL,
-};
-
-static __initconst const struct x86_pmu p6_pmu = {
- .name = "p6",
- .handle_irq = x86_pmu_handle_irq,
- .disable_all = p6_pmu_disable_all,
- .enable_all = p6_pmu_enable_all,
- .enable = p6_pmu_enable_event,
- .disable = p6_pmu_disable_event,
- .hw_config = x86_pmu_hw_config,
- .schedule_events = x86_schedule_events,
- .eventsel = MSR_P6_EVNTSEL0,
- .perfctr = MSR_P6_PERFCTR0,
- .event_map = p6_pmu_event_map,
- .max_events = ARRAY_SIZE(p6_perfmon_event_map),
- .apic = 1,
- .max_period = (1ULL << 31) - 1,
- .version = 0,
- .num_counters = 2,
-	/*
-	 * Counters are 40 bits wide. However they are designed such
-	 * that bits [32-39] are sign extensions of bit 31. As such the
-	 * effective width of an event counter on a P6-like PMU is only
-	 * 32 bits (see the wraparound sketch below this initializer).
-	 *
-	 * See the IA-32 Intel Architecture Software Developer's Manual, Vol 3B.
-	 */
- .cntval_bits = 32,
- .cntval_mask = (1ULL << 32) - 1,
- .get_event_constraints = x86_get_event_constraints,
- .event_constraints = p6_event_constraints,
-
- .format_attrs = intel_p6_formats_attr,
-};
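
To illustrate the cntval_bits/cntval_mask choice above: since bits 32-39 of a P6 counter only mirror bit 31, deltas between two reads are meaningful modulo 2^32. A small self-contained sketch of that wraparound arithmetic (no MSR access involved):

#include <stdint.h>
#include <stdio.h>

#define CNTVAL_BITS	32
#define CNTVAL_MASK	((1ULL << CNTVAL_BITS) - 1)

/* delta between two raw counter reads, modulo the effective width */
static uint64_t counter_delta(uint64_t prev, uint64_t now)
{
	return (now - prev) & CNTVAL_MASK;
}

int main(void)
{
	/* a read shortly before and shortly after a 32-bit wrap */
	uint64_t prev = 0xfffffff0ULL;
	uint64_t now  = 0x00000010ULL;

	printf("delta = %llu events\n",
	       (unsigned long long)counter_delta(prev, now));
	return 0;
}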
-
-__init int p6_pmu_init(void)
-{
- switch (boot_cpu_data.x86_model) {
- case 1:
- case 3: /* Pentium Pro */
- case 5:
- case 6: /* Pentium II */
- case 7:
- case 8:
- case 11: /* Pentium III */
- case 9:
- case 13:
- /* Pentium M */
- break;
- default:
- pr_cont("unsupported p6 CPU model %d ",
- boot_cpu_data.x86_model);
- return -ENODEV;
- }
-
- x86_pmu = p6_pmu;
-
- return 0;
-}
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/perfctr-watchdog.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/perfctr-watchdog.c
deleted file mode 100644
index 966512b2..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * local apic based NMI watchdog for various CPUs.
- *
- * This file also handles reservation of performance counters for coordination
- * with other users (like oprofile).
- *
- * Note that these events normally don't tick when the CPU idles. This means
- * the frequency varies with CPU load.
- *
- * Original code for K7/P6 written by Keith Owens
- *
- */
-
-#include <linux/percpu.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/smp.h>
-#include <asm/nmi.h>
-#include <linux/kprobes.h>
-
-#include <asm/apic.h>
-#include <asm/perf_event.h>
-
-/*
- * This number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
- * offset from MSR_P4_BSU_ESCR0.
- *
- * It will be the max for all platforms (for now).
- */
-#define NMI_MAX_COUNTER_BITS 66
-
-/*
- * perfctr_nmi_owner tracks the ownership of the perfctr registers;
- * evntsel_nmi_owner tracks the ownership of the event selection registers.
- *
- * Different performance counter / event selection registers may be reserved
- * by different subsystems; this reservation system just tries to coordinate
- * things a little.
- */
-static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
-static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
-
-/* converts an msr to an appropriate reservation bit */
-static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
-{
- /* returns the bit offset of the performance counter register */
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_AMD:
- if (msr >= MSR_F15H_PERF_CTR)
- return (msr - MSR_F15H_PERF_CTR) >> 1;
- return msr - MSR_K7_PERFCTR0;
- case X86_VENDOR_INTEL:
- if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
- return msr - MSR_ARCH_PERFMON_PERFCTR0;
-
- switch (boot_cpu_data.x86) {
- case 6:
- return msr - MSR_P6_PERFCTR0;
- case 15:
- return msr - MSR_P4_BPU_PERFCTR0;
- }
- }
- return 0;
-}
-
-/*
- * converts an msr to an appropriate reservation bit
- * returns the bit offset of the event selection register
- */
-static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
-{
- /* returns the bit offset of the event selection register */
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_AMD:
- if (msr >= MSR_F15H_PERF_CTL)
- return (msr - MSR_F15H_PERF_CTL) >> 1;
- return msr - MSR_K7_EVNTSEL0;
- case X86_VENDOR_INTEL:
- if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
- return msr - MSR_ARCH_PERFMON_EVENTSEL0;
-
- switch (boot_cpu_data.x86) {
- case 6:
- return msr - MSR_P6_EVNTSEL0;
- case 15:
- return msr - MSR_P4_BSU_ESCR0;
- }
- }
- return 0;
-
-}
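
To make the MSR-to-bit mapping above concrete: on a pre-arch-perfmon Intel family 6 CPU, consecutive event-select MSRs map to consecutive bits, while on AMD family 15h the CTL/CTR MSRs are interleaved, so the offset is halved. The sketch below assumes the usual raw MSR addresses (0x186 for MSR_P6_EVNTSEL0, 0xc0010200/0xc0010201 for the family 15h CTL0/CTR0 pair), which are not defined in this file:

#include <stdio.h>

/* raw MSR addresses, assumed from the x86 MSR index (not from this file) */
#define MSR_P6_EVNTSEL0		0x00000186
#define MSR_F15H_PERF_CTL	0xc0010200
#define MSR_F15H_PERF_CTR	0xc0010201

int main(void)
{
	unsigned int msr;

	/* Intel family 6: consecutive event-select MSRs map to consecutive bits */
	for (msr = MSR_P6_EVNTSEL0; msr < MSR_P6_EVNTSEL0 + 2; msr++)
		printf("evntsel MSR 0x%x -> bit %u\n", msr, msr - MSR_P6_EVNTSEL0);

	/* AMD family 15h: CTL/CTR pairs are interleaved, so halve the offset */
	for (msr = MSR_F15H_PERF_CTR; msr < MSR_F15H_PERF_CTR + 6; msr += 2)
		printf("perfctr MSR 0x%x -> bit %u\n", msr,
		       (msr - MSR_F15H_PERF_CTR) >> 1);
	return 0;
}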
-
-/* checks whether a counter bit is available (hack for oprofile) */
-int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
-{
- BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
- return !test_bit(counter, perfctr_nmi_owner);
-}
-EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
-
-int reserve_perfctr_nmi(unsigned int msr)
-{
- unsigned int counter;
-
- counter = nmi_perfctr_msr_to_bit(msr);
- /* register not managed by the allocator? */
- if (counter > NMI_MAX_COUNTER_BITS)
- return 1;
-
- if (!test_and_set_bit(counter, perfctr_nmi_owner))
- return 1;
- return 0;
-}
-EXPORT_SYMBOL(reserve_perfctr_nmi);
-
-void release_perfctr_nmi(unsigned int msr)
-{
- unsigned int counter;
-
- counter = nmi_perfctr_msr_to_bit(msr);
- /* register not managed by the allocator? */
- if (counter > NMI_MAX_COUNTER_BITS)
- return;
-
- clear_bit(counter, perfctr_nmi_owner);
-}
-EXPORT_SYMBOL(release_perfctr_nmi);
-
-int reserve_evntsel_nmi(unsigned int msr)
-{
- unsigned int counter;
-
- counter = nmi_evntsel_msr_to_bit(msr);
- /* register not managed by the allocator? */
- if (counter > NMI_MAX_COUNTER_BITS)
- return 1;
-
- if (!test_and_set_bit(counter, evntsel_nmi_owner))
- return 1;
- return 0;
-}
-EXPORT_SYMBOL(reserve_evntsel_nmi);
-
-void release_evntsel_nmi(unsigned int msr)
-{
- unsigned int counter;
-
- counter = nmi_evntsel_msr_to_bit(msr);
- /* register not managed by the allocator? */
- if (counter > NMI_MAX_COUNTER_BITS)
- return;
-
- clear_bit(counter, evntsel_nmi_owner);
-}
-EXPORT_SYMBOL(release_evntsel_nmi);
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/powerflags.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/powerflags.c
deleted file mode 100644
index 7b3fe56b..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/powerflags.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Strings for the various x86 power flags
- *
- * This file must not contain any executable code.
- */
-
-#include <asm/cpufeature.h>
-
-const char *const x86_power_flags[32] = {
- "ts", /* temperature sensor */
- "fid", /* frequency id control */
- "vid", /* voltage id control */
- "ttp", /* thermal trip */
- "tm",
- "stc",
- "100mhzsteps",
- "hwpstate",
- "", /* tsc invariant mapped to constant_tsc */
- "cpb", /* core performance boost */
- "eff_freq_ro", /* Readonly aperf/mperf */
-};
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/proc.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/proc.c
deleted file mode 100644
index 8022c668..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/proc.c
+++ /dev/null
@@ -1,167 +0,0 @@
-#include <linux/smp.h>
-#include <linux/timex.h>
-#include <linux/string.h>
-#include <linux/seq_file.h>
-#include <linux/cpufreq.h>
-
-/*
- * Get CPU information for use by the procfs.
- */
-static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
- unsigned int cpu)
-{
-#ifdef CONFIG_SMP
- if (c->x86_max_cores * smp_num_siblings > 1) {
- seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
- seq_printf(m, "siblings\t: %d\n",
- cpumask_weight(cpu_core_mask(cpu)));
- seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
- seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
- seq_printf(m, "apicid\t\t: %d\n", c->apicid);
- seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
- }
-#endif
-}
-
-#ifdef CONFIG_X86_32
-static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
-{
- /*
- * We use exception 16 if we have hardware math and we've either seen
- * it or the CPU claims it is internal
- */
- int fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu);
- seq_printf(m,
- "fdiv_bug\t: %s\n"
- "hlt_bug\t\t: %s\n"
- "f00f_bug\t: %s\n"
- "coma_bug\t: %s\n"
- "fpu\t\t: %s\n"
- "fpu_exception\t: %s\n"
- "cpuid level\t: %d\n"
- "wp\t\t: %s\n",
- c->fdiv_bug ? "yes" : "no",
- c->hlt_works_ok ? "no" : "yes",
- c->f00f_bug ? "yes" : "no",
- c->coma_bug ? "yes" : "no",
- c->hard_math ? "yes" : "no",
- fpu_exception ? "yes" : "no",
- c->cpuid_level,
- c->wp_works_ok ? "yes" : "no");
-}
-#else
-static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
-{
- seq_printf(m,
- "fpu\t\t: yes\n"
- "fpu_exception\t: yes\n"
- "cpuid level\t: %d\n"
- "wp\t\t: yes\n",
- c->cpuid_level);
-}
-#endif
-
-static int show_cpuinfo(struct seq_file *m, void *v)
-{
- struct cpuinfo_x86 *c = v;
- unsigned int cpu;
- int i;
-
- cpu = c->cpu_index;
- seq_printf(m, "processor\t: %u\n"
- "vendor_id\t: %s\n"
- "cpu family\t: %d\n"
- "model\t\t: %u\n"
- "model name\t: %s\n",
- cpu,
- c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
- c->x86,
- c->x86_model,
- c->x86_model_id[0] ? c->x86_model_id : "unknown");
-
- if (c->x86_mask || c->cpuid_level >= 0)
- seq_printf(m, "stepping\t: %d\n", c->x86_mask);
- else
- seq_printf(m, "stepping\t: unknown\n");
- if (c->microcode)
- seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
-
- if (cpu_has(c, X86_FEATURE_TSC)) {
- unsigned int freq = cpufreq_quick_get(cpu);
-
- if (!freq)
- freq = cpu_khz;
- seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
- freq / 1000, (freq % 1000));
- }
-
- /* Cache size */
- if (c->x86_cache_size >= 0)
- seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
-
- show_cpuinfo_core(m, c, cpu);
- show_cpuinfo_misc(m, c);
-
- seq_printf(m, "flags\t\t:");
- for (i = 0; i < 32*NCAPINTS; i++)
- if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
- seq_printf(m, " %s", x86_cap_flags[i]);
-
- seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
- c->loops_per_jiffy/(500000/HZ),
- (c->loops_per_jiffy/(5000/HZ)) % 100);
-
-#ifdef CONFIG_X86_64
- if (c->x86_tlbsize > 0)
- seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
-#endif
- seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size);
- seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
- seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
- c->x86_phys_bits, c->x86_virt_bits);
-
- seq_printf(m, "power management:");
- for (i = 0; i < 32; i++) {
- if (c->x86_power & (1 << i)) {
- if (i < ARRAY_SIZE(x86_power_flags) &&
- x86_power_flags[i])
- seq_printf(m, "%s%s",
- x86_power_flags[i][0] ? " " : "",
- x86_power_flags[i]);
- else
- seq_printf(m, " [%d]", i);
- }
- }
-
- seq_printf(m, "\n\n");
-
- return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
- if (*pos == 0) /* just in case, cpu 0 is not the first */
- *pos = cpumask_first(cpu_online_mask);
- else
- *pos = cpumask_next(*pos - 1, cpu_online_mask);
- if ((*pos) < nr_cpu_ids)
- return &cpu_data(*pos);
- return NULL;
-}
-
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
- (*pos)++;
- return c_start(m, pos);
-}
-
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-
-const struct seq_operations cpuinfo_op = {
- .start = c_start,
- .next = c_next,
- .stop = c_stop,
- .show = show_cpuinfo,
-};
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/rdrand.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/rdrand.c
deleted file mode 100644
index feca286c..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/rdrand.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * This file is part of the Linux kernel.
- *
- * Copyright (c) 2011, Intel Corporation
- * Authors: Fenghua Yu <fenghua.yu@intel.com>,
- * H. Peter Anvin <hpa@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <asm/processor.h>
-#include <asm/archrandom.h>
-#include <asm/sections.h>
-
-static int __init x86_rdrand_setup(char *s)
-{
- setup_clear_cpu_cap(X86_FEATURE_RDRAND);
- return 1;
-}
-__setup("nordrand", x86_rdrand_setup);
-
-/* We can't use arch_get_random_long() here since alternatives haven't run */
-static inline int rdrand_long(unsigned long *v)
-{
- int ok;
- asm volatile("1: " RDRAND_LONG "\n\t"
- "jc 2f\n\t"
- "decl %0\n\t"
- "jnz 1b\n\t"
- "2:"
- : "=r" (ok), "=a" (*v)
- : "0" (RDRAND_RETRY_LOOPS));
- return ok;
-}
-
-/*
- * Force a reseed cycle; we are architecturally guaranteed a reseed
- * after no more than 512 128-bit chunks of random data. This also
- * acts as a test of the CPU capability.
- */
-#define RESEED_LOOP ((512*128)/sizeof(unsigned long))
-
-void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_ARCH_RANDOM
- unsigned long tmp;
- int i, count, ok;
-
- if (!cpu_has(c, X86_FEATURE_RDRAND))
- return; /* Nothing to do */
-
- for (count = i = 0; i < RESEED_LOOP; i++) {
- ok = rdrand_long(&tmp);
- if (ok)
- count++;
- }
-
- if (count != RESEED_LOOP)
- clear_cpu_cap(c, X86_FEATURE_RDRAND);
-#endif
-}
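
The retry loop in rdrand_long() above can be reproduced from user space with the rdrand mnemonic instead of the kernel's byte-encoded RDRAND_LONG. A sketch for x86-64 with a GNU toolchain; it assumes the CPU actually supports RDRAND (otherwise the instruction faults) and keeps the same convention that a zero return means the retry budget was exhausted:

#include <stdio.h>

#define RDRAND_RETRY_LOOPS 10

/* user-space analogue of rdrand_long(): retry until CF signals success */
static int rdrand_u64(unsigned long long *v)
{
	int ok;

	asm volatile("1: rdrand %1\n\t"
		     "jc 2f\n\t"
		     "decl %0\n\t"
		     "jnz 1b\n\t"
		     "2:"
		     : "=r" (ok), "=r" (*v)
		     : "0" (RDRAND_RETRY_LOOPS));
	return ok;
}

int main(void)
{
	unsigned long long val;

	if (rdrand_u64(&val))
		printf("rdrand: 0x%016llx\n", val);
	else
		printf("rdrand: no entropy after %d retries\n", RDRAND_RETRY_LOOPS);
	return 0;
}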
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/scattered.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/scattered.c
deleted file mode 100644
index ee8e9abc..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/scattered.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Routines to identify additional cpu features that are scattered in
- * cpuid space.
- */
-#include <linux/cpu.h>
-
-#include <asm/pat.h>
-#include <asm/processor.h>
-
-#include <asm/apic.h>
-
-struct cpuid_bit {
- u16 feature;
- u8 reg;
- u8 bit;
- u32 level;
- u32 sub_leaf;
-};
-
-enum cpuid_regs {
- CR_EAX = 0,
- CR_ECX,
- CR_EDX,
- CR_EBX
-};
-
-void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
-{
- u32 max_level;
- u32 regs[4];
- const struct cpuid_bit *cb;
-
- static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
- { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 },
- { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
- { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
- { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
- { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 },
- { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
- { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
- { X86_FEATURE_XSAVEOPT, CR_EAX, 0, 0x0000000d, 1 },
- { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 },
- { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
- { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a, 0 },
- { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 },
- { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 },
- { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a, 0 },
- { X86_FEATURE_TSCRATEMSR, CR_EDX, 4, 0x8000000a, 0 },
- { X86_FEATURE_VMCBCLEAN, CR_EDX, 5, 0x8000000a, 0 },
- { X86_FEATURE_FLUSHBYASID, CR_EDX, 6, 0x8000000a, 0 },
- { X86_FEATURE_DECODEASSISTS, CR_EDX, 7, 0x8000000a, 0 },
- { X86_FEATURE_PAUSEFILTER, CR_EDX,10, 0x8000000a, 0 },
- { X86_FEATURE_PFTHRESHOLD, CR_EDX,12, 0x8000000a, 0 },
- { 0, 0, 0, 0, 0 }
- };
-
- for (cb = cpuid_bits; cb->feature; cb++) {
-
- /* Verify that the level is valid */
- max_level = cpuid_eax(cb->level & 0xffff0000);
- if (max_level < cb->level ||
- max_level > (cb->level | 0xffff))
- continue;
-
- cpuid_count(cb->level, cb->sub_leaf, &regs[CR_EAX],
- &regs[CR_EBX], &regs[CR_ECX], &regs[CR_EDX]);
-
- if (regs[cb->reg] & (1 << cb->bit))
- set_cpu_cap(c, cb->feature);
- }
-}
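
The table-driven loop above boils down to: run cpuid_count() on the given level/sub-leaf, pick one output register and test one bit. A user-space sketch of the same check for a single table entry, using GCC's <cpuid.h> helpers; the leaf 0x6 / EAX bit 2 choice (ARAT) is taken from the cpuid_bits[] table above:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* verify the leaf is implemented before querying it */
	if (__get_cpuid_max(0, NULL) < 0x6) {
		printf("leaf 0x6 not implemented\n");
		return 0;
	}

	/* leaf 0x6, sub-leaf 0: EAX bit 2 is ARAT (always-running APIC timer) */
	__cpuid_count(0x6, 0, eax, ebx, ecx, edx);
	printf("ARAT: %s\n", (eax & (1u << 2)) ? "yes" : "no");
	return 0;
}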
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/sched.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/sched.c
deleted file mode 100644
index a640ae5a..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/sched.c
+++ /dev/null
@@ -1,55 +0,0 @@
-#include <linux/sched.h>
-#include <linux/math64.h>
-#include <linux/percpu.h>
-#include <linux/irqflags.h>
-
-#include <asm/cpufeature.h>
-#include <asm/processor.h>
-
-#ifdef CONFIG_SMP
-
-static DEFINE_PER_CPU(struct aperfmperf, old_perf_sched);
-
-static unsigned long scale_aperfmperf(void)
-{
- struct aperfmperf val, *old = &__get_cpu_var(old_perf_sched);
- unsigned long ratio, flags;
-
- local_irq_save(flags);
- get_aperfmperf(&val);
- local_irq_restore(flags);
-
- ratio = calc_aperfmperf_ratio(old, &val);
- *old = val;
-
- return ratio;
-}
-
-unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
-{
- /*
- * do aperf/mperf on the cpu level because it includes things
- * like turbo mode, which are relevant to full cores.
- */
- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
- return scale_aperfmperf();
-
- /*
- * maybe have something cpufreq here
- */
-
- return default_scale_freq_power(sd, cpu);
-}
-
-unsigned long arch_scale_smt_power(struct sched_domain *sd, int cpu)
-{
- /*
- * aperf/mperf already includes the smt gain
- */
- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
- return SCHED_LOAD_SCALE;
-
- return default_scale_smt_power(sd, cpu);
-}
-
-#endif
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/topology.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/topology.c
deleted file mode 100644
index 4397e987..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/topology.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Check for extended topology enumeration cpuid leaf 0xb and if it
- * exists, use it for populating initial_apicid and cpu topology
- * detection.
- */
-
-#include <linux/cpu.h>
-#include <asm/apic.h>
-#include <asm/pat.h>
-#include <asm/processor.h>
-
-/* leaf 0xb SMT level */
-#define SMT_LEVEL 0
-
-/* leaf 0xb sub-leaf types */
-#define INVALID_TYPE 0
-#define SMT_TYPE 1
-#define CORE_TYPE 2
-
-#define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff)
-#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
-#define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
-
-/*
- * Check for extended topology enumeration cpuid leaf 0xb and if it
- * exists, use it for populating initial_apicid and cpu topology
- * detection.
- */
-void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_SMP
- unsigned int eax, ebx, ecx, edx, sub_index;
- unsigned int ht_mask_width, core_plus_mask_width;
- unsigned int core_select_mask, core_level_siblings;
- static bool printed;
-
- if (c->cpuid_level < 0xb)
- return;
-
- cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
-
- /*
- * check if the cpuid leaf 0xb is actually implemented.
- */
- if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
- return;
-
- set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);
-
- /*
- * initial apic id, which also represents 32-bit extended x2apic id.
- */
- c->initial_apicid = edx;
-
- /*
- * Populate HT related information from sub-leaf level 0.
- */
- core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
- core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
-
- sub_index = 1;
- do {
- cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);
-
- /*
- * Check for the Core type in the implemented sub leaves.
- */
- if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
- core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
- core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
- break;
- }
-
- sub_index++;
- } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
-
- core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
-
- c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
- & core_select_mask;
- c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
- /*
- * Reinit the apicid, now that we have extended initial_apicid.
- */
- c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
-
- c->x86_max_cores = (core_level_siblings / smp_num_siblings);
-
- if (!printed) {
- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
- c->phys_proc_id);
- if (c->x86_max_cores > 1)
- printk(KERN_INFO "CPU: Processor Core ID: %d\n",
- c->cpu_core_id);
- printed = 1;
- }
- return;
-#endif
-}
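
A worked example of the mask arithmetic at the end of detect_extended_topology(), using hypothetical shift widths (1 bit for the SMT level, 5 bits for SMT plus core) and an arbitrary APIC id; the masks are the same as in the kernel code, just built with unsigned shifts:

#include <stdio.h>

int main(void)
{
	/* hypothetical shift widths as reported by cpuid leaf 0xb */
	unsigned int ht_mask_width = 1;		/* bits used for the SMT id    */
	unsigned int core_plus_mask_width = 5;	/* bits used for SMT + core id */
	unsigned int apicid = 0x2b;		/* example initial APIC id     */

	unsigned int core_select_mask =
		(~(~0u << core_plus_mask_width)) >> ht_mask_width;

	unsigned int smt_id  = apicid & ~(~0u << ht_mask_width);
	unsigned int core_id = (apicid >> ht_mask_width) & core_select_mask;
	unsigned int pkg_id  = apicid >> core_plus_mask_width;

	printf("apicid 0x%x -> smt %u, core %u, package %u\n",
	       apicid, smt_id, core_id, pkg_id);
	return 0;
}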
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/transmeta.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/transmeta.c
deleted file mode 100644
index 28000743..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/transmeta.c
+++ /dev/null
@@ -1,109 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <asm/processor.h>
-#include <asm/msr.h>
-#include "cpu.h"
-
-static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c)
-{
- u32 xlvl;
-
- /* Transmeta-defined flags: level 0x80860001 */
- xlvl = cpuid_eax(0x80860000);
- if ((xlvl & 0xffff0000) == 0x80860000) {
- if (xlvl >= 0x80860001)
- c->x86_capability[2] = cpuid_edx(0x80860001);
- }
-}
-
-static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
-{
- unsigned int cap_mask, uk, max, dummy;
- unsigned int cms_rev1, cms_rev2;
- unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev;
- char cpu_info[65];
-
- early_init_transmeta(c);
-
- cpu_detect_cache_sizes(c);
-
- /* Print CMS and CPU revision */
- max = cpuid_eax(0x80860000);
- cpu_rev = 0;
- if (max >= 0x80860001) {
- cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
- if (cpu_rev != 0x02000000) {
- printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
- (cpu_rev >> 24) & 0xff,
- (cpu_rev >> 16) & 0xff,
- (cpu_rev >> 8) & 0xff,
- cpu_rev & 0xff,
- cpu_freq);
- }
- }
- if (max >= 0x80860002) {
- cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
- if (cpu_rev == 0x02000000) {
- printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n",
- new_cpu_rev, cpu_freq);
- }
- printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
- (cms_rev1 >> 24) & 0xff,
- (cms_rev1 >> 16) & 0xff,
- (cms_rev1 >> 8) & 0xff,
- cms_rev1 & 0xff,
- cms_rev2);
- }
- if (max >= 0x80860006) {
- cpuid(0x80860003,
- (void *)&cpu_info[0],
- (void *)&cpu_info[4],
- (void *)&cpu_info[8],
- (void *)&cpu_info[12]);
- cpuid(0x80860004,
- (void *)&cpu_info[16],
- (void *)&cpu_info[20],
- (void *)&cpu_info[24],
- (void *)&cpu_info[28]);
- cpuid(0x80860005,
- (void *)&cpu_info[32],
- (void *)&cpu_info[36],
- (void *)&cpu_info[40],
- (void *)&cpu_info[44]);
- cpuid(0x80860006,
- (void *)&cpu_info[48],
- (void *)&cpu_info[52],
- (void *)&cpu_info[56],
- (void *)&cpu_info[60]);
- cpu_info[64] = '\0';
- printk(KERN_INFO "CPU: %s\n", cpu_info);
- }
-
- /* Unhide possibly hidden capability flags */
- rdmsr(0x80860004, cap_mask, uk);
- wrmsr(0x80860004, ~0, uk);
- c->x86_capability[0] = cpuid_edx(0x00000001);
- wrmsr(0x80860004, cap_mask, uk);
-
- /* All Transmeta CPUs have a constant TSC */
- set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-
-#ifdef CONFIG_SYSCTL
- /*
- * randomize_va_space slows us down enormously;
- * it probably triggers retranslation of x86->native bytecode
- */
- randomize_va_space = 0;
-#endif
-}
-
-static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = {
- .c_vendor = "Transmeta",
- .c_ident = { "GenuineTMx86", "TransmetaCPU" },
- .c_early_init = early_init_transmeta,
- .c_init = init_transmeta,
- .c_x86_vendor = X86_VENDOR_TRANSMETA,
-};
-
-cpu_dev_register(transmeta_cpu_dev);
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/umc.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/umc.c
deleted file mode 100644
index fd2c37bf..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/umc.c
+++ /dev/null
@@ -1,26 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/processor.h>
-#include "cpu.h"
-
-/*
- * UMC chips appear to be only either 386 or 486,
- * so no special init takes place.
- */
-
-static const struct cpu_dev __cpuinitconst umc_cpu_dev = {
- .c_vendor = "UMC",
- .c_ident = { "UMC UMC UMC" },
- .c_models = {
- { .vendor = X86_VENDOR_UMC, .family = 4, .model_names =
- {
- [1] = "U5D",
- [2] = "U5S",
- }
- },
- },
- .c_x86_vendor = X86_VENDOR_UMC,
-};
-
-cpu_dev_register(umc_cpu_dev);
-
diff --git a/ANDROID_3.4.5/arch/x86/kernel/cpu/vmware.c b/ANDROID_3.4.5/arch/x86/kernel/cpu/vmware.c
deleted file mode 100644
index d22d0c4e..00000000
--- a/ANDROID_3.4.5/arch/x86/kernel/cpu/vmware.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * VMware Detection code.
- *
- * Copyright (C) 2008, VMware, Inc.
- * Author : Alok N Kataria <akataria@vmware.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/dmi.h>
-#include <linux/module.h>
-#include <asm/div64.h>
-#include <asm/x86_init.h>
-#include <asm/hypervisor.h>
-
-#define CPUID_VMWARE_INFO_LEAF 0x40000000
-#define VMWARE_HYPERVISOR_MAGIC 0x564D5868
-#define VMWARE_HYPERVISOR_PORT 0x5658
-
-#define VMWARE_PORT_CMD_GETVERSION 10
-#define VMWARE_PORT_CMD_GETHZ 45
-
-#define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \
- __asm__("inl (%%dx)" : \
- "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \
- "0"(VMWARE_HYPERVISOR_MAGIC), \
- "1"(VMWARE_PORT_CMD_##cmd), \
- "2"(VMWARE_HYPERVISOR_PORT), "3"(UINT_MAX) : \
- "memory");
-
-static inline int __vmware_platform(void)
-{
- uint32_t eax, ebx, ecx, edx;
- VMWARE_PORT(GETVERSION, eax, ebx, ecx, edx);
- return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
-}
-
-static unsigned long vmware_get_tsc_khz(void)
-{
- uint64_t tsc_hz, lpj;
- uint32_t eax, ebx, ecx, edx;
-
- VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
-
- tsc_hz = eax | (((uint64_t)ebx) << 32);
- do_div(tsc_hz, 1000);
- BUG_ON(tsc_hz >> 32);
- printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
- (unsigned long) tsc_hz / 1000,
- (unsigned long) tsc_hz % 1000);
-
- if (!preset_lpj) {
- lpj = ((u64)tsc_hz * 1000);
- do_div(lpj, HZ);
- preset_lpj = lpj;
- }
-
- return tsc_hz;
-}
-
-static void __init vmware_platform_setup(void)
-{
- uint32_t eax, ebx, ecx, edx;
-
- VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
-
- if (ebx != UINT_MAX)
- x86_platform.calibrate_tsc = vmware_get_tsc_khz;
- else
- printk(KERN_WARNING
- "Failed to get TSC freq from the hypervisor\n");
-}
-
-/*
- * When checking the DMI string information, checking just the product
- * serial key should be enough, as it will always contain a VMware-
- * specific string when running under the VMware hypervisor.
- */
-static bool __init vmware_platform(void)
-{
- if (cpu_has_hypervisor) {
- unsigned int eax;
- unsigned int hyper_vendor_id[3];
-
- cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
- &hyper_vendor_id[1], &hyper_vendor_id[2]);
- if (!memcmp(hyper_vendor_id, "VMwareVMware", 12))
- return true;
- } else if (dmi_available && dmi_name_in_serial("VMware") &&
- __vmware_platform())
- return true;
-
- return false;
-}
-
-/*
- * VMware hypervisor takes care of exporting a reliable TSC to the guest.
- * Still, due to timing difference when running on virtual cpus, the TSC can
- * be marked as unstable in some cases. For example, the TSC sync check at
- * bootup can fail due to a marginal offset between vcpus' TSCs (though the
- * TSCs do not drift from each other). Also, the ACPI PM timer clocksource
- * is not suitable as a watchdog when running on a hypervisor because the
- * kernel may miss a wrap of the counter if the vcpu is descheduled for a
- * long time. To skip these checks at runtime we set these capability bits,
- * so that the kernel could just trust the hypervisor with providing a
- * reliable virtual TSC that is suitable for timekeeping.
- */
-static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c)
-{
- set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
- set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
-}
-
-const __refconst struct hypervisor_x86 x86_hyper_vmware = {
- .name = "VMware",
- .detect = vmware_platform,
- .set_cpu_features = vmware_set_cpu_features,
- .init_platform = vmware_platform_setup,
-};
-EXPORT_SYMBOL(x86_hyper_vmware);
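
For reference, the CPUID half of vmware_platform() can be reproduced from user space: leaf 0x40000000 returns the hypervisor vendor signature in EBX/ECX/EDX, which spells "VMwareVMware" under VMware. A sketch using GCC's <cpuid.h>; it is only meaningful when running under a hypervisor, and the backdoor I/O-port probe is deliberately not reproduced here:

#include <stdio.h>
#include <string.h>
#include <cpuid.h>

#define CPUID_VMWARE_INFO_LEAF 0x40000000

int main(void)
{
	unsigned int eax, sig[3];
	char vendor[13];

	/* signature is returned in EBX, ECX, EDX order */
	__cpuid(CPUID_VMWARE_INFO_LEAF, eax, sig[0], sig[1], sig[2]);
	memcpy(vendor, sig, 12);
	vendor[12] = '\0';

	printf("hypervisor signature: \"%s\"\n", vendor);
	printf("VMware: %s\n",
	       !memcmp(vendor, "VMwareVMware", 12) ? "yes" : "no");
	return 0;
}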