From 392e8802486cb573b916e746010e141a75f507e6 Mon Sep 17 00:00:00 2001 From: Kevin Date: Sat, 15 Nov 2014 09:58:27 +0800 Subject: init android origin source code --- ANDROID_3.4.5/arch/x86/include/asm/Kbuild | 28 + ANDROID_3.4.5/arch/x86/include/asm/a.out-core.h | 65 + ANDROID_3.4.5/arch/x86/include/asm/a.out.h | 20 + ANDROID_3.4.5/arch/x86/include/asm/acpi.h | 193 ++ ANDROID_3.4.5/arch/x86/include/asm/aes.h | 11 + ANDROID_3.4.5/arch/x86/include/asm/agp.h | 31 + .../arch/x86/include/asm/alternative-asm.h | 26 + ANDROID_3.4.5/arch/x86/include/asm/alternative.h | 195 ++ ANDROID_3.4.5/arch/x86/include/asm/amd_nb.h | 72 + ANDROID_3.4.5/arch/x86/include/asm/apb_timer.h | 49 + ANDROID_3.4.5/arch/x86/include/asm/apic.h | 636 ++++++ ANDROID_3.4.5/arch/x86/include/asm/apic_flat_64.h | 7 + ANDROID_3.4.5/arch/x86/include/asm/apicdef.h | 445 +++++ ANDROID_3.4.5/arch/x86/include/asm/apm.h | 73 + ANDROID_3.4.5/arch/x86/include/asm/arch_hweight.h | 61 + ANDROID_3.4.5/arch/x86/include/asm/archrandom.h | 75 + ANDROID_3.4.5/arch/x86/include/asm/asm-offsets.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/asm.h | 58 + ANDROID_3.4.5/arch/x86/include/asm/atomic.h | 317 +++ ANDROID_3.4.5/arch/x86/include/asm/atomic64_32.h | 316 +++ ANDROID_3.4.5/arch/x86/include/asm/atomic64_64.h | 243 +++ ANDROID_3.4.5/arch/x86/include/asm/auxvec.h | 19 + ANDROID_3.4.5/arch/x86/include/asm/barrier.h | 116 ++ ANDROID_3.4.5/arch/x86/include/asm/bios_ebda.h | 60 + ANDROID_3.4.5/arch/x86/include/asm/bitops.h | 514 +++++ ANDROID_3.4.5/arch/x86/include/asm/bitsperlong.h | 13 + ANDROID_3.4.5/arch/x86/include/asm/boot.h | 47 + ANDROID_3.4.5/arch/x86/include/asm/bootparam.h | 136 ++ ANDROID_3.4.5/arch/x86/include/asm/bug.h | 43 + ANDROID_3.4.5/arch/x86/include/asm/bugs.h | 12 + ANDROID_3.4.5/arch/x86/include/asm/byteorder.h | 6 + ANDROID_3.4.5/arch/x86/include/asm/cache.h | 23 + ANDROID_3.4.5/arch/x86/include/asm/cacheflush.h | 166 ++ ANDROID_3.4.5/arch/x86/include/asm/calgary.h | 70 + 
ANDROID_3.4.5/arch/x86/include/asm/calling.h | 196 ++ ANDROID_3.4.5/arch/x86/include/asm/ce4100.h | 6 + ANDROID_3.4.5/arch/x86/include/asm/checksum.h | 5 + ANDROID_3.4.5/arch/x86/include/asm/checksum_32.h | 190 ++ ANDROID_3.4.5/arch/x86/include/asm/checksum_64.h | 191 ++ ANDROID_3.4.5/arch/x86/include/asm/clocksource.h | 18 + ANDROID_3.4.5/arch/x86/include/asm/cmpxchg.h | 233 +++ ANDROID_3.4.5/arch/x86/include/asm/cmpxchg_32.h | 171 ++ ANDROID_3.4.5/arch/x86/include/asm/cmpxchg_64.h | 25 + ANDROID_3.4.5/arch/x86/include/asm/compat.h | 252 +++ ANDROID_3.4.5/arch/x86/include/asm/cpu.h | 37 + ANDROID_3.4.5/arch/x86/include/asm/cpu_device_id.h | 13 + ANDROID_3.4.5/arch/x86/include/asm/cpufeature.h | 400 ++++ ANDROID_3.4.5/arch/x86/include/asm/cpumask.h | 14 + ANDROID_3.4.5/arch/x86/include/asm/cputime.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/current.h | 21 + ANDROID_3.4.5/arch/x86/include/asm/debugreg.h | 195 ++ ANDROID_3.4.5/arch/x86/include/asm/delay.h | 8 + ANDROID_3.4.5/arch/x86/include/asm/desc.h | 407 ++++ ANDROID_3.4.5/arch/x86/include/asm/desc_defs.h | 101 + ANDROID_3.4.5/arch/x86/include/asm/device.h | 19 + ANDROID_3.4.5/arch/x86/include/asm/div64.h | 66 + ANDROID_3.4.5/arch/x86/include/asm/dma-mapping.h | 163 ++ ANDROID_3.4.5/arch/x86/include/asm/dma.h | 317 +++ ANDROID_3.4.5/arch/x86/include/asm/dmi.h | 19 + ANDROID_3.4.5/arch/x86/include/asm/dwarf2.h | 146 ++ ANDROID_3.4.5/arch/x86/include/asm/e820.h | 149 ++ ANDROID_3.4.5/arch/x86/include/asm/edac.h | 18 + ANDROID_3.4.5/arch/x86/include/asm/efi.h | 115 ++ ANDROID_3.4.5/arch/x86/include/asm/elf.h | 372 ++++ .../arch/x86/include/asm/emergency-restart.h | 20 + ANDROID_3.4.5/arch/x86/include/asm/entry_arch.h | 56 + ANDROID_3.4.5/arch/x86/include/asm/errno.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/exec.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/fb.h | 17 + ANDROID_3.4.5/arch/x86/include/asm/fcntl.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/fixmap.h | 238 +++ ANDROID_3.4.5/arch/x86/include/asm/floppy.h | 281 
+++ ANDROID_3.4.5/arch/x86/include/asm/fpu-internal.h | 520 +++++ ANDROID_3.4.5/arch/x86/include/asm/frame.h | 26 + ANDROID_3.4.5/arch/x86/include/asm/ftrace.h | 57 + ANDROID_3.4.5/arch/x86/include/asm/futex.h | 141 ++ ANDROID_3.4.5/arch/x86/include/asm/gart.h | 113 ++ ANDROID_3.4.5/arch/x86/include/asm/genapic.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/geode.h | 36 + ANDROID_3.4.5/arch/x86/include/asm/gpio.h | 53 + ANDROID_3.4.5/arch/x86/include/asm/hardirq.h | 55 + ANDROID_3.4.5/arch/x86/include/asm/highmem.h | 77 + ANDROID_3.4.5/arch/x86/include/asm/hpet.h | 118 ++ ANDROID_3.4.5/arch/x86/include/asm/hugetlb.h | 93 + ANDROID_3.4.5/arch/x86/include/asm/hw_breakpoint.h | 76 + ANDROID_3.4.5/arch/x86/include/asm/hw_irq.h | 182 ++ .../arch/x86/include/asm/hypertransport.h | 45 + ANDROID_3.4.5/arch/x86/include/asm/hyperv.h | 194 ++ ANDROID_3.4.5/arch/x86/include/asm/hypervisor.h | 62 + ANDROID_3.4.5/arch/x86/include/asm/i387.h | 79 + ANDROID_3.4.5/arch/x86/include/asm/i8259.h | 70 + ANDROID_3.4.5/arch/x86/include/asm/ia32.h | 163 ++ ANDROID_3.4.5/arch/x86/include/asm/ia32_unistd.h | 11 + ANDROID_3.4.5/arch/x86/include/asm/idle.h | 15 + ANDROID_3.4.5/arch/x86/include/asm/inat.h | 221 +++ ANDROID_3.4.5/arch/x86/include/asm/inat_types.h | 29 + ANDROID_3.4.5/arch/x86/include/asm/init.h | 20 + ANDROID_3.4.5/arch/x86/include/asm/insn.h | 199 ++ ANDROID_3.4.5/arch/x86/include/asm/inst.h | 240 +++ ANDROID_3.4.5/arch/x86/include/asm/intel_scu_ipc.h | 73 + ANDROID_3.4.5/arch/x86/include/asm/io.h | 348 ++++ ANDROID_3.4.5/arch/x86/include/asm/io_apic.h | 217 ++ ANDROID_3.4.5/arch/x86/include/asm/ioctl.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/ioctls.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/iomap.h | 41 + ANDROID_3.4.5/arch/x86/include/asm/iommu.h | 13 + ANDROID_3.4.5/arch/x86/include/asm/iommu_table.h | 100 + ANDROID_3.4.5/arch/x86/include/asm/ipcbuf.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/ipi.h | 162 ++ ANDROID_3.4.5/arch/x86/include/asm/irq.h | 44 + 
ANDROID_3.4.5/arch/x86/include/asm/irq_regs.h | 31 + ANDROID_3.4.5/arch/x86/include/asm/irq_remapping.h | 45 + ANDROID_3.4.5/arch/x86/include/asm/irq_vectors.h | 172 ++ ANDROID_3.4.5/arch/x86/include/asm/irqflags.h | 209 ++ ANDROID_3.4.5/arch/x86/include/asm/ist.h | 34 + ANDROID_3.4.5/arch/x86/include/asm/jump_label.h | 42 + ANDROID_3.4.5/arch/x86/include/asm/kdebug.h | 40 + ANDROID_3.4.5/arch/x86/include/asm/kexec.h | 168 ++ ANDROID_3.4.5/arch/x86/include/asm/kgdb.h | 89 + ANDROID_3.4.5/arch/x86/include/asm/kmap_types.h | 12 + ANDROID_3.4.5/arch/x86/include/asm/kmemcheck.h | 42 + ANDROID_3.4.5/arch/x86/include/asm/kprobes.h | 117 ++ ANDROID_3.4.5/arch/x86/include/asm/kvm.h | 328 ++++ ANDROID_3.4.5/arch/x86/include/asm/kvm_emulate.h | 395 ++++ ANDROID_3.4.5/arch/x86/include/asm/kvm_host.h | 970 +++++++++ ANDROID_3.4.5/arch/x86/include/asm/kvm_para.h | 216 ++ ANDROID_3.4.5/arch/x86/include/asm/ldt.h | 40 + ANDROID_3.4.5/arch/x86/include/asm/lguest.h | 99 + ANDROID_3.4.5/arch/x86/include/asm/lguest_hcall.h | 76 + ANDROID_3.4.5/arch/x86/include/asm/linkage.h | 61 + ANDROID_3.4.5/arch/x86/include/asm/local.h | 197 ++ ANDROID_3.4.5/arch/x86/include/asm/local64.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/mach_timer.h | 48 + ANDROID_3.4.5/arch/x86/include/asm/mach_traps.h | 43 + ANDROID_3.4.5/arch/x86/include/asm/math_emu.h | 18 + ANDROID_3.4.5/arch/x86/include/asm/mc146818rtc.h | 103 + ANDROID_3.4.5/arch/x86/include/asm/mca.h | 43 + ANDROID_3.4.5/arch/x86/include/asm/mca_dma.h | 201 ++ ANDROID_3.4.5/arch/x86/include/asm/mce.h | 252 +++ ANDROID_3.4.5/arch/x86/include/asm/microcode.h | 66 + ANDROID_3.4.5/arch/x86/include/asm/mman.h | 8 + ANDROID_3.4.5/arch/x86/include/asm/mmconfig.h | 12 + ANDROID_3.4.5/arch/x86/include/asm/mmu.h | 32 + ANDROID_3.4.5/arch/x86/include/asm/mmu_context.h | 93 + ANDROID_3.4.5/arch/x86/include/asm/mmx.h | 14 + ANDROID_3.4.5/arch/x86/include/asm/mmzone.h | 5 + ANDROID_3.4.5/arch/x86/include/asm/mmzone_32.h | 70 + 
ANDROID_3.4.5/arch/x86/include/asm/mmzone_64.h | 17 + ANDROID_3.4.5/arch/x86/include/asm/module.h | 66 + ANDROID_3.4.5/arch/x86/include/asm/mpspec.h | 175 ++ ANDROID_3.4.5/arch/x86/include/asm/mpspec_def.h | 174 ++ ANDROID_3.4.5/arch/x86/include/asm/mrst-vrtc.h | 9 + ANDROID_3.4.5/arch/x86/include/asm/mrst.h | 81 + ANDROID_3.4.5/arch/x86/include/asm/msgbuf.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/mshyperv.h | 14 + ANDROID_3.4.5/arch/x86/include/asm/msidef.h | 56 + ANDROID_3.4.5/arch/x86/include/asm/msr-index.h | 472 +++++ ANDROID_3.4.5/arch/x86/include/asm/msr.h | 319 +++ ANDROID_3.4.5/arch/x86/include/asm/mtrr.h | 213 ++ ANDROID_3.4.5/arch/x86/include/asm/mutex.h | 5 + ANDROID_3.4.5/arch/x86/include/asm/mutex_32.h | 125 ++ ANDROID_3.4.5/arch/x86/include/asm/mutex_64.h | 100 + ANDROID_3.4.5/arch/x86/include/asm/mwait.h | 15 + ANDROID_3.4.5/arch/x86/include/asm/nmi.h | 47 + ANDROID_3.4.5/arch/x86/include/asm/nops.h | 142 ++ ANDROID_3.4.5/arch/x86/include/asm/numa.h | 85 + ANDROID_3.4.5/arch/x86/include/asm/numa_32.h | 12 + ANDROID_3.4.5/arch/x86/include/asm/numa_64.h | 6 + .../arch/x86/include/asm/numachip/numachip_csr.h | 167 ++ ANDROID_3.4.5/arch/x86/include/asm/numaq.h | 171 ++ ANDROID_3.4.5/arch/x86/include/asm/olpc.h | 151 ++ ANDROID_3.4.5/arch/x86/include/asm/olpc_ofw.h | 37 + ANDROID_3.4.5/arch/x86/include/asm/page.h | 71 + ANDROID_3.4.5/arch/x86/include/asm/page_32.h | 51 + ANDROID_3.4.5/arch/x86/include/asm/page_32_types.h | 57 + ANDROID_3.4.5/arch/x86/include/asm/page_64.h | 6 + ANDROID_3.4.5/arch/x86/include/asm/page_64_types.h | 75 + ANDROID_3.4.5/arch/x86/include/asm/page_types.h | 61 + ANDROID_3.4.5/arch/x86/include/asm/param.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/paravirt.h | 1051 ++++++++++ .../arch/x86/include/asm/paravirt_types.h | 703 +++++++ ANDROID_3.4.5/arch/x86/include/asm/parport.h | 10 + ANDROID_3.4.5/arch/x86/include/asm/pat.h | 27 + ANDROID_3.4.5/arch/x86/include/asm/pci-direct.h | 21 + 
ANDROID_3.4.5/arch/x86/include/asm/pci-functions.h | 19 + ANDROID_3.4.5/arch/x86/include/asm/pci.h | 174 ++ ANDROID_3.4.5/arch/x86/include/asm/pci_64.h | 27 + ANDROID_3.4.5/arch/x86/include/asm/pci_x86.h | 199 ++ ANDROID_3.4.5/arch/x86/include/asm/percpu.h | 589 ++++++ ANDROID_3.4.5/arch/x86/include/asm/perf_event.h | 252 +++ ANDROID_3.4.5/arch/x86/include/asm/perf_event_p4.h | 876 +++++++++ ANDROID_3.4.5/arch/x86/include/asm/pgalloc.h | 139 ++ .../arch/x86/include/asm/pgtable-2level.h | 111 ++ .../arch/x86/include/asm/pgtable-2level_types.h | 37 + .../arch/x86/include/asm/pgtable-3level.h | 196 ++ .../arch/x86/include/asm/pgtable-3level_types.h | 48 + ANDROID_3.4.5/arch/x86/include/asm/pgtable.h | 770 ++++++++ ANDROID_3.4.5/arch/x86/include/asm/pgtable_32.h | 87 + .../arch/x86/include/asm/pgtable_32_types.h | 55 + ANDROID_3.4.5/arch/x86/include/asm/pgtable_64.h | 187 ++ .../arch/x86/include/asm/pgtable_64_types.h | 63 + ANDROID_3.4.5/arch/x86/include/asm/pgtable_types.h | 340 ++++ ANDROID_3.4.5/arch/x86/include/asm/poll.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/posix_types.h | 15 + .../arch/x86/include/asm/posix_types_32.h | 28 + .../arch/x86/include/asm/posix_types_64.h | 19 + .../arch/x86/include/asm/posix_types_x32.h | 19 + ANDROID_3.4.5/arch/x86/include/asm/prctl.h | 9 + ANDROID_3.4.5/arch/x86/include/asm/probe_roms.h | 8 + .../arch/x86/include/asm/processor-cyrix.h | 38 + .../arch/x86/include/asm/processor-flags.h | 103 + ANDROID_3.4.5/arch/x86/include/asm/processor.h | 987 ++++++++++ ANDROID_3.4.5/arch/x86/include/asm/prom.h | 48 + ANDROID_3.4.5/arch/x86/include/asm/proto.h | 26 + ANDROID_3.4.5/arch/x86/include/asm/ptrace-abi.h | 87 + ANDROID_3.4.5/arch/x86/include/asm/ptrace.h | 306 +++ ANDROID_3.4.5/arch/x86/include/asm/pvclock-abi.h | 44 + ANDROID_3.4.5/arch/x86/include/asm/pvclock.h | 59 + ANDROID_3.4.5/arch/x86/include/asm/reboot.h | 29 + ANDROID_3.4.5/arch/x86/include/asm/reboot_fixups.h | 6 + .../arch/x86/include/asm/required-features.h | 90 + 
ANDROID_3.4.5/arch/x86/include/asm/resource.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/resume-trace.h | 21 + ANDROID_3.4.5/arch/x86/include/asm/rio.h | 63 + ANDROID_3.4.5/arch/x86/include/asm/rtc.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/rwlock.h | 49 + ANDROID_3.4.5/arch/x86/include/asm/rwsem.h | 211 ++ ANDROID_3.4.5/arch/x86/include/asm/scatterlist.h | 8 + ANDROID_3.4.5/arch/x86/include/asm/seccomp.h | 5 + ANDROID_3.4.5/arch/x86/include/asm/seccomp_32.h | 11 + ANDROID_3.4.5/arch/x86/include/asm/seccomp_64.h | 17 + ANDROID_3.4.5/arch/x86/include/asm/sections.h | 14 + ANDROID_3.4.5/arch/x86/include/asm/segment.h | 272 +++ ANDROID_3.4.5/arch/x86/include/asm/sembuf.h | 24 + ANDROID_3.4.5/arch/x86/include/asm/serial.h | 29 + ANDROID_3.4.5/arch/x86/include/asm/serpent.h | 63 + ANDROID_3.4.5/arch/x86/include/asm/setup.h | 128 ++ ANDROID_3.4.5/arch/x86/include/asm/setup_arch.h | 3 + ANDROID_3.4.5/arch/x86/include/asm/shmbuf.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/shmparam.h | 6 + ANDROID_3.4.5/arch/x86/include/asm/sigcontext.h | 293 +++ ANDROID_3.4.5/arch/x86/include/asm/sigcontext32.h | 77 + ANDROID_3.4.5/arch/x86/include/asm/sigframe.h | 83 + ANDROID_3.4.5/arch/x86/include/asm/sighandling.h | 24 + ANDROID_3.4.5/arch/x86/include/asm/siginfo.h | 16 + ANDROID_3.4.5/arch/x86/include/asm/signal.h | 264 +++ ANDROID_3.4.5/arch/x86/include/asm/smp.h | 235 +++ ANDROID_3.4.5/arch/x86/include/asm/smpboot_hooks.h | 68 + ANDROID_3.4.5/arch/x86/include/asm/socket.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/sockios.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/sparsemem.h | 34 + ANDROID_3.4.5/arch/x86/include/asm/special_insns.h | 199 ++ ANDROID_3.4.5/arch/x86/include/asm/spinlock.h | 243 +++ .../arch/x86/include/asm/spinlock_types.h | 33 + .../arch/x86/include/asm/stackprotector.h | 121 ++ ANDROID_3.4.5/arch/x86/include/asm/stacktrace.h | 118 ++ ANDROID_3.4.5/arch/x86/include/asm/stat.h | 114 ++ ANDROID_3.4.5/arch/x86/include/asm/statfs.h | 12 + 
ANDROID_3.4.5/arch/x86/include/asm/string.h | 5 + ANDROID_3.4.5/arch/x86/include/asm/string_32.h | 342 ++++ ANDROID_3.4.5/arch/x86/include/asm/string_64.h | 68 + ANDROID_3.4.5/arch/x86/include/asm/suspend.h | 5 + ANDROID_3.4.5/arch/x86/include/asm/suspend_32.h | 27 + ANDROID_3.4.5/arch/x86/include/asm/suspend_64.h | 49 + ANDROID_3.4.5/arch/x86/include/asm/svm.h | 352 ++++ ANDROID_3.4.5/arch/x86/include/asm/swab.h | 61 + ANDROID_3.4.5/arch/x86/include/asm/swiotlb.h | 32 + ANDROID_3.4.5/arch/x86/include/asm/switch_to.h | 129 ++ ANDROID_3.4.5/arch/x86/include/asm/sync_bitops.h | 130 ++ ANDROID_3.4.5/arch/x86/include/asm/sys_ia32.h | 89 + ANDROID_3.4.5/arch/x86/include/asm/syscall.h | 217 ++ ANDROID_3.4.5/arch/x86/include/asm/syscalls.h | 69 + ANDROID_3.4.5/arch/x86/include/asm/tce.h | 48 + ANDROID_3.4.5/arch/x86/include/asm/termbits.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/termios.h | 1 + ANDROID_3.4.5/arch/x86/include/asm/thread_info.h | 289 +++ ANDROID_3.4.5/arch/x86/include/asm/time.h | 12 + ANDROID_3.4.5/arch/x86/include/asm/timer.h | 79 + ANDROID_3.4.5/arch/x86/include/asm/timex.h | 12 + ANDROID_3.4.5/arch/x86/include/asm/tlb.h | 11 + ANDROID_3.4.5/arch/x86/include/asm/tlbflush.h | 175 ++ ANDROID_3.4.5/arch/x86/include/asm/topology.h | 196 ++ ANDROID_3.4.5/arch/x86/include/asm/trampoline.h | 39 + ANDROID_3.4.5/arch/x86/include/asm/traps.h | 117 ++ ANDROID_3.4.5/arch/x86/include/asm/tsc.h | 67 + ANDROID_3.4.5/arch/x86/include/asm/types.h | 6 + ANDROID_3.4.5/arch/x86/include/asm/uaccess.h | 581 ++++++ ANDROID_3.4.5/arch/x86/include/asm/uaccess_32.h | 236 +++ ANDROID_3.4.5/arch/x86/include/asm/uaccess_64.h | 249 +++ ANDROID_3.4.5/arch/x86/include/asm/ucontext.h | 12 + ANDROID_3.4.5/arch/x86/include/asm/unaligned.h | 14 + ANDROID_3.4.5/arch/x86/include/asm/unistd.h | 73 + ANDROID_3.4.5/arch/x86/include/asm/user.h | 63 + ANDROID_3.4.5/arch/x86/include/asm/user32.h | 70 + ANDROID_3.4.5/arch/x86/include/asm/user_32.h | 131 ++ 
ANDROID_3.4.5/arch/x86/include/asm/user_64.h | 137 ++ ANDROID_3.4.5/arch/x86/include/asm/uv/bios.h | 114 ++ ANDROID_3.4.5/arch/x86/include/asm/uv/uv.h | 34 + ANDROID_3.4.5/arch/x86/include/asm/uv/uv_bau.h | 775 ++++++++ ANDROID_3.4.5/arch/x86/include/asm/uv/uv_hub.h | 609 ++++++ ANDROID_3.4.5/arch/x86/include/asm/uv/uv_irq.h | 38 + ANDROID_3.4.5/arch/x86/include/asm/uv/uv_mmrs.h | 2077 ++++++++++++++++++++ ANDROID_3.4.5/arch/x86/include/asm/vdso.h | 33 + ANDROID_3.4.5/arch/x86/include/asm/vga.h | 20 + ANDROID_3.4.5/arch/x86/include/asm/vgtod.h | 30 + ANDROID_3.4.5/arch/x86/include/asm/virtext.h | 131 ++ ANDROID_3.4.5/arch/x86/include/asm/visws/cobalt.h | 127 ++ ANDROID_3.4.5/arch/x86/include/asm/visws/lithium.h | 53 + ANDROID_3.4.5/arch/x86/include/asm/visws/piix4.h | 107 + ANDROID_3.4.5/arch/x86/include/asm/visws/sgivw.h | 5 + ANDROID_3.4.5/arch/x86/include/asm/vm86.h | 208 ++ ANDROID_3.4.5/arch/x86/include/asm/vmx.h | 484 +++++ ANDROID_3.4.5/arch/x86/include/asm/vsyscall.h | 38 + ANDROID_3.4.5/arch/x86/include/asm/vvar.h | 50 + .../arch/x86/include/asm/word-at-a-time.h | 79 + ANDROID_3.4.5/arch/x86/include/asm/x2apic.h | 67 + ANDROID_3.4.5/arch/x86/include/asm/x86_init.h | 199 ++ ANDROID_3.4.5/arch/x86/include/asm/xcr.h | 49 + ANDROID_3.4.5/arch/x86/include/asm/xen/events.h | 18 + ANDROID_3.4.5/arch/x86/include/asm/xen/hypercall.h | 602 ++++++ .../arch/x86/include/asm/xen/hypervisor.h | 75 + ANDROID_3.4.5/arch/x86/include/asm/xen/interface.h | 177 ++ .../arch/x86/include/asm/xen/interface_32.h | 102 + .../arch/x86/include/asm/xen/interface_64.h | 148 ++ ANDROID_3.4.5/arch/x86/include/asm/xen/page.h | 213 ++ ANDROID_3.4.5/arch/x86/include/asm/xen/pci.h | 82 + .../arch/x86/include/asm/xen/swiotlb-xen.h | 14 + .../arch/x86/include/asm/xen/trace_types.h | 18 + ANDROID_3.4.5/arch/x86/include/asm/xor.h | 10 + ANDROID_3.4.5/arch/x86/include/asm/xor_32.h | 888 +++++++++ ANDROID_3.4.5/arch/x86/include/asm/xor_64.h | 361 ++++ ANDROID_3.4.5/arch/x86/include/asm/xsave.h | 
150 ++ 332 files changed, 43046 insertions(+) create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/Kbuild create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/a.out-core.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/a.out.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/acpi.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/aes.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/agp.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/alternative-asm.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/alternative.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/amd_nb.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/apb_timer.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/apic.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/apic_flat_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/apicdef.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/apm.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/arch_hweight.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/archrandom.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/asm-offsets.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/asm.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/atomic.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/atomic64_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/atomic64_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/auxvec.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/barrier.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/bios_ebda.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/bitops.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/bitsperlong.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/boot.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/bootparam.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/bug.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/bugs.h create mode 100644 
ANDROID_3.4.5/arch/x86/include/asm/byteorder.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/cache.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/cacheflush.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/calgary.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/calling.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/ce4100.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/checksum.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/checksum_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/checksum_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/clocksource.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/cmpxchg.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/cmpxchg_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/cmpxchg_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/compat.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/cpu.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/cpu_device_id.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/cpufeature.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/cpumask.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/cputime.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/current.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/debugreg.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/delay.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/desc.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/desc_defs.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/device.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/div64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/dma-mapping.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/dma.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/dmi.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/dwarf2.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/e820.h create mode 100644 
ANDROID_3.4.5/arch/x86/include/asm/edac.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/efi.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/elf.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/emergency-restart.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/entry_arch.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/errno.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/exec.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/fb.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/fcntl.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/fixmap.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/floppy.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/fpu-internal.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/frame.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/ftrace.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/futex.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/gart.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/genapic.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/geode.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/gpio.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/hardirq.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/highmem.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/hpet.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/hugetlb.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/hw_breakpoint.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/hw_irq.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/hypertransport.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/hyperv.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/hypervisor.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/i387.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/i8259.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/ia32.h create mode 100644 
ANDROID_3.4.5/arch/x86/include/asm/ia32_unistd.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/idle.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/inat.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/inat_types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/init.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/insn.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/inst.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/intel_scu_ipc.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/io.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/io_apic.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/ioctl.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/ioctls.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/iomap.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/iommu.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/iommu_table.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/ipcbuf.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/ipi.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/irq.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/irq_regs.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/irq_remapping.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/irq_vectors.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/irqflags.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/ist.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/jump_label.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/kdebug.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/kexec.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/kgdb.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/kmap_types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/kmemcheck.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/kprobes.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/kvm.h create mode 100644 
ANDROID_3.4.5/arch/x86/include/asm/kvm_emulate.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/kvm_host.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/kvm_para.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/ldt.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/lguest.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/lguest_hcall.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/linkage.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/local.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/local64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mach_timer.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mach_traps.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/math_emu.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mc146818rtc.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mca.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mca_dma.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mce.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/microcode.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mman.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mmconfig.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mmu.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mmu_context.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mmx.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mmzone.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mmzone_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mmzone_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/module.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mpspec.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mpspec_def.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mrst-vrtc.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mrst.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/msgbuf.h create mode 100644 
ANDROID_3.4.5/arch/x86/include/asm/mshyperv.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/msidef.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/msr-index.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/msr.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mtrr.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mutex.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mutex_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mutex_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/mwait.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/nmi.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/nops.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/numa.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/numa_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/numa_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/numachip/numachip_csr.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/numaq.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/olpc.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/olpc_ofw.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/page.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/page_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/page_32_types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/page_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/page_64_types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/page_types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/param.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/paravirt.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/paravirt_types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/parport.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pat.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pci-direct.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pci-functions.h create mode 100644 
ANDROID_3.4.5/arch/x86/include/asm/pci.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pci_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pci_x86.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/percpu.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/perf_event.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/perf_event_p4.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pgalloc.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pgtable-2level.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pgtable-2level_types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pgtable-3level.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pgtable-3level_types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pgtable.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pgtable_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pgtable_32_types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pgtable_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pgtable_64_types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pgtable_types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/poll.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/posix_types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/posix_types_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/posix_types_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/posix_types_x32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/prctl.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/probe_roms.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/processor-cyrix.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/processor-flags.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/processor.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/prom.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/proto.h create mode 100644 
ANDROID_3.4.5/arch/x86/include/asm/ptrace-abi.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/ptrace.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pvclock-abi.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/pvclock.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/reboot.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/reboot_fixups.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/required-features.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/resource.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/resume-trace.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/rio.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/rtc.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/rwlock.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/rwsem.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/scatterlist.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/seccomp.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/seccomp_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/seccomp_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/sections.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/segment.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/sembuf.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/serial.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/serpent.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/setup.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/setup_arch.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/shmbuf.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/shmparam.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/sigcontext.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/sigcontext32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/sigframe.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/sighandling.h create mode 100644 
ANDROID_3.4.5/arch/x86/include/asm/siginfo.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/signal.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/smp.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/smpboot_hooks.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/socket.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/sockios.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/sparsemem.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/special_insns.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/spinlock.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/spinlock_types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/stackprotector.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/stacktrace.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/stat.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/statfs.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/string.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/string_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/string_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/suspend.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/suspend_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/suspend_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/svm.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/swab.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/swiotlb.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/switch_to.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/sync_bitops.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/sys_ia32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/syscall.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/syscalls.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/tce.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/termbits.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/termios.h 
create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/thread_info.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/time.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/timer.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/timex.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/tlb.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/tlbflush.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/topology.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/trampoline.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/traps.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/tsc.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/uaccess.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/uaccess_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/uaccess_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/ucontext.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/unaligned.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/unistd.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/user.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/user32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/user_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/user_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/uv/bios.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/uv/uv.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/uv/uv_bau.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/uv/uv_hub.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/uv/uv_irq.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/uv/uv_mmrs.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/vdso.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/vga.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/vgtod.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/virtext.h create mode 100644 
ANDROID_3.4.5/arch/x86/include/asm/visws/cobalt.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/visws/lithium.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/visws/piix4.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/visws/sgivw.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/vm86.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/vmx.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/vsyscall.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/vvar.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/word-at-a-time.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/x2apic.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/x86_init.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xcr.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xen/events.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xen/hypercall.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xen/hypervisor.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xen/interface.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xen/interface_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xen/interface_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xen/page.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xen/pci.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xen/swiotlb-xen.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xen/trace_types.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xor.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xor_32.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xor_64.h create mode 100644 ANDROID_3.4.5/arch/x86/include/asm/xsave.h (limited to 'ANDROID_3.4.5/arch/x86/include/asm') diff --git a/ANDROID_3.4.5/arch/x86/include/asm/Kbuild b/ANDROID_3.4.5/arch/x86/include/asm/Kbuild new file mode 100644 index 00000000..f9c0d3ba --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/Kbuild @@ -0,0 +1,28 @@ +include 
include/asm-generic/Kbuild.asm + +header-y += boot.h +header-y += bootparam.h +header-y += debugreg.h +header-y += e820.h +header-y += hw_breakpoint.h +header-y += hyperv.h +header-y += ist.h +header-y += ldt.h +header-y += mce.h +header-y += msr-index.h +header-y += msr.h +header-y += mtrr.h +header-y += posix_types_32.h +header-y += posix_types_64.h +header-y += posix_types_x32.h +header-y += prctl.h +header-y += processor-flags.h +header-y += ptrace-abi.h +header-y += sigcontext32.h +header-y += ucontext.h +header-y += vm86.h +header-y += vsyscall.h + +genhdr-y += unistd_32.h +genhdr-y += unistd_64.h +genhdr-y += unistd_x32.h diff --git a/ANDROID_3.4.5/arch/x86/include/asm/a.out-core.h b/ANDROID_3.4.5/arch/x86/include/asm/a.out-core.h new file mode 100644 index 00000000..7a15588e --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/a.out-core.h @@ -0,0 +1,65 @@ +/* a.out coredump register dumper + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _ASM_X86_A_OUT_CORE_H +#define _ASM_X86_A_OUT_CORE_H + +#ifdef __KERNEL__ +#ifdef CONFIG_X86_32 + +#include +#include +#include + +/* + * fill in the user structure for an a.out core dump + */ +static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) +{ +/* changed the size calculations - should hopefully work better. 
lbt */ + dump->magic = CMAGIC; + dump->start_code = 0; + dump->start_stack = regs->sp & ~(PAGE_SIZE - 1); + dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT; + dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1))) + >> PAGE_SHIFT; + dump->u_dsize -= dump->u_tsize; + dump->u_ssize = 0; + aout_dump_debugregs(dump); + + if (dump->start_stack < TASK_SIZE) + dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack)) + >> PAGE_SHIFT; + + dump->regs.bx = regs->bx; + dump->regs.cx = regs->cx; + dump->regs.dx = regs->dx; + dump->regs.si = regs->si; + dump->regs.di = regs->di; + dump->regs.bp = regs->bp; + dump->regs.ax = regs->ax; + dump->regs.ds = (u16)regs->ds; + dump->regs.es = (u16)regs->es; + dump->regs.fs = (u16)regs->fs; + dump->regs.gs = get_user_gs(regs); + dump->regs.orig_ax = regs->orig_ax; + dump->regs.ip = regs->ip; + dump->regs.cs = (u16)regs->cs; + dump->regs.flags = regs->flags; + dump->regs.sp = regs->sp; + dump->regs.ss = (u16)regs->ss; + + dump->u_fpvalid = dump_fpu(regs, &dump->i387); +} + +#endif /* CONFIG_X86_32 */ +#endif /* __KERNEL__ */ +#endif /* _ASM_X86_A_OUT_CORE_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/a.out.h b/ANDROID_3.4.5/arch/x86/include/asm/a.out.h new file mode 100644 index 00000000..4684f97a --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/a.out.h @@ -0,0 +1,20 @@ +#ifndef _ASM_X86_A_OUT_H +#define _ASM_X86_A_OUT_H + +struct exec +{ + unsigned int a_info; /* Use macros N_MAGIC, etc for access */ + unsigned a_text; /* length of text, in bytes */ + unsigned a_data; /* length of data, in bytes */ + unsigned a_bss; /* length of uninitialized data area for file, in bytes */ + unsigned a_syms; /* length of symbol table data in file, in bytes */ + unsigned a_entry; /* start address */ + unsigned a_trsize; /* length of relocation info for text, in bytes */ + unsigned a_drsize; /* length of relocation info for data, in bytes */ +}; + +#define N_TRSIZE(a) ((a).a_trsize) +#define 
N_DRSIZE(a) ((a).a_drsize) +#define N_SYMSIZE(a) ((a).a_syms) + +#endif /* _ASM_X86_A_OUT_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/acpi.h b/ANDROID_3.4.5/arch/x86/include/asm/acpi.h new file mode 100644 index 00000000..610001d3 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/acpi.h @@ -0,0 +1,193 @@ +#ifndef _ASM_X86_ACPI_H +#define _ASM_X86_ACPI_H + +/* + * Copyright (C) 2001 Paul Diefenbaugh + * Copyright (C) 2001 Patrick Mochel + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include + +#include +#include +#include +#include +#include + +#define COMPILER_DEPENDENT_INT64 long long +#define COMPILER_DEPENDENT_UINT64 unsigned long long + +/* + * Calling conventions: + * + * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) + * ACPI_EXTERNAL_XFACE - External ACPI interfaces + * ACPI_INTERNAL_XFACE - Internal ACPI interfaces + * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces + */ +#define ACPI_SYSTEM_XFACE +#define ACPI_EXTERNAL_XFACE +#define ACPI_INTERNAL_XFACE +#define ACPI_INTERNAL_VAR_XFACE + +/* Asm macros */ + +#define ACPI_ASM_MACROS +#define BREAKPOINT3 +#define ACPI_DISABLE_IRQS() local_irq_disable() +#define ACPI_ENABLE_IRQS() local_irq_enable() +#define ACPI_FLUSH_CPU_CACHE() wbinvd() + +int __acpi_acquire_global_lock(unsigned int *lock); +int __acpi_release_global_lock(unsigned int *lock); + +#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) + +#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_release_global_lock(&facs->global_lock)) + +/* + * Math helper asm macros + */ +#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ + asm("divl %2;" \ + : "=a"(q32), "=d"(r32) \ + : "r"(d32), \ + "0"(n_lo), "1"(n_hi)) + + +#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ + asm("shrl $1,%2 ;" \ + "rcrl $1,%3;" \ + : "=r"(n_hi), "=r"(n_lo) \ + : "0"(n_hi), "1"(n_lo)) + +#ifdef CONFIG_ACPI +extern int acpi_lapic; +extern int acpi_ioapic; +extern int acpi_noirq; +extern int acpi_strict; +extern int acpi_disabled; +extern int acpi_pci_disabled; +extern int acpi_skip_timer_override; +extern int acpi_use_timer_override; +extern int 
acpi_fix_pin2_polarity; + +extern u8 acpi_sci_flags; +extern int acpi_sci_override_gsi; +void acpi_pic_sci_set_trigger(unsigned int, u16); + +extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi, + int trigger, int polarity); + +static inline void disable_acpi(void) +{ + acpi_disabled = 1; + acpi_pci_disabled = 1; + acpi_noirq = 1; +} + +extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); + +static inline void acpi_noirq_set(void) { acpi_noirq = 1; } +static inline void acpi_disable_pci(void) +{ + acpi_pci_disabled = 1; + acpi_noirq_set(); +} + +/* Low-level suspend routine. */ +extern int acpi_suspend_lowlevel(void); + +extern const unsigned char acpi_wakeup_code[]; +#define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code))) + +/* early initialization routine */ +extern void acpi_reserve_wakeup_memory(void); + +/* + * Check if the CPU can handle C2 and deeper + */ +static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) +{ + /* + * Early models (<=5) of AMD Opterons are not supposed to go into + * C2 state. 
+ * + * Steppings 0x0A and later are good + */ + if (boot_cpu_data.x86 == 0x0F && + boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86_model <= 0x05 && + boot_cpu_data.x86_mask < 0x0A) + return 1; + else if (amd_e400_c1e_detected) + return 1; + else + return max_cstate; +} + +static inline bool arch_has_acpi_pdc(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + return (c->x86_vendor == X86_VENDOR_INTEL || + c->x86_vendor == X86_VENDOR_CENTAUR); +} + +static inline void arch_acpi_set_pdc_bits(u32 *buf) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + + buf[2] |= ACPI_PDC_C_CAPABILITY_SMP; + + if (cpu_has(c, X86_FEATURE_EST)) + buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP; + + if (cpu_has(c, X86_FEATURE_ACPI)) + buf[2] |= ACPI_PDC_T_FFH; + + /* + * If mwait/monitor is unsupported, C2/C3_FFH will be disabled + */ + if (!cpu_has(c, X86_FEATURE_MWAIT)) + buf[2] &= ~(ACPI_PDC_C_C2C3_FFH); +} + +#else /* !CONFIG_ACPI */ + +#define acpi_lapic 0 +#define acpi_ioapic 0 +static inline void acpi_noirq_set(void) { } +static inline void acpi_disable_pci(void) { } +static inline void disable_acpi(void) { } + +#endif /* !CONFIG_ACPI */ + +#define ARCH_HAS_POWER_INIT 1 + +#ifdef CONFIG_ACPI_NUMA +extern int acpi_numa; +extern int x86_acpi_numa_init(void); +#endif /* CONFIG_ACPI_NUMA */ + +#define acpi_unlazy_tlb(x) leave_mm(x) + +#endif /* _ASM_X86_ACPI_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/aes.h b/ANDROID_3.4.5/arch/x86/include/asm/aes.h new file mode 100644 index 00000000..80545a1c --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/aes.h @@ -0,0 +1,11 @@ +#ifndef ASM_X86_AES_H +#define ASM_X86_AES_H + +#include +#include + +void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, + const u8 *src); +void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, + const u8 *src); +#endif diff --git a/ANDROID_3.4.5/arch/x86/include/asm/agp.h b/ANDROID_3.4.5/arch/x86/include/asm/agp.h new file mode 100644 index 00000000..eec2a70d --- /dev/null +++ 
b/ANDROID_3.4.5/arch/x86/include/asm/agp.h @@ -0,0 +1,31 @@ +#ifndef _ASM_X86_AGP_H +#define _ASM_X86_AGP_H + +#include +#include + +/* + * Functions to keep the agpgart mappings coherent with the MMU. The + * GART gives the CPU a physical alias of pages in memory. The alias + * region is mapped uncacheable. Make sure there are no conflicting + * mappings with different cachability attributes for the same + * page. This avoids data corruption on some CPUs. + */ + +#define map_page_into_agp(page) set_pages_uc(page, 1) +#define unmap_page_from_agp(page) set_pages_wb(page, 1) + +/* + * Could use CLFLUSH here if the cpu supports it. But then it would + * need to be called for each cacheline of the whole page so it may + * not be worth it. Would need a page for it. + */ +#define flush_agp_cache() wbinvd() + +/* GATT allocation. Returns/accepts GATT kernel virtual address. */ +#define alloc_gatt_pages(order) \ + ((char *)__get_free_pages(GFP_KERNEL, (order))) +#define free_gatt_pages(table, order) \ + free_pages((unsigned long)(table), (order)) + +#endif /* _ASM_X86_AGP_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/alternative-asm.h b/ANDROID_3.4.5/arch/x86/include/asm/alternative-asm.h new file mode 100644 index 00000000..952bd010 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/alternative-asm.h @@ -0,0 +1,26 @@ +#ifdef __ASSEMBLY__ + +#include + +#ifdef CONFIG_SMP + .macro LOCK_PREFIX +672: lock + .section .smp_locks,"a" + .balign 4 + .long 672b - . + .previous + .endm +#else + .macro LOCK_PREFIX + .endm +#endif + +.macro altinstruction_entry orig alt feature orig_len alt_len + .long \orig - . + .long \alt - . 
+ .word \feature + .byte \orig_len + .byte \alt_len +.endm + +#endif /* __ASSEMBLY__ */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/alternative.h b/ANDROID_3.4.5/arch/x86/include/asm/alternative.h new file mode 100644 index 00000000..49331bed --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/alternative.h @@ -0,0 +1,195 @@ +#ifndef _ASM_X86_ALTERNATIVE_H +#define _ASM_X86_ALTERNATIVE_H + +#include +#include +#include +#include + +/* + * Alternative inline assembly for SMP. + * + * The LOCK_PREFIX macro defined here replaces the LOCK and + * LOCK_PREFIX macros used everywhere in the source tree. + * + * SMP alternatives use the same data structures as the other + * alternatives and the X86_FEATURE_UP flag to indicate the case of a + * UP system running a SMP kernel. The existing apply_alternatives() + * works fine for patching a SMP kernel for UP. + * + * The SMP alternative tables can be kept after boot and contain both + * UP and SMP versions of the instructions to allow switching back to + * SMP at runtime, when hotplugging in a new CPU, which is especially + * useful in virtualized environments. + * + * The very common lock prefix is handled as special case in a + * separate table which is a pure address list without replacement ptr + * and size information. That keeps the table sizes small. + */ + +#ifdef CONFIG_SMP +#define LOCK_PREFIX_HERE \ + ".section .smp_locks,\"a\"\n" \ + ".balign 4\n" \ + ".long 671f - .\n" /* offset */ \ + ".previous\n" \ + "671:" + +#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; " + +#else /* ! 
CONFIG_SMP */ +#define LOCK_PREFIX_HERE "" +#define LOCK_PREFIX "" +#endif + +struct alt_instr { + s32 instr_offset; /* original instruction */ + s32 repl_offset; /* offset to replacement instruction */ + u16 cpuid; /* cpuid bit set for replacement */ + u8 instrlen; /* length of original instruction */ + u8 replacementlen; /* length of new instruction, <= instrlen */ +}; + +extern void alternative_instructions(void); +extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); + +struct module; + +#ifdef CONFIG_SMP +extern void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, + void *text, void *text_end); +extern void alternatives_smp_module_del(struct module *mod); +extern void alternatives_smp_switch(int smp); +extern int alternatives_text_reserved(void *start, void *end); +extern bool skip_smp_alternatives; +#else +static inline void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, + void *text, void *text_end) {} +static inline void alternatives_smp_module_del(struct module *mod) {} +static inline void alternatives_smp_switch(int smp) {} +static inline int alternatives_text_reserved(void *start, void *end) +{ + return 0; +} +#endif /* CONFIG_SMP */ + +/* alternative assembly primitive: */ +#define ALTERNATIVE(oldinstr, newinstr, feature) \ + \ + "661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .long 661b - .\n" /* label */ \ + " .long 663f - .\n" /* new instruction */ \ + " .word " __stringify(feature) "\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .discard,\"aw\",@progbits\n" \ + " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \ + ".previous\n" \ + ".section .altinstr_replacement, \"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" + +/* + * This must be included *after* the definition of 
ALTERNATIVE due to + * + */ +#include + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * length of oldinstr must be longer or equal the length of newinstr + * It can be padded with nops as needed. + * + * For non barrier like inlines please define new variants + * without volatile and memory clobber. + */ +#define alternative(oldinstr, newinstr, feature) \ + asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory") + +/* + * Alternative inline assembly with input. + * + * Pecularities: + * No memory clobber here. + * Argument numbers start with 1. + * Best is to use constraints that are fixed size (like (%1) ... "r") + * If you use variable sized constraints like "m" or "g" in the + * replacement make sure to pad to the worst case length. + * Leaving an unused argument 0 to keep API compatibility. + */ +#define alternative_input(oldinstr, newinstr, feature, input...) \ + asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ + : : "i" (0), ## input) + +/* Like alternative_input, but with a single output argument */ +#define alternative_io(oldinstr, newinstr, feature, output, input...) \ + asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ + : output : "i" (0), ## input) + +/* Like alternative_io, but for replacing a direct call with another one. */ +#define alternative_call(oldfunc, newfunc, feature, output, input...) \ + asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \ + : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input) + +/* + * use this macro(s) if you need more than one output parameter + * in alternative_io + */ +#define ASM_OUTPUT2(a...) a + +/* + * use this macro if you need clobbers but no inputs in + * alternative_{input,io,call}() + */ +#define ASM_NO_INPUT_CLOBBER(clbr...) 
"i" (0) : clbr + +struct paravirt_patch_site; +#ifdef CONFIG_PARAVIRT +void apply_paravirt(struct paravirt_patch_site *start, + struct paravirt_patch_site *end); +#else +static inline void apply_paravirt(struct paravirt_patch_site *start, + struct paravirt_patch_site *end) +{} +#define __parainstructions NULL +#define __parainstructions_end NULL +#endif + +extern void *text_poke_early(void *addr, const void *opcode, size_t len); + +/* + * Clear and restore the kernel write-protection flag on the local CPU. + * Allows the kernel to edit read-only pages. + * Side-effect: any interrupt handler running between save and restore will have + * the ability to write to read-only pages. + * + * Warning: + * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and + * no thread can be preempted in the instructions being modified (no iret to an + * invalid instruction possible) or if the instructions are changed from a + * consistent state to another consistent state atomically. + * More care must be taken when modifying code in the SMP case because of + * Intel's errata. text_poke_smp() takes care that errata, but still + * doesn't support NMI/MCE handler code modifying. + * On the local CPU you need to be protected again NMI or MCE handlers seeing an + * inconsistent instruction while you patch. 
+ */ +struct text_poke_param { + void *addr; + const void *opcode; + size_t len; +}; + +extern void *text_poke(void *addr, const void *opcode, size_t len); +extern void *text_poke_smp(void *addr, const void *opcode, size_t len); +extern void text_poke_smp_batch(struct text_poke_param *params, int n); + +#endif /* _ASM_X86_ALTERNATIVE_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/amd_nb.h b/ANDROID_3.4.5/arch/x86/include/asm/amd_nb.h new file mode 100644 index 00000000..49ad773f --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/amd_nb.h @@ -0,0 +1,72 @@ +#ifndef _ASM_X86_AMD_NB_H +#define _ASM_X86_AMD_NB_H + +#include +#include + +struct amd_nb_bus_dev_range { + u8 bus; + u8 dev_base; + u8 dev_limit; +}; + +extern const struct pci_device_id amd_nb_misc_ids[]; +extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[]; + +extern bool early_is_amd_nb(u32 value); +extern struct resource *amd_get_mmconfig_range(struct resource *res); +extern int amd_cache_northbridges(void); +extern void amd_flush_garts(void); +extern int amd_numa_init(void); +extern int amd_get_subcaches(int); +extern int amd_set_subcaches(int, int); + +struct amd_l3_cache { + unsigned indices; + u8 subcaches[4]; +}; + +struct amd_northbridge { + struct pci_dev *misc; + struct pci_dev *link; + struct amd_l3_cache l3_cache; +}; + +struct amd_northbridge_info { + u16 num; + u64 flags; + struct amd_northbridge *nb; +}; +extern struct amd_northbridge_info amd_northbridges; + +#define AMD_NB_GART BIT(0) +#define AMD_NB_L3_INDEX_DISABLE BIT(1) +#define AMD_NB_L3_PARTITIONING BIT(2) + +#ifdef CONFIG_AMD_NB + +static inline u16 amd_nb_num(void) +{ + return amd_northbridges.num; +} + +static inline bool amd_nb_has_feature(unsigned feature) +{ + return ((amd_northbridges.flags & feature) == feature); +} + +static inline struct amd_northbridge *node_to_amd_nb(int node) +{ + return (node < amd_northbridges.num) ? 
&amd_northbridges.nb[node] : NULL; +} + +#else + +#define amd_nb_num(x) 0 +#define amd_nb_has_feature(x) false +#define node_to_amd_nb(x) NULL + +#endif + + +#endif /* _ASM_X86_AMD_NB_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/apb_timer.h b/ANDROID_3.4.5/arch/x86/include/asm/apb_timer.h new file mode 100644 index 00000000..0acbac29 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/apb_timer.h @@ -0,0 +1,49 @@ +/* + * apb_timer.h: Driver for Langwell APB timer based on Synopsis DesignWare + * + * (C) Copyright 2009 Intel Corporation + * Author: Jacob Pan (jacob.jun.pan@intel.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + * + * Note: + */ + +#ifndef ASM_X86_APBT_H +#define ASM_X86_APBT_H +#include + +#ifdef CONFIG_APB_TIMER + +/* default memory mapped register base */ +#define LNW_SCU_ADDR 0xFF100000 +#define LNW_EXT_TIMER_OFFSET 0x1B800 +#define APBT_DEFAULT_BASE (LNW_SCU_ADDR+LNW_EXT_TIMER_OFFSET) +#define LNW_EXT_TIMER_PGOFFSET 0x800 + +/* APBT clock speed range from PCLK to fabric base, 25-100MHz */ +#define APBT_MAX_FREQ 50000000 +#define APBT_MIN_FREQ 1000000 +#define APBT_MMAP_SIZE 1024 + +#define APBT_DEV_USED 1 + +extern void apbt_time_init(void); +extern unsigned long apbt_quick_calibrate(void); +extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu); +extern void apbt_setup_secondary_clock(void); + +extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); +extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr); +extern int sfi_mtimer_num; + +#else /* CONFIG_APB_TIMER */ + +static inline unsigned long apbt_quick_calibrate(void) {return 0; } +static inline void apbt_time_init(void) { } + +#endif +#endif /* ASM_X86_APBT_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/apic.h b/ANDROID_3.4.5/arch/x86/include/asm/apic.h new file mode 100644 
index 00000000..d8541017 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/apic.h @@ -0,0 +1,636 @@ +#ifndef _ASM_X86_APIC_H +#define _ASM_X86_APIC_H + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#define ARCH_APICTIMER_STOPS_ON_C3 1 + +/* + * Debugging macros + */ +#define APIC_QUIET 0 +#define APIC_VERBOSE 1 +#define APIC_DEBUG 2 + +/* + * Define the default level of output to be very little + * This can be turned up by using apic=verbose for more + * information and apic=debug for _lots_ of information. + * apic_verbosity is defined in apic.c + */ +#define apic_printk(v, s, a...) do { \ + if ((v) <= apic_verbosity) \ + printk(s, ##a); \ + } while (0) + + +#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) +extern void generic_apic_probe(void); +#else +static inline void generic_apic_probe(void) +{ +} +#endif + +#ifdef CONFIG_X86_LOCAL_APIC + +extern unsigned int apic_verbosity; +extern int local_apic_timer_c2_ok; + +extern int disable_apic; +extern unsigned int lapic_timer_frequency; + +#ifdef CONFIG_SMP +extern void __inquire_remote_apic(int apicid); +#else /* CONFIG_SMP */ +static inline void __inquire_remote_apic(int apicid) +{ +} +#endif /* CONFIG_SMP */ + +static inline void default_inquire_remote_apic(int apicid) +{ + if (apic_verbosity >= APIC_DEBUG) + __inquire_remote_apic(apicid); +} + +/* + * With 82489DX we can't rely on apic feature bit + * retrieved via cpuid but still have to deal with + * such an apic chip so we assume that SMP configuration + * is found from MP table (64bit case uses ACPI mostly + * which set smp presence flag as well so we are safe + * to use this helper too). + */ +static inline bool apic_from_smp_config(void) +{ + return smp_found_config && !disable_apic; +} + +/* + * Basic functions accessing APICs. 
+ */ +#ifdef CONFIG_PARAVIRT +#include +#endif + +#ifdef CONFIG_X86_64 +extern int is_vsmp_box(void); +#else +static inline int is_vsmp_box(void) +{ + return 0; +} +#endif +extern void xapic_wait_icr_idle(void); +extern u32 safe_xapic_wait_icr_idle(void); +extern void xapic_icr_write(u32, u32); +extern int setup_profiling_timer(unsigned int); + +static inline void native_apic_mem_write(u32 reg, u32 v) +{ + volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg); + + alternative_io("movl %0, %1", "xchgl %0, %1", X86_FEATURE_11AP, + ASM_OUTPUT2("=r" (v), "=m" (*addr)), + ASM_OUTPUT2("0" (v), "m" (*addr))); +} + +static inline u32 native_apic_mem_read(u32 reg) +{ + return *((volatile u32 *)(APIC_BASE + reg)); +} + +extern void native_apic_wait_icr_idle(void); +extern u32 native_safe_apic_wait_icr_idle(void); +extern void native_apic_icr_write(u32 low, u32 id); +extern u64 native_apic_icr_read(void); + +extern int x2apic_mode; + +#ifdef CONFIG_X86_X2APIC +/* + * Make previous memory operations globally visible before + * sending the IPI through x2apic wrmsr. We need a serializing instruction or + * mfence for this. 
+ */ +static inline void x2apic_wrmsr_fence(void) +{ + asm volatile("mfence" : : : "memory"); +} + +static inline void native_apic_msr_write(u32 reg, u32 v) +{ + if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || + reg == APIC_LVR) + return; + + wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0); +} + +static inline u32 native_apic_msr_read(u32 reg) +{ + u64 msr; + + if (reg == APIC_DFR) + return -1; + + rdmsrl(APIC_BASE_MSR + (reg >> 4), msr); + return (u32)msr; +} + +static inline void native_x2apic_wait_icr_idle(void) +{ + /* no need to wait for icr idle in x2apic */ + return; +} + +static inline u32 native_safe_x2apic_wait_icr_idle(void) +{ + /* no need to wait for icr idle in x2apic */ + return 0; +} + +static inline void native_x2apic_icr_write(u32 low, u32 id) +{ + wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); +} + +static inline u64 native_x2apic_icr_read(void) +{ + unsigned long val; + + rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val); + return val; +} + +extern int x2apic_phys; +extern int x2apic_preenabled; +extern void check_x2apic(void); +extern void enable_x2apic(void); +extern void x2apic_icr_write(u32 low, u32 id); +static inline int x2apic_enabled(void) +{ + u64 msr; + + if (!cpu_has_x2apic) + return 0; + + rdmsrl(MSR_IA32_APICBASE, msr); + if (msr & X2APIC_ENABLE) + return 1; + return 0; +} + +#define x2apic_supported() (cpu_has_x2apic) +static inline void x2apic_force_phys(void) +{ + x2apic_phys = 1; +} +#else +static inline void disable_x2apic(void) +{ +} +static inline void check_x2apic(void) +{ +} +static inline void enable_x2apic(void) +{ +} +static inline int x2apic_enabled(void) +{ + return 0; +} +static inline void x2apic_force_phys(void) +{ +} + +#define nox2apic 0 +#define x2apic_preenabled 0 +#define x2apic_supported() 0 +#endif + +extern void enable_IR_x2apic(void); + +extern int get_physical_broadcast(void); + +extern int lapic_get_maxlvt(void); +extern void clear_local_APIC(void); +extern void connect_bsp_APIC(void); 
+extern void disconnect_bsp_APIC(int virt_wire_setup); +extern void disable_local_APIC(void); +extern void lapic_shutdown(void); +extern int verify_local_APIC(void); +extern void sync_Arb_IDs(void); +extern void init_bsp_APIC(void); +extern void setup_local_APIC(void); +extern void end_local_APIC_setup(void); +extern void bsp_end_local_APIC_setup(void); +extern void init_apic_mappings(void); +void register_lapic_address(unsigned long address); +extern void setup_boot_APIC_clock(void); +extern void setup_secondary_APIC_clock(void); +extern int APIC_init_uniprocessor(void); +extern int apic_force_enable(unsigned long addr); + +/* + * On 32bit this is mach-xxx local + */ +#ifdef CONFIG_X86_64 +extern int apic_is_clustered_box(void); +#else +static inline int apic_is_clustered_box(void) +{ + return 0; +} +#endif + +extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask); + +#else /* !CONFIG_X86_LOCAL_APIC */ +static inline void lapic_shutdown(void) { } +#define local_apic_timer_c2_ok 1 +static inline void init_apic_mappings(void) { } +static inline void disable_local_APIC(void) { } +# define setup_boot_APIC_clock x86_init_noop +# define setup_secondary_APIC_clock x86_init_noop +#endif /* !CONFIG_X86_LOCAL_APIC */ + +#ifdef CONFIG_X86_64 +#define SET_APIC_ID(x) (apic->set_apic_id(x)) +#else + +#endif + +/* + * Copyright 2004 James Cleverdon, IBM. + * Subject to the GNU Public License, v.2 + * + * Generic APIC sub-arch data struct. + * + * Hacked for x86-64 by James Cleverdon from i386 architecture code by + * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and + * James Cleverdon. 
+ */ +struct apic { + char *name; + + int (*probe)(void); + int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); + int (*apic_id_valid)(int apicid); + int (*apic_id_registered)(void); + + u32 irq_delivery_mode; + u32 irq_dest_mode; + + const struct cpumask *(*target_cpus)(void); + + int disable_esr; + + int dest_logical; + unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid); + unsigned long (*check_apicid_present)(int apicid); + + void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); + void (*init_apic_ldr)(void); + + void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap); + + void (*setup_apic_routing)(void); + int (*multi_timer_check)(int apic, int irq); + int (*cpu_present_to_apicid)(int mps_cpu); + void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap); + void (*setup_portio_remap)(void); + int (*check_phys_apicid_present)(int phys_apicid); + void (*enable_apic_mode)(void); + int (*phys_pkg_id)(int cpuid_apic, int index_msb); + + /* + * When one of the next two hooks returns 1 the apic + * is switched to this. 
Essentially they are additional + * probe functions: + */ + int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid); + + unsigned int (*get_apic_id)(unsigned long x); + unsigned long (*set_apic_id)(unsigned int id); + unsigned long apic_id_mask; + + unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); + unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, + const struct cpumask *andmask); + + /* ipi */ + void (*send_IPI_mask)(const struct cpumask *mask, int vector); + void (*send_IPI_mask_allbutself)(const struct cpumask *mask, + int vector); + void (*send_IPI_allbutself)(int vector); + void (*send_IPI_all)(int vector); + void (*send_IPI_self)(int vector); + + /* wakeup_secondary_cpu */ + int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip); + + int trampoline_phys_low; + int trampoline_phys_high; + + void (*wait_for_init_deassert)(atomic_t *deassert); + void (*smp_callin_clear_local_apic)(void); + void (*inquire_remote_apic)(int apicid); + + /* apic ops */ + u32 (*read)(u32 reg); + void (*write)(u32 reg, u32 v); + u64 (*icr_read)(void); + void (*icr_write)(u32 low, u32 high); + void (*wait_icr_idle)(void); + u32 (*safe_wait_icr_idle)(void); + +#ifdef CONFIG_X86_32 + /* + * Called very early during boot from get_smp_config(). It should + * return the logical apicid. x86_[bios]_cpu_to_apicid is + * initialized before this function is called. + * + * If logical apicid can't be determined that early, the function + * may return BAD_APICID. Logical apicid will be configured after + * init_apic_ldr() while bringing up CPUs. Note that NUMA affinity + * won't be applied properly during early boot in this case. + */ + int (*x86_32_early_logical_apicid)(int cpu); + + /* + * Optional method called from setup_local_APIC() after logical + * apicid is guaranteed to be known to initialize apicid -> node + * mapping if NUMA initialization hasn't done so already. Don't + * add new users. 
+ */ + int (*x86_32_numa_cpu_node)(int cpu); +#endif +}; + +/* + * Pointer to the local APIC driver in use on this system (there's + * always just one such driver in use - the kernel decides via an + * early probing process which one it picks - and then sticks to it): + */ +extern struct apic *apic; + +/* + * APIC drivers are probed based on how they are listed in the .apicdrivers + * section. So the order is important and enforced by the ordering + * of different apic driver files in the Makefile. + * + * For the files having two apic drivers, we use apic_drivers() + * to enforce the order with in them. + */ +#define apic_driver(sym) \ + static struct apic *__apicdrivers_##sym __used \ + __aligned(sizeof(struct apic *)) \ + __section(.apicdrivers) = { &sym } + +#define apic_drivers(sym1, sym2) \ + static struct apic *__apicdrivers_##sym1##sym2[2] __used \ + __aligned(sizeof(struct apic *)) \ + __section(.apicdrivers) = { &sym1, &sym2 } + +extern struct apic *__apicdrivers[], *__apicdrivers_end[]; + +/* + * APIC functionality to boot other CPUs - only used on SMP: + */ +#ifdef CONFIG_SMP +extern atomic_t init_deasserted; +extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); +#endif + +#ifdef CONFIG_X86_LOCAL_APIC + +static inline u32 apic_read(u32 reg) +{ + return apic->read(reg); +} + +static inline void apic_write(u32 reg, u32 val) +{ + apic->write(reg, val); +} + +static inline u64 apic_icr_read(void) +{ + return apic->icr_read(); +} + +static inline void apic_icr_write(u32 low, u32 high) +{ + apic->icr_write(low, high); +} + +static inline void apic_wait_icr_idle(void) +{ + apic->wait_icr_idle(); +} + +static inline u32 safe_apic_wait_icr_idle(void) +{ + return apic->safe_wait_icr_idle(); +} + +#else /* CONFIG_X86_LOCAL_APIC */ + +static inline u32 apic_read(u32 reg) { return 0; } +static inline void apic_write(u32 reg, u32 val) { } +static inline u64 apic_icr_read(void) { return 0; } +static inline void apic_icr_write(u32 low, u32 
high) { } +static inline void apic_wait_icr_idle(void) { } +static inline u32 safe_apic_wait_icr_idle(void) { return 0; } + +#endif /* CONFIG_X86_LOCAL_APIC */ + +static inline void ack_APIC_irq(void) +{ + /* + * ack_APIC_irq() actually gets compiled as a single instruction + * ... yummie. + */ + + /* Docs say use 0 for future compatibility */ + apic_write(APIC_EOI, 0); +} + +static inline unsigned default_get_apic_id(unsigned long x) +{ + unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); + + if (APIC_XAPIC(ver) || boot_cpu_has(X86_FEATURE_EXTD_APICID)) + return (x >> 24) & 0xFF; + else + return (x >> 24) & 0x0F; +} + +/* + * Warm reset vector default position: + */ +#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467 +#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469 + +#ifdef CONFIG_X86_64 +extern int default_acpi_madt_oem_check(char *, char *); + +extern void apic_send_IPI_self(int vector); + +DECLARE_PER_CPU(int, x2apic_extra_bits); + +extern int default_cpu_present_to_apicid(int mps_cpu); +extern int default_check_phys_apicid_present(int phys_apicid); +#endif + +static inline void default_wait_for_init_deassert(atomic_t *deassert) +{ + while (!atomic_read(deassert)) + cpu_relax(); + return; +} + +extern void generic_bigsmp_probe(void); + + +#ifdef CONFIG_X86_LOCAL_APIC + +#include + +#define APIC_DFR_VALUE (APIC_DFR_FLAT) + +static inline const struct cpumask *default_target_cpus(void) +{ +#ifdef CONFIG_SMP + return cpu_online_mask; +#else + return cpumask_of(0); +#endif +} + +DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); + + +static inline unsigned int read_apic_id(void) +{ + unsigned int reg; + + reg = apic_read(APIC_ID); + + return apic->get_apic_id(reg); +} + +static inline int default_apic_id_valid(int apicid) +{ + return (apicid < 255); +} + +extern void default_setup_apic_routing(void); + +extern struct apic apic_noop; + +#ifdef CONFIG_X86_32 + +static inline int noop_x86_32_early_logical_apicid(int cpu) +{ + return BAD_APICID; +} + +/* + * Set up the logical 
destination ID. + * + * Intel recommends to set DFR, LDR and TPR before enabling + * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel + * document number 292116). So here it goes... + */ +extern void default_init_apic_ldr(void); + +static inline int default_apic_id_registered(void) +{ + return physid_isset(read_apic_id(), phys_cpu_present_map); +} + +static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; +} + +#endif + +static inline unsigned int +default_cpu_mask_to_apicid(const struct cpumask *cpumask) +{ + return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; +} + +static inline unsigned int +default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + unsigned long mask1 = cpumask_bits(cpumask)[0]; + unsigned long mask2 = cpumask_bits(andmask)[0]; + unsigned long mask3 = cpumask_bits(cpu_online_mask)[0]; + + return (unsigned int)(mask1 & mask2 & mask3); +} + +static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid) +{ + return physid_isset(apicid, *map); +} + +static inline unsigned long default_check_apicid_present(int bit) +{ + return physid_isset(bit, phys_cpu_present_map); +} + +static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) +{ + *retmap = *phys_map; +} + +static inline int __default_cpu_present_to_apicid(int mps_cpu) +{ + if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) + return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); + else + return BAD_APICID; +} + +static inline int +__default_check_phys_apicid_present(int phys_apicid) +{ + return physid_isset(phys_apicid, phys_cpu_present_map); +} + +#ifdef CONFIG_X86_32 +static inline int default_cpu_present_to_apicid(int mps_cpu) +{ + return __default_cpu_present_to_apicid(mps_cpu); +} + +static inline int +default_check_phys_apicid_present(int phys_apicid) +{ + return __default_check_phys_apicid_present(phys_apicid); +} +#else +extern int 
default_cpu_present_to_apicid(int mps_cpu); +extern int default_check_phys_apicid_present(int phys_apicid); +#endif + +#endif /* CONFIG_X86_LOCAL_APIC */ + +#endif /* _ASM_X86_APIC_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/apic_flat_64.h b/ANDROID_3.4.5/arch/x86/include/asm/apic_flat_64.h new file mode 100644 index 00000000..a2d31279 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/apic_flat_64.h @@ -0,0 +1,7 @@ +#ifndef _ASM_X86_APIC_FLAT_64_H +#define _ASM_X86_APIC_FLAT_64_H + +extern void flat_init_apic_ldr(void); + +#endif + diff --git a/ANDROID_3.4.5/arch/x86/include/asm/apicdef.h b/ANDROID_3.4.5/arch/x86/include/asm/apicdef.h new file mode 100644 index 00000000..134bba00 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/apicdef.h @@ -0,0 +1,445 @@ +#ifndef _ASM_X86_APICDEF_H +#define _ASM_X86_APICDEF_H + +/* + * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) + * + * Alan Cox , 1995. + * Ingo Molnar , 1999, 2000 + */ + +#define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000 +#define APIC_DEFAULT_PHYS_BASE 0xfee00000 + +/* + * This is the IO-APIC register space as specified + * by Intel docs: + */ +#define IO_APIC_SLOT_SIZE 1024 + +#define APIC_ID 0x20 + +#define APIC_LVR 0x30 +#define APIC_LVR_MASK 0xFF00FF +#define APIC_LVR_DIRECTED_EOI (1 << 24) +#define GET_APIC_VERSION(x) ((x) & 0xFFu) +#define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu) +#ifdef CONFIG_X86_32 +# define APIC_INTEGRATED(x) ((x) & 0xF0u) +#else +# define APIC_INTEGRATED(x) (1) +#endif +#define APIC_XAPIC(x) ((x) >= 0x14) +#define APIC_EXT_SPACE(x) ((x) & 0x80000000) +#define APIC_TASKPRI 0x80 +#define APIC_TPRI_MASK 0xFFu +#define APIC_ARBPRI 0x90 +#define APIC_ARBPRI_MASK 0xFFu +#define APIC_PROCPRI 0xA0 +#define APIC_EOI 0xB0 +#define APIC_EIO_ACK 0x0 +#define APIC_RRR 0xC0 +#define APIC_LDR 0xD0 +#define APIC_LDR_MASK (0xFFu << 24) +#define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu) +#define SET_APIC_LOGICAL_ID(x) (((x) << 24)) +#define APIC_ALL_CPUS 0xFFu 
+#define APIC_DFR 0xE0 +#define APIC_DFR_CLUSTER 0x0FFFFFFFul +#define APIC_DFR_FLAT 0xFFFFFFFFul +#define APIC_SPIV 0xF0 +#define APIC_SPIV_DIRECTED_EOI (1 << 12) +#define APIC_SPIV_FOCUS_DISABLED (1 << 9) +#define APIC_SPIV_APIC_ENABLED (1 << 8) +#define APIC_ISR 0x100 +#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ +#define APIC_TMR 0x180 +#define APIC_IRR 0x200 +#define APIC_ESR 0x280 +#define APIC_ESR_SEND_CS 0x00001 +#define APIC_ESR_RECV_CS 0x00002 +#define APIC_ESR_SEND_ACC 0x00004 +#define APIC_ESR_RECV_ACC 0x00008 +#define APIC_ESR_SENDILL 0x00020 +#define APIC_ESR_RECVILL 0x00040 +#define APIC_ESR_ILLREGA 0x00080 +#define APIC_LVTCMCI 0x2f0 +#define APIC_ICR 0x300 +#define APIC_DEST_SELF 0x40000 +#define APIC_DEST_ALLINC 0x80000 +#define APIC_DEST_ALLBUT 0xC0000 +#define APIC_ICR_RR_MASK 0x30000 +#define APIC_ICR_RR_INVALID 0x00000 +#define APIC_ICR_RR_INPROG 0x10000 +#define APIC_ICR_RR_VALID 0x20000 +#define APIC_INT_LEVELTRIG 0x08000 +#define APIC_INT_ASSERT 0x04000 +#define APIC_ICR_BUSY 0x01000 +#define APIC_DEST_LOGICAL 0x00800 +#define APIC_DEST_PHYSICAL 0x00000 +#define APIC_DM_FIXED 0x00000 +#define APIC_DM_FIXED_MASK 0x00700 +#define APIC_DM_LOWEST 0x00100 +#define APIC_DM_SMI 0x00200 +#define APIC_DM_REMRD 0x00300 +#define APIC_DM_NMI 0x00400 +#define APIC_DM_INIT 0x00500 +#define APIC_DM_STARTUP 0x00600 +#define APIC_DM_EXTINT 0x00700 +#define APIC_VECTOR_MASK 0x000FF +#define APIC_ICR2 0x310 +#define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF) +#define SET_APIC_DEST_FIELD(x) ((x) << 24) +#define APIC_LVTT 0x320 +#define APIC_LVTTHMR 0x330 +#define APIC_LVTPC 0x340 +#define APIC_LVT0 0x350 +#define APIC_LVT_TIMER_BASE_MASK (0x3 << 18) +#define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3) +#define SET_APIC_TIMER_BASE(x) (((x) << 18)) +#define APIC_TIMER_BASE_CLKIN 0x0 +#define APIC_TIMER_BASE_TMBASE 0x1 +#define APIC_TIMER_BASE_DIV 0x2 +#define APIC_LVT_TIMER_ONESHOT (0 << 17) +#define APIC_LVT_TIMER_PERIODIC (1 << 17) +#define 
APIC_LVT_TIMER_TSCDEADLINE (2 << 17) +#define APIC_LVT_MASKED (1 << 16) +#define APIC_LVT_LEVEL_TRIGGER (1 << 15) +#define APIC_LVT_REMOTE_IRR (1 << 14) +#define APIC_INPUT_POLARITY (1 << 13) +#define APIC_SEND_PENDING (1 << 12) +#define APIC_MODE_MASK 0x700 +#define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7) +#define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8)) +#define APIC_MODE_FIXED 0x0 +#define APIC_MODE_NMI 0x4 +#define APIC_MODE_EXTINT 0x7 +#define APIC_LVT1 0x360 +#define APIC_LVTERR 0x370 +#define APIC_TMICT 0x380 +#define APIC_TMCCT 0x390 +#define APIC_TDCR 0x3E0 +#define APIC_SELF_IPI 0x3F0 +#define APIC_TDR_DIV_TMBASE (1 << 2) +#define APIC_TDR_DIV_1 0xB +#define APIC_TDR_DIV_2 0x0 +#define APIC_TDR_DIV_4 0x1 +#define APIC_TDR_DIV_8 0x2 +#define APIC_TDR_DIV_16 0x3 +#define APIC_TDR_DIV_32 0x8 +#define APIC_TDR_DIV_64 0x9 +#define APIC_TDR_DIV_128 0xA +#define APIC_EFEAT 0x400 +#define APIC_ECTRL 0x410 +#define APIC_EILVTn(n) (0x500 + 0x10 * n) +#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ +#define APIC_EILVT_NR_AMD_10H 4 +#define APIC_EILVT_NR_MAX APIC_EILVT_NR_AMD_10H +#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) +#define APIC_EILVT_MSG_FIX 0x0 +#define APIC_EILVT_MSG_SMI 0x2 +#define APIC_EILVT_MSG_NMI 0x4 +#define APIC_EILVT_MSG_EXT 0x7 +#define APIC_EILVT_MASKED (1 << 16) + +#define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) +#define APIC_BASE_MSR 0x800 +#define XAPIC_ENABLE (1UL << 11) +#define X2APIC_ENABLE (1UL << 10) + +#ifdef CONFIG_X86_32 +# define MAX_IO_APICS 64 +# define MAX_LOCAL_APIC 256 +#else +# define MAX_IO_APICS 128 +# define MAX_LOCAL_APIC 32768 +#endif + +/* + * All x86-64 systems are xAPIC compatible. + * In the following, "apicid" is a physical APIC ID. 
+ */ +#define XAPIC_DEST_CPUS_SHIFT 4 +#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) +#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) +#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) +#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT) +#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK) +#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT) + +/* + * the local APIC register structure, memory mapped. Not terribly well + * tested, but we might eventually use this one in the future - the + * problem why we cannot use it right now is the P5 APIC, it has an + * errata which cannot take 8-bit reads and writes, only 32-bit ones ... + */ +#define u32 unsigned int + +struct local_apic { + +/*000*/ struct { u32 __reserved[4]; } __reserved_01; + +/*010*/ struct { u32 __reserved[4]; } __reserved_02; + +/*020*/ struct { /* APIC ID Register */ + u32 __reserved_1 : 24, + phys_apic_id : 4, + __reserved_2 : 4; + u32 __reserved[3]; + } id; + +/*030*/ const + struct { /* APIC Version Register */ + u32 version : 8, + __reserved_1 : 8, + max_lvt : 8, + __reserved_2 : 8; + u32 __reserved[3]; + } version; + +/*040*/ struct { u32 __reserved[4]; } __reserved_03; + +/*050*/ struct { u32 __reserved[4]; } __reserved_04; + +/*060*/ struct { u32 __reserved[4]; } __reserved_05; + +/*070*/ struct { u32 __reserved[4]; } __reserved_06; + +/*080*/ struct { /* Task Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } tpr; + +/*090*/ const + struct { /* Arbitration Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } apr; + +/*0A0*/ const + struct { /* Processor Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } ppr; + +/*0B0*/ struct { /* End Of Interrupt Register */ + u32 eoi; + u32 __reserved[3]; + } eoi; + +/*0C0*/ struct { u32 __reserved[4]; } __reserved_07; + +/*0D0*/ 
struct { /* Logical Destination Register */ + u32 __reserved_1 : 24, + logical_dest : 8; + u32 __reserved_2[3]; + } ldr; + +/*0E0*/ struct { /* Destination Format Register */ + u32 __reserved_1 : 28, + model : 4; + u32 __reserved_2[3]; + } dfr; + +/*0F0*/ struct { /* Spurious Interrupt Vector Register */ + u32 spurious_vector : 8, + apic_enabled : 1, + focus_cpu : 1, + __reserved_2 : 22; + u32 __reserved_3[3]; + } svr; + +/*100*/ struct { /* In Service Register */ +/*170*/ u32 bitfield; + u32 __reserved[3]; + } isr [8]; + +/*180*/ struct { /* Trigger Mode Register */ +/*1F0*/ u32 bitfield; + u32 __reserved[3]; + } tmr [8]; + +/*200*/ struct { /* Interrupt Request Register */ +/*270*/ u32 bitfield; + u32 __reserved[3]; + } irr [8]; + +/*280*/ union { /* Error Status Register */ + struct { + u32 send_cs_error : 1, + receive_cs_error : 1, + send_accept_error : 1, + receive_accept_error : 1, + __reserved_1 : 1, + send_illegal_vector : 1, + receive_illegal_vector : 1, + illegal_register_address : 1, + __reserved_2 : 24; + u32 __reserved_3[3]; + } error_bits; + struct { + u32 errors; + u32 __reserved_3[3]; + } all_errors; + } esr; + +/*290*/ struct { u32 __reserved[4]; } __reserved_08; + +/*2A0*/ struct { u32 __reserved[4]; } __reserved_09; + +/*2B0*/ struct { u32 __reserved[4]; } __reserved_10; + +/*2C0*/ struct { u32 __reserved[4]; } __reserved_11; + +/*2D0*/ struct { u32 __reserved[4]; } __reserved_12; + +/*2E0*/ struct { u32 __reserved[4]; } __reserved_13; + +/*2F0*/ struct { u32 __reserved[4]; } __reserved_14; + +/*300*/ struct { /* Interrupt Command Register 1 */ + u32 vector : 8, + delivery_mode : 3, + destination_mode : 1, + delivery_status : 1, + __reserved_1 : 1, + level : 1, + trigger : 1, + __reserved_2 : 2, + shorthand : 2, + __reserved_3 : 12; + u32 __reserved_4[3]; + } icr1; + +/*310*/ struct { /* Interrupt Command Register 2 */ + union { + u32 __reserved_1 : 24, + phys_dest : 4, + __reserved_2 : 4; + u32 __reserved_3 : 24, + logical_dest : 8; + } dest; + 
u32 __reserved_4[3]; + } icr2; + +/*320*/ struct { /* LVT - Timer */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + timer_mode : 1, + __reserved_3 : 14; + u32 __reserved_4[3]; + } lvt_timer; + +/*330*/ struct { /* LVT - Thermal Sensor */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_thermal; + +/*340*/ struct { /* LVT - Performance Counter */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_pc; + +/*350*/ struct { /* LVT - LINT0 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint0; + +/*360*/ struct { /* LVT - LINT1 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint1; + +/*370*/ struct { /* LVT - Error */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_error; + +/*380*/ struct { /* Timer Initial Count Register */ + u32 initial_count; + u32 __reserved_2[3]; + } timer_icr; + +/*390*/ const + struct { /* Timer Current Count Register */ + u32 curr_count; + u32 __reserved_2[3]; + } timer_ccr; + +/*3A0*/ struct { u32 __reserved[4]; } __reserved_16; + +/*3B0*/ struct { u32 __reserved[4]; } __reserved_17; + +/*3C0*/ struct { u32 __reserved[4]; } __reserved_18; + +/*3D0*/ struct { u32 __reserved[4]; } __reserved_19; + +/*3E0*/ struct { /* Timer Divide Configuration Register */ + u32 divisor : 4, + __reserved_1 : 28; + u32 __reserved_2[3]; + } timer_dcr; + +/*3F0*/ struct { u32 __reserved[4]; } 
__reserved_20; + +} __attribute__ ((packed)); + +#undef u32 + +#ifdef CONFIG_X86_32 + #define BAD_APICID 0xFFu +#else + #define BAD_APICID 0xFFFFu +#endif + +enum ioapic_irq_destination_types { + dest_Fixed = 0, + dest_LowestPrio = 1, + dest_SMI = 2, + dest__reserved_1 = 3, + dest_NMI = 4, + dest_INIT = 5, + dest__reserved_2 = 6, + dest_ExtINT = 7 +}; + +#endif /* _ASM_X86_APICDEF_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/apm.h b/ANDROID_3.4.5/arch/x86/include/asm/apm.h new file mode 100644 index 00000000..20370c6d --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/apm.h @@ -0,0 +1,73 @@ +/* + * Machine specific APM BIOS functions for generic. + * Split out from apm.c by Osamu Tomita + */ + +#ifndef _ASM_X86_MACH_DEFAULT_APM_H +#define _ASM_X86_MACH_DEFAULT_APM_H + +#ifdef APM_ZERO_SEGS +# define APM_DO_ZERO_SEGS \ + "pushl %%ds\n\t" \ + "pushl %%es\n\t" \ + "xorl %%edx, %%edx\n\t" \ + "mov %%dx, %%ds\n\t" \ + "mov %%dx, %%es\n\t" \ + "mov %%dx, %%fs\n\t" \ + "mov %%dx, %%gs\n\t" +# define APM_DO_POP_SEGS \ + "popl %%es\n\t" \ + "popl %%ds\n\t" +#else +# define APM_DO_ZERO_SEGS +# define APM_DO_POP_SEGS +#endif + +static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, + u32 *eax, u32 *ebx, u32 *ecx, + u32 *edx, u32 *esi) +{ + /* + * N.B. We do NOT need a cld after the BIOS call + * because we always save and restore the flags. + */ + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" + "lcall *%%cs:apm_bios_entry\n\t" + "setc %%al\n\t" + "popl %%ebp\n\t" + "popl %%edi\n\t" + APM_DO_POP_SEGS + : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx), + "=S" (*esi) + : "a" (func), "b" (ebx_in), "c" (ecx_in) + : "memory", "cc"); +} + +static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, + u32 ecx_in, u32 *eax) +{ + int cx, dx, si; + u8 error; + + /* + * N.B. We do NOT need a cld after the BIOS call + * because we always save and restore the flags. 
+ */ + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" + "lcall *%%cs:apm_bios_entry\n\t" + "setc %%bl\n\t" + "popl %%ebp\n\t" + "popl %%edi\n\t" + APM_DO_POP_SEGS + : "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx), + "=S" (si) + : "a" (func), "b" (ebx_in), "c" (ecx_in) + : "memory", "cc"); + return error; +} + +#endif /* _ASM_X86_MACH_DEFAULT_APM_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/arch_hweight.h b/ANDROID_3.4.5/arch/x86/include/asm/arch_hweight.h new file mode 100644 index 00000000..9686c3d9 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/arch_hweight.h @@ -0,0 +1,61 @@ +#ifndef _ASM_X86_HWEIGHT_H +#define _ASM_X86_HWEIGHT_H + +#ifdef CONFIG_64BIT +/* popcnt %edi, %eax -- redundant REX prefix for alignment */ +#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7" +/* popcnt %rdi, %rax */ +#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7" +#define REG_IN "D" +#define REG_OUT "a" +#else +/* popcnt %eax, %eax */ +#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0" +#define REG_IN "a" +#define REG_OUT "a" +#endif + +/* + * __sw_hweightXX are called from within the alternatives below + * and callee-clobbered registers need to be taken care of. See + * ARCH_HWEIGHT_CFLAGS in for the respective + * compiler switches. 
+ */ +static inline unsigned int __arch_hweight32(unsigned int w) +{ + unsigned int res = 0; + + asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT) + : "="REG_OUT (res) + : REG_IN (w)); + + return res; +} + +static inline unsigned int __arch_hweight16(unsigned int w) +{ + return __arch_hweight32(w & 0xffff); +} + +static inline unsigned int __arch_hweight8(unsigned int w) +{ + return __arch_hweight32(w & 0xff); +} + +static inline unsigned long __arch_hweight64(__u64 w) +{ + unsigned long res = 0; + +#ifdef CONFIG_X86_32 + return __arch_hweight32((u32)w) + + __arch_hweight32((u32)(w >> 32)); +#else + asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT) + : "="REG_OUT (res) + : REG_IN (w)); +#endif /* CONFIG_X86_32 */ + + return res; +} + +#endif diff --git a/ANDROID_3.4.5/arch/x86/include/asm/archrandom.h b/ANDROID_3.4.5/arch/x86/include/asm/archrandom.h new file mode 100644 index 00000000..0d9ec770 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/archrandom.h @@ -0,0 +1,75 @@ +/* + * This file is part of the Linux kernel. + * + * Copyright (c) 2011, Intel Corporation + * Authors: Fenghua Yu , + * H. Peter Anvin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ + +#ifndef ASM_X86_ARCHRANDOM_H +#define ASM_X86_ARCHRANDOM_H + +#include +#include +#include +#include + +#define RDRAND_RETRY_LOOPS 10 + +#define RDRAND_INT ".byte 0x0f,0xc7,0xf0" +#ifdef CONFIG_X86_64 +# define RDRAND_LONG ".byte 0x48,0x0f,0xc7,0xf0" +#else +# define RDRAND_LONG RDRAND_INT +#endif + +#ifdef CONFIG_ARCH_RANDOM + +#define GET_RANDOM(name, type, rdrand, nop) \ +static inline int name(type *v) \ +{ \ + int ok; \ + alternative_io("movl $0, %0\n\t" \ + nop, \ + "\n1: " rdrand "\n\t" \ + "jc 2f\n\t" \ + "decl %0\n\t" \ + "jnz 1b\n\t" \ + "2:", \ + X86_FEATURE_RDRAND, \ + ASM_OUTPUT2("=r" (ok), "=a" (*v)), \ + "0" (RDRAND_RETRY_LOOPS)); \ + return ok; \ +} + +#ifdef CONFIG_X86_64 + +GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5); +GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4); + +#else + +GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3); +GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3); + +#endif /* CONFIG_X86_64 */ + +#endif /* CONFIG_ARCH_RANDOM */ + +extern void x86_init_rdrand(struct cpuinfo_x86 *c); + +#endif /* ASM_X86_ARCHRANDOM_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/asm-offsets.h b/ANDROID_3.4.5/arch/x86/include/asm/asm-offsets.h new file mode 100644 index 00000000..d370ee36 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/ANDROID_3.4.5/arch/x86/include/asm/asm.h b/ANDROID_3.4.5/arch/x86/include/asm/asm.h new file mode 100644 index 00000000..9412d655 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/asm.h @@ -0,0 +1,58 @@ +#ifndef _ASM_X86_ASM_H +#define _ASM_X86_ASM_H + +#ifdef __ASSEMBLY__ +# define __ASM_FORM(x) x +# define __ASM_FORM_COMMA(x) x, +# define __ASM_EX_SEC .section __ex_table, "a" +#else +# define __ASM_FORM(x) " " #x " " +# define __ASM_FORM_COMMA(x) " " #x "," +# define __ASM_EX_SEC " .section __ex_table,\"a\"\n" +#endif + +#ifdef CONFIG_X86_32 +# define 
__ASM_SEL(a,b) __ASM_FORM(a) +#else +# define __ASM_SEL(a,b) __ASM_FORM(b) +#endif + +#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \ + inst##q##__VA_ARGS__) +#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg) + +#define _ASM_PTR __ASM_SEL(.long, .quad) +#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8) + +#define _ASM_MOV __ASM_SIZE(mov) +#define _ASM_INC __ASM_SIZE(inc) +#define _ASM_DEC __ASM_SIZE(dec) +#define _ASM_ADD __ASM_SIZE(add) +#define _ASM_SUB __ASM_SIZE(sub) +#define _ASM_XADD __ASM_SIZE(xadd) + +#define _ASM_AX __ASM_REG(ax) +#define _ASM_BX __ASM_REG(bx) +#define _ASM_CX __ASM_REG(cx) +#define _ASM_DX __ASM_REG(dx) +#define _ASM_SP __ASM_REG(sp) +#define _ASM_BP __ASM_REG(bp) +#define _ASM_SI __ASM_REG(si) +#define _ASM_DI __ASM_REG(di) + +/* Exception table entry */ +#ifdef __ASSEMBLY__ +# define _ASM_EXTABLE(from,to) \ + __ASM_EX_SEC ; \ + _ASM_ALIGN ; \ + _ASM_PTR from , to ; \ + .previous +#else +# define _ASM_EXTABLE(from,to) \ + __ASM_EX_SEC \ + _ASM_ALIGN "\n" \ + _ASM_PTR #from "," #to "\n" \ + " .previous\n" +#endif + +#endif /* _ASM_X86_ASM_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/atomic.h b/ANDROID_3.4.5/arch/x86/include/asm/atomic.h new file mode 100644 index 00000000..58cb6d40 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/atomic.h @@ -0,0 +1,317 @@ +#ifndef _ASM_X86_ATOMIC_H +#define _ASM_X86_ATOMIC_H + +#include +#include +#include +#include +#include + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc.. + */ + +#define ATOMIC_INIT(i) { (i) } + +/** + * atomic_read - read atomic variable + * @v: pointer of type atomic_t + * + * Atomically reads the value of @v. + */ +static inline int atomic_read(const atomic_t *v) +{ + return (*(volatile int *)&(v)->counter); +} + +/** + * atomic_set - set atomic variable + * @v: pointer of type atomic_t + * @i: required value + * + * Atomically sets the value of @v to @i. 
+ */ +static inline void atomic_set(atomic_t *v, int i) +{ + v->counter = i; +} + +/** + * atomic_add - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v. + */ +static inline void atomic_add(int i, atomic_t *v) +{ + asm volatile(LOCK_PREFIX "addl %1,%0" + : "+m" (v->counter) + : "ir" (i)); +} + +/** + * atomic_sub - subtract integer from atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v. + */ +static inline void atomic_sub(int i, atomic_t *v) +{ + asm volatile(LOCK_PREFIX "subl %1,%0" + : "+m" (v->counter) + : "ir" (i)); +} + +/** + * atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static inline int atomic_sub_and_test(int i, atomic_t *v) +{ + unsigned char c; + + asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" + : "+m" (v->counter), "=qm" (c) + : "ir" (i) : "memory"); + return c; +} + +/** + * atomic_inc - increment atomic variable + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1. + */ +static inline void atomic_inc(atomic_t *v) +{ + asm volatile(LOCK_PREFIX "incl %0" + : "+m" (v->counter)); +} + +/** + * atomic_dec - decrement atomic variable + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1. + */ +static inline void atomic_dec(atomic_t *v) +{ + asm volatile(LOCK_PREFIX "decl %0" + : "+m" (v->counter)); +} + +/** + * atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. 
+ */ +static inline int atomic_dec_and_test(atomic_t *v) +{ + unsigned char c; + + asm volatile(LOCK_PREFIX "decl %0; sete %1" + : "+m" (v->counter), "=qm" (c) + : : "memory"); + return c != 0; +} + +/** + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static inline int atomic_inc_and_test(atomic_t *v) +{ + unsigned char c; + + asm volatile(LOCK_PREFIX "incl %0; sete %1" + : "+m" (v->counter), "=qm" (c) + : : "memory"); + return c != 0; +} + +/** + * atomic_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static inline int atomic_add_negative(int i, atomic_t *v) +{ + unsigned char c; + + asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" + : "+m" (v->counter), "=qm" (c) + : "ir" (i) : "memory"); + return c; +} + +/** + * atomic_add_return - add integer and return + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns @i + @v + */ +static inline int atomic_add_return(int i, atomic_t *v) +{ +#ifdef CONFIG_M386 + int __i; + unsigned long flags; + if (unlikely(boot_cpu_data.x86 <= 3)) + goto no_xadd; +#endif + /* Modern 486+ processor */ + return i + xadd(&v->counter, i); + +#ifdef CONFIG_M386 +no_xadd: /* Legacy 386 processor */ + raw_local_irq_save(flags); + __i = atomic_read(v); + atomic_set(v, i + __i); + raw_local_irq_restore(flags); + return i + __i; +#endif +} + +/** + * atomic_sub_return - subtract integer and return + * @v: pointer of type atomic_t + * @i: integer value to subtract + * + * Atomically subtracts @i from @v and returns @v - @i + */ +static inline int atomic_sub_return(int i, atomic_t *v) +{ + return atomic_add_return(-i, v); +} + +#define 
atomic_inc_return(v) (atomic_add_return(1, v)) +#define atomic_dec_return(v) (atomic_sub_return(1, v)) + +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + return cmpxchg(&v->counter, old, new); +} + +static inline int atomic_xchg(atomic_t *v, int new) +{ + return xchg(&v->counter, new); +} + +/** + * __atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns the old value of @v. + */ +static inline int __atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c; +} + + +/* + * atomic_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. 
+ */ +static inline int atomic_dec_if_positive(atomic_t *v) +{ + int c, old, dec; + c = atomic_read(v); + for (;;) { + dec = c - 1; + if (unlikely(dec < 0)) + break; + old = atomic_cmpxchg((v), c, dec); + if (likely(old == c)) + break; + c = old; + } + return dec; +} + +/** + * atomic_inc_short - increment of a short integer + * @v: pointer to type int + * + * Atomically adds 1 to @v + * Returns the new value of @u + */ +static inline short int atomic_inc_short(short int *v) +{ + asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v)); + return *v; +} + +#ifdef CONFIG_X86_64 +/** + * atomic_or_long - OR of two long integers + * @v1: pointer to type unsigned long + * @v2: pointer to type unsigned long + * + * Atomically ORs @v1 and @v2 + * Returns the result of the OR + */ +static inline void atomic_or_long(unsigned long *v1, unsigned long v2) +{ + asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2)); +} +#endif + +/* These are x86-specific, used by some header files */ +#define atomic_clear_mask(mask, addr) \ + asm volatile(LOCK_PREFIX "andl %0,%1" \ + : : "r" (~(mask)), "m" (*(addr)) : "memory") + +#define atomic_set_mask(mask, addr) \ + asm volatile(LOCK_PREFIX "orl %0,%1" \ + : : "r" ((unsigned)(mask)), "m" (*(addr)) \ + : "memory") + +/* Atomic operations are already serializing on x86 */ +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + +#ifdef CONFIG_X86_32 +# include "atomic64_32.h" +#else +# include "atomic64_64.h" +#endif + +#endif /* _ASM_X86_ATOMIC_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/atomic64_32.h b/ANDROID_3.4.5/arch/x86/include/asm/atomic64_32.h new file mode 100644 index 00000000..19811991 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/atomic64_32.h @@ -0,0 +1,316 @@ +#ifndef _ASM_X86_ATOMIC64_32_H +#define _ASM_X86_ATOMIC64_32_H + +#include +#include +#include +//#include + +/* An 64bit atomic type */ 
+ +typedef struct { + u64 __aligned(8) counter; +} atomic64_t; + +#define ATOMIC64_INIT(val) { (val) } + +#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...) +#ifndef ATOMIC64_EXPORT +#define ATOMIC64_DECL_ONE __ATOMIC64_DECL +#else +#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \ + ATOMIC64_EXPORT(atomic64_##sym) +#endif + +#ifdef CONFIG_X86_CMPXCHG64 +#define __alternative_atomic64(f, g, out, in...) \ + asm volatile("call %P[func]" \ + : out : [func] "i" (atomic64_##g##_cx8), ## in) + +#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8) +#else +#define __alternative_atomic64(f, g, out, in...) \ + alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \ + X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in) + +#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \ + ATOMIC64_DECL_ONE(sym##_386) + +ATOMIC64_DECL_ONE(add_386); +ATOMIC64_DECL_ONE(sub_386); +ATOMIC64_DECL_ONE(inc_386); +ATOMIC64_DECL_ONE(dec_386); +#endif + +#define alternative_atomic64(f, out, in...) \ + __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in) + +ATOMIC64_DECL(read); +ATOMIC64_DECL(set); +ATOMIC64_DECL(xchg); +ATOMIC64_DECL(add_return); +ATOMIC64_DECL(sub_return); +ATOMIC64_DECL(inc_return); +ATOMIC64_DECL(dec_return); +ATOMIC64_DECL(dec_if_positive); +ATOMIC64_DECL(inc_not_zero); +ATOMIC64_DECL(add_unless); + +#undef ATOMIC64_DECL +#undef ATOMIC64_DECL_ONE +#undef __ATOMIC64_DECL +#undef ATOMIC64_EXPORT + +/** + * atomic64_cmpxchg - cmpxchg atomic64 variable + * @p: pointer to type atomic64_t + * @o: expected value + * @n: new value + * + * Atomically sets @v to @n if it was equal to @o and returns + * the old value. + */ + +static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) +{ + return cmpxchg64(&v->counter, o, n); +} + +/** + * atomic64_xchg - xchg atomic64 variable + * @v: pointer to type atomic64_t + * @n: value to assign + * + * Atomically xchgs the value of @v to @n and returns + * the old value. 
+ */ +static inline long long atomic64_xchg(atomic64_t *v, long long n) +{ + long long o; + unsigned high = (unsigned)(n >> 32); + unsigned low = (unsigned)n; + alternative_atomic64(xchg, "=&A" (o), + "S" (v), "b" (low), "c" (high) + : "memory"); + return o; +} + +/** + * atomic64_set - set atomic64 variable + * @v: pointer to type atomic64_t + * @n: value to assign + * + * Atomically sets the value of @v to @n. + */ +static inline void atomic64_set(atomic64_t *v, long long i) +{ + unsigned high = (unsigned)(i >> 32); + unsigned low = (unsigned)i; + alternative_atomic64(set, /* no output */, + "S" (v), "b" (low), "c" (high) + : "eax", "edx", "memory"); +} + +/** + * atomic64_read - read atomic64 variable + * @v: pointer to type atomic64_t + * + * Atomically reads the value of @v and returns it. + */ +static inline long long atomic64_read(const atomic64_t *v) +{ + long long r; + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory"); + return r; + } + +/** + * atomic64_add_return - add and return + * @i: integer value to add + * @v: pointer to type atomic64_t + * + * Atomically adds @i to @v and returns @i + *@v + */ +static inline long long atomic64_add_return(long long i, atomic64_t *v) +{ + alternative_atomic64(add_return, + ASM_OUTPUT2("+A" (i), "+c" (v)), + ASM_NO_INPUT_CLOBBER("memory")); + return i; +} + +/* + * Other variants with different arithmetic operators: + */ +static inline long long atomic64_sub_return(long long i, atomic64_t *v) +{ + alternative_atomic64(sub_return, + ASM_OUTPUT2("+A" (i), "+c" (v)), + ASM_NO_INPUT_CLOBBER("memory")); + return i; +} + +static inline long long atomic64_inc_return(atomic64_t *v) +{ + long long a; + alternative_atomic64(inc_return, "=&A" (a), + "S" (v) : "memory", "ecx"); + return a; +} + +static inline long long atomic64_dec_return(atomic64_t *v) +{ + long long a; + alternative_atomic64(dec_return, "=&A" (a), + "S" (v) : "memory", "ecx"); + return a; +} + +/** + * atomic64_add - add integer to atomic64 variable + 
* @i: integer value to add + * @v: pointer to type atomic64_t + * + * Atomically adds @i to @v. + */ +static inline long long atomic64_add(long long i, atomic64_t *v) +{ + __alternative_atomic64(add, add_return, + ASM_OUTPUT2("+A" (i), "+c" (v)), + ASM_NO_INPUT_CLOBBER("memory")); + return i; +} + +/** + * atomic64_sub - subtract the atomic64 variable + * @i: integer value to subtract + * @v: pointer to type atomic64_t + * + * Atomically subtracts @i from @v. + */ +static inline long long atomic64_sub(long long i, atomic64_t *v) +{ + __alternative_atomic64(sub, sub_return, + ASM_OUTPUT2("+A" (i), "+c" (v)), + ASM_NO_INPUT_CLOBBER("memory")); + return i; +} + +/** + * atomic64_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer to type atomic64_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static inline int atomic64_sub_and_test(long long i, atomic64_t *v) +{ + return atomic64_sub_return(i, v) == 0; +} + +/** + * atomic64_inc - increment atomic64 variable + * @v: pointer to type atomic64_t + * + * Atomically increments @v by 1. + */ +static inline void atomic64_inc(atomic64_t *v) +{ + __alternative_atomic64(inc, inc_return, /* no output */, + "S" (v) : "memory", "eax", "ecx", "edx"); +} + +/** + * atomic64_dec - decrement atomic64 variable + * @ptr: pointer to type atomic64_t + * + * Atomically decrements @ptr by 1. + */ +static inline void atomic64_dec(atomic64_t *v) +{ + __alternative_atomic64(dec, dec_return, /* no output */, + "S" (v) : "memory", "eax", "ecx", "edx"); +} + +/** + * atomic64_dec_and_test - decrement and test + * @v: pointer to type atomic64_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. 
+ */ +static inline int atomic64_dec_and_test(atomic64_t *v) +{ + return atomic64_dec_return(v) == 0; +} + +/** + * atomic64_inc_and_test - increment and test + * @v: pointer to type atomic64_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static inline int atomic64_inc_and_test(atomic64_t *v) +{ + return atomic64_inc_return(v) == 0; +} + +/** + * atomic64_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer to type atomic64_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static inline int atomic64_add_negative(long long i, atomic64_t *v) +{ + return atomic64_add_return(i, v) < 0; +} + +/** + * atomic64_add_unless - add unless the number is a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if the add was done, zero otherwise. 
+ */ +static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) +{ + unsigned low = (unsigned)u; + unsigned high = (unsigned)(u >> 32); + alternative_atomic64(add_unless, + ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)), + "S" (v) : "memory"); + return (int)a; +} + + +static inline int atomic64_inc_not_zero(atomic64_t *v) +{ + int r; + alternative_atomic64(inc_not_zero, "=&a" (r), + "S" (v) : "ecx", "edx", "memory"); + return r; +} + +static inline long long atomic64_dec_if_positive(atomic64_t *v) +{ + long long r; + alternative_atomic64(dec_if_positive, "=&A" (r), + "S" (v) : "ecx", "memory"); + return r; +} + +#undef alternative_atomic64 +#undef __alternative_atomic64 + +#endif /* _ASM_X86_ATOMIC64_32_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/atomic64_64.h b/ANDROID_3.4.5/arch/x86/include/asm/atomic64_64.h new file mode 100644 index 00000000..0e1cbfc8 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/atomic64_64.h @@ -0,0 +1,243 @@ +#ifndef _ASM_X86_ATOMIC64_64_H +#define _ASM_X86_ATOMIC64_64_H + +#include +#include +#include + +/* The 64-bit atomic type */ + +#define ATOMIC64_INIT(i) { (i) } + +/** + * atomic64_read - read atomic64 variable + * @v: pointer of type atomic64_t + * + * Atomically reads the value of @v. + * Doesn't imply a read memory barrier. + */ +static inline long atomic64_read(const atomic64_t *v) +{ + return (*(volatile long *)&(v)->counter); +} + +/** + * atomic64_set - set atomic64 variable + * @v: pointer to type atomic64_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +static inline void atomic64_set(atomic64_t *v, long i) +{ + v->counter = i; +} + +/** + * atomic64_add - add integer to atomic64 variable + * @i: integer value to add + * @v: pointer to type atomic64_t + * + * Atomically adds @i to @v. 
+ */ +static inline void atomic64_add(long i, atomic64_t *v) +{ + asm volatile(LOCK_PREFIX "addq %1,%0" + : "=m" (v->counter) + : "er" (i), "m" (v->counter)); +} + +/** + * atomic64_sub - subtract the atomic64 variable + * @i: integer value to subtract + * @v: pointer to type atomic64_t + * + * Atomically subtracts @i from @v. + */ +static inline void atomic64_sub(long i, atomic64_t *v) +{ + asm volatile(LOCK_PREFIX "subq %1,%0" + : "=m" (v->counter) + : "er" (i), "m" (v->counter)); +} + +/** + * atomic64_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer to type atomic64_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static inline int atomic64_sub_and_test(long i, atomic64_t *v) +{ + unsigned char c; + + asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "er" (i), "m" (v->counter) : "memory"); + return c; +} + +/** + * atomic64_inc - increment atomic64 variable + * @v: pointer to type atomic64_t + * + * Atomically increments @v by 1. + */ +static inline void atomic64_inc(atomic64_t *v) +{ + asm volatile(LOCK_PREFIX "incq %0" + : "=m" (v->counter) + : "m" (v->counter)); +} + +/** + * atomic64_dec - decrement atomic64 variable + * @v: pointer to type atomic64_t + * + * Atomically decrements @v by 1. + */ +static inline void atomic64_dec(atomic64_t *v) +{ + asm volatile(LOCK_PREFIX "decq %0" + : "=m" (v->counter) + : "m" (v->counter)); +} + +/** + * atomic64_dec_and_test - decrement and test + * @v: pointer to type atomic64_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. 
+ */ +static inline int atomic64_dec_and_test(atomic64_t *v) +{ + unsigned char c; + + asm volatile(LOCK_PREFIX "decq %0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); + return c != 0; +} + +/** + * atomic64_inc_and_test - increment and test + * @v: pointer to type atomic64_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static inline int atomic64_inc_and_test(atomic64_t *v) +{ + unsigned char c; + + asm volatile(LOCK_PREFIX "incq %0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); + return c != 0; +} + +/** + * atomic64_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer to type atomic64_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static inline int atomic64_add_negative(long i, atomic64_t *v) +{ + unsigned char c; + + asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" + : "=m" (v->counter), "=qm" (c) + : "er" (i), "m" (v->counter) : "memory"); + return c; +} + +/** + * atomic64_add_return - add and return + * @i: integer value to add + * @v: pointer to type atomic64_t + * + * Atomically adds @i to @v and returns @i + @v + */ +static inline long atomic64_add_return(long i, atomic64_t *v) +{ + return i + xadd(&v->counter, i); +} + +static inline long atomic64_sub_return(long i, atomic64_t *v) +{ + return atomic64_add_return(-i, v); +} + +#define atomic64_inc_return(v) (atomic64_add_return(1, (v))) +#define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) + +static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) +{ + return cmpxchg(&v->counter, old, new); +} + +static inline long atomic64_xchg(atomic64_t *v, long new) +{ + return xchg(&v->counter, new); +} + +/** + * atomic64_add_unless - add unless the number is a given value + * @v: pointer of type atomic64_t + * @a: the amount to 
add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. + */ +static inline int atomic64_add_unless(atomic64_t *v, long a, long u) +{ + long c, old; + c = atomic64_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic64_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) + +/* + * atomic64_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. + */ +static inline long atomic64_dec_if_positive(atomic64_t *v) +{ + long c, old, dec; + c = atomic64_read(v); + for (;;) { + dec = c - 1; + if (unlikely(dec < 0)) + break; + old = atomic64_cmpxchg((v), c, dec); + if (likely(old == c)) + break; + c = old; + } + return dec; +} + +#endif /* _ASM_X86_ATOMIC64_64_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/auxvec.h b/ANDROID_3.4.5/arch/x86/include/asm/auxvec.h new file mode 100644 index 00000000..77203ac3 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/auxvec.h @@ -0,0 +1,19 @@ +#ifndef _ASM_X86_AUXVEC_H +#define _ASM_X86_AUXVEC_H +/* + * Architecture-neutral AT_ values in 0-17, leave some room + * for more of them, start the x86-specific ones at 32. 
+ */ +#ifdef __i386__ +#define AT_SYSINFO 32 +#endif +#define AT_SYSINFO_EHDR 33 + +/* entries in ARCH_DLINFO: */ +#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64) +# define AT_VECTOR_SIZE_ARCH 2 +#else /* else it's non-compat x86-64 */ +# define AT_VECTOR_SIZE_ARCH 1 +#endif + +#endif /* _ASM_X86_AUXVEC_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/barrier.h b/ANDROID_3.4.5/arch/x86/include/asm/barrier.h new file mode 100644 index 00000000..c6cd358a --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/barrier.h @@ -0,0 +1,116 @@ +#ifndef _ASM_X86_BARRIER_H +#define _ASM_X86_BARRIER_H + +#include +#include + +/* + * Force strict CPU ordering. + * And yes, this is required on UP too when we're talking + * to devices. + */ + +#ifdef CONFIG_X86_32 +/* + * Some non-Intel clones support out of order store. wmb() ceases to be a + * nop for these. + */ +#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) +#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) +#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) +#else +#define mb() asm volatile("mfence":::"memory") +#define rmb() asm volatile("lfence":::"memory") +#define wmb() asm volatile("sfence" ::: "memory") +#endif + +/** + * read_barrier_depends - Flush all pending reads that subsequents reads + * depend on. + * + * No data-dependent reads from memory-like regions are ever reordered + * over this barrier. All reads preceding this primitive are guaranteed + * to access memory (but not necessarily other CPUs' caches) before any + * reads following this primitive that depend on the data return by + * any of the preceding reads. This primitive is much lighter weight than + * rmb() on most CPUs, and is never heavier weight than is + * rmb(). + * + * These ordering constraints are respected by both the local CPU + * and the compiler. 
+ * + * Ordering is not guaranteed by anything other than these primitives, + * not even by data dependencies. See the documentation for + * memory_barrier() for examples and URLs to more information. + * + * For example, the following code would force ordering (the initial + * value of "a" is zero, "b" is one, and "p" is "&a"): + * + * + * CPU 0 CPU 1 + * + * b = 2; + * memory_barrier(); + * p = &b; q = p; + * read_barrier_depends(); + * d = *q; + * + * + * because the read of "*q" depends on the read of "p" and these + * two reads are separated by a read_barrier_depends(). However, + * the following code, with the same initial values for "a" and "b": + * + * + * CPU 0 CPU 1 + * + * a = 2; + * memory_barrier(); + * b = 3; y = b; + * read_barrier_depends(); + * x = a; + * + * + * does not enforce ordering, since there is no data dependency between + * the read of "a" and the read of "b". Therefore, on some CPUs, such + * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() + * in cases like this where there are no data dependencies. + **/ + +#define read_barrier_depends() do { } while (0) + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#ifdef CONFIG_X86_PPRO_FENCE +# define smp_rmb() rmb() +#else +# define smp_rmb() barrier() +#endif +#ifdef CONFIG_X86_OOSTORE +# define smp_wmb() wmb() +#else +# define smp_wmb() barrier() +#endif +#define smp_read_barrier_depends() read_barrier_depends() +#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while (0) +#define set_mb(var, value) do { var = value; barrier(); } while (0) +#endif + +/* + * Stop RDTSC speculation. This is needed when you need to use RDTSC + * (or get_cycles or vread that possibly accesses the TSC) in a defined + * code region. + * + * (Could use an alternative three way for this if there was one.) 
+ */ +static __always_inline void rdtsc_barrier(void) +{ + alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); + alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); +} + +#endif /* _ASM_X86_BARRIER_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/bios_ebda.h b/ANDROID_3.4.5/arch/x86/include/asm/bios_ebda.h new file mode 100644 index 00000000..aa6a3170 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/bios_ebda.h @@ -0,0 +1,60 @@ +#ifndef _ASM_X86_BIOS_EBDA_H +#define _ASM_X86_BIOS_EBDA_H + +#include + +/* + * Returns physical address of EBDA. Returns 0 if there is no EBDA. + */ +static inline unsigned int get_bios_ebda(void) +{ + /* + * There is a real-mode segmented pointer pointing to the + * 4K EBDA area at 0x40E. + */ + unsigned int address = *(unsigned short *)phys_to_virt(0x40E); + address <<= 4; + return address; /* 0 means none */ +} + +/* + * Return the sanitized length of the EBDA in bytes, if it exists. + */ +static inline unsigned int get_bios_ebda_length(void) +{ + unsigned int address; + unsigned int length; + + address = get_bios_ebda(); + if (!address) + return 0; + + /* EBDA length is byte 0 of the EBDA (stored in KiB) */ + length = *(unsigned char *)phys_to_virt(address); + length <<= 10; + + /* Trim the length if it extends beyond 640KiB */ + length = min_t(unsigned int, (640 * 1024) - address, length); + return length; +} + +void reserve_ebda_region(void); + +#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION +/* + * This is obviously not a great place for this, but we want to be + * able to scatter it around anywhere in the kernel. 
+ */ +void check_for_bios_corruption(void); +void start_periodic_check_for_corruption(void); +#else +static inline void check_for_bios_corruption(void) +{ +} + +static inline void start_periodic_check_for_corruption(void) +{ +} +#endif + +#endif /* _ASM_X86_BIOS_EBDA_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/bitops.h b/ANDROID_3.4.5/arch/x86/include/asm/bitops.h new file mode 100644 index 00000000..b97596e2 --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/bitops.h @@ -0,0 +1,514 @@ +#ifndef _ASM_X86_BITOPS_H +#define _ASM_X86_BITOPS_H + +/* + * Copyright 1992, Linus Torvalds. + * + * Note: inlines with more than a single statement should be marked + * __always_inline to avoid problems with older gcc's inlining heuristics. + */ + +#ifndef _LINUX_BITOPS_H +#error only can be included directly +#endif + +#include +#include + +/* + * These have to be done with inline assembly: that way the bit-setting + * is guaranteed to be atomic. All bit operations return 0 if the bit + * was cleared before the operation and != 0 if it was not. + * + * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). + */ + +#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) +/* Technically wrong, but this avoids compilation errors on some gcc + versions. */ +#define BITOP_ADDR(x) "=m" (*(volatile long *) (x)) +#else +#define BITOP_ADDR(x) "+m" (*(volatile long *) (x)) +#endif + +#define ADDR BITOP_ADDR(addr) + +/* + * We do the locked ops that don't return the old value as + * a mask operation on a byte. + */ +#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) +#define CONST_MASK(nr) (1 << ((nr) & 7)) + +/** + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This function is atomic and may not be reordered. See __set_bit() + * if you do not require the atomic guarantees. 
+ * + * Note: there are no guarantees that this function will not be reordered + * on non x86 architectures, so if you are writing portable code, + * make sure not to rely on its reordering guarantees. + * + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static __always_inline void +set_bit(unsigned int nr, volatile unsigned long *addr) +{ + if (IS_IMMEDIATE(nr)) { + asm volatile(LOCK_PREFIX "orb %1,%0" + : CONST_MASK_ADDR(nr, addr) + : "iq" ((u8)CONST_MASK(nr)) + : "memory"); + } else { + asm volatile(LOCK_PREFIX "bts %1,%0" + : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); + } +} + +/** + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void __set_bit(int nr, volatile unsigned long *addr) +{ + asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); +} + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and may not be reordered. However, it does + * not contain a memory barrier, so if it is used for locking purposes, + * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * in order to ensure changes are visible on other processors. 
+ */ +static __always_inline void +clear_bit(int nr, volatile unsigned long *addr) +{ + if (IS_IMMEDIATE(nr)) { + asm volatile(LOCK_PREFIX "andb %1,%0" + : CONST_MASK_ADDR(nr, addr) + : "iq" ((u8)~CONST_MASK(nr))); + } else { + asm volatile(LOCK_PREFIX "btr %1,%0" + : BITOP_ADDR(addr) + : "Ir" (nr)); + } +} + +/* + * clear_bit_unlock - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and implies release semantics before the memory + * operation. It can be used for an unlock. + */ +static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr) +{ + barrier(); + clear_bit(nr, addr); +} + +static inline void __clear_bit(int nr, volatile unsigned long *addr) +{ + asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); +} + +/* + * __clear_bit_unlock - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * __clear_bit() is non-atomic and implies release semantics before the memory + * operation. It can be used for an unlock if no other CPUs can concurrently + * modify other bits in the word. + * + * No memory barrier is required here, because x86 cannot reorder stores past + * older loads. Same principle as spin_unlock. + */ +static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr) +{ + barrier(); + __clear_bit(nr, addr); +} + +#define smp_mb__before_clear_bit() barrier() +#define smp_mb__after_clear_bit() barrier() + +/** + * __change_bit - Toggle a bit in memory + * @nr: the bit to change + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. 
+ */ +static inline void __change_bit(int nr, volatile unsigned long *addr) +{ + asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); +} + +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + * + * change_bit() is atomic and may not be reordered. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void change_bit(int nr, volatile unsigned long *addr) +{ + if (IS_IMMEDIATE(nr)) { + asm volatile(LOCK_PREFIX "xorb %1,%0" + : CONST_MASK_ADDR(nr, addr) + : "iq" ((u8)CONST_MASK(nr))); + } else { + asm volatile(LOCK_PREFIX "btc %1,%0" + : BITOP_ADDR(addr) + : "Ir" (nr)); + } +} + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static inline int test_and_set_bit(int nr, volatile unsigned long *addr) +{ + int oldbit; + + asm volatile(LOCK_PREFIX "bts %2,%1\n\t" + "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); + + return oldbit; +} + +/** + * test_and_set_bit_lock - Set a bit and return its old value for lock + * @nr: Bit to set + * @addr: Address to count from + * + * This is the same as test_and_set_bit on x86. + */ +static __always_inline int +test_and_set_bit_lock(int nr, volatile unsigned long *addr) +{ + return test_and_set_bit(nr, addr); +} + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. 
+ */ +static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) +{ + int oldbit; + + asm("bts %2,%1\n\t" + "sbb %0,%0" + : "=r" (oldbit), ADDR + : "Ir" (nr)); + return oldbit; +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + int oldbit; + + asm volatile(LOCK_PREFIX "btr %2,%1\n\t" + "sbb %0,%0" + : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); + + return oldbit; +} + +/** + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + int oldbit; + + asm volatile("btr %2,%1\n\t" + "sbb %0,%0" + : "=r" (oldbit), ADDR + : "Ir" (nr)); + return oldbit; +} + +/* WARNING: non atomic and it can be reordered! */ +static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) +{ + int oldbit; + + asm volatile("btc %2,%1\n\t" + "sbb %0,%0" + : "=r" (oldbit), ADDR + : "Ir" (nr) : "memory"); + + return oldbit; +} + +/** + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. 
+ */ +static inline int test_and_change_bit(int nr, volatile unsigned long *addr) +{ + int oldbit; + + asm volatile(LOCK_PREFIX "btc %2,%1\n\t" + "sbb %0,%0" + : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); + + return oldbit; +} + +static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) +{ + return ((1UL << (nr % BITS_PER_LONG)) & + (addr[nr / BITS_PER_LONG])) != 0; +} + +static inline int variable_test_bit(int nr, volatile const unsigned long *addr) +{ + int oldbit; + + asm volatile("bt %2,%1\n\t" + "sbb %0,%0" + : "=r" (oldbit) + : "m" (*(unsigned long *)addr), "Ir" (nr)); + + return oldbit; +} + +#if 0 /* Fool kernel-doc since it doesn't do macros yet */ +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static int test_bit(int nr, const volatile unsigned long *addr); +#endif + +#define test_bit(nr, addr) \ + (__builtin_constant_p((nr)) \ + ? constant_test_bit((nr), (addr)) \ + : variable_test_bit((nr), (addr))) + +/** + * __ffs - find first set bit in word + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static inline unsigned long __ffs(unsigned long word) +{ + asm("bsf %1,%0" + : "=r" (word) + : "rm" (word)); + return word; +} + +/** + * ffz - find first zero bit in word + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. + */ +static inline unsigned long ffz(unsigned long word) +{ + asm("bsf %1,%0" + : "=r" (word) + : "r" (~word)); + return word; +} + +/* + * __fls: find last set bit in word + * @word: The word to search + * + * Undefined if no set bit exists, so code should check against 0 first. 
+ */ +static inline unsigned long __fls(unsigned long word) +{ + asm("bsr %1,%0" + : "=r" (word) + : "rm" (word)); + return word; +} + +#undef ADDR + +#ifdef __KERNEL__ +/** + * ffs - find first set bit in word + * @x: the word to search + * + * This is defined the same way as the libc and compiler builtin ffs + * routines, therefore differs in spirit from the other bitops. + * + * ffs(value) returns 0 if value is 0 or the position of the first + * set bit if value is nonzero. The first (least significant) bit + * is at position 1. + */ +static inline int ffs(int x) +{ + int r; + +#ifdef CONFIG_X86_64 + /* + * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the + * dest reg is undefined if x==0, but their CPU architect says its + * value is written to set it to the same as before, except that the + * top 32 bits will be cleared. + * + * We cannot do this on 32 bits because at the very least some + * 486 CPUs did not behave this way. + */ + long tmp = -1; + asm("bsfl %1,%0" + : "=r" (r) + : "rm" (x), "0" (tmp)); +#elif defined(CONFIG_X86_CMOV) + asm("bsfl %1,%0\n\t" + "cmovzl %2,%0" + : "=&r" (r) : "rm" (x), "r" (-1)); +#else + asm("bsfl %1,%0\n\t" + "jnz 1f\n\t" + "movl $-1,%0\n" + "1:" : "=r" (r) : "rm" (x)); +#endif + return r + 1; +} + +/** + * fls - find last set bit in word + * @x: the word to search + * + * This is defined in a similar way as the libc and compiler builtin + * ffs, but returns the position of the most significant set bit. + * + * fls(value) returns 0 if value is 0 or the position of the last + * set bit if value is nonzero. The last (most significant) bit is + * at position 32. + */ +static inline int fls(int x) +{ + int r; + +#ifdef CONFIG_X86_64 + /* + * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the + * dest reg is undefined if x==0, but their CPU architect says its + * value is written to set it to the same as before, except that the + * top 32 bits will be cleared. 
+ * + * We cannot do this on 32 bits because at the very least some + * 486 CPUs did not behave this way. + */ + long tmp = -1; + asm("bsrl %1,%0" + : "=r" (r) + : "rm" (x), "0" (tmp)); +#elif defined(CONFIG_X86_CMOV) + asm("bsrl %1,%0\n\t" + "cmovzl %2,%0" + : "=&r" (r) : "rm" (x), "rm" (-1)); +#else + asm("bsrl %1,%0\n\t" + "jnz 1f\n\t" + "movl $-1,%0\n" + "1:" : "=r" (r) : "rm" (x)); +#endif + return r + 1; +} + +/** + * fls64 - find last set bit in a 64-bit word + * @x: the word to search + * + * This is defined in a similar way as the libc and compiler builtin + * ffsll, but returns the position of the most significant set bit. + * + * fls64(value) returns 0 if value is 0 or the position of the last + * set bit if value is nonzero. The last (most significant) bit is + * at position 64. + */ +#ifdef CONFIG_X86_64 +static __always_inline int fls64(__u64 x) +{ + long bitpos = -1; + /* + * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the + * dest reg is undefined if x==0, but their CPU architect says its + * value is written to set it to the same as before. 
+ */ +	asm("bsrq %1,%0" +	    : "+r" (bitpos) +	    : "rm" (x)); +	return bitpos + 1; +} +#else +#include <asm-generic/bitops/fls64.h> +#endif + +#include <asm-generic/bitops/find.h> + +#include <asm-generic/bitops/sched.h> + +#define ARCH_HAS_FAST_MULTIPLIER 1 + +#include <asm/arch_hweight.h> + +#include <asm-generic/bitops/const_hweight.h> + +#include <asm-generic/bitops/le.h> + +#include <asm-generic/bitops/ext2-atomic-setbit.h> + +#endif /* __KERNEL__ */ +#endif /* _ASM_X86_BITOPS_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/bitsperlong.h b/ANDROID_3.4.5/arch/x86/include/asm/bitsperlong.h new file mode 100644 index 00000000..b0ae1c4d --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/bitsperlong.h @@ -0,0 +1,13 @@ +#ifndef __ASM_X86_BITSPERLONG_H +#define __ASM_X86_BITSPERLONG_H + +#ifdef __x86_64__ +# define __BITS_PER_LONG 64 +#else +# define __BITS_PER_LONG 32 +#endif + +#include <asm-generic/bitsperlong.h> + +#endif /* __ASM_X86_BITSPERLONG_H */ + diff --git a/ANDROID_3.4.5/arch/x86/include/asm/boot.h b/ANDROID_3.4.5/arch/x86/include/asm/boot.h new file mode 100644 index 00000000..5e1a2eef --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/boot.h @@ -0,0 +1,47 @@ +#ifndef _ASM_X86_BOOT_H +#define _ASM_X86_BOOT_H + +/* Internal svga startup constants */ +#define NORMAL_VGA 0xffff /* 80x25 mode */ +#define EXTENDED_VGA 0xfffe /* 80x50 mode */ +#define ASK_VGA 0xfffd /* ask for it at bootup */ + +#ifdef __KERNEL__ + +#include <asm/pgtable_types.h> + +/* Physical address where kernel should be loaded.
*/ +#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ + + (CONFIG_PHYSICAL_ALIGN - 1)) \ + & ~(CONFIG_PHYSICAL_ALIGN - 1)) + +/* Minimum kernel alignment, as a power of two */ +#ifdef CONFIG_X86_64 +#define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT +#else +#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_ORDER) +#endif +#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2) + +#if (CONFIG_PHYSICAL_ALIGN & (CONFIG_PHYSICAL_ALIGN-1)) || \ + (CONFIG_PHYSICAL_ALIGN < MIN_KERNEL_ALIGN) +#error "Invalid value for CONFIG_PHYSICAL_ALIGN" +#endif + +#ifdef CONFIG_KERNEL_BZIP2 +#define BOOT_HEAP_SIZE 0x400000 +#else /* !CONFIG_KERNEL_BZIP2 */ + +#define BOOT_HEAP_SIZE 0x8000 + +#endif /* !CONFIG_KERNEL_BZIP2 */ + +#ifdef CONFIG_X86_64 +#define BOOT_STACK_SIZE 0x4000 +#else +#define BOOT_STACK_SIZE 0x1000 +#endif + +#endif /* __KERNEL__ */ + +#endif /* _ASM_X86_BOOT_H */ diff --git a/ANDROID_3.4.5/arch/x86/include/asm/bootparam.h b/ANDROID_3.4.5/arch/x86/include/asm/bootparam.h new file mode 100644 index 00000000..2f90c51c --- /dev/null +++ b/ANDROID_3.4.5/arch/x86/include/asm/bootparam.h @@ -0,0 +1,136 @@ +#ifndef _ASM_X86_BOOTPARAM_H +#define _ASM_X86_BOOTPARAM_H + +#include <linux/types.h> +#include <linux/screen_info.h> +#include <linux/apm_bios.h> +#include <linux/edd.h> +#include <asm/e820.h> +#include <asm/ist.h> +#include <video/edid.h>