| author | Srikant Patnaik | 2015-01-13 15:08:24 +0530 |
|---|---|---|
| committer | Srikant Patnaik | 2015-01-13 15:08:24 +0530 |
| commit | 97327692361306d1e6259021bc425e32832fdb50 (patch) | |
| tree | fe9088f3248ec61e24f404f21b9793cb644b7f01 /arch/mips/kernel | |
| parent | 2d05a8f663478a44e088d122e0d62109bbc801d0 (diff) | |
| parent | a3a8b90b61e21be3dde9101c4e86c881e0f06210 (diff) | |
| download | FOSSEE-netbook-kernel-source-97327692361306d1e6259021bc425e32832fdb50.tar.gz FOSSEE-netbook-kernel-source-97327692361306d1e6259021bc425e32832fdb50.tar.bz2 FOSSEE-netbook-kernel-source-97327692361306d1e6259021bc425e32832fdb50.zip | |
dirty fix to merging
Diffstat (limited to 'arch/mips/kernel')
100 files changed, 29744 insertions, 0 deletions
diff --git a/arch/mips/kernel/8250-platform.c b/arch/mips/kernel/8250-platform.c new file mode 100644 index 00000000..5c6b2ab1 --- /dev/null +++ b/arch/mips/kernel/8250-platform.c @@ -0,0 +1,46 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org) + */ +#include <linux/init.h> +#include <linux/serial_8250.h> + +#define PORT(base, int) \ +{ \ + .iobase = base, \ + .irq = int, \ + .uartclk = 1843200, \ + .iotype = UPIO_PORT, \ + .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \ + .regshift = 0, \ +} + +static struct plat_serial8250_port uart8250_data[] = { + PORT(0x3F8, 4), + PORT(0x2F8, 3), + PORT(0x3E8, 4), + PORT(0x2E8, 3), + { }, +}; + +static struct platform_device uart8250_device = { + .name = "serial8250", + .id = PLAT8250_DEV_PLATFORM, + .dev = { + .platform_data = uart8250_data, + }, +}; + +static int __init uart8250_init(void) +{ + return platform_device_register(&uart8250_device); +} + +module_init(uart8250_init); + +MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Generic 8250 UART probe driver"); diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile new file mode 100644 index 00000000..0c6877ea --- /dev/null +++ b/arch/mips/kernel/Makefile @@ -0,0 +1,118 @@ +# +# Makefile for the Linux/MIPS kernel. +# + +extra-y := head.o init_task.o vmlinux.lds + +obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ + ptrace.o reset.o setup.o signal.o syscall.o \ + time.o topology.o traps.o unaligned.o watch.o vdso.o + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_ftrace.o = -pg +CFLAGS_REMOVE_early_printk.o = -pg +CFLAGS_REMOVE_perf_event.o = -pg +CFLAGS_REMOVE_perf_event_mipsxx.o = -pg +endif + +obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o +obj-$(CONFIG_CEVT_R4K_LIB) += cevt-r4k.o +obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o +obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o +obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o +obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o +obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o +obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o +obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o +obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o +obj-$(CONFIG_CSRC_R4K_LIB) += csrc-r4k.o +obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o +obj-$(CONFIG_SYNC_R4K) += sync-r4k.o + +obj-$(CONFIG_STACKTRACE) += stacktrace.o +obj-$(CONFIG_MODULES) += mips_ksyms.o module.o + +obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o + +obj-$(CONFIG_CPU_LOONGSON2) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_MIPS32) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_MIPS64) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o +obj-$(CONFIG_CPU_R4300) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_R4X00) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_R5000) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_R5432) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_R5500) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_R8000) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_RM7000) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_RM9000) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_NEVADA) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_R10000) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_SB1) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o +obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_VR41XX) 
+= r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o +obj-$(CONFIG_CPU_XLR) += r4k_fpu.o r4k_switch.o +obj-$(CONFIG_CPU_XLP) += r4k_fpu.o r4k_switch.o + +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_SMP_UP) += smp-up.o +obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o + +obj-$(CONFIG_MIPS_MT) += mips-mt.o +obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o +obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o +obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o +obj-$(CONFIG_MIPS_CMP) += smp-cmp.o +obj-$(CONFIG_CPU_MIPSR2) += spram.o + +obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o +obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o +obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o + +obj-$(CONFIG_I8259) += i8259.o +obj-$(CONFIG_IRQ_CPU) += irq_cpu.o +obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o +obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o +obj-$(CONFIG_MIPS_MSC) += irq-msc01.o +obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o +obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o +obj-$(CONFIG_IRQ_GIC) += irq-gic.o + +obj-$(CONFIG_KPROBES) += kprobes.o +obj-$(CONFIG_32BIT) += scall32-o32.o +obj-$(CONFIG_64BIT) += scall64-64.o +obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o +obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o +obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o + +obj-$(CONFIG_KGDB) += kgdb.o +obj-$(CONFIG_PROC_FS) += proc.o + +obj-$(CONFIG_64BIT) += cpu-bugs64.o + +obj-$(CONFIG_I8253) += i8253.o + +obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o + +obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o +obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o +obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o + +obj-$(CONFIG_OF) += prom.o + +CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) + +obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o + +obj-$(CONFIG_MIPS_CPUFREQ) += cpufreq/ + +obj-$(CONFIG_PERF_EVENTS) += perf_event.o +obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o + +obj-$(CONFIG_JUMP_LABEL) += jump_label.o + +CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS) diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c new file mode 100644 index 00000000..6b30fb2c --- /dev/null +++ b/arch/mips/kernel/asm-offsets.c @@ -0,0 +1,341 @@ +/* + * offset.c: Calculate pt_regs and task_struct offsets. + * + * Copyright (C) 1996 David S. Miller + * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * + * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. 
+ */ +#include <linux/compat.h> +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <linux/kbuild.h> +#include <linux/suspend.h> +#include <asm/ptrace.h> +#include <asm/processor.h> + +void output_ptreg_defines(void) +{ + COMMENT("MIPS pt_regs offsets."); + OFFSET(PT_R0, pt_regs, regs[0]); + OFFSET(PT_R1, pt_regs, regs[1]); + OFFSET(PT_R2, pt_regs, regs[2]); + OFFSET(PT_R3, pt_regs, regs[3]); + OFFSET(PT_R4, pt_regs, regs[4]); + OFFSET(PT_R5, pt_regs, regs[5]); + OFFSET(PT_R6, pt_regs, regs[6]); + OFFSET(PT_R7, pt_regs, regs[7]); + OFFSET(PT_R8, pt_regs, regs[8]); + OFFSET(PT_R9, pt_regs, regs[9]); + OFFSET(PT_R10, pt_regs, regs[10]); + OFFSET(PT_R11, pt_regs, regs[11]); + OFFSET(PT_R12, pt_regs, regs[12]); + OFFSET(PT_R13, pt_regs, regs[13]); + OFFSET(PT_R14, pt_regs, regs[14]); + OFFSET(PT_R15, pt_regs, regs[15]); + OFFSET(PT_R16, pt_regs, regs[16]); + OFFSET(PT_R17, pt_regs, regs[17]); + OFFSET(PT_R18, pt_regs, regs[18]); + OFFSET(PT_R19, pt_regs, regs[19]); + OFFSET(PT_R20, pt_regs, regs[20]); + OFFSET(PT_R21, pt_regs, regs[21]); + OFFSET(PT_R22, pt_regs, regs[22]); + OFFSET(PT_R23, pt_regs, regs[23]); + OFFSET(PT_R24, pt_regs, regs[24]); + OFFSET(PT_R25, pt_regs, regs[25]); + OFFSET(PT_R26, pt_regs, regs[26]); + OFFSET(PT_R27, pt_regs, regs[27]); + OFFSET(PT_R28, pt_regs, regs[28]); + OFFSET(PT_R29, pt_regs, regs[29]); + OFFSET(PT_R30, pt_regs, regs[30]); + OFFSET(PT_R31, pt_regs, regs[31]); + OFFSET(PT_LO, pt_regs, lo); + OFFSET(PT_HI, pt_regs, hi); +#ifdef CONFIG_CPU_HAS_SMARTMIPS + OFFSET(PT_ACX, pt_regs, acx); +#endif + OFFSET(PT_EPC, pt_regs, cp0_epc); + OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); + OFFSET(PT_STATUS, pt_regs, cp0_status); + OFFSET(PT_CAUSE, pt_regs, cp0_cause); +#ifdef CONFIG_MIPS_MT_SMTC + OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus); +#endif /* CONFIG_MIPS_MT_SMTC */ +#ifdef CONFIG_CPU_CAVIUM_OCTEON + OFFSET(PT_MPL, pt_regs, mpl); + OFFSET(PT_MTP, pt_regs, mtp); +#endif /* CONFIG_CPU_CAVIUM_OCTEON */ + DEFINE(PT_SIZE, sizeof(struct pt_regs)); + BLANK(); +} + +void output_task_defines(void) +{ + COMMENT("MIPS task_struct offsets."); + OFFSET(TASK_STATE, task_struct, state); + OFFSET(TASK_THREAD_INFO, task_struct, stack); + OFFSET(TASK_FLAGS, task_struct, flags); + OFFSET(TASK_MM, task_struct, mm); + OFFSET(TASK_PID, task_struct, pid); + DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct)); + BLANK(); +} + +void output_thread_info_defines(void) +{ + COMMENT("MIPS thread_info offsets."); + OFFSET(TI_TASK, thread_info, task); + OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain); + OFFSET(TI_FLAGS, thread_info, flags); + OFFSET(TI_TP_VALUE, thread_info, tp_value); + OFFSET(TI_CPU, thread_info, cpu); + OFFSET(TI_PRE_COUNT, thread_info, preempt_count); + OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); + OFFSET(TI_RESTART_BLOCK, thread_info, restart_block); + OFFSET(TI_REGS, thread_info, regs); + DEFINE(_THREAD_SIZE, THREAD_SIZE); + DEFINE(_THREAD_MASK, THREAD_MASK); + BLANK(); +} + +void output_thread_defines(void) +{ + COMMENT("MIPS specific thread_struct offsets."); + OFFSET(THREAD_REG16, task_struct, thread.reg16); + OFFSET(THREAD_REG17, task_struct, thread.reg17); + OFFSET(THREAD_REG18, task_struct, thread.reg18); + OFFSET(THREAD_REG19, task_struct, thread.reg19); + OFFSET(THREAD_REG20, task_struct, thread.reg20); + OFFSET(THREAD_REG21, task_struct, thread.reg21); + OFFSET(THREAD_REG22, task_struct, thread.reg22); + OFFSET(THREAD_REG23, task_struct, thread.reg23); + OFFSET(THREAD_REG29, task_struct, 
thread.reg29); + OFFSET(THREAD_REG30, task_struct, thread.reg30); + OFFSET(THREAD_REG31, task_struct, thread.reg31); + OFFSET(THREAD_STATUS, task_struct, + thread.cp0_status); + OFFSET(THREAD_FPU, task_struct, thread.fpu); + + OFFSET(THREAD_BVADDR, task_struct, \ + thread.cp0_badvaddr); + OFFSET(THREAD_BUADDR, task_struct, \ + thread.cp0_baduaddr); + OFFSET(THREAD_ECODE, task_struct, \ + thread.error_code); + OFFSET(THREAD_TRAMP, task_struct, \ + thread.irix_trampoline); + OFFSET(THREAD_OLDCTX, task_struct, \ + thread.irix_oldctx); + BLANK(); +} + +void output_thread_fpu_defines(void) +{ + OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]); + OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]); + OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]); + OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]); + OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]); + OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]); + OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]); + OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]); + OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]); + OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]); + OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]); + OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]); + OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]); + OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]); + OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]); + OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]); + OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]); + OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]); + OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]); + OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]); + OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]); + OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]); + OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]); + OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]); + OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]); + OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]); + OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]); + OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]); + OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]); + OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]); + OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]); + OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]); + + OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31); + BLANK(); +} + +void output_mm_defines(void) +{ + COMMENT("Size of struct page"); + DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page)); + BLANK(); + COMMENT("Linux mm_struct offsets."); + OFFSET(MM_USERS, mm_struct, mm_users); + OFFSET(MM_PGD, mm_struct, pgd); + OFFSET(MM_CONTEXT, mm_struct, context); + BLANK(); + DEFINE(_PGD_T_SIZE, sizeof(pgd_t)); + DEFINE(_PMD_T_SIZE, sizeof(pmd_t)); + DEFINE(_PTE_T_SIZE, sizeof(pte_t)); + BLANK(); + DEFINE(_PGD_T_LOG2, PGD_T_LOG2); +#ifndef __PAGETABLE_PMD_FOLDED + DEFINE(_PMD_T_LOG2, PMD_T_LOG2); +#endif + DEFINE(_PTE_T_LOG2, PTE_T_LOG2); + BLANK(); + DEFINE(_PGD_ORDER, PGD_ORDER); +#ifndef __PAGETABLE_PMD_FOLDED + DEFINE(_PMD_ORDER, PMD_ORDER); +#endif + DEFINE(_PTE_ORDER, PTE_ORDER); + BLANK(); + DEFINE(_PMD_SHIFT, PMD_SHIFT); + DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); + BLANK(); + DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD); + DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD); + DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE); + BLANK(); +} + +#ifdef CONFIG_32BIT +void output_sc_defines(void) +{ + COMMENT("Linux sigcontext offsets."); + OFFSET(SC_REGS, sigcontext, sc_regs); + 
OFFSET(SC_FPREGS, sigcontext, sc_fpregs); + OFFSET(SC_ACX, sigcontext, sc_acx); + OFFSET(SC_MDHI, sigcontext, sc_mdhi); + OFFSET(SC_MDLO, sigcontext, sc_mdlo); + OFFSET(SC_PC, sigcontext, sc_pc); + OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); + OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir); + OFFSET(SC_HI1, sigcontext, sc_hi1); + OFFSET(SC_LO1, sigcontext, sc_lo1); + OFFSET(SC_HI2, sigcontext, sc_hi2); + OFFSET(SC_LO2, sigcontext, sc_lo2); + OFFSET(SC_HI3, sigcontext, sc_hi3); + OFFSET(SC_LO3, sigcontext, sc_lo3); + BLANK(); +} +#endif + +#ifdef CONFIG_64BIT +void output_sc_defines(void) +{ + COMMENT("Linux sigcontext offsets."); + OFFSET(SC_REGS, sigcontext, sc_regs); + OFFSET(SC_FPREGS, sigcontext, sc_fpregs); + OFFSET(SC_MDHI, sigcontext, sc_mdhi); + OFFSET(SC_MDLO, sigcontext, sc_mdlo); + OFFSET(SC_PC, sigcontext, sc_pc); + OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); + BLANK(); +} +#endif + +#ifdef CONFIG_MIPS32_COMPAT +void output_sc32_defines(void) +{ + COMMENT("Linux 32-bit sigcontext offsets."); + OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs); + OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr); + OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir); + BLANK(); +} +#endif + +void output_signal_defined(void) +{ + COMMENT("Linux signal numbers."); + DEFINE(_SIGHUP, SIGHUP); + DEFINE(_SIGINT, SIGINT); + DEFINE(_SIGQUIT, SIGQUIT); + DEFINE(_SIGILL, SIGILL); + DEFINE(_SIGTRAP, SIGTRAP); + DEFINE(_SIGIOT, SIGIOT); + DEFINE(_SIGABRT, SIGABRT); + DEFINE(_SIGEMT, SIGEMT); + DEFINE(_SIGFPE, SIGFPE); + DEFINE(_SIGKILL, SIGKILL); + DEFINE(_SIGBUS, SIGBUS); + DEFINE(_SIGSEGV, SIGSEGV); + DEFINE(_SIGSYS, SIGSYS); + DEFINE(_SIGPIPE, SIGPIPE); + DEFINE(_SIGALRM, SIGALRM); + DEFINE(_SIGTERM, SIGTERM); + DEFINE(_SIGUSR1, SIGUSR1); + DEFINE(_SIGUSR2, SIGUSR2); + DEFINE(_SIGCHLD, SIGCHLD); + DEFINE(_SIGPWR, SIGPWR); + DEFINE(_SIGWINCH, SIGWINCH); + DEFINE(_SIGURG, SIGURG); + DEFINE(_SIGIO, SIGIO); + DEFINE(_SIGSTOP, SIGSTOP); + DEFINE(_SIGTSTP, SIGTSTP); + DEFINE(_SIGCONT, SIGCONT); + DEFINE(_SIGTTIN, SIGTTIN); + DEFINE(_SIGTTOU, SIGTTOU); + DEFINE(_SIGVTALRM, SIGVTALRM); + DEFINE(_SIGPROF, SIGPROF); + DEFINE(_SIGXCPU, SIGXCPU); + DEFINE(_SIGXFSZ, SIGXFSZ); + BLANK(); +} + +void output_irq_cpustat_t_defines(void) +{ + COMMENT("Linux irq_cpustat_t offsets."); + DEFINE(IC_SOFTIRQ_PENDING, + offsetof(irq_cpustat_t, __softirq_pending)); + DEFINE(IC_IRQ_CPUSTAT_T, sizeof(irq_cpustat_t)); + BLANK(); +} + +#ifdef CONFIG_CPU_CAVIUM_OCTEON +void output_octeon_cop2_state_defines(void) +{ + COMMENT("Octeon specific octeon_cop2_state offsets."); + OFFSET(OCTEON_CP2_CRC_IV, octeon_cop2_state, cop2_crc_iv); + OFFSET(OCTEON_CP2_CRC_LENGTH, octeon_cop2_state, cop2_crc_length); + OFFSET(OCTEON_CP2_CRC_POLY, octeon_cop2_state, cop2_crc_poly); + OFFSET(OCTEON_CP2_LLM_DAT, octeon_cop2_state, cop2_llm_dat); + OFFSET(OCTEON_CP2_3DES_IV, octeon_cop2_state, cop2_3des_iv); + OFFSET(OCTEON_CP2_3DES_KEY, octeon_cop2_state, cop2_3des_key); + OFFSET(OCTEON_CP2_3DES_RESULT, octeon_cop2_state, cop2_3des_result); + OFFSET(OCTEON_CP2_AES_INP0, octeon_cop2_state, cop2_aes_inp0); + OFFSET(OCTEON_CP2_AES_IV, octeon_cop2_state, cop2_aes_iv); + OFFSET(OCTEON_CP2_AES_KEY, octeon_cop2_state, cop2_aes_key); + OFFSET(OCTEON_CP2_AES_KEYLEN, octeon_cop2_state, cop2_aes_keylen); + OFFSET(OCTEON_CP2_AES_RESULT, octeon_cop2_state, cop2_aes_result); + OFFSET(OCTEON_CP2_GFM_MULT, octeon_cop2_state, cop2_gfm_mult); + OFFSET(OCTEON_CP2_GFM_POLY, octeon_cop2_state, cop2_gfm_poly); + OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result); + 
OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw); + OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw); + OFFSET(THREAD_CP2, task_struct, thread.cp2); + OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); + BLANK(); +} +#endif + +#ifdef CONFIG_HIBERNATION +void output_pbe_defines(void) +{ + COMMENT(" Linux struct pbe offsets. "); + OFFSET(PBE_ADDRESS, pbe, address); + OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address); + OFFSET(PBE_NEXT, pbe, next); + DEFINE(PBE_SIZE, sizeof(struct pbe)); + BLANK(); +} +#endif diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c new file mode 100644 index 00000000..9fdd8bcd --- /dev/null +++ b/arch/mips/kernel/binfmt_elfn32.c @@ -0,0 +1,122 @@ +/* + * Support for n32 Linux/MIPS ELF binaries. + * + * Copyright (C) 1999, 2001 Ralf Baechle + * Copyright (C) 1999, 2001 Silicon Graphics, Inc. + * + * Heavily inspired by the 32-bit Sparc compat code which is + * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com) + * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz) + */ + +#define ELF_ARCH EM_MIPS +#define ELF_CLASS ELFCLASS32 +#ifdef __MIPSEB__ +#define ELF_DATA ELFDATA2MSB; +#else /* __MIPSEL__ */ +#define ELF_DATA ELFDATA2LSB; +#endif + +/* ELF register definitions */ +#define ELF_NGREG 45 +#define ELF_NFPREG 33 + +typedef unsigned long elf_greg_t; +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef double elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(hdr) \ +({ \ + int __res = 1; \ + struct elfhdr *__h = (hdr); \ + \ + if (__h->e_machine != EM_MIPS) \ + __res = 0; \ + if (__h->e_ident[EI_CLASS] != ELFCLASS32) \ + __res = 0; \ + if (((__h->e_flags & EF_MIPS_ABI2) == 0) || \ + ((__h->e_flags & EF_MIPS_ABI) != 0)) \ + __res = 0; \ + \ + __res; \ +}) + +#define TASK32_SIZE 0x7fff8000UL +#undef ELF_ET_DYN_BASE +#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) + +#include <asm/processor.h> +#include <linux/module.h> +#include <linux/elfcore.h> +#include <linux/compat.h> +#include <linux/math64.h> + +#define elf_prstatus elf_prstatus32 +struct elf_prstatus32 +{ + struct elf_siginfo pr_info; /* Info associated with signal */ + short pr_cursig; /* Current signal */ + unsigned int pr_sigpend; /* Set of pending signals */ + unsigned int pr_sighold; /* Set of held signals */ + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct compat_timeval pr_utime; /* User time */ + struct compat_timeval pr_stime; /* System time */ + struct compat_timeval pr_cutime;/* Cumulative user time */ + struct compat_timeval pr_cstime;/* Cumulative system time */ + elf_gregset_t pr_reg; /* GP registers */ + int pr_fpvalid; /* True if math co-processor being used. 
*/ +}; + +#define elf_prpsinfo elf_prpsinfo32 +struct elf_prpsinfo32 +{ + char pr_state; /* numeric process state */ + char pr_sname; /* char for pr_state */ + char pr_zomb; /* zombie */ + char pr_nice; /* nice val */ + unsigned int pr_flag; /* flags */ + __kernel_uid_t pr_uid; + __kernel_gid_t pr_gid; + pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; + /* Lots missing */ + char pr_fname[16]; /* filename of executable */ + char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ +}; + +#define elf_caddr_t u32 +#define init_elf_binfmt init_elfn32_binfmt + +#define jiffies_to_timeval jiffies_to_compat_timeval +static __inline__ void +jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) +{ + /* + * Convert jiffies to nanoseconds and separate with + * one divide. + */ + u64 nsec = (u64)jiffies * TICK_NSEC; + u32 rem; + value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); + value->tv_usec = rem / NSEC_PER_USEC; +} + +#define ELF_CORE_EFLAGS EF_MIPS_ABI2 + +MODULE_DESCRIPTION("Binary format loader for compatibility with n32 Linux/MIPS binaries"); +MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)"); + +#undef MODULE_DESCRIPTION +#undef MODULE_AUTHOR + +#undef TASK_SIZE +#define TASK_SIZE TASK_SIZE32 + +#include "../../../fs/binfmt_elf.c" diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c new file mode 100644 index 00000000..ff448233 --- /dev/null +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -0,0 +1,161 @@ +/* + * Support for o32 Linux/MIPS ELF binaries. + * + * Copyright (C) 1999, 2001 Ralf Baechle + * Copyright (C) 1999, 2001 Silicon Graphics, Inc. + * + * Heavily inspired by the 32-bit Sparc compat code which is + * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com) + * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz) + */ + +#define ELF_ARCH EM_MIPS +#define ELF_CLASS ELFCLASS32 +#ifdef __MIPSEB__ +#define ELF_DATA ELFDATA2MSB; +#else /* __MIPSEL__ */ +#define ELF_DATA ELFDATA2LSB; +#endif + +/* ELF register definitions */ +#define ELF_NGREG 45 +#define ELF_NFPREG 33 + +typedef unsigned int elf_greg_t; +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef double elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(hdr) \ +({ \ + int __res = 1; \ + struct elfhdr *__h = (hdr); \ + \ + if (__h->e_machine != EM_MIPS) \ + __res = 0; \ + if (__h->e_ident[EI_CLASS] != ELFCLASS32) \ + __res = 0; \ + if ((__h->e_flags & EF_MIPS_ABI2) != 0) \ + __res = 0; \ + if (((__h->e_flags & EF_MIPS_ABI) != 0) && \ + ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \ + __res = 0; \ + \ + __res; \ +}) + +#define TASK32_SIZE 0x7fff8000UL +#undef ELF_ET_DYN_BASE +#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) + +#include <asm/processor.h> + +/* + * When this file is selected, we are definitely running a 64bit kernel. 
+ * So using the right regs define in asm/reg.h + */ +#define WANT_COMPAT_REG_H + +/* These MUST be defined before elf.h gets included */ +extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs); +#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs); +#define ELF_CORE_COPY_TASK_REGS(_tsk, _dest) \ +({ \ + int __res = 1; \ + elf32_core_copy_regs(*(_dest), task_pt_regs(_tsk)); \ + __res; \ +}) + +#include <linux/module.h> +#include <linux/elfcore.h> +#include <linux/compat.h> +#include <linux/math64.h> + +#define elf_prstatus elf_prstatus32 +struct elf_prstatus32 +{ + struct elf_siginfo pr_info; /* Info associated with signal */ + short pr_cursig; /* Current signal */ + unsigned int pr_sigpend; /* Set of pending signals */ + unsigned int pr_sighold; /* Set of held signals */ + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct compat_timeval pr_utime; /* User time */ + struct compat_timeval pr_stime; /* System time */ + struct compat_timeval pr_cutime;/* Cumulative user time */ + struct compat_timeval pr_cstime;/* Cumulative system time */ + elf_gregset_t pr_reg; /* GP registers */ + int pr_fpvalid; /* True if math co-processor being used. */ +}; + +#define elf_prpsinfo elf_prpsinfo32 +struct elf_prpsinfo32 +{ + char pr_state; /* numeric process state */ + char pr_sname; /* char for pr_state */ + char pr_zomb; /* zombie */ + char pr_nice; /* nice val */ + unsigned int pr_flag; /* flags */ + __kernel_uid_t pr_uid; + __kernel_gid_t pr_gid; + pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; + /* Lots missing */ + char pr_fname[16]; /* filename of executable */ + char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ +}; + +#define elf_caddr_t u32 +#define init_elf_binfmt init_elf32_binfmt + +#define jiffies_to_timeval jiffies_to_compat_timeval +static inline void +jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) +{ + /* + * Convert jiffies to nanoseconds and separate with + * one divide. + */ + u64 nsec = (u64)jiffies * TICK_NSEC; + u32 rem; + value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); + value->tv_usec = rem / NSEC_PER_USEC; +} + +void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs) +{ + int i; + + for (i = 0; i < EF_R0; i++) + grp[i] = 0; + grp[EF_R0] = 0; + for (i = 1; i <= 31; i++) + grp[EF_R0 + i] = (elf_greg_t) regs->regs[i]; + grp[EF_R26] = 0; + grp[EF_R27] = 0; + grp[EF_LO] = (elf_greg_t) regs->lo; + grp[EF_HI] = (elf_greg_t) regs->hi; + grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc; + grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr; + grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status; + grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause; +#ifdef EF_UNUSED0 + grp[EF_UNUSED0] = 0; +#endif +} + +MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries"); +MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)"); + +#undef MODULE_DESCRIPTION +#undef MODULE_AUTHOR + +#undef TASK_SIZE +#define TASK_SIZE TASK_SIZE32 + +#include "../../../fs/binfmt_elf.c" diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S new file mode 100644 index 00000000..e908e813 --- /dev/null +++ b/arch/mips/kernel/bmips_vec.S @@ -0,0 +1,255 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com) + * + * Reset/NMI/re-entry vectors for BMIPS processors + */ + +#include <linux/init.h> + +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/cacheops.h> +#include <asm/regdef.h> +#include <asm/mipsregs.h> +#include <asm/stackframe.h> +#include <asm/addrspace.h> +#include <asm/hazards.h> +#include <asm/bmips.h> + + .macro BARRIER + .set mips32 + _ssnop + _ssnop + _ssnop + .set mips0 + .endm + + __CPUINIT + +/*********************************************************************** + * Alternate CPU1 startup vector for BMIPS4350 + * + * On some systems the bootloader has already started CPU1 and configured + * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is + * triggered by the SW1 interrupt. If that is the case we try to move + * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380. + ***********************************************************************/ + +LEAF(bmips_smp_movevec) + la k0, 1f + li k1, CKSEG1 + or k0, k1 + jr k0 + +1: + /* clear IV, pending IPIs */ + mtc0 zero, CP0_CAUSE + + /* re-enable IRQs to wait for SW1 */ + li k0, ST0_IE | ST0_BEV | STATUSF_IP1 + mtc0 k0, CP0_STATUS + + /* set up CPU1 CBR; move BASE to 0xa000_0000 */ + li k0, 0xff400000 + mtc0 k0, $22, 6 + li k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_1 + or k0, k1 + li k1, 0xa0080000 + sw k1, 0(k0) + + /* wait here for SW1 interrupt from bmips_boot_secondary() */ + wait + + la k0, bmips_reset_nmi_vec + li k1, CKSEG1 + or k0, k1 + jr k0 +END(bmips_smp_movevec) + +/*********************************************************************** + * Reset/NMI vector + * For BMIPS processors that can relocate their exception vectors, this + * entire function gets copied to 0x8000_0000. + ***********************************************************************/ + +NESTED(bmips_reset_nmi_vec, PT_SIZE, sp) + .set push + .set noat + .align 4 + +#ifdef CONFIG_SMP + /* if the NMI bit is clear, assume this is a CPU1 reset instead */ + li k1, (1 << 19) + mfc0 k0, CP0_STATUS + and k0, k1 + beqz k0, bmips_smp_entry + +#if defined(CONFIG_CPU_BMIPS5000) + /* if we're not on core 0, this must be the SMP boot signal */ + li k1, (3 << 25) + mfc0 k0, $22 + and k0, k1 + bnez k0, bmips_smp_entry +#endif +#endif /* CONFIG_SMP */ + + /* nope, it's just a regular NMI */ + SAVE_ALL + move a0, sp + + /* clear EXL, ERL, BEV so that TLB refills still work */ + mfc0 k0, CP0_STATUS + li k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE + or k0, k1 + xor k0, k1 + mtc0 k0, CP0_STATUS + BARRIER + + /* jump to the NMI handler function */ + la k0, nmi_handler + jr k0 + + RESTORE_ALL + .set mips3 + eret + +/*********************************************************************** + * CPU1 reset vector (used for the initial boot only) + * This is still part of bmips_reset_nmi_vec(). 
+ ***********************************************************************/ + +#ifdef CONFIG_SMP + +bmips_smp_entry: + + /* set up CP0 STATUS; enable FPU */ + li k0, 0x30000000 + mtc0 k0, CP0_STATUS + BARRIER + + /* set local CP0 CONFIG to make kseg0 cacheable, write-back */ + mfc0 k0, CP0_CONFIG + ori k0, 0x07 + xori k0, 0x04 + mtc0 k0, CP0_CONFIG + +#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) + /* initialize CPU1's local I-cache */ + li k0, 0x80000000 + li k1, 0x80010000 + mtc0 zero, $28 + mtc0 zero, $28, 1 + BARRIER + +1: cache Index_Store_Tag_I, 0(k0) + addiu k0, 16 + bne k0, k1, 1b +#elif defined(CONFIG_CPU_BMIPS5000) + /* set exception vector base */ + la k0, ebase + lw k0, 0(k0) + mtc0 k0, $15, 1 + BARRIER +#endif + + /* jump back to kseg0 in case we need to remap the kseg1 area */ + la k0, 1f + jr k0 +1: + la k0, bmips_enable_xks01 + jalr k0 + + /* use temporary stack to set up upper memory TLB */ + li sp, BMIPS_WARM_RESTART_VEC + la k0, plat_wired_tlb_setup + jalr k0 + + /* switch to permanent stack and continue booting */ + + .global bmips_secondary_reentry +bmips_secondary_reentry: + la k0, bmips_smp_boot_sp + lw sp, 0(k0) + la k0, bmips_smp_boot_gp + lw gp, 0(k0) + la k0, start_secondary + jr k0 + +#endif /* CONFIG_SMP */ + + .align 4 + .global bmips_reset_nmi_vec_end +bmips_reset_nmi_vec_end: + +END(bmips_reset_nmi_vec) + + .set pop + .previous + +/*********************************************************************** + * CPU1 warm restart vector (used for second and subsequent boots). + * Also used for S2 standby recovery (PM). + * This entire function gets copied to (BMIPS_WARM_RESTART_VEC) + ***********************************************************************/ + +LEAF(bmips_smp_int_vec) + + .align 4 + mfc0 k0, CP0_STATUS + ori k0, 0x01 + xori k0, 0x01 + mtc0 k0, CP0_STATUS + eret + + .align 4 + .global bmips_smp_int_vec_end +bmips_smp_int_vec_end: + +END(bmips_smp_int_vec) + +/*********************************************************************** + * XKS01 support + * Certain CPUs support extending kseg0 to 1024MB. + ***********************************************************************/ + + __CPUINIT + +LEAF(bmips_enable_xks01) + +#if defined(CONFIG_XKS01) + +#if defined(CONFIG_CPU_BMIPS4380) + mfc0 t0, $22, 3 + li t1, 0x1ff0 + li t2, (1 << 12) | (1 << 9) + or t0, t1 + xor t0, t1 + or t0, t2 + mtc0 t0, $22, 3 + BARRIER +#elif defined(CONFIG_CPU_BMIPS5000) + mfc0 t0, $22, 5 + li t1, 0x01ff + li t2, (1 << 8) | (1 << 5) + or t0, t1 + xor t0, t1 + or t0, t2 + mtc0 t0, $22, 5 + BARRIER +#else + +#error Missing XKS01 setup + +#endif + +#endif /* defined(CONFIG_XKS01) */ + + jr ra + +END(bmips_enable_xks01) + + .previous diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c new file mode 100644 index 00000000..4d735d0e --- /dev/null +++ b/arch/mips/kernel/branch.c @@ -0,0 +1,293 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 97, 2000, 2001 by Ralf Baechle + * Copyright (C) 2001 MIPS Technologies, Inc. 
+ */ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/signal.h> +#include <linux/module.h> +#include <asm/branch.h> +#include <asm/cpu.h> +#include <asm/cpu-features.h> +#include <asm/fpu.h> +#include <asm/inst.h> +#include <asm/ptrace.h> +#include <asm/uaccess.h> + +/** + * __compute_return_epc_for_insn - Computes the return address and do emulate + * branch simulation, if required. + * + * @regs: Pointer to pt_regs + * @insn: branch instruction to decode + * @returns: -EFAULT on error and forces SIGBUS, and on success + * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after + * evaluating the branch. + */ +int __compute_return_epc_for_insn(struct pt_regs *regs, + union mips_instruction insn) +{ + unsigned int bit, fcr31, dspcontrol; + long epc = regs->cp0_epc; + int ret = 0; + + switch (insn.i_format.opcode) { + /* + * jr and jalr are in r_format format. + */ + case spec_op: + switch (insn.r_format.func) { + case jalr_op: + regs->regs[insn.r_format.rd] = epc + 8; + /* Fall through */ + case jr_op: + regs->cp0_epc = regs->regs[insn.r_format.rs]; + break; + } + break; + + /* + * This group contains: + * bltz_op, bgez_op, bltzl_op, bgezl_op, + * bltzal_op, bgezal_op, bltzall_op, bgezall_op. + */ + case bcond_op: + switch (insn.i_format.rt) { + case bltz_op: + case bltzl_op: + if ((long)regs->regs[insn.i_format.rs] < 0) { + epc = epc + 4 + (insn.i_format.simmediate << 2); + if (insn.i_format.rt == bltzl_op) + ret = BRANCH_LIKELY_TAKEN; + } else + epc += 8; + regs->cp0_epc = epc; + break; + + case bgez_op: + case bgezl_op: + if ((long)regs->regs[insn.i_format.rs] >= 0) { + epc = epc + 4 + (insn.i_format.simmediate << 2); + if (insn.i_format.rt == bgezl_op) + ret = BRANCH_LIKELY_TAKEN; + } else + epc += 8; + regs->cp0_epc = epc; + break; + + case bltzal_op: + case bltzall_op: + regs->regs[31] = epc + 8; + if ((long)regs->regs[insn.i_format.rs] < 0) { + epc = epc + 4 + (insn.i_format.simmediate << 2); + if (insn.i_format.rt == bltzall_op) + ret = BRANCH_LIKELY_TAKEN; + } else + epc += 8; + regs->cp0_epc = epc; + break; + + case bgezal_op: + case bgezall_op: + regs->regs[31] = epc + 8; + if ((long)regs->regs[insn.i_format.rs] >= 0) { + epc = epc + 4 + (insn.i_format.simmediate << 2); + if (insn.i_format.rt == bgezall_op) + ret = BRANCH_LIKELY_TAKEN; + } else + epc += 8; + regs->cp0_epc = epc; + break; + + case bposge32_op: + if (!cpu_has_dsp) + goto sigill; + + dspcontrol = rddsp(0x01); + + if (dspcontrol >= 32) { + epc = epc + 4 + (insn.i_format.simmediate << 2); + } else + epc += 8; + regs->cp0_epc = epc; + break; + } + break; + + /* + * These are unconditional and in j_format. + */ + case jal_op: + regs->regs[31] = regs->cp0_epc + 8; + case j_op: + epc += 4; + epc >>= 28; + epc <<= 28; + epc |= (insn.j_format.target << 2); + regs->cp0_epc = epc; + break; + + /* + * These are conditional and in i_format. 
+ */ + case beq_op: + case beql_op: + if (regs->regs[insn.i_format.rs] == + regs->regs[insn.i_format.rt]) { + epc = epc + 4 + (insn.i_format.simmediate << 2); + if (insn.i_format.rt == beql_op) + ret = BRANCH_LIKELY_TAKEN; + } else + epc += 8; + regs->cp0_epc = epc; + break; + + case bne_op: + case bnel_op: + if (regs->regs[insn.i_format.rs] != + regs->regs[insn.i_format.rt]) { + epc = epc + 4 + (insn.i_format.simmediate << 2); + if (insn.i_format.rt == bnel_op) + ret = BRANCH_LIKELY_TAKEN; + } else + epc += 8; + regs->cp0_epc = epc; + break; + + case blez_op: /* not really i_format */ + case blezl_op: + /* rt field assumed to be zero */ + if ((long)regs->regs[insn.i_format.rs] <= 0) { + epc = epc + 4 + (insn.i_format.simmediate << 2); + if (insn.i_format.rt == bnel_op) + ret = BRANCH_LIKELY_TAKEN; + } else + epc += 8; + regs->cp0_epc = epc; + break; + + case bgtz_op: + case bgtzl_op: + /* rt field assumed to be zero */ + if ((long)regs->regs[insn.i_format.rs] > 0) { + epc = epc + 4 + (insn.i_format.simmediate << 2); + if (insn.i_format.rt == bnel_op) + ret = BRANCH_LIKELY_TAKEN; + } else + epc += 8; + regs->cp0_epc = epc; + break; + + /* + * And now the FPA/cp1 branch instructions. + */ + case cop1_op: + preempt_disable(); + if (is_fpu_owner()) + asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); + else + fcr31 = current->thread.fpu.fcr31; + preempt_enable(); + + bit = (insn.i_format.rt >> 2); + bit += (bit != 0); + bit += 23; + switch (insn.i_format.rt & 3) { + case 0: /* bc1f */ + case 2: /* bc1fl */ + if (~fcr31 & (1 << bit)) { + epc = epc + 4 + (insn.i_format.simmediate << 2); + if (insn.i_format.rt == 2) + ret = BRANCH_LIKELY_TAKEN; + } else + epc += 8; + regs->cp0_epc = epc; + break; + + case 1: /* bc1t */ + case 3: /* bc1tl */ + if (fcr31 & (1 << bit)) { + epc = epc + 4 + (insn.i_format.simmediate << 2); + if (insn.i_format.rt == 3) + ret = BRANCH_LIKELY_TAKEN; + } else + epc += 8; + regs->cp0_epc = epc; + break; + } + break; +#ifdef CONFIG_CPU_CAVIUM_OCTEON + case lwc2_op: /* This is bbit0 on Octeon */ + if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) + == 0) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + regs->cp0_epc = epc; + break; + case ldc2_op: /* This is bbit032 on Octeon */ + if ((regs->regs[insn.i_format.rs] & + (1ull<<(insn.i_format.rt+32))) == 0) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + regs->cp0_epc = epc; + break; + case swc2_op: /* This is bbit1 on Octeon */ + if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + regs->cp0_epc = epc; + break; + case sdc2_op: /* This is bbit132 on Octeon */ + if (regs->regs[insn.i_format.rs] & + (1ull<<(insn.i_format.rt+32))) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + regs->cp0_epc = epc; + break; +#endif + } + + return ret; + +sigill: + printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); + force_sig(SIGBUS, current); + return -EFAULT; +} +EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn); + +int __compute_return_epc(struct pt_regs *regs) +{ + unsigned int __user *addr; + long epc; + union mips_instruction insn; + + epc = regs->cp0_epc; + if (epc & 3) + goto unaligned; + + /* + * Read the instruction + */ + addr = (unsigned int __user *) epc; + if (__get_user(insn.word, addr)) { + force_sig(SIGSEGV, current); + return -EFAULT; + } + + return __compute_return_epc_for_insn(regs, insn); + +unaligned: + printk("%s: unaligned epc - sending 
SIGBUS.\n", current->comm); + force_sig(SIGBUS, current); + return -EFAULT; + +} diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c new file mode 100644 index 00000000..69bbfae1 --- /dev/null +++ b/arch/mips/kernel/cevt-bcm1480.c @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2000,2001,2004 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/percpu.h> +#include <linux/smp.h> +#include <linux/irq.h> + +#include <asm/addrspace.h> +#include <asm/io.h> +#include <asm/time.h> + +#include <asm/sibyte/bcm1480_regs.h> +#include <asm/sibyte/sb1250_regs.h> +#include <asm/sibyte/bcm1480_int.h> +#include <asm/sibyte/bcm1480_scd.h> + +#include <asm/sibyte/sb1250.h> + +#define IMR_IP2_VAL K_BCM1480_INT_MAP_I0 +#define IMR_IP3_VAL K_BCM1480_INT_MAP_I1 +#define IMR_IP4_VAL K_BCM1480_INT_MAP_I2 + +/* + * The general purpose timer ticks at 1MHz independent if + * the rest of the system + */ +static void sibyte_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + unsigned int cpu = smp_processor_id(); + void __iomem *cfg, *init; + + cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); + init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + __raw_writeq(0, cfg); + __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init); + __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, + cfg); + break; + + case CLOCK_EVT_MODE_ONESHOT: + /* Stop the timer until we actually program a shot */ + case CLOCK_EVT_MODE_SHUTDOWN: + __raw_writeq(0, cfg); + break; + + case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */ + case CLOCK_EVT_MODE_RESUME: + ; + } +} + +static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd) +{ + unsigned int cpu = smp_processor_id(); + void __iomem *cfg, *init; + + cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); + init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); + + __raw_writeq(0, cfg); + __raw_writeq(delta - 1, init); + __raw_writeq(M_SCD_TIMER_ENABLE, cfg); + + return 0; +} + +static irqreturn_t sibyte_counter_handler(int irq, void *dev_id) +{ + unsigned int cpu = smp_processor_id(); + struct clock_event_device *cd = dev_id; + void __iomem *cfg; + unsigned long tmode; + + if (cd->mode == CLOCK_EVT_MODE_PERIODIC) + tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS; + else + tmode = 0; + + /* ACK interrupt */ + cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); + ____raw_writeq(tmode, cfg); + + cd->event_handler(cd); + + return IRQ_HANDLED; +} + +static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent); +static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction); +static DEFINE_PER_CPU(char [18], sibyte_hpt_name); + +void __cpuinit 
sb1480_clockevent_init(void) +{ + unsigned int cpu = smp_processor_id(); + unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu; + struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu); + struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); + unsigned char *name = per_cpu(sibyte_hpt_name, cpu); + + BUG_ON(cpu > 3); /* Only have 4 general purpose timers */ + + sprintf(name, "bcm1480-counter-%d", cpu); + cd->name = name; + cd->features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT; + clockevent_set_clock(cd, V_SCD_TIMER_FREQ); + cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd); + cd->min_delta_ns = clockevent_delta2ns(2, cd); + cd->rating = 200; + cd->irq = irq; + cd->cpumask = cpumask_of(cpu); + cd->set_next_event = sibyte_next_event; + cd->set_mode = sibyte_set_mode; + clockevents_register_device(cd); + + bcm1480_mask_irq(cpu, irq); + + /* + * Map the timer interrupt to IP[4] of this cpu + */ + __raw_writeq(IMR_IP4_VAL, + IOADDR(A_BCM1480_IMR_REGISTER(cpu, + R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (irq << 3))); + + bcm1480_unmask_irq(cpu, irq); + + action->handler = sibyte_counter_handler; + action->flags = IRQF_PERCPU | IRQF_TIMER; + action->name = name; + action->dev_id = cd; + + irq_set_affinity(irq, cpumask_of(cpu)); + setup_irq(irq, action); +} diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c new file mode 100644 index 00000000..ed648cb5 --- /dev/null +++ b/arch/mips/kernel/cevt-ds1287.c @@ -0,0 +1,130 @@ +/* + * DS1287 clockevent driver + * + * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include <linux/clockchips.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/mc146818rtc.h> +#include <linux/irq.h> + +#include <asm/time.h> + +int ds1287_timer_state(void) +{ + return (CMOS_READ(RTC_REG_C) & RTC_PF) != 0; +} + +int ds1287_set_base_clock(unsigned int hz) +{ + u8 rate; + + switch (hz) { + case 128: + rate = 0x9; + break; + case 256: + rate = 0x8; + break; + case 1024: + rate = 0x6; + break; + default: + return -EINVAL; + } + + CMOS_WRITE(RTC_REF_CLCK_32KHZ | rate, RTC_REG_A); + + return 0; +} + +static int ds1287_set_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + return -EINVAL; +} + +static void ds1287_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + u8 val; + + spin_lock(&rtc_lock); + + val = CMOS_READ(RTC_REG_B); + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + val |= RTC_PIE; + break; + default: + val &= ~RTC_PIE; + break; + } + + CMOS_WRITE(val, RTC_REG_B); + + spin_unlock(&rtc_lock); +} + +static void ds1287_event_handler(struct clock_event_device *dev) +{ +} + +static struct clock_event_device ds1287_clockevent = { + .name = "ds1287", + .features = CLOCK_EVT_FEAT_PERIODIC, + .set_next_event = ds1287_set_next_event, + .set_mode = ds1287_set_mode, + .event_handler = ds1287_event_handler, +}; + +static irqreturn_t ds1287_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *cd = &ds1287_clockevent; + + /* Ack the RTC interrupt. */ + CMOS_READ(RTC_REG_C); + + cd->event_handler(cd); + + return IRQ_HANDLED; +} + +static struct irqaction ds1287_irqaction = { + .handler = ds1287_interrupt, + .flags = IRQF_PERCPU | IRQF_TIMER, + .name = "ds1287", +}; + +int __init ds1287_clockevent_init(int irq) +{ + struct clock_event_device *cd; + + cd = &ds1287_clockevent; + cd->rating = 100; + cd->irq = irq; + clockevent_set_clock(cd, 32768); + cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); + cd->min_delta_ns = clockevent_delta2ns(0x300, cd); + cd->cpumask = cpumask_of(0); + + clockevents_register_device(&ds1287_clockevent); + + return setup_irq(irq, &ds1287_irqaction); +} diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c new file mode 100644 index 00000000..831b4758 --- /dev/null +++ b/arch/mips/kernel/cevt-gt641xx.c @@ -0,0 +1,141 @@ +/* + * GT641xx clockevent routines. + * + * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include <linux/clockchips.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/irq.h> + +#include <asm/gt64120.h> +#include <asm/time.h> + +static DEFINE_RAW_SPINLOCK(gt641xx_timer_lock); +static unsigned int gt641xx_base_clock; + +void gt641xx_set_base_clock(unsigned int clock) +{ + gt641xx_base_clock = clock; +} + +int gt641xx_timer0_state(void) +{ + if (GT_READ(GT_TC0_OFS)) + return 0; + + GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ); + GT_WRITE(GT_TC_CONTROL_OFS, GT_TC_CONTROL_ENTC0_MSK); + + return 1; +} + +static int gt641xx_timer0_set_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + u32 ctrl; + + raw_spin_lock(&gt641xx_timer_lock); + + ctrl = GT_READ(GT_TC_CONTROL_OFS); + ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK); + ctrl |= GT_TC_CONTROL_ENTC0_MSK; + + GT_WRITE(GT_TC0_OFS, delta); + GT_WRITE(GT_TC_CONTROL_OFS, ctrl); + + raw_spin_unlock(&gt641xx_timer_lock); + + return 0; +} + +static void gt641xx_timer0_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + u32 ctrl; + + raw_spin_lock(&gt641xx_timer_lock); + + ctrl = GT_READ(GT_TC_CONTROL_OFS); + ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK); + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK; + break; + case CLOCK_EVT_MODE_ONESHOT: + ctrl |= GT_TC_CONTROL_ENTC0_MSK; + break; + default: + break; + } + + GT_WRITE(GT_TC_CONTROL_OFS, ctrl); + + raw_spin_unlock(&gt641xx_timer_lock); +} + +static void gt641xx_timer0_event_handler(struct clock_event_device *dev) +{ +} + +static struct clock_event_device gt641xx_timer0_clockevent = { + .name = "gt641xx-timer0", + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .irq = GT641XX_TIMER0_IRQ, + .set_next_event = gt641xx_timer0_set_next_event, + .set_mode = gt641xx_timer0_set_mode, + .event_handler = gt641xx_timer0_event_handler, +}; + +static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *cd = &gt641xx_timer0_clockevent; + + cd->event_handler(cd); + + return IRQ_HANDLED; +} + +static struct irqaction gt641xx_timer0_irqaction = { + .handler = gt641xx_timer0_interrupt, + .flags = IRQF_PERCPU | IRQF_TIMER, + .name = "gt641xx_timer0", +}; + +static int __init gt641xx_timer0_clockevent_init(void) +{ + struct clock_event_device *cd; + + if (!gt641xx_base_clock) + return 0; + + GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ); + + cd = &gt641xx_timer0_clockevent; + cd->rating = 200 + gt641xx_base_clock / 10000000; + clockevent_set_clock(cd, gt641xx_base_clock); + cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); + cd->min_delta_ns = clockevent_delta2ns(0x300, cd); + cd->cpumask = cpumask_of(0); + + clockevents_register_device(&gt641xx_timer0_clockevent); + + return setup_irq(GT641XX_TIMER0_IRQ, &gt641xx_timer0_irqaction); +} +arch_initcall(gt641xx_timer0_clockevent_init); diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c new file mode 100644 index 00000000..51095dd9 --- /dev/null +++ b/arch/mips/kernel/cevt-r4k.c @@ -0,0 +1,216 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details.
+ * + * Copyright (C) 2007 MIPS Technologies, Inc. + * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> + */ +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/percpu.h> +#include <linux/smp.h> +#include <linux/irq.h> + +#include <asm/smtc_ipi.h> +#include <asm/time.h> +#include <asm/cevt-r4k.h> + +/* + * The SMTC Kernel for the 34K, 1004K, et. al. replaces several + * of these routines with SMTC-specific variants. + */ + +#ifndef CONFIG_MIPS_MT_SMTC + +static int mips_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + unsigned int cnt; + int res; + + cnt = read_c0_count(); + cnt += delta; + write_c0_compare(cnt); + res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0; + return res; +} + +#endif /* CONFIG_MIPS_MT_SMTC */ + +void mips_set_clock_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + /* Nothing to do ... */ +} + +DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); +int cp0_timer_irq_installed; + +#ifndef CONFIG_MIPS_MT_SMTC + +irqreturn_t c0_compare_interrupt(int irq, void *dev_id) +{ + const int r2 = cpu_has_mips_r2; + struct clock_event_device *cd; + int cpu = smp_processor_id(); + + /* + * Suckage alert: + * Before R2 of the architecture there was no way to see if a + * performance counter interrupt was pending, so we have to run + * the performance counter interrupt handler anyway. + */ + if (handle_perf_irq(r2)) + goto out; + + /* + * The same applies to performance counter interrupts. But with the + * above we now know that the reason we got here must be a timer + * interrupt. Being the paranoiacs we are we check anyway. + */ + if (!r2 || (read_c0_cause() & (1 << 30))) { + /* Clear Count/Compare Interrupt */ + write_c0_compare(read_c0_compare()); + cd = &per_cpu(mips_clockevent_device, cpu); + cd->event_handler(cd); + } + +out: + return IRQ_HANDLED; +} + +#endif /* Not CONFIG_MIPS_MT_SMTC */ + +struct irqaction c0_compare_irqaction = { + .handler = c0_compare_interrupt, + .flags = IRQF_PERCPU | IRQF_TIMER, + .name = "timer", +}; + + +void mips_event_handler(struct clock_event_device *dev) +{ +} + +/* + * FIXME: This doesn't hold for the relocated E9000 compare interrupt. + */ +static int c0_compare_int_pending(void) +{ + return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP); +} + +/* + * Compare interrupt can be routed and latched outside the core, + * so wait up to worst case number of cycle counter ticks for timer interrupt + * changes to propagate to the cause register. + */ +#define COMPARE_INT_SEEN_TICKS 50 + +int c0_compare_int_usable(void) +{ + unsigned int delta; + unsigned int cnt; + + /* + * IP7 already pending? Try to clear it by acking the timer. 
+ */ + if (c0_compare_int_pending()) { + cnt = read_c0_count(); + write_c0_compare(cnt); + back_to_back_c0_hazard(); + while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS)) + if (!c0_compare_int_pending()) + break; + if (c0_compare_int_pending()) + return 0; + } + + for (delta = 0x10; delta <= 0x400000; delta <<= 1) { + cnt = read_c0_count(); + cnt += delta; + write_c0_compare(cnt); + back_to_back_c0_hazard(); + if ((int)(read_c0_count() - cnt) < 0) + break; + /* increase delta if the timer was already expired */ + } + + while ((int)(read_c0_count() - cnt) <= 0) + ; /* Wait for expiry */ + + while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS)) + if (c0_compare_int_pending()) + break; + if (!c0_compare_int_pending()) + return 0; + cnt = read_c0_count(); + write_c0_compare(cnt); + back_to_back_c0_hazard(); + while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS)) + if (!c0_compare_int_pending()) + break; + if (c0_compare_int_pending()) + return 0; + + /* + * Feels like a real count / compare timer. + */ + return 1; +} + +#ifndef CONFIG_MIPS_MT_SMTC + +int __cpuinit r4k_clockevent_init(void) +{ + unsigned int cpu = smp_processor_id(); + struct clock_event_device *cd; + unsigned int irq; + + if (!cpu_has_counter || !mips_hpt_frequency) + return -ENXIO; + + if (!c0_compare_int_usable()) + return -ENXIO; + + /* + * With vectored interrupts things are getting platform specific. + * get_c0_compare_int is a hook to allow a platform to return the + * interrupt number of it's liking. + */ + irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; + if (get_c0_compare_int) + irq = get_c0_compare_int(); + + cd = &per_cpu(mips_clockevent_device, cpu); + + cd->name = "MIPS"; + cd->features = CLOCK_EVT_FEAT_ONESHOT; + + clockevent_set_clock(cd, mips_hpt_frequency); + + /* Calculate the min / max delta */ + cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); + cd->min_delta_ns = clockevent_delta2ns(0x300, cd); + + cd->rating = 300; + cd->irq = irq; + cd->cpumask = cpumask_of(cpu); + cd->set_next_event = mips_next_event; + cd->set_mode = mips_set_clock_mode; + cd->event_handler = mips_event_handler; + + clockevents_register_device(cd); + + if (cp0_timer_irq_installed) + return 0; + + cp0_timer_irq_installed = 1; + + setup_irq(irq, &c0_compare_irqaction); + + return 0; +} + +#endif /* Not CONFIG_MIPS_MT_SMTC */ diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c new file mode 100644 index 00000000..e73439fd --- /dev/null +++ b/arch/mips/kernel/cevt-sb1250.c @@ -0,0 +1,153 @@ +/* + * Copyright (C) 2000, 2001 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/percpu.h> +#include <linux/smp.h> + +#include <asm/addrspace.h> +#include <asm/io.h> +#include <asm/time.h> + +#include <asm/sibyte/sb1250.h> +#include <asm/sibyte/sb1250_regs.h> +#include <asm/sibyte/sb1250_int.h> +#include <asm/sibyte/sb1250_scd.h> + +#define IMR_IP2_VAL K_INT_MAP_I0 +#define IMR_IP3_VAL K_INT_MAP_I1 +#define IMR_IP4_VAL K_INT_MAP_I2 + +/* + * The general purpose timer ticks at 1MHz independent if + * the rest of the system + */ +static void sibyte_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + unsigned int cpu = smp_processor_id(); + void __iomem *cfg, *init; + + cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); + init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + __raw_writeq(0, cfg); + __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init); + __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, + cfg); + break; + + case CLOCK_EVT_MODE_ONESHOT: + /* Stop the timer until we actually program a shot */ + case CLOCK_EVT_MODE_SHUTDOWN: + __raw_writeq(0, cfg); + break; + + case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */ + case CLOCK_EVT_MODE_RESUME: + ; + } +} + +static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd) +{ + unsigned int cpu = smp_processor_id(); + void __iomem *cfg, *init; + + cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); + init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); + + __raw_writeq(0, cfg); + __raw_writeq(delta - 1, init); + __raw_writeq(M_SCD_TIMER_ENABLE, cfg); + + return 0; +} + +static irqreturn_t sibyte_counter_handler(int irq, void *dev_id) +{ + unsigned int cpu = smp_processor_id(); + struct clock_event_device *cd = dev_id; + void __iomem *cfg; + unsigned long tmode; + + if (cd->mode == CLOCK_EVT_MODE_PERIODIC) + tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS; + else + tmode = 0; + + /* ACK interrupt */ + cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); + ____raw_writeq(tmode, cfg); + + cd->event_handler(cd); + + return IRQ_HANDLED; +} + +static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent); +static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction); +static DEFINE_PER_CPU(char [18], sibyte_hpt_name); + +void __cpuinit sb1250_clockevent_init(void) +{ + unsigned int cpu = smp_processor_id(); + unsigned int irq = K_INT_TIMER_0 + cpu; + struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu); + struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); + unsigned char *name = per_cpu(sibyte_hpt_name, cpu); + + /* Only have 4 general purpose timers, and we use last one as hpt */ + BUG_ON(cpu > 2); + + sprintf(name, "sb1250-counter-%d", cpu); + cd->name = name; + cd->features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT; + clockevent_set_clock(cd, V_SCD_TIMER_FREQ); + cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd); + cd->min_delta_ns = clockevent_delta2ns(2, cd); + cd->rating = 200; + cd->irq = irq; + cd->cpumask = cpumask_of(cpu); + cd->set_next_event = sibyte_next_event; + cd->set_mode = sibyte_set_mode; + clockevents_register_device(cd); + + sb1250_mask_irq(cpu, irq); + + /* + * Map the timer interrupt to IP[4] of this cpu + */ + __raw_writeq(IMR_IP4_VAL, + IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) + + (irq << 3))); + + sb1250_unmask_irq(cpu, irq); + + action->handler = sibyte_counter_handler; + action->flags = 
IRQF_PERCPU | IRQF_TIMER; + action->name = name; + action->dev_id = cd; + + irq_set_affinity(irq, cpumask_of(cpu)); + setup_irq(irq, action); +} diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c new file mode 100644 index 00000000..2e72d30b --- /dev/null +++ b/arch/mips/kernel/cevt-smtc.c @@ -0,0 +1,324 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007 MIPS Technologies, Inc. + * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> + * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl + */ +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/percpu.h> +#include <linux/smp.h> +#include <linux/irq.h> + +#include <asm/smtc_ipi.h> +#include <asm/time.h> +#include <asm/cevt-r4k.h> + +/* + * Variant clock event timer support for SMTC on MIPS 34K, 1004K + * or other MIPS MT cores. + * + * Notes on SMTC Support: + * + * SMTC has multiple microthread TCs pretending to be Linux CPUs. + * But there's only one Count/Compare pair per VPE, and Compare + * interrupts are taken opportunisitically by available TCs + * bound to the VPE with the Count register. The new timer + * framework provides for global broadcasts, but we really + * want VPE-level multicasts for best behavior. So instead + * of invoking the high-level clock-event broadcast code, + * this version of SMTC support uses the historical SMTC + * multicast mechanisms "under the hood", appearing to the + * generic clock layer as if the interrupts are per-CPU. + * + * The approach taken here is to maintain a set of NR_CPUS + * virtual timers, and track which "CPU" needs to be alerted + * at each event. + * + * It's unlikely that we'll see a MIPS MT core with more than + * 2 VPEs, but we *know* that we won't need to handle more + * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements + * is always going to be overkill, but always going to be enough. + */ + +unsigned long smtc_nexttime[NR_CPUS][NR_CPUS]; +static int smtc_nextinvpe[NR_CPUS]; + +/* + * Timestamps stored are absolute values to be programmed + * into Count register. Valid timestamps will never be zero. + * If a Zero Count value is actually calculated, it is converted + * to be a 1, which will introduce 1 or two CPU cycles of error + * roughly once every four billion events, which at 1000 HZ means + * about once every 50 days. If that's actually a problem, one + * could alternate squashing 0 to 1 and to -1. + */ + +#define MAKEVALID(x) (((x) == 0L) ? 1L : (x)) +#define ISVALID(x) ((x) != 0L) + +/* + * Time comparison is subtle, as it's really truncated + * modular arithmetic. + */ + +#define IS_SOONER(a, b, reference) \ + (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference))) + +/* + * CATCHUP_INCREMENT, used when the function falls behind the counter. + * Could be an increasing function instead of a constant; + */ + +#define CATCHUP_INCREMENT 64 + +static int mips_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + unsigned long flags; + unsigned int mtflags; + unsigned long timestamp, reference, previous; + unsigned long nextcomp = 0L; + int vpe = current_cpu_data.vpe_id; + int cpu = smp_processor_id(); + local_irq_save(flags); + mtflags = dmt(); + + /* + * Maintain the per-TC virtual timer + * and program the per-VPE shared Count register + * as appropriate here... 
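+	 * Each TC keeps its own requested expiry in smtc_nexttime[vpe][cpu];
+	 * whichever of those timestamps is soonest (tracked in
+	 * smtc_nextinvpe[vpe]) is what actually ends up in the shared
+	 * Compare register.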
+ */ + reference = (unsigned long)read_c0_count(); + timestamp = MAKEVALID(reference + delta); + /* + * To really model the clock, we have to catch the case + * where the current next-in-VPE timestamp is the old + * timestamp for the calling CPE, but the new value is + * in fact later. In that case, we have to do a full + * scan and discover the new next-in-VPE CPU id and + * timestamp. + */ + previous = smtc_nexttime[vpe][cpu]; + if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous) + && IS_SOONER(previous, timestamp, reference)) { + int i; + int soonest = cpu; + + /* + * Update timestamp array here, so that new + * value gets considered along with those of + * other virtual CPUs on the VPE. + */ + smtc_nexttime[vpe][cpu] = timestamp; + for_each_online_cpu(i) { + if (ISVALID(smtc_nexttime[vpe][i]) + && IS_SOONER(smtc_nexttime[vpe][i], + smtc_nexttime[vpe][soonest], reference)) { + soonest = i; + } + } + smtc_nextinvpe[vpe] = soonest; + nextcomp = smtc_nexttime[vpe][soonest]; + /* + * Otherwise, we don't have to process the whole array rank, + * we just have to see if the event horizon has gotten closer. + */ + } else { + if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) || + IS_SOONER(timestamp, + smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) { + smtc_nextinvpe[vpe] = cpu; + nextcomp = timestamp; + } + /* + * Since next-in-VPE may me the same as the executing + * virtual CPU, we update the array *after* checking + * its value. + */ + smtc_nexttime[vpe][cpu] = timestamp; + } + + /* + * It may be that, in fact, we don't need to update Compare, + * but if we do, we want to make sure we didn't fall into + * a crack just behind Count. + */ + if (ISVALID(nextcomp)) { + write_c0_compare(nextcomp); + ehb(); + /* + * We never return an error, we just make sure + * that we trigger the handlers as quickly as + * we can if we fell behind. + */ + while ((nextcomp - (unsigned long)read_c0_count()) + > (unsigned long)LONG_MAX) { + nextcomp += CATCHUP_INCREMENT; + write_c0_compare(nextcomp); + ehb(); + } + } + emt(mtflags); + local_irq_restore(flags); + return 0; +} + + +void smtc_distribute_timer(int vpe) +{ + unsigned long flags; + unsigned int mtflags; + int cpu; + struct clock_event_device *cd; + unsigned long nextstamp; + unsigned long reference; + + +repeat: + nextstamp = 0L; + for_each_online_cpu(cpu) { + /* + * Find virtual CPUs within the current VPE who have + * unserviced timer requests whose time is now past. + */ + local_irq_save(flags); + mtflags = dmt(); + if (cpu_data[cpu].vpe_id == vpe && + ISVALID(smtc_nexttime[vpe][cpu])) { + reference = (unsigned long)read_c0_count(); + if ((smtc_nexttime[vpe][cpu] - reference) + > (unsigned long)LONG_MAX) { + smtc_nexttime[vpe][cpu] = 0L; + emt(mtflags); + local_irq_restore(flags); + /* + * We don't send IPIs to ourself. + */ + if (cpu != smp_processor_id()) { + smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); + } else { + cd = &per_cpu(mips_clockevent_device, cpu); + cd->event_handler(cd); + } + } else { + /* Local to VPE but Valid Time not yet reached. 
*/ + if (!ISVALID(nextstamp) || + IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp, + reference)) { + smtc_nextinvpe[vpe] = cpu; + nextstamp = smtc_nexttime[vpe][cpu]; + } + emt(mtflags); + local_irq_restore(flags); + } + } else { + emt(mtflags); + local_irq_restore(flags); + + } + } + /* Reprogram for interrupt at next soonest timestamp for VPE */ + if (ISVALID(nextstamp)) { + write_c0_compare(nextstamp); + ehb(); + if ((nextstamp - (unsigned long)read_c0_count()) + > (unsigned long)LONG_MAX) + goto repeat; + } +} + + +irqreturn_t c0_compare_interrupt(int irq, void *dev_id) +{ + int cpu = smp_processor_id(); + + /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */ + handle_perf_irq(1); + + if (read_c0_cause() & (1 << 30)) { + /* Clear Count/Compare Interrupt */ + write_c0_compare(read_c0_compare()); + smtc_distribute_timer(cpu_data[cpu].vpe_id); + } + return IRQ_HANDLED; +} + + +int __cpuinit smtc_clockevent_init(void) +{ + uint64_t mips_freq = mips_hpt_frequency; + unsigned int cpu = smp_processor_id(); + struct clock_event_device *cd; + unsigned int irq; + int i; + int j; + + if (!cpu_has_counter || !mips_hpt_frequency) + return -ENXIO; + if (cpu == 0) { + for (i = 0; i < num_possible_cpus(); i++) { + smtc_nextinvpe[i] = 0; + for (j = 0; j < num_possible_cpus(); j++) + smtc_nexttime[i][j] = 0L; + } + /* + * SMTC also can't have the usablility test + * run by secondary TCs once Compare is in use. + */ + if (!c0_compare_int_usable()) + return -ENXIO; + } + + /* + * With vectored interrupts things are getting platform specific. + * get_c0_compare_int is a hook to allow a platform to return the + * interrupt number of it's liking. + */ + irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; + if (get_c0_compare_int) + irq = get_c0_compare_int(); + + cd = &per_cpu(mips_clockevent_device, cpu); + + cd->name = "MIPS"; + cd->features = CLOCK_EVT_FEAT_ONESHOT; + + /* Calculate the min / max delta */ + cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); + cd->shift = 32; + cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); + cd->min_delta_ns = clockevent_delta2ns(0x300, cd); + + cd->rating = 300; + cd->irq = irq; + cd->cpumask = cpumask_of(cpu); + cd->set_next_event = mips_next_event; + cd->set_mode = mips_set_clock_mode; + cd->event_handler = mips_event_handler; + + clockevents_register_device(cd); + + /* + * On SMTC we only want to do the data structure + * initialization and IRQ setup once. + */ + if (cpu) + return 0; + /* + * And we need the hwmask associated with the c0_compare + * vector to be initialized. + */ + irq_hwmask[irq] = (0x100 << cp0_compare_irq); + if (cp0_timer_irq_installed) + return 0; + + cp0_timer_irq_installed = 1; + + setup_irq(irq, &c0_compare_irqaction); + + return 0; +} diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c new file mode 100644 index 00000000..e5c30b1d --- /dev/null +++ b/arch/mips/kernel/cevt-txx9.c @@ -0,0 +1,193 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Based on linux/arch/mips/kernel/cevt-r4k.c, + * linux/arch/mips/jmr3927/rbhma3100/setup.c + * + * Copyright 2001 MontaVista Software Inc. + * Copyright (C) 2000-2001 Toshiba Corporation + * Copyright (C) 2007 MIPS Technologies, Inc. 
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> + */ +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <asm/time.h> +#include <asm/txx9tmr.h> + +#define TCR_BASE (TXx9_TMTCR_CCDE | TXx9_TMTCR_CRE | TXx9_TMTCR_TMODE_ITVL) +#define TIMER_CCD 0 /* 1/2 */ +#define TIMER_CLK(imclk) ((imclk) / (2 << TIMER_CCD)) + +struct txx9_clocksource { + struct clocksource cs; + struct txx9_tmr_reg __iomem *tmrptr; +}; + +static cycle_t txx9_cs_read(struct clocksource *cs) +{ + struct txx9_clocksource *txx9_cs = + container_of(cs, struct txx9_clocksource, cs); + return __raw_readl(&txx9_cs->tmrptr->trr); +} + +/* Use 1 bit smaller width to use full bits in that width */ +#define TXX9_CLOCKSOURCE_BITS (TXX9_TIMER_BITS - 1) + +static struct txx9_clocksource txx9_clocksource = { + .cs = { + .name = "TXx9", + .rating = 200, + .read = txx9_cs_read, + .mask = CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + }, +}; + +void __init txx9_clocksource_init(unsigned long baseaddr, + unsigned int imbusclk) +{ + struct txx9_tmr_reg __iomem *tmrptr; + + clocksource_register_hz(&txx9_clocksource.cs, TIMER_CLK(imbusclk)); + + tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); + __raw_writel(TCR_BASE, &tmrptr->tcr); + __raw_writel(0, &tmrptr->tisr); + __raw_writel(TIMER_CCD, &tmrptr->ccdr); + __raw_writel(TXx9_TMITMR_TZCE, &tmrptr->itmr); + __raw_writel(1 << TXX9_CLOCKSOURCE_BITS, &tmrptr->cpra); + __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr); + txx9_clocksource.tmrptr = tmrptr; +} + +struct txx9_clock_event_device { + struct clock_event_device cd; + struct txx9_tmr_reg __iomem *tmrptr; +}; + +static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr) +{ + /* stop and reset counter */ + __raw_writel(TCR_BASE, &tmrptr->tcr); + /* clear pending interrupt */ + __raw_writel(0, &tmrptr->tisr); +} + +static void txx9tmr_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + struct txx9_clock_event_device *txx9_cd = + container_of(evt, struct txx9_clock_event_device, cd); + struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; + + txx9tmr_stop_and_clear(tmrptr); + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + __raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE, + &tmrptr->itmr); + /* start timer */ + __raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >> + evt->shift, + &tmrptr->cpra); + __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr); + break; + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: + __raw_writel(0, &tmrptr->itmr); + break; + case CLOCK_EVT_MODE_ONESHOT: + __raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr); + break; + case CLOCK_EVT_MODE_RESUME: + __raw_writel(TIMER_CCD, &tmrptr->ccdr); + __raw_writel(0, &tmrptr->itmr); + break; + } +} + +static int txx9tmr_set_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + struct txx9_clock_event_device *txx9_cd = + container_of(evt, struct txx9_clock_event_device, cd); + struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; + + txx9tmr_stop_and_clear(tmrptr); + /* start timer */ + __raw_writel(delta, &tmrptr->cpra); + __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr); + return 0; +} + +static struct txx9_clock_event_device txx9_clock_event_device = { + .cd = { + .name = "TXx9", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .rating = 200, + .set_mode = txx9tmr_set_mode, + .set_next_event = txx9tmr_set_next_event, + }, +}; + +static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id) +{ + 
struct txx9_clock_event_device *txx9_cd = dev_id; + struct clock_event_device *cd = &txx9_cd->cd; + struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; + + __raw_writel(0, &tmrptr->tisr); /* ack interrupt */ + cd->event_handler(cd); + return IRQ_HANDLED; +} + +static struct irqaction txx9tmr_irq = { + .handler = txx9tmr_interrupt, + .flags = IRQF_PERCPU | IRQF_TIMER, + .name = "txx9tmr", + .dev_id = &txx9_clock_event_device, +}; + +void __init txx9_clockevent_init(unsigned long baseaddr, int irq, + unsigned int imbusclk) +{ + struct clock_event_device *cd = &txx9_clock_event_device.cd; + struct txx9_tmr_reg __iomem *tmrptr; + + tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); + txx9tmr_stop_and_clear(tmrptr); + __raw_writel(TIMER_CCD, &tmrptr->ccdr); + __raw_writel(0, &tmrptr->itmr); + txx9_clock_event_device.tmrptr = tmrptr; + + clockevent_set_clock(cd, TIMER_CLK(imbusclk)); + cd->max_delta_ns = + clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd); + cd->min_delta_ns = clockevent_delta2ns(0xf, cd); + cd->irq = irq; + cd->cpumask = cpumask_of(0), + clockevents_register_device(cd); + setup_irq(irq, &txx9tmr_irq); + printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n", + baseaddr, irq); +} + +void __init txx9_tmr_init(unsigned long baseaddr) +{ + struct txx9_tmr_reg __iomem *tmrptr; + + tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); + /* Start once to make CounterResetEnable effective */ + __raw_writel(TXx9_TMTCR_CRE | TXx9_TMTCR_TCE, &tmrptr->tcr); + /* Stop and reset the counter */ + __raw_writel(TXx9_TMTCR_CRE, &tmrptr->tcr); + __raw_writel(0, &tmrptr->tisr); + __raw_writel(0xffffffff, &tmrptr->cpra); + __raw_writel(0, &tmrptr->itmr); + __raw_writel(0, &tmrptr->ccdr); + __raw_writel(0, &tmrptr->pgmr); + iounmap(tmrptr); +} diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c new file mode 100644 index 00000000..d6a18644 --- /dev/null +++ b/arch/mips/kernel/cpu-bugs64.c @@ -0,0 +1,319 @@ +/* + * Copyright (C) 2003, 2004, 2007 Maciej W. Rozycki + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ptrace.h> +#include <linux/stddef.h> + +#include <asm/bugs.h> +#include <asm/compiler.h> +#include <asm/cpu.h> +#include <asm/fpu.h> +#include <asm/mipsregs.h> +#include <asm/setup.h> + +static char bug64hit[] __initdata = + "reliable operation impossible!\n%s"; +static char nowar[] __initdata = + "Please report to <linux-mips@linux-mips.org>."; +static char r4kwar[] __initdata = + "Enable CPU_R4000_WORKAROUNDS to rectify."; +static char daddiwar[] __initdata = + "Enable CPU_DADDI_WORKAROUNDS to rectify."; + +static inline void align_mod(const int align, const int mod) +{ + asm volatile( + ".set push\n\t" + ".set noreorder\n\t" + ".balign %0\n\t" + ".rept %1\n\t" + "nop\n\t" + ".endr\n\t" + ".set pop" + : + : GCC_IMM_ASM() (align), GCC_IMM_ASM() (mod)); +} + +static inline void mult_sh_align_mod(long *v1, long *v2, long *w, + const int align, const int mod) +{ + unsigned long flags; + int m1, m2; + long p, s, lv1, lv2, lw; + + /* + * We want the multiply and the shift to be isolated from the + * rest of the code to disable gcc optimizations. 
Hence the + * asm statements that execute nothing, but make gcc not know + * what the values of m1, m2 and s are and what lv2 and p are + * used for. + */ + + local_irq_save(flags); + /* + * The following code leads to a wrong result of the first + * dsll32 when executed on R4000 rev. 2.2 or 3.0 (PRId + * 00000422 or 00000430, respectively). + * + * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and + * 3.0" by MIPS Technologies, Inc., errata #16 and #28 for + * details. I got no permission to duplicate them here, + * sigh... --macro + */ + asm volatile( + "" + : "=r" (m1), "=r" (m2), "=r" (s) + : "0" (5), "1" (8), "2" (5)); + align_mod(align, mod); + /* + * The trailing nop is needed to fulfill the two-instruction + * requirement between reading hi/lo and staring a mult/div. + * Leaving it out may cause gas insert a nop itself breaking + * the desired alignment of the next chunk. + */ + asm volatile( + ".set push\n\t" + ".set noat\n\t" + ".set noreorder\n\t" + ".set nomacro\n\t" + "mult %2, %3\n\t" + "dsll32 %0, %4, %5\n\t" + "mflo $0\n\t" + "dsll32 %1, %4, %5\n\t" + "nop\n\t" + ".set pop" + : "=&r" (lv1), "=r" (lw) + : "r" (m1), "r" (m2), "r" (s), "I" (0) + : "hi", "lo", GCC_REG_ACCUM); + /* We have to use single integers for m1 and m2 and a double + * one for p to be sure the mulsidi3 gcc's RTL multiplication + * instruction has the workaround applied. Older versions of + * gcc have correct umulsi3 and mulsi3, but other + * multiplication variants lack the workaround. + */ + asm volatile( + "" + : "=r" (m1), "=r" (m2), "=r" (s) + : "0" (m1), "1" (m2), "2" (s)); + align_mod(align, mod); + p = m1 * m2; + lv2 = s << 32; + asm volatile( + "" + : "=r" (lv2) + : "0" (lv2), "r" (p)); + local_irq_restore(flags); + + *v1 = lv1; + *v2 = lv2; + *w = lw; +} + +static inline void check_mult_sh(void) +{ + long v1[8], v2[8], w[8]; + int bug, fix, i; + + printk("Checking for the multiply/shift bug... "); + + /* + * Testing discovered false negatives for certain code offsets + * into cache lines. Hence we test all possible offsets for + * the worst assumption of an R4000 I-cache line width of 32 + * bytes. + * + * We can't use a loop as alignment directives need to be + * immediates. + */ + mult_sh_align_mod(&v1[0], &v2[0], &w[0], 32, 0); + mult_sh_align_mod(&v1[1], &v2[1], &w[1], 32, 1); + mult_sh_align_mod(&v1[2], &v2[2], &w[2], 32, 2); + mult_sh_align_mod(&v1[3], &v2[3], &w[3], 32, 3); + mult_sh_align_mod(&v1[4], &v2[4], &w[4], 32, 4); + mult_sh_align_mod(&v1[5], &v2[5], &w[5], 32, 5); + mult_sh_align_mod(&v1[6], &v2[6], &w[6], 32, 6); + mult_sh_align_mod(&v1[7], &v2[7], &w[7], 32, 7); + + bug = 0; + for (i = 0; i < 8; i++) + if (v1[i] != w[i]) + bug = 1; + + if (bug == 0) { + printk("no.\n"); + return; + } + + printk("yes, workaround... "); + + fix = 1; + for (i = 0; i < 8; i++) + if (v2[i] != w[i]) + fix = 0; + + if (fix == 1) { + printk("yes.\n"); + return; + } + + printk("no.\n"); + panic(bug64hit, !R4000_WAR ? r4kwar : nowar); +} + +static volatile int daddi_ov __cpuinitdata; + +asmlinkage void __init do_daddi_ov(struct pt_regs *regs) +{ + daddi_ov = 1; + regs->cp0_epc += 4; +} + +static inline void check_daddi(void) +{ + extern asmlinkage void handle_daddi_ov(void); + unsigned long flags; + void *handler; + long v, tmp; + + printk("Checking for the daddi bug... "); + + local_irq_save(flags); + handler = set_except_vector(12, handle_daddi_ov); + /* + * The following code fails to trigger an overflow exception + * when executed on R4000 rev. 
2.2 or 3.0 (PRId 00000422 or + * 00000430, respectively). + * + * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and + * 3.0" by MIPS Technologies, Inc., erratum #23 for details. + * I got no permission to duplicate it here, sigh... --macro + */ + asm volatile( + ".set push\n\t" + ".set noat\n\t" + ".set noreorder\n\t" + ".set nomacro\n\t" + "addiu %1, $0, %2\n\t" + "dsrl %1, %1, 1\n\t" +#ifdef HAVE_AS_SET_DADDI + ".set daddi\n\t" +#endif + "daddi %0, %1, %3\n\t" + ".set pop" + : "=r" (v), "=&r" (tmp) + : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); + set_except_vector(12, handler); + local_irq_restore(flags); + + if (daddi_ov) { + printk("no.\n"); + return; + } + + printk("yes, workaround... "); + + local_irq_save(flags); + handler = set_except_vector(12, handle_daddi_ov); + asm volatile( + "addiu %1, $0, %2\n\t" + "dsrl %1, %1, 1\n\t" + "daddi %0, %1, %3" + : "=r" (v), "=&r" (tmp) + : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); + set_except_vector(12, handler); + local_irq_restore(flags); + + if (daddi_ov) { + printk("yes.\n"); + return; + } + + printk("no.\n"); + panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); +} + +int daddiu_bug = -1; + +static inline void check_daddiu(void) +{ + long v, w, tmp; + + printk("Checking for the daddiu bug... "); + + /* + * The following code leads to a wrong result of daddiu when + * executed on R4400 rev. 1.0 (PRId 00000440). + * + * See "MIPS R4400PC/SC Errata, Processor Revision 1.0" by + * MIPS Technologies, Inc., erratum #7 for details. + * + * According to "MIPS R4000PC/SC Errata, Processor Revision + * 2.2 and 3.0" by MIPS Technologies, Inc., erratum #41 this + * problem affects R4000 rev. 2.2 and 3.0 (PRId 00000422 and + * 00000430, respectively), too. Testing failed to trigger it + * so far. + * + * I got no permission to duplicate the errata here, sigh... + * --macro + */ + asm volatile( + ".set push\n\t" + ".set noat\n\t" + ".set noreorder\n\t" + ".set nomacro\n\t" + "addiu %2, $0, %3\n\t" + "dsrl %2, %2, 1\n\t" +#ifdef HAVE_AS_SET_DADDI + ".set daddi\n\t" +#endif + "daddiu %0, %2, %4\n\t" + "addiu %1, $0, %4\n\t" + "daddu %1, %2\n\t" + ".set pop" + : "=&r" (v), "=&r" (w), "=&r" (tmp) + : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); + + daddiu_bug = v != w; + + if (!daddiu_bug) { + printk("no.\n"); + return; + } + + printk("yes, workaround... "); + + asm volatile( + "addiu %2, $0, %3\n\t" + "dsrl %2, %2, 1\n\t" + "daddiu %0, %2, %4\n\t" + "addiu %1, $0, %4\n\t" + "daddu %1, %2" + : "=&r" (v), "=&r" (w), "=&r" (tmp) + : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); + + if (v == w) { + printk("yes.\n"); + return; + } + + printk("no.\n"); + panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); +} + +void __init check_bugs64_early(void) +{ + check_mult_sh(); + check_daddiu(); +} + +void __init check_bugs64(void) +{ + check_daddi(); +} diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c new file mode 100644 index 00000000..5099201f --- /dev/null +++ b/arch/mips/kernel/cpu-probe.c @@ -0,0 +1,1188 @@ +/* + * Processor capabilities determination functions. + * + * Copyright (C) xxxx the Anonymous + * Copyright (C) 1994 - 2006 Ralf Baechle + * Copyright (C) 2003, 2004 Maciej W. Rozycki + * Copyright (C) 2001, 2004 MIPS Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ptrace.h> +#include <linux/smp.h> +#include <linux/stddef.h> +#include <linux/export.h> + +#include <asm/bugs.h> +#include <asm/cpu.h> +#include <asm/fpu.h> +#include <asm/mipsregs.h> +#include <asm/watch.h> +#include <asm/elf.h> +#include <asm/spram.h> +#include <asm/uaccess.h> + +/* + * Not all of the MIPS CPUs have the "wait" instruction available. Moreover, + * the implementation of the "wait" feature differs between CPU families. This + * points to the function that implements CPU specific wait. + * The wait instruction stops the pipeline and reduces the power consumption of + * the CPU very much. + */ +void (*cpu_wait)(void); +EXPORT_SYMBOL(cpu_wait); + +static void r3081_wait(void) +{ + unsigned long cfg = read_c0_conf(); + write_c0_conf(cfg | R30XX_CONF_HALT); +} + +static void r39xx_wait(void) +{ + local_irq_disable(); + if (!need_resched()) + write_c0_conf(read_c0_conf() | TX39_CONF_HALT); + local_irq_enable(); +} + +extern void r4k_wait(void); + +/* + * This variant is preferable as it allows testing need_resched and going to + * sleep depending on the outcome atomically. Unfortunately the "It is + * implementation-dependent whether the pipeline restarts when a non-enabled + * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes + * using this version a gamble. + */ +void r4k_wait_irqoff(void) +{ + local_irq_disable(); + if (!need_resched()) + __asm__(" .set push \n" + " .set mips3 \n" + " wait \n" + " .set pop \n"); + local_irq_enable(); + __asm__(" .globl __pastwait \n" + "__pastwait: \n"); +} + +/* + * The RM7000 variant has to handle erratum 38. The workaround is to not + * have any pending stores when the WAIT instruction is executed. + */ +static void rm7k_wait_irqoff(void) +{ + local_irq_disable(); + if (!need_resched()) + __asm__( + " .set push \n" + " .set mips3 \n" + " .set noat \n" + " mfc0 $1, $12 \n" + " sync \n" + " mtc0 $1, $12 # stalls until W stage \n" + " wait \n" + " mtc0 $1, $12 # stalls until W stage \n" + " .set pop \n"); + local_irq_enable(); +} + +/* + * The Au1xxx wait is available only if using 32khz counter or + * external timer source, but specifically not CP0 Counter. + * alchemy/common/time.c may override cpu_wait! 
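+ * The two "cache 0x14" (Fill_I) operations below pull the body of
+ * au1k_wait() itself into the I-cache; presumably this is so that no
+ * instruction fetch has to go out to the bus once "wait" has stalled
+ * the pipeline.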
+ */ +static void au1k_wait(void) +{ + __asm__(" .set mips3 \n" + " cache 0x14, 0(%0) \n" + " cache 0x14, 32(%0) \n" + " sync \n" + " nop \n" + " wait \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " .set mips0 \n" + : : "r" (au1k_wait)); +} + +static int __initdata nowait; + +static int __init wait_disable(char *s) +{ + nowait = 1; + + return 1; +} + +__setup("nowait", wait_disable); + +static int __cpuinitdata mips_fpu_disabled; + +static int __init fpu_disable(char *s) +{ + cpu_data[0].options &= ~MIPS_CPU_FPU; + mips_fpu_disabled = 1; + + return 1; +} + +__setup("nofpu", fpu_disable); + +int __cpuinitdata mips_dsp_disabled; + +static int __init dsp_disable(char *s) +{ + cpu_data[0].ases &= ~MIPS_ASE_DSP; + mips_dsp_disabled = 1; + + return 1; +} + +__setup("nodsp", dsp_disable); + +void __init check_wait(void) +{ + struct cpuinfo_mips *c = ¤t_cpu_data; + + if (nowait) { + printk("Wait instruction disabled.\n"); + return; + } + + switch (c->cputype) { + case CPU_R3081: + case CPU_R3081E: + cpu_wait = r3081_wait; + break; + case CPU_TX3927: + cpu_wait = r39xx_wait; + break; + case CPU_R4200: +/* case CPU_R4300: */ + case CPU_R4600: + case CPU_R4640: + case CPU_R4650: + case CPU_R4700: + case CPU_R5000: + case CPU_R5500: + case CPU_NEVADA: + case CPU_4KC: + case CPU_4KEC: + case CPU_4KSC: + case CPU_5KC: + case CPU_25KF: + case CPU_PR4450: + case CPU_BMIPS3300: + case CPU_BMIPS4350: + case CPU_BMIPS4380: + case CPU_BMIPS5000: + case CPU_CAVIUM_OCTEON: + case CPU_CAVIUM_OCTEON_PLUS: + case CPU_CAVIUM_OCTEON2: + case CPU_JZRISC: + case CPU_XLR: + case CPU_XLP: + cpu_wait = r4k_wait; + break; + + case CPU_RM7000: + cpu_wait = rm7k_wait_irqoff; + break; + + case CPU_24K: + case CPU_34K: + case CPU_1004K: + cpu_wait = r4k_wait; + if (read_c0_config7() & MIPS_CONF7_WII) + cpu_wait = r4k_wait_irqoff; + break; + + case CPU_74K: + cpu_wait = r4k_wait; + if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0)) + cpu_wait = r4k_wait_irqoff; + break; + + case CPU_TX49XX: + cpu_wait = r4k_wait_irqoff; + break; + case CPU_ALCHEMY: + cpu_wait = au1k_wait; + break; + case CPU_20KC: + /* + * WAIT on Rev1.0 has E1, E2, E3 and E16. + * WAIT on Rev2.0 and Rev3.0 has E16. + * Rev3.1 WAIT is nop, why bother + */ + if ((c->processor_id & 0xff) <= 0x64) + break; + + /* + * Another rev is incremeting c0_count at a reduced clock + * rate while in WAIT mode. So we basically have the choice + * between using the cp0 timer as clocksource or avoiding + * the WAIT instruction. Until more details are known, + * disable the use of WAIT for 20Kc entirely. + cpu_wait = r4k_wait; + */ + break; + case CPU_RM9000: + if ((c->processor_id & 0x00ff) >= 0x40) + cpu_wait = r4k_wait; + break; + default: + break; + } +} + +static inline void check_errata(void) +{ + struct cpuinfo_mips *c = ¤t_cpu_data; + + switch (c->cputype) { + case CPU_34K: + /* + * Erratum "RPS May Cause Incorrect Instruction Execution" + * This code only handles VPE0, any SMP/SMTC/RTOS code + * making use of VPE1 will be responsable for that VPE. + */ + if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2) + write_c0_config7(read_c0_config7() | MIPS_CONF7_RPS); + break; + default: + break; + } +} + +void __init check_bugs32(void) +{ + check_errata(); +} + +/* + * Probe whether cpu has config register by trying to play with + * alternate cache bit and see whether it matters. + * It's used by cpu_probe to distinguish between R3000A and R3081. 
+ */ +static inline int cpu_has_confreg(void) +{ +#ifdef CONFIG_CPU_R3000 + extern unsigned long r3k_cache_size(unsigned long); + unsigned long size1, size2; + unsigned long cfg = read_c0_conf(); + + size1 = r3k_cache_size(ST0_ISC); + write_c0_conf(cfg ^ R30XX_CONF_AC); + size2 = r3k_cache_size(ST0_ISC); + write_c0_conf(cfg); + return size1 != size2; +#else + return 0; +#endif +} + +static inline void set_elf_platform(int cpu, const char *plat) +{ + if (cpu == 0) + __elf_platform = plat; +} + +/* + * Get the FPU Implementation/Revision. + */ +static inline unsigned long cpu_get_fpu_id(void) +{ + unsigned long tmp, fpu_id; + + tmp = read_c0_status(); + __enable_fpu(); + fpu_id = read_32bit_cp1_register(CP1_REVISION); + write_c0_status(tmp); + return fpu_id; +} + +/* + * Check the CPU has an FPU the official way. + */ +static inline int __cpu_has_fpu(void) +{ + return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE); +} + +static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) +{ +#ifdef __NEED_VMBITS_PROBE + write_c0_entryhi(0x3fffffffffffe000ULL); + back_to_back_c0_hazard(); + c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL); +#endif +} + +#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \ + | MIPS_CPU_COUNTER) + +static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) +{ + switch (c->processor_id & 0xff00) { + case PRID_IMP_R2000: + c->cputype = CPU_R2000; + __cpu_name[cpu] = "R2000"; + c->isa_level = MIPS_CPU_ISA_I; + c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | + MIPS_CPU_NOFPUEX; + if (__cpu_has_fpu()) + c->options |= MIPS_CPU_FPU; + c->tlbsize = 64; + break; + case PRID_IMP_R3000: + if ((c->processor_id & 0xff) == PRID_REV_R3000A) { + if (cpu_has_confreg()) { + c->cputype = CPU_R3081E; + __cpu_name[cpu] = "R3081"; + } else { + c->cputype = CPU_R3000A; + __cpu_name[cpu] = "R3000A"; + } + break; + } else { + c->cputype = CPU_R3000; + __cpu_name[cpu] = "R3000"; + } + c->isa_level = MIPS_CPU_ISA_I; + c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | + MIPS_CPU_NOFPUEX; + if (__cpu_has_fpu()) + c->options |= MIPS_CPU_FPU; + c->tlbsize = 64; + break; + case PRID_IMP_R4000: + if (read_c0_config() & CONF_SC) { + if ((c->processor_id & 0xff) >= PRID_REV_R4400) { + c->cputype = CPU_R4400PC; + __cpu_name[cpu] = "R4400PC"; + } else { + c->cputype = CPU_R4000PC; + __cpu_name[cpu] = "R4000PC"; + } + } else { + if ((c->processor_id & 0xff) >= PRID_REV_R4400) { + c->cputype = CPU_R4400SC; + __cpu_name[cpu] = "R4400SC"; + } else { + c->cputype = CPU_R4000SC; + __cpu_name[cpu] = "R4000SC"; + } + } + + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_WATCH | MIPS_CPU_VCE | + MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + case PRID_IMP_VR41XX: + switch (c->processor_id & 0xf0) { + case PRID_REV_VR4111: + c->cputype = CPU_VR4111; + __cpu_name[cpu] = "NEC VR4111"; + break; + case PRID_REV_VR4121: + c->cputype = CPU_VR4121; + __cpu_name[cpu] = "NEC VR4121"; + break; + case PRID_REV_VR4122: + if ((c->processor_id & 0xf) < 0x3) { + c->cputype = CPU_VR4122; + __cpu_name[cpu] = "NEC VR4122"; + } else { + c->cputype = CPU_VR4181A; + __cpu_name[cpu] = "NEC VR4181A"; + } + break; + case PRID_REV_VR4130: + if ((c->processor_id & 0xf) < 0x4) { + c->cputype = CPU_VR4131; + __cpu_name[cpu] = "NEC VR4131"; + } else { + c->cputype = CPU_VR4133; + __cpu_name[cpu] = "NEC VR4133"; + } + break; + default: + printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n"); + c->cputype = CPU_VR41XX; + __cpu_name[cpu] = "NEC 
Vr41xx"; + break; + } + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS; + c->tlbsize = 32; + break; + case PRID_IMP_R4300: + c->cputype = CPU_R4300; + __cpu_name[cpu] = "R4300"; + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_LLSC; + c->tlbsize = 32; + break; + case PRID_IMP_R4600: + c->cputype = CPU_R4600; + __cpu_name[cpu] = "R4600"; + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + #if 0 + case PRID_IMP_R4650: + /* + * This processor doesn't have an MMU, so it's not + * "real easy" to run Linux on it. It is left purely + * for documentation. Commented out because it shares + * it's c0_prid id number with the TX3900. + */ + c->cputype = CPU_R4650; + __cpu_name[cpu] = "R4650"; + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + #endif + case PRID_IMP_TX39: + c->isa_level = MIPS_CPU_ISA_I; + c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE; + + if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) { + c->cputype = CPU_TX3927; + __cpu_name[cpu] = "TX3927"; + c->tlbsize = 64; + } else { + switch (c->processor_id & 0xff) { + case PRID_REV_TX3912: + c->cputype = CPU_TX3912; + __cpu_name[cpu] = "TX3912"; + c->tlbsize = 32; + break; + case PRID_REV_TX3922: + c->cputype = CPU_TX3922; + __cpu_name[cpu] = "TX3922"; + c->tlbsize = 64; + break; + } + } + break; + case PRID_IMP_R4700: + c->cputype = CPU_R4700; + __cpu_name[cpu] = "R4700"; + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + case PRID_IMP_TX49: + c->cputype = CPU_TX49XX; + __cpu_name[cpu] = "R49XX"; + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS | MIPS_CPU_LLSC; + if (!(c->processor_id & 0x08)) + c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR; + c->tlbsize = 48; + break; + case PRID_IMP_R5000: + c->cputype = CPU_R5000; + __cpu_name[cpu] = "R5000"; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + case PRID_IMP_R5432: + c->cputype = CPU_R5432; + __cpu_name[cpu] = "R5432"; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_WATCH | MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + case PRID_IMP_R5500: + c->cputype = CPU_R5500; + __cpu_name[cpu] = "R5500"; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_WATCH | MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + case PRID_IMP_NEVADA: + c->cputype = CPU_NEVADA; + __cpu_name[cpu] = "Nevada"; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_DIVEC | MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + case PRID_IMP_R6000: + c->cputype = CPU_R6000; + __cpu_name[cpu] = "R6000"; + c->isa_level = MIPS_CPU_ISA_II; + c->options = MIPS_CPU_TLB | MIPS_CPU_FPU | + MIPS_CPU_LLSC; + c->tlbsize = 32; + break; + case PRID_IMP_R6000A: + c->cputype = CPU_R6000A; + __cpu_name[cpu] = "R6000A"; + c->isa_level = MIPS_CPU_ISA_II; + c->options = MIPS_CPU_TLB | MIPS_CPU_FPU | + MIPS_CPU_LLSC; + c->tlbsize = 32; + break; + case PRID_IMP_RM7000: + c->cputype = CPU_RM7000; + __cpu_name[cpu] = "RM7000"; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_LLSC; + /* + * Undocumented RM7000: Bit 29 in the info register of + * 
the RM7000 v2.0 indicates if the TLB has 48 or 64 + * entries. + * + * 29 1 => 64 entry JTLB + * 0 => 48 entry JTLB + */ + c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48; + break; + case PRID_IMP_RM9000: + c->cputype = CPU_RM9000; + __cpu_name[cpu] = "RM9000"; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_LLSC; + /* + * Bit 29 in the info register of the RM9000 + * indicates if the TLB has 48 or 64 entries. + * + * 29 1 => 64 entry JTLB + * 0 => 48 entry JTLB + */ + c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48; + break; + case PRID_IMP_R8000: + c->cputype = CPU_R8000; + __cpu_name[cpu] = "RM8000"; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX | + MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_LLSC; + c->tlbsize = 384; /* has weird TLB: 3-way x 128 */ + break; + case PRID_IMP_R10000: + c->cputype = CPU_R10000; + __cpu_name[cpu] = "R10000"; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | + MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_COUNTER | MIPS_CPU_WATCH | + MIPS_CPU_LLSC; + c->tlbsize = 64; + break; + case PRID_IMP_R12000: + c->cputype = CPU_R12000; + __cpu_name[cpu] = "R12000"; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | + MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_COUNTER | MIPS_CPU_WATCH | + MIPS_CPU_LLSC; + c->tlbsize = 64; + break; + case PRID_IMP_R14000: + c->cputype = CPU_R14000; + __cpu_name[cpu] = "R14000"; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | + MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_COUNTER | MIPS_CPU_WATCH | + MIPS_CPU_LLSC; + c->tlbsize = 64; + break; + case PRID_IMP_LOONGSON2: + c->cputype = CPU_LOONGSON2; + __cpu_name[cpu] = "ICT Loongson-2"; + + switch (c->processor_id & PRID_REV_MASK) { + case PRID_REV_LOONGSON2E: + set_elf_platform(cpu, "loongson2e"); + break; + case PRID_REV_LOONGSON2F: + set_elf_platform(cpu, "loongson2f"); + break; + } + + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS | + MIPS_CPU_FPU | MIPS_CPU_LLSC | + MIPS_CPU_32FPR; + c->tlbsize = 64; + break; + } +} + +static char unknown_isa[] __cpuinitdata = KERN_ERR \ + "Unsupported ISA type, c0.config0: %d."; + +static inline unsigned int decode_config0(struct cpuinfo_mips *c) +{ + unsigned int config0; + int isa; + + config0 = read_c0_config(); + + if (((config0 & MIPS_CONF_MT) >> 7) == 1) + c->options |= MIPS_CPU_TLB; + isa = (config0 & MIPS_CONF_AT) >> 13; + switch (isa) { + case 0: + switch ((config0 & MIPS_CONF_AR) >> 10) { + case 0: + c->isa_level = MIPS_CPU_ISA_M32R1; + break; + case 1: + c->isa_level = MIPS_CPU_ISA_M32R2; + break; + default: + goto unknown; + } + break; + case 2: + switch ((config0 & MIPS_CONF_AR) >> 10) { + case 0: + c->isa_level = MIPS_CPU_ISA_M64R1; + break; + case 1: + c->isa_level = MIPS_CPU_ISA_M64R2; + break; + default: + goto unknown; + } + break; + default: + goto unknown; + } + + return config0 & MIPS_CONF_M; + +unknown: + panic(unknown_isa, config0); +} + +static inline unsigned int decode_config1(struct cpuinfo_mips *c) +{ + unsigned int config1; + + config1 = read_c0_config1(); + + if (config1 & MIPS_CONF1_MD) + c->ases |= MIPS_ASE_MDMX; + if (config1 & MIPS_CONF1_WR) + c->options |= MIPS_CPU_WATCH; + if (config1 & MIPS_CONF1_CA) + c->ases |= MIPS_ASE_MIPS16; + if (config1 & MIPS_CONF1_EP) + c->options |= MIPS_CPU_EJTAG; + if (config1 & MIPS_CONF1_FP) { + c->options |= MIPS_CPU_FPU; + c->options 
|= MIPS_CPU_32FPR; + } + if (cpu_has_tlb) + c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1; + + return config1 & MIPS_CONF_M; +} + +static inline unsigned int decode_config2(struct cpuinfo_mips *c) +{ + unsigned int config2; + + config2 = read_c0_config2(); + + if (config2 & MIPS_CONF2_SL) + c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; + + return config2 & MIPS_CONF_M; +} + +static inline unsigned int decode_config3(struct cpuinfo_mips *c) +{ + unsigned int config3; + + config3 = read_c0_config3(); + + if (config3 & MIPS_CONF3_SM) + c->ases |= MIPS_ASE_SMARTMIPS; + if (config3 & MIPS_CONF3_DSP) + c->ases |= MIPS_ASE_DSP; + if (config3 & MIPS_CONF3_VINT) + c->options |= MIPS_CPU_VINT; + if (config3 & MIPS_CONF3_VEIC) + c->options |= MIPS_CPU_VEIC; + if (config3 & MIPS_CONF3_MT) + c->ases |= MIPS_ASE_MIPSMT; + if (config3 & MIPS_CONF3_ULRI) + c->options |= MIPS_CPU_ULRI; + + return config3 & MIPS_CONF_M; +} + +static inline unsigned int decode_config4(struct cpuinfo_mips *c) +{ + unsigned int config4; + + config4 = read_c0_config4(); + + if ((config4 & MIPS_CONF4_MMUEXTDEF) == MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT + && cpu_has_tlb) + c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40; + + c->kscratch_mask = (config4 >> 16) & 0xff; + + return config4 & MIPS_CONF_M; +} + +static void __cpuinit decode_configs(struct cpuinfo_mips *c) +{ + int ok; + + /* MIPS32 or MIPS64 compliant CPU. */ + c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER | + MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK; + + c->scache.flags = MIPS_CACHE_NOT_PRESENT; + + ok = decode_config0(c); /* Read Config registers. */ + BUG_ON(!ok); /* Arch spec violation! */ + if (ok) + ok = decode_config1(c); + if (ok) + ok = decode_config2(c); + if (ok) + ok = decode_config3(c); + if (ok) + ok = decode_config4(c); + + mips_probe_watch_registers(c); + + if (cpu_has_mips_r2) + c->core = read_c0_ebase() & 0x3ff; +} + +static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) +{ + decode_configs(c); + switch (c->processor_id & 0xff00) { + case PRID_IMP_4KC: + c->cputype = CPU_4KC; + __cpu_name[cpu] = "MIPS 4Kc"; + break; + case PRID_IMP_4KEC: + case PRID_IMP_4KECR2: + c->cputype = CPU_4KEC; + __cpu_name[cpu] = "MIPS 4KEc"; + break; + case PRID_IMP_4KSC: + case PRID_IMP_4KSD: + c->cputype = CPU_4KSC; + __cpu_name[cpu] = "MIPS 4KSc"; + break; + case PRID_IMP_5KC: + c->cputype = CPU_5KC; + __cpu_name[cpu] = "MIPS 5Kc"; + break; + case PRID_IMP_20KC: + c->cputype = CPU_20KC; + __cpu_name[cpu] = "MIPS 20Kc"; + break; + case PRID_IMP_24K: + case PRID_IMP_24KE: + c->cputype = CPU_24K; + __cpu_name[cpu] = "MIPS 24Kc"; + break; + case PRID_IMP_25KF: + c->cputype = CPU_25KF; + __cpu_name[cpu] = "MIPS 25Kc"; + break; + case PRID_IMP_34K: + c->cputype = CPU_34K; + __cpu_name[cpu] = "MIPS 34Kc"; + break; + case PRID_IMP_74K: + c->cputype = CPU_74K; + __cpu_name[cpu] = "MIPS 74Kc"; + break; + case PRID_IMP_1004K: + c->cputype = CPU_1004K; + __cpu_name[cpu] = "MIPS 1004Kc"; + break; + } + + spram_config(); +} + +static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu) +{ + decode_configs(c); + switch (c->processor_id & 0xff00) { + case PRID_IMP_AU1_REV1: + case PRID_IMP_AU1_REV2: + c->cputype = CPU_ALCHEMY; + switch ((c->processor_id >> 24) & 0xff) { + case 0: + __cpu_name[cpu] = "Au1000"; + break; + case 1: + __cpu_name[cpu] = "Au1500"; + break; + case 2: + __cpu_name[cpu] = "Au1100"; + break; + case 3: + __cpu_name[cpu] = "Au1550"; + break; + case 4: + __cpu_name[cpu] = "Au1200"; + if 
((c->processor_id & 0xff) == 2) + __cpu_name[cpu] = "Au1250"; + break; + case 5: + __cpu_name[cpu] = "Au1210"; + break; + default: + __cpu_name[cpu] = "Au1xxx"; + break; + } + break; + } +} + +static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu) +{ + decode_configs(c); + + switch (c->processor_id & 0xff00) { + case PRID_IMP_SB1: + c->cputype = CPU_SB1; + __cpu_name[cpu] = "SiByte SB1"; + /* FPU in pass1 is known to have issues. */ + if ((c->processor_id & 0xff) < 0x02) + c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR); + break; + case PRID_IMP_SB1A: + c->cputype = CPU_SB1A; + __cpu_name[cpu] = "SiByte SB1A"; + break; + } +} + +static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu) +{ + decode_configs(c); + switch (c->processor_id & 0xff00) { + case PRID_IMP_SR71000: + c->cputype = CPU_SR71000; + __cpu_name[cpu] = "Sandcraft SR71000"; + c->scache.ways = 8; + c->tlbsize = 64; + break; + } +} + +static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu) +{ + decode_configs(c); + switch (c->processor_id & 0xff00) { + case PRID_IMP_PR4450: + c->cputype = CPU_PR4450; + __cpu_name[cpu] = "Philips PR4450"; + c->isa_level = MIPS_CPU_ISA_M32R1; + break; + } +} + +static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) +{ + decode_configs(c); + switch (c->processor_id & 0xff00) { + case PRID_IMP_BMIPS32_REV4: + case PRID_IMP_BMIPS32_REV8: + c->cputype = CPU_BMIPS32; + __cpu_name[cpu] = "Broadcom BMIPS32"; + set_elf_platform(cpu, "bmips32"); + break; + case PRID_IMP_BMIPS3300: + case PRID_IMP_BMIPS3300_ALT: + case PRID_IMP_BMIPS3300_BUG: + c->cputype = CPU_BMIPS3300; + __cpu_name[cpu] = "Broadcom BMIPS3300"; + set_elf_platform(cpu, "bmips3300"); + break; + case PRID_IMP_BMIPS43XX: { + int rev = c->processor_id & 0xff; + + if (rev >= PRID_REV_BMIPS4380_LO && + rev <= PRID_REV_BMIPS4380_HI) { + c->cputype = CPU_BMIPS4380; + __cpu_name[cpu] = "Broadcom BMIPS4380"; + set_elf_platform(cpu, "bmips4380"); + } else { + c->cputype = CPU_BMIPS4350; + __cpu_name[cpu] = "Broadcom BMIPS4350"; + set_elf_platform(cpu, "bmips4350"); + } + break; + } + case PRID_IMP_BMIPS5000: + c->cputype = CPU_BMIPS5000; + __cpu_name[cpu] = "Broadcom BMIPS5000"; + set_elf_platform(cpu, "bmips5000"); + c->options |= MIPS_CPU_ULRI; + break; + } +} + +static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu) +{ + decode_configs(c); + switch (c->processor_id & 0xff00) { + case PRID_IMP_CAVIUM_CN38XX: + case PRID_IMP_CAVIUM_CN31XX: + case PRID_IMP_CAVIUM_CN30XX: + c->cputype = CPU_CAVIUM_OCTEON; + __cpu_name[cpu] = "Cavium Octeon"; + goto platform; + case PRID_IMP_CAVIUM_CN58XX: + case PRID_IMP_CAVIUM_CN56XX: + case PRID_IMP_CAVIUM_CN50XX: + case PRID_IMP_CAVIUM_CN52XX: + c->cputype = CPU_CAVIUM_OCTEON_PLUS; + __cpu_name[cpu] = "Cavium Octeon+"; +platform: + set_elf_platform(cpu, "octeon"); + break; + case PRID_IMP_CAVIUM_CN61XX: + case PRID_IMP_CAVIUM_CN63XX: + case PRID_IMP_CAVIUM_CN66XX: + case PRID_IMP_CAVIUM_CN68XX: + c->cputype = CPU_CAVIUM_OCTEON2; + __cpu_name[cpu] = "Cavium Octeon II"; + set_elf_platform(cpu, "octeon2"); + break; + default: + printk(KERN_INFO "Unknown Octeon chip!\n"); + c->cputype = CPU_UNKNOWN; + break; + } +} + +static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) +{ + decode_configs(c); + /* JZRISC does not implement the CP0 counter. 
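+	 * decode_configs() sets MIPS_CPU_COUNTER unconditionally for
+	 * MIPS32/MIPS64-compliant cores, so it is cleared again here.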
*/ + c->options &= ~MIPS_CPU_COUNTER; + switch (c->processor_id & 0xff00) { + case PRID_IMP_JZRISC: + c->cputype = CPU_JZRISC; + __cpu_name[cpu] = "Ingenic JZRISC"; + break; + default: + panic("Unknown Ingenic Processor ID!"); + break; + } +} + +static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) +{ + decode_configs(c); + + if ((c->processor_id & 0xff00) == PRID_IMP_NETLOGIC_AU13XX) { + c->cputype = CPU_ALCHEMY; + __cpu_name[cpu] = "Au1300"; + /* following stuff is not for Alchemy */ + return; + } + + c->options = (MIPS_CPU_TLB | + MIPS_CPU_4KEX | + MIPS_CPU_COUNTER | + MIPS_CPU_DIVEC | + MIPS_CPU_WATCH | + MIPS_CPU_EJTAG | + MIPS_CPU_LLSC); + + switch (c->processor_id & 0xff00) { + case PRID_IMP_NETLOGIC_XLP8XX: + case PRID_IMP_NETLOGIC_XLP3XX: + c->cputype = CPU_XLP; + __cpu_name[cpu] = "Netlogic XLP"; + break; + + case PRID_IMP_NETLOGIC_XLR732: + case PRID_IMP_NETLOGIC_XLR716: + case PRID_IMP_NETLOGIC_XLR532: + case PRID_IMP_NETLOGIC_XLR308: + case PRID_IMP_NETLOGIC_XLR532C: + case PRID_IMP_NETLOGIC_XLR516C: + case PRID_IMP_NETLOGIC_XLR508C: + case PRID_IMP_NETLOGIC_XLR308C: + c->cputype = CPU_XLR; + __cpu_name[cpu] = "Netlogic XLR"; + break; + + case PRID_IMP_NETLOGIC_XLS608: + case PRID_IMP_NETLOGIC_XLS408: + case PRID_IMP_NETLOGIC_XLS404: + case PRID_IMP_NETLOGIC_XLS208: + case PRID_IMP_NETLOGIC_XLS204: + case PRID_IMP_NETLOGIC_XLS108: + case PRID_IMP_NETLOGIC_XLS104: + case PRID_IMP_NETLOGIC_XLS616B: + case PRID_IMP_NETLOGIC_XLS608B: + case PRID_IMP_NETLOGIC_XLS416B: + case PRID_IMP_NETLOGIC_XLS412B: + case PRID_IMP_NETLOGIC_XLS408B: + case PRID_IMP_NETLOGIC_XLS404B: + c->cputype = CPU_XLR; + __cpu_name[cpu] = "Netlogic XLS"; + break; + + default: + pr_info("Unknown Netlogic chip id [%02x]!\n", + c->processor_id); + c->cputype = CPU_XLR; + break; + } + + if (c->cputype == CPU_XLP) { + c->isa_level = MIPS_CPU_ISA_M64R2; + c->options |= (MIPS_CPU_FPU | MIPS_CPU_ULRI | MIPS_CPU_MCHECK); + /* This will be updated again after all threads are woken up */ + c->tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1; + } else { + c->isa_level = MIPS_CPU_ISA_M64R1; + c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; + } +} + +#ifdef CONFIG_64BIT +/* For use by uaccess.h */ +u64 __ua_limit; +EXPORT_SYMBOL(__ua_limit); +#endif + +const char *__cpu_name[NR_CPUS]; +const char *__elf_platform; + +__cpuinit void cpu_probe(void) +{ + struct cpuinfo_mips *c = ¤t_cpu_data; + unsigned int cpu = smp_processor_id(); + + c->processor_id = PRID_IMP_UNKNOWN; + c->fpu_id = FPIR_IMP_NONE; + c->cputype = CPU_UNKNOWN; + + c->processor_id = read_c0_prid(); + switch (c->processor_id & 0xff0000) { + case PRID_COMP_LEGACY: + cpu_probe_legacy(c, cpu); + break; + case PRID_COMP_MIPS: + cpu_probe_mips(c, cpu); + break; + case PRID_COMP_ALCHEMY: + cpu_probe_alchemy(c, cpu); + break; + case PRID_COMP_SIBYTE: + cpu_probe_sibyte(c, cpu); + break; + case PRID_COMP_BROADCOM: + cpu_probe_broadcom(c, cpu); + break; + case PRID_COMP_SANDCRAFT: + cpu_probe_sandcraft(c, cpu); + break; + case PRID_COMP_NXP: + cpu_probe_nxp(c, cpu); + break; + case PRID_COMP_CAVIUM: + cpu_probe_cavium(c, cpu); + break; + case PRID_COMP_INGENIC: + cpu_probe_ingenic(c, cpu); + break; + case PRID_COMP_NETLOGIC: + cpu_probe_netlogic(c, cpu); + break; + } + + BUG_ON(!__cpu_name[cpu]); + BUG_ON(c->cputype == CPU_UNKNOWN); + + /* + * Platform code can force the cpu type to optimize code + * generation. In that case be sure the cpu type is correctly + * manually setup otherwise it could trigger some nasty bugs. 
+ */ + BUG_ON(current_cpu_type() != c->cputype); + + if (mips_fpu_disabled) + c->options &= ~MIPS_CPU_FPU; + + if (mips_dsp_disabled) + c->ases &= ~MIPS_ASE_DSP; + + if (c->options & MIPS_CPU_FPU) { + c->fpu_id = cpu_get_fpu_id(); + + if (c->isa_level == MIPS_CPU_ISA_M32R1 || + c->isa_level == MIPS_CPU_ISA_M32R2 || + c->isa_level == MIPS_CPU_ISA_M64R1 || + c->isa_level == MIPS_CPU_ISA_M64R2) { + if (c->fpu_id & MIPS_FPIR_3D) + c->ases |= MIPS_ASE_MIPS3D; + } + } + + if (cpu_has_mips_r2) + c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; + else + c->srsets = 1; + + cpu_probe_vmbits(c); + +#ifdef CONFIG_64BIT + if (cpu == 0) + __ua_limit = ~((1ull << cpu_vmbits) - 1); +#endif +} + +__cpuinit void cpu_report(void) +{ + struct cpuinfo_mips *c = ¤t_cpu_data; + + printk(KERN_INFO "CPU revision is: %08x (%s)\n", + c->processor_id, cpu_name_string()); + if (c->options & MIPS_CPU_FPU) + printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id); +} diff --git a/arch/mips/kernel/cpufreq/Kconfig b/arch/mips/kernel/cpufreq/Kconfig new file mode 100644 index 00000000..58c601ee --- /dev/null +++ b/arch/mips/kernel/cpufreq/Kconfig @@ -0,0 +1,41 @@ +# +# CPU Frequency scaling +# + +config MIPS_EXTERNAL_TIMER + bool + +config MIPS_CPUFREQ + bool + default y + depends on CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER + +if MIPS_CPUFREQ + +menu "CPU Frequency scaling" + +source "drivers/cpufreq/Kconfig" + +if CPU_FREQ + +comment "CPUFreq processor drivers" + +config LOONGSON2_CPUFREQ + tristate "Loongson2 CPUFreq Driver" + select CPU_FREQ_TABLE + depends on MIPS_CPUFREQ + help + This option adds a CPUFreq driver for loongson processors which + support software configurable cpu frequency. + + Loongson2F and it's successors support this feature. + + For details, take a look at <file:Documentation/cpu-freq/>. + + If in doubt, say N. + +endif # CPU_FREQ + +endmenu + +endif # MIPS_CPUFREQ diff --git a/arch/mips/kernel/cpufreq/Makefile b/arch/mips/kernel/cpufreq/Makefile new file mode 100644 index 00000000..c3479a43 --- /dev/null +++ b/arch/mips/kernel/cpufreq/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Linux/MIPS cpufreq. +# + +obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o loongson2_clock.o diff --git a/arch/mips/kernel/cpufreq/loongson2_clock.c b/arch/mips/kernel/cpufreq/loongson2_clock.c new file mode 100644 index 00000000..5426779d --- /dev/null +++ b/arch/mips/kernel/cpufreq/loongson2_clock.c @@ -0,0 +1,171 @@ +/* + * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology + * Author: Yanhua, yanh@lemote.com + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#include <linux/module.h> +#include <linux/cpufreq.h> +#include <linux/platform_device.h> + +#include <asm/clock.h> + +#include <loongson.h> + +static LIST_HEAD(clock_list); +static DEFINE_SPINLOCK(clock_lock); +static DEFINE_MUTEX(clock_list_sem); + +/* Minimum CLK support */ +enum { + DC_ZERO, DC_25PT = 2, DC_37PT, DC_50PT, DC_62PT, DC_75PT, + DC_87PT, DC_DISABLE, DC_RESV +}; + +struct cpufreq_frequency_table loongson2_clockmod_table[] = { + {DC_RESV, CPUFREQ_ENTRY_INVALID}, + {DC_ZERO, CPUFREQ_ENTRY_INVALID}, + {DC_25PT, 0}, + {DC_37PT, 0}, + {DC_50PT, 0}, + {DC_62PT, 0}, + {DC_75PT, 0}, + {DC_87PT, 0}, + {DC_DISABLE, 0}, + {DC_RESV, CPUFREQ_TABLE_END}, +}; +EXPORT_SYMBOL_GPL(loongson2_clockmod_table); + +static struct clk cpu_clk = { + .name = "cpu_clk", + .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES, + .rate = 800000000, +}; + +struct clk *clk_get(struct device *dev, const char *id) +{ + return &cpu_clk; +} +EXPORT_SYMBOL(clk_get); + +static void propagate_rate(struct clk *clk) +{ + struct clk *clkp; + + list_for_each_entry(clkp, &clock_list, node) { + if (likely(clkp->parent != clk)) + continue; + if (likely(clkp->ops && clkp->ops->recalc)) + clkp->ops->recalc(clkp); + if (unlikely(clkp->flags & CLK_RATE_PROPAGATES)) + propagate_rate(clkp); + } +} + +int clk_enable(struct clk *clk) +{ + return 0; +} +EXPORT_SYMBOL(clk_enable); + +void clk_disable(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_disable); + +unsigned long clk_get_rate(struct clk *clk) +{ + return (unsigned long)clk->rate; +} +EXPORT_SYMBOL(clk_get_rate); + +void clk_put(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_put); + +int clk_set_rate(struct clk *clk, unsigned long rate) +{ + return clk_set_rate_ex(clk, rate, 0); +} +EXPORT_SYMBOL_GPL(clk_set_rate); + +int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) +{ + int ret = 0; + int regval; + int i; + + if (likely(clk->ops && clk->ops->set_rate)) { + unsigned long flags; + + spin_lock_irqsave(&clock_lock, flags); + ret = clk->ops->set_rate(clk, rate, algo_id); + spin_unlock_irqrestore(&clock_lock, flags); + } + + if (unlikely(clk->flags & CLK_RATE_PROPAGATES)) + propagate_rate(clk); + + for (i = 0; loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END; + i++) { + if (loongson2_clockmod_table[i].frequency == + CPUFREQ_ENTRY_INVALID) + continue; + if (rate == loongson2_clockmod_table[i].frequency) + break; + } + if (rate != loongson2_clockmod_table[i].frequency) + return -ENOTSUPP; + + clk->rate = rate; + + regval = LOONGSON_CHIPCFG0; + regval = (regval & ~0x7) | (loongson2_clockmod_table[i].index - 1); + LOONGSON_CHIPCFG0 = regval; + + return ret; +} +EXPORT_SYMBOL_GPL(clk_set_rate_ex); + +long clk_round_rate(struct clk *clk, unsigned long rate) +{ + if (likely(clk->ops && clk->ops->round_rate)) { + unsigned long flags, rounded; + + spin_lock_irqsave(&clock_lock, flags); + rounded = clk->ops->round_rate(clk, rate); + spin_unlock_irqrestore(&clock_lock, flags); + + return rounded; + } + + return rate; +} +EXPORT_SYMBOL_GPL(clk_round_rate); + +/* + * This is the simple version of Loongson-2 wait, Maybe we need do this in + * interrupt disabled content + */ + +DEFINE_SPINLOCK(loongson2_wait_lock); +void loongson2_cpu_wait(void) +{ + u32 cpu_freq; + unsigned long flags; + + spin_lock_irqsave(&loongson2_wait_lock, flags); + cpu_freq = LOONGSON_CHIPCFG0; + LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ + LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ + spin_unlock_irqrestore(&loongson2_wait_lock, flags); +} 
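The .index values in loongson2_clockmod_table are the DC_* steps from the enum above, and step i runs the core at i/8 of the full clock; clk_set_rate_ex() programs (index - 1) into the low three bits of LOONGSON_CHIPCFG0, and the cpufreq driver fills the frequency column at init time. A small illustrative calculation of that table, assuming an 800 MHz Loongson-2F (the rate is an assumption, and this snippet is not part of the driver):

#include <stdio.h>

int main(void)
{
	unsigned long rate_khz = 800000;	/* assumed 800 MHz part, in kHz as the driver uses */
	int i;

	/* step i (DC_25PT=2 ... DC_DISABLE=8) runs the core at i/8 of full speed */
	for (i = 2; i <= 8; i++)
		printf("step %d: %6lu kHz, CHIPCFG0 low bits = %d\n",
		       i, rate_khz * i / 8, i - 1);
	return 0;
}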
+EXPORT_SYMBOL_GPL(loongson2_cpu_wait); + +MODULE_AUTHOR("Yanhua <yanh@lemote.com>"); +MODULE_DESCRIPTION("cpufreq driver for Loongson 2F"); +MODULE_LICENSE("GPL"); diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c new file mode 100644 index 00000000..ae5db206 --- /dev/null +++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c @@ -0,0 +1,227 @@ +/* + * Cpufreq driver for the loongson-2 processors + * + * The 2E revision of loongson processor not support this feature. + * + * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology + * Author: Yanhua, yanh@lemote.com + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/cpufreq.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/sched.h> /* set_cpus_allowed() */ +#include <linux/delay.h> +#include <linux/platform_device.h> + +#include <asm/clock.h> + +#include <loongson.h> + +static uint nowait; + +static struct clk *cpuclk; + +static void (*saved_cpu_wait) (void); + +static int loongson2_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data); + +static struct notifier_block loongson2_cpufreq_notifier_block = { + .notifier_call = loongson2_cpu_freq_notifier +}; + +static int loongson2_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + if (val == CPUFREQ_POSTCHANGE) + current_cpu_data.udelay_val = loops_per_jiffy; + + return 0; +} + +static unsigned int loongson2_cpufreq_get(unsigned int cpu) +{ + return clk_get_rate(cpuclk); +} + +/* + * Here we notify other drivers of the proposed change and the final change. + */ +static int loongson2_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int cpu = policy->cpu; + unsigned int newstate = 0; + cpumask_t cpus_allowed; + struct cpufreq_freqs freqs; + unsigned int freq; + + if (!cpu_online(cpu)) + return -ENODEV; + + cpus_allowed = current->cpus_allowed; + set_cpus_allowed_ptr(current, cpumask_of(cpu)); + + if (cpufreq_frequency_table_target + (policy, &loongson2_clockmod_table[0], target_freq, relation, + &newstate)) + return -EINVAL; + + freq = + ((cpu_clock_freq / 1000) * + loongson2_clockmod_table[newstate].index) / 8; + if (freq < policy->min || freq > policy->max) + return -EINVAL; + + pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); + + freqs.cpu = cpu; + freqs.old = loongson2_cpufreq_get(cpu); + freqs.new = freq; + freqs.flags = 0; + + if (freqs.new == freqs.old) + return 0; + + /* notifiers */ + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + set_cpus_allowed_ptr(current, &cpus_allowed); + + /* setting the cpu frequency */ + clk_set_rate(cpuclk, freq); + + /* notifiers */ + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + pr_debug("cpufreq: set frequency %u kHz\n", freq); + + return 0; +} + +static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + int i; + + if (!cpu_online(policy->cpu)) + return -ENODEV; + + cpuclk = clk_get(NULL, "cpu_clk"); + if (IS_ERR(cpuclk)) { + printk(KERN_ERR "cpufreq: couldn't get CPU clk\n"); + return PTR_ERR(cpuclk); + } + + cpuclk->rate = cpu_clock_freq / 1000; + if (!cpuclk->rate) + return -EINVAL; + + /* clock table init */ + for (i = 2; + (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END); + i++) + loongson2_clockmod_table[i].frequency = 
(cpuclk->rate * i) / 8; + + policy->cur = loongson2_cpufreq_get(policy->cpu); + + cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], + policy->cpu); + + return cpufreq_frequency_table_cpuinfo(policy, + &loongson2_clockmod_table[0]); +} + +static int loongson2_cpufreq_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, + &loongson2_clockmod_table[0]); +} + +static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) +{ + clk_put(cpuclk); + return 0; +} + +static struct freq_attr *loongson2_table_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver loongson2_cpufreq_driver = { + .owner = THIS_MODULE, + .name = "loongson2", + .init = loongson2_cpufreq_cpu_init, + .verify = loongson2_cpufreq_verify, + .target = loongson2_cpufreq_target, + .get = loongson2_cpufreq_get, + .exit = loongson2_cpufreq_exit, + .attr = loongson2_table_attr, +}; + +static struct platform_device_id platform_device_ids[] = { + { + .name = "loongson2_cpufreq", + }, + {} +}; + +MODULE_DEVICE_TABLE(platform, platform_device_ids); + +static struct platform_driver platform_driver = { + .driver = { + .name = "loongson2_cpufreq", + .owner = THIS_MODULE, + }, + .id_table = platform_device_ids, +}; + +static int __init cpufreq_init(void) +{ + int ret; + + /* Register platform stuff */ + ret = platform_driver_register(&platform_driver); + if (ret) + return ret; + + pr_info("cpufreq: Loongson-2F CPU frequency driver.\n"); + + cpufreq_register_notifier(&loongson2_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + ret = cpufreq_register_driver(&loongson2_cpufreq_driver); + + if (!ret && !nowait) { + saved_cpu_wait = cpu_wait; + cpu_wait = loongson2_cpu_wait; + } + + return ret; +} + +static void __exit cpufreq_exit(void) +{ + if (!nowait && saved_cpu_wait) + cpu_wait = saved_cpu_wait; + cpufreq_unregister_driver(&loongson2_cpufreq_driver); + cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + platform_driver_unregister(&platform_driver); +} + +module_init(cpufreq_init); +module_exit(cpufreq_exit); + +module_param(nowait, uint, 0644); +MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait"); + +MODULE_AUTHOR("Yanhua <yanh@lemote.com>"); +MODULE_DESCRIPTION("cpufreq driver for Loongson2F"); +MODULE_LICENSE("GPL"); diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c new file mode 100644 index 00000000..f96f99c7 --- /dev/null +++ b/arch/mips/kernel/csrc-bcm1480.c @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2000,2001,2004 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
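loongson2_cpufreq_driver above is registered through a platform driver that matches the name "loongson2_cpufreq", so nothing happens until board code creates a device with that name. A minimal sketch of that board-side registration using the generic platform-bus helper (the actual Loongson board file is not shown in this diff, so the hook below is an assumption):

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/err.h>

/* illustrative board-setup hook; its name and placement are assumptions */
static int __init example_loongson2_cpufreq_device(void)
{
	struct platform_device *pdev;

	/* id -1 = single unnumbered instance, no MMIO or IRQ resources needed */
	pdev = platform_device_register_simple("loongson2_cpufreq", -1, NULL, 0);

	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
arch_initcall(example_loongson2_cpufreq_device);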
+ */ +#include <linux/clocksource.h> + +#include <asm/addrspace.h> +#include <asm/io.h> +#include <asm/time.h> + +#include <asm/sibyte/bcm1480_regs.h> +#include <asm/sibyte/sb1250_regs.h> +#include <asm/sibyte/bcm1480_int.h> +#include <asm/sibyte/bcm1480_scd.h> + +#include <asm/sibyte/sb1250.h> + +static cycle_t bcm1480_hpt_read(struct clocksource *cs) +{ + return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT)); +} + +struct clocksource bcm1480_clocksource = { + .name = "zbbus-cycles", + .rating = 200, + .read = bcm1480_hpt_read, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +void __init sb1480_clocksource_init(void) +{ + struct clocksource *cs = &bcm1480_clocksource; + unsigned int plldiv; + unsigned long zbbus; + + plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG))); + zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000); + clocksource_register_hz(cs, zbbus); +} diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c new file mode 100644 index 00000000..46bd7fa9 --- /dev/null +++ b/arch/mips/kernel/csrc-ioasic.c @@ -0,0 +1,63 @@ +/* + * DEC I/O ASIC's counter clocksource + * + * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include <linux/clocksource.h> +#include <linux/init.h> + +#include <asm/ds1287.h> +#include <asm/time.h> +#include <asm/dec/ioasic.h> +#include <asm/dec/ioasic_addrs.h> + +static cycle_t dec_ioasic_hpt_read(struct clocksource *cs) +{ + return ioasic_read(IO_REG_FCTR); +} + +static struct clocksource clocksource_dec = { + .name = "dec-ioasic", + .read = dec_ioasic_hpt_read, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +void __init dec_ioasic_clocksource_init(void) +{ + unsigned int freq; + u32 start, end; + int i = HZ / 10; + + + while (!ds1287_timer_state()) + ; + + start = dec_ioasic_hpt_read(&clocksource_dec); + + while (i--) + while (!ds1287_timer_state()) + ; + + end = dec_ioasic_hpt_read(&clocksource_dec); + + freq = (end - start) * 10; + printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq); + + clocksource_dec.rating = 200 + freq / 10000000; + clocksource_register_hz(&clocksource_dec, freq); +} diff --git a/arch/mips/kernel/csrc-powertv.c b/arch/mips/kernel/csrc-powertv.c new file mode 100644 index 00000000..2e7c5232 --- /dev/null +++ b/arch/mips/kernel/csrc-powertv.c @@ -0,0 +1,151 @@ +/* + * Copyright (C) 2008 Scientific-Atlanta, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
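sb1480_clocksource_init() above derives the ZBbus rate from the system PLL divider: each divider step is worth 25 MHz, expressed as (plldiv >> 1) * 50 MHz plus 25 MHz when the divider is odd. A worked example with an invented divider value:

#include <stdio.h>

int main(void)
{
	unsigned int plldiv = 20;	/* hypothetical G_BCM1480_SYS_PLL_DIV() value */
	unsigned long zbbus;

	/* same arithmetic as sb1480_clocksource_init(): 25 MHz per divider step */
	zbbus = ((plldiv >> 1) * 50000000UL) + ((plldiv & 1) * 25000000UL);
	printf("plldiv %u -> ZBbus clock %lu Hz\n", plldiv, zbbus);	/* 500000000 Hz */
	return 0;
}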
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +/* + * The file comes from kernel/csrc-r4k.c + */ +#include <linux/clocksource.h> +#include <linux/init.h> + +#include <asm/time.h> /* Not included in linux/time.h */ + +#include <asm/mach-powertv/asic_regs.h> +#include "powertv-clock.h" + +/* MIPS PLL Register Definitions */ +#define PLL_GET_M(x) (((x) >> 8) & 0x000000FF) +#define PLL_GET_N(x) (((x) >> 16) & 0x000000FF) +#define PLL_GET_P(x) (((x) >> 24) & 0x00000007) + +/* + * returns: Clock frequency in kHz + */ +unsigned int __init mips_get_pll_freq(void) +{ + unsigned int pll_reg, m, n, p; + unsigned int fin = 54000; /* Base frequency in kHz */ + unsigned int fout; + + /* Read PLL register setting */ + pll_reg = asic_read(mips_pll_setup); + m = PLL_GET_M(pll_reg); + n = PLL_GET_N(pll_reg); + p = PLL_GET_P(pll_reg); + pr_info("MIPS PLL Register:0x%x M=%d N=%d P=%d\n", pll_reg, m, n, p); + + /* Calculate clock frequency = (2 * N * 54MHz) / (M * (2**P)) */ + fout = ((2 * n * fin) / (m * (0x01 << p))); + + pr_info("MIPS Clock Freq=%d kHz\n", fout); + + return fout; +} + +static cycle_t c0_hpt_read(struct clocksource *cs) +{ + return read_c0_count(); +} + +static struct clocksource clocksource_mips = { + .name = "powertv-counter", + .read = c0_hpt_read, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static void __init powertv_c0_hpt_clocksource_init(void) +{ + unsigned int pll_freq = mips_get_pll_freq(); + + pr_info("CPU frequency %d.%02d MHz\n", pll_freq / 1000, + (pll_freq % 1000) * 100 / 1000); + + mips_hpt_frequency = pll_freq / 2 * 1000; + + clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; + + clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); +} + +/** + * struct tim_c - free running counter + * @hi: High 16 bits of the counter + * @lo: Low 32 bits of the counter + * + * Lays out the structure of the free running counter in memory. This counter + * increments at a rate of 27 MHz/8 on all platforms. + */ +struct tim_c { + unsigned int hi; + unsigned int lo; +}; + +static struct tim_c *tim_c; + +static cycle_t tim_c_read(struct clocksource *cs) +{ + unsigned int hi; + unsigned int next_hi; + unsigned int lo; + + hi = readl(&tim_c->hi); + + for (;;) { + lo = readl(&tim_c->lo); + next_hi = readl(&tim_c->hi); + if (next_hi == hi) + break; + hi = next_hi; + } + +pr_crit("%s: read %llx\n", __func__, ((u64) hi << 32) | lo); + return ((u64) hi << 32) | lo; +} + +#define TIM_C_SIZE 48 /* # bits in the timer */ + +static struct clocksource clocksource_tim_c = { + .name = "powertv-tim_c", + .read = tim_c_read, + .mask = CLOCKSOURCE_MASK(TIM_C_SIZE), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +/** + * powertv_tim_c_clocksource_init - set up a clock source for the TIM_C clock + * + * We know that TIM_C counts at 27 MHz/8, so each cycle corresponds to + * 1 / (27,000,000/8) seconds. 
+ */ +static void __init powertv_tim_c_clocksource_init(void) +{ + const unsigned long counts_per_second = 27000000 / 8; + + clocksource_tim_c.rating = 200; + + clocksource_register_hz(&clocksource_tim_c, counts_per_second); + tim_c = (struct tim_c *) asic_reg_addr(tim_ch); +} + +/** + powertv_clocksource_init - initialize all clocksources + */ +void __init powertv_clocksource_init(void) +{ + powertv_c0_hpt_clocksource_init(); + powertv_tim_c_clocksource_init(); +} diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c new file mode 100644 index 00000000..decd1fa3 --- /dev/null +++ b/arch/mips/kernel/csrc-r4k.c @@ -0,0 +1,36 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007 by Ralf Baechle + */ +#include <linux/clocksource.h> +#include <linux/init.h> + +#include <asm/time.h> + +static cycle_t c0_hpt_read(struct clocksource *cs) +{ + return read_c0_count(); +} + +static struct clocksource clocksource_mips = { + .name = "MIPS", + .read = c0_hpt_read, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +int __init init_r4k_clocksource(void) +{ + if (!cpu_has_counter || !mips_hpt_frequency) + return -ENXIO; + + /* Calculate a somewhat reasonable rating value */ + clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; + + clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); + + return 0; +} diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c new file mode 100644 index 00000000..e9606d90 --- /dev/null +++ b/arch/mips/kernel/csrc-sb1250.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2000, 2001 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/clocksource.h> + +#include <asm/addrspace.h> +#include <asm/io.h> +#include <asm/time.h> + +#include <asm/sibyte/sb1250.h> +#include <asm/sibyte/sb1250_regs.h> +#include <asm/sibyte/sb1250_int.h> +#include <asm/sibyte/sb1250_scd.h> + +#define SB1250_HPT_NUM 3 +#define SB1250_HPT_VALUE M_SCD_TIMER_CNT /* max value */ + +/* + * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over + * again. 
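mips_get_pll_freq() above computes the CPU clock as (2 * N * 54 MHz) / (M * 2^P) in kHz, and powertv_c0_hpt_clocksource_init() then uses half of that, in Hz, as the CP0 count frequency. A worked example with invented M/N/P register fields:

#include <stdio.h>

int main(void)
{
	unsigned int fin = 54000;		/* base frequency in kHz, as in mips_get_pll_freq() */
	unsigned int m = 3, n = 25, p = 1;	/* invented PLL register fields */
	unsigned int fout_khz, hpt_hz;

	/* fout = (2 * N * 54 MHz) / (M * 2^P), result in kHz */
	fout_khz = (2 * n * fin) / (m * (1u << p));
	/* the CP0 count register ticks at half the CPU clock */
	hpt_hz = fout_khz / 2 * 1000;
	printf("PLL output %u kHz, c0 count frequency %u Hz\n", fout_khz, hpt_hz);
	return 0;
}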
+ */ +static cycle_t sb1250_hpt_read(struct clocksource *cs) +{ + unsigned int count; + + count = G_SCD_TIMER_CNT(__raw_readq(IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT)))); + + return SB1250_HPT_VALUE - count; +} + +struct clocksource bcm1250_clocksource = { + .name = "bcm1250-counter-3", + .rating = 200, + .read = sb1250_hpt_read, + .mask = CLOCKSOURCE_MASK(23), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +void __init sb1250_clocksource_init(void) +{ + struct clocksource *cs = &bcm1250_clocksource; + + /* Setup hpt using timer #3 but do not enable irq for it */ + __raw_writeq(0, + IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, + R_SCD_TIMER_CFG))); + __raw_writeq(SB1250_HPT_VALUE, + IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, + R_SCD_TIMER_INIT))); + __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, + IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, + R_SCD_TIMER_CFG))); + + clocksource_register_hz(cs, V_SCD_TIMER_FREQ); +} diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c new file mode 100644 index 00000000..9ae813eb --- /dev/null +++ b/arch/mips/kernel/early_printk.c @@ -0,0 +1,44 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2002, 2003, 06, 07 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2007 MIPS Technologies, Inc. + * written by Ralf Baechle (ralf@linux-mips.org) + */ +#include <linux/console.h> +#include <linux/init.h> + +#include <asm/setup.h> + +extern void prom_putchar(char); + +static void __init +early_console_write(struct console *con, const char *s, unsigned n) +{ + while (n-- && *s) { + if (*s == '\n') + prom_putchar('\r'); + prom_putchar(*s); + s++; + } +} + +static struct console early_console __initdata = { + .name = "early", + .write = early_console_write, + .flags = CON_PRINTBUFFER | CON_BOOT, + .index = -1 +}; + +static int early_console_initialized __initdata; + +void __init setup_early_printk(void) +{ + if (early_console_initialized) + return; + early_console_initialized = 1; + + register_console(&early_console); +} diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S new file mode 100644 index 00000000..37acfa03 --- /dev/null +++ b/arch/mips/kernel/entry.S @@ -0,0 +1,192 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + */ + +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/regdef.h> +#include <asm/mipsregs.h> +#include <asm/stackframe.h> +#include <asm/isadep.h> +#include <asm/thread_info.h> +#include <asm/war.h> +#ifdef CONFIG_MIPS_MT_SMTC +#include <asm/mipsmtregs.h> +#endif + +#ifndef CONFIG_PREEMPT +#define resume_kernel restore_all +#else +#define __ret_from_irq ret_from_exception +#endif + + .text + .align 5 +#ifndef CONFIG_PREEMPT +FEXPORT(ret_from_exception) + local_irq_disable # preempt stop + b __ret_from_irq +#endif +FEXPORT(ret_from_irq) + LONG_S s0, TI_REGS($28) +FEXPORT(__ret_from_irq) + LONG_L t0, PT_STATUS(sp) # returning to kernel mode? 
+ andi t0, t0, KU_USER + beqz t0, resume_kernel + +resume_userspace: + local_irq_disable # make sure we dont miss an + # interrupt setting need_resched + # between sampling and return + LONG_L a2, TI_FLAGS($28) # current->work + andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) + bnez t0, work_pending + j restore_all + +#ifdef CONFIG_PREEMPT +resume_kernel: + local_irq_disable + lw t0, TI_PRE_COUNT($28) + bnez t0, restore_all +need_resched: + LONG_L t0, TI_FLAGS($28) + andi t1, t0, _TIF_NEED_RESCHED + beqz t1, restore_all + LONG_L t0, PT_STATUS(sp) # Interrupts off? + andi t0, 1 + beqz t0, restore_all + jal preempt_schedule_irq + b need_resched +#endif + +FEXPORT(ret_from_fork) + jal schedule_tail # a0 = struct task_struct *prev + +FEXPORT(syscall_exit) + local_irq_disable # make sure need_resched and + # signals dont change between + # sampling and return + LONG_L a2, TI_FLAGS($28) # current->work + li t0, _TIF_ALLWORK_MASK + and t0, a2, t0 + bnez t0, syscall_exit_work + +FEXPORT(restore_all) # restore full frame +#ifdef CONFIG_MIPS_MT_SMTC +#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP +/* Re-arm any temporarily masked interrupts not explicitly "acked" */ + mfc0 v0, CP0_TCSTATUS + ori v1, v0, TCSTATUS_IXMT + mtc0 v1, CP0_TCSTATUS + andi v0, TCSTATUS_IXMT + _ehb + mfc0 t0, CP0_TCCONTEXT + DMT 9 # dmt t1 + jal mips_ihb + mfc0 t2, CP0_STATUS + andi t3, t0, 0xff00 + or t2, t2, t3 + mtc0 t2, CP0_STATUS + _ehb + andi t1, t1, VPECONTROL_TE + beqz t1, 1f + EMT +1: + mfc0 v1, CP0_TCSTATUS + /* We set IXMT above, XOR should clear it here */ + xori v1, v1, TCSTATUS_IXMT + or v1, v0, v1 + mtc0 v1, CP0_TCSTATUS + _ehb + xor t0, t0, t3 + mtc0 t0, CP0_TCCONTEXT +#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ +/* Detect and execute deferred IPI "interrupts" */ + LONG_L s0, TI_REGS($28) + LONG_S sp, TI_REGS($28) + jal deferred_smtc_ipi + LONG_S s0, TI_REGS($28) +#endif /* CONFIG_MIPS_MT_SMTC */ + .set noat + RESTORE_TEMP + RESTORE_AT + RESTORE_STATIC +FEXPORT(restore_partial) # restore partial frame +#ifdef CONFIG_TRACE_IRQFLAGS + SAVE_STATIC + SAVE_AT + SAVE_TEMP + LONG_L v0, PT_STATUS(sp) +#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) + and v0, ST0_IEP +#else + and v0, ST0_IE +#endif + beqz v0, 1f + jal trace_hardirqs_on + b 2f +1: jal trace_hardirqs_off +2: + RESTORE_TEMP + RESTORE_AT + RESTORE_STATIC +#endif + RESTORE_SOME + RESTORE_SP_AND_RET + .set at + +work_pending: + andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS + beqz t0, work_notifysig +work_resched: + jal schedule + + local_irq_disable # make sure need_resched and + # signals dont change between + # sampling and return + LONG_L a2, TI_FLAGS($28) + andi t0, a2, _TIF_WORK_MASK # is there any work to be done + # other than syscall tracing? + beqz t0, restore_all + andi t0, a2, _TIF_NEED_RESCHED + bnez t0, work_resched + +work_notifysig: # deal with pending signals and + # notify-resume requests + move a0, sp + li a1, 0 + jal do_notify_resume # a2 already loaded + j resume_userspace + +FEXPORT(syscall_exit_work_partial) + SAVE_STATIC +syscall_exit_work: + li t0, _TIF_WORK_SYSCALL_EXIT + and t0, a2 # a2 is preloaded with TI_FLAGS + beqz t0, work_pending # trace bit set? + local_irq_enable # could let syscall_trace_leave() + # call schedule() instead + move a0, sp + jal syscall_trace_leave + b resume_userspace + +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) + +/* + * MIPS32R2 Instruction Hazard Barrier - must be called + * + * For C code use the inline version named instruction_hazard(). 
+ */ +LEAF(mips_ihb) + .set mips32r2 + jr.hb ra + nop + END(mips_ihb) + +#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */ diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c new file mode 100644 index 00000000..6a2d758d --- /dev/null +++ b/arch/mips/kernel/ftrace.c @@ -0,0 +1,328 @@ +/* + * Code for replacing ftrace calls with jumps. + * + * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> + * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China + * Author: Wu Zhangjin <wuzhangjin@gmail.com> + * + * Thanks goes to Steven Rostedt for writing the original x86 version. + */ + +#include <linux/uaccess.h> +#include <linux/init.h> +#include <linux/ftrace.h> + +#include <asm/asm.h> +#include <asm/asm-offsets.h> +#include <asm/cacheflush.h> +#include <asm/uasm.h> + +#include <asm-generic/sections.h> + +#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) +#define MCOUNT_OFFSET_INSNS 5 +#else +#define MCOUNT_OFFSET_INSNS 4 +#endif + +/* + * Check if the address is in kernel space + * + * Clone core_kernel_text() from kernel/extable.c, but doesn't call + * init_kernel_text() for Ftrace doesn't trace functions in init sections. + */ +static inline int in_kernel_space(unsigned long ip) +{ + if (ip >= (unsigned long)_stext && + ip <= (unsigned long)_etext) + return 1; + return 0; +} + +#ifdef CONFIG_DYNAMIC_FTRACE + +#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ +#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ +#define JUMP_RANGE_MASK ((1UL << 28) - 1) + +#define INSN_NOP 0x00000000 /* nop */ +#define INSN_JAL(addr) \ + ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) + +static unsigned int insn_jal_ftrace_caller __read_mostly; +static unsigned int insn_lui_v1_hi16_mcount __read_mostly; +static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly; + +static inline void ftrace_dyn_arch_init_insns(void) +{ + u32 *buf; + unsigned int v1; + + /* lui v1, hi16_mcount */ + v1 = 3; + buf = (u32 *)&insn_lui_v1_hi16_mcount; + UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR); + + /* jal (ftrace_caller + 8), jump over the first two instruction */ + buf = (u32 *)&insn_jal_ftrace_caller; + uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK); + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* j ftrace_graph_caller */ + buf = (u32 *)&insn_j_ftrace_graph_caller; + uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK); +#endif +} + +static int ftrace_modify_code(unsigned long ip, unsigned int new_code) +{ + int faulted; + + /* *(unsigned int *)ip = new_code; */ + safe_store_code(new_code, ip, faulted); + + if (unlikely(faulted)) + return -EFAULT; + + flush_icache_range(ip, ip + 8); + + return 0; +} + +/* + * The details about the calling site of mcount on MIPS + * + * 1. For kernel: + * + * move at, ra + * jal _mcount --> nop + * + * 2. 
For modules: + * + * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT + * + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) + * addiu v1, v1, low_16bit_of_mcount + * move at, ra + * move $12, ra_address + * jalr v1 + * sub sp, sp, 8 + * 1: offset = 5 instructions + * 2.2 For the Other situations + * + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) + * addiu v1, v1, low_16bit_of_mcount + * move at, ra + * jalr v1 + * nop | move $12, ra_address | sub sp, sp, 8 + * 1: offset = 4 instructions + */ + +#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS) + +int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned int new; + unsigned long ip = rec->ip; + + /* + * If ip is in kernel space, no long call, otherwise, long call is + * needed. + */ + new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; + + return ftrace_modify_code(ip, new); +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned int new; + unsigned long ip = rec->ip; + + new = in_kernel_space(ip) ? insn_jal_ftrace_caller : + insn_lui_v1_hi16_mcount; + + return ftrace_modify_code(ip, new); +} + +#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call)) + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned int new; + + new = INSN_JAL((unsigned long)func); + + return ftrace_modify_code(FTRACE_CALL_IP, new); +} + +int __init ftrace_dyn_arch_init(void *data) +{ + /* Encode the instructions when booting */ + ftrace_dyn_arch_init_insns(); + + /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */ + ftrace_modify_code(MCOUNT_ADDR, INSN_NOP); + + /* The return code is retured via data */ + *(unsigned long *)data = 0; + + return 0; +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + +#ifdef CONFIG_DYNAMIC_FTRACE + +extern void ftrace_graph_call(void); +#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call)) + +int ftrace_enable_ftrace_graph_caller(void) +{ + return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, + insn_j_ftrace_graph_caller); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP); +} + +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifndef KBUILD_MCOUNT_RA_ADDRESS + +#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */ +#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ +#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ + +unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long + old_parent_ra, unsigned long parent_ra_addr, unsigned long fp) +{ + unsigned long sp, ip, tmp; + unsigned int code; + int faulted; + + /* + * For module, move the ip from the return address after the + * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for + * kernel, move after the instruction "move ra, at"(offset is 16) + */ + ip = self_ra - (in_kernel_space(self_ra) ? 
16 : 24); + + /* + * search the text until finding the non-store instruction or "s{d,w} + * ra, offset(sp)" instruction + */ + do { + /* get the code at "ip": code = *(unsigned int *)ip; */ + safe_load_code(code, ip, faulted); + + if (unlikely(faulted)) + return 0; + /* + * If we hit the non-store instruction before finding where the + * ra is stored, then this is a leaf function and it does not + * store the ra on the stack + */ + if ((code & S_R_SP) != S_R_SP) + return parent_ra_addr; + + /* Move to the next instruction */ + ip -= 4; + } while ((code & S_RA_SP) != S_RA_SP); + + sp = fp + (code & OFFSET_MASK); + + /* tmp = *(unsigned long *)sp; */ + safe_load_stack(tmp, sp, faulted); + if (unlikely(faulted)) + return 0; + + if (tmp == old_parent_ra) + return sp; + return 0; +} + +#endif /* !KBUILD_MCOUNT_RA_ADDRESS */ + +/* + * Hook the return address and push it in the stack of return addrs + * in current thread info. + */ +void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, + unsigned long fp) +{ + unsigned long old_parent_ra; + struct ftrace_graph_ent trace; + unsigned long return_hooker = (unsigned long) + &return_to_handler; + int faulted, insns; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + /* + * "parent_ra_addr" is the stack address saved the return address of + * the caller of _mcount. + * + * if the gcc < 4.5, a leaf function does not save the return address + * in the stack address, so, we "emulate" one in _mcount's stack space, + * and hijack it directly, but for a non-leaf function, it save the + * return address to the its own stack space, we can not hijack it + * directly, but need to find the real stack address, + * ftrace_get_parent_addr() does it! + * + * if gcc>= 4.5, with the new -mmcount-ra-address option, for a + * non-leaf function, the location of the return address will be saved + * to $12 for us, and for a leaf function, only put a zero into $12. we + * do it in ftrace_graph_caller of mcount.S. + */ + + /* old_parent_ra = *parent_ra_addr; */ + safe_load_stack(old_parent_ra, parent_ra_addr, faulted); + if (unlikely(faulted)) + goto out; +#ifndef KBUILD_MCOUNT_RA_ADDRESS + parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra, + old_parent_ra, (unsigned long)parent_ra_addr, fp); + /* + * If fails when getting the stack address of the non-leaf function's + * ra, stop function graph tracer and return + */ + if (parent_ra_addr == 0) + goto out; +#endif + /* *parent_ra_addr = return_hooker; */ + safe_store_stack(return_hooker, parent_ra_addr, faulted); + if (unlikely(faulted)) + goto out; + + if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp) + == -EBUSY) { + *parent_ra_addr = old_parent_ra; + return; + } + + /* + * Get the recorded ip of the current mcount calling site in the + * __mcount_loc section, which will be used to filter the function + * entries configured through the tracing/set_graph_function interface. + */ + + insns = in_kernel_space(self_ra) ? 
2 : MCOUNT_OFFSET_INSNS + 1; + trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); + + /* Only trace if the calling function expects to */ + if (!ftrace_graph_entry(&trace)) { + current->curr_ret_stack--; + *parent_ra_addr = old_parent_ra; + } + return; +out: + ftrace_graph_stop(); + WARN_ON(1); +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S new file mode 100644 index 00000000..8882e576 --- /dev/null +++ b/arch/mips/kernel/genex.S @@ -0,0 +1,548 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + * Copyright (C) 2002, 2007 Maciej W. Rozycki + */ +#include <linux/init.h> + +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/cacheops.h> +#include <asm/irqflags.h> +#include <asm/regdef.h> +#include <asm/fpregdef.h> +#include <asm/mipsregs.h> +#include <asm/stackframe.h> +#include <asm/war.h> +#include <asm/page.h> +#include <asm/thread_info.h> + +#define PANIC_PIC(msg) \ + .set push; \ + .set reorder; \ + PTR_LA a0,8f; \ + .set noat; \ + PTR_LA AT, panic; \ + jr AT; \ +9: b 9b; \ + .set pop; \ + TEXT(msg) + + __INIT + +NESTED(except_vec0_generic, 0, sp) + PANIC_PIC("Exception vector 0 called") + END(except_vec0_generic) + +NESTED(except_vec1_generic, 0, sp) + PANIC_PIC("Exception vector 1 called") + END(except_vec1_generic) + +/* + * General exception vector for all other CPUs. + * + * Be careful when changing this, it has to be at most 128 bytes + * to fit into space reserved for the exception handler. + */ +NESTED(except_vec3_generic, 0, sp) + .set push + .set noat +#if R5432_CP0_INTERRUPT_WAR + mfc0 k0, CP0_INDEX +#endif + mfc0 k1, CP0_CAUSE + andi k1, k1, 0x7c +#ifdef CONFIG_64BIT + dsll k1, k1, 1 +#endif + PTR_L k0, exception_handlers(k1) + jr k0 + .set pop + END(except_vec3_generic) + +/* + * General exception handler for CPUs with virtual coherency exception. + * + * Be careful when changing this, it has to be at most 256 (as a special + * exception) bytes to fit into space reserved for the exception handler. + */ +NESTED(except_vec3_r4000, 0, sp) + .set push + .set mips3 + .set noat + mfc0 k1, CP0_CAUSE + li k0, 31<<2 + andi k1, k1, 0x7c + .set push + .set noreorder + .set nomacro + beq k1, k0, handle_vced + li k0, 14<<2 + beq k1, k0, handle_vcei +#ifdef CONFIG_64BIT + dsll k1, k1, 1 +#endif + .set pop + PTR_L k0, exception_handlers(k1) + jr k0 + + /* + * Big shit, we now may have two dirty primary cache lines for the same + * physical address. We can safely invalidate the line pointed to by + * c0_badvaddr because after return from this exception handler the + * load / store will be re-executed. + */ +handle_vced: + MFC0 k0, CP0_BADVADDR + li k1, -4 # Is this ... + and k0, k1 # ... really needed? 
+ mtc0 zero, CP0_TAGLO + cache Index_Store_Tag_D, (k0) + cache Hit_Writeback_Inv_SD, (k0) +#ifdef CONFIG_PROC_FS + PTR_LA k0, vced_count + lw k1, (k0) + addiu k1, 1 + sw k1, (k0) +#endif + eret + +handle_vcei: + MFC0 k0, CP0_BADVADDR + cache Hit_Writeback_Inv_SD, (k0) # also cleans pi +#ifdef CONFIG_PROC_FS + PTR_LA k0, vcei_count + lw k1, (k0) + addiu k1, 1 + sw k1, (k0) +#endif + eret + .set pop + END(except_vec3_r4000) + + __FINIT + + .align 5 /* 32 byte rollback region */ +LEAF(r4k_wait) + .set push + .set noreorder + /* start of rollback region */ + LONG_L t0, TI_FLAGS($28) + nop + andi t0, _TIF_NEED_RESCHED + bnez t0, 1f + nop + nop + nop + .set mips3 + wait + /* end of rollback region (the region size must be power of two) */ + .set pop +1: + jr ra + END(r4k_wait) + + .macro BUILD_ROLLBACK_PROLOGUE handler + FEXPORT(rollback_\handler) + .set push + .set noat + MFC0 k0, CP0_EPC + PTR_LA k1, r4k_wait + ori k0, 0x1f /* 32 byte rollback region */ + xori k0, 0x1f + bne k0, k1, 9f + MTC0 k0, CP0_EPC +9: + .set pop + .endm + + .align 5 +BUILD_ROLLBACK_PROLOGUE handle_int +NESTED(handle_int, PT_SIZE, sp) +#ifdef CONFIG_TRACE_IRQFLAGS + /* + * Check to see if the interrupted code has just disabled + * interrupts and ignore this interrupt for now if so. + * + * local_irq_disable() disables interrupts and then calls + * trace_hardirqs_off() to track the state. If an interrupt is taken + * after interrupts are disabled but before the state is updated + * it will appear to restore_all that it is incorrectly returning with + * interrupts disabled + */ + .set push + .set noat + mfc0 k0, CP0_STATUS +#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) + and k0, ST0_IEP + bnez k0, 1f + + mfc0 k0, CP0_EPC + .set noreorder + j k0 + rfe +#else + and k0, ST0_IE + bnez k0, 1f + + eret +#endif +1: + .set pop +#endif + SAVE_ALL + CLI + TRACE_IRQS_OFF + + LONG_L s0, TI_REGS($28) + LONG_S sp, TI_REGS($28) + PTR_LA ra, ret_from_irq + j plat_irq_dispatch + END(handle_int) + + __INIT + +/* + * Special interrupt vector for MIPS64 ISA & embedded MIPS processors. + * This is a dedicated interrupt exception vector which reduces the + * interrupt processing overhead. The jump instruction will be replaced + * at the initialization time. + * + * Be careful when changing this, it has to be at most 128 bytes + * to fit into space reserved for the exception handler. + */ +NESTED(except_vec4, 0, sp) +1: j 1b /* Dummy, will be replaced */ + END(except_vec4) + +/* + * EJTAG debug exception handler. + * The EJTAG debug exception entry point is 0xbfc00480, which + * normally is in the boot PROM, so the boot PROM must do a + * unconditional jump to this vector. + */ +NESTED(except_vec_ejtag_debug, 0, sp) + j ejtag_debug_handler + END(except_vec_ejtag_debug) + + __FINIT + +/* + * Vectored interrupt handler. + * This prototype is copied to ebase + n*IntCtl.VS and patched + * to invoke the handler + */ +BUILD_ROLLBACK_PROLOGUE except_vec_vi +NESTED(except_vec_vi, 0, sp) + SAVE_SOME + SAVE_AT + .set push + .set noreorder +#ifdef CONFIG_MIPS_MT_SMTC + /* + * To keep from blindly blocking *all* interrupts + * during service by SMTC kernel, we also want to + * pass the IM value to be cleared. 
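BUILD_ROLLBACK_PROLOGUE above rounds the saved EPC down to the 32-byte-aligned start of r4k_wait with an ori/xori pair, which is equivalent to clearing the low five address bits. A tiny userspace illustration of that trick (the address is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t epc = 0x80101234;	/* made-up EPC inside the 32-byte wait region */

	/* ori 0x1f then xori 0x1f, as in BUILD_ROLLBACK_PROLOGUE: clears the low 5 bits */
	uint32_t rounded = (epc | 0x1f) ^ 0x1f;

	printf("0x%08x -> 0x%08x (== 0x%08x)\n", epc, rounded, epc & ~0x1fu);
	return 0;
}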
+ */ +FEXPORT(except_vec_vi_mori) + ori a0, $0, 0 +#endif /* CONFIG_MIPS_MT_SMTC */ +FEXPORT(except_vec_vi_lui) + lui v0, 0 /* Patched */ + j except_vec_vi_handler +FEXPORT(except_vec_vi_ori) + ori v0, 0 /* Patched */ + .set pop + END(except_vec_vi) +EXPORT(except_vec_vi_end) + +/* + * Common Vectored Interrupt code + * Complete the register saves and invoke the handler which is passed in $v0 + */ +NESTED(except_vec_vi_handler, 0, sp) + SAVE_TEMP + SAVE_STATIC +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC has an interesting problem that interrupts are level-triggered, + * and the CLI macro will clear EXL, potentially causing a duplicate + * interrupt service invocation. So we need to clear the associated + * IM bit of Status prior to doing CLI, and restore it after the + * service routine has been invoked - we must assume that the + * service routine will have cleared the state, and any active + * level represents a new or otherwised unserviced event... + */ + mfc0 t1, CP0_STATUS + and t0, a0, t1 +#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP + mfc0 t2, CP0_TCCONTEXT + or t2, t0, t2 + mtc0 t2, CP0_TCCONTEXT +#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ + xor t1, t1, t0 + mtc0 t1, CP0_STATUS + _ehb +#endif /* CONFIG_MIPS_MT_SMTC */ + CLI +#ifdef CONFIG_TRACE_IRQFLAGS + move s0, v0 +#ifdef CONFIG_MIPS_MT_SMTC + move s1, a0 +#endif + TRACE_IRQS_OFF +#ifdef CONFIG_MIPS_MT_SMTC + move a0, s1 +#endif + move v0, s0 +#endif + + LONG_L s0, TI_REGS($28) + LONG_S sp, TI_REGS($28) + PTR_LA ra, ret_from_irq + jr v0 + END(except_vec_vi_handler) + +/* + * EJTAG debug exception handler. + */ +NESTED(ejtag_debug_handler, PT_SIZE, sp) + .set push + .set noat + MTC0 k0, CP0_DESAVE + mfc0 k0, CP0_DEBUG + + sll k0, k0, 30 # Check for SDBBP. + bgez k0, ejtag_return + + PTR_LA k0, ejtag_debug_buffer + LONG_S k1, 0(k0) + SAVE_ALL + move a0, sp + jal ejtag_exception_handler + RESTORE_ALL + PTR_LA k0, ejtag_debug_buffer + LONG_L k1, 0(k0) + +ejtag_return: + MFC0 k0, CP0_DESAVE + .set mips32 + deret + .set pop + END(ejtag_debug_handler) + +/* + * This buffer is reserved for the use of the EJTAG debug + * handler. + */ + .data +EXPORT(ejtag_debug_buffer) + .fill LONGSIZE + .previous + + __INIT + +/* + * NMI debug exception handler for MIPS reference boards. + * The NMI debug exception entry point is 0xbfc00000, which + * normally is in the boot PROM, so the boot PROM must do a + * unconditional jump to this vector. + */ +NESTED(except_vec_nmi, 0, sp) + j nmi_handler + END(except_vec_nmi) + + __FINIT + +NESTED(nmi_handler, PT_SIZE, sp) + .set push + .set noat + SAVE_ALL + move a0, sp + jal nmi_exception_handler + RESTORE_ALL + .set mips3 + eret + .set pop + END(nmi_handler) + + .macro __build_clear_none + .endm + + .macro __build_clear_sti + TRACE_IRQS_ON + STI + .endm + + .macro __build_clear_cli + CLI + TRACE_IRQS_OFF + .endm + + .macro __build_clear_fpe + .set push + /* gas fails to assemble cfc1 for some archs (octeon).*/ \ + .set mips1 + cfc1 a1, fcr31 + li a2, ~(0x3f << 12) + and a2, a1 + ctc1 a2, fcr31 + .set pop + TRACE_IRQS_ON + STI + .endm + + .macro __build_clear_ade + MFC0 t0, CP0_BADVADDR + PTR_S t0, PT_BVADDR(sp) + KMODE + .endm + + .macro __BUILD_silent exception + .endm + + /* Gas tries to parse the PRINT argument as a string containing + string escapes and emits bogus warnings if it believes to + recognize an unknown escape code. So make the arguments + start with an n and gas will believe \n is ok ... 
*/ + .macro __BUILD_verbose nexception + LONG_L a1, PT_EPC(sp) +#ifdef CONFIG_32BIT + PRINT("Got \nexception at %08lx\012") +#endif +#ifdef CONFIG_64BIT + PRINT("Got \nexception at %016lx\012") +#endif + .endm + + .macro __BUILD_count exception + LONG_L t0,exception_count_\exception + LONG_ADDIU t0, 1 + LONG_S t0,exception_count_\exception + .comm exception_count\exception, 8, 8 + .endm + + .macro __BUILD_HANDLER exception handler clear verbose ext + .align 5 + NESTED(handle_\exception, PT_SIZE, sp) + .set noat + SAVE_ALL + FEXPORT(handle_\exception\ext) + __BUILD_clear_\clear + .set at + __BUILD_\verbose \exception + move a0, sp + PTR_LA ra, ret_from_exception + j do_\handler + END(handle_\exception) + .endm + + .macro BUILD_HANDLER exception handler clear verbose + __BUILD_HANDLER \exception \handler \clear \verbose _int + .endm + + BUILD_HANDLER adel ade ade silent /* #4 */ + BUILD_HANDLER ades ade ade silent /* #5 */ + BUILD_HANDLER ibe be cli silent /* #6 */ + BUILD_HANDLER dbe be cli silent /* #7 */ + BUILD_HANDLER bp bp sti silent /* #9 */ + BUILD_HANDLER ri ri sti silent /* #10 */ + BUILD_HANDLER cpu cpu sti silent /* #11 */ + BUILD_HANDLER ov ov sti silent /* #12 */ + BUILD_HANDLER tr tr sti silent /* #13 */ + BUILD_HANDLER fpe fpe fpe silent /* #15 */ + BUILD_HANDLER mdmx mdmx sti silent /* #22 */ +#ifdef CONFIG_HARDWARE_WATCHPOINTS + /* + * For watch, interrupts will be enabled after the watch + * registers are read. + */ + BUILD_HANDLER watch watch cli silent /* #23 */ +#else + BUILD_HANDLER watch watch sti verbose /* #23 */ +#endif + BUILD_HANDLER mcheck mcheck cli verbose /* #24 */ + BUILD_HANDLER mt mt sti silent /* #25 */ + BUILD_HANDLER dsp dsp sti silent /* #26 */ + BUILD_HANDLER reserved reserved sti verbose /* others */ + + .align 5 + LEAF(handle_ri_rdhwr_vivt) +#ifdef CONFIG_MIPS_MT_SMTC + PANIC_PIC("handle_ri_rdhwr_vivt called") +#else + .set push + .set noat + .set noreorder + /* check if TLB contains a entry for EPC */ + MFC0 k1, CP0_ENTRYHI + andi k1, 0xff /* ASID_MASK */ + MFC0 k0, CP0_EPC + PTR_SRL k0, PAGE_SHIFT + 1 + PTR_SLL k0, PAGE_SHIFT + 1 + or k1, k0 + MTC0 k1, CP0_ENTRYHI + mtc0_tlbw_hazard + tlbp + tlb_probe_hazard + mfc0 k1, CP0_INDEX + .set pop + bltz k1, handle_ri /* slow path */ + /* fall thru */ +#endif + END(handle_ri_rdhwr_vivt) + + LEAF(handle_ri_rdhwr) + .set push + .set noat + .set noreorder + /* 0x7c03e83b: rdhwr v1,$29 */ + MFC0 k1, CP0_EPC + lui k0, 0x7c03 + lw k1, (k1) + ori k0, 0xe83b + .set reorder + bne k0, k1, handle_ri /* if not ours */ + /* The insn is rdhwr. No need to check CAUSE.BD here. */ + get_saved_sp /* k1 := current_thread_info */ + .set noreorder + MFC0 k0, CP0_EPC +#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) + ori k1, _THREAD_MASK + xori k1, _THREAD_MASK + LONG_L v1, TI_TP_VALUE(k1) + LONG_ADDIU k0, 4 + jr k0 + rfe +#else +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS + LONG_ADDIU k0, 4 /* stall on $k0 */ +#else + .set at=v1 + LONG_ADDIU k0, 4 + .set noat +#endif + MTC0 k0, CP0_EPC + /* I hope three instructions between MTC0 and ERET are enough... */ + ori k1, _THREAD_MASK + xori k1, _THREAD_MASK + LONG_L v1, TI_TP_VALUE(k1) + .set mips3 + eret + .set mips0 +#endif + .set pop + END(handle_ri_rdhwr) + +#ifdef CONFIG_64BIT +/* A temporary overflow handler used by check_daddi(). 
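handle_ri_rdhwr above matches the faulting instruction against the literal word 0x7c03e83b, i.e. rdhwr v1, $29 (hardware register 29 is the UserLocal/TLS register). This small sketch just reassembles that constant from its instruction fields:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t special3 = 0x1fu << 26;	/* SPECIAL3 major opcode */
	uint32_t rt       = 3u  << 16;		/* destination register v1 ($3) */
	uint32_t rd       = 29u << 11;		/* hardware register 29 (UserLocal) */
	uint32_t func     = 0x3b;		/* RDHWR function code */

	printf("0x%08x\n", special3 | rt | rd | func);	/* prints 0x7c03e83b */
	return 0;
}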
*/ + + __INIT + + BUILD_HANDLER daddi_ov daddi_ov none silent /* #12 */ +#endif diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c new file mode 100644 index 00000000..c6854d9d --- /dev/null +++ b/arch/mips/kernel/gpio_txx9.c @@ -0,0 +1,89 @@ +/* + * A gpio chip driver for TXx9 SoCs + * + * Copyright (C) 2008 Atsushi Nemoto <anemo@mba.ocn.ne.jp> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/gpio.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <asm/txx9pio.h> + +static DEFINE_SPINLOCK(txx9_gpio_lock); + +static struct txx9_pio_reg __iomem *txx9_pioptr; + +static int txx9_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + return __raw_readl(&txx9_pioptr->din) & (1 << offset); +} + +static void txx9_gpio_set_raw(unsigned int offset, int value) +{ + u32 val; + val = __raw_readl(&txx9_pioptr->dout); + if (value) + val |= 1 << offset; + else + val &= ~(1 << offset); + __raw_writel(val, &txx9_pioptr->dout); +} + +static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + unsigned long flags; + spin_lock_irqsave(&txx9_gpio_lock, flags); + txx9_gpio_set_raw(offset, value); + mmiowb(); + spin_unlock_irqrestore(&txx9_gpio_lock, flags); +} + +static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset) +{ + unsigned long flags; + spin_lock_irqsave(&txx9_gpio_lock, flags); + __raw_writel(__raw_readl(&txx9_pioptr->dir) & ~(1 << offset), + &txx9_pioptr->dir); + mmiowb(); + spin_unlock_irqrestore(&txx9_gpio_lock, flags); + return 0; +} + +static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset, + int value) +{ + unsigned long flags; + spin_lock_irqsave(&txx9_gpio_lock, flags); + txx9_gpio_set_raw(offset, value); + __raw_writel(__raw_readl(&txx9_pioptr->dir) | (1 << offset), + &txx9_pioptr->dir); + mmiowb(); + spin_unlock_irqrestore(&txx9_gpio_lock, flags); + return 0; +} + +static struct gpio_chip txx9_gpio_chip = { + .get = txx9_gpio_get, + .set = txx9_gpio_set, + .direction_input = txx9_gpio_dir_in, + .direction_output = txx9_gpio_dir_out, + .label = "TXx9", +}; + +int __init txx9_gpio_init(unsigned long baseaddr, + unsigned int base, unsigned int num) +{ + txx9_pioptr = ioremap(baseaddr, sizeof(struct txx9_pio_reg)); + if (!txx9_pioptr) + return -ENODEV; + txx9_gpio_chip.base = base; + txx9_gpio_chip.ngpio = num; + return gpiochip_add(&txx9_gpio_chip); +} diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S new file mode 100644 index 00000000..ea695d96 --- /dev/null +++ b/arch/mips/kernel/head.S @@ -0,0 +1,232 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 1995 Waldorf Electronics + * Written by Ralf Baechle and Andreas Busse + * Copyright (C) 1994 - 99, 2003, 06 Ralf Baechle + * Copyright (C) 1996 Paul M. Antoine + * Modified for DECStation and hence R3000 support by Paul M. Antoine + * Further modifications by David S. Miller and Harald Koerfgen + * Copyright (C) 1999 Silicon Graphics, Inc. + * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. 
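txx9_gpio_init() above only registers the gpio_chip; pins are then driven through the generic GPIO calls. A hedged sketch of board-side usage (the I/O base address, pin number and label below are invented for illustration and do not come from this diff):

#include <linux/init.h>
#include <linux/gpio.h>

/* prototype of the init function above; its real header is not shown in this diff */
extern int txx9_gpio_init(unsigned long baseaddr, unsigned int base, unsigned int num);

/* illustrative board hook; base address, pin number and label are invented */
static int __init example_txx9_gpio_setup(void)
{
	int err;

	err = txx9_gpio_init(0xff1f0000UL, 0, 16);	/* 16 pins starting at GPIO 0 */
	if (err)
		return err;

	err = gpio_request(4, "example-led");
	if (err)
		return err;

	return gpio_direction_output(4, 1);		/* drive the pin high */
}
device_initcall(example_txx9_gpio_setup);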
+ */ +#include <linux/init.h> +#include <linux/threads.h> + +#include <asm/addrspace.h> +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/irqflags.h> +#include <asm/regdef.h> +#include <asm/page.h> +#include <asm/pgtable-bits.h> +#include <asm/mipsregs.h> +#include <asm/stackframe.h> + +#include <kernel-entry-init.h> + + /* + * inputs are the text nasid in t1, data nasid in t2. + */ + .macro MAPPED_KERNEL_SETUP_TLB +#ifdef CONFIG_MAPPED_KERNEL + /* + * This needs to read the nasid - assume 0 for now. + * Drop in 0xffffffffc0000000 in tlbhi, 0+VG in tlblo_0, + * 0+DVG in tlblo_1. + */ + dli t0, 0xffffffffc0000000 + dmtc0 t0, CP0_ENTRYHI + li t0, 0x1c000 # Offset of text into node memory + dsll t1, NASID_SHFT # Shift text nasid into place + dsll t2, NASID_SHFT # Same for data nasid + or t1, t1, t0 # Physical load address of kernel text + or t2, t2, t0 # Physical load address of kernel data + dsrl t1, 12 # 4K pfn + dsrl t2, 12 # 4K pfn + dsll t1, 6 # Get pfn into place + dsll t2, 6 # Get pfn into place + li t0, ((_PAGE_GLOBAL|_PAGE_VALID| _CACHE_CACHABLE_COW) >> 6) + or t0, t0, t1 + mtc0 t0, CP0_ENTRYLO0 # physaddr, VG, cach exlwr + li t0, ((_PAGE_GLOBAL|_PAGE_VALID| _PAGE_DIRTY|_CACHE_CACHABLE_COW) >> 6) + or t0, t0, t2 + mtc0 t0, CP0_ENTRYLO1 # physaddr, DVG, cach exlwr + li t0, 0x1ffe000 # MAPPED_KERN_TLBMASK, TLBPGMASK_16M + mtc0 t0, CP0_PAGEMASK + li t0, 0 # KMAP_INX + mtc0 t0, CP0_INDEX + li t0, 1 + mtc0 t0, CP0_WIRED + tlbwi +#else + mtc0 zero, CP0_WIRED +#endif + .endm + + /* + * For the moment disable interrupts, mark the kernel mode and + * set ST0_KX so that the CPU does not spit fire when using + * 64-bit addresses. A full initialization of the CPU's status + * register is done later in per_cpu_trap_init(). + */ + .macro setup_c0_status set clr + .set push +#ifdef CONFIG_MIPS_MT_SMTC + /* + * For SMTC, we need to set privilege and disable interrupts only for + * the current TC, using the TCStatus register. + */ + mfc0 t0, CP0_TCSTATUS + /* Fortunately CU 0 is in the same place in both registers */ + /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ + li t1, ST0_CU0 | 0x08001c00 + or t0, t1 + /* Clear TKSU, leave IXMT */ + xori t0, 0x00001800 + mtc0 t0, CP0_TCSTATUS + _ehb + /* We need to leave the global IE bit set, but clear EXL...*/ + mfc0 t0, CP0_STATUS + or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr + xor t0, ST0_EXL | ST0_ERL | \clr + mtc0 t0, CP0_STATUS +#else + mfc0 t0, CP0_STATUS + or t0, ST0_CU0|\set|0x1f|\clr + xor t0, 0x1f|\clr + mtc0 t0, CP0_STATUS + .set noreorder + sll zero,3 # ehb +#endif + .set pop + .endm + + .macro setup_c0_status_pri +#ifdef CONFIG_64BIT + setup_c0_status ST0_KX 0 +#else + setup_c0_status 0 0 +#endif + .endm + + .macro setup_c0_status_sec +#ifdef CONFIG_64BIT + setup_c0_status ST0_KX ST0_BEV +#else + setup_c0_status 0 ST0_BEV +#endif + .endm + +#ifndef CONFIG_NO_EXCEPT_FILL + /* + * Reserved space for exception handlers. + * Necessary for machines which link their kernels at KSEG0. + */ + .fill 0x400 +#endif + +EXPORT(_stext) + +#ifdef CONFIG_BOOT_RAW + /* + * Give us a fighting chance of running if execution beings at the + * kernel load address. This is needed because this platform does + * not have a ELF loader yet. + */ +FEXPORT(__kernel_entry) + j kernel_entry +#endif + + __REF + +NESTED(kernel_entry, 16, sp) # kernel entry point + + kernel_entry_setup # cpu specific setup + + setup_c0_status_pri + + /* We might not get launched at the address the kernel is linked to, + so we jump there. 
*/ + PTR_LA t0, 0f + jr t0 +0: + +#ifdef CONFIG_MIPS_MT_SMTC + /* + * In SMTC kernel, "CLI" is thread-specific, in TCStatus. + * We still need to enable interrupts globally in Status, + * and clear EXL/ERL. + * + * TCContext is used to track interrupt levels under + * service in SMTC kernel. Clear for boot TC before + * allowing any interrupts. + */ + mtc0 zero, CP0_TCCONTEXT + + mfc0 t0, CP0_STATUS + ori t0, t0, 0xff1f + xori t0, t0, 0x001e + mtc0 t0, CP0_STATUS +#endif /* CONFIG_MIPS_MT_SMTC */ + + PTR_LA t0, __bss_start # clear .bss + LONG_S zero, (t0) + PTR_LA t1, __bss_stop - LONGSIZE +1: + PTR_ADDIU t0, LONGSIZE + LONG_S zero, (t0) + bne t0, t1, 1b + + LONG_S a0, fw_arg0 # firmware arguments + LONG_S a1, fw_arg1 + LONG_S a2, fw_arg2 + LONG_S a3, fw_arg3 + + MTC0 zero, CP0_CONTEXT # clear context register + PTR_LA $28, init_thread_union + /* Set the SP after an empty pt_regs. */ + PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE + PTR_ADDU sp, $28 + back_to_back_c0_hazard + set_saved_sp sp, t0, t1 + PTR_SUBU sp, 4 * SZREG # init stack pointer + + j start_kernel + END(kernel_entry) + + __CPUINIT + +#ifdef CONFIG_SMP +/* + * SMP slave cpus entry point. Board specific code for bootstrap calls this + * function after setting up the stack and gp registers. + */ +NESTED(smp_bootstrap, 16, sp) +#ifdef CONFIG_MIPS_MT_SMTC + /* + * Read-modify-writes of Status must be atomic, and this + * is one case where CLI is invoked without EXL being + * necessarily set. The CLI and setup_c0_status will + * in fact be redundant for all but the first TC of + * each VPE being booted. + */ + DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */ + jal mips_ihb +#endif /* CONFIG_MIPS_MT_SMTC */ + setup_c0_status_sec + smp_slave_setup +#ifdef CONFIG_MIPS_MT_SMTC + andi t2, t2, VPECONTROL_TE + beqz t2, 2f + EMT # emt +2: +#endif /* CONFIG_MIPS_MT_SMTC */ + j start_secondary + END(smp_bootstrap) +#endif /* CONFIG_SMP */ + + __FINIT diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c new file mode 100644 index 00000000..c5bc344f --- /dev/null +++ b/arch/mips/kernel/i8253.c @@ -0,0 +1,39 @@ +/* + * i8253.c 8253/PIT functions + * + */ +#include <linux/clockchips.h> +#include <linux/i8253.h> +#include <linux/export.h> +#include <linux/smp.h> +#include <linux/irq.h> + +#include <asm/time.h> + +static irqreturn_t timer_interrupt(int irq, void *dev_id) +{ + i8253_clockevent.event_handler(&i8253_clockevent); + + return IRQ_HANDLED; +} + +static struct irqaction irq0 = { + .handler = timer_interrupt, + .flags = IRQF_NOBALANCING | IRQF_TIMER, + .name = "timer" +}; + +void __init setup_pit_timer(void) +{ + clockevent_i8253_init(true); + setup_irq(0, &irq0); +} + +static int __init init_pit_clocksource(void) +{ + if (num_possible_cpus() > 1) /* PIT does not scale! */ + return 0; + + return clocksource_i8253_init(); +} +arch_initcall(init_pit_clocksource); diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c new file mode 100644 index 00000000..32b397b6 --- /dev/null +++ b/arch/mips/kernel/i8259.c @@ -0,0 +1,335 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Code to handle x86 style IRQs plus some generic interrupt stuff. 
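setup_pit_timer() above hands periodic-tick duty to the generic i8253 clockevent code; the PIT input clock is the usual 1193182 Hz, so the reload value for a given HZ is the rounded quotient the kernel's PIT_LATCH definition computes. A worked example for HZ=100 (an assumed configuration):

#include <stdio.h>

int main(void)
{
	const unsigned long pit_tick_rate = 1193182;	/* 8253/8254 input clock, Hz */
	unsigned int hz = 100;				/* assumed kernel HZ */

	/* rounded reload value, mirroring the kernel's PIT_LATCH computation */
	printf("PIT reload for HZ=%u: %lu\n", hz, (pit_tick_rate + hz / 2) / hz);
	return 0;
}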
+ * + * Copyright (C) 1992 Linus Torvalds + * Copyright (C) 1994 - 2000 Ralf Baechle + */ +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/syscore_ops.h> +#include <linux/irq.h> + +#include <asm/i8259.h> +#include <asm/io.h> + +/* + * This is the 'legacy' 8259A Programmable Interrupt Controller, + * present in the majority of PC/AT boxes. + * plus some generic x86 specific things if generic specifics makes + * any sense at all. + * this file should become arch/i386/kernel/irq.c when the old irq.c + * moves to arch independent land + */ + +static int i8259A_auto_eoi = -1; +DEFINE_RAW_SPINLOCK(i8259A_lock); +static void disable_8259A_irq(struct irq_data *d); +static void enable_8259A_irq(struct irq_data *d); +static void mask_and_ack_8259A(struct irq_data *d); +static void init_8259A(int auto_eoi); + +static struct irq_chip i8259A_chip = { + .name = "XT-PIC", + .irq_mask = disable_8259A_irq, + .irq_disable = disable_8259A_irq, + .irq_unmask = enable_8259A_irq, + .irq_mask_ack = mask_and_ack_8259A, +#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF + .irq_set_affinity = plat_set_irq_affinity, +#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ +}; + +/* + * 8259A PIC functions to handle ISA devices: + */ + +/* + * This contains the irq mask for both 8259A irq controllers, + */ +static unsigned int cached_irq_mask = 0xffff; + +#define cached_master_mask (cached_irq_mask) +#define cached_slave_mask (cached_irq_mask >> 8) + +static void disable_8259A_irq(struct irq_data *d) +{ + unsigned int mask, irq = d->irq - I8259A_IRQ_BASE; + unsigned long flags; + + mask = 1 << irq; + raw_spin_lock_irqsave(&i8259A_lock, flags); + cached_irq_mask |= mask; + if (irq & 8) + outb(cached_slave_mask, PIC_SLAVE_IMR); + else + outb(cached_master_mask, PIC_MASTER_IMR); + raw_spin_unlock_irqrestore(&i8259A_lock, flags); +} + +static void enable_8259A_irq(struct irq_data *d) +{ + unsigned int mask, irq = d->irq - I8259A_IRQ_BASE; + unsigned long flags; + + mask = ~(1 << irq); + raw_spin_lock_irqsave(&i8259A_lock, flags); + cached_irq_mask &= mask; + if (irq & 8) + outb(cached_slave_mask, PIC_SLAVE_IMR); + else + outb(cached_master_mask, PIC_MASTER_IMR); + raw_spin_unlock_irqrestore(&i8259A_lock, flags); +} + +int i8259A_irq_pending(unsigned int irq) +{ + unsigned int mask; + unsigned long flags; + int ret; + + irq -= I8259A_IRQ_BASE; + mask = 1 << irq; + raw_spin_lock_irqsave(&i8259A_lock, flags); + if (irq < 8) + ret = inb(PIC_MASTER_CMD) & mask; + else + ret = inb(PIC_SLAVE_CMD) & (mask >> 8); + raw_spin_unlock_irqrestore(&i8259A_lock, flags); + + return ret; +} + +void make_8259A_irq(unsigned int irq) +{ + disable_irq_nosync(irq); + irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq); + enable_irq(irq); +} + +/* + * This function assumes to be called rarely. Switching between + * 8259A registers is slow. + * This has to be protected by the irq controller spinlock + * before being called. + */ +static inline int i8259A_irq_real(unsigned int irq) +{ + int value; + int irqmask = 1 << irq; + + if (irq < 8) { + outb(0x0B, PIC_MASTER_CMD); /* ISR register */ + value = inb(PIC_MASTER_CMD) & irqmask; + outb(0x0A, PIC_MASTER_CMD); /* back to the IRR register */ + return value; + } + outb(0x0B, PIC_SLAVE_CMD); /* ISR register */ + value = inb(PIC_SLAVE_CMD) & (irqmask >> 8); + outb(0x0A, PIC_SLAVE_CMD); /* back to the IRR register */ + return value; +} + +/* + * Careful! 
The 8259A is a fragile beast, it pretty + * much _has_ to be done exactly like this (mask it + * first, _then_ send the EOI, and the order of EOI + * to the two 8259s is important! + */ +static void mask_and_ack_8259A(struct irq_data *d) +{ + unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE; + unsigned long flags; + + irqmask = 1 << irq; + raw_spin_lock_irqsave(&i8259A_lock, flags); + /* + * Lightweight spurious IRQ detection. We do not want + * to overdo spurious IRQ handling - it's usually a sign + * of hardware problems, so we only do the checks we can + * do without slowing down good hardware unnecessarily. + * + * Note that IRQ7 and IRQ15 (the two spurious IRQs + * usually resulting from the 8259A-1|2 PICs) occur + * even if the IRQ is masked in the 8259A. Thus we + * can check spurious 8259A IRQs without doing the + * quite slow i8259A_irq_real() call for every IRQ. + * This does not cover 100% of spurious interrupts, + * but should be enough to warn the user that there + * is something bad going on ... + */ + if (cached_irq_mask & irqmask) + goto spurious_8259A_irq; + cached_irq_mask |= irqmask; + +handle_real_irq: + if (irq & 8) { + inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */ + outb(cached_slave_mask, PIC_SLAVE_IMR); + outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */ + outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */ + } else { + inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */ + outb(cached_master_mask, PIC_MASTER_IMR); + outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ + } + smtc_im_ack_irq(irq); + raw_spin_unlock_irqrestore(&i8259A_lock, flags); + return; + +spurious_8259A_irq: + /* + * this is the slow path - should happen rarely. + */ + if (i8259A_irq_real(irq)) + /* + * oops, the IRQ _is_ in service according to the + * 8259A - not spurious, go handle it. + */ + goto handle_real_irq; + + { + static int spurious_irq_mask; + /* + * At this point we can be sure the IRQ is spurious, + * lets ACK and report it. [once per IRQ] + */ + if (!(spurious_irq_mask & irqmask)) { + printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq); + spurious_irq_mask |= irqmask; + } + atomic_inc(&irq_err_count); + /* + * Theoretically we do not have to handle this IRQ, + * but in Linux this does not cause problems and is + * simpler for us. + */ + goto handle_real_irq; + } +} + +static void i8259A_resume(void) +{ + if (i8259A_auto_eoi >= 0) + init_8259A(i8259A_auto_eoi); +} + +static void i8259A_shutdown(void) +{ + /* Put the i8259A into a quiescent state that + * the kernel initialization code can get it + * out of. + */ + if (i8259A_auto_eoi >= 0) { + outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ + outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ + } +} + +static struct syscore_ops i8259_syscore_ops = { + .resume = i8259A_resume, + .shutdown = i8259A_shutdown, +}; + +static int __init i8259A_init_sysfs(void) +{ + register_syscore_ops(&i8259_syscore_ops); + return 0; +} + +device_initcall(i8259A_init_sysfs); + +static void init_8259A(int auto_eoi) +{ + unsigned long flags; + + i8259A_auto_eoi = auto_eoi; + + raw_spin_lock_irqsave(&i8259A_lock, flags); + + outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ + outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ + + /* + * outb_p - this has to work on a wide range of PC hardware. 
+ */ + outb_p(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */ + outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR); /* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */ + outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */ + if (auto_eoi) /* master does Auto EOI */ + outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR); + else /* master expects normal EOI */ + outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR); + + outb_p(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */ + outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR); /* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */ + outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR); /* 8259A-2 is a slave on master's IR2 */ + outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */ + if (auto_eoi) + /* + * In AEOI mode we just have to mask the interrupt + * when acking. + */ + i8259A_chip.irq_mask_ack = disable_8259A_irq; + else + i8259A_chip.irq_mask_ack = mask_and_ack_8259A; + + udelay(100); /* wait for 8259A to initialize */ + + outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */ + outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */ + + raw_spin_unlock_irqrestore(&i8259A_lock, flags); +} + +/* + * IRQ2 is cascade interrupt to second interrupt controller + */ +static struct irqaction irq2 = { + .handler = no_action, + .name = "cascade", + .flags = IRQF_NO_THREAD, +}; + +static struct resource pic1_io_resource = { + .name = "pic1", + .start = PIC_MASTER_CMD, + .end = PIC_MASTER_IMR, + .flags = IORESOURCE_BUSY +}; + +static struct resource pic2_io_resource = { + .name = "pic2", + .start = PIC_SLAVE_CMD, + .end = PIC_SLAVE_IMR, + .flags = IORESOURCE_BUSY +}; + +/* + * On systems with i8259-style interrupt controllers we assume for + * driver compatibility reasons interrupts 0 - 15 to be the i8259 + * interrupts even if the hardware uses a different interrupt numbering. + */ +void __init init_i8259_irqs(void) +{ + int i; + + insert_resource(&ioport_resource, &pic1_io_resource); + insert_resource(&ioport_resource, &pic2_io_resource); + + init_8259A(0); + + for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) { + irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq); + irq_set_probe(i); + } + + setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2); +} diff --git a/arch/mips/kernel/init_task.c b/arch/mips/kernel/init_task.c new file mode 100644 index 00000000..5f9a7626 --- /dev/null +++ b/arch/mips/kernel/init_task.c @@ -0,0 +1,35 @@ +#include <linux/mm.h> +#include <linux/export.h> +#include <linux/sched.h> +#include <linux/init_task.h> +#include <linux/fs.h> +#include <linux/mqueue.h> + +#include <asm/thread_info.h> +#include <asm/uaccess.h> +#include <asm/pgtable.h> + +static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); +/* + * Initial thread structure. + * + * We need to make sure that this is 8192-byte aligned due to the + * way process stacks are handled. This is done by making sure + * the linker maps this in the .text segment right after head.S, + * and making head.S ensure the proper alignment. + * + * The things we do for performance.. + */ +union thread_union init_thread_union __init_task_data + __attribute__((__aligned__(THREAD_SIZE))) = + { INIT_THREAD_INFO(init_task) }; + +/* + * Initial task structure. 
+ * + * All other task structs will be allocated on slabs in fork.c + */ +struct task_struct init_task = INIT_TASK(init_task); + +EXPORT_SYMBOL(init_task); diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c new file mode 100644 index 00000000..0c527f65 --- /dev/null +++ b/arch/mips/kernel/irq-gic.c @@ -0,0 +1,258 @@ +#undef DEBUG + +#include <linux/bitmap.h> +#include <linux/init.h> +#include <linux/smp.h> +#include <linux/irq.h> + +#include <asm/io.h> +#include <asm/gic.h> +#include <asm/gcmpregs.h> +#include <linux/hardirq.h> +#include <asm-generic/bitops/find.h> + + +static unsigned long _gic_base; +static unsigned int _irqbase; +static unsigned int gic_irq_flags[GIC_NUM_INTRS]; +#define GIC_IRQ_FLAG_EDGE 0x0001 + +struct gic_pcpu_mask pcpu_masks[NR_CPUS]; +static struct gic_pending_regs pending_regs[NR_CPUS]; +static struct gic_intrmask_regs intrmask_regs[NR_CPUS]; + +void gic_send_ipi(unsigned int intr) +{ + pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__, + read_c0_status()); + GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr); +} + +/* This is Malta specific and needs to be exported */ +static void __init vpe_local_setup(unsigned int numvpes) +{ + int i; + unsigned long timer_interrupt = 5, perf_interrupt = 5; + unsigned int vpe_ctl; + + /* + * Setup the default performance counter timer interrupts + * for all VPEs + */ + for (i = 0; i < numvpes; i++) { + GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i); + + /* Are Interrupts locally routable? */ + GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl); + if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK) + GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), + GIC_MAP_TO_PIN_MSK | timer_interrupt); + + if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK) + GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), + GIC_MAP_TO_PIN_MSK | perf_interrupt); + } +} + +unsigned int gic_get_int(void) +{ + unsigned int i; + unsigned long *pending, *intrmask, *pcpu_mask; + unsigned long *pending_abs, *intrmask_abs; + + /* Get per-cpu bitmaps */ + pending = pending_regs[smp_processor_id()].pending; + intrmask = intrmask_regs[smp_processor_id()].intrmask; + pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask; + + pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED, + GIC_SH_PEND_31_0_OFS); + intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED, + GIC_SH_MASK_31_0_OFS); + + for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) { + GICREAD(*pending_abs, pending[i]); + GICREAD(*intrmask_abs, intrmask[i]); + pending_abs++; + intrmask_abs++; + } + + bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS); + bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS); + + i = find_first_bit(pending, GIC_NUM_INTRS); + + pr_debug("CPU%d: %s pend=%d\n", smp_processor_id(), __func__, i); + + return i; +} + +static void gic_irq_ack(struct irq_data *d) +{ + unsigned int irq = d->irq - _irqbase; + + pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); + GIC_CLR_INTR_MASK(irq); + + if (gic_irq_flags[irq] & GIC_IRQ_FLAG_EDGE) + GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq); +} + +static void gic_mask_irq(struct irq_data *d) +{ + unsigned int irq = d->irq - _irqbase; + pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); + GIC_CLR_INTR_MASK(irq); +} + +static void gic_unmask_irq(struct irq_data *d) +{ + unsigned int irq = d->irq - _irqbase; + pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); + GIC_SET_INTR_MASK(irq); +} + +#ifdef CONFIG_SMP + +static DEFINE_SPINLOCK(gic_lock); + +static int 
gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, + bool force) +{ + unsigned int irq = d->irq - _irqbase; + cpumask_t tmp = CPU_MASK_NONE; + unsigned long flags; + int i; + + pr_debug("%s(%d) called\n", __func__, irq); + cpumask_and(&tmp, cpumask, cpu_online_mask); + if (cpus_empty(tmp)) + return -1; + + /* Assumption : cpumask refers to a single CPU */ + spin_lock_irqsave(&gic_lock, flags); + for (;;) { + /* Re-route this IRQ */ + GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp)); + + /* Update the pcpu_masks */ + for (i = 0; i < NR_CPUS; i++) + clear_bit(irq, pcpu_masks[i].pcpu_mask); + set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); + + } + cpumask_copy(d->affinity, cpumask); + spin_unlock_irqrestore(&gic_lock, flags); + + return IRQ_SET_MASK_OK_NOCOPY; +} +#endif + +static struct irq_chip gic_irq_controller = { + .name = "MIPS GIC", + .irq_ack = gic_irq_ack, + .irq_mask = gic_mask_irq, + .irq_mask_ack = gic_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_unmask_irq, +#ifdef CONFIG_SMP + .irq_set_affinity = gic_set_affinity, +#endif +}; + +static void __init gic_setup_intr(unsigned int intr, unsigned int cpu, + unsigned int pin, unsigned int polarity, unsigned int trigtype, + unsigned int flags) +{ + /* Setup Intr to Pin mapping */ + if (pin & GIC_MAP_TO_NMI_MSK) { + GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin); + /* FIXME: hack to route NMI to all cpu's */ + for (cpu = 0; cpu < NR_CPUS; cpu += 32) { + GICWRITE(GIC_REG_ADDR(SHARED, + GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)), + 0xffffffff); + } + } else { + GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), + GIC_MAP_TO_PIN_MSK | pin); + /* Setup Intr to CPU mapping */ + GIC_SH_MAP_TO_VPE_SMASK(intr, cpu); + } + + /* Setup Intr Polarity */ + GIC_SET_POLARITY(intr, polarity); + + /* Setup Intr Trigger Type */ + GIC_SET_TRIGGER(intr, trigtype); + + /* Init Intr Masks */ + GIC_CLR_INTR_MASK(intr); + /* Initialise per-cpu Interrupt software masks */ + if (flags & GIC_FLAG_IPI) + set_bit(intr, pcpu_masks[cpu].pcpu_mask); + if (flags & GIC_FLAG_TRANSPARENT) + GIC_SET_INTR_MASK(intr); + if (trigtype == GIC_TRIG_EDGE) + gic_irq_flags[intr] |= GIC_IRQ_FLAG_EDGE; +} + +static void __init gic_basic_init(int numintrs, int numvpes, + struct gic_intr_map *intrmap, int mapsize) +{ + unsigned int i, cpu; + + /* Setup defaults */ + for (i = 0; i < numintrs; i++) { + GIC_SET_POLARITY(i, GIC_POL_POS); + GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL); + GIC_CLR_INTR_MASK(i); + if (i < GIC_NUM_INTRS) + gic_irq_flags[i] = 0; + } + + /* Setup specifics */ + for (i = 0; i < mapsize; i++) { + cpu = intrmap[i].cpunum; + if (cpu == GIC_UNUSED) + continue; + if (cpu == 0 && i != 0 && intrmap[i].flags == 0) + continue; + gic_setup_intr(i, + intrmap[i].cpunum, + intrmap[i].pin, + intrmap[i].polarity, + intrmap[i].trigtype, + intrmap[i].flags); + } + + vpe_local_setup(numvpes); + + for (i = _irqbase; i < (_irqbase + numintrs); i++) + irq_set_chip(i, &gic_irq_controller); +} + +void __init gic_init(unsigned long gic_base_addr, + unsigned long gic_addrspace_size, + struct gic_intr_map *intr_map, unsigned int intr_map_size, + unsigned int irqbase) +{ + unsigned int gicconfig; + int numvpes, numintrs; + + _gic_base = (unsigned long) ioremap_nocache(gic_base_addr, + gic_addrspace_size); + _irqbase = irqbase; + + GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); + numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >> + GIC_SH_CONFIG_NUMINTRS_SHF; + numintrs = ((numintrs + 1) * 8); + + numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >> 
+ GIC_SH_CONFIG_NUMVPES_SHF; + + pr_debug("%s called\n", __func__); + + gic_basic_init(numintrs, numvpes, intr_map, intr_map_size); +} diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c new file mode 100644 index 00000000..883fc6ce --- /dev/null +++ b/arch/mips/kernel/irq-gt641xx.c @@ -0,0 +1,131 @@ +/* + * GT641xx IRQ routines. + * + * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include <linux/hardirq.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include <asm/gt64120.h> + +#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE)) + +static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock); + +static void ack_gt641xx_irq(struct irq_data *d) +{ + unsigned long flags; + u32 cause; + + raw_spin_lock_irqsave(>641xx_irq_lock, flags); + cause = GT_READ(GT_INTRCAUSE_OFS); + cause &= ~GT641XX_IRQ_TO_BIT(d->irq); + GT_WRITE(GT_INTRCAUSE_OFS, cause); + raw_spin_unlock_irqrestore(>641xx_irq_lock, flags); +} + +static void mask_gt641xx_irq(struct irq_data *d) +{ + unsigned long flags; + u32 mask; + + raw_spin_lock_irqsave(>641xx_irq_lock, flags); + mask = GT_READ(GT_INTRMASK_OFS); + mask &= ~GT641XX_IRQ_TO_BIT(d->irq); + GT_WRITE(GT_INTRMASK_OFS, mask); + raw_spin_unlock_irqrestore(>641xx_irq_lock, flags); +} + +static void mask_ack_gt641xx_irq(struct irq_data *d) +{ + unsigned long flags; + u32 cause, mask; + + raw_spin_lock_irqsave(>641xx_irq_lock, flags); + mask = GT_READ(GT_INTRMASK_OFS); + mask &= ~GT641XX_IRQ_TO_BIT(d->irq); + GT_WRITE(GT_INTRMASK_OFS, mask); + + cause = GT_READ(GT_INTRCAUSE_OFS); + cause &= ~GT641XX_IRQ_TO_BIT(d->irq); + GT_WRITE(GT_INTRCAUSE_OFS, cause); + raw_spin_unlock_irqrestore(>641xx_irq_lock, flags); +} + +static void unmask_gt641xx_irq(struct irq_data *d) +{ + unsigned long flags; + u32 mask; + + raw_spin_lock_irqsave(>641xx_irq_lock, flags); + mask = GT_READ(GT_INTRMASK_OFS); + mask |= GT641XX_IRQ_TO_BIT(d->irq); + GT_WRITE(GT_INTRMASK_OFS, mask); + raw_spin_unlock_irqrestore(>641xx_irq_lock, flags); +} + +static struct irq_chip gt641xx_irq_chip = { + .name = "GT641xx", + .irq_ack = ack_gt641xx_irq, + .irq_mask = mask_gt641xx_irq, + .irq_mask_ack = mask_ack_gt641xx_irq, + .irq_unmask = unmask_gt641xx_irq, +}; + +void gt641xx_irq_dispatch(void) +{ + u32 cause, mask; + int i; + + cause = GT_READ(GT_INTRCAUSE_OFS); + mask = GT_READ(GT_INTRMASK_OFS); + cause &= mask; + + /* + * bit0 : logical or of all the interrupt bits. + * bit30: logical or of bits[29:26,20:1]. + * bit31: logical or of bits[25:1]. 
+ */ + for (i = 1; i < 30; i++) { + if (cause & (1U << i)) { + do_IRQ(GT641XX_IRQ_BASE + i); + return; + } + } + + atomic_inc(&irq_err_count); +} + +void __init gt641xx_irq_init(void) +{ + int i; + + GT_WRITE(GT_INTRMASK_OFS, 0); + GT_WRITE(GT_INTRCAUSE_OFS, 0); + + /* + * bit0 : logical or of all the interrupt bits. + * bit30: logical or of bits[29:26,20:1]. + * bit31: logical or of bits[25:1]. + */ + for (i = 1; i < 30; i++) + irq_set_chip_and_handler(GT641XX_IRQ_BASE + i, + >641xx_irq_chip, handle_level_irq); +} diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c new file mode 100644 index 00000000..14ac52c5 --- /dev/null +++ b/arch/mips/kernel/irq-msc01.c @@ -0,0 +1,164 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Copyright (c) 2004 MIPS Inc + * Author: chris@mips.com + * + * Copyright (C) 2004, 06 Ralf Baechle <ralf@linux-mips.org> + */ +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/kernel_stat.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/msc01_ic.h> +#include <asm/traps.h> + +static unsigned long _icctrl_msc; +#define MSC01_IC_REG_BASE _icctrl_msc + +#define MSCIC_WRITE(reg, data) do { *(volatile u32 *)(reg) = data; } while (0) +#define MSCIC_READ(reg, data) do { data = *(volatile u32 *)(reg); } while (0) + +static unsigned int irq_base; + +/* mask off an interrupt */ +static inline void mask_msc_irq(struct irq_data *d) +{ + unsigned int irq = d->irq; + + if (irq < (irq_base + 32)) + MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base)); + else + MSCIC_WRITE(MSC01_IC_DISH, 1<<(irq - irq_base - 32)); +} + +/* unmask an interrupt */ +static inline void unmask_msc_irq(struct irq_data *d) +{ + unsigned int irq = d->irq; + + if (irq < (irq_base + 32)) + MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base)); + else + MSCIC_WRITE(MSC01_IC_ENAH, 1<<(irq - irq_base - 32)); +} + +/* + * Masks and ACKs an IRQ + */ +static void level_mask_and_ack_msc_irq(struct irq_data *d) +{ + unsigned int irq = d->irq; + + mask_msc_irq(d); + if (!cpu_has_veic) + MSCIC_WRITE(MSC01_IC_EOI, 0); + /* This actually needs to be a call into platform code */ + smtc_im_ack_irq(irq); +} + +/* + * Masks and ACKs an IRQ + */ +static void edge_mask_and_ack_msc_irq(struct irq_data *d) +{ + unsigned int irq = d->irq; + + mask_msc_irq(d); + if (!cpu_has_veic) + MSCIC_WRITE(MSC01_IC_EOI, 0); + else { + u32 r; + MSCIC_READ(MSC01_IC_SUP+irq*8, r); + MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); + MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); + } + smtc_im_ack_irq(irq); +} + +/* + * Interrupt handler for interrupts coming from SOC-it. 
+ */ +void ll_msc_irq(void) +{ + unsigned int irq; + + /* read the interrupt vector register */ + MSCIC_READ(MSC01_IC_VEC, irq); + if (irq < 64) + do_IRQ(irq + irq_base); + else { + /* Ignore spurious interrupt */ + } +} + +static void msc_bind_eic_interrupt(int irq, int set) +{ + MSCIC_WRITE(MSC01_IC_RAMW, + (irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF)); +} + +static struct irq_chip msc_levelirq_type = { + .name = "SOC-it-Level", + .irq_ack = level_mask_and_ack_msc_irq, + .irq_mask = mask_msc_irq, + .irq_mask_ack = level_mask_and_ack_msc_irq, + .irq_unmask = unmask_msc_irq, + .irq_eoi = unmask_msc_irq, +}; + +static struct irq_chip msc_edgeirq_type = { + .name = "SOC-it-Edge", + .irq_ack = edge_mask_and_ack_msc_irq, + .irq_mask = mask_msc_irq, + .irq_mask_ack = edge_mask_and_ack_msc_irq, + .irq_unmask = unmask_msc_irq, + .irq_eoi = unmask_msc_irq, +}; + + +void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqmap_t *imp, int nirq) +{ + _icctrl_msc = (unsigned long) ioremap(icubase, 0x40000); + + /* Reset interrupt controller - initialises all registers to 0 */ + MSCIC_WRITE(MSC01_IC_RST, MSC01_IC_RST_RST_BIT); + + board_bind_eic_interrupt = &msc_bind_eic_interrupt; + + for (; nirq >= 0; nirq--, imp++) { + int n = imp->im_irq; + + switch (imp->im_type) { + case MSC01_IRQ_EDGE: + irq_set_chip_and_handler_name(irqbase + n, + &msc_edgeirq_type, + handle_edge_irq, + "edge"); + if (cpu_has_veic) + MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); + else + MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); + break; + case MSC01_IRQ_LEVEL: + irq_set_chip_and_handler_name(irqbase + n, + &msc_levelirq_type, + handle_level_irq, + "level"); + if (cpu_has_veic) + MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); + else + MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl); + } + } + + irq_base = irqbase; + + MSCIC_WRITE(MSC01_IC_GENA, MSC01_IC_GENA_GENA_BIT); /* Enable interrupt generation */ + +} diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c new file mode 100644 index 00000000..b0662cf9 --- /dev/null +++ b/arch/mips/kernel/irq-rm7000.c @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2003 Ralf Baechle + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Handler for RM7000 extended interrupts. These are a non-standard + * feature so we handle them separately from standard interrupts. 
+ */ +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> + +#include <asm/irq_cpu.h> +#include <asm/mipsregs.h> + +static inline void unmask_rm7k_irq(struct irq_data *d) +{ + set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE)); +} + +static inline void mask_rm7k_irq(struct irq_data *d) +{ + clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE)); +} + +static struct irq_chip rm7k_irq_controller = { + .name = "RM7000", + .irq_ack = mask_rm7k_irq, + .irq_mask = mask_rm7k_irq, + .irq_mask_ack = mask_rm7k_irq, + .irq_unmask = unmask_rm7k_irq, + .irq_eoi = unmask_rm7k_irq +}; + +void __init rm7k_cpu_irq_init(void) +{ + int base = RM7K_CPU_IRQ_BASE; + int i; + + clear_c0_intcontrol(0x00000f00); /* Mask all */ + + for (i = base; i < base + 4; i++) + irq_set_chip_and_handler(i, &rm7k_irq_controller, + handle_percpu_irq); +} diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c new file mode 100644 index 00000000..1282b9ae --- /dev/null +++ b/arch/mips/kernel/irq-rm9000.c @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2003 Ralf Baechle + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Handler for RM9000 extended interrupts. These are a non-standard + * feature so we handle them separately from standard interrupts. + */ +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#include <asm/irq_cpu.h> +#include <asm/mipsregs.h> + +static inline void unmask_rm9k_irq(struct irq_data *d) +{ + set_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE)); +} + +static inline void mask_rm9k_irq(struct irq_data *d) +{ + clear_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE)); +} + +static inline void rm9k_cpu_irq_enable(struct irq_data *d) +{ + unsigned long flags; + + local_irq_save(flags); + unmask_rm9k_irq(d); + local_irq_restore(flags); +} + +/* + * Performance counter interrupts are global on all processors. 
+ */ +static void local_rm9k_perfcounter_irq_startup(void *args) +{ + rm9k_cpu_irq_enable(args); +} + +static unsigned int rm9k_perfcounter_irq_startup(struct irq_data *d) +{ + on_each_cpu(local_rm9k_perfcounter_irq_startup, d, 1); + + return 0; +} + +static void local_rm9k_perfcounter_irq_shutdown(void *args) +{ + unsigned long flags; + + local_irq_save(flags); + mask_rm9k_irq(args); + local_irq_restore(flags); +} + +static void rm9k_perfcounter_irq_shutdown(struct irq_data *d) +{ + on_each_cpu(local_rm9k_perfcounter_irq_shutdown, d, 1); +} + +static struct irq_chip rm9k_irq_controller = { + .name = "RM9000", + .irq_ack = mask_rm9k_irq, + .irq_mask = mask_rm9k_irq, + .irq_mask_ack = mask_rm9k_irq, + .irq_unmask = unmask_rm9k_irq, + .irq_eoi = unmask_rm9k_irq +}; + +static struct irq_chip rm9k_perfcounter_irq = { + .name = "RM9000", + .irq_startup = rm9k_perfcounter_irq_startup, + .irq_shutdown = rm9k_perfcounter_irq_shutdown, + .irq_ack = mask_rm9k_irq, + .irq_mask = mask_rm9k_irq, + .irq_mask_ack = mask_rm9k_irq, + .irq_unmask = unmask_rm9k_irq, +}; + +unsigned int rm9000_perfcount_irq; + +EXPORT_SYMBOL(rm9000_perfcount_irq); + +void __init rm9k_cpu_irq_init(void) +{ + int base = RM9K_CPU_IRQ_BASE; + int i; + + clear_c0_intcontrol(0x0000f000); /* Mask all */ + + for (i = base; i < base + 4; i++) + irq_set_chip_and_handler(i, &rm9k_irq_controller, + handle_level_irq); + + rm9000_perfcount_irq = base + 1; + irq_set_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, + handle_percpu_irq); +} diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c new file mode 100644 index 00000000..a5aa43d0 --- /dev/null +++ b/arch/mips/kernel/irq.c @@ -0,0 +1,164 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Code to handle x86 style IRQs plus some generic interrupt stuff. + * + * Copyright (C) 1992 Linus Torvalds + * Copyright (C) 1994 - 2000 Ralf Baechle + */ +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel_stat.h> +#include <linux/proc_fs.h> +#include <linux/mm.h> +#include <linux/random.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/kallsyms.h> +#include <linux/kgdb.h> +#include <linux/ftrace.h> + +#include <linux/atomic.h> +#include <asm/uaccess.h> + +#ifdef CONFIG_KGDB +int kgdb_early_setup; +#endif + +static unsigned long irq_map[NR_IRQS / BITS_PER_LONG]; + +int allocate_irqno(void) +{ + int irq; + +again: + irq = find_first_zero_bit(irq_map, NR_IRQS); + + if (irq >= NR_IRQS) + return -ENOSPC; + + if (test_and_set_bit(irq, irq_map)) + goto again; + + return irq; +} + +/* + * Allocate the 16 legacy interrupts for i8259 devices. This happens early + * in the kernel initialization so treating allocation failure as BUG() is + * ok. + */ +void __init alloc_legacy_irqno(void) +{ + int i; + + for (i = 0; i <= 16; i++) + BUG_ON(test_and_set_bit(i, irq_map)); +} + +void free_irqno(unsigned int irq) +{ + smp_mb__before_clear_bit(); + clear_bit(irq, irq_map); + smp_mb__after_clear_bit(); +} + +/* + * 'what should we do if we get a hw irq event on an illegal vector'. + * each architecture has to answer this themselves. 
+ */ +void ack_bad_irq(unsigned int irq) +{ + smtc_im_ack_irq(irq); + printk("unexpected IRQ # %d\n", irq); +} + +atomic_t irq_err_count; + +int arch_show_interrupts(struct seq_file *p, int prec) +{ + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); + return 0; +} + +asmlinkage void spurious_interrupt(void) +{ + atomic_inc(&irq_err_count); +} + +void __init init_IRQ(void) +{ + int i; + +#ifdef CONFIG_KGDB + if (kgdb_early_setup) + return; +#endif + + for (i = 0; i < NR_IRQS; i++) + irq_set_noprobe(i); + + arch_init_irq(); + +#ifdef CONFIG_KGDB + if (!kgdb_early_setup) + kgdb_early_setup = 1; +#endif +} + +#ifdef DEBUG_STACKOVERFLOW +static inline void check_stack_overflow(void) +{ + unsigned long sp; + + __asm__ __volatile__("move %0, $sp" : "=r" (sp)); + sp &= THREAD_MASK; + + /* + * Check for stack overflow: is there less than STACK_WARN free? + * STACK_WARN is defined as 1/8 of THREAD_SIZE by default. + */ + if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { + printk("do_IRQ: stack overflow: %ld\n", + sp - sizeof(struct thread_info)); + dump_stack(); + } +} +#else +static inline void check_stack_overflow(void) {} +#endif + + +/* + * do_IRQ handles all normal device IRQ's (the special + * SMP cross-CPU interrupts have their own specific + * handlers). + */ +void __irq_entry do_IRQ(unsigned int irq) +{ + irq_enter(); + check_stack_overflow(); + if (!smtc_handle_on_other_cpu(irq)) + generic_handle_irq(irq); + irq_exit(); +} + +#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF +/* + * To avoid inefficient and in some cases pathological re-checking of + * IRQ affinity, we have this variant that skips the affinity check. + */ + +void __irq_entry do_IRQ_no_affinity(unsigned int irq) +{ + irq_enter(); + smtc_im_backstop(irq); + generic_handle_irq(irq); + irq_exit(); +} + +#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c new file mode 100644 index 00000000..972263bc --- /dev/null +++ b/arch/mips/kernel/irq_cpu.c @@ -0,0 +1,115 @@ +/* + * Copyright 2001 MontaVista Software Inc. + * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net + * + * Copyright (C) 2001 Ralf Baechle + * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. + * Author: Maciej W. Rozycki <macro@mips.com> + * + * This file define the irq handler for MIPS CPU interrupts. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/* + * Almost all MIPS CPUs define 8 interrupt sources. They are typically + * level triggered (i.e., cannot be cleared from CPU; must be cleared from + * device). The first two are software interrupts which we don't really + * use or support. The last one is usually the CPU timer interrupt if + * counter register is present or, for CPUs with an external FPU, by + * convention it's the FPU exception interrupt. + * + * Don't even think about using this on SMP. You have been warned. 
+ * + * This file exports one global function: + * void mips_cpu_irq_init(void); + */ +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/irq.h> + +#include <asm/irq_cpu.h> +#include <asm/mipsregs.h> +#include <asm/mipsmtregs.h> + +static inline void unmask_mips_irq(struct irq_data *d) +{ + set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); + irq_enable_hazard(); +} + +static inline void mask_mips_irq(struct irq_data *d) +{ + clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); + irq_disable_hazard(); +} + +static struct irq_chip mips_cpu_irq_controller = { + .name = "MIPS", + .irq_ack = mask_mips_irq, + .irq_mask = mask_mips_irq, + .irq_mask_ack = mask_mips_irq, + .irq_unmask = unmask_mips_irq, + .irq_eoi = unmask_mips_irq, +}; + +/* + * Basically the same as above but taking care of all the MT stuff + */ + +static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d) +{ + unsigned int vpflags = dvpe(); + + clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); + evpe(vpflags); + unmask_mips_irq(d); + return 0; +} + +/* + * While we ack the interrupt interrupts are disabled and thus we don't need + * to deal with concurrency issues. Same for mips_cpu_irq_end. + */ +static void mips_mt_cpu_irq_ack(struct irq_data *d) +{ + unsigned int vpflags = dvpe(); + clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); + evpe(vpflags); + mask_mips_irq(d); +} + +static struct irq_chip mips_mt_cpu_irq_controller = { + .name = "MIPS", + .irq_startup = mips_mt_cpu_irq_startup, + .irq_ack = mips_mt_cpu_irq_ack, + .irq_mask = mask_mips_irq, + .irq_mask_ack = mips_mt_cpu_irq_ack, + .irq_unmask = unmask_mips_irq, + .irq_eoi = unmask_mips_irq, +}; + +void __init mips_cpu_irq_init(void) +{ + int irq_base = MIPS_CPU_IRQ_BASE; + int i; + + /* Mask interrupts. */ + clear_c0_status(ST0_IM); + clear_c0_cause(CAUSEF_IP); + + /* Software interrupts are used for MT/CMT IPI */ + for (i = irq_base; i < irq_base + 2; i++) + irq_set_chip_and_handler(i, cpu_has_mipsmt ? + &mips_mt_cpu_irq_controller : + &mips_cpu_irq_controller, + handle_percpu_irq); + + for (i = irq_base + 2; i < irq_base + 8; i++) + irq_set_chip_and_handler(i, &mips_cpu_irq_controller, + handle_percpu_irq); +} diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c new file mode 100644 index 00000000..b0c55b50 --- /dev/null +++ b/arch/mips/kernel/irq_txx9.c @@ -0,0 +1,191 @@ +/* + * Based on linux/arch/mips/jmr3927/rbhma3100/irq.c, + * linux/arch/mips/tx4927/common/tx4927_irq.c, + * linux/arch/mips/tx4938/common/irq.c + * + * Copyright 2001, 2003-2005 MontaVista Software Inc. + * Author: MontaVista Software, Inc. + * ahennessy@mvista.com + * source@mvista.com + * Copyright (C) 2000-2001 Toshiba Corporation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/types.h> +#include <linux/irq.h> +#include <asm/txx9irq.h> + +struct txx9_irc_reg { + u32 cer; + u32 cr[2]; + u32 unused0; + u32 ilr[8]; + u32 unused1[4]; + u32 imr; + u32 unused2[7]; + u32 scr; + u32 unused3[7]; + u32 ssr; + u32 unused4[7]; + u32 csr; +}; + +/* IRCER : Int. Control Enable */ +#define TXx9_IRCER_ICE 0x00000001 + +/* IRCR : Int. 
Control */ +#define TXx9_IRCR_LOW 0x00000000 +#define TXx9_IRCR_HIGH 0x00000001 +#define TXx9_IRCR_DOWN 0x00000002 +#define TXx9_IRCR_UP 0x00000003 +#define TXx9_IRCR_EDGE(cr) ((cr) & 0x00000002) + +/* IRSCR : Int. Status Control */ +#define TXx9_IRSCR_EIClrE 0x00000100 +#define TXx9_IRSCR_EIClr_MASK 0x0000000f + +/* IRCSR : Int. Current Status */ +#define TXx9_IRCSR_IF 0x00010000 +#define TXx9_IRCSR_ILV_MASK 0x00000700 +#define TXx9_IRCSR_IVL_MASK 0x0000001f + +#define irc_dlevel 0 +#define irc_elevel 1 + +static struct txx9_irc_reg __iomem *txx9_ircptr __read_mostly; + +static struct { + unsigned char level; + unsigned char mode; +} txx9irq[TXx9_MAX_IR] __read_mostly; + +static void txx9_irq_unmask(struct irq_data *d) +{ + unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; + u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16 ) / 2]; + int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8; + + __raw_writel((__raw_readl(ilrp) & ~(0xff << ofs)) + | (txx9irq[irq_nr].level << ofs), + ilrp); +#ifdef CONFIG_CPU_TX39XX + /* update IRCSR */ + __raw_writel(0, &txx9_ircptr->imr); + __raw_writel(irc_elevel, &txx9_ircptr->imr); +#endif +} + +static inline void txx9_irq_mask(struct irq_data *d) +{ + unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; + u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2]; + int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8; + + __raw_writel((__raw_readl(ilrp) & ~(0xff << ofs)) + | (irc_dlevel << ofs), + ilrp); +#ifdef CONFIG_CPU_TX39XX + /* update IRCSR */ + __raw_writel(0, &txx9_ircptr->imr); + __raw_writel(irc_elevel, &txx9_ircptr->imr); + /* flush write buffer */ + __raw_readl(&txx9_ircptr->ssr); +#else + mmiowb(); +#endif +} + +static void txx9_irq_mask_ack(struct irq_data *d) +{ + unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; + + txx9_irq_mask(d); + /* clear edge detection */ + if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode))) + __raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr); +} + +static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type) +{ + unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; + u32 cr; + u32 __iomem *crp; + int ofs; + int mode; + + if (flow_type & IRQF_TRIGGER_PROBE) + return 0; + switch (flow_type & IRQF_TRIGGER_MASK) { + case IRQF_TRIGGER_RISING: mode = TXx9_IRCR_UP; break; + case IRQF_TRIGGER_FALLING: mode = TXx9_IRCR_DOWN; break; + case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break; + case IRQF_TRIGGER_LOW: mode = TXx9_IRCR_LOW; break; + default: + return -EINVAL; + } + crp = &txx9_ircptr->cr[(unsigned int)irq_nr / 8]; + cr = __raw_readl(crp); + ofs = (irq_nr & (8 - 1)) * 2; + cr &= ~(0x3 << ofs); + cr |= (mode & 0x3) << ofs; + __raw_writel(cr, crp); + txx9irq[irq_nr].mode = mode; + return 0; +} + +static struct irq_chip txx9_irq_chip = { + .name = "TXX9", + .irq_ack = txx9_irq_mask_ack, + .irq_mask = txx9_irq_mask, + .irq_mask_ack = txx9_irq_mask_ack, + .irq_unmask = txx9_irq_unmask, + .irq_set_type = txx9_irq_set_type, +}; + +void __init txx9_irq_init(unsigned long baseaddr) +{ + int i; + + txx9_ircptr = ioremap(baseaddr, sizeof(struct txx9_irc_reg)); + for (i = 0; i < TXx9_MAX_IR; i++) { + txx9irq[i].level = 4; /* middle level */ + txx9irq[i].mode = TXx9_IRCR_LOW; + irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip, + handle_level_irq); + } + + /* mask all IRC interrupts */ + __raw_writel(0, &txx9_ircptr->imr); + for (i = 0; i < 8; i++) + __raw_writel(0, &txx9_ircptr->ilr[i]); + /* setup IRC interrupt mode (Low Active) */ + for (i = 0; i < 2; i++) + __raw_writel(0, &txx9_ircptr->cr[i]); + /* enable interrupt control */ + 
__raw_writel(TXx9_IRCER_ICE, &txx9_ircptr->cer); + __raw_writel(irc_elevel, &txx9_ircptr->imr); +} + +int __init txx9_irq_set_pri(int irc_irq, int new_pri) +{ + int old_pri; + + if ((unsigned int)irc_irq >= TXx9_MAX_IR) + return 0; + old_pri = txx9irq[irc_irq].level; + txx9irq[irc_irq].level = new_pri; + return old_pri; +} + +int txx9_irq(void) +{ + u32 csr = __raw_readl(&txx9_ircptr->csr); + + if (likely(!(csr & TXx9_IRCSR_IF))) + return TXX9_IRQ_BASE + (csr & (TXx9_MAX_IR - 1)); + return -1; +} diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c new file mode 100644 index 00000000..6001610c --- /dev/null +++ b/arch/mips/kernel/jump_label.c @@ -0,0 +1,54 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2010 Cavium Networks, Inc. + */ + +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/memory.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/cpu.h> + +#include <asm/cacheflush.h> +#include <asm/inst.h> + +#ifdef HAVE_JUMP_LABEL + +#define J_RANGE_MASK ((1ul << 28) - 1) + +void arch_jump_label_transform(struct jump_entry *e, + enum jump_label_type type) +{ + union mips_instruction insn; + union mips_instruction *insn_p = + (union mips_instruction *)(unsigned long)e->code; + + /* Jump only works within a 256MB aligned region. */ + BUG_ON((e->target & ~J_RANGE_MASK) != (e->code & ~J_RANGE_MASK)); + + /* Target must have 4 byte alignment. */ + BUG_ON((e->target & 3) != 0); + + if (type == JUMP_LABEL_ENABLE) { + insn.j_format.opcode = j_op; + insn.j_format.target = (e->target & J_RANGE_MASK) >> 2; + } else { + insn.word = 0; /* nop */ + } + + get_online_cpus(); + mutex_lock(&text_mutex); + *insn_p = insn; + + flush_icache_range((unsigned long)insn_p, + (unsigned long)insn_p + sizeof(*insn_p)); + + mutex_unlock(&text_mutex); + put_online_cpus(); +} + +#endif /* HAVE_JUMP_LABEL */ diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c new file mode 100644 index 00000000..f4546e97 --- /dev/null +++ b/arch/mips/kernel/kgdb.c @@ -0,0 +1,384 @@ +/* + * Originally written by Glenn Engel, Lake Stevens Instrument Division + * + * Contributed by HP Systems + * + * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse + * Send complaints, suggestions etc. to <andy@waldorf-gmbh.de> + * + * Copyright (C) 1995 Andreas Busse + * + * Copyright (C) 2003 MontaVista Software Inc. + * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net + * + * Copyright (C) 2004-2005 MontaVista Software Inc. + * Author: Manish Lachwani, mlachwani@mvista.com or manish@koffee-break.com + * + * Copyright (C) 2007-2008 Wind River Systems, Inc. + * Author/Maintainer: Jason Wessel, jason.wessel@windriver.com + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include <linux/ptrace.h> /* for linux pt_regs struct */ +#include <linux/kgdb.h> +#include <linux/kdebug.h> +#include <linux/sched.h> +#include <linux/smp.h> +#include <asm/inst.h> +#include <asm/fpu.h> +#include <asm/cacheflush.h> +#include <asm/processor.h> +#include <asm/sigcontext.h> + +static struct hard_trap_info { + unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */ + unsigned char signo; /* Signal that we map this trap into */ +} hard_trap_info[] = { + { 6, SIGBUS }, /* instruction bus error */ + { 7, SIGBUS }, /* data bus error */ + { 9, SIGTRAP }, /* break */ +/* { 11, SIGILL }, */ /* CPU unusable */ + { 12, SIGFPE }, /* overflow */ + { 13, SIGTRAP }, /* trap */ + { 14, SIGSEGV }, /* virtual instruction cache coherency */ + { 15, SIGFPE }, /* floating point exception */ + { 23, SIGSEGV }, /* watch */ + { 31, SIGSEGV }, /* virtual data cache coherency */ + { 0, 0} /* Must be last */ +}; + +struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = +{ + { "zero", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) }, + { "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) }, + { "v0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) }, + { "v1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) }, + { "a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) }, + { "a1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) }, + { "a2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) }, + { "a3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) }, + { "t0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) }, + { "t1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) }, + { "t2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) }, + { "t3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) }, + { "t4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) }, + { "t5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) }, + { "t6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) }, + { "t7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) }, + { "s0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) }, + { "s1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) }, + { "s2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) }, + { "s3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) }, + { "s4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) }, + { "s5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) }, + { "s6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) }, + { "s7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) }, + { "t8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) }, + { "t9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) }, + { "k0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) }, + { "k1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) }, + { "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) }, + { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) }, + { "s8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) }, + { "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) }, + { "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_status) }, + { "lo", GDB_SIZEOF_REG, offsetof(struct pt_regs, lo) }, + { "hi", GDB_SIZEOF_REG, offsetof(struct pt_regs, hi) }, + { "bad", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_badvaddr) }, + { "cause", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_cause) }, + { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_epc) }, + { "f0", GDB_SIZEOF_REG, 0 }, + { "f1", GDB_SIZEOF_REG, 1 }, + { "f2", GDB_SIZEOF_REG, 2 }, + { "f3", GDB_SIZEOF_REG, 3 }, + { "f4", 
GDB_SIZEOF_REG, 4 }, + { "f5", GDB_SIZEOF_REG, 5 }, + { "f6", GDB_SIZEOF_REG, 6 }, + { "f7", GDB_SIZEOF_REG, 7 }, + { "f8", GDB_SIZEOF_REG, 8 }, + { "f9", GDB_SIZEOF_REG, 9 }, + { "f10", GDB_SIZEOF_REG, 10 }, + { "f11", GDB_SIZEOF_REG, 11 }, + { "f12", GDB_SIZEOF_REG, 12 }, + { "f13", GDB_SIZEOF_REG, 13 }, + { "f14", GDB_SIZEOF_REG, 14 }, + { "f15", GDB_SIZEOF_REG, 15 }, + { "f16", GDB_SIZEOF_REG, 16 }, + { "f17", GDB_SIZEOF_REG, 17 }, + { "f18", GDB_SIZEOF_REG, 18 }, + { "f19", GDB_SIZEOF_REG, 19 }, + { "f20", GDB_SIZEOF_REG, 20 }, + { "f21", GDB_SIZEOF_REG, 21 }, + { "f22", GDB_SIZEOF_REG, 22 }, + { "f23", GDB_SIZEOF_REG, 23 }, + { "f24", GDB_SIZEOF_REG, 24 }, + { "f25", GDB_SIZEOF_REG, 25 }, + { "f26", GDB_SIZEOF_REG, 26 }, + { "f27", GDB_SIZEOF_REG, 27 }, + { "f28", GDB_SIZEOF_REG, 28 }, + { "f29", GDB_SIZEOF_REG, 29 }, + { "f30", GDB_SIZEOF_REG, 30 }, + { "f31", GDB_SIZEOF_REG, 31 }, + { "fsr", GDB_SIZEOF_REG, 0 }, + { "fir", GDB_SIZEOF_REG, 0 }, +}; + +int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) +{ + int fp_reg; + + if (regno < 0 || regno >= DBG_MAX_REG_NUM) + return -EINVAL; + + if (dbg_reg_def[regno].offset != -1 && regno < 38) { + memcpy((void *)regs + dbg_reg_def[regno].offset, mem, + dbg_reg_def[regno].size); + } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) { + /* FP registers 38 -> 69 */ + if (!(regs->cp0_status & ST0_CU1)) + return 0; + if (regno == 70) { + /* Process the fcr31/fsr (register 70) */ + memcpy((void *)¤t->thread.fpu.fcr31, mem, + dbg_reg_def[regno].size); + goto out_save; + } else if (regno == 71) { + /* Ignore the fir (register 71) */ + goto out_save; + } + fp_reg = dbg_reg_def[regno].offset; + memcpy((void *)¤t->thread.fpu.fpr[fp_reg], mem, + dbg_reg_def[regno].size); +out_save: + restore_fp(current); + } + + return 0; +} + +char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) +{ + int fp_reg; + + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return NULL; + + if (dbg_reg_def[regno].offset != -1 && regno < 38) { + /* First 38 registers */ + memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, + dbg_reg_def[regno].size); + } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) { + /* FP registers 38 -> 69 */ + if (!(regs->cp0_status & ST0_CU1)) + goto out; + save_fp(current); + if (regno == 70) { + /* Process the fcr31/fsr (register 70) */ + memcpy(mem, (void *)¤t->thread.fpu.fcr31, + dbg_reg_def[regno].size); + goto out; + } else if (regno == 71) { + /* Ignore the fir (register 71) */ + memset(mem, 0, dbg_reg_def[regno].size); + goto out; + } + fp_reg = dbg_reg_def[regno].offset; + memcpy(mem, (void *)¤t->thread.fpu.fpr[fp_reg], + dbg_reg_def[regno].size); + } + +out: + return dbg_reg_def[regno].name; + +} + +void arch_kgdb_breakpoint(void) +{ + __asm__ __volatile__( + ".globl breakinst\n\t" + ".set\tnoreorder\n\t" + "nop\n" + "breakinst:\tbreak\n\t" + "nop\n\t" + ".set\treorder"); +} + +static void kgdb_call_nmi_hook(void *ignored) +{ + kgdb_nmicallback(raw_smp_processor_id(), NULL); +} + +void kgdb_roundup_cpus(unsigned long flags) +{ + local_irq_enable(); + smp_call_function(kgdb_call_nmi_hook, NULL, 0); + local_irq_disable(); +} + +static int compute_signal(int tt) +{ + struct hard_trap_info *ht; + + for (ht = hard_trap_info; ht->tt && ht->signo; ht++) + if (ht->tt == tt) + return ht->signo; + + return SIGHUP; /* default for things we don't know about */ +} + +/* + * Similar to regs_to_gdb_regs() except that process is sleeping and so + * we may not be able to get all the info. 
+ */ +void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) +{ + int reg; + struct thread_info *ti = task_thread_info(p); + unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32; + struct pt_regs *regs = (struct pt_regs *)ksp - 1; +#if (KGDB_GDB_REG_SIZE == 32) + u32 *ptr = (u32 *)gdb_regs; +#else + u64 *ptr = (u64 *)gdb_regs; +#endif + + for (reg = 0; reg < 16; reg++) + *(ptr++) = regs->regs[reg]; + + /* S0 - S7 */ + for (reg = 16; reg < 24; reg++) + *(ptr++) = regs->regs[reg]; + + for (reg = 24; reg < 28; reg++) + *(ptr++) = 0; + + /* GP, SP, FP, RA */ + for (reg = 28; reg < 32; reg++) + *(ptr++) = regs->regs[reg]; + + *(ptr++) = regs->cp0_status; + *(ptr++) = regs->lo; + *(ptr++) = regs->hi; + *(ptr++) = regs->cp0_badvaddr; + *(ptr++) = regs->cp0_cause; + *(ptr++) = regs->cp0_epc; +} + +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) +{ + regs->cp0_epc = pc; +} + +/* + * Calls linux_debug_hook before the kernel dies. If KGDB is enabled, + * then try to fall into the debugger + */ +static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, + void *ptr) +{ + struct die_args *args = (struct die_args *)ptr; + struct pt_regs *regs = args->regs; + int trap = (regs->cp0_cause & 0x7c) >> 2; + + /* Userspace events, ignore. */ + if (user_mode(regs)) + return NOTIFY_DONE; + + if (atomic_read(&kgdb_active) != -1) + kgdb_nmicallback(smp_processor_id(), regs); + + if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) + return NOTIFY_DONE; + + if (atomic_read(&kgdb_setting_breakpoint)) + if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst)) + regs->cp0_epc += 4; + + /* In SMP mode, __flush_cache_all does IPI */ + local_irq_enable(); + __flush_cache_all(); + + return NOTIFY_STOP; +} + +#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP +int kgdb_ll_trap(int cmd, const char *str, + struct pt_regs *regs, long err, int trap, int sig) +{ + struct die_args args = { + .regs = regs, + .str = str, + .err = err, + .trapnr = trap, + .signr = sig, + + }; + + if (!kgdb_io_module_registered) + return NOTIFY_DONE; + + return kgdb_mips_notify(NULL, cmd, &args); +} +#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ + +static struct notifier_block kgdb_notifier = { + .notifier_call = kgdb_mips_notify, +}; + +/* + * Handle the 'c' command + */ +int kgdb_arch_handle_exception(int vector, int signo, int err_code, + char *remcom_in_buffer, char *remcom_out_buffer, + struct pt_regs *regs) +{ + char *ptr; + unsigned long address; + + switch (remcom_in_buffer[0]) { + case 'c': + /* handle the optional parameter */ + ptr = &remcom_in_buffer[1]; + if (kgdb_hex2long(&ptr, &address)) + regs->cp0_epc = address; + + return 0; + } + + return -1; +} + +struct kgdb_arch arch_kgdb_ops; + +/* + * We use kgdb_early_setup so that functions we need to call now don't + * cause trouble when called again later. + */ +int kgdb_arch_init(void) +{ + union mips_instruction insn = { + .r_format = { + .opcode = spec_op, + .func = break_op, + } + }; + memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE); + + register_die_notifier(&kgdb_notifier); + + return 0; +} + +/* + * kgdb_arch_exit - Perform any architecture specific uninitalization. + * + * This function will handle the uninitalization of any architecture + * specific callbacks, for dynamic registration and unregistration. 
+ */ +void kgdb_arch_exit(void) +{ + unregister_die_notifier(&kgdb_notifier); +} diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c new file mode 100644 index 00000000..158467da --- /dev/null +++ b/arch/mips/kernel/kprobes.c @@ -0,0 +1,676 @@ +/* + * Kernel Probes (KProbes) + * arch/mips/kernel/kprobes.c + * + * Copyright 2006 Sony Corp. + * Copyright 2010 Cavium Networks + * + * Some portions copied from the powerpc version. + * + * Copyright (C) IBM Corporation, 2002, 2004 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kprobes.h> +#include <linux/preempt.h> +#include <linux/uaccess.h> +#include <linux/kdebug.h> +#include <linux/slab.h> + +#include <asm/ptrace.h> +#include <asm/branch.h> +#include <asm/break.h> +#include <asm/inst.h> + +static const union mips_instruction breakpoint_insn = { + .b_format = { + .opcode = spec_op, + .code = BRK_KPROBE_BP, + .func = break_op + } +}; + +static const union mips_instruction breakpoint2_insn = { + .b_format = { + .opcode = spec_op, + .code = BRK_KPROBE_SSTEPBP, + .func = break_op + } +}; + +DEFINE_PER_CPU(struct kprobe *, current_kprobe); +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +static int __kprobes insn_has_delayslot(union mips_instruction insn) +{ + switch (insn.i_format.opcode) { + + /* + * This group contains: + * jr and jalr are in r_format format. + */ + case spec_op: + switch (insn.r_format.func) { + case jr_op: + case jalr_op: + break; + default: + goto insn_ok; + } + + /* + * This group contains: + * bltz_op, bgez_op, bltzl_op, bgezl_op, + * bltzal_op, bgezal_op, bltzall_op, bgezall_op. + */ + case bcond_op: + + /* + * These are unconditional and in j_format. + */ + case jal_op: + case j_op: + + /* + * These are conditional and in i_format. + */ + case beq_op: + case beql_op: + case bne_op: + case bnel_op: + case blez_op: + case blezl_op: + case bgtz_op: + case bgtzl_op: + + /* + * These are the FPA/cp1 branch instructions. 
+ */ + case cop1_op: + +#ifdef CONFIG_CPU_CAVIUM_OCTEON + case lwc2_op: /* This is bbit0 on Octeon */ + case ldc2_op: /* This is bbit032 on Octeon */ + case swc2_op: /* This is bbit1 on Octeon */ + case sdc2_op: /* This is bbit132 on Octeon */ +#endif + return 1; + default: + break; + } +insn_ok: + return 0; +} + +/* + * insn_has_ll_or_sc function checks whether instruction is ll or sc + * one; putting breakpoint on top of atomic ll/sc pair is bad idea; + * so we need to prevent it and refuse kprobes insertion for such + * instructions; cannot do much about breakpoint in the middle of + * ll/sc pair; it is upto user to avoid those places + */ +static int __kprobes insn_has_ll_or_sc(union mips_instruction insn) +{ + int ret = 0; + + switch (insn.i_format.opcode) { + case ll_op: + case lld_op: + case sc_op: + case scd_op: + ret = 1; + break; + default: + break; + } + return ret; +} + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + union mips_instruction insn; + union mips_instruction prev_insn; + int ret = 0; + + insn = p->addr[0]; + + if (insn_has_ll_or_sc(insn)) { + pr_notice("Kprobes for ll and sc instructions are not" + "supported\n"); + ret = -EINVAL; + goto out; + } + + if ((probe_kernel_read(&prev_insn, p->addr - 1, + sizeof(mips_instruction)) == 0) && + insn_has_delayslot(prev_insn)) { + pr_notice("Kprobes for branch delayslot are not supported\n"); + ret = -EINVAL; + goto out; + } + + /* insn: must be on special executable page on mips. */ + p->ainsn.insn = get_insn_slot(); + if (!p->ainsn.insn) { + ret = -ENOMEM; + goto out; + } + + /* + * In the kprobe->ainsn.insn[] array we store the original + * instruction at index zero and a break trap instruction at + * index one. + * + * On MIPS arch if the instruction at probed address is a + * branch instruction, we need to execute the instruction at + * Branch Delayslot (BD) at the time of probe hit. As MIPS also + * doesn't have single stepping support, the BD instruction can + * not be executed in-line and it would be executed on SSOL slot + * using a normal breakpoint instruction in the next slot. + * So, read the instruction and save it for later execution. 
+ */ + if (insn_has_delayslot(insn)) + memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t)); + else + memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t)); + + p->ainsn.insn[1] = breakpoint2_insn; + p->opcode = *p->addr; + +out: + return ret; +} + +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + *p->addr = breakpoint_insn; + flush_insn_slot(p); +} + +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + *p->addr = p->opcode; + flush_insn_slot(p); +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + free_insn_slot(p->ainsn.insn, 0); +} + +static void save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; + kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR; + kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR; + kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc; +} + +static void restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; + kcb->kprobe_status = kcb->prev_kprobe.status; + kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR; + kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR; + kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc; +} + +static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + __get_cpu_var(current_kprobe) = p; + kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE); + kcb->kprobe_saved_epc = regs->cp0_epc; +} + +/** + * evaluate_branch_instrucion - + * + * Evaluate the branch instruction at probed address during probe hit. The + * result of evaluation would be the updated epc. The insturction in delayslot + * would actually be single stepped using a normal breakpoint) on SSOL slot. + * + * The result is also saved in the kprobe control block for later use, + * in case we need to execute the delayslot instruction. The latter will be + * false for NOP instruction in dealyslot and the branch-likely instructions + * when the branch is taken. And for those cases we set a flag as + * SKIP_DELAYSLOT in the kprobe control block + */ +static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + union mips_instruction insn = p->opcode; + long epc; + int ret = 0; + + epc = regs->cp0_epc; + if (epc & 3) + goto unaligned; + + if (p->ainsn.insn->word == 0) + kcb->flags |= SKIP_DELAYSLOT; + else + kcb->flags &= ~SKIP_DELAYSLOT; + + ret = __compute_return_epc_for_insn(regs, insn); + if (ret < 0) + return ret; + + if (ret == BRANCH_LIKELY_TAKEN) + kcb->flags |= SKIP_DELAYSLOT; + + kcb->target_epc = regs->cp0_epc; + + return 0; + +unaligned: + pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm); + force_sig(SIGBUS, current); + return -EFAULT; + +} + +static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + int ret = 0; + + regs->cp0_status &= ~ST0_IE; + + /* single step inline if the instruction is a break */ + if (p->opcode.word == breakpoint_insn.word || + p->opcode.word == breakpoint2_insn.word) + regs->cp0_epc = (unsigned long)p->addr; + else if (insn_has_delayslot(p->opcode)) { + ret = evaluate_branch_instruction(p, regs, kcb); + if (ret < 0) { + pr_notice("Kprobes: Error in evaluating branch\n"); + return; + } + } + regs->cp0_epc = (unsigned long)&p->ainsn.insn[0]; +} + +/* + * Called after single-stepping. p->addr is the address of the + * instruction whose first byte has been replaced by the "break 0" + * instruction. 
To avoid the SMP problems that can occur when we + * temporarily put back the original opcode to single-step, we + * single-stepped a copy of the instruction. The address of this + * copy is p->ainsn.insn. + * + * This function prepares to return from the post-single-step + * breakpoint trap. In case of branch instructions, the target + * epc to be restored. + */ +static void __kprobes resume_execution(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + if (insn_has_delayslot(p->opcode)) + regs->cp0_epc = kcb->target_epc; + else { + unsigned long orig_epc = kcb->kprobe_saved_epc; + regs->cp0_epc = orig_epc + 4; + } +} + +static int __kprobes kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *p; + int ret = 0; + kprobe_opcode_t *addr; + struct kprobe_ctlblk *kcb; + + addr = (kprobe_opcode_t *) regs->cp0_epc; + + /* + * We don't want to be preempted for the entire + * duration of kprobe processing + */ + preempt_disable(); + kcb = get_kprobe_ctlblk(); + + /* Check we're not actually recursing */ + if (kprobe_running()) { + p = get_kprobe(addr); + if (p) { + if (kcb->kprobe_status == KPROBE_HIT_SS && + p->ainsn.insn->word == breakpoint_insn.word) { + regs->cp0_status &= ~ST0_IE; + regs->cp0_status |= kcb->kprobe_saved_SR; + goto no_kprobe; + } + /* + * We have reentered the kprobe_handler(), since + * another probe was hit while within the handler. + * We here save the original kprobes variables and + * just single step on the instruction of the new probe + * without calling any user handlers. + */ + save_previous_kprobe(kcb); + set_current_kprobe(p, regs, kcb); + kprobes_inc_nmissed_count(p); + prepare_singlestep(p, regs, kcb); + kcb->kprobe_status = KPROBE_REENTER; + if (kcb->flags & SKIP_DELAYSLOT) { + resume_execution(p, regs, kcb); + restore_previous_kprobe(kcb); + preempt_enable_no_resched(); + } + return 1; + } else { + if (addr->word != breakpoint_insn.word) { + /* + * The breakpoint instruction was removed by + * another cpu right after we hit, no further + * handling of this interrupt is appropriate + */ + ret = 1; + goto no_kprobe; + } + p = __get_cpu_var(current_kprobe); + if (p->break_handler && p->break_handler(p, regs)) + goto ss_probe; + } + goto no_kprobe; + } + + p = get_kprobe(addr); + if (!p) { + if (addr->word != breakpoint_insn.word) { + /* + * The breakpoint instruction was removed right + * after we hit it. Another cpu has removed + * either a probepoint or a debugger breakpoint + * at this address. In either case, no further + * handling of this interrupt is appropriate. 
+ */ + ret = 1; + } + /* Not one of ours: let kernel handle it */ + goto no_kprobe; + } + + set_current_kprobe(p, regs, kcb); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + if (p->pre_handler && p->pre_handler(p, regs)) { + /* handler has already set things up, so skip ss setup */ + return 1; + } + +ss_probe: + prepare_singlestep(p, regs, kcb); + if (kcb->flags & SKIP_DELAYSLOT) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + if (p->post_handler) + p->post_handler(p, regs, 0); + resume_execution(p, regs, kcb); + preempt_enable_no_resched(); + } else + kcb->kprobe_status = KPROBE_HIT_SS; + + return 1; + +no_kprobe: + preempt_enable_no_resched(); + return ret; + +} + +static inline int post_kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (!cur) + return 0; + + if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + cur->post_handler(cur, regs, 0); + } + + resume_execution(cur, regs, kcb); + + regs->cp0_status |= kcb->kprobe_saved_SR; + + /* Restore back the original saved kprobes variables and continue. */ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + goto out; + } + reset_current_kprobe(); +out: + preempt_enable_no_resched(); + + return 1; +} + +static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + if (kcb->kprobe_status & KPROBE_HIT_SS) { + resume_execution(cur, regs, kcb); + regs->cp0_status |= kcb->kprobe_old_SR; + + reset_current_kprobe(); + preempt_enable_no_resched(); + } + return 0; +} + +/* + * Wrapper routine for handling exceptions. + */ +int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + + struct die_args *args = (struct die_args *)data; + int ret = NOTIFY_DONE; + + switch (val) { + case DIE_BREAK: + if (kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + case DIE_SSTEPBP: + if (post_kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + + case DIE_PAGE_FAULT: + /* kprobe_running() needs smp_processor_id() */ + preempt_disable(); + + if (kprobe_running() + && kprobe_fault_handler(args->regs, args->trapnr)) + ret = NOTIFY_STOP; + preempt_enable(); + break; + default: + break; + } + return ret; +} + +int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct jprobe *jp = container_of(p, struct jprobe, kp); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + kcb->jprobe_saved_regs = *regs; + kcb->jprobe_saved_sp = regs->regs[29]; + + memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp, + MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp)); + + regs->cp0_epc = (unsigned long)(jp->entry); + + return 1; +} + +/* Defined in the inline asm below. */ +void jprobe_return_end(void); + +void __kprobes jprobe_return(void) +{ + /* Assembler quirk necessitates this '0,code' business. 
*/ + asm volatile( + "break 0,%0\n\t" + ".globl jprobe_return_end\n" + "jprobe_return_end:\n" + : : "n" (BRK_KPROBE_BP) : "memory"); +} + +int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (regs->cp0_epc >= (unsigned long)jprobe_return && + regs->cp0_epc <= (unsigned long)jprobe_return_end) { + *regs = kcb->jprobe_saved_regs; + memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack, + MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp)); + preempt_enable_no_resched(); + + return 1; + } + return 0; +} + +/* + * Function return probe trampoline: + * - init_kprobes() establishes a probepoint here + * - When the probed function returns, this probe causes the + * handlers to fire + */ +static void __used kretprobe_trampoline_holder(void) +{ + asm volatile( + ".set push\n\t" + /* Keep the assembler from reordering and placing JR here. */ + ".set noreorder\n\t" + "nop\n\t" + ".global kretprobe_trampoline\n" + "kretprobe_trampoline:\n\t" + "nop\n\t" + ".set pop" + : : : "memory"); +} + +void kretprobe_trampoline(void); + +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *) regs->regs[31]; + + /* Replace the return addr with trampoline addr */ + regs->regs[31] = (unsigned long)kretprobe_trampoline; +} + +/* + * Called when the probe at kretprobe trampoline is hit + */ +static int __kprobes trampoline_probe_handler(struct kprobe *p, + struct pt_regs *regs) +{ + struct kretprobe_instance *ri = NULL; + struct hlist_head *head, empty_rp; + struct hlist_node *node, *tmp; + unsigned long flags, orig_ret_address = 0; + unsigned long trampoline_address = (unsigned long)kretprobe_trampoline; + + INIT_HLIST_HEAD(&empty_rp); + kretprobe_hash_lock(current, &head, &flags); + + /* + * It is possible to have multiple instances associated with a given + * task either because an multiple functions in the call path + * have a return probe installed on them, and/or more than one return + * return probe was registered for a target function. + * + * We can handle this because: + * - instances are always inserted at the head of the list + * - when multiple return probes are registered for the same + * function, the first instance's ret_addr will point to the + * real return address, and all the rest will point to + * kretprobe_trampoline + */ + hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; + + if (ri->rp && ri->rp->handler) + ri->rp->handler(ri, regs); + + orig_ret_address = (unsigned long)ri->ret_addr; + recycle_rp_inst(ri, &empty_rp); + + if (orig_ret_address != trampoline_address) + /* + * This is the real return address. 
Any other + * instances associated with this task are for + * other calls deeper on the call stack + */ + break; + } + + kretprobe_assert(ri, orig_ret_address, trampoline_address); + instruction_pointer(regs) = orig_ret_address; + + reset_current_kprobe(); + kretprobe_hash_unlock(current, &flags); + preempt_enable_no_resched(); + + hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { + hlist_del(&ri->hlist); + kfree(ri); + } + /* + * By returning a non-zero value, we are telling + * kprobe_handler() that we don't want the post_handler + * to run (and have re-enabled preemption) + */ + return 1; +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline) + return 1; + + return 0; +} + +static struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *)kretprobe_trampoline, + .pre_handler = trampoline_probe_handler +}; + +int __init arch_init_kprobes(void) +{ + return register_kprobe(&trampoline_p); +} diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c new file mode 100644 index 00000000..84d0639e --- /dev/null +++ b/arch/mips/kernel/kspd.c @@ -0,0 +1,423 @@ +/* + * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/unistd.h> +#include <linux/file.h> +#include <linux/fdtable.h> +#include <linux/fs.h> +#include <linux/syscalls.h> +#include <linux/workqueue.h> +#include <linux/errno.h> +#include <linux/list.h> + +#include <asm/vpe.h> +#include <asm/rtlx.h> +#include <asm/kspd.h> + +static struct workqueue_struct *workqueue; +static struct work_struct work; + +extern unsigned long cpu_khz; + +struct mtsp_syscall { + int cmd; + unsigned char abi; + unsigned char size; +}; + +struct mtsp_syscall_ret { + int retval; + int errno; +}; + +struct mtsp_syscall_generic { + int arg0; + int arg1; + int arg2; + int arg3; + int arg4; + int arg5; + int arg6; +}; + +static struct list_head kspd_notifylist; +static int sp_stopping; + +/* these should match with those in the SDE kit */ +#define MTSP_SYSCALL_BASE 0 +#define MTSP_SYSCALL_EXIT (MTSP_SYSCALL_BASE + 0) +#define MTSP_SYSCALL_OPEN (MTSP_SYSCALL_BASE + 1) +#define MTSP_SYSCALL_READ (MTSP_SYSCALL_BASE + 2) +#define MTSP_SYSCALL_WRITE (MTSP_SYSCALL_BASE + 3) +#define MTSP_SYSCALL_CLOSE (MTSP_SYSCALL_BASE + 4) +#define MTSP_SYSCALL_LSEEK32 (MTSP_SYSCALL_BASE + 5) +#define MTSP_SYSCALL_ISATTY (MTSP_SYSCALL_BASE + 6) +#define MTSP_SYSCALL_GETTIME (MTSP_SYSCALL_BASE + 7) +#define MTSP_SYSCALL_PIPEFREQ (MTSP_SYSCALL_BASE + 8) +#define MTSP_SYSCALL_GETTOD (MTSP_SYSCALL_BASE + 9) +#define MTSP_SYSCALL_IOCTL (MTSP_SYSCALL_BASE + 10) + +#define MTSP_O_RDONLY 0x0000 +#define MTSP_O_WRONLY 0x0001 +#define MTSP_O_RDWR 0x0002 +#define MTSP_O_NONBLOCK 0x0004 +#define MTSP_O_APPEND 0x0008 +#define MTSP_O_SHLOCK 0x0010 +#define MTSP_O_EXLOCK 0x0020 +#define MTSP_O_ASYNC 0x0040 +/* XXX: check which of these is actually O_SYNC vs O_DSYNC */ +#define MTSP_O_FSYNC O_SYNC +#define MTSP_O_NOFOLLOW 0x0100 +#define MTSP_O_SYNC 0x0080 +#define MTSP_O_CREAT 0x0200 +#define MTSP_O_TRUNC 0x0400 +#define MTSP_O_EXCL 0x0800 +#define MTSP_O_BINARY 0x8000 + +extern int tclimit; + +struct apsp_table { + int sp; + int ap; +}; + +/* we might want to do the mode flags too */ +struct apsp_table open_flags_table[] = { + { MTSP_O_RDWR, O_RDWR }, + { MTSP_O_WRONLY, O_WRONLY }, + { MTSP_O_CREAT, O_CREAT }, + { MTSP_O_TRUNC, O_TRUNC }, + { MTSP_O_NONBLOCK, O_NONBLOCK }, + { MTSP_O_APPEND, O_APPEND }, + { MTSP_O_NOFOLLOW, O_NOFOLLOW } +}; + +struct apsp_table syscall_command_table[] = { + { MTSP_SYSCALL_OPEN, __NR_open }, + { MTSP_SYSCALL_CLOSE, __NR_close }, + { MTSP_SYSCALL_READ, __NR_read }, + { MTSP_SYSCALL_WRITE, __NR_write }, + { MTSP_SYSCALL_LSEEK32, __NR_lseek }, + { MTSP_SYSCALL_IOCTL, __NR_ioctl } +}; + +static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3) +{ + register long int _num __asm__("$2") = num; + register long int _arg0 __asm__("$4") = arg0; + register long int _arg1 __asm__("$5") = arg1; + register long int _arg2 __asm__("$6") = arg2; + register long int _arg3 __asm__("$7") = arg3; + + mm_segment_t old_fs; + + old_fs = get_fs(); + set_fs(KERNEL_DS); + + __asm__ __volatile__ ( + " syscall \n" + : "=r" (_num), "=r" (_arg3) + : "r" (_num), "r" (_arg0), "r" (_arg1), "r" (_arg2), "r" (_arg3)); + + set_fs(old_fs); + + /* $a3 is error flag */ + if (_arg3) + return -_num; + + return _num; +} + +static int translate_syscall_command(int cmd) +{ + int i; + int ret = -1; + + for (i = 0; i < ARRAY_SIZE(syscall_command_table); i++) { + if ((cmd == syscall_command_table[i].sp)) + return syscall_command_table[i].ap; + } + + return ret; +} + +static unsigned int 
translate_open_flags(int flags) +{ + int i; + unsigned int ret = 0; + + for (i = 0; i < ARRAY_SIZE(open_flags_table); i++) { + if( (flags & open_flags_table[i].sp) ) { + ret |= open_flags_table[i].ap; + } + } + + return ret; +} + + +static int sp_setfsuidgid(uid_t uid, gid_t gid) +{ + struct cred *new; + + new = prepare_creds(); + if (!new) + return -ENOMEM; + + new->fsuid = uid; + new->fsgid = gid; + + commit_creds(new); + + return 0; +} + +/* + * Expects a request to be on the sysio channel. Reads it. Decides whether + * its a linux syscall and runs it, or whatever. Puts the return code back + * into the request and sends the whole thing back. + */ +void sp_work_handle_request(void) +{ + struct mtsp_syscall sc; + struct mtsp_syscall_generic generic; + struct mtsp_syscall_ret ret; + struct kspd_notifications *n; + unsigned long written; + mm_segment_t old_fs; + struct timeval tv; + struct timezone tz; + int err, cmd; + + char *vcwd; + int size; + + ret.retval = -1; + + old_fs = get_fs(); + set_fs(KERNEL_DS); + + if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall))) { + set_fs(old_fs); + printk(KERN_ERR "Expected request but nothing to read\n"); + return; + } + + size = sc.size; + + if (size) { + if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size)) { + set_fs(old_fs); + printk(KERN_ERR "Expected request but nothing to read\n"); + return; + } + } + + /* Run the syscall at the privilege of the user who loaded the + SP program */ + + if (vpe_getuid(tclimit)) { + err = sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit)); + if (!err) + pr_err("Change of creds failed\n"); + } + + switch (sc.cmd) { + /* needs the flags argument translating from SDE kit to + linux */ + case MTSP_SYSCALL_PIPEFREQ: + ret.retval = cpu_khz * 1000; + ret.errno = 0; + break; + + case MTSP_SYSCALL_GETTOD: + memset(&tz, 0, sizeof(tz)); + if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv, + (int)&tz, 0, 0)) == 0) + ret.retval = tv.tv_sec; + break; + + case MTSP_SYSCALL_EXIT: + list_for_each_entry(n, &kspd_notifylist, list) + n->kspd_sp_exit(tclimit); + sp_stopping = 1; + + printk(KERN_DEBUG "KSPD got exit syscall from SP exitcode %d\n", + generic.arg0); + break; + + case MTSP_SYSCALL_OPEN: + generic.arg1 = translate_open_flags(generic.arg1); + + vcwd = vpe_getcwd(tclimit); + + /* change to cwd of the process that loaded the SP program */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + sys_chdir(vcwd); + set_fs(old_fs); + + sc.cmd = __NR_open; + + /* fall through */ + + default: + if ((sc.cmd >= __NR_Linux) && + (sc.cmd <= (__NR_Linux + __NR_Linux_syscalls)) ) + cmd = sc.cmd; + else + cmd = translate_syscall_command(sc.cmd); + + if (cmd >= 0) { + ret.retval = sp_syscall(cmd, generic.arg0, generic.arg1, + generic.arg2, generic.arg3); + } else + printk(KERN_WARNING + "KSPD: Unknown SP syscall number %d\n", sc.cmd); + break; + } /* switch */ + + if (vpe_getuid(tclimit)) { + err = sp_setfsuidgid(0, 0); + if (!err) + pr_err("restoring old creds failed\n"); + } + + old_fs = get_fs(); + set_fs(KERNEL_DS); + written = rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(ret)); + set_fs(old_fs); + if (written < sizeof(ret)) + printk("KSPD: sp_work_handle_request failed to send to SP\n"); +} + +static void sp_cleanup(void) +{ + struct files_struct *files = current->files; + int i, j; + struct fdtable *fdt; + + j = 0; + + /* + * It is safe to dereference the fd table without RCU or + * ->file_lock + */ + fdt = files_fdtable(files); + for (;;) { + unsigned long set; + i = j * __NFDBITS; + if (i >= fdt->max_fds) + break; + 
set = fdt->open_fds[j++]; + while (set) { + if (set & 1) { + struct file * file = xchg(&fdt->fd[i], NULL); + if (file) + filp_close(file, files); + } + i++; + set >>= 1; + } + } + + /* Put daemon cwd back to root to avoid umount problems */ + sys_chdir("/"); +} + +static int channel_open; + +/* the work handler */ +static void sp_work(struct work_struct *unused) +{ + if (!channel_open) { + if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) { + printk("KSPD: unable to open sp channel\n"); + sp_stopping = 1; + } else { + channel_open++; + printk(KERN_DEBUG "KSPD: SP channel opened\n"); + } + } else { + /* wait for some data, allow it to sleep */ + rtlx_read_poll(RTLX_CHANNEL_SYSIO, 1); + + /* Check we haven't been woken because we are stopping */ + if (!sp_stopping) + sp_work_handle_request(); + } + + if (!sp_stopping) + queue_work(workqueue, &work); + else + sp_cleanup(); +} + +static void startwork(int vpe) +{ + sp_stopping = channel_open = 0; + + if (workqueue == NULL) { + if ((workqueue = create_singlethread_workqueue("kspd")) == NULL) { + printk(KERN_ERR "unable to start kspd\n"); + return; + } + + INIT_WORK(&work, sp_work); + } + + queue_work(workqueue, &work); +} + +static void stopwork(int vpe) +{ + sp_stopping = 1; + + printk(KERN_DEBUG "KSPD: SP stopping\n"); +} + +void kspd_notify(struct kspd_notifications *notify) +{ + list_add(¬ify->list, &kspd_notifylist); +} + +static struct vpe_notifications notify; +static int kspd_module_init(void) +{ + INIT_LIST_HEAD(&kspd_notifylist); + + notify.start = startwork; + notify.stop = stopwork; + vpe_notify(tclimit, ¬ify); + + return 0; +} + +static void kspd_module_exit(void) +{ + +} + +module_init(kspd_module_init); +module_exit(kspd_module_exit); + +MODULE_DESCRIPTION("MIPS KSPD"); +MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc."); +MODULE_LICENSE("GPL"); diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c new file mode 100644 index 00000000..922a554c --- /dev/null +++ b/arch/mips/kernel/linux32.c @@ -0,0 +1,358 @@ +/* + * Conversion between 32-bit and 64-bit native system calls. + * + * Copyright (C) 2000 Silicon Graphics, Inc. + * Written by Ulf Carlsson (ulfc@engr.sgi.com) + * sys32_execve from ia64/ia32 code, Feb 2000, Kanoj Sarcar (kanoj@sgi.com) + */ +#include <linux/compiler.h> +#include <linux/mm.h> +#include <linux/errno.h> +#include <linux/file.h> +#include <linux/highuid.h> +#include <linux/resource.h> +#include <linux/highmem.h> +#include <linux/time.h> +#include <linux/times.h> +#include <linux/poll.h> +#include <linux/skbuff.h> +#include <linux/filter.h> +#include <linux/shm.h> +#include <linux/sem.h> +#include <linux/msg.h> +#include <linux/icmpv6.h> +#include <linux/syscalls.h> +#include <linux/sysctl.h> +#include <linux/utime.h> +#include <linux/utsname.h> +#include <linux/personality.h> +#include <linux/dnotify.h> +#include <linux/module.h> +#include <linux/binfmts.h> +#include <linux/security.h> +#include <linux/compat.h> +#include <linux/vfs.h> +#include <linux/ipc.h> +#include <linux/slab.h> + +#include <net/sock.h> +#include <net/scm.h> + +#include <asm/compat-signal.h> +#include <asm/sim.h> +#include <asm/uaccess.h> +#include <asm/mmu_context.h> +#include <asm/mman.h> + +/* Use this to get at 32-bit user passed pointers. */ +/* A() macro should be used for places where you e.g. + have some internal variable u32 and just want to get + rid of a compiler warning. AA() has to be used in + places where you want to convert a function argument + to 32bit pointer or when you e.g. 
access pt_regs + structure and want to consider 32bit registers only. + */ +#define A(__x) ((unsigned long)(__x)) +#define AA(__x) ((unsigned long)((int)__x)) + +#ifdef __MIPSEB__ +#define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL)) +#endif +#ifdef __MIPSEL__ +#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL)) +#endif + +SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, unsigned long, fd, + unsigned long, pgoff) +{ + unsigned long error; + + error = -EINVAL; + if (pgoff & (~PAGE_MASK >> 12)) + goto out; + error = sys_mmap_pgoff(addr, len, prot, flags, fd, + pgoff >> (PAGE_SHIFT-12)); +out: + return error; +} + +/* + * sys_execve() executes a new program. + */ +asmlinkage int sys32_execve(nabi_no_regargs struct pt_regs regs) +{ + int error; + char * filename; + + filename = getname(compat_ptr(regs.regs[4])); + error = PTR_ERR(filename); + if (IS_ERR(filename)) + goto out; + error = compat_do_execve(filename, compat_ptr(regs.regs[5]), + compat_ptr(regs.regs[6]), ®s); + putname(filename); + +out: + return error; +} + +#define RLIM_INFINITY32 0x7fffffff +#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x) + +struct rlimit32 { + int rlim_cur; + int rlim_max; +}; + +SYSCALL_DEFINE4(32_truncate64, const char __user *, path, + unsigned long, __dummy, unsigned long, a2, unsigned long, a3) +{ + return sys_truncate(path, merge_64(a2, a3)); +} + +SYSCALL_DEFINE4(32_ftruncate64, unsigned long, fd, unsigned long, __dummy, + unsigned long, a2, unsigned long, a3) +{ + return sys_ftruncate(fd, merge_64(a2, a3)); +} + +SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high, + unsigned int, offset_low, loff_t __user *, result, + unsigned int, origin) +{ + return sys_llseek(fd, offset_high, offset_low, result, origin); +} + +/* From the Single Unix Spec: pread & pwrite act like lseek to pos + op + + lseek back to original location. They fail just like lseek does on + non-seekable files. 
*/ + +SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count, + unsigned long, unused, unsigned long, a4, unsigned long, a5) +{ + return sys_pread64(fd, buf, count, merge_64(a4, a5)); +} + +SYSCALL_DEFINE6(32_pwrite, unsigned int, fd, const char __user *, buf, + size_t, count, u32, unused, u64, a4, u64, a5) +{ + return sys_pwrite64(fd, buf, count, merge_64(a4, a5)); +} + +SYSCALL_DEFINE2(32_sched_rr_get_interval, compat_pid_t, pid, + struct compat_timespec __user *, interval) +{ + struct timespec t; + int ret; + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t); + set_fs(old_fs); + if (put_user (t.tv_sec, &interval->tv_sec) || + __put_user(t.tv_nsec, &interval->tv_nsec)) + return -EFAULT; + return ret; +} + +#ifdef CONFIG_SYSVIPC + +SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third, + unsigned long, ptr, unsigned long, fifth) +{ + int version, err; + + version = call >> 16; /* hack for backward compatibility */ + call &= 0xffff; + + switch (call) { + case SEMOP: + /* struct sembuf is the same on 32 and 64bit :)) */ + err = sys_semtimedop(first, compat_ptr(ptr), second, NULL); + break; + case SEMTIMEDOP: + err = compat_sys_semtimedop(first, compat_ptr(ptr), second, + compat_ptr(fifth)); + break; + case SEMGET: + err = sys_semget(first, second, third); + break; + case SEMCTL: + err = compat_sys_semctl(first, second, third, compat_ptr(ptr)); + break; + case MSGSND: + err = compat_sys_msgsnd(first, second, third, compat_ptr(ptr)); + break; + case MSGRCV: + err = compat_sys_msgrcv(first, second, fifth, third, + version, compat_ptr(ptr)); + break; + case MSGGET: + err = sys_msgget((key_t) first, second); + break; + case MSGCTL: + err = compat_sys_msgctl(first, second, compat_ptr(ptr)); + break; + case SHMAT: + err = compat_sys_shmat(first, second, third, version, + compat_ptr(ptr)); + break; + case SHMDT: + err = sys_shmdt(compat_ptr(ptr)); + break; + case SHMGET: + err = sys_shmget(first, (unsigned)second, third); + break; + case SHMCTL: + err = compat_sys_shmctl(first, second, compat_ptr(ptr)); + break; + default: + err = -EINVAL; + break; + } + + return err; +} + +#else + +SYSCALL_DEFINE6(32_ipc, u32, call, int, first, int, second, int, third, + u32, ptr, u32, fifth) +{ + return -ENOSYS; +} + +#endif /* CONFIG_SYSVIPC */ + +#ifdef CONFIG_MIPS32_N32 +SYSCALL_DEFINE4(n32_semctl, int, semid, int, semnum, int, cmd, u32, arg) +{ + /* compat_sys_semctl expects a pointer to union semun */ + u32 __user *uptr = compat_alloc_user_space(sizeof(u32)); + if (put_user(arg, uptr)) + return -EFAULT; + return compat_sys_semctl(semid, semnum, cmd, uptr); +} + +SYSCALL_DEFINE4(n32_msgsnd, int, msqid, u32, msgp, unsigned int, msgsz, + int, msgflg) +{ + return compat_sys_msgsnd(msqid, msgsz, msgflg, compat_ptr(msgp)); +} + +SYSCALL_DEFINE5(n32_msgrcv, int, msqid, u32, msgp, size_t, msgsz, + int, msgtyp, int, msgflg) +{ + return compat_sys_msgrcv(msqid, msgsz, msgtyp, msgflg, IPC_64, + compat_ptr(msgp)); +} +#endif + +SYSCALL_DEFINE1(32_personality, unsigned long, personality) +{ + unsigned int p = personality & 0xffffffff; + int ret; + + if (personality(current->personality) == PER_LINUX32 && + personality(p) == PER_LINUX) + p = (p & ~PER_MASK) | PER_LINUX32; + ret = sys_personality(p); + if (ret != -1 && personality(ret) == PER_LINUX32) + ret = (ret & ~PER_MASK) | PER_LINUX; + return ret; +} + +SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd, + compat_off_t __user *, offset, s32, 
count) +{ + mm_segment_t old_fs = get_fs(); + int ret; + off_t of; + + if (offset && get_user(of, offset)) + return -EFAULT; + + set_fs(KERNEL_DS); + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, count); + set_fs(old_fs); + + if (offset && put_user(of, offset)) + return -EFAULT; + + return ret; +} + +asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3, + size_t count) +{ + return sys_readahead(fd, merge_64(a2, a3), count); +} + +asmlinkage long sys32_sync_file_range(int fd, int __pad, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + int flags) +{ + return sys_sync_file_range(fd, + merge_64(a2, a3), merge_64(a4, a5), + flags); +} + +asmlinkage long sys32_fadvise64_64(int fd, int __pad, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + int flags) +{ + return sys_fadvise64_64(fd, + merge_64(a2, a3), merge_64(a4, a5), + flags); +} + +asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2, + unsigned offset_a3, unsigned len_a4, unsigned len_a5) +{ + return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3), + merge_64(len_a4, len_a5)); +} + +save_static_function(sys32_clone); +static int noinline __used +_sys32_clone(nabi_no_regargs struct pt_regs regs) +{ + unsigned long clone_flags; + unsigned long newsp; + int __user *parent_tidptr, *child_tidptr; + + clone_flags = regs.regs[4]; + newsp = regs.regs[5]; + if (!newsp) + newsp = regs.regs[29]; + parent_tidptr = (int __user *) regs.regs[6]; + + /* Use __dummy4 instead of getting it off the stack, so that + syscall() works. */ + child_tidptr = (int __user *) __dummy4; + return do_fork(clone_flags, newsp, ®s, 0, + parent_tidptr, child_tidptr); +} + +asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf, + size_t len) +{ + return sys_lookup_dcookie(merge_64(a0, a1), buf, len); +} + +SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags, + u64, a3, u64, a4, int, dfd, const char __user *, pathname) +{ + return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4), + dfd, pathname); +} + +SYSCALL_DEFINE6(32_futex, u32 __user *, uaddr, int, op, u32, val, + struct compat_timespec __user *, utime, u32 __user *, uaddr2, + u32, val3) +{ + return compat_sys_futex(uaddr, op, val, utime, uaddr2, val3); +} diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c new file mode 100644 index 00000000..85beb9b0 --- /dev/null +++ b/arch/mips/kernel/machine_kexec.c @@ -0,0 +1,85 @@ +/* + * machine_kexec.c for kexec + * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006 + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. 
+ */ + +#include <linux/kexec.h> +#include <linux/mm.h> +#include <linux/delay.h> + +#include <asm/cacheflush.h> +#include <asm/page.h> + +extern const unsigned char relocate_new_kernel[]; +extern const size_t relocate_new_kernel_size; + +extern unsigned long kexec_start_address; +extern unsigned long kexec_indirection_page; + +int +machine_kexec_prepare(struct kimage *kimage) +{ + return 0; +} + +void +machine_kexec_cleanup(struct kimage *kimage) +{ +} + +void +machine_shutdown(void) +{ +} + +void +machine_crash_shutdown(struct pt_regs *regs) +{ +} + +typedef void (*noretfun_t)(void) __attribute__((noreturn)); + +void +machine_kexec(struct kimage *image) +{ + unsigned long reboot_code_buffer; + unsigned long entry; + unsigned long *ptr; + + reboot_code_buffer = + (unsigned long)page_address(image->control_code_page); + + kexec_start_address = image->start; + kexec_indirection_page = + (unsigned long) phys_to_virt(image->head & PAGE_MASK); + + memcpy((void*)reboot_code_buffer, relocate_new_kernel, + relocate_new_kernel_size); + + /* + * The generic kexec code builds a page list with physical + * addresses. they are directly accessible through KSEG0 (or + * CKSEG0 or XPHYS if on 64bit system), hence the + * pys_to_virt() call. + */ + for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE); + ptr = (entry & IND_INDIRECTION) ? + phys_to_virt(entry & PAGE_MASK) : ptr + 1) { + if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION || + *ptr & IND_DESTINATION) + *ptr = (unsigned long) phys_to_virt(*ptr); + } + + /* + * we do not want to be bothered. + */ + local_irq_disable(); + + printk("Will call new kernel at %08lx\n", image->start); + printk("Bye ...\n"); + __flush_cache_all(); + ((noretfun_t) reboot_code_buffer)(); +} diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S new file mode 100644 index 00000000..4c968e7e --- /dev/null +++ b/arch/mips/kernel/mcount.S @@ -0,0 +1,202 @@ +/* + * MIPS specific _mcount support + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive for + * more details. + * + * Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China + * Copyright (C) 2010 DSLab, Lanzhou University, China + * Author: Wu Zhangjin <wuzhangjin@gmail.com> + */ + +#include <asm/regdef.h> +#include <asm/stackframe.h> +#include <asm/ftrace.h> + + .text + .set noreorder + .set noat + + .macro MCOUNT_SAVE_REGS + PTR_SUBU sp, PT_SIZE + PTR_S ra, PT_R31(sp) + PTR_S AT, PT_R1(sp) + PTR_S a0, PT_R4(sp) + PTR_S a1, PT_R5(sp) + PTR_S a2, PT_R6(sp) + PTR_S a3, PT_R7(sp) +#ifdef CONFIG_64BIT + PTR_S a4, PT_R8(sp) + PTR_S a5, PT_R9(sp) + PTR_S a6, PT_R10(sp) + PTR_S a7, PT_R11(sp) +#endif + .endm + + .macro MCOUNT_RESTORE_REGS + PTR_L ra, PT_R31(sp) + PTR_L AT, PT_R1(sp) + PTR_L a0, PT_R4(sp) + PTR_L a1, PT_R5(sp) + PTR_L a2, PT_R6(sp) + PTR_L a3, PT_R7(sp) +#ifdef CONFIG_64BIT + PTR_L a4, PT_R8(sp) + PTR_L a5, PT_R9(sp) + PTR_L a6, PT_R10(sp) + PTR_L a7, PT_R11(sp) + PTR_ADDIU sp, PT_SIZE +#else + PTR_ADDIU sp, (PT_SIZE + 8) +#endif +.endm + + .macro RETURN_BACK + jr ra + move ra, AT + .endm + +/* + * The -mmcount-ra-address option of gcc 4.5 uses register $12 to pass + * the location of the parent's return address. 
+ */ +#define MCOUNT_RA_ADDRESS_REG $12 + +#ifdef CONFIG_DYNAMIC_FTRACE + +NESTED(ftrace_caller, PT_SIZE, ra) + .globl _mcount +_mcount: + b ftrace_stub + nop + lw t1, function_trace_stop + bnez t1, ftrace_stub + nop + + MCOUNT_SAVE_REGS +#ifdef KBUILD_MCOUNT_RA_ADDRESS + PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp) +#endif + + move a0, ra /* arg1: self return address */ + .globl ftrace_call +ftrace_call: + nop /* a placeholder for the call to a real tracing function */ + move a1, AT /* arg2: parent's return address */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .globl ftrace_graph_call +ftrace_graph_call: + nop + nop +#endif + + MCOUNT_RESTORE_REGS + .globl ftrace_stub +ftrace_stub: + RETURN_BACK + END(ftrace_caller) + +#else /* ! CONFIG_DYNAMIC_FTRACE */ + +NESTED(_mcount, PT_SIZE, ra) + lw t1, function_trace_stop + bnez t1, ftrace_stub + nop + PTR_LA t1, ftrace_stub + PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ + bne t1, t2, static_trace + nop + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + PTR_L t3, ftrace_graph_return + bne t1, t3, ftrace_graph_caller + nop + PTR_LA t1, ftrace_graph_entry_stub + PTR_L t3, ftrace_graph_entry + bne t1, t3, ftrace_graph_caller + nop +#endif + b ftrace_stub + nop + +static_trace: + MCOUNT_SAVE_REGS + + move a0, ra /* arg1: self return address */ + jalr t2 /* (1) call *ftrace_trace_function */ + move a1, AT /* arg2: parent's return address */ + + MCOUNT_RESTORE_REGS + .globl ftrace_stub +ftrace_stub: + RETURN_BACK + END(_mcount) + +#endif /* ! CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + +NESTED(ftrace_graph_caller, PT_SIZE, ra) +#ifndef CONFIG_DYNAMIC_FTRACE + MCOUNT_SAVE_REGS +#endif + + /* arg1: Get the location of the parent's return address */ +#ifdef KBUILD_MCOUNT_RA_ADDRESS +#ifdef CONFIG_DYNAMIC_FTRACE + PTR_L a0, PT_R12(sp) +#else + move a0, MCOUNT_RA_ADDRESS_REG +#endif + bnez a0, 1f /* non-leaf func: stored in MCOUNT_RA_ADDRESS_REG */ + nop +#endif + PTR_LA a0, PT_R1(sp) /* leaf func: the location in current stack */ +1: + + /* arg2: Get self return address */ +#ifdef CONFIG_DYNAMIC_FTRACE + PTR_L a1, PT_R31(sp) +#else + move a1, ra +#endif + + /* arg3: Get frame pointer of current stack */ +#ifdef CONFIG_FRAME_POINTER + move a2, fp +#else /* ! 
CONFIG_FRAME_POINTER */ +#ifdef CONFIG_64BIT + PTR_LA a2, PT_SIZE(sp) +#else + PTR_LA a2, (PT_SIZE+8)(sp) +#endif +#endif + + jal prepare_ftrace_return + nop + MCOUNT_RESTORE_REGS + RETURN_BACK + END(ftrace_graph_caller) + + .align 2 + .globl return_to_handler +return_to_handler: + PTR_SUBU sp, PT_SIZE + PTR_S v0, PT_R2(sp) + + jal ftrace_return_to_handler + PTR_S v1, PT_R3(sp) + + /* restore the real parent address: v0 -> ra */ + move ra, v0 + + PTR_L v0, PT_R2(sp) + PTR_L v1, PT_R3(sp) + jr ra + PTR_ADDIU sp, PT_SIZE +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + + .set at + .set reorder diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c new file mode 100644 index 00000000..33f63bab --- /dev/null +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -0,0 +1,214 @@ +/* + * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels + * Copyright (C) 2005 Mips Technologies, Inc + */ +#include <linux/cpu.h> +#include <linux/cpuset.h> +#include <linux/cpumask.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/security.h> +#include <linux/types.h> +#include <asm/uaccess.h> + +/* + * CPU mask used to set process affinity for MT VPEs/TCs with FPUs + */ +cpumask_t mt_fpu_cpumask; + +static int fpaff_threshold = -1; +unsigned long mt_fpemul_threshold; + +/* + * Replacement functions for the sys_sched_setaffinity() and + * sys_sched_getaffinity() system calls, so that we can integrate + * FPU affinity with the user's requested processor affinity. + * This code is 98% identical with the sys_sched_setaffinity() + * and sys_sched_getaffinity() system calls, and should be + * updated when kernel/sched.c changes. + */ + +/* + * find_process_by_pid - find a process with a matching PID value. + * used in sys_sched_set/getaffinity() in kernel/sched.c, so + * cloned here. + */ +static inline struct task_struct *find_process_by_pid(pid_t pid) +{ + return pid ? 
find_task_by_vpid(pid) : current; +} + +/* + * check the target process has a UID that matches the current process's + */ +static bool check_same_owner(struct task_struct *p) +{ + const struct cred *cred = current_cred(), *pcred; + bool match; + + rcu_read_lock(); + pcred = __task_cred(p); + match = (cred->euid == pcred->euid || + cred->euid == pcred->uid); + rcu_read_unlock(); + return match; +} + +/* + * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process + */ +asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, + unsigned long __user *user_mask_ptr) +{ + cpumask_var_t cpus_allowed, new_mask, effective_mask; + struct thread_info *ti; + struct task_struct *p; + int retval; + + if (len < sizeof(new_mask)) + return -EINVAL; + + if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask))) + return -EFAULT; + + get_online_cpus(); + rcu_read_lock(); + + p = find_process_by_pid(pid); + if (!p) { + rcu_read_unlock(); + put_online_cpus(); + return -ESRCH; + } + + /* Prevent p going away */ + get_task_struct(p); + rcu_read_unlock(); + + if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { + retval = -ENOMEM; + goto out_put_task; + } + if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { + retval = -ENOMEM; + goto out_free_cpus_allowed; + } + if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) { + retval = -ENOMEM; + goto out_free_new_mask; + } + retval = -EPERM; + if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) + goto out_unlock; + + retval = security_task_setscheduler(p); + if (retval) + goto out_unlock; + + /* Record new user-specified CPU set for future reference */ + cpumask_copy(&p->thread.user_cpus_allowed, new_mask); + + again: + /* Compute new global allowed CPU set if necessary */ + ti = task_thread_info(p); + if (test_ti_thread_flag(ti, TIF_FPUBOUND) && + cpus_intersects(*new_mask, mt_fpu_cpumask)) { + cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask); + retval = set_cpus_allowed_ptr(p, effective_mask); + } else { + cpumask_copy(effective_mask, new_mask); + clear_ti_thread_flag(ti, TIF_FPUBOUND); + retval = set_cpus_allowed_ptr(p, new_mask); + } + + if (!retval) { + cpuset_cpus_allowed(p, cpus_allowed); + if (!cpumask_subset(effective_mask, cpus_allowed)) { + /* + * We must have raced with a concurrent cpuset + * update. 
Just reset the cpus_allowed to the + * cpuset's cpus_allowed + */ + cpumask_copy(new_mask, cpus_allowed); + goto again; + } + } +out_unlock: + free_cpumask_var(effective_mask); +out_free_new_mask: + free_cpumask_var(new_mask); +out_free_cpus_allowed: + free_cpumask_var(cpus_allowed); +out_put_task: + put_task_struct(p); + put_online_cpus(); + return retval; +} + +/* + * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process + */ +asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, + unsigned long __user *user_mask_ptr) +{ + unsigned int real_len; + cpumask_t mask; + int retval; + struct task_struct *p; + + real_len = sizeof(mask); + if (len < real_len) + return -EINVAL; + + get_online_cpus(); + read_lock(&tasklist_lock); + + retval = -ESRCH; + p = find_process_by_pid(pid); + if (!p) + goto out_unlock; + retval = security_task_getscheduler(p); + if (retval) + goto out_unlock; + + cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask); + +out_unlock: + read_unlock(&tasklist_lock); + put_online_cpus(); + if (retval) + return retval; + if (copy_to_user(user_mask_ptr, &mask, real_len)) + return -EFAULT; + return real_len; +} + + +static int __init fpaff_thresh(char *str) +{ + get_option(&str, &fpaff_threshold); + return 1; +} +__setup("fpaff=", fpaff_thresh); + +/* + * FPU Use Factor empirically derived from experiments on 34K + */ +#define FPUSEFACTOR 2000 + +static __init int mt_fp_affinity_init(void) +{ + if (fpaff_threshold >= 0) { + mt_fpemul_threshold = fpaff_threshold; + } else { + mt_fpemul_threshold = + (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ; + } + printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n", + mt_fpemul_threshold); + + return 0; +} +arch_initcall(mt_fp_affinity_init); diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c new file mode 100644 index 00000000..7f3376b1 --- /dev/null +++ b/arch/mips/kernel/mips-mt.c @@ -0,0 +1,331 @@ +/* + * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels + * Copyright (C) 2005 Mips Technologies, Inc + */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/export.h> +#include <linux/interrupt.h> +#include <linux/security.h> + +#include <asm/cpu.h> +#include <asm/processor.h> +#include <linux/atomic.h> +#include <asm/hardirq.h> +#include <asm/mmu_context.h> +#include <asm/mipsmtregs.h> +#include <asm/r4kcache.h> +#include <asm/cacheflush.h> + +int vpelimit; + +static int __init maxvpes(char *str) +{ + get_option(&str, &vpelimit); + + return 1; +} + +__setup("maxvpes=", maxvpes); + +int tclimit; + +static int __init maxtcs(char *str) +{ + get_option(&str, &tclimit); + + return 1; +} + +__setup("maxtcs=", maxtcs); + +/* + * Dump new MIPS MT state for the core. Does not leave TCs halted. + * Takes an argument which taken to be a pre-call MVPControl value. 
+ */ + +void mips_mt_regdump(unsigned long mvpctl) +{ + unsigned long flags; + unsigned long vpflags; + unsigned long mvpconf0; + int nvpe; + int ntc; + int i; + int tc; + unsigned long haltval; + unsigned long tcstatval; +#ifdef CONFIG_MIPS_MT_SMTC + void smtc_soft_dump(void); +#endif /* CONFIG_MIPT_MT_SMTC */ + + local_irq_save(flags); + vpflags = dvpe(); + printk("=== MIPS MT State Dump ===\n"); + printk("-- Global State --\n"); + printk(" MVPControl Passed: %08lx\n", mvpctl); + printk(" MVPControl Read: %08lx\n", vpflags); + printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0())); + nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; + ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; + printk("-- per-VPE State --\n"); + for (i = 0; i < nvpe; i++) { + for (tc = 0; tc < ntc; tc++) { + settc(tc); + if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) { + printk(" VPE %d\n", i); + printk(" VPEControl : %08lx\n", + read_vpe_c0_vpecontrol()); + printk(" VPEConf0 : %08lx\n", + read_vpe_c0_vpeconf0()); + printk(" VPE%d.Status : %08lx\n", + i, read_vpe_c0_status()); + printk(" VPE%d.EPC : %08lx %pS\n", + i, read_vpe_c0_epc(), + (void *) read_vpe_c0_epc()); + printk(" VPE%d.Cause : %08lx\n", + i, read_vpe_c0_cause()); + printk(" VPE%d.Config7 : %08lx\n", + i, read_vpe_c0_config7()); + break; /* Next VPE */ + } + } + } + printk("-- per-TC State --\n"); + for (tc = 0; tc < ntc; tc++) { + settc(tc); + if (read_tc_c0_tcbind() == read_c0_tcbind()) { + /* Are we dumping ourself? */ + haltval = 0; /* Then we're not halted, and mustn't be */ + tcstatval = flags; /* And pre-dump TCStatus is flags */ + printk(" TC %d (current TC with VPE EPC above)\n", tc); + } else { + haltval = read_tc_c0_tchalt(); + write_tc_c0_tchalt(1); + tcstatval = read_tc_c0_tcstatus(); + printk(" TC %d\n", tc); + } + printk(" TCStatus : %08lx\n", tcstatval); + printk(" TCBind : %08lx\n", read_tc_c0_tcbind()); + printk(" TCRestart : %08lx %pS\n", + read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart()); + printk(" TCHalt : %08lx\n", haltval); + printk(" TCContext : %08lx\n", read_tc_c0_tccontext()); + if (!haltval) + write_tc_c0_tchalt(0); + } +#ifdef CONFIG_MIPS_MT_SMTC + smtc_soft_dump(); +#endif /* CONFIG_MIPT_MT_SMTC */ + printk("===========================\n"); + evpe(vpflags); + local_irq_restore(flags); +} + +static int mt_opt_norps; +static int mt_opt_rpsctl = -1; +static int mt_opt_nblsu = -1; +static int mt_opt_forceconfig7; +static int mt_opt_config7 = -1; + +static int __init rps_disable(char *s) +{ + mt_opt_norps = 1; + return 1; +} +__setup("norps", rps_disable); + +static int __init rpsctl_set(char *str) +{ + get_option(&str, &mt_opt_rpsctl); + return 1; +} +__setup("rpsctl=", rpsctl_set); + +static int __init nblsu_set(char *str) +{ + get_option(&str, &mt_opt_nblsu); + return 1; +} +__setup("nblsu=", nblsu_set); + +static int __init config7_set(char *str) +{ + get_option(&str, &mt_opt_config7); + mt_opt_forceconfig7 = 1; + return 1; +} +__setup("config7=", config7_set); + +/* Experimental cache flush control parameters that should go away some day */ +int mt_protiflush; +int mt_protdflush; +int mt_n_iflushes = 1; +int mt_n_dflushes = 1; + +static int __init set_protiflush(char *s) +{ + mt_protiflush = 1; + return 1; +} +__setup("protiflush", set_protiflush); + +static int __init set_protdflush(char *s) +{ + mt_protdflush = 1; + return 1; +} +__setup("protdflush", set_protdflush); + +static int __init niflush(char *s) +{ + get_option(&s, &mt_n_iflushes); + return 1; +} 
+__setup("niflush=", niflush); + +static int __init ndflush(char *s) +{ + get_option(&s, &mt_n_dflushes); + return 1; +} +__setup("ndflush=", ndflush); + +static unsigned int itc_base; + +static int __init set_itc_base(char *str) +{ + get_option(&str, &itc_base); + return 1; +} + +__setup("itcbase=", set_itc_base); + +void mips_mt_set_cpuoptions(void) +{ + unsigned int oconfig7 = read_c0_config7(); + unsigned int nconfig7 = oconfig7; + + if (mt_opt_norps) { + printk("\"norps\" option deprectated: use \"rpsctl=\"\n"); + } + if (mt_opt_rpsctl >= 0) { + printk("34K return prediction stack override set to %d.\n", + mt_opt_rpsctl); + if (mt_opt_rpsctl) + nconfig7 |= (1 << 2); + else + nconfig7 &= ~(1 << 2); + } + if (mt_opt_nblsu >= 0) { + printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu); + if (mt_opt_nblsu) + nconfig7 |= (1 << 5); + else + nconfig7 &= ~(1 << 5); + } + if (mt_opt_forceconfig7) { + printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7); + nconfig7 = mt_opt_config7; + } + if (oconfig7 != nconfig7) { + __asm__ __volatile("sync"); + write_c0_config7(nconfig7); + ehb(); + printk("Config7: 0x%08x\n", read_c0_config7()); + } + + /* Report Cache management debug options */ + if (mt_protiflush) + printk("I-cache flushes single-threaded\n"); + if (mt_protdflush) + printk("D-cache flushes single-threaded\n"); + if (mt_n_iflushes != 1) + printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes); + if (mt_n_dflushes != 1) + printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes); + + if (itc_base != 0) { + /* + * Configure ITC mapping. This code is very + * specific to the 34K core family, which uses + * a special mode bit ("ITC") in the ErrCtl + * register to enable access to ITC control + * registers via cache "tag" operations. + */ + unsigned long ectlval; + unsigned long itcblkgrn; + + /* ErrCtl register is known as "ecc" to Linux */ + ectlval = read_c0_ecc(); + write_c0_ecc(ectlval | (0x1 << 26)); + ehb(); +#define INDEX_0 (0x80000000) +#define INDEX_8 (0x80000008) + /* Read "cache tag" for Dcache pseudo-index 8 */ + cache_op(Index_Load_Tag_D, INDEX_8); + ehb(); + itcblkgrn = read_c0_dtaglo(); + itcblkgrn &= 0xfffe0000; + /* Set for 128 byte pitch of ITC cells */ + itcblkgrn |= 0x00000c00; + /* Stage in Tag register */ + write_c0_dtaglo(itcblkgrn); + ehb(); + /* Write out to ITU with CACHE op */ + cache_op(Index_Store_Tag_D, INDEX_8); + /* Now set base address, and turn ITC on with 0x1 bit */ + write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 ); + ehb(); + /* Write out to ITU with CACHE op */ + cache_op(Index_Store_Tag_D, INDEX_0); + write_c0_ecc(ectlval); + ehb(); + printk("Mapped %ld ITC cells starting at 0x%08x\n", + ((itcblkgrn & 0x7fe00000) >> 20), itc_base); + } +} + +/* + * Function to protect cache flushes from concurrent execution + * depends on MP software model chosen. 
+ */ + +void mt_cflush_lockdown(void) +{ +#ifdef CONFIG_MIPS_MT_SMTC + void smtc_cflush_lockdown(void); + + smtc_cflush_lockdown(); +#endif /* CONFIG_MIPS_MT_SMTC */ + /* FILL IN VSMP and AP/SP VERSIONS HERE */ +} + +void mt_cflush_release(void) +{ +#ifdef CONFIG_MIPS_MT_SMTC + void smtc_cflush_release(void); + + smtc_cflush_release(); +#endif /* CONFIG_MIPS_MT_SMTC */ + /* FILL IN VSMP and AP/SP VERSIONS HERE */ +} + +struct class *mt_class; + +static int __init mt_init(void) +{ + struct class *mtc; + + mtc = class_create(THIS_MODULE, "mt"); + if (IS_ERR(mtc)) + return PTR_ERR(mtc); + + mt_class = mtc; + + return 0; +} + +subsys_initcall(mt_init); diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c new file mode 100644 index 00000000..57ba13ed --- /dev/null +++ b/arch/mips/kernel/mips_ksyms.c @@ -0,0 +1,58 @@ +/* + * Export MIPS-specific functions needed for loadable modules. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle + * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc. + */ +#include <linux/interrupt.h> +#include <linux/export.h> +#include <asm/checksum.h> +#include <asm/pgtable.h> +#include <asm/uaccess.h> +#include <asm/ftrace.h> + +extern void *__bzero(void *__s, size_t __count); +extern long __strncpy_from_user_nocheck_asm(char *__to, + const char *__from, long __len); +extern long __strncpy_from_user_asm(char *__to, const char *__from, + long __len); +extern long __strlen_user_nocheck_asm(const char *s); +extern long __strlen_user_asm(const char *s); +extern long __strnlen_user_nocheck_asm(const char *s); +extern long __strnlen_user_asm(const char *s); + +/* + * String functions + */ +EXPORT_SYMBOL(memset); +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memmove); + +EXPORT_SYMBOL(kernel_thread); + +/* + * Userspace access stuff. + */ +EXPORT_SYMBOL(__copy_user); +EXPORT_SYMBOL(__copy_user_inatomic); +EXPORT_SYMBOL(__bzero); +EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm); +EXPORT_SYMBOL(__strncpy_from_user_asm); +EXPORT_SYMBOL(__strlen_user_nocheck_asm); +EXPORT_SYMBOL(__strlen_user_asm); +EXPORT_SYMBOL(__strnlen_user_nocheck_asm); +EXPORT_SYMBOL(__strnlen_user_asm); + +EXPORT_SYMBOL(csum_partial); +EXPORT_SYMBOL(csum_partial_copy_nocheck); +EXPORT_SYMBOL(__csum_partial_copy_user); + +EXPORT_SYMBOL(invalid_pte_table); +#ifdef CONFIG_FUNCTION_TRACER +/* _mcount is defined in arch/mips/kernel/mcount.S */ +EXPORT_SYMBOL(_mcount); +#endif diff --git a/arch/mips/kernel/mips_machine.c b/arch/mips/kernel/mips_machine.c new file mode 100644 index 00000000..411a058d --- /dev/null +++ b/arch/mips/kernel/mips_machine.c @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + */ +#include <linux/mm.h> +#include <linux/string.h> +#include <linux/slab.h> + +#include <asm/mips_machine.h> + +static struct mips_machine *mips_machine __initdata; +static char *mips_machine_name = "Unknown"; + +#define for_each_machine(mach) \ + for ((mach) = (struct mips_machine *)&__mips_machines_start; \ + (mach) && \ + (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \ + (mach)++) + +__init void mips_set_machine_name(const char *name) +{ + char *p; + + if (name == NULL) + return; + + p = kstrdup(name, GFP_KERNEL); + if (!p) + pr_err("MIPS: no memory for machine_name\n"); + + mips_machine_name = p; +} + +char *mips_get_machine_name(void) +{ + return mips_machine_name; +} + +__init int mips_machtype_setup(char *id) +{ + struct mips_machine *mach; + + for_each_machine(mach) { + if (mach->mach_id == NULL) + continue; + + if (strcmp(mach->mach_id, id) == 0) { + mips_machtype = mach->mach_type; + return 0; + } + } + + pr_err("MIPS: no machine found for id '%s', supported machines:\n", id); + pr_err("%-24s %s\n", "id", "name"); + for_each_machine(mach) + pr_err("%-24s %s\n", mach->mach_id, mach->mach_name); + + return 1; +} + +__setup("machtype=", mips_machtype_setup); + +__init void mips_machine_setup(void) +{ + struct mips_machine *mach; + + for_each_machine(mach) { + if (mips_machtype == mach->mach_type) { + mips_machine = mach; + break; + } + } + + if (!mips_machine) + return; + + mips_set_machine_name(mips_machine->mach_name); + pr_info("MIPS: machine is %s\n", mips_machine_name); + + if (mips_machine->mach_setup) + mips_machine->mach_setup(); +} diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c new file mode 100644 index 00000000..a5066b1c --- /dev/null +++ b/arch/mips/kernel/module.c @@ -0,0 +1,391 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Copyright (C) 2001 Rusty Russell. 
+ * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2005 Thiemo Seufer + */ + +#undef DEBUG + +#include <linux/moduleloader.h> +#include <linux/elf.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/jump_label.h> + +#include <asm/pgtable.h> /* MODULE_START */ + +struct mips_hi16 { + struct mips_hi16 *next; + Elf_Addr *addr; + Elf_Addr value; +}; + +static struct mips_hi16 *mips_hi16_list; + +static LIST_HEAD(dbe_list); +static DEFINE_SPINLOCK(dbe_lock); + +#ifdef MODULE_START +void *module_alloc(unsigned long size) +{ + return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END, + GFP_KERNEL, PAGE_KERNEL, -1, + __builtin_return_address(0)); +} +#endif + +static int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v) +{ + return 0; +} + +static int apply_r_mips_32_rel(struct module *me, u32 *location, Elf_Addr v) +{ + *location += v; + + return 0; +} + +static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v) +{ + *location = v; + + return 0; +} + +static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v) +{ + if (v % 4) { + pr_err("module %s: dangerous R_MIPS_26 REL relocation\n", + me->name); + return -ENOEXEC; + } + + if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { + printk(KERN_ERR + "module %s: relocation overflow\n", + me->name); + return -ENOEXEC; + } + + *location = (*location & ~0x03ffffff) | + ((*location + (v >> 2)) & 0x03ffffff); + + return 0; +} + +static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v) +{ + if (v % 4) { + pr_err("module %s: dangerous R_MIPS_26 RELArelocation\n", + me->name); + return -ENOEXEC; + } + + if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { + printk(KERN_ERR + "module %s: relocation overflow\n", + me->name); + return -ENOEXEC; + } + + *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff); + + return 0; +} + +static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v) +{ + struct mips_hi16 *n; + + /* + * We cannot relocate this one now because we don't know the value of + * the carry we need to add. Save the information, and let LO16 do the + * actual relocation. + */ + n = kmalloc(sizeof *n, GFP_KERNEL); + if (!n) + return -ENOMEM; + + n->addr = (Elf_Addr *)location; + n->value = v; + n->next = mips_hi16_list; + mips_hi16_list = n; + + return 0; +} + +static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v) +{ + *location = (*location & 0xffff0000) | + ((((long long) v + 0x8000LL) >> 16) & 0xffff); + + return 0; +} + +static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) +{ + unsigned long insnlo = *location; + Elf_Addr val, vallo; + + /* Sign extend the addend we extract from the lo insn. */ + vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; + + if (mips_hi16_list != NULL) { + struct mips_hi16 *l; + + l = mips_hi16_list; + while (l != NULL) { + struct mips_hi16 *next; + unsigned long insn; + + /* + * The value for the HI16 had best be the same. + */ + if (v != l->value) + goto out_danger; + + /* + * Do the HI16 relocation. Note that we actually don't + * need to know anything about the LO16 itself, except + * where to find the low 16 bits of the addend needed + * by the LO16. 
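+ *
+ * Worked example (values chosen purely for illustration): for a final
+ * address of 0x00018000 the LO16 half is 0x8000, which the CPU
+ * sign-extends to 0xffff8000 when the lui/addiu pair executes, so the
+ * HI16 half must become 0x0002 rather than 0x0001, since
+ * 0x00020000 + 0xffff8000 = 0x00018000. The rounding below, which adds
+ * 1 to the high half whenever bit 15 of the low half is set, applies
+ * exactly that carry.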
+ */ + insn = *l->addr; + val = ((insn & 0xffff) << 16) + vallo; + val += v; + + /* + * Account for the sign extension that will happen in + * the low bits. + */ + val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff; + + insn = (insn & ~0xffff) | val; + *l->addr = insn; + + next = l->next; + kfree(l); + l = next; + } + + mips_hi16_list = NULL; + } + + /* + * Ok, we're done with the HI16 relocs. Now deal with the LO16. + */ + val = v + vallo; + insnlo = (insnlo & ~0xffff) | (val & 0xffff); + *location = insnlo; + + return 0; + +out_danger: + pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name); + + return -ENOEXEC; +} + +static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v) +{ + *location = (*location & 0xffff0000) | (v & 0xffff); + + return 0; +} + +static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v) +{ + *(Elf_Addr *)location = v; + + return 0; +} + +static int apply_r_mips_higher_rela(struct module *me, u32 *location, + Elf_Addr v) +{ + *location = (*location & 0xffff0000) | + ((((long long) v + 0x80008000LL) >> 32) & 0xffff); + + return 0; +} + +static int apply_r_mips_highest_rela(struct module *me, u32 *location, + Elf_Addr v) +{ + *location = (*location & 0xffff0000) | + ((((long long) v + 0x800080008000LL) >> 48) & 0xffff); + + return 0; +} + +static int (*reloc_handlers_rel[]) (struct module *me, u32 *location, + Elf_Addr v) = { + [R_MIPS_NONE] = apply_r_mips_none, + [R_MIPS_32] = apply_r_mips_32_rel, + [R_MIPS_26] = apply_r_mips_26_rel, + [R_MIPS_HI16] = apply_r_mips_hi16_rel, + [R_MIPS_LO16] = apply_r_mips_lo16_rel +}; + +static int (*reloc_handlers_rela[]) (struct module *me, u32 *location, + Elf_Addr v) = { + [R_MIPS_NONE] = apply_r_mips_none, + [R_MIPS_32] = apply_r_mips_32_rela, + [R_MIPS_26] = apply_r_mips_26_rela, + [R_MIPS_HI16] = apply_r_mips_hi16_rela, + [R_MIPS_LO16] = apply_r_mips_lo16_rela, + [R_MIPS_64] = apply_r_mips_64_rela, + [R_MIPS_HIGHER] = apply_r_mips_higher_rela, + [R_MIPS_HIGHEST] = apply_r_mips_highest_rela +}; + +int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) +{ + Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr; + Elf_Sym *sym; + u32 *location; + unsigned int i; + Elf_Addr v; + int res; + + pr_debug("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* This is where to make the change */ + location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + /* This is the symbol it is referring to */ + sym = (Elf_Sym *)sechdrs[symindex].sh_addr + + ELF_MIPS_R_SYM(rel[i]); + if (IS_ERR_VALUE(sym->st_value)) { + /* Ignore unresolved weak symbol */ + if (ELF_ST_BIND(sym->st_info) == STB_WEAK) + continue; + printk(KERN_WARNING "%s: Unknown symbol %s\n", + me->name, strtab + sym->st_name); + return -ENOENT; + } + + v = sym->st_value; + + res = reloc_handlers_rel[ELF_MIPS_R_TYPE(rel[i])](me, location, v); + if (res) + return res; + } + + return 0; +} + +int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) +{ + Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr; + Elf_Sym *sym; + u32 *location; + unsigned int i; + Elf_Addr v; + int res; + + pr_debug("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* This is where to make the change 
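+ (location is a pointer into the already-copied section; in this
+ RELA variant the addend is taken from rel[i].r_addend below
+ rather than from the instruction word as in the REL path above)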
*/ + location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + /* This is the symbol it is referring to */ + sym = (Elf_Sym *)sechdrs[symindex].sh_addr + + ELF_MIPS_R_SYM(rel[i]); + if (IS_ERR_VALUE(sym->st_value)) { + /* Ignore unresolved weak symbol */ + if (ELF_ST_BIND(sym->st_info) == STB_WEAK) + continue; + printk(KERN_WARNING "%s: Unknown symbol %s\n", + me->name, strtab + sym->st_name); + return -ENOENT; + } + + v = sym->st_value + rel[i].r_addend; + + res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v); + if (res) + return res; + } + + return 0; +} + +/* Given an address, look for it in the module exception tables. */ +const struct exception_table_entry *search_module_dbetables(unsigned long addr) +{ + unsigned long flags; + const struct exception_table_entry *e = NULL; + struct mod_arch_specific *dbe; + + spin_lock_irqsave(&dbe_lock, flags); + list_for_each_entry(dbe, &dbe_list, dbe_list) { + e = search_extable(dbe->dbe_start, dbe->dbe_end - 1, addr); + if (e) + break; + } + spin_unlock_irqrestore(&dbe_lock, flags); + + /* Now, if we found one, we are running inside it now, hence + we cannot unload the module, hence no refcnt needed. */ + return e; +} + +/* Put in dbe list if necessary. */ +int module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *me) +{ + const Elf_Shdr *s; + char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + /* Make jump label nops. */ + jump_label_apply_nops(me); + + INIT_LIST_HEAD(&me->arch.dbe_list); + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { + if (strcmp("__dbe_table", secstrings + s->sh_name) != 0) + continue; + me->arch.dbe_start = (void *)s->sh_addr; + me->arch.dbe_end = (void *)s->sh_addr + s->sh_size; + spin_lock_irq(&dbe_lock); + list_add(&me->arch.dbe_list, &dbe_list); + spin_unlock_irq(&dbe_lock); + } + return 0; +} + +void module_arch_cleanup(struct module *mod) +{ + spin_lock_irq(&dbe_lock); + list_del(&mod->arch.dbe_list); + spin_unlock_irq(&dbe_lock); +} diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S new file mode 100644 index 00000000..ce89c806 --- /dev/null +++ b/arch/mips/kernel/octeon_switch.S @@ -0,0 +1,502 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle + * Copyright (C) 1996 David S. Miller (davem@davemloft.net) + * Copyright (C) 1994, 1995, 1996, by Andreas Busse + * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2000 MIPS Technologies, Inc. + * written by Carsten Langgaard, carstenl@mips.com + */ +#include <asm/asm.h> +#include <asm/cachectl.h> +#include <asm/fpregdef.h> +#include <asm/mipsregs.h> +#include <asm/asm-offsets.h> +#include <asm/page.h> +#include <asm/pgtable-bits.h> +#include <asm/regdef.h> +#include <asm/stackframe.h> +#include <asm/thread_info.h> + +#include <asm/asmmacro.h> + +/* + * Offset to the current process status flags, the first 32 bytes of the + * stack are not used. 
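+ *
+ * The saved pt_regs frame sits at the top of the kernel stack, just
+ * below that unused 32-byte pad, so ST_OFF is the offset of the saved
+ * CP0 Status word (PT_STATUS) measured from the task's thread_info,
+ * which is how it is used in "LONG_L t0, ST_OFF(t2)" below.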
+ */ +#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) + +/* + * task_struct *resume(task_struct *prev, task_struct *next, + * struct thread_info *next_ti) + */ + .align 7 + LEAF(resume) + .set arch=octeon + mfc0 t1, CP0_STATUS + LONG_S t1, THREAD_STATUS(a0) + cpu_save_nonscratch a0 + LONG_S ra, THREAD_REG31(a0) + + /* check if we need to save COP2 registers */ + PTR_L t2, TASK_THREAD_INFO(a0) + LONG_L t0, ST_OFF(t2) + bbit0 t0, 30, 1f + + /* Disable COP2 in the stored process state */ + li t1, ST0_CU2 + xor t0, t1 + LONG_S t0, ST_OFF(t2) + + /* Enable COP2 so we can save it */ + mfc0 t0, CP0_STATUS + or t0, t1 + mtc0 t0, CP0_STATUS + + /* Save COP2 */ + daddu a0, THREAD_CP2 + jal octeon_cop2_save + dsubu a0, THREAD_CP2 + + /* Disable COP2 now that we are done */ + mfc0 t0, CP0_STATUS + li t1, ST0_CU2 + xor t0, t1 + mtc0 t0, CP0_STATUS + +1: +#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 + /* Check if we need to store CVMSEG state */ + mfc0 t0, $11,7 /* CvmMemCtl */ + bbit0 t0, 6, 3f /* Is user access enabled? */ + + /* Store the CVMSEG state */ + /* Extract the size of CVMSEG */ + andi t0, 0x3f + /* Multiply * (cache line size/sizeof(long)/2) */ + sll t0, 7-LONGLOG-1 + li t1, -32768 /* Base address of CVMSEG */ + LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */ + synciobdma +2: + .set noreorder + LONG_L t8, 0(t1) /* Load from CVMSEG */ + subu t0, 1 /* Decrement loop var */ + LONG_L t9, LONGSIZE(t1)/* Load from CVMSEG */ + LONG_ADDU t1, LONGSIZE*2 /* Increment loc in CVMSEG */ + LONG_S t8, 0(t2) /* Store CVMSEG to thread storage */ + LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */ + bnez t0, 2b /* Loop until we've copied it all */ + LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */ + .set reorder + + /* Disable access to CVMSEG */ + mfc0 t0, $11,7 /* CvmMemCtl */ + xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ + mtc0 t0, $11,7 /* CvmMemCtl */ +#endif +3: + /* + * The order of restoring the registers takes care of the race + * updating $28, $29 and kernelsp without disabling ints. + */ + move $28, a2 + cpu_restore_nonscratch a1 + +#if (_THREAD_SIZE - 32) < 0x8000 + PTR_ADDIU t0, $28, _THREAD_SIZE - 32 +#else + PTR_LI t0, _THREAD_SIZE - 32 + PTR_ADDU t0, $28 +#endif + set_saved_sp t0, t1, t2 + + mfc0 t1, CP0_STATUS /* Do we really need this? */ + li a3, 0xff01 + and t1, a3 + LONG_L a2, THREAD_STATUS(a1) + nor a3, $0, a3 + and a2, a3 + or a2, t1 + mtc0 a2, CP0_STATUS + move v0, a0 + jr ra + END(resume) + +/* + * void octeon_cop2_save(struct octeon_cop2_state *a0) + */ + .align 7 + LEAF(octeon_cop2_save) + + dmfc0 t9, $9,7 /* CvmCtl register. 
*/ + + /* Save the COP2 CRC state */ + dmfc2 t0, 0x0201 + dmfc2 t1, 0x0202 + dmfc2 t2, 0x0200 + sd t0, OCTEON_CP2_CRC_IV(a0) + sd t1, OCTEON_CP2_CRC_LENGTH(a0) + sd t2, OCTEON_CP2_CRC_POLY(a0) + /* Skip next instructions if CvmCtl[NODFA_CP2] set */ + bbit1 t9, 28, 1f + + /* Save the LLM state */ + dmfc2 t0, 0x0402 + dmfc2 t1, 0x040A + sd t0, OCTEON_CP2_LLM_DAT(a0) + sd t1, OCTEON_CP2_LLM_DAT+8(a0) + +1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ + + /* Save the COP2 crypto state */ + /* this part is mostly common to both pass 1 and later revisions */ + dmfc2 t0, 0x0084 + dmfc2 t1, 0x0080 + dmfc2 t2, 0x0081 + dmfc2 t3, 0x0082 + sd t0, OCTEON_CP2_3DES_IV(a0) + dmfc2 t0, 0x0088 + sd t1, OCTEON_CP2_3DES_KEY(a0) + dmfc2 t1, 0x0111 /* only necessary for pass 1 */ + sd t2, OCTEON_CP2_3DES_KEY+8(a0) + dmfc2 t2, 0x0102 + sd t3, OCTEON_CP2_3DES_KEY+16(a0) + dmfc2 t3, 0x0103 + sd t0, OCTEON_CP2_3DES_RESULT(a0) + dmfc2 t0, 0x0104 + sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */ + dmfc2 t1, 0x0105 + sd t2, OCTEON_CP2_AES_IV(a0) + dmfc2 t2, 0x0106 + sd t3, OCTEON_CP2_AES_IV+8(a0) + dmfc2 t3, 0x0107 + sd t0, OCTEON_CP2_AES_KEY(a0) + dmfc2 t0, 0x0110 + sd t1, OCTEON_CP2_AES_KEY+8(a0) + dmfc2 t1, 0x0100 + sd t2, OCTEON_CP2_AES_KEY+16(a0) + dmfc2 t2, 0x0101 + sd t3, OCTEON_CP2_AES_KEY+24(a0) + mfc0 t3, $15,0 /* Get the processor ID register */ + sd t0, OCTEON_CP2_AES_KEYLEN(a0) + li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ + sd t1, OCTEON_CP2_AES_RESULT(a0) + sd t2, OCTEON_CP2_AES_RESULT+8(a0) + /* Skip to the Pass1 version of the remainder of the COP2 state */ + beq t3, t0, 2f + + /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ + dmfc2 t1, 0x0240 + dmfc2 t2, 0x0241 + dmfc2 t3, 0x0242 + dmfc2 t0, 0x0243 + sd t1, OCTEON_CP2_HSH_DATW(a0) + dmfc2 t1, 0x0244 + sd t2, OCTEON_CP2_HSH_DATW+8(a0) + dmfc2 t2, 0x0245 + sd t3, OCTEON_CP2_HSH_DATW+16(a0) + dmfc2 t3, 0x0246 + sd t0, OCTEON_CP2_HSH_DATW+24(a0) + dmfc2 t0, 0x0247 + sd t1, OCTEON_CP2_HSH_DATW+32(a0) + dmfc2 t1, 0x0248 + sd t2, OCTEON_CP2_HSH_DATW+40(a0) + dmfc2 t2, 0x0249 + sd t3, OCTEON_CP2_HSH_DATW+48(a0) + dmfc2 t3, 0x024A + sd t0, OCTEON_CP2_HSH_DATW+56(a0) + dmfc2 t0, 0x024B + sd t1, OCTEON_CP2_HSH_DATW+64(a0) + dmfc2 t1, 0x024C + sd t2, OCTEON_CP2_HSH_DATW+72(a0) + dmfc2 t2, 0x024D + sd t3, OCTEON_CP2_HSH_DATW+80(a0) + dmfc2 t3, 0x024E + sd t0, OCTEON_CP2_HSH_DATW+88(a0) + dmfc2 t0, 0x0250 + sd t1, OCTEON_CP2_HSH_DATW+96(a0) + dmfc2 t1, 0x0251 + sd t2, OCTEON_CP2_HSH_DATW+104(a0) + dmfc2 t2, 0x0252 + sd t3, OCTEON_CP2_HSH_DATW+112(a0) + dmfc2 t3, 0x0253 + sd t0, OCTEON_CP2_HSH_IVW(a0) + dmfc2 t0, 0x0254 + sd t1, OCTEON_CP2_HSH_IVW+8(a0) + dmfc2 t1, 0x0255 + sd t2, OCTEON_CP2_HSH_IVW+16(a0) + dmfc2 t2, 0x0256 + sd t3, OCTEON_CP2_HSH_IVW+24(a0) + dmfc2 t3, 0x0257 + sd t0, OCTEON_CP2_HSH_IVW+32(a0) + dmfc2 t0, 0x0258 + sd t1, OCTEON_CP2_HSH_IVW+40(a0) + dmfc2 t1, 0x0259 + sd t2, OCTEON_CP2_HSH_IVW+48(a0) + dmfc2 t2, 0x025E + sd t3, OCTEON_CP2_HSH_IVW+56(a0) + dmfc2 t3, 0x025A + sd t0, OCTEON_CP2_GFM_MULT(a0) + dmfc2 t0, 0x025B + sd t1, OCTEON_CP2_GFM_MULT+8(a0) + sd t2, OCTEON_CP2_GFM_POLY(a0) + sd t3, OCTEON_CP2_GFM_RESULT(a0) + sd t0, OCTEON_CP2_GFM_RESULT+8(a0) + jr ra + +2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ + dmfc2 t3, 0x0040 + dmfc2 t0, 0x0041 + dmfc2 t1, 0x0042 + dmfc2 t2, 0x0043 + sd t3, OCTEON_CP2_HSH_DATW(a0) + dmfc2 t3, 0x0044 + sd t0, OCTEON_CP2_HSH_DATW+8(a0) + dmfc2 t0, 0x0045 + sd t1, OCTEON_CP2_HSH_DATW+16(a0) + dmfc2 t1, 0x0046 + sd t2, OCTEON_CP2_HSH_DATW+24(a0) + dmfc2 t2, 
0x0048 + sd t3, OCTEON_CP2_HSH_DATW+32(a0) + dmfc2 t3, 0x0049 + sd t0, OCTEON_CP2_HSH_DATW+40(a0) + dmfc2 t0, 0x004A + sd t1, OCTEON_CP2_HSH_DATW+48(a0) + sd t2, OCTEON_CP2_HSH_IVW(a0) + sd t3, OCTEON_CP2_HSH_IVW+8(a0) + sd t0, OCTEON_CP2_HSH_IVW+16(a0) + +3: /* pass 1 or CvmCtl[NOCRYPTO] set */ + jr ra + END(octeon_cop2_save) + +/* + * void octeon_cop2_restore(struct octeon_cop2_state *a0) + */ + .align 7 + .set push + .set noreorder + LEAF(octeon_cop2_restore) + /* First cache line was prefetched before the call */ + pref 4, 128(a0) + dmfc0 t9, $9,7 /* CvmCtl register. */ + + pref 4, 256(a0) + ld t0, OCTEON_CP2_CRC_IV(a0) + pref 4, 384(a0) + ld t1, OCTEON_CP2_CRC_LENGTH(a0) + ld t2, OCTEON_CP2_CRC_POLY(a0) + + /* Restore the COP2 CRC state */ + dmtc2 t0, 0x0201 + dmtc2 t1, 0x1202 + bbit1 t9, 28, 2f /* Skip LLM if CvmCtl[NODFA_CP2] is set */ + dmtc2 t2, 0x4200 + + /* Restore the LLM state */ + ld t0, OCTEON_CP2_LLM_DAT(a0) + ld t1, OCTEON_CP2_LLM_DAT+8(a0) + dmtc2 t0, 0x0402 + dmtc2 t1, 0x040A + +2: + bbit1 t9, 26, done_restore /* done if CvmCtl[NOCRYPTO] set */ + nop + + /* Restore the COP2 crypto state common to pass 1 and pass 2 */ + ld t0, OCTEON_CP2_3DES_IV(a0) + ld t1, OCTEON_CP2_3DES_KEY(a0) + ld t2, OCTEON_CP2_3DES_KEY+8(a0) + dmtc2 t0, 0x0084 + ld t0, OCTEON_CP2_3DES_KEY+16(a0) + dmtc2 t1, 0x0080 + ld t1, OCTEON_CP2_3DES_RESULT(a0) + dmtc2 t2, 0x0081 + ld t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */ + dmtc2 t0, 0x0082 + ld t0, OCTEON_CP2_AES_IV(a0) + dmtc2 t1, 0x0098 + ld t1, OCTEON_CP2_AES_IV+8(a0) + dmtc2 t2, 0x010A /* only really needed for pass 1 */ + ld t2, OCTEON_CP2_AES_KEY(a0) + dmtc2 t0, 0x0102 + ld t0, OCTEON_CP2_AES_KEY+8(a0) + dmtc2 t1, 0x0103 + ld t1, OCTEON_CP2_AES_KEY+16(a0) + dmtc2 t2, 0x0104 + ld t2, OCTEON_CP2_AES_KEY+24(a0) + dmtc2 t0, 0x0105 + ld t0, OCTEON_CP2_AES_KEYLEN(a0) + dmtc2 t1, 0x0106 + ld t1, OCTEON_CP2_AES_RESULT(a0) + dmtc2 t2, 0x0107 + ld t2, OCTEON_CP2_AES_RESULT+8(a0) + mfc0 t3, $15,0 /* Get the processor ID register */ + dmtc2 t0, 0x0110 + li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ + dmtc2 t1, 0x0100 + bne t0, t3, 3f /* Skip the next stuff for non-pass1 */ + dmtc2 t2, 0x0101 + + /* this code is specific for pass 1 */ + ld t0, OCTEON_CP2_HSH_DATW(a0) + ld t1, OCTEON_CP2_HSH_DATW+8(a0) + ld t2, OCTEON_CP2_HSH_DATW+16(a0) + dmtc2 t0, 0x0040 + ld t0, OCTEON_CP2_HSH_DATW+24(a0) + dmtc2 t1, 0x0041 + ld t1, OCTEON_CP2_HSH_DATW+32(a0) + dmtc2 t2, 0x0042 + ld t2, OCTEON_CP2_HSH_DATW+40(a0) + dmtc2 t0, 0x0043 + ld t0, OCTEON_CP2_HSH_DATW+48(a0) + dmtc2 t1, 0x0044 + ld t1, OCTEON_CP2_HSH_IVW(a0) + dmtc2 t2, 0x0045 + ld t2, OCTEON_CP2_HSH_IVW+8(a0) + dmtc2 t0, 0x0046 + ld t0, OCTEON_CP2_HSH_IVW+16(a0) + dmtc2 t1, 0x0048 + dmtc2 t2, 0x0049 + b done_restore /* unconditional branch */ + dmtc2 t0, 0x004A + +3: /* this is post-pass1 code */ + ld t2, OCTEON_CP2_HSH_DATW(a0) + ld t0, OCTEON_CP2_HSH_DATW+8(a0) + ld t1, OCTEON_CP2_HSH_DATW+16(a0) + dmtc2 t2, 0x0240 + ld t2, OCTEON_CP2_HSH_DATW+24(a0) + dmtc2 t0, 0x0241 + ld t0, OCTEON_CP2_HSH_DATW+32(a0) + dmtc2 t1, 0x0242 + ld t1, OCTEON_CP2_HSH_DATW+40(a0) + dmtc2 t2, 0x0243 + ld t2, OCTEON_CP2_HSH_DATW+48(a0) + dmtc2 t0, 0x0244 + ld t0, OCTEON_CP2_HSH_DATW+56(a0) + dmtc2 t1, 0x0245 + ld t1, OCTEON_CP2_HSH_DATW+64(a0) + dmtc2 t2, 0x0246 + ld t2, OCTEON_CP2_HSH_DATW+72(a0) + dmtc2 t0, 0x0247 + ld t0, OCTEON_CP2_HSH_DATW+80(a0) + dmtc2 t1, 0x0248 + ld t1, OCTEON_CP2_HSH_DATW+88(a0) + dmtc2 t2, 0x0249 + ld t2, OCTEON_CP2_HSH_DATW+96(a0) + dmtc2 t0, 0x024A + ld t0, 
OCTEON_CP2_HSH_DATW+104(a0) + dmtc2 t1, 0x024B + ld t1, OCTEON_CP2_HSH_DATW+112(a0) + dmtc2 t2, 0x024C + ld t2, OCTEON_CP2_HSH_IVW(a0) + dmtc2 t0, 0x024D + ld t0, OCTEON_CP2_HSH_IVW+8(a0) + dmtc2 t1, 0x024E + ld t1, OCTEON_CP2_HSH_IVW+16(a0) + dmtc2 t2, 0x0250 + ld t2, OCTEON_CP2_HSH_IVW+24(a0) + dmtc2 t0, 0x0251 + ld t0, OCTEON_CP2_HSH_IVW+32(a0) + dmtc2 t1, 0x0252 + ld t1, OCTEON_CP2_HSH_IVW+40(a0) + dmtc2 t2, 0x0253 + ld t2, OCTEON_CP2_HSH_IVW+48(a0) + dmtc2 t0, 0x0254 + ld t0, OCTEON_CP2_HSH_IVW+56(a0) + dmtc2 t1, 0x0255 + ld t1, OCTEON_CP2_GFM_MULT(a0) + dmtc2 t2, 0x0256 + ld t2, OCTEON_CP2_GFM_MULT+8(a0) + dmtc2 t0, 0x0257 + ld t0, OCTEON_CP2_GFM_POLY(a0) + dmtc2 t1, 0x0258 + ld t1, OCTEON_CP2_GFM_RESULT(a0) + dmtc2 t2, 0x0259 + ld t2, OCTEON_CP2_GFM_RESULT+8(a0) + dmtc2 t0, 0x025E + dmtc2 t1, 0x025A + dmtc2 t2, 0x025B + +done_restore: + jr ra + nop + END(octeon_cop2_restore) + .set pop + +/* + * void octeon_mult_save() + * sp is assumed to point to a struct pt_regs + * + * NOTE: This is called in SAVE_SOME in stackframe.h. It can only + * safely modify k0 and k1. + */ + .align 7 + .set push + .set noreorder + LEAF(octeon_mult_save) + dmfc0 k0, $9,7 /* CvmCtl register. */ + bbit1 k0, 27, 1f /* Skip CvmCtl[NOMUL] */ + nop + + /* Save the multiplier state */ + v3mulu k0, $0, $0 + v3mulu k1, $0, $0 + sd k0, PT_MTP(sp) /* PT_MTP has P0 */ + v3mulu k0, $0, $0 + sd k1, PT_MTP+8(sp) /* PT_MTP+8 has P1 */ + ori k1, $0, 1 + v3mulu k1, k1, $0 + sd k0, PT_MTP+16(sp) /* PT_MTP+16 has P2 */ + v3mulu k0, $0, $0 + sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */ + v3mulu k1, $0, $0 + sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ + jr ra + sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ + +1: /* Resume here if CvmCtl[NOMUL] */ + jr ra + END(octeon_mult_save) + .set pop + +/* + * void octeon_mult_restore() + * sp is assumed to point to a struct pt_regs + * + * NOTE: This is called in RESTORE_SOME in stackframe.h. + */ + .align 7 + .set push + .set noreorder + LEAF(octeon_mult_restore) + dmfc0 k1, $9,7 /* CvmCtl register. */ + ld v0, PT_MPL(sp) /* MPL0 */ + ld v1, PT_MPL+8(sp) /* MPL1 */ + ld k0, PT_MPL+16(sp) /* MPL2 */ + bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */ + /* Normally falls through, so no time wasted here */ + nop + + /* Restore the multiplier state */ + ld k1, PT_MTP+16(sp) /* P2 */ + MTM0 v0 /* MPL0 */ + ld v0, PT_MTP+8(sp) /* P1 */ + MTM1 v1 /* MPL1 */ + ld v1, PT_MTP(sp) /* P0 */ + MTM2 k0 /* MPL2 */ + MTP2 k1 /* P2 */ + MTP1 v0 /* P1 */ + jr ra + MTP0 v1 /* P0 */ + +1: /* Resume here if CvmCtl[NOMUL] */ + jr ra + nop + END(octeon_mult_restore) + .set pop diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c new file mode 100644 index 00000000..c1cf9c6c --- /dev/null +++ b/arch/mips/kernel/perf_event.c @@ -0,0 +1,69 @@ +/* + * Linux performance counter support for MIPS. + * + * Copyright (C) 2010 MIPS Technologies, Inc. + * Author: Deng-Cheng Zhu + * + * This code is based on the implementation for ARM, which is in turn + * based on the sparc64 perf event code and the x86 code. Performance + * counter access is based on the MIPS Oprofile code. And the callchain + * support references the code of MIPS stacktrace.c. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/perf_event.h> + +#include <asm/stacktrace.h> + +/* Callchain handling code. */ + +/* + * Leave userspace callchain empty for now. 
When we find a way to trace + * the user stack callchains, we will add it here. + */ + +static void save_raw_perf_callchain(struct perf_callchain_entry *entry, + unsigned long reg29) +{ + unsigned long *sp = (unsigned long *)reg29; + unsigned long addr; + + while (!kstack_end(sp)) { + addr = *sp++; + if (__kernel_text_address(addr)) { + perf_callchain_store(entry, addr); + if (entry->nr >= PERF_MAX_STACK_DEPTH) + break; + } + } +} + +void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ + unsigned long sp = regs->regs[29]; +#ifdef CONFIG_KALLSYMS + unsigned long ra = regs->regs[31]; + unsigned long pc = regs->cp0_epc; + + if (raw_show_trace || !__kernel_text_address(pc)) { + unsigned long stack_page = + (unsigned long)task_stack_page(current); + if (stack_page && sp >= stack_page && + sp <= stack_page + THREAD_SIZE - 32) + save_raw_perf_callchain(entry, sp); + return; + } + do { + perf_callchain_store(entry, pc); + if (entry->nr >= PERF_MAX_STACK_DEPTH) + break; + pc = unwind_stack(current, &sp, pc, &ra); + } while (pc); +#else + save_raw_perf_callchain(entry, sp); +#endif +} diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c new file mode 100644 index 00000000..811084f4 --- /dev/null +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -0,0 +1,1609 @@ +/* + * Linux performance counter support for MIPS. + * + * Copyright (C) 2010 MIPS Technologies, Inc. + * Copyright (C) 2011 Cavium Networks, Inc. + * Author: Deng-Cheng Zhu + * + * This code is based on the implementation for ARM, which is in turn + * based on the sparc64 perf event code and the x86 code. Performance + * counter access is based on the MIPS Oprofile code. And the callchain + * support references the code of MIPS stacktrace.c. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/cpumask.h> +#include <linux/interrupt.h> +#include <linux/smp.h> +#include <linux/kernel.h> +#include <linux/perf_event.h> +#include <linux/uaccess.h> + +#include <asm/irq.h> +#include <asm/irq_regs.h> +#include <asm/stacktrace.h> +#include <asm/time.h> /* For perf_irq */ + +#define MIPS_MAX_HWEVENTS 4 + +struct cpu_hw_events { + /* Array of events on this cpu. */ + struct perf_event *events[MIPS_MAX_HWEVENTS]; + + /* + * Set the bit (indexed by the counter number) when the counter + * is used for an event. + */ + unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)]; + + /* + * Software copy of the control register for each performance counter. + * MIPS CPUs vary in performance counters. They use this differently, + * and even may not use it. + */ + unsigned int saved_ctrl[MIPS_MAX_HWEVENTS]; +}; +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { + .saved_ctrl = {0}, +}; + +/* The description of MIPS performance events. */ +struct mips_perf_event { + unsigned int event_id; + /* + * MIPS performance counters are indexed starting from 0. + * CNTR_EVEN indicates the indexes of the counters to be used are + * even numbers. 
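+ * Bit i of cntr_mask set means hardware counter i may count the
+ * event: CNTR_EVEN (0x55555555) selects counters 0, 2, 4, ...,
+ * CNTR_ODD (0xaaaaaaaa) selects 1, 3, 5, ..., and CNTR_ALL places no
+ * restriction. mipsxx_pmu_alloc_counter() tests these bits when
+ * looking for a free counter.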
+ */ + unsigned int cntr_mask; + #define CNTR_EVEN 0x55555555 + #define CNTR_ODD 0xaaaaaaaa + #define CNTR_ALL 0xffffffff +#ifdef CONFIG_MIPS_MT_SMP + enum { + T = 0, + V = 1, + P = 2, + } range; +#else + #define T + #define V + #define P +#endif +}; + +static struct mips_perf_event raw_event; +static DEFINE_MUTEX(raw_event_mutex); + +#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff +#define C(x) PERF_COUNT_HW_CACHE_##x + +struct mips_pmu { + u64 max_period; + u64 valid_count; + u64 overflow; + const char *name; + int irq; + u64 (*read_counter)(unsigned int idx); + void (*write_counter)(unsigned int idx, u64 val); + const struct mips_perf_event *(*map_raw_event)(u64 config); + const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX]; + const struct mips_perf_event (*cache_event_map) + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + unsigned int num_counters; +}; + +static struct mips_pmu mipspmu; + +#define M_CONFIG1_PC (1 << 4) + +#define M_PERFCTL_EXL (1 << 0) +#define M_PERFCTL_KERNEL (1 << 1) +#define M_PERFCTL_SUPERVISOR (1 << 2) +#define M_PERFCTL_USER (1 << 3) +#define M_PERFCTL_INTERRUPT_ENABLE (1 << 4) +#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5) +#define M_PERFCTL_VPEID(vpe) ((vpe) << 16) +#define M_PERFCTL_MT_EN(filter) ((filter) << 20) +#define M_TC_EN_ALL M_PERFCTL_MT_EN(0) +#define M_TC_EN_VPE M_PERFCTL_MT_EN(1) +#define M_TC_EN_TC M_PERFCTL_MT_EN(2) +#define M_PERFCTL_TCID(tcid) ((tcid) << 22) +#define M_PERFCTL_WIDE (1 << 30) +#define M_PERFCTL_MORE (1 << 31) + +#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \ + M_PERFCTL_KERNEL | \ + M_PERFCTL_USER | \ + M_PERFCTL_SUPERVISOR | \ + M_PERFCTL_INTERRUPT_ENABLE) + +#ifdef CONFIG_MIPS_MT_SMP +#define M_PERFCTL_CONFIG_MASK 0x3fff801f +#else +#define M_PERFCTL_CONFIG_MASK 0x1f +#endif +#define M_PERFCTL_EVENT_MASK 0xfe0 + + +#ifdef CONFIG_MIPS_MT_SMP +static int cpu_has_mipsmt_pertccounters; + +static DEFINE_RWLOCK(pmuint_rwlock); + +/* + * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because + * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs. + */ +#if defined(CONFIG_HW_PERF_EVENTS) +#define vpe_id() (cpu_has_mipsmt_pertccounters ? \ + 0 : smp_processor_id()) +#else +#define vpe_id() (cpu_has_mipsmt_pertccounters ? \ + 0 : cpu_data[smp_processor_id()].vpe_id) +#endif + +/* Copied from op_model_mipsxx.c */ +static unsigned int vpe_shift(void) +{ + if (num_possible_cpus() > 1) + return 1; + + return 0; +} + +static unsigned int counters_total_to_per_cpu(unsigned int counters) +{ + return counters >> vpe_shift(); +} + +static unsigned int counters_per_cpu_to_total(unsigned int counters) +{ + return counters << vpe_shift(); +} + +#else /* !CONFIG_MIPS_MT_SMP */ +#define vpe_id() 0 + +#endif /* CONFIG_MIPS_MT_SMP */ + +static void resume_local_counters(void); +static void pause_local_counters(void); +static irqreturn_t mipsxx_pmu_handle_irq(int, void *); +static int mipsxx_pmu_handle_shared_irq(void); + +static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx) +{ + if (vpe_id() == 1) + idx = (idx + 2) & 3; + return idx; +} + +static u64 mipsxx_pmu_read_counter(unsigned int idx) +{ + idx = mipsxx_pmu_swizzle_perf_idx(idx); + + switch (idx) { + case 0: + /* + * The counters are unsigned, we must cast to truncate + * off the high bits. 
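+ * (On a 64-bit kernel the 32-bit counter read is sign-extended, so
+ * without the (u32) cast a count with bit 31 set would spill into
+ * the upper half of the returned u64.)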
+ */ + return (u32)read_c0_perfcntr0(); + case 1: + return (u32)read_c0_perfcntr1(); + case 2: + return (u32)read_c0_perfcntr2(); + case 3: + return (u32)read_c0_perfcntr3(); + default: + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); + return 0; + } +} + +static u64 mipsxx_pmu_read_counter_64(unsigned int idx) +{ + idx = mipsxx_pmu_swizzle_perf_idx(idx); + + switch (idx) { + case 0: + return read_c0_perfcntr0_64(); + case 1: + return read_c0_perfcntr1_64(); + case 2: + return read_c0_perfcntr2_64(); + case 3: + return read_c0_perfcntr3_64(); + default: + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); + return 0; + } +} + +static void mipsxx_pmu_write_counter(unsigned int idx, u64 val) +{ + idx = mipsxx_pmu_swizzle_perf_idx(idx); + + switch (idx) { + case 0: + write_c0_perfcntr0(val); + return; + case 1: + write_c0_perfcntr1(val); + return; + case 2: + write_c0_perfcntr2(val); + return; + case 3: + write_c0_perfcntr3(val); + return; + } +} + +static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val) +{ + idx = mipsxx_pmu_swizzle_perf_idx(idx); + + switch (idx) { + case 0: + write_c0_perfcntr0_64(val); + return; + case 1: + write_c0_perfcntr1_64(val); + return; + case 2: + write_c0_perfcntr2_64(val); + return; + case 3: + write_c0_perfcntr3_64(val); + return; + } +} + +static unsigned int mipsxx_pmu_read_control(unsigned int idx) +{ + idx = mipsxx_pmu_swizzle_perf_idx(idx); + + switch (idx) { + case 0: + return read_c0_perfctrl0(); + case 1: + return read_c0_perfctrl1(); + case 2: + return read_c0_perfctrl2(); + case 3: + return read_c0_perfctrl3(); + default: + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); + return 0; + } +} + +static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val) +{ + idx = mipsxx_pmu_swizzle_perf_idx(idx); + + switch (idx) { + case 0: + write_c0_perfctrl0(val); + return; + case 1: + write_c0_perfctrl1(val); + return; + case 2: + write_c0_perfctrl2(val); + return; + case 3: + write_c0_perfctrl3(val); + return; + } +} + +static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc, + struct hw_perf_event *hwc) +{ + int i; + + /* + * We only need to care the counter mask. The range has been + * checked definitely. + */ + unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff; + + for (i = mipspmu.num_counters - 1; i >= 0; i--) { + /* + * Note that some MIPS perf events can be counted by both + * even and odd counters, wheresas many other are only by + * even _or_ odd counters. This introduces an issue that + * when the former kind of event takes the counter the + * latter kind of event wants to use, then the "counter + * allocation" for the latter event will fail. In fact if + * they can be dynamically swapped, they both feel happy. + * But here we leave this issue alone for now. + */ + if (test_bit(i, &cntr_mask) && + !test_and_set_bit(i, cpuc->used_mask)) + return i; + } + + return -EAGAIN; +} + +static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + WARN_ON(idx < 0 || idx >= mipspmu.num_counters); + + cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) | + (evt->config_base & M_PERFCTL_CONFIG_MASK) | + /* Make sure interrupt enabled. */ + M_PERFCTL_INTERRUPT_ENABLE; + /* + * We do not actually let the counter run. Leave it until start(). 
+ */ +} + +static void mipsxx_pmu_disable_event(int idx) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + unsigned long flags; + + WARN_ON(idx < 0 || idx >= mipspmu.num_counters); + + local_irq_save(flags); + cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) & + ~M_PERFCTL_COUNT_EVENT_WHENEVER; + mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]); + local_irq_restore(flags); +} + +static int mipspmu_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + u64 left = local64_read(&hwc->period_left); + u64 period = hwc->sample_period; + int ret = 0; + + if (unlikely((left + period) & (1ULL << 63))) { + /* left underflowed by more than period. */ + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } else if (unlikely((left + period) <= period)) { + /* left underflowed by less than period. */ + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (left > mipspmu.max_period) { + left = mipspmu.max_period; + local64_set(&hwc->period_left, left); + } + + local64_set(&hwc->prev_count, mipspmu.overflow - left); + + mipspmu.write_counter(idx, mipspmu.overflow - left); + + perf_event_update_userpage(event); + + return ret; +} + +static void mipspmu_event_update(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + u64 prev_raw_count, new_raw_count; + u64 delta; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = mipspmu.read_counter(idx); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = new_raw_count - prev_raw_count; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); +} + +static void mipspmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + /* Set the period for the event. */ + mipspmu_event_set_period(event, hwc, hwc->idx); + + /* Enable the event. */ + mipsxx_pmu_enable_event(hwc, hwc->idx); +} + +static void mipspmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!(hwc->state & PERF_HES_STOPPED)) { + /* We are working on a local event. */ + mipsxx_pmu_disable_event(hwc->idx); + barrier(); + mipspmu_event_update(event, hwc, hwc->idx); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } +} + +static int mipspmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx; + int err = 0; + + perf_pmu_disable(event->pmu); + + /* To look for a free counter for this event. */ + idx = mipsxx_pmu_alloc_counter(cpuc, hwc); + if (idx < 0) { + err = idx; + goto out; + } + + /* + * If there is an event in the counter we are going to use then + * make sure it is disabled. + */ + event->hw.idx = idx; + mipsxx_pmu_disable_event(idx); + cpuc->events[idx] = event; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + mipspmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. 
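+ Self-monitoring tasks that mmap the event pick up the newly
+ assigned counter index through perf_event_mmap_page.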
*/ + perf_event_update_userpage(event); + +out: + perf_pmu_enable(event->pmu); + return err; +} + +static void mipspmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + WARN_ON(idx < 0 || idx >= mipspmu.num_counters); + + mipspmu_stop(event, PERF_EF_UPDATE); + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); +} + +static void mipspmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + /* Don't read disabled counters! */ + if (hwc->idx < 0) + return; + + mipspmu_event_update(event, hwc, hwc->idx); +} + +static void mipspmu_enable(struct pmu *pmu) +{ +#ifdef CONFIG_MIPS_MT_SMP + write_unlock(&pmuint_rwlock); +#endif + resume_local_counters(); +} + +/* + * MIPS performance counters can be per-TC. The control registers can + * not be directly accessed accross CPUs. Hence if we want to do global + * control, we need cross CPU calls. on_each_cpu() can help us, but we + * can not make sure this function is called with interrupts enabled. So + * here we pause local counters and then grab a rwlock and leave the + * counters on other CPUs alone. If any counter interrupt raises while + * we own the write lock, simply pause local counters on that CPU and + * spin in the handler. Also we know we won't be switched to another + * CPU after pausing local counters and before grabbing the lock. + */ +static void mipspmu_disable(struct pmu *pmu) +{ + pause_local_counters(); +#ifdef CONFIG_MIPS_MT_SMP + write_lock(&pmuint_rwlock); +#endif +} + +static atomic_t active_events = ATOMIC_INIT(0); +static DEFINE_MUTEX(pmu_reserve_mutex); +static int (*save_perf_irq)(void); + +static int mipspmu_get_irq(void) +{ + int err; + + if (mipspmu.irq >= 0) { + /* Request my own irq handler. */ + err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq, + IRQF_PERCPU | IRQF_NOBALANCING, + "mips_perf_pmu", NULL); + if (err) { + pr_warning("Unable to request IRQ%d for MIPS " + "performance counters!\n", mipspmu.irq); + } + } else if (cp0_perfcount_irq < 0) { + /* + * We are sharing the irq number with the timer interrupt. + */ + save_perf_irq = perf_irq; + perf_irq = mipsxx_pmu_handle_shared_irq; + err = 0; + } else { + pr_warning("The platform hasn't properly defined its " + "interrupt controller.\n"); + err = -ENOENT; + } + + return err; +} + +static void mipspmu_free_irq(void) +{ + if (mipspmu.irq >= 0) + free_irq(mipspmu.irq, NULL); + else if (cp0_perfcount_irq < 0) + perf_irq = save_perf_irq; +} + +/* + * mipsxx/rm9000/loongson2 have different performance counters, they have + * specific low-level init routines. + */ +static void reset_counters(void *arg); +static int __hw_perf_event_init(struct perf_event *event); + +static void hw_perf_event_destroy(struct perf_event *event) +{ + if (atomic_dec_and_mutex_lock(&active_events, + &pmu_reserve_mutex)) { + /* + * We must not call the destroy function with interrupts + * disabled. 
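+ * on_each_cpu() below sends IPIs and waits for them to finish, and
+ * mipspmu_free_irq() may call free_irq(), which can sleep; both need
+ * interrupts enabled here.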
+ */ + on_each_cpu(reset_counters, + (void *)(long)mipspmu.num_counters, 1); + mipspmu_free_irq(); + mutex_unlock(&pmu_reserve_mutex); + } +} + +static int mipspmu_event_init(struct perf_event *event) +{ + int err = 0; + + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + + default: + return -ENOENT; + } + + if (event->cpu >= nr_cpumask_bits || + (event->cpu >= 0 && !cpu_online(event->cpu))) + return -ENODEV; + + if (!atomic_inc_not_zero(&active_events)) { + mutex_lock(&pmu_reserve_mutex); + if (atomic_read(&active_events) == 0) + err = mipspmu_get_irq(); + + if (!err) + atomic_inc(&active_events); + mutex_unlock(&pmu_reserve_mutex); + } + + if (err) + return err; + + return __hw_perf_event_init(event); +} + +static struct pmu pmu = { + .pmu_enable = mipspmu_enable, + .pmu_disable = mipspmu_disable, + .event_init = mipspmu_event_init, + .add = mipspmu_add, + .del = mipspmu_del, + .start = mipspmu_start, + .stop = mipspmu_stop, + .read = mipspmu_read, +}; + +static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev) +{ +/* + * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for + * event_id. + */ +#ifdef CONFIG_MIPS_MT_SMP + return ((unsigned int)pev->range << 24) | + (pev->cntr_mask & 0xffff00) | + (pev->event_id & 0xff); +#else + return (pev->cntr_mask & 0xffff00) | + (pev->event_id & 0xff); +#endif +} + +static const struct mips_perf_event *mipspmu_map_general_event(int idx) +{ + const struct mips_perf_event *pev; + + pev = ((*mipspmu.general_event_map)[idx].event_id == + UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) : + &(*mipspmu.general_event_map)[idx]); + + return pev; +} + +static const struct mips_perf_event *mipspmu_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + const struct mips_perf_event *pev; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return ERR_PTR(-EINVAL); + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return ERR_PTR(-EINVAL); + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return ERR_PTR(-EINVAL); + + pev = &((*mipspmu.cache_event_map) + [cache_type] + [cache_op] + [cache_result]); + + if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID) + return ERR_PTR(-EOPNOTSUPP); + + return pev; + +} + +static int validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct cpu_hw_events fake_cpuc; + + memset(&fake_cpuc, 0, sizeof(fake_cpuc)); + + if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0) + return -EINVAL; + + list_for_each_entry(sibling, &leader->sibling_list, group_entry) { + if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0) + return -EINVAL; + } + + if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0) + return -EINVAL; + + return 0; +} + +/* This is needed by specific irq handlers in perf_event_*.c */ +static void handle_associated_event(struct cpu_hw_events *cpuc, + int idx, struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc = &event->hw; + + mipspmu_event_update(event, hwc, idx); + data->period = event->hw.last_period; + if (!mipspmu_event_set_period(event, hwc, idx)) + return; + + if (perf_event_overflow(event, data, regs)) + 
mipsxx_pmu_disable_event(idx); +} + + +static int __n_counters(void) +{ + if (!(read_c0_config1() & M_CONFIG1_PC)) + return 0; + if (!(read_c0_perfctrl0() & M_PERFCTL_MORE)) + return 1; + if (!(read_c0_perfctrl1() & M_PERFCTL_MORE)) + return 2; + if (!(read_c0_perfctrl2() & M_PERFCTL_MORE)) + return 3; + + return 4; +} + +static int n_counters(void) +{ + int counters; + + switch (current_cpu_type()) { + case CPU_R10000: + counters = 2; + break; + + case CPU_R12000: + case CPU_R14000: + counters = 4; + break; + + default: + counters = __n_counters(); + } + + return counters; +} + +static void reset_counters(void *arg) +{ + int counters = (int)(long)arg; + switch (counters) { + case 4: + mipsxx_pmu_write_control(3, 0); + mipspmu.write_counter(3, 0); + case 3: + mipsxx_pmu_write_control(2, 0); + mipspmu.write_counter(2, 0); + case 2: + mipsxx_pmu_write_control(1, 0); + mipspmu.write_counter(1, 0); + case 1: + mipsxx_pmu_write_control(0, 0); + mipspmu.write_counter(0, 0); + } +} + +/* 24K/34K/1004K cores can share the same event map. */ +static const struct mips_perf_event mipsxxcore_event_map + [PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID }, + [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T }, + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T }, + [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID }, +}; + +/* 74K core has different branch event code. */ +static const struct mips_perf_event mipsxx74Kcore_event_map + [PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID }, + [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T }, + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T }, + [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID }, +}; + +static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL }, + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL }, + [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL }, +}; + +/* 24K/34K/1004K cores can share the same cache event map. */ +static const struct mips_perf_event mipsxxcore_cache_map + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { + /* + * Like some other architectures (e.g. ARM), the performance + * counters don't differentiate between read and write + * accesses/misses, so this isn't strictly correct, but it's the + * best we can do. Writes and reads get combined. 
+ */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T }, + /* + * Note that MIPS has only "hit" events countable for + * the prefetch operation. + */ + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P }, + [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P }, + [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(BPU)] = { + /* Using the same code for *HW_BRANCH* */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +}; + +/* 74K core has completely different cache event map. */ +static const struct mips_perf_event mipsxx74Kcore_cache_map + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { + /* + * Like some other architectures (e.g. ARM), the performance + * counters don't differentiate between read and write + * accesses/misses, so this isn't strictly correct, but it's the + * best we can do. Writes and reads get combined. 
+ */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T }, + [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T }, + [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T }, + /* + * Note that MIPS has only "hit" events countable for + * the prefetch operation. + */ + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, + [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, + [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(DTLB)] = { + /* 74K core does not have specific DTLB events. */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(BPU)] = { + /* Using the same code for *HW_BRANCH* */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +}; + + +static const struct mips_perf_event octeon_cache_map + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL }, + [C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x30, CNTR_ALL }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x18, CNTR_ALL }, + 
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { 0x19, CNTR_ALL }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(DTLB)] = { + /* + * Only general DTLB misses are counted use the same event for + * read and write. + */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { 0x35, CNTR_ALL }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { 0x35, CNTR_ALL }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { 0x37, CNTR_ALL }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(BPU)] = { + /* Using the same code for *HW_BRANCH* */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +}; + +#ifdef CONFIG_MIPS_MT_SMP +static void check_and_calc_range(struct perf_event *event, + const struct mips_perf_event *pev) +{ + struct hw_perf_event *hwc = &event->hw; + + if (event->cpu >= 0) { + if (pev->range > V) { + /* + * The user selected an event that is processor + * wide, while expecting it to be VPE wide. + */ + hwc->config_base |= M_TC_EN_ALL; + } else { + /* + * FIXME: cpu_data[event->cpu].vpe_id reports 0 + * for both CPUs. + */ + hwc->config_base |= M_PERFCTL_VPEID(event->cpu); + hwc->config_base |= M_TC_EN_VPE; + } + } else + hwc->config_base |= M_TC_EN_ALL; +} +#else +static void check_and_calc_range(struct perf_event *event, + const struct mips_perf_event *pev) +{ +} +#endif + +static int __hw_perf_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + const struct mips_perf_event *pev; + int err; + + /* Returning MIPS event descriptor for generic perf event. */ + if (PERF_TYPE_HARDWARE == event->attr.type) { + if (event->attr.config >= PERF_COUNT_HW_MAX) + return -EINVAL; + pev = mipspmu_map_general_event(event->attr.config); + } else if (PERF_TYPE_HW_CACHE == event->attr.type) { + pev = mipspmu_map_cache_event(event->attr.config); + } else if (PERF_TYPE_RAW == event->attr.type) { + /* We are working on the global raw event. 
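+ * map_raw_event() fills the single static raw_event structure, so
+ * raw_event_mutex is held from here until the result has been
+ * encoded into hwc->event_base further down.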
*/ + mutex_lock(&raw_event_mutex); + pev = mipspmu.map_raw_event(event->attr.config); + } else { + /* The event type is not (yet) supported. */ + return -EOPNOTSUPP; + } + + if (IS_ERR(pev)) { + if (PERF_TYPE_RAW == event->attr.type) + mutex_unlock(&raw_event_mutex); + return PTR_ERR(pev); + } + + /* + * We allow max flexibility on how each individual counter shared + * by the single CPU operates (the mode exclusion and the range). + */ + hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE; + + /* Calculate range bits and validate it. */ + if (num_possible_cpus() > 1) + check_and_calc_range(event, pev); + + hwc->event_base = mipspmu_perf_event_encode(pev); + if (PERF_TYPE_RAW == event->attr.type) + mutex_unlock(&raw_event_mutex); + + if (!attr->exclude_user) + hwc->config_base |= M_PERFCTL_USER; + if (!attr->exclude_kernel) { + hwc->config_base |= M_PERFCTL_KERNEL; + /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */ + hwc->config_base |= M_PERFCTL_EXL; + } + if (!attr->exclude_hv) + hwc->config_base |= M_PERFCTL_SUPERVISOR; + + hwc->config_base &= M_PERFCTL_CONFIG_MASK; + /* + * The event can belong to another cpu. We do not assign a local + * counter for it for now. + */ + hwc->idx = -1; + hwc->config = 0; + + if (!hwc->sample_period) { + hwc->sample_period = mipspmu.max_period; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + err = 0; + if (event->group_leader != event) + err = validate_group(event); + + event->destroy = hw_perf_event_destroy; + + if (err) + event->destroy(event); + + return err; +} + +static void pause_local_counters(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int ctr = mipspmu.num_counters; + unsigned long flags; + + local_irq_save(flags); + do { + ctr--; + cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr); + mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] & + ~M_PERFCTL_COUNT_EVENT_WHENEVER); + } while (ctr > 0); + local_irq_restore(flags); +} + +static void resume_local_counters(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int ctr = mipspmu.num_counters; + + do { + ctr--; + mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]); + } while (ctr > 0); +} + +static int mipsxx_pmu_handle_shared_irq(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct perf_sample_data data; + unsigned int counters = mipspmu.num_counters; + u64 counter; + int handled = IRQ_NONE; + struct pt_regs *regs; + + if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26))) + return handled; + /* + * First we pause the local counters, so that when we are locked + * here, the counters are all paused. When it gets locked due to + * perf_disable(), the timer interrupt handler will be delayed. + * + * See also mipsxx_pmu_start(). + */ + pause_local_counters(); +#ifdef CONFIG_MIPS_MT_SMP + read_lock(&pmuint_rwlock); +#endif + + regs = get_irq_regs(); + + perf_sample_data_init(&data, 0); + + switch (counters) { +#define HANDLE_COUNTER(n) \ + case n + 1: \ + if (test_bit(n, cpuc->used_mask)) { \ + counter = mipspmu.read_counter(n); \ + if (counter & mipspmu.overflow) { \ + handle_associated_event(cpuc, n, &data, regs); \ + handled = IRQ_HANDLED; \ + } \ + } + HANDLE_COUNTER(3) + HANDLE_COUNTER(2) + HANDLE_COUNTER(1) + HANDLE_COUNTER(0) + } + + /* + * Do all the work for the pending perf events. We can do this + * in here because the performance counter interrupt is a regular + * interrupt, not NMI. 
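+	 *
+	 * (The switch above relies on case fall-through: a four-counter PMU
+	 * enters at "case 4" and drops through to HANDLE_COUNTER(0), so the
+	 * counters are checked from the highest index down, and the
+	 * "counter & mipspmu.overflow" test looks at the top bit of the 32-
+	 * or 64-bit counter value selected in init_hw_perf_events() below.)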
+ */ + if (handled == IRQ_HANDLED) + irq_work_run(); + +#ifdef CONFIG_MIPS_MT_SMP + read_unlock(&pmuint_rwlock); +#endif + resume_local_counters(); + return handled; +} + +static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev) +{ + return mipsxx_pmu_handle_shared_irq(); +} + +/* 24K */ +#define IS_BOTH_COUNTERS_24K_EVENT(b) \ + ((b) == 0 || (b) == 1 || (b) == 11) + +/* 34K */ +#define IS_BOTH_COUNTERS_34K_EVENT(b) \ + ((b) == 0 || (b) == 1 || (b) == 11) +#ifdef CONFIG_MIPS_MT_SMP +#define IS_RANGE_P_34K_EVENT(r, b) \ + ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \ + (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \ + (r) == 176 || ((b) >= 50 && (b) <= 55) || \ + ((b) >= 64 && (b) <= 67)) +#define IS_RANGE_V_34K_EVENT(r) ((r) == 47) +#endif + +/* 74K */ +#define IS_BOTH_COUNTERS_74K_EVENT(b) \ + ((b) == 0 || (b) == 1) + +/* 1004K */ +#define IS_BOTH_COUNTERS_1004K_EVENT(b) \ + ((b) == 0 || (b) == 1 || (b) == 11) +#ifdef CONFIG_MIPS_MT_SMP +#define IS_RANGE_P_1004K_EVENT(r, b) \ + ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \ + (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \ + (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \ + (r) == 188 || (b) == 61 || (b) == 62 || \ + ((b) >= 64 && (b) <= 67)) +#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47) +#endif + +/* + * User can use 0-255 raw events, where 0-127 for the events of even + * counters, and 128-255 for odd counters. Note that bit 7 is used to + * indicate the parity. So, for example, when user wants to take the + * Event Num of 15 for odd counters (by referring to the user manual), + * then 128 needs to be added to 15 as the input for the event config, + * i.e., 143 (0x8F) to be used. + */ +static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) +{ + unsigned int raw_id = config & 0xff; + unsigned int base_id = raw_id & 0x7f; + + raw_event.event_id = base_id; + + switch (current_cpu_type()) { + case CPU_24K: + if (IS_BOTH_COUNTERS_24K_EVENT(base_id)) + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; + else + raw_event.cntr_mask = + raw_id > 127 ? CNTR_ODD : CNTR_EVEN; +#ifdef CONFIG_MIPS_MT_SMP + /* + * This is actually doing nothing. Non-multithreading + * CPUs will not check and calculate the range. + */ + raw_event.range = P; +#endif + break; + case CPU_34K: + if (IS_BOTH_COUNTERS_34K_EVENT(base_id)) + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; + else + raw_event.cntr_mask = + raw_id > 127 ? CNTR_ODD : CNTR_EVEN; +#ifdef CONFIG_MIPS_MT_SMP + if (IS_RANGE_P_34K_EVENT(raw_id, base_id)) + raw_event.range = P; + else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id))) + raw_event.range = V; + else + raw_event.range = T; +#endif + break; + case CPU_74K: + if (IS_BOTH_COUNTERS_74K_EVENT(base_id)) + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; + else + raw_event.cntr_mask = + raw_id > 127 ? CNTR_ODD : CNTR_EVEN; +#ifdef CONFIG_MIPS_MT_SMP + raw_event.range = P; +#endif + break; + case CPU_1004K: + if (IS_BOTH_COUNTERS_1004K_EVENT(base_id)) + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; + else + raw_event.cntr_mask = + raw_id > 127 ? 
CNTR_ODD : CNTR_EVEN; +#ifdef CONFIG_MIPS_MT_SMP + if (IS_RANGE_P_1004K_EVENT(raw_id, base_id)) + raw_event.range = P; + else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id))) + raw_event.range = V; + else + raw_event.range = T; +#endif + break; + } + + return &raw_event; +} + +static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config) +{ + unsigned int raw_id = config & 0xff; + unsigned int base_id = raw_id & 0x7f; + + + raw_event.cntr_mask = CNTR_ALL; + raw_event.event_id = base_id; + + if (current_cpu_type() == CPU_CAVIUM_OCTEON2) { + if (base_id > 0x42) + return ERR_PTR(-EOPNOTSUPP); + } else { + if (base_id > 0x3a) + return ERR_PTR(-EOPNOTSUPP); + } + + switch (base_id) { + case 0x00: + case 0x0f: + case 0x1e: + case 0x1f: + case 0x2f: + case 0x34: + case 0x3b ... 0x3f: + return ERR_PTR(-EOPNOTSUPP); + default: + break; + } + + return &raw_event; +} + +static int __init +init_hw_perf_events(void) +{ + int counters, irq; + int counter_bits; + + pr_info("Performance counters: "); + + counters = n_counters(); + if (counters == 0) { + pr_cont("No available PMU.\n"); + return -ENODEV; + } + +#ifdef CONFIG_MIPS_MT_SMP + cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19); + if (!cpu_has_mipsmt_pertccounters) + counters = counters_total_to_per_cpu(counters); +#endif + +#ifdef MSC01E_INT_BASE + if (cpu_has_veic) { + /* + * Using platform specific interrupt controller defines. + */ + irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; + } else { +#endif + if (cp0_perfcount_irq >= 0) + irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; + else + irq = -1; +#ifdef MSC01E_INT_BASE + } +#endif + + mipspmu.map_raw_event = mipsxx_pmu_map_raw_event; + + switch (current_cpu_type()) { + case CPU_24K: + mipspmu.name = "mips/24K"; + mipspmu.general_event_map = &mipsxxcore_event_map; + mipspmu.cache_event_map = &mipsxxcore_cache_map; + break; + case CPU_34K: + mipspmu.name = "mips/34K"; + mipspmu.general_event_map = &mipsxxcore_event_map; + mipspmu.cache_event_map = &mipsxxcore_cache_map; + break; + case CPU_74K: + mipspmu.name = "mips/74K"; + mipspmu.general_event_map = &mipsxx74Kcore_event_map; + mipspmu.cache_event_map = &mipsxx74Kcore_cache_map; + break; + case CPU_1004K: + mipspmu.name = "mips/1004K"; + mipspmu.general_event_map = &mipsxxcore_event_map; + mipspmu.cache_event_map = &mipsxxcore_cache_map; + break; + case CPU_CAVIUM_OCTEON: + case CPU_CAVIUM_OCTEON_PLUS: + case CPU_CAVIUM_OCTEON2: + mipspmu.name = "octeon"; + mipspmu.general_event_map = &octeon_event_map; + mipspmu.cache_event_map = &octeon_cache_map; + mipspmu.map_raw_event = octeon_pmu_map_raw_event; + break; + default: + pr_cont("Either hardware does not support performance " + "counters, or not yet implemented.\n"); + return -ENODEV; + } + + mipspmu.num_counters = counters; + mipspmu.irq = irq; + + if (read_c0_perfctrl0() & M_PERFCTL_WIDE) { + mipspmu.max_period = (1ULL << 63) - 1; + mipspmu.valid_count = (1ULL << 63) - 1; + mipspmu.overflow = 1ULL << 63; + mipspmu.read_counter = mipsxx_pmu_read_counter_64; + mipspmu.write_counter = mipsxx_pmu_write_counter_64; + counter_bits = 64; + } else { + mipspmu.max_period = (1ULL << 31) - 1; + mipspmu.valid_count = (1ULL << 31) - 1; + mipspmu.overflow = 1ULL << 31; + mipspmu.read_counter = mipsxx_pmu_read_counter; + mipspmu.write_counter = mipsxx_pmu_write_counter; + counter_bits = 32; + } + + on_each_cpu(reset_counters, (void *)(long)counters, 1); + + pr_cont("%s PMU enabled, %d %d-bit counters available to each " + "CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq, + irq < 0 
? " (share with timer interrupt)" : ""); + + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + + return 0; +} +early_initcall(init_hw_perf_events); diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c new file mode 100644 index 00000000..f8b2c592 --- /dev/null +++ b/arch/mips/kernel/proc.c @@ -0,0 +1,112 @@ +/* + * Copyright (C) 1995, 1996, 2001 Ralf Baechle + * Copyright (C) 2001, 2004 MIPS Technologies, Inc. + * Copyright (C) 2004 Maciej W. Rozycki + */ +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <asm/bootinfo.h> +#include <asm/cpu.h> +#include <asm/cpu-features.h> +#include <asm/mipsregs.h> +#include <asm/processor.h> +#include <asm/mips_machine.h> + +unsigned int vced_count, vcei_count; + +static int show_cpuinfo(struct seq_file *m, void *v) +{ + unsigned long n = (unsigned long) v - 1; + unsigned int version = cpu_data[n].processor_id; + unsigned int fp_vers = cpu_data[n].fpu_id; + char fmt [64]; + int i; + +#ifdef CONFIG_SMP + if (!cpu_online(n)) + return 0; +#endif + + /* + * For the first processor also print the system type + */ + if (n == 0) { + seq_printf(m, "system type\t\t: %s\n", get_system_type()); + if (mips_get_machine_name()) + seq_printf(m, "machine\t\t\t: %s\n", + mips_get_machine_name()); + } + + seq_printf(m, "processor\t\t: %ld\n", n); + sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n", + cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : ""); + seq_printf(m, fmt, __cpu_name[n], + (version >> 4) & 0x0f, version & 0x0f, + (fp_vers >> 4) & 0x0f, fp_vers & 0x0f); + seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", + cpu_data[n].udelay_val / (500000/HZ), + (cpu_data[n].udelay_val / (5000/HZ)) % 100); + seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no"); + seq_printf(m, "microsecond timers\t: %s\n", + cpu_has_counter ? "yes" : "no"); + seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize); + seq_printf(m, "extra interrupt vector\t: %s\n", + cpu_has_divec ? "yes" : "no"); + seq_printf(m, "hardware watchpoint\t: %s", + cpu_has_watch ? "yes, " : "no\n"); + if (cpu_has_watch) { + seq_printf(m, "count: %d, address/irw mask: [", + cpu_data[n].watch_reg_count); + for (i = 0; i < cpu_data[n].watch_reg_count; i++) + seq_printf(m, "%s0x%04x", i ? ", " : "" , + cpu_data[n].watch_reg_masks[i]); + seq_printf(m, "]\n"); + } + seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n", + cpu_has_mips16 ? " mips16" : "", + cpu_has_mdmx ? " mdmx" : "", + cpu_has_mips3d ? " mips3d" : "", + cpu_has_smartmips ? " smartmips" : "", + cpu_has_dsp ? " dsp" : "", + cpu_has_mipsmt ? " mt" : "" + ); + seq_printf(m, "shadow register sets\t: %d\n", + cpu_data[n].srsets); + seq_printf(m, "kscratch registers\t: %d\n", + hweight8(cpu_data[n].kscratch_mask)); + seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); + + sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", + cpu_has_vce ? "%u" : "not available"); + seq_printf(m, fmt, 'D', vced_count); + seq_printf(m, fmt, 'I', vcei_count); + seq_printf(m, "\n"); + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + unsigned long i = *pos; + + return i < NR_CPUS ? 
(void *) (i + 1) : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return c_start(m, pos); +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c new file mode 100644 index 00000000..e9a5fd72 --- /dev/null +++ b/arch/mips/kernel/process.c @@ -0,0 +1,494 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others. + * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2004 Thiemo Seufer + */ +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/tick.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/stddef.h> +#include <linux/unistd.h> +#include <linux/export.h> +#include <linux/ptrace.h> +#include <linux/mman.h> +#include <linux/personality.h> +#include <linux/sys.h> +#include <linux/user.h> +#include <linux/init.h> +#include <linux/completion.h> +#include <linux/kallsyms.h> +#include <linux/random.h> + +#include <asm/asm.h> +#include <asm/bootinfo.h> +#include <asm/cpu.h> +#include <asm/dsp.h> +#include <asm/fpu.h> +#include <asm/pgtable.h> +#include <asm/mipsregs.h> +#include <asm/processor.h> +#include <asm/uaccess.h> +#include <asm/io.h> +#include <asm/elf.h> +#include <asm/isadep.h> +#include <asm/inst.h> +#include <asm/stacktrace.h> + +/* + * The idle thread. There's no useful work to be done, so just try to conserve + * power and have a low exit latency (ie sit in a loop waiting for somebody to + * say that they'd like to reschedule) + */ +void __noreturn cpu_idle(void) +{ + int cpu; + + /* CPU is going idle. */ + cpu = smp_processor_id(); + + /* endless idle loop with no priority at all */ + while (1) { + tick_nohz_idle_enter(); + rcu_idle_enter(); + while (!need_resched() && cpu_online(cpu)) { +#ifdef CONFIG_MIPS_MT_SMTC + extern void smtc_idle_loop_hook(void); + + smtc_idle_loop_hook(); +#endif + + if (cpu_wait) { + /* Don't trace irqs off for idle */ + stop_critical_timings(); + (*cpu_wait)(); + start_critical_timings(); + } + } +#ifdef CONFIG_HOTPLUG_CPU + if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) && + (system_state == SYSTEM_RUNNING || + system_state == SYSTEM_BOOTING)) + play_dead(); +#endif + rcu_idle_exit(); + tick_nohz_idle_exit(); + schedule_preempt_disabled(); + } +} + +asmlinkage void ret_from_fork(void); + +void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) +{ + unsigned long status; + + /* New thread loses kernel privileges. */ + status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK); +#ifdef CONFIG_64BIT + status |= test_thread_flag(TIF_32BIT_REGS) ? 
0 : ST0_FR; +#endif + status |= KU_USER; + regs->cp0_status = status; + clear_used_math(); + clear_fpu_owner(); + if (cpu_has_dsp) + __init_dsp(); + regs->cp0_epc = pc; + regs->regs[29] = sp; +} + +void exit_thread(void) +{ +} + +void flush_thread(void) +{ +} + +int copy_thread(unsigned long clone_flags, unsigned long usp, + unsigned long unused, struct task_struct *p, struct pt_regs *regs) +{ + struct thread_info *ti = task_thread_info(p); + struct pt_regs *childregs; + unsigned long childksp; + p->set_child_tid = p->clear_child_tid = NULL; + + childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; + + preempt_disable(); + + if (is_fpu_owner()) + save_fp(p); + + if (cpu_has_dsp) + save_dsp(p); + + preempt_enable(); + + /* set up new TSS. */ + childregs = (struct pt_regs *) childksp - 1; + /* Put the stack after the struct pt_regs. */ + childksp = (unsigned long) childregs; + *childregs = *regs; + childregs->regs[7] = 0; /* Clear error flag */ + + childregs->regs[2] = 0; /* Child gets zero as return value */ + + if (childregs->cp0_status & ST0_CU0) { + childregs->regs[28] = (unsigned long) ti; + childregs->regs[29] = childksp; + ti->addr_limit = KERNEL_DS; + } else { + childregs->regs[29] = usp; + ti->addr_limit = USER_DS; + } + p->thread.reg29 = (unsigned long) childregs; + p->thread.reg31 = (unsigned long) ret_from_fork; + + /* + * New tasks lose permission to use the fpu. This accelerates context + * switching for most programs since they don't use the fpu. + */ + p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); + childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); + +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC restores TCStatus after Status, and the CU bits + * are aliased there. + */ + childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); +#endif + clear_tsk_thread_flag(p, TIF_USEDFPU); + +#ifdef CONFIG_MIPS_MT_FPAFF + clear_tsk_thread_flag(p, TIF_FPUBOUND); +#endif /* CONFIG_MIPS_MT_FPAFF */ + + if (clone_flags & CLONE_SETTLS) + ti->tp_value = regs->regs[7]; + + return 0; +} + +/* Fill in the fpu structure for a core dump.. 
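+ * Returning 1 tells the core-dump code that the FPU register set is
+ * valid and should be included in the core file; this implementation
+ * always reports it, even for tasks that never touched the FPU.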
*/ +int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r) +{ + memcpy(r, ¤t->thread.fpu, sizeof(current->thread.fpu)); + + return 1; +} + +void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs) +{ + int i; + + for (i = 0; i < EF_R0; i++) + gp[i] = 0; + gp[EF_R0] = 0; + for (i = 1; i <= 31; i++) + gp[EF_R0 + i] = regs->regs[i]; + gp[EF_R26] = 0; + gp[EF_R27] = 0; + gp[EF_LO] = regs->lo; + gp[EF_HI] = regs->hi; + gp[EF_CP0_EPC] = regs->cp0_epc; + gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr; + gp[EF_CP0_STATUS] = regs->cp0_status; + gp[EF_CP0_CAUSE] = regs->cp0_cause; +#ifdef EF_UNUSED0 + gp[EF_UNUSED0] = 0; +#endif +} + +int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) +{ + elf_dump_regs(*regs, task_pt_regs(tsk)); + return 1; +} + +int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr) +{ + memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu)); + + return 1; +} + +/* + * Create a kernel thread + */ +static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *)) +{ + do_exit(fn(arg)); +} + +long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) +{ + struct pt_regs regs; + + memset(®s, 0, sizeof(regs)); + + regs.regs[4] = (unsigned long) arg; + regs.regs[5] = (unsigned long) fn; + regs.cp0_epc = (unsigned long) kernel_thread_helper; + regs.cp0_status = read_c0_status(); +#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) + regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) | + ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2); +#else + regs.cp0_status |= ST0_EXL; +#endif + + /* Ok, create the new process.. */ + return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); +} + +/* + * + */ +struct mips_frame_info { + void *func; + unsigned long func_size; + int frame_size; + int pc_offset; +}; + +static inline int is_ra_save_ins(union mips_instruction *ip) +{ + /* sw / sd $ra, offset($sp) */ + return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && + ip->i_format.rs == 29 && + ip->i_format.rt == 31; +} + +static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) +{ + if (ip->j_format.opcode == jal_op) + return 1; + if (ip->r_format.opcode != spec_op) + return 0; + return ip->r_format.func == jalr_op || ip->r_format.func == jr_op; +} + +static inline int is_sp_move_ins(union mips_instruction *ip) +{ + /* addiu/daddiu sp,sp,-imm */ + if (ip->i_format.rs != 29 || ip->i_format.rt != 29) + return 0; + if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) + return 1; + return 0; +} + +static int get_frame_info(struct mips_frame_info *info) +{ + union mips_instruction *ip = info->func; + unsigned max_insns = info->func_size / sizeof(union mips_instruction); + unsigned i; + + info->pc_offset = -1; + info->frame_size = 0; + + if (!ip) + goto err; + + if (max_insns == 0) + max_insns = 128U; /* unknown function size */ + max_insns = min(128U, max_insns); + + for (i = 0; i < max_insns; i++, ip++) { + + if (is_jal_jalr_jr_ins(ip)) + break; + if (!info->frame_size) { + if (is_sp_move_ins(ip)) + info->frame_size = - ip->i_format.simmediate; + continue; + } + if (info->pc_offset == -1 && is_ra_save_ins(ip)) { + info->pc_offset = + ip->i_format.simmediate / sizeof(long); + break; + } + } + if (info->frame_size && info->pc_offset >= 0) /* nested */ + return 0; + if (info->pc_offset < 0) /* leaf */ + return 1; + /* prologue seems boggus... 
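+ *
+ * For reference, the scan above recognises the conventional prologue
+ * shape (offsets are only an example):
+ *
+ *	addiu	sp, sp, -32	->  is_sp_move_ins(), frame_size = 32
+ *	sw	ra, 28(sp)	->  is_ra_save_ins(), pc_offset = 28 / sizeof(long)
+ *
+ * and the return value is 0 when both were found, 1 for an apparent
+ * leaf function and -1 when nothing usable was recognised.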
*/ +err: + return -1; +} + +static struct mips_frame_info schedule_mfi __read_mostly; + +static int __init frame_info_init(void) +{ + unsigned long size = 0; +#ifdef CONFIG_KALLSYMS + unsigned long ofs; + + kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs); +#endif + schedule_mfi.func = schedule; + schedule_mfi.func_size = size; + + get_frame_info(&schedule_mfi); + + /* + * Without schedule() frame info, result given by + * thread_saved_pc() and get_wchan() are not reliable. + */ + if (schedule_mfi.pc_offset < 0) + printk("Can't analyze schedule() prologue at %p\n", schedule); + + return 0; +} + +arch_initcall(frame_info_init); + +/* + * Return saved PC of a blocked thread. + */ +unsigned long thread_saved_pc(struct task_struct *tsk) +{ + struct thread_struct *t = &tsk->thread; + + /* New born processes are a special case */ + if (t->reg31 == (unsigned long) ret_from_fork) + return t->reg31; + if (schedule_mfi.pc_offset < 0) + return 0; + return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset]; +} + + +#ifdef CONFIG_KALLSYMS +/* generic stack unwinding function */ +unsigned long notrace unwind_stack_by_address(unsigned long stack_page, + unsigned long *sp, + unsigned long pc, + unsigned long *ra) +{ + struct mips_frame_info info; + unsigned long size, ofs; + int leaf; + extern void ret_from_irq(void); + extern void ret_from_exception(void); + + if (!stack_page) + return 0; + + /* + * If we reached the bottom of interrupt context, + * return saved pc in pt_regs. + */ + if (pc == (unsigned long)ret_from_irq || + pc == (unsigned long)ret_from_exception) { + struct pt_regs *regs; + if (*sp >= stack_page && + *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) { + regs = (struct pt_regs *)*sp; + pc = regs->cp0_epc; + if (__kernel_text_address(pc)) { + *sp = regs->regs[29]; + *ra = regs->regs[31]; + return pc; + } + } + return 0; + } + if (!kallsyms_lookup_size_offset(pc, &size, &ofs)) + return 0; + /* + * Return ra if an exception occurred at the first instruction + */ + if (unlikely(ofs == 0)) { + pc = *ra; + *ra = 0; + return pc; + } + + info.func = (void *)(pc - ofs); + info.func_size = ofs; /* analyze from start to ofs */ + leaf = get_frame_info(&info); + if (leaf < 0) + return 0; + + if (*sp < stack_page || + *sp + info.frame_size > stack_page + THREAD_SIZE - 32) + return 0; + + if (leaf) + /* + * For some extreme cases, get_frame_info() can + * consider wrongly a nested function as a leaf + * one. In that cases avoid to return always the + * same value. + */ + pc = pc != *ra ? *ra : 0; + else + pc = ((unsigned long *)(*sp))[info.pc_offset]; + + *sp += info.frame_size; + *ra = 0; + return __kernel_text_address(pc) ? pc : 0; +} +EXPORT_SYMBOL(unwind_stack_by_address); + +/* used by show_backtrace() */ +unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, + unsigned long pc, unsigned long *ra) +{ + unsigned long stack_page = (unsigned long)task_stack_page(task); + return unwind_stack_by_address(stack_page, sp, pc, ra); +} +#endif + +/* + * get_wchan - a maintenance nightmare^W^Wpain in the ass ... 
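+ *
+ * It leans on the unwinder above: starting from the saved PC it keeps
+ * calling unwind_stack() until the PC leaves the scheduler.  The same
+ * loop is what a backtrace printer drives; a rough caller-side sketch
+ * (the variable setup is illustrative, not taken from this file):
+ *
+ *	unsigned long sp = regs->regs[29], ra = regs->regs[31];
+ *	unsigned long pc = regs->cp0_epc;
+ *
+ *	do {
+ *		print_ip_sym(pc);
+ *		pc = unwind_stack(current, &sp, pc, &ra);
+ *	} while (pc);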
+ */ +unsigned long get_wchan(struct task_struct *task) +{ + unsigned long pc = 0; +#ifdef CONFIG_KALLSYMS + unsigned long sp; + unsigned long ra = 0; +#endif + + if (!task || task == current || task->state == TASK_RUNNING) + goto out; + if (!task_stack_page(task)) + goto out; + + pc = thread_saved_pc(task); + +#ifdef CONFIG_KALLSYMS + sp = task->thread.reg29 + schedule_mfi.frame_size; + + while (in_sched_functions(pc)) + pc = unwind_stack(task, &sp, pc, &ra); +#endif + +out: + return pc; +} + +/* + * Don't forget that the stack pointer must be aligned on a 8 bytes + * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. + */ +unsigned long arch_align_stack(unsigned long sp) +{ + if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) + sp -= get_random_int() & ~PAGE_MASK; + + return sp & ALMASK; +} diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c new file mode 100644 index 00000000..558b5395 --- /dev/null +++ b/arch/mips/kernel/prom.c @@ -0,0 +1,97 @@ +/* + * MIPS support for CONFIG_OF device tree support + * + * Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/init.h> +#include <linux/export.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/bootmem.h> +#include <linux/initrd.h> +#include <linux/debugfs.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> + +#include <asm/page.h> +#include <asm/prom.h> + +int __init early_init_dt_scan_memory_arch(unsigned long node, + const char *uname, int depth, + void *data) +{ + return early_init_dt_scan_memory(node, uname, depth, data); +} + +void __init early_init_dt_add_memory_arch(u64 base, u64 size) +{ + return add_memory_region(base, size, BOOT_MEM_RAM); +} + +int __init reserve_mem_mach(unsigned long addr, unsigned long size) +{ + return reserve_bootmem(addr, size, BOOTMEM_DEFAULT); +} + +void __init free_mem_mach(unsigned long addr, unsigned long size) +{ + return free_bootmem(addr, size); +} + +void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) +{ + return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); +} + +#ifdef CONFIG_BLK_DEV_INITRD +void __init early_init_dt_setup_initrd_arch(unsigned long start, + unsigned long end) +{ + initrd_start = (unsigned long)__va(start); + initrd_end = (unsigned long)__va(end); + initrd_below_start_ok = 1; +} +#endif + +void __init early_init_devtree(void *params) +{ + /* Setup flat device-tree pointer */ + initial_boot_params = params; + + /* Retrieve various informations from the /chosen node of the + * device-tree, including the platform type, initrd location and + * size, and more ... 
+ */ + of_scan_flat_dt(early_init_dt_scan_chosen, arcs_cmdline); + + + /* Scan memory nodes */ + of_scan_flat_dt(early_init_dt_scan_root, NULL); + of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL); +} + +void __init device_tree_init(void) +{ + unsigned long base, size; + + if (!initial_boot_params) + return; + + base = virt_to_phys((void *)initial_boot_params); + size = be32_to_cpu(initial_boot_params->totalsize); + + /* Before we do anything, lets reserve the dt blob */ + reserve_mem_mach(base, size); + + unflatten_device_tree(); + + /* free the space reserved for the dt blob */ + free_mem_mach(base, size); +} diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c new file mode 100644 index 00000000..7c24c297 --- /dev/null +++ b/arch/mips/kernel/ptrace.c @@ -0,0 +1,595 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 Ross Biro + * Copyright (C) Linus Torvalds + * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle + * Copyright (C) 1996 David S. Miller + * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 1999 MIPS Technologies, Inc. + * Copyright (C) 2000 Ulf Carlsson + * + * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit + * binaries. + */ +#include <linux/compiler.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/errno.h> +#include <linux/ptrace.h> +#include <linux/smp.h> +#include <linux/user.h> +#include <linux/security.h> +#include <linux/audit.h> +#include <linux/seccomp.h> + +#include <asm/byteorder.h> +#include <asm/cpu.h> +#include <asm/dsp.h> +#include <asm/fpu.h> +#include <asm/mipsregs.h> +#include <asm/mipsmtregs.h> +#include <asm/pgtable.h> +#include <asm/page.h> +#include <asm/uaccess.h> +#include <asm/bootinfo.h> +#include <asm/reg.h> + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure single step bits etc are not set. + */ +void ptrace_disable(struct task_struct *child) +{ + /* Don't load the watchpoint registers for the ex-child. */ + clear_tsk_thread_flag(child, TIF_LOAD_WATCH); +} + +/* + * Read a general register set. We always use the 64-bit format, even + * for 32-bit kernels and for 32-bit processes on a 64-bit kernel. + * Registers are sign extended to fill the available space. + */ +int ptrace_getregs(struct task_struct *child, __s64 __user *data) +{ + struct pt_regs *regs; + int i; + + if (!access_ok(VERIFY_WRITE, data, 38 * 8)) + return -EIO; + + regs = task_pt_regs(child); + + for (i = 0; i < 32; i++) + __put_user((long)regs->regs[i], data + i); + __put_user((long)regs->lo, data + EF_LO - EF_R0); + __put_user((long)regs->hi, data + EF_HI - EF_R0); + __put_user((long)regs->cp0_epc, data + EF_CP0_EPC - EF_R0); + __put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0); + __put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0); + __put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0); + + return 0; +} + +/* + * Write a general register set. As for PTRACE_GETREGS, we always use + * the 64-bit format. On a 32-bit kernel only the lower order half + * (according to endianness) will be used. 
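+ *
+ * Seen from user space both requests operate on the same buffer of 38
+ * 64-bit slots.  A read-side sketch (the layout follows
+ * ptrace_getregs() above):
+ *
+ *	unsigned long long gregs[38];
+ *
+ *	ptrace(PTRACE_GETREGS, pid, NULL, gregs);
+ *	    gregs[0..31] are the GPRs, followed by lo, hi, cp0_epc,
+ *	    cp0_badvaddr, cp0_status and cp0_cause.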
+ */ +int ptrace_setregs(struct task_struct *child, __s64 __user *data) +{ + struct pt_regs *regs; + int i; + + if (!access_ok(VERIFY_READ, data, 38 * 8)) + return -EIO; + + regs = task_pt_regs(child); + + for (i = 0; i < 32; i++) + __get_user(regs->regs[i], data + i); + __get_user(regs->lo, data + EF_LO - EF_R0); + __get_user(regs->hi, data + EF_HI - EF_R0); + __get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0); + + /* badvaddr, status, and cause may not be written. */ + + return 0; +} + +int ptrace_getfpregs(struct task_struct *child, __u32 __user *data) +{ + int i; + unsigned int tmp; + + if (!access_ok(VERIFY_WRITE, data, 33 * 8)) + return -EIO; + + if (tsk_used_math(child)) { + fpureg_t *fregs = get_fpu_regs(child); + for (i = 0; i < 32; i++) + __put_user(fregs[i], i + (__u64 __user *) data); + } else { + for (i = 0; i < 32; i++) + __put_user((__u64) -1, i + (__u64 __user *) data); + } + + __put_user(child->thread.fpu.fcr31, data + 64); + + preempt_disable(); + if (cpu_has_fpu) { + unsigned int flags; + + if (cpu_has_mipsmt) { + unsigned int vpflags = dvpe(); + flags = read_c0_status(); + __enable_fpu(); + __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp)); + write_c0_status(flags); + evpe(vpflags); + } else { + flags = read_c0_status(); + __enable_fpu(); + __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp)); + write_c0_status(flags); + } + } else { + tmp = 0; + } + preempt_enable(); + __put_user(tmp, data + 65); + + return 0; +} + +int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) +{ + fpureg_t *fregs; + int i; + + if (!access_ok(VERIFY_READ, data, 33 * 8)) + return -EIO; + + fregs = get_fpu_regs(child); + + for (i = 0; i < 32; i++) + __get_user(fregs[i], i + (__u64 __user *) data); + + __get_user(child->thread.fpu.fcr31, data + 64); + + /* FIR may not be written. */ + + return 0; +} + +int ptrace_get_watch_regs(struct task_struct *child, + struct pt_watch_regs __user *addr) +{ + enum pt_watch_style style; + int i; + + if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0) + return -EIO; + if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs))) + return -EIO; + +#ifdef CONFIG_32BIT + style = pt_watch_style_mips32; +#define WATCH_STYLE mips32 +#else + style = pt_watch_style_mips64; +#define WATCH_STYLE mips64 +#endif + + __put_user(style, &addr->style); + __put_user(current_cpu_data.watch_reg_use_cnt, + &addr->WATCH_STYLE.num_valid); + for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { + __put_user(child->thread.watch.mips3264.watchlo[i], + &addr->WATCH_STYLE.watchlo[i]); + __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff, + &addr->WATCH_STYLE.watchhi[i]); + __put_user(current_cpu_data.watch_reg_masks[i], + &addr->WATCH_STYLE.watch_masks[i]); + } + for (; i < 8; i++) { + __put_user(0, &addr->WATCH_STYLE.watchlo[i]); + __put_user(0, &addr->WATCH_STYLE.watchhi[i]); + __put_user(0, &addr->WATCH_STYLE.watch_masks[i]); + } + + return 0; +} + +int ptrace_set_watch_regs(struct task_struct *child, + struct pt_watch_regs __user *addr) +{ + int i; + int watch_active = 0; + unsigned long lt[NUM_WATCH_REGS]; + u16 ht[NUM_WATCH_REGS]; + + if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0) + return -EIO; + if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs))) + return -EIO; + /* Check the values. 
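+	 * watchlo must point into the user address range and watchhi may
+	 * only carry the 0xff8 address-mask bits, otherwise the request
+	 * fails with -EINVAL.  A matching user-space request looks roughly
+	 * like this (the mips32 field names are illustrative, taken from
+	 * the user view of struct pt_watch_regs):
+	 *
+	 *	struct pt_watch_regs wr = { 0 };
+	 *
+	 *	wr.mips32.watchlo[0] = (watch_addr & ~7ul) | 1;    bit 0: trap on stores
+	 *	wr.mips32.watchhi[0] = 0;
+	 *	ptrace(PTRACE_SET_WATCH_REGS, pid, &wr, NULL);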
*/ + for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { + __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]); +#ifdef CONFIG_32BIT + if (lt[i] & __UA_LIMIT) + return -EINVAL; +#else + if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) { + if (lt[i] & 0xffffffff80000000UL) + return -EINVAL; + } else { + if (lt[i] & __UA_LIMIT) + return -EINVAL; + } +#endif + __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]); + if (ht[i] & ~0xff8) + return -EINVAL; + } + /* Install them. */ + for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { + if (lt[i] & 7) + watch_active = 1; + child->thread.watch.mips3264.watchlo[i] = lt[i]; + /* Set the G bit. */ + child->thread.watch.mips3264.watchhi[i] = ht[i]; + } + + if (watch_active) + set_tsk_thread_flag(child, TIF_LOAD_WATCH); + else + clear_tsk_thread_flag(child, TIF_LOAD_WATCH); + + return 0; +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + int ret; + void __user *addrp = (void __user *) addr; + void __user *datavp = (void __user *) data; + unsigned long __user *datalp = (void __user *) data; + + switch (request) { + /* when I and D space are separate, these will need to be fixed. */ + case PTRACE_PEEKTEXT: /* read word at location addr. */ + case PTRACE_PEEKDATA: + ret = generic_ptrace_peekdata(child, addr, data); + break; + + /* Read the word at location addr in the USER area. */ + case PTRACE_PEEKUSR: { + struct pt_regs *regs; + unsigned long tmp = 0; + + regs = task_pt_regs(child); + ret = 0; /* Default return value. */ + + switch (addr) { + case 0 ... 31: + tmp = regs->regs[addr]; + break; + case FPR_BASE ... FPR_BASE + 31: + if (tsk_used_math(child)) { + fpureg_t *fregs = get_fpu_regs(child); + +#ifdef CONFIG_32BIT + /* + * The odd registers are actually the high + * order bits of the values stored in the even + * registers - unless we're using r2k_switch.S. + */ + if (addr & 1) + tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32); + else + tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff); +#endif +#ifdef CONFIG_64BIT + tmp = fregs[addr - FPR_BASE]; +#endif + } else { + tmp = -1; /* FP not yet used */ + } + break; + case PC: + tmp = regs->cp0_epc; + break; + case CAUSE: + tmp = regs->cp0_cause; + break; + case BADVADDR: + tmp = regs->cp0_badvaddr; + break; + case MMHI: + tmp = regs->hi; + break; + case MMLO: + tmp = regs->lo; + break; +#ifdef CONFIG_CPU_HAS_SMARTMIPS + case ACX: + tmp = regs->acx; + break; +#endif + case FPC_CSR: + tmp = child->thread.fpu.fcr31; + break; + case FPC_EIR: { /* implementation / version register */ + unsigned int flags; +#ifdef CONFIG_MIPS_MT_SMTC + unsigned long irqflags; + unsigned int mtflags; +#endif /* CONFIG_MIPS_MT_SMTC */ + + preempt_disable(); + if (!cpu_has_fpu) { + preempt_enable(); + break; + } + +#ifdef CONFIG_MIPS_MT_SMTC + /* Read-modify-write of Status must be atomic */ + local_irq_save(irqflags); + mtflags = dmt(); +#endif /* CONFIG_MIPS_MT_SMTC */ + if (cpu_has_mipsmt) { + unsigned int vpflags = dvpe(); + flags = read_c0_status(); + __enable_fpu(); + __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); + write_c0_status(flags); + evpe(vpflags); + } else { + flags = read_c0_status(); + __enable_fpu(); + __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); + write_c0_status(flags); + } +#ifdef CONFIG_MIPS_MT_SMTC + emt(mtflags); + local_irq_restore(irqflags); +#endif /* CONFIG_MIPS_MT_SMTC */ + preempt_enable(); + break; + } + case DSP_BASE ... 
DSP_BASE + 5: { + dspreg_t *dregs; + + if (!cpu_has_dsp) { + tmp = 0; + ret = -EIO; + goto out; + } + dregs = __get_dsp_regs(child); + tmp = (unsigned long) (dregs[addr - DSP_BASE]); + break; + } + case DSP_CONTROL: + if (!cpu_has_dsp) { + tmp = 0; + ret = -EIO; + goto out; + } + tmp = child->thread.dsp.dspcontrol; + break; + default: + tmp = 0; + ret = -EIO; + goto out; + } + ret = put_user(tmp, datalp); + break; + } + + /* when I and D space are separate, this will have to be fixed. */ + case PTRACE_POKETEXT: /* write the word at location addr. */ + case PTRACE_POKEDATA: + ret = generic_ptrace_pokedata(child, addr, data); + break; + + case PTRACE_POKEUSR: { + struct pt_regs *regs; + ret = 0; + regs = task_pt_regs(child); + + switch (addr) { + case 0 ... 31: + regs->regs[addr] = data; + break; + case FPR_BASE ... FPR_BASE + 31: { + fpureg_t *fregs = get_fpu_regs(child); + + if (!tsk_used_math(child)) { + /* FP not yet used */ + memset(&child->thread.fpu, ~0, + sizeof(child->thread.fpu)); + child->thread.fpu.fcr31 = 0; + } +#ifdef CONFIG_32BIT + /* + * The odd registers are actually the high order bits + * of the values stored in the even registers - unless + * we're using r2k_switch.S. + */ + if (addr & 1) { + fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff; + fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32; + } else { + fregs[addr - FPR_BASE] &= ~0xffffffffLL; + fregs[addr - FPR_BASE] |= data; + } +#endif +#ifdef CONFIG_64BIT + fregs[addr - FPR_BASE] = data; +#endif + break; + } + case PC: + regs->cp0_epc = data; + break; + case MMHI: + regs->hi = data; + break; + case MMLO: + regs->lo = data; + break; +#ifdef CONFIG_CPU_HAS_SMARTMIPS + case ACX: + regs->acx = data; + break; +#endif + case FPC_CSR: + child->thread.fpu.fcr31 = data; + break; + case DSP_BASE ... DSP_BASE + 5: { + dspreg_t *dregs; + + if (!cpu_has_dsp) { + ret = -EIO; + break; + } + + dregs = __get_dsp_regs(child); + dregs[addr - DSP_BASE] = data; + break; + } + case DSP_CONTROL: + if (!cpu_has_dsp) { + ret = -EIO; + break; + } + child->thread.dsp.dspcontrol = data; + break; + default: + /* The rest are not allowed. 
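+			 * (so anything readable via PTRACE_PEEKUSR but without
+			 * a case here, e.g. CAUSE, BADVADDR and FPC_EIR, is
+			 * effectively read-only).  From user space these slots
+			 * are addressed by the MIPS ptrace register numbers; a
+			 * read-side sketch (PC is the register index constant
+			 * from the MIPS ptrace headers):
+			 *
+			 *	errno = 0;
+			 *	long epc = ptrace(PTRACE_PEEKUSR, pid, (void *)PC, NULL);
+			 *	if (epc == -1 && errno != 0)
+			 *		perror("PTRACE_PEEKUSR");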
*/ + ret = -EIO; + break; + } + break; + } + + case PTRACE_GETREGS: + ret = ptrace_getregs(child, datavp); + break; + + case PTRACE_SETREGS: + ret = ptrace_setregs(child, datavp); + break; + + case PTRACE_GETFPREGS: + ret = ptrace_getfpregs(child, datavp); + break; + + case PTRACE_SETFPREGS: + ret = ptrace_setfpregs(child, datavp); + break; + + case PTRACE_GET_THREAD_AREA: + ret = put_user(task_thread_info(child)->tp_value, datalp); + break; + + case PTRACE_GET_WATCH_REGS: + ret = ptrace_get_watch_regs(child, addrp); + break; + + case PTRACE_SET_WATCH_REGS: + ret = ptrace_set_watch_regs(child, addrp); + break; + + default: + ret = ptrace_request(child, request, addr, data); + break; + } + out: + return ret; +} + +static inline int audit_arch(void) +{ + int arch = EM_MIPS; +#ifdef CONFIG_64BIT + arch |= __AUDIT_ARCH_64BIT; +#endif +#if defined(__LITTLE_ENDIAN) + arch |= __AUDIT_ARCH_LE; +#endif + return arch; +} + +/* + * Notification of system call entry/exit + * - triggered by current->work.syscall_trace + */ +asmlinkage void syscall_trace_enter(struct pt_regs *regs) +{ + /* do the secure computing check first */ + secure_computing(regs->regs[2]); + + if (!(current->ptrace & PT_PTRACED)) + goto out; + + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + goto out; + + /* The 0x80 provides a way for the tracing parent to distinguish + between a syscall stop and SIGTRAP delivery */ + ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? + 0x80 : 0)); + + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. -brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } + +out: + audit_syscall_entry(audit_arch(), regs->regs[2], + regs->regs[4], regs->regs[5], + regs->regs[6], regs->regs[7]); +} + +/* + * Notification of system call entry/exit + * - triggered by current->work.syscall_trace + */ +asmlinkage void syscall_trace_leave(struct pt_regs *regs) +{ + audit_syscall_exit(regs); + + if (!(current->ptrace & PT_PTRACED)) + return; + + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + return; + + /* The 0x80 provides a way for the tracing parent to distinguish + between a syscall stop and SIGTRAP delivery */ + ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? + 0x80 : 0)); + + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. -brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } +} diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c new file mode 100644 index 00000000..a3b01781 --- /dev/null +++ b/arch/mips/kernel/ptrace32.c @@ -0,0 +1,336 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 Ross Biro + * Copyright (C) Linus Torvalds + * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle + * Copyright (C) 1996 David S. Miller + * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 1999 MIPS Technologies, Inc. + * Copyright (C) 2000 Ulf Carlsson + * + * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit + * binaries. 
+ */ +#include <linux/compiler.h> +#include <linux/compat.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/errno.h> +#include <linux/ptrace.h> +#include <linux/smp.h> +#include <linux/user.h> +#include <linux/security.h> + +#include <asm/cpu.h> +#include <asm/dsp.h> +#include <asm/fpu.h> +#include <asm/mipsregs.h> +#include <asm/mipsmtregs.h> +#include <asm/pgtable.h> +#include <asm/page.h> +#include <asm/uaccess.h> +#include <asm/bootinfo.h> + +/* + * Tracing a 32-bit process with a 64-bit strace and vice versa will not + * work. I don't know how to fix this. + */ +long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t caddr, compat_ulong_t cdata) +{ + int addr = caddr; + int data = cdata; + int ret; + + switch (request) { + + /* + * Read 4 bytes of the other process' storage + * data is a pointer specifying where the user wants the + * 4 bytes copied into + * addr is a pointer in the user's storage that contains an 8 byte + * address in the other process of the 4 bytes that is to be read + * (this is run in a 32-bit process looking at a 64-bit process) + * when I and D space are separate, these will need to be fixed. + */ + case PTRACE_PEEKTEXT_3264: + case PTRACE_PEEKDATA_3264: { + u32 tmp; + int copied; + u32 __user * addrOthers; + + ret = -EIO; + + /* Get the addr in the other process that we want to read */ + if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0) + break; + + copied = access_process_vm(child, (u64)addrOthers, &tmp, + sizeof(tmp), 0); + if (copied != sizeof(tmp)) + break; + ret = put_user(tmp, (u32 __user *) (unsigned long) data); + break; + } + + /* Read the word at location addr in the USER area. */ + case PTRACE_PEEKUSR: { + struct pt_regs *regs; + unsigned int tmp; + + regs = task_pt_regs(child); + ret = 0; /* Default return value. */ + + switch (addr) { + case 0 ... 31: + tmp = regs->regs[addr]; + break; + case FPR_BASE ... FPR_BASE + 31: + if (tsk_used_math(child)) { + fpureg_t *fregs = get_fpu_regs(child); + + /* + * The odd registers are actually the high + * order bits of the values stored in the even + * registers - unless we're using r2k_switch.S. 
+ */ + if (addr & 1) + tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32); + else + tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff); + } else { + tmp = -1; /* FP not yet used */ + } + break; + case PC: + tmp = regs->cp0_epc; + break; + case CAUSE: + tmp = regs->cp0_cause; + break; + case BADVADDR: + tmp = regs->cp0_badvaddr; + break; + case MMHI: + tmp = regs->hi; + break; + case MMLO: + tmp = regs->lo; + break; + case FPC_CSR: + tmp = child->thread.fpu.fcr31; + break; + case FPC_EIR: { /* implementation / version register */ + unsigned int flags; +#ifdef CONFIG_MIPS_MT_SMTC + unsigned int irqflags; + unsigned int mtflags; +#endif /* CONFIG_MIPS_MT_SMTC */ + + preempt_disable(); + if (!cpu_has_fpu) { + preempt_enable(); + tmp = 0; + break; + } + +#ifdef CONFIG_MIPS_MT_SMTC + /* Read-modify-write of Status must be atomic */ + local_irq_save(irqflags); + mtflags = dmt(); +#endif /* CONFIG_MIPS_MT_SMTC */ + + if (cpu_has_mipsmt) { + unsigned int vpflags = dvpe(); + flags = read_c0_status(); + __enable_fpu(); + __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); + write_c0_status(flags); + evpe(vpflags); + } else { + flags = read_c0_status(); + __enable_fpu(); + __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); + write_c0_status(flags); + } +#ifdef CONFIG_MIPS_MT_SMTC + emt(mtflags); + local_irq_restore(irqflags); +#endif /* CONFIG_MIPS_MT_SMTC */ + preempt_enable(); + break; + } + case DSP_BASE ... DSP_BASE + 5: { + dspreg_t *dregs; + + if (!cpu_has_dsp) { + tmp = 0; + ret = -EIO; + goto out; + } + dregs = __get_dsp_regs(child); + tmp = (unsigned long) (dregs[addr - DSP_BASE]); + break; + } + case DSP_CONTROL: + if (!cpu_has_dsp) { + tmp = 0; + ret = -EIO; + goto out; + } + tmp = child->thread.dsp.dspcontrol; + break; + default: + tmp = 0; + ret = -EIO; + goto out; + } + ret = put_user(tmp, (unsigned __user *) (unsigned long) data); + break; + } + + /* + * Write 4 bytes into the other process' storage + * data is the 4 bytes that the user wants written + * addr is a pointer in the user's storage that contains an + * 8 byte address in the other process where the 4 bytes + * that is to be written + * (this is run in a 32-bit process looking at a 64-bit process) + * when I and D space are separate, these will need to be fixed. + */ + case PTRACE_POKETEXT_3264: + case PTRACE_POKEDATA_3264: { + u32 __user * addrOthers; + + /* Get the addr in the other process that we want to write into */ + ret = -EIO; + if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0) + break; + ret = 0; + if (access_process_vm(child, (u64)addrOthers, &data, + sizeof(data), 1) == sizeof(data)) + break; + ret = -EIO; + break; + } + + case PTRACE_POKEUSR: { + struct pt_regs *regs; + ret = 0; + regs = task_pt_regs(child); + + switch (addr) { + case 0 ... 31: + regs->regs[addr] = data; + break; + case FPR_BASE ... FPR_BASE + 31: { + fpureg_t *fregs = get_fpu_regs(child); + + if (!tsk_used_math(child)) { + /* FP not yet used */ + memset(&child->thread.fpu, ~0, + sizeof(child->thread.fpu)); + child->thread.fpu.fcr31 = 0; + } + /* + * The odd registers are actually the high order bits + * of the values stored in the even registers - unless + * we're using r2k_switch.S. + */ + if (addr & 1) { + fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff; + fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32; + } else { + fregs[addr - FPR_BASE] &= ~0xffffffffLL; + /* Must cast, lest sign extension fill upper + bits! 
*/ + fregs[addr - FPR_BASE] |= (unsigned int)data; + } + break; + } + case PC: + regs->cp0_epc = data; + break; + case MMHI: + regs->hi = data; + break; + case MMLO: + regs->lo = data; + break; + case FPC_CSR: + child->thread.fpu.fcr31 = data; + break; + case DSP_BASE ... DSP_BASE + 5: { + dspreg_t *dregs; + + if (!cpu_has_dsp) { + ret = -EIO; + break; + } + + dregs = __get_dsp_regs(child); + dregs[addr - DSP_BASE] = data; + break; + } + case DSP_CONTROL: + if (!cpu_has_dsp) { + ret = -EIO; + break; + } + child->thread.dsp.dspcontrol = data; + break; + default: + /* The rest are not allowed. */ + ret = -EIO; + break; + } + break; + } + + case PTRACE_GETREGS: + ret = ptrace_getregs(child, (__s64 __user *) (__u64) data); + break; + + case PTRACE_SETREGS: + ret = ptrace_setregs(child, (__s64 __user *) (__u64) data); + break; + + case PTRACE_GETFPREGS: + ret = ptrace_getfpregs(child, (__u32 __user *) (__u64) data); + break; + + case PTRACE_SETFPREGS: + ret = ptrace_setfpregs(child, (__u32 __user *) (__u64) data); + break; + + case PTRACE_GET_THREAD_AREA: + ret = put_user(task_thread_info(child)->tp_value, + (unsigned int __user *) (unsigned long) data); + break; + + case PTRACE_GET_THREAD_AREA_3264: + ret = put_user(task_thread_info(child)->tp_value, + (unsigned long __user *) (unsigned long) data); + break; + + case PTRACE_GET_WATCH_REGS: + ret = ptrace_get_watch_regs(child, + (struct pt_watch_regs __user *) (unsigned long) addr); + break; + + case PTRACE_SET_WATCH_REGS: + ret = ptrace_set_watch_regs(child, + (struct pt_watch_regs __user *) (unsigned long) addr); + break; + + default: + ret = compat_ptrace_request(child, request, addr, data); + break; + } +out: + return ret; +} diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S new file mode 100644 index 00000000..61c8a0f2 --- /dev/null +++ b/arch/mips/kernel/r2300_fpu.S @@ -0,0 +1,126 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 1998 by Ralf Baechle + * + * Multi-arch abstraction and asm macros for easier reading: + * Copyright (C) 1996 David S. 
Miller (davem@davemloft.net) + * + * Further modifications to make this work: + * Copyright (c) 1998 Harald Koerfgen + */ +#include <asm/asm.h> +#include <asm/errno.h> +#include <asm/fpregdef.h> +#include <asm/mipsregs.h> +#include <asm/asm-offsets.h> +#include <asm/regdef.h> + +#define EX(a,b) \ +9: a,##b; \ + .section __ex_table,"a"; \ + PTR 9b,bad_stack; \ + .previous + + .set noreorder + .set mips1 + /* Save floating point context */ +LEAF(_save_fp_context) + li v0, 0 # assume success + cfc1 t1,fcr31 + EX(swc1 $f0,(SC_FPREGS+0)(a0)) + EX(swc1 $f1,(SC_FPREGS+8)(a0)) + EX(swc1 $f2,(SC_FPREGS+16)(a0)) + EX(swc1 $f3,(SC_FPREGS+24)(a0)) + EX(swc1 $f4,(SC_FPREGS+32)(a0)) + EX(swc1 $f5,(SC_FPREGS+40)(a0)) + EX(swc1 $f6,(SC_FPREGS+48)(a0)) + EX(swc1 $f7,(SC_FPREGS+56)(a0)) + EX(swc1 $f8,(SC_FPREGS+64)(a0)) + EX(swc1 $f9,(SC_FPREGS+72)(a0)) + EX(swc1 $f10,(SC_FPREGS+80)(a0)) + EX(swc1 $f11,(SC_FPREGS+88)(a0)) + EX(swc1 $f12,(SC_FPREGS+96)(a0)) + EX(swc1 $f13,(SC_FPREGS+104)(a0)) + EX(swc1 $f14,(SC_FPREGS+112)(a0)) + EX(swc1 $f15,(SC_FPREGS+120)(a0)) + EX(swc1 $f16,(SC_FPREGS+128)(a0)) + EX(swc1 $f17,(SC_FPREGS+136)(a0)) + EX(swc1 $f18,(SC_FPREGS+144)(a0)) + EX(swc1 $f19,(SC_FPREGS+152)(a0)) + EX(swc1 $f20,(SC_FPREGS+160)(a0)) + EX(swc1 $f21,(SC_FPREGS+168)(a0)) + EX(swc1 $f22,(SC_FPREGS+176)(a0)) + EX(swc1 $f23,(SC_FPREGS+184)(a0)) + EX(swc1 $f24,(SC_FPREGS+192)(a0)) + EX(swc1 $f25,(SC_FPREGS+200)(a0)) + EX(swc1 $f26,(SC_FPREGS+208)(a0)) + EX(swc1 $f27,(SC_FPREGS+216)(a0)) + EX(swc1 $f28,(SC_FPREGS+224)(a0)) + EX(swc1 $f29,(SC_FPREGS+232)(a0)) + EX(swc1 $f30,(SC_FPREGS+240)(a0)) + EX(swc1 $f31,(SC_FPREGS+248)(a0)) + EX(sw t1,(SC_FPC_CSR)(a0)) + cfc1 t0,$0 # implementation/version + jr ra + .set nomacro + EX(sw t0,(SC_FPC_EIR)(a0)) + .set macro + END(_save_fp_context) + +/* + * Restore FPU state: + * - fp gp registers + * - cp1 status/control register + * + * We base the decision which registers to restore from the signal stack + * frame on the current content of c0_status, not on the content of the + * stack frame which might have been changed by the user. 
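+ *
+ * As in _save_fp_context above, each access to the sigcontext is
+ * wrapped in EX() so it gets an __ex_table entry; a faulting load is
+ * then redirected to a fixup stub instead of taking the kernel down
+ * on a bad user pointer.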
+ */ +LEAF(_restore_fp_context) + li v0, 0 # assume success + EX(lw t0,(SC_FPC_CSR)(a0)) + EX(lwc1 $f0,(SC_FPREGS+0)(a0)) + EX(lwc1 $f1,(SC_FPREGS+8)(a0)) + EX(lwc1 $f2,(SC_FPREGS+16)(a0)) + EX(lwc1 $f3,(SC_FPREGS+24)(a0)) + EX(lwc1 $f4,(SC_FPREGS+32)(a0)) + EX(lwc1 $f5,(SC_FPREGS+40)(a0)) + EX(lwc1 $f6,(SC_FPREGS+48)(a0)) + EX(lwc1 $f7,(SC_FPREGS+56)(a0)) + EX(lwc1 $f8,(SC_FPREGS+64)(a0)) + EX(lwc1 $f9,(SC_FPREGS+72)(a0)) + EX(lwc1 $f10,(SC_FPREGS+80)(a0)) + EX(lwc1 $f11,(SC_FPREGS+88)(a0)) + EX(lwc1 $f12,(SC_FPREGS+96)(a0)) + EX(lwc1 $f13,(SC_FPREGS+104)(a0)) + EX(lwc1 $f14,(SC_FPREGS+112)(a0)) + EX(lwc1 $f15,(SC_FPREGS+120)(a0)) + EX(lwc1 $f16,(SC_FPREGS+128)(a0)) + EX(lwc1 $f17,(SC_FPREGS+136)(a0)) + EX(lwc1 $f18,(SC_FPREGS+144)(a0)) + EX(lwc1 $f19,(SC_FPREGS+152)(a0)) + EX(lwc1 $f20,(SC_FPREGS+160)(a0)) + EX(lwc1 $f21,(SC_FPREGS+168)(a0)) + EX(lwc1 $f22,(SC_FPREGS+176)(a0)) + EX(lwc1 $f23,(SC_FPREGS+184)(a0)) + EX(lwc1 $f24,(SC_FPREGS+192)(a0)) + EX(lwc1 $f25,(SC_FPREGS+200)(a0)) + EX(lwc1 $f26,(SC_FPREGS+208)(a0)) + EX(lwc1 $f27,(SC_FPREGS+216)(a0)) + EX(lwc1 $f28,(SC_FPREGS+224)(a0)) + EX(lwc1 $f29,(SC_FPREGS+232)(a0)) + EX(lwc1 $f30,(SC_FPREGS+240)(a0)) + EX(lwc1 $f31,(SC_FPREGS+248)(a0)) + jr ra + ctc1 t0,fcr31 + END(_restore_fp_context) + .set reorder + + .type fault@function + .ent fault +fault: li v0, -EFAULT + jr ra + .end fault diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S new file mode 100644 index 00000000..29389839 --- /dev/null +++ b/arch/mips/kernel/r2300_switch.S @@ -0,0 +1,170 @@ +/* + * r2300_switch.S: R2300 specific task switching code. + * + * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle + * Copyright (C) 1994, 1995, 1996 by Andreas Busse + * + * Multi-cpu abstraction and macros for easier reading: + * Copyright (C) 1996 David S. Miller (davem@davemloft.net) + * + * Further modifications to make this work: + * Copyright (c) 1998-2000 Harald Koerfgen + */ +#include <asm/asm.h> +#include <asm/cachectl.h> +#include <asm/fpregdef.h> +#include <asm/mipsregs.h> +#include <asm/asm-offsets.h> +#include <asm/page.h> +#include <asm/regdef.h> +#include <asm/stackframe.h> +#include <asm/thread_info.h> + +#include <asm/asmmacro.h> + + .set mips1 + .align 5 + +/* + * Offset to the current process status flags, the first 32 bytes of the + * stack are not used. + */ +#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) + +/* + * FPU context is saved iff the process has used it's FPU in the current + * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user + * space STATUS register should be 0, so that a process *always* starts its + * userland with FPU disabled after each context switch. + * + * FPU will be enabled as soon as the process accesses FPU again, through + * do_cpu() trap. + */ + +/* + * task_struct *resume(task_struct *prev, task_struct *next, + * struct thread_info *next_ti) ) + */ +LEAF(resume) + mfc0 t1, CP0_STATUS + sw t1, THREAD_STATUS(a0) + cpu_save_nonscratch a0 + sw ra, THREAD_REG31(a0) + + /* + * check if we need to save FPU registers + */ + lw t3, TASK_THREAD_INFO(a0) + lw t0, TI_FLAGS(t3) + li t1, _TIF_USEDFPU + and t2, t0, t1 + beqz t2, 1f + nor t1, zero, t1 + + and t0, t0, t1 + sw t0, TI_FLAGS(t3) + + /* + * clear saved user stack CU1 bit + */ + lw t0, ST_OFF(t3) + li t1, ~ST0_CU1 + and t0, t0, t1 + sw t0, ST_OFF(t3) + + fpu_save_single a0, t0 # clobbers t0 + +1: + /* + * The order of restoring the registers takes care of the race + * updating $28, $29 and kernelsp without disabling ints. 
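+	 *
+	 * Note that kernelsp is recomputed from the new thread_info in $28
+	 * (thread_info plus _THREAD_SIZE - 32) rather than loaded from the
+	 * task, which is one reason $28 must be switched before kernelsp
+	 * is written back.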
+ */ + move $28, a2 + cpu_restore_nonscratch a1 + + addiu t1, $28, _THREAD_SIZE - 32 + sw t1, kernelsp + + mfc0 t1, CP0_STATUS /* Do we really need this? */ + li a3, 0xff01 + and t1, a3 + lw a2, THREAD_STATUS(a1) + nor a3, $0, a3 + and a2, a3 + or a2, t1 + mtc0 a2, CP0_STATUS + move v0, a0 + jr ra + END(resume) + +/* + * Save a thread's fp context. + */ +LEAF(_save_fp) + fpu_save_single a0, t1 # clobbers t1 + jr ra + END(_save_fp) + +/* + * Restore a thread's fp context. + */ +LEAF(_restore_fp) + fpu_restore_single a0, t1 # clobbers t1 + jr ra + END(_restore_fp) + +/* + * Load the FPU with signalling NANS. This bit pattern we're using has + * the property that no matter whether considered as single or as double + * precision represents signaling NANS. + * + * We initialize fcr31 to rounding to nearest, no exceptions. + */ + +#define FPU_DEFAULT 0x00000000 + +LEAF(_init_fpu) + mfc0 t0, CP0_STATUS + li t1, ST0_CU1 + or t0, t1 + mtc0 t0, CP0_STATUS + + li t1, FPU_DEFAULT + ctc1 t1, fcr31 + + li t0, -1 + + mtc1 t0, $f0 + mtc1 t0, $f1 + mtc1 t0, $f2 + mtc1 t0, $f3 + mtc1 t0, $f4 + mtc1 t0, $f5 + mtc1 t0, $f6 + mtc1 t0, $f7 + mtc1 t0, $f8 + mtc1 t0, $f9 + mtc1 t0, $f10 + mtc1 t0, $f11 + mtc1 t0, $f12 + mtc1 t0, $f13 + mtc1 t0, $f14 + mtc1 t0, $f15 + mtc1 t0, $f16 + mtc1 t0, $f17 + mtc1 t0, $f18 + mtc1 t0, $f19 + mtc1 t0, $f20 + mtc1 t0, $f21 + mtc1 t0, $f22 + mtc1 t0, $f23 + mtc1 t0, $f24 + mtc1 t0, $f25 + mtc1 t0, $f26 + mtc1 t0, $f27 + mtc1 t0, $f28 + mtc1 t0, $f29 + mtc1 t0, $f30 + mtc1 t0, $f31 + jr ra + END(_init_fpu) diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S new file mode 100644 index 00000000..55ffe149 --- /dev/null +++ b/arch/mips/kernel/r4k_fpu.S @@ -0,0 +1,188 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 98, 99, 2000, 01 Ralf Baechle + * + * Multi-arch abstraction and asm macros for easier reading: + * Copyright (C) 1996 David S. Miller (davem@davemloft.net) + * + * Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. + * Copyright (C) 1999, 2001 Silicon Graphics, Inc. 
+ */ +#include <asm/asm.h> +#include <asm/errno.h> +#include <asm/fpregdef.h> +#include <asm/mipsregs.h> +#include <asm/asm-offsets.h> +#include <asm/regdef.h> + + .macro EX insn, reg, src + .set push + .set nomacro +.ex\@: \insn \reg, \src + .set pop + .section __ex_table,"a" + PTR .ex\@, fault + .previous + .endm + + .set noreorder + .set mips3 + +LEAF(_save_fp_context) + cfc1 t1, fcr31 + +#ifdef CONFIG_64BIT + /* Store the 16 odd double precision registers */ + EX sdc1 $f1, SC_FPREGS+8(a0) + EX sdc1 $f3, SC_FPREGS+24(a0) + EX sdc1 $f5, SC_FPREGS+40(a0) + EX sdc1 $f7, SC_FPREGS+56(a0) + EX sdc1 $f9, SC_FPREGS+72(a0) + EX sdc1 $f11, SC_FPREGS+88(a0) + EX sdc1 $f13, SC_FPREGS+104(a0) + EX sdc1 $f15, SC_FPREGS+120(a0) + EX sdc1 $f17, SC_FPREGS+136(a0) + EX sdc1 $f19, SC_FPREGS+152(a0) + EX sdc1 $f21, SC_FPREGS+168(a0) + EX sdc1 $f23, SC_FPREGS+184(a0) + EX sdc1 $f25, SC_FPREGS+200(a0) + EX sdc1 $f27, SC_FPREGS+216(a0) + EX sdc1 $f29, SC_FPREGS+232(a0) + EX sdc1 $f31, SC_FPREGS+248(a0) +#endif + + /* Store the 16 even double precision registers */ + EX sdc1 $f0, SC_FPREGS+0(a0) + EX sdc1 $f2, SC_FPREGS+16(a0) + EX sdc1 $f4, SC_FPREGS+32(a0) + EX sdc1 $f6, SC_FPREGS+48(a0) + EX sdc1 $f8, SC_FPREGS+64(a0) + EX sdc1 $f10, SC_FPREGS+80(a0) + EX sdc1 $f12, SC_FPREGS+96(a0) + EX sdc1 $f14, SC_FPREGS+112(a0) + EX sdc1 $f16, SC_FPREGS+128(a0) + EX sdc1 $f18, SC_FPREGS+144(a0) + EX sdc1 $f20, SC_FPREGS+160(a0) + EX sdc1 $f22, SC_FPREGS+176(a0) + EX sdc1 $f24, SC_FPREGS+192(a0) + EX sdc1 $f26, SC_FPREGS+208(a0) + EX sdc1 $f28, SC_FPREGS+224(a0) + EX sdc1 $f30, SC_FPREGS+240(a0) + EX sw t1, SC_FPC_CSR(a0) + jr ra + li v0, 0 # success + END(_save_fp_context) + +#ifdef CONFIG_MIPS32_COMPAT + /* Save 32-bit process floating point context */ +LEAF(_save_fp_context32) + cfc1 t1, fcr31 + + EX sdc1 $f0, SC32_FPREGS+0(a0) + EX sdc1 $f2, SC32_FPREGS+16(a0) + EX sdc1 $f4, SC32_FPREGS+32(a0) + EX sdc1 $f6, SC32_FPREGS+48(a0) + EX sdc1 $f8, SC32_FPREGS+64(a0) + EX sdc1 $f10, SC32_FPREGS+80(a0) + EX sdc1 $f12, SC32_FPREGS+96(a0) + EX sdc1 $f14, SC32_FPREGS+112(a0) + EX sdc1 $f16, SC32_FPREGS+128(a0) + EX sdc1 $f18, SC32_FPREGS+144(a0) + EX sdc1 $f20, SC32_FPREGS+160(a0) + EX sdc1 $f22, SC32_FPREGS+176(a0) + EX sdc1 $f24, SC32_FPREGS+192(a0) + EX sdc1 $f26, SC32_FPREGS+208(a0) + EX sdc1 $f28, SC32_FPREGS+224(a0) + EX sdc1 $f30, SC32_FPREGS+240(a0) + EX sw t1, SC32_FPC_CSR(a0) + cfc1 t0, $0 # implementation/version + EX sw t0, SC32_FPC_EIR(a0) + + jr ra + li v0, 0 # success + END(_save_fp_context32) +#endif + +/* + * Restore FPU state: + * - fp gp registers + * - cp1 status/control register + */ +LEAF(_restore_fp_context) + EX lw t0, SC_FPC_CSR(a0) +#ifdef CONFIG_64BIT + EX ldc1 $f1, SC_FPREGS+8(a0) + EX ldc1 $f3, SC_FPREGS+24(a0) + EX ldc1 $f5, SC_FPREGS+40(a0) + EX ldc1 $f7, SC_FPREGS+56(a0) + EX ldc1 $f9, SC_FPREGS+72(a0) + EX ldc1 $f11, SC_FPREGS+88(a0) + EX ldc1 $f13, SC_FPREGS+104(a0) + EX ldc1 $f15, SC_FPREGS+120(a0) + EX ldc1 $f17, SC_FPREGS+136(a0) + EX ldc1 $f19, SC_FPREGS+152(a0) + EX ldc1 $f21, SC_FPREGS+168(a0) + EX ldc1 $f23, SC_FPREGS+184(a0) + EX ldc1 $f25, SC_FPREGS+200(a0) + EX ldc1 $f27, SC_FPREGS+216(a0) + EX ldc1 $f29, SC_FPREGS+232(a0) + EX ldc1 $f31, SC_FPREGS+248(a0) +#endif + EX ldc1 $f0, SC_FPREGS+0(a0) + EX ldc1 $f2, SC_FPREGS+16(a0) + EX ldc1 $f4, SC_FPREGS+32(a0) + EX ldc1 $f6, SC_FPREGS+48(a0) + EX ldc1 $f8, SC_FPREGS+64(a0) + EX ldc1 $f10, SC_FPREGS+80(a0) + EX ldc1 $f12, SC_FPREGS+96(a0) + EX ldc1 $f14, SC_FPREGS+112(a0) + EX ldc1 $f16, SC_FPREGS+128(a0) + EX ldc1 $f18, 
SC_FPREGS+144(a0) + EX ldc1 $f20, SC_FPREGS+160(a0) + EX ldc1 $f22, SC_FPREGS+176(a0) + EX ldc1 $f24, SC_FPREGS+192(a0) + EX ldc1 $f26, SC_FPREGS+208(a0) + EX ldc1 $f28, SC_FPREGS+224(a0) + EX ldc1 $f30, SC_FPREGS+240(a0) + ctc1 t0, fcr31 + jr ra + li v0, 0 # success + END(_restore_fp_context) + +#ifdef CONFIG_MIPS32_COMPAT +LEAF(_restore_fp_context32) + /* Restore an o32 sigcontext. */ + EX lw t0, SC32_FPC_CSR(a0) + EX ldc1 $f0, SC32_FPREGS+0(a0) + EX ldc1 $f2, SC32_FPREGS+16(a0) + EX ldc1 $f4, SC32_FPREGS+32(a0) + EX ldc1 $f6, SC32_FPREGS+48(a0) + EX ldc1 $f8, SC32_FPREGS+64(a0) + EX ldc1 $f10, SC32_FPREGS+80(a0) + EX ldc1 $f12, SC32_FPREGS+96(a0) + EX ldc1 $f14, SC32_FPREGS+112(a0) + EX ldc1 $f16, SC32_FPREGS+128(a0) + EX ldc1 $f18, SC32_FPREGS+144(a0) + EX ldc1 $f20, SC32_FPREGS+160(a0) + EX ldc1 $f22, SC32_FPREGS+176(a0) + EX ldc1 $f24, SC32_FPREGS+192(a0) + EX ldc1 $f26, SC32_FPREGS+208(a0) + EX ldc1 $f28, SC32_FPREGS+224(a0) + EX ldc1 $f30, SC32_FPREGS+240(a0) + ctc1 t0, fcr31 + jr ra + li v0, 0 # success + END(_restore_fp_context32) +#endif + + .set reorder + + .type fault@function + .ent fault +fault: li v0, -EFAULT # failure + jr ra + .end fault diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S new file mode 100644 index 00000000..9414f935 --- /dev/null +++ b/arch/mips/kernel/r4k_switch.S @@ -0,0 +1,252 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle + * Copyright (C) 1996 David S. Miller (davem@davemloft.net) + * Copyright (C) 1994, 1995, 1996, by Andreas Busse + * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2000 MIPS Technologies, Inc. + * written by Carsten Langgaard, carstenl@mips.com + */ +#include <asm/asm.h> +#include <asm/cachectl.h> +#include <asm/fpregdef.h> +#include <asm/mipsregs.h> +#include <asm/asm-offsets.h> +#include <asm/page.h> +#include <asm/pgtable-bits.h> +#include <asm/regdef.h> +#include <asm/stackframe.h> +#include <asm/thread_info.h> + +#include <asm/asmmacro.h> + +/* + * Offset to the current process status flags, the first 32 bytes of the + * stack are not used. + */ +#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) + +/* + * FPU context is saved iff the process has used it's FPU in the current + * time slice as indicated by _TIF_USEDFPU. In any case, the CU1 bit for user + * space STATUS register should be 0, so that a process *always* starts its + * userland with FPU disabled after each context switch. + * + * FPU will be enabled as soon as the process accesses FPU again, through + * do_cpu() trap. + */ + +/* + * task_struct *resume(task_struct *prev, task_struct *next, + * struct thread_info *next_ti) + */ + .align 5 + LEAF(resume) + mfc0 t1, CP0_STATUS + LONG_S t1, THREAD_STATUS(a0) + cpu_save_nonscratch a0 + LONG_S ra, THREAD_REG31(a0) + + /* + * check if we need to save FPU registers + */ + PTR_L t3, TASK_THREAD_INFO(a0) + LONG_L t0, TI_FLAGS(t3) + li t1, _TIF_USEDFPU + and t2, t0, t1 + beqz t2, 1f + nor t1, zero, t1 + + and t0, t0, t1 + LONG_S t0, TI_FLAGS(t3) + + /* + * clear saved user stack CU1 bit + */ + LONG_L t0, ST_OFF(t3) + li t1, ~ST0_CU1 + and t0, t0, t1 + LONG_S t0, ST_OFF(t3) + + fpu_save_double a0 t0 t1 # c0_status passed in t0 + # clobbers t1 +1: + + /* + * The order of restoring the registers takes care of the race + * updating $28, $29 and kernelsp without disabling ints. 
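+ *
+ * (Editor's note, illustrative: in the CONFIG_MIPS_MT_SMTC variant below
+ * the Status read-modify-write is additionally bracketed by setting
+ * TCStatus.IXMT and a DMT/EMT pair, since Status updates must be atomic
+ * across the TCs of a VPE.)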
+ */ + move $28, a2 + cpu_restore_nonscratch a1 + + PTR_ADDU t0, $28, _THREAD_SIZE - 32 + set_saved_sp t0, t1, t2 +#ifdef CONFIG_MIPS_MT_SMTC + /* Read-modify-writes of Status must be atomic on a VPE */ + mfc0 t2, CP0_TCSTATUS + ori t1, t2, TCSTATUS_IXMT + mtc0 t1, CP0_TCSTATUS + andi t2, t2, TCSTATUS_IXMT + _ehb + DMT 8 # dmt t0 + move t1,ra + jal mips_ihb + move ra,t1 +#endif /* CONFIG_MIPS_MT_SMTC */ + mfc0 t1, CP0_STATUS /* Do we really need this? */ + li a3, 0xff01 + and t1, a3 + LONG_L a2, THREAD_STATUS(a1) + nor a3, $0, a3 + and a2, a3 + or a2, t1 + mtc0 a2, CP0_STATUS +#ifdef CONFIG_MIPS_MT_SMTC + _ehb + andi t0, t0, VPECONTROL_TE + beqz t0, 1f + emt +1: + mfc0 t1, CP0_TCSTATUS + xori t1, t1, TCSTATUS_IXMT + or t1, t1, t2 + mtc0 t1, CP0_TCSTATUS + _ehb +#endif /* CONFIG_MIPS_MT_SMTC */ + move v0, a0 + jr ra + END(resume) + +/* + * Save a thread's fp context. + */ +LEAF(_save_fp) +#ifdef CONFIG_64BIT + mfc0 t0, CP0_STATUS +#endif + fpu_save_double a0 t0 t1 # clobbers t1 + jr ra + END(_save_fp) + +/* + * Restore a thread's fp context. + */ +LEAF(_restore_fp) +#ifdef CONFIG_64BIT + mfc0 t0, CP0_STATUS +#endif + fpu_restore_double a0 t0 t1 # clobbers t1 + jr ra + END(_restore_fp) + +/* + * Load the FPU with signalling NANS. This bit pattern we're using has + * the property that no matter whether considered as single or as double + * precision represents signaling NANS. + * + * We initialize fcr31 to rounding to nearest, no exceptions. + */ + +#define FPU_DEFAULT 0x00000000 + +LEAF(_init_fpu) +#ifdef CONFIG_MIPS_MT_SMTC + /* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */ + mfc0 t0, CP0_TCSTATUS + /* Bit position is the same for Status, TCStatus */ + li t1, ST0_CU1 + or t0, t1 + mtc0 t0, CP0_TCSTATUS +#else /* Normal MIPS CU1 enable */ + mfc0 t0, CP0_STATUS + li t1, ST0_CU1 + or t0, t1 + mtc0 t0, CP0_STATUS +#endif /* CONFIG_MIPS_MT_SMTC */ + enable_fpu_hazard + + li t1, FPU_DEFAULT + ctc1 t1, fcr31 + + li t1, -1 # SNaN + +#ifdef CONFIG_64BIT + sll t0, t0, 5 + bgez t0, 1f # 16 / 32 register mode? + + dmtc1 t1, $f1 + dmtc1 t1, $f3 + dmtc1 t1, $f5 + dmtc1 t1, $f7 + dmtc1 t1, $f9 + dmtc1 t1, $f11 + dmtc1 t1, $f13 + dmtc1 t1, $f15 + dmtc1 t1, $f17 + dmtc1 t1, $f19 + dmtc1 t1, $f21 + dmtc1 t1, $f23 + dmtc1 t1, $f25 + dmtc1 t1, $f27 + dmtc1 t1, $f29 + dmtc1 t1, $f31 +1: +#endif + +#ifdef CONFIG_CPU_MIPS32 + mtc1 t1, $f0 + mtc1 t1, $f1 + mtc1 t1, $f2 + mtc1 t1, $f3 + mtc1 t1, $f4 + mtc1 t1, $f5 + mtc1 t1, $f6 + mtc1 t1, $f7 + mtc1 t1, $f8 + mtc1 t1, $f9 + mtc1 t1, $f10 + mtc1 t1, $f11 + mtc1 t1, $f12 + mtc1 t1, $f13 + mtc1 t1, $f14 + mtc1 t1, $f15 + mtc1 t1, $f16 + mtc1 t1, $f17 + mtc1 t1, $f18 + mtc1 t1, $f19 + mtc1 t1, $f20 + mtc1 t1, $f21 + mtc1 t1, $f22 + mtc1 t1, $f23 + mtc1 t1, $f24 + mtc1 t1, $f25 + mtc1 t1, $f26 + mtc1 t1, $f27 + mtc1 t1, $f28 + mtc1 t1, $f29 + mtc1 t1, $f30 + mtc1 t1, $f31 +#else + .set mips3 + dmtc1 t1, $f0 + dmtc1 t1, $f2 + dmtc1 t1, $f4 + dmtc1 t1, $f6 + dmtc1 t1, $f8 + dmtc1 t1, $f10 + dmtc1 t1, $f12 + dmtc1 t1, $f14 + dmtc1 t1, $f16 + dmtc1 t1, $f18 + dmtc1 t1, $f20 + dmtc1 t1, $f22 + dmtc1 t1, $f24 + dmtc1 t1, $f26 + dmtc1 t1, $f28 + dmtc1 t1, $f30 +#endif + jr ra + END(_init_fpu) diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S new file mode 100644 index 00000000..da0fbe46 --- /dev/null +++ b/arch/mips/kernel/r6000_fpu.S @@ -0,0 +1,87 @@ +/* + * r6000_fpu.S: Save/restore floating point context for signal handlers. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996 by Ralf Baechle + * + * Multi-arch abstraction and asm macros for easier reading: + * Copyright (C) 1996 David S. Miller (davem@davemloft.net) + */ +#include <asm/asm.h> +#include <asm/fpregdef.h> +#include <asm/mipsregs.h> +#include <asm/asm-offsets.h> +#include <asm/regdef.h> + + .set noreorder + .set mips2 + /* Save floating point context */ + LEAF(_save_fp_context) + mfc0 t0,CP0_STATUS + sll t0,t0,2 + bgez t0,1f + nop + + cfc1 t1,fcr31 + /* Store the 16 double precision registers */ + sdc1 $f0,(SC_FPREGS+0)(a0) + sdc1 $f2,(SC_FPREGS+16)(a0) + sdc1 $f4,(SC_FPREGS+32)(a0) + sdc1 $f6,(SC_FPREGS+48)(a0) + sdc1 $f8,(SC_FPREGS+64)(a0) + sdc1 $f10,(SC_FPREGS+80)(a0) + sdc1 $f12,(SC_FPREGS+96)(a0) + sdc1 $f14,(SC_FPREGS+112)(a0) + sdc1 $f16,(SC_FPREGS+128)(a0) + sdc1 $f18,(SC_FPREGS+144)(a0) + sdc1 $f20,(SC_FPREGS+160)(a0) + sdc1 $f22,(SC_FPREGS+176)(a0) + sdc1 $f24,(SC_FPREGS+192)(a0) + sdc1 $f26,(SC_FPREGS+208)(a0) + sdc1 $f28,(SC_FPREGS+224)(a0) + sdc1 $f30,(SC_FPREGS+240)(a0) + jr ra + sw t0,SC_FPC_CSR(a0) +1: jr ra + nop + END(_save_fp_context) + +/* Restore FPU state: + * - fp gp registers + * - cp1 status/control register + * + * We base the decision which registers to restore from the signal stack + * frame on the current content of c0_status, not on the content of the + * stack frame which might have been changed by the user. + */ + LEAF(_restore_fp_context) + mfc0 t0,CP0_STATUS + sll t0,t0,2 + + bgez t0,1f + lw t0,SC_FPC_CSR(a0) + /* Restore the 16 double precision registers */ + ldc1 $f0,(SC_FPREGS+0)(a0) + ldc1 $f2,(SC_FPREGS+16)(a0) + ldc1 $f4,(SC_FPREGS+32)(a0) + ldc1 $f6,(SC_FPREGS+48)(a0) + ldc1 $f8,(SC_FPREGS+64)(a0) + ldc1 $f10,(SC_FPREGS+80)(a0) + ldc1 $f12,(SC_FPREGS+96)(a0) + ldc1 $f14,(SC_FPREGS+112)(a0) + ldc1 $f16,(SC_FPREGS+128)(a0) + ldc1 $f18,(SC_FPREGS+144)(a0) + ldc1 $f20,(SC_FPREGS+160)(a0) + ldc1 $f22,(SC_FPREGS+176)(a0) + ldc1 $f24,(SC_FPREGS+192)(a0) + ldc1 $f26,(SC_FPREGS+208)(a0) + ldc1 $f28,(SC_FPREGS+224)(a0) + ldc1 $f30,(SC_FPREGS+240)(a0) + jr ra + ctc1 t0,fcr31 +1: jr ra + nop + END(_restore_fp_context) diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S new file mode 100644 index 00000000..87481f91 --- /dev/null +++ b/arch/mips/kernel/relocate_kernel.S @@ -0,0 +1,82 @@ +/* + * relocate_kernel.S for kexec + * Created by <nschichan@corp.free.fr> on Thu Oct 12 17:49:57 2006 + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. 
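+ *
+ * (Editor's note, illustrative: relocate_new_kernel below walks the kexec
+ * indirection list; the low bits of each entry give its meaning --
+ * 0x1 = destination page, 0x2 = new indirection page, 0x4 = done,
+ * 0x8 = source page to copy into the current destination.)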
+ */ + +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/regdef.h> +#include <asm/page.h> +#include <asm/mipsregs.h> +#include <asm/stackframe.h> +#include <asm/addrspace.h> + +LEAF(relocate_new_kernel) + PTR_L s0, kexec_indirection_page + PTR_L s1, kexec_start_address + +process_entry: + PTR_L s2, (s0) + PTR_ADD s0, s0, SZREG + + /* destination page */ + and s3, s2, 0x1 + beq s3, zero, 1f + and s4, s2, ~0x1 /* store destination addr in s4 */ + move a0, s4 + b process_entry + +1: + /* indirection page, update s0 */ + and s3, s2, 0x2 + beq s3, zero, 1f + and s0, s2, ~0x2 + b process_entry + +1: + /* done page */ + and s3, s2, 0x4 + beq s3, zero, 1f + b done +1: + /* source page */ + and s3, s2, 0x8 + beq s3, zero, process_entry + and s2, s2, ~0x8 + li s6, (1 << PAGE_SHIFT) / SZREG + +copy_word: + /* copy page word by word */ + REG_L s5, (s2) + REG_S s5, (s4) + PTR_ADD s4, s4, SZREG + PTR_ADD s2, s2, SZREG + LONG_SUB s6, s6, 1 + beq s6, zero, process_entry + b copy_word + b process_entry + +done: + /* jump to kexec_start_address */ + j s1 + END(relocate_new_kernel) + +kexec_start_address: + EXPORT(kexec_start_address) + PTR 0x0 + .size kexec_start_address, PTRSIZE + +kexec_indirection_page: + EXPORT(kexec_indirection_page) + PTR 0 + .size kexec_indirection_page, PTRSIZE + +relocate_new_kernel_end: + +relocate_new_kernel_size: + EXPORT(relocate_new_kernel_size) + PTR relocate_new_kernel_end - relocate_new_kernel + .size relocate_new_kernel_size, PTRSIZE diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c new file mode 100644 index 00000000..07fc5244 --- /dev/null +++ b/arch/mips/kernel/reset.c @@ -0,0 +1,44 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001, 06 by Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2001 MIPS Technologies, Inc. + */ +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/pm.h> +#include <linux/types.h> +#include <linux/reboot.h> + +#include <asm/reboot.h> + +/* + * Urgs ... Too many MIPS machines to handle this in a generic way. + * So handle all using function pointers to machine specific + * functions. + */ +void (*_machine_restart)(char *command); +void (*_machine_halt)(void); +void (*pm_power_off)(void); + +EXPORT_SYMBOL(pm_power_off); + +void machine_restart(char *command) +{ + if (_machine_restart) + _machine_restart(command); +} + +void machine_halt(void) +{ + if (_machine_halt) + _machine_halt(); +} + +void machine_power_off(void) +{ + if (pm_power_off) + pm_power_off(); +} diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c new file mode 100644 index 00000000..b8c18dcd --- /dev/null +++ b/arch/mips/kernel/rtlx.c @@ -0,0 +1,562 @@ +/* + * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. + * Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org) + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <asm/uaccess.h> +#include <linux/list.h> +#include <linux/vmalloc.h> +#include <linux/elf.h> +#include <linux/seq_file.h> +#include <linux/syscalls.h> +#include <linux/moduleloader.h> +#include <linux/interrupt.h> +#include <linux/poll.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <asm/mipsmtregs.h> +#include <asm/mips_mt.h> +#include <asm/cacheflush.h> +#include <linux/atomic.h> +#include <asm/cpu.h> +#include <asm/processor.h> +#include <asm/vpe.h> +#include <asm/rtlx.h> + +static struct rtlx_info *rtlx; +static int major; +static char module_name[] = "rtlx"; + +static struct chan_waitqueues { + wait_queue_head_t rt_queue; + wait_queue_head_t lx_queue; + atomic_t in_open; + struct mutex mutex; +} channel_wqs[RTLX_CHANNELS]; + +static struct vpe_notifications notify; +static int sp_stopping; + +extern void *vpe_get_shared(int index); + +static void rtlx_dispatch(void) +{ + do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ); +} + + +/* Interrupt handler may be called before rtlx_init has otherwise had + a chance to run. +*/ +static irqreturn_t rtlx_interrupt(int irq, void *dev_id) +{ + unsigned int vpeflags; + unsigned long flags; + int i; + + /* Ought not to be strictly necessary for SMTC builds */ + local_irq_save(flags); + vpeflags = dvpe(); + set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); + irq_enable_hazard(); + evpe(vpeflags); + local_irq_restore(flags); + + for (i = 0; i < RTLX_CHANNELS; i++) { + wake_up(&channel_wqs[i].lx_queue); + wake_up(&channel_wqs[i].rt_queue); + } + + return IRQ_HANDLED; +} + +static void __used dump_rtlx(void) +{ + int i; + + printk("id 0x%lx state %d\n", rtlx->id, rtlx->state); + + for (i = 0; i < RTLX_CHANNELS; i++) { + struct rtlx_channel *chan = &rtlx->channel[i]; + + printk(" rt_state %d lx_state %d buffer_size %d\n", + chan->rt_state, chan->lx_state, chan->buffer_size); + + printk(" rt_read %d rt_write %d\n", + chan->rt_read, chan->rt_write); + + printk(" lx_read %d lx_write %d\n", + chan->lx_read, chan->lx_write); + + printk(" rt_buffer <%s>\n", chan->rt_buffer); + printk(" lx_buffer <%s>\n", chan->lx_buffer); + } +} + +/* call when we have the address of the shared structure from the SP side. 
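+ *
+ * (Editor's note, illustrative: the pointer is obtained via
+ * vpe_get_shared() and is only accepted when its id field matches RTLX_ID;
+ * otherwise -ENOEXEC is returned and the channels stay unusable.)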
*/ +static int rtlx_init(struct rtlx_info *rtlxi) +{ + if (rtlxi->id != RTLX_ID) { + printk(KERN_ERR "no valid RTLX id at 0x%p 0x%lx\n", + rtlxi, rtlxi->id); + return -ENOEXEC; + } + + rtlx = rtlxi; + + return 0; +} + +/* notifications */ +static void starting(int vpe) +{ + int i; + sp_stopping = 0; + + /* force a reload of rtlx */ + rtlx=NULL; + + /* wake up any sleeping rtlx_open's */ + for (i = 0; i < RTLX_CHANNELS; i++) + wake_up_interruptible(&channel_wqs[i].lx_queue); +} + +static void stopping(int vpe) +{ + int i; + + sp_stopping = 1; + for (i = 0; i < RTLX_CHANNELS; i++) + wake_up_interruptible(&channel_wqs[i].lx_queue); +} + + +int rtlx_open(int index, int can_sleep) +{ + struct rtlx_info **p; + struct rtlx_channel *chan; + enum rtlx_state state; + int ret = 0; + + if (index >= RTLX_CHANNELS) { + printk(KERN_DEBUG "rtlx_open index out of range\n"); + return -ENOSYS; + } + + if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { + printk(KERN_DEBUG "rtlx_open channel %d already opened\n", + index); + ret = -EBUSY; + goto out_fail; + } + + if (rtlx == NULL) { + if( (p = vpe_get_shared(tclimit)) == NULL) { + if (can_sleep) { + __wait_event_interruptible(channel_wqs[index].lx_queue, + (p = vpe_get_shared(tclimit)), ret); + if (ret) + goto out_fail; + } else { + printk(KERN_DEBUG "No SP program loaded, and device " + "opened with O_NONBLOCK\n"); + ret = -ENOSYS; + goto out_fail; + } + } + + smp_rmb(); + if (*p == NULL) { + if (can_sleep) { + DEFINE_WAIT(wait); + + for (;;) { + prepare_to_wait( + &channel_wqs[index].lx_queue, + &wait, TASK_INTERRUPTIBLE); + smp_rmb(); + if (*p != NULL) + break; + if (!signal_pending(current)) { + schedule(); + continue; + } + ret = -ERESTARTSYS; + goto out_fail; + } + finish_wait(&channel_wqs[index].lx_queue, &wait); + } else { + pr_err(" *vpe_get_shared is NULL. " + "Has an SP program been loaded?\n"); + ret = -ENOSYS; + goto out_fail; + } + } + + if ((unsigned int)*p < KSEG0) { + printk(KERN_WARNING "vpe_get_shared returned an " + "invalid pointer maybe an error code %d\n", + (int)*p); + ret = -ENOSYS; + goto out_fail; + } + + if ((ret = rtlx_init(*p)) < 0) + goto out_ret; + } + + chan = &rtlx->channel[index]; + + state = xchg(&chan->lx_state, RTLX_STATE_OPENED); + if (state == RTLX_STATE_OPENED) { + ret = -EBUSY; + goto out_fail; + } + +out_fail: + smp_mb(); + atomic_dec(&channel_wqs[index].in_open); + smp_mb(); + +out_ret: + return ret; +} + +int rtlx_release(int index) +{ + if (rtlx == NULL) { + pr_err("rtlx_release() with null rtlx\n"); + return 0; + } + rtlx->channel[index].lx_state = RTLX_STATE_UNUSED; + return 0; +} + +unsigned int rtlx_read_poll(int index, int can_sleep) +{ + struct rtlx_channel *chan; + + if (rtlx == NULL) + return 0; + + chan = &rtlx->channel[index]; + + /* data available to read? 
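+ * (Editor's note, illustrative: this is a plain ring buffer; the byte
+ * count returned below is
+ *   (lx_write + buffer_size - lx_read) % buffer_size,
+ * which is zero exactly when the read and write indexes are equal.)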
*/ + if (chan->lx_read == chan->lx_write) { + if (can_sleep) { + int ret = 0; + + __wait_event_interruptible(channel_wqs[index].lx_queue, + (chan->lx_read != chan->lx_write) || + sp_stopping, ret); + if (ret) + return ret; + + if (sp_stopping) + return 0; + } else + return 0; + } + + return (chan->lx_write + chan->buffer_size - chan->lx_read) + % chan->buffer_size; +} + +static inline int write_spacefree(int read, int write, int size) +{ + if (read == write) { + /* + * Never fill the buffer completely, so indexes are always + * equal if empty and only empty, or !equal if data available + */ + return size - 1; + } + + return ((read + size - write) % size) - 1; +} + +unsigned int rtlx_write_poll(int index) +{ + struct rtlx_channel *chan = &rtlx->channel[index]; + + return write_spacefree(chan->rt_read, chan->rt_write, + chan->buffer_size); +} + +ssize_t rtlx_read(int index, void __user *buff, size_t count) +{ + size_t lx_write, fl = 0L; + struct rtlx_channel *lx; + unsigned long failed; + + if (rtlx == NULL) + return -ENOSYS; + + lx = &rtlx->channel[index]; + + mutex_lock(&channel_wqs[index].mutex); + smp_rmb(); + lx_write = lx->lx_write; + + /* find out how much in total */ + count = min(count, + (size_t)(lx_write + lx->buffer_size - lx->lx_read) + % lx->buffer_size); + + /* then how much from the read pointer onwards */ + fl = min(count, (size_t)lx->buffer_size - lx->lx_read); + + failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl); + if (failed) + goto out; + + /* and if there is anything left at the beginning of the buffer */ + if (count - fl) + failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl); + +out: + count -= failed; + + smp_wmb(); + lx->lx_read = (lx->lx_read + count) % lx->buffer_size; + smp_wmb(); + mutex_unlock(&channel_wqs[index].mutex); + + return count; +} + +ssize_t rtlx_write(int index, const void __user *buffer, size_t count) +{ + struct rtlx_channel *rt; + unsigned long failed; + size_t rt_read; + size_t fl; + + if (rtlx == NULL) + return(-ENOSYS); + + rt = &rtlx->channel[index]; + + mutex_lock(&channel_wqs[index].mutex); + smp_rmb(); + rt_read = rt->rt_read; + + /* total number of bytes to copy */ + count = min(count, (size_t)write_spacefree(rt_read, rt->rt_write, + rt->buffer_size)); + + /* first bit from write pointer to the end of the buffer, or count */ + fl = min(count, (size_t) rt->buffer_size - rt->rt_write); + + failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl); + if (failed) + goto out; + + /* if there's any left copy to the beginning of the buffer */ + if (count - fl) { + failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl); + } + +out: + count -= failed; + + smp_wmb(); + rt->rt_write = (rt->rt_write + count) % rt->buffer_size; + smp_wmb(); + mutex_unlock(&channel_wqs[index].mutex); + + return count; +} + + +static int file_open(struct inode *inode, struct file *filp) +{ + return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1); +} + +static int file_release(struct inode *inode, struct file *filp) +{ + return rtlx_release(iminor(inode)); +} + +static unsigned int file_poll(struct file *file, poll_table * wait) +{ + int minor; + unsigned int mask = 0; + + minor = iminor(file->f_path.dentry->d_inode); + + poll_wait(file, &channel_wqs[minor].rt_queue, wait); + poll_wait(file, &channel_wqs[minor].lx_queue, wait); + + if (rtlx == NULL) + return 0; + + /* data available to read? 
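+ * (Editor's note, illustrative: both the rt and lx wait queues were
+ * registered with poll_wait() above, so a wakeup on either side gets the
+ * mask below re-evaluated for POLLIN/POLLOUT.)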
*/ + if (rtlx_read_poll(minor, 0)) + mask |= POLLIN | POLLRDNORM; + + /* space to write */ + if (rtlx_write_poll(minor)) + mask |= POLLOUT | POLLWRNORM; + + return mask; +} + +static ssize_t file_read(struct file *file, char __user * buffer, size_t count, + loff_t * ppos) +{ + int minor = iminor(file->f_path.dentry->d_inode); + + /* data available? */ + if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) { + return 0; // -EAGAIN makes cat whinge + } + + return rtlx_read(minor, buffer, count); +} + +static ssize_t file_write(struct file *file, const char __user * buffer, + size_t count, loff_t * ppos) +{ + int minor; + struct rtlx_channel *rt; + + minor = iminor(file->f_path.dentry->d_inode); + rt = &rtlx->channel[minor]; + + /* any space left... */ + if (!rtlx_write_poll(minor)) { + int ret = 0; + + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + __wait_event_interruptible(channel_wqs[minor].rt_queue, + rtlx_write_poll(minor), + ret); + if (ret) + return ret; + } + + return rtlx_write(minor, buffer, count); +} + +static const struct file_operations rtlx_fops = { + .owner = THIS_MODULE, + .open = file_open, + .release = file_release, + .write = file_write, + .read = file_read, + .poll = file_poll, + .llseek = noop_llseek, +}; + +static struct irqaction rtlx_irq = { + .handler = rtlx_interrupt, + .name = "RTLX", +}; + +static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ; + +static char register_chrdev_failed[] __initdata = + KERN_ERR "rtlx_module_init: unable to register device\n"; + +static int __init rtlx_module_init(void) +{ + struct device *dev; + int i, err; + + if (!cpu_has_mipsmt) { + printk("VPE loader: not a MIPS MT capable processor\n"); + return -ENODEV; + } + + if (tclimit == 0) { + printk(KERN_WARNING "No TCs reserved for AP/SP, not " + "initializing RTLX.\nPass maxtcs=<n> argument as kernel " + "argument\n"); + + return -ENODEV; + } + + major = register_chrdev(0, module_name, &rtlx_fops); + if (major < 0) { + printk(register_chrdev_failed); + return major; + } + + /* initialise the wait queues */ + for (i = 0; i < RTLX_CHANNELS; i++) { + init_waitqueue_head(&channel_wqs[i].rt_queue); + init_waitqueue_head(&channel_wqs[i].lx_queue); + atomic_set(&channel_wqs[i].in_open, 0); + mutex_init(&channel_wqs[i].mutex); + + dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, + "%s%d", module_name, i); + if (IS_ERR(dev)) { + err = PTR_ERR(dev); + goto out_chrdev; + } + } + + /* set up notifiers */ + notify.start = starting; + notify.stop = stopping; + vpe_notify(tclimit, ¬ify); + + if (cpu_has_vint) + set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch); + else { + pr_err("APRP RTLX init on non-vectored-interrupt processor\n"); + err = -ENODEV; + goto out_chrdev; + } + + rtlx_irq.dev_id = rtlx; + setup_irq(rtlx_irq_num, &rtlx_irq); + + return 0; + +out_chrdev: + for (i = 0; i < RTLX_CHANNELS; i++) + device_destroy(mt_class, MKDEV(major, i)); + + return err; +} + +static void __exit rtlx_module_exit(void) +{ + int i; + + for (i = 0; i < RTLX_CHANNELS; i++) + device_destroy(mt_class, MKDEV(major, i)); + + unregister_chrdev(major, module_name); +} + +module_init(rtlx_module_init); +module_exit(rtlx_module_exit); + +MODULE_DESCRIPTION("MIPS RTLX"); +MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc."); +MODULE_LICENSE("GPL"); diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S new file mode 100644 index 00000000..a632bc14 --- /dev/null +++ b/arch/mips/kernel/scall32-o32.S @@ -0,0 +1,610 @@ +/* + * This file is subject to the 
terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org> + * Copyright (C) 2001 MIPS Technologies, Inc. + * Copyright (C) 2004 Thiemo Seufer + */ +#include <linux/errno.h> +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/irqflags.h> +#include <asm/mipsregs.h> +#include <asm/regdef.h> +#include <asm/stackframe.h> +#include <asm/isadep.h> +#include <asm/sysmips.h> +#include <asm/thread_info.h> +#include <asm/unistd.h> +#include <asm/war.h> +#include <asm/asm-offsets.h> + +/* Highest syscall used of any syscall flavour */ +#define MAX_SYSCALL_NO __NR_O32_Linux + __NR_O32_Linux_syscalls + + .align 5 +NESTED(handle_sys, PT_SIZE, sp) + .set noat + SAVE_SOME + TRACE_IRQS_ON_RELOAD + STI + .set at + + lw t1, PT_EPC(sp) # skip syscall on return + + subu v0, v0, __NR_O32_Linux # check syscall number + sltiu t0, v0, __NR_O32_Linux_syscalls + 1 + addiu t1, 4 # skip to next instruction + sw t1, PT_EPC(sp) + beqz t0, illegal_syscall + + sll t0, v0, 3 + la t1, sys_call_table + addu t1, t0 + lw t2, (t1) # syscall routine + lw t3, 4(t1) # >= 0 if we need stack arguments + beqz t2, illegal_syscall + + sw a3, PT_R26(sp) # save a3 for syscall restarting + bgez t3, stackargs + +stack_done: + lw t0, TI_FLAGS($28) # syscall tracing enabled? + li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + and t0, t1 + bnez t0, syscall_trace_entry # -> yes + + jalr t2 # Do The Real Thing (TM) + + li t0, -EMAXERRNO - 1 # error? + sltu t0, t0, v0 + sw t0, PT_R7(sp) # set error flag + beqz t0, 1f + + lw t1, PT_R2(sp) # syscall number + negu v0 # error + sw t1, PT_R0(sp) # save it for syscall restarting +1: sw v0, PT_R2(sp) # result + +o32_syscall_exit: + local_irq_disable # make sure need_resched and + # signals dont change between + # sampling and return + lw a2, TI_FLAGS($28) # current->work + li t0, _TIF_ALLWORK_MASK + and t0, a2 + bnez t0, o32_syscall_exit_work + + j restore_partial + +o32_syscall_exit_work: + j syscall_exit_work_partial + +/* ------------------------------------------------------------------------ */ + +syscall_trace_entry: + SAVE_STATIC + move s0, t2 + move a0, sp + jal syscall_trace_enter + + move t0, s0 + RESTORE_STATIC + lw a0, PT_R4(sp) # Restore argument registers + lw a1, PT_R5(sp) + lw a2, PT_R6(sp) + lw a3, PT_R7(sp) + jalr t0 + + li t0, -EMAXERRNO - 1 # error? + sltu t0, t0, v0 + sw t0, PT_R7(sp) # set error flag + beqz t0, 1f + + lw t1, PT_R2(sp) # syscall number + negu v0 # error + sw t1, PT_R0(sp) # save it for syscall restarting +1: sw v0, PT_R2(sp) # result + + j syscall_exit + +/* ------------------------------------------------------------------------ */ + + /* + * More than four arguments. Try to deal with it by copying the + * stack arguments from the user stack to the kernel stack. + * This Sucks (TM). + */ +stackargs: + lw t0, PT_R29(sp) # get old user stack pointer + + /* + * We intentionally keep the kernel stack a little below the top of + * userspace so we don't have to do a slower byte accurate check here. + */ + lw t5, TI_ADDR_LIMIT($28) + addu t4, t0, 32 + and t5, t4 + bltz t5, bad_stack # -> sp is bad + + /* Ok, copy the args from the luser stack to the kernel stack. + * t3 is the precomputed number of instruction bytes needed to + * load or store arguments 6-8. 
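+ * (Editor's note, illustrative: t3 was fetched from the syscall table and
+ * is precomputed by the 'sys' macro at the bottom of this file as
+ * (nargs << 2) - (5 << 2); 'subu t1, t3' below then jumps into the middle
+ * of the lw/sw ladder so that only the copies actually needed execute.)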
+ */ + + la t1, 5f # load up to 3 arguments + subu t1, t3 +1: lw t5, 16(t0) # argument #5 from usp + .set push + .set noreorder + .set nomacro + jr t1 + addiu t1, 6f - 5f + +2: lw t8, 28(t0) # argument #8 from usp +3: lw t7, 24(t0) # argument #7 from usp +4: lw t6, 20(t0) # argument #6 from usp +5: jr t1 + sw t5, 16(sp) # argument #5 to ksp + + sw t8, 28(sp) # argument #8 to ksp + sw t7, 24(sp) # argument #7 to ksp + sw t6, 20(sp) # argument #6 to ksp +6: j stack_done # go back + nop + .set pop + + .section __ex_table,"a" + PTR 1b,bad_stack + PTR 2b,bad_stack + PTR 3b,bad_stack + PTR 4b,bad_stack + .previous + + /* + * The stackpointer for a call with more than 4 arguments is bad. + * We probably should handle this case a bit more drastic. + */ +bad_stack: + li v0, EFAULT + sw v0, PT_R2(sp) + li t0, 1 # set error flag + sw t0, PT_R7(sp) + j o32_syscall_exit + + /* + * The system call does not exist in this kernel + */ +illegal_syscall: + li v0, ENOSYS # error + sw v0, PT_R2(sp) + li t0, 1 # set error flag + sw t0, PT_R7(sp) + j o32_syscall_exit + END(handle_sys) + + LEAF(sys_syscall) + subu t0, a0, __NR_O32_Linux # check syscall number + sltiu v0, t0, __NR_O32_Linux_syscalls + 1 + beqz t0, einval # do not recurse + sll t1, t0, 3 + beqz v0, einval + lw t2, sys_call_table(t1) # syscall routine + + /* Some syscalls like execve get their arguments from struct pt_regs + and claim zero arguments in the syscall table. Thus we have to + assume the worst case and shuffle around all potential arguments. + If you want performance, don't use indirect syscalls. */ + + move a0, a1 # shift argument registers + move a1, a2 + move a2, a3 + lw a3, 16(sp) + lw t4, 20(sp) + lw t5, 24(sp) + lw t6, 28(sp) + sw t4, 16(sp) + sw t5, 20(sp) + sw t6, 24(sp) + sw a0, PT_R4(sp) # .. 
and push back a0 - a3, some + sw a1, PT_R5(sp) # syscalls expect them there + sw a2, PT_R6(sp) + sw a3, PT_R7(sp) + sw a3, PT_R26(sp) # update a3 for syscall restarting + jr t2 + /* Unreached */ + +einval: li v0, -ENOSYS + jr ra + END(sys_syscall) + + .macro fifty ptr, nargs, from=1, to=50 + sys \ptr \nargs + .if \to-\from + fifty \ptr,\nargs,"(\from+1)",\to + .endif + .endm + + .macro mille ptr, nargs, from=1, to=20 + fifty \ptr,\nargs + .if \to-\from + mille \ptr,\nargs,"(\from+1)",\to + .endif + .endm + + .macro syscalltable + sys sys_syscall 8 /* 4000 */ + sys sys_exit 1 + sys sys_fork 0 + sys sys_read 3 + sys sys_write 3 + sys sys_open 3 /* 4005 */ + sys sys_close 1 + sys sys_waitpid 3 + sys sys_creat 2 + sys sys_link 2 + sys sys_unlink 1 /* 4010 */ + sys sys_execve 0 + sys sys_chdir 1 + sys sys_time 1 + sys sys_mknod 3 + sys sys_chmod 2 /* 4015 */ + sys sys_lchown 3 + sys sys_ni_syscall 0 + sys sys_ni_syscall 0 /* was sys_stat */ + sys sys_lseek 3 + sys sys_getpid 0 /* 4020 */ + sys sys_mount 5 + sys sys_oldumount 1 + sys sys_setuid 1 + sys sys_getuid 0 + sys sys_stime 1 /* 4025 */ + sys sys_ptrace 4 + sys sys_alarm 1 + sys sys_ni_syscall 0 /* was sys_fstat */ + sys sys_pause 0 + sys sys_utime 2 /* 4030 */ + sys sys_ni_syscall 0 + sys sys_ni_syscall 0 + sys sys_access 2 + sys sys_nice 1 + sys sys_ni_syscall 0 /* 4035 */ + sys sys_sync 0 + sys sys_kill 2 + sys sys_rename 2 + sys sys_mkdir 2 + sys sys_rmdir 1 /* 4040 */ + sys sys_dup 1 + sys sysm_pipe 0 + sys sys_times 1 + sys sys_ni_syscall 0 + sys sys_brk 1 /* 4045 */ + sys sys_setgid 1 + sys sys_getgid 0 + sys sys_ni_syscall 0 /* was signal(2) */ + sys sys_geteuid 0 + sys sys_getegid 0 /* 4050 */ + sys sys_acct 1 + sys sys_umount 2 + sys sys_ni_syscall 0 + sys sys_ioctl 3 + sys sys_fcntl 3 /* 4055 */ + sys sys_ni_syscall 2 + sys sys_setpgid 2 + sys sys_ni_syscall 0 + sys sys_olduname 1 + sys sys_umask 1 /* 4060 */ + sys sys_chroot 1 + sys sys_ustat 2 + sys sys_dup2 2 + sys sys_getppid 0 + sys sys_getpgrp 0 /* 4065 */ + sys sys_setsid 0 + sys sys_sigaction 3 + sys sys_sgetmask 0 + sys sys_ssetmask 1 + sys sys_setreuid 2 /* 4070 */ + sys sys_setregid 2 + sys sys_sigsuspend 0 + sys sys_sigpending 1 + sys sys_sethostname 2 + sys sys_setrlimit 2 /* 4075 */ + sys sys_getrlimit 2 + sys sys_getrusage 2 + sys sys_gettimeofday 2 + sys sys_settimeofday 2 + sys sys_getgroups 2 /* 4080 */ + sys sys_setgroups 2 + sys sys_ni_syscall 0 /* old_select */ + sys sys_symlink 2 + sys sys_ni_syscall 0 /* was sys_lstat */ + sys sys_readlink 3 /* 4085 */ + sys sys_uselib 1 + sys sys_swapon 2 + sys sys_reboot 3 + sys sys_old_readdir 3 + sys sys_mips_mmap 6 /* 4090 */ + sys sys_munmap 2 + sys sys_truncate 2 + sys sys_ftruncate 2 + sys sys_fchmod 2 + sys sys_fchown 3 /* 4095 */ + sys sys_getpriority 2 + sys sys_setpriority 3 + sys sys_ni_syscall 0 + sys sys_statfs 2 + sys sys_fstatfs 2 /* 4100 */ + sys sys_ni_syscall 0 /* was ioperm(2) */ + sys sys_socketcall 2 + sys sys_syslog 3 + sys sys_setitimer 3 + sys sys_getitimer 2 /* 4105 */ + sys sys_newstat 2 + sys sys_newlstat 2 + sys sys_newfstat 2 + sys sys_uname 1 + sys sys_ni_syscall 0 /* 4110 was iopl(2) */ + sys sys_vhangup 0 + sys sys_ni_syscall 0 /* was sys_idle() */ + sys sys_ni_syscall 0 /* was sys_vm86 */ + sys sys_wait4 4 + sys sys_swapoff 1 /* 4115 */ + sys sys_sysinfo 1 + sys sys_ipc 6 + sys sys_fsync 1 + sys sys_sigreturn 0 + sys sys_clone 0 /* 4120 */ + sys sys_setdomainname 2 + sys sys_newuname 1 + sys sys_ni_syscall 0 /* sys_modify_ldt */ + sys sys_adjtimex 1 + sys sys_mprotect 3 /* 4125 */ + sys 
sys_sigprocmask 3 + sys sys_ni_syscall 0 /* was create_module */ + sys sys_init_module 5 + sys sys_delete_module 1 + sys sys_ni_syscall 0 /* 4130 was get_kernel_syms */ + sys sys_quotactl 4 + sys sys_getpgid 1 + sys sys_fchdir 1 + sys sys_bdflush 2 + sys sys_sysfs 3 /* 4135 */ + sys sys_personality 1 + sys sys_ni_syscall 0 /* for afs_syscall */ + sys sys_setfsuid 1 + sys sys_setfsgid 1 + sys sys_llseek 5 /* 4140 */ + sys sys_getdents 3 + sys sys_select 5 + sys sys_flock 2 + sys sys_msync 3 + sys sys_readv 3 /* 4145 */ + sys sys_writev 3 + sys sys_cacheflush 3 + sys sys_cachectl 3 + sys sys_sysmips 4 + sys sys_ni_syscall 0 /* 4150 */ + sys sys_getsid 1 + sys sys_fdatasync 1 + sys sys_sysctl 1 + sys sys_mlock 2 + sys sys_munlock 2 /* 4155 */ + sys sys_mlockall 1 + sys sys_munlockall 0 + sys sys_sched_setparam 2 + sys sys_sched_getparam 2 + sys sys_sched_setscheduler 3 /* 4160 */ + sys sys_sched_getscheduler 1 + sys sys_sched_yield 0 + sys sys_sched_get_priority_max 1 + sys sys_sched_get_priority_min 1 + sys sys_sched_rr_get_interval 2 /* 4165 */ + sys sys_nanosleep, 2 + sys sys_mremap, 5 + sys sys_accept 3 + sys sys_bind 3 + sys sys_connect 3 /* 4170 */ + sys sys_getpeername 3 + sys sys_getsockname 3 + sys sys_getsockopt 5 + sys sys_listen 2 + sys sys_recv 4 /* 4175 */ + sys sys_recvfrom 6 + sys sys_recvmsg 3 + sys sys_send 4 + sys sys_sendmsg 3 + sys sys_sendto 6 /* 4180 */ + sys sys_setsockopt 5 + sys sys_shutdown 2 + sys sys_socket 3 + sys sys_socketpair 4 + sys sys_setresuid 3 /* 4185 */ + sys sys_getresuid 3 + sys sys_ni_syscall 0 /* was sys_query_module */ + sys sys_poll 3 + sys sys_ni_syscall 0 /* was nfsservctl */ + sys sys_setresgid 3 /* 4190 */ + sys sys_getresgid 3 + sys sys_prctl 5 + sys sys_rt_sigreturn 0 + sys sys_rt_sigaction 4 + sys sys_rt_sigprocmask 4 /* 4195 */ + sys sys_rt_sigpending 2 + sys sys_rt_sigtimedwait 4 + sys sys_rt_sigqueueinfo 3 + sys sys_rt_sigsuspend 0 + sys sys_pread64 6 /* 4200 */ + sys sys_pwrite64 6 + sys sys_chown 3 + sys sys_getcwd 2 + sys sys_capget 2 + sys sys_capset 2 /* 4205 */ + sys sys_sigaltstack 0 + sys sys_sendfile 4 + sys sys_ni_syscall 0 + sys sys_ni_syscall 0 + sys sys_mips_mmap2 6 /* 4210 */ + sys sys_truncate64 4 + sys sys_ftruncate64 4 + sys sys_stat64 2 + sys sys_lstat64 2 + sys sys_fstat64 2 /* 4215 */ + sys sys_pivot_root 2 + sys sys_mincore 3 + sys sys_madvise 3 + sys sys_getdents64 3 + sys sys_fcntl64 3 /* 4220 */ + sys sys_ni_syscall 0 + sys sys_gettid 0 + sys sys_readahead 5 + sys sys_setxattr 5 + sys sys_lsetxattr 5 /* 4225 */ + sys sys_fsetxattr 5 + sys sys_getxattr 4 + sys sys_lgetxattr 4 + sys sys_fgetxattr 4 + sys sys_listxattr 3 /* 4230 */ + sys sys_llistxattr 3 + sys sys_flistxattr 3 + sys sys_removexattr 2 + sys sys_lremovexattr 2 + sys sys_fremovexattr 2 /* 4235 */ + sys sys_tkill 2 + sys sys_sendfile64 5 + sys sys_futex 6 +#ifdef CONFIG_MIPS_MT_FPAFF + /* + * For FPU affinity scheduling on MIPS MT processors, we need to + * intercept sys_sched_xxxaffinity() calls until we get a proper hook + * in kernel/sched.c. Considered only temporary we only support these + * hooks for the 32-bit kernel - there is no MIPS64 MT processor atm. 
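+ * (Editor's note: with CONFIG_MIPS_MT_FPAFF the two table slots below are
+ * routed to the mipsmt_ wrappers instead of the plain
+ * sys_sched_{set,get}affinity entries.)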
+ */ + sys mipsmt_sys_sched_setaffinity 3 + sys mipsmt_sys_sched_getaffinity 3 +#else + sys sys_sched_setaffinity 3 + sys sys_sched_getaffinity 3 /* 4240 */ +#endif /* CONFIG_MIPS_MT_FPAFF */ + sys sys_io_setup 2 + sys sys_io_destroy 1 + sys sys_io_getevents 5 + sys sys_io_submit 3 + sys sys_io_cancel 3 /* 4245 */ + sys sys_exit_group 1 + sys sys_lookup_dcookie 4 + sys sys_epoll_create 1 + sys sys_epoll_ctl 4 + sys sys_epoll_wait 4 /* 4250 */ + sys sys_remap_file_pages 5 + sys sys_set_tid_address 1 + sys sys_restart_syscall 0 + sys sys_fadvise64_64 7 + sys sys_statfs64 3 /* 4255 */ + sys sys_fstatfs64 2 + sys sys_timer_create 3 + sys sys_timer_settime 4 + sys sys_timer_gettime 2 + sys sys_timer_getoverrun 1 /* 4260 */ + sys sys_timer_delete 1 + sys sys_clock_settime 2 + sys sys_clock_gettime 2 + sys sys_clock_getres 2 + sys sys_clock_nanosleep 4 /* 4265 */ + sys sys_tgkill 3 + sys sys_utimes 2 + sys sys_mbind 4 + sys sys_ni_syscall 0 /* sys_get_mempolicy */ + sys sys_ni_syscall 0 /* 4270 sys_set_mempolicy */ + sys sys_mq_open 4 + sys sys_mq_unlink 1 + sys sys_mq_timedsend 5 + sys sys_mq_timedreceive 5 + sys sys_mq_notify 2 /* 4275 */ + sys sys_mq_getsetattr 3 + sys sys_ni_syscall 0 /* sys_vserver */ + sys sys_waitid 5 + sys sys_ni_syscall 0 /* available, was setaltroot */ + sys sys_add_key 5 /* 4280 */ + sys sys_request_key 4 + sys sys_keyctl 5 + sys sys_set_thread_area 1 + sys sys_inotify_init 0 + sys sys_inotify_add_watch 3 /* 4285 */ + sys sys_inotify_rm_watch 2 + sys sys_migrate_pages 4 + sys sys_openat 4 + sys sys_mkdirat 3 + sys sys_mknodat 4 /* 4290 */ + sys sys_fchownat 5 + sys sys_futimesat 3 + sys sys_fstatat64 4 + sys sys_unlinkat 3 + sys sys_renameat 4 /* 4295 */ + sys sys_linkat 5 + sys sys_symlinkat 3 + sys sys_readlinkat 4 + sys sys_fchmodat 3 + sys sys_faccessat 3 /* 4300 */ + sys sys_pselect6 6 + sys sys_ppoll 5 + sys sys_unshare 1 + sys sys_splice 6 + sys sys_sync_file_range 7 /* 4305 */ + sys sys_tee 4 + sys sys_vmsplice 4 + sys sys_move_pages 6 + sys sys_set_robust_list 2 + sys sys_get_robust_list 3 /* 4310 */ + sys sys_kexec_load 4 + sys sys_getcpu 3 + sys sys_epoll_pwait 6 + sys sys_ioprio_set 3 + sys sys_ioprio_get 2 /* 4315 */ + sys sys_utimensat 4 + sys sys_signalfd 3 + sys sys_ni_syscall 0 /* was timerfd */ + sys sys_eventfd 1 + sys sys_fallocate 6 /* 4320 */ + sys sys_timerfd_create 2 + sys sys_timerfd_gettime 2 + sys sys_timerfd_settime 4 + sys sys_signalfd4 4 + sys sys_eventfd2 2 /* 4325 */ + sys sys_epoll_create1 1 + sys sys_dup3 3 + sys sys_pipe2 2 + sys sys_inotify_init1 1 + sys sys_preadv 6 /* 4330 */ + sys sys_pwritev 6 + sys sys_rt_tgsigqueueinfo 4 + sys sys_perf_event_open 5 + sys sys_accept4 4 + sys sys_recvmmsg 5 /* 4335 */ + sys sys_fanotify_init 2 + sys sys_fanotify_mark 6 + sys sys_prlimit64 4 + sys sys_name_to_handle_at 5 + sys sys_open_by_handle_at 3 /* 4340 */ + sys sys_clock_adjtime 2 + sys sys_syncfs 1 + sys sys_sendmmsg 4 + sys sys_setns 2 + sys sys_process_vm_readv 6 /* 4345 */ + sys sys_process_vm_writev 6 + .endm + + /* We pre-compute the number of _instruction_ bytes needed to + load or store the arguments 6-8. Negative values are ignored. */ + + .macro sys function, nargs + PTR \function + LONG (\nargs << 2) - (5 << 2) + .endm + + .align 3 + .type sys_call_table,@object +EXPORT(sys_call_table) + syscalltable + .size sys_call_table, . 
- sys_call_table diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S new file mode 100644 index 00000000..3b5a5e9a --- /dev/null +++ b/arch/mips/kernel/scall64-64.S @@ -0,0 +1,435 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + */ +#include <linux/errno.h> +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/irqflags.h> +#include <asm/mipsregs.h> +#include <asm/regdef.h> +#include <asm/stackframe.h> +#include <asm/asm-offsets.h> +#include <asm/sysmips.h> +#include <asm/thread_info.h> +#include <asm/unistd.h> +#include <asm/war.h> + +#ifndef CONFIG_BINFMT_ELF32 +/* Neither O32 nor N32, so define handle_sys here */ +#define handle_sys64 handle_sys +#endif + + .align 5 +NESTED(handle_sys64, PT_SIZE, sp) +#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) + /* + * When 32-bit compatibility is configured scall_o32.S + * already did this. + */ + .set noat + SAVE_SOME + TRACE_IRQS_ON_RELOAD + STI + .set at +#endif + + dsubu t0, v0, __NR_64_Linux # check syscall number + sltiu t0, t0, __NR_64_Linux_syscalls + 1 +#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) + ld t1, PT_EPC(sp) # skip syscall on return + daddiu t1, 4 # skip to next instruction + sd t1, PT_EPC(sp) +#endif + beqz t0, illegal_syscall + + dsll t0, v0, 3 # offset into table + ld t2, (sys_call_table - (__NR_64_Linux * 8))(t0) + # syscall routine + + sd a3, PT_R26(sp) # save a3 for syscall restarting + + li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? + and t0, t1, t0 + bnez t0, syscall_trace_entry + + jalr t2 # Do The Real Thing (TM) + + li t0, -EMAXERRNO - 1 # error? + sltu t0, t0, v0 + sd t0, PT_R7(sp) # set error flag + beqz t0, 1f + + ld t1, PT_R2(sp) # syscall number + dnegu v0 # error + sd t1, PT_R0(sp) # save it for syscall restarting +1: sd v0, PT_R2(sp) # result + +n64_syscall_exit: + local_irq_disable # make sure need_resched and + # signals dont change between + # sampling and return + LONG_L a2, TI_FLAGS($28) # current->work + li t0, _TIF_ALLWORK_MASK + and t0, a2, t0 + bnez t0, n64_syscall_exit_work + + j restore_partial + +n64_syscall_exit_work: + j syscall_exit_work_partial + +/* ------------------------------------------------------------------------ */ + +syscall_trace_entry: + SAVE_STATIC + move s0, t2 + move a0, sp + jal syscall_trace_enter + + move t0, s0 + RESTORE_STATIC + ld a0, PT_R4(sp) # Restore argument registers + ld a1, PT_R5(sp) + ld a2, PT_R6(sp) + ld a3, PT_R7(sp) + ld a4, PT_R8(sp) + ld a5, PT_R9(sp) + jalr t0 + + li t0, -EMAXERRNO - 1 # error? + sltu t0, t0, v0 + sd t0, PT_R7(sp) # set error flag + beqz t0, 1f + + ld t1, PT_R2(sp) # syscall number + dnegu v0 # error + sd t1, PT_R0(sp) # save it for syscall restarting +1: sd v0, PT_R2(sp) # result + + j syscall_exit + +illegal_syscall: + /* This also isn't a 64-bit syscall, throw an error. 
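+ * (Editor's note, illustrative: per the MIPS syscall convention the
+ * positive errno goes into v0/PT_R2 while PT_R7 (a3) is set to 1 as the
+ * error flag, mirroring the error path after the jalr above.)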
*/ + li v0, ENOSYS # error + sd v0, PT_R2(sp) + li t0, 1 # set error flag + sd t0, PT_R7(sp) + j n64_syscall_exit + END(handle_sys64) + + .align 3 +sys_call_table: + PTR sys_read /* 5000 */ + PTR sys_write + PTR sys_open + PTR sys_close + PTR sys_newstat + PTR sys_newfstat /* 5005 */ + PTR sys_newlstat + PTR sys_poll + PTR sys_lseek + PTR sys_mips_mmap + PTR sys_mprotect /* 5010 */ + PTR sys_munmap + PTR sys_brk + PTR sys_rt_sigaction + PTR sys_rt_sigprocmask + PTR sys_ioctl /* 5015 */ + PTR sys_pread64 + PTR sys_pwrite64 + PTR sys_readv + PTR sys_writev + PTR sys_access /* 5020 */ + PTR sysm_pipe + PTR sys_select + PTR sys_sched_yield + PTR sys_mremap + PTR sys_msync /* 5025 */ + PTR sys_mincore + PTR sys_madvise + PTR sys_shmget + PTR sys_shmat + PTR sys_shmctl /* 5030 */ + PTR sys_dup + PTR sys_dup2 + PTR sys_pause + PTR sys_nanosleep + PTR sys_getitimer /* 5035 */ + PTR sys_setitimer + PTR sys_alarm + PTR sys_getpid + PTR sys_sendfile64 + PTR sys_socket /* 5040 */ + PTR sys_connect + PTR sys_accept + PTR sys_sendto + PTR sys_recvfrom + PTR sys_sendmsg /* 5045 */ + PTR sys_recvmsg + PTR sys_shutdown + PTR sys_bind + PTR sys_listen + PTR sys_getsockname /* 5050 */ + PTR sys_getpeername + PTR sys_socketpair + PTR sys_setsockopt + PTR sys_getsockopt + PTR sys_clone /* 5055 */ + PTR sys_fork + PTR sys_execve + PTR sys_exit + PTR sys_wait4 + PTR sys_kill /* 5060 */ + PTR sys_newuname + PTR sys_semget + PTR sys_semop + PTR sys_semctl + PTR sys_shmdt /* 5065 */ + PTR sys_msgget + PTR sys_msgsnd + PTR sys_msgrcv + PTR sys_msgctl + PTR sys_fcntl /* 5070 */ + PTR sys_flock + PTR sys_fsync + PTR sys_fdatasync + PTR sys_truncate + PTR sys_ftruncate /* 5075 */ + PTR sys_getdents + PTR sys_getcwd + PTR sys_chdir + PTR sys_fchdir + PTR sys_rename /* 5080 */ + PTR sys_mkdir + PTR sys_rmdir + PTR sys_creat + PTR sys_link + PTR sys_unlink /* 5085 */ + PTR sys_symlink + PTR sys_readlink + PTR sys_chmod + PTR sys_fchmod + PTR sys_chown /* 5090 */ + PTR sys_fchown + PTR sys_lchown + PTR sys_umask + PTR sys_gettimeofday + PTR sys_getrlimit /* 5095 */ + PTR sys_getrusage + PTR sys_sysinfo + PTR sys_times + PTR sys_ptrace + PTR sys_getuid /* 5100 */ + PTR sys_syslog + PTR sys_getgid + PTR sys_setuid + PTR sys_setgid + PTR sys_geteuid /* 5105 */ + PTR sys_getegid + PTR sys_setpgid + PTR sys_getppid + PTR sys_getpgrp + PTR sys_setsid /* 5110 */ + PTR sys_setreuid + PTR sys_setregid + PTR sys_getgroups + PTR sys_setgroups + PTR sys_setresuid /* 5115 */ + PTR sys_getresuid + PTR sys_setresgid + PTR sys_getresgid + PTR sys_getpgid + PTR sys_setfsuid /* 5120 */ + PTR sys_setfsgid + PTR sys_getsid + PTR sys_capget + PTR sys_capset + PTR sys_rt_sigpending /* 5125 */ + PTR sys_rt_sigtimedwait + PTR sys_rt_sigqueueinfo + PTR sys_rt_sigsuspend + PTR sys_sigaltstack + PTR sys_utime /* 5130 */ + PTR sys_mknod + PTR sys_personality + PTR sys_ustat + PTR sys_statfs + PTR sys_fstatfs /* 5135 */ + PTR sys_sysfs + PTR sys_getpriority + PTR sys_setpriority + PTR sys_sched_setparam + PTR sys_sched_getparam /* 5140 */ + PTR sys_sched_setscheduler + PTR sys_sched_getscheduler + PTR sys_sched_get_priority_max + PTR sys_sched_get_priority_min + PTR sys_sched_rr_get_interval /* 5145 */ + PTR sys_mlock + PTR sys_munlock + PTR sys_mlockall + PTR sys_munlockall + PTR sys_vhangup /* 5150 */ + PTR sys_pivot_root + PTR sys_sysctl + PTR sys_prctl + PTR sys_adjtimex + PTR sys_setrlimit /* 5155 */ + PTR sys_chroot + PTR sys_sync + PTR sys_acct + PTR sys_settimeofday + PTR sys_mount /* 5160 */ + PTR sys_umount + PTR sys_swapon + PTR sys_swapoff 
+ PTR sys_reboot + PTR sys_sethostname /* 5165 */ + PTR sys_setdomainname + PTR sys_ni_syscall /* was create_module */ + PTR sys_init_module + PTR sys_delete_module + PTR sys_ni_syscall /* 5170, was get_kernel_syms */ + PTR sys_ni_syscall /* was query_module */ + PTR sys_quotactl + PTR sys_ni_syscall /* was nfsservctl */ + PTR sys_ni_syscall /* res. for getpmsg */ + PTR sys_ni_syscall /* 5175 for putpmsg */ + PTR sys_ni_syscall /* res. for afs_syscall */ + PTR sys_ni_syscall /* res. for security */ + PTR sys_gettid + PTR sys_readahead + PTR sys_setxattr /* 5180 */ + PTR sys_lsetxattr + PTR sys_fsetxattr + PTR sys_getxattr + PTR sys_lgetxattr + PTR sys_fgetxattr /* 5185 */ + PTR sys_listxattr + PTR sys_llistxattr + PTR sys_flistxattr + PTR sys_removexattr + PTR sys_lremovexattr /* 5190 */ + PTR sys_fremovexattr + PTR sys_tkill + PTR sys_ni_syscall + PTR sys_futex + PTR sys_sched_setaffinity /* 5195 */ + PTR sys_sched_getaffinity + PTR sys_cacheflush + PTR sys_cachectl + PTR sys_sysmips + PTR sys_io_setup /* 5200 */ + PTR sys_io_destroy + PTR sys_io_getevents + PTR sys_io_submit + PTR sys_io_cancel + PTR sys_exit_group /* 5205 */ + PTR sys_lookup_dcookie + PTR sys_epoll_create + PTR sys_epoll_ctl + PTR sys_epoll_wait + PTR sys_remap_file_pages /* 5210 */ + PTR sys_rt_sigreturn + PTR sys_set_tid_address + PTR sys_restart_syscall + PTR sys_semtimedop + PTR sys_fadvise64_64 /* 5215 */ + PTR sys_timer_create + PTR sys_timer_settime + PTR sys_timer_gettime + PTR sys_timer_getoverrun + PTR sys_timer_delete /* 5220 */ + PTR sys_clock_settime + PTR sys_clock_gettime + PTR sys_clock_getres + PTR sys_clock_nanosleep + PTR sys_tgkill /* 5225 */ + PTR sys_utimes + PTR sys_mbind + PTR sys_ni_syscall /* sys_get_mempolicy */ + PTR sys_ni_syscall /* sys_set_mempolicy */ + PTR sys_mq_open /* 5230 */ + PTR sys_mq_unlink + PTR sys_mq_timedsend + PTR sys_mq_timedreceive + PTR sys_mq_notify + PTR sys_mq_getsetattr /* 5235 */ + PTR sys_ni_syscall /* sys_vserver */ + PTR sys_waitid + PTR sys_ni_syscall /* available, was setaltroot */ + PTR sys_add_key + PTR sys_request_key /* 5240 */ + PTR sys_keyctl + PTR sys_set_thread_area + PTR sys_inotify_init + PTR sys_inotify_add_watch + PTR sys_inotify_rm_watch /* 5245 */ + PTR sys_migrate_pages + PTR sys_openat + PTR sys_mkdirat + PTR sys_mknodat + PTR sys_fchownat /* 5250 */ + PTR sys_futimesat + PTR sys_newfstatat + PTR sys_unlinkat + PTR sys_renameat + PTR sys_linkat /* 5255 */ + PTR sys_symlinkat + PTR sys_readlinkat + PTR sys_fchmodat + PTR sys_faccessat + PTR sys_pselect6 /* 5260 */ + PTR sys_ppoll + PTR sys_unshare + PTR sys_splice + PTR sys_sync_file_range + PTR sys_tee /* 5265 */ + PTR sys_vmsplice + PTR sys_move_pages + PTR sys_set_robust_list + PTR sys_get_robust_list + PTR sys_kexec_load /* 5270 */ + PTR sys_getcpu + PTR sys_epoll_pwait + PTR sys_ioprio_set + PTR sys_ioprio_get + PTR sys_utimensat /* 5275 */ + PTR sys_signalfd + PTR sys_ni_syscall /* was timerfd */ + PTR sys_eventfd + PTR sys_fallocate + PTR sys_timerfd_create /* 5280 */ + PTR sys_timerfd_gettime + PTR sys_timerfd_settime + PTR sys_signalfd4 + PTR sys_eventfd2 + PTR sys_epoll_create1 /* 5285 */ + PTR sys_dup3 + PTR sys_pipe2 + PTR sys_inotify_init1 + PTR sys_preadv + PTR sys_pwritev /* 5290 */ + PTR sys_rt_tgsigqueueinfo + PTR sys_perf_event_open + PTR sys_accept4 + PTR sys_recvmmsg + PTR sys_fanotify_init /* 5295 */ + PTR sys_fanotify_mark + PTR sys_prlimit64 + PTR sys_name_to_handle_at + PTR sys_open_by_handle_at + PTR sys_clock_adjtime /* 5300 */ + PTR sys_syncfs + PTR sys_sendmmsg + PTR 
sys_setns + PTR sys_process_vm_readv + PTR sys_process_vm_writev /* 5305 */ + .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S new file mode 100644 index 00000000..6be6f702 --- /dev/null +++ b/arch/mips/kernel/scall64-n32.S @@ -0,0 +1,435 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + */ +#include <linux/errno.h> +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/irqflags.h> +#include <asm/mipsregs.h> +#include <asm/regdef.h> +#include <asm/stackframe.h> +#include <asm/thread_info.h> +#include <asm/unistd.h> + +/* This duplicates the definition from <linux/sched.h> */ +#define PT_TRACESYS 0x00000002 /* tracing system calls */ + +/* This duplicates the definition from <asm/signal.h> */ +#define SIGILL 4 /* Illegal instruction (ANSI). */ + +#ifndef CONFIG_MIPS32_O32 +/* No O32, so define handle_sys here */ +#define handle_sysn32 handle_sys +#endif + + .align 5 +NESTED(handle_sysn32, PT_SIZE, sp) +#ifndef CONFIG_MIPS32_O32 + .set noat + SAVE_SOME + TRACE_IRQS_ON_RELOAD + STI + .set at +#endif + + dsubu t0, v0, __NR_N32_Linux # check syscall number + sltiu t0, t0, __NR_N32_Linux_syscalls + 1 + +#ifndef CONFIG_MIPS32_O32 + ld t1, PT_EPC(sp) # skip syscall on return + daddiu t1, 4 # skip to next instruction + sd t1, PT_EPC(sp) +#endif + beqz t0, not_n32_scall + + dsll t0, v0, 3 # offset into table + ld t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0) + + sd a3, PT_R26(sp) # save a3 for syscall restarting + + li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? + and t0, t1, t0 + bnez t0, n32_syscall_trace_entry + + jalr t2 # Do The Real Thing (TM) + + li t0, -EMAXERRNO - 1 # error? + sltu t0, t0, v0 + sd t0, PT_R7(sp) # set error flag + beqz t0, 1f + + ld t1, PT_R2(sp) # syscall number + dnegu v0 # error + sd t1, PT_R0(sp) # save it for syscall restarting +1: sd v0, PT_R2(sp) # result + + local_irq_disable # make sure need_resched and + # signals dont change between + # sampling and return + LONG_L a2, TI_FLAGS($28) # current->work + li t0, _TIF_ALLWORK_MASK + and t0, a2, t0 + bnez t0, n32_syscall_exit_work + + j restore_partial + +n32_syscall_exit_work: + j syscall_exit_work_partial + +/* ------------------------------------------------------------------------ */ + +n32_syscall_trace_entry: + SAVE_STATIC + move s0, t2 + move a0, sp + jal syscall_trace_enter + + move t0, s0 + RESTORE_STATIC + ld a0, PT_R4(sp) # Restore argument registers + ld a1, PT_R5(sp) + ld a2, PT_R6(sp) + ld a3, PT_R7(sp) + ld a4, PT_R8(sp) + ld a5, PT_R9(sp) + jalr t0 + + li t0, -EMAXERRNO - 1 # error? + sltu t0, t0, v0 + sd t0, PT_R7(sp) # set error flag + beqz t0, 1f + + ld t1, PT_R2(sp) # syscall number + dnegu v0 # error + sd t1, PT_R0(sp) # save it for syscall restarting +1: sd v0, PT_R2(sp) # result + + j syscall_exit + +not_n32_scall: + /* This is not an n32 compatibility syscall, pass it on to + the n64 syscall handlers. 
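+ * (Editor's note, illustrative: any number outside the range
+ * __NR_N32_Linux .. __NR_N32_Linux + __NR_N32_Linux_syscalls simply
+ * falls through to the native 64-bit table via handle_sys64.)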
*/ + j handle_sys64 + + END(handle_sysn32) + +EXPORT(sysn32_call_table) + PTR sys_read /* 6000 */ + PTR sys_write + PTR sys_open + PTR sys_close + PTR sys_newstat + PTR sys_newfstat /* 6005 */ + PTR sys_newlstat + PTR sys_poll + PTR sys_lseek + PTR sys_mips_mmap + PTR sys_mprotect /* 6010 */ + PTR sys_munmap + PTR sys_brk + PTR sys_32_rt_sigaction + PTR sys_32_rt_sigprocmask + PTR compat_sys_ioctl /* 6015 */ + PTR sys_pread64 + PTR sys_pwrite64 + PTR compat_sys_readv + PTR compat_sys_writev + PTR sys_access /* 6020 */ + PTR sysm_pipe + PTR compat_sys_select + PTR sys_sched_yield + PTR sys_mremap + PTR sys_msync /* 6025 */ + PTR sys_mincore + PTR sys_madvise + PTR sys_shmget + PTR sys_shmat + PTR compat_sys_shmctl /* 6030 */ + PTR sys_dup + PTR sys_dup2 + PTR sys_pause + PTR compat_sys_nanosleep + PTR compat_sys_getitimer /* 6035 */ + PTR compat_sys_setitimer + PTR sys_alarm + PTR sys_getpid + PTR sys_32_sendfile + PTR sys_socket /* 6040 */ + PTR sys_connect + PTR sys_accept + PTR sys_sendto + PTR compat_sys_recvfrom + PTR compat_sys_sendmsg /* 6045 */ + PTR compat_sys_recvmsg + PTR sys_shutdown + PTR sys_bind + PTR sys_listen + PTR sys_getsockname /* 6050 */ + PTR sys_getpeername + PTR sys_socketpair + PTR compat_sys_setsockopt + PTR sys_getsockopt + PTR sys_clone /* 6055 */ + PTR sys_fork + PTR sys32_execve + PTR sys_exit + PTR compat_sys_wait4 + PTR sys_kill /* 6060 */ + PTR sys_newuname + PTR sys_semget + PTR sys_semop + PTR sys_n32_semctl + PTR sys_shmdt /* 6065 */ + PTR sys_msgget + PTR sys_n32_msgsnd + PTR sys_n32_msgrcv + PTR compat_sys_msgctl + PTR compat_sys_fcntl /* 6070 */ + PTR sys_flock + PTR sys_fsync + PTR sys_fdatasync + PTR sys_truncate + PTR sys_ftruncate /* 6075 */ + PTR compat_sys_getdents + PTR sys_getcwd + PTR sys_chdir + PTR sys_fchdir + PTR sys_rename /* 6080 */ + PTR sys_mkdir + PTR sys_rmdir + PTR sys_creat + PTR sys_link + PTR sys_unlink /* 6085 */ + PTR sys_symlink + PTR sys_readlink + PTR sys_chmod + PTR sys_fchmod + PTR sys_chown /* 6090 */ + PTR sys_fchown + PTR sys_lchown + PTR sys_umask + PTR compat_sys_gettimeofday + PTR compat_sys_getrlimit /* 6095 */ + PTR compat_sys_getrusage + PTR compat_sys_sysinfo + PTR compat_sys_times + PTR compat_sys_ptrace + PTR sys_getuid /* 6100 */ + PTR sys_syslog + PTR sys_getgid + PTR sys_setuid + PTR sys_setgid + PTR sys_geteuid /* 6105 */ + PTR sys_getegid + PTR sys_setpgid + PTR sys_getppid + PTR sys_getpgrp + PTR sys_setsid /* 6110 */ + PTR sys_setreuid + PTR sys_setregid + PTR sys_getgroups + PTR sys_setgroups + PTR sys_setresuid /* 6115 */ + PTR sys_getresuid + PTR sys_setresgid + PTR sys_getresgid + PTR sys_getpgid + PTR sys_setfsuid /* 6120 */ + PTR sys_setfsgid + PTR sys_getsid + PTR sys_capget + PTR sys_capset + PTR sys_32_rt_sigpending /* 6125 */ + PTR compat_sys_rt_sigtimedwait + PTR sys_32_rt_sigqueueinfo + PTR sysn32_rt_sigsuspend + PTR sys32_sigaltstack + PTR compat_sys_utime /* 6130 */ + PTR sys_mknod + PTR sys_32_personality + PTR compat_sys_ustat + PTR compat_sys_statfs + PTR compat_sys_fstatfs /* 6135 */ + PTR sys_sysfs + PTR sys_getpriority + PTR sys_setpriority + PTR sys_sched_setparam + PTR sys_sched_getparam /* 6140 */ + PTR sys_sched_setscheduler + PTR sys_sched_getscheduler + PTR sys_sched_get_priority_max + PTR sys_sched_get_priority_min + PTR sys_32_sched_rr_get_interval /* 6145 */ + PTR sys_mlock + PTR sys_munlock + PTR sys_mlockall + PTR sys_munlockall + PTR sys_vhangup /* 6150 */ + PTR sys_pivot_root + PTR compat_sys_sysctl + PTR sys_prctl + PTR compat_sys_adjtimex + PTR compat_sys_setrlimit /* 
6155 */ + PTR sys_chroot + PTR sys_sync + PTR sys_acct + PTR compat_sys_settimeofday + PTR compat_sys_mount /* 6160 */ + PTR sys_umount + PTR sys_swapon + PTR sys_swapoff + PTR sys_reboot + PTR sys_sethostname /* 6165 */ + PTR sys_setdomainname + PTR sys_ni_syscall /* was create_module */ + PTR sys_init_module + PTR sys_delete_module + PTR sys_ni_syscall /* 6170, was get_kernel_syms */ + PTR sys_ni_syscall /* was query_module */ + PTR sys_quotactl + PTR sys_ni_syscall /* was nfsservctl */ + PTR sys_ni_syscall /* res. for getpmsg */ + PTR sys_ni_syscall /* 6175 for putpmsg */ + PTR sys_ni_syscall /* res. for afs_syscall */ + PTR sys_ni_syscall /* res. for security */ + PTR sys_gettid + PTR sys_readahead + PTR sys_setxattr /* 6180 */ + PTR sys_lsetxattr + PTR sys_fsetxattr + PTR sys_getxattr + PTR sys_lgetxattr + PTR sys_fgetxattr /* 6185 */ + PTR sys_listxattr + PTR sys_llistxattr + PTR sys_flistxattr + PTR sys_removexattr + PTR sys_lremovexattr /* 6190 */ + PTR sys_fremovexattr + PTR sys_tkill + PTR sys_ni_syscall + PTR sys_32_futex + PTR compat_sys_sched_setaffinity /* 6195 */ + PTR compat_sys_sched_getaffinity + PTR sys_cacheflush + PTR sys_cachectl + PTR sys_sysmips + PTR compat_sys_io_setup /* 6200 */ + PTR sys_io_destroy + PTR compat_sys_io_getevents + PTR compat_sys_io_submit + PTR sys_io_cancel + PTR sys_exit_group /* 6205 */ + PTR sys_lookup_dcookie + PTR sys_epoll_create + PTR sys_epoll_ctl + PTR sys_epoll_wait + PTR sys_remap_file_pages /* 6210 */ + PTR sysn32_rt_sigreturn + PTR compat_sys_fcntl64 + PTR sys_set_tid_address + PTR sys_restart_syscall + PTR compat_sys_semtimedop /* 6215 */ + PTR sys_fadvise64_64 + PTR compat_sys_statfs64 + PTR compat_sys_fstatfs64 + PTR sys_sendfile64 + PTR compat_sys_timer_create /* 6220 */ + PTR compat_sys_timer_settime + PTR compat_sys_timer_gettime + PTR sys_timer_getoverrun + PTR sys_timer_delete + PTR compat_sys_clock_settime /* 6225 */ + PTR compat_sys_clock_gettime + PTR compat_sys_clock_getres + PTR compat_sys_clock_nanosleep + PTR sys_tgkill + PTR compat_sys_utimes /* 6230 */ + PTR sys_ni_syscall /* sys_mbind */ + PTR sys_ni_syscall /* sys_get_mempolicy */ + PTR sys_ni_syscall /* sys_set_mempolicy */ + PTR compat_sys_mq_open + PTR sys_mq_unlink /* 6235 */ + PTR compat_sys_mq_timedsend + PTR compat_sys_mq_timedreceive + PTR compat_sys_mq_notify + PTR compat_sys_mq_getsetattr + PTR sys_ni_syscall /* 6240, sys_vserver */ + PTR compat_sys_waitid + PTR sys_ni_syscall /* available, was setaltroot */ + PTR sys_add_key + PTR sys_request_key + PTR sys_keyctl /* 6245 */ + PTR sys_set_thread_area + PTR sys_inotify_init + PTR sys_inotify_add_watch + PTR sys_inotify_rm_watch + PTR sys_migrate_pages /* 6250 */ + PTR sys_openat + PTR sys_mkdirat + PTR sys_mknodat + PTR sys_fchownat + PTR compat_sys_futimesat /* 6255 */ + PTR sys_newfstatat + PTR sys_unlinkat + PTR sys_renameat + PTR sys_linkat + PTR sys_symlinkat /* 6260 */ + PTR sys_readlinkat + PTR sys_fchmodat + PTR sys_faccessat + PTR compat_sys_pselect6 + PTR compat_sys_ppoll /* 6265 */ + PTR sys_unshare + PTR sys_splice + PTR sys_sync_file_range + PTR sys_tee + PTR compat_sys_vmsplice /* 6270 */ + PTR sys_move_pages + PTR compat_sys_set_robust_list + PTR compat_sys_get_robust_list + PTR compat_sys_kexec_load + PTR sys_getcpu /* 6275 */ + PTR compat_sys_epoll_pwait + PTR sys_ioprio_set + PTR sys_ioprio_get + PTR compat_sys_utimensat + PTR compat_sys_signalfd /* 6280 */ + PTR sys_ni_syscall /* was timerfd */ + PTR sys_eventfd + PTR sys_fallocate + PTR sys_timerfd_create + PTR 
compat_sys_timerfd_gettime /* 6285 */ + PTR compat_sys_timerfd_settime + PTR sys_signalfd4 + PTR sys_eventfd2 + PTR sys_epoll_create1 + PTR sys_dup3 /* 6290 */ + PTR sys_pipe2 + PTR sys_inotify_init1 + PTR sys_preadv + PTR sys_pwritev + PTR compat_sys_rt_tgsigqueueinfo /* 6295 */ + PTR sys_perf_event_open + PTR sys_accept4 + PTR compat_sys_recvmmsg + PTR sys_getdents64 + PTR sys_fanotify_init /* 6300 */ + PTR sys_fanotify_mark + PTR sys_prlimit64 + PTR sys_name_to_handle_at + PTR sys_open_by_handle_at + PTR compat_sys_clock_adjtime /* 6305 */ + PTR sys_syncfs + PTR compat_sys_sendmmsg + PTR sys_setns + PTR compat_sys_process_vm_readv + PTR compat_sys_process_vm_writev /* 6310 */ + .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S new file mode 100644 index 00000000..54228553 --- /dev/null +++ b/arch/mips/kernel/scall64-o32.S @@ -0,0 +1,553 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995 - 2000, 2001 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + * Copyright (C) 2004 Thiemo Seufer + * + * Hairy, the userspace application uses a different argument passing + * convention than the kernel, so we have to translate things from o32 + * to ABI64 calling convention. 64-bit syscalls are also processed + * here for now. + */ +#include <linux/errno.h> +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/irqflags.h> +#include <asm/mipsregs.h> +#include <asm/regdef.h> +#include <asm/stackframe.h> +#include <asm/thread_info.h> +#include <asm/unistd.h> +#include <asm/sysmips.h> + + .align 5 +NESTED(handle_sys, PT_SIZE, sp) + .set noat + SAVE_SOME + TRACE_IRQS_ON_RELOAD + STI + .set at + ld t1, PT_EPC(sp) # skip syscall on return + + dsubu t0, v0, __NR_O32_Linux # check syscall number + sltiu t0, t0, __NR_O32_Linux_syscalls + 1 + daddiu t1, 4 # skip to next instruction + sd t1, PT_EPC(sp) + beqz t0, not_o32_scall +#if 0 + SAVE_ALL + move a1, v0 + PRINT("Scall %ld\n") + RESTORE_ALL +#endif + + /* We don't want to stumble over broken sign extensions from + userland. O32 does never use the upper half. */ + sll a0, a0, 0 + sll a1, a1, 0 + sll a2, a2, 0 + sll a3, a3, 0 + + dsll t0, v0, 3 # offset into table + ld t2, (sys_call_table - (__NR_O32_Linux * 8))(t0) + + sd a3, PT_R26(sp) # save a3 for syscall restarting + + /* + * More than four arguments. Try to deal with it by copying the + * stack arguments from the user stack to the kernel stack. + * This Sucks (TM). + * + * We intentionally keep the kernel stack a little below the top of + * userspace so we don't have to do a slower byte accurate check here. + */ + ld t0, PT_R29(sp) # get old user stack pointer + daddu t1, t0, 32 + bltz t1, bad_stack + +1: lw a4, 16(t0) # argument #5 from usp +2: lw a5, 20(t0) # argument #6 from usp +3: lw a6, 24(t0) # argument #7 from usp +4: lw a7, 28(t0) # argument #8 from usp (for indirect syscalls) + + .section __ex_table,"a" + PTR 1b, bad_stack + PTR 2b, bad_stack + PTR 3b, bad_stack + PTR 4b, bad_stack + .previous + + li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? + and t0, t1, t0 + bnez t0, trace_a_syscall + + jalr t2 # Do The Real Thing (TM) + + li t0, -EMAXERRNO - 1 # error? 
+ sltu t0, t0, v0 + sd t0, PT_R7(sp) # set error flag + beqz t0, 1f + + ld t1, PT_R2(sp) # syscall number + dnegu v0 # error + sd t1, PT_R0(sp) # save it for syscall restarting +1: sd v0, PT_R2(sp) # result + +o32_syscall_exit: + local_irq_disable # make need_resched and + # signals dont change between + # sampling and return + LONG_L a2, TI_FLAGS($28) + li t0, _TIF_ALLWORK_MASK + and t0, a2, t0 + bnez t0, o32_syscall_exit_work + + j restore_partial + +o32_syscall_exit_work: + j syscall_exit_work_partial + +/* ------------------------------------------------------------------------ */ + +trace_a_syscall: + SAVE_STATIC + sd a4, PT_R8(sp) # Save argument registers + sd a5, PT_R9(sp) + sd a6, PT_R10(sp) + sd a7, PT_R11(sp) # For indirect syscalls + + move s0, t2 # Save syscall pointer + move a0, sp + jal syscall_trace_enter + + move t0, s0 + RESTORE_STATIC + ld a0, PT_R4(sp) # Restore argument registers + ld a1, PT_R5(sp) + ld a2, PT_R6(sp) + ld a3, PT_R7(sp) + ld a4, PT_R8(sp) + ld a5, PT_R9(sp) + ld a6, PT_R10(sp) + ld a7, PT_R11(sp) # For indirect syscalls + jalr t0 + + li t0, -EMAXERRNO - 1 # error? + sltu t0, t0, v0 + sd t0, PT_R7(sp) # set error flag + beqz t0, 1f + + ld t1, PT_R2(sp) # syscall number + dnegu v0 # error + sd t1, PT_R0(sp) # save it for syscall restarting +1: sd v0, PT_R2(sp) # result + + j syscall_exit + +/* ------------------------------------------------------------------------ */ + + /* + * The stackpointer for a call with more than 4 arguments is bad. + */ +bad_stack: + li v0, EFAULT + sd v0, PT_R2(sp) + li t0, 1 # set error flag + sd t0, PT_R7(sp) + j o32_syscall_exit + +not_o32_scall: + /* + * This is not an o32 compatibility syscall, pass it on + * to the 64-bit syscall handlers. + */ +#ifdef CONFIG_MIPS32_N32 + j handle_sysn32 +#else + j handle_sys64 +#endif + END(handle_sys) + +LEAF(sys32_syscall) + subu t0, a0, __NR_O32_Linux # check syscall number + sltiu v0, t0, __NR_O32_Linux_syscalls + 1 + beqz t0, einval # do not recurse + dsll t1, t0, 3 + beqz v0, einval + ld t2, sys_call_table(t1) # syscall routine + + move a0, a1 # shift argument registers + move a1, a2 + move a2, a3 + move a3, a4 + move a4, a5 + move a5, a6 + move a6, a7 + sd a0, PT_R4(sp) # ... 
and push back a0 - a3, some + sd a1, PT_R5(sp) # syscalls expect them there + sd a2, PT_R6(sp) + sd a3, PT_R7(sp) + sd a3, PT_R26(sp) # update a3 for syscall restarting + jr t2 + /* Unreached */ + +einval: li v0, -ENOSYS + jr ra + END(sys32_syscall) + + .align 3 + .type sys_call_table,@object +sys_call_table: + PTR sys32_syscall /* 4000 */ + PTR sys_exit + PTR sys_fork + PTR sys_read + PTR sys_write + PTR compat_sys_open /* 4005 */ + PTR sys_close + PTR sys_waitpid + PTR sys_creat + PTR sys_link + PTR sys_unlink /* 4010 */ + PTR sys32_execve + PTR sys_chdir + PTR compat_sys_time + PTR sys_mknod + PTR sys_chmod /* 4015 */ + PTR sys_lchown + PTR sys_ni_syscall + PTR sys_ni_syscall /* was sys_stat */ + PTR sys_lseek + PTR sys_getpid /* 4020 */ + PTR compat_sys_mount + PTR sys_oldumount + PTR sys_setuid + PTR sys_getuid + PTR compat_sys_stime /* 4025 */ + PTR compat_sys_ptrace + PTR sys_alarm + PTR sys_ni_syscall /* was sys_fstat */ + PTR sys_pause + PTR compat_sys_utime /* 4030 */ + PTR sys_ni_syscall + PTR sys_ni_syscall + PTR sys_access + PTR sys_nice + PTR sys_ni_syscall /* 4035 */ + PTR sys_sync + PTR sys_kill + PTR sys_rename + PTR sys_mkdir + PTR sys_rmdir /* 4040 */ + PTR sys_dup + PTR sysm_pipe + PTR compat_sys_times + PTR sys_ni_syscall + PTR sys_brk /* 4045 */ + PTR sys_setgid + PTR sys_getgid + PTR sys_ni_syscall /* was signal 2 */ + PTR sys_geteuid + PTR sys_getegid /* 4050 */ + PTR sys_acct + PTR sys_umount + PTR sys_ni_syscall + PTR compat_sys_ioctl + PTR compat_sys_fcntl /* 4055 */ + PTR sys_ni_syscall + PTR sys_setpgid + PTR sys_ni_syscall + PTR sys_olduname + PTR sys_umask /* 4060 */ + PTR sys_chroot + PTR compat_sys_ustat + PTR sys_dup2 + PTR sys_getppid + PTR sys_getpgrp /* 4065 */ + PTR sys_setsid + PTR sys_32_sigaction + PTR sys_sgetmask + PTR sys_ssetmask + PTR sys_setreuid /* 4070 */ + PTR sys_setregid + PTR sys32_sigsuspend + PTR compat_sys_sigpending + PTR sys_sethostname + PTR compat_sys_setrlimit /* 4075 */ + PTR compat_sys_getrlimit + PTR compat_sys_getrusage + PTR compat_sys_gettimeofday + PTR compat_sys_settimeofday + PTR sys_getgroups /* 4080 */ + PTR sys_setgroups + PTR sys_ni_syscall /* old_select */ + PTR sys_symlink + PTR sys_ni_syscall /* was sys_lstat */ + PTR sys_readlink /* 4085 */ + PTR sys_uselib + PTR sys_swapon + PTR sys_reboot + PTR compat_sys_old_readdir + PTR sys_mips_mmap /* 4090 */ + PTR sys_munmap + PTR sys_truncate + PTR sys_ftruncate + PTR sys_fchmod + PTR sys_fchown /* 4095 */ + PTR sys_getpriority + PTR sys_setpriority + PTR sys_ni_syscall + PTR compat_sys_statfs + PTR compat_sys_fstatfs /* 4100 */ + PTR sys_ni_syscall /* sys_ioperm */ + PTR compat_sys_socketcall + PTR sys_syslog + PTR compat_sys_setitimer + PTR compat_sys_getitimer /* 4105 */ + PTR compat_sys_newstat + PTR compat_sys_newlstat + PTR compat_sys_newfstat + PTR sys_uname + PTR sys_ni_syscall /* sys_ioperm *//* 4110 */ + PTR sys_vhangup + PTR sys_ni_syscall /* was sys_idle */ + PTR sys_ni_syscall /* sys_vm86 */ + PTR compat_sys_wait4 + PTR sys_swapoff /* 4115 */ + PTR compat_sys_sysinfo + PTR sys_32_ipc + PTR sys_fsync + PTR sys32_sigreturn + PTR sys32_clone /* 4120 */ + PTR sys_setdomainname + PTR sys_newuname + PTR sys_ni_syscall /* sys_modify_ldt */ + PTR compat_sys_adjtimex + PTR sys_mprotect /* 4125 */ + PTR compat_sys_sigprocmask + PTR sys_ni_syscall /* was creat_module */ + PTR sys_init_module + PTR sys_delete_module + PTR sys_ni_syscall /* 4130, get_kernel_syms */ + PTR sys_quotactl + PTR sys_getpgid + PTR sys_fchdir + PTR sys_bdflush + PTR sys_sysfs /* 4135 */ + PTR 
sys_32_personality + PTR sys_ni_syscall /* for afs_syscall */ + PTR sys_setfsuid + PTR sys_setfsgid + PTR sys_32_llseek /* 4140 */ + PTR compat_sys_getdents + PTR compat_sys_select + PTR sys_flock + PTR sys_msync + PTR compat_sys_readv /* 4145 */ + PTR compat_sys_writev + PTR sys_cacheflush + PTR sys_cachectl + PTR sys_sysmips + PTR sys_ni_syscall /* 4150 */ + PTR sys_getsid + PTR sys_fdatasync + PTR compat_sys_sysctl + PTR sys_mlock + PTR sys_munlock /* 4155 */ + PTR sys_mlockall + PTR sys_munlockall + PTR sys_sched_setparam + PTR sys_sched_getparam + PTR sys_sched_setscheduler /* 4160 */ + PTR sys_sched_getscheduler + PTR sys_sched_yield + PTR sys_sched_get_priority_max + PTR sys_sched_get_priority_min + PTR sys_32_sched_rr_get_interval /* 4165 */ + PTR compat_sys_nanosleep + PTR sys_mremap + PTR sys_accept + PTR sys_bind + PTR sys_connect /* 4170 */ + PTR sys_getpeername + PTR sys_getsockname + PTR sys_getsockopt + PTR sys_listen + PTR compat_sys_recv /* 4175 */ + PTR compat_sys_recvfrom + PTR compat_sys_recvmsg + PTR sys_send + PTR compat_sys_sendmsg + PTR sys_sendto /* 4180 */ + PTR compat_sys_setsockopt + PTR sys_shutdown + PTR sys_socket + PTR sys_socketpair + PTR sys_setresuid /* 4185 */ + PTR sys_getresuid + PTR sys_ni_syscall /* was query_module */ + PTR sys_poll + PTR sys_ni_syscall /* was nfsservctl */ + PTR sys_setresgid /* 4190 */ + PTR sys_getresgid + PTR sys_prctl + PTR sys32_rt_sigreturn + PTR sys_32_rt_sigaction + PTR sys_32_rt_sigprocmask /* 4195 */ + PTR sys_32_rt_sigpending + PTR compat_sys_rt_sigtimedwait + PTR sys_32_rt_sigqueueinfo + PTR sys32_rt_sigsuspend + PTR sys_32_pread /* 4200 */ + PTR sys_32_pwrite + PTR sys_chown + PTR sys_getcwd + PTR sys_capget + PTR sys_capset /* 4205 */ + PTR sys32_sigaltstack + PTR sys_32_sendfile + PTR sys_ni_syscall + PTR sys_ni_syscall + PTR sys_mips_mmap2 /* 4210 */ + PTR sys_32_truncate64 + PTR sys_32_ftruncate64 + PTR sys_newstat + PTR sys_newlstat + PTR sys_newfstat /* 4215 */ + PTR sys_pivot_root + PTR sys_mincore + PTR sys_madvise + PTR sys_getdents64 + PTR compat_sys_fcntl64 /* 4220 */ + PTR sys_ni_syscall + PTR sys_gettid + PTR sys32_readahead + PTR sys_setxattr + PTR sys_lsetxattr /* 4225 */ + PTR sys_fsetxattr + PTR sys_getxattr + PTR sys_lgetxattr + PTR sys_fgetxattr + PTR sys_listxattr /* 4230 */ + PTR sys_llistxattr + PTR sys_flistxattr + PTR sys_removexattr + PTR sys_lremovexattr + PTR sys_fremovexattr /* 4235 */ + PTR sys_tkill + PTR sys_sendfile64 + PTR sys_32_futex + PTR compat_sys_sched_setaffinity + PTR compat_sys_sched_getaffinity /* 4240 */ + PTR compat_sys_io_setup + PTR sys_io_destroy + PTR compat_sys_io_getevents + PTR compat_sys_io_submit + PTR sys_io_cancel /* 4245 */ + PTR sys_exit_group + PTR sys32_lookup_dcookie + PTR sys_epoll_create + PTR sys_epoll_ctl + PTR sys_epoll_wait /* 4250 */ + PTR sys_remap_file_pages + PTR sys_set_tid_address + PTR sys_restart_syscall + PTR sys32_fadvise64_64 + PTR compat_sys_statfs64 /* 4255 */ + PTR compat_sys_fstatfs64 + PTR compat_sys_timer_create + PTR compat_sys_timer_settime + PTR compat_sys_timer_gettime + PTR sys_timer_getoverrun /* 4260 */ + PTR sys_timer_delete + PTR compat_sys_clock_settime + PTR compat_sys_clock_gettime + PTR compat_sys_clock_getres + PTR compat_sys_clock_nanosleep /* 4265 */ + PTR sys_tgkill + PTR compat_sys_utimes + PTR sys_ni_syscall /* sys_mbind */ + PTR sys_ni_syscall /* sys_get_mempolicy */ + PTR sys_ni_syscall /* 4270 sys_set_mempolicy */ + PTR compat_sys_mq_open + PTR sys_mq_unlink + PTR compat_sys_mq_timedsend + PTR 
compat_sys_mq_timedreceive + PTR compat_sys_mq_notify /* 4275 */ + PTR compat_sys_mq_getsetattr + PTR sys_ni_syscall /* sys_vserver */ + PTR sys_32_waitid + PTR sys_ni_syscall /* available, was setaltroot */ + PTR sys_add_key /* 4280 */ + PTR sys_request_key + PTR sys_keyctl + PTR sys_set_thread_area + PTR sys_inotify_init + PTR sys_inotify_add_watch /* 4285 */ + PTR sys_inotify_rm_watch + PTR sys_migrate_pages + PTR compat_sys_openat + PTR sys_mkdirat + PTR sys_mknodat /* 4290 */ + PTR sys_fchownat + PTR compat_sys_futimesat + PTR sys_newfstatat + PTR sys_unlinkat + PTR sys_renameat /* 4295 */ + PTR sys_linkat + PTR sys_symlinkat + PTR sys_readlinkat + PTR sys_fchmodat + PTR sys_faccessat /* 4300 */ + PTR compat_sys_pselect6 + PTR compat_sys_ppoll + PTR sys_unshare + PTR sys_splice + PTR sys32_sync_file_range /* 4305 */ + PTR sys_tee + PTR compat_sys_vmsplice + PTR compat_sys_move_pages + PTR compat_sys_set_robust_list + PTR compat_sys_get_robust_list /* 4310 */ + PTR compat_sys_kexec_load + PTR sys_getcpu + PTR compat_sys_epoll_pwait + PTR sys_ioprio_set + PTR sys_ioprio_get /* 4315 */ + PTR compat_sys_utimensat + PTR compat_sys_signalfd + PTR sys_ni_syscall /* was timerfd */ + PTR sys_eventfd + PTR sys32_fallocate /* 4320 */ + PTR sys_timerfd_create + PTR compat_sys_timerfd_gettime + PTR compat_sys_timerfd_settime + PTR compat_sys_signalfd4 + PTR sys_eventfd2 /* 4325 */ + PTR sys_epoll_create1 + PTR sys_dup3 + PTR sys_pipe2 + PTR sys_inotify_init1 + PTR compat_sys_preadv /* 4330 */ + PTR compat_sys_pwritev + PTR compat_sys_rt_tgsigqueueinfo + PTR sys_perf_event_open + PTR sys_accept4 + PTR compat_sys_recvmmsg /* 4335 */ + PTR sys_fanotify_init + PTR sys_32_fanotify_mark + PTR sys_prlimit64 + PTR sys_name_to_handle_at + PTR compat_sys_open_by_handle_at /* 4340 */ + PTR compat_sys_clock_adjtime + PTR sys_syncfs + PTR compat_sys_sendmmsg + PTR sys_setns + PTR compat_sys_process_vm_readv /* 4345 */ + PTR compat_sys_process_vm_writev + .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c new file mode 100644 index 00000000..c504b212 --- /dev/null +++ b/arch/mips/kernel/setup.c @@ -0,0 +1,626 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995 Linus Torvalds + * Copyright (C) 1995 Waldorf Electronics + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle + * Copyright (C) 1996 Stoned Elipot + * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. 
Rozycki + */ +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/export.h> +#include <linux/screen_info.h> +#include <linux/memblock.h> +#include <linux/bootmem.h> +#include <linux/initrd.h> +#include <linux/root_dev.h> +#include <linux/highmem.h> +#include <linux/console.h> +#include <linux/pfn.h> +#include <linux/debugfs.h> + +#include <asm/addrspace.h> +#include <asm/bootinfo.h> +#include <asm/bugs.h> +#include <asm/cache.h> +#include <asm/cpu.h> +#include <asm/sections.h> +#include <asm/setup.h> +#include <asm/smp-ops.h> +#include <asm/prom.h> + +struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; + +EXPORT_SYMBOL(cpu_data); + +#ifdef CONFIG_VT +struct screen_info screen_info; +#endif + +/* + * Despite it's name this variable is even if we don't have PCI + */ +unsigned int PCI_DMA_BUS_IS_PHYS; + +EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS); + +/* + * Setup information + * + * These are initialized so they are in the .data section + */ +unsigned long mips_machtype __read_mostly = MACH_UNKNOWN; + +EXPORT_SYMBOL(mips_machtype); + +struct boot_mem_map boot_mem_map; + +static char __initdata command_line[COMMAND_LINE_SIZE]; +char __initdata arcs_cmdline[COMMAND_LINE_SIZE]; + +#ifdef CONFIG_CMDLINE_BOOL +static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; +#endif + +/* + * mips_io_port_base is the begin of the address space to which x86 style + * I/O ports are mapped. + */ +const unsigned long mips_io_port_base = -1; +EXPORT_SYMBOL(mips_io_port_base); + +static struct resource code_resource = { .name = "Kernel code", }; +static struct resource data_resource = { .name = "Kernel data", }; + +void __init add_memory_region(phys_t start, phys_t size, long type) +{ + int x = boot_mem_map.nr_map; + struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1; + + /* Sanity check */ + if (start + size < start) { + pr_warning("Trying to add an invalid memory region, skipped\n"); + return; + } + + /* + * Try to merge with previous entry if any. This is far less than + * perfect but is sufficient for most real world cases. + */ + if (x && prev->addr + prev->size == start && prev->type == type) { + prev->size += size; + return; + } + + if (x == BOOT_MEM_MAP_MAX) { + pr_err("Ooops! 
Too many entries in the memory map!\n"); + return; + } + + boot_mem_map.map[x].addr = start; + boot_mem_map.map[x].size = size; + boot_mem_map.map[x].type = type; + boot_mem_map.nr_map++; +} + +static void __init print_memory_map(void) +{ + int i; + const int field = 2 * sizeof(unsigned long); + + for (i = 0; i < boot_mem_map.nr_map; i++) { + printk(KERN_INFO " memory: %0*Lx @ %0*Lx ", + field, (unsigned long long) boot_mem_map.map[i].size, + field, (unsigned long long) boot_mem_map.map[i].addr); + + switch (boot_mem_map.map[i].type) { + case BOOT_MEM_RAM: + printk(KERN_CONT "(usable)\n"); + break; + case BOOT_MEM_INIT_RAM: + printk(KERN_CONT "(usable after init)\n"); + break; + case BOOT_MEM_ROM_DATA: + printk(KERN_CONT "(ROM data)\n"); + break; + case BOOT_MEM_RESERVED: + printk(KERN_CONT "(reserved)\n"); + break; + default: + printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type); + break; + } + } +} + +/* + * Manage initrd + */ +#ifdef CONFIG_BLK_DEV_INITRD + +static int __init rd_start_early(char *p) +{ + unsigned long start = memparse(p, &p); + +#ifdef CONFIG_64BIT + /* Guess if the sign extension was forgotten by bootloader */ + if (start < XKPHYS) + start = (int)start; +#endif + initrd_start = start; + initrd_end += start; + return 0; +} +early_param("rd_start", rd_start_early); + +static int __init rd_size_early(char *p) +{ + initrd_end += memparse(p, &p); + return 0; +} +early_param("rd_size", rd_size_early); + +/* it returns the next free pfn after initrd */ +static unsigned long __init init_initrd(void) +{ + unsigned long end; + + /* + * Board specific code or command line parser should have + * already set up initrd_start and initrd_end. In these cases + * perfom sanity checks and use them if all looks good. + */ + if (!initrd_start || initrd_end <= initrd_start) + goto disable; + + if (initrd_start & ~PAGE_MASK) { + pr_err("initrd start must be page aligned\n"); + goto disable; + } + if (initrd_start < PAGE_OFFSET) { + pr_err("initrd start < PAGE_OFFSET\n"); + goto disable; + } + + /* + * Sanitize initrd addresses. For example firmware + * can't guess if they need to pass them through + * 64-bits values if the kernel has been built in pure + * 32-bit. We need also to switch from KSEG0 to XKPHYS + * addresses now, so the code can now safely use __pa(). + */ + end = __pa(initrd_end); + initrd_end = (unsigned long)__va(end); + initrd_start = (unsigned long)__va(__pa(initrd_start)); + + ROOT_DEV = Root_RAM0; + return PFN_UP(end); +disable: + initrd_start = 0; + initrd_end = 0; + return 0; +} + +static void __init finalize_initrd(void) +{ + unsigned long size = initrd_end - initrd_start; + + if (size == 0) { + printk(KERN_INFO "Initrd not found or empty"); + goto disable; + } + if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { + printk(KERN_ERR "Initrd extends beyond end of memory"); + goto disable; + } + + reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT); + initrd_below_start_ok = 1; + + pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n", + initrd_start, size); + return; +disable: + printk(KERN_CONT " - disabling initrd\n"); + initrd_start = 0; + initrd_end = 0; +} + +#else /* !CONFIG_BLK_DEV_INITRD */ + +static unsigned long __init init_initrd(void) +{ + return 0; +} + +#define finalize_initrd() do {} while (0) + +#endif + +/* + * Initialize the bootmem allocator. It also setup initrd related data + * if needed. 
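
The "guess if the sign extension was forgotten" fix-up in rd_start_early() above boils down to a cast through a 32-bit signed type; a small standalone illustration of that step, with a made-up address value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* a 32-bit bootloader hands over rd_start as a KSEG0 address, zero-extended */
        uint64_t raw   = 0x80800000ULL;                   /* below XKPHYS, so it gets fixed */
        uint64_t fixed = (uint64_t)(int64_t)(int32_t)raw; /* the "(int)start" cast above */

        printf("as passed     : %#018llx\n", (unsigned long long)raw);
        printf("sign-extended : %#018llx\n", (unsigned long long)fixed);
        /* 0x0000000080800000 becomes 0xffffffff80800000, the canonical CKSEG0 form */
        return 0;
}
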
+ */ +#ifdef CONFIG_SGI_IP27 + +static void __init bootmem_init(void) +{ + init_initrd(); + finalize_initrd(); +} + +#else /* !CONFIG_SGI_IP27 */ + +static void __init bootmem_init(void) +{ + unsigned long reserved_end; + unsigned long mapstart = ~0UL; + unsigned long bootmap_size; + int i; + + /* + * Init any data related to initrd. It's a nop if INITRD is + * not selected. Once that done we can determine the low bound + * of usable memory. + */ + reserved_end = max(init_initrd(), + (unsigned long) PFN_UP(__pa_symbol(&_end))); + + /* + * max_low_pfn is not a number of pages. The number of pages + * of the system is given by 'max_low_pfn - min_low_pfn'. + */ + min_low_pfn = ~0UL; + max_low_pfn = 0; + + /* + * Find the highest page frame number we have available. + */ + for (i = 0; i < boot_mem_map.nr_map; i++) { + unsigned long start, end; + + if (boot_mem_map.map[i].type != BOOT_MEM_RAM) + continue; + + start = PFN_UP(boot_mem_map.map[i].addr); + end = PFN_DOWN(boot_mem_map.map[i].addr + + boot_mem_map.map[i].size); + + if (end > max_low_pfn) + max_low_pfn = end; + if (start < min_low_pfn) + min_low_pfn = start; + if (end <= reserved_end) + continue; + if (start >= mapstart) + continue; + mapstart = max(reserved_end, start); + } + + if (min_low_pfn >= max_low_pfn) + panic("Incorrect memory mapping !!!"); + if (min_low_pfn > ARCH_PFN_OFFSET) { + pr_info("Wasting %lu bytes for tracking %lu unused pages\n", + (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page), + min_low_pfn - ARCH_PFN_OFFSET); + } else if (min_low_pfn < ARCH_PFN_OFFSET) { + pr_info("%lu free pages won't be used\n", + ARCH_PFN_OFFSET - min_low_pfn); + } + min_low_pfn = ARCH_PFN_OFFSET; + + /* + * Determine low and high memory ranges + */ + max_pfn = max_low_pfn; + if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) { +#ifdef CONFIG_HIGHMEM + highstart_pfn = PFN_DOWN(HIGHMEM_START); + highend_pfn = max_low_pfn; +#endif + max_low_pfn = PFN_DOWN(HIGHMEM_START); + } + + /* + * Initialize the boot-time allocator with low memory only. + */ + bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart, + min_low_pfn, max_low_pfn); + + + for (i = 0; i < boot_mem_map.nr_map; i++) { + unsigned long start, end; + + start = PFN_UP(boot_mem_map.map[i].addr); + end = PFN_DOWN(boot_mem_map.map[i].addr + + boot_mem_map.map[i].size); + + if (start <= min_low_pfn) + start = min_low_pfn; + if (start >= end) + continue; + +#ifndef CONFIG_HIGHMEM + if (end > max_low_pfn) + end = max_low_pfn; + + /* + * ... finally, is the area going away? + */ + if (end <= start) + continue; +#endif + + memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0); + } + + /* + * Register fully available low RAM pages with the bootmem allocator. + */ + for (i = 0; i < boot_mem_map.nr_map; i++) { + unsigned long start, end, size; + + start = PFN_UP(boot_mem_map.map[i].addr); + end = PFN_DOWN(boot_mem_map.map[i].addr + + boot_mem_map.map[i].size); + + /* + * Reserve usable memory. + */ + switch (boot_mem_map.map[i].type) { + case BOOT_MEM_RAM: + break; + case BOOT_MEM_INIT_RAM: + memory_present(0, start, end); + continue; + default: + /* Not usable memory */ + continue; + } + + /* + * We are rounding up the start address of usable memory + * and at the end of the usable range downwards. + */ + if (start >= max_low_pfn) + continue; + if (start < reserved_end) + start = reserved_end; + if (end > max_low_pfn) + end = max_low_pfn; + + /* + * ... finally, is the area going away? 
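
The PFN_UP()/PFN_DOWN() rounding used throughout bootmem_init() keeps only whole page frames inside each region; a small worked example of that arithmetic, assuming 4 KB pages (the addresses are made up):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
        unsigned long addr = 0x00100800UL;      /* region start, not page aligned */
        unsigned long size = 0x00200000UL;      /* 2 MB */

        /* usable RAM: round the start up and the end down to whole page frames */
        printf("first usable pfn = %#lx\n", PFN_UP(addr));           /* 0x101 */
        printf("end pfn          = %#lx\n", PFN_DOWN(addr + size));  /* 0x300 */
        return 0;
}
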
+ */ + if (end <= start) + continue; + size = end - start; + + /* Register lowmem ranges */ + free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT); + memory_present(0, start, end); + } + + /* + * Reserve the bootmap memory. + */ + reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT); + + /* + * Reserve initrd memory if needed. + */ + finalize_initrd(); +} + +#endif /* CONFIG_SGI_IP27 */ + +/* + * arch_mem_init - initialize memory management subsystem + * + * o plat_mem_setup() detects the memory configuration and will record detected + * memory areas using add_memory_region. + * + * At this stage the memory configuration of the system is known to the + * kernel but generic memory management system is still entirely uninitialized. + * + * o bootmem_init() + * o sparse_init() + * o paging_init() + * + * At this stage the bootmem allocator is ready to use. + * + * NOTE: historically plat_mem_setup did the entire platform initialization. + * This was rather impractical because it meant plat_mem_setup had to + * get away without any kind of memory allocator. To keep old code from + * breaking plat_setup was just renamed to plat_setup and a second platform + * initialization hook for anything else was introduced. + */ + +static int usermem __initdata; + +static int __init early_parse_mem(char *p) +{ + unsigned long start, size; + + /* + * If a user specifies memory size, we + * blow away any automatically generated + * size. + */ + if (usermem == 0) { + boot_mem_map.nr_map = 0; + usermem = 1; + } + start = 0; + size = memparse(p, &p); + if (*p == '@') + start = memparse(p + 1, &p); + + add_memory_region(start, size, BOOT_MEM_RAM); + return 0; +} +early_param("mem", early_parse_mem); + +static void __init arch_mem_init(char **cmdline_p) +{ + phys_t init_mem, init_end, init_size; + + extern void plat_mem_setup(void); + + /* call board setup routine */ + plat_mem_setup(); + + init_mem = PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT; + init_end = PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT; + init_size = init_end - init_mem; + if (init_size) { + /* Make sure it is in the boot_mem_map */ + int i, found; + found = 0; + for (i = 0; i < boot_mem_map.nr_map; i++) { + if (init_mem >= boot_mem_map.map[i].addr && + init_mem < (boot_mem_map.map[i].addr + + boot_mem_map.map[i].size)) { + found = 1; + break; + } + } + if (!found) + add_memory_region(init_mem, init_size, + BOOT_MEM_INIT_RAM); + } + + pr_info("Determined physical RAM map:\n"); + print_memory_map(); + +#ifdef CONFIG_CMDLINE_BOOL +#ifdef CONFIG_CMDLINE_OVERRIDE + strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); +#else + if (builtin_cmdline[0]) { + strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE); + strlcat(arcs_cmdline, builtin_cmdline, COMMAND_LINE_SIZE); + } + strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE); +#endif +#else + strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE); +#endif + strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); + + *cmdline_p = command_line; + + parse_early_param(); + + if (usermem) { + pr_info("User-defined physical RAM map:\n"); + print_memory_map(); + } + + bootmem_init(); + device_tree_init(); + sparse_init(); + plat_swiotlb_setup(); + paging_init(); +} + +static void __init resource_init(void) +{ + int i; + + if (UNCAC_BASE != IO_BASE) + return; + + code_resource.start = __pa_symbol(&_text); + code_resource.end = __pa_symbol(&_etext) - 1; + data_resource.start = __pa_symbol(&_etext); + data_resource.end = __pa_symbol(&_edata) - 1; + + /* + * Request 
address space for all standard RAM. + */ + for (i = 0; i < boot_mem_map.nr_map; i++) { + struct resource *res; + unsigned long start, end; + + start = boot_mem_map.map[i].addr; + end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1; + if (start >= HIGHMEM_START) + continue; + if (end >= HIGHMEM_START) + end = HIGHMEM_START - 1; + + res = alloc_bootmem(sizeof(struct resource)); + switch (boot_mem_map.map[i].type) { + case BOOT_MEM_RAM: + case BOOT_MEM_INIT_RAM: + case BOOT_MEM_ROM_DATA: + res->name = "System RAM"; + break; + case BOOT_MEM_RESERVED: + default: + res->name = "reserved"; + } + + res->start = start; + res->end = end; + + res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; + request_resource(&iomem_resource, res); + + /* + * We don't know which RAM region contains kernel data, + * so we try it repeatedly and let the resource manager + * test it. + */ + request_resource(res, &code_resource); + request_resource(res, &data_resource); + } +} + +void __init setup_arch(char **cmdline_p) +{ + cpu_probe(); + prom_init(); + +#ifdef CONFIG_EARLY_PRINTK + setup_early_printk(); +#endif + cpu_report(); + check_bugs_early(); + +#if defined(CONFIG_VT) +#if defined(CONFIG_VGA_CONSOLE) + conswitchp = &vga_con; +#elif defined(CONFIG_DUMMY_CONSOLE) + conswitchp = &dummy_con; +#endif +#endif + + arch_mem_init(cmdline_p); + + resource_init(); + plat_smp_setup(); +} + +unsigned long kernelsp[NR_CPUS]; +unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; + +#ifdef CONFIG_DEBUG_FS +struct dentry *mips_debugfs_dir; +static int __init debugfs_mips(void) +{ + struct dentry *d; + + d = debugfs_create_dir("mips", NULL); + if (!d) + return -ENOMEM; + mips_debugfs_dir = d; + return 0; +} +arch_initcall(debugfs_mips); +#endif diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h new file mode 100644 index 00000000..10263b40 --- /dev/null +++ b/arch/mips/kernel/signal-common.h @@ -0,0 +1,41 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1994 - 2000 Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + */ + +#ifndef __SIGNAL_COMMON_H +#define __SIGNAL_COMMON_H + +/* #define DEBUG_SIG */ + +#ifdef DEBUG_SIG +# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args) +#else +# define DEBUGP(fmt, args...) +#endif + +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) + +/* + * Determine which stack to use.. + */ +extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, + size_t frame_size); +/* Check and clear pending FPU exceptions in saved CSR */ +extern int fpcsr_pending(unsigned int __user *fpcsr); + +/* Make sure we will not lose FPU ownership */ +#ifdef CONFIG_PREEMPT +#define lock_fpu_owner() preempt_disable() +#define unlock_fpu_owner() preempt_enable() +#else +#define lock_fpu_owner() pagefault_disable() +#define unlock_fpu_owner() pagefault_enable() +#endif + +#endif /* __SIGNAL_COMMON_H */ diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c new file mode 100644 index 00000000..d5a338a1 --- /dev/null +++ b/arch/mips/kernel/signal.c @@ -0,0 +1,695 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1994 - 2000 Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + */ +#include <linux/cache.h> +#include <linux/irqflags.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/personality.h> +#include <linux/smp.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/errno.h> +#include <linux/wait.h> +#include <linux/ptrace.h> +#include <linux/unistd.h> +#include <linux/compiler.h> +#include <linux/syscalls.h> +#include <linux/uaccess.h> +#include <linux/tracehook.h> + +#include <asm/abi.h> +#include <asm/asm.h> +#include <linux/bitops.h> +#include <asm/cacheflush.h> +#include <asm/fpu.h> +#include <asm/sim.h> +#include <asm/ucontext.h> +#include <asm/cpu-features.h> +#include <asm/war.h> +#include <asm/vdso.h> +#include <asm/dsp.h> + +#include "signal-common.h" + +static int (*save_fp_context)(struct sigcontext __user *sc); +static int (*restore_fp_context)(struct sigcontext __user *sc); + +extern asmlinkage int _save_fp_context(struct sigcontext __user *sc); +extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc); + +extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc); +extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc); + +struct sigframe { + u32 sf_ass[4]; /* argument save space for o32 */ + u32 sf_pad[2]; /* Was: signal trampoline */ + struct sigcontext sf_sc; + sigset_t sf_mask; +}; + +struct rt_sigframe { + u32 rs_ass[4]; /* argument save space for o32 */ + u32 rs_pad[2]; /* Was: signal trampoline */ + struct siginfo rs_info; + struct ucontext rs_uc; +}; + +/* + * Helper routines + */ +static int protected_save_fp_context(struct sigcontext __user *sc) +{ + int err; + while (1) { + lock_fpu_owner(); + own_fpu_inatomic(1); + err = save_fp_context(sc); /* this might fail */ + unlock_fpu_owner(); + if (likely(!err)) + break; + /* touch the sigcontext and try again */ + err = __put_user(0, &sc->sc_fpregs[0]) | + __put_user(0, &sc->sc_fpregs[31]) | + __put_user(0, &sc->sc_fpc_csr); + if (err) + break; /* really bad sigcontext */ + } + return err; +} + +static int protected_restore_fp_context(struct sigcontext __user *sc) +{ + int err, tmp __maybe_unused; + while (1) { + lock_fpu_owner(); + own_fpu_inatomic(0); + err = restore_fp_context(sc); /* this might fail */ + unlock_fpu_owner(); + if (likely(!err)) + break; + /* touch the sigcontext and try again */ + err = __get_user(tmp, &sc->sc_fpregs[0]) | + __get_user(tmp, &sc->sc_fpregs[31]) | + __get_user(tmp, &sc->sc_fpc_csr); + if (err) + break; /* really bad sigcontext */ + } + return err; +} + +int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) +{ + int err = 0; + int i; + unsigned int used_math; + + err |= __put_user(regs->cp0_epc, &sc->sc_pc); + + err |= __put_user(0, &sc->sc_regs[0]); + for (i = 1; i < 32; i++) + err |= __put_user(regs->regs[i], &sc->sc_regs[i]); + +#ifdef CONFIG_CPU_HAS_SMARTMIPS + err |= __put_user(regs->acx, &sc->sc_acx); +#endif + err |= __put_user(regs->hi, &sc->sc_mdhi); + err |= __put_user(regs->lo, &sc->sc_mdlo); + if (cpu_has_dsp) { + err |= __put_user(mfhi1(), &sc->sc_hi1); + err |= __put_user(mflo1(), &sc->sc_lo1); + err |= __put_user(mfhi2(), &sc->sc_hi2); + err |= __put_user(mflo2(), &sc->sc_lo2); + err |= __put_user(mfhi3(), &sc->sc_hi3); + err |= __put_user(mflo3(), &sc->sc_lo3); + err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); + } + + used_math = !!used_math(); + err |= __put_user(used_math, 
&sc->sc_used_math); + + if (used_math) { + /* + * Save FPU state to signal context. Signal handler + * will "inherit" current FPU state. + */ + err |= protected_save_fp_context(sc); + } + return err; +} + +int fpcsr_pending(unsigned int __user *fpcsr) +{ + int err, sig = 0; + unsigned int csr, enabled; + + err = __get_user(csr, fpcsr); + enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5); + /* + * If the signal handler set some FPU exceptions, clear it and + * send SIGFPE. + */ + if (csr & enabled) { + csr &= ~enabled; + err |= __put_user(csr, fpcsr); + sig = SIGFPE; + } + return err ?: sig; +} + +static int +check_and_restore_fp_context(struct sigcontext __user *sc) +{ + int err, sig; + + err = sig = fpcsr_pending(&sc->sc_fpc_csr); + if (err > 0) + err = 0; + err |= protected_restore_fp_context(sc); + return err ?: sig; +} + +int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) +{ + unsigned int used_math; + unsigned long treg; + int err = 0; + int i; + + /* Always make any pending restarted system calls return -EINTR */ + current_thread_info()->restart_block.fn = do_no_restart_syscall; + + err |= __get_user(regs->cp0_epc, &sc->sc_pc); + +#ifdef CONFIG_CPU_HAS_SMARTMIPS + err |= __get_user(regs->acx, &sc->sc_acx); +#endif + err |= __get_user(regs->hi, &sc->sc_mdhi); + err |= __get_user(regs->lo, &sc->sc_mdlo); + if (cpu_has_dsp) { + err |= __get_user(treg, &sc->sc_hi1); mthi1(treg); + err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg); + err |= __get_user(treg, &sc->sc_hi2); mthi2(treg); + err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg); + err |= __get_user(treg, &sc->sc_hi3); mthi3(treg); + err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); + err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); + } + + for (i = 1; i < 32; i++) + err |= __get_user(regs->regs[i], &sc->sc_regs[i]); + + err |= __get_user(used_math, &sc->sc_used_math); + conditional_used_math(used_math); + + if (used_math) { + /* restore fpu context if we have used it before */ + if (!err) + err = check_and_restore_fp_context(sc); + } else { + /* signal handler may have used FPU. Give it up. */ + lose_fpu(0); + } + + return err; +} + +void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, + size_t frame_size) +{ + unsigned long sp; + + /* Default to using normal stack */ + sp = regs->regs[29]; + + /* + * FPU emulator may have it's own trampoline active just + * above the user stack, 16-bytes before the next lowest + * 16 byte boundary. Try to avoid trashing it. + */ + sp -= 32; + + /* This is the X/Open sanctioned signal stack switching. */ + if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0)) + sp = current->sas_ss_sp + current->sas_ss_size; + + return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK)); +} + +/* + * Atomically swap in the new signal mask, and wait for a signal. 
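
Seen from userspace, the contract described here (and implemented by sys_sigsuspend()/sys_rt_sigsuspend() below) looks like the following minimal, self-contained sketch:

#include <signal.h>
#include <stdio.h>

static void handler(int sig)
{
        (void)sig;
}

int main(void)
{
        sigset_t block, wait_mask;

        signal(SIGUSR1, handler);

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, NULL);   /* SIGUSR1 is now held pending ... */
        raise(SIGUSR1);                         /* ... and one is queued up */

        sigemptyset(&wait_mask);                /* mask to use *while* waiting */
        sigsuspend(&wait_mask);                 /* swap mask and sleep atomically;
                                                   the pending SIGUSR1 is delivered */

        /* back here the original blocked mask is in force again, which is the
           saved_sigmask / TIF_RESTORE_SIGMASK handling in the kernel code below */
        puts("sigsuspend returned after the signal was handled");
        return 0;
}
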
+ */ + +#ifdef CONFIG_TRAD_SIGNALS +asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs) +{ + sigset_t newset; + sigset_t __user *uset; + + uset = (sigset_t __user *) regs.regs[4]; + if (copy_from_user(&newset, uset, sizeof(sigset_t))) + return -EFAULT; + sigdelsetmask(&newset, ~_BLOCKABLE); + + current->saved_sigmask = current->blocked; + set_current_blocked(&newset); + + current->state = TASK_INTERRUPTIBLE; + schedule(); + set_thread_flag(TIF_RESTORE_SIGMASK); + return -ERESTARTNOHAND; +} +#endif + +asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) +{ + sigset_t newset; + sigset_t __user *unewset; + size_t sigsetsize; + + /* XXX Don't preclude handling different sized sigset_t's. */ + sigsetsize = regs.regs[5]; + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + unewset = (sigset_t __user *) regs.regs[4]; + if (copy_from_user(&newset, unewset, sizeof(newset))) + return -EFAULT; + sigdelsetmask(&newset, ~_BLOCKABLE); + + current->saved_sigmask = current->blocked; + set_current_blocked(&newset); + + current->state = TASK_INTERRUPTIBLE; + schedule(); + set_thread_flag(TIF_RESTORE_SIGMASK); + return -ERESTARTNOHAND; +} + +#ifdef CONFIG_TRAD_SIGNALS +SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act, + struct sigaction __user *, oact) +{ + struct k_sigaction new_ka, old_ka; + int ret; + int err = 0; + + if (act) { + old_sigset_t mask; + + if (!access_ok(VERIFY_READ, act, sizeof(*act))) + return -EFAULT; + err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler); + err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); + err |= __get_user(mask, &act->sa_mask.sig[0]); + if (err) + return -EFAULT; + + siginitset(&new_ka.sa.sa_mask, mask); + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); + + if (!ret && oact) { + if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) + return -EFAULT; + err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); + err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler); + err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig); + err |= __put_user(0, &oact->sa_mask.sig[1]); + err |= __put_user(0, &oact->sa_mask.sig[2]); + err |= __put_user(0, &oact->sa_mask.sig[3]); + if (err) + return -EFAULT; + } + + return ret; +} +#endif + +asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs) +{ + const stack_t __user *uss = (const stack_t __user *) regs.regs[4]; + stack_t __user *uoss = (stack_t __user *) regs.regs[5]; + unsigned long usp = regs.regs[29]; + + return do_sigaltstack(uss, uoss, usp); +} + +#ifdef CONFIG_TRAD_SIGNALS +asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) +{ + struct sigframe __user *frame; + sigset_t blocked; + int sig; + + frame = (struct sigframe __user *) regs.regs[29]; + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked))) + goto badframe; + + sigdelsetmask(&blocked, ~_BLOCKABLE); + set_current_blocked(&blocked); + + sig = restore_sigcontext(®s, &frame->sf_sc); + if (sig < 0) + goto badframe; + else if (sig) + force_sig(sig, current); + + /* + * Don't let your children do this ... 
+ */ + __asm__ __volatile__( + "move\t$29, %0\n\t" + "j\tsyscall_exit" + :/* no outputs */ + :"r" (®s)); + /* Unreached */ + +badframe: + force_sig(SIGSEGV, current); +} +#endif /* CONFIG_TRAD_SIGNALS */ + +asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) +{ + struct rt_sigframe __user *frame; + sigset_t set; + int sig; + + frame = (struct rt_sigframe __user *) regs.regs[29]; + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) + goto badframe; + + sigdelsetmask(&set, ~_BLOCKABLE); + set_current_blocked(&set); + + sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); + if (sig < 0) + goto badframe; + else if (sig) + force_sig(sig, current); + + /* It is more difficult to avoid calling this function than to + call it and ignore errors. */ + do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]); + + /* + * Don't let your children do this ... + */ + __asm__ __volatile__( + "move\t$29, %0\n\t" + "j\tsyscall_exit" + :/* no outputs */ + :"r" (®s)); + /* Unreached */ + +badframe: + force_sig(SIGSEGV, current); +} + +#ifdef CONFIG_TRAD_SIGNALS +static int setup_frame(void *sig_return, struct k_sigaction *ka, + struct pt_regs *regs, int signr, sigset_t *set) +{ + struct sigframe __user *frame; + int err = 0; + + frame = get_sigframe(ka, regs, sizeof(*frame)); + if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) + goto give_sigsegv; + + err |= setup_sigcontext(regs, &frame->sf_sc); + err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); + if (err) + goto give_sigsegv; + + /* + * Arguments to signal handler: + * + * a0 = signal number + * a1 = 0 (should be cause) + * a2 = pointer to struct sigcontext + * + * $25 and c0_epc point to the signal handler, $29 points to the + * struct sigframe. + */ + regs->regs[ 4] = signr; + regs->regs[ 5] = 0; + regs->regs[ 6] = (unsigned long) &frame->sf_sc; + regs->regs[29] = (unsigned long) frame; + regs->regs[31] = (unsigned long) sig_return; + regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; + + DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", + current->comm, current->pid, + frame, regs->cp0_epc, regs->regs[31]); + return 0; + +give_sigsegv: + force_sigsegv(signr, current); + return -EFAULT; +} +#endif + +static int setup_rt_frame(void *sig_return, struct k_sigaction *ka, + struct pt_regs *regs, int signr, sigset_t *set, + siginfo_t *info) +{ + struct rt_sigframe __user *frame; + int err = 0; + + frame = get_sigframe(ka, regs, sizeof(*frame)); + if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) + goto give_sigsegv; + + /* Create siginfo. */ + err |= copy_siginfo_to_user(&frame->rs_info, info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->rs_uc.uc_flags); + err |= __put_user(NULL, &frame->rs_uc.uc_link); + err |= __put_user((void __user *)current->sas_ss_sp, + &frame->rs_uc.uc_stack.ss_sp); + err |= __put_user(sas_ss_flags(regs->regs[29]), + &frame->rs_uc.uc_stack.ss_flags); + err |= __put_user(current->sas_ss_size, + &frame->rs_uc.uc_stack.ss_size); + err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext); + err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)); + + if (err) + goto give_sigsegv; + + /* + * Arguments to signal handler: + * + * a0 = signal number + * a1 = 0 (should be cause) + * a2 = pointer to ucontext + * + * $25 and c0_epc point to the signal handler, $29 points to + * the struct rt_sigframe. 
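
The register assignments that follow are what hand a userspace handler its three arguments; a minimal SA_SIGINFO handler showing the receiving end (illustrative only):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ucontext)
{
        ucontext_t *uc = ucontext;      /* the rs_uc that setup_rt_frame() built */

        /* printf is not async-signal-safe; used here only to keep the demo short */
        printf("signal %d from pid %d, saved context at %p\n",
               sig, (int)info->si_pid, (void *)uc);
}

int main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = handler;      /* a0, a1, a2 below become these arguments */
        sa.sa_flags = SA_SIGINFO;       /* ask for the rt_sigframe variant */
        sigaction(SIGUSR1, &sa, NULL);

        kill(getpid(), SIGUSR1);
        return 0;
}
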
+ */ + regs->regs[ 4] = signr; + regs->regs[ 5] = (unsigned long) &frame->rs_info; + regs->regs[ 6] = (unsigned long) &frame->rs_uc; + regs->regs[29] = (unsigned long) frame; + regs->regs[31] = (unsigned long) sig_return; + regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; + + DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", + current->comm, current->pid, + frame, regs->cp0_epc, regs->regs[31]); + + return 0; + +give_sigsegv: + force_sigsegv(signr, current); + return -EFAULT; +} + +struct mips_abi mips_abi = { +#ifdef CONFIG_TRAD_SIGNALS + .setup_frame = setup_frame, + .signal_return_offset = offsetof(struct mips_vdso, signal_trampoline), +#endif + .setup_rt_frame = setup_rt_frame, + .rt_signal_return_offset = + offsetof(struct mips_vdso, rt_signal_trampoline), + .restart = __NR_restart_syscall +}; + +static int handle_signal(unsigned long sig, siginfo_t *info, + struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) +{ + int ret; + struct mips_abi *abi = current->thread.abi; + void *vdso = current->mm->context.vdso; + + if (regs->regs[0]) { + switch(regs->regs[2]) { + case ERESTART_RESTARTBLOCK: + case ERESTARTNOHAND: + regs->regs[2] = EINTR; + break; + case ERESTARTSYS: + if (!(ka->sa.sa_flags & SA_RESTART)) { + regs->regs[2] = EINTR; + break; + } + /* fallthrough */ + case ERESTARTNOINTR: + regs->regs[7] = regs->regs[26]; + regs->regs[2] = regs->regs[0]; + regs->cp0_epc -= 4; + } + + regs->regs[0] = 0; /* Don't deal with this again. */ + } + + if (sig_uses_siginfo(ka)) + ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset, + ka, regs, sig, oldset, info); + else + ret = abi->setup_frame(vdso + abi->signal_return_offset, + ka, regs, sig, oldset); + + if (ret) + return ret; + + block_sigmask(ka, sig); + + return ret; +} + +static void do_signal(struct pt_regs *regs) +{ + struct k_sigaction ka; + sigset_t *oldset; + siginfo_t info; + int signr; + + /* + * We want the common case to go fast, which is why we may in certain + * cases get here from kernel mode. Just return without doing anything + * if so. + */ + if (!user_mode(regs)) + return; + + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + oldset = ¤t->saved_sigmask; + else + oldset = ¤t->blocked; + + signr = get_signal_to_deliver(&info, &ka, regs, NULL); + if (signr > 0) { + /* Whee! Actually deliver the signal. */ + if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { + /* + * A signal was successfully delivered; the saved + * sigmask will have been stored in the signal frame, + * and will be restored by sigreturn, so we can simply + * clear the TIF_RESTORE_SIGMASK flag. + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + clear_thread_flag(TIF_RESTORE_SIGMASK); + } + + return; + } + + if (regs->regs[0]) { + if (regs->regs[2] == ERESTARTNOHAND || + regs->regs[2] == ERESTARTSYS || + regs->regs[2] == ERESTARTNOINTR) { + regs->regs[2] = regs->regs[0]; + regs->regs[7] = regs->regs[26]; + regs->cp0_epc -= 4; + } + if (regs->regs[2] == ERESTART_RESTARTBLOCK) { + regs->regs[2] = current->thread.abi->restart; + regs->regs[7] = regs->regs[26]; + regs->cp0_epc -= 4; + } + regs->regs[0] = 0; /* Don't deal with this again. 
*/ + } + + /* + * If there's no signal to deliver, we just put the saved sigmask + * back + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) { + clear_thread_flag(TIF_RESTORE_SIGMASK); + sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + } +} + +/* + * notification of userspace execution resumption + * - triggered by the TIF_WORK_MASK flags + */ +asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, + __u32 thread_info_flags) +{ + local_irq_enable(); + + /* deal with pending signal delivery */ + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) + do_signal(regs); + + if (thread_info_flags & _TIF_NOTIFY_RESUME) { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + if (current->replacement_session_keyring) + key_replace_session_keyring(); + } +} + +#ifdef CONFIG_SMP +static int smp_save_fp_context(struct sigcontext __user *sc) +{ + return raw_cpu_has_fpu + ? _save_fp_context(sc) + : fpu_emulator_save_context(sc); +} + +static int smp_restore_fp_context(struct sigcontext __user *sc) +{ + return raw_cpu_has_fpu + ? _restore_fp_context(sc) + : fpu_emulator_restore_context(sc); +} +#endif + +static int signal_setup(void) +{ +#ifdef CONFIG_SMP + /* For now just do the cpu_has_fpu check when the functions are invoked */ + save_fp_context = smp_save_fp_context; + restore_fp_context = smp_restore_fp_context; +#else + if (cpu_has_fpu) { + save_fp_context = _save_fp_context; + restore_fp_context = _restore_fp_context; + } else { + save_fp_context = fpu_emulator_save_context; + restore_fp_context = fpu_emulator_restore_context; + } +#endif + + return 0; +} + +arch_initcall(signal_setup); diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c new file mode 100644 index 00000000..ac3b8d89 --- /dev/null +++ b/arch/mips/kernel/signal32.c @@ -0,0 +1,815 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1994 - 2000, 2006 Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + */ +#include <linux/cache.h> +#include <linux/compat.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/syscalls.h> +#include <linux/errno.h> +#include <linux/wait.h> +#include <linux/ptrace.h> +#include <linux/suspend.h> +#include <linux/compiler.h> +#include <linux/uaccess.h> + +#include <asm/abi.h> +#include <asm/asm.h> +#include <asm/compat-signal.h> +#include <linux/bitops.h> +#include <asm/cacheflush.h> +#include <asm/sim.h> +#include <asm/ucontext.h> +#include <asm/fpu.h> +#include <asm/war.h> +#include <asm/vdso.h> +#include <asm/dsp.h> + +#include "signal-common.h" + +static int (*save_fp_context32)(struct sigcontext32 __user *sc); +static int (*restore_fp_context32)(struct sigcontext32 __user *sc); + +extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc); +extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc); + +extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc); +extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc); + +/* + * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... 
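
The #define that follows hard-codes 4253 because o32 syscall numbers are offset by __NR_O32_Linux (4000) and restart_syscall sits in slot 253 of the o32 table earlier in this patch, while a 64-bit build of <asm/unistd.h> would supply the 64-bit numbering instead. The arithmetic, spelled out as a sanity check (values taken from this tree):

#include <stdio.h>

int main(void)
{
        enum {
                NR_O32_LINUX         = 4000,    /* base of the o32 syscall number space */
                RESTART_SYSCALL_SLOT = 253      /* restart_syscall's slot in the o32 table above */
        };

        printf("__NR_O32_restart_syscall = %d\n",
               NR_O32_LINUX + RESTART_SYSCALL_SLOT);    /* prints 4253 */
        return 0;
}
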
+ */ +#define __NR_O32_restart_syscall 4253 + +/* 32-bit compatibility types */ + +typedef unsigned int __sighandler32_t; +typedef void (*vfptr_t)(void); + +struct sigaction32 { + unsigned int sa_flags; + __sighandler32_t sa_handler; + compat_sigset_t sa_mask; +}; + +/* IRIX compatible stack_t */ +typedef struct sigaltstack32 { + s32 ss_sp; + compat_size_t ss_size; + int ss_flags; +} stack32_t; + +struct ucontext32 { + u32 uc_flags; + s32 uc_link; + stack32_t uc_stack; + struct sigcontext32 uc_mcontext; + compat_sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +struct sigframe32 { + u32 sf_ass[4]; /* argument save space for o32 */ + u32 sf_pad[2]; /* Was: signal trampoline */ + struct sigcontext32 sf_sc; + compat_sigset_t sf_mask; +}; + +struct rt_sigframe32 { + u32 rs_ass[4]; /* argument save space for o32 */ + u32 rs_pad[2]; /* Was: signal trampoline */ + compat_siginfo_t rs_info; + struct ucontext32 rs_uc; +}; + +/* + * sigcontext handlers + */ +static int protected_save_fp_context32(struct sigcontext32 __user *sc) +{ + int err; + while (1) { + lock_fpu_owner(); + own_fpu_inatomic(1); + err = save_fp_context32(sc); /* this might fail */ + unlock_fpu_owner(); + if (likely(!err)) + break; + /* touch the sigcontext and try again */ + err = __put_user(0, &sc->sc_fpregs[0]) | + __put_user(0, &sc->sc_fpregs[31]) | + __put_user(0, &sc->sc_fpc_csr); + if (err) + break; /* really bad sigcontext */ + } + return err; +} + +static int protected_restore_fp_context32(struct sigcontext32 __user *sc) +{ + int err, tmp __maybe_unused; + while (1) { + lock_fpu_owner(); + own_fpu_inatomic(0); + err = restore_fp_context32(sc); /* this might fail */ + unlock_fpu_owner(); + if (likely(!err)) + break; + /* touch the sigcontext and try again */ + err = __get_user(tmp, &sc->sc_fpregs[0]) | + __get_user(tmp, &sc->sc_fpregs[31]) | + __get_user(tmp, &sc->sc_fpc_csr); + if (err) + break; /* really bad sigcontext */ + } + return err; +} + +static int setup_sigcontext32(struct pt_regs *regs, + struct sigcontext32 __user *sc) +{ + int err = 0; + int i; + u32 used_math; + + err |= __put_user(regs->cp0_epc, &sc->sc_pc); + + err |= __put_user(0, &sc->sc_regs[0]); + for (i = 1; i < 32; i++) + err |= __put_user(regs->regs[i], &sc->sc_regs[i]); + + err |= __put_user(regs->hi, &sc->sc_mdhi); + err |= __put_user(regs->lo, &sc->sc_mdlo); + if (cpu_has_dsp) { + err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); + err |= __put_user(mfhi1(), &sc->sc_hi1); + err |= __put_user(mflo1(), &sc->sc_lo1); + err |= __put_user(mfhi2(), &sc->sc_hi2); + err |= __put_user(mflo2(), &sc->sc_lo2); + err |= __put_user(mfhi3(), &sc->sc_hi3); + err |= __put_user(mflo3(), &sc->sc_lo3); + } + + used_math = !!used_math(); + err |= __put_user(used_math, &sc->sc_used_math); + + if (used_math) { + /* + * Save FPU state to signal context. Signal handler + * will "inherit" current FPU state. 
+ */ + err |= protected_save_fp_context32(sc); + } + return err; +} + +static int +check_and_restore_fp_context32(struct sigcontext32 __user *sc) +{ + int err, sig; + + err = sig = fpcsr_pending(&sc->sc_fpc_csr); + if (err > 0) + err = 0; + err |= protected_restore_fp_context32(sc); + return err ?: sig; +} + +static int restore_sigcontext32(struct pt_regs *regs, + struct sigcontext32 __user *sc) +{ + u32 used_math; + int err = 0; + s32 treg; + int i; + + /* Always make any pending restarted system calls return -EINTR */ + current_thread_info()->restart_block.fn = do_no_restart_syscall; + + err |= __get_user(regs->cp0_epc, &sc->sc_pc); + err |= __get_user(regs->hi, &sc->sc_mdhi); + err |= __get_user(regs->lo, &sc->sc_mdlo); + if (cpu_has_dsp) { + err |= __get_user(treg, &sc->sc_hi1); mthi1(treg); + err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg); + err |= __get_user(treg, &sc->sc_hi2); mthi2(treg); + err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg); + err |= __get_user(treg, &sc->sc_hi3); mthi3(treg); + err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); + err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); + } + + for (i = 1; i < 32; i++) + err |= __get_user(regs->regs[i], &sc->sc_regs[i]); + + err |= __get_user(used_math, &sc->sc_used_math); + conditional_used_math(used_math); + + if (used_math) { + /* restore fpu context if we have used it before */ + if (!err) + err = check_and_restore_fp_context32(sc); + } else { + /* signal handler may have used FPU. Give it up. */ + lose_fpu(0); + } + + return err; +} + +/* + * + */ +extern void __put_sigset_unknown_nsig(void); +extern void __get_sigset_unknown_nsig(void); + +static inline int put_sigset(const sigset_t *kbuf, compat_sigset_t __user *ubuf) +{ + int err = 0; + + if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf))) + return -EFAULT; + + switch (_NSIG_WORDS) { + default: + __put_sigset_unknown_nsig(); + case 2: + err |= __put_user(kbuf->sig[1] >> 32, &ubuf->sig[3]); + err |= __put_user(kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]); + case 1: + err |= __put_user(kbuf->sig[0] >> 32, &ubuf->sig[1]); + err |= __put_user(kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]); + } + + return err; +} + +static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf) +{ + int err = 0; + unsigned long sig[4]; + + if (!access_ok(VERIFY_READ, ubuf, sizeof(*ubuf))) + return -EFAULT; + + switch (_NSIG_WORDS) { + default: + __get_sigset_unknown_nsig(); + case 2: + err |= __get_user(sig[3], &ubuf->sig[3]); + err |= __get_user(sig[2], &ubuf->sig[2]); + kbuf->sig[1] = sig[2] | (sig[3] << 32); + case 1: + err |= __get_user(sig[1], &ubuf->sig[1]); + err |= __get_user(sig[0], &ubuf->sig[0]); + kbuf->sig[0] = sig[0] | (sig[1] << 32); + } + + return err; +} + +/* + * Atomically swap in the new signal mask, and wait for a signal. + */ + +asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs) +{ + compat_sigset_t __user *uset; + sigset_t newset; + + uset = (compat_sigset_t __user *) regs.regs[4]; + if (get_sigset(&newset, uset)) + return -EFAULT; + sigdelsetmask(&newset, ~_BLOCKABLE); + + current->saved_sigmask = current->blocked; + set_current_blocked(&newset); + + current->state = TASK_INTERRUPTIBLE; + schedule(); + set_thread_flag(TIF_RESTORE_SIGMASK); + return -ERESTARTNOHAND; +} + +asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) +{ + compat_sigset_t __user *uset; + sigset_t newset; + size_t sigsetsize; + + /* XXX Don't preclude handling different sized sigset_t's. 
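
put_sigset() and get_sigset() above split each 64-bit sigset_t word into two 32-bit compat words and reassemble them on the way back in; the conversion itself is plain shifting and masking. A tiny host-side C sketch of the round trip (illustrative only, not part of this patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t kernel_word = 0x123456789abcdef0ULL;   /* one sigset_t word */
        uint32_t compat[2];
        uint64_t rebuilt;

        /* put_sigset(): low half first, then high half */
        compat[0] = (uint32_t)(kernel_word & 0xffffffff);
        compat[1] = (uint32_t)(kernel_word >> 32);

        /* get_sigset(): reassemble in the same order */
        rebuilt = (uint64_t)compat[0] | ((uint64_t)compat[1] << 32);

        assert(rebuilt == kernel_word);
        printf("0x%016llx -> 0x%08x 0x%08x -> 0x%016llx\n",
               (unsigned long long)kernel_word, (unsigned)compat[0],
               (unsigned)compat[1], (unsigned long long)rebuilt);
        return 0;
}
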
*/ + sigsetsize = regs.regs[5]; + if (sigsetsize != sizeof(compat_sigset_t)) + return -EINVAL; + + uset = (compat_sigset_t __user *) regs.regs[4]; + if (get_sigset(&newset, uset)) + return -EFAULT; + sigdelsetmask(&newset, ~_BLOCKABLE); + + current->saved_sigmask = current->blocked; + set_current_blocked(&newset); + + current->state = TASK_INTERRUPTIBLE; + schedule(); + set_thread_flag(TIF_RESTORE_SIGMASK); + return -ERESTARTNOHAND; +} + +SYSCALL_DEFINE3(32_sigaction, long, sig, const struct sigaction32 __user *, act, + struct sigaction32 __user *, oact) +{ + struct k_sigaction new_ka, old_ka; + int ret; + int err = 0; + + if (act) { + old_sigset_t mask; + s32 handler; + + if (!access_ok(VERIFY_READ, act, sizeof(*act))) + return -EFAULT; + err |= __get_user(handler, &act->sa_handler); + new_ka.sa.sa_handler = (void __user *)(s64)handler; + err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); + err |= __get_user(mask, &act->sa_mask.sig[0]); + if (err) + return -EFAULT; + + siginitset(&new_ka.sa.sa_mask, mask); + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); + + if (!ret && oact) { + if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) + return -EFAULT; + err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); + err |= __put_user((u32)(u64)old_ka.sa.sa_handler, + &oact->sa_handler); + err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig); + err |= __put_user(0, &oact->sa_mask.sig[1]); + err |= __put_user(0, &oact->sa_mask.sig[2]); + err |= __put_user(0, &oact->sa_mask.sig[3]); + if (err) + return -EFAULT; + } + + return ret; +} + +asmlinkage int sys32_sigaltstack(nabi_no_regargs struct pt_regs regs) +{ + const stack32_t __user *uss = (const stack32_t __user *) regs.regs[4]; + stack32_t __user *uoss = (stack32_t __user *) regs.regs[5]; + unsigned long usp = regs.regs[29]; + stack_t kss, koss; + int ret, err = 0; + mm_segment_t old_fs = get_fs(); + s32 sp; + + if (uss) { + if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) + return -EFAULT; + err |= __get_user(sp, &uss->ss_sp); + kss.ss_sp = (void __user *) (long) sp; + err |= __get_user(kss.ss_size, &uss->ss_size); + err |= __get_user(kss.ss_flags, &uss->ss_flags); + if (err) + return -EFAULT; + } + + set_fs(KERNEL_DS); + ret = do_sigaltstack(uss ? (stack_t __user *)&kss : NULL, + uoss ? (stack_t __user *)&koss : NULL, usp); + set_fs(old_fs); + + if (!ret && uoss) { + if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) + return -EFAULT; + sp = (int) (unsigned long) koss.ss_sp; + err |= __put_user(sp, &uoss->ss_sp); + err |= __put_user(koss.ss_size, &uoss->ss_size); + err |= __put_user(koss.ss_flags, &uoss->ss_flags); + if (err) + return -EFAULT; + } + return ret; +} + +int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) +{ + int err; + + if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t))) + return -EFAULT; + + /* If you change siginfo_t structure, please be sure + this code is fixed accordingly. + It should never copy any pad contained in the structure + to avoid security leaks, but must copy the generic + 3 ints plus the relevant union member. + This routine must convert siginfo from 64bit to 32bit as well + at the same time. 
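
sys32_sigaltstack() above only translates the 32-bit stack32_t into a native stack_t before handing it to do_sigaltstack(); the userspace interface it serves is the usual sigaltstack()/SA_ONSTACK pair. A self-contained POSIX C sketch of that interface (generic userspace code, not part of this patch):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void handler(int sig)
{
        int probe;
        /* printf() is acceptable here only because the signal is raised synchronously below */
        printf("handler for %d, local variable at %p (alternate stack)\n",
               sig, (void *)&probe);
}

int main(void)
{
        stack_t ss;
        struct sigaction sa;

        ss.ss_sp = malloc(SIGSTKSZ);    /* same ss_sp/ss_size/ss_flags fields the kernel converts */
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;
        if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) < 0) {
                perror("sigaltstack");
                return 1;
        }

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = handler;
        sa.sa_flags = SA_ONSTACK;       /* run this handler on the alternate stack */
        sigaction(SIGUSR1, &sa, NULL);

        raise(SIGUSR1);
        return 0;
}
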
*/ + err = __put_user(from->si_signo, &to->si_signo); + err |= __put_user(from->si_errno, &to->si_errno); + err |= __put_user((short)from->si_code, &to->si_code); + if (from->si_code < 0) + err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); + else { + switch (from->si_code >> 16) { + case __SI_TIMER >> 16: + err |= __put_user(from->si_tid, &to->si_tid); + err |= __put_user(from->si_overrun, &to->si_overrun); + err |= __put_user(from->si_int, &to->si_int); + break; + case __SI_CHLD >> 16: + err |= __put_user(from->si_utime, &to->si_utime); + err |= __put_user(from->si_stime, &to->si_stime); + err |= __put_user(from->si_status, &to->si_status); + default: + err |= __put_user(from->si_pid, &to->si_pid); + err |= __put_user(from->si_uid, &to->si_uid); + break; + case __SI_FAULT >> 16: + err |= __put_user((unsigned long)from->si_addr, &to->si_addr); + break; + case __SI_POLL >> 16: + err |= __put_user(from->si_band, &to->si_band); + err |= __put_user(from->si_fd, &to->si_fd); + break; + case __SI_RT >> 16: /* This is not generated by the kernel as of now. */ + case __SI_MESGQ >> 16: + err |= __put_user(from->si_pid, &to->si_pid); + err |= __put_user(from->si_uid, &to->si_uid); + err |= __put_user(from->si_int, &to->si_int); + break; + } + } + return err; +} + +int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) +{ + memset(to, 0, sizeof *to); + + if (copy_from_user(to, from, 3*sizeof(int)) || + copy_from_user(to->_sifields._pad, + from->_sifields._pad, SI_PAD_SIZE32)) + return -EFAULT; + + return 0; +} + +asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) +{ + struct sigframe32 __user *frame; + sigset_t blocked; + int sig; + + frame = (struct sigframe32 __user *) regs.regs[29]; + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask)) + goto badframe; + + sigdelsetmask(&blocked, ~_BLOCKABLE); + set_current_blocked(&blocked); + + sig = restore_sigcontext32(®s, &frame->sf_sc); + if (sig < 0) + goto badframe; + else if (sig) + force_sig(sig, current); + + /* + * Don't let your children do this ... + */ + __asm__ __volatile__( + "move\t$29, %0\n\t" + "j\tsyscall_exit" + :/* no outputs */ + :"r" (®s)); + /* Unreached */ + +badframe: + force_sig(SIGSEGV, current); +} + +asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) +{ + struct rt_sigframe32 __user *frame; + mm_segment_t old_fs; + sigset_t set; + stack_t st; + s32 sp; + int sig; + + frame = (struct rt_sigframe32 __user *) regs.regs[29]; + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) + goto badframe; + + sigdelsetmask(&set, ~_BLOCKABLE); + set_current_blocked(&set); + + sig = restore_sigcontext32(®s, &frame->rs_uc.uc_mcontext); + if (sig < 0) + goto badframe; + else if (sig) + force_sig(sig, current); + + /* The ucontext contains a stack32_t, so we must convert! */ + if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp)) + goto badframe; + st.ss_sp = (void __user *)(long) sp; + if (__get_user(st.ss_size, &frame->rs_uc.uc_stack.ss_size)) + goto badframe; + if (__get_user(st.ss_flags, &frame->rs_uc.uc_stack.ss_flags)) + goto badframe; + + /* It is more difficult to avoid calling this function than to + call it and ignore errors. */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]); + set_fs(old_fs); + + /* + * Don't let your children do this ... 
+ */ + __asm__ __volatile__( + "move\t$29, %0\n\t" + "j\tsyscall_exit" + :/* no outputs */ + :"r" (®s)); + /* Unreached */ + +badframe: + force_sig(SIGSEGV, current); +} + +static int setup_frame_32(void *sig_return, struct k_sigaction *ka, + struct pt_regs *regs, int signr, sigset_t *set) +{ + struct sigframe32 __user *frame; + int err = 0; + + frame = get_sigframe(ka, regs, sizeof(*frame)); + if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) + goto give_sigsegv; + + err |= setup_sigcontext32(regs, &frame->sf_sc); + err |= __copy_conv_sigset_to_user(&frame->sf_mask, set); + + if (err) + goto give_sigsegv; + + /* + * Arguments to signal handler: + * + * a0 = signal number + * a1 = 0 (should be cause) + * a2 = pointer to struct sigcontext + * + * $25 and c0_epc point to the signal handler, $29 points to the + * struct sigframe. + */ + regs->regs[ 4] = signr; + regs->regs[ 5] = 0; + regs->regs[ 6] = (unsigned long) &frame->sf_sc; + regs->regs[29] = (unsigned long) frame; + regs->regs[31] = (unsigned long) sig_return; + regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; + + DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", + current->comm, current->pid, + frame, regs->cp0_epc, regs->regs[31]); + + return 0; + +give_sigsegv: + force_sigsegv(signr, current); + return -EFAULT; +} + +static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka, + struct pt_regs *regs, int signr, sigset_t *set, + siginfo_t *info) +{ + struct rt_sigframe32 __user *frame; + int err = 0; + s32 sp; + + frame = get_sigframe(ka, regs, sizeof(*frame)); + if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) + goto give_sigsegv; + + /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ + err |= copy_siginfo_to_user32(&frame->rs_info, info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->rs_uc.uc_flags); + err |= __put_user(0, &frame->rs_uc.uc_link); + sp = (int) (long) current->sas_ss_sp; + err |= __put_user(sp, + &frame->rs_uc.uc_stack.ss_sp); + err |= __put_user(sas_ss_flags(regs->regs[29]), + &frame->rs_uc.uc_stack.ss_flags); + err |= __put_user(current->sas_ss_size, + &frame->rs_uc.uc_stack.ss_size); + err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext); + err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); + + if (err) + goto give_sigsegv; + + /* + * Arguments to signal handler: + * + * a0 = signal number + * a1 = 0 (should be cause) + * a2 = pointer to ucontext + * + * $25 and c0_epc point to the signal handler, $29 points to + * the struct rt_sigframe32. 
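
The register assignments described above (a0 = signal number, a1 = pointer to siginfo, a2 = pointer to ucontext) are exactly the three arguments a SA_SIGINFO handler receives in C; on this kernel the third argument points at the rs_uc placed in the rt_sigframe. A generic userspace counterpart (portable POSIX code, not part of this patch):

#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

static void handler(int sig, siginfo_t *info, void *ucp)
{
        ucontext_t *uc = ucp;   /* the ucontext the kernel built on the signal stack */

        /* sig -> a0, info -> a1, uc -> a2 in the frame setup above */
        printf("sig=%d si_code=%d uc=%p\n", sig, info->si_code, (void *)uc);
}

int main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO;       /* request the three-argument handler form */
        sigaction(SIGUSR1, &sa, NULL);

        raise(SIGUSR1);
        return 0;
}
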
+ */ + regs->regs[ 4] = signr; + regs->regs[ 5] = (unsigned long) &frame->rs_info; + regs->regs[ 6] = (unsigned long) &frame->rs_uc; + regs->regs[29] = (unsigned long) frame; + regs->regs[31] = (unsigned long) sig_return; + regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; + + DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", + current->comm, current->pid, + frame, regs->cp0_epc, regs->regs[31]); + + return 0; + +give_sigsegv: + force_sigsegv(signr, current); + return -EFAULT; +} + +/* + * o32 compatibility on 64-bit kernels, without DSP ASE + */ +struct mips_abi mips_abi_32 = { + .setup_frame = setup_frame_32, + .signal_return_offset = + offsetof(struct mips_vdso, o32_signal_trampoline), + .setup_rt_frame = setup_rt_frame_32, + .rt_signal_return_offset = + offsetof(struct mips_vdso, o32_rt_signal_trampoline), + .restart = __NR_O32_restart_syscall +}; + +SYSCALL_DEFINE4(32_rt_sigaction, int, sig, + const struct sigaction32 __user *, act, + struct sigaction32 __user *, oact, unsigned int, sigsetsize) +{ + struct k_sigaction new_sa, old_sa; + int ret = -EINVAL; + + /* XXX: Don't preclude handling different sized sigset_t's. */ + if (sigsetsize != sizeof(sigset_t)) + goto out; + + if (act) { + s32 handler; + int err = 0; + + if (!access_ok(VERIFY_READ, act, sizeof(*act))) + return -EFAULT; + err |= __get_user(handler, &act->sa_handler); + new_sa.sa.sa_handler = (void __user *)(s64)handler; + err |= __get_user(new_sa.sa.sa_flags, &act->sa_flags); + err |= get_sigset(&new_sa.sa.sa_mask, &act->sa_mask); + if (err) + return -EFAULT; + } + + ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); + + if (!ret && oact) { + int err = 0; + + if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) + return -EFAULT; + + err |= __put_user((u32)(u64)old_sa.sa.sa_handler, + &oact->sa_handler); + err |= __put_user(old_sa.sa.sa_flags, &oact->sa_flags); + err |= put_sigset(&old_sa.sa.sa_mask, &oact->sa_mask); + if (err) + return -EFAULT; + } +out: + return ret; +} + +SYSCALL_DEFINE4(32_rt_sigprocmask, int, how, compat_sigset_t __user *, set, + compat_sigset_t __user *, oset, unsigned int, sigsetsize) +{ + sigset_t old_set, new_set; + int ret; + mm_segment_t old_fs = get_fs(); + + if (set && get_sigset(&new_set, set)) + return -EFAULT; + + set_fs(KERNEL_DS); + ret = sys_rt_sigprocmask(how, set ? (sigset_t __user *)&new_set : NULL, + oset ? 
(sigset_t __user *)&old_set : NULL, + sigsetsize); + set_fs(old_fs); + + if (!ret && oset && put_sigset(&old_set, oset)) + return -EFAULT; + + return ret; +} + +SYSCALL_DEFINE2(32_rt_sigpending, compat_sigset_t __user *, uset, + unsigned int, sigsetsize) +{ + int ret; + sigset_t set; + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + ret = sys_rt_sigpending((sigset_t __user *)&set, sigsetsize); + set_fs(old_fs); + + if (!ret && put_sigset(&set, uset)) + return -EFAULT; + + return ret; +} + +SYSCALL_DEFINE3(32_rt_sigqueueinfo, int, pid, int, sig, + compat_siginfo_t __user *, uinfo) +{ + siginfo_t info; + int ret; + mm_segment_t old_fs = get_fs(); + + if (copy_from_user(&info, uinfo, 3*sizeof(int)) || + copy_from_user(info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE)) + return -EFAULT; + set_fs(KERNEL_DS); + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info); + set_fs(old_fs); + return ret; +} + +SYSCALL_DEFINE5(32_waitid, int, which, compat_pid_t, pid, + compat_siginfo_t __user *, uinfo, int, options, + struct compat_rusage __user *, uru) +{ + siginfo_t info; + struct rusage ru; + long ret; + mm_segment_t old_fs = get_fs(); + + info.si_signo = 0; + set_fs(KERNEL_DS); + ret = sys_waitid(which, pid, (siginfo_t __user *) &info, options, + uru ? (struct rusage __user *) &ru : NULL); + set_fs(old_fs); + + if (ret < 0 || info.si_signo == 0) + return ret; + + if (uru && (ret = put_compat_rusage(&ru, uru))) + return ret; + + BUG_ON(info.si_code & __SI_MASK); + info.si_code |= __SI_CHLD; + return copy_siginfo_to_user32(uinfo, &info); +} + +static int signal32_init(void) +{ + if (cpu_has_fpu) { + save_fp_context32 = _save_fp_context32; + restore_fp_context32 = _restore_fp_context32; + } else { + save_fp_context32 = fpu_emulator_save_context32; + restore_fp_context32 = fpu_emulator_restore_context32; + } + + return 0; +} + +arch_initcall(signal32_init); diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c new file mode 100644 index 00000000..86eb4b04 --- /dev/null +++ b/arch/mips/kernel/signal_n32.c @@ -0,0 +1,223 @@ +/* + * Copyright (C) 2003 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include <linux/cache.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/errno.h> +#include <linux/wait.h> +#include <linux/ptrace.h> +#include <linux/unistd.h> +#include <linux/compat.h> +#include <linux/bitops.h> + +#include <asm/abi.h> +#include <asm/asm.h> +#include <asm/cacheflush.h> +#include <asm/compat-signal.h> +#include <asm/sim.h> +#include <asm/uaccess.h> +#include <asm/ucontext.h> +#include <asm/fpu.h> +#include <asm/cpu-features.h> +#include <asm/war.h> +#include <asm/vdso.h> + +#include "signal-common.h" + +/* + * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... + */ +#define __NR_N32_restart_syscall 6214 + +extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *); +extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *); + + +/* IRIX compatible stack_t */ +typedef struct sigaltstack32 { + s32 ss_sp; + compat_size_t ss_size; + int ss_flags; +} stack32_t; + +struct ucontextn32 { + u32 uc_flags; + s32 uc_link; + stack32_t uc_stack; + struct sigcontext uc_mcontext; + compat_sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +struct rt_sigframe_n32 { + u32 rs_ass[4]; /* argument save space for o32 */ + u32 rs_pad[2]; /* Was: signal trampoline */ + struct compat_siginfo rs_info; + struct ucontextn32 rs_uc; +}; + +extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat); + +asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) +{ + compat_sigset_t __user *unewset; + compat_sigset_t uset; + size_t sigsetsize; + sigset_t newset; + + /* XXX Don't preclude handling different sized sigset_t's. */ + sigsetsize = regs.regs[5]; + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + unewset = (compat_sigset_t __user *) regs.regs[4]; + if (copy_from_user(&uset, unewset, sizeof(uset))) + return -EFAULT; + sigset_from_compat(&newset, &uset); + sigdelsetmask(&newset, ~_BLOCKABLE); + + current->saved_sigmask = current->blocked; + set_current_blocked(&newset); + + current->state = TASK_INTERRUPTIBLE; + schedule(); + set_thread_flag(TIF_RESTORE_SIGMASK); + return -ERESTARTNOHAND; +} + +asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) +{ + struct rt_sigframe_n32 __user *frame; + mm_segment_t old_fs; + sigset_t set; + stack_t st; + s32 sp; + int sig; + + frame = (struct rt_sigframe_n32 __user *) regs.regs[29]; + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) + goto badframe; + + sigdelsetmask(&set, ~_BLOCKABLE); + set_current_blocked(&set); + + sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); + if (sig < 0) + goto badframe; + else if (sig) + force_sig(sig, current); + + /* The ucontext contains a stack32_t, so we must convert! */ + if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp)) + goto badframe; + st.ss_sp = (void __user *)(long) sp; + if (__get_user(st.ss_size, &frame->rs_uc.uc_stack.ss_size)) + goto badframe; + if (__get_user(st.ss_flags, &frame->rs_uc.uc_stack.ss_flags)) + goto badframe; + + /* It is more difficult to avoid calling this function than to + call it and ignore errors. */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]); + set_fs(old_fs); + + + /* + * Don't let your children do this ... 
+ */ + __asm__ __volatile__( + "move\t$29, %0\n\t" + "j\tsyscall_exit" + :/* no outputs */ + :"r" (®s)); + /* Unreached */ + +badframe: + force_sig(SIGSEGV, current); +} + +static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka, + struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) +{ + struct rt_sigframe_n32 __user *frame; + int err = 0; + s32 sp; + + frame = get_sigframe(ka, regs, sizeof(*frame)); + if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) + goto give_sigsegv; + + /* Create siginfo. */ + err |= copy_siginfo_to_user32(&frame->rs_info, info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->rs_uc.uc_flags); + err |= __put_user(0, &frame->rs_uc.uc_link); + sp = (int) (long) current->sas_ss_sp; + err |= __put_user(sp, + &frame->rs_uc.uc_stack.ss_sp); + err |= __put_user(sas_ss_flags(regs->regs[29]), + &frame->rs_uc.uc_stack.ss_flags); + err |= __put_user(current->sas_ss_size, + &frame->rs_uc.uc_stack.ss_size); + err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext); + err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); + + if (err) + goto give_sigsegv; + + /* + * Arguments to signal handler: + * + * a0 = signal number + * a1 = 0 (should be cause) + * a2 = pointer to ucontext + * + * $25 and c0_epc point to the signal handler, $29 points to + * the struct rt_sigframe. + */ + regs->regs[ 4] = signr; + regs->regs[ 5] = (unsigned long) &frame->rs_info; + regs->regs[ 6] = (unsigned long) &frame->rs_uc; + regs->regs[29] = (unsigned long) frame; + regs->regs[31] = (unsigned long) sig_return; + regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; + + DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", + current->comm, current->pid, + frame, regs->cp0_epc, regs->regs[31]); + + return 0; + +give_sigsegv: + force_sigsegv(signr, current); + return -EFAULT; +} + +struct mips_abi mips_abi_n32 = { + .setup_rt_frame = setup_rt_frame_n32, + .rt_signal_return_offset = + offsetof(struct mips_vdso, n32_rt_signal_trampoline), + .restart = __NR_N32_restart_syscall +}; diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c new file mode 100644 index 00000000..3046e298 --- /dev/null +++ b/arch/mips/kernel/smp-bmips.c @@ -0,0 +1,456 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com) + * + * SMP support for BMIPS + */ + +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/delay.h> +#include <linux/smp.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/init.h> +#include <linux/cpu.h> +#include <linux/cpumask.h> +#include <linux/reboot.h> +#include <linux/io.h> +#include <linux/compiler.h> +#include <linux/linkage.h> +#include <linux/bug.h> +#include <linux/kernel.h> + +#include <asm/time.h> +#include <asm/pgtable.h> +#include <asm/processor.h> +#include <asm/bootinfo.h> +#include <asm/pmon.h> +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> +#include <asm/mipsregs.h> +#include <asm/bmips.h> +#include <asm/traps.h> +#include <asm/barrier.h> + +static int __maybe_unused max_cpus = 1; + +/* these may be configured by the platform code */ +int bmips_smp_enabled = 1; +int bmips_cpu_offset; +cpumask_t bmips_booted_mask; + +#ifdef CONFIG_SMP + +/* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */ +unsigned long bmips_smp_boot_sp; +unsigned long bmips_smp_boot_gp; + +static void bmips_send_ipi_single(int cpu, unsigned int action); +static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id); + +/* SW interrupts 0,1 are used for interprocessor signaling */ +#define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0) +#define IPI1_IRQ (MIPS_CPU_IRQ_BASE + 1) + +#define CPUNUM(cpu, shift) (((cpu) + bmips_cpu_offset) << (shift)) +#define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8)) +#define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8)) +#define ACTION_BOOT_THREAD(cpu) (0x08 | CPUNUM(cpu, 0)) + +static void __init bmips_smp_setup(void) +{ + int i; + +#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) + /* arbitration priority */ + clear_c0_brcm_cmt_ctrl(0x30); + + /* NBK and weak order flags */ + set_c0_brcm_config_0(0x30000); + + /* + * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread + * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output + * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output + */ + change_c0_brcm_cmt_intr(0xf8018000, + (0x02 << 27) | (0x03 << 15)); + + /* single core, 2 threads (2 pipelines) */ + max_cpus = 2; +#elif defined(CONFIG_CPU_BMIPS5000) + /* enable raceless SW interrupts */ + set_c0_brcm_config(0x03 << 22); + + /* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */ + change_c0_brcm_mode(0x1f << 27, 0x02 << 27); + + /* N cores, 2 threads per core */ + max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1; + + /* clear any pending SW interrupts */ + for (i = 0; i < max_cpus; i++) { + write_c0_brcm_action(ACTION_CLR_IPI(i, 0)); + write_c0_brcm_action(ACTION_CLR_IPI(i, 1)); + } +#endif + + if (!bmips_smp_enabled) + max_cpus = 1; + + /* this can be overridden by the BSP */ + if (!board_ebase_setup) + board_ebase_setup = &bmips_ebase_setup; + + for (i = 0; i < max_cpus; i++) { + __cpu_number_map[i] = 1; + __cpu_logical_map[i] = 1; + set_cpu_possible(i, 1); + set_cpu_present(i, 1); + } +} + +/* + * IPI IRQ setup - runs on CPU0 + */ +static void bmips_prepare_cpus(unsigned int max_cpus) +{ + if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, + "smp_ipi0", NULL)) + panic("Can't request IPI0 interrupt\n"); + if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, + "smp_ipi1", NULL)) + panic("Can't request IPI1 interrupt\n"); +} + +/* + * Tell the hardware to boot CPUx - runs on CPU0 + */ +static void 
bmips_boot_secondary(int cpu, struct task_struct *idle) +{ + bmips_smp_boot_sp = __KSTK_TOS(idle); + bmips_smp_boot_gp = (unsigned long)task_thread_info(idle); + mb(); + + /* + * Initial boot sequence for secondary CPU: + * bmips_reset_nmi_vec @ a000_0000 -> + * bmips_smp_entry -> + * plat_wired_tlb_setup (cached function call; optional) -> + * start_secondary (cached jump) + * + * Warm restart sequence: + * play_dead WAIT loop -> + * bmips_smp_int_vec @ BMIPS_WARM_RESTART_VEC -> + * eret to play_dead -> + * bmips_secondary_reentry -> + * start_secondary + */ + + pr_info("SMP: Booting CPU%d...\n", cpu); + + if (cpumask_test_cpu(cpu, &bmips_booted_mask)) + bmips_send_ipi_single(cpu, 0); + else { +#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) + set_c0_brcm_cmt_ctrl(0x01); +#elif defined(CONFIG_CPU_BMIPS5000) + if (cpu & 0x01) + write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); + else { + /* + * core N thread 0 was already booted; just + * pulse the NMI line + */ + bmips_write_zscm_reg(0x210, 0xc0000000); + udelay(10); + bmips_write_zscm_reg(0x210, 0x00); + } +#endif + cpumask_set_cpu(cpu, &bmips_booted_mask); + } +} + +/* + * Early setup - runs on secondary CPU after cache probe + */ +static void bmips_init_secondary(void) +{ + /* move NMI vector to kseg0, in case XKS01 is enabled */ + +#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) + void __iomem *cbr = BMIPS_GET_CBR(); + unsigned long old_vec; + + old_vec = __raw_readl(cbr + BMIPS_RELO_VECTOR_CONTROL_1); + __raw_writel(old_vec & ~0x20000000, cbr + BMIPS_RELO_VECTOR_CONTROL_1); + + clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0); +#elif defined(CONFIG_CPU_BMIPS5000) + write_c0_brcm_bootvec(read_c0_brcm_bootvec() & + (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000)); + + write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); +#endif + + /* make sure there won't be a timer interrupt for a little while */ + write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ); + + irq_enable_hazard(); + set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE); + irq_enable_hazard(); +} + +/* + * Late setup - runs on secondary CPU before entering the idle loop + */ +static void bmips_smp_finish(void) +{ + pr_info("SMP: CPU%d is running\n", smp_processor_id()); +} + +/* + * Runs on CPU0 after all CPUs have been booted + */ +static void bmips_cpus_done(void) +{ +} + +#if defined(CONFIG_CPU_BMIPS5000) + +/* + * BMIPS5000 raceless IPIs + * + * Each CPU has two inbound SW IRQs which are independent of all other CPUs. + * IPI0 is used for SMP_RESCHEDULE_YOURSELF + * IPI1 is used for SMP_CALL_FUNCTION + */ + +static void bmips_send_ipi_single(int cpu, unsigned int action) +{ + write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION)); +} + +static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) +{ + int action = irq - IPI0_IRQ; + + write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), action)); + + if (action == 0) + scheduler_ipi(); + else + smp_call_function_interrupt(); + + return IRQ_HANDLED; +} + +#else + +/* + * BMIPS43xx racey IPIs + * + * We use one inbound SW IRQ for each CPU. + * + * A spinlock must be held in order to keep CPUx from accidentally clearing + * an incoming IPI when it writes CP0 CAUSE to raise an IPI on CPUy. The + * same spinlock is used to protect the action masks. 
+ */ + +static DEFINE_SPINLOCK(ipi_lock); +static DEFINE_PER_CPU(int, ipi_action_mask); + +static void bmips_send_ipi_single(int cpu, unsigned int action) +{ + unsigned long flags; + + spin_lock_irqsave(&ipi_lock, flags); + set_c0_cause(cpu ? C_SW1 : C_SW0); + per_cpu(ipi_action_mask, cpu) |= action; + irq_enable_hazard(); + spin_unlock_irqrestore(&ipi_lock, flags); +} + +static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) +{ + unsigned long flags; + int action, cpu = irq - IPI0_IRQ; + + spin_lock_irqsave(&ipi_lock, flags); + action = __get_cpu_var(ipi_action_mask); + per_cpu(ipi_action_mask, cpu) = 0; + clear_c0_cause(cpu ? C_SW1 : C_SW0); + spin_unlock_irqrestore(&ipi_lock, flags); + + if (action & SMP_RESCHEDULE_YOURSELF) + scheduler_ipi(); + if (action & SMP_CALL_FUNCTION) + smp_call_function_interrupt(); + + return IRQ_HANDLED; +} + +#endif /* BMIPS type */ + +static void bmips_send_ipi_mask(const struct cpumask *mask, + unsigned int action) +{ + unsigned int i; + + for_each_cpu(i, mask) + bmips_send_ipi_single(i, action); +} + +#ifdef CONFIG_HOTPLUG_CPU + +static int bmips_cpu_disable(void) +{ + unsigned int cpu = smp_processor_id(); + + if (cpu == 0) + return -EBUSY; + + pr_info("SMP: CPU%d is offline\n", cpu); + + set_cpu_online(cpu, false); + cpu_clear(cpu, cpu_callin_map); + + local_flush_tlb_all(); + local_flush_icache_range(0, ~0); + + return 0; +} + +static void bmips_cpu_die(unsigned int cpu) +{ +} + +void __ref play_dead(void) +{ + idle_task_exit(); + + /* flush data cache */ + _dma_cache_wback_inv(0, ~0); + + /* + * Wakeup is on SW0 or SW1; disable everything else + * Use BEV !IV (BMIPS_WARM_RESTART_VEC) to avoid the regular Linux + * IRQ handlers; this clears ST0_IE and returns immediately. + */ + clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1); + change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV, + IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV); + irq_disable_hazard(); + + /* + * wait for SW interrupt from bmips_boot_secondary(), then jump + * back to start_secondary() + */ + __asm__ __volatile__( + " wait\n" + " j bmips_secondary_reentry\n" + : : : "memory"); +} + +#endif /* CONFIG_HOTPLUG_CPU */ + +struct plat_smp_ops bmips_smp_ops = { + .smp_setup = bmips_smp_setup, + .prepare_cpus = bmips_prepare_cpus, + .boot_secondary = bmips_boot_secondary, + .smp_finish = bmips_smp_finish, + .init_secondary = bmips_init_secondary, + .cpus_done = bmips_cpus_done, + .send_ipi_single = bmips_send_ipi_single, + .send_ipi_mask = bmips_send_ipi_mask, +#ifdef CONFIG_HOTPLUG_CPU + .cpu_disable = bmips_cpu_disable, + .cpu_die = bmips_cpu_die, +#endif +}; + +#endif /* CONFIG_SMP */ + +/*********************************************************************** + * BMIPS vector relocation + * This is primarily used for SMP boot, but it is applicable to some + * UP BMIPS systems as well. 
+ ***********************************************************************/ + +static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end) +{ + memcpy((void *)dst, start, end - start); + dma_cache_wback((unsigned long)start, end - start); + local_flush_icache_range(dst, dst + (end - start)); + instruction_hazard(); +} + +static inline void __cpuinit bmips_nmi_handler_setup(void) +{ + bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec, + &bmips_reset_nmi_vec_end); + bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec, + &bmips_smp_int_vec_end); +} + +void __cpuinit bmips_ebase_setup(void) +{ + unsigned long new_ebase = ebase; + void __iomem __maybe_unused *cbr; + + BUG_ON(ebase != CKSEG0); + +#if defined(CONFIG_CPU_BMIPS4350) + /* + * BMIPS4350 cannot relocate the normal vectors, but it + * can relocate the BEV=1 vectors. So CPU1 starts up at + * the relocated BEV=1, IV=0 general exception vector @ + * 0xa000_0380. + * + * set_uncached_handler() is used here because: + * - CPU1 will run this from uncached space + * - None of the cacheflush functions are set up yet + */ + set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0, + &bmips_smp_int_vec, 0x80); + __sync(); + return; +#elif defined(CONFIG_CPU_BMIPS4380) + /* + * 0x8000_0000: reset/NMI (initially in kseg1) + * 0x8000_0400: normal vectors + */ + new_ebase = 0x80000400; + cbr = BMIPS_GET_CBR(); + __raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0); + __raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1); +#elif defined(CONFIG_CPU_BMIPS5000) + /* + * 0x8000_0000: reset/NMI (initially in kseg1) + * 0x8000_1000: normal vectors + */ + new_ebase = 0x80001000; + write_c0_brcm_bootvec(0xa0088008); + write_c0_ebase(new_ebase); + if (max_cpus > 2) + bmips_write_zscm_reg(0xa0, 0xa008a008); +#else + return; +#endif + board_nmi_handler_setup = &bmips_nmi_handler_setup; + ebase = new_ebase; +} + +asmlinkage void __weak plat_wired_tlb_setup(void) +{ + /* + * Called when starting/restarting a secondary CPU. + * Kernel stacks and other important data might only be accessible + * once the wired entries are present. + */ +} diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c new file mode 100644 index 00000000..e7e03ecf --- /dev/null +++ b/arch/mips/kernel/smp-cmp.c @@ -0,0 +1,209 @@ +/* + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Copyright (C) 2007 MIPS Technologies, Inc. 
+ * Chris Dearman (chris@mips.com) + */ + +#undef DEBUG + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/smp.h> +#include <linux/cpumask.h> +#include <linux/interrupt.h> +#include <linux/compiler.h> + +#include <linux/atomic.h> +#include <asm/cacheflush.h> +#include <asm/cpu.h> +#include <asm/processor.h> +#include <asm/hardirq.h> +#include <asm/mmu_context.h> +#include <asm/smp.h> +#include <asm/time.h> +#include <asm/mipsregs.h> +#include <asm/mipsmtregs.h> +#include <asm/mips_mt.h> +#include <asm/amon.h> +#include <asm/gic.h> + +static void ipi_call_function(unsigned int cpu) +{ + pr_debug("CPU%d: %s cpu %d status %08x\n", + smp_processor_id(), __func__, cpu, read_c0_status()); + + gic_send_ipi(plat_ipi_call_int_xlate(cpu)); +} + + +static void ipi_resched(unsigned int cpu) +{ + pr_debug("CPU%d: %s cpu %d status %08x\n", + smp_processor_id(), __func__, cpu, read_c0_status()); + + gic_send_ipi(plat_ipi_resched_int_xlate(cpu)); +} + +/* + * FIXME: This isn't restricted to CMP + * The SMVP kernel could use GIC interrupts if available + */ +void cmp_send_ipi_single(int cpu, unsigned int action) +{ + unsigned long flags; + + local_irq_save(flags); + + switch (action) { + case SMP_CALL_FUNCTION: + ipi_call_function(cpu); + break; + + case SMP_RESCHEDULE_YOURSELF: + ipi_resched(cpu); + break; + } + + local_irq_restore(flags); +} + +static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action) +{ + unsigned int i; + + for_each_cpu(i, mask) + cmp_send_ipi_single(i, action); +} + +static void cmp_init_secondary(void) +{ + struct cpuinfo_mips *c = ¤t_cpu_data; + + /* Assume GIC is present */ + change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 | + STATUSF_IP7); + + /* Enable per-cpu interrupts: platform specific */ + + c->core = (read_c0_ebase() >> 1) & 0xff; +#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) + c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE; +#endif +#ifdef CONFIG_MIPS_MT_SMTC + c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC; +#endif +} + +static void cmp_smp_finish(void) +{ + pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); + + /* CDFIXME: remove this? */ + write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ)); + +#ifdef CONFIG_MIPS_MT_FPAFF + /* If we have an FPU, enroll ourselves in the FPU-full mask */ + if (cpu_has_fpu) + cpu_set(smp_processor_id(), mt_fpu_cpumask); +#endif /* CONFIG_MIPS_MT_FPAFF */ + + local_irq_enable(); +} + +static void cmp_cpus_done(void) +{ + pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); +} + +/* + * Setup the PC, SP, and GP of a secondary processor and start it running + * smp_bootstrap is the place to resume from + * __KSTK_TOS(idle) is apparently the stack pointer + * (unsigned long)idle->thread_info the gp + */ +static void cmp_boot_secondary(int cpu, struct task_struct *idle) +{ + struct thread_info *gp = task_thread_info(idle); + unsigned long sp = __KSTK_TOS(idle); + unsigned long pc = (unsigned long)&smp_bootstrap; + unsigned long a0 = 0; + + pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(), + __func__, cpu); + +#if 0 + /* Needed? 
*/ + flush_icache_range((unsigned long)gp, + (unsigned long)(gp + sizeof(struct thread_info))); +#endif + + amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0); +} + +/* + * Common setup before any secondaries are started + */ +void __init cmp_smp_setup(void) +{ + int i; + int ncpu = 0; + + pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); + +#ifdef CONFIG_MIPS_MT_FPAFF + /* If we have an FPU, enroll ourselves in the FPU-full mask */ + if (cpu_has_fpu) + cpu_set(0, mt_fpu_cpumask); +#endif /* CONFIG_MIPS_MT_FPAFF */ + + for (i = 1; i < NR_CPUS; i++) { + if (amon_cpu_avail(i)) { + set_cpu_possible(i, true); + __cpu_number_map[i] = ++ncpu; + __cpu_logical_map[ncpu] = i; + } + } + + if (cpu_has_mipsmt) { + unsigned int nvpe, mvpconf0 = read_c0_mvpconf0(); + + nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; + smp_num_siblings = nvpe; + } + pr_info("Detected %i available secondary CPU(s)\n", ncpu); +} + +void __init cmp_prepare_cpus(unsigned int max_cpus) +{ + pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n", + smp_processor_id(), __func__, max_cpus); + + /* + * FIXME: some of these options are per-system, some per-core and + * some per-cpu + */ + mips_mt_set_cpuoptions(); +} + +struct plat_smp_ops cmp_smp_ops = { + .send_ipi_single = cmp_send_ipi_single, + .send_ipi_mask = cmp_send_ipi_mask, + .init_secondary = cmp_init_secondary, + .smp_finish = cmp_smp_finish, + .cpus_done = cmp_cpus_done, + .boot_secondary = cmp_boot_secondary, + .smp_setup = cmp_smp_setup, + .prepare_cpus = cmp_prepare_cpus, +}; diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c new file mode 100644 index 00000000..ff178687 --- /dev/null +++ b/arch/mips/kernel/smp-mt.c @@ -0,0 +1,286 @@ +/* + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Copyright (C) 2004, 05, 06 MIPS Technologies, Inc. 
+ * Elizabeth Clarke (beth@mips.com) + * Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) + */ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/cpumask.h> +#include <linux/interrupt.h> +#include <linux/compiler.h> +#include <linux/smp.h> + +#include <linux/atomic.h> +#include <asm/cacheflush.h> +#include <asm/cpu.h> +#include <asm/processor.h> +#include <asm/hardirq.h> +#include <asm/mmu_context.h> +#include <asm/time.h> +#include <asm/mipsregs.h> +#include <asm/mipsmtregs.h> +#include <asm/mips_mt.h> + +static void __init smvp_copy_vpe_config(void) +{ + write_vpe_c0_status( + (read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0); + + /* set config to be the same as vpe0, particularly kseg0 coherency alg */ + write_vpe_c0_config( read_c0_config()); + + /* make sure there are no software interrupts pending */ + write_vpe_c0_cause(0); + + /* Propagate Config7 */ + write_vpe_c0_config7(read_c0_config7()); + + write_vpe_c0_count(read_c0_count()); +} + +static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0, + unsigned int ncpu) +{ + if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) + return ncpu; + + /* Deactivate all but VPE 0 */ + if (tc != 0) { + unsigned long tmp = read_vpe_c0_vpeconf0(); + + tmp &= ~VPECONF0_VPA; + + /* master VPE */ + tmp |= VPECONF0_MVP; + write_vpe_c0_vpeconf0(tmp); + + /* Record this as available CPU */ + set_cpu_possible(tc, true); + __cpu_number_map[tc] = ++ncpu; + __cpu_logical_map[ncpu] = tc; + } + + /* Disable multi-threading with TC's */ + write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); + + if (tc != 0) + smvp_copy_vpe_config(); + + return ncpu; +} + +static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0) +{ + unsigned long tmp; + + if (!tc) + return; + + /* bind a TC to each VPE, May as well put all excess TC's + on the last VPE */ + if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1)) + write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)); + else { + write_tc_c0_tcbind(read_tc_c0_tcbind() | tc); + + /* and set XTC */ + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT)); + } + + tmp = read_tc_c0_tcstatus(); + + /* mark not allocated and not dynamically allocatable */ + tmp &= ~(TCSTATUS_A | TCSTATUS_DA); + tmp |= TCSTATUS_IXMT; /* interrupt exempt */ + write_tc_c0_tcstatus(tmp); + + write_tc_c0_tchalt(TCHALT_H); +} + +static void vsmp_send_ipi_single(int cpu, unsigned int action) +{ + int i; + unsigned long flags; + int vpflags; + + local_irq_save(flags); + + vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */ + + switch (action) { + case SMP_CALL_FUNCTION: + i = C_SW1; + break; + + case SMP_RESCHEDULE_YOURSELF: + default: + i = C_SW0; + break; + } + + /* 1:1 mapping of vpe and tc... 
*/ + settc(cpu); + write_vpe_c0_cause(read_vpe_c0_cause() | i); + evpe(vpflags); + + local_irq_restore(flags); +} + +static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action) +{ + unsigned int i; + + for_each_cpu(i, mask) + vsmp_send_ipi_single(i, action); +} + +static void __cpuinit vsmp_init_secondary(void) +{ + extern int gic_present; + + /* This is Malta specific: IPI,performance and timer interrupts */ + if (gic_present) + change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | + STATUSF_IP6 | STATUSF_IP7); + else + change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 | + STATUSF_IP6 | STATUSF_IP7); +} + +static void __cpuinit vsmp_smp_finish(void) +{ + /* CDFIXME: remove this? */ + write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ)); + +#ifdef CONFIG_MIPS_MT_FPAFF + /* If we have an FPU, enroll ourselves in the FPU-full mask */ + if (cpu_has_fpu) + cpu_set(smp_processor_id(), mt_fpu_cpumask); +#endif /* CONFIG_MIPS_MT_FPAFF */ + + local_irq_enable(); +} + +static void vsmp_cpus_done(void) +{ +} + +/* + * Setup the PC, SP, and GP of a secondary processor and start it + * running! + * smp_bootstrap is the place to resume from + * __KSTK_TOS(idle) is apparently the stack pointer + * (unsigned long)idle->thread_info the gp + * assumes a 1:1 mapping of TC => VPE + */ +static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle) +{ + struct thread_info *gp = task_thread_info(idle); + dvpe(); + set_c0_mvpcontrol(MVPCONTROL_VPC); + + settc(cpu); + + /* restart */ + write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); + + /* enable the tc this vpe/cpu will be running */ + write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A); + + write_tc_c0_tchalt(0); + + /* enable the VPE */ + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); + + /* stack pointer */ + write_tc_gpr_sp( __KSTK_TOS(idle)); + + /* global pointer */ + write_tc_gpr_gp((unsigned long)gp); + + flush_icache_range((unsigned long)gp, + (unsigned long)(gp + sizeof(struct thread_info))); + + /* finally out of configuration and into chaos */ + clear_c0_mvpcontrol(MVPCONTROL_VPC); + + evpe(EVPE_ENABLE); +} + +/* + * Common setup before any secondaries are started + * Make sure all CPU's are in a sensible state before we boot any of the + * secondaries + */ +static void __init vsmp_smp_setup(void) +{ + unsigned int mvpconf0, ntc, tc, ncpu = 0; + unsigned int nvpe; + +#ifdef CONFIG_MIPS_MT_FPAFF + /* If we have an FPU, enroll ourselves in the FPU-full mask */ + if (cpu_has_fpu) + cpu_set(0, mt_fpu_cpumask); +#endif /* CONFIG_MIPS_MT_FPAFF */ + if (!cpu_has_mipsmt) + return; + + /* disable MT so we can configure */ + dvpe(); + dmt(); + + /* Put MVPE's into 'configuration state' */ + set_c0_mvpcontrol(MVPCONTROL_VPC); + + mvpconf0 = read_c0_mvpconf0(); + ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT; + + nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; + smp_num_siblings = nvpe; + + /* we'll always have more TC's than VPE's, so loop setting everything + to a sensible state */ + for (tc = 0; tc <= ntc; tc++) { + settc(tc); + + smvp_tc_init(tc, mvpconf0); + ncpu = smvp_vpe_init(tc, mvpconf0, ncpu); + } + + /* Release config state */ + clear_c0_mvpcontrol(MVPCONTROL_VPC); + + /* We'll wait until starting the secondaries before starting MVPE */ + + printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu); +} + +static void __init vsmp_prepare_cpus(unsigned int max_cpus) +{ + mips_mt_set_cpuoptions(); +} + +struct plat_smp_ops 
vsmp_smp_ops = { + .send_ipi_single = vsmp_send_ipi_single, + .send_ipi_mask = vsmp_send_ipi_mask, + .init_secondary = vsmp_init_secondary, + .smp_finish = vsmp_smp_finish, + .cpus_done = vsmp_cpus_done, + .boot_secondary = vsmp_boot_secondary, + .smp_setup = vsmp_smp_setup, + .prepare_cpus = vsmp_prepare_cpus, +}; diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c new file mode 100644 index 00000000..00500fea --- /dev/null +++ b/arch/mips/kernel/smp-up.c @@ -0,0 +1,84 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2006, 07 by Ralf Baechle (ralf@linux-mips.org) + * + * Symmetric Uniprocessor (TM) Support + */ +#include <linux/kernel.h> +#include <linux/sched.h> + +/* + * Send inter-processor interrupt + */ +static void up_send_ipi_single(int cpu, unsigned int action) +{ + panic(KERN_ERR "%s called", __func__); +} + +static inline void up_send_ipi_mask(const struct cpumask *mask, + unsigned int action) +{ + panic(KERN_ERR "%s called", __func__); +} + +/* + * After we've done initial boot, this function is called to allow the + * board code to clean up state, if needed + */ +static void __cpuinit up_init_secondary(void) +{ +} + +static void __cpuinit up_smp_finish(void) +{ +} + +/* Hook for after all CPUs are online */ +static void up_cpus_done(void) +{ +} + +/* + * Firmware CPU startup hook + */ +static void __cpuinit up_boot_secondary(int cpu, struct task_struct *idle) +{ +} + +static void __init up_smp_setup(void) +{ +} + +static void __init up_prepare_cpus(unsigned int max_cpus) +{ +} + +#ifdef CONFIG_HOTPLUG_CPU +static int up_cpu_disable(void) +{ + return -ENOSYS; +} + +static void up_cpu_die(unsigned int cpu) +{ + BUG(); +} +#endif + +struct plat_smp_ops up_smp_ops = { + .send_ipi_single = up_send_ipi_single, + .send_ipi_mask = up_send_ipi_mask, + .init_secondary = up_init_secondary, + .smp_finish = up_smp_finish, + .cpus_done = up_cpus_done, + .boot_secondary = up_boot_secondary, + .smp_setup = up_smp_setup, + .prepare_cpus = up_prepare_cpus, +#ifdef CONFIG_HOTPLUG_CPU + .cpu_disable = up_cpu_disable, + .cpu_die = up_cpu_die, +#endif +}; diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c new file mode 100644 index 00000000..ba9376bf --- /dev/null +++ b/arch/mips/kernel/smp.c @@ -0,0 +1,432 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2000, 2001 Kanoj Sarcar + * Copyright (C) 2000, 2001 Ralf Baechle + * Copyright (C) 2000, 2001 Silicon Graphics, Inc. 
+ * Copyright (C) 2000, 2001, 2003 Broadcom Corporation + */ +#include <linux/cache.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/smp.h> +#include <linux/spinlock.h> +#include <linux/threads.h> +#include <linux/module.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/sched.h> +#include <linux/cpumask.h> +#include <linux/cpu.h> +#include <linux/err.h> +#include <linux/ftrace.h> + +#include <linux/atomic.h> +#include <asm/cpu.h> +#include <asm/processor.h> +#include <asm/r4k-timer.h> +#include <asm/mmu_context.h> +#include <asm/time.h> +#include <asm/setup.h> + +#ifdef CONFIG_MIPS_MT_SMTC +#include <asm/mipsmtregs.h> +#endif /* CONFIG_MIPS_MT_SMTC */ + +volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ + +int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ +EXPORT_SYMBOL(__cpu_number_map); + +int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ +EXPORT_SYMBOL(__cpu_logical_map); + +/* Number of TCs (or siblings in Intel speak) per CPU core */ +int smp_num_siblings = 1; +EXPORT_SYMBOL(smp_num_siblings); + +/* representing the TCs (or siblings in Intel speak) of each logical CPU */ +cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(cpu_sibling_map); + +/* representing cpus for which sibling maps can be computed */ +static cpumask_t cpu_sibling_setup_map; + +static inline void set_cpu_sibling_map(int cpu) +{ + int i; + + cpu_set(cpu, cpu_sibling_setup_map); + + if (smp_num_siblings > 1) { + for_each_cpu_mask(i, cpu_sibling_setup_map) { + if (cpu_data[cpu].core == cpu_data[i].core) { + cpu_set(i, cpu_sibling_map[cpu]); + cpu_set(cpu, cpu_sibling_map[i]); + } + } + } else + cpu_set(cpu, cpu_sibling_map[cpu]); +} + +struct plat_smp_ops *mp_ops; + +__cpuinit void register_smp_ops(struct plat_smp_ops *ops) +{ + if (mp_ops) + printk(KERN_WARNING "Overriding previously set SMP ops\n"); + + mp_ops = ops; +} + +/* + * First C code run on the secondary CPUs after being started up by + * the master. + */ +asmlinkage __cpuinit void start_secondary(void) +{ + unsigned int cpu; + +#ifdef CONFIG_MIPS_MT_SMTC + /* Only do cpu_probe for first TC of CPU */ + if ((read_c0_tcbind() & TCBIND_CURTC) == 0) +#endif /* CONFIG_MIPS_MT_SMTC */ + cpu_probe(); + cpu_report(); + per_cpu_trap_init(); + mips_clockevent_init(); + mp_ops->init_secondary(); + + /* + * XXX parity protection should be folded in here when it's converted + * to an option instead of something based on .cputype + */ + + calibrate_delay(); + preempt_disable(); + cpu = smp_processor_id(); + cpu_data[cpu].udelay_val = loops_per_jiffy; + + notify_cpu_starting(cpu); + + mp_ops->smp_finish(); + set_cpu_sibling_map(cpu); + + cpu_set(cpu, cpu_callin_map); + + synchronise_count_slave(); + + cpu_idle(); +} + +/* + * Call into both interrupt handlers, as we share the IPI for them + */ +void __irq_entry smp_call_function_interrupt(void) +{ + irq_enter(); + generic_smp_call_function_single_interrupt(); + generic_smp_call_function_interrupt(); + irq_exit(); +} + +static void stop_this_cpu(void *dummy) +{ + /* + * Remove this CPU: + */ + set_cpu_online(smp_processor_id(), false); + for (;;) { + if (cpu_wait) + (*cpu_wait)(); /* Wait if available. 
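
register_smp_ops() above is the hook platform code uses to select one of the plat_smp_ops implementations in this patch (up_smp_ops, vsmp_smp_ops, cmp_smp_ops, bmips_smp_ops). A schematic example of a board doing so from its early setup path — the call site, header path and config symbol here are assumptions for illustration, not taken from this patch:

/* illustrative only: selecting SMP ops during early board setup */
#include <linux/init.h>
#include <asm/smp-ops.h>        /* assumed to declare register_smp_ops() and the ops structs */

void __init prom_init(void)
{
        /* ... firmware and memory detection elided ... */
#ifdef CONFIG_MIPS_MT_SMP
        register_smp_ops(&vsmp_smp_ops);        /* MT SMVP: one logical CPU per VPE */
#else
        register_smp_ops(&up_smp_ops);          /* uniprocessor fallback */
#endif
}
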
*/ + } +} + +void smp_send_stop(void) +{ + smp_call_function(stop_this_cpu, NULL, 0); +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ + mp_ops->cpus_done(); + synchronise_count_master(); +} + +/* called from main before smp_init() */ +void __init smp_prepare_cpus(unsigned int max_cpus) +{ + init_new_context(current, &init_mm); + current_thread_info()->cpu = 0; + mp_ops->prepare_cpus(max_cpus); + set_cpu_sibling_map(0); +#ifndef CONFIG_HOTPLUG_CPU + init_cpu_present(cpu_possible_mask); +#endif +} + +/* preload SMP state for boot cpu */ +void __devinit smp_prepare_boot_cpu(void) +{ + set_cpu_possible(0, true); + set_cpu_online(0, true); + cpu_set(0, cpu_callin_map); +} + +/* + * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu + * and keep control until "cpu_online(cpu)" is set. Note: cpu is + * physical, not logical. + */ +static struct task_struct *cpu_idle_thread[NR_CPUS]; + +struct create_idle { + struct work_struct work; + struct task_struct *idle; + struct completion done; + int cpu; +}; + +static void __cpuinit do_fork_idle(struct work_struct *work) +{ + struct create_idle *c_idle = + container_of(work, struct create_idle, work); + + c_idle->idle = fork_idle(c_idle->cpu); + complete(&c_idle->done); +} + +int __cpuinit __cpu_up(unsigned int cpu) +{ + struct task_struct *idle; + + /* + * Processor goes to start_secondary(), sets online flag + * The following code is purely to make sure + * Linux can schedule processes on this slave. + */ + if (!cpu_idle_thread[cpu]) { + /* + * Schedule work item to avoid forking user task + * Ported from arch/x86/kernel/smpboot.c + */ + struct create_idle c_idle = { + .cpu = cpu, + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), + }; + + INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); + schedule_work(&c_idle.work); + wait_for_completion(&c_idle.done); + idle = cpu_idle_thread[cpu] = c_idle.idle; + + if (IS_ERR(idle)) + panic(KERN_ERR "Fork failed for CPU %d", cpu); + } else { + idle = cpu_idle_thread[cpu]; + init_idle(idle, cpu); + } + + mp_ops->boot_secondary(cpu, idle); + + /* + * Trust is futile. We should really have timeouts ... + */ + while (!cpu_isset(cpu, cpu_callin_map)) + udelay(100); + + set_cpu_online(cpu, true); + + return 0; +} + +/* Not really SMP stuff ... */ +int setup_profiling_timer(unsigned int multiplier) +{ + return 0; +} + +static void flush_tlb_all_ipi(void *info) +{ + local_flush_tlb_all(); +} + +void flush_tlb_all(void) +{ + on_each_cpu(flush_tlb_all_ipi, NULL, 1); +} + +static void flush_tlb_mm_ipi(void *mm) +{ + local_flush_tlb_mm((struct mm_struct *)mm); +} + +/* + * Special Variant of smp_call_function for use by TLB functions: + * + * o No return value + * o collapses to normal function call on UP kernels + * o collapses to normal function call on systems with a single shared + * primary cache. + * o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core. + */ +static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) +{ +#ifndef CONFIG_MIPS_MT_SMTC + smp_call_function(func, info, 1); +#endif +} + +static inline void smp_on_each_tlb(void (*func) (void *info), void *info) +{ + preempt_disable(); + + smp_on_other_tlbs(func, info); + func(info); + + preempt_enable(); +} + +/* + * The following tlb flush calls are invoked when old translations are + * being torn down, or pte attributes are changing. 
For single threaded + * address spaces, a new context is obtained on the current cpu, and tlb + * context on other cpus are invalidated to force a new context allocation + * at switch_mm time, should the mm ever be used on other cpus. For + * multithreaded address spaces, intercpu interrupts have to be sent. + * Another case where intercpu interrupts are required is when the target + * mm might be active on another cpu (eg debuggers doing the flushes on + * behalf of debugees, kswapd stealing pages from another process etc). + * Kanoj 07/00. + */ + +void flush_tlb_mm(struct mm_struct *mm) +{ + preempt_disable(); + + if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { + smp_on_other_tlbs(flush_tlb_mm_ipi, mm); + } else { + unsigned int cpu; + + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id() && cpu_context(cpu, mm)) + cpu_context(cpu, mm) = 0; + } + } + local_flush_tlb_mm(mm); + + preempt_enable(); +} + +struct flush_tlb_data { + struct vm_area_struct *vma; + unsigned long addr1; + unsigned long addr2; +}; + +static void flush_tlb_range_ipi(void *info) +{ + struct flush_tlb_data *fd = info; + + local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); +} + +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + + preempt_disable(); + if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { + struct flush_tlb_data fd = { + .vma = vma, + .addr1 = start, + .addr2 = end, + }; + + smp_on_other_tlbs(flush_tlb_range_ipi, &fd); + } else { + unsigned int cpu; + + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id() && cpu_context(cpu, mm)) + cpu_context(cpu, mm) = 0; + } + } + local_flush_tlb_range(vma, start, end); + preempt_enable(); +} + +static void flush_tlb_kernel_range_ipi(void *info) +{ + struct flush_tlb_data *fd = info; + + local_flush_tlb_kernel_range(fd->addr1, fd->addr2); +} + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + struct flush_tlb_data fd = { + .addr1 = start, + .addr2 = end, + }; + + on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1); +} + +static void flush_tlb_page_ipi(void *info) +{ + struct flush_tlb_data *fd = info; + + local_flush_tlb_page(fd->vma, fd->addr1); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + preempt_disable(); + if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { + struct flush_tlb_data fd = { + .vma = vma, + .addr1 = page, + }; + + smp_on_other_tlbs(flush_tlb_page_ipi, &fd); + } else { + unsigned int cpu; + + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) + cpu_context(cpu, vma->vm_mm) = 0; + } + } + local_flush_tlb_page(vma, page); + preempt_enable(); +} + +static void flush_tlb_one_ipi(void *info) +{ + unsigned long vaddr = (unsigned long) info; + + local_flush_tlb_one(vaddr); +} + +void flush_tlb_one(unsigned long vaddr) +{ + smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr); +} + +EXPORT_SYMBOL(flush_tlb_page); +EXPORT_SYMBOL(flush_tlb_one); diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S new file mode 100644 index 00000000..20938a4c --- /dev/null +++ b/arch/mips/kernel/smtc-asm.S @@ -0,0 +1,130 @@ +/* + * Assembly Language Functions for MIPS MT SMTC support + */ + +/* + * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. 
*/ + +#include <asm/regdef.h> +#include <asm/asmmacro.h> +#include <asm/stackframe.h> +#include <asm/irqflags.h> + +/* + * "Software Interrupt" linkage. + * + * This is invoked when an "Interrupt" is sent from one TC to another, + * where the TC to be interrupted is halted, has it's Restart address + * and Status values saved by the "remote control" thread, then modified + * to cause execution to begin here, in kenel mode. This code then + * disguises the TC state as that of an exception and transfers + * control to the general exception or vectored interrupt handler. + */ + .set noreorder + +/* +The __smtc_ipi_vector would use k0 and k1 as temporaries and +1) Set EXL (this is per-VPE, so this can't be done by proxy!) +2) Restore the K/CU and IXMT bits to the pre "exception" state + (EXL means no interrupts and access to the kernel map). +3) Set EPC to be the saved value of TCRestart. +4) Jump to the exception handler entry point passed by the sender. + +CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED?? +*/ + +/* + * Reviled and slandered vision: Set EXL and restore K/CU/IXMT + * state of pre-halt thread, then save everything and call + * thought some function pointer to imaginary_exception, which + * will parse a register value or memory message queue to + * deliver things like interprocessor interrupts. On return + * from that function, jump to the global ret_from_irq code + * to invoke the scheduler and return as appropriate. + */ + +#define PT_PADSLOT4 (PT_R0-8) +#define PT_PADSLOT5 (PT_R0-4) + + .text + .align 5 +FEXPORT(__smtc_ipi_vector) + .set noat + /* Disable thread scheduling to make Status update atomic */ + DMT 27 # dmt k1 + _ehb + /* Set EXL */ + mfc0 k0,CP0_STATUS + ori k0,k0,ST0_EXL + mtc0 k0,CP0_STATUS + _ehb + /* Thread scheduling now inhibited by EXL. Restore TE state. */ + andi k1,k1,VPECONTROL_TE + beqz k1,1f + emt +1: + /* + * The IPI sender has put some information on the anticipated + * kernel stack frame. If we were in user mode, this will be + * built above the saved kernel SP. If we were already in the + * kernel, it will be built above the current CPU SP. + * + * Were we in kernel mode, as indicated by CU0? + */ + sll k1,k0,3 + .set noreorder + bltz k1,2f + move k1,sp + .set reorder + /* + * If previously in user mode, set CU0 and use kernel stack. + */ + li k1,ST0_CU0 + or k1,k1,k0 + mtc0 k1,CP0_STATUS + _ehb + get_saved_sp + /* Interrupting TC will have pre-set values in slots in the new frame */ +2: subu k1,k1,PT_SIZE + /* Load TCStatus Value */ + lw k0,PT_TCSTATUS(k1) + /* Write it to TCStatus to restore CU/KSU/IXMT state */ + mtc0 k0,$2,1 + _ehb + lw k0,PT_EPC(k1) + mtc0 k0,CP0_EPC + /* Save all will redundantly recompute the SP, but use it for now */ + SAVE_ALL + CLI + TRACE_IRQS_OFF + /* Function to be invoked passed stack pad slot 5 */ + lw t0,PT_PADSLOT5(sp) + /* Argument from sender passed in stack pad slot 4 */ + lw a0,PT_PADSLOT4(sp) + LONG_L s0, TI_REGS($28) + LONG_S sp, TI_REGS($28) + PTR_LA ra, ret_from_irq + jr t0 + +/* + * Called from idle loop to provoke processing of queued IPIs + * First IPI message in queue passed as argument. 
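+ *
+ * In outline (matching the code below): interrupts are first blocked by
+ * setting IXMT, a pt_regs-sized frame is carved out just below the current
+ * stack pointer, the caller's return address is stored as that frame's EPC,
+ * the IPI message pointer (a0) goes into pad slot 4 and the address of
+ * ipi_decode() into pad slot 5, the pre-disable TCStatus value is saved,
+ * and control jumps to __smtc_ipi_vector, which dispatches the queued
+ * message as if it had arrived as a real IPI.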
+ */ + +LEAF(self_ipi) + /* Before anything else, block interrupts */ + mfc0 t0,CP0_TCSTATUS + ori t1,t0,TCSTATUS_IXMT + mtc0 t1,CP0_TCSTATUS + _ehb + /* We know we're in kernel mode, so prepare stack frame */ + subu t1,sp,PT_SIZE + sw ra,PT_EPC(t1) + sw a0,PT_PADSLOT4(t1) + la t2,ipi_decode + sw t2,PT_PADSLOT5(t1) + /* Save pre-disable value of TCStatus */ + sw t0,PT_TCSTATUS(t1) + j __smtc_ipi_vector + nop +END(self_ipi) diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c new file mode 100644 index 00000000..145771c0 --- /dev/null +++ b/arch/mips/kernel/smtc-proc.c @@ -0,0 +1,91 @@ +/* + * /proc hooks for SMTC kernel + * Copyright (C) 2005 Mips Technologies, Inc + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/cpumask.h> +#include <linux/interrupt.h> + +#include <asm/cpu.h> +#include <asm/processor.h> +#include <linux/atomic.h> +#include <asm/hardirq.h> +#include <asm/mmu_context.h> +#include <asm/mipsregs.h> +#include <asm/cacheflush.h> +#include <linux/proc_fs.h> + +#include <asm/smtc_proc.h> + +/* + * /proc diagnostic and statistics hooks + */ + +/* + * Statistics gathered + */ +unsigned long selfipis[NR_CPUS]; + +struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; + +static struct proc_dir_entry *smtc_stats; + +atomic_t smtc_fpu_recoveries; + +static int proc_read_smtc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int totalen = 0; + int len; + int i; + extern unsigned long ebase; + + len = sprintf(page, "SMTC Status Word: 0x%08x\n", smtc_status); + totalen += len; + page += len; + len = sprintf(page, "Config7: 0x%08x\n", read_c0_config7()); + totalen += len; + page += len; + len = sprintf(page, "EBASE: 0x%08lx\n", ebase); + totalen += len; + page += len; + len = sprintf(page, "Counter Interrupts taken per CPU (TC)\n"); + totalen += len; + page += len; + for (i=0; i < NR_CPUS; i++) { + len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].timerints); + totalen += len; + page += len; + } + len = sprintf(page, "Self-IPIs by CPU:\n"); + totalen += len; + page += len; + for(i = 0; i < NR_CPUS; i++) { + len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis); + totalen += len; + page += len; + } + len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n", + atomic_read(&smtc_fpu_recoveries)); + totalen += len; + page += len; + + return totalen; +} + +void init_smtc_stats(void) +{ + int i; + + for (i=0; i<NR_CPUS; i++) { + smtc_cpu_stats[i].timerints = 0; + smtc_cpu_stats[i].selfipis = 0; + } + + atomic_set(&smtc_fpu_recoveries, 0); + + smtc_stats = create_proc_read_entry("smtc", 0444, NULL, + proc_read_smtc, NULL); +} diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c new file mode 100644 index 00000000..f5dd38f1 --- /dev/null +++ b/arch/mips/kernel/smtc.c @@ -0,0 +1,1467 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) 2004 Mips Technologies, Inc + * Copyright (C) 2008 Kevin D. Kissell + */ + +#include <linux/clockchips.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/smp.h> +#include <linux/cpumask.h> +#include <linux/interrupt.h> +#include <linux/kernel_stat.h> +#include <linux/module.h> +#include <linux/ftrace.h> +#include <linux/slab.h> + +#include <asm/cpu.h> +#include <asm/processor.h> +#include <linux/atomic.h> +#include <asm/hardirq.h> +#include <asm/hazards.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/mipsregs.h> +#include <asm/cacheflush.h> +#include <asm/time.h> +#include <asm/addrspace.h> +#include <asm/smtc.h> +#include <asm/smtc_proc.h> + +/* + * SMTC Kernel needs to manipulate low-level CPU interrupt mask + * in do_IRQ. These are passed in setup_irq_smtc() and stored + * in this table. + */ +unsigned long irq_hwmask[NR_IRQS]; + +#define LOCK_MT_PRA() \ + local_irq_save(flags); \ + mtflags = dmt() + +#define UNLOCK_MT_PRA() \ + emt(mtflags); \ + local_irq_restore(flags) + +#define LOCK_CORE_PRA() \ + local_irq_save(flags); \ + mtflags = dvpe() + +#define UNLOCK_CORE_PRA() \ + evpe(mtflags); \ + local_irq_restore(flags) + +/* + * Data structures purely associated with SMTC parallelism + */ + + +/* + * Table for tracking ASIDs whose lifetime is prolonged. + */ + +asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; + +/* + * Number of InterProcessor Interrupt (IPI) message buffers to allocate + */ + +#define IPIBUF_PER_CPU 4 + +struct smtc_ipi_q IPIQ[NR_CPUS]; +static struct smtc_ipi_q freeIPIq; + + +/* Forward declarations */ + +void ipi_decode(struct smtc_ipi *); +static void post_direct_ipi(int cpu, struct smtc_ipi *pipi); +static void setup_cross_vpe_interrupts(unsigned int nvpe); +void init_smtc_stats(void); + +/* Global SMTC Status */ + +unsigned int smtc_status; + +/* Boot command line configuration overrides */ + +static int vpe0limit; +static int ipibuffers; +static int nostlb; +static int asidmask; +unsigned long smtc_asid_mask = 0xff; + +static int __init vpe0tcs(char *str) +{ + get_option(&str, &vpe0limit); + + return 1; +} + +static int __init ipibufs(char *str) +{ + get_option(&str, &ipibuffers); + return 1; +} + +static int __init stlb_disable(char *s) +{ + nostlb = 1; + return 1; +} + +static int __init asidmask_set(char *str) +{ + get_option(&str, &asidmask); + switch (asidmask) { + case 0x1: + case 0x3: + case 0x7: + case 0xf: + case 0x1f: + case 0x3f: + case 0x7f: + case 0xff: + smtc_asid_mask = (unsigned long)asidmask; + break; + default: + printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask); + } + return 1; +} + +__setup("vpe0tcs=", vpe0tcs); +__setup("ipibufs=", ipibufs); +__setup("nostlb", stlb_disable); +__setup("asidmask=", asidmask_set); + +#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG + +static int hang_trig; + +static int __init hangtrig_enable(char *s) +{ + hang_trig = 1; + return 1; +} + + +__setup("hangtrig", hangtrig_enable); + +#define DEFAULT_BLOCKED_IPI_LIMIT 32 + +static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT; + +static int __init tintq(char *str) +{ + get_option(&str, &timerq_limit); + return 1; +} + +__setup("tintq=", tintq); + +static int imstuckcount[2][8]; +/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */ +static int vpemask[2][8] = { + {0, 0, 1, 0, 0, 0, 0, 1}, + {0, 0, 0, 0, 0, 0, 0, 1} +}; +int tcnoprog[NR_CPUS]; +static atomic_t idle_hook_initialized = ATOMIC_INIT(0); +static int clock_hang_reported[NR_CPUS]; + +#endif /* 
CONFIG_SMTC_IDLE_HOOK_DEBUG */ + +/* + * Configure shared TLB - VPC configuration bit must be set by caller + */ + +static void smtc_configure_tlb(void) +{ + int i, tlbsiz, vpes; + unsigned long mvpconf0; + unsigned long config1val; + + /* Set up ASID preservation table */ + for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) { + for(i = 0; i < MAX_SMTC_ASIDS; i++) { + smtc_live_asid[vpes][i] = 0; + } + } + mvpconf0 = read_c0_mvpconf0(); + + if ((vpes = ((mvpconf0 & MVPCONF0_PVPE) + >> MVPCONF0_PVPE_SHIFT) + 1) > 1) { + /* If we have multiple VPEs, try to share the TLB */ + if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) { + /* + * If TLB sizing is programmable, shared TLB + * size is the total available complement. + * Otherwise, we have to take the sum of all + * static VPE TLB entries. + */ + if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE) + >> MVPCONF0_PTLBE_SHIFT)) == 0) { + /* + * If there's more than one VPE, there had better + * be more than one TC, because we need one to bind + * to each VPE in turn to be able to read + * its configuration state! + */ + settc(1); + /* Stop the TC from doing anything foolish */ + write_tc_c0_tchalt(TCHALT_H); + mips_ihb(); + /* No need to un-Halt - that happens later anyway */ + for (i=0; i < vpes; i++) { + write_tc_c0_tcbind(i); + /* + * To be 100% sure we're really getting the right + * information, we exit the configuration state + * and do an IHB after each rebinding. + */ + write_c0_mvpcontrol( + read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); + mips_ihb(); + /* + * Only count if the MMU Type indicated is TLB + */ + if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) { + config1val = read_vpe_c0_config1(); + tlbsiz += ((config1val >> 25) & 0x3f) + 1; + } + + /* Put core back in configuration state */ + write_c0_mvpcontrol( + read_c0_mvpcontrol() | MVPCONTROL_VPC ); + mips_ihb(); + } + } + write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB); + ehb(); + + /* + * Setup kernel data structures to use software total, + * rather than read the per-VPE Config1 value. The values + * for "CPU 0" gets copied to all the other CPUs as part + * of their initialization in smtc_cpu_setup(). + */ + + /* MIPS32 limits TLB indices to 64 */ + if (tlbsiz > 64) + tlbsiz = 64; + cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz; + smtc_status |= SMTC_TLB_SHARED; + local_flush_tlb_all(); + + printk("TLB of %d entry pairs shared by %d VPEs\n", + tlbsiz, vpes); + } else { + printk("WARNING: TLB Not Sharable on SMTC Boot!\n"); + } + } +} + + +/* + * Incrementally build the CPU map out of constituent MIPS MT cores, + * using the specified available VPEs and TCs. Plaform code needs + * to ensure that each MIPS MT core invokes this routine on reset, + * one at a time(!). + * + * This version of the build_cpu_map and prepare_cpus routines assumes + * that *all* TCs of a MIPS MT core will be used for Linux, and that + * they will be spread across *all* available VPEs (to minimise the + * loss of efficiency due to exception service serialization). + * An improved version would pick up configuration information and + * possibly leave some TCs/VPEs as "slave" processors. + * + * Use c0_MVPConf0 to find out how many TCs are available, setting up + * cpu_possible_mask and the logical/physical mappings. + */ + +int __init smtc_build_cpu_map(int start_cpu_slot) +{ + int i, ntcs; + + /* + * The CPU map isn't actually used for anything at this point, + * so it's not clear what else we should do apart from set + * everything up so that "logical" = "physical". 
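+ *
+ * Concretely, the TC count is taken from the PTC field of MVPConf0, each
+ * TC slot from start_cpu_slot up to NR_CPUS is marked possible, and both
+ * __cpu_number_map[] and __cpu_logical_map[] are filled with the identity
+ * mapping, so CPU numbering simply follows TC numbering.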
+ */ + ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; + for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) { + set_cpu_possible(i, true); + __cpu_number_map[i] = i; + __cpu_logical_map[i] = i; + } +#ifdef CONFIG_MIPS_MT_FPAFF + /* Initialize map of CPUs with FPUs */ + cpus_clear(mt_fpu_cpumask); +#endif + + /* One of those TC's is the one booting, and not a secondary... */ + printk("%i available secondary CPU TC(s)\n", i - 1); + + return i; +} + +/* + * Common setup before any secondaries are started + * Make sure all CPU's are in a sensible state before we boot any of the + * secondaries. + * + * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly + * as possible across the available VPEs. + */ + +static void smtc_tc_setup(int vpe, int tc, int cpu) +{ + settc(tc); + write_tc_c0_tchalt(TCHALT_H); + mips_ihb(); + write_tc_c0_tcstatus((read_tc_c0_tcstatus() + & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) + | TCSTATUS_A); + /* + * TCContext gets an offset from the base of the IPIQ array + * to be used in low-level code to detect the presence of + * an active IPI queue + */ + write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); + /* Bind tc to vpe */ + write_tc_c0_tcbind(vpe); + /* In general, all TCs should have the same cpu_data indications */ + memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips)); + /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */ + if (cpu_data[0].cputype == CPU_34K || + cpu_data[0].cputype == CPU_1004K) + cpu_data[cpu].options &= ~MIPS_CPU_FPU; + cpu_data[cpu].vpe_id = vpe; + cpu_data[cpu].tc_id = tc; + /* Multi-core SMTC hasn't been tested, but be prepared */ + cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; +} + +/* + * Tweak to get Count registes in as close a sync as possible. + * Value seems good for 34K-class cores. + */ + +#define CP0_SKEW 8 + +void smtc_prepare_cpus(int cpus) +{ + int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; + unsigned long flags; + unsigned long val; + int nipi; + struct smtc_ipi *pipi; + + /* disable interrupts so we can disable MT */ + local_irq_save(flags); + /* disable MT so we can configure */ + dvpe(); + dmt(); + + spin_lock_init(&freeIPIq.lock); + + /* + * We probably don't have as many VPEs as we do SMP "CPUs", + * but it's possible - and in any case we'll never use more! 
+ */ + for (i=0; i<NR_CPUS; i++) { + IPIQ[i].head = IPIQ[i].tail = NULL; + spin_lock_init(&IPIQ[i].lock); + IPIQ[i].depth = 0; + IPIQ[i].resched_flag = 0; /* No reschedules queued initially */ + } + + /* cpu_data index starts at zero */ + cpu = 0; + cpu_data[cpu].vpe_id = 0; + cpu_data[cpu].tc_id = 0; + cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff; + cpu++; + + /* Report on boot-time options */ + mips_mt_set_cpuoptions(); + if (vpelimit > 0) + printk("Limit of %d VPEs set\n", vpelimit); + if (tclimit > 0) + printk("Limit of %d TCs set\n", tclimit); + if (nostlb) { + printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n"); + } + if (asidmask) + printk("ASID mask value override to 0x%x\n", asidmask); + + /* Temporary */ +#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG + if (hang_trig) + printk("Logic Analyser Trigger on suspected TC hang\n"); +#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ + + /* Put MVPE's into 'configuration state' */ + write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC ); + + val = read_c0_mvpconf0(); + nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; + if (vpelimit > 0 && nvpe > vpelimit) + nvpe = vpelimit; + ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; + if (ntc > NR_CPUS) + ntc = NR_CPUS; + if (tclimit > 0 && ntc > tclimit) + ntc = tclimit; + slop = ntc % nvpe; + for (i = 0; i < nvpe; i++) { + tcpervpe[i] = ntc / nvpe; + if (slop) { + if((slop - i) > 0) tcpervpe[i]++; + } + } + /* Handle command line override for VPE0 */ + if (vpe0limit > ntc) vpe0limit = ntc; + if (vpe0limit > 0) { + int slopslop; + if (vpe0limit < tcpervpe[0]) { + /* Reducing TC count - distribute to others */ + slop = tcpervpe[0] - vpe0limit; + slopslop = slop % (nvpe - 1); + tcpervpe[0] = vpe0limit; + for (i = 1; i < nvpe; i++) { + tcpervpe[i] += slop / (nvpe - 1); + if(slopslop && ((slopslop - (i - 1) > 0))) + tcpervpe[i]++; + } + } else if (vpe0limit > tcpervpe[0]) { + /* Increasing TC count - steal from others */ + slop = vpe0limit - tcpervpe[0]; + slopslop = slop % (nvpe - 1); + tcpervpe[0] = vpe0limit; + for (i = 1; i < nvpe; i++) { + tcpervpe[i] -= slop / (nvpe - 1); + if(slopslop && ((slopslop - (i - 1) > 0))) + tcpervpe[i]--; + } + } + } + + /* Set up shared TLB */ + smtc_configure_tlb(); + + for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) { + if (tcpervpe[vpe] == 0) + continue; + if (vpe != 0) + printk(", "); + printk("VPE %d: TC", vpe); + for (i = 0; i < tcpervpe[vpe]; i++) { + /* + * TC 0 is bound to VPE 0 at reset, + * and is presumably executing this + * code. Leave it alone! + */ + if (tc != 0) { + smtc_tc_setup(vpe, tc, cpu); + cpu++; + } + printk(" %d", tc); + tc++; + } + if (vpe != 0) { + /* + * Allow this VPE to control others. + */ + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | + VPECONF0_MVP); + + /* + * Clear any stale software interrupts from VPE's Cause + */ + write_vpe_c0_cause(0); + + /* + * Clear ERL/EXL of VPEs other than 0 + * and set restricted interrupt enable/mask. 
+ */ + write_vpe_c0_status((read_vpe_c0_status() + & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM)) + | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7 + | ST0_IE)); + /* + * set config to be the same as vpe0, + * particularly kseg0 coherency alg + */ + write_vpe_c0_config(read_c0_config()); + /* Clear any pending timer interrupt */ + write_vpe_c0_compare(0); + /* Propagate Config7 */ + write_vpe_c0_config7(read_c0_config7()); + write_vpe_c0_count(read_c0_count() + CP0_SKEW); + ehb(); + } + /* enable multi-threading within VPE */ + write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); + /* enable the VPE */ + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); + } + + /* + * Pull any physically present but unused TCs out of circulation. + */ + while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { + set_cpu_possible(tc, false); + set_cpu_present(tc, false); + tc++; + } + + /* release config state */ + write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); + + printk("\n"); + + /* Set up coprocessor affinity CPU mask(s) */ + +#ifdef CONFIG_MIPS_MT_FPAFF + for (tc = 0; tc < ntc; tc++) { + if (cpu_data[tc].options & MIPS_CPU_FPU) + cpu_set(tc, mt_fpu_cpumask); + } +#endif + + /* set up ipi interrupts... */ + + /* If we have multiple VPEs running, set up the cross-VPE interrupt */ + + setup_cross_vpe_interrupts(nvpe); + + /* Set up queue of free IPI "messages". */ + nipi = NR_CPUS * IPIBUF_PER_CPU; + if (ipibuffers > 0) + nipi = ipibuffers; + + pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL); + if (pipi == NULL) + panic("kmalloc of IPI message buffers failed"); + else + printk("IPI buffer pool of %d buffers\n", nipi); + for (i = 0; i < nipi; i++) { + smtc_ipi_nq(&freeIPIq, pipi); + pipi++; + } + + /* Arm multithreading and enable other VPEs - but all TCs are Halted */ + emt(EMT_ENABLE); + evpe(EVPE_ENABLE); + local_irq_restore(flags); + /* Initialize SMTC /proc statistics/diagnostics */ + init_smtc_stats(); +} + + +/* + * Setup the PC, SP, and GP of a secondary processor and start it + * running! + * smp_bootstrap is the place to resume from + * __KSTK_TOS(idle) is apparently the stack pointer + * (unsigned long)idle->thread_info the gp + * + */ +void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) +{ + extern u32 kernelsp[NR_CPUS]; + unsigned long flags; + int mtflags; + + LOCK_MT_PRA(); + if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { + dvpe(); + } + settc(cpu_data[cpu].tc_id); + + /* pc */ + write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); + + /* stack pointer */ + kernelsp[cpu] = __KSTK_TOS(idle); + write_tc_gpr_sp(__KSTK_TOS(idle)); + + /* global pointer */ + write_tc_gpr_gp((unsigned long)task_thread_info(idle)); + + smtc_status |= SMTC_MTC_ACTIVE; + write_tc_c0_tchalt(0); + if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { + evpe(EVPE_ENABLE); + } + UNLOCK_MT_PRA(); +} + +void smtc_init_secondary(void) +{ + local_irq_enable(); +} + +void smtc_smp_finish(void) +{ + int cpu = smp_processor_id(); + + /* + * Lowest-numbered CPU per VPE starts a clock tick. + * Like per_cpu_trap_init() hack, this assumes that + * SMTC init code assigns TCs consdecutively and + * in ascending order across available VPEs. 
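+ *
+ * For example (illustrative only): with 2 VPEs and 4 TCs assigned as
+ * CPU0/CPU1 -> VPE 0 and CPU2/CPU3 -> VPE 1, the vpe_id changes between
+ * CPU1 and CPU2, so CPU2 is the one that arms the Count/Compare tick
+ * for VPE 1 in the code below.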
+ */ + if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) + write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); + + printk("TC %d going on-line as CPU %d\n", + cpu_data[smp_processor_id()].tc_id, smp_processor_id()); +} + +void smtc_cpus_done(void) +{ +} + +/* + * Support for SMTC-optimized driver IRQ registration + */ + +/* + * SMTC Kernel needs to manipulate low-level CPU interrupt mask + * in do_IRQ. These are passed in setup_irq_smtc() and stored + * in this table. + */ + +int setup_irq_smtc(unsigned int irq, struct irqaction * new, + unsigned long hwmask) +{ +#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG + unsigned int vpe = current_cpu_data.vpe_id; + + vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1; +#endif + irq_hwmask[irq] = hwmask; + + return setup_irq(irq, new); +} + +#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF +/* + * Support for IRQ affinity to TCs + */ + +void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity) +{ + /* + * If a "fast path" cache of quickly decodable affinity state + * is maintained, this is where it gets done, on a call up + * from the platform affinity code. + */ +} + +void smtc_forward_irq(struct irq_data *d) +{ + unsigned int irq = d->irq; + int target; + + /* + * OK wise guy, now figure out how to get the IRQ + * to be serviced on an authorized "CPU". + * + * Ideally, to handle the situation where an IRQ has multiple + * eligible CPUS, we would maintain state per IRQ that would + * allow a fair distribution of service requests. Since the + * expected use model is any-or-only-one, for simplicity + * and efficiency, we just pick the easiest one to find. + */ + + target = cpumask_first(d->affinity); + + /* + * We depend on the platform code to have correctly processed + * IRQ affinity change requests to ensure that the IRQ affinity + * mask has been purged of bits corresponding to nonexistent and + * offline "CPUs", and to TCs bound to VPEs other than the VPE + * connected to the physical interrupt input for the interrupt + * in question. Otherwise we have a nasty problem with interrupt + * mask management. This is best handled in non-performance-critical + * platform IRQ affinity setting code, to minimize interrupt-time + * checks. + */ + + /* If no one is eligible, service locally */ + if (target >= NR_CPUS) + do_IRQ_no_affinity(irq); + else + smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq); +} + +#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ + +/* + * IPI model for SMTC is tricky, because interrupts aren't TC-specific. + * Within a VPE one TC can interrupt another by different approaches. + * The easiest to get right would probably be to make all TCs except + * the target IXMT and set a software interrupt, but an IXMT-based + * scheme requires that a handler must run before a new IPI could + * be sent, which would break the "broadcast" loops in MIPS MT. + * A more gonzo approach within a VPE is to halt the TC, extract + * its Restart, Status, and a couple of GPRs, and program the Restart + * address to emulate an interrupt. + * + * Within a VPE, one can be confident that the target TC isn't in + * a critical EXL state when halted, since the write to the Halt + * register could not have issued on the writing thread if the + * halting thread had EXL set. So k0 and k1 of the target TC + * can be used by the injection code. Across VPEs, one can't + * be certain that the target TC isn't in a critical exception + * state. 
So we try a two-step process of sending a software + * interrupt to the target VPE, which either handles the event + * itself (if it was the target) or injects the event within + * the VPE. + */ + +static void smtc_ipi_qdump(void) +{ + int i; + struct smtc_ipi *temp; + + for (i = 0; i < NR_CPUS ;i++) { + pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n", + i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail, + IPIQ[i].depth); + temp = IPIQ[i].head; + + while (temp != IPIQ[i].tail) { + pr_debug("%d %d %d: ", temp->type, temp->dest, + (int)temp->arg); +#ifdef SMTC_IPI_DEBUG + pr_debug("%u %lu\n", temp->sender, temp->stamp); +#else + pr_debug("\n"); +#endif + temp = temp->flink; + } + } +} + +/* + * The standard atomic.h primitives don't quite do what we want + * here: We need an atomic add-and-return-previous-value (which + * could be done with atomic_add_return and a decrement) and an + * atomic set/zero-and-return-previous-value (which can't really + * be done with the atomic.h primitives). And since this is + * MIPS MT, we can assume that we have LL/SC. + */ +static inline int atomic_postincrement(atomic_t *v) +{ + unsigned long result; + + unsigned long temp; + + __asm__ __volatile__( + "1: ll %0, %2 \n" + " addu %1, %0, 1 \n" + " sc %1, %2 \n" + " beqz %1, 1b \n" + __WEAK_LLSC_MB + : "=&r" (result), "=&r" (temp), "=m" (v->counter) + : "m" (v->counter) + : "memory"); + + return result; +} + +void smtc_send_ipi(int cpu, int type, unsigned int action) +{ + int tcstatus; + struct smtc_ipi *pipi; + unsigned long flags; + int mtflags; + unsigned long tcrestart; + extern void r4k_wait_irqoff(void), __pastwait(void); + int set_resched_flag = (type == LINUX_SMP_IPI && + action == SMP_RESCHEDULE_YOURSELF); + + if (cpu == smp_processor_id()) { + printk("Cannot Send IPI to self!\n"); + return; + } + if (set_resched_flag && IPIQ[cpu].resched_flag != 0) + return; /* There is a reschedule queued already */ + + /* Set up a descriptor, to be delivered either promptly or queued */ + pipi = smtc_ipi_dq(&freeIPIq); + if (pipi == NULL) { + bust_spinlocks(1); + mips_mt_regdump(dvpe()); + panic("IPI Msg. Buffers Depleted"); + } + pipi->type = type; + pipi->arg = (void *)action; + pipi->dest = cpu; + if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { + /* If not on same VPE, enqueue and send cross-VPE interrupt */ + IPIQ[cpu].resched_flag |= set_resched_flag; + smtc_ipi_nq(&IPIQ[cpu], pipi); + LOCK_CORE_PRA(); + settc(cpu_data[cpu].tc_id); + write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1); + UNLOCK_CORE_PRA(); + } else { + /* + * Not sufficient to do a LOCK_MT_PRA (dmt) here, + * since ASID shootdown on the other VPE may + * collide with this operation. + */ + LOCK_CORE_PRA(); + settc(cpu_data[cpu].tc_id); + /* Halt the targeted TC */ + write_tc_c0_tchalt(TCHALT_H); + mips_ihb(); + + /* + * Inspect TCStatus - if IXMT is set, we have to queue + * a message. Otherwise, we set up the "interrupt" + * of the other TC + */ + tcstatus = read_tc_c0_tcstatus(); + + if ((tcstatus & TCSTATUS_IXMT) != 0) { + /* + * If we're in the the irq-off version of the wait + * loop, we need to force exit from the wait and + * do a direct post of the IPI. 
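+ *
+ * That is done by checking whether the target TC's restart address lies
+ * between r4k_wait_irqoff and __pastwait; if it does, TCRestart is
+ * redirected to __pastwait and IXMT is cleared so the IPI can be posted
+ * directly instead of being queued.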
+ */ + if (cpu_wait == r4k_wait_irqoff) { + tcrestart = read_tc_c0_tcrestart(); + if (tcrestart >= (unsigned long)r4k_wait_irqoff + && tcrestart < (unsigned long)__pastwait) { + write_tc_c0_tcrestart(__pastwait); + tcstatus &= ~TCSTATUS_IXMT; + write_tc_c0_tcstatus(tcstatus); + goto postdirect; + } + } + /* + * Otherwise we queue the message for the target TC + * to pick up when he does a local_irq_restore() + */ + write_tc_c0_tchalt(0); + UNLOCK_CORE_PRA(); + IPIQ[cpu].resched_flag |= set_resched_flag; + smtc_ipi_nq(&IPIQ[cpu], pipi); + } else { +postdirect: + post_direct_ipi(cpu, pipi); + write_tc_c0_tchalt(0); + UNLOCK_CORE_PRA(); + } + } +} + +/* + * Send IPI message to Halted TC, TargTC/TargVPE already having been set + */ +static void post_direct_ipi(int cpu, struct smtc_ipi *pipi) +{ + struct pt_regs *kstack; + unsigned long tcstatus; + unsigned long tcrestart; + extern u32 kernelsp[NR_CPUS]; + extern void __smtc_ipi_vector(void); +//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu); + + /* Extract Status, EPC from halted TC */ + tcstatus = read_tc_c0_tcstatus(); + tcrestart = read_tc_c0_tcrestart(); + /* If TCRestart indicates a WAIT instruction, advance the PC */ + if ((tcrestart & 0x80000000) + && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) { + tcrestart += 4; + } + /* + * Save on TC's future kernel stack + * + * CU bit of Status is indicator that TC was + * already running on a kernel stack... + */ + if (tcstatus & ST0_CU0) { + /* Note that this "- 1" is pointer arithmetic */ + kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1; + } else { + kstack = ((struct pt_regs *)kernelsp[cpu]) - 1; + } + + kstack->cp0_epc = (long)tcrestart; + /* Save TCStatus */ + kstack->cp0_tcstatus = tcstatus; + /* Pass token of operation to be performed kernel stack pad area */ + kstack->pad0[4] = (unsigned long)pipi; + /* Pass address of function to be called likewise */ + kstack->pad0[5] = (unsigned long)&ipi_decode; + /* Set interrupt exempt and kernel mode */ + tcstatus |= TCSTATUS_IXMT; + tcstatus &= ~TCSTATUS_TKSU; + write_tc_c0_tcstatus(tcstatus); + ehb(); + /* Set TC Restart address to be SMTC IPI vector */ + write_tc_c0_tcrestart(__smtc_ipi_vector); +} + +static void ipi_resched_interrupt(void) +{ + scheduler_ipi(); +} + +static void ipi_call_interrupt(void) +{ + /* Invoke generic function invocation code in smp.c */ + smp_call_function_interrupt(); +} + +DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); + +static void __irq_entry smtc_clock_tick_interrupt(void) +{ + unsigned int cpu = smp_processor_id(); + struct clock_event_device *cd; + int irq = MIPS_CPU_IRQ_BASE + 1; + + irq_enter(); + kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); + cd = &per_cpu(mips_clockevent_device, cpu); + cd->event_handler(cd); + irq_exit(); +} + +void ipi_decode(struct smtc_ipi *pipi) +{ + void *arg_copy = pipi->arg; + int type_copy = pipi->type; + + smtc_ipi_nq(&freeIPIq, pipi); + + switch (type_copy) { + case SMTC_CLOCK_TICK: + smtc_clock_tick_interrupt(); + break; + + case LINUX_SMP_IPI: + switch ((int)arg_copy) { + case SMP_RESCHEDULE_YOURSELF: + ipi_resched_interrupt(); + break; + case SMP_CALL_FUNCTION: + ipi_call_interrupt(); + break; + default: + printk("Impossible SMTC IPI Argument %p\n", arg_copy); + break; + } + break; +#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF + case IRQ_AFFINITY_IPI: + /* + * Accept a "forwarded" interrupt that was initially + * taken by a TC who doesn't have affinity for the IRQ. 
+ */ + do_IRQ_no_affinity((int)arg_copy); + break; +#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ + default: + printk("Impossible SMTC IPI Type 0x%x\n", type_copy); + break; + } +} + +/* + * Similar to smtc_ipi_replay(), but invoked from context restore, + * so it reuses the current exception frame rather than set up a + * new one with self_ipi. + */ + +void deferred_smtc_ipi(void) +{ + int cpu = smp_processor_id(); + + /* + * Test is not atomic, but much faster than a dequeue, + * and the vast majority of invocations will have a null queue. + * If irq_disabled when this was called, then any IPIs queued + * after we test last will be taken on the next irq_enable/restore. + * If interrupts were enabled, then any IPIs added after the + * last test will be taken directly. + */ + + while (IPIQ[cpu].head != NULL) { + struct smtc_ipi_q *q = &IPIQ[cpu]; + struct smtc_ipi *pipi; + unsigned long flags; + + /* + * It may be possible we'll come in with interrupts + * already enabled. + */ + local_irq_save(flags); + spin_lock(&q->lock); + pipi = __smtc_ipi_dq(q); + spin_unlock(&q->lock); + if (pipi != NULL) { + if (pipi->type == LINUX_SMP_IPI && + (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) + IPIQ[cpu].resched_flag = 0; + ipi_decode(pipi); + } + /* + * The use of the __raw_local restore isn't + * as obviously necessary here as in smtc_ipi_replay(), + * but it's more efficient, given that we're already + * running down the IPI queue. + */ + __arch_local_irq_restore(flags); + } +} + +/* + * Cross-VPE interrupts in the SMTC prototype use "software interrupts" + * set via cross-VPE MTTR manipulation of the Cause register. It would be + * in some regards preferable to have external logic for "doorbell" hardware + * interrupts. + */ + +static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ; + +static irqreturn_t ipi_interrupt(int irq, void *dev_idm) +{ + int my_vpe = cpu_data[smp_processor_id()].vpe_id; + int my_tc = cpu_data[smp_processor_id()].tc_id; + int cpu; + struct smtc_ipi *pipi; + unsigned long tcstatus; + int sent; + unsigned long flags; + unsigned int mtflags; + unsigned int vpflags; + + /* + * So long as cross-VPE interrupts are done via + * MFTR/MTTR read-modify-writes of Cause, we need + * to stop other VPEs whenever the local VPE does + * anything similar. + */ + local_irq_save(flags); + vpflags = dvpe(); + clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ); + set_c0_status(0x100 << MIPS_CPU_IPI_IRQ); + irq_enable_hazard(); + evpe(vpflags); + local_irq_restore(flags); + + /* + * Cross-VPE Interrupt handler: Try to directly deliver IPIs + * queued for TCs on this VPE other than the current one. + * Return-from-interrupt should cause us to drain the queue + * for the current TC, so we ought not to have to do it explicitly here. 
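+ *
+ * For each online CPU bound to this VPE we dequeue one pending message:
+ * if it targets another TC, that TC is halted and the IPI is posted
+ * directly when IXMT is clear (otherwise the message is put back on that
+ * CPU's queue); if it targets the current TC, it is decoded locally with
+ * interrupts disabled.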
+ */ + + for_each_online_cpu(cpu) { + if (cpu_data[cpu].vpe_id != my_vpe) + continue; + + pipi = smtc_ipi_dq(&IPIQ[cpu]); + if (pipi != NULL) { + if (cpu_data[cpu].tc_id != my_tc) { + sent = 0; + LOCK_MT_PRA(); + settc(cpu_data[cpu].tc_id); + write_tc_c0_tchalt(TCHALT_H); + mips_ihb(); + tcstatus = read_tc_c0_tcstatus(); + if ((tcstatus & TCSTATUS_IXMT) == 0) { + post_direct_ipi(cpu, pipi); + sent = 1; + } + write_tc_c0_tchalt(0); + UNLOCK_MT_PRA(); + if (!sent) { + smtc_ipi_req(&IPIQ[cpu], pipi); + } + } else { + /* + * ipi_decode() should be called + * with interrupts off + */ + local_irq_save(flags); + if (pipi->type == LINUX_SMP_IPI && + (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) + IPIQ[cpu].resched_flag = 0; + ipi_decode(pipi); + local_irq_restore(flags); + } + } + } + + return IRQ_HANDLED; +} + +static void ipi_irq_dispatch(void) +{ + do_IRQ(cpu_ipi_irq); +} + +static struct irqaction irq_ipi = { + .handler = ipi_interrupt, + .flags = IRQF_PERCPU, + .name = "SMTC_IPI" +}; + +static void setup_cross_vpe_interrupts(unsigned int nvpe) +{ + if (nvpe < 1) + return; + + if (!cpu_has_vint) + panic("SMTC Kernel requires Vectored Interrupt support"); + + set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch); + + setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); + + irq_set_handler(cpu_ipi_irq, handle_percpu_irq); +} + +/* + * SMTC-specific hacks invoked from elsewhere in the kernel. + */ + + /* + * smtc_ipi_replay is called from raw_local_irq_restore + */ + +void smtc_ipi_replay(void) +{ + unsigned int cpu = smp_processor_id(); + + /* + * To the extent that we've ever turned interrupts off, + * we may have accumulated deferred IPIs. This is subtle. + * we should be OK: If we pick up something and dispatch + * it here, that's great. If we see nothing, but concurrent + * with this operation, another TC sends us an IPI, IXMT + * is clear, and we'll handle it as a real pseudo-interrupt + * and not a pseudo-pseudo interrupt. The important thing + * is to do the last check for queued message *after* the + * re-enabling of interrupts. + */ + while (IPIQ[cpu].head != NULL) { + struct smtc_ipi_q *q = &IPIQ[cpu]; + struct smtc_ipi *pipi; + unsigned long flags; + + /* + * It's just possible we'll come in with interrupts + * already enabled. + */ + local_irq_save(flags); + + spin_lock(&q->lock); + pipi = __smtc_ipi_dq(q); + spin_unlock(&q->lock); + /* + ** But use a raw restore here to avoid recursion. + */ + __arch_local_irq_restore(flags); + + if (pipi) { + self_ipi(pipi); + smtc_cpu_stats[cpu].selfipis++; + } + } +} + +EXPORT_SYMBOL(smtc_ipi_replay); + +void smtc_idle_loop_hook(void) +{ +#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG + int im; + int flags; + int mtflags; + int bit; + int vpe; + int tc; + int hook_ntcs; + /* + * printk within DMT-protected regions can deadlock, + * so buffer diagnostic messages for later output. 
+ */ + char *pdb_msg; + char id_ho_db_msg[768]; /* worst-case use should be less than 700 */ + + if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */ + if (atomic_add_return(1, &idle_hook_initialized) == 1) { + int mvpconf0; + /* Tedious stuff to just do once */ + mvpconf0 = read_c0_mvpconf0(); + hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; + if (hook_ntcs > NR_CPUS) + hook_ntcs = NR_CPUS; + for (tc = 0; tc < hook_ntcs; tc++) { + tcnoprog[tc] = 0; + clock_hang_reported[tc] = 0; + } + for (vpe = 0; vpe < 2; vpe++) + for (im = 0; im < 8; im++) + imstuckcount[vpe][im] = 0; + printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs); + atomic_set(&idle_hook_initialized, 1000); + } else { + /* Someone else is initializing in parallel - let 'em finish */ + while (atomic_read(&idle_hook_initialized) < 1000) + ; + } + } + + /* Have we stupidly left IXMT set somewhere? */ + if (read_c0_tcstatus() & 0x400) { + write_c0_tcstatus(read_c0_tcstatus() & ~0x400); + ehb(); + printk("Dangling IXMT in cpu_idle()\n"); + } + + /* Have we stupidly left an IM bit turned off? */ +#define IM_LIMIT 2000 + local_irq_save(flags); + mtflags = dmt(); + pdb_msg = &id_ho_db_msg[0]; + im = read_c0_status(); + vpe = current_cpu_data.vpe_id; + for (bit = 0; bit < 8; bit++) { + /* + * In current prototype, I/O interrupts + * are masked for VPE > 0 + */ + if (vpemask[vpe][bit]) { + if (!(im & (0x100 << bit))) + imstuckcount[vpe][bit]++; + else + imstuckcount[vpe][bit] = 0; + if (imstuckcount[vpe][bit] > IM_LIMIT) { + set_c0_status(0x100 << bit); + ehb(); + imstuckcount[vpe][bit] = 0; + pdb_msg += sprintf(pdb_msg, + "Dangling IM %d fixed for VPE %d\n", bit, + vpe); + } + } + } + + emt(mtflags); + local_irq_restore(flags); + if (pdb_msg != &id_ho_db_msg[0]) + printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); +#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ + + smtc_ipi_replay(); +} + +void smtc_soft_dump(void) +{ + int i; + + printk("Counter Interrupts taken per CPU (TC)\n"); + for (i=0; i < NR_CPUS; i++) { + printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints); + } + printk("Self-IPI invocations:\n"); + for (i=0; i < NR_CPUS; i++) { + printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); + } + smtc_ipi_qdump(); + printk("%d Recoveries of \"stolen\" FPU\n", + atomic_read(&smtc_fpu_recoveries)); +} + + +/* + * TLB management routines special to SMTC + */ + +void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) +{ + unsigned long flags, mtflags, tcstat, prevhalt, asid; + int tlb, i; + + /* + * It would be nice to be able to use a spinlock here, + * but this is invoked from within TLB flush routines + * that protect themselves with DVPE, so if a lock is + * held by another TC, it'll never be freed. + * + * DVPE/DMT must not be done with interrupts enabled, + * so even so most callers will already have disabled + * them, let's be really careful... + */ + + local_irq_save(flags); + if (smtc_status & SMTC_TLB_SHARED) { + mtflags = dvpe(); + tlb = 0; + } else { + mtflags = dmt(); + tlb = cpu_data[cpu].vpe_id; + } + asid = asid_cache(cpu); + + do { + if (!((asid += ASID_INC) & ASID_MASK) ) { + if (cpu_has_vtag_icache) + flush_icache_all(); + /* Traverse all online CPUs (hack requires contiguous range) */ + for_each_online_cpu(i) { + /* + * We don't need to worry about our own CPU, nor those of + * CPUs who don't share our TLB. 
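+ *
+ * For each CPU that does share this TLB (or whose VPE matches when the
+ * TLB is not shared), its TC is briefly halted unless it already was,
+ * the ASID from its TCStatus is recorded in smtc_live_asid[] so that
+ * value stays reserved, and the TC is released again if we were the
+ * ones who halted it.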
+ */ + if ((i != smp_processor_id()) && + ((smtc_status & SMTC_TLB_SHARED) || + (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) { + settc(cpu_data[i].tc_id); + prevhalt = read_tc_c0_tchalt() & TCHALT_H; + if (!prevhalt) { + write_tc_c0_tchalt(TCHALT_H); + mips_ihb(); + } + tcstat = read_tc_c0_tcstatus(); + smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); + if (!prevhalt) + write_tc_c0_tchalt(0); + } + } + if (!asid) /* fix version if needed */ + asid = ASID_FIRST_VERSION; + local_flush_tlb_all(); /* start new asid cycle */ + } + } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); + + /* + * SMTC shares the TLB within VPEs and possibly across all VPEs. + */ + for_each_online_cpu(i) { + if ((smtc_status & SMTC_TLB_SHARED) || + (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) + cpu_context(i, mm) = asid_cache(i) = asid; + } + + if (smtc_status & SMTC_TLB_SHARED) + evpe(mtflags); + else + emt(mtflags); + local_irq_restore(flags); +} + +/* + * Invoked from macros defined in mmu_context.h + * which must already have disabled interrupts + * and done a DVPE or DMT as appropriate. + */ + +void smtc_flush_tlb_asid(unsigned long asid) +{ + int entry; + unsigned long ehi; + + entry = read_c0_wired(); + + /* Traverse all non-wired entries */ + while (entry < current_cpu_data.tlbsize) { + write_c0_index(entry); + ehb(); + tlb_read(); + ehb(); + ehi = read_c0_entryhi(); + if ((ehi & ASID_MASK) == asid) { + /* + * Invalidate only entries with specified ASID, + * makiing sure all entries differ. + */ + write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1))); + write_c0_entrylo0(0); + write_c0_entrylo1(0); + mtc0_tlbw_hazard(); + tlb_write_indexed(); + } + entry++; + } + write_c0_index(PARKED_INDEX); + tlbw_use_hazard(); +} + +/* + * Support for single-threading cache flush operations. + */ + +static int halt_state_save[NR_CPUS]; + +/* + * To really, really be sure that nothing is being done + * by other TCs, halt them all. This code assumes that + * a DVPE has already been done, so while their Halted + * state is theoretically architecturally unstable, in + * practice, it's not going to change while we're looking + * at it. + */ + +void smtc_cflush_lockdown(void) +{ + int cpu; + + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id()) { + settc(cpu_data[cpu].tc_id); + halt_state_save[cpu] = read_tc_c0_tchalt(); + write_tc_c0_tchalt(TCHALT_H); + } + } + mips_ihb(); +} + +/* It would be cheating to change the cpu_online states during a flush! */ + +void smtc_cflush_release(void) +{ + int cpu; + + /* + * Start with a hazard barrier to ensure + * that all CACHE ops have played through. 
+ */ + mips_ihb(); + + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id()) { + settc(cpu_data[cpu].tc_id); + write_tc_c0_tchalt(halt_state_save[cpu]); + } + } + mips_ihb(); +} diff --git a/arch/mips/kernel/spinlock_test.c b/arch/mips/kernel/spinlock_test.c new file mode 100644 index 00000000..39f7ab7b --- /dev/null +++ b/arch/mips/kernel/spinlock_test.c @@ -0,0 +1,141 @@ +#include <linux/init.h> +#include <linux/kthread.h> +#include <linux/hrtimer.h> +#include <linux/fs.h> +#include <linux/debugfs.h> +#include <linux/export.h> +#include <linux/spinlock.h> + + +static int ss_get(void *data, u64 *val) +{ + ktime_t start, finish; + int loops; + int cont; + DEFINE_RAW_SPINLOCK(ss_spin); + + loops = 1000000; + cont = 1; + + start = ktime_get(); + + while (cont) { + raw_spin_lock(&ss_spin); + loops--; + if (loops == 0) + cont = 0; + raw_spin_unlock(&ss_spin); + } + + finish = ktime_get(); + + *val = ktime_us_delta(finish, start); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n"); + + + +struct spin_multi_state { + raw_spinlock_t lock; + atomic_t start_wait; + atomic_t enter_wait; + atomic_t exit_wait; + int loops; +}; + +struct spin_multi_per_thread { + struct spin_multi_state *state; + ktime_t start; +}; + +static int multi_other(void *data) +{ + int loops; + int cont; + struct spin_multi_per_thread *pt = data; + struct spin_multi_state *s = pt->state; + + loops = s->loops; + cont = 1; + + atomic_dec(&s->enter_wait); + + while (atomic_read(&s->enter_wait)) + ; /* spin */ + + pt->start = ktime_get(); + + atomic_dec(&s->start_wait); + + while (atomic_read(&s->start_wait)) + ; /* spin */ + + while (cont) { + raw_spin_lock(&s->lock); + loops--; + if (loops == 0) + cont = 0; + raw_spin_unlock(&s->lock); + } + + atomic_dec(&s->exit_wait); + while (atomic_read(&s->exit_wait)) + ; /* spin */ + return 0; +} + +static int multi_get(void *data, u64 *val) +{ + ktime_t finish; + struct spin_multi_state ms; + struct spin_multi_per_thread t1, t2; + + ms.lock = __RAW_SPIN_LOCK_UNLOCKED("multi_get"); + ms.loops = 1000000; + + atomic_set(&ms.start_wait, 2); + atomic_set(&ms.enter_wait, 2); + atomic_set(&ms.exit_wait, 2); + t1.state = &ms; + t2.state = &ms; + + kthread_run(multi_other, &t2, "multi_get"); + + multi_other(&t1); + + finish = ktime_get(); + + *val = ktime_us_delta(finish, t1.start); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n"); + + +extern struct dentry *mips_debugfs_dir; +static int __init spinlock_test(void) +{ + struct dentry *d; + + if (!mips_debugfs_dir) + return -ENODEV; + + d = debugfs_create_file("spin_single", S_IRUGO, + mips_debugfs_dir, NULL, + &fops_ss); + if (!d) + return -ENOMEM; + + d = debugfs_create_file("spin_multi", S_IRUGO, + mips_debugfs_dir, NULL, + &fops_multi); + if (!d) + return -ENOMEM; + + return 0; +} +device_initcall(spinlock_test); diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c new file mode 100644 index 00000000..6af08d89 --- /dev/null +++ b/arch/mips/kernel/spram.c @@ -0,0 +1,219 @@ +/* + * MIPS SPRAM support + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Copyright (C) 2007, 2008 MIPS Technologies, Inc. 
+ */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ptrace.h> +#include <linux/stddef.h> + +#include <asm/fpu.h> +#include <asm/mipsregs.h> +#include <asm/r4kcache.h> +#include <asm/hazards.h> + +/* + * These definitions are correct for the 24K/34K/74K SPRAM sample + * implementation. The 4KS interpreted the tags differently... + */ +#define SPRAM_TAG0_ENABLE 0x00000080 +#define SPRAM_TAG0_PA_MASK 0xfffff000 +#define SPRAM_TAG1_SIZE_MASK 0xfffff000 + +#define SPRAM_TAG_STRIDE 8 + +#define ERRCTL_SPRAM (1 << 28) + +/* errctl access */ +#define read_c0_errctl(x) read_c0_ecc(x) +#define write_c0_errctl(x) write_c0_ecc(x) + +/* + * Different semantics to the set_c0_* function built by __BUILD_SET_C0 + */ +static __cpuinit unsigned int bis_c0_errctl(unsigned int set) +{ + unsigned int res; + res = read_c0_errctl(); + write_c0_errctl(res | set); + return res; +} + +static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data) +{ + unsigned int errctl; + + /* enable SPRAM tag access */ + errctl = bis_c0_errctl(ERRCTL_SPRAM); + ehb(); + + write_c0_taglo(data); + ehb(); + + cache_op(Index_Store_Tag_I, CKSEG0|offset); + ehb(); + + write_c0_errctl(errctl); + ehb(); +} + + +static __cpuinit unsigned int ispram_load_tag(unsigned int offset) +{ + unsigned int data; + unsigned int errctl; + + /* enable SPRAM tag access */ + errctl = bis_c0_errctl(ERRCTL_SPRAM); + ehb(); + cache_op(Index_Load_Tag_I, CKSEG0 | offset); + ehb(); + data = read_c0_taglo(); + ehb(); + write_c0_errctl(errctl); + ehb(); + + return data; +} + +static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data) +{ + unsigned int errctl; + + /* enable SPRAM tag access */ + errctl = bis_c0_errctl(ERRCTL_SPRAM); + ehb(); + write_c0_dtaglo(data); + ehb(); + cache_op(Index_Store_Tag_D, CKSEG0 | offset); + ehb(); + write_c0_errctl(errctl); + ehb(); +} + + +static __cpuinit unsigned int dspram_load_tag(unsigned int offset) +{ + unsigned int data; + unsigned int errctl; + + errctl = bis_c0_errctl(ERRCTL_SPRAM); + ehb(); + cache_op(Index_Load_Tag_D, CKSEG0 | offset); + ehb(); + data = read_c0_dtaglo(); + ehb(); + write_c0_errctl(errctl); + ehb(); + + return data; +} + +static __cpuinit void probe_spram(char *type, + unsigned int base, + unsigned int (*read)(unsigned int), + void (*write)(unsigned int, unsigned int)) +{ + unsigned int firstsize = 0, lastsize = 0; + unsigned int firstpa = 0, lastpa = 0, pa = 0; + unsigned int offset = 0; + unsigned int size, tag0, tag1; + unsigned int enabled; + int i; + + /* + * The limit is arbitrary but avoids the loop running away if + * the SPRAM tags are implemented differently + */ + + for (i = 0; i < 8; i++) { + tag0 = read(offset); + tag1 = read(offset+SPRAM_TAG_STRIDE); + pr_debug("DBG %s%d: tag0=%08x tag1=%08x\n", + type, i, tag0, tag1); + + size = tag1 & SPRAM_TAG1_SIZE_MASK; + + if (size == 0) + break; + + if (i != 0) { + /* tags may repeat... 
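+ (seeing the same PA/size pair as the first or previous entry is treated
+ as the end of the tag array, so the probe loop stops here)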
*/
+		if ((pa == firstpa && size == firstsize) ||
+			    (pa == lastpa && size == lastsize))
+				break;
+		}
+
+		/* Align base with size */
+		base = (base + size - 1) & ~(size-1);
+
+		/* reprogram the base address and enable */
+		tag0 = (base & SPRAM_TAG0_PA_MASK) | SPRAM_TAG0_ENABLE;
+		write(offset, tag0);
+
+		base += size;
+
+		/* reread the tag */
+		tag0 = read(offset);
+		pa = tag0 & SPRAM_TAG0_PA_MASK;
+		enabled = tag0 & SPRAM_TAG0_ENABLE;
+
+		if (i == 0) {
+			firstpa = pa;
+			firstsize = size;
+		}
+
+		lastpa = pa;
+		lastsize = size;
+
+		if (strcmp(type, "DSPRAM") == 0) {
+			unsigned int *vp = (unsigned int *)(CKSEG1 | pa);
+			unsigned int v;
+#define TDAT	0x5a5aa5a5
+			vp[0] = TDAT;
+			vp[1] = ~TDAT;
+
+			mb();
+
+			v = vp[0];
+			if (v != TDAT)
+				printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
+				       vp, TDAT, v);
+			v = vp[1];
+			if (v != ~TDAT)
+				printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
+				       vp+1, ~TDAT, v);
+		}
+
+		pr_info("%s%d: PA=%08x,Size=%08x%s\n",
+			type, i, pa, size, enabled ? ",enabled" : "");
+		offset += 2 * SPRAM_TAG_STRIDE;
+	}
+}
+void __cpuinit spram_config(void)
+{
+	struct cpuinfo_mips *c = &current_cpu_data;
+	unsigned int config0;
+
+	switch (c->cputype) {
+	case CPU_24K:
+	case CPU_34K:
+	case CPU_74K:
+	case CPU_1004K:
+		config0 = read_c0_config();
+		/* FIXME: addresses are Malta specific */
+		if (config0 & (1<<24)) {
+			probe_spram("ISPRAM", 0x1c000000,
+				    &ispram_load_tag, &ispram_store_tag);
+		}
+		if (config0 & (1<<23))
+			probe_spram("DSPRAM", 0x1c100000,
+				    &dspram_load_tag, &dspram_store_tag);
+	}
+}
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c
new file mode 100644
index 00000000..1ba775d2
--- /dev/null
+++ b/arch/mips/kernel/stacktrace.c
@@ -0,0 +1,87 @@
+/*
+ * Stack trace management functions
+ *
+ * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/export.h>
+#include <asm/stacktrace.h>
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer:
+ */
+static void save_raw_context_stack(struct stack_trace *trace,
+	unsigned long reg29)
+{
+	unsigned long *sp = (unsigned long *)reg29;
+	unsigned long addr;
+
+	while (!kstack_end(sp)) {
+		addr = *sp++;
+		if (__kernel_text_address(addr)) {
+			if (trace->skip > 0)
+				trace->skip--;
+			else
+				trace->entries[trace->nr_entries++] = addr;
+			if (trace->nr_entries >= trace->max_entries)
+				break;
+		}
+	}
+}
+
+static void save_context_stack(struct stack_trace *trace,
+	struct task_struct *tsk, struct pt_regs *regs)
+{
+	unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+	unsigned long ra = regs->regs[31];
+	unsigned long pc = regs->cp0_epc;
+
+	if (raw_show_trace || !__kernel_text_address(pc)) {
+		unsigned long stack_page =
+			(unsigned long)task_stack_page(tsk);
+		if (stack_page && sp >= stack_page &&
+		    sp <= stack_page + THREAD_SIZE - 32)
+			save_raw_context_stack(trace, sp);
+		return;
+	}
+	do {
+		if (trace->skip > 0)
+			trace->skip--;
+		else
+			trace->entries[trace->nr_entries++] = pc;
+		if (trace->nr_entries >= trace->max_entries)
+			break;
+		pc = unwind_stack(tsk, &sp, pc, &ra);
+	} while (pc);
+#else
+	save_raw_context_stack(trace, sp);
+#endif
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */ +void save_stack_trace(struct stack_trace *trace) +{ + save_stack_trace_tsk(current, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace); + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + struct pt_regs dummyregs; + struct pt_regs *regs = &dummyregs; + + WARN_ON(trace->nr_entries || !trace->max_entries); + + if (tsk != current) { + regs->regs[29] = tsk->thread.reg29; + regs->regs[31] = 0; + regs->cp0_epc = tsk->thread.reg31; + } else + prepare_frametrace(regs); + save_context_stack(trace, tsk, regs); +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c new file mode 100644 index 00000000..99f913c8 --- /dev/null +++ b/arch/mips/kernel/sync-r4k.c @@ -0,0 +1,160 @@ +/* + * Count register synchronisation. + * + * All CPUs will have their count registers synchronised to the CPU0 next time + * value. This can cause a small timewarp for CPU0. All other CPU's should + * not have done anything significant (but they may have had interrupts + * enabled briefly - prom_smp_finish() should not be responsible for enabling + * interrupts...) + * + * FIXME: broken for SMTC + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/irqflags.h> +#include <linux/cpumask.h> + +#include <asm/r4k-timer.h> +#include <linux/atomic.h> +#include <asm/barrier.h> +#include <asm/mipsregs.h> + +static atomic_t __cpuinitdata count_start_flag = ATOMIC_INIT(0); +static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0); +static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0); +static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0); + +#define COUNTON 100 +#define NR_LOOPS 5 + +void __cpuinit synchronise_count_master(void) +{ + int i; + unsigned long flags; + unsigned int initcount; + int nslaves; + +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC needs to synchronise per VPE, not per CPU + * ignore for now + */ + return; +#endif + + printk(KERN_INFO "Synchronize counters across %u CPUs: ", + num_online_cpus()); + + local_irq_save(flags); + + /* + * Notify the slaves that it's time to start + */ + atomic_set(&count_reference, read_c0_count()); + atomic_set(&count_start_flag, 1); + smp_wmb(); + + /* Count will be initialised to current timer for all CPU's */ + initcount = read_c0_count(); + + /* + * We loop a few times to get a primed instruction cache, + * then the last pass is more or less synchronised and + * the master and slaves each set their cycle counters to a known + * value all at once. This reduces the chance of having random offsets + * between the processors, and guarantees that the maximum + * delay between the cycle counters is never bigger than + * the latency of information-passing (cachelines) between + * two CPUs. 
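+ *
+ * Each pass is a two-stage rendezvous: the master waits for every slave
+ * to bump count_count_start before both sides proceed, and then both
+ * rendezvous again on count_count_stop before the next iteration.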
+ */ + + nslaves = num_online_cpus()-1; + for (i = 0; i < NR_LOOPS; i++) { + /* slaves loop on '!= ncpus' */ + while (atomic_read(&count_count_start) != nslaves) + mb(); + atomic_set(&count_count_stop, 0); + smp_wmb(); + + /* this lets the slaves write their count register */ + atomic_inc(&count_count_start); + + /* + * Everyone initialises count in the last loop: + */ + if (i == NR_LOOPS-1) + write_c0_count(initcount); + + /* + * Wait for all slaves to leave the synchronization point: + */ + while (atomic_read(&count_count_stop) != nslaves) + mb(); + atomic_set(&count_count_start, 0); + smp_wmb(); + atomic_inc(&count_count_stop); + } + /* Arrange for an interrupt in a short while */ + write_c0_compare(read_c0_count() + COUNTON); + + local_irq_restore(flags); + + /* + * i386 code reported the skew here, but the + * count registers were almost certainly out of sync + * so no point in alarming people + */ + printk("done.\n"); +} + +void __cpuinit synchronise_count_slave(void) +{ + int i; + unsigned long flags; + unsigned int initcount; + int ncpus; + +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC needs to synchronise per VPE, not per CPU + * ignore for now + */ + return; +#endif + + local_irq_save(flags); + + /* + * Not every cpu is online at the time this gets called, + * so we first wait for the master to say everyone is ready + */ + + while (!atomic_read(&count_start_flag)) + mb(); + + /* Count will be initialised to next expire for all CPU's */ + initcount = atomic_read(&count_reference); + + ncpus = num_online_cpus(); + for (i = 0; i < NR_LOOPS; i++) { + atomic_inc(&count_count_start); + while (atomic_read(&count_count_start) != ncpus) + mb(); + + /* + * Everyone initialises count in the last loop: + */ + if (i == NR_LOOPS-1) + write_c0_count(initcount); + + atomic_inc(&count_count_stop); + while (atomic_read(&count_count_stop) != ncpus) + mb(); + } + /* Arrange for an interrupt in a short while */ + write_c0_compare(read_c0_count() + COUNTON); + + local_irq_restore(flags); +} +#undef NR_LOOPS diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c new file mode 100644 index 00000000..b08220c8 --- /dev/null +++ b/arch/mips/kernel/syscall.c @@ -0,0 +1,346 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + */ +#include <linux/capability.h> +#include <linux/errno.h> +#include <linux/linkage.h> +#include <linux/fs.h> +#include <linux/smp.h> +#include <linux/ptrace.h> +#include <linux/string.h> +#include <linux/syscalls.h> +#include <linux/file.h> +#include <linux/utsname.h> +#include <linux/unistd.h> +#include <linux/sem.h> +#include <linux/msg.h> +#include <linux/shm.h> +#include <linux/compiler.h> +#include <linux/ipc.h> +#include <linux/uaccess.h> +#include <linux/slab.h> +#include <linux/elf.h> + +#include <asm/asm.h> +#include <asm/branch.h> +#include <asm/cachectl.h> +#include <asm/cacheflush.h> +#include <asm/asm-offsets.h> +#include <asm/signal.h> +#include <asm/sim.h> +#include <asm/shmparam.h> +#include <asm/sysmips.h> +#include <asm/uaccess.h> +#include <asm/switch_to.h> + +/* + * For historic reasons the pipe(2) syscall on MIPS has an unusual calling + * convention. 
It returns results in registers $v0 / $v1 which means there
+ * is no need for it to verify the validity of a userspace pointer
+ * argument.  Historically that used to be expensive in Linux.  These days
+ * the performance advantage is negligible.
+ */
+asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs)
+{
+	int fd[2];
+	int error, res;
+
+	error = do_pipe_flags(fd, 0);
+	if (error) {
+		res = error;
+		goto out;
+	}
+	regs.regs[3] = fd[1];
+	res = fd[0];
+out:
+	return res;
+}
+
+SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
+	unsigned long, prot, unsigned long, flags, unsigned long,
+	fd, off_t, offset)
+{
+	unsigned long result;
+
+	result = -EINVAL;
+	if (offset & ~PAGE_MASK)
+		goto out;
+
+	result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+
+out:
+	return result;
+}
+
+SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
+	unsigned long, prot, unsigned long, flags, unsigned long, fd,
+	unsigned long, pgoff)
+{
+	if (pgoff & (~PAGE_MASK >> 12))
+		return -EINVAL;
+
+	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
+}
+
+save_static_function(sys_fork);
+static int __used noinline
+_sys_fork(nabi_no_regargs struct pt_regs regs)
+{
+	return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL);
+}
+
+save_static_function(sys_clone);
+static int __used noinline
+_sys_clone(nabi_no_regargs struct pt_regs regs)
+{
+	unsigned long clone_flags;
+	unsigned long newsp;
+	int __user *parent_tidptr, *child_tidptr;
+
+	clone_flags = regs.regs[4];
+	newsp = regs.regs[5];
+	if (!newsp)
+		newsp = regs.regs[29];
+	parent_tidptr = (int __user *) regs.regs[6];
+#ifdef CONFIG_32BIT
+	/* We need to fetch the fifth argument off the stack.  */
+	child_tidptr = NULL;
+	if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) {
+		int __user *__user *usp = (int __user *__user *) regs.regs[29];
+		if (regs.regs[2] == __NR_syscall) {
+			if (get_user (child_tidptr, &usp[5]))
+				return -EFAULT;
+		}
+		else if (get_user (child_tidptr, &usp[4]))
+			return -EFAULT;
+	}
+#else
+	child_tidptr = (int __user *) regs.regs[8];
+#endif
+	return do_fork(clone_flags, newsp, &regs, 0,
+	               parent_tidptr, child_tidptr);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
+{
+	int error;
+	char * filename;
+
+	filename = getname((const char __user *) (long)regs.regs[4]);
+	error = PTR_ERR(filename);
+	if (IS_ERR(filename))
+		goto out;
+	error = do_execve(filename,
+			  (const char __user *const __user *) (long)regs.regs[5],
+			  (const char __user *const __user *) (long)regs.regs[6],
+			  &regs);
+	putname(filename);
+
+out:
+	return error;
+}
+
+SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
+{
+	struct thread_info *ti = task_thread_info(current);
+
+	ti->tp_value = addr;
+	if (cpu_has_userlocal)
+		write_c0_userlocal(addr);
+
+	return 0;
+}
+
+static inline int mips_atomic_set(struct pt_regs *regs,
+	unsigned long addr, unsigned long new)
+{
+	unsigned long old, tmp;
+	unsigned int err;
+
+	if (unlikely(addr & 3))
+		return -EINVAL;
+
+	if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
+		return -EINVAL;
+
+	if (cpu_has_llsc && R10000_LLSC_WAR) {
+		__asm__ __volatile__ (
+		"	.set	mips3				\n"
+		"	li	%[err], 0			\n"
+		"1:	ll	%[old], (%[addr])		\n"
+		"	move	%[tmp], %[new]			\n"
+		"2:	sc	%[tmp], (%[addr])		\n"
+		"	beqzl	%[tmp], 1b			\n"
+		"3:						\n"
+		"	.section .fixup,\"ax\"			\n"
+		"4:	li	%[err], %[efault]		\n"
+		"	j	3b				\n"
+		"	.previous				\n"
+		"	.section __ex_table,\"a\"		\n"
+		"	"STR(PTR)"	1b, 4b			\n"
+		"	"STR(PTR)"	2b, 4b			\n"
+		"	.previous				\n"
+		"	.set	mips0				\n"
+		: [old] "=&r" (old),
+		  [err] "=&r" (err),
+		  [tmp] "=&r" (tmp)
+		: [addr] "r" (addr),
+		  [new] "r" (new),
+		  [efault] "i" (-EFAULT)
+		: "memory");
+	} else if (cpu_has_llsc) {
+		__asm__ __volatile__ (
+		"	.set	mips3				\n"
+		"	li	%[err], 0			\n"
+		"1:	ll	%[old], (%[addr])		\n"
+		"	move	%[tmp], %[new]			\n"
+		"2:	sc	%[tmp], (%[addr])		\n"
+		"	bnez	%[tmp], 4f			\n"
+		"3:						\n"
+		"	.subsection 2				\n"
+		"4:	b	1b				\n"
+		"	.previous				\n"
+		"						\n"
+		"	.section .fixup,\"ax\"			\n"
+		"5:	li	%[err], %[efault]		\n"
+		"	j	3b				\n"
+		"	.previous				\n"
+		"	.section __ex_table,\"a\"		\n"
+		"	"STR(PTR)"	1b, 5b			\n"
+		"	"STR(PTR)"	2b, 5b			\n"
+		"	.previous				\n"
+		"	.set	mips0				\n"
+		: [old] "=&r" (old),
+		  [err] "=&r" (err),
+		  [tmp] "=&r" (tmp)
+		: [addr] "r" (addr),
+		  [new] "r" (new),
+		  [efault] "i" (-EFAULT)
+		: "memory");
+	} else {
+		do {
+			preempt_disable();
+			ll_bit = 1;
+			ll_task = current;
+			preempt_enable();
+
+			err = __get_user(old, (unsigned int *) addr);
+			err |= __put_user(new, (unsigned int *) addr);
+			if (err)
+				break;
+			rmb();
+		} while (!ll_bit);
+	}
+
+	if (unlikely(err))
+		return err;
+
+	regs->regs[2] = old;
+	regs->regs[7] = 0;	/* No error */
+
+	/*
+	 * Don't let your children do this ...
+	 */
+	__asm__ __volatile__(
+	"	move	$29, %0				\n"
+	"	j	syscall_exit			\n"
+	: /* no outputs */
+	: "r" (regs));
+
+	/* unreached.  Honestly.  */
+	while (1);
+}
+
+save_static_function(sys_sysmips);
+static int __used noinline
+_sys_sysmips(nabi_no_regargs struct pt_regs regs)
+{
+	long cmd, arg1, arg2;
+
+	cmd = regs.regs[4];
+	arg1 = regs.regs[5];
+	arg2 = regs.regs[6];
+
+	switch (cmd) {
+	case MIPS_ATOMIC_SET:
+		return mips_atomic_set(&regs, arg1, arg2);
+
+	case MIPS_FIXADE:
+		if (arg1 & ~3)
+			return -EINVAL;
+
+		if (arg1 & 1)
+			set_thread_flag(TIF_FIXADE);
+		else
+			clear_thread_flag(TIF_FIXADE);
+		if (arg1 & 2)
+			set_thread_flag(TIF_LOGADE);
+		else
+			clear_thread_flag(TIF_LOGADE);
+
+		return 0;
+
+	case FLUSH_CACHE:
+		__flush_cache_all();
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Not implemented yet ...
+ */
+SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
+{
+	return -ENOSYS;
+}
+
+/*
+ * If we ever come here the user sp is bad.  Zap the process right away.
+ * Due to the bad stack signaling wouldn't work. + */ +asmlinkage void bad_stack(void) +{ + do_exit(SIGSEGV); +} + +/* + * Do a system call from kernel instead of calling sys_execve so we + * end up with proper pt_regs. + */ +int kernel_execve(const char *filename, + const char *const argv[], + const char *const envp[]) +{ + register unsigned long __a0 asm("$4") = (unsigned long) filename; + register unsigned long __a1 asm("$5") = (unsigned long) argv; + register unsigned long __a2 asm("$6") = (unsigned long) envp; + register unsigned long __a3 asm("$7"); + unsigned long __v0; + + __asm__ volatile (" \n" + " .set noreorder \n" + " li $2, %5 # __NR_execve \n" + " syscall \n" + " move %0, $2 \n" + " .set reorder \n" + : "=&r" (__v0), "=r" (__a3) + : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve) + : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", + "memory"); + + if (__a3 == 0) + return __v0; + + return -__v0; +} diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c new file mode 100644 index 00000000..99d73b72 --- /dev/null +++ b/arch/mips/kernel/time.c @@ -0,0 +1,126 @@ +/* + * Copyright 2001 MontaVista Software Inc. + * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net + * Copyright (c) 2003, 2004 Maciej W. Rozycki + * + * Common time service routines for MIPS machines. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include <linux/bug.h> +#include <linux/clockchips.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/param.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/smp.h> +#include <linux/spinlock.h> +#include <linux/export.h> + +#include <asm/cpu-features.h> +#include <asm/div64.h> +#include <asm/smtc_ipi.h> +#include <asm/time.h> + +/* + * forward reference + */ +DEFINE_SPINLOCK(rtc_lock); +EXPORT_SYMBOL(rtc_lock); + +int __weak rtc_mips_set_time(unsigned long sec) +{ + return 0; +} + +int __weak rtc_mips_set_mmss(unsigned long nowtime) +{ + return rtc_mips_set_time(nowtime); +} + +int update_persistent_clock(struct timespec now) +{ + return rtc_mips_set_mmss(now.tv_sec); +} + +static int null_perf_irq(void) +{ + return 0; +} + +int (*perf_irq)(void) = null_perf_irq; + +EXPORT_SYMBOL(perf_irq); + +/* + * time_init() - it does the following things. + * + * 1) plat_time_init() - + * a) (optional) set up RTC routines, + * b) (optional) calibrate and set the mips_hpt_frequency + * (only needed if you intended to use cpu counter as timer interrupt + * source) + * 2) calculate a couple of cached variables for later usage + */ + +unsigned int mips_hpt_frequency; + +/* + * This function exists in order to cause an error due to a duplicate + * definition if platform code should have its own implementation. The hook + * to use instead is plat_time_init. plat_time_init does not receive the + * irqaction pointer argument anymore. This is because any function which + * initializes an interrupt timer now takes care of its own request_irq rsp. + * setup_irq calls and each clock_event_device should use its own + * struct irqrequest. 
+ */ +void __init plat_timer_setup(void) +{ + BUG(); +} + +static __init int cpu_has_mfc0_count_bug(void) +{ + switch (current_cpu_type()) { + case CPU_R4000PC: + case CPU_R4000SC: + case CPU_R4000MC: + /* + * V3.0 is documented as suffering from the mfc0 from count bug. + * Afaik this is the last version of the R4000. Later versions + * were marketed as R4400. + */ + return 1; + + case CPU_R4400PC: + case CPU_R4400SC: + case CPU_R4400MC: + /* + * The published errata for the R4400 up to 3.0 say the CPU + * has the mfc0 from count bug. + */ + if ((current_cpu_data.processor_id & 0xff) <= 0x30) + return 1; + + /* + * we assume newer revisions are ok + */ + return 0; + } + + return 0; +} + +void __init time_init(void) +{ + plat_time_init(); + + if (!mips_clockevent_init() || !cpu_has_mfc0_count_bug()) + init_mips_clocksource(); +} diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c new file mode 100644 index 00000000..cf3eb61f --- /dev/null +++ b/arch/mips/kernel/topology.c @@ -0,0 +1,32 @@ +#include <linux/cpu.h> +#include <linux/cpumask.h> +#include <linux/init.h> +#include <linux/node.h> +#include <linux/nodemask.h> +#include <linux/percpu.h> + +static DEFINE_PER_CPU(struct cpu, cpu_devices); + +static int __init topology_init(void) +{ + int i, ret; + +#ifdef CONFIG_NUMA + for_each_online_node(i) + register_one_node(i); +#endif /* CONFIG_NUMA */ + + for_each_present_cpu(i) { + struct cpu *c = &per_cpu(cpu_devices, i); + + c->hotpluggable = 1; + ret = register_cpu(c, i); + if (ret) + printk(KERN_WARNING "topology_init: register_cpu %d " + "failed (%d)\n", i, ret); + } + + return 0; +} + +subsys_initcall(topology_init); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c new file mode 100644 index 00000000..cfdaaa4c --- /dev/null +++ b/arch/mips/kernel/traps.c @@ -0,0 +1,1814 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle + * Copyright (C) 1995, 1996 Paul M. Antoine + * Copyright (C) 1998 Ulf Carlsson + * Copyright (C) 1999 Silicon Graphics, Inc. + * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000, 01 MIPS Technologies, Inc. + * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. 
Rozycki + */ +#include <linux/bug.h> +#include <linux/compiler.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/smp.h> +#include <linux/spinlock.h> +#include <linux/kallsyms.h> +#include <linux/bootmem.h> +#include <linux/interrupt.h> +#include <linux/ptrace.h> +#include <linux/kgdb.h> +#include <linux/kdebug.h> +#include <linux/kprobes.h> +#include <linux/notifier.h> +#include <linux/kdb.h> +#include <linux/irq.h> +#include <linux/perf_event.h> + +#include <asm/bootinfo.h> +#include <asm/branch.h> +#include <asm/break.h> +#include <asm/cop2.h> +#include <asm/cpu.h> +#include <asm/dsp.h> +#include <asm/fpu.h> +#include <asm/fpu_emulator.h> +#include <asm/mipsregs.h> +#include <asm/mipsmtregs.h> +#include <asm/module.h> +#include <asm/pgtable.h> +#include <asm/ptrace.h> +#include <asm/sections.h> +#include <asm/tlbdebug.h> +#include <asm/traps.h> +#include <asm/uaccess.h> +#include <asm/watch.h> +#include <asm/mmu_context.h> +#include <asm/types.h> +#include <asm/stacktrace.h> +#include <asm/uasm.h> + +extern void check_wait(void); +extern asmlinkage void r4k_wait(void); +extern asmlinkage void rollback_handle_int(void); +extern asmlinkage void handle_int(void); +extern asmlinkage void handle_tlbm(void); +extern asmlinkage void handle_tlbl(void); +extern asmlinkage void handle_tlbs(void); +extern asmlinkage void handle_adel(void); +extern asmlinkage void handle_ades(void); +extern asmlinkage void handle_ibe(void); +extern asmlinkage void handle_dbe(void); +extern asmlinkage void handle_sys(void); +extern asmlinkage void handle_bp(void); +extern asmlinkage void handle_ri(void); +extern asmlinkage void handle_ri_rdhwr_vivt(void); +extern asmlinkage void handle_ri_rdhwr(void); +extern asmlinkage void handle_cpu(void); +extern asmlinkage void handle_ov(void); +extern asmlinkage void handle_tr(void); +extern asmlinkage void handle_fpe(void); +extern asmlinkage void handle_mdmx(void); +extern asmlinkage void handle_watch(void); +extern asmlinkage void handle_mt(void); +extern asmlinkage void handle_dsp(void); +extern asmlinkage void handle_mcheck(void); +extern asmlinkage void handle_reserved(void); + +extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, + struct mips_fpu_struct *ctx, int has_fpu, + void *__user *fault_addr); + +void (*board_be_init)(void); +int (*board_be_handler)(struct pt_regs *regs, int is_fixup); +void (*board_nmi_handler_setup)(void); +void (*board_ejtag_handler_setup)(void); +void (*board_bind_eic_interrupt)(int irq, int regset); +void (*board_ebase_setup)(void); + + +static void show_raw_backtrace(unsigned long reg29) +{ + unsigned long *sp = (unsigned long *)(reg29 & ~3); + unsigned long addr; + + printk("Call Trace:"); +#ifdef CONFIG_KALLSYMS + printk("\n"); +#endif + while (!kstack_end(sp)) { + unsigned long __user *p = + (unsigned long __user *)(unsigned long)sp++; + if (__get_user(addr, p)) { + printk(" (Bad stack address)"); + break; + } + if (__kernel_text_address(addr)) + print_ip_sym(addr); + } + printk("\n"); +} + +#ifdef CONFIG_KALLSYMS +int raw_show_trace; +static int __init set_raw_show_trace(char *str) +{ + raw_show_trace = 1; + return 1; +} +__setup("raw_show_trace", set_raw_show_trace); +#endif + +static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) +{ + unsigned long sp = regs->regs[29]; + unsigned long ra = regs->regs[31]; + unsigned long pc = regs->cp0_epc; + + if (raw_show_trace || !__kernel_text_address(pc)) { + show_raw_backtrace(sp); + return; + } 
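+	/*
+	 * Otherwise walk the call chain symbolically: unwind_stack()
+	 * follows the frames from epc/ra until no caller is left.
+	 */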
+ printk("Call Trace:\n"); + do { + print_ip_sym(pc); + pc = unwind_stack(task, &sp, pc, &ra); + } while (pc); + printk("\n"); +} + +/* + * This routine abuses get_user()/put_user() to reference pointers + * with at least a bit of error checking ... + */ +static void show_stacktrace(struct task_struct *task, + const struct pt_regs *regs) +{ + const int field = 2 * sizeof(unsigned long); + long stackdata; + int i; + unsigned long __user *sp = (unsigned long __user *)regs->regs[29]; + + printk("Stack :"); + i = 0; + while ((unsigned long) sp & (PAGE_SIZE - 1)) { + if (i && ((i % (64 / field)) == 0)) + printk("\n "); + if (i > 39) { + printk(" ..."); + break; + } + + if (__get_user(stackdata, sp++)) { + printk(" (Bad stack address)"); + break; + } + + printk(" %0*lx", field, stackdata); + i++; + } + printk("\n"); + show_backtrace(task, regs); +} + +void show_stack(struct task_struct *task, unsigned long *sp) +{ + struct pt_regs regs; + if (sp) { + regs.regs[29] = (unsigned long)sp; + regs.regs[31] = 0; + regs.cp0_epc = 0; + } else { + if (task && task != current) { + regs.regs[29] = task->thread.reg29; + regs.regs[31] = 0; + regs.cp0_epc = task->thread.reg31; +#ifdef CONFIG_KGDB_KDB + } else if (atomic_read(&kgdb_active) != -1 && + kdb_current_regs) { + memcpy(®s, kdb_current_regs, sizeof(regs)); +#endif /* CONFIG_KGDB_KDB */ + } else { + prepare_frametrace(®s); + } + } + show_stacktrace(task, ®s); +} + +/* + * The architecture-independent dump_stack generator + */ +void dump_stack(void) +{ + struct pt_regs regs; + + prepare_frametrace(®s); + show_backtrace(current, ®s); +} + +EXPORT_SYMBOL(dump_stack); + +static void show_code(unsigned int __user *pc) +{ + long i; + unsigned short __user *pc16 = NULL; + + printk("\nCode:"); + + if ((unsigned long)pc & 1) + pc16 = (unsigned short __user *)((unsigned long)pc & ~1); + for(i = -3 ; i < 6 ; i++) { + unsigned int insn; + if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { + printk(" (Bad address in epc)\n"); + break; + } + printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' 
':'>')); + } +} + +static void __show_regs(const struct pt_regs *regs) +{ + const int field = 2 * sizeof(unsigned long); + unsigned int cause = regs->cp0_cause; + int i; + + printk("Cpu %d\n", smp_processor_id()); + + /* + * Saved main processor registers + */ + for (i = 0; i < 32; ) { + if ((i % 4) == 0) + printk("$%2d :", i); + if (i == 0) + printk(" %0*lx", field, 0UL); + else if (i == 26 || i == 27) + printk(" %*s", field, ""); + else + printk(" %0*lx", field, regs->regs[i]); + + i++; + if ((i % 4) == 0) + printk("\n"); + } + +#ifdef CONFIG_CPU_HAS_SMARTMIPS + printk("Acx : %0*lx\n", field, regs->acx); +#endif + printk("Hi : %0*lx\n", field, regs->hi); + printk("Lo : %0*lx\n", field, regs->lo); + + /* + * Saved cp0 registers + */ + printk("epc : %0*lx %pS\n", field, regs->cp0_epc, + (void *) regs->cp0_epc); + printk(" %s\n", print_tainted()); + printk("ra : %0*lx %pS\n", field, regs->regs[31], + (void *) regs->regs[31]); + + printk("Status: %08x ", (uint32_t) regs->cp0_status); + + if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) { + if (regs->cp0_status & ST0_KUO) + printk("KUo "); + if (regs->cp0_status & ST0_IEO) + printk("IEo "); + if (regs->cp0_status & ST0_KUP) + printk("KUp "); + if (regs->cp0_status & ST0_IEP) + printk("IEp "); + if (regs->cp0_status & ST0_KUC) + printk("KUc "); + if (regs->cp0_status & ST0_IEC) + printk("IEc "); + } else { + if (regs->cp0_status & ST0_KX) + printk("KX "); + if (regs->cp0_status & ST0_SX) + printk("SX "); + if (regs->cp0_status & ST0_UX) + printk("UX "); + switch (regs->cp0_status & ST0_KSU) { + case KSU_USER: + printk("USER "); + break; + case KSU_SUPERVISOR: + printk("SUPERVISOR "); + break; + case KSU_KERNEL: + printk("KERNEL "); + break; + default: + printk("BAD_MODE "); + break; + } + if (regs->cp0_status & ST0_ERL) + printk("ERL "); + if (regs->cp0_status & ST0_EXL) + printk("EXL "); + if (regs->cp0_status & ST0_IE) + printk("IE "); + } + printk("\n"); + + printk("Cause : %08x\n", cause); + + cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; + if (1 <= cause && cause <= 5) + printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr); + + printk("PrId : %08x (%s)\n", read_c0_prid(), + cpu_name_string()); +} + +/* + * FIXME: really the generic show_regs should take a const pointer argument. 
+ */ +void show_regs(struct pt_regs *regs) +{ + __show_regs((struct pt_regs *)regs); +} + +void show_registers(struct pt_regs *regs) +{ + const int field = 2 * sizeof(unsigned long); + + __show_regs(regs); + print_modules(); + printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n", + current->comm, current->pid, current_thread_info(), current, + field, current_thread_info()->tp_value); + if (cpu_has_userlocal) { + unsigned long tls; + + tls = read_c0_userlocal(); + if (tls != current_thread_info()->tp_value) + printk("*HwTLS: %0*lx\n", field, tls); + } + + show_stacktrace(current, regs); + show_code((unsigned int __user *) regs->cp0_epc); + printk("\n"); +} + +static int regs_to_trapnr(struct pt_regs *regs) +{ + return (regs->cp0_cause >> 2) & 0x1f; +} + +static DEFINE_RAW_SPINLOCK(die_lock); + +void __noreturn die(const char *str, struct pt_regs *regs) +{ + static int die_counter; + int sig = SIGSEGV; +#ifdef CONFIG_MIPS_MT_SMTC + unsigned long dvpret; +#endif /* CONFIG_MIPS_MT_SMTC */ + + oops_enter(); + + if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) + sig = 0; + + console_verbose(); + raw_spin_lock_irq(&die_lock); +#ifdef CONFIG_MIPS_MT_SMTC + dvpret = dvpe(); +#endif /* CONFIG_MIPS_MT_SMTC */ + bust_spinlocks(1); +#ifdef CONFIG_MIPS_MT_SMTC + mips_mt_regdump(dvpret); +#endif /* CONFIG_MIPS_MT_SMTC */ + + printk("%s[#%d]:\n", str, ++die_counter); + show_registers(regs); + add_taint(TAINT_DIE); + raw_spin_unlock_irq(&die_lock); + + oops_exit(); + + if (in_interrupt()) + panic("Fatal exception in interrupt"); + + if (panic_on_oops) { + printk(KERN_EMERG "Fatal exception: panic in 5 seconds"); + ssleep(5); + panic("Fatal exception"); + } + + do_exit(sig); +} + +extern struct exception_table_entry __start___dbe_table[]; +extern struct exception_table_entry __stop___dbe_table[]; + +__asm__( +" .section __dbe_table, \"a\"\n" +" .previous \n"); + +/* Given an address, look for it in the exception tables. */ +static const struct exception_table_entry *search_dbe_tables(unsigned long addr) +{ + const struct exception_table_entry *e; + + e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr); + if (!e) + e = search_module_dbetables(addr); + return e; +} + +asmlinkage void do_be(struct pt_regs *regs) +{ + const int field = 2 * sizeof(unsigned long); + const struct exception_table_entry *fixup = NULL; + int data = regs->cp0_cause & 4; + int action = MIPS_BE_FATAL; + + /* XXX For now. Fixme, this searches the wrong table ... */ + if (data && !user_mode(regs)) + fixup = search_dbe_tables(exception_epc(regs)); + + if (fixup) + action = MIPS_BE_FIXUP; + + if (board_be_handler) + action = board_be_handler(regs, fixup != NULL); + + switch (action) { + case MIPS_BE_DISCARD: + return; + case MIPS_BE_FIXUP: + if (fixup) { + regs->cp0_epc = fixup->nextinsn; + return; + } + break; + default: + break; + } + + /* + * Assume it would be too dangerous to continue ... + */ + printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n", + data ? 
"Data" : "Instruction", + field, regs->cp0_epc, field, regs->regs[31]); + if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS) + == NOTIFY_STOP) + return; + + die_if_kernel("Oops", regs); + force_sig(SIGBUS, current); +} + +/* + * ll/sc, rdhwr, sync emulation + */ + +#define OPCODE 0xfc000000 +#define BASE 0x03e00000 +#define RT 0x001f0000 +#define OFFSET 0x0000ffff +#define LL 0xc0000000 +#define SC 0xe0000000 +#define SPEC0 0x00000000 +#define SPEC3 0x7c000000 +#define RD 0x0000f800 +#define FUNC 0x0000003f +#define SYNC 0x0000000f +#define RDHWR 0x0000003b + +/* + * The ll_bit is cleared by r*_switch.S + */ + +unsigned int ll_bit; +struct task_struct *ll_task; + +static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode) +{ + unsigned long value, __user *vaddr; + long offset; + + /* + * analyse the ll instruction that just caused a ri exception + * and put the referenced address to addr. + */ + + /* sign extend offset */ + offset = opcode & OFFSET; + offset <<= 16; + offset >>= 16; + + vaddr = (unsigned long __user *) + ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); + + if ((unsigned long)vaddr & 3) + return SIGBUS; + if (get_user(value, vaddr)) + return SIGSEGV; + + preempt_disable(); + + if (ll_task == NULL || ll_task == current) { + ll_bit = 1; + } else { + ll_bit = 0; + } + ll_task = current; + + preempt_enable(); + + regs->regs[(opcode & RT) >> 16] = value; + + return 0; +} + +static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode) +{ + unsigned long __user *vaddr; + unsigned long reg; + long offset; + + /* + * analyse the sc instruction that just caused a ri exception + * and put the referenced address to addr. + */ + + /* sign extend offset */ + offset = opcode & OFFSET; + offset <<= 16; + offset >>= 16; + + vaddr = (unsigned long __user *) + ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); + reg = (opcode & RT) >> 16; + + if ((unsigned long)vaddr & 3) + return SIGBUS; + + preempt_disable(); + + if (ll_bit == 0 || ll_task != current) { + regs->regs[reg] = 0; + preempt_enable(); + return 0; + } + + preempt_enable(); + + if (put_user(regs->regs[reg], vaddr)) + return SIGSEGV; + + regs->regs[reg] = 1; + + return 0; +} + +/* + * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both + * opcodes are supposed to result in coprocessor unusable exceptions if + * executed on ll/sc-less processors. That's the theory. In practice a + * few processors such as NEC's VR4100 throw reserved instruction exceptions + * instead, so we're doing the emulation thing in both exception handlers. + */ +static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) +{ + if ((opcode & OPCODE) == LL) { + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, + 1, regs, 0); + return simulate_ll(regs, opcode); + } + if ((opcode & OPCODE) == SC) { + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, + 1, regs, 0); + return simulate_sc(regs, opcode); + } + + return -1; /* Must be something else ... */ +} + +/* + * Simulate trapping 'rdhwr' instructions to provide user accessible + * registers not implemented in hardware. 
+ */
+static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
+{
+	struct thread_info *ti = task_thread_info(current);
+
+	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
+		int rd = (opcode & RD) >> 11;
+		int rt = (opcode & RT) >> 16;
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+				1, regs, 0);
+		switch (rd) {
+		case 0:		/* CPU number */
+			regs->regs[rt] = smp_processor_id();
+			return 0;
+		case 1:		/* SYNCI length */
+			regs->regs[rt] = min(current_cpu_data.dcache.linesz,
+					     current_cpu_data.icache.linesz);
+			return 0;
+		case 2:		/* Read count register */
+			regs->regs[rt] = read_c0_count();
+			return 0;
+		case 3:		/* Count register resolution */
+			switch (current_cpu_data.cputype) {
+			case CPU_20KC:
+			case CPU_25KF:
+				regs->regs[rt] = 1;
+				break;
+			default:
+				regs->regs[rt] = 2;
+			}
+			return 0;
+		case 29:
+			regs->regs[rt] = ti->tp_value;
+			return 0;
+		default:
+			return -1;
+		}
+	}
+
+	/* Not ours.  */
+	return -1;
+}
+
+static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
+{
+	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+				1, regs, 0);
+		return 0;
+	}
+
+	return -1;			/* Must be something else ... */
+}
+
+asmlinkage void do_ov(struct pt_regs *regs)
+{
+	siginfo_t info;
+
+	die_if_kernel("Integer overflow", regs);
+
+	info.si_code = FPE_INTOVF;
+	info.si_signo = SIGFPE;
+	info.si_errno = 0;
+	info.si_addr = (void __user *) regs->cp0_epc;
+	force_sig_info(SIGFPE, &info, current);
+}
+
+static int process_fpemu_return(int sig, void __user *fault_addr)
+{
+	if (sig == SIGSEGV || sig == SIGBUS) {
+		struct siginfo si = {0};
+		si.si_addr = fault_addr;
+		si.si_signo = sig;
+		if (sig == SIGSEGV) {
+			if (find_vma(current->mm, (unsigned long)fault_addr))
+				si.si_code = SEGV_ACCERR;
+			else
+				si.si_code = SEGV_MAPERR;
+		} else {
+			si.si_code = BUS_ADRERR;
+		}
+		force_sig_info(sig, &si, current);
+		return 1;
+	} else if (sig) {
+		force_sig(sig, current);
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+/*
+ * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
+ */
+asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
+{
+	siginfo_t info = {0};
+
+	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
+	    == NOTIFY_STOP)
+		return;
+	die_if_kernel("FP exception in kernel code", regs);
+
+	if (fcr31 & FPU_CSR_UNI_X) {
+		int sig;
+		void __user *fault_addr = NULL;
+
+		/*
+		 * Unimplemented operation exception.  If we've got the full
+		 * software emulator on-board, let's use it...
+		 *
+		 * Force FPU to dump state into task/thread context.  We're
+		 * moving a lot of data here for what is probably a single
+		 * instruction, but the alternative is to pre-decode the FP
+		 * register operands before invoking the emulator, which seems
+		 * a bit extreme for what should be an infrequent event.
+		 */
+		/* Ensure 'resume' not overwrite saved fp context again. */
+		lose_fpu(1);
+
+		/* Run the emulator */
+		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+					       &fault_addr);
+
+		/*
+		 * We can't allow the emulated instruction to leave any of
+		 * the cause bit set in $fcr31.
+		 */
+		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+
+		/* Restore the hardware register state */
+		own_fpu(1);	/* Using the FPU again.
*/ + + /* If something went wrong, signal */ + process_fpemu_return(sig, fault_addr); + + return; + } else if (fcr31 & FPU_CSR_INV_X) + info.si_code = FPE_FLTINV; + else if (fcr31 & FPU_CSR_DIV_X) + info.si_code = FPE_FLTDIV; + else if (fcr31 & FPU_CSR_OVF_X) + info.si_code = FPE_FLTOVF; + else if (fcr31 & FPU_CSR_UDF_X) + info.si_code = FPE_FLTUND; + else if (fcr31 & FPU_CSR_INE_X) + info.si_code = FPE_FLTRES; + else + info.si_code = __SI_FAULT; + info.si_signo = SIGFPE; + info.si_errno = 0; + info.si_addr = (void __user *) regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); +} + +static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, + const char *str) +{ + siginfo_t info; + char b[40]; + +#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP + if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) + return; +#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ + + if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) + return; + + /* + * A short test says that IRIX 5.3 sends SIGTRAP for all trap + * insns, even for trap and break codes that indicate arithmetic + * failures. Weird ... + * But should we continue the brokenness??? --macro + */ + switch (code) { + case BRK_OVERFLOW: + case BRK_DIVZERO: + scnprintf(b, sizeof(b), "%s instruction in kernel code", str); + die_if_kernel(b, regs); + if (code == BRK_DIVZERO) + info.si_code = FPE_INTDIV; + else + info.si_code = FPE_INTOVF; + info.si_signo = SIGFPE; + info.si_errno = 0; + info.si_addr = (void __user *) regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); + break; + case BRK_BUG: + die_if_kernel("Kernel bug detected", regs); + force_sig(SIGTRAP, current); + break; + case BRK_MEMU: + /* + * Address errors may be deliberately induced by the FPU + * emulator to retake control of the CPU after executing the + * instruction in the delay slot of an emulated branch. + * + * Terminate if exception was recognized as a delay slot return + * otherwise handle as normal. + */ + if (do_dsemulret(regs)) + return; + + die_if_kernel("Math emu break/trap", regs); + force_sig(SIGTRAP, current); + break; + default: + scnprintf(b, sizeof(b), "%s instruction in kernel code", str); + die_if_kernel(b, regs); + force_sig(SIGTRAP, current); + } +} + +asmlinkage void do_bp(struct pt_regs *regs) +{ + unsigned int opcode, bcode; + + if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) + goto out_sigsegv; + + /* + * There is the ancient bug in the MIPS assemblers that the break + * code starts left to bit 16 instead to bit 6 in the opcode. + * Gas is bug-compatible, but not always, grrr... + * We handle both cases with a simple heuristics. --macro + */ + bcode = ((opcode >> 6) & ((1 << 20) - 1)); + if (bcode >= (1 << 10)) + bcode >>= 10; + + /* + * notify the kprobe handlers, if instruction is likely to + * pertain to them. + */ + switch (bcode) { + case BRK_KPROBE_BP: + if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) + return; + else + break; + case BRK_KPROBE_SSTEPBP: + if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) + return; + else + break; + default: + break; + } + + do_trap_or_bp(regs, bcode, "Break"); + return; + +out_sigsegv: + force_sig(SIGSEGV, current); +} + +asmlinkage void do_tr(struct pt_regs *regs) +{ + unsigned int opcode, tcode = 0; + + if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) + goto out_sigsegv; + + /* Immediate versions don't provide a code. 
*/ + if (!(opcode & OPCODE)) + tcode = ((opcode >> 6) & ((1 << 10) - 1)); + + do_trap_or_bp(regs, tcode, "Trap"); + return; + +out_sigsegv: + force_sig(SIGSEGV, current); +} + +asmlinkage void do_ri(struct pt_regs *regs) +{ + unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); + unsigned long old_epc = regs->cp0_epc; + unsigned int opcode = 0; + int status = -1; + + if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL) + == NOTIFY_STOP) + return; + + die_if_kernel("Reserved instruction in kernel code", regs); + + if (unlikely(compute_return_epc(regs) < 0)) + return; + + if (unlikely(get_user(opcode, epc) < 0)) + status = SIGSEGV; + + if (!cpu_has_llsc && status < 0) + status = simulate_llsc(regs, opcode); + + if (status < 0) + status = simulate_rdhwr(regs, opcode); + + if (status < 0) + status = simulate_sync(regs, opcode); + + if (status < 0) + status = SIGILL; + + if (unlikely(status > 0)) { + regs->cp0_epc = old_epc; /* Undo skip-over. */ + force_sig(status, current); + } +} + +/* + * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've + * emulated more than some threshold number of instructions, force migration to + * a "CPU" that has FP support. + */ +static void mt_ase_fp_affinity(void) +{ +#ifdef CONFIG_MIPS_MT_FPAFF + if (mt_fpemul_threshold > 0 && + ((current->thread.emulated_fp++ > mt_fpemul_threshold))) { + /* + * If there's no FPU present, or if the application has already + * restricted the allowed set to exclude any CPUs with FPUs, + * we'll skip the procedure. + */ + if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { + cpumask_t tmask; + + current->thread.user_cpus_allowed + = current->cpus_allowed; + cpus_and(tmask, current->cpus_allowed, + mt_fpu_cpumask); + set_cpus_allowed_ptr(current, &tmask); + set_thread_flag(TIF_FPUBOUND); + } + } +#endif /* CONFIG_MIPS_MT_FPAFF */ +} + +/* + * No lock; only written during early bootup by CPU 0. + */ +static RAW_NOTIFIER_HEAD(cu2_chain); + +int __ref register_cu2_notifier(struct notifier_block *nb) +{ + return raw_notifier_chain_register(&cu2_chain, nb); +} + +int cu2_notifier_call_chain(unsigned long val, void *v) +{ + return raw_notifier_call_chain(&cu2_chain, val, v); +} + +static int default_cu2_call(struct notifier_block *nfb, unsigned long action, + void *data) +{ + struct pt_regs *regs = data; + + switch (action) { + default: + die_if_kernel("Unhandled kernel unaligned access or invalid " + "instruction", regs); + /* Fall through */ + + case CU2_EXCEPTION: + force_sig(SIGILL, current); + } + + return NOTIFY_OK; +} + +asmlinkage void do_cpu(struct pt_regs *regs) +{ + unsigned int __user *epc; + unsigned long old_epc; + unsigned int opcode; + unsigned int cpid; + int status; + unsigned long __maybe_unused flags; + + die_if_kernel("do_cpu invoked from kernel context!", regs); + + cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; + + switch (cpid) { + case 0: + epc = (unsigned int __user *)exception_epc(regs); + old_epc = regs->cp0_epc; + opcode = 0; + status = -1; + + if (unlikely(compute_return_epc(regs) < 0)) + return; + + if (unlikely(get_user(opcode, epc) < 0)) + status = SIGSEGV; + + if (!cpu_has_llsc && status < 0) + status = simulate_llsc(regs, opcode); + + if (status < 0) + status = simulate_rdhwr(regs, opcode); + + if (status < 0) + status = SIGILL; + + if (unlikely(status > 0)) { + regs->cp0_epc = old_epc; /* Undo skip-over. */ + force_sig(status, current); + } + + return; + + case 1: + if (used_math()) /* Using the FPU again. 
*/
+			own_fpu(1);
+		else {			/* First time FPU user.  */
+			init_fpu();
+			set_used_math();
+		}
+
+		if (!raw_cpu_has_fpu) {
+			int sig;
+			void __user *fault_addr = NULL;
+			sig = fpu_emulator_cop1Handler(regs,
+						       &current->thread.fpu,
+						       0, &fault_addr);
+			if (!process_fpemu_return(sig, fault_addr))
+				mt_ase_fp_affinity();
+		}
+
+		return;
+
+	case 2:
+		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
+		return;
+
+	case 3:
+		break;
+	}
+
+	force_sig(SIGILL, current);
+}
+
+asmlinkage void do_mdmx(struct pt_regs *regs)
+{
+	force_sig(SIGILL, current);
+}
+
+/*
+ * Called with interrupts disabled.
+ */
+asmlinkage void do_watch(struct pt_regs *regs)
+{
+	u32 cause;
+
+	/*
+	 * Clear WP (bit 22) bit of cause register so we don't loop
+	 * forever.
+	 */
+	cause = read_c0_cause();
+	cause &= ~(1 << 22);
+	write_c0_cause(cause);
+
+	/*
+	 * If the current thread has the watch registers loaded, save
+	 * their values and send SIGTRAP.  Otherwise another thread
+	 * left the registers set, clear them and continue.
+	 */
+	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
+		mips_read_watch_registers();
+		local_irq_enable();
+		force_sig(SIGTRAP, current);
+	} else {
+		mips_clear_watch_registers();
+		local_irq_enable();
+	}
+}
+
+asmlinkage void do_mcheck(struct pt_regs *regs)
+{
+	const int field = 2 * sizeof(unsigned long);
+	int multi_match = regs->cp0_status & ST0_TS;
+
+	show_regs(regs);
+
+	if (multi_match) {
+		printk("Index   : %0x\n", read_c0_index());
+		printk("Pagemask: %0x\n", read_c0_pagemask());
+		printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
+		printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
+		printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
+		printk("\n");
+		dump_tlb_all();
+	}
+
+	show_code((unsigned int __user *) regs->cp0_epc);
+
+	/*
+	 * Some chips may have other causes of machine check (e.g. SB1
+	 * graduation timer)
+	 */
+	panic("Caught Machine Check exception - %scaused by multiple "
+	      "matching entries in the TLB.",
+	      (multi_match) ? "" : "not ");
+}
+
+asmlinkage void do_mt(struct pt_regs *regs)
+{
+	int subcode;
+
+	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
+			>> VPECONTROL_EXCPT_SHIFT;
+	switch (subcode) {
+	case 0:
+		printk(KERN_DEBUG "Thread Underflow\n");
+		break;
+	case 1:
+		printk(KERN_DEBUG "Thread Overflow\n");
+		break;
+	case 2:
+		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
+		break;
+	case 3:
+		printk(KERN_DEBUG "Gating Storage Exception\n");
+		break;
+	case 4:
+		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
+		break;
+	case 5:
+		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
+		break;
+	default:
+		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
+			subcode);
+		break;
+	}
+	die_if_kernel("MIPS MT Thread exception in kernel", regs);
+
+	force_sig(SIGILL, current);
+}
+
+
+asmlinkage void do_dsp(struct pt_regs *regs)
+{
+	if (cpu_has_dsp)
+		panic("Unexpected DSP exception");
+
+	force_sig(SIGILL, current);
+}
+
+asmlinkage void do_reserved(struct pt_regs *regs)
+{
+	/*
+	 * Game over - no way to handle this if it ever occurs.  Most probably
+	 * caused by a new unknown cpu type or after another deadly
+	 * hard/software error.
+ */ + show_regs(regs); + panic("Caught reserved exception %ld - should not happen.", + (regs->cp0_cause & 0x7f) >> 2); +} + +static int __initdata l1parity = 1; +static int __init nol1parity(char *s) +{ + l1parity = 0; + return 1; +} +__setup("nol1par", nol1parity); +static int __initdata l2parity = 1; +static int __init nol2parity(char *s) +{ + l2parity = 0; + return 1; +} +__setup("nol2par", nol2parity); + +/* + * Some MIPS CPUs can enable/disable for cache parity detection, but do + * it different ways. + */ +static inline void parity_protection_init(void) +{ + switch (current_cpu_type()) { + case CPU_24K: + case CPU_34K: + case CPU_74K: + case CPU_1004K: + { +#define ERRCTL_PE 0x80000000 +#define ERRCTL_L2P 0x00800000 + unsigned long errctl; + unsigned int l1parity_present, l2parity_present; + + errctl = read_c0_ecc(); + errctl &= ~(ERRCTL_PE|ERRCTL_L2P); + + /* probe L1 parity support */ + write_c0_ecc(errctl | ERRCTL_PE); + back_to_back_c0_hazard(); + l1parity_present = (read_c0_ecc() & ERRCTL_PE); + + /* probe L2 parity support */ + write_c0_ecc(errctl|ERRCTL_L2P); + back_to_back_c0_hazard(); + l2parity_present = (read_c0_ecc() & ERRCTL_L2P); + + if (l1parity_present && l2parity_present) { + if (l1parity) + errctl |= ERRCTL_PE; + if (l1parity ^ l2parity) + errctl |= ERRCTL_L2P; + } else if (l1parity_present) { + if (l1parity) + errctl |= ERRCTL_PE; + } else if (l2parity_present) { + if (l2parity) + errctl |= ERRCTL_L2P; + } else { + /* No parity available */ + } + + printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl); + + write_c0_ecc(errctl); + back_to_back_c0_hazard(); + errctl = read_c0_ecc(); + printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl); + + if (l1parity_present) + printk(KERN_INFO "Cache parity protection %sabled\n", + (errctl & ERRCTL_PE) ? "en" : "dis"); + + if (l2parity_present) { + if (l1parity_present && l1parity) + errctl ^= ERRCTL_L2P; + printk(KERN_INFO "L2 cache parity protection %sabled\n", + (errctl & ERRCTL_L2P) ? "en" : "dis"); + } + } + break; + + case CPU_5KC: + write_c0_ecc(0x80000000); + back_to_back_c0_hazard(); + /* Set the PE bit (bit 31) in the c0_errctl register. */ + printk(KERN_INFO "Cache parity protection %sabled\n", + (read_c0_ecc() & 0x80000000) ? "en" : "dis"); + break; + case CPU_20KC: + case CPU_25KF: + /* Clear the DE bit (bit 16) in the c0_status register. */ + printk(KERN_INFO "Enable cache parity protection for " + "MIPS 20KC/25KF CPUs.\n"); + clear_c0_status(ST0_DE); + break; + default: + break; + } +} + +asmlinkage void cache_parity_error(void) +{ + const int field = 2 * sizeof(unsigned long); + unsigned int reg_val; + + /* For the moment, report the problem and hang. */ + printk("Cache error exception:\n"); + printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); + reg_val = read_c0_cacheerr(); + printk("c0_cacheerr == %08x\n", reg_val); + + printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", + reg_val & (1<<30) ? "secondary" : "primary", + reg_val & (1<<31) ? "data" : "insn"); + printk("Error bits: %s%s%s%s%s%s%s\n", + reg_val & (1<<29) ? "ED " : "", + reg_val & (1<<28) ? "ET " : "", + reg_val & (1<<26) ? "EE " : "", + reg_val & (1<<25) ? "EB " : "", + reg_val & (1<<24) ? "EI " : "", + reg_val & (1<<23) ? "E1 " : "", + reg_val & (1<<22) ? 
"E0 " : ""); + printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); + +#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) + if (reg_val & (1<<22)) + printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0()); + + if (reg_val & (1<<23)) + printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1()); +#endif + + panic("Can't handle the cache error!"); +} + +/* + * SDBBP EJTAG debug exception handler. + * We skip the instruction and return to the next instruction. + */ +void ejtag_exception_handler(struct pt_regs *regs) +{ + const int field = 2 * sizeof(unsigned long); + unsigned long depc, old_epc; + unsigned int debug; + + printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); + depc = read_c0_depc(); + debug = read_c0_debug(); + printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug); + if (debug & 0x80000000) { + /* + * In branch delay slot. + * We cheat a little bit here and use EPC to calculate the + * debug return address (DEPC). EPC is restored after the + * calculation. + */ + old_epc = regs->cp0_epc; + regs->cp0_epc = depc; + __compute_return_epc(regs); + depc = regs->cp0_epc; + regs->cp0_epc = old_epc; + } else + depc += 4; + write_c0_depc(depc); + +#if 0 + printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n"); + write_c0_debug(debug | 0x100); +#endif +} + +/* + * NMI exception handler. + * No lock; only written during early bootup by CPU 0. + */ +static RAW_NOTIFIER_HEAD(nmi_chain); + +int register_nmi_notifier(struct notifier_block *nb) +{ + return raw_notifier_chain_register(&nmi_chain, nb); +} + +void __noreturn nmi_exception_handler(struct pt_regs *regs) +{ + raw_notifier_call_chain(&nmi_chain, 0, regs); + bust_spinlocks(1); + printk("NMI taken!!!!\n"); + die("NMI", regs); +} + +#define VECTORSPACING 0x100 /* for EI/VI mode */ + +unsigned long ebase; +unsigned long exception_handlers[32]; +unsigned long vi_handlers[64]; + +void __init *set_except_vector(int n, void *addr) +{ + unsigned long handler = (unsigned long) addr; + unsigned long old_handler = exception_handlers[n]; + + exception_handlers[n] = handler; + if (n == 0 && cpu_has_divec) { + unsigned long jump_mask = ~((1 << 28) - 1); + u32 *buf = (u32 *)(ebase + 0x200); + unsigned int k0 = 26; + if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { + uasm_i_j(&buf, handler & ~jump_mask); + uasm_i_nop(&buf); + } else { + UASM_i_LA(&buf, k0, handler); + uasm_i_jr(&buf, k0); + uasm_i_nop(&buf); + } + local_flush_icache_range(ebase + 0x200, (unsigned long)buf); + } + return (void *)old_handler; +} + +static asmlinkage void do_default_vi(void) +{ + show_regs(get_irq_regs()); + panic("Caught unexpected vectored interrupt."); +} + +static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) +{ + unsigned long handler; + unsigned long old_handler = vi_handlers[n]; + int srssets = current_cpu_data.srsets; + u32 *w; + unsigned char *b; + + BUG_ON(!cpu_has_veic && !cpu_has_vint); + + if (addr == NULL) { + handler = (unsigned long) do_default_vi; + srs = 0; + } else + handler = (unsigned long) addr; + vi_handlers[n] = (unsigned long) addr; + + b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); + + if (srs >= srssets) + panic("Shadow register set %d not supported", srs); + + if (cpu_has_veic) { + if (board_bind_eic_interrupt) + board_bind_eic_interrupt(n, srs); + } else if (cpu_has_vint) { + /* SRSMap is only defined if shadow sets are implemented */ + if (srssets > 1) + change_c0_srsmap(0xf << n*4, srs << n*4); + } + + if (srs == 0) { + /* + * If no 
shadow set is selected then use the default handler + * that does normal register saving and a standard interrupt exit + */ + + extern char except_vec_vi, except_vec_vi_lui; + extern char except_vec_vi_ori, except_vec_vi_end; + extern char rollback_except_vec_vi; + char *vec_start = (cpu_wait == r4k_wait) ? + &rollback_except_vec_vi : &except_vec_vi; +#ifdef CONFIG_MIPS_MT_SMTC + /* + * We need to provide the SMTC vectored interrupt handler + * not only with the address of the handler, but with the + * Status.IM bit to be masked before going there. + */ + extern char except_vec_vi_mori; + const int mori_offset = &except_vec_vi_mori - vec_start; +#endif /* CONFIG_MIPS_MT_SMTC */ + const int handler_len = &except_vec_vi_end - vec_start; + const int lui_offset = &except_vec_vi_lui - vec_start; + const int ori_offset = &except_vec_vi_ori - vec_start; + + if (handler_len > VECTORSPACING) { + /* + * Sigh... panicing won't help as the console + * is probably not configured :( + */ + panic("VECTORSPACING too small"); + } + + memcpy(b, vec_start, handler_len); +#ifdef CONFIG_MIPS_MT_SMTC + BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ + + w = (u32 *)(b + mori_offset); + *w = (*w & 0xffff0000) | (0x100 << n); +#endif /* CONFIG_MIPS_MT_SMTC */ + w = (u32 *)(b + lui_offset); + *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); + w = (u32 *)(b + ori_offset); + *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); + local_flush_icache_range((unsigned long)b, + (unsigned long)(b+handler_len)); + } + else { + /* + * In other cases jump directly to the interrupt handler + * + * It is the handlers responsibility to save registers if required + * (eg hi/lo) and return from the exception using "eret" + */ + w = (u32 *)b; + *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */ + *w = 0; + local_flush_icache_range((unsigned long)b, + (unsigned long)(b+8)); + } + + return (void *)old_handler; +} + +void *set_vi_handler(int n, vi_handler_t addr) +{ + return set_vi_srs_handler(n, addr, 0); +} + +extern void cpu_cache_init(void); +extern void tlb_init(void); +extern void flush_tlb_handlers(void); + +/* + * Timer interrupt + */ +int cp0_compare_irq; +int cp0_compare_irq_shift; + +/* + * Performance counter IRQ or -1 if shared with timer + */ +int cp0_perfcount_irq; +EXPORT_SYMBOL_GPL(cp0_perfcount_irq); + +static int __cpuinitdata noulri; + +static int __init ulri_disable(char *s) +{ + pr_info("Disabling ulri\n"); + noulri = 1; + + return 1; +} +__setup("noulri", ulri_disable); + +void __cpuinit per_cpu_trap_init(void) +{ + unsigned int cpu = smp_processor_id(); + unsigned int status_set = ST0_CU0; + unsigned int hwrena = cpu_hwrena_impl_bits; +#ifdef CONFIG_MIPS_MT_SMTC + int secondaryTC = 0; + int bootTC = (cpu == 0); + + /* + * Only do per_cpu_trap_init() for first TC of Each VPE. + * Note that this hack assumes that the SMTC init code + * assigns TCs consecutively and in ascending order. + */ + + if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && + ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) + secondaryTC = 1; +#endif /* CONFIG_MIPS_MT_SMTC */ + + /* + * Disable coprocessors and select 32-bit or 64-bit addressing + * and the 16/32 or 32/32 FPR register model. Reset the BEV + * flag that some firmware may have left set and the TS bit (for + * IP27). Set XX for ISA IV code to work. 
+ */ +#ifdef CONFIG_64BIT + status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; +#endif + if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) + status_set |= ST0_XX; + if (cpu_has_dsp) + status_set |= ST0_MX; + + change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, + status_set); + + if (cpu_has_mips_r2) + hwrena |= 0x0000000f; + + if (!noulri && cpu_has_userlocal) + hwrena |= (1 << 29); + + if (hwrena) + write_c0_hwrena(hwrena); + +#ifdef CONFIG_MIPS_MT_SMTC + if (!secondaryTC) { +#endif /* CONFIG_MIPS_MT_SMTC */ + + if (cpu_has_veic || cpu_has_vint) { + unsigned long sr = set_c0_status(ST0_BEV); + write_c0_ebase(ebase); + write_c0_status(sr); + /* Setting vector spacing enables EI/VI mode */ + change_c0_intctl(0x3e0, VECTORSPACING); + } + if (cpu_has_divec) { + if (cpu_has_mipsmt) { + unsigned int vpflags = dvpe(); + set_c0_cause(CAUSEF_IV); + evpe(vpflags); + } else + set_c0_cause(CAUSEF_IV); + } + + /* + * Before R2 both interrupt numbers were fixed to 7, so on R2 only: + * + * o read IntCtl.IPTI to determine the timer interrupt + * o read IntCtl.IPPCI to determine the performance counter interrupt + */ + if (cpu_has_mips_r2) { + cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; + cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; + cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; + if (cp0_perfcount_irq == cp0_compare_irq) + cp0_perfcount_irq = -1; + } else { + cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; + cp0_compare_irq_shift = cp0_compare_irq; + cp0_perfcount_irq = -1; + } + +#ifdef CONFIG_MIPS_MT_SMTC + } +#endif /* CONFIG_MIPS_MT_SMTC */ + + if (!cpu_data[cpu].asid_cache) + cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; + + atomic_inc(&init_mm.mm_count); + current->active_mm = &init_mm; + BUG_ON(current->mm); + enter_lazy_tlb(&init_mm, current); + +#ifdef CONFIG_MIPS_MT_SMTC + if (bootTC) { +#endif /* CONFIG_MIPS_MT_SMTC */ + cpu_cache_init(); + tlb_init(); +#ifdef CONFIG_MIPS_MT_SMTC + } else if (!secondaryTC) { + /* + * First TC in non-boot VPE must do subset of tlb_init() + * for MMU countrol registers. + */ + write_c0_pagemask(PM_DEFAULT_MASK); + write_c0_wired(0); + } +#endif /* CONFIG_MIPS_MT_SMTC */ + TLBMISS_HANDLER_SETUP(); +} + +/* Install CPU exception handler */ +void __init set_handler(unsigned long offset, void *addr, unsigned long size) +{ + memcpy((void *)(ebase + offset), addr, size); + local_flush_icache_range(ebase + offset, ebase + offset + size); +} + +static char panic_null_cerr[] __cpuinitdata = + "Trying to set NULL cache error exception handler"; + +/* + * Install uncached CPU exception handler. + * This is suitable only for the cache error exception which is the only + * exception handler that is being run uncached. 
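+ *
+ * A typical caller (sketch; the exact vector symbol depends on the
+ * CPU's cache code) installs the cache error vector at offset 0x100:
+ *
+ *     set_uncached_handler(0x100, &except_vec2_generic, 0x80);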
+ */ +void __cpuinit set_uncached_handler(unsigned long offset, void *addr, + unsigned long size) +{ + unsigned long uncached_ebase = CKSEG1ADDR(ebase); + + if (!addr) + panic(panic_null_cerr); + + memcpy((void *)(uncached_ebase + offset), addr, size); +} + +static int __initdata rdhwr_noopt; +static int __init set_rdhwr_noopt(char *str) +{ + rdhwr_noopt = 1; + return 1; +} + +__setup("rdhwr_noopt", set_rdhwr_noopt); + +void __init trap_init(void) +{ + extern char except_vec3_generic, except_vec3_r4000; + extern char except_vec4; + unsigned long i; + int rollback; + + check_wait(); + rollback = (cpu_wait == r4k_wait); + +#if defined(CONFIG_KGDB) + if (kgdb_early_setup) + return; /* Already done */ +#endif + + if (cpu_has_veic || cpu_has_vint) { + unsigned long size = 0x200 + VECTORSPACING*64; + ebase = (unsigned long) + __alloc_bootmem(size, 1 << fls(size), 0); + } else { + ebase = CKSEG0; + if (cpu_has_mips_r2) + ebase += (read_c0_ebase() & 0x3ffff000); + } + + if (board_ebase_setup) + board_ebase_setup(); + per_cpu_trap_init(); + + /* + * Copy the generic exception handlers to their final destination. + * This will be overriden later as suitable for a particular + * configuration. + */ + set_handler(0x180, &except_vec3_generic, 0x80); + + /* + * Setup default vectors + */ + for (i = 0; i <= 31; i++) + set_except_vector(i, handle_reserved); + + /* + * Copy the EJTAG debug exception vector handler code to it's final + * destination. + */ + if (cpu_has_ejtag && board_ejtag_handler_setup) + board_ejtag_handler_setup(); + + /* + * Only some CPUs have the watch exceptions. + */ + if (cpu_has_watch) + set_except_vector(23, handle_watch); + + /* + * Initialise interrupt handlers + */ + if (cpu_has_veic || cpu_has_vint) { + int nvec = cpu_has_veic ? 64 : 8; + for (i = 0; i < nvec; i++) + set_vi_handler(i, NULL); + } + else if (cpu_has_divec) + set_handler(0x200, &except_vec4, 0x8); + + /* + * Some CPUs can enable/disable for cache parity detection, but does + * it different ways. + */ + parity_protection_init(); + + /* + * The Data Bus Errors / Instruction Bus Errors are signaled + * by external hardware. Therefore these two exceptions + * may have board specific handlers. + */ + if (board_be_init) + board_be_init(); + + set_except_vector(0, rollback ? rollback_handle_int : handle_int); + set_except_vector(1, handle_tlbm); + set_except_vector(2, handle_tlbl); + set_except_vector(3, handle_tlbs); + + set_except_vector(4, handle_adel); + set_except_vector(5, handle_ades); + + set_except_vector(6, handle_ibe); + set_except_vector(7, handle_dbe); + + set_except_vector(8, handle_sys); + set_except_vector(9, handle_bp); + set_except_vector(10, rdhwr_noopt ? handle_ri : + (cpu_has_vtag_icache ? + handle_ri_rdhwr_vivt : handle_ri_rdhwr)); + set_except_vector(11, handle_cpu); + set_except_vector(12, handle_ov); + set_except_vector(13, handle_tr); + + if (current_cpu_type() == CPU_R6000 || + current_cpu_type() == CPU_R6000A) { + /* + * The R6000 is the only R-series CPU that features a machine + * check exception (similar to the R4000 cache error) and + * unaligned ldc1/sdc1 exception. The handlers have not been + * written yet. Well, anyway there is no R6000 machine on the + * current list of targets for Linux/MIPS. 
+ * (Duh, crap, there is someone with a triple R6k machine) + */ + //set_except_vector(14, handle_mc); + //set_except_vector(15, handle_ndc); + } + + + if (board_nmi_handler_setup) + board_nmi_handler_setup(); + + if (cpu_has_fpu && !cpu_has_nofpuex) + set_except_vector(15, handle_fpe); + + set_except_vector(22, handle_mdmx); + + if (cpu_has_mcheck) + set_except_vector(24, handle_mcheck); + + if (cpu_has_mipsmt) + set_except_vector(25, handle_mt); + + set_except_vector(26, handle_dsp); + + if (cpu_has_vce) + /* Special exception: R4[04]00 uses also the divec space. */ + memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100); + else if (cpu_has_4kex) + memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80); + else + memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80); + + local_flush_icache_range(ebase, ebase + 0x400); + flush_tlb_handlers(); + + sort_extable(__start___dbe_table, __stop___dbe_table); + + cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ +} diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c new file mode 100644 index 00000000..9c58bdf5 --- /dev/null +++ b/arch/mips/kernel/unaligned.c @@ -0,0 +1,573 @@ +/* + * Handle unaligned accesses by emulation. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + * + * This file contains exception handler for address error exception with the + * special capability to execute faulting instructions in software. The + * handler does not try to handle the case when the program counter points + * to an address not aligned to a word boundary. + * + * Putting data to unaligned addresses is a bad practice even on Intel where + * only the performance is affected. Much worse is that such code is non- + * portable. Due to several programs that die on MIPS due to alignment + * problems I decided to implement this handler anyway though I originally + * didn't intend to do this at all for user code. + * + * For now I enable fixing of address errors by default to make life easier. + * I however intend to disable this somewhen in the future when the alignment + * problems with user programs have been fixed. For programmers this is the + * right way to go. + * + * Fixing address errors is a per process option. The option is inherited + * across fork(2) and execve(2) calls. If you really want to use the + * option in your user programs - I discourage the use of the software + * emulation strongly - use the following code in your userland stuff: + * + * #include <sys/sysmips.h> + * + * ... + * sysmips(MIPS_FIXADE, x); + * ... + * + * The argument x is 0 for disabling software emulation, enabled otherwise. + * + * Below a little program to play around with this feature. + * + * #include <stdio.h> + * #include <sys/sysmips.h> + * + * struct foo { + * unsigned char bar[8]; + * }; + * + * main(int argc, char *argv[]) + * { + * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7}; + * unsigned int *p = (unsigned int *) (x.bar + 3); + * int i; + * + * if (argc > 1) + * sysmips(MIPS_FIXADE, atoi(argv[1])); + * + * printf("*p = %08lx\n", *p); + * + * *p = 0xdeadface; + * + * for(i = 0; i <= 7; i++) + * printf("%02x ", x.bar[i]); + * printf("\n"); + * } + * + * Coprocessor loads are not supported; I think this case is unimportant + * in the practice. 
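+ *
+ * The flow in this file is: the address error exception lands in
+ * do_ade() at the bottom, which (for user programs with TIF_FIXADE
+ * set, or for the kernel itself) decodes the faulting instruction and
+ * hands it to emulate_load_store_insn() for software emulation.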
+ * + * TODO: Handle ndc (attempted store to doubleword in uncached memory) + * exception for the R6000. + * A store crossing a page boundary might be executed only partially. + * Undo the partial store in this case. + */ +#include <linux/mm.h> +#include <linux/signal.h> +#include <linux/smp.h> +#include <linux/sched.h> +#include <linux/debugfs.h> +#include <linux/perf_event.h> + +#include <asm/asm.h> +#include <asm/branch.h> +#include <asm/byteorder.h> +#include <asm/cop2.h> +#include <asm/inst.h> +#include <asm/uaccess.h> + +#define STR(x) __STR(x) +#define __STR(x) #x + +enum { + UNALIGNED_ACTION_QUIET, + UNALIGNED_ACTION_SIGNAL, + UNALIGNED_ACTION_SHOW, +}; +#ifdef CONFIG_DEBUG_FS +static u32 unaligned_instructions; +static u32 unaligned_action; +#else +#define unaligned_action UNALIGNED_ACTION_QUIET +#endif +extern void show_registers(struct pt_regs *regs); + +static void emulate_load_store_insn(struct pt_regs *regs, + void __user *addr, unsigned int __user *pc) +{ + union mips_instruction insn; + unsigned long value; + unsigned int res; + + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); + + /* + * This load never faults. + */ + __get_user(insn.word, pc); + + switch (insn.i_format.opcode) { + /* + * These are instructions that a compiler doesn't generate. We + * can assume therefore that the code is MIPS-aware and + * really buggy. Emulating these instructions would break the + * semantics anyway. + */ + case ll_op: + case lld_op: + case sc_op: + case scd_op: + + /* + * For these instructions the only way to create an address + * error is an attempted access to kernel/supervisor address + * space. + */ + case ldl_op: + case ldr_op: + case lwl_op: + case lwr_op: + case sdl_op: + case sdr_op: + case swl_op: + case swr_op: + case lb_op: + case lbu_op: + case sb_op: + goto sigbus; + + /* + * The remaining opcodes are the ones that are really of interest. 
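+ *
+ * Each case below is a short inline-asm sequence with its own .fixup
+ * and __ex_table entries: if one of the partial accesses faults, the
+ * fixup stub loads -EFAULT into 'res' and execution resumes after the
+ * sequence, so the C code can branch to the 'fault' label instead of
+ * taking an unhandled exception.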
+ */ + case lh_op: + if (!access_ok(VERIFY_READ, addr, 2)) + goto sigbus; + + __asm__ __volatile__ (".set\tnoat\n" +#ifdef __BIG_ENDIAN + "1:\tlb\t%0, 0(%2)\n" + "2:\tlbu\t$1, 1(%2)\n\t" +#endif +#ifdef __LITTLE_ENDIAN + "1:\tlb\t%0, 1(%2)\n" + "2:\tlbu\t$1, 0(%2)\n\t" +#endif + "sll\t%0, 0x8\n\t" + "or\t%0, $1\n\t" + "li\t%1, 0\n" + "3:\t.set\tat\n\t" + ".section\t.fixup,\"ax\"\n\t" + "4:\tli\t%1, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" + ".section\t__ex_table,\"a\"\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" + ".previous" + : "=&r" (value), "=r" (res) + : "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + compute_return_epc(regs); + regs->regs[insn.i_format.rt] = value; + break; + + case lw_op: + if (!access_ok(VERIFY_READ, addr, 4)) + goto sigbus; + + __asm__ __volatile__ ( +#ifdef __BIG_ENDIAN + "1:\tlwl\t%0, (%2)\n" + "2:\tlwr\t%0, 3(%2)\n\t" +#endif +#ifdef __LITTLE_ENDIAN + "1:\tlwl\t%0, 3(%2)\n" + "2:\tlwr\t%0, (%2)\n\t" +#endif + "li\t%1, 0\n" + "3:\t.section\t.fixup,\"ax\"\n\t" + "4:\tli\t%1, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" + ".section\t__ex_table,\"a\"\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" + ".previous" + : "=&r" (value), "=r" (res) + : "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + compute_return_epc(regs); + regs->regs[insn.i_format.rt] = value; + break; + + case lhu_op: + if (!access_ok(VERIFY_READ, addr, 2)) + goto sigbus; + + __asm__ __volatile__ ( + ".set\tnoat\n" +#ifdef __BIG_ENDIAN + "1:\tlbu\t%0, 0(%2)\n" + "2:\tlbu\t$1, 1(%2)\n\t" +#endif +#ifdef __LITTLE_ENDIAN + "1:\tlbu\t%0, 1(%2)\n" + "2:\tlbu\t$1, 0(%2)\n\t" +#endif + "sll\t%0, 0x8\n\t" + "or\t%0, $1\n\t" + "li\t%1, 0\n" + "3:\t.set\tat\n\t" + ".section\t.fixup,\"ax\"\n\t" + "4:\tli\t%1, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" + ".section\t__ex_table,\"a\"\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" + ".previous" + : "=&r" (value), "=r" (res) + : "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + compute_return_epc(regs); + regs->regs[insn.i_format.rt] = value; + break; + + case lwu_op: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. + */ + if (!access_ok(VERIFY_READ, addr, 4)) + goto sigbus; + + __asm__ __volatile__ ( +#ifdef __BIG_ENDIAN + "1:\tlwl\t%0, (%2)\n" + "2:\tlwr\t%0, 3(%2)\n\t" +#endif +#ifdef __LITTLE_ENDIAN + "1:\tlwl\t%0, 3(%2)\n" + "2:\tlwr\t%0, (%2)\n\t" +#endif + "dsll\t%0, %0, 32\n\t" + "dsrl\t%0, %0, 32\n\t" + "li\t%1, 0\n" + "3:\t.section\t.fixup,\"ax\"\n\t" + "4:\tli\t%1, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" + ".section\t__ex_table,\"a\"\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" + ".previous" + : "=&r" (value), "=r" (res) + : "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + compute_return_epc(regs); + regs->regs[insn.i_format.rt] = value; + break; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + + case ld_op: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. 
+ */ + if (!access_ok(VERIFY_READ, addr, 8)) + goto sigbus; + + __asm__ __volatile__ ( +#ifdef __BIG_ENDIAN + "1:\tldl\t%0, (%2)\n" + "2:\tldr\t%0, 7(%2)\n\t" +#endif +#ifdef __LITTLE_ENDIAN + "1:\tldl\t%0, 7(%2)\n" + "2:\tldr\t%0, (%2)\n\t" +#endif + "li\t%1, 0\n" + "3:\t.section\t.fixup,\"ax\"\n\t" + "4:\tli\t%1, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" + ".section\t__ex_table,\"a\"\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" + ".previous" + : "=&r" (value), "=r" (res) + : "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + compute_return_epc(regs); + regs->regs[insn.i_format.rt] = value; + break; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + + case sh_op: + if (!access_ok(VERIFY_WRITE, addr, 2)) + goto sigbus; + + value = regs->regs[insn.i_format.rt]; + __asm__ __volatile__ ( +#ifdef __BIG_ENDIAN + ".set\tnoat\n" + "1:\tsb\t%1, 1(%2)\n\t" + "srl\t$1, %1, 0x8\n" + "2:\tsb\t$1, 0(%2)\n\t" + ".set\tat\n\t" +#endif +#ifdef __LITTLE_ENDIAN + ".set\tnoat\n" + "1:\tsb\t%1, 0(%2)\n\t" + "srl\t$1,%1, 0x8\n" + "2:\tsb\t$1, 1(%2)\n\t" + ".set\tat\n\t" +#endif + "li\t%0, 0\n" + "3:\n\t" + ".section\t.fixup,\"ax\"\n\t" + "4:\tli\t%0, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" + ".section\t__ex_table,\"a\"\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" + ".previous" + : "=r" (res) + : "r" (value), "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + compute_return_epc(regs); + break; + + case sw_op: + if (!access_ok(VERIFY_WRITE, addr, 4)) + goto sigbus; + + value = regs->regs[insn.i_format.rt]; + __asm__ __volatile__ ( +#ifdef __BIG_ENDIAN + "1:\tswl\t%1,(%2)\n" + "2:\tswr\t%1, 3(%2)\n\t" +#endif +#ifdef __LITTLE_ENDIAN + "1:\tswl\t%1, 3(%2)\n" + "2:\tswr\t%1, (%2)\n\t" +#endif + "li\t%0, 0\n" + "3:\n\t" + ".section\t.fixup,\"ax\"\n\t" + "4:\tli\t%0, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" + ".section\t__ex_table,\"a\"\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" + ".previous" + : "=r" (res) + : "r" (value), "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + compute_return_epc(regs); + break; + + case sd_op: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. + */ + if (!access_ok(VERIFY_WRITE, addr, 8)) + goto sigbus; + + value = regs->regs[insn.i_format.rt]; + __asm__ __volatile__ ( +#ifdef __BIG_ENDIAN + "1:\tsdl\t%1,(%2)\n" + "2:\tsdr\t%1, 7(%2)\n\t" +#endif +#ifdef __LITTLE_ENDIAN + "1:\tsdl\t%1, 7(%2)\n" + "2:\tsdr\t%1, (%2)\n\t" +#endif + "li\t%0, 0\n" + "3:\n\t" + ".section\t.fixup,\"ax\"\n\t" + "4:\tli\t%0, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" + ".section\t__ex_table,\"a\"\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" + ".previous" + : "=r" (res) + : "r" (value), "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + compute_return_epc(regs); + break; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + + case lwc1_op: + case ldc1_op: + case swc1_op: + case sdc1_op: + /* + * I herewith declare: this does not happen. So send SIGBUS. + */ + goto sigbus; + + /* + * COP2 is available to implementor for application specific use. + * It's up to applications to register a notifier chain and do + * whatever they have to do, including possible sending of signals. 
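+ *
+ * A platform that actually implements COP2 would typically hook in
+ * with something like the (hypothetical) sketch below, using the
+ * standard notifier callback signature and the CU2_* actions from
+ * <asm/cop2.h>:
+ *
+ *	static int my_cu2_call(struct notifier_block *nb,
+ *	                       unsigned long action, void *data)
+ *	{
+ *		struct pt_regs *regs = data;
+ *
+ *		... emulate or send a signal, depending on 'action' ...
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	cu2_notifier(my_cu2_call, 0);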
+ */ + case lwc2_op: + cu2_notifier_call_chain(CU2_LWC2_OP, regs); + break; + + case ldc2_op: + cu2_notifier_call_chain(CU2_LDC2_OP, regs); + break; + + case swc2_op: + cu2_notifier_call_chain(CU2_SWC2_OP, regs); + break; + + case sdc2_op: + cu2_notifier_call_chain(CU2_SDC2_OP, regs); + break; + + default: + /* + * Pheeee... We encountered an yet unknown instruction or + * cache coherence problem. Die sucker, die ... + */ + goto sigill; + } + +#ifdef CONFIG_DEBUG_FS + unaligned_instructions++; +#endif + + return; + +fault: + /* Did we have an exception handler installed? */ + if (fixup_exception(regs)) + return; + + die_if_kernel("Unhandled kernel unaligned access", regs); + force_sig(SIGSEGV, current); + + return; + +sigbus: + die_if_kernel("Unhandled kernel unaligned access", regs); + force_sig(SIGBUS, current); + + return; + +sigill: + die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs); + force_sig(SIGILL, current); +} + +asmlinkage void do_ade(struct pt_regs *regs) +{ + unsigned int __user *pc; + mm_segment_t seg; + + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, + 1, regs, regs->cp0_badvaddr); + /* + * Did we catch a fault trying to load an instruction? + * Or are we running in MIPS16 mode? + */ + if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1)) + goto sigbus; + + pc = (unsigned int __user *) exception_epc(regs); + if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) + goto sigbus; + if (unaligned_action == UNALIGNED_ACTION_SIGNAL) + goto sigbus; + else if (unaligned_action == UNALIGNED_ACTION_SHOW) + show_registers(regs); + + /* + * Do branch emulation only if we didn't forward the exception. + * This is all so but ugly ... + */ + seg = get_fs(); + if (!user_mode(regs)) + set_fs(KERNEL_DS); + emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc); + set_fs(seg); + + return; + +sigbus: + die_if_kernel("Kernel unaligned instruction access", regs); + force_sig(SIGBUS, current); + + /* + * XXX On return from the signal handler we should advance the epc + */ +} + +#ifdef CONFIG_DEBUG_FS +extern struct dentry *mips_debugfs_dir; +static int __init debugfs_unaligned(void) +{ + struct dentry *d; + + if (!mips_debugfs_dir) + return -ENODEV; + d = debugfs_create_u32("unaligned_instructions", S_IRUGO, + mips_debugfs_dir, &unaligned_instructions); + if (!d) + return -ENOMEM; + d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR, + mips_debugfs_dir, &unaligned_action); + if (!d) + return -ENOMEM; + return 0; +} +__initcall(debugfs_unaligned); +#endif diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c new file mode 100644 index 00000000..0f1af58b --- /dev/null +++ b/arch/mips/kernel/vdso.c @@ -0,0 +1,109 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2009, 2010 Cavium Networks, Inc. + */ + + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/binfmts.h> +#include <linux/elf.h> +#include <linux/vmalloc.h> +#include <linux/unistd.h> + +#include <asm/vdso.h> +#include <asm/uasm.h> + +/* + * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... 
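+ * so the O32 and N32 sigreturn numbers that a 64-bit kernel must also
+ * be able to plant in the vDSO are spelled out by hand below.
+ * install_trampoline() turns each of them into the usual
+ * two-instruction signal return stub, "li v0, <nr>; syscall".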
+ */ +#define __NR_O32_sigreturn 4119 +#define __NR_O32_rt_sigreturn 4193 +#define __NR_N32_rt_sigreturn 6211 + +static struct page *vdso_page; + +static void __init install_trampoline(u32 *tramp, unsigned int sigreturn) +{ + uasm_i_addiu(&tramp, 2, 0, sigreturn); /* li v0, sigreturn */ + uasm_i_syscall(&tramp, 0); +} + +static int __init init_vdso(void) +{ + struct mips_vdso *vdso; + + vdso_page = alloc_page(GFP_KERNEL); + if (!vdso_page) + panic("Cannot allocate vdso"); + + vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL); + if (!vdso) + panic("Cannot map vdso"); + clear_page(vdso); + + install_trampoline(vdso->rt_signal_trampoline, __NR_rt_sigreturn); +#ifdef CONFIG_32BIT + install_trampoline(vdso->signal_trampoline, __NR_sigreturn); +#else + install_trampoline(vdso->n32_rt_signal_trampoline, + __NR_N32_rt_sigreturn); + install_trampoline(vdso->o32_signal_trampoline, __NR_O32_sigreturn); + install_trampoline(vdso->o32_rt_signal_trampoline, + __NR_O32_rt_sigreturn); +#endif + + vunmap(vdso); + + return 0; +} +subsys_initcall(init_vdso); + +static unsigned long vdso_addr(unsigned long start) +{ + return STACK_TOP; +} + +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +{ + int ret; + unsigned long addr; + struct mm_struct *mm = current->mm; + + down_write(&mm->mmap_sem); + + addr = vdso_addr(mm->start_stack); + + addr = get_unmapped_area(NULL, addr, PAGE_SIZE, 0, 0); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + + ret = install_special_mapping(mm, addr, PAGE_SIZE, + VM_READ|VM_EXEC| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + &vdso_page); + + if (ret) + goto up_fail; + + mm->context.vdso = (void *)addr; + +up_fail: + up_write(&mm->mmap_sem); + return ret; +} + +const char *arch_vma_name(struct vm_area_struct *vma) +{ + if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) + return "[vdso]"; + return NULL; +} diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S new file mode 100644 index 00000000..924da5eb --- /dev/null +++ b/arch/mips/kernel/vmlinux.lds.S @@ -0,0 +1,164 @@ +#include <asm/asm-offsets.h> +#include <asm/page.h> +#include <asm-generic/vmlinux.lds.h> + +#undef mips +#define mips mips +OUTPUT_ARCH(mips) +ENTRY(kernel_entry) +PHDRS { + text PT_LOAD FLAGS(7); /* RWX */ + note PT_NOTE FLAGS(4); /* R__ */ +} + +#ifdef CONFIG_32BIT + #ifdef CONFIG_CPU_LITTLE_ENDIAN + jiffies = jiffies_64; + #else + jiffies = jiffies_64 + 4; + #endif +#else + jiffies = jiffies_64; +#endif + +SECTIONS +{ +#ifdef CONFIG_BOOT_ELF64 + /* Read-only sections, merged into text segment: */ + /* . = 0xc000000000000000; */ + + /* This is the value for an Origin kernel, taken from an IRIX kernel. */ + /* . = 0xc00000000001c000; */ + + /* Set the vaddr for the text segment to a value + * >= 0xa800 0000 0001 9000 if no symmon is going to configured + * >= 0xa800 0000 0030 0000 otherwise + */ + + /* . = 0xa800000000300000; */ + . = 0xffffffff80300000; +#endif + . = VMLINUX_LOAD_ADDRESS; + /* read-only */ + _text = .; /* Text and read-only data */ + .text : { + TEXT_TEXT + SCHED_TEXT + LOCK_TEXT + KPROBES_TEXT + IRQENTRY_TEXT + *(.text.*) + *(.fixup) + *(.gnu.warning) + } :text = 0 + _etext = .; /* End of text section */ + + EXCEPTION_TABLE(16) + + /* Exception table for data bus errors */ + __dbe_table : { + __start___dbe_table = .; + *(__dbe_table) + __stop___dbe_table = .; + } + + NOTES :text :note + .dummy : { *(.dummy) } :text + + _sdata = .; /* Start of data section */ + RODATA + + /* writeable */ + .data : { /* Data */ + . = . 
+ DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ + + INIT_TASK_DATA(PAGE_SIZE) + NOSAVE_DATA + CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) + READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) + DATA_DATA + CONSTRUCTORS + } + _gp = . + 0x8000; + .lit8 : { + *(.lit8) + } + .lit4 : { + *(.lit4) + } + /* We want the small data sections together, so single-instruction offsets + can access them all, and initialized data all before uninitialized, so + we can shorten the on-disk segment size. */ + .sdata : { + *(.sdata) + } + _edata = .; /* End of data section */ + + /* will be freed after init */ + . = ALIGN(PAGE_SIZE); /* Init code and data */ + __init_begin = .; + INIT_TEXT_SECTION(PAGE_SIZE) + INIT_DATA_SECTION(16) + + . = ALIGN(4); + .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) { + __mips_machines_start = .; + *(.mips.machines.init) + __mips_machines_end = .; + } + + /* .exit.text is discarded at runtime, not link time, to deal with + * references from .rodata + */ + .exit.text : { + EXIT_TEXT + } + .exit.data : { + EXIT_DATA + } + + PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT) + . = ALIGN(PAGE_SIZE); + __init_end = .; + /* freed after init ends here */ + + BSS_SECTION(0, 0, 0) + + _end = . ; + + /* These mark the ABI of the kernel for debuggers. */ + .mdebug.abi32 : { + KEEP(*(.mdebug.abi32)) + } + .mdebug.abi64 : { + KEEP(*(.mdebug.abi64)) + } + + /* This is the MIPS specific mdebug section. */ + .mdebug : { + *(.mdebug) + } + + STABS_DEBUG + DWARF_DEBUG + + /* These must appear regardless of . */ + .gptab.sdata : { + *(.gptab.data) + *(.gptab.sdata) + } + .gptab.sbss : { + *(.gptab.bss) + *(.gptab.sbss) + } + + /* Sections to be discarded */ + DISCARDS + /DISCARD/ : { + /* ABI crap starts here */ + *(.MIPS.options) + *(.options) + *(.pdr) + *(.reginfo) + } +} diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c new file mode 100644 index 00000000..f6f91523 --- /dev/null +++ b/arch/mips/kernel/vpe.c @@ -0,0 +1,1620 @@ +/* + * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + */ + +/* + * VPE support module + * + * Provides support for loading a MIPS SP program on VPE1. + * The SP environment is rather simple, no tlb's. It needs to be relocatable + * (or partially linked). You should initialise your stack in the startup + * code. This loader looks for the symbol __start and sets up + * execution to resume from there. The MIPS SDE kit contains suitable examples. + * + * To load and run, simply cat a SP 'program file' to /dev/vpe1. + * i.e cat spapp >/dev/vpe1. 
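+ *
+ * The loader also registers a sysfs class, so a running program can be
+ * stopped and the number of TCs handed to VPE1 tuned from userspace,
+ * for example (sketch; the exact paths depend on how sysfs is mounted):
+ *
+ *	echo 1 > /sys/class/vpe/vpe1/kill
+ *	echo 2 > /sys/class/vpe/vpe1/ntcs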
+ */ +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <asm/uaccess.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/vmalloc.h> +#include <linux/elf.h> +#include <linux/seq_file.h> +#include <linux/syscalls.h> +#include <linux/moduleloader.h> +#include <linux/interrupt.h> +#include <linux/poll.h> +#include <linux/bootmem.h> +#include <asm/mipsregs.h> +#include <asm/mipsmtregs.h> +#include <asm/cacheflush.h> +#include <linux/atomic.h> +#include <asm/cpu.h> +#include <asm/mips_mt.h> +#include <asm/processor.h> +#include <asm/vpe.h> +#include <asm/kspd.h> + +typedef void *vpe_handle; + +#ifndef ARCH_SHF_SMALL +#define ARCH_SHF_SMALL 0 +#endif + +/* If this is set, the section belongs in the init part of the module */ +#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) + +/* + * The number of TCs and VPEs physically available on the core + */ +static int hw_tcs, hw_vpes; +static char module_name[] = "vpe"; +static int major; +static const int minor = 1; /* fixed for now */ + +#ifdef CONFIG_MIPS_APSP_KSPD +static struct kspd_notifications kspd_events; +static int kspd_events_reqd; +#endif + +/* grab the likely amount of memory we will need. */ +#ifdef CONFIG_MIPS_VPE_LOADER_TOM +#define P_SIZE (2 * 1024 * 1024) +#else +/* add an overhead to the max kmalloc size for non-striped symbols/etc */ +#define P_SIZE (256 * 1024) +#endif + +extern unsigned long physical_memsize; + +#define MAX_VPES 16 +#define VPE_PATH_MAX 256 + +enum vpe_state { + VPE_STATE_UNUSED = 0, + VPE_STATE_INUSE, + VPE_STATE_RUNNING +}; + +enum tc_state { + TC_STATE_UNUSED = 0, + TC_STATE_INUSE, + TC_STATE_RUNNING, + TC_STATE_DYNAMIC +}; + +struct vpe { + enum vpe_state state; + + /* (device) minor associated with this vpe */ + int minor; + + /* elfloader stuff */ + void *load_addr; + unsigned long len; + char *pbuffer; + unsigned long plen; + unsigned int uid, gid; + char cwd[VPE_PATH_MAX]; + + unsigned long __start; + + /* tc's associated with this vpe */ + struct list_head tc; + + /* The list of vpe's */ + struct list_head list; + + /* shared symbol address */ + void *shared_ptr; + + /* the list of who wants to know when something major happens */ + struct list_head notify; + + unsigned int ntcs; +}; + +struct tc { + enum tc_state state; + int index; + + struct vpe *pvpe; /* parent VPE */ + struct list_head tc; /* The list of TC's with this VPE */ + struct list_head list; /* The global list of tc's */ +}; + +struct { + spinlock_t vpe_list_lock; + struct list_head vpe_list; /* Virtual processing elements */ + spinlock_t tc_list_lock; + struct list_head tc_list; /* Thread contexts */ +} vpecontrol = { + .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock), + .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), + .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock), + .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) +}; + +static void release_progmem(void *ptr); + +/* get the vpe associated with this minor */ +static struct vpe *get_vpe(int minor) +{ + struct vpe *res, *v; + + if (!cpu_has_mipsmt) + return NULL; + + res = NULL; + spin_lock(&vpecontrol.vpe_list_lock); + list_for_each_entry(v, &vpecontrol.vpe_list, list) { + if (v->minor == minor) { + res = v; + break; + } + } + spin_unlock(&vpecontrol.vpe_list_lock); + + return res; +} + +/* get the vpe associated with this minor */ +static struct tc *get_tc(int index) +{ + struct tc *res, *t; + + res = NULL; + spin_lock(&vpecontrol.tc_list_lock); + list_for_each_entry(t, &vpecontrol.tc_list, list) { + if 
(t->index == index) { + res = t; + break; + } + } + spin_unlock(&vpecontrol.tc_list_lock); + + return res; +} + +/* allocate a vpe and associate it with this minor (or index) */ +static struct vpe *alloc_vpe(int minor) +{ + struct vpe *v; + + if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) + return NULL; + + INIT_LIST_HEAD(&v->tc); + spin_lock(&vpecontrol.vpe_list_lock); + list_add_tail(&v->list, &vpecontrol.vpe_list); + spin_unlock(&vpecontrol.vpe_list_lock); + + INIT_LIST_HEAD(&v->notify); + v->minor = minor; + + return v; +} + +/* allocate a tc. At startup only tc0 is running, all other can be halted. */ +static struct tc *alloc_tc(int index) +{ + struct tc *tc; + + if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) + goto out; + + INIT_LIST_HEAD(&tc->tc); + tc->index = index; + + spin_lock(&vpecontrol.tc_list_lock); + list_add_tail(&tc->list, &vpecontrol.tc_list); + spin_unlock(&vpecontrol.tc_list_lock); + +out: + return tc; +} + +/* clean up and free everything */ +static void release_vpe(struct vpe *v) +{ + list_del(&v->list); + if (v->load_addr) + release_progmem(v); + kfree(v); +} + +static void __maybe_unused dump_mtregs(void) +{ + unsigned long val; + + val = read_c0_config3(); + printk("config3 0x%lx MT %ld\n", val, + (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT); + + val = read_c0_mvpcontrol(); + printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val, + (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT, + (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT, + (val & MVPCONTROL_EVP)); + + val = read_c0_mvpconf0(); + printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val, + (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT, + val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT); +} + +/* Find some VPE program space */ +static void *alloc_progmem(unsigned long len) +{ + void *addr; + +#ifdef CONFIG_MIPS_VPE_LOADER_TOM + /* + * This means you must tell Linux to use less memory than you + * physically have, for example by passing a mem= boot argument. + */ + addr = pfn_to_kaddr(max_low_pfn); + memset(addr, 0, len); +#else + /* simple grab some mem for now */ + addr = kzalloc(len, GFP_KERNEL); +#endif + + return addr; +} + +static void release_progmem(void *ptr) +{ +#ifndef CONFIG_MIPS_VPE_LOADER_TOM + kfree(ptr); +#endif +} + +/* Update size with this section: return offset. */ +static long get_offset(unsigned long *size, Elf_Shdr * sechdr) +{ + long ret; + + ret = ALIGN(*size, sechdr->sh_addralign ? : 1); + *size = ret + sechdr->sh_size; + return ret; +} + +/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld + might -- code, read-only data, read-write data, small data. Tally + sizes, and place the offsets into sh_entsize fields: high bit means it + belongs in init. 
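+   (This is a trimmed copy of the module loader's layout_sections(); in
+   this copy every section is laid out into the core area, so only the
+   core_size and core_text_size fields of the dummy struct module are
+   used.)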
*/ +static void layout_sections(struct module *mod, const Elf_Ehdr * hdr, + Elf_Shdr * sechdrs, const char *secstrings) +{ + static unsigned long const masks[][2] = { + /* NOTE: all executable code must be the first section + * in this array; otherwise modify the text_size + * finder in the two loops below */ + {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL}, + {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL}, + {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL}, + {ARCH_SHF_SMALL | SHF_ALLOC, 0} + }; + unsigned int m, i; + + for (i = 0; i < hdr->e_shnum; i++) + sechdrs[i].sh_entsize = ~0UL; + + for (m = 0; m < ARRAY_SIZE(masks); ++m) { + for (i = 0; i < hdr->e_shnum; ++i) { + Elf_Shdr *s = &sechdrs[i]; + + // || strncmp(secstrings + s->sh_name, ".init", 5) == 0) + if ((s->sh_flags & masks[m][0]) != masks[m][0] + || (s->sh_flags & masks[m][1]) + || s->sh_entsize != ~0UL) + continue; + s->sh_entsize = + get_offset((unsigned long *)&mod->core_size, s); + } + + if (m == 0) + mod->core_text_size = mod->core_size; + + } +} + + +/* from module-elf32.c, but subverted a little */ + +struct mips_hi16 { + struct mips_hi16 *next; + Elf32_Addr *addr; + Elf32_Addr value; +}; + +static struct mips_hi16 *mips_hi16_list; +static unsigned int gp_offs, gp_addr; + +static int apply_r_mips_none(struct module *me, uint32_t *location, + Elf32_Addr v) +{ + return 0; +} + +static int apply_r_mips_gprel16(struct module *me, uint32_t *location, + Elf32_Addr v) +{ + int rel; + + if( !(*location & 0xffff) ) { + rel = (int)v - gp_addr; + } + else { + /* .sbss + gp(relative) + offset */ + /* kludge! */ + rel = (int)(short)((int)v + gp_offs + + (int)(short)(*location & 0xffff) - gp_addr); + } + + if( (rel > 32768) || (rel < -32768) ) { + printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: " + "relative address 0x%x out of range of gp register\n", + rel); + return -ENOEXEC; + } + + *location = (*location & 0xffff0000) | (rel & 0xffff); + + return 0; +} + +static int apply_r_mips_pc16(struct module *me, uint32_t *location, + Elf32_Addr v) +{ + int rel; + rel = (((unsigned int)v - (unsigned int)location)); + rel >>= 2; // because the offset is in _instructions_ not bytes. + rel -= 1; // and one instruction less due to the branch delay slot. + + if( (rel > 32768) || (rel < -32768) ) { + printk(KERN_DEBUG "VPE loader: " + "apply_r_mips_pc16: relative address out of range 0x%x\n", rel); + return -ENOEXEC; + } + + *location = (*location & 0xffff0000) | (rel & 0xffff); + + return 0; +} + +static int apply_r_mips_32(struct module *me, uint32_t *location, + Elf32_Addr v) +{ + *location += v; + + return 0; +} + +static int apply_r_mips_26(struct module *me, uint32_t *location, + Elf32_Addr v) +{ + if (v % 4) { + printk(KERN_DEBUG "VPE loader: apply_r_mips_26 " + " unaligned relocation\n"); + return -ENOEXEC; + } + +/* + * Not desperately convinced this is a good check of an overflow condition + * anyway. But it gets in the way of handling undefined weak symbols which + * we want to set to zero. + * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { + * printk(KERN_ERR + * "module %s: relocation overflow\n", + * me->name); + * return -ENOEXEC; + * } + */ + + *location = (*location & ~0x03ffffff) | + ((*location + (v >> 2)) & 0x03ffffff); + return 0; +} + +static int apply_r_mips_hi16(struct module *me, uint32_t *location, + Elf32_Addr v) +{ + struct mips_hi16 *n; + + /* + * We cannot relocate this one now because we don't know the value of + * the carry we need to add. Save the information, and let LO16 do the + * actual relocation. 
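+ *
+ * Worked example: for a target address of 0x12348000 the lui has to
+ * load 0x1235, not 0x1234, because the following addiu/lw sign-extends
+ * its 0x8000 offset to -0x8000; apply_r_mips_lo16() adds exactly this
+ * carry via its '((val & 0x8000) != 0)' term.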
+ */ + n = kmalloc(sizeof *n, GFP_KERNEL); + if (!n) + return -ENOMEM; + + n->addr = location; + n->value = v; + n->next = mips_hi16_list; + mips_hi16_list = n; + + return 0; +} + +static int apply_r_mips_lo16(struct module *me, uint32_t *location, + Elf32_Addr v) +{ + unsigned long insnlo = *location; + Elf32_Addr val, vallo; + struct mips_hi16 *l, *next; + + /* Sign extend the addend we extract from the lo insn. */ + vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; + + if (mips_hi16_list != NULL) { + + l = mips_hi16_list; + while (l != NULL) { + unsigned long insn; + + /* + * The value for the HI16 had best be the same. + */ + if (v != l->value) { + printk(KERN_DEBUG "VPE loader: " + "apply_r_mips_lo16/hi16: \t" + "inconsistent value information\n"); + goto out_free; + } + + /* + * Do the HI16 relocation. Note that we actually don't + * need to know anything about the LO16 itself, except + * where to find the low 16 bits of the addend needed + * by the LO16. + */ + insn = *l->addr; + val = ((insn & 0xffff) << 16) + vallo; + val += v; + + /* + * Account for the sign extension that will happen in + * the low bits. + */ + val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff; + + insn = (insn & ~0xffff) | val; + *l->addr = insn; + + next = l->next; + kfree(l); + l = next; + } + + mips_hi16_list = NULL; + } + + /* + * Ok, we're done with the HI16 relocs. Now deal with the LO16. + */ + val = v + vallo; + insnlo = (insnlo & ~0xffff) | (val & 0xffff); + *location = insnlo; + + return 0; + +out_free: + while (l != NULL) { + next = l->next; + kfree(l); + l = next; + } + mips_hi16_list = NULL; + + return -ENOEXEC; +} + +static int (*reloc_handlers[]) (struct module *me, uint32_t *location, + Elf32_Addr v) = { + [R_MIPS_NONE] = apply_r_mips_none, + [R_MIPS_32] = apply_r_mips_32, + [R_MIPS_26] = apply_r_mips_26, + [R_MIPS_HI16] = apply_r_mips_hi16, + [R_MIPS_LO16] = apply_r_mips_lo16, + [R_MIPS_GPREL16] = apply_r_mips_gprel16, + [R_MIPS_PC16] = apply_r_mips_pc16 +}; + +static char *rstrs[] = { + [R_MIPS_NONE] = "MIPS_NONE", + [R_MIPS_32] = "MIPS_32", + [R_MIPS_26] = "MIPS_26", + [R_MIPS_HI16] = "MIPS_HI16", + [R_MIPS_LO16] = "MIPS_LO16", + [R_MIPS_GPREL16] = "MIPS_GPREL16", + [R_MIPS_PC16] = "MIPS_PC16" +}; + +static int apply_relocations(Elf32_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *me) +{ + Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr; + Elf32_Sym *sym; + uint32_t *location; + unsigned int i; + Elf32_Addr v; + int res; + + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + Elf32_Word r_info = rel[i].r_info; + + /* This is where to make the change */ + location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + /* This is the symbol it is referring to */ + sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + + ELF32_R_SYM(r_info); + + if (!sym->st_value) { + printk(KERN_DEBUG "%s: undefined weak symbol %s\n", + me->name, strtab + sym->st_name); + /* just print the warning, dont barf */ + } + + v = sym->st_value; + + res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v); + if( res ) { + char *r = rstrs[ELF32_R_TYPE(r_info)]; + printk(KERN_WARNING "VPE loader: .text+0x%x " + "relocation type %s for symbol \"%s\" failed\n", + rel[i].r_offset, r ? 
r : "UNKNOWN", + strtab + sym->st_name); + return res; + } + } + + return 0; +} + +static inline void save_gp_address(unsigned int secbase, unsigned int rel) +{ + gp_addr = secbase + rel; + gp_offs = gp_addr - (secbase & 0xffff0000); +} +/* end module-elf32.c */ + + + +/* Change all symbols so that sh_value encodes the pointer directly. */ +static void simplify_symbols(Elf_Shdr * sechdrs, + unsigned int symindex, + const char *strtab, + const char *secstrings, + unsigned int nsecs, struct module *mod) +{ + Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; + unsigned long secbase, bssbase = 0; + unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); + int size; + + /* find the .bss section for COMMON symbols */ + for (i = 0; i < nsecs; i++) { + if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) { + bssbase = sechdrs[i].sh_addr; + break; + } + } + + for (i = 1; i < n; i++) { + switch (sym[i].st_shndx) { + case SHN_COMMON: + /* Allocate space for the symbol in the .bss section. + st_value is currently size. + We want it to have the address of the symbol. */ + + size = sym[i].st_value; + sym[i].st_value = bssbase; + + bssbase += size; + break; + + case SHN_ABS: + /* Don't need to do anything */ + break; + + case SHN_UNDEF: + /* ret = -ENOENT; */ + break; + + case SHN_MIPS_SCOMMON: + printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON " + "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name, + sym[i].st_shndx); + // .sbss section + break; + + default: + secbase = sechdrs[sym[i].st_shndx].sh_addr; + + if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) { + save_gp_address(secbase, sym[i].st_value); + } + + sym[i].st_value += secbase; + break; + } + } +} + +#ifdef DEBUG_ELFLOADER +static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex, + const char *strtab, struct module *mod) +{ + Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; + unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); + + printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n); + for (i = 1; i < n; i++) { + printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i, + strtab + sym[i].st_name, sym[i].st_value); + } +} +#endif + +/* We are prepared so configure and start the VPE... 
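+   Roughly: check that we are running on the master VPE, enter MT
+   configuration state, point the chosen TC's restart address at the
+   program's __start symbol, mark the TC activated, bind it to VPE1,
+   enable the VPE and leave configuration state again.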
*/ +static int vpe_run(struct vpe * v) +{ + unsigned long flags, val, dmt_flag; + struct vpe_notifications *n; + unsigned int vpeflags; + struct tc *t; + + /* check we are the Master VPE */ + local_irq_save(flags); + val = read_c0_vpeconf0(); + if (!(val & VPECONF0_MVP)) { + printk(KERN_WARNING + "VPE loader: only Master VPE's are allowed to configure MT\n"); + local_irq_restore(flags); + + return -1; + } + + dmt_flag = dmt(); + vpeflags = dvpe(); + + if (!list_empty(&v->tc)) { + if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { + evpe(vpeflags); + emt(dmt_flag); + local_irq_restore(flags); + + printk(KERN_WARNING + "VPE loader: TC %d is already in use.\n", + t->index); + return -ENOEXEC; + } + } else { + evpe(vpeflags); + emt(dmt_flag); + local_irq_restore(flags); + + printk(KERN_WARNING + "VPE loader: No TC's associated with VPE %d\n", + v->minor); + + return -ENOEXEC; + } + + /* Put MVPE's into 'configuration state' */ + set_c0_mvpcontrol(MVPCONTROL_VPC); + + settc(t->index); + + /* should check it is halted, and not activated */ + if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) { + evpe(vpeflags); + emt(dmt_flag); + local_irq_restore(flags); + + printk(KERN_WARNING "VPE loader: TC %d is already active!\n", + t->index); + + return -ENOEXEC; + } + + /* Write the address we want it to start running from in the TCPC register. */ + write_tc_c0_tcrestart((unsigned long)v->__start); + write_tc_c0_tccontext((unsigned long)0); + + /* + * Mark the TC as activated, not interrupt exempt and not dynamically + * allocatable + */ + val = read_tc_c0_tcstatus(); + val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A; + write_tc_c0_tcstatus(val); + + write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); + + /* + * The sde-kit passes 'memsize' to __start in $a3, so set something + * here... Or set $a3 to zero and define DFLT_STACK_SIZE and + * DFLT_HEAP_SIZE when you compile your program + */ + mttgpr(6, v->ntcs); + mttgpr(7, physical_memsize); + + /* set up VPE1 */ + /* + * bind the TC to VPE 1 as late as possible so we only have the final + * VPE registers to set up, and so an EJTAG probe can trigger on it + */ + write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1); + + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA)); + + back_to_back_c0_hazard(); + + /* Set up the XTC bit in vpeconf0 to point at our tc */ + write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC)) + | (t->index << VPECONF0_XTC_SHIFT)); + + back_to_back_c0_hazard(); + + /* enable this VPE */ + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); + + /* clear out any left overs from a previous program */ + write_vpe_c0_status(0); + write_vpe_c0_cause(0); + + /* take system out of configuration state */ + clear_c0_mvpcontrol(MVPCONTROL_VPC); + + /* + * SMTC/SMVP kernels manage VPE enable independently, + * but uniprocessor kernels need to turn it on, even + * if that wasn't the pre-dvpe() state. 
+ */ +#ifdef CONFIG_SMP + evpe(vpeflags); +#else + evpe(EVPE_ENABLE); +#endif + emt(dmt_flag); + local_irq_restore(flags); + + list_for_each_entry(n, &v->notify, list) + n->start(minor); + + return 0; +} + +static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs, + unsigned int symindex, const char *strtab, + struct module *mod) +{ + Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; + unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); + + for (i = 1; i < n; i++) { + if (strcmp(strtab + sym[i].st_name, "__start") == 0) { + v->__start = sym[i].st_value; + } + + if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) { + v->shared_ptr = (void *)sym[i].st_value; + } + } + + if ( (v->__start == 0) || (v->shared_ptr == NULL)) + return -1; + + return 0; +} + +/* + * Allocates a VPE with some program code space(the load address), copies the + * contents of the program (p)buffer performing relocatations/etc, free's it + * when finished. + */ +static int vpe_elfload(struct vpe * v) +{ + Elf_Ehdr *hdr; + Elf_Shdr *sechdrs; + long err = 0; + char *secstrings, *strtab = NULL; + unsigned int len, i, symindex = 0, strindex = 0, relocate = 0; + struct module mod; // so we can re-use the relocations code + + memset(&mod, 0, sizeof(struct module)); + strcpy(mod.name, "VPE loader"); + + hdr = (Elf_Ehdr *) v->pbuffer; + len = v->plen; + + /* Sanity checks against insmoding binaries or wrong arch, + weird elf version */ + if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0 + || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC) + || !elf_check_arch(hdr) + || hdr->e_shentsize != sizeof(*sechdrs)) { + printk(KERN_WARNING + "VPE loader: program wrong arch or weird elf version\n"); + + return -ENOEXEC; + } + + if (hdr->e_type == ET_REL) + relocate = 1; + + if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) { + printk(KERN_ERR "VPE loader: program length %u truncated\n", + len); + + return -ENOEXEC; + } + + /* Convenience variables */ + sechdrs = (void *)hdr + hdr->e_shoff; + secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + sechdrs[0].sh_addr = 0; + + /* And these should exist, but gcc whinges if we don't init them */ + symindex = strindex = 0; + + if (relocate) { + for (i = 1; i < hdr->e_shnum; i++) { + if (sechdrs[i].sh_type != SHT_NOBITS + && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) { + printk(KERN_ERR "VPE program length %u truncated\n", + len); + return -ENOEXEC; + } + + /* Mark all sections sh_addr with their address in the + temporary image. */ + sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; + + /* Internal symbols and strings. */ + if (sechdrs[i].sh_type == SHT_SYMTAB) { + symindex = i; + strindex = sechdrs[i].sh_link; + strtab = (char *)hdr + sechdrs[strindex].sh_offset; + } + } + layout_sections(&mod, hdr, sechdrs, secstrings); + } + + v->load_addr = alloc_progmem(mod.core_size); + if (!v->load_addr) + return -ENOMEM; + + pr_info("VPE loader: loading to %p\n", v->load_addr); + + if (relocate) { + for (i = 0; i < hdr->e_shnum; i++) { + void *dest; + + if (!(sechdrs[i].sh_flags & SHF_ALLOC)) + continue; + + dest = v->load_addr + sechdrs[i].sh_entsize; + + if (sechdrs[i].sh_type != SHT_NOBITS) + memcpy(dest, (void *)sechdrs[i].sh_addr, + sechdrs[i].sh_size); + /* Update sh_addr to point to copy in image. */ + sechdrs[i].sh_addr = (unsigned long)dest; + + printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n", + secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr); + } + + /* Fix up syms, so that st_value is a pointer to location. 
*/ + simplify_symbols(sechdrs, symindex, strtab, secstrings, + hdr->e_shnum, &mod); + + /* Now do relocations. */ + for (i = 1; i < hdr->e_shnum; i++) { + const char *strtab = (char *)sechdrs[strindex].sh_addr; + unsigned int info = sechdrs[i].sh_info; + + /* Not a valid relocation section? */ + if (info >= hdr->e_shnum) + continue; + + /* Don't bother with non-allocated sections */ + if (!(sechdrs[info].sh_flags & SHF_ALLOC)) + continue; + + if (sechdrs[i].sh_type == SHT_REL) + err = apply_relocations(sechdrs, strtab, symindex, i, + &mod); + else if (sechdrs[i].sh_type == SHT_RELA) + err = apply_relocate_add(sechdrs, strtab, symindex, i, + &mod); + if (err < 0) + return err; + + } + } else { + struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff); + + for (i = 0; i < hdr->e_phnum; i++) { + if (phdr->p_type == PT_LOAD) { + memcpy((void *)phdr->p_paddr, + (char *)hdr + phdr->p_offset, + phdr->p_filesz); + memset((void *)phdr->p_paddr + phdr->p_filesz, + 0, phdr->p_memsz - phdr->p_filesz); + } + phdr++; + } + + for (i = 0; i < hdr->e_shnum; i++) { + /* Internal symbols and strings. */ + if (sechdrs[i].sh_type == SHT_SYMTAB) { + symindex = i; + strindex = sechdrs[i].sh_link; + strtab = (char *)hdr + sechdrs[strindex].sh_offset; + + /* mark the symtab's address for when we try to find the + magic symbols */ + sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; + } + } + } + + /* make sure it's physically written out */ + flush_icache_range((unsigned long)v->load_addr, + (unsigned long)v->load_addr + v->len); + + if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) { + if (v->__start == 0) { + printk(KERN_WARNING "VPE loader: program does not contain " + "a __start symbol\n"); + return -ENOEXEC; + } + + if (v->shared_ptr == NULL) + printk(KERN_WARNING "VPE loader: " + "program does not contain vpe_shared symbol.\n" + " Unable to use AMVP (AP/SP) facilities.\n"); + } + + printk(" elf loaded\n"); + return 0; +} + +static void cleanup_tc(struct tc *tc) +{ + unsigned long flags; + unsigned int mtflags, vpflags; + int tmp; + + local_irq_save(flags); + mtflags = dmt(); + vpflags = dvpe(); + /* Put MVPE's into 'configuration state' */ + set_c0_mvpcontrol(MVPCONTROL_VPC); + + settc(tc->index); + tmp = read_tc_c0_tcstatus(); + + /* mark not allocated and not dynamically allocatable */ + tmp &= ~(TCSTATUS_A | TCSTATUS_DA); + tmp |= TCSTATUS_IXMT; /* interrupt exempt */ + write_tc_c0_tcstatus(tmp); + + write_tc_c0_tchalt(TCHALT_H); + mips_ihb(); + + /* bind it to anything other than VPE1 */ +// write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE + + clear_c0_mvpcontrol(MVPCONTROL_VPC); + evpe(vpflags); + emt(mtflags); + local_irq_restore(flags); +} + +static int getcwd(char *buff, int size) +{ + mm_segment_t old_fs; + int ret; + + old_fs = get_fs(); + set_fs(KERNEL_DS); + + ret = sys_getcwd(buff, size); + + set_fs(old_fs); + + return ret; +} + +/* checks VPE is unused and gets ready to load program */ +static int vpe_open(struct inode *inode, struct file *filp) +{ + enum vpe_state state; + struct vpe_notifications *not; + struct vpe *v; + int ret; + + if (minor != iminor(inode)) { + /* assume only 1 device at the moment. 
*/ + pr_warning("VPE loader: only vpe1 is supported\n"); + + return -ENODEV; + } + + if ((v = get_vpe(tclimit)) == NULL) { + pr_warning("VPE loader: unable to get vpe\n"); + + return -ENODEV; + } + + state = xchg(&v->state, VPE_STATE_INUSE); + if (state != VPE_STATE_UNUSED) { + printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n"); + + list_for_each_entry(not, &v->notify, list) { + not->stop(tclimit); + } + + release_progmem(v->load_addr); + cleanup_tc(get_tc(tclimit)); + } + + /* this of-course trashes what was there before... */ + v->pbuffer = vmalloc(P_SIZE); + if (!v->pbuffer) { + pr_warning("VPE loader: unable to allocate memory\n"); + return -ENOMEM; + } + v->plen = P_SIZE; + v->load_addr = NULL; + v->len = 0; + + v->uid = filp->f_cred->fsuid; + v->gid = filp->f_cred->fsgid; + +#ifdef CONFIG_MIPS_APSP_KSPD + /* get kspd to tell us when a syscall_exit happens */ + if (!kspd_events_reqd) { + kspd_notify(&kspd_events); + kspd_events_reqd++; + } +#endif + + v->cwd[0] = 0; + ret = getcwd(v->cwd, VPE_PATH_MAX); + if (ret < 0) + printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret); + + v->shared_ptr = NULL; + v->__start = 0; + + return 0; +} + +static int vpe_release(struct inode *inode, struct file *filp) +{ + struct vpe *v; + Elf_Ehdr *hdr; + int ret = 0; + + v = get_vpe(tclimit); + if (v == NULL) + return -ENODEV; + + hdr = (Elf_Ehdr *) v->pbuffer; + if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) { + if (vpe_elfload(v) >= 0) { + vpe_run(v); + } else { + printk(KERN_WARNING "VPE loader: ELF load failed.\n"); + ret = -ENOEXEC; + } + } else { + printk(KERN_WARNING "VPE loader: only elf files are supported\n"); + ret = -ENOEXEC; + } + + /* It's good to be able to run the SP and if it chokes have a look at + the /dev/rt?. But if we reset the pointer to the shared struct we + lose what has happened. So perhaps if garbage is sent to the vpe + device, use it as a trigger for the reset. Hopefully a nice + executable will be along shortly. */ + if (ret < 0) + v->shared_ptr = NULL; + + vfree(v->pbuffer); + v->plen = 0; + + return ret; +} + +static ssize_t vpe_write(struct file *file, const char __user * buffer, + size_t count, loff_t * ppos) +{ + size_t ret = count; + struct vpe *v; + + if (iminor(file->f_path.dentry->d_inode) != minor) + return -ENODEV; + + v = get_vpe(tclimit); + if (v == NULL) + return -ENODEV; + + if ((count + v->len) > v->plen) { + printk(KERN_WARNING + "VPE loader: elf size too big. 
Perhaps strip uneeded symbols\n"); + return -ENOMEM; + } + + count -= copy_from_user(v->pbuffer + v->len, buffer, count); + if (!count) + return -EFAULT; + + v->len += count; + return ret; +} + +static const struct file_operations vpe_fops = { + .owner = THIS_MODULE, + .open = vpe_open, + .release = vpe_release, + .write = vpe_write, + .llseek = noop_llseek, +}; + +/* module wrapper entry points */ +/* give me a vpe */ +vpe_handle vpe_alloc(void) +{ + int i; + struct vpe *v; + + /* find a vpe */ + for (i = 1; i < MAX_VPES; i++) { + if ((v = get_vpe(i)) != NULL) { + v->state = VPE_STATE_INUSE; + return v; + } + } + return NULL; +} + +EXPORT_SYMBOL(vpe_alloc); + +/* start running from here */ +int vpe_start(vpe_handle vpe, unsigned long start) +{ + struct vpe *v = vpe; + + v->__start = start; + return vpe_run(v); +} + +EXPORT_SYMBOL(vpe_start); + +/* halt it for now */ +int vpe_stop(vpe_handle vpe) +{ + struct vpe *v = vpe; + struct tc *t; + unsigned int evpe_flags; + + evpe_flags = dvpe(); + + if ((t = list_entry(v->tc.next, struct tc, tc)) != NULL) { + + settc(t->index); + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); + } + + evpe(evpe_flags); + + return 0; +} + +EXPORT_SYMBOL(vpe_stop); + +/* I've done with it thank you */ +int vpe_free(vpe_handle vpe) +{ + struct vpe *v = vpe; + struct tc *t; + unsigned int evpe_flags; + + if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { + return -ENOEXEC; + } + + evpe_flags = dvpe(); + + /* Put MVPE's into 'configuration state' */ + set_c0_mvpcontrol(MVPCONTROL_VPC); + + settc(t->index); + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); + + /* halt the TC */ + write_tc_c0_tchalt(TCHALT_H); + mips_ihb(); + + /* mark the TC unallocated */ + write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A); + + v->state = VPE_STATE_UNUSED; + + clear_c0_mvpcontrol(MVPCONTROL_VPC); + evpe(evpe_flags); + + return 0; +} + +EXPORT_SYMBOL(vpe_free); + +void *vpe_get_shared(int index) +{ + struct vpe *v; + + if ((v = get_vpe(index)) == NULL) + return NULL; + + return v->shared_ptr; +} + +EXPORT_SYMBOL(vpe_get_shared); + +int vpe_getuid(int index) +{ + struct vpe *v; + + if ((v = get_vpe(index)) == NULL) + return -1; + + return v->uid; +} + +EXPORT_SYMBOL(vpe_getuid); + +int vpe_getgid(int index) +{ + struct vpe *v; + + if ((v = get_vpe(index)) == NULL) + return -1; + + return v->gid; +} + +EXPORT_SYMBOL(vpe_getgid); + +int vpe_notify(int index, struct vpe_notifications *notify) +{ + struct vpe *v; + + if ((v = get_vpe(index)) == NULL) + return -1; + + list_add(¬ify->list, &v->notify); + return 0; +} + +EXPORT_SYMBOL(vpe_notify); + +char *vpe_getcwd(int index) +{ + struct vpe *v; + + if ((v = get_vpe(index)) == NULL) + return NULL; + + return v->cwd; +} + +EXPORT_SYMBOL(vpe_getcwd); + +#ifdef CONFIG_MIPS_APSP_KSPD +static void kspd_sp_exit( int sp_id) +{ + cleanup_tc(get_tc(sp_id)); +} +#endif + +static ssize_t store_kill(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct vpe *vpe = get_vpe(tclimit); + struct vpe_notifications *not; + + list_for_each_entry(not, &vpe->notify, list) { + not->stop(tclimit); + } + + release_progmem(vpe->load_addr); + cleanup_tc(get_tc(tclimit)); + vpe_stop(vpe); + vpe_free(vpe); + + return len; +} + +static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr, + char *buf) +{ + struct vpe *vpe = get_vpe(tclimit); + + return sprintf(buf, "%d\n", vpe->ntcs); +} + +static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr, + const 
char *buf, size_t len) +{ + struct vpe *vpe = get_vpe(tclimit); + unsigned long new; + char *endp; + + new = simple_strtoul(buf, &endp, 0); + if (endp == buf) + goto out_einval; + + if (new == 0 || new > (hw_tcs - tclimit)) + goto out_einval; + + vpe->ntcs = new; + + return len; + +out_einval: + return -EINVAL; +} + +static struct device_attribute vpe_class_attributes[] = { + __ATTR(kill, S_IWUSR, NULL, store_kill), + __ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs), + {} +}; + +static void vpe_device_release(struct device *cd) +{ + kfree(cd); +} + +struct class vpe_class = { + .name = "vpe", + .owner = THIS_MODULE, + .dev_release = vpe_device_release, + .dev_attrs = vpe_class_attributes, +}; + +struct device vpe_device; + +static int __init vpe_module_init(void) +{ + unsigned int mtflags, vpflags; + unsigned long flags, val; + struct vpe *v = NULL; + struct tc *t; + int tc, err; + + if (!cpu_has_mipsmt) { + printk("VPE loader: not a MIPS MT capable processor\n"); + return -ENODEV; + } + + if (vpelimit == 0) { + printk(KERN_WARNING "No VPEs reserved for AP/SP, not " + "initializing VPE loader.\nPass maxvpes=<n> argument as " + "kernel argument\n"); + + return -ENODEV; + } + + if (tclimit == 0) { + printk(KERN_WARNING "No TCs reserved for AP/SP, not " + "initializing VPE loader.\nPass maxtcs=<n> argument as " + "kernel argument\n"); + + return -ENODEV; + } + + major = register_chrdev(0, module_name, &vpe_fops); + if (major < 0) { + printk("VPE loader: unable to register character device\n"); + return major; + } + + err = class_register(&vpe_class); + if (err) { + printk(KERN_ERR "vpe_class registration failed\n"); + goto out_chrdev; + } + + device_initialize(&vpe_device); + vpe_device.class = &vpe_class, + vpe_device.parent = NULL, + dev_set_name(&vpe_device, "vpe1"); + vpe_device.devt = MKDEV(major, minor); + err = device_add(&vpe_device); + if (err) { + printk(KERN_ERR "Adding vpe_device failed\n"); + goto out_class; + } + + local_irq_save(flags); + mtflags = dmt(); + vpflags = dvpe(); + + /* Put MVPE's into 'configuration state' */ + set_c0_mvpcontrol(MVPCONTROL_VPC); + + /* dump_mtregs(); */ + + val = read_c0_mvpconf0(); + hw_tcs = (val & MVPCONF0_PTC) + 1; + hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; + + for (tc = tclimit; tc < hw_tcs; tc++) { + /* + * Must re-enable multithreading temporarily or in case we + * reschedule send IPIs or similar we might hang. + */ + clear_c0_mvpcontrol(MVPCONTROL_VPC); + evpe(vpflags); + emt(mtflags); + local_irq_restore(flags); + t = alloc_tc(tc); + if (!t) { + err = -ENOMEM; + goto out; + } + + local_irq_save(flags); + mtflags = dmt(); + vpflags = dvpe(); + set_c0_mvpcontrol(MVPCONTROL_VPC); + + /* VPE's */ + if (tc < hw_tcs) { + settc(tc); + + if ((v = alloc_vpe(tc)) == NULL) { + printk(KERN_WARNING "VPE: unable to allocate VPE\n"); + + goto out_reenable; + } + + v->ntcs = hw_tcs - tclimit; + + /* add the tc to the list of this vpe's tc's. 
*/ + list_add(&t->tc, &v->tc); + + /* deactivate all but vpe0 */ + if (tc >= tclimit) { + unsigned long tmp = read_vpe_c0_vpeconf0(); + + tmp &= ~VPECONF0_VPA; + + /* master VPE */ + tmp |= VPECONF0_MVP; + write_vpe_c0_vpeconf0(tmp); + } + + /* disable multi-threading with TC's */ + write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); + + if (tc >= vpelimit) { + /* + * Set config to be the same as vpe0, + * particularly kseg0 coherency alg + */ + write_vpe_c0_config(read_c0_config()); + } + } + + /* TC's */ + t->pvpe = v; /* set the parent vpe */ + + if (tc >= tclimit) { + unsigned long tmp; + + settc(tc); + + /* Any TC that is bound to VPE0 gets left as is - in case + we are running SMTC on VPE0. A TC that is bound to any + other VPE gets bound to VPE0, ideally I'd like to make + it homeless but it doesn't appear to let me bind a TC + to a non-existent VPE. Which is perfectly reasonable. + + The (un)bound state is visible to an EJTAG probe so may + notify GDB... + */ + + if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) { + /* tc is bound >vpe0 */ + write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE); + + t->pvpe = get_vpe(0); /* set the parent vpe */ + } + + /* halt the TC */ + write_tc_c0_tchalt(TCHALT_H); + mips_ihb(); + + tmp = read_tc_c0_tcstatus(); + + /* mark not activated and not dynamically allocatable */ + tmp &= ~(TCSTATUS_A | TCSTATUS_DA); + tmp |= TCSTATUS_IXMT; /* interrupt exempt */ + write_tc_c0_tcstatus(tmp); + } + } + +out_reenable: + /* release config state */ + clear_c0_mvpcontrol(MVPCONTROL_VPC); + + evpe(vpflags); + emt(mtflags); + local_irq_restore(flags); + +#ifdef CONFIG_MIPS_APSP_KSPD + kspd_events.kspd_sp_exit = kspd_sp_exit; +#endif + return 0; + +out_class: + class_unregister(&vpe_class); +out_chrdev: + unregister_chrdev(major, module_name); + +out: + return err; +} + +static void __exit vpe_module_exit(void) +{ + struct vpe *v, *n; + + device_del(&vpe_device); + unregister_chrdev(major, module_name); + + /* No locking needed here */ + list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) { + if (v->state != VPE_STATE_UNUSED) + release_vpe(v); + } +} + +module_init(vpe_module_init); +module_exit(vpe_module_exit); +MODULE_DESCRIPTION("MIPS VPE Loader"); +MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc."); +MODULE_LICENSE("GPL"); diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c new file mode 100644 index 00000000..c1540696 --- /dev/null +++ b/arch/mips/kernel/watch.c @@ -0,0 +1,188 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 David Daney + */ + +#include <linux/sched.h> + +#include <asm/processor.h> +#include <asm/watch.h> + +/* + * Install the watch registers for the current thread. A maximum of + * four registers are installed although the machine may have more. + */ +void mips_install_watch_registers(void) +{ + struct mips3264_watch_reg_state *watches = + ¤t->thread.watch.mips3264; + switch (current_cpu_data.watch_reg_use_cnt) { + default: + BUG(); + case 4: + write_c0_watchlo3(watches->watchlo[3]); + /* Write 1 to the I, R, and W bits to clear them, and + 1 to G so all ASIDs are trapped. 
*/ + write_c0_watchhi3(0x40000007 | watches->watchhi[3]); + case 3: + write_c0_watchlo2(watches->watchlo[2]); + write_c0_watchhi2(0x40000007 | watches->watchhi[2]); + case 2: + write_c0_watchlo1(watches->watchlo[1]); + write_c0_watchhi1(0x40000007 | watches->watchhi[1]); + case 1: + write_c0_watchlo0(watches->watchlo[0]); + write_c0_watchhi0(0x40000007 | watches->watchhi[0]); + } +} + +/* + * Read back the watchhi registers so the user space debugger has + * access to the I, R, and W bits. A maximum of four registers are + * read although the machine may have more. + */ +void mips_read_watch_registers(void) +{ + struct mips3264_watch_reg_state *watches = + ¤t->thread.watch.mips3264; + switch (current_cpu_data.watch_reg_use_cnt) { + default: + BUG(); + case 4: + watches->watchhi[3] = (read_c0_watchhi3() & 0x0fff); + case 3: + watches->watchhi[2] = (read_c0_watchhi2() & 0x0fff); + case 2: + watches->watchhi[1] = (read_c0_watchhi1() & 0x0fff); + case 1: + watches->watchhi[0] = (read_c0_watchhi0() & 0x0fff); + } + if (current_cpu_data.watch_reg_use_cnt == 1 && + (watches->watchhi[0] & 7) == 0) { + /* Pathological case of release 1 architecture that + * doesn't set the condition bits. We assume that + * since we got here, the watch condition was met and + * signal that the conditions requested in watchlo + * were met. */ + watches->watchhi[0] |= (watches->watchlo[0] & 7); + } + } + +/* + * Disable all watch registers. Although only four registers are + * installed, all are cleared to eliminate the possibility of endless + * looping in the watch handler. + */ +void mips_clear_watch_registers(void) +{ + switch (current_cpu_data.watch_reg_count) { + default: + BUG(); + case 8: + write_c0_watchlo7(0); + case 7: + write_c0_watchlo6(0); + case 6: + write_c0_watchlo5(0); + case 5: + write_c0_watchlo4(0); + case 4: + write_c0_watchlo3(0); + case 3: + write_c0_watchlo2(0); + case 2: + write_c0_watchlo1(0); + case 1: + write_c0_watchlo0(0); + } +} + +__cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) +{ + unsigned int t; + + if ((c->options & MIPS_CPU_WATCH) == 0) + return; + /* + * Check which of the I,R and W bits are supported, then + * disable the register. + */ + write_c0_watchlo0(7); + t = read_c0_watchlo0(); + write_c0_watchlo0(0); + c->watch_reg_masks[0] = t & 7; + + /* Write the mask bits and read them back to determine which + * can be used. 
*/ + c->watch_reg_count = 1; + c->watch_reg_use_cnt = 1; + t = read_c0_watchhi0(); + write_c0_watchhi0(t | 0xff8); + t = read_c0_watchhi0(); + c->watch_reg_masks[0] |= (t & 0xff8); + if ((t & 0x80000000) == 0) + return; + + write_c0_watchlo1(7); + t = read_c0_watchlo1(); + write_c0_watchlo1(0); + c->watch_reg_masks[1] = t & 7; + + c->watch_reg_count = 2; + c->watch_reg_use_cnt = 2; + t = read_c0_watchhi1(); + write_c0_watchhi1(t | 0xff8); + t = read_c0_watchhi1(); + c->watch_reg_masks[1] |= (t & 0xff8); + if ((t & 0x80000000) == 0) + return; + + write_c0_watchlo2(7); + t = read_c0_watchlo2(); + write_c0_watchlo2(0); + c->watch_reg_masks[2] = t & 7; + + c->watch_reg_count = 3; + c->watch_reg_use_cnt = 3; + t = read_c0_watchhi2(); + write_c0_watchhi2(t | 0xff8); + t = read_c0_watchhi2(); + c->watch_reg_masks[2] |= (t & 0xff8); + if ((t & 0x80000000) == 0) + return; + + write_c0_watchlo3(7); + t = read_c0_watchlo3(); + write_c0_watchlo3(0); + c->watch_reg_masks[3] = t & 7; + + c->watch_reg_count = 4; + c->watch_reg_use_cnt = 4; + t = read_c0_watchhi3(); + write_c0_watchhi3(t | 0xff8); + t = read_c0_watchhi3(); + c->watch_reg_masks[3] |= (t & 0xff8); + if ((t & 0x80000000) == 0) + return; + + /* We use at most 4, but probe and report up to 8. */ + c->watch_reg_count = 5; + t = read_c0_watchhi4(); + if ((t & 0x80000000) == 0) + return; + + c->watch_reg_count = 6; + t = read_c0_watchhi5(); + if ((t & 0x80000000) == 0) + return; + + c->watch_reg_count = 7; + t = read_c0_watchhi6(); + if ((t & 0x80000000) == 0) + return; + + c->watch_reg_count = 8; +} |
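Note on the vpe.c hunk above: besides the /dev/vpe1 character device and the "kill"/"ntcs" sysfs attributes, it exports a small in-kernel API (vpe_alloc, vpe_start, vpe_stop, vpe_free, vpe_get_shared, vpe_notify) intended for AP/SP clients. The following is a minimal, hedged sketch of how such a client could drive that API; the prototypes are repeated locally so the example stands alone, and "my_sp_entry" is a hypothetical entry point — real users first load an SP ELF image through the character device provided by vpe_module_init().

#include <linux/module.h>
#include <linux/errno.h>

/* Sketch only: prototypes mirrored from the exported symbols in the diff
 * above; in-tree clients would pick them up from the MIPS MT headers.
 */
typedef void *vpe_handle;		/* the loader treats the handle as an opaque pointer */
extern vpe_handle vpe_alloc(void);
extern int vpe_start(vpe_handle vpe, unsigned long start);
extern int vpe_stop(vpe_handle vpe);
extern int vpe_free(vpe_handle vpe);

static vpe_handle vpe;

static int __init vpe_client_init(void)
{
	unsigned long my_sp_entry = 0;	/* hypothetical: entry point of an already-loaded SP program */

	vpe = vpe_alloc();		/* claim a free VPE (marks it VPE_STATE_INUSE) */
	if (!vpe)
		return -ENODEV;

	return vpe_start(vpe, my_sp_entry);	/* sets __start and calls vpe_run() */
}

static void __exit vpe_client_exit(void)
{
	vpe_stop(vpe);			/* clears VPECONF0_VPA so the VPE stops fetching */
	vpe_free(vpe);			/* halts the TC and returns the VPE to VPE_STATE_UNUSED */
}

module_init(vpe_client_init);
module_exit(vpe_client_exit);
MODULE_LICENSE("GPL");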
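Note on the watch.c hunk: mips_probe_watch_registers() repeats the same probe for each watch register pair — write 7 to watchlo to see which of the W/R/I enable bits (bits 2:0) stick, write 0xff8 to watchhi to see which address-mask bits (bits 11:3) are writable, and stop when the M bit (bit 31) of watchhi reads back as zero. The sketch below expresses one iteration of that pattern as a hypothetical helper macro, purely for illustration; the real code is unrolled by hand because each CP0 accessor names a fixed register, and it also updates watch_reg_count/watch_reg_use_cnt and performs the M-bit check between iterations.

#include <asm/cpu-info.h>
#include <asm/mipsregs.h>

/* Hypothetical helper, not part of this diff: probe watch register pair n
 * and record its supported condition and mask bits, as the unrolled code
 * above does for pairs 0..3.
 */
#define PROBE_WATCH(c, n)						\
do {									\
	unsigned int _t;						\
	write_c0_watchlo##n(7);			/* try to set W, R and I */ \
	_t = read_c0_watchlo##n();					\
	write_c0_watchlo##n(0);			/* disable the register again */ \
	(c)->watch_reg_masks[n] = _t & 7;	/* supported W/R/I bits */ \
	_t = read_c0_watchhi##n();					\
	write_c0_watchhi##n(_t | 0xff8);	/* try to set the mask bits */ \
	_t = read_c0_watchhi##n();					\
	(c)->watch_reg_masks[n] |= _t & 0xff8;	/* writable mask bits */ \
} while (0)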