author    | Srikant Patnaik | 2015-01-11 12:28:04 +0530
committer | Srikant Patnaik | 2015-01-11 12:28:04 +0530
commit    | 871480933a1c28f8a9fed4c4d34d06c439a7a422 (patch)
tree      | 8718f573808810c2a1e8cb8fb6ac469093ca2784 /arch/mips/oprofile
parent    | 9d40ac5867b9aefe0722bc1f110b965ff294d30d (diff)
Moved, renamed, and deleted files
The original directory structure was scattered and unorganized. These changes
reorganize the files to match the standard kernel source tree layout.
Diffstat (limited to 'arch/mips/oprofile')
-rw-r--r-- | arch/mips/oprofile/Makefile             |  18
-rw-r--r-- | arch/mips/oprofile/backtrace.c          | 175
-rw-r--r-- | arch/mips/oprofile/common.c             | 130
-rw-r--r-- | arch/mips/oprofile/op_impl.h            |  41
-rw-r--r-- | arch/mips/oprofile/op_model_loongson2.c | 161
-rw-r--r-- | arch/mips/oprofile/op_model_mipsxx.c    | 397
-rw-r--r-- | arch/mips/oprofile/op_model_rm9000.c    | 138
7 files changed, 1060 insertions, 0 deletions
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
new file mode 100644
index 00000000..29f2f13e
--- /dev/null
+++ b/arch/mips/oprofile/Makefile
@@ -0,0 +1,18 @@
+ccflags-y := -Werror
+
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+		oprof.o cpu_buffer.o buffer_sync.o \
+		event_buffer.o oprofile_files.o \
+		oprofilefs.o oprofile_stats.o \
+		timer_int.o )
+
+oprofile-y			:= $(DRIVER_OBJS) common.o backtrace.o
+
+oprofile-$(CONFIG_CPU_MIPS32)	+= op_model_mipsxx.o
+oprofile-$(CONFIG_CPU_MIPS64)	+= op_model_mipsxx.o
+oprofile-$(CONFIG_CPU_R10000)	+= op_model_mipsxx.o
+oprofile-$(CONFIG_CPU_SB1)	+= op_model_mipsxx.o
+oprofile-$(CONFIG_CPU_RM9000)	+= op_model_rm9000.o
+oprofile-$(CONFIG_CPU_LOONGSON2) += op_model_loongson2.o
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
new file mode 100644
index 00000000..6854ed50
--- /dev/null
+++ b/arch/mips/oprofile/backtrace.c
@@ -0,0 +1,175 @@
+#include <linux/oprofile.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/stacktrace.h>
+#include <linux/stacktrace.h>
+#include <linux/kernel.h>
+#include <asm/sections.h>
+#include <asm/inst.h>
+
+struct stackframe {
+	unsigned long sp;
+	unsigned long pc;
+	unsigned long ra;
+};
+
+static inline int get_mem(unsigned long addr, unsigned long *result)
+{
+	unsigned long *address = (unsigned long *) addr;
+
+	if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long)))
+		return -1;
+
+	if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
+		return -3;
+
+	return 0;
+}
+
+/*
+ * These two instruction helpers were taken from process.c
+ */
+static inline int is_ra_save_ins(union mips_instruction *ip)
+{
+	/* sw / sd $ra, offset($sp) */
+	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op)
+		&& ip->i_format.rs == 29 && ip->i_format.rt == 31;
+}
+
+static inline int is_sp_move_ins(union mips_instruction *ip)
+{
+	/* addiu/daddiu sp,sp,-imm */
+	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
+		return 0;
+
+	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Looks for specific instructions that mark the end of a function.
+ * This usually means we ran into the code area of the previous function.
+ */
+static inline int is_end_of_function_marker(union mips_instruction *ip)
+{
+	/* jr ra */
+	if (ip->r_format.func == jr_op && ip->r_format.rs == 31)
+		return 1;
+
+	/* lui gp */
+	if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * TODO for userspace stack unwinding:
+ * - handle cases where the stack is adjusted inside a function
+ *     (generally doesn't happen)
+ * - find optimal value for max_instr_check
+ * - try to find a way to handle leaf functions
+ */
+
+static inline int unwind_user_frame(struct stackframe *old_frame,
+				    const unsigned int max_instr_check)
+{
+	struct stackframe new_frame = *old_frame;
+	off_t ra_offset = 0;
+	size_t stack_size = 0;
+	unsigned long addr;
+
+	if (old_frame->pc == 0 || old_frame->sp == 0 || old_frame->ra == 0)
+		return -9;
+
+	for (addr = new_frame.pc; (addr + max_instr_check > new_frame.pc)
+		&& (!ra_offset || !stack_size); --addr) {
+		union mips_instruction ip;
+
+		if (get_mem(addr, (unsigned long *) &ip))
+			return -11;
+
+		if (is_sp_move_ins(&ip)) {
+			int stack_adjustment = ip.i_format.simmediate;
+			if (stack_adjustment > 0)
+				/* This marks the end of the previous function,
+				   which means we overran. */
+				break;
+			stack_size = (unsigned) stack_adjustment;
+		} else if (is_ra_save_ins(&ip)) {
+			int ra_slot = ip.i_format.simmediate;
+			if (ra_slot < 0)
+				/* This shouldn't happen. */
+				break;
+			ra_offset = ra_slot;
+		} else if (is_end_of_function_marker(&ip))
+			break;
+	}
+
+	if (!ra_offset || !stack_size)
+		return -1;
+
+	if (ra_offset) {
+		new_frame.ra = old_frame->sp + ra_offset;
+		if (get_mem(new_frame.ra, &(new_frame.ra)))
+			return -13;
+	}
+
+	if (stack_size) {
+		new_frame.sp = old_frame->sp + stack_size;
+		if (get_mem(new_frame.sp, &(new_frame.sp)))
+			return -14;
+	}
+
+	if (new_frame.sp > old_frame->sp)
+		return -2;
+
+	new_frame.pc = old_frame->ra;
+	*old_frame = new_frame;
+
+	return 0;
+}
+
+static inline void do_user_backtrace(unsigned long low_addr,
+				     struct stackframe *frame,
+				     unsigned int depth)
+{
+	const unsigned int max_instr_check = 512;
+	const unsigned long high_addr = low_addr + THREAD_SIZE;
+
+	while (depth-- && !unwind_user_frame(frame, max_instr_check)) {
+		oprofile_add_trace(frame->ra);
+		if (frame->sp < low_addr || frame->sp > high_addr)
+			break;
+	}
+}
+
+#ifndef CONFIG_KALLSYMS
+static inline void do_kernel_backtrace(unsigned long low_addr,
+				       struct stackframe *frame,
+				       unsigned int depth) { }
+#else
+static inline void do_kernel_backtrace(unsigned long low_addr,
+				       struct stackframe *frame,
+				       unsigned int depth)
+{
+	while (depth-- && frame->pc) {
+		frame->pc = unwind_stack_by_address(low_addr,
+						    &(frame->sp),
+						    frame->pc,
+						    &(frame->ra));
+		oprofile_add_trace(frame->ra);
+	}
+}
+#endif
+
+void notrace op_mips_backtrace(struct pt_regs *const regs, unsigned int depth)
+{
+	struct stackframe frame = { .sp = regs->regs[29],
+				    .pc = regs->cp0_epc,
+				    .ra = regs->regs[31] };
+	const int userspace = user_mode(regs);
+	const unsigned long low_addr = ALIGN(frame.sp, THREAD_SIZE);
+
+	if (userspace)
+		do_user_backtrace(low_addr, &frame, depth);
+	else
+		do_kernel_backtrace(low_addr, &frame, depth);
+}
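The user-space unwinder above works without frame pointers: starting from the
sampled pc it scans backwards for the function prologue, i.e. the
addiu/daddiu sp,sp,-imm stack adjustment and the sw/sd $ra,offset($sp)
return-address save, then reloads ra and sp through get_mem(). Below is a
standalone sketch, not part of the commit, of the I-type field extraction
this decoding relies on; the field layout opcode[31:26] rs[25:21] rt[20:16]
simm[15:0] and the opcode numbers (addiu = 9, sw = 43) are architectural:

	/* decode32.c - illustrative only; build with any hosted C compiler */
	#include <stdio.h>
	#include <stdint.h>

	static int is_sp_move(uint32_t insn, int16_t *imm)
	{
		uint32_t op = insn >> 26;
		uint32_t rs = (insn >> 21) & 31, rt = (insn >> 16) & 31;

		*imm = (int16_t)(insn & 0xffff);	/* sign-extended offset */
		return op == 9 /* addiu */ && rs == 29 && rt == 29; /* sp, sp */
	}

	static int is_ra_save(uint32_t insn, int16_t *off)
	{
		uint32_t op = insn >> 26;
		uint32_t rs = (insn >> 21) & 31, rt = (insn >> 16) & 31;

		*off = (int16_t)(insn & 0xffff);
		return op == 43 /* sw */ && rs == 29 /* base sp */ && rt == 31 /* ra */;
	}

	int main(void)
	{
		int16_t v;

		/* 0x27bdffe0 = addiu sp,sp,-32 ; 0xafbf001c = sw ra,28(sp) */
		printf("sp move: %d, imm %d\n", is_sp_move(0x27bdffe0, &v), v);
		printf("ra save: %d, off %d\n", is_ra_save(0xafbf001c, &v), v);
		return 0;
	}

The two test words are the classic GCC prologue pair for a 32-byte stack
frame, which is exactly what is_sp_move_ins() and is_ra_save_ins() match via
union mips_instruction.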
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
new file mode 100644
index 00000000..d1f2d4c5
--- /dev/null
+++ b/arch/mips/oprofile/common.c
@@ -0,0 +1,130 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005 Ralf Baechle
+ * Copyright (C) 2005 MIPS Technologies, Inc.
+ */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/oprofile.h>
+#include <linux/smp.h>
+#include <asm/cpu-info.h>
+
+#include "op_impl.h"
+
+extern struct op_mips_model op_model_mipsxx_ops __weak;
+extern struct op_mips_model op_model_rm9000_ops __weak;
+extern struct op_mips_model op_model_loongson2_ops __weak;
+
+static struct op_mips_model *model;
+
+static struct op_counter_config ctr[20];
+
+static int op_mips_setup(void)
+{
+	/* Pre-compute the values to stuff in the hardware registers.  */
+	model->reg_setup(ctr);
+
+	/* Configure the registers on all cpus.  */
+	on_each_cpu(model->cpu_setup, NULL, 1);
+
+	return 0;
+}
+
+static int op_mips_create_files(struct super_block *sb, struct dentry *root)
+{
+	int i;
+
+	for (i = 0; i < model->num_counters; ++i) {
+		struct dentry *dir;
+		char buf[4];
+
+		snprintf(buf, sizeof buf, "%d", i);
+		dir = oprofilefs_mkdir(sb, root, buf);
+
+		oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
+		oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
+		oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
+		oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
+		oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
+		oprofilefs_create_ulong(sb, dir, "exl", &ctr[i].exl);
+		/* Dummy.  */
+		oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
+	}
+
+	return 0;
+}
+
+static int op_mips_start(void)
+{
+	on_each_cpu(model->cpu_start, NULL, 1);
+
+	return 0;
+}
+
+static void op_mips_stop(void)
+{
+	/* Disable performance monitoring for all counters.  */
+	on_each_cpu(model->cpu_stop, NULL, 1);
+}
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+	struct op_mips_model *lmodel = NULL;
+	int res;
+
+	switch (current_cpu_type()) {
+	case CPU_5KC:
+	case CPU_20KC:
+	case CPU_24K:
+	case CPU_25KF:
+	case CPU_34K:
+	case CPU_1004K:
+	case CPU_74K:
+	case CPU_SB1:
+	case CPU_SB1A:
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_R14000:
+		lmodel = &op_model_mipsxx_ops;
+		break;
+
+	case CPU_RM9000:
+		lmodel = &op_model_rm9000_ops;
+		break;
+	case CPU_LOONGSON2:
+		lmodel = &op_model_loongson2_ops;
+		break;
+	};
+
+	if (!lmodel)
+		return -ENODEV;
+
+	res = lmodel->init();
+	if (res)
+		return res;
+
+	model = lmodel;
+
+	ops->create_files	= op_mips_create_files;
+	ops->setup		= op_mips_setup;
+	//ops->shutdown		= op_mips_shutdown;
+	ops->start		= op_mips_start;
+	ops->stop		= op_mips_stop;
+	ops->cpu_type		= lmodel->cpu_type;
+	ops->backtrace		= op_mips_backtrace;
+
+	printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
+	       lmodel->cpu_type);
+
+	return 0;
+}
+
+void oprofile_arch_exit(void)
+{
+	if (model)
+		model->exit();
+}
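A note on the dispatch in common.c: the three extern __weak declarations let
oprofile_arch_init() reference model structures that may not be compiled in.
An undefined weak symbol resolves to address zero at link time, so lmodel
stays NULL for an unbuilt model and the function returns -ENODEV. A minimal
stand-alone demonstration of that linker behaviour (hypothetical symbol name,
assumes a GCC/Clang ELF toolchain):

	#include <stdio.h>

	struct ops { int (*init)(void); };

	/* nothing defines this symbol, so its address links as NULL */
	extern struct ops missing_model __attribute__((weak));

	int main(void)
	{
		struct ops *m = &missing_model;

		if (!m)
			printf("model not built in\n");
		return 0;
	}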
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
new file mode 100644
index 00000000..7c2da27e
--- /dev/null
+++ b/arch/mips/oprofile/op_impl.h
@@ -0,0 +1,41 @@
+/**
+ * @file arch/alpha/oprofile/op_impl.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Richard Henderson <rth@twiddle.net>
+ */
+
+#ifndef OP_IMPL_H
+#define OP_IMPL_H 1
+
+extern int (*perf_irq)(void);
+
+/* Per-counter configuration as set via oprofilefs.  */
+struct op_counter_config {
+	unsigned long enabled;
+	unsigned long event;
+	unsigned long count;
+	/* Dummies because I am too lazy to hack the userspace tools.  */
+	unsigned long kernel;
+	unsigned long user;
+	unsigned long exl;
+	unsigned long unit_mask;
+};
+
+/* Per-architecture configury and hooks.  */
+struct op_mips_model {
+	void (*reg_setup) (struct op_counter_config *);
+	void (*cpu_setup) (void *dummy);
+	int (*init)(void);
+	void (*exit)(void);
+	void (*cpu_start)(void *args);
+	void (*cpu_stop)(void *args);
+	char *cpu_type;
+	unsigned char num_counters;
+};
+
+void op_mips_backtrace(struct pt_regs * const regs, unsigned int depth);
+
+#endif
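op_impl.h is the whole contract between common.c and a CPU model: reg_setup()
precomputes register values from the oprofilefs configuration, cpu_setup()
programs them on each CPU, and cpu_start()/cpu_stop() gate counting. A
hypothetical minimal model satisfying the interface (names are illustrative
only; the real instances follow in the loongson2, mipsxx, and rm9000 files
below):

	#include "op_impl.h"

	static void demo_reg_setup(struct op_counter_config *ctr) { }
	static void demo_cpu_setup(void *dummy) { }
	static int  demo_init(void) { return 0; }
	static void demo_exit(void) { }
	static void demo_cpu_start(void *args) { }
	static void demo_cpu_stop(void *args) { }

	struct op_mips_model op_model_demo_ops = {
		.reg_setup	= demo_reg_setup,
		.cpu_setup	= demo_cpu_setup,
		.init		= demo_init,
		.exit		= demo_exit,
		.cpu_start	= demo_cpu_start,
		.cpu_stop	= demo_cpu_stop,
		.cpu_type	= "mips/demo",
		.num_counters	= 2
	};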
diff --git a/arch/mips/oprofile/op_model_loongson2.c b/arch/mips/oprofile/op_model_loongson2.c
new file mode 100644
index 00000000..60d3ea60
--- /dev/null
+++ b/arch/mips/oprofile/op_model_loongson2.c
@@ -0,0 +1,161 @@
+/*
+ * Loongson2 performance counter driver for oprofile
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Yanhua <yanh@lemote.com>
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/oprofile.h>
+#include <linux/interrupt.h>
+
+#include <loongson.h>			/* LOONGSON2_PERFCNT_IRQ */
+#include "op_impl.h"
+
+#define LOONGSON2_CPU_TYPE	"mips/loongson2"
+
+#define LOONGSON2_PERFCNT_OVERFLOW	(1ULL << 31)
+
+#define LOONGSON2_PERFCTRL_EXL		(1UL << 0)
+#define LOONGSON2_PERFCTRL_KERNEL	(1UL << 1)
+#define LOONGSON2_PERFCTRL_SUPERVISOR	(1UL << 2)
+#define LOONGSON2_PERFCTRL_USER		(1UL << 3)
+#define LOONGSON2_PERFCTRL_ENABLE	(1UL << 4)
+#define LOONGSON2_PERFCTRL_EVENT(idx, event) \
+	(((event) & 0x0f) << ((idx) ? 9 : 5))
+
+#define read_c0_perfctrl() __read_64bit_c0_register($24, 0)
+#define write_c0_perfctrl(val) __write_64bit_c0_register($24, 0, val)
+#define read_c0_perfcnt() __read_64bit_c0_register($25, 0)
+#define write_c0_perfcnt(val) __write_64bit_c0_register($25, 0, val)
+
+static struct loongson2_register_config {
+	unsigned int ctrl;
+	unsigned long long reset_counter1;
+	unsigned long long reset_counter2;
+	int cnt1_enabled, cnt2_enabled;
+} reg;
+
+static char *oprofid = "LoongsonPerf";
+static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id);
+
+static void reset_counters(void *arg)
+{
+	write_c0_perfctrl(0);
+	write_c0_perfcnt(0);
+}
+
+static void loongson2_reg_setup(struct op_counter_config *cfg)
+{
+	unsigned int ctrl = 0;
+
+	reg.reset_counter1 = 0;
+	reg.reset_counter2 = 0;
+
+	/*
+	 * Compute the performance counter ctrl word.
+	 * For now, count kernel and user mode.
+	 */
+	if (cfg[0].enabled) {
+		ctrl |= LOONGSON2_PERFCTRL_EVENT(0, cfg[0].event);
+		reg.reset_counter1 = 0x80000000ULL - cfg[0].count;
+	}
+
+	if (cfg[1].enabled) {
+		ctrl |= LOONGSON2_PERFCTRL_EVENT(1, cfg[1].event);
+		reg.reset_counter2 = 0x80000000ULL - cfg[1].count;
+	}
+
+	if (cfg[0].enabled || cfg[1].enabled) {
+		ctrl |= LOONGSON2_PERFCTRL_EXL | LOONGSON2_PERFCTRL_ENABLE;
+		if (cfg[0].kernel || cfg[1].kernel)
+			ctrl |= LOONGSON2_PERFCTRL_KERNEL;
+		if (cfg[0].user || cfg[1].user)
+			ctrl |= LOONGSON2_PERFCTRL_USER;
+	}
+
+	reg.ctrl = ctrl;
+
+	reg.cnt1_enabled = cfg[0].enabled;
+	reg.cnt2_enabled = cfg[1].enabled;
+}
+
+static void loongson2_cpu_setup(void *args)
+{
+	write_c0_perfcnt((reg.reset_counter2 << 32) | reg.reset_counter1);
+}
+
+static void loongson2_cpu_start(void *args)
+{
+	/* Start all counters on current CPU */
+	if (reg.cnt1_enabled || reg.cnt2_enabled)
+		write_c0_perfctrl(reg.ctrl);
+}
+
+static void loongson2_cpu_stop(void *args)
+{
+	/* Stop all counters on current CPU */
+	write_c0_perfctrl(0);
+	memset(&reg, 0, sizeof(reg));
+}
+
+static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
+{
+	uint64_t counter, counter1, counter2;
+	struct pt_regs *regs = get_irq_regs();
+	int enabled;
+
+	/* Check whether the irq belongs to me */
+	enabled = read_c0_perfctrl() & LOONGSON2_PERFCTRL_ENABLE;
+	if (!enabled)
+		return IRQ_NONE;
+	enabled = reg.cnt1_enabled | reg.cnt2_enabled;
+	if (!enabled)
+		return IRQ_NONE;
+
+	counter = read_c0_perfcnt();
+	counter1 = counter & 0xffffffff;
+	counter2 = counter >> 32;
+
+	if (counter1 & LOONGSON2_PERFCNT_OVERFLOW) {
+		if (reg.cnt1_enabled)
+			oprofile_add_sample(regs, 0);
+		counter1 = reg.reset_counter1;
+	}
+	if (counter2 & LOONGSON2_PERFCNT_OVERFLOW) {
+		if (reg.cnt2_enabled)
+			oprofile_add_sample(regs, 1);
+		counter2 = reg.reset_counter2;
+	}
+
+	write_c0_perfcnt((counter2 << 32) | counter1);
+
+	return IRQ_HANDLED;
+}
+
+static int __init loongson2_init(void)
+{
+	return request_irq(LOONGSON2_PERFCNT_IRQ, loongson2_perfcount_handler,
+			   IRQF_SHARED, "Perfcounter", oprofid);
+}
+
+static void loongson2_exit(void)
+{
+	reset_counters(NULL);
+	free_irq(LOONGSON2_PERFCNT_IRQ, oprofid);
+}
+
+struct op_mips_model op_model_loongson2_ops = {
+	.reg_setup = loongson2_reg_setup,
+	.cpu_setup = loongson2_cpu_setup,
+	.init = loongson2_init,
+	.exit = loongson2_exit,
+	.cpu_start = loongson2_cpu_start,
+	.cpu_stop = loongson2_cpu_stop,
+	.cpu_type = LOONGSON2_CPU_TYPE,
+	.num_counters = 2
+};
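The 0x80000000ULL - cfg[n].count reset values above encode the sampling
period: these counters raise their overflow bit when bit 31 becomes set, so
preloading the counter with 0x80000000 minus the period makes the interrupt
fire after exactly count events, and the handler rearms by writing the same
reset value back. A standalone arithmetic check, not part of the commit:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t count = 100000;		/* desired sample period */
		uint32_t counter = 0x80000000u - count;	/* preloaded reset value */
		uint32_t events = 0;

		while (!(counter & 0x80000000u)) {	/* overflow bit still clear */
			counter++;			/* one increment per event */
			events++;
		}
		printf("overflow after %u events\n", events);	/* prints 100000 */
		return 0;
	}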
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
new file mode 100644
index 00000000..54759f16
--- /dev/null
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -0,0 +1,397 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 05, 06 by Ralf Baechle
+ * Copyright (C) 2005 by MIPS Technologies, Inc.
+ */
+#include <linux/cpumask.h>
+#include <linux/oprofile.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <asm/irq_regs.h>
+
+#include "op_impl.h"
+
+#define M_PERFCTL_EXL			(1UL << 0)
+#define M_PERFCTL_KERNEL		(1UL << 1)
+#define M_PERFCTL_SUPERVISOR		(1UL << 2)
+#define M_PERFCTL_USER			(1UL << 3)
+#define M_PERFCTL_INTERRUPT_ENABLE	(1UL << 4)
+#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff) << 5)
+#define M_PERFCTL_VPEID(vpe)		((vpe) << 16)
+#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
+#define M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
+#define M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
+#define M_TC_EN_TC			M_PERFCTL_MT_EN(2)
+#define M_PERFCTL_TCID(tcid)		((tcid) << 22)
+#define M_PERFCTL_WIDE			(1UL << 30)
+#define M_PERFCTL_MORE			(1UL << 31)
+
+#define M_COUNTER_OVERFLOW		(1UL << 31)
+
+static int (*save_perf_irq)(void);
+
+#ifdef CONFIG_MIPS_MT_SMP
+static int cpu_has_mipsmt_pertccounters;
+#define WHAT		(M_TC_EN_VPE | \
+			 M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
+#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
+			0 : cpu_data[smp_processor_id()].vpe_id)
+
+/*
+ * The number of bits to shift to convert between counters per core and
+ * counters per VPE.  There is no reasonable interface atm to obtain the
+ * number of VPEs used by Linux and in the 34K this number is fixed to two
+ * anyways so we hardcode a few things here for the moment.  The way it's
+ * done here will ensure that oprofile VSMP kernel will run right on a lesser
+ * core like a 24K also or with maxcpus=1.
+ */
+static inline unsigned int vpe_shift(void)
+{
+	if (num_possible_cpus() > 1)
+		return 1;
+
+	return 0;
+}
+
+#else
+
+#define WHAT		0
+#define vpe_id()	0
+
+static inline unsigned int vpe_shift(void)
+{
+	return 0;
+}
+
+#endif
+
+static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
+{
+	return counters >> vpe_shift();
+}
+
+static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
+{
+	return counters << vpe_shift();
+}
+
+#define __define_perf_accessors(r, n, np)				\
+									\
+static inline unsigned int r_c0_ ## r ## n(void)			\
+{									\
+	unsigned int cpu = vpe_id();					\
+									\
+	switch (cpu) {							\
+	case 0:								\
+		return read_c0_ ## r ## n();				\
+	case 1:								\
+		return read_c0_ ## r ## np();				\
+	default:							\
+		BUG();							\
+	}								\
+	return 0;							\
+}									\
+									\
+static inline void w_c0_ ## r ## n(unsigned int value)			\
+{									\
+	unsigned int cpu = vpe_id();					\
+									\
+	switch (cpu) {							\
+	case 0:								\
+		write_c0_ ## r ## n(value);				\
+		return;							\
+	case 1:								\
+		write_c0_ ## r ## np(value);				\
+		return;							\
+	default:							\
+		BUG();							\
+	}								\
+	return;								\
+}									\
+
+__define_perf_accessors(perfcntr, 0, 2)
+__define_perf_accessors(perfcntr, 1, 3)
+__define_perf_accessors(perfcntr, 2, 0)
+__define_perf_accessors(perfcntr, 3, 1)
+
+__define_perf_accessors(perfctrl, 0, 2)
+__define_perf_accessors(perfctrl, 1, 3)
+__define_perf_accessors(perfctrl, 2, 0)
+__define_perf_accessors(perfctrl, 3, 1)
+
+struct op_mips_model op_model_mipsxx_ops;
+
+static struct mipsxx_register_config {
+	unsigned int control[4];
+	unsigned int counter[4];
+} reg;
+
+/* Compute all of the registers in preparation for enabling profiling.  */
+
+static void mipsxx_reg_setup(struct op_counter_config *ctr)
+{
+	unsigned int counters = op_model_mipsxx_ops.num_counters;
+	int i;
+
+	/* Compute the performance counter control word.  */
+	for (i = 0; i < counters; i++) {
+		reg.control[i] = 0;
+		reg.counter[i] = 0;
+
+		if (!ctr[i].enabled)
+			continue;
+
+		reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
+				 M_PERFCTL_INTERRUPT_ENABLE;
+		if (ctr[i].kernel)
+			reg.control[i] |= M_PERFCTL_KERNEL;
+		if (ctr[i].user)
+			reg.control[i] |= M_PERFCTL_USER;
+		if (ctr[i].exl)
+			reg.control[i] |= M_PERFCTL_EXL;
+		reg.counter[i] = 0x80000000 - ctr[i].count;
+	}
+}
+
+/* Program all of the registers in preparation for enabling profiling.  */
+
+static void mipsxx_cpu_setup(void *args)
+{
+	unsigned int counters = op_model_mipsxx_ops.num_counters;
+
+	switch (counters) {
+	case 4:
+		w_c0_perfctrl3(0);
+		w_c0_perfcntr3(reg.counter[3]);
+	case 3:
+		w_c0_perfctrl2(0);
+		w_c0_perfcntr2(reg.counter[2]);
+	case 2:
+		w_c0_perfctrl1(0);
+		w_c0_perfcntr1(reg.counter[1]);
+	case 1:
+		w_c0_perfctrl0(0);
+		w_c0_perfcntr0(reg.counter[0]);
+	}
+}
+
+/* Start all counters on current CPU */
+static void mipsxx_cpu_start(void *args)
+{
+	unsigned int counters = op_model_mipsxx_ops.num_counters;
+
+	switch (counters) {
+	case 4:
+		w_c0_perfctrl3(WHAT | reg.control[3]);
+	case 3:
+		w_c0_perfctrl2(WHAT | reg.control[2]);
+	case 2:
+		w_c0_perfctrl1(WHAT | reg.control[1]);
+	case 1:
+		w_c0_perfctrl0(WHAT | reg.control[0]);
+	}
+}
+
+/* Stop all counters on current CPU */
+static void mipsxx_cpu_stop(void *args)
+{
+	unsigned int counters = op_model_mipsxx_ops.num_counters;
+
+	switch (counters) {
+	case 4:
+		w_c0_perfctrl3(0);
+	case 3:
+		w_c0_perfctrl2(0);
+	case 2:
+		w_c0_perfctrl1(0);
+	case 1:
+		w_c0_perfctrl0(0);
+	}
+}
+
+static int mipsxx_perfcount_handler(void)
+{
+	unsigned int counters = op_model_mipsxx_ops.num_counters;
+	unsigned int control;
+	unsigned int counter;
+	int handled = IRQ_NONE;
+
+	if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
+		return handled;
+
+	switch (counters) {
+#define HANDLE_COUNTER(n)						\
+	case n + 1:							\
+		control = r_c0_perfctrl ## n();				\
+		counter = r_c0_perfcntr ## n();				\
+		if ((control & M_PERFCTL_INTERRUPT_ENABLE) &&		\
+		    (counter & M_COUNTER_OVERFLOW)) {			\
+			oprofile_add_sample(get_irq_regs(), n);		\
+			w_c0_perfcntr ## n(reg.counter[n]);		\
+			handled = IRQ_HANDLED;				\
+		}
+	HANDLE_COUNTER(3)
+	HANDLE_COUNTER(2)
+	HANDLE_COUNTER(1)
+	HANDLE_COUNTER(0)
+	}
+
+	return handled;
+}
+
+#define M_CONFIG1_PC	(1 << 4)
+
+static inline int __n_counters(void)
+{
+	if (!(read_c0_config1() & M_CONFIG1_PC))
+		return 0;
+	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
+		return 1;
+	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
+		return 2;
+	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
+		return 3;
+
+	return 4;
+}
+
+static inline int n_counters(void)
+{
+	int counters;
+
+	switch (current_cpu_type()) {
+	case CPU_R10000:
+		counters = 2;
+		break;
+
+	case CPU_R12000:
+	case CPU_R14000:
+		counters = 4;
+		break;
+
+	default:
+		counters = __n_counters();
+	}
+
+	return counters;
+}
+
+static void reset_counters(void *arg)
+{
+	int counters = (int)(long)arg;
+	switch (counters) {
+	case 4:
+		w_c0_perfctrl3(0);
+		w_c0_perfcntr3(0);
+	case 3:
+		w_c0_perfctrl2(0);
+		w_c0_perfcntr2(0);
+	case 2:
+		w_c0_perfctrl1(0);
+		w_c0_perfcntr1(0);
+	case 1:
+		w_c0_perfctrl0(0);
+		w_c0_perfcntr0(0);
+	}
+}
+
+static int __init mipsxx_init(void)
+{
+	int counters;
+
+	counters = n_counters();
+	if (counters == 0) {
+		printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
+		return -ENODEV;
+	}
+
+#ifdef CONFIG_MIPS_MT_SMP
+	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
+	if (!cpu_has_mipsmt_pertccounters)
+		counters = counters_total_to_per_cpu(counters);
+#endif
+	on_each_cpu(reset_counters, (void *)(long)counters, 1);
+
+	op_model_mipsxx_ops.num_counters = counters;
+	switch (current_cpu_type()) {
+	case CPU_20KC:
+		op_model_mipsxx_ops.cpu_type = "mips/20K";
+		break;
+
+	case CPU_24K:
+		op_model_mipsxx_ops.cpu_type = "mips/24K";
+		break;
+
+	case CPU_25KF:
+		op_model_mipsxx_ops.cpu_type = "mips/25K";
+		break;
+
+	case CPU_1004K:
+#if 0
+		/* FIXME: report as 34K for now */
+		op_model_mipsxx_ops.cpu_type = "mips/1004K";
+		break;
+#endif
+
+	case CPU_34K:
+		op_model_mipsxx_ops.cpu_type = "mips/34K";
+		break;
+
+	case CPU_74K:
+		op_model_mipsxx_ops.cpu_type = "mips/74K";
+		break;
+
+	case CPU_5KC:
+		op_model_mipsxx_ops.cpu_type = "mips/5K";
+		break;
+
+	case CPU_R10000:
+		if ((current_cpu_data.processor_id & 0xff) == 0x20)
+			op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
+		else
+			op_model_mipsxx_ops.cpu_type = "mips/r10000";
+		break;
+
+	case CPU_R12000:
+	case CPU_R14000:
+		op_model_mipsxx_ops.cpu_type = "mips/r12000";
+		break;
+
+	case CPU_SB1:
+	case CPU_SB1A:
+		op_model_mipsxx_ops.cpu_type = "mips/sb1";
+		break;
+
+	default:
+		printk(KERN_ERR "Profiling unsupported for this CPU\n");
+
+		return -ENODEV;
+	}
+
+	save_perf_irq = perf_irq;
+	perf_irq = mipsxx_perfcount_handler;
+
+	return 0;
+}
+
+static void mipsxx_exit(void)
+{
+	int counters = op_model_mipsxx_ops.num_counters;
+
+	counters = counters_per_cpu_to_total(counters);
+	on_each_cpu(reset_counters, (void *)(long)counters, 1);
+
+	perf_irq = save_perf_irq;
+}
+
+struct op_mips_model op_model_mipsxx_ops = {
+	.reg_setup	= mipsxx_reg_setup,
+	.cpu_setup	= mipsxx_cpu_setup,
+	.init		= mipsxx_init,
+	.exit		= mipsxx_exit,
+	.cpu_start	= mipsxx_cpu_start,
+	.cpu_stop	= mipsxx_cpu_stop,
+};
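__n_counters() above probes the counter bank the architectural way: the
Config1.PC bit advertises that performance counters exist at all, and the
MORE bit (bit 31) in each control register indicates that another
counter/control pair follows. A standalone sketch of the same walk (not
kernel code; the CP0 reads are replaced by plain parameters holding made-up
values):

	#include <stdio.h>
	#include <stdint.h>

	#define M_CONFIG1_PC	(1 << 4)
	#define M_PERFCTL_MORE	(1u << 31)

	static int n_counters(uint32_t config1, const uint32_t perfctrl[4])
	{
		int n = 0;

		if (!(config1 & M_CONFIG1_PC))	/* no counters at all */
			return 0;
		while (n < 3 && (perfctrl[n] & M_PERFCTL_MORE))
			n++;			/* another pair follows */
		return n + 1;
	}

	int main(void)
	{
		/* e.g. a two-counter core: only the first control register
		   has MORE set */
		uint32_t perfctrl[4] = { M_PERFCTL_MORE, 0, 0, 0 };

		printf("%d counters\n", n_counters(M_CONFIG1_PC, perfctrl));
		return 0;
	}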
diff --git a/arch/mips/oprofile/op_model_rm9000.c b/arch/mips/oprofile/op_model_rm9000.c
new file mode 100644
index 00000000..3aa81384
--- /dev/null
+++ b/arch/mips/oprofile/op_model_rm9000.c
@@ -0,0 +1,138 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 by Ralf Baechle
+ */
+#include <linux/init.h>
+#include <linux/oprofile.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+
+#include "op_impl.h"
+
+#define RM9K_COUNTER1_EVENT(event)	((event) << 0)
+#define RM9K_COUNTER1_SUPERVISOR	(1ULL <<  7)
+#define RM9K_COUNTER1_KERNEL		(1ULL <<  8)
+#define RM9K_COUNTER1_USER		(1ULL <<  9)
+#define RM9K_COUNTER1_ENABLE		(1ULL << 10)
+#define RM9K_COUNTER1_OVERFLOW		(1ULL << 15)
+
+#define RM9K_COUNTER2_EVENT(event)	((event) << 16)
+#define RM9K_COUNTER2_SUPERVISOR	(1ULL << 23)
+#define RM9K_COUNTER2_KERNEL		(1ULL << 24)
+#define RM9K_COUNTER2_USER		(1ULL << 25)
+#define RM9K_COUNTER2_ENABLE		(1ULL << 26)
+#define RM9K_COUNTER2_OVERFLOW		(1ULL << 31)
+
+extern unsigned int rm9000_perfcount_irq;
+
+static struct rm9k_register_config {
+	unsigned int control;
+	unsigned int reset_counter1;
+	unsigned int reset_counter2;
+} reg;
+
+/* Compute all of the registers in preparation for enabling profiling.  */
+
+static void rm9000_reg_setup(struct op_counter_config *ctr)
+{
+	unsigned int control = 0;
+
+	/* Compute the performance counter control word.  */
+	/* For now count kernel and user mode */
+	if (ctr[0].enabled)
+		control |= RM9K_COUNTER1_EVENT(ctr[0].event) |
+			   RM9K_COUNTER1_KERNEL |
+			   RM9K_COUNTER1_USER |
+			   RM9K_COUNTER1_ENABLE;
+	if (ctr[1].enabled)
+		control |= RM9K_COUNTER2_EVENT(ctr[1].event) |
+			   RM9K_COUNTER2_KERNEL |
+			   RM9K_COUNTER2_USER |
+			   RM9K_COUNTER2_ENABLE;
+	reg.control = control;
+
+	reg.reset_counter1 = 0x80000000 - ctr[0].count;
+	reg.reset_counter2 = 0x80000000 - ctr[1].count;
+}
+
+/* Program all of the registers in preparation for enabling profiling.  */
+
+static void rm9000_cpu_setup(void *args)
+{
+	uint64_t perfcount;
+
+	perfcount = ((uint64_t) reg.reset_counter2 << 32) | reg.reset_counter1;
+	write_c0_perfcount(perfcount);
+}
+
+static void rm9000_cpu_start(void *args)
+{
+	/* Start all counters on current CPU */
+	write_c0_perfcontrol(reg.control);
+}
+
+static void rm9000_cpu_stop(void *args)
+{
+	/* Stop all counters on current CPU */
+	write_c0_perfcontrol(0);
+}
+
+static irqreturn_t rm9000_perfcount_handler(int irq, void *dev_id)
+{
+	unsigned int control = read_c0_perfcontrol();
+	struct pt_regs *regs = get_irq_regs();
+	uint32_t counter1, counter2;
+	uint64_t counters;
+
+	/*
+	 * RM9000 combines two 32-bit performance counters into a single
+	 * 64-bit coprocessor zero register.  To avoid a race updating the
+	 * registers we need to stop the counters while we're messing with
+	 * them ...
+	 */
+	write_c0_perfcontrol(0);
+
+	counters = read_c0_perfcount();
+	counter1 = counters;
+	counter2 = counters >> 32;
+
+	if (control & RM9K_COUNTER1_OVERFLOW) {
+		oprofile_add_sample(regs, 0);
+		counter1 = reg.reset_counter1;
+	}
+	if (control & RM9K_COUNTER2_OVERFLOW) {
+		oprofile_add_sample(regs, 1);
+		counter2 = reg.reset_counter2;
+	}
+
+	counters = ((uint64_t)counter2 << 32) | counter1;
+	write_c0_perfcount(counters);
+	write_c0_perfcontrol(reg.control);
+
+	return IRQ_HANDLED;
+}
+
+static int __init rm9000_init(void)
+{
+	return request_irq(rm9000_perfcount_irq, rm9000_perfcount_handler,
+			   0, "Perfcounter", NULL);
+}
+
+static void rm9000_exit(void)
+{
+	free_irq(rm9000_perfcount_irq, NULL);
+}
+
+struct op_mips_model op_model_rm9000_ops = {
+	.reg_setup	= rm9000_reg_setup,
+	.cpu_setup	= rm9000_cpu_setup,
+	.init		= rm9000_init,
+	.exit		= rm9000_exit,
+	.cpu_start	= rm9000_cpu_start,
+	.cpu_stop	= rm9000_cpu_stop,
+	.cpu_type	= "mips/rm9000",
+	.num_counters	= 2
+};
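One detail worth calling out in the RM9000 model: both 32-bit counters live
in a single 64-bit CP0 register, which is why the interrupt handler stops
counting, repacks both halves, and performs one 64-bit write before
re-enabling, as the comment in rm9000_perfcount_handler() explains. A
standalone illustration of the packing used by rm9000_cpu_setup() and the
handler (made-up reload values):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t reset_counter1 = 0x7ffe7960;	/* counter 1 reload */
		uint32_t reset_counter2 = 0x7fffd8f0;	/* counter 2 reload */
		uint64_t perfcount = ((uint64_t)reset_counter2 << 32)
				     | reset_counter1;

		printf("low  half = 0x%08x\n", (uint32_t)perfcount);
		printf("high half = 0x%08x\n", (uint32_t)(perfcount >> 32));
		return 0;
	}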