Diffstat (limited to 'ANDROID_3.4.5/tools/perf/util')
-rwxr-xr-x  ANDROID_3.4.5/tools/perf/util/PERF-VERSION-GEN  42
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/abspath.c  37
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/alias.c  77
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/annotate.c  611
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/annotate.h  107
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/bitmap.c  31
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/build-id.c  86
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/build-id.h  10
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/cache.h  102
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/callchain.c  461
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/callchain.h  144
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/cgroup.c  177
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/cgroup.h  17
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/color.c  325
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/color.h  46
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/config.c  507
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/cpumap.c  195
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/cpumap.h  17
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ctype.c  39
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/debug.c  105
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/debug.h  36
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/debugfs.c  114
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/debugfs.h  12
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/dwarf-aux.c  843
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/dwarf-aux.h  111
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/environment.c  9
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/event.c  898
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/event.h  212
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/evlist.c  866
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/evlist.h  125
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/evsel.c  677
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/evsel.h  175
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/exec_cmd.c  148
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/exec_cmd.h  12
-rwxr-xr-x  ANDROID_3.4.5/tools/perf/util/generate-cmdlist.sh  24
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/gtk/browser.c  189
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/gtk/gtk.h  8
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/header.c  2452
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/header.h  138
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/help.c  338
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/help.h  29
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/hist.c  1362
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/hist.h  169
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/hweight.c  31
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/asm/alternative-asm.h  8
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/asm/asm-offsets.h  1
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/asm/bug.h  22
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/asm/byteorder.h  2
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/asm/cpufeature.h  9
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/asm/dwarf2.h  13
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/asm/hweight.h  8
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/asm/swab.h  1
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/asm/system.h  1
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/asm/uaccess.h  14
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/asm/unistd_32.h  1
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/asm/unistd_64.h  1
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/dwarf-regs.h  8
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/bitmap.h  46
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/bitops.h  151
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/compiler.h  14
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/const.h  1
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/ctype.h  1
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/export.h  6
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/hash.h  5
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/kernel.h  111
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/linkage.h  13
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/list.h  29
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/poison.h  1
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/prefetch.h  6
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/rbtree.h  1
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/string.h  1
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/include/linux/types.h  21
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/levenshtein.c  84
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/levenshtein.h  8
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/map.c  715
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/map.h  258
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/pager.c  96
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/parse-events.c  1062
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/parse-events.h  93
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/parse-events.l  127
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/parse-events.y  229
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/parse-options.c  577
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/parse-options.h  192
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/path.c  157
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/pmu.c  469
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/pmu.h  41
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/pmu.l  43
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/pmu.y  93
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/probe-event.c  2097
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/probe-event.h  138
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/probe-finder.c  1580
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/probe-finder.h  107
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/pstack.c  75
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/pstack.h  14
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/python-ext-sources  19
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/python.c  1044
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/quote.c  54
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/quote.h  29
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/run-command.c  214
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/run-command.h  58
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/scripting-engines/trace-event-perl.c  634
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/scripting-engines/trace-event-python.c  600
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/session.c  1508
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/session.h  160
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/setup.py  44
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/sigchain.c  52
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/sigchain.h  10
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/sort.c  528
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/sort.h  123
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/strbuf.c  134
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/strbuf.h  92
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/strfilter.c  199
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/strfilter.h  48
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/string.c  315
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/strlist.c  200
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/strlist.h  78
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/svghelper.c  501
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/svghelper.h  28
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/symbol.c  2785
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/symbol.h  270
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/sysfs.c  60
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/sysfs.h  6
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/thread.c  140
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/thread.h  44
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/thread_map.c  297
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/thread_map.h  24
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/tool.h  50
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/top.c  104
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/top.h  55
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/trace-event-info.c  542
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/trace-event-parse.c  3153
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/trace-event-read.c  538
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/trace-event-scripting.c  166
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/trace-event.h  317
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/types.h  19
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/browser.c  597
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/browser.h  65
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/browsers/annotate.c  433
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/browsers/hists.c  1341
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/browsers/map.c  154
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/browsers/map.h  6
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/helpline.c  79
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/helpline.h  16
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/keysyms.h  27
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/libslang.h  29
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/progress.c  32
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/progress.h  8
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/setup.c  155
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/ui.h  11
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/util.c  250
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/ui/util.h  14
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/usage.c  122
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/util.c  150
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/util.h  268
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/values.c  232
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/values.h  27
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/wrapper.c  40
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/xyarray.c  20
-rw-r--r--  ANDROID_3.4.5/tools/perf/util/xyarray.h  20
159 files changed, 40203 insertions, 0 deletions
diff --git a/ANDROID_3.4.5/tools/perf/util/PERF-VERSION-GEN b/ANDROID_3.4.5/tools/perf/util/PERF-VERSION-GEN
new file mode 100755
index 00000000..ad73300f
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/PERF-VERSION-GEN
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+if [ $# -eq 1 ] ; then
+ OUTPUT=$1
+fi
+
+GVF=${OUTPUT}PERF-VERSION-FILE
+
+LF='
+'
+
+# First check if there is a .git to get the version from git describe
+# otherwise try to get the version from the kernel makefile
+if test -d ../../.git -o -f ../../.git &&
+ VN=$(git describe --abbrev=4 HEAD 2>/dev/null) &&
+ case "$VN" in
+ *$LF*) (exit 1) ;;
+ v[0-9]*)
+ git update-index -q --refresh
+ test -z "$(git diff-index --name-only HEAD --)" ||
+ VN="$VN-dirty" ;;
+ esac
+then
+ VN=$(echo "$VN" | sed -e 's/-/./g');
+else
+ VN=$(MAKEFLAGS= make -sC ../.. kernelversion)
+fi
+
+VN=$(expr "$VN" : v*'\(.*\)')
+
+if test -r $GVF
+then
+ VC=$(sed -e 's/^PERF_VERSION = //' <$GVF)
+else
+ VC=unset
+fi
+test "$VN" = "$VC" || {
+ echo >&2 "PERF_VERSION = $VN"
+ echo "PERF_VERSION = $VN" >$GVF
+}
+
+
diff --git a/ANDROID_3.4.5/tools/perf/util/abspath.c b/ANDROID_3.4.5/tools/perf/util/abspath.c
new file mode 100644
index 00000000..0e76affe
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/abspath.c
@@ -0,0 +1,37 @@
+#include "cache.h"
+
+static const char *get_pwd_cwd(void)
+{
+ static char cwd[PATH_MAX + 1];
+ char *pwd;
+ struct stat cwd_stat, pwd_stat;
+ if (getcwd(cwd, PATH_MAX) == NULL)
+ return NULL;
+ pwd = getenv("PWD");
+ if (pwd && strcmp(pwd, cwd)) {
+ stat(cwd, &cwd_stat);
+ if (!stat(pwd, &pwd_stat) &&
+ pwd_stat.st_dev == cwd_stat.st_dev &&
+ pwd_stat.st_ino == cwd_stat.st_ino) {
+ strlcpy(cwd, pwd, PATH_MAX);
+ }
+ }
+ return cwd;
+}
+
+const char *make_nonrelative_path(const char *path)
+{
+ static char buf[PATH_MAX + 1];
+
+ if (is_absolute_path(path)) {
+ if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX)
+ die("Too long path: %.*s", 60, path);
+ } else {
+ const char *cwd = get_pwd_cwd();
+ if (!cwd)
+ die("Cannot determine the current working directory");
+ if (snprintf(buf, PATH_MAX, "%s/%s", cwd, path) >= PATH_MAX)
+ die("Too long path: %.*s", 60, path);
+ }
+ return buf;
+}
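
For orientation, here is a minimal usage sketch of make_nonrelative_path() (a hypothetical standalone program, not part of the patch; it assumes cache.h is on the include path and that the program is linked against perf's util objects):

#include <stdio.h>
#include "cache.h"              /* declares make_nonrelative_path() */

int main(int argc, const char **argv)
{
        const char *path = argc > 1 ? argv[1] : "util/abspath.c";

        /* A relative path gets the current working directory prepended;
         * an absolute path is returned unchanged. The result lives in a
         * static buffer and is overwritten by the next call. */
        printf("%s\n", make_nonrelative_path(path));
        return 0;
}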
diff --git a/ANDROID_3.4.5/tools/perf/util/alias.c b/ANDROID_3.4.5/tools/perf/util/alias.c
new file mode 100644
index 00000000..b8144e80
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/alias.c
@@ -0,0 +1,77 @@
+#include "cache.h"
+
+static const char *alias_key;
+static char *alias_val;
+
+static int alias_lookup_cb(const char *k, const char *v, void *cb __used)
+{
+ if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) {
+ if (!v)
+ return config_error_nonbool(k);
+ alias_val = strdup(v);
+ return 0;
+ }
+ return 0;
+}
+
+char *alias_lookup(const char *alias)
+{
+ alias_key = alias;
+ alias_val = NULL;
+ perf_config(alias_lookup_cb, NULL);
+ return alias_val;
+}
+
+int split_cmdline(char *cmdline, const char ***argv)
+{
+ int src, dst, count = 0, size = 16;
+ char quoted = 0;
+
+ *argv = malloc(sizeof(char*) * size);
+
+ /* split alias_string */
+ (*argv)[count++] = cmdline;
+ for (src = dst = 0; cmdline[src];) {
+ char c = cmdline[src];
+ if (!quoted && isspace(c)) {
+ cmdline[dst++] = 0;
+ while (cmdline[++src]
+ && isspace(cmdline[src]))
+ ; /* skip */
+ if (count >= size) {
+ size += 16;
+ *argv = realloc(*argv, sizeof(char*) * size);
+ }
+ (*argv)[count++] = cmdline + dst;
+ } else if (!quoted && (c == '\'' || c == '"')) {
+ quoted = c;
+ src++;
+ } else if (c == quoted) {
+ quoted = 0;
+ src++;
+ } else {
+ if (c == '\\' && quoted != '\'') {
+ src++;
+ c = cmdline[src];
+ if (!c) {
+ free(*argv);
+ *argv = NULL;
+ return error("cmdline ends with \\");
+ }
+ }
+ cmdline[dst++] = c;
+ src++;
+ }
+ }
+
+ cmdline[dst] = 0;
+
+ if (quoted) {
+ free(*argv);
+ *argv = NULL;
+ return error("unclosed quote");
+ }
+
+ return count;
+}
+
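To make split_cmdline()'s in-place tokenization concrete, here is a small hypothetical example (the alias string is modified in place and the returned argv[] points into it, so the string must outlive the vector):

#include <stdio.h>
#include <stdlib.h>
#include "cache.h"              /* declares split_cmdline() */

int main(void)
{
        char alias[] = "record -e 'cycles:u' -g sleep 1";
        const char **av;
        int ac, i;

        ac = split_cmdline(alias, &av);
        if (ac < 0)
                return 1;

        /* Quotes are stripped and backslash escapes resolved, yielding:
         * record, -e, cycles:u, -g, sleep, 1 */
        for (i = 0; i < ac; i++)
                printf("argv[%d] = %s\n", i, av[i]);

        free(av);
        return 0;
}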
diff --git a/ANDROID_3.4.5/tools/perf/util/annotate.c b/ANDROID_3.4.5/tools/perf/util/annotate.c
new file mode 100644
index 00000000..08c6d138
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/annotate.c
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-annotate.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include "util.h"
+#include "build-id.h"
+#include "color.h"
+#include "cache.h"
+#include "symbol.h"
+#include "debug.h"
+#include "annotate.h"
+#include <pthread.h>
+
+const char *disassembler_style;
+
+int symbol__annotate_init(struct map *map __used, struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ pthread_mutex_init(&notes->lock, NULL);
+ return 0;
+}
+
+int symbol__alloc_hist(struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ const size_t size = sym->end - sym->start + 1;
+ size_t sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));
+
+ notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
+ if (notes->src == NULL)
+ return -1;
+ notes->src->sizeof_sym_hist = sizeof_sym_hist;
+ notes->src->nr_histograms = symbol_conf.nr_events;
+ INIT_LIST_HEAD(&notes->src->source);
+ return 0;
+}
+
+void symbol__annotate_zero_histograms(struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+
+ pthread_mutex_lock(&notes->lock);
+ if (notes->src != NULL)
+ memset(notes->src->histograms, 0,
+ notes->src->nr_histograms * notes->src->sizeof_sym_hist);
+ pthread_mutex_unlock(&notes->lock);
+}
+
+int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+ int evidx, u64 addr)
+{
+ unsigned offset;
+ struct annotation *notes;
+ struct sym_hist *h;
+
+ notes = symbol__annotation(sym);
+ if (notes->src == NULL)
+ return -ENOMEM;
+
+ pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
+
+ if (addr < sym->start || addr > sym->end)
+ return -ERANGE;
+
+ offset = addr - sym->start;
+ h = annotation__histogram(notes, evidx);
+ h->sum++;
+ h->addr[offset]++;
+
+ pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
+ ", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name,
+ addr, addr - sym->start, evidx, h->addr[offset]);
+ return 0;
+}
+
+static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize)
+{
+ struct objdump_line *self = malloc(sizeof(*self) + privsize);
+
+ if (self != NULL) {
+ self->offset = offset;
+ self->line = line;
+ }
+
+ return self;
+}
+
+void objdump_line__free(struct objdump_line *self)
+{
+ free(self->line);
+ free(self);
+}
+
+static void objdump__add_line(struct list_head *head, struct objdump_line *line)
+{
+ list_add_tail(&line->node, head);
+}
+
+struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
+ struct objdump_line *pos)
+{
+ list_for_each_entry_continue(pos, head, node)
+ if (pos->offset >= 0)
+ return pos;
+
+ return NULL;
+}
+
+static int objdump_line__print(struct objdump_line *oline, struct symbol *sym,
+ int evidx, u64 len, int min_pcnt,
+ int printed, int max_lines,
+ struct objdump_line *queue)
+{
+ static const char *prev_line;
+ static const char *prev_color;
+
+ if (oline->offset != -1) {
+ const char *path = NULL;
+ unsigned int hits = 0;
+ double percent = 0.0;
+ const char *color;
+ struct annotation *notes = symbol__annotation(sym);
+ struct source_line *src_line = notes->src->lines;
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+ s64 offset = oline->offset;
+ struct objdump_line *next;
+
+ next = objdump__get_next_ip_line(&notes->src->source, oline);
+
+ while (offset < (s64)len &&
+ (next == NULL || offset < next->offset)) {
+ if (src_line) {
+ if (path == NULL)
+ path = src_line[offset].path;
+ percent += src_line[offset].percent;
+ } else
+ hits += h->addr[offset];
+
+ ++offset;
+ }
+
+ if (src_line == NULL && h->sum)
+ percent = 100.0 * hits / h->sum;
+
+ if (percent < min_pcnt)
+ return -1;
+
+ if (max_lines && printed >= max_lines)
+ return 1;
+
+ if (queue != NULL) {
+ list_for_each_entry_from(queue, &notes->src->source, node) {
+ if (queue == oline)
+ break;
+ objdump_line__print(queue, sym, evidx, len,
+ 0, 0, 1, NULL);
+ }
+ }
+
+ color = get_percent_color(percent);
+
+ /*
+ * Also color the filename and line if needed, with
+ * the same color as the percentage. Don't print it
+ * twice for nearby colored addresses with the same filename:line
+ */
+ if (path) {
+ if (!prev_line || strcmp(prev_line, path)
+ || color != prev_color) {
+ color_fprintf(stdout, color, " %s", path);
+ prev_line = path;
+ prev_color = color;
+ }
+ }
+
+ color_fprintf(stdout, color, " %7.2f", percent);
+ printf(" : ");
+ color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", oline->line);
+ } else if (max_lines && printed >= max_lines)
+ return 1;
+ else {
+ if (queue)
+ return -1;
+
+ if (!*oline->line)
+ printf(" :\n");
+ else
+ printf(" : %s\n", oline->line);
+ }
+
+ return 0;
+}
+
+static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
+ FILE *file, size_t privsize)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct objdump_line *objdump_line;
+ char *line = NULL, *tmp, *tmp2, *c;
+ size_t line_len;
+ s64 line_ip, offset = -1;
+
+ if (getline(&line, &line_len, file) < 0)
+ return -1;
+
+ if (!line)
+ return -1;
+
+ while (line_len != 0 && isspace(line[line_len - 1]))
+ line[--line_len] = '\0';
+
+ c = strchr(line, '\n');
+ if (c)
+ *c = 0;
+
+ line_ip = -1;
+
+ /*
+ * Strip leading spaces:
+ */
+ tmp = line;
+ while (*tmp) {
+ if (*tmp != ' ')
+ break;
+ tmp++;
+ }
+
+ if (*tmp) {
+ /*
+ * Parse hexa addresses followed by ':'
+ */
+ line_ip = strtoull(tmp, &tmp2, 16);
+ if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
+ line_ip = -1;
+ }
+
+ if (line_ip != -1) {
+ u64 start = map__rip_2objdump(map, sym->start),
+ end = map__rip_2objdump(map, sym->end);
+
+ offset = line_ip - start;
+ if (offset < 0 || (u64)line_ip > end)
+ offset = -1;
+ }
+
+ objdump_line = objdump_line__new(offset, line, privsize);
+ if (objdump_line == NULL) {
+ free(line);
+ return -1;
+ }
+ objdump__add_line(&notes->src->source, objdump_line);
+
+ return 0;
+}
+
+int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
+{
+ struct dso *dso = map->dso;
+ char *filename = dso__build_id_filename(dso, NULL, 0);
+ bool free_filename = true;
+ char command[PATH_MAX * 2];
+ FILE *file;
+ int err = 0;
+ char symfs_filename[PATH_MAX];
+
+ if (filename) {
+ snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
+ symbol_conf.symfs, filename);
+ }
+
+ if (filename == NULL) {
+ if (dso->has_build_id) {
+ pr_err("Can't annotate %s: not enough memory\n",
+ sym->name);
+ return -ENOMEM;
+ }
+ goto fallback;
+ } else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
+ strstr(command, "[kernel.kallsyms]") ||
+ access(symfs_filename, R_OK)) {
+ free(filename);
+fallback:
+ /*
+ * If we don't have build-ids or the build-id file isn't in the
+ * cache, or is just a kallsyms file, well, let's hope that this
+ * DSO is the same as when 'perf record' ran.
+ */
+ filename = dso->long_name;
+ snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
+ symbol_conf.symfs, filename);
+ free_filename = false;
+ }
+
+ if (dso->symtab_type == SYMTAB__KALLSYMS) {
+ char bf[BUILD_ID_SIZE * 2 + 16] = " with build id ";
+ char *build_id_msg = NULL;
+
+ if (dso->annotate_warned)
+ goto out_free_filename;
+
+ if (dso->has_build_id) {
+ build_id__sprintf(dso->build_id,
+ sizeof(dso->build_id), bf + 15);
+ build_id_msg = bf;
+ }
+ err = -ENOENT;
+ dso->annotate_warned = 1;
+ pr_err("Can't annotate %s:\n\n"
+ "No vmlinux file%s\nwas found in the path.\n\n"
+ "Please use:\n\n"
+ " perf buildid-cache -av vmlinux\n\n"
+ "or:\n\n"
+ " --vmlinux vmlinux\n",
+ sym->name, build_id_msg ?: "");
+ goto out_free_filename;
+ }
+
+ pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
+ filename, sym->name, map->unmap_ip(map, sym->start),
+ map->unmap_ip(map, sym->end));
+
+ pr_debug("annotating [%p] %30s : [%p] %30s\n",
+ dso, dso->long_name, sym, sym->name);
+
+ snprintf(command, sizeof(command),
+ "objdump %s%s --start-address=0x%016" PRIx64
+ " --stop-address=0x%016" PRIx64
+ " -d %s %s -C %s|grep -v %s|expand",
+ disassembler_style ? "-M " : "",
+ disassembler_style ? disassembler_style : "",
+ map__rip_2objdump(map, sym->start),
+ map__rip_2objdump(map, sym->end+1),
+ symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
+ symbol_conf.annotate_src ? "-S" : "",
+ symfs_filename, filename);
+
+ pr_debug("Executing: %s\n", command);
+
+ file = popen(command, "r");
+ if (!file)
+ goto out_free_filename;
+
+ while (!feof(file))
+ if (symbol__parse_objdump_line(sym, map, file, privsize) < 0)
+ break;
+
+ pclose(file);
+out_free_filename:
+ if (free_filename)
+ free(filename);
+ return err;
+}
+
+static void insert_source_line(struct rb_root *root, struct source_line *src_line)
+{
+ struct source_line *iter;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct source_line, node);
+
+ if (src_line->percent > iter->percent)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&src_line->node, parent, p);
+ rb_insert_color(&src_line->node, root);
+}
+
+static void symbol__free_source_line(struct symbol *sym, int len)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct source_line *src_line = notes->src->lines;
+ int i;
+
+ for (i = 0; i < len; i++)
+ free(src_line[i].path);
+
+ free(src_line);
+ notes->src->lines = NULL;
+}
+
+/* Get the filename:line for the colored entries */
+static int symbol__get_source_line(struct symbol *sym, struct map *map,
+ int evidx, struct rb_root *root, int len,
+ const char *filename)
+{
+ u64 start;
+ int i;
+ char cmd[PATH_MAX * 2];
+ struct source_line *src_line;
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+
+ if (!h->sum)
+ return 0;
+
+ src_line = notes->src->lines = calloc(len, sizeof(struct source_line));
+ if (!notes->src->lines)
+ return -1;
+
+ start = map__rip_2objdump(map, sym->start);
+
+ for (i = 0; i < len; i++) {
+ char *path = NULL;
+ size_t line_len;
+ u64 offset;
+ FILE *fp;
+
+ src_line[i].percent = 100.0 * h->addr[i] / h->sum;
+ if (src_line[i].percent <= 0.5)
+ continue;
+
+ offset = start + i;
+ sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset);
+ fp = popen(cmd, "r");
+ if (!fp)
+ continue;
+
+ if (getline(&path, &line_len, fp) < 0 || !line_len)
+ goto next;
+
+ src_line[i].path = malloc(sizeof(char) * line_len + 1);
+ if (!src_line[i].path)
+ goto next;
+
+ strcpy(src_line[i].path, path);
+ insert_source_line(root, &src_line[i]);
+
+ next:
+ pclose(fp);
+ }
+
+ return 0;
+}
+
+static void print_summary(struct rb_root *root, const char *filename)
+{
+ struct source_line *src_line;
+ struct rb_node *node;
+
+ printf("\nSorted summary for file %s\n", filename);
+ printf("----------------------------------------------\n\n");
+
+ if (RB_EMPTY_ROOT(root)) {
+ printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
+ return;
+ }
+
+ node = rb_first(root);
+ while (node) {
+ double percent;
+ const char *color;
+ char *path;
+
+ src_line = rb_entry(node, struct source_line, node);
+ percent = src_line->percent;
+ color = get_percent_color(percent);
+ path = src_line->path;
+
+ color_fprintf(stdout, color, " %7.2f %s", percent, path);
+ node = rb_next(node);
+ }
+}
+
+static void symbol__annotate_hits(struct symbol *sym, int evidx)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+ u64 len = sym->end - sym->start, offset;
+
+ for (offset = 0; offset < len; ++offset)
+ if (h->addr[offset] != 0)
+ printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
+ sym->start + offset, h->addr[offset]);
+ printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
+}
+
+int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
+ bool full_paths, int min_pcnt, int max_lines,
+ int context)
+{
+ struct dso *dso = map->dso;
+ const char *filename = dso->long_name, *d_filename;
+ struct annotation *notes = symbol__annotation(sym);
+ struct objdump_line *pos, *queue = NULL;
+ int printed = 2, queue_len = 0;
+ int more = 0;
+ u64 len;
+
+ if (full_paths)
+ d_filename = filename;
+ else
+ d_filename = basename(filename);
+
+ len = sym->end - sym->start;
+
+ printf(" Percent | Source code & Disassembly of %s\n", d_filename);
+ printf("------------------------------------------------\n");
+
+ if (verbose)
+ symbol__annotate_hits(sym, evidx);
+
+ list_for_each_entry(pos, &notes->src->source, node) {
+ if (context && queue == NULL) {
+ queue = pos;
+ queue_len = 0;
+ }
+
+ switch (objdump_line__print(pos, sym, evidx, len, min_pcnt,
+ printed, max_lines, queue)) {
+ case 0:
+ ++printed;
+ if (context) {
+ printed += queue_len;
+ queue = NULL;
+ queue_len = 0;
+ }
+ break;
+ case 1:
+ /* filtered by max_lines */
+ ++more;
+ break;
+ case -1:
+ default:
+ /*
+ * Filtered by min_pcnt or non IP lines when
+ * context != 0
+ */
+ if (!context)
+ break;
+ if (queue_len == context)
+ queue = list_entry(queue->node.next, typeof(*queue), node);
+ else
+ ++queue_len;
+ break;
+ }
+ }
+
+ return more;
+}
+
+void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+
+ memset(h, 0, notes->src->sizeof_sym_hist);
+}
+
+void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+ int len = sym->end - sym->start, offset;
+
+ h->sum = 0;
+ for (offset = 0; offset < len; ++offset) {
+ h->addr[offset] = h->addr[offset] * 7 / 8;
+ h->sum += h->addr[offset];
+ }
+}
+
+void objdump_line_list__purge(struct list_head *head)
+{
+ struct objdump_line *pos, *n;
+
+ list_for_each_entry_safe(pos, n, head, node) {
+ list_del(&pos->node);
+ objdump_line__free(pos);
+ }
+}
+
+int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
+ bool print_lines, bool full_paths, int min_pcnt,
+ int max_lines)
+{
+ struct dso *dso = map->dso;
+ const char *filename = dso->long_name;
+ struct rb_root source_line = RB_ROOT;
+ u64 len;
+
+ if (symbol__annotate(sym, map, 0) < 0)
+ return -1;
+
+ len = sym->end - sym->start;
+
+ if (print_lines) {
+ symbol__get_source_line(sym, map, evidx, &source_line,
+ len, filename);
+ print_summary(&source_line, filename);
+ }
+
+ symbol__annotate_printf(sym, map, evidx, full_paths,
+ min_pcnt, max_lines, 0);
+ if (print_lines)
+ symbol__free_source_line(sym, len);
+
+ objdump_line_list__purge(&symbol__annotation(sym)->src->source);
+
+ return 0;
+}
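
Taken together, the calling sequence a consumer such as 'perf annotate' is expected to follow looks roughly like this. This is a hedged sketch, not code from the patch: it assumes sym/map were already resolved elsewhere and that symbol_conf.nr_events was set before allocating the histograms.

#include <errno.h>
#include "annotate.h"

/* Hypothetical driver: record one sample and print the annotation
 * for event index 0. */
static int annotate_one(struct symbol *sym, struct map *map, u64 sample_addr)
{
        int err;

        /* once per symbol: init the lock and the per-event histograms */
        symbol__annotate_init(map, sym);
        if (symbol__alloc_hist(sym) < 0)
                return -ENOMEM;

        /* once per sample whose address falls inside the symbol */
        err = symbol__inc_addr_samples(sym, map, 0, sample_addr);
        if (err)
                return err;

        /* runs objdump, prints the annotated disassembly and frees the
         * parsed objdump lines again */
        return symbol__tty_annotate(sym, map, 0, false, false, 0, 0);
}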
diff --git a/ANDROID_3.4.5/tools/perf/util/annotate.h b/ANDROID_3.4.5/tools/perf/util/annotate.h
new file mode 100644
index 00000000..efa5dc82
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/annotate.h
@@ -0,0 +1,107 @@
+#ifndef __PERF_ANNOTATE_H
+#define __PERF_ANNOTATE_H
+
+#include <stdbool.h>
+#include "types.h"
+#include "symbol.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+struct objdump_line {
+ struct list_head node;
+ s64 offset;
+ char *line;
+};
+
+void objdump_line__free(struct objdump_line *self);
+struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
+ struct objdump_line *pos);
+
+struct sym_hist {
+ u64 sum;
+ u64 addr[0];
+};
+
+struct source_line {
+ struct rb_node node;
+ double percent;
+ char *path;
+};
+
+/** struct annotated_source - symbols with hits have this attached as in sannotation
+ *
+ * @histograms: Array of addr hit histograms per event being monitored
+ * @lines: If 'print_lines' is specified, per source code line percentages
+ * @source: source parsed from objdump -dS
+ *
+ * lines is allocated, percentages calculated and all sorted by percentage
+ * when the annotation is about to be presented, so the percentages are for
+ * one of the entries in the histogram array, i.e. for the event/counter being
+ * presented. It is deallocated right after symbol__{tui,tty,etc}_annotate
+ * returns.
+ */
+struct annotated_source {
+ struct list_head source;
+ struct source_line *lines;
+ int nr_histograms;
+ int sizeof_sym_hist;
+ struct sym_hist histograms[0];
+};
+
+struct annotation {
+ pthread_mutex_t lock;
+ struct annotated_source *src;
+};
+
+struct sannotation {
+ struct annotation annotation;
+ struct symbol symbol;
+};
+
+static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
+{
+ return (((void *)&notes->src->histograms) +
+ (notes->src->sizeof_sym_hist * idx));
+}
+
+static inline struct annotation *symbol__annotation(struct symbol *sym)
+{
+ struct sannotation *a = container_of(sym, struct sannotation, symbol);
+ return &a->annotation;
+}
+
+int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+ int evidx, u64 addr);
+int symbol__alloc_hist(struct symbol *sym);
+void symbol__annotate_zero_histograms(struct symbol *sym);
+
+int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
+int symbol__annotate_init(struct map *map __used, struct symbol *sym);
+int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
+ bool full_paths, int min_pcnt, int max_lines,
+ int context);
+void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
+void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
+void objdump_line_list__purge(struct list_head *head);
+
+int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
+ bool print_lines, bool full_paths, int min_pcnt,
+ int max_lines);
+
+#ifdef NO_NEWT_SUPPORT
+static inline int symbol__tui_annotate(struct symbol *sym __used,
+ struct map *map __used,
+ int evidx __used,
+ void(*timer)(void *arg) __used,
+ void *arg __used, int delay_secs __used)
+{
+ return 0;
+}
+#else
+int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
+ void(*timer)(void *arg), void *arg, int delay_secs);
+#endif
+
+extern const char *disassembler_style;
+
+#endif /* __PERF_ANNOTATE_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/bitmap.c b/ANDROID_3.4.5/tools/perf/util/bitmap.c
new file mode 100644
index 00000000..0a1adc11
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/bitmap.c
@@ -0,0 +1,31 @@
+/*
+ * From lib/bitmap.c
+ * Helper functions for bitmap.h.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+#include <linux/bitmap.h>
+
+int __bitmap_weight(const unsigned long *bitmap, int bits)
+{
+ int k, w = 0, lim = bits/BITS_PER_LONG;
+
+ for (k = 0; k < lim; k++)
+ w += hweight_long(bitmap[k]);
+
+ if (bits % BITS_PER_LONG)
+ w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
+
+ return w;
+}
+
+void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+{
+ int k;
+ int nr = BITS_TO_LONGS(bits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = bitmap1[k] | bitmap2[k];
+}
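
A tiny illustration of __bitmap_weight() follows (a hypothetical test program, assuming the perf tools' util/include path so that <linux/bitmap.h> resolves to the bundled header that declares these helpers):

#include <stdio.h>
#include <linux/bitmap.h>       /* util/include/linux/bitmap.h */

int main(void)
{
        unsigned long map[1] = { 0xff00UL };

        /* counts the set bits among the first 16 bits of the map: 8 */
        printf("%d\n", __bitmap_weight(map, 16));
        return 0;
}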
diff --git a/ANDROID_3.4.5/tools/perf/util/build-id.c b/ANDROID_3.4.5/tools/perf/util/build-id.c
new file mode 100644
index 00000000..dff9c7a7
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/build-id.c
@@ -0,0 +1,86 @@
+/*
+ * build-id.c
+ *
+ * build-id support
+ *
+ * Copyright (C) 2009, 2010 Red Hat Inc.
+ * Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+#include "util.h"
+#include <stdio.h>
+#include "build-id.h"
+#include "event.h"
+#include "symbol.h"
+#include <linux/kernel.h>
+#include "debug.h"
+#include "session.h"
+#include "tool.h"
+
+static int build_id__mark_dso_hit(struct perf_tool *tool __used,
+ union perf_event *event,
+ struct perf_sample *sample __used,
+ struct perf_evsel *evsel __used,
+ struct machine *machine)
+{
+ struct addr_location al;
+ u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
+
+ if (thread == NULL) {
+ pr_err("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+ event->ip.ip, &al);
+
+ if (al.map != NULL)
+ al.map->dso->hit = 1;
+
+ return 0;
+}
+
+static int perf_event__exit_del_thread(struct perf_tool *tool __used,
+ union perf_event *event,
+ struct perf_sample *sample __used,
+ struct machine *machine)
+{
+ struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
+
+ dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
+ event->fork.ppid, event->fork.ptid);
+
+ if (thread) {
+ rb_erase(&thread->rb_node, &machine->threads);
+ machine->last_match = NULL;
+ thread__delete(thread);
+ }
+
+ return 0;
+}
+
+struct perf_tool build_id__mark_dso_hit_ops = {
+ .sample = build_id__mark_dso_hit,
+ .mmap = perf_event__process_mmap,
+ .fork = perf_event__process_task,
+ .exit = perf_event__exit_del_thread,
+};
+
+char *dso__build_id_filename(struct dso *self, char *bf, size_t size)
+{
+ char build_id_hex[BUILD_ID_SIZE * 2 + 1];
+
+ if (!self->has_build_id)
+ return NULL;
+
+ build_id__sprintf(self->build_id, sizeof(self->build_id), build_id_hex);
+ if (bf == NULL) {
+ if (asprintf(&bf, "%s/.build-id/%.2s/%s", buildid_dir,
+ build_id_hex, build_id_hex + 2) < 0)
+ return NULL;
+ } else
+ snprintf(bf, size, "%s/.build-id/%.2s/%s", buildid_dir,
+ build_id_hex, build_id_hex + 2);
+ return bf;
+}
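
dso__build_id_filename() supports two calling conventions: pass a buffer and its size, or pass NULL to have a string allocated. A brief hypothetical sketch of the latter (names here are illustrative, not from the patch):

#include <stdlib.h>
#include "build-id.h"
#include "symbol.h"             /* struct dso */

/* Hypothetical helper: return a malloc'd path of the form
 * $buildid_dir/.build-id/ab/cdef... for a DSO, or NULL if the DSO
 * carries no build-id. The caller owns and frees the string. */
static char *buildid_cache_path(struct dso *dso)
{
        /* bf == NULL asks dso__build_id_filename() to asprintf() a
         * fresh buffer instead of filling a caller-provided one */
        return dso__build_id_filename(dso, NULL, 0);
}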
diff --git a/ANDROID_3.4.5/tools/perf/util/build-id.h b/ANDROID_3.4.5/tools/perf/util/build-id.h
new file mode 100644
index 00000000..a993ba87
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/build-id.h
@@ -0,0 +1,10 @@
+#ifndef PERF_BUILD_ID_H_
+#define PERF_BUILD_ID_H_ 1
+
+#include "session.h"
+
+extern struct perf_tool build_id__mark_dso_hit_ops;
+
+char *dso__build_id_filename(struct dso *self, char *bf, size_t size);
+
+#endif
diff --git a/ANDROID_3.4.5/tools/perf/util/cache.h b/ANDROID_3.4.5/tools/perf/util/cache.h
new file mode 100644
index 00000000..8dd224df
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/cache.h
@@ -0,0 +1,102 @@
+#ifndef __PERF_CACHE_H
+#define __PERF_CACHE_H
+
+#include <stdbool.h>
+#include "util.h"
+#include "strbuf.h"
+#include "../perf.h"
+
+#define CMD_EXEC_PATH "--exec-path"
+#define CMD_PERF_DIR "--perf-dir="
+#define CMD_WORK_TREE "--work-tree="
+#define CMD_DEBUGFS_DIR "--debugfs-dir="
+
+#define PERF_DIR_ENVIRONMENT "PERF_DIR"
+#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE"
+#define EXEC_PATH_ENVIRONMENT "PERF_EXEC_PATH"
+#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf"
+#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR"
+
+typedef int (*config_fn_t)(const char *, const char *, void *);
+extern int perf_default_config(const char *, const char *, void *);
+extern int perf_config(config_fn_t fn, void *);
+extern int perf_config_int(const char *, const char *);
+extern int perf_config_bool(const char *, const char *);
+extern int config_error_nonbool(const char *);
+extern const char *perf_config_dirname(const char *, const char *);
+
+/* pager.c */
+extern void setup_pager(void);
+extern const char *pager_program;
+extern int pager_in_use(void);
+extern int pager_use_color;
+
+extern int use_browser;
+
+#ifdef NO_NEWT_SUPPORT
+static inline void setup_browser(bool fallback_to_pager)
+{
+ if (fallback_to_pager)
+ setup_pager();
+}
+static inline void exit_browser(bool wait_for_ok __used) {}
+#else
+void setup_browser(bool fallback_to_pager);
+void exit_browser(bool wait_for_ok);
+#endif
+
+#ifdef NO_GTK2_SUPPORT
+static inline void perf_gtk_setup_browser(int argc __used, const char *argv[] __used, bool fallback_to_pager)
+{
+ if (fallback_to_pager)
+ setup_pager();
+}
+static inline void perf_gtk_exit_browser(bool wait_for_ok __used) {}
+#else
+void perf_gtk_setup_browser(int argc, const char *argv[], bool fallback_to_pager);
+void perf_gtk_exit_browser(bool wait_for_ok);
+#endif
+
+char *alias_lookup(const char *alias);
+int split_cmdline(char *cmdline, const char ***argv);
+
+#define alloc_nr(x) (((x)+16)*3/2)
+
+/*
+ * Realloc the buffer pointed at by variable 'x' so that it can hold
+ * at least 'nr' entries; the number of entries currently allocated
+ * is 'alloc', using the standard growing factor alloc_nr() macro.
+ *
+ * DO NOT USE any expression with side-effect for 'x' or 'alloc'.
+ */
+#define ALLOC_GROW(x, nr, alloc) \
+ do { \
+ if ((nr) > alloc) { \
+ if (alloc_nr(alloc) < (nr)) \
+ alloc = (nr); \
+ else \
+ alloc = alloc_nr(alloc); \
+ x = xrealloc((x), alloc * sizeof(*(x))); \
+ } \
+ } while(0)
+
+
+static inline int is_absolute_path(const char *path)
+{
+ return path[0] == '/';
+}
+
+const char *make_nonrelative_path(const char *path);
+char *strip_path_suffix(const char *path, const char *suffix);
+
+extern char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
+extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
+
+extern char *perf_pathdup(const char *fmt, ...)
+ __attribute__((format (printf, 1, 2)));
+
+#ifdef NO_STRLCPY
+extern size_t strlcpy(char *dest, const char *src, size_t size);
+#endif
+
+#endif /* __PERF_CACHE_H */
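
The ALLOC_GROW() comment above is easiest to read next to its typical call pattern; a hypothetical sketch (mirroring how the perf and git sources grow dynamic arrays):

#include "cache.h"

struct line_array {
        char            **lines;
        int             nr;
        int             alloc;          /* entries currently allocated */
};

/* Append one entry, growing the backing array geometrically. */
static void line_array__add(struct line_array *la, char *line)
{
        /* ensures room for at least nr + 1 entries; does nothing when
         * la->alloc is already large enough */
        ALLOC_GROW(la->lines, la->nr + 1, la->alloc);
        la->lines[la->nr++] = line;
}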
diff --git a/ANDROID_3.4.5/tools/perf/util/callchain.c b/ANDROID_3.4.5/tools/perf/util/callchain.c
new file mode 100644
index 00000000..9f7106a8
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/callchain.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
+ *
+ * Handle the callchains from the stream in an ad-hoc radix tree and then
+ * sort them in an rbtree.
+ *
+ * Using a radix tree for the code paths provides fast retrieval and factorizes
+ * memory use. It also lets us use the paths in a hierarchical graph view.
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <math.h>
+
+#include "util.h"
+#include "callchain.h"
+
+bool ip_callchain__valid(struct ip_callchain *chain,
+ const union perf_event *event)
+{
+ unsigned int chain_size = event->header.size;
+ chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
+ return chain->nr * sizeof(u64) <= chain_size;
+}
+
+#define chain_for_each_child(child, parent) \
+ list_for_each_entry(child, &parent->children, siblings)
+
+#define chain_for_each_child_safe(child, next, parent) \
+ list_for_each_entry_safe(child, next, &parent->children, siblings)
+
+static void
+rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
+ enum chain_mode mode)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct callchain_node *rnode;
+ u64 chain_cumul = callchain_cumul_hits(chain);
+
+ while (*p) {
+ u64 rnode_cumul;
+
+ parent = *p;
+ rnode = rb_entry(parent, struct callchain_node, rb_node);
+ rnode_cumul = callchain_cumul_hits(rnode);
+
+ switch (mode) {
+ case CHAIN_FLAT:
+ if (rnode->hit < chain->hit)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ break;
+ case CHAIN_GRAPH_ABS: /* Fall through */
+ case CHAIN_GRAPH_REL:
+ if (rnode_cumul < chain_cumul)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ break;
+ case CHAIN_NONE:
+ default:
+ break;
+ }
+ }
+
+ rb_link_node(&chain->rb_node, parent, p);
+ rb_insert_color(&chain->rb_node, root);
+}
+
+static void
+__sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
+ u64 min_hit)
+{
+ struct callchain_node *child;
+
+ chain_for_each_child(child, node)
+ __sort_chain_flat(rb_root, child, min_hit);
+
+ if (node->hit && node->hit >= min_hit)
+ rb_insert_callchain(rb_root, node, CHAIN_FLAT);
+}
+
+/*
+ * Once we have every callchain from the stream, we can now
+ * sort them by hit
+ */
+static void
+sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
+ u64 min_hit, struct callchain_param *param __used)
+{
+ __sort_chain_flat(rb_root, &root->node, min_hit);
+}
+
+static void __sort_chain_graph_abs(struct callchain_node *node,
+ u64 min_hit)
+{
+ struct callchain_node *child;
+
+ node->rb_root = RB_ROOT;
+
+ chain_for_each_child(child, node) {
+ __sort_chain_graph_abs(child, min_hit);
+ if (callchain_cumul_hits(child) >= min_hit)
+ rb_insert_callchain(&node->rb_root, child,
+ CHAIN_GRAPH_ABS);
+ }
+}
+
+static void
+sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
+ u64 min_hit, struct callchain_param *param __used)
+{
+ __sort_chain_graph_abs(&chain_root->node, min_hit);
+ rb_root->rb_node = chain_root->node.rb_root.rb_node;
+}
+
+static void __sort_chain_graph_rel(struct callchain_node *node,
+ double min_percent)
+{
+ struct callchain_node *child;
+ u64 min_hit;
+
+ node->rb_root = RB_ROOT;
+ min_hit = ceil(node->children_hit * min_percent);
+
+ chain_for_each_child(child, node) {
+ __sort_chain_graph_rel(child, min_percent);
+ if (callchain_cumul_hits(child) >= min_hit)
+ rb_insert_callchain(&node->rb_root, child,
+ CHAIN_GRAPH_REL);
+ }
+}
+
+static void
+sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
+ u64 min_hit __used, struct callchain_param *param)
+{
+ __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
+ rb_root->rb_node = chain_root->node.rb_root.rb_node;
+}
+
+int callchain_register_param(struct callchain_param *param)
+{
+ switch (param->mode) {
+ case CHAIN_GRAPH_ABS:
+ param->sort = sort_chain_graph_abs;
+ break;
+ case CHAIN_GRAPH_REL:
+ param->sort = sort_chain_graph_rel;
+ break;
+ case CHAIN_FLAT:
+ param->sort = sort_chain_flat;
+ break;
+ case CHAIN_NONE:
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Create a child for a parent. If inherit_children, then the new child
+ * will become the new parent of its parent's children
+ */
+static struct callchain_node *
+create_child(struct callchain_node *parent, bool inherit_children)
+{
+ struct callchain_node *new;
+
+ new = zalloc(sizeof(*new));
+ if (!new) {
+ perror("not enough memory to create child for code path tree");
+ return NULL;
+ }
+ new->parent = parent;
+ INIT_LIST_HEAD(&new->children);
+ INIT_LIST_HEAD(&new->val);
+
+ if (inherit_children) {
+ struct callchain_node *next;
+
+ list_splice(&parent->children, &new->children);
+ INIT_LIST_HEAD(&parent->children);
+
+ chain_for_each_child(next, new)
+ next->parent = new;
+ }
+ list_add_tail(&new->siblings, &parent->children);
+
+ return new;
+}
+
+
+/*
+ * Fill the node with callchain values
+ */
+static void
+fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
+{
+ struct callchain_cursor_node *cursor_node;
+
+ node->val_nr = cursor->nr - cursor->pos;
+ if (!node->val_nr)
+ pr_warning("Warning: empty node in callchain tree\n");
+
+ cursor_node = callchain_cursor_current(cursor);
+
+ while (cursor_node) {
+ struct callchain_list *call;
+
+ call = zalloc(sizeof(*call));
+ if (!call) {
+ perror("not enough memory for the code path tree");
+ return;
+ }
+ call->ip = cursor_node->ip;
+ call->ms.sym = cursor_node->sym;
+ call->ms.map = cursor_node->map;
+ list_add_tail(&call->list, &node->val);
+
+ callchain_cursor_advance(cursor);
+ cursor_node = callchain_cursor_current(cursor);
+ }
+}
+
+static void
+add_child(struct callchain_node *parent,
+ struct callchain_cursor *cursor,
+ u64 period)
+{
+ struct callchain_node *new;
+
+ new = create_child(parent, false);
+ fill_node(new, cursor);
+
+ new->children_hit = 0;
+ new->hit = period;
+}
+
+/*
+ * Split the parent in two parts (a new child is created) and
+ * give a part of its callchain to the created child.
+ * Then create another child to host the given callchain of new branch
+ */
+static void
+split_add_child(struct callchain_node *parent,
+ struct callchain_cursor *cursor,
+ struct callchain_list *to_split,
+ u64 idx_parents, u64 idx_local, u64 period)
+{
+ struct callchain_node *new;
+ struct list_head *old_tail;
+ unsigned int idx_total = idx_parents + idx_local;
+
+ /* split */
+ new = create_child(parent, true);
+
+ /* split the callchain and move a part to the new child */
+ old_tail = parent->val.prev;
+ list_del_range(&to_split->list, old_tail);
+ new->val.next = &to_split->list;
+ new->val.prev = old_tail;
+ to_split->list.prev = &new->val;
+ old_tail->next = &new->val;
+
+ /* split the hits */
+ new->hit = parent->hit;
+ new->children_hit = parent->children_hit;
+ parent->children_hit = callchain_cumul_hits(new);
+ new->val_nr = parent->val_nr - idx_local;
+ parent->val_nr = idx_local;
+
+ /* create a new child for the new branch if any */
+ if (idx_total < cursor->nr) {
+ parent->hit = 0;
+ add_child(parent, cursor, period);
+ parent->children_hit += period;
+ } else {
+ parent->hit = period;
+ }
+}
+
+static int
+append_chain(struct callchain_node *root,
+ struct callchain_cursor *cursor,
+ u64 period);
+
+static void
+append_chain_children(struct callchain_node *root,
+ struct callchain_cursor *cursor,
+ u64 period)
+{
+ struct callchain_node *rnode;
+
+ /* lookup in the children */
+ chain_for_each_child(rnode, root) {
+ unsigned int ret = append_chain(rnode, cursor, period);
+
+ if (!ret)
+ goto inc_children_hit;
+ }
+ /* nothing in children, add to the current node */
+ add_child(root, cursor, period);
+
+inc_children_hit:
+ root->children_hit += period;
+}
+
+static int
+append_chain(struct callchain_node *root,
+ struct callchain_cursor *cursor,
+ u64 period)
+{
+ struct callchain_cursor_node *curr_snap = cursor->curr;
+ struct callchain_list *cnode;
+ u64 start = cursor->pos;
+ bool found = false;
+ u64 matches;
+
+ /*
+ * Lookup in the current node
+ * If we have a symbol, then compare the start to match
+ * anywhere inside a function.
+ */
+ list_for_each_entry(cnode, &root->val, list) {
+ struct callchain_cursor_node *node;
+ struct symbol *sym;
+
+ node = callchain_cursor_current(cursor);
+ if (!node)
+ break;
+
+ sym = node->sym;
+
+ if (cnode->ms.sym && sym) {
+ if (cnode->ms.sym->start != sym->start)
+ break;
+ } else if (cnode->ip != node->ip)
+ break;
+
+ if (!found)
+ found = true;
+
+ callchain_cursor_advance(cursor);
+ }
+
+ /* no match, rely on the parent */
+ if (!found) {
+ cursor->curr = curr_snap;
+ cursor->pos = start;
+ return -1;
+ }
+
+ matches = cursor->pos - start;
+
+ /* we match only a part of the node. Split it and add the new chain */
+ if (matches < root->val_nr) {
+ split_add_child(root, cursor, cnode, start, matches, period);
+ return 0;
+ }
+
+ /* we match 100% of the path, increment the hit */
+ if (matches == root->val_nr && cursor->pos == cursor->nr) {
+ root->hit += period;
+ return 0;
+ }
+
+ /* We match the node and still have a part remaining */
+ append_chain_children(root, cursor, period);
+
+ return 0;
+}
+
+int callchain_append(struct callchain_root *root,
+ struct callchain_cursor *cursor,
+ u64 period)
+{
+ if (!cursor->nr)
+ return 0;
+
+ callchain_cursor_commit(cursor);
+
+ append_chain_children(&root->node, cursor, period);
+
+ if (cursor->nr > root->max_depth)
+ root->max_depth = cursor->nr;
+
+ return 0;
+}
+
+static int
+merge_chain_branch(struct callchain_cursor *cursor,
+ struct callchain_node *dst, struct callchain_node *src)
+{
+ struct callchain_cursor_node **old_last = cursor->last;
+ struct callchain_node *child, *next_child;
+ struct callchain_list *list, *next_list;
+ int old_pos = cursor->nr;
+ int err = 0;
+
+ list_for_each_entry_safe(list, next_list, &src->val, list) {
+ callchain_cursor_append(cursor, list->ip,
+ list->ms.map, list->ms.sym);
+ list_del(&list->list);
+ free(list);
+ }
+
+ if (src->hit) {
+ callchain_cursor_commit(cursor);
+ append_chain_children(dst, cursor, src->hit);
+ }
+
+ chain_for_each_child_safe(child, next_child, src) {
+ err = merge_chain_branch(cursor, dst, child);
+ if (err)
+ break;
+
+ list_del(&child->siblings);
+ free(child);
+ }
+
+ cursor->nr = old_pos;
+ cursor->last = old_last;
+
+ return err;
+}
+
+int callchain_merge(struct callchain_cursor *cursor,
+ struct callchain_root *dst, struct callchain_root *src)
+{
+ return merge_chain_branch(cursor, &dst->node, &src->node);
+}
+
+int callchain_cursor_append(struct callchain_cursor *cursor,
+ u64 ip, struct map *map, struct symbol *sym)
+{
+ struct callchain_cursor_node *node = *cursor->last;
+
+ if (!node) {
+ node = calloc(sizeof(*node), 1);
+ if (!node)
+ return -ENOMEM;
+
+ *cursor->last = node;
+ }
+
+ node->ip = ip;
+ node->map = map;
+ node->sym = sym;
+
+ cursor->nr++;
+
+ cursor->last = &node->next;
+
+ return 0;
+}
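
The intended feed/commit cycle for the cursor and tree above, per processed sample, is roughly the following. This is a simplified, hypothetical sketch that skips the map/symbol resolution a real caller would do for each ip:

#include <errno.h>
#include "callchain.h"          /* also pulls in event.h for ip_callchain */

static int add_sample_chain(struct callchain_root *root,
                            struct callchain_cursor *cursor,
                            struct ip_callchain *chain, u64 period)
{
        u64 i;

        /* open a writing session, reusing previously allocated nodes */
        callchain_cursor_reset(cursor);

        for (i = 0; i < chain->nr; i++)
                if (callchain_cursor_append(cursor, chain->ips[i], NULL, NULL))
                        return -ENOMEM;

        /* callchain_append() commits the cursor for reading and merges
         * the path into the radix tree, weighted by the sample period */
        return callchain_append(root, cursor, period);
}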
diff --git a/ANDROID_3.4.5/tools/perf/util/callchain.h b/ANDROID_3.4.5/tools/perf/util/callchain.h
new file mode 100644
index 00000000..7f9c0f1a
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/callchain.h
@@ -0,0 +1,144 @@
+#ifndef __PERF_CALLCHAIN_H
+#define __PERF_CALLCHAIN_H
+
+#include "../perf.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include "event.h"
+#include "symbol.h"
+
+enum chain_mode {
+ CHAIN_NONE,
+ CHAIN_FLAT,
+ CHAIN_GRAPH_ABS,
+ CHAIN_GRAPH_REL
+};
+
+enum chain_order {
+ ORDER_CALLER,
+ ORDER_CALLEE
+};
+
+struct callchain_node {
+ struct callchain_node *parent;
+ struct list_head siblings;
+ struct list_head children;
+ struct list_head val;
+ struct rb_node rb_node; /* to sort nodes in an rbtree */
+ struct rb_root rb_root; /* sorted tree of children */
+ unsigned int val_nr;
+ u64 hit;
+ u64 children_hit;
+};
+
+struct callchain_root {
+ u64 max_depth;
+ struct callchain_node node;
+};
+
+struct callchain_param;
+
+typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *,
+ u64, struct callchain_param *);
+
+struct callchain_param {
+ enum chain_mode mode;
+ u32 print_limit;
+ double min_percent;
+ sort_chain_func_t sort;
+ enum chain_order order;
+};
+
+struct callchain_list {
+ u64 ip;
+ struct map_symbol ms;
+ struct list_head list;
+};
+
+/*
+ * A callchain cursor is a singly linked list that
+ * lets one feed a callchain progressively.
+ * It keeps persistent allocated entries to minimize
+ * allocations.
+ */
+struct callchain_cursor_node {
+ u64 ip;
+ struct map *map;
+ struct symbol *sym;
+ struct callchain_cursor_node *next;
+};
+
+struct callchain_cursor {
+ u64 nr;
+ struct callchain_cursor_node *first;
+ struct callchain_cursor_node **last;
+ u64 pos;
+ struct callchain_cursor_node *curr;
+};
+
+static inline void callchain_init(struct callchain_root *root)
+{
+ INIT_LIST_HEAD(&root->node.siblings);
+ INIT_LIST_HEAD(&root->node.children);
+ INIT_LIST_HEAD(&root->node.val);
+
+ root->node.parent = NULL;
+ root->node.hit = 0;
+ root->node.children_hit = 0;
+ root->max_depth = 0;
+}
+
+static inline u64 callchain_cumul_hits(struct callchain_node *node)
+{
+ return node->hit + node->children_hit;
+}
+
+int callchain_register_param(struct callchain_param *param);
+int callchain_append(struct callchain_root *root,
+ struct callchain_cursor *cursor,
+ u64 period);
+
+int callchain_merge(struct callchain_cursor *cursor,
+ struct callchain_root *dst, struct callchain_root *src);
+
+struct ip_callchain;
+union perf_event;
+
+bool ip_callchain__valid(struct ip_callchain *chain,
+ const union perf_event *event);
+/*
+ * Initialize a cursor before adding entries inside, but keep
+ * the previously allocated entries as a cache.
+ */
+static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
+{
+ cursor->nr = 0;
+ cursor->last = &cursor->first;
+}
+
+int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
+ struct map *map, struct symbol *sym);
+
+/* Close a cursor writing session. Initialize for the reader */
+static inline void callchain_cursor_commit(struct callchain_cursor *cursor)
+{
+ cursor->curr = cursor->first;
+ cursor->pos = 0;
+}
+
+/* Cursor reading iteration helpers */
+static inline struct callchain_cursor_node *
+callchain_cursor_current(struct callchain_cursor *cursor)
+{
+ if (cursor->pos == cursor->nr)
+ return NULL;
+
+ return cursor->curr;
+}
+
+static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
+{
+ cursor->curr = cursor->curr->next;
+ cursor->pos++;
+}
+#endif /* __PERF_CALLCHAIN_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/cgroup.c b/ANDROID_3.4.5/tools/perf/util/cgroup.c
new file mode 100644
index 00000000..dbe2f16b
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/cgroup.c
@@ -0,0 +1,177 @@
+#include "util.h"
+#include "../perf.h"
+#include "parse-options.h"
+#include "evsel.h"
+#include "cgroup.h"
+#include "evlist.h"
+
+int nr_cgroups;
+
+static int
+cgroupfs_find_mountpoint(char *buf, size_t maxlen)
+{
+ FILE *fp;
+ char mountpoint[PATH_MAX + 1], tokens[PATH_MAX + 1], type[PATH_MAX + 1];
+ char *token, *saved_ptr = NULL;
+ int found = 0;
+
+ fp = fopen("/proc/mounts", "r");
+ if (!fp)
+ return -1;
+
+ /*
+ * in order to handle split hierarchy, we need to scan /proc/mounts
+ * and inspect every cgroupfs mount point to find one that has
+ * perf_event subsystem
+ */
+ while (fscanf(fp, "%*s %"STR(PATH_MAX)"s %"STR(PATH_MAX)"s %"
+ STR(PATH_MAX)"s %*d %*d\n",
+ mountpoint, type, tokens) == 3) {
+
+ if (!strcmp(type, "cgroup")) {
+
+ token = strtok_r(tokens, ",", &saved_ptr);
+
+ while (token != NULL) {
+ if (!strcmp(token, "perf_event")) {
+ found = 1;
+ break;
+ }
+ token = strtok_r(NULL, ",", &saved_ptr);
+ }
+ }
+ if (found)
+ break;
+ }
+ fclose(fp);
+ if (!found)
+ return -1;
+
+ if (strlen(mountpoint) < maxlen) {
+ strcpy(buf, mountpoint);
+ return 0;
+ }
+ return -1;
+}
+
+static int open_cgroup(char *name)
+{
+ char path[PATH_MAX + 1];
+ char mnt[PATH_MAX + 1];
+ int fd;
+
+
+ if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1))
+ return -1;
+
+ snprintf(path, PATH_MAX, "%s/%s", mnt, name);
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1)
+ fprintf(stderr, "no access to cgroup %s\n", path);
+
+ return fd;
+}
+
+static int add_cgroup(struct perf_evlist *evlist, char *str)
+{
+ struct perf_evsel *counter;
+ struct cgroup_sel *cgrp = NULL;
+ int n;
+ /*
+ * check if cgrp is already defined, if so we reuse it
+ */
+ list_for_each_entry(counter, &evlist->entries, node) {
+ cgrp = counter->cgrp;
+ if (!cgrp)
+ continue;
+ if (!strcmp(cgrp->name, str))
+ break;
+
+ cgrp = NULL;
+ }
+
+ if (!cgrp) {
+ cgrp = zalloc(sizeof(*cgrp));
+ if (!cgrp)
+ return -1;
+
+ cgrp->name = str;
+
+ cgrp->fd = open_cgroup(str);
+ if (cgrp->fd == -1) {
+ free(cgrp);
+ return -1;
+ }
+ }
+
+ /*
+ * find corresponding event
+ * find the corresponding event:
+ * if we add cgroup N, we need to find event N
+ n = 0;
+ list_for_each_entry(counter, &evlist->entries, node) {
+ if (n == nr_cgroups)
+ goto found;
+ n++;
+ }
+ if (cgrp->refcnt == 0)
+ free(cgrp);
+
+ return -1;
+found:
+ cgrp->refcnt++;
+ counter->cgrp = cgrp;
+ return 0;
+}
+
+void close_cgroup(struct cgroup_sel *cgrp)
+{
+ if (!cgrp)
+ return;
+
+ /* XXX: not reentrant */
+ if (--cgrp->refcnt == 0) {
+ close(cgrp->fd);
+ free(cgrp->name);
+ free(cgrp);
+ }
+}
+
+int parse_cgroups(const struct option *opt __used, const char *str,
+ int unset __used)
+{
+ struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
+ const char *p, *e, *eos = str + strlen(str);
+ char *s;
+ int ret;
+
+ if (list_empty(&evlist->entries)) {
+ fprintf(stderr, "must define events before cgroups\n");
+ return -1;
+ }
+
+ for (;;) {
+ p = strchr(str, ',');
+ e = p ? p : eos;
+
+ /* allow empty cgroups, i.e., skip */
+ if (e - str) {
+ /* termination added */
+ s = strndup(str, e - str);
+ if (!s)
+ return -1;
+ ret = add_cgroup(evlist, s);
+ if (ret) {
+ free(s);
+ return -1;
+ }
+ }
+ /* nr_cgroups is increased even for empty cgroups */
+ nr_cgroups++;
+ if (!p)
+ break;
+ str = p+1;
+ }
+ return 0;
+}
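
parse_cgroups() is meant to be plugged into a builtin's option table as an OPT_CALLBACK whose value points at the tool's evlist pointer. A hypothetical excerpt (the option text and variable name are illustrative):

#include "parse-options.h"
#include "cgroup.h"
#include "evlist.h"

static struct perf_evlist *evsel_list;

/* Hypothetical option table entry: parse_cgroups() dereferences
 * opt->value as a struct perf_evlist ** and attaches each named
 * cgroup to the events defined before it on the command line, so
 * -e must precede -G. */
static const struct option options[] = {
        OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
                     "monitor event in cgroup name only", parse_cgroups),
        OPT_END()
};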
diff --git a/ANDROID_3.4.5/tools/perf/util/cgroup.h b/ANDROID_3.4.5/tools/perf/util/cgroup.h
new file mode 100644
index 00000000..89acd6de
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/cgroup.h
@@ -0,0 +1,17 @@
+#ifndef __CGROUP_H__
+#define __CGROUP_H__
+
+struct option;
+
+struct cgroup_sel {
+ char *name;
+ int fd;
+ int refcnt;
+};
+
+
+extern int nr_cgroups; /* number of explicit cgroups defined */
+extern void close_cgroup(struct cgroup_sel *cgrp);
+extern int parse_cgroups(const struct option *opt, const char *str, int unset);
+
+#endif /* __CGROUP_H__ */
diff --git a/ANDROID_3.4.5/tools/perf/util/color.c b/ANDROID_3.4.5/tools/perf/util/color.c
new file mode 100644
index 00000000..11e46da1
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/color.c
@@ -0,0 +1,325 @@
+#include <linux/kernel.h>
+#include "cache.h"
+#include "color.h"
+
+int perf_use_color_default = -1;
+
+static int parse_color(const char *name, int len)
+{
+ static const char * const color_names[] = {
+ "normal", "black", "red", "green", "yellow",
+ "blue", "magenta", "cyan", "white"
+ };
+ char *end;
+ int i;
+
+ for (i = 0; i < (int)ARRAY_SIZE(color_names); i++) {
+ const char *str = color_names[i];
+ if (!strncasecmp(name, str, len) && !str[len])
+ return i - 1;
+ }
+ i = strtol(name, &end, 10);
+ if (end - name == len && i >= -1 && i <= 255)
+ return i;
+ return -2;
+}
+
+static int parse_attr(const char *name, int len)
+{
+ static const int attr_values[] = { 1, 2, 4, 5, 7 };
+ static const char * const attr_names[] = {
+ "bold", "dim", "ul", "blink", "reverse"
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
+ const char *str = attr_names[i];
+ if (!strncasecmp(name, str, len) && !str[len])
+ return attr_values[i];
+ }
+ return -1;
+}
+
+void color_parse(const char *value, const char *var, char *dst)
+{
+ color_parse_mem(value, strlen(value), var, dst);
+}
+
+void color_parse_mem(const char *value, int value_len, const char *var,
+ char *dst)
+{
+ const char *ptr = value;
+ int len = value_len;
+ int attr = -1;
+ int fg = -2;
+ int bg = -2;
+
+ if (!strncasecmp(value, "reset", len)) {
+ strcpy(dst, PERF_COLOR_RESET);
+ return;
+ }
+
+ /* [fg [bg]] [attr] */
+ while (len > 0) {
+ const char *word = ptr;
+ int val, wordlen = 0;
+
+ while (len > 0 && !isspace(word[wordlen])) {
+ wordlen++;
+ len--;
+ }
+
+ ptr = word + wordlen;
+ while (len > 0 && isspace(*ptr)) {
+ ptr++;
+ len--;
+ }
+
+ val = parse_color(word, wordlen);
+ if (val >= -1) {
+ if (fg == -2) {
+ fg = val;
+ continue;
+ }
+ if (bg == -2) {
+ bg = val;
+ continue;
+ }
+ goto bad;
+ }
+ val = parse_attr(word, wordlen);
+ if (val < 0 || attr != -1)
+ goto bad;
+ attr = val;
+ }
+
+ if (attr >= 0 || fg >= 0 || bg >= 0) {
+ int sep = 0;
+
+ *dst++ = '\033';
+ *dst++ = '[';
+ if (attr >= 0) {
+ *dst++ = '0' + attr;
+ sep++;
+ }
+ if (fg >= 0) {
+ if (sep++)
+ *dst++ = ';';
+ if (fg < 8) {
+ *dst++ = '3';
+ *dst++ = '0' + fg;
+ } else {
+ dst += sprintf(dst, "38;5;%d", fg);
+ }
+ }
+ if (bg >= 0) {
+ if (sep++)
+ *dst++ = ';';
+ if (bg < 8) {
+ *dst++ = '4';
+ *dst++ = '0' + bg;
+ } else {
+ dst += sprintf(dst, "48;5;%d", bg);
+ }
+ }
+ *dst++ = 'm';
+ }
+ *dst = 0;
+ return;
+bad:
+ die("bad color value '%.*s' for variable '%s'", value_len, value, var);
+}
+
+int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
+{
+ if (value) {
+ if (!strcasecmp(value, "never"))
+ return 0;
+ if (!strcasecmp(value, "always"))
+ return 1;
+ if (!strcasecmp(value, "auto"))
+ goto auto_color;
+ }
+
+ /* Missing or explicit false to turn off colorization */
+ if (!perf_config_bool(var, value))
+ return 0;
+
+ /* any normal truth value defaults to 'auto' */
+ auto_color:
+ if (stdout_is_tty < 0)
+ stdout_is_tty = isatty(1);
+ if (stdout_is_tty || (pager_in_use() && pager_use_color)) {
+ char *term = getenv("TERM");
+ if (term && strcmp(term, "dumb"))
+ return 1;
+ }
+ return 0;
+}
+
+int perf_color_default_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "color.ui")) {
+ perf_use_color_default = perf_config_colorbool(var, value, -1);
+ return 0;
+ }
+
+ return perf_default_config(var, value, cb);
+}
+
+static int __color_vsnprintf(char *bf, size_t size, const char *color,
+ const char *fmt, va_list args, const char *trail)
+{
+ int r = 0;
+
+ /*
+ * Auto-detect:
+ */
+ if (perf_use_color_default < 0) {
+ if (isatty(1) || pager_in_use())
+ perf_use_color_default = 1;
+ else
+ perf_use_color_default = 0;
+ }
+
+ if (perf_use_color_default && *color)
+ r += scnprintf(bf, size, "%s", color);
+ r += vscnprintf(bf + r, size - r, fmt, args);
+ if (perf_use_color_default && *color)
+ r += scnprintf(bf + r, size - r, "%s", PERF_COLOR_RESET);
+ if (trail)
+ r += scnprintf(bf + r, size - r, "%s", trail);
+ return r;
+}
+
+static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
+ va_list args, const char *trail)
+{
+ int r = 0;
+
+ /*
+ * Auto-detect:
+ */
+ if (perf_use_color_default < 0) {
+ if (isatty(fileno(fp)) || pager_in_use())
+ perf_use_color_default = 1;
+ else
+ perf_use_color_default = 0;
+ }
+
+ if (perf_use_color_default && *color)
+ r += fprintf(fp, "%s", color);
+ r += vfprintf(fp, fmt, args);
+ if (perf_use_color_default && *color)
+ r += fprintf(fp, "%s", PERF_COLOR_RESET);
+ if (trail)
+ r += fprintf(fp, "%s", trail);
+ return r;
+}
+
+int color_vsnprintf(char *bf, size_t size, const char *color,
+ const char *fmt, va_list args)
+{
+ return __color_vsnprintf(bf, size, color, fmt, args, NULL);
+}
+
+int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args)
+{
+ return __color_vfprintf(fp, color, fmt, args, NULL);
+}
+
+int color_snprintf(char *bf, size_t size, const char *color,
+ const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+ r = color_vsnprintf(bf, size, color, fmt, args);
+ va_end(args);
+ return r;
+}
+
+int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+ r = color_vfprintf(fp, color, fmt, args);
+ va_end(args);
+ return r;
+}
+
+int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...)
+{
+ va_list args;
+ int r;
+ va_start(args, fmt);
+ r = __color_vfprintf(fp, color, fmt, args, "\n");
+ va_end(args);
+ return r;
+}
+
+/*
+ * This function splits the buffer by newlines and colors the lines individually.
+ *
+ * Returns 0 on success.
+ */
+int color_fwrite_lines(FILE *fp, const char *color,
+ size_t count, const char *buf)
+{
+ if (!*color)
+ return fwrite(buf, count, 1, fp) != 1;
+
+ while (count) {
+ char *p = memchr(buf, '\n', count);
+
+ if (p != buf && (fputs(color, fp) < 0 ||
+ fwrite(buf, p ? (size_t)(p - buf) : count, 1, fp) != 1 ||
+ fputs(PERF_COLOR_RESET, fp) < 0))
+ return -1;
+ if (!p)
+ return 0;
+ if (fputc('\n', fp) < 0)
+ return -1;
+ count -= p + 1 - buf;
+ buf = p + 1;
+ }
+ return 0;
+}
+
+const char *get_percent_color(double percent)
+{
+ const char *color = PERF_COLOR_NORMAL;
+
+ /*
+ * We color high-overhead entries in red, mid-overhead
+ * entries in green, and keep low-overhead entries
+ * normal:
+ */
+ if (percent >= MIN_RED)
+ color = PERF_COLOR_RED;
+ else {
+ if (percent > MIN_GREEN)
+ color = PERF_COLOR_GREEN;
+ }
+ return color;
+}
+
+int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
+{
+ int r;
+ const char *color;
+
+ color = get_percent_color(percent);
+ r = color_fprintf(fp, color, fmt, percent);
+
+ return r;
+}
+
+int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent)
+{
+ const char *color = get_percent_color(percent);
+ return color_snprintf(bf, size, color, fmt, percent);
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/color.h b/ANDROID_3.4.5/tools/perf/util/color.h
new file mode 100644
index 00000000..dea082b7
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/color.h
@@ -0,0 +1,46 @@
+#ifndef __PERF_COLOR_H
+#define __PERF_COLOR_H
+
+/* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */
+#define COLOR_MAXLEN 24
+
+#define PERF_COLOR_NORMAL ""
+#define PERF_COLOR_RESET "\033[m"
+#define PERF_COLOR_BOLD "\033[1m"
+#define PERF_COLOR_RED "\033[31m"
+#define PERF_COLOR_GREEN "\033[32m"
+#define PERF_COLOR_YELLOW "\033[33m"
+#define PERF_COLOR_BLUE "\033[34m"
+#define PERF_COLOR_MAGENTA "\033[35m"
+#define PERF_COLOR_CYAN "\033[36m"
+#define PERF_COLOR_BG_RED "\033[41m"
+
+#define MIN_GREEN 0.5
+#define MIN_RED 5.0
+
+/*
+ * This variable stores the value of color.ui
+ */
+extern int perf_use_color_default;
+
+
+/*
+ * Use this instead of perf_default_config if you need the value of color.ui.
+ */
+int perf_color_default_config(const char *var, const char *value, void *cb);
+
+int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty);
+void color_parse(const char *value, const char *var, char *dst);
+void color_parse_mem(const char *value, int len, const char *var, char *dst);
+int color_vsnprintf(char *bf, size_t size, const char *color,
+ const char *fmt, va_list args);
+int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args);
+int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
+int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...);
+int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
+int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
+int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent);
+int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
+const char *get_percent_color(double percent);
+
+#endif /* __PERF_COLOR_H */
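For illustration, a minimal sketch of the color API declared above: percent_color_fprintf() picks red, green, or normal output based on the MIN_RED/MIN_GREEN thresholds, while color_fprintf() takes an explicit PERF_COLOR_* escape string. The helper name below is hypothetical.

	/* Hypothetical helper: print one overhead value, colored by threshold
	 * (>= 5.0% red, > 0.5% green, otherwise normal). */
	#include <stdio.h>
	#include "color.h"

	static void print_overhead_line(FILE *fp, double percent, const char *sym)
	{
		percent_color_fprintf(fp, "%6.2f%%", percent);
		color_fprintf(fp, PERF_COLOR_NORMAL, "  %s\n", sym);
	}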
diff --git a/ANDROID_3.4.5/tools/perf/util/config.c b/ANDROID_3.4.5/tools/perf/util/config.c
new file mode 100644
index 00000000..0deac6a1
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/config.c
@@ -0,0 +1,507 @@
+/*
+ * config.c
+ *
+ * Helper functions for parsing config items.
+ * Originally copied from GIT source.
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ * Copyright (C) Johannes Schindelin, 2005
+ *
+ */
+#include "util.h"
+#include "cache.h"
+#include "exec_cmd.h"
+
+#define MAXNAME (256)
+
+#define DEBUG_CACHE_DIR ".debug"
+
+
+char buildid_dir[MAXPATHLEN]; /* root dir for buildid, binary cache */
+
+static FILE *config_file;
+static const char *config_file_name;
+static int config_linenr;
+static int config_file_eof;
+
+static const char *config_exclusive_filename;
+
+static int get_next_char(void)
+{
+ int c;
+ FILE *f;
+
+ c = '\n';
+ if ((f = config_file) != NULL) {
+ c = fgetc(f);
+ if (c == '\r') {
+ /* DOS like systems */
+ c = fgetc(f);
+ if (c != '\n') {
+ ungetc(c, f);
+ c = '\r';
+ }
+ }
+ if (c == '\n')
+ config_linenr++;
+ if (c == EOF) {
+ config_file_eof = 1;
+ c = '\n';
+ }
+ }
+ return c;
+}
+
+static char *parse_value(void)
+{
+ static char value[1024];
+ int quote = 0, comment = 0, space = 0;
+ size_t len = 0;
+
+ for (;;) {
+ int c = get_next_char();
+
+ if (len >= sizeof(value) - 1)
+ return NULL;
+ if (c == '\n') {
+ if (quote)
+ return NULL;
+ value[len] = 0;
+ return value;
+ }
+ if (comment)
+ continue;
+ if (isspace(c) && !quote) {
+ space = 1;
+ continue;
+ }
+ if (!quote) {
+ if (c == ';' || c == '#') {
+ comment = 1;
+ continue;
+ }
+ }
+ if (space) {
+ if (len)
+ value[len++] = ' ';
+ space = 0;
+ }
+ if (c == '\\') {
+ c = get_next_char();
+ switch (c) {
+ case '\n':
+ continue;
+ case 't':
+ c = '\t';
+ break;
+ case 'b':
+ c = '\b';
+ break;
+ case 'n':
+ c = '\n';
+ break;
+ /* Some characters escape as themselves */
+ case '\\': case '"':
+ break;
+ /* Reject unknown escape sequences */
+ default:
+ return NULL;
+ }
+ value[len++] = c;
+ continue;
+ }
+ if (c == '"') {
+ quote = 1-quote;
+ continue;
+ }
+ value[len++] = c;
+ }
+}
+
+static inline int iskeychar(int c)
+{
+ return isalnum(c) || c == '-';
+}
+
+static int get_value(config_fn_t fn, void *data, char *name, unsigned int len)
+{
+ int c;
+ char *value;
+
+ /* Get the full name */
+ for (;;) {
+ c = get_next_char();
+ if (config_file_eof)
+ break;
+ if (!iskeychar(c))
+ break;
+ name[len++] = c;
+ if (len >= MAXNAME)
+ return -1;
+ }
+ name[len] = 0;
+ while (c == ' ' || c == '\t')
+ c = get_next_char();
+
+ value = NULL;
+ if (c != '\n') {
+ if (c != '=')
+ return -1;
+ value = parse_value();
+ if (!value)
+ return -1;
+ }
+ return fn(name, value, data);
+}
+
+static int get_extended_base_var(char *name, int baselen, int c)
+{
+ do {
+ if (c == '\n')
+ return -1;
+ c = get_next_char();
+ } while (isspace(c));
+
+ /* We require the format to be '[base "extension"]' */
+ if (c != '"')
+ return -1;
+ name[baselen++] = '.';
+
+ for (;;) {
+ int ch = get_next_char();
+
+ if (ch == '\n')
+ return -1;
+ if (ch == '"')
+ break;
+ if (ch == '\\') {
+ ch = get_next_char();
+ if (ch == '\n')
+ return -1;
+ }
+ name[baselen++] = ch;
+ if (baselen > MAXNAME / 2)
+ return -1;
+ }
+
+ /* Final ']' */
+ if (get_next_char() != ']')
+ return -1;
+ return baselen;
+}
+
+static int get_base_var(char *name)
+{
+ int baselen = 0;
+
+ for (;;) {
+ int c = get_next_char();
+ if (config_file_eof)
+ return -1;
+ if (c == ']')
+ return baselen;
+ if (isspace(c))
+ return get_extended_base_var(name, baselen, c);
+ if (!iskeychar(c) && c != '.')
+ return -1;
+ if (baselen > MAXNAME / 2)
+ return -1;
+ name[baselen++] = tolower(c);
+ }
+}
+
+static int perf_parse_file(config_fn_t fn, void *data)
+{
+ int comment = 0;
+ int baselen = 0;
+ static char var[MAXNAME];
+
+ /* U+FEFF Byte Order Mark in UTF8 */
+ static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf";
+ const unsigned char *bomptr = utf8_bom;
+
+ for (;;) {
+ int c = get_next_char();
+ if (bomptr && *bomptr) {
+ /* We are at the file beginning; skip UTF8-encoded BOM
+ * if present. Sane editors won't put this in on their
+ * own, but e.g. Windows Notepad will do it happily. */
+ if ((unsigned char) c == *bomptr) {
+ bomptr++;
+ continue;
+ } else {
+ /* Do not tolerate partial BOM. */
+ if (bomptr != utf8_bom)
+ break;
+ /* No BOM at file beginning. Cool. */
+ bomptr = NULL;
+ }
+ }
+ if (c == '\n') {
+ if (config_file_eof)
+ return 0;
+ comment = 0;
+ continue;
+ }
+ if (comment || isspace(c))
+ continue;
+ if (c == '#' || c == ';') {
+ comment = 1;
+ continue;
+ }
+ if (c == '[') {
+ baselen = get_base_var(var);
+ if (baselen <= 0)
+ break;
+ var[baselen++] = '.';
+ var[baselen] = 0;
+ continue;
+ }
+ if (!isalpha(c))
+ break;
+ var[baselen] = tolower(c);
+ if (get_value(fn, data, var, baselen+1) < 0)
+ break;
+ }
+ die("bad config file line %d in %s", config_linenr, config_file_name);
+}
+
+static int parse_unit_factor(const char *end, unsigned long *val)
+{
+ if (!*end)
+ return 1;
+ else if (!strcasecmp(end, "k")) {
+ *val *= 1024;
+ return 1;
+ }
+ else if (!strcasecmp(end, "m")) {
+ *val *= 1024 * 1024;
+ return 1;
+ }
+ else if (!strcasecmp(end, "g")) {
+ *val *= 1024 * 1024 * 1024;
+ return 1;
+ }
+ return 0;
+}
+
+static int perf_parse_long(const char *value, long *ret)
+{
+ if (value && *value) {
+ char *end;
+ long val = strtol(value, &end, 0);
+ unsigned long factor = 1;
+ if (!parse_unit_factor(end, &factor))
+ return 0;
+ *ret = val * factor;
+ return 1;
+ }
+ return 0;
+}
+
+static void die_bad_config(const char *name)
+{
+ if (config_file_name)
+ die("bad config value for '%s' in %s", name, config_file_name);
+ die("bad config value for '%s'", name);
+}
+
+int perf_config_int(const char *name, const char *value)
+{
+ long ret = 0;
+ if (!perf_parse_long(value, &ret))
+ die_bad_config(name);
+ return ret;
+}
+
+static int perf_config_bool_or_int(const char *name, const char *value, int *is_bool)
+{
+ *is_bool = 1;
+ if (!value)
+ return 1;
+ if (!*value)
+ return 0;
+ if (!strcasecmp(value, "true") || !strcasecmp(value, "yes") || !strcasecmp(value, "on"))
+ return 1;
+ if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off"))
+ return 0;
+ *is_bool = 0;
+ return perf_config_int(name, value);
+}
+
+int perf_config_bool(const char *name, const char *value)
+{
+ int discard;
+ return !!perf_config_bool_or_int(name, value, &discard);
+}
+
+const char *perf_config_dirname(const char *name, const char *value)
+{
+ if (!name)
+ return NULL;
+ return value;
+}
+
+static int perf_default_core_config(const char *var __used, const char *value __used)
+{
+ /* Add other config variables here. */
+ return 0;
+}
+
+int perf_default_config(const char *var, const char *value, void *dummy __used)
+{
+ if (!prefixcmp(var, "core."))
+ return perf_default_core_config(var, value);
+
+ /* Add other config variables here. */
+ return 0;
+}
+
+static int perf_config_from_file(config_fn_t fn, const char *filename, void *data)
+{
+ int ret;
+ FILE *f = fopen(filename, "r");
+
+ ret = -1;
+ if (f) {
+ config_file = f;
+ config_file_name = filename;
+ config_linenr = 1;
+ config_file_eof = 0;
+ ret = perf_parse_file(fn, data);
+ fclose(f);
+ config_file_name = NULL;
+ }
+ return ret;
+}
+
+static const char *perf_etc_perfconfig(void)
+{
+ static const char *system_wide;
+ if (!system_wide)
+ system_wide = system_path(ETC_PERFCONFIG);
+ return system_wide;
+}
+
+static int perf_env_bool(const char *k, int def)
+{
+ const char *v = getenv(k);
+ return v ? perf_config_bool(k, v) : def;
+}
+
+static int perf_config_system(void)
+{
+ return !perf_env_bool("PERF_CONFIG_NOSYSTEM", 0);
+}
+
+static int perf_config_global(void)
+{
+ return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0);
+}
+
+int perf_config(config_fn_t fn, void *data)
+{
+ int ret = 0, found = 0;
+ const char *home = NULL;
+
+ /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
+ if (config_exclusive_filename)
+ return perf_config_from_file(fn, config_exclusive_filename, data);
+ if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) {
+ ret += perf_config_from_file(fn, perf_etc_perfconfig(),
+ data);
+ found += 1;
+ }
+
+ home = getenv("HOME");
+ if (perf_config_global() && home) {
+ char *user_config = strdup(mkpath("%s/.perfconfig", home));
+ struct stat st;
+
+ if (user_config == NULL) {
+ warning("Not enough memory to process %s/.perfconfig, "
+ "ignoring it.", home);
+ goto out;
+ }
+
+ if (stat(user_config, &st) < 0)
+ goto out_free;
+
+ if (st.st_uid && (st.st_uid != geteuid())) {
+ warning("File %s not owned by current user or root, "
+ "ignoring it.", user_config);
+ goto out_free;
+ }
+
+ if (!st.st_size)
+ goto out_free;
+
+ ret += perf_config_from_file(fn, user_config, data);
+ found += 1;
+out_free:
+ free(user_config);
+ }
+out:
+ if (found == 0)
+ return -1;
+ return ret;
+}
+
+/*
+ * Call this to report an error for a variable that should not
+ * get a boolean value (i.e. "[my] var" means "true").
+ */
+int config_error_nonbool(const char *var)
+{
+ return error("Missing value for '%s'", var);
+}
+
+struct buildid_dir_config {
+ char *dir;
+};
+
+static int buildid_dir_command_config(const char *var, const char *value,
+ void *data)
+{
+ struct buildid_dir_config *c = data;
+ const char *v;
+
+ /* same dir for all commands */
+ if (!prefixcmp(var, "buildid.") && !strcmp(var + 8, "dir")) {
+ v = perf_config_dirname(var, value);
+ if (!v)
+ return -1;
+ strncpy(c->dir, v, MAXPATHLEN-1);
+ c->dir[MAXPATHLEN-1] = '\0';
+ }
+ return 0;
+}
+
+static void check_buildid_dir_config(void)
+{
+ struct buildid_dir_config c;
+ c.dir = buildid_dir;
+ perf_config(buildid_dir_command_config, &c);
+}
+
+void set_buildid_dir(void)
+{
+ buildid_dir[0] = '\0';
+
+ /* try config file */
+ check_buildid_dir_config();
+
+ /* default to $HOME/.debug */
+ if (buildid_dir[0] == '\0') {
+ char *v = getenv("HOME");
+ if (v) {
+ snprintf(buildid_dir, MAXPATHLEN-1, "%s/%s",
+ v, DEBUG_CACHE_DIR);
+ } else {
+ strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1);
+ }
+ buildid_dir[MAXPATHLEN-1] = '\0';
+ }
+ /* for communicating with external commands */
+ setenv("PERF_BUILDID_DIR", buildid_dir, 1);
+}
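As a sketch of the config_fn_t contract used by perf_config() above: the callback is invoked once per dotted "section.key" pair read from the system and user config files, and usually chains to perf_default_config() for keys it does not handle. The "mytool.depth" key and variable below are hypothetical.

	/* Hypothetical callback: pick up an integer "mytool.depth" setting. */
	#include <string.h>
	#include "cache.h"	/* config helpers are assumed to be declared here */

	static int mytool_depth = 16;

	static int mytool_config(const char *var, const char *value, void *cb)
	{
		if (!strcmp(var, "mytool.depth")) {
			mytool_depth = perf_config_int(var, value);
			return 0;
		}
		return perf_default_config(var, value, cb);	/* unhandled keys */
	}

	/* typically called early in a command: perf_config(mytool_config, NULL); */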
diff --git a/ANDROID_3.4.5/tools/perf/util/cpumap.c b/ANDROID_3.4.5/tools/perf/util/cpumap.c
new file mode 100644
index 00000000..adc72f09
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/cpumap.c
@@ -0,0 +1,195 @@
+#include "util.h"
+#include "../perf.h"
+#include "cpumap.h"
+#include <assert.h>
+#include <stdio.h>
+
+static struct cpu_map *cpu_map__default_new(void)
+{
+ struct cpu_map *cpus;
+ int nr_cpus;
+
+ nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ if (nr_cpus < 0)
+ return NULL;
+
+ cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
+ if (cpus != NULL) {
+ int i;
+ for (i = 0; i < nr_cpus; ++i)
+ cpus->map[i] = i;
+
+ cpus->nr = nr_cpus;
+ }
+
+ return cpus;
+}
+
+static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
+{
+ size_t payload_size = nr_cpus * sizeof(int);
+ struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
+
+ if (cpus != NULL) {
+ cpus->nr = nr_cpus;
+ memcpy(cpus->map, tmp_cpus, payload_size);
+ }
+
+ return cpus;
+}
+
+static struct cpu_map *cpu_map__read_all_cpu_map(void)
+{
+ struct cpu_map *cpus = NULL;
+ FILE *onlnf;
+ int nr_cpus = 0;
+ int *tmp_cpus = NULL, *tmp;
+ int max_entries = 0;
+ int n, cpu, prev;
+ char sep;
+
+ onlnf = fopen("/sys/devices/system/cpu/online", "r");
+ if (!onlnf)
+ return cpu_map__default_new();
+
+ sep = 0;
+ prev = -1;
+ for (;;) {
+ n = fscanf(onlnf, "%u%c", &cpu, &sep);
+ if (n <= 0)
+ break;
+ if (prev >= 0) {
+ int new_max = nr_cpus + cpu - prev - 1;
+
+ if (new_max >= max_entries) {
+ max_entries = new_max + MAX_NR_CPUS / 2;
+ tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+ if (tmp == NULL)
+ goto out_free_tmp;
+ tmp_cpus = tmp;
+ }
+
+ while (++prev < cpu)
+ tmp_cpus[nr_cpus++] = prev;
+ }
+ if (nr_cpus == max_entries) {
+ max_entries += MAX_NR_CPUS;
+ tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+ if (tmp == NULL)
+ goto out_free_tmp;
+ tmp_cpus = tmp;
+ }
+
+ tmp_cpus[nr_cpus++] = cpu;
+ if (n == 2 && sep == '-')
+ prev = cpu;
+ else
+ prev = -1;
+ if (n == 1 || sep == '\n')
+ break;
+ }
+
+ if (nr_cpus > 0)
+ cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
+ else
+ cpus = cpu_map__default_new();
+out_free_tmp:
+ free(tmp_cpus);
+ fclose(onlnf);
+ return cpus;
+}
+
+struct cpu_map *cpu_map__new(const char *cpu_list)
+{
+ struct cpu_map *cpus = NULL;
+ unsigned long start_cpu, end_cpu = 0;
+ char *p = NULL;
+ int i, nr_cpus = 0;
+ int *tmp_cpus = NULL, *tmp;
+ int max_entries = 0;
+
+ if (!cpu_list)
+ return cpu_map__read_all_cpu_map();
+
+ if (!isdigit(*cpu_list))
+ goto out;
+
+ while (isdigit(*cpu_list)) {
+ p = NULL;
+ start_cpu = strtoul(cpu_list, &p, 0);
+ if (start_cpu >= INT_MAX
+ || (*p != '\0' && *p != ',' && *p != '-'))
+ goto invalid;
+
+ if (*p == '-') {
+ cpu_list = ++p;
+ p = NULL;
+ end_cpu = strtoul(cpu_list, &p, 0);
+
+ if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
+ goto invalid;
+
+ if (end_cpu < start_cpu)
+ goto invalid;
+ } else {
+ end_cpu = start_cpu;
+ }
+
+ for (; start_cpu <= end_cpu; start_cpu++) {
+ /* check for duplicates */
+ for (i = 0; i < nr_cpus; i++)
+ if (tmp_cpus[i] == (int)start_cpu)
+ goto invalid;
+
+ if (nr_cpus == max_entries) {
+ max_entries += MAX_NR_CPUS;
+ tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+ if (tmp == NULL)
+ goto invalid;
+ tmp_cpus = tmp;
+ }
+ tmp_cpus[nr_cpus++] = (int)start_cpu;
+ }
+ if (*p)
+ ++p;
+
+ cpu_list = p;
+ }
+
+ if (nr_cpus > 0)
+ cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
+ else
+ cpus = cpu_map__default_new();
+invalid:
+ free(tmp_cpus);
+out:
+ return cpus;
+}
+
+size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
+{
+ int i;
+ size_t printed = fprintf(fp, "%d cpu%s: ",
+ map->nr, map->nr > 1 ? "s" : "");
+ for (i = 0; i < map->nr; ++i)
+ printed += fprintf(fp, "%s%d", i ? ", " : "", map->map[i]);
+
+ return printed + fprintf(fp, "\n");
+}
+
+struct cpu_map *cpu_map__dummy_new(void)
+{
+ struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
+
+ if (cpus != NULL) {
+ cpus->nr = 1;
+ cpus->map[0] = -1;
+ }
+
+ return cpus;
+}
+
+void cpu_map__delete(struct cpu_map *map)
+{
+ free(map);
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/cpumap.h b/ANDROID_3.4.5/tools/perf/util/cpumap.h
new file mode 100644
index 00000000..c4151857
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/cpumap.h
@@ -0,0 +1,17 @@
+#ifndef __PERF_CPUMAP_H
+#define __PERF_CPUMAP_H
+
+#include <stdio.h>
+
+struct cpu_map {
+ int nr;
+ int map[];
+};
+
+struct cpu_map *cpu_map__new(const char *cpu_list);
+struct cpu_map *cpu_map__dummy_new(void);
+void cpu_map__delete(struct cpu_map *map);
+
+size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp);
+
+#endif /* __PERF_CPUMAP_H */
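A hedged usage sketch of the cpu_map API above: cpu_map__new() accepts a list string such as "0-3,6" (or NULL for all online CPUs) and cpu_map__fprintf() dumps the parsed map. The wrapper function is hypothetical.

	/* Illustrative only: parse a CPU list string and print what it maps to. */
	#include <stdio.h>
	#include "cpumap.h"

	static int dump_cpu_list(const char *list)
	{
		struct cpu_map *cpus = cpu_map__new(list);	/* e.g. "0-3,6", or NULL */

		if (cpus == NULL)
			return -1;

		cpu_map__fprintf(cpus, stdout);
		cpu_map__delete(cpus);
		return 0;
	}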
diff --git a/ANDROID_3.4.5/tools/perf/util/ctype.c b/ANDROID_3.4.5/tools/perf/util/ctype.c
new file mode 100644
index 00000000..aada3ac5
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ctype.c
@@ -0,0 +1,39 @@
+/*
+ * Sane locale-independent, ASCII ctype.
+ *
+ * No surprises, and works with signed and unsigned chars.
+ */
+#include "util.h"
+
+enum {
+ S = GIT_SPACE,
+ A = GIT_ALPHA,
+ D = GIT_DIGIT,
+ G = GIT_GLOB_SPECIAL, /* *, ?, [, \\ */
+ R = GIT_REGEX_SPECIAL, /* $, (, ), +, ., ^, {, | * */
+ P = GIT_PRINT_EXTRA, /* printable - alpha - digit - glob - regex */
+
+ PS = GIT_SPACE | GIT_PRINT_EXTRA,
+};
+
+unsigned char sane_ctype[256] = {
+/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, S, S, 0, 0, S, 0, 0, /* 0.. 15 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 16.. 31 */
+ PS,P, P, P, R, P, P, P, R, R, G, R, P, P, R, P, /* 32.. 47 */
+ D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, G, /* 48.. 63 */
+ P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 64.. 79 */
+ A, A, A, A, A, A, A, A, A, A, A, G, G, P, R, P, /* 80.. 95 */
+ P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 96..111 */
+ A, A, A, A, A, A, A, A, A, A, A, R, R, P, P, 0, /* 112..127 */
+ /* Nothing in the 128.. range */
+};
+
+const char *graph_line =
+ "_____________________________________________________________________"
+ "_____________________________________________________________________";
+const char *graph_dotted_line =
+ "---------------------------------------------------------------------"
+ "---------------------------------------------------------------------"
+ "---------------------------------------------------------------------";
diff --git a/ANDROID_3.4.5/tools/perf/util/debug.c b/ANDROID_3.4.5/tools/perf/util/debug.c
new file mode 100644
index 00000000..26817daa
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/debug.c
@@ -0,0 +1,105 @@
+/* For general debugging purposes */
+
+#include "../perf.h"
+
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "cache.h"
+#include "color.h"
+#include "event.h"
+#include "debug.h"
+#include "util.h"
+
+int verbose;
+bool dump_trace = false, quiet = false;
+
+int eprintf(int level, const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (verbose >= level) {
+ va_start(args, fmt);
+ if (use_browser > 0)
+ ret = ui_helpline__show_help(fmt, args);
+ else
+ ret = vfprintf(stderr, fmt, args);
+ va_end(args);
+ }
+
+ return ret;
+}
+
+int dump_printf(const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (dump_trace) {
+ va_start(args, fmt);
+ ret = vprintf(fmt, args);
+ va_end(args);
+ }
+
+ return ret;
+}
+
+#ifdef NO_NEWT_SUPPORT
+int ui__warning(const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ vfprintf(stderr, format, args);
+ va_end(args);
+ return 0;
+}
+#endif
+
+int ui__error_paranoid(void)
+{
+ return ui__error("Permission error - are you root?\n"
+ "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
+ " -1 - Not paranoid at all\n"
+ " 0 - Disallow raw tracepoint access for unpriv\n"
+ " 1 - Disallow cpu events for unpriv\n"
+ " 2 - Disallow kernel profiling for unpriv\n");
+}
+
+void trace_event(union perf_event *event)
+{
+ unsigned char *raw_event = (void *)event;
+ const char *color = PERF_COLOR_BLUE;
+ int i, j;
+
+ if (!dump_trace)
+ return;
+
+ printf(".");
+ color_fprintf(stdout, color, "\n. ... raw event: size %d bytes\n",
+ event->header.size);
+
+ for (i = 0; i < event->header.size; i++) {
+ if ((i & 15) == 0) {
+ printf(".");
+ color_fprintf(stdout, color, " %04x: ", i);
+ }
+
+ color_fprintf(stdout, color, " %02x", raw_event[i]);
+
+ if (((i & 15) == 15) || i == event->header.size-1) {
+ color_fprintf(stdout, color, " ");
+ for (j = 0; j < 15-(i & 15); j++)
+ color_fprintf(stdout, color, " ");
+ for (j = i & ~15; j <= i; j++) {
+ color_fprintf(stdout, color, "%c",
+ isprint(raw_event[j]) ?
+ raw_event[j] : '.');
+ }
+ color_fprintf(stdout, color, "\n");
+ }
+ }
+ printf(".\n");
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/debug.h b/ANDROID_3.4.5/tools/perf/util/debug.h
new file mode 100644
index 00000000..f2ce88d0
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/debug.h
@@ -0,0 +1,36 @@
+/* For general debugging purposes */
+#ifndef __PERF_DEBUG_H
+#define __PERF_DEBUG_H
+
+#include <stdbool.h>
+#include "event.h"
+
+extern int verbose;
+extern bool quiet, dump_trace;
+
+int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+void trace_event(union perf_event *event);
+
+struct ui_progress;
+
+#ifdef NO_NEWT_SUPPORT
+static inline int ui_helpline__show_help(const char *format __used, va_list ap __used)
+{
+ return 0;
+}
+
+static inline void ui_progress__update(u64 curr __used, u64 total __used,
+ const char *title __used) {}
+
+#define ui__error(format, arg...) ui__warning(format, ##arg)
+#else
+extern char ui_helpline__last_msg[];
+int ui_helpline__show_help(const char *format, va_list ap);
+#include "ui/progress.h"
+int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
+#endif
+
+int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
+int ui__error_paranoid(void);
+
+#endif /* __PERF_DEBUG_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/debugfs.c b/ANDROID_3.4.5/tools/perf/util/debugfs.c
new file mode 100644
index 00000000..dd8b1931
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/debugfs.c
@@ -0,0 +1,114 @@
+#include "util.h"
+#include "debugfs.h"
+#include "cache.h"
+
+#include <linux/kernel.h>
+#include <sys/mount.h>
+
+static int debugfs_premounted;
+char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug";
+char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";
+
+static const char *debugfs_known_mountpoints[] = {
+ "/sys/kernel/debug/",
+ "/debug/",
+ 0,
+};
+
+static int debugfs_found;
+
+/* find the path to the mounted debugfs */
+const char *debugfs_find_mountpoint(void)
+{
+ const char **ptr;
+ char type[100];
+ FILE *fp;
+
+ if (debugfs_found)
+ return (const char *) debugfs_mountpoint;
+
+ ptr = debugfs_known_mountpoints;
+ while (*ptr) {
+ if (debugfs_valid_mountpoint(*ptr) == 0) {
+ debugfs_found = 1;
+ strcpy(debugfs_mountpoint, *ptr);
+ return debugfs_mountpoint;
+ }
+ ptr++;
+ }
+
+ /* give up and parse /proc/mounts */
+ fp = fopen("/proc/mounts", "r");
+ if (fp == NULL)
+ return NULL;
+
+ while (fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n",
+ debugfs_mountpoint, type) == 2) {
+ if (strcmp(type, "debugfs") == 0)
+ break;
+ }
+ fclose(fp);
+
+ if (strcmp(type, "debugfs") != 0)
+ return NULL;
+
+ debugfs_found = 1;
+
+ return debugfs_mountpoint;
+}
+
+/* verify that a mountpoint is actually a debugfs instance */
+
+int debugfs_valid_mountpoint(const char *debugfs)
+{
+ struct statfs st_fs;
+
+ if (statfs(debugfs, &st_fs) < 0)
+ return -ENOENT;
+ else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
+ return -ENOENT;
+
+ return 0;
+}
+
+static void debugfs_set_tracing_events_path(const char *mountpoint)
+{
+ snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s",
+ mountpoint, "tracing/events");
+}
+
+/* mount debugfs somewhere if it's not already mounted */
+
+char *debugfs_mount(const char *mountpoint)
+{
+ /* see if it's already mounted */
+ if (debugfs_find_mountpoint()) {
+ debugfs_premounted = 1;
+ goto out;
+ }
+
+ /* if not mounted and no argument */
+ if (mountpoint == NULL) {
+ /* see if environment variable set */
+ mountpoint = getenv(PERF_DEBUGFS_ENVIRONMENT);
+ /* if no environment variable, use default */
+ if (mountpoint == NULL)
+ mountpoint = "/sys/kernel/debug";
+ }
+
+ if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0)
+ return NULL;
+
+ /* save the mountpoint */
+ debugfs_found = 1;
+ strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
+out:
+ debugfs_set_tracing_events_path(debugfs_mountpoint);
+ return debugfs_mountpoint;
+}
+
+void debugfs_set_path(const char *mountpoint)
+{
+ snprintf(debugfs_mountpoint, sizeof(debugfs_mountpoint), "%s", mountpoint);
+ debugfs_set_tracing_events_path(mountpoint);
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/debugfs.h b/ANDROID_3.4.5/tools/perf/util/debugfs.h
new file mode 100644
index 00000000..68f3e87e
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/debugfs.h
@@ -0,0 +1,12 @@
+#ifndef __DEBUGFS_H__
+#define __DEBUGFS_H__
+
+const char *debugfs_find_mountpoint(void);
+int debugfs_valid_mountpoint(const char *debugfs);
+char *debugfs_mount(const char *mountpoint);
+void debugfs_set_path(const char *mountpoint);
+
+extern char debugfs_mountpoint[];
+extern char tracing_events_path[];
+
+#endif /* __DEBUGFS_H__ */
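For illustration, a small sketch of how the debugfs helpers above are commonly combined: look for an existing mount first, otherwise ask debugfs_mount() to mount one (passing NULL lets it fall back to the environment override or /sys/kernel/debug). The wrapper name is hypothetical.

	/* Hypothetical helper: make sure debugfs (and hence tracing_events_path)
	 * is usable before reading tracepoint definitions. */
	#include "debugfs.h"

	static int ensure_debugfs(void)
	{
		if (debugfs_find_mountpoint() != NULL)
			return 0;

		return debugfs_mount(NULL) != NULL ? 0 : -1;
	}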
diff --git a/ANDROID_3.4.5/tools/perf/util/dwarf-aux.c b/ANDROID_3.4.5/tools/perf/util/dwarf-aux.c
new file mode 100644
index 00000000..ee51e9b4
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/dwarf-aux.c
@@ -0,0 +1,843 @@
+/*
+ * dwarf-aux.c : libdw auxiliary interfaces
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <stdbool.h>
+#include "util.h"
+#include "debug.h"
+#include "dwarf-aux.h"
+
+/**
+ * cu_find_realpath - Find the realpath of the target file
+ * @cu_die: A DIE (dwarf information entry) of a CU (compilation unit)
+ * @fname: The tail filename of the target file
+ *
+ * Find the real (long) path of @fname in @cu_die.
+ */
+const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname)
+{
+ Dwarf_Files *files;
+ size_t nfiles, i;
+ const char *src = NULL;
+ int ret;
+
+ if (!fname)
+ return NULL;
+
+ ret = dwarf_getsrcfiles(cu_die, &files, &nfiles);
+ if (ret != 0)
+ return NULL;
+
+ for (i = 0; i < nfiles; i++) {
+ src = dwarf_filesrc(files, i, NULL, NULL);
+ if (strtailcmp(src, fname) == 0)
+ break;
+ }
+ if (i == nfiles)
+ return NULL;
+ return src;
+}
+
+/**
+ * cu_get_comp_dir - Get the path of compilation directory
+ * @cu_die: a CU DIE
+ *
+ * Get the path of the compilation directory of the given @cu_die.
+ * Since this depends on DW_AT_comp_dir, older gcc will not have
+ * embedded it; in that case, this returns NULL.
+ */
+const char *cu_get_comp_dir(Dwarf_Die *cu_die)
+{
+ Dwarf_Attribute attr;
+ if (dwarf_attr(cu_die, DW_AT_comp_dir, &attr) == NULL)
+ return NULL;
+ return dwarf_formstring(&attr);
+}
+
+/**
+ * cu_find_lineinfo - Get a line number and file name for given address
+ * @cu_die: a CU DIE
+ * @addr: An address
+ * @fname: a pointer which returns the file name string
+ * @lineno: a pointer which returns the line number
+ *
+ * Find a line number and file name for @addr in @cu_die.
+ */
+int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr,
+ const char **fname, int *lineno)
+{
+ Dwarf_Line *line;
+ Dwarf_Addr laddr;
+
+ line = dwarf_getsrc_die(cu_die, (Dwarf_Addr)addr);
+ if (line && dwarf_lineaddr(line, &laddr) == 0 &&
+ addr == (unsigned long)laddr && dwarf_lineno(line, lineno) == 0) {
+ *fname = dwarf_linesrc(line, NULL, NULL);
+ if (!*fname)
+ /* line number is useless without filename */
+ *lineno = 0;
+ }
+
+ return *lineno ?: -ENOENT;
+}
+
+static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data);
+
+/**
+ * cu_walk_functions_at - Walk on function DIEs at given address
+ * @cu_die: A CU DIE
+ * @addr: An address
+ * @callback: A callback which is called with each DIE found
+ * @data: A user data pointer
+ *
+ * Walk on function DIEs at the given @addr in @cu_die. The DIEs passed
+ * to @callback are subprogram or inlined-subroutine DIEs.
+ */
+int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ int (*callback)(Dwarf_Die *, void *), void *data)
+{
+ Dwarf_Die die_mem;
+ Dwarf_Die *sc_die;
+ int ret = -ENOENT;
+
+ /* Inlined functions can be recursive. Trace them until this fails */
+ for (sc_die = die_find_realfunc(cu_die, addr, &die_mem);
+ sc_die != NULL;
+ sc_die = die_find_child(sc_die, __die_find_inline_cb, &addr,
+ &die_mem)) {
+ ret = callback(sc_die, data);
+ if (ret)
+ break;
+ }
+
+ return ret;
+
+}
+
+/**
+ * die_compare_name - Compare diename and tname
+ * @dw_die: a DIE
+ * @tname: a string of target name
+ *
+ * Compare the name of @dw_die and @tname. Return false if @dw_die has no name.
+ */
+bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
+{
+ const char *name;
+ name = dwarf_diename(dw_die);
+ return name ? (strcmp(tname, name) == 0) : false;
+}
+
+/**
+ * die_get_call_lineno - Get callsite line number of inline-function instance
+ * @in_die: a DIE of an inlined function instance
+ *
+ * Get the call-site line number of @in_die, i.e. the line from which the
+ * inline function is called.
+ */
+int die_get_call_lineno(Dwarf_Die *in_die)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Word ret;
+
+ if (!dwarf_attr(in_die, DW_AT_call_line, &attr))
+ return -ENOENT;
+
+ dwarf_formudata(&attr, &ret);
+ return (int)ret;
+}
+
+/**
+ * die_get_type - Get type DIE
+ * @vr_die: a DIE of a variable
+ * @die_mem: where to store a type DIE
+ *
+ * Get a DIE of the type of the given variable (@vr_die), and store
+ * it in @die_mem. Returns NULL if it fails to get a type DIE.
+ */
+Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
+ Dwarf_Attribute attr;
+
+ if (dwarf_attr_integrate(vr_die, DW_AT_type, &attr) &&
+ dwarf_formref_die(&attr, die_mem))
+ return die_mem;
+ else
+ return NULL;
+}
+
+/* Get a type die, but skip qualifiers */
+static Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
+ int tag;
+
+ do {
+ vr_die = die_get_type(vr_die, die_mem);
+ if (!vr_die)
+ break;
+ tag = dwarf_tag(vr_die);
+ } while (tag == DW_TAG_const_type ||
+ tag == DW_TAG_restrict_type ||
+ tag == DW_TAG_volatile_type ||
+ tag == DW_TAG_shared_type);
+
+ return vr_die;
+}
+
+/**
+ * die_get_real_type - Get a type die, but skip qualifiers and typedef
+ * @vr_die: a DIE of a variable
+ * @die_mem: where to store a type DIE
+ *
+ * Get a DIE of the type of the given variable (@vr_die), and store
+ * it in @die_mem. Returns NULL if it fails to get a type DIE.
+ * If the type is a qualifier (e.g. const) or a typedef, this skips it
+ * and tries to find the real type (a structure or basic type, e.g. int).
+ */
+Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
+ do {
+ vr_die = __die_get_real_type(vr_die, die_mem);
+ } while (vr_die && dwarf_tag(vr_die) == DW_TAG_typedef);
+
+ return vr_die;
+}
+
+/* Get attribute and translate it as a udata */
+static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name,
+ Dwarf_Word *result)
+{
+ Dwarf_Attribute attr;
+
+ if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
+ dwarf_formudata(&attr, result) != 0)
+ return -ENOENT;
+
+ return 0;
+}
+
+/* Get attribute and translate it as a sdata */
+static int die_get_attr_sdata(Dwarf_Die *tp_die, unsigned int attr_name,
+ Dwarf_Sword *result)
+{
+ Dwarf_Attribute attr;
+
+ if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
+ dwarf_formsdata(&attr, result) != 0)
+ return -ENOENT;
+
+ return 0;
+}
+
+/**
+ * die_is_signed_type - Check whether a type DIE is signed or not
+ * @tp_die: a DIE of a type
+ *
+ * Get the encoding of @tp_die and return true if the encoding
+ * is signed.
+ */
+bool die_is_signed_type(Dwarf_Die *tp_die)
+{
+ Dwarf_Word ret;
+
+ if (die_get_attr_udata(tp_die, DW_AT_encoding, &ret))
+ return false;
+
+ return (ret == DW_ATE_signed_char || ret == DW_ATE_signed ||
+ ret == DW_ATE_signed_fixed);
+}
+
+/**
+ * die_get_data_member_location - Get the data-member offset
+ * @mb_die: a DIE of a member of a data structure
+ * @offs: The offset of the member in the data structure
+ *
+ * Get the offset of @mb_die in the data structure containing it, and
+ * store the resulting offset in @offs. If any error occurs this returns
+ * a negative errno.
+ */
+int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Op *expr;
+ size_t nexpr;
+ int ret;
+
+ if (dwarf_attr(mb_die, DW_AT_data_member_location, &attr) == NULL)
+ return -ENOENT;
+
+ if (dwarf_formudata(&attr, offs) != 0) {
+ /* DW_AT_data_member_location should be DW_OP_plus_uconst */
+ ret = dwarf_getlocation(&attr, &expr, &nexpr);
+ if (ret < 0 || nexpr == 0)
+ return -ENOENT;
+
+ if (expr[0].atom != DW_OP_plus_uconst || nexpr != 1) {
+ pr_debug("Unable to get offset:Unexpected OP %x (%zd)\n",
+ expr[0].atom, nexpr);
+ return -ENOTSUP;
+ }
+ *offs = (Dwarf_Word)expr[0].number;
+ }
+ return 0;
+}
+
+/* Get the call file index number in CU DIE */
+static int die_get_call_fileno(Dwarf_Die *in_die)
+{
+ Dwarf_Sword idx;
+
+ if (die_get_attr_sdata(in_die, DW_AT_call_file, &idx) == 0)
+ return (int)idx;
+ else
+ return -ENOENT;
+}
+
+/* Get the declared file index number in CU DIE */
+static int die_get_decl_fileno(Dwarf_Die *pdie)
+{
+ Dwarf_Sword idx;
+
+ if (die_get_attr_sdata(pdie, DW_AT_decl_file, &idx) == 0)
+ return (int)idx;
+ else
+ return -ENOENT;
+}
+
+/**
+ * die_get_call_file - Get callsite file name of inlined function instance
+ * @in_die: a DIE of an inlined function instance
+ *
+ * Get the call-site file name of @in_die, i.e. the file from which the
+ * inline function is called.
+ */
+const char *die_get_call_file(Dwarf_Die *in_die)
+{
+ Dwarf_Die cu_die;
+ Dwarf_Files *files;
+ int idx;
+
+ idx = die_get_call_fileno(in_die);
+ if (idx < 0 || !dwarf_diecu(in_die, &cu_die, NULL, NULL) ||
+ dwarf_getsrcfiles(&cu_die, &files, NULL) != 0)
+ return NULL;
+
+ return dwarf_filesrc(files, idx, NULL, NULL);
+}
+
+
+/**
+ * die_find_child - Generic DIE search function in DIE tree
+ * @rt_die: a root DIE
+ * @callback: a callback function
+ * @data: a user data passed to the callback function
+ * @die_mem: a buffer for result DIE
+ *
+ * Trace the DIE tree from @rt_die and call @callback for each child DIE.
+ * If @callback returns DIE_FIND_CB_END, this stores the DIE into
+ * @die_mem and returns it. If @callback returns DIE_FIND_CB_CONTINUE,
+ * this continues to trace the tree. Optionally, @callback can return
+ * DIE_FIND_CB_CHILD or DIE_FIND_CB_SIBLING, which mean trace only
+ * the children or only the siblings, respectively.
+ * Returns NULL if @callback can't find any appropriate DIE.
+ */
+Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
+ int (*callback)(Dwarf_Die *, void *),
+ void *data, Dwarf_Die *die_mem)
+{
+ Dwarf_Die child_die;
+ int ret;
+
+ ret = dwarf_child(rt_die, die_mem);
+ if (ret != 0)
+ return NULL;
+
+ do {
+ ret = callback(die_mem, data);
+ if (ret == DIE_FIND_CB_END)
+ return die_mem;
+
+ if ((ret & DIE_FIND_CB_CHILD) &&
+ die_find_child(die_mem, callback, data, &child_die)) {
+ memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
+ return die_mem;
+ }
+ } while ((ret & DIE_FIND_CB_SIBLING) &&
+ dwarf_siblingof(die_mem, die_mem) == 0);
+
+ return NULL;
+}
+
+struct __addr_die_search_param {
+ Dwarf_Addr addr;
+ Dwarf_Die *die_mem;
+};
+
+/* die_find callback for non-inlined function search */
+static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
+{
+ struct __addr_die_search_param *ad = data;
+
+ if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
+ dwarf_haspc(fn_die, ad->addr)) {
+ memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
+ return DWARF_CB_ABORT;
+ }
+ return DWARF_CB_OK;
+}
+
+/**
+ * die_find_realfunc - Search a non-inlined function at given address
+ * @cu_die: a CU DIE which includes @addr
+ * @addr: target address
+ * @die_mem: a buffer for result DIE
+ *
+ * Search a non-inlined function DIE which includes @addr. Stores the
+ * DIE in @die_mem and returns it if found. Returns NULL if not found.
+ */
+Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem)
+{
+ struct __addr_die_search_param ad;
+ ad.addr = addr;
+ ad.die_mem = die_mem;
+ /* dwarf_getscopes can't find subprogram. */
+ if (!dwarf_getfuncs(cu_die, __die_search_func_cb, &ad, 0))
+ return NULL;
+ else
+ return die_mem;
+}
+
+/* die_find callback for inline function search */
+static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data)
+{
+ Dwarf_Addr *addr = data;
+
+ if (dwarf_tag(die_mem) == DW_TAG_inlined_subroutine &&
+ dwarf_haspc(die_mem, *addr))
+ return DIE_FIND_CB_END;
+
+ return DIE_FIND_CB_CONTINUE;
+}
+
+/**
+ * die_find_inlinefunc - Search an inlined function at given address
+ * @cu_die: a CU DIE which includes @addr
+ * @addr: target address
+ * @die_mem: a buffer for result DIE
+ *
+ * Search an inlined function DIE which includes @addr. Stores the
+ * DIE in @die_mem and returns it if found. Returns NULL if not found.
+ * If several inlined functions are expanded recursively, this traces
+ * them and returns the deepest one.
+ */
+Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem)
+{
+ Dwarf_Die tmp_die;
+
+ sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr, &tmp_die);
+ if (!sp_die)
+ return NULL;
+
+ /* Inlined functions can be recursive. Trace them until this fails */
+ while (sp_die) {
+ memcpy(die_mem, sp_die, sizeof(Dwarf_Die));
+ sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr,
+ &tmp_die);
+ }
+
+ return die_mem;
+}
+
+struct __instance_walk_param {
+ void *addr;
+ int (*callback)(Dwarf_Die *, void *);
+ void *data;
+ int retval;
+};
+
+static int __die_walk_instances_cb(Dwarf_Die *inst, void *data)
+{
+ struct __instance_walk_param *iwp = data;
+ Dwarf_Attribute attr_mem;
+ Dwarf_Die origin_mem;
+ Dwarf_Attribute *attr;
+ Dwarf_Die *origin;
+ int tmp;
+
+ attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem);
+ if (attr == NULL)
+ return DIE_FIND_CB_CONTINUE;
+
+ origin = dwarf_formref_die(attr, &origin_mem);
+ if (origin == NULL || origin->addr != iwp->addr)
+ return DIE_FIND_CB_CONTINUE;
+
+ /* Ignore redundant instances */
+ if (dwarf_tag(inst) == DW_TAG_inlined_subroutine) {
+ dwarf_decl_line(origin, &tmp);
+ if (die_get_call_lineno(inst) == tmp) {
+ tmp = die_get_decl_fileno(origin);
+ if (die_get_call_fileno(inst) == tmp)
+ return DIE_FIND_CB_CONTINUE;
+ }
+ }
+
+ iwp->retval = iwp->callback(inst, iwp->data);
+
+ return (iwp->retval) ? DIE_FIND_CB_END : DIE_FIND_CB_CONTINUE;
+}
+
+/**
+ * die_walk_instances - Walk on instances of given DIE
+ * @or_die: an abstract original DIE
+ * @callback: a callback function which is called with instance DIE
+ * @data: user data
+ *
+ * Walk on the instances of the given @or_die. @or_die must be an inlined
+ * function declaration. This returns the return value of @callback if it
+ * returns a non-zero value, or -ENOENT if there is no instance.
+ */
+int die_walk_instances(Dwarf_Die *or_die, int (*callback)(Dwarf_Die *, void *),
+ void *data)
+{
+ Dwarf_Die cu_die;
+ Dwarf_Die die_mem;
+ struct __instance_walk_param iwp = {
+ .addr = or_die->addr,
+ .callback = callback,
+ .data = data,
+ .retval = -ENOENT,
+ };
+
+ if (dwarf_diecu(or_die, &cu_die, NULL, NULL) == NULL)
+ return -ENOENT;
+
+ die_find_child(&cu_die, __die_walk_instances_cb, &iwp, &die_mem);
+
+ return iwp.retval;
+}
+
+/* Line walker internal parameters */
+struct __line_walk_param {
+ bool recursive;
+ line_walk_callback_t callback;
+ void *data;
+ int retval;
+};
+
+static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
+{
+ struct __line_walk_param *lw = data;
+ Dwarf_Addr addr = 0;
+ const char *fname;
+ int lineno;
+
+ if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
+ fname = die_get_call_file(in_die);
+ lineno = die_get_call_lineno(in_die);
+ if (fname && lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) {
+ lw->retval = lw->callback(fname, lineno, addr, lw->data);
+ if (lw->retval != 0)
+ return DIE_FIND_CB_END;
+ }
+ }
+ if (!lw->recursive)
+ /* Don't need to search recursively */
+ return DIE_FIND_CB_SIBLING;
+
+ if (addr) {
+ fname = dwarf_decl_file(in_die);
+ if (fname && dwarf_decl_line(in_die, &lineno) == 0) {
+ lw->retval = lw->callback(fname, lineno, addr, lw->data);
+ if (lw->retval != 0)
+ return DIE_FIND_CB_END;
+ }
+ }
+
+ /* Continue to search nested inlined function call-sites */
+ return DIE_FIND_CB_CONTINUE;
+}
+
+/* Walk on lines of blocks included in given DIE */
+static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive,
+ line_walk_callback_t callback, void *data)
+{
+ struct __line_walk_param lw = {
+ .recursive = recursive,
+ .callback = callback,
+ .data = data,
+ .retval = 0,
+ };
+ Dwarf_Die die_mem;
+ Dwarf_Addr addr;
+ const char *fname;
+ int lineno;
+
+ /* Handle function declaration line */
+ fname = dwarf_decl_file(sp_die);
+ if (fname && dwarf_decl_line(sp_die, &lineno) == 0 &&
+ dwarf_entrypc(sp_die, &addr) == 0) {
+ lw.retval = callback(fname, lineno, addr, data);
+ if (lw.retval != 0)
+ goto done;
+ }
+ die_find_child(sp_die, __die_walk_funclines_cb, &lw, &die_mem);
+done:
+ return lw.retval;
+}
+
+static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
+{
+ struct __line_walk_param *lw = data;
+
+ lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data);
+ if (lw->retval != 0)
+ return DWARF_CB_ABORT;
+
+ return DWARF_CB_OK;
+}
+
+/**
+ * die_walk_lines - Walk on lines inside given DIE
+ * @rt_die: a root DIE (CU, subprogram or inlined_subroutine)
+ * @callback: callback routine
+ * @data: user data
+ *
+ * Walk on all lines inside the given @rt_die and call @callback on each line.
+ * If @rt_die is a function, walk only on the lines inside the function,
+ * otherwise @rt_die must be a CU DIE.
+ * Note that this walks not only the dwarf line list, but also function
+ * entries and inline call-sites.
+ */
+int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
+{
+ Dwarf_Lines *lines;
+ Dwarf_Line *line;
+ Dwarf_Addr addr;
+ const char *fname;
+ int lineno, ret = 0;
+ Dwarf_Die die_mem, *cu_die;
+ size_t nlines, i;
+
+ /* Get the CU die */
+ if (dwarf_tag(rt_die) != DW_TAG_compile_unit)
+ cu_die = dwarf_diecu(rt_die, &die_mem, NULL, NULL);
+ else
+ cu_die = rt_die;
+ if (!cu_die) {
+ pr_debug2("Failed to get CU from given DIE.\n");
+ return -EINVAL;
+ }
+
+ /* Get lines list in the CU */
+ if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0) {
+ pr_debug2("Failed to get source lines on this CU.\n");
+ return -ENOENT;
+ }
+ pr_debug2("Get %zd lines from this CU\n", nlines);
+
+ /* Walk on the lines on lines list */
+ for (i = 0; i < nlines; i++) {
+ line = dwarf_onesrcline(lines, i);
+ if (line == NULL ||
+ dwarf_lineno(line, &lineno) != 0 ||
+ dwarf_lineaddr(line, &addr) != 0) {
+ pr_debug2("Failed to get line info. "
+ "Possible error in debuginfo.\n");
+ continue;
+ }
+ /* Filter lines based on address */
+ if (rt_die != cu_die)
+ /*
+ * Address filtering
+ * The line is included in the given function, and
+ * no inline block includes it.
+ */
+ if (!dwarf_haspc(rt_die, addr) ||
+ die_find_inlinefunc(rt_die, addr, &die_mem))
+ continue;
+ /* Get source line */
+ fname = dwarf_linesrc(line, NULL, NULL);
+
+ ret = callback(fname, lineno, addr, data);
+ if (ret != 0)
+ return ret;
+ }
+
+ /*
+ * Dwarf lines don't include function declarations and inlined
+ * subroutines. We have to check the functions list or the given function.
+ */
+ if (rt_die != cu_die)
+ /*
+ * No need to walk functions recursively, because nested
+ * inlined functions don't have lines of the specified DIE.
+ */
+ ret = __die_walk_funclines(rt_die, false, callback, data);
+ else {
+ struct __line_walk_param param = {
+ .callback = callback,
+ .data = data,
+ .retval = 0,
+ };
+ dwarf_getfuncs(cu_die, __die_walk_culines_cb, &param, 0);
+ ret = param.retval;
+ }
+
+ return ret;
+}
+
+struct __find_variable_param {
+ const char *name;
+ Dwarf_Addr addr;
+};
+
+static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
+{
+ struct __find_variable_param *fvp = data;
+ int tag;
+
+ tag = dwarf_tag(die_mem);
+ if ((tag == DW_TAG_formal_parameter ||
+ tag == DW_TAG_variable) &&
+ die_compare_name(die_mem, fvp->name))
+ return DIE_FIND_CB_END;
+
+ if (dwarf_haspc(die_mem, fvp->addr))
+ return DIE_FIND_CB_CONTINUE;
+ else
+ return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_find_variable_at - Find a given name variable at given address
+ * @sp_die: a function DIE
+ * @name: variable name
+ * @addr: address
+ * @die_mem: a buffer for result DIE
+ *
+ * Find a variable DIE called @name at @addr in @sp_die.
+ */
+Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name,
+ Dwarf_Addr addr, Dwarf_Die *die_mem)
+{
+ struct __find_variable_param fvp = { .name = name, .addr = addr};
+
+ return die_find_child(sp_die, __die_find_variable_cb, (void *)&fvp,
+ die_mem);
+}
+
+static int __die_find_member_cb(Dwarf_Die *die_mem, void *data)
+{
+ const char *name = data;
+
+ if ((dwarf_tag(die_mem) == DW_TAG_member) &&
+ die_compare_name(die_mem, name))
+ return DIE_FIND_CB_END;
+
+ return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_find_member - Find a given name member in a data structure
+ * @st_die: a data structure type DIE
+ * @name: member name
+ * @die_mem: a buffer for result DIE
+ *
+ * Find a member DIE called @name in @st_die.
+ */
+Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
+ Dwarf_Die *die_mem)
+{
+ return die_find_child(st_die, __die_find_member_cb, (void *)name,
+ die_mem);
+}
+
+/**
+ * die_get_typename - Get the type name of given variable DIE
+ * @vr_die: a variable DIE
+ * @buf: a buffer for result type name
+ * @len: a max-length of @buf
+ *
+ * Get the type name of @vr_die and store it in @buf. Returns the actual
+ * length of the type name on success, -E2BIG if @len is not long enough,
+ * or -ENOENT if the type name cannot be found.
+ * Note that the result stores the typedef name if possible, and stores
+ * "*(function_type)" if the type is a function pointer.
+ */
+int die_get_typename(Dwarf_Die *vr_die, char *buf, int len)
+{
+ Dwarf_Die type;
+ int tag, ret, ret2;
+ const char *tmp = "";
+
+ if (__die_get_real_type(vr_die, &type) == NULL)
+ return -ENOENT;
+
+ tag = dwarf_tag(&type);
+ if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)
+ tmp = "*";
+ else if (tag == DW_TAG_subroutine_type) {
+ /* Function pointer */
+ ret = snprintf(buf, len, "(function_type)");
+ return (ret >= len) ? -E2BIG : ret;
+ } else {
+ if (!dwarf_diename(&type))
+ return -ENOENT;
+ if (tag == DW_TAG_union_type)
+ tmp = "union ";
+ else if (tag == DW_TAG_structure_type)
+ tmp = "struct ";
+ /* Write a base name */
+ ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type));
+ return (ret >= len) ? -E2BIG : ret;
+ }
+ ret = die_get_typename(&type, buf, len);
+ if (ret > 0) {
+ ret2 = snprintf(buf + ret, len - ret, "%s", tmp);
+ ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
+ }
+ return ret;
+}
+
+/**
+ * die_get_varname - Get the name and type of given variable DIE
+ * @vr_die: a variable DIE
+ * @buf: a buffer for type and variable name
+ * @len: the max-length of @buf
+ *
+ * Get the name and type of @vr_die and store them in @buf as "type\tname".
+ */
+int die_get_varname(Dwarf_Die *vr_die, char *buf, int len)
+{
+ int ret, ret2;
+
+ ret = die_get_typename(vr_die, buf, len);
+ if (ret < 0) {
+ pr_debug("Failed to get type, make it unknown.\n");
+ ret = snprintf(buf, len, "(unknown_type)");
+ }
+ if (ret > 0) {
+ ret2 = snprintf(buf + ret, len - ret, "\t%s",
+ dwarf_diename(vr_die));
+ ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
+ }
+ return ret;
+}
+
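As a hedged sketch of the line_walk_callback_t shape consumed by die_walk_lines() above (the printing callback is hypothetical and assumes stdio plus perf's __used annotation are available):

	/* Hypothetical callback: print each line/address pair that is walked. */
	static int print_line_cb(const char *fname, int lineno, Dwarf_Addr addr,
				 void *data __used)
	{
		printf("%s:%d\t0x%lx\n", fname, lineno, (unsigned long)addr);
		return 0;	/* returning non-zero stops the walk */
	}

	/* usage: die_walk_lines(&cu_die, print_line_cb, NULL); */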
diff --git a/ANDROID_3.4.5/tools/perf/util/dwarf-aux.h b/ANDROID_3.4.5/tools/perf/util/dwarf-aux.h
new file mode 100644
index 00000000..6ce17177
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/dwarf-aux.h
@@ -0,0 +1,111 @@
+#ifndef _DWARF_AUX_H
+#define _DWARF_AUX_H
+/*
+ * dwarf-aux.h : libdw auxiliary interfaces
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <dwarf.h>
+#include <elfutils/libdw.h>
+#include <elfutils/libdwfl.h>
+#include <elfutils/version.h>
+
+/* Find the realpath of the target file */
+extern const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname);
+
+/* Get DW_AT_comp_dir (should be NULL with older gcc) */
+extern const char *cu_get_comp_dir(Dwarf_Die *cu_die);
+
+/* Get a line number and file name for given address */
+extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
+ const char **fname, int *lineno);
+
+/* Walk on functions at given address */
+extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ int (*callback)(Dwarf_Die *, void *), void *data);
+
+/* Compare diename and tname */
+extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
+
+/* Get callsite line number of inline-function instance */
+extern int die_get_call_lineno(Dwarf_Die *in_die);
+
+/* Get callsite file name of inlined function instance */
+extern const char *die_get_call_file(Dwarf_Die *in_die);
+
+/* Get type die */
+extern Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem);
+
+/* Get a type die, but skip qualifiers and typedef */
+extern Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem);
+
+/* Check whether the DIE is signed or not */
+extern bool die_is_signed_type(Dwarf_Die *tp_die);
+
+/* Get data_member_location offset */
+extern int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs);
+
+/* Return values for die_find_child() callbacks */
+enum {
+ DIE_FIND_CB_END = 0, /* End of Search */
+ DIE_FIND_CB_CHILD = 1, /* Search only children */
+ DIE_FIND_CB_SIBLING = 2, /* Search only siblings */
+ DIE_FIND_CB_CONTINUE = 3, /* Search children and siblings */
+};
+
+/* Search child DIEs */
+extern Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
+ int (*callback)(Dwarf_Die *, void *),
+ void *data, Dwarf_Die *die_mem);
+
+/* Search a non-inlined function including given address */
+extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem);
+
+/* Search an inlined function including given address */
+extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem);
+
+/* Walk on the instances of given DIE */
+extern int die_walk_instances(Dwarf_Die *in_die,
+ int (*callback)(Dwarf_Die *, void *), void *data);
+
+/* Walker on lines (Note: line numbers will not be sorted) */
+typedef int (* line_walk_callback_t) (const char *fname, int lineno,
+ Dwarf_Addr addr, void *data);
+
+/*
+ * Walk on lines inside given DIE. If the DIE is a subprogram, walk only on
+ * the lines inside the subprogram, otherwise the DIE must be a CU DIE.
+ */
+extern int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback,
+ void *data);
+
+/* Find a variable called 'name' at given address */
+extern Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name,
+ Dwarf_Addr addr, Dwarf_Die *die_mem);
+
+/* Find a member called 'name' */
+extern Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
+ Dwarf_Die *die_mem);
+
+/* Get the type name of given variable DIE */
+extern int die_get_typename(Dwarf_Die *vr_die, char *buf, int len);
+
+/* Get the name and type of given variable DIE, stored as "type\tname" */
+extern int die_get_varname(Dwarf_Die *vr_die, char *buf, int len);
+#endif
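
The DIE_FIND_CB_* values above are the contract for die_find_child() callbacks:
return DIE_FIND_CB_END to accept the current child, or one of the other values
to tell the walker where to keep searching. A small illustrative callback
(hypothetical name, not part of this header):

	static int __find_first_member_cb(Dwarf_Die *die_mem, void *data __used)
	{
		/* accept the first DW_TAG_member, otherwise scan only siblings */
		return dwarf_tag(die_mem) == DW_TAG_member ?
		       DIE_FIND_CB_END : DIE_FIND_CB_SIBLING;
	}

	/* usage: die_find_child(st_die, __find_first_member_cb, NULL, &die_mem); */
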
diff --git a/ANDROID_3.4.5/tools/perf/util/environment.c b/ANDROID_3.4.5/tools/perf/util/environment.c
new file mode 100644
index 00000000..275b0ee3
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/environment.c
@@ -0,0 +1,9 @@
+/*
+ * We put all the perf config variables in this same object
+ * file, so that programs can link against the config parser
+ * without having to link against all the rest of perf.
+ */
+#include "cache.h"
+
+const char *pager_program;
+int pager_use_color = 1;
diff --git a/ANDROID_3.4.5/tools/perf/util/event.c b/ANDROID_3.4.5/tools/perf/util/event.c
new file mode 100644
index 00000000..2a6f33cd
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/event.c
@@ -0,0 +1,898 @@
+#include <linux/types.h>
+#include "event.h"
+#include "debug.h"
+#include "sort.h"
+#include "string.h"
+#include "strlist.h"
+#include "thread.h"
+#include "thread_map.h"
+
+static const char *perf_event__names[] = {
+ [0] = "TOTAL",
+ [PERF_RECORD_MMAP] = "MMAP",
+ [PERF_RECORD_LOST] = "LOST",
+ [PERF_RECORD_COMM] = "COMM",
+ [PERF_RECORD_EXIT] = "EXIT",
+ [PERF_RECORD_THROTTLE] = "THROTTLE",
+ [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
+ [PERF_RECORD_FORK] = "FORK",
+ [PERF_RECORD_READ] = "READ",
+ [PERF_RECORD_SAMPLE] = "SAMPLE",
+ [PERF_RECORD_HEADER_ATTR] = "ATTR",
+ [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
+ [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
+ [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
+ [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
+};
+
+const char *perf_event__name(unsigned int id)
+{
+ if (id >= ARRAY_SIZE(perf_event__names))
+ return "INVALID";
+ if (!perf_event__names[id])
+ return "UNKNOWN";
+ return perf_event__names[id];
+}
+
+static struct perf_sample synth_sample = {
+ .pid = -1,
+ .tid = -1,
+ .time = -1,
+ .stream_id = -1,
+ .cpu = -1,
+ .period = 1,
+};
+
+static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
+{
+ char filename[PATH_MAX];
+ char bf[BUFSIZ];
+ FILE *fp;
+ size_t size = 0;
+ pid_t tgid = -1;
+
+ snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
+
+ fp = fopen(filename, "r");
+ if (fp == NULL) {
+ pr_debug("couldn't open %s\n", filename);
+ return 0;
+ }
+
+ while (!comm[0] || (tgid < 0)) {
+ if (fgets(bf, sizeof(bf), fp) == NULL) {
+ pr_warning("couldn't get COMM and pgid, malformed %s\n",
+ filename);
+ break;
+ }
+
+ if (memcmp(bf, "Name:", 5) == 0) {
+ char *name = bf + 5;
+ while (*name && isspace(*name))
+ ++name;
+ size = strlen(name) - 1;
+ if (size >= len)
+ size = len - 1;
+ memcpy(comm, name, size);
+ comm[size] = '\0';
+
+ } else if (memcmp(bf, "Tgid:", 5) == 0) {
+ char *tgids = bf + 5;
+ while (*tgids && isspace(*tgids))
+ ++tgids;
+ tgid = atoi(tgids);
+ }
+ }
+
+ fclose(fp);
+
+ return tgid;
+}
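
For reference, the only two lines of /proc/<pid>/status that this loop consumes
look like the following (example values):

	Name:	cat
	Tgid:	4281

Everything else in the status file is skipped until both the comm and the
thread group id have been picked up.
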
+
+static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
+ union perf_event *event, pid_t pid,
+ int full,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ char filename[PATH_MAX];
+ size_t size;
+ DIR *tasks;
+ struct dirent dirent, *next;
+ pid_t tgid;
+
+ memset(&event->comm, 0, sizeof(event->comm));
+
+ tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
+ sizeof(event->comm.comm));
+ if (tgid < 0)
+ goto out;
+
+ event->comm.pid = tgid;
+ event->comm.header.type = PERF_RECORD_COMM;
+
+ size = strlen(event->comm.comm) + 1;
+ size = ALIGN(size, sizeof(u64));
+ memset(event->comm.comm + size, 0, machine->id_hdr_size);
+ event->comm.header.size = (sizeof(event->comm) -
+ (sizeof(event->comm.comm) - size) +
+ machine->id_hdr_size);
+ if (!full) {
+ event->comm.tid = pid;
+
+ process(tool, event, &synth_sample, machine);
+ goto out;
+ }
+
+ snprintf(filename, sizeof(filename), "/proc/%d/task", pid);
+
+ tasks = opendir(filename);
+ if (tasks == NULL) {
+ pr_debug("couldn't open %s\n", filename);
+ return 0;
+ }
+
+ while (!readdir_r(tasks, &dirent, &next) && next) {
+ char *end;
+ pid = strtol(dirent.d_name, &end, 10);
+ if (*end)
+ continue;
+
+ /* already have tgid; just want to update the comm */
+ (void) perf_event__get_comm_tgid(pid, event->comm.comm,
+ sizeof(event->comm.comm));
+
+ size = strlen(event->comm.comm) + 1;
+ size = ALIGN(size, sizeof(u64));
+ memset(event->comm.comm + size, 0, machine->id_hdr_size);
+ event->comm.header.size = (sizeof(event->comm) -
+ (sizeof(event->comm.comm) - size) +
+ machine->id_hdr_size);
+
+ event->comm.tid = pid;
+
+ process(tool, event, &synth_sample, machine);
+ }
+
+ closedir(tasks);
+out:
+ return tgid;
+}
+
+static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ union perf_event *event,
+ pid_t pid, pid_t tgid,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ char filename[PATH_MAX];
+ FILE *fp;
+
+ snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);
+
+ fp = fopen(filename, "r");
+ if (fp == NULL) {
+ /*
+ * We raced with a task exiting - just return:
+ */
+ pr_debug("couldn't open %s\n", filename);
+ return -1;
+ }
+
+ event->header.type = PERF_RECORD_MMAP;
+ /*
+ * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
+ */
+ event->header.misc = PERF_RECORD_MISC_USER;
+
+ while (1) {
+ char bf[BUFSIZ], *pbf = bf;
+ int n;
+ size_t size;
+ if (fgets(bf, sizeof(bf), fp) == NULL)
+ break;
+
+ /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
+ n = hex2u64(pbf, &event->mmap.start);
+ if (n < 0)
+ continue;
+ pbf += n + 1;
+ n = hex2u64(pbf, &event->mmap.len);
+ if (n < 0)
+ continue;
+ pbf += n + 3;
+ if (*pbf == 'x') { /* vm_exec */
+ char anonstr[] = "//anon\n";
+ char *execname = strchr(bf, '/');
+
+ /* Catch VDSO */
+ if (execname == NULL)
+ execname = strstr(bf, "[vdso]");
+
+ /* Catch anonymous mmaps */
+ if ((execname == NULL) && !strstr(bf, "["))
+ execname = anonstr;
+
+ if (execname == NULL)
+ continue;
+
+ pbf += 3;
+ n = hex2u64(pbf, &event->mmap.pgoff);
+
+ size = strlen(execname);
+ execname[size - 1] = '\0'; /* Remove \n */
+ memcpy(event->mmap.filename, execname, size);
+ size = ALIGN(size, sizeof(u64));
+ event->mmap.len -= event->mmap.start;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size));
+ memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+ event->mmap.header.size += machine->id_hdr_size;
+ event->mmap.pid = tgid;
+ event->mmap.tid = pid;
+
+ process(tool, event, &synth_sample, machine);
+ }
+ }
+
+ fclose(fp);
+ return 0;
+}
+
+int perf_event__synthesize_modules(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ struct rb_node *nd;
+ struct map_groups *kmaps = &machine->kmaps;
+ union perf_event *event = zalloc((sizeof(event->mmap) +
+ machine->id_hdr_size));
+ if (event == NULL) {
+ pr_debug("Not enough memory synthesizing mmap event "
+ "for kernel modules\n");
+ return -1;
+ }
+
+ event->header.type = PERF_RECORD_MMAP;
+
+ /*
+ * kernel uses 0 for user space maps, see kernel/perf_event.c
+ * __perf_event_mmap
+ */
+ if (machine__is_host(machine))
+ event->header.misc = PERF_RECORD_MISC_KERNEL;
+ else
+ event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+
+ for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
+ nd; nd = rb_next(nd)) {
+ size_t size;
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+
+ if (pos->dso->kernel)
+ continue;
+
+ size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
+ event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size));
+ memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+ event->mmap.header.size += machine->id_hdr_size;
+ event->mmap.start = pos->start;
+ event->mmap.len = pos->end - pos->start;
+ event->mmap.pid = machine->pid;
+
+ memcpy(event->mmap.filename, pos->dso->long_name,
+ pos->dso->long_name_len + 1);
+ process(tool, event, &synth_sample, machine);
+ }
+
+ free(event);
+ return 0;
+}
+
+static int __event__synthesize_thread(union perf_event *comm_event,
+ union perf_event *mmap_event,
+ pid_t pid, int full,
+ perf_event__handler_t process,
+ struct perf_tool *tool,
+ struct machine *machine)
+{
+ pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
+ process, machine);
+ if (tgid == -1)
+ return -1;
+ return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+ process, machine);
+}
+
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+ struct thread_map *threads,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ union perf_event *comm_event, *mmap_event;
+ int err = -1, thread, j;
+
+ comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
+ if (comm_event == NULL)
+ goto out;
+
+ mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
+ if (mmap_event == NULL)
+ goto out_free_comm;
+
+ err = 0;
+ for (thread = 0; thread < threads->nr; ++thread) {
+ if (__event__synthesize_thread(comm_event, mmap_event,
+ threads->map[thread], 0,
+ process, tool, machine)) {
+ err = -1;
+ break;
+ }
+
+ /*
+ * comm.pid is set to thread group id by
+ * perf_event__synthesize_comm
+ */
+ if ((int) comm_event->comm.pid != threads->map[thread]) {
+ bool need_leader = true;
+
+ /* is thread group leader in thread_map? */
+ for (j = 0; j < threads->nr; ++j) {
+ if ((int) comm_event->comm.pid == threads->map[j]) {
+ need_leader = false;
+ break;
+ }
+ }
+
+ /* if not, generate events for it */
+ if (need_leader &&
+ __event__synthesize_thread(comm_event,
+ mmap_event,
+ comm_event->comm.pid, 0,
+ process, tool, machine)) {
+ err = -1;
+ break;
+ }
+ }
+ }
+ free(mmap_event);
+out_free_comm:
+ free(comm_event);
+out:
+ return err;
+}
+
+int perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ DIR *proc;
+ struct dirent dirent, *next;
+ union perf_event *comm_event, *mmap_event;
+ int err = -1;
+
+ comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
+ if (comm_event == NULL)
+ goto out;
+
+ mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
+ if (mmap_event == NULL)
+ goto out_free_comm;
+
+ proc = opendir("/proc");
+ if (proc == NULL)
+ goto out_free_mmap;
+
+ while (!readdir_r(proc, &dirent, &next) && next) {
+ char *end;
+ pid_t pid = strtol(dirent.d_name, &end, 10);
+
+ if (*end) /* only interested in proper numerical dirents */
+ continue;
+
+ __event__synthesize_thread(comm_event, mmap_event, pid, 1,
+ process, tool, machine);
+ }
+
+ closedir(proc);
+ err = 0;
+out_free_mmap:
+ free(mmap_event);
+out_free_comm:
+ free(comm_event);
+out:
+ return err;
+}
+
+struct process_symbol_args {
+ const char *name;
+ u64 start;
+};
+
+static int find_symbol_cb(void *arg, const char *name, char type,
+ u64 start, u64 end __used)
+{
+ struct process_symbol_args *args = arg;
+
+ /*
+ * Must be a function or at least an alias, as in PARISC64, where "_text" is
+ * an 'A' to the same address as "_stext".
+ */
+ if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
+ type == 'A') || strcmp(name, args->name))
+ return 0;
+
+ args->start = start;
+ return 1;
+}
+
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine,
+ const char *symbol_name)
+{
+ size_t size;
+ const char *filename, *mmap_name;
+ char path[PATH_MAX];
+ char name_buff[PATH_MAX];
+ struct map *map;
+ int err;
+ /*
+ * We should get this from /sys/kernel/sections/.text, but until that is
+ * available we use kallsyms; once it is, keep this path as a fallback for
+ * older kernels.
+ */
+ struct process_symbol_args args = { .name = symbol_name, };
+ union perf_event *event = zalloc((sizeof(event->mmap) +
+ machine->id_hdr_size));
+ if (event == NULL) {
+ pr_debug("Not enough memory synthesizing mmap event "
+ "for kernel modules\n");
+ return -1;
+ }
+
+ mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
+ if (machine__is_host(machine)) {
+ /*
+ * kernel uses PERF_RECORD_MISC_USER for user space maps,
+ * see kernel/perf_event.c __perf_event_mmap
+ */
+ event->header.misc = PERF_RECORD_MISC_KERNEL;
+ filename = "/proc/kallsyms";
+ } else {
+ event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+ if (machine__is_default_guest(machine))
+ filename = (char *) symbol_conf.default_guest_kallsyms;
+ else {
+ sprintf(path, "%s/proc/kallsyms", machine->root_dir);
+ filename = path;
+ }
+ }
+
+ if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
+ return -ENOENT;
+
+ map = machine->vmlinux_maps[MAP__FUNCTION];
+ size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
+ "%s%s", mmap_name, symbol_name) + 1;
+ size = ALIGN(size, sizeof(u64));
+ event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
+ event->mmap.pgoff = args.start;
+ event->mmap.start = map->start;
+ event->mmap.len = map->end - event->mmap.start;
+ event->mmap.pid = machine->pid;
+
+ err = process(tool, event, &synth_sample, machine);
+ free(event);
+
+ return err;
+}
+
+size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
+}
+
+int perf_event__process_comm(struct perf_tool *tool __used,
+ union perf_event *event,
+ struct perf_sample *sample __used,
+ struct machine *machine)
+{
+ struct thread *thread = machine__findnew_thread(machine, event->comm.tid);
+
+ if (dump_trace)
+ perf_event__fprintf_comm(event, stdout);
+
+ if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
+ dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int perf_event__process_lost(struct perf_tool *tool __used,
+ union perf_event *event,
+ struct perf_sample *sample __used,
+ struct machine *machine __used)
+{
+ dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
+ event->lost.id, event->lost.lost);
+ return 0;
+}
+
+static void perf_event__set_kernel_mmap_len(union perf_event *event,
+ struct map **maps)
+{
+ maps[MAP__FUNCTION]->start = event->mmap.start;
+ maps[MAP__FUNCTION]->end = event->mmap.start + event->mmap.len;
+ /*
+ * Be a bit paranoid here; some perf.data files came with
+ * a zero sized synthesized MMAP event for the kernel.
+ */
+ if (maps[MAP__FUNCTION]->end == 0)
+ maps[MAP__FUNCTION]->end = ~0ULL;
+}
+
+static int perf_event__process_kernel_mmap(struct perf_tool *tool __used,
+ union perf_event *event,
+ struct machine *machine)
+{
+ struct map *map;
+ char kmmap_prefix[PATH_MAX];
+ enum dso_kernel_type kernel_type;
+ bool is_kernel_mmap;
+
+ machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
+ if (machine__is_host(machine))
+ kernel_type = DSO_TYPE_KERNEL;
+ else
+ kernel_type = DSO_TYPE_GUEST_KERNEL;
+
+ is_kernel_mmap = memcmp(event->mmap.filename,
+ kmmap_prefix,
+ strlen(kmmap_prefix) - 1) == 0;
+ if (event->mmap.filename[0] == '/' ||
+ (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
+
+ char short_module_name[1024];
+ char *name, *dot;
+
+ if (event->mmap.filename[0] == '/') {
+ name = strrchr(event->mmap.filename, '/');
+ if (name == NULL)
+ goto out_problem;
+
+ ++name; /* skip / */
+ dot = strrchr(name, '.');
+ if (dot == NULL)
+ goto out_problem;
+ snprintf(short_module_name, sizeof(short_module_name),
+ "[%.*s]", (int)(dot - name), name);
+ strxfrchar(short_module_name, '-', '_');
+ } else
+ strcpy(short_module_name, event->mmap.filename);
+
+ map = machine__new_module(machine, event->mmap.start,
+ event->mmap.filename);
+ if (map == NULL)
+ goto out_problem;
+
+ name = strdup(short_module_name);
+ if (name == NULL)
+ goto out_problem;
+
+ map->dso->short_name = name;
+ map->dso->sname_alloc = 1;
+ map->end = map->start + event->mmap.len;
+ } else if (is_kernel_mmap) {
+ const char *symbol_name = (event->mmap.filename +
+ strlen(kmmap_prefix));
+ /*
+ * Should be there already, from the build-id table in
+ * the header.
+ */
+ struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
+ kmmap_prefix);
+ if (kernel == NULL)
+ goto out_problem;
+
+ kernel->kernel = kernel_type;
+ if (__machine__create_kernel_maps(machine, kernel) < 0)
+ goto out_problem;
+
+ perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps);
+
+ /*
+ * Avoid using a zero address (kptr_restrict) for the ref reloc
+ * symbol. Effectively having zero here means that at record
+ * time /proc/sys/kernel/kptr_restrict was non zero.
+ */
+ if (event->mmap.pgoff != 0) {
+ maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
+ symbol_name,
+ event->mmap.pgoff);
+ }
+
+ if (machine__is_default_guest(machine)) {
+ /*
+ * preload dso of guest kernel and modules
+ */
+ dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
+ NULL);
+ }
+ }
+ return 0;
+out_problem:
+ return -1;
+}
+
+size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
+ event->mmap.pid, event->mmap.tid, event->mmap.start,
+ event->mmap.len, event->mmap.pgoff, event->mmap.filename);
+}
+
+int perf_event__process_mmap(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __used,
+ struct machine *machine)
+{
+ struct thread *thread;
+ struct map *map;
+ u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ int ret = 0;
+
+ if (dump_trace)
+ perf_event__fprintf_mmap(event, stdout);
+
+ if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
+ cpumode == PERF_RECORD_MISC_KERNEL) {
+ ret = perf_event__process_kernel_mmap(tool, event, machine);
+ if (ret < 0)
+ goto out_problem;
+ return 0;
+ }
+
+ thread = machine__findnew_thread(machine, event->mmap.pid);
+ if (thread == NULL)
+ goto out_problem;
+ map = map__new(&machine->user_dsos, event->mmap.start,
+ event->mmap.len, event->mmap.pgoff,
+ event->mmap.pid, event->mmap.filename,
+ MAP__FUNCTION);
+ if (map == NULL)
+ goto out_problem;
+
+ thread__insert_map(thread, map);
+ return 0;
+
+out_problem:
+ dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
+ return 0;
+}
+
+size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, "(%d:%d):(%d:%d)\n",
+ event->fork.pid, event->fork.tid,
+ event->fork.ppid, event->fork.ptid);
+}
+
+int perf_event__process_task(struct perf_tool *tool __used,
+ union perf_event *event,
+ struct perf_sample *sample __used,
+ struct machine *machine)
+{
+ struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
+ struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);
+
+ if (dump_trace)
+ perf_event__fprintf_task(event, stdout);
+
+ if (event->header.type == PERF_RECORD_EXIT) {
+ machine__remove_thread(machine, thread);
+ return 0;
+ }
+
+ if (thread == NULL || parent == NULL ||
+ thread__fork(thread, parent) < 0) {
+ dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+size_t perf_event__fprintf(union perf_event *event, FILE *fp)
+{
+ size_t ret = fprintf(fp, "PERF_RECORD_%s",
+ perf_event__name(event->header.type));
+
+ switch (event->header.type) {
+ case PERF_RECORD_COMM:
+ ret += perf_event__fprintf_comm(event, fp);
+ break;
+ case PERF_RECORD_FORK:
+ case PERF_RECORD_EXIT:
+ ret += perf_event__fprintf_task(event, fp);
+ break;
+ case PERF_RECORD_MMAP:
+ ret += perf_event__fprintf_mmap(event, fp);
+ break;
+ default:
+ ret += fprintf(fp, "\n");
+ }
+
+ return ret;
+}
+
+int perf_event__process(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine)
+{
+ switch (event->header.type) {
+ case PERF_RECORD_COMM:
+ perf_event__process_comm(tool, event, sample, machine);
+ break;
+ case PERF_RECORD_MMAP:
+ perf_event__process_mmap(tool, event, sample, machine);
+ break;
+ case PERF_RECORD_FORK:
+ case PERF_RECORD_EXIT:
+ perf_event__process_task(tool, event, sample, machine);
+ break;
+ case PERF_RECORD_LOST:
+ perf_event__process_lost(tool, event, sample, machine);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void thread__find_addr_map(struct thread *self,
+ struct machine *machine, u8 cpumode,
+ enum map_type type, u64 addr,
+ struct addr_location *al)
+{
+ struct map_groups *mg = &self->mg;
+
+ al->thread = self;
+ al->addr = addr;
+ al->cpumode = cpumode;
+ al->filtered = false;
+
+ if (machine == NULL) {
+ al->map = NULL;
+ return;
+ }
+
+ if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
+ al->level = 'k';
+ mg = &machine->kmaps;
+ } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
+ al->level = '.';
+ } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
+ al->level = 'g';
+ mg = &machine->kmaps;
+ } else {
+ /*
+ * 'u' means guest os user space.
+ * TODO: We don't support guest user space. Might support it later.
+ */
+ if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
+ al->level = 'u';
+ else
+ al->level = 'H';
+ al->map = NULL;
+
+ if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
+ cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
+ !perf_guest)
+ al->filtered = true;
+ if ((cpumode == PERF_RECORD_MISC_USER ||
+ cpumode == PERF_RECORD_MISC_KERNEL) &&
+ !perf_host)
+ al->filtered = true;
+
+ return;
+ }
+try_again:
+ al->map = map_groups__find(mg, type, al->addr);
+ if (al->map == NULL) {
+ /*
+ * If this is outside of all known maps, and is a negative
+ * address, try to look it up in the kernel dso, as it might be
+ * a vsyscall or vdso (which executes in user-mode).
+ *
+ * XXX This is nasty, we should have a symbol list in the
+ * "[vdso]" dso, but for now lets use the old trick of looking
+ * in the whole kernel symbol list.
+ */
+ if ((long long)al->addr < 0 &&
+ cpumode == PERF_RECORD_MISC_USER &&
+ machine && mg != &machine->kmaps) {
+ mg = &machine->kmaps;
+ goto try_again;
+ }
+ } else
+ al->addr = al->map->map_ip(al->map, al->addr);
+}
+
+void thread__find_addr_location(struct thread *thread, struct machine *machine,
+ u8 cpumode, enum map_type type, u64 addr,
+ struct addr_location *al,
+ symbol_filter_t filter)
+{
+ thread__find_addr_map(thread, machine, cpumode, type, addr, al);
+ if (al->map != NULL)
+ al->sym = map__find_symbol(al->map, al->addr, filter);
+ else
+ al->sym = NULL;
+}
+
+int perf_event__preprocess_sample(const union perf_event *event,
+ struct machine *machine,
+ struct addr_location *al,
+ struct perf_sample *sample,
+ symbol_filter_t filter)
+{
+ u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
+
+ if (thread == NULL)
+ return -1;
+
+ if (symbol_conf.comm_list &&
+ !strlist__has_entry(symbol_conf.comm_list, thread->comm))
+ goto out_filtered;
+
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+ /*
+ * Have we already created the kernel maps for this machine?
+ *
+ * This should have happened earlier, when we processed the kernel MMAP
+ * events, but for older perf.data files there was no such thing, so do
+ * it now.
+ */
+ if (cpumode == PERF_RECORD_MISC_KERNEL &&
+ machine->vmlinux_maps[MAP__FUNCTION] == NULL)
+ machine__create_kernel_maps(machine);
+
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+ event->ip.ip, al);
+ dump_printf(" ...... dso: %s\n",
+ al->map ? al->map->dso->long_name :
+ al->level == 'H' ? "[hypervisor]" : "<not found>");
+ al->sym = NULL;
+ al->cpu = sample->cpu;
+
+ if (al->map) {
+ struct dso *dso = al->map->dso;
+
+ if (symbol_conf.dso_list &&
+ (!dso || !(strlist__has_entry(symbol_conf.dso_list,
+ dso->short_name) ||
+ (dso->short_name != dso->long_name &&
+ strlist__has_entry(symbol_conf.dso_list,
+ dso->long_name)))))
+ goto out_filtered;
+
+ al->sym = map__find_symbol(al->map, al->addr, filter);
+ }
+
+ if (symbol_conf.sym_list && al->sym &&
+ !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
+ goto out_filtered;
+
+ return 0;
+
+out_filtered:
+ al->filtered = true;
+ return 0;
+}
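
A rough sketch of how the synthesizers in this file are driven (not part of the
patch; print_event and dump_existing_threads are made-up names, and passing a
NULL tool is only acceptable here because the callback ignores it):

	static int print_event(struct perf_tool *tool __used,
			       union perf_event *event,
			       struct perf_sample *sample __used,
			       struct machine *machine __used)
	{
		perf_event__fprintf(event, stdout);
		return 0;
	}

	/* emit COMM + MMAP events for every task currently in /proc */
	static int dump_existing_threads(struct machine *machine)
	{
		return perf_event__synthesize_threads(NULL, print_event, machine);
	}
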
diff --git a/ANDROID_3.4.5/tools/perf/util/event.h b/ANDROID_3.4.5/tools/perf/util/event.h
new file mode 100644
index 00000000..1b197280
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/event.h
@@ -0,0 +1,212 @@
+#ifndef __PERF_RECORD_H
+#define __PERF_RECORD_H
+
+#include <limits.h>
+#include <stdio.h>
+
+#include "../perf.h"
+#include "map.h"
+
+/*
+ * PERF_SAMPLE_IP | PERF_SAMPLE_TID | *
+ */
+struct ip_event {
+ struct perf_event_header header;
+ u64 ip;
+ u32 pid, tid;
+ unsigned char __more_data[];
+};
+
+struct mmap_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ u64 start;
+ u64 len;
+ u64 pgoff;
+ char filename[PATH_MAX];
+};
+
+struct comm_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ char comm[16];
+};
+
+struct fork_event {
+ struct perf_event_header header;
+ u32 pid, ppid;
+ u32 tid, ptid;
+ u64 time;
+};
+
+struct lost_event {
+ struct perf_event_header header;
+ u64 id;
+ u64 lost;
+};
+
+/*
+ * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
+ */
+struct read_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ u64 value;
+ u64 time_enabled;
+ u64 time_running;
+ u64 id;
+};
+
+
+#define PERF_SAMPLE_MASK \
+ (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
+ PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
+ PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
+
+struct sample_event {
+ struct perf_event_header header;
+ u64 array[];
+};
+
+struct perf_sample {
+ u64 ip;
+ u32 pid, tid;
+ u64 time;
+ u64 addr;
+ u64 id;
+ u64 stream_id;
+ u64 period;
+ u32 cpu;
+ u32 raw_size;
+ void *raw_data;
+ struct ip_callchain *callchain;
+ struct branch_stack *branch_stack;
+};
+
+#define BUILD_ID_SIZE 20
+
+struct build_id_event {
+ struct perf_event_header header;
+ pid_t pid;
+ u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
+ char filename[];
+};
+
+enum perf_user_event_type { /* above any possible kernel type */
+ PERF_RECORD_USER_TYPE_START = 64,
+ PERF_RECORD_HEADER_ATTR = 64,
+ PERF_RECORD_HEADER_EVENT_TYPE = 65,
+ PERF_RECORD_HEADER_TRACING_DATA = 66,
+ PERF_RECORD_HEADER_BUILD_ID = 67,
+ PERF_RECORD_FINISHED_ROUND = 68,
+ PERF_RECORD_HEADER_MAX
+};
+
+struct attr_event {
+ struct perf_event_header header;
+ struct perf_event_attr attr;
+ u64 id[];
+};
+
+#define MAX_EVENT_NAME 64
+
+struct perf_trace_event_type {
+ u64 event_id;
+ char name[MAX_EVENT_NAME];
+};
+
+struct event_type_event {
+ struct perf_event_header header;
+ struct perf_trace_event_type event_type;
+};
+
+struct tracing_data_event {
+ struct perf_event_header header;
+ u32 size;
+};
+
+union perf_event {
+ struct perf_event_header header;
+ struct ip_event ip;
+ struct mmap_event mmap;
+ struct comm_event comm;
+ struct fork_event fork;
+ struct lost_event lost;
+ struct read_event read;
+ struct sample_event sample;
+ struct attr_event attr;
+ struct event_type_event event_type;
+ struct tracing_data_event tracing_data;
+ struct build_id_event build_id;
+};
+
+void perf_event__print_totals(void);
+
+struct perf_tool;
+struct thread_map;
+
+typedef int (*perf_event__handler_t)(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+ struct thread_map *threads,
+ perf_event__handler_t process,
+ struct machine *machine);
+int perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine);
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine,
+ const char *symbol_name);
+
+int perf_event__synthesize_modules(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine);
+
+int perf_event__process_comm(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_lost(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_mmap(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_task(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+
+struct addr_location;
+int perf_event__preprocess_sample(const union perf_event *self,
+ struct machine *machine,
+ struct addr_location *al,
+ struct perf_sample *sample,
+ symbol_filter_t filter);
+
+const char *perf_event__name(unsigned int id);
+
+int perf_event__parse_sample(const union perf_event *event, u64 type,
+ int sample_size, bool sample_id_all,
+ struct perf_sample *sample, bool swapped);
+int perf_event__synthesize_sample(union perf_event *event, u64 type,
+ const struct perf_sample *sample,
+ bool swapped);
+
+size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf(union perf_event *event, FILE *fp);
+
+#endif /* __PERF_RECORD_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/evlist.c b/ANDROID_3.4.5/tools/perf/util/evlist.c
new file mode 100644
index 00000000..1986d805
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/evlist.c
@@ -0,0 +1,866 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+#include "util.h"
+#include "debugfs.h"
+#include <poll.h>
+#include "cpumap.h"
+#include "thread_map.h"
+#include "evlist.h"
+#include "evsel.h"
+#include <unistd.h>
+
+#include "parse-events.h"
+
+#include <sys/mman.h>
+
+#include <linux/bitops.h>
+#include <linux/hash.h>
+
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
+
+void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ int i;
+
+ for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
+ INIT_HLIST_HEAD(&evlist->heads[i]);
+ INIT_LIST_HEAD(&evlist->entries);
+ perf_evlist__set_maps(evlist, cpus, threads);
+ evlist->workload.pid = -1;
+}
+
+struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ struct perf_evlist *evlist = zalloc(sizeof(*evlist));
+
+ if (evlist != NULL)
+ perf_evlist__init(evlist, cpus, threads);
+
+ return evlist;
+}
+
+void perf_evlist__config_attrs(struct perf_evlist *evlist,
+ struct perf_record_opts *opts)
+{
+ struct perf_evsel *evsel, *first;
+
+ if (evlist->cpus->map[0] < 0)
+ opts->no_inherit = true;
+
+ first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ perf_evsel__config(evsel, opts, first);
+
+ if (evlist->nr_entries > 1)
+ evsel->attr.sample_type |= PERF_SAMPLE_ID;
+ }
+}
+
+static void perf_evlist__purge(struct perf_evlist *evlist)
+{
+ struct perf_evsel *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &evlist->entries, node) {
+ list_del_init(&pos->node);
+ perf_evsel__delete(pos);
+ }
+
+ evlist->nr_entries = 0;
+}
+
+void perf_evlist__exit(struct perf_evlist *evlist)
+{
+ free(evlist->mmap);
+ free(evlist->pollfd);
+ evlist->mmap = NULL;
+ evlist->pollfd = NULL;
+}
+
+void perf_evlist__delete(struct perf_evlist *evlist)
+{
+ perf_evlist__purge(evlist);
+ perf_evlist__exit(evlist);
+ free(evlist);
+}
+
+void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
+{
+ list_add_tail(&entry->node, &evlist->entries);
+ ++evlist->nr_entries;
+}
+
+void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
+ struct list_head *list,
+ int nr_entries)
+{
+ list_splice_tail(list, &evlist->entries);
+ evlist->nr_entries += nr_entries;
+}
+
+int perf_evlist__add_default(struct perf_evlist *evlist)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ };
+ struct perf_evsel *evsel;
+
+ event_attr_init(&attr);
+
+ evsel = perf_evsel__new(&attr, 0);
+ if (evsel == NULL)
+ goto error;
+
+ /* use strdup() because free(evsel) assumes name is allocated */
+ evsel->name = strdup("cycles");
+ if (!evsel->name)
+ goto error_free;
+
+ perf_evlist__add(evlist, evsel);
+ return 0;
+error_free:
+ perf_evsel__delete(evsel);
+error:
+ return -ENOMEM;
+}
+
+int perf_evlist__add_attrs(struct perf_evlist *evlist,
+ struct perf_event_attr *attrs, size_t nr_attrs)
+{
+ struct perf_evsel *evsel, *n;
+ LIST_HEAD(head);
+ size_t i;
+
+ for (i = 0; i < nr_attrs; i++) {
+ evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
+ if (evsel == NULL)
+ goto out_delete_partial_list;
+ list_add_tail(&evsel->node, &head);
+ }
+
+ perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
+
+ return 0;
+
+out_delete_partial_list:
+ list_for_each_entry_safe(evsel, n, &head, node)
+ perf_evsel__delete(evsel);
+ return -1;
+}
+
+static int trace_event__id(const char *evname)
+{
+ char *filename, *colon;
+ int err = -1, fd;
+
+ if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
+ return -1;
+
+ colon = strrchr(filename, ':');
+ if (colon != NULL)
+ *colon = '/';
+
+ fd = open(filename, O_RDONLY);
+ if (fd >= 0) {
+ char id[16];
+ if (read(fd, id, sizeof(id)) > 0)
+ err = atoi(id);
+ close(fd);
+ }
+
+ free(filename);
+ return err;
+}
+
+int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
+ const char *tracepoints[],
+ size_t nr_tracepoints)
+{
+ int err;
+ size_t i;
+ struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));
+
+ if (attrs == NULL)
+ return -1;
+
+ for (i = 0; i < nr_tracepoints; i++) {
+ err = trace_event__id(tracepoints[i]);
+
+ if (err < 0)
+ goto out_free_attrs;
+
+ attrs[i].type = PERF_TYPE_TRACEPOINT;
+ attrs[i].config = err;
+ attrs[i].sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+ PERF_SAMPLE_CPU);
+ attrs[i].sample_period = 1;
+ }
+
+ err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
+out_free_attrs:
+ free(attrs);
+ return err;
+}
+
+static struct perf_evsel *
+ perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
+{
+ struct perf_evsel *evsel;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
+ (int)evsel->attr.config == id)
+ return evsel;
+ }
+
+ return NULL;
+}
+
+int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
+ const struct perf_evsel_str_handler *assocs,
+ size_t nr_assocs)
+{
+ struct perf_evsel *evsel;
+ int err;
+ size_t i;
+
+ for (i = 0; i < nr_assocs; i++) {
+ err = trace_event__id(assocs[i].name);
+ if (err < 0)
+ goto out;
+
+ evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
+ if (evsel == NULL)
+ continue;
+
+ err = -EEXIST;
+ if (evsel->handler.func != NULL)
+ goto out;
+ evsel->handler.func = assocs[i].handler;
+ }
+
+ err = 0;
+out:
+ return err;
+}
+
+void perf_evlist__disable(struct perf_evlist *evlist)
+{
+ int cpu, thread;
+ struct perf_evsel *pos;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ list_for_each_entry(pos, &evlist->entries, node) {
+ for (thread = 0; thread < evlist->threads->nr; thread++)
+ ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
+ }
+ }
+}
+
+void perf_evlist__enable(struct perf_evlist *evlist)
+{
+ int cpu, thread;
+ struct perf_evsel *pos;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ list_for_each_entry(pos, &evlist->entries, node) {
+ for (thread = 0; thread < evlist->threads->nr; thread++)
+ ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
+ }
+ }
+}
+
+static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+{
+ int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
+ evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
+ return evlist->pollfd != NULL ? 0 : -ENOMEM;
+}
+
+void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
+{
+ fcntl(fd, F_SETFL, O_NONBLOCK);
+ evlist->pollfd[evlist->nr_fds].fd = fd;
+ evlist->pollfd[evlist->nr_fds].events = POLLIN;
+ evlist->nr_fds++;
+}
+
+static void perf_evlist__id_hash(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, u64 id)
+{
+ int hash;
+ struct perf_sample_id *sid = SID(evsel, cpu, thread);
+
+ sid->id = id;
+ sid->evsel = evsel;
+ hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
+ hlist_add_head(&sid->node, &evlist->heads[hash]);
+}
+
+void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
+ int cpu, int thread, u64 id)
+{
+ perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
+ evsel->id[evsel->ids++] = id;
+}
+
+static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, int fd)
+{
+ u64 read_data[4] = { 0, };
+ int id_idx = 1; /* The first entry is the counter value */
+
+ if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
+ read(fd, &read_data, sizeof(read_data)) == -1)
+ return -1;
+
+ if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ ++id_idx;
+ if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ ++id_idx;
+
+ perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
+ return 0;
+}
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+{
+ struct hlist_head *head;
+ struct hlist_node *pos;
+ struct perf_sample_id *sid;
+ int hash;
+
+ if (evlist->nr_entries == 1)
+ return list_entry(evlist->entries.next, struct perf_evsel, node);
+
+ hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
+ head = &evlist->heads[hash];
+
+ hlist_for_each_entry(sid, pos, head, node)
+ if (sid->id == id)
+ return sid->evsel;
+
+ if (!perf_evlist__sample_id_all(evlist))
+ return list_entry(evlist->entries.next, struct perf_evsel, node);
+
+ return NULL;
+}
+
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+{
+ /* XXX Move this to perf.c, making it generally available */
+ unsigned int page_size = sysconf(_SC_PAGE_SIZE);
+ struct perf_mmap *md = &evlist->mmap[idx];
+ unsigned int head = perf_mmap__read_head(md);
+ unsigned int old = md->prev;
+ unsigned char *data = md->base + page_size;
+ union perf_event *event = NULL;
+
+ if (evlist->overwrite) {
+ /*
+ * If we're further behind than half the buffer, there's a chance
+ * the writer will bite our tail and mess up the samples under us.
+ *
+ * If we somehow ended up ahead of the head, we got messed up.
+ *
+ * In either case, truncate and restart at head.
+ */
+ int diff = head - old;
+ if (diff > md->mask / 2 || diff < 0) {
+ fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
+
+ /*
+ * head points to a known good entry, start there.
+ */
+ old = head;
+ }
+ }
+
+ if (old != head) {
+ size_t size;
+
+ event = (union perf_event *)&data[old & md->mask];
+ size = event->header.size;
+
+ /*
+ * Event straddles the mmap boundary -- header should always
+ * be inside due to u64 alignment of output.
+ */
+ if ((old & md->mask) + size != ((old + size) & md->mask)) {
+ unsigned int offset = old;
+ unsigned int len = min(sizeof(*event), size), cpy;
+ void *dst = &evlist->event_copy;
+
+ do {
+ cpy = min(md->mask + 1 - (offset & md->mask), len);
+ memcpy(dst, &data[offset & md->mask], cpy);
+ offset += cpy;
+ dst += cpy;
+ len -= cpy;
+ } while (len);
+
+ event = &evlist->event_copy;
+ }
+
+ old += size;
+ }
+
+ md->prev = old;
+
+ if (!evlist->overwrite)
+ perf_mmap__write_tail(md, old);
+
+ return event;
+}
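
A consumption sketch for the reader above (not part of the patch; it assumes
the evlist has already been opened and mmapped, and uses perf_event__fprintf()
from event.h in this same series):

	static void drain_mmaps(struct perf_evlist *evlist)
	{
		union perf_event *event;
		int i;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
				perf_event__fprintf(event, stdout);
		}
	}
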
+
+void perf_evlist__munmap(struct perf_evlist *evlist)
+{
+ int i;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ if (evlist->mmap[i].base != NULL) {
+ munmap(evlist->mmap[i].base, evlist->mmap_len);
+ evlist->mmap[i].base = NULL;
+ }
+ }
+
+ free(evlist->mmap);
+ evlist->mmap = NULL;
+}
+
+static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
+{
+ evlist->nr_mmaps = evlist->cpus->nr;
+ if (evlist->cpus->map[0] == -1)
+ evlist->nr_mmaps = evlist->threads->nr;
+ evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+ return evlist->mmap != NULL ? 0 : -ENOMEM;
+}
+
+static int __perf_evlist__mmap(struct perf_evlist *evlist,
+ int idx, int prot, int mask, int fd)
+{
+ evlist->mmap[idx].prev = 0;
+ evlist->mmap[idx].mask = mask;
+ evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
+ MAP_SHARED, fd, 0);
+ if (evlist->mmap[idx].base == MAP_FAILED) {
+ evlist->mmap[idx].base = NULL;
+ return -1;
+ }
+
+ perf_evlist__add_pollfd(evlist, fd);
+ return 0;
+}
+
+static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
+{
+ struct perf_evsel *evsel;
+ int cpu, thread;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ int output = -1;
+
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ int fd = FD(evsel, cpu, thread);
+
+ if (output == -1) {
+ output = fd;
+ if (__perf_evlist__mmap(evlist, cpu,
+ prot, mask, output) < 0)
+ goto out_unmap;
+ } else {
+ if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
+ goto out_unmap;
+ }
+
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
+ goto out_unmap;
+ }
+ }
+ }
+
+ return 0;
+
+out_unmap:
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ if (evlist->mmap[cpu].base != NULL) {
+ munmap(evlist->mmap[cpu].base, evlist->mmap_len);
+ evlist->mmap[cpu].base = NULL;
+ }
+ }
+ return -1;
+}
+
+static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
+{
+ struct perf_evsel *evsel;
+ int thread;
+
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ int output = -1;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ int fd = FD(evsel, 0, thread);
+
+ if (output == -1) {
+ output = fd;
+ if (__perf_evlist__mmap(evlist, thread,
+ prot, mask, output) < 0)
+ goto out_unmap;
+ } else {
+ if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
+ goto out_unmap;
+ }
+
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
+ goto out_unmap;
+ }
+ }
+
+ return 0;
+
+out_unmap:
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ if (evlist->mmap[thread].base != NULL) {
+ munmap(evlist->mmap[thread].base, evlist->mmap_len);
+ evlist->mmap[thread].base = NULL;
+ }
+ }
+ return -1;
+}
+
+/** perf_evlist__mmap - Create per cpu maps to receive events
+ *
+ * @evlist - list of events
+ * @pages - map length in pages
+ * @overwrite - overwrite older events?
+ *
+ * If overwrite is false the user needs to signal event consumption using:
+ *
+ * struct perf_mmap *m = &evlist->mmap[cpu];
+ * unsigned int head = perf_mmap__read_head(m);
+ *
+ * perf_mmap__write_tail(m, head)
+ *
+ * Using perf_evlist__mmap_read() does this automatically.
+ */
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+ bool overwrite)
+{
+ unsigned int page_size = sysconf(_SC_PAGE_SIZE);
+ struct perf_evsel *evsel;
+ const struct cpu_map *cpus = evlist->cpus;
+ const struct thread_map *threads = evlist->threads;
+ int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
+
+ /* 512 kiB: default amount of unprivileged mlocked memory */
+ if (pages == UINT_MAX)
+ pages = (512 * 1024) / page_size;
+ else if (!is_power_of_2(pages))
+ return -EINVAL;
+
+ mask = pages * page_size - 1;
+
+ if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
+ return -ENOMEM;
+
+ if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
+ return -ENOMEM;
+
+ evlist->overwrite = overwrite;
+ evlist->mmap_len = (pages + 1) * page_size;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ evsel->sample_id == NULL &&
+ perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
+ return -ENOMEM;
+ }
+
+ if (evlist->cpus->map[0] == -1)
+ return perf_evlist__mmap_per_thread(evlist, prot, mask);
+
+ return perf_evlist__mmap_per_cpu(evlist, prot, mask);
+}
+
+int perf_evlist__create_maps(struct perf_evlist *evlist, const char *target_pid,
+ const char *target_tid, uid_t uid, const char *cpu_list)
+{
+ evlist->threads = thread_map__new_str(target_pid, target_tid, uid);
+
+ if (evlist->threads == NULL)
+ return -1;
+
+ if (uid != UINT_MAX || (cpu_list == NULL && target_tid))
+ evlist->cpus = cpu_map__dummy_new();
+ else
+ evlist->cpus = cpu_map__new(cpu_list);
+
+ if (evlist->cpus == NULL)
+ goto out_delete_threads;
+
+ return 0;
+
+out_delete_threads:
+ thread_map__delete(evlist->threads);
+ return -1;
+}
+
+void perf_evlist__delete_maps(struct perf_evlist *evlist)
+{
+ cpu_map__delete(evlist->cpus);
+ thread_map__delete(evlist->threads);
+ evlist->cpus = NULL;
+ evlist->threads = NULL;
+}
+
+int perf_evlist__set_filters(struct perf_evlist *evlist)
+{
+ const struct thread_map *threads = evlist->threads;
+ const struct cpu_map *cpus = evlist->cpus;
+ struct perf_evsel *evsel;
+ char *filter;
+ int thread;
+ int cpu;
+ int err;
+ int fd;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ filter = evsel->filter;
+ if (!filter)
+ continue;
+ for (cpu = 0; cpu < cpus->nr; cpu++) {
+ for (thread = 0; thread < threads->nr; thread++) {
+ fd = FD(evsel, cpu, thread);
+ err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
+ if (err)
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
+{
+ struct perf_evsel *pos, *first;
+
+ pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+ list_for_each_entry_continue(pos, &evlist->entries, node) {
+ if (first->attr.sample_type != pos->attr.sample_type)
+ return false;
+ }
+
+ return true;
+}
+
+u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
+{
+ struct perf_evsel *first;
+
+ first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ return first->attr.sample_type;
+}
+
+u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
+{
+ struct perf_evsel *first;
+ struct perf_sample *data;
+ u64 sample_type;
+ u16 size = 0;
+
+ first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+ if (!first->attr.sample_id_all)
+ goto out;
+
+ sample_type = first->attr.sample_type;
+
+ if (sample_type & PERF_SAMPLE_TID)
+ size += sizeof(data->tid) * 2;
+
+ if (sample_type & PERF_SAMPLE_TIME)
+ size += sizeof(data->time);
+
+ if (sample_type & PERF_SAMPLE_ID)
+ size += sizeof(data->id);
+
+ if (sample_type & PERF_SAMPLE_STREAM_ID)
+ size += sizeof(data->stream_id);
+
+ if (sample_type & PERF_SAMPLE_CPU)
+ size += sizeof(data->cpu) * 2;
+out:
+ return size;
+}
+
+bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
+{
+ struct perf_evsel *pos, *first;
+
+ pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+ list_for_each_entry_continue(pos, &evlist->entries, node) {
+ if (first->attr.sample_id_all != pos->attr.sample_id_all)
+ return false;
+ }
+
+ return true;
+}
+
+bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
+{
+ struct perf_evsel *first;
+
+ first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ return first->attr.sample_id_all;
+}
+
+void perf_evlist__set_selected(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ evlist->selected = evsel;
+}
+
+int perf_evlist__open(struct perf_evlist *evlist, bool group)
+{
+ struct perf_evsel *evsel, *first;
+ int err, ncpus, nthreads;
+
+ first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ struct xyarray *group_fd = NULL;
+
+ if (group && evsel != first)
+ group_fd = first->fd;
+
+ err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
+ group, group_fd);
+ if (err < 0)
+ goto out_err;
+ }
+
+ return 0;
+out_err:
+ ncpus = evlist->cpus ? evlist->cpus->nr : 1;
+ nthreads = evlist->threads ? evlist->threads->nr : 1;
+
+ list_for_each_entry_reverse(evsel, &evlist->entries, node)
+ perf_evsel__close(evsel, ncpus, nthreads);
+
+ errno = -err;
+ return err;
+}
+
+int perf_evlist__prepare_workload(struct perf_evlist *evlist,
+ struct perf_record_opts *opts,
+ const char *argv[])
+{
+ int child_ready_pipe[2], go_pipe[2];
+ char bf;
+
+ if (pipe(child_ready_pipe) < 0) {
+ perror("failed to create 'ready' pipe");
+ return -1;
+ }
+
+ if (pipe(go_pipe) < 0) {
+ perror("failed to create 'go' pipe");
+ goto out_close_ready_pipe;
+ }
+
+ evlist->workload.pid = fork();
+ if (evlist->workload.pid < 0) {
+ perror("failed to fork");
+ goto out_close_pipes;
+ }
+
+ if (!evlist->workload.pid) {
+ if (opts->pipe_output)
+ dup2(2, 1);
+
+ close(child_ready_pipe[0]);
+ close(go_pipe[1]);
+ fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
+
+ /*
+ * Do a dummy execvp to get the PLT entry resolved,
+ * so we avoid the resolver overhead on the real
+ * execvp call.
+ */
+ execvp("", (char **)argv);
+
+ /*
+ * Tell the parent we're ready to go
+ */
+ close(child_ready_pipe[1]);
+
+ /*
+ * Wait until the parent tells us to go.
+ */
+ if (read(go_pipe[0], &bf, 1) == -1)
+ perror("unable to read pipe");
+
+ execvp(argv[0], (char **)argv);
+
+ perror(argv[0]);
+ kill(getppid(), SIGUSR1);
+ exit(-1);
+ }
+
+ if (!opts->system_wide && !opts->target_tid && !opts->target_pid)
+ evlist->threads->map[0] = evlist->workload.pid;
+
+ close(child_ready_pipe[1]);
+ close(go_pipe[0]);
+ /*
+ * wait for child to settle
+ */
+ if (read(child_ready_pipe[0], &bf, 1) == -1) {
+ perror("unable to read pipe");
+ goto out_close_pipes;
+ }
+
+ evlist->workload.cork_fd = go_pipe[1];
+ close(child_ready_pipe[0]);
+ return 0;
+
+out_close_pipes:
+ close(go_pipe[0]);
+ close(go_pipe[1]);
+out_close_ready_pipe:
+ close(child_ready_pipe[0]);
+ close(child_ready_pipe[1]);
+ return -1;
+}
+
+int perf_evlist__start_workload(struct perf_evlist *evlist)
+{
+ if (evlist->workload.cork_fd > 0) {
+ /*
+ * Remove the cork, let it rip!
+ */
+ return close(evlist->workload.cork_fd);
+ }
+
+ return 0;
+}
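
Together, the two workload helpers implement a simple cork protocol:
perf_evlist__prepare_workload() forks the target and parks it on a read() of
the "go" pipe, and perf_evlist__start_workload() releases it by closing that
pipe. A caller sketch (opts, argv and the opened evlist are assumed to exist
elsewhere; not part of the patch):

	if (perf_evlist__prepare_workload(evlist, &opts, argv) < 0)
		return -1;

	/* ... perf_evlist__open(), perf_evlist__mmap(), perf_evlist__enable() ... */

	perf_evlist__start_workload(evlist);	/* child exec()s the real argv now */
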
diff --git a/ANDROID_3.4.5/tools/perf/util/evlist.h b/ANDROID_3.4.5/tools/perf/util/evlist.h
new file mode 100644
index 00000000..21f1c9e5
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/evlist.h
@@ -0,0 +1,125 @@
+#ifndef __PERF_EVLIST_H
+#define __PERF_EVLIST_H 1
+
+#include <linux/list.h>
+#include <stdio.h>
+#include "../perf.h"
+#include "event.h"
+#include "util.h"
+#include <unistd.h>
+
+struct pollfd;
+struct thread_map;
+struct cpu_map;
+struct perf_record_opts;
+
+#define PERF_EVLIST__HLIST_BITS 8
+#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
+
+struct perf_evlist {
+ struct list_head entries;
+ struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
+ int nr_entries;
+ int nr_fds;
+ int nr_mmaps;
+ int mmap_len;
+ struct {
+ int cork_fd;
+ pid_t pid;
+ } workload;
+ bool overwrite;
+ union perf_event event_copy;
+ struct perf_mmap *mmap;
+ struct pollfd *pollfd;
+ struct thread_map *threads;
+ struct cpu_map *cpus;
+ struct perf_evsel *selected;
+};
+
+struct perf_evsel_str_handler {
+ const char *name;
+ void *handler;
+};
+
+struct perf_evsel;
+
+struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
+ struct thread_map *threads);
+void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
+ struct thread_map *threads);
+void perf_evlist__exit(struct perf_evlist *evlist);
+void perf_evlist__delete(struct perf_evlist *evlist);
+
+void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
+int perf_evlist__add_default(struct perf_evlist *evlist);
+int perf_evlist__add_attrs(struct perf_evlist *evlist,
+ struct perf_event_attr *attrs, size_t nr_attrs);
+int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
+ const char *tracepoints[], size_t nr_tracepoints);
+int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
+ const struct perf_evsel_str_handler *assocs,
+ size_t nr_assocs);
+
+#define perf_evlist__add_attrs_array(evlist, array) \
+ perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array))
+
+#define perf_evlist__add_tracepoints_array(evlist, array) \
+ perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array))
+
+#define perf_evlist__set_tracepoints_handlers_array(evlist, array) \
+ perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array))
+
+void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
+ int cpu, int thread, u64 id);
+
+void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
+
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
+
+int perf_evlist__open(struct perf_evlist *evlist, bool group);
+
+void perf_evlist__config_attrs(struct perf_evlist *evlist,
+ struct perf_record_opts *opts);
+
+int perf_evlist__prepare_workload(struct perf_evlist *evlist,
+ struct perf_record_opts *opts,
+ const char *argv[]);
+int perf_evlist__start_workload(struct perf_evlist *evlist);
+
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+ bool overwrite);
+void perf_evlist__munmap(struct perf_evlist *evlist);
+
+void perf_evlist__disable(struct perf_evlist *evlist);
+void perf_evlist__enable(struct perf_evlist *evlist);
+
+void perf_evlist__set_selected(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+
+static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
+ struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ evlist->cpus = cpus;
+ evlist->threads = threads;
+}
+
+int perf_evlist__create_maps(struct perf_evlist *evlist, const char *target_pid,
+ const char *tid, uid_t uid, const char *cpu_list);
+void perf_evlist__delete_maps(struct perf_evlist *evlist);
+int perf_evlist__set_filters(struct perf_evlist *evlist);
+
+u64 perf_evlist__sample_type(const struct perf_evlist *evlist);
+bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);
+u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist);
+
+bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist);
+bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist);
+
+void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
+ struct list_head *list,
+ int nr_entries);
+
+#endif /* __PERF_EVLIST_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/evsel.c b/ANDROID_3.4.5/tools/perf/util/evsel.c
new file mode 100644
index 00000000..8c13dbcb
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/evsel.c
@@ -0,0 +1,677 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include <byteswap.h>
+#include "asm/bug.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "util.h"
+#include "cpumap.h"
+#include "thread_map.h"
+
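+/*
+ * FD() indexes an evsel's per-cpu, per-thread fd table; GROUP_FD() fetches
+ * the group leader's fd for a given cpu (entry [cpu][0] of the group_fd
+ * xyarray).
+ */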
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
+
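+/*
+ * Each bit set in sample_type (within PERF_SAMPLE_MASK) contributes one u64
+ * to the fixed-size part of a sample.
+ */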
+int __perf_evsel__sample_size(u64 sample_type)
+{
+ u64 mask = sample_type & PERF_SAMPLE_MASK;
+ int size = 0;
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if (mask & (1ULL << i))
+ size++;
+ }
+
+ size *= sizeof(u64);
+
+ return size;
+}
+
+void hists__init(struct hists *hists)
+{
+ memset(hists, 0, sizeof(*hists));
+ hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+ hists->entries_in = &hists->entries_in_array[0];
+ hists->entries_collapsed = RB_ROOT;
+ hists->entries = RB_ROOT;
+ pthread_mutex_init(&hists->lock, NULL);
+}
+
+void perf_evsel__init(struct perf_evsel *evsel,
+ struct perf_event_attr *attr, int idx)
+{
+ evsel->idx = idx;
+ evsel->attr = *attr;
+ INIT_LIST_HEAD(&evsel->node);
+ hists__init(&evsel->hists);
+}
+
+struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
+{
+ struct perf_evsel *evsel = zalloc(sizeof(*evsel));
+
+ if (evsel != NULL)
+ perf_evsel__init(evsel, attr, idx);
+
+ return evsel;
+}
+
+void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
+ struct perf_evsel *first)
+{
+ struct perf_event_attr *attr = &evsel->attr;
+ int track = !evsel->idx; /* only the first counter needs these */
+
+ attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
+ attr->inherit = !opts->no_inherit;
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING |
+ PERF_FORMAT_ID;
+
+ attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+
+ /*
+	 * We default some events to a sample period of 1, but keep
+	 * it a weak assumption, overridable by the user.
+ */
+ if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
+ opts->user_interval != ULLONG_MAX)) {
+ if (opts->freq) {
+ attr->sample_type |= PERF_SAMPLE_PERIOD;
+ attr->freq = 1;
+ attr->sample_freq = opts->freq;
+ } else {
+ attr->sample_period = opts->default_interval;
+ }
+ }
+
+ if (opts->no_samples)
+ attr->sample_freq = 0;
+
+ if (opts->inherit_stat)
+ attr->inherit_stat = 1;
+
+ if (opts->sample_address) {
+ attr->sample_type |= PERF_SAMPLE_ADDR;
+ attr->mmap_data = track;
+ }
+
+ if (opts->call_graph)
+ attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
+
+ if (opts->system_wide)
+ attr->sample_type |= PERF_SAMPLE_CPU;
+
+ if (opts->period)
+ attr->sample_type |= PERF_SAMPLE_PERIOD;
+
+ if (!opts->sample_id_all_missing &&
+ (opts->sample_time || opts->system_wide ||
+ !opts->no_inherit || opts->cpu_list))
+ attr->sample_type |= PERF_SAMPLE_TIME;
+
+ if (opts->raw_samples) {
+ attr->sample_type |= PERF_SAMPLE_TIME;
+ attr->sample_type |= PERF_SAMPLE_RAW;
+ attr->sample_type |= PERF_SAMPLE_CPU;
+ }
+
+ if (opts->no_delay) {
+ attr->watermark = 0;
+ attr->wakeup_events = 1;
+ }
+ if (opts->branch_stack) {
+ attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ attr->branch_sample_type = opts->branch_stack;
+ }
+
+ attr->mmap = track;
+ attr->comm = track;
+
+ if (!opts->target_pid && !opts->target_tid && !opts->system_wide &&
+ (!opts->group || evsel == first)) {
+ attr->disabled = 1;
+ attr->enable_on_exec = 1;
+ }
+}
+
+int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ int cpu, thread;
+ evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
+
+ if (evsel->fd) {
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ for (thread = 0; thread < nthreads; thread++) {
+ FD(evsel, cpu, thread) = -1;
+ }
+ }
+ }
+
+ return evsel->fd != NULL ? 0 : -ENOMEM;
+}
+
+int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
+ if (evsel->sample_id == NULL)
+ return -ENOMEM;
+
+ evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
+ if (evsel->id == NULL) {
+ xyarray__delete(evsel->sample_id);
+ evsel->sample_id = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
+{
+ evsel->counts = zalloc((sizeof(*evsel->counts) +
+ (ncpus * sizeof(struct perf_counts_values))));
+ return evsel->counts != NULL ? 0 : -ENOMEM;
+}
+
+void perf_evsel__free_fd(struct perf_evsel *evsel)
+{
+ xyarray__delete(evsel->fd);
+ evsel->fd = NULL;
+}
+
+void perf_evsel__free_id(struct perf_evsel *evsel)
+{
+ xyarray__delete(evsel->sample_id);
+ evsel->sample_id = NULL;
+ free(evsel->id);
+ evsel->id = NULL;
+}
+
+void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ int cpu, thread;
+
+ for (cpu = 0; cpu < ncpus; cpu++)
+ for (thread = 0; thread < nthreads; ++thread) {
+ close(FD(evsel, cpu, thread));
+ FD(evsel, cpu, thread) = -1;
+ }
+}
+
+void perf_evsel__exit(struct perf_evsel *evsel)
+{
+ assert(list_empty(&evsel->node));
+ xyarray__delete(evsel->fd);
+ xyarray__delete(evsel->sample_id);
+ free(evsel->id);
+}
+
+void perf_evsel__delete(struct perf_evsel *evsel)
+{
+ perf_evsel__exit(evsel);
+ close_cgroup(evsel->cgrp);
+ free(evsel->name);
+ free(evsel);
+}
+
+int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
+ int cpu, int thread, bool scale)
+{
+ struct perf_counts_values count;
+ size_t nv = scale ? 3 : 1;
+
+ if (FD(evsel, cpu, thread) < 0)
+ return -EINVAL;
+
+ if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
+ return -ENOMEM;
+
+ if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
+ return -errno;
+
+ if (scale) {
+ if (count.run == 0)
+ count.val = 0;
+ else if (count.run < count.ena)
+ count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
+ } else
+ count.ena = count.run = 0;
+
+ evsel->counts->cpu[cpu] = count;
+ return 0;
+}
+
+int __perf_evsel__read(struct perf_evsel *evsel,
+ int ncpus, int nthreads, bool scale)
+{
+ size_t nv = scale ? 3 : 1;
+ int cpu, thread;
+ struct perf_counts_values *aggr = &evsel->counts->aggr, count;
+
+ aggr->val = aggr->ena = aggr->run = 0;
+
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ for (thread = 0; thread < nthreads; thread++) {
+ if (FD(evsel, cpu, thread) < 0)
+ continue;
+
+ if (readn(FD(evsel, cpu, thread),
+ &count, nv * sizeof(u64)) < 0)
+ return -errno;
+
+ aggr->val += count.val;
+ if (scale) {
+ aggr->ena += count.ena;
+ aggr->run += count.run;
+ }
+ }
+ }
+
+ evsel->counts->scaled = 0;
+ if (scale) {
+ if (aggr->run == 0) {
+ evsel->counts->scaled = -1;
+ aggr->val = 0;
+ return 0;
+ }
+
+ if (aggr->run < aggr->ena) {
+ evsel->counts->scaled = 1;
+ aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
+ }
+ } else
+ aggr->ena = aggr->run = 0;
+
+ return 0;
+}
+
+static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+ struct thread_map *threads, bool group,
+ struct xyarray *group_fds)
+{
+ int cpu, thread;
+ unsigned long flags = 0;
+ int pid = -1, err;
+
+ if (evsel->fd == NULL &&
+ perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
+ return -ENOMEM;
+
+ if (evsel->cgrp) {
+ flags = PERF_FLAG_PID_CGROUP;
+ pid = evsel->cgrp->fd;
+ }
+
+ for (cpu = 0; cpu < cpus->nr; cpu++) {
+ int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;
+
+ for (thread = 0; thread < threads->nr; thread++) {
+
+ if (!evsel->cgrp)
+ pid = threads->map[thread];
+
+ FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
+ pid,
+ cpus->map[cpu],
+ group_fd, flags);
+ if (FD(evsel, cpu, thread) < 0) {
+ err = -errno;
+ goto out_close;
+ }
+
+ if (group && group_fd == -1)
+ group_fd = FD(evsel, cpu, thread);
+ }
+ }
+
+ return 0;
+
+out_close:
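+	/*
+	 * Close what was opened so far: the threads already opened on the
+	 * current cpu row, then every fully opened cpu row before it.
+	 */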
+ do {
+ while (--thread >= 0) {
+ close(FD(evsel, cpu, thread));
+ FD(evsel, cpu, thread) = -1;
+ }
+ thread = threads->nr;
+ } while (--cpu >= 0);
+ return err;
+}
+
+void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ if (evsel->fd == NULL)
+ return;
+
+ perf_evsel__close_fd(evsel, ncpus, nthreads);
+ perf_evsel__free_fd(evsel);
+ evsel->fd = NULL;
+}
+
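+/*
+ * Single-entry dummy maps holding -1, used when an event is opened without
+ * an explicit cpu map or thread map.
+ */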
+static struct {
+ struct cpu_map map;
+ int cpus[1];
+} empty_cpu_map = {
+ .map.nr = 1,
+ .cpus = { -1, },
+};
+
+static struct {
+ struct thread_map map;
+ int threads[1];
+} empty_thread_map = {
+ .map.nr = 1,
+ .threads = { -1, },
+};
+
+int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+ struct thread_map *threads, bool group,
+ struct xyarray *group_fd)
+{
+ if (cpus == NULL) {
+ /* Work around old compiler warnings about strict aliasing */
+ cpus = &empty_cpu_map.map;
+ }
+
+ if (threads == NULL)
+ threads = &empty_thread_map.map;
+
+ return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
+}
+
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
+ struct cpu_map *cpus, bool group,
+ struct xyarray *group_fd)
+{
+ return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
+ group_fd);
+}
+
+int perf_evsel__open_per_thread(struct perf_evsel *evsel,
+ struct thread_map *threads, bool group,
+ struct xyarray *group_fd)
+{
+ return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
+ group_fd);
+}
+
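+/*
+ * For non-sample records the sample_id_all fields are appended at the end of
+ * the event, so parse them walking backwards from the last u64.
+ */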
+static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
+ struct perf_sample *sample)
+{
+ const u64 *array = event->sample.array;
+
+ array += ((event->header.size -
+ sizeof(event->header)) / sizeof(u64)) - 1;
+
+ if (type & PERF_SAMPLE_CPU) {
+ u32 *p = (u32 *)array;
+ sample->cpu = *p;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ sample->stream_id = *array;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ sample->id = *array;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ sample->time = *array;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u32 *p = (u32 *)array;
+ sample->pid = p[0];
+ sample->tid = p[1];
+ }
+
+ return 0;
+}
+
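+/* Return true if the referenced field would run past the end of the event. */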
+static bool sample_overlap(const union perf_event *event,
+ const void *offset, u64 size)
+{
+ const void *base = event;
+
+ if (offset + size > base + event->header.size)
+ return true;
+
+ return false;
+}
+
+int perf_event__parse_sample(const union perf_event *event, u64 type,
+ int sample_size, bool sample_id_all,
+ struct perf_sample *data, bool swapped)
+{
+ const u64 *array;
+
+ /*
+ * used for cross-endian analysis. See git commit 65014ab3
+ * for why this goofiness is needed.
+ */
+ union {
+ u64 val64;
+ u32 val32[2];
+ } u;
+
+ memset(data, 0, sizeof(*data));
+ data->cpu = data->pid = data->tid = -1;
+ data->stream_id = data->id = data->time = -1ULL;
+ data->period = 1;
+
+ if (event->header.type != PERF_RECORD_SAMPLE) {
+ if (!sample_id_all)
+ return 0;
+ return perf_event__parse_id_sample(event, type, data);
+ }
+
+ array = event->sample.array;
+
+ if (sample_size + sizeof(event->header) > event->header.size)
+ return -EFAULT;
+
+ if (type & PERF_SAMPLE_IP) {
+ data->ip = event->ip.ip;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val64 = *array;
+ if (swapped) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ }
+
+ data->pid = u.val32[0];
+ data->tid = u.val32[1];
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ data->time = *array;
+ array++;
+ }
+
+ data->addr = 0;
+ if (type & PERF_SAMPLE_ADDR) {
+ data->addr = *array;
+ array++;
+ }
+
+ data->id = -1ULL;
+ if (type & PERF_SAMPLE_ID) {
+ data->id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ data->stream_id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+
+ u.val64 = *array;
+ if (swapped) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ }
+
+ data->cpu = u.val32[0];
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ data->period = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_READ) {
+ fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
+ return -1;
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ if (sample_overlap(event, array, sizeof(data->callchain->nr)))
+ return -EFAULT;
+
+ data->callchain = (struct ip_callchain *)array;
+
+ if (sample_overlap(event, array, data->callchain->nr))
+ return -EFAULT;
+
+ array += 1 + data->callchain->nr;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ const u64 *pdata;
+
+ u.val64 = *array;
+ if (WARN_ONCE(swapped,
+ "Endianness of raw data not corrected!\n")) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ }
+
+ if (sample_overlap(event, array, sizeof(u32)))
+ return -EFAULT;
+
+ data->raw_size = u.val32[0];
+ pdata = (void *) array + sizeof(u32);
+
+ if (sample_overlap(event, pdata, data->raw_size))
+ return -EFAULT;
+
+ data->raw_data = (void *) pdata;
+
+ array = (void *)array + data->raw_size + sizeof(u32);
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ u64 sz;
+
+ data->branch_stack = (struct branch_stack *)array;
+ array++; /* nr */
+
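+		/* skip over the branch entries, sz expressed in u64 units */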
+ sz = data->branch_stack->nr * sizeof(struct branch_entry);
+ sz /= sizeof(u64);
+ array += sz;
+ }
+ return 0;
+}
+
+int perf_event__synthesize_sample(union perf_event *event, u64 type,
+ const struct perf_sample *sample,
+ bool swapped)
+{
+ u64 *array;
+
+ /*
+ * used for cross-endian analysis. See git commit 65014ab3
+ * for why this goofiness is needed.
+ */
+ union {
+ u64 val64;
+ u32 val32[2];
+ } u;
+
+ array = event->sample.array;
+
+ if (type & PERF_SAMPLE_IP) {
+ event->ip.ip = sample->ip;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val32[0] = sample->pid;
+ u.val32[1] = sample->tid;
+ if (swapped) {
+ /*
+ * Inverse of what is done in perf_event__parse_sample
+ */
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ u.val64 = bswap_64(u.val64);
+ }
+
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ *array = sample->time;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ADDR) {
+ *array = sample->addr;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ *array = sample->id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ *array = sample->stream_id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u.val32[0] = sample->cpu;
+ if (swapped) {
+ /*
+ * Inverse of what is done in perf_event__parse_sample
+ */
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val64 = bswap_64(u.val64);
+ }
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ *array = sample->period;
+ array++;
+ }
+
+ return 0;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/evsel.h b/ANDROID_3.4.5/tools/perf/util/evsel.h
new file mode 100644
index 00000000..3d6b3e4c
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/evsel.h
@@ -0,0 +1,175 @@
+#ifndef __PERF_EVSEL_H
+#define __PERF_EVSEL_H 1
+
+#include <linux/list.h>
+#include <stdbool.h>
+#include "../../../include/linux/perf_event.h"
+#include "types.h"
+#include "xyarray.h"
+#include "cgroup.h"
+#include "hist.h"
+
+struct perf_counts_values {
+ union {
+ struct {
+ u64 val;
+ u64 ena;
+ u64 run;
+ };
+ u64 values[3];
+ };
+};
+
+struct perf_counts {
+ s8 scaled;
+ struct perf_counts_values aggr;
+ struct perf_counts_values cpu[];
+};
+
+struct perf_evsel;
+
+/*
+ * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
+ * more than one entry in the evlist.
+ */
+struct perf_sample_id {
+ struct hlist_node node;
+ u64 id;
+ struct perf_evsel *evsel;
+};
+
+/** struct perf_evsel - event selector
+ *
+ * @name - Can be set to retain the original event name passed by the user,
+ * so that when showing results in tools such as 'perf stat', we
+ * show the name used, not some alias.
+ */
+struct perf_evsel {
+ struct list_head node;
+ struct perf_event_attr attr;
+ char *filter;
+ struct xyarray *fd;
+ struct xyarray *sample_id;
+ u64 *id;
+ struct perf_counts *counts;
+ int idx;
+ int ids;
+ struct hists hists;
+ char *name;
+ union {
+ void *priv;
+ off_t id_offset;
+ };
+ struct cgroup_sel *cgrp;
+ struct {
+ void *func;
+ void *data;
+ } handler;
+ bool supported;
+};
+
+struct cpu_map;
+struct thread_map;
+struct perf_evlist;
+struct perf_record_opts;
+
+struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
+void perf_evsel__init(struct perf_evsel *evsel,
+ struct perf_event_attr *attr, int idx);
+void perf_evsel__exit(struct perf_evsel *evsel);
+void perf_evsel__delete(struct perf_evsel *evsel);
+
+void perf_evsel__config(struct perf_evsel *evsel,
+ struct perf_record_opts *opts,
+ struct perf_evsel *first);
+
+int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
+int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
+void perf_evsel__free_fd(struct perf_evsel *evsel);
+void perf_evsel__free_id(struct perf_evsel *evsel);
+void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
+
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
+ struct cpu_map *cpus, bool group,
+ struct xyarray *group_fds);
+int perf_evsel__open_per_thread(struct perf_evsel *evsel,
+ struct thread_map *threads, bool group,
+ struct xyarray *group_fds);
+int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+ struct thread_map *threads, bool group,
+ struct xyarray *group_fds);
+void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
+
+#define perf_evsel__match(evsel, t, c) \
+ (evsel->attr.type == PERF_TYPE_##t && \
+ evsel->attr.config == PERF_COUNT_##c)
+
+int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
+ int cpu, int thread, bool scale);
+
+/**
+ * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
+ *
+ * @evsel - event selector to read value
+ * @cpu - CPU of interest
+ * @thread - thread of interest
+ */
+static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
+ int cpu, int thread)
+{
+ return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
+}
+
+/**
+ * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
+ *
+ * @evsel - event selector to read value
+ * @cpu - CPU of interest
+ * @thread - thread of interest
+ */
+static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
+ int cpu, int thread)
+{
+ return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
+}
+
+int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
+ bool scale);
+
+/**
+ * perf_evsel__read - Read the aggregate results on all CPUs
+ *
+ * @evsel - event selector to read value
+ * @ncpus - Number of cpus affected, from zero
+ * @nthreads - Number of threads affected, from zero
+ */
+static inline int perf_evsel__read(struct perf_evsel *evsel,
+ int ncpus, int nthreads)
+{
+ return __perf_evsel__read(evsel, ncpus, nthreads, false);
+}
+
+/**
+ * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
+ *
+ * @evsel - event selector to read value
+ * @ncpus - Number of cpus affected, from zero
+ * @nthreads - Number of threads affected, from zero
+ */
+static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
+ int ncpus, int nthreads)
+{
+ return __perf_evsel__read(evsel, ncpus, nthreads, true);
+}
+
+int __perf_evsel__sample_size(u64 sample_type);
+
+static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
+{
+ return __perf_evsel__sample_size(evsel->attr.sample_type);
+}
+
+void hists__init(struct hists *hists);
+
+#endif /* __PERF_EVSEL_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/exec_cmd.c b/ANDROID_3.4.5/tools/perf/util/exec_cmd.c
new file mode 100644
index 00000000..7adf4ad1
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/exec_cmd.c
@@ -0,0 +1,148 @@
+#include "cache.h"
+#include "exec_cmd.h"
+#include "quote.h"
+
+#include <string.h>
+
+#define MAX_ARGS 32
+
+static const char *argv_exec_path;
+static const char *argv0_path;
+
+const char *system_path(const char *path)
+{
+ static const char *prefix = PREFIX;
+ struct strbuf d = STRBUF_INIT;
+
+ if (is_absolute_path(path))
+ return path;
+
+ strbuf_addf(&d, "%s/%s", prefix, path);
+ path = strbuf_detach(&d, NULL);
+ return path;
+}
+
+const char *perf_extract_argv0_path(const char *argv0)
+{
+ const char *slash;
+
+ if (!argv0 || !*argv0)
+ return NULL;
+ slash = argv0 + strlen(argv0);
+
+ while (argv0 <= slash && !is_dir_sep(*slash))
+ slash--;
+
+ if (slash >= argv0) {
+ argv0_path = strndup(argv0, slash - argv0);
+ return argv0_path ? slash + 1 : NULL;
+ }
+
+ return argv0;
+}
+
+void perf_set_argv_exec_path(const char *exec_path)
+{
+ argv_exec_path = exec_path;
+ /*
+ * Propagate this setting to external programs.
+ */
+ setenv(EXEC_PATH_ENVIRONMENT, exec_path, 1);
+}
+
+
+/* Returns the highest-priority location to look for perf programs. */
+const char *perf_exec_path(void)
+{
+ const char *env;
+
+ if (argv_exec_path)
+ return argv_exec_path;
+
+ env = getenv(EXEC_PATH_ENVIRONMENT);
+ if (env && *env) {
+ return env;
+ }
+
+ return system_path(PERF_EXEC_PATH);
+}
+
+static void add_path(struct strbuf *out, const char *path)
+{
+ if (path && *path) {
+ if (is_absolute_path(path))
+ strbuf_addstr(out, path);
+ else
+ strbuf_addstr(out, make_nonrelative_path(path));
+
+ strbuf_addch(out, PATH_SEP);
+ }
+}
+
+void setup_path(void)
+{
+ const char *old_path = getenv("PATH");
+ struct strbuf new_path = STRBUF_INIT;
+
+ add_path(&new_path, perf_exec_path());
+ add_path(&new_path, argv0_path);
+
+ if (old_path)
+ strbuf_addstr(&new_path, old_path);
+ else
+ strbuf_addstr(&new_path, "/usr/local/bin:/usr/bin:/bin");
+
+ setenv("PATH", new_path.buf, 1);
+
+ strbuf_release(&new_path);
+}
+
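+/* Build a new argv with "perf" prepended; the caller owns the returned array. */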
+static const char **prepare_perf_cmd(const char **argv)
+{
+ int argc;
+ const char **nargv;
+
+ for (argc = 0; argv[argc]; argc++)
+ ; /* just counting */
+ nargv = malloc(sizeof(*nargv) * (argc + 2));
+
+ nargv[0] = "perf";
+ for (argc = 0; argv[argc]; argc++)
+ nargv[argc + 1] = argv[argc];
+ nargv[argc + 1] = NULL;
+ return nargv;
+}
+
+int execv_perf_cmd(const char **argv)
+{
+ const char **nargv = prepare_perf_cmd(argv);
+
+ /* execvp() can only ever return if it fails */
+ execvp("perf", (char **)nargv);
+
+ free(nargv);
+ return -1;
+}
+
+
+int execl_perf_cmd(const char *cmd,...)
+{
+ int argc;
+ const char *argv[MAX_ARGS + 1];
+ const char *arg;
+ va_list param;
+
+ va_start(param, cmd);
+ argv[0] = cmd;
+ argc = 1;
+ while (argc < MAX_ARGS) {
+ arg = argv[argc++] = va_arg(param, char *);
+ if (!arg)
+ break;
+ }
+ va_end(param);
+ if (MAX_ARGS <= argc)
+ return error("too many args to run %s", cmd);
+
+ argv[argc] = NULL;
+ return execv_perf_cmd(argv);
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/exec_cmd.h b/ANDROID_3.4.5/tools/perf/util/exec_cmd.h
new file mode 100644
index 00000000..bc4b9159
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/exec_cmd.h
@@ -0,0 +1,12 @@
+#ifndef __PERF_EXEC_CMD_H
+#define __PERF_EXEC_CMD_H
+
+extern void perf_set_argv_exec_path(const char *exec_path);
+extern const char *perf_extract_argv0_path(const char *path);
+extern const char *perf_exec_path(void);
+extern void setup_path(void);
+extern int execv_perf_cmd(const char **argv); /* NULL terminated */
+extern int execl_perf_cmd(const char *cmd, ...);
+extern const char *system_path(const char *path);
+
+#endif /* __PERF_EXEC_CMD_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/generate-cmdlist.sh b/ANDROID_3.4.5/tools/perf/util/generate-cmdlist.sh
new file mode 100755
index 00000000..f06f6fd1
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/generate-cmdlist.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
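+# Emit the common_cmds[] table: one { name, help } entry per command marked
+# "common" in command-list.txt, with the help text taken from the NAME section
+# of Documentation/perf-<cmd>.txt.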
+
+echo "/* Automatically generated by $0 */
+struct cmdname_help
+{
+ char name[16];
+ char help[80];
+};
+
+static struct cmdname_help common_cmds[] = {"
+
+sed -n -e 's/^perf-\([^ ]*\)[ ].* common.*/\1/p' command-list.txt |
+sort |
+while read cmd
+do
+ sed -n '
+ /^NAME/,/perf-'"$cmd"'/H
+ ${
+ x
+ s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/
+ p
+ }' "Documentation/perf-$cmd.txt"
+done
+echo "};"
diff --git a/ANDROID_3.4.5/tools/perf/util/gtk/browser.c b/ANDROID_3.4.5/tools/perf/util/gtk/browser.c
new file mode 100644
index 00000000..258352a2
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/gtk/browser.c
@@ -0,0 +1,189 @@
+#include "../evlist.h"
+#include "../cache.h"
+#include "../evsel.h"
+#include "../sort.h"
+#include "../hist.h"
+#include "gtk.h"
+
+#include <signal.h>
+
+#define MAX_COLUMNS 32
+
+void perf_gtk_setup_browser(int argc, const char *argv[],
+ bool fallback_to_pager __used)
+{
+ gtk_init(&argc, (char ***)&argv);
+}
+
+void perf_gtk_exit_browser(bool wait_for_ok __used)
+{
+ gtk_main_quit();
+}
+
+static void perf_gtk_signal(int sig)
+{
+ psignal(sig, "perf");
+ gtk_main_quit();
+}
+
+static void perf_gtk_resize_window(GtkWidget *window)
+{
+ GdkRectangle rect;
+ GdkScreen *screen;
+ int monitor;
+ int height;
+ int width;
+
+ screen = gtk_widget_get_screen(window);
+
+ monitor = gdk_screen_get_monitor_at_window(screen, window->window);
+
+ gdk_screen_get_monitor_geometry(screen, monitor, &rect);
+
+ width = rect.width * 3 / 4;
+ height = rect.height * 3 / 4;
+
+ gtk_window_resize(GTK_WINDOW(window), width, height);
+}
+
+static void perf_gtk_show_hists(GtkWidget *window, struct hists *hists)
+{
+ GType col_types[MAX_COLUMNS];
+ GtkCellRenderer *renderer;
+ struct sort_entry *se;
+ GtkListStore *store;
+ struct rb_node *nd;
+ u64 total_period;
+ GtkWidget *view;
+ int col_idx;
+ int nr_cols;
+
+ nr_cols = 0;
+
+ /* The percentage column */
+ col_types[nr_cols++] = G_TYPE_STRING;
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+
+ col_types[nr_cols++] = G_TYPE_STRING;
+ }
+
+ store = gtk_list_store_newv(nr_cols, col_types);
+
+ view = gtk_tree_view_new();
+
+ renderer = gtk_cell_renderer_text_new();
+
+ col_idx = 0;
+
+ /* The percentage column */
+ gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+ -1, "Overhead (%)",
+ renderer, "text",
+ col_idx++, NULL);
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+
+ gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+ -1, se->se_header,
+ renderer, "text",
+ col_idx++, NULL);
+ }
+
+ gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
+
+ g_object_unref(GTK_TREE_MODEL(store));
+
+ total_period = hists->stats.total_period;
+
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+ GtkTreeIter iter;
+ double percent;
+ char s[512];
+
+ if (h->filtered)
+ continue;
+
+ gtk_list_store_append(store, &iter);
+
+ col_idx = 0;
+
+ percent = (h->period * 100.0) / total_period;
+
+ snprintf(s, ARRAY_SIZE(s), "%.2f", percent);
+
+ gtk_list_store_set(store, &iter, col_idx++, s, -1);
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+
+ se->se_snprintf(h, s, ARRAY_SIZE(s),
+ hists__col_len(hists, se->se_width_idx));
+
+ gtk_list_store_set(store, &iter, col_idx++, s, -1);
+ }
+ }
+
+ gtk_container_add(GTK_CONTAINER(window), view);
+}
+
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
+ const char *help __used,
+ void (*timer) (void *arg)__used,
+ void *arg __used, int delay_secs __used)
+{
+ struct perf_evsel *pos;
+ GtkWidget *notebook;
+ GtkWidget *window;
+
+ signal(SIGSEGV, perf_gtk_signal);
+ signal(SIGFPE, perf_gtk_signal);
+ signal(SIGINT, perf_gtk_signal);
+ signal(SIGQUIT, perf_gtk_signal);
+ signal(SIGTERM, perf_gtk_signal);
+
+ window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
+
+ gtk_window_set_title(GTK_WINDOW(window), "perf report");
+
+ g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
+
+ notebook = gtk_notebook_new();
+
+ list_for_each_entry(pos, &evlist->entries, node) {
+ struct hists *hists = &pos->hists;
+ const char *evname = event_name(pos);
+ GtkWidget *scrolled_window;
+ GtkWidget *tab_label;
+
+ scrolled_window = gtk_scrolled_window_new(NULL, NULL);
+
+ gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
+ GTK_POLICY_AUTOMATIC,
+ GTK_POLICY_AUTOMATIC);
+
+ perf_gtk_show_hists(scrolled_window, hists);
+
+ tab_label = gtk_label_new(evname);
+
+ gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label);
+ }
+
+ gtk_container_add(GTK_CONTAINER(window), notebook);
+
+ gtk_widget_show_all(window);
+
+ perf_gtk_resize_window(window);
+
+ gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
+
+ gtk_main();
+
+ return 0;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/gtk/gtk.h b/ANDROID_3.4.5/tools/perf/util/gtk/gtk.h
new file mode 100644
index 00000000..75177ee0
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/gtk/gtk.h
@@ -0,0 +1,8 @@
+#ifndef _PERF_GTK_H_
+#define _PERF_GTK_H_ 1
+
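+/* gtk.h does not build cleanly with -Wstrict-prototypes, so relax it for the include. */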
+#pragma GCC diagnostic ignored "-Wstrict-prototypes"
+#include <gtk/gtk.h>
+#pragma GCC diagnostic error "-Wstrict-prototypes"
+
+#endif /* _PERF_GTK_H_ */
diff --git a/ANDROID_3.4.5/tools/perf/util/header.c b/ANDROID_3.4.5/tools/perf/util/header.c
new file mode 100644
index 00000000..c0b70c69
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/header.c
@@ -0,0 +1,2452 @@
+#define _FILE_OFFSET_BITS 64
+
+#include "util.h"
+#include <sys/types.h>
+#include <byteswap.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <sys/utsname.h>
+
+#include "evlist.h"
+#include "evsel.h"
+#include "header.h"
+#include "../perf.h"
+#include "trace-event.h"
+#include "session.h"
+#include "symbol.h"
+#include "debug.h"
+#include "cpumap.h"
+
+static bool no_buildid_cache = false;
+
+static int event_count;
+static struct perf_trace_event_type *events;
+
+static u32 header_argc;
+static const char **header_argv;
+
+int perf_header__push_event(u64 id, const char *name)
+{
+ if (strlen(name) > MAX_EVENT_NAME)
+ pr_warning("Event %s will be truncated\n", name);
+
+ if (!events) {
+ events = malloc(sizeof(struct perf_trace_event_type));
+ if (events == NULL)
+ return -ENOMEM;
+ } else {
+ struct perf_trace_event_type *nevents;
+
+ nevents = realloc(events, (event_count + 1) * sizeof(*events));
+ if (nevents == NULL)
+ return -ENOMEM;
+ events = nevents;
+ }
+ memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
+ events[event_count].event_id = id;
+ strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
+ event_count++;
+ return 0;
+}
+
+char *perf_header__find_event(u64 id)
+{
+ int i;
+ for (i = 0 ; i < event_count; i++) {
+ if (events[i].event_id == id)
+ return events[i].name;
+ }
+ return NULL;
+}
+
+/*
+ * magic2 = "PERFILE2"
+ * must be a numerical value to let the endianness
+ * determine the memory layout. That way we are able
+ * to detect endianness when reading the perf.data file
+ * back.
+ *
+ * we check for legacy (PERFFILE) format.
+ */
+static const char *__perf_magic1 = "PERFFILE";
+static const u64 __perf_magic2 = 0x32454c4946524550ULL;
+static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
+
+#define PERF_MAGIC __perf_magic2
+
+struct perf_file_attr {
+ struct perf_event_attr attr;
+ struct perf_file_section ids;
+};
+
+void perf_header__set_feat(struct perf_header *header, int feat)
+{
+ set_bit(feat, header->adds_features);
+}
+
+void perf_header__clear_feat(struct perf_header *header, int feat)
+{
+ clear_bit(feat, header->adds_features);
+}
+
+bool perf_header__has_feat(const struct perf_header *header, int feat)
+{
+ return test_bit(feat, header->adds_features);
+}
+
+static int do_write(int fd, const void *buf, size_t size)
+{
+ while (size) {
+ int ret = write(fd, buf, size);
+
+ if (ret < 0)
+ return -errno;
+
+ size -= ret;
+ buf += ret;
+ }
+
+ return 0;
+}
+
+#define NAME_ALIGN 64
+
+static int write_padded(int fd, const void *bf, size_t count,
+ size_t count_aligned)
+{
+ static const char zero_buf[NAME_ALIGN];
+ int err = do_write(fd, bf, count);
+
+ if (!err)
+ err = do_write(fd, zero_buf, count_aligned - count);
+
+ return err;
+}
+
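+/*
+ * Strings are written as a u32 length rounded up to NAME_ALIGN, followed by
+ * the string itself zero-padded out to that length.
+ */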
+static int do_write_string(int fd, const char *str)
+{
+ u32 len, olen;
+ int ret;
+
+ olen = strlen(str) + 1;
+ len = ALIGN(olen, NAME_ALIGN);
+
+ /* write len, incl. \0 */
+ ret = do_write(fd, &len, sizeof(len));
+ if (ret < 0)
+ return ret;
+
+ return write_padded(fd, str, olen, len);
+}
+
+static char *do_read_string(int fd, struct perf_header *ph)
+{
+ ssize_t sz, ret;
+ u32 len;
+ char *buf;
+
+ sz = read(fd, &len, sizeof(len));
+ if (sz < (ssize_t)sizeof(len))
+ return NULL;
+
+ if (ph->needs_swap)
+ len = bswap_32(len);
+
+ buf = malloc(len);
+ if (!buf)
+ return NULL;
+
+ ret = read(fd, buf, len);
+ if (ret == (ssize_t)len) {
+ /*
+ * strings are padded by zeroes
+ * thus the actual strlen of buf
+ * may be less than len
+ */
+ return buf;
+ }
+
+ free(buf);
+ return NULL;
+}
+
+int
+perf_header__set_cmdline(int argc, const char **argv)
+{
+ int i;
+
+ header_argc = (u32)argc;
+
+ /* do not include NULL termination */
+ header_argv = calloc(argc, sizeof(char *));
+ if (!header_argv)
+ return -ENOMEM;
+
+ /*
+ * must copy argv contents because it gets moved
+ * around during option parsing
+ */
+ for (i = 0; i < argc ; i++)
+ header_argv[i] = argv[i];
+
+ return 0;
+}
+
+#define dsos__for_each_with_build_id(pos, head) \
+ list_for_each_entry(pos, head, node) \
+ if (!pos->has_build_id) \
+ continue; \
+ else
+
+static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
+ u16 misc, int fd)
+{
+ struct dso *pos;
+
+ dsos__for_each_with_build_id(pos, head) {
+ int err;
+ struct build_id_event b;
+ size_t len;
+
+ if (!pos->hit)
+ continue;
+ len = pos->long_name_len + 1;
+ len = ALIGN(len, NAME_ALIGN);
+ memset(&b, 0, sizeof(b));
+ memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
+ b.pid = pid;
+ b.header.misc = misc;
+ b.header.size = sizeof(b) + len;
+ err = do_write(fd, &b, sizeof(b));
+ if (err < 0)
+ return err;
+ err = write_padded(fd, pos->long_name,
+ pos->long_name_len + 1, len);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int machine__write_buildid_table(struct machine *machine, int fd)
+{
+ int err;
+ u16 kmisc = PERF_RECORD_MISC_KERNEL,
+ umisc = PERF_RECORD_MISC_USER;
+
+ if (!machine__is_host(machine)) {
+ kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
+ umisc = PERF_RECORD_MISC_GUEST_USER;
+ }
+
+ err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
+ kmisc, fd);
+ if (err == 0)
+ err = __dsos__write_buildid_table(&machine->user_dsos,
+ machine->pid, umisc, fd);
+ return err;
+}
+
+static int dsos__write_buildid_table(struct perf_header *header, int fd)
+{
+ struct perf_session *session = container_of(header,
+ struct perf_session, header);
+ struct rb_node *nd;
+ int err = machine__write_buildid_table(&session->host_machine, fd);
+
+ if (err)
+ return err;
+
+ for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ err = machine__write_buildid_table(pos, fd);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+ const char *name, bool is_kallsyms)
+{
+ const size_t size = PATH_MAX;
+ char *realname, *filename = zalloc(size),
+ *linkname = zalloc(size), *targetname;
+ int len, err = -1;
+
+ if (is_kallsyms) {
+ if (symbol_conf.kptr_restrict) {
+ pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
+ return 0;
+ }
+ realname = (char *)name;
+ } else
+ realname = realpath(name, NULL);
+
+ if (realname == NULL || filename == NULL || linkname == NULL)
+ goto out_free;
+
+ len = scnprintf(filename, size, "%s%s%s",
+ debugdir, is_kallsyms ? "/" : "", realname);
+ if (mkdir_p(filename, 0755))
+ goto out_free;
+
+ snprintf(filename + len, size - len, "/%s", sbuild_id);
+
+ if (access(filename, F_OK)) {
+ if (is_kallsyms) {
+ if (copyfile("/proc/kallsyms", filename))
+ goto out_free;
+ } else if (link(realname, filename) && copyfile(name, filename))
+ goto out_free;
+ }
+
+ len = scnprintf(linkname, size, "%s/.build-id/%.2s",
+ debugdir, sbuild_id);
+
+ if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
+ goto out_free;
+
+ snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
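+	/*
+	 * Make the symlink target relative by rewriting the tail of the
+	 * debugdir prefix to "../..".
+	 */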
+ targetname = filename + strlen(debugdir) - 5;
+ memcpy(targetname, "../..", 5);
+
+ if (symlink(targetname, linkname) == 0)
+ err = 0;
+out_free:
+ if (!is_kallsyms)
+ free(realname);
+ free(filename);
+ free(linkname);
+ return err;
+}
+
+static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
+ const char *name, const char *debugdir,
+ bool is_kallsyms)
+{
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ build_id__sprintf(build_id, build_id_size, sbuild_id);
+
+ return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
+}
+
+int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
+{
+ const size_t size = PATH_MAX;
+ char *filename = zalloc(size),
+ *linkname = zalloc(size);
+ int err = -1;
+
+ if (filename == NULL || linkname == NULL)
+ goto out_free;
+
+ snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+ debugdir, sbuild_id, sbuild_id + 2);
+
+ if (access(linkname, F_OK))
+ goto out_free;
+
+ if (readlink(linkname, filename, size - 1) < 0)
+ goto out_free;
+
+ if (unlink(linkname))
+ goto out_free;
+
+ /*
+ * Since the link is relative, we must make it absolute:
+ */
+ snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+ debugdir, sbuild_id, filename);
+
+ if (unlink(linkname))
+ goto out_free;
+
+ err = 0;
+out_free:
+ free(filename);
+ free(linkname);
+ return err;
+}
+
+static int dso__cache_build_id(struct dso *dso, const char *debugdir)
+{
+ bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
+
+ return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
+ dso->long_name, debugdir, is_kallsyms);
+}
+
+static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+{
+ struct dso *pos;
+ int err = 0;
+
+ dsos__for_each_with_build_id(pos, head)
+ if (dso__cache_build_id(pos, debugdir))
+ err = -1;
+
+ return err;
+}
+
+static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
+{
+ int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
+ ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
+ return ret;
+}
+
+static int perf_session__cache_build_ids(struct perf_session *session)
+{
+ struct rb_node *nd;
+ int ret;
+ char debugdir[PATH_MAX];
+
+ snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
+
+ if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
+ return -1;
+
+ ret = machine__cache_build_ids(&session->host_machine, debugdir);
+
+ for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret |= machine__cache_build_ids(pos, debugdir);
+ }
+ return ret ? -1 : 0;
+}
+
+static bool machine__read_build_ids(struct machine *machine, bool with_hits)
+{
+ bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
+ ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
+ return ret;
+}
+
+static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
+{
+ struct rb_node *nd;
+ bool ret = machine__read_build_ids(&session->host_machine, with_hits);
+
+ for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret |= machine__read_build_ids(pos, with_hits);
+ }
+
+ return ret;
+}
+
+static int write_trace_info(int fd, struct perf_header *h __used,
+ struct perf_evlist *evlist)
+{
+ return read_tracing_data(fd, &evlist->entries);
+}
+
+
+static int write_build_id(int fd, struct perf_header *h,
+ struct perf_evlist *evlist __used)
+{
+ struct perf_session *session;
+ int err;
+
+ session = container_of(h, struct perf_session, header);
+
+ if (!perf_session__read_build_ids(session, true))
+ return -1;
+
+ err = dsos__write_buildid_table(h, fd);
+ if (err < 0) {
+ pr_debug("failed to write buildid table\n");
+ return err;
+ }
+ if (!no_buildid_cache)
+ perf_session__cache_build_ids(session);
+
+ return 0;
+}
+
+static int write_hostname(int fd, struct perf_header *h __used,
+ struct perf_evlist *evlist __used)
+{
+ struct utsname uts;
+ int ret;
+
+ ret = uname(&uts);
+ if (ret < 0)
+ return -1;
+
+ return do_write_string(fd, uts.nodename);
+}
+
+static int write_osrelease(int fd, struct perf_header *h __used,
+ struct perf_evlist *evlist __used)
+{
+ struct utsname uts;
+ int ret;
+
+ ret = uname(&uts);
+ if (ret < 0)
+ return -1;
+
+ return do_write_string(fd, uts.release);
+}
+
+static int write_arch(int fd, struct perf_header *h __used,
+ struct perf_evlist *evlist __used)
+{
+ struct utsname uts;
+ int ret;
+
+ ret = uname(&uts);
+ if (ret < 0)
+ return -1;
+
+ return do_write_string(fd, uts.machine);
+}
+
+static int write_version(int fd, struct perf_header *h __used,
+ struct perf_evlist *evlist __used)
+{
+ return do_write_string(fd, perf_version_string);
+}
+
+static int write_cpudesc(int fd, struct perf_header *h __used,
+ struct perf_evlist *evlist __used)
+{
+#ifndef CPUINFO_PROC
+#define CPUINFO_PROC NULL
+#endif
+ FILE *file;
+ char *buf = NULL;
+ char *s, *p;
+ const char *search = CPUINFO_PROC;
+ size_t len = 0;
+ int ret = -1;
+
+ if (!search)
+ return -1;
+
+ file = fopen("/proc/cpuinfo", "r");
+ if (!file)
+ return -1;
+
+ while (getline(&buf, &len, file) > 0) {
+ ret = strncmp(buf, search, strlen(search));
+ if (!ret)
+ break;
+ }
+
+ if (ret)
+ goto done;
+
+ s = buf;
+
+ p = strchr(buf, ':');
+ if (p && *(p+1) == ' ' && *(p+2))
+ s = p + 2;
+ p = strchr(s, '\n');
+ if (p)
+ *p = '\0';
+
+ /* squash extra space characters (branding string) */
+ p = s;
+ while (*p) {
+ if (isspace(*p)) {
+ char *r = p + 1;
+ char *q = r;
+ *p = ' ';
+ while (*q && isspace(*q))
+ q++;
+ if (q != (p+1))
+ while ((*r++ = *q++));
+ }
+ p++;
+ }
+ ret = do_write_string(fd, s);
+done:
+ free(buf);
+ fclose(file);
+ return ret;
+}
+
+static int write_nrcpus(int fd, struct perf_header *h __used,
+ struct perf_evlist *evlist __used)
+{
+ long nr;
+ u32 nrc, nra;
+ int ret;
+
+ nr = sysconf(_SC_NPROCESSORS_CONF);
+ if (nr < 0)
+ return -1;
+
+ nrc = (u32)(nr & UINT_MAX);
+
+ nr = sysconf(_SC_NPROCESSORS_ONLN);
+ if (nr < 0)
+ return -1;
+
+ nra = (u32)(nr & UINT_MAX);
+
+ ret = do_write(fd, &nrc, sizeof(nrc));
+ if (ret < 0)
+ return ret;
+
+ return do_write(fd, &nra, sizeof(nra));
+}
+
+static int write_event_desc(int fd, struct perf_header *h __used,
+ struct perf_evlist *evlist)
+{
+ struct perf_evsel *attr;
+ u32 nre = 0, nri, sz;
+ int ret;
+
+ list_for_each_entry(attr, &evlist->entries, node)
+ nre++;
+
+ /*
+ * write number of events
+ */
+ ret = do_write(fd, &nre, sizeof(nre));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * size of perf_event_attr struct
+ */
+ sz = (u32)sizeof(attr->attr);
+ ret = do_write(fd, &sz, sizeof(sz));
+ if (ret < 0)
+ return ret;
+
+ list_for_each_entry(attr, &evlist->entries, node) {
+
+ ret = do_write(fd, &attr->attr, sz);
+ if (ret < 0)
+ return ret;
+ /*
+		 * write the number of unique ids per event;
+		 * there is one id per instance of an event
+		 *
+		 * copy into an nri to be independent of the
+		 * type of ids
+ */
+ nri = attr->ids;
+ ret = do_write(fd, &nri, sizeof(nri));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * write event string as passed on cmdline
+ */
+ ret = do_write_string(fd, event_name(attr));
+ if (ret < 0)
+ return ret;
+ /*
+ * write unique ids for this event
+ */
+ ret = do_write(fd, attr->id, attr->ids * sizeof(u64));
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int write_cmdline(int fd, struct perf_header *h __used,
+ struct perf_evlist *evlist __used)
+{
+ char buf[MAXPATHLEN];
+ char proc[32];
+ u32 i, n;
+ int ret;
+
+ /*
+	 * actual path to the perf binary
+ */
+ sprintf(proc, "/proc/%d/exe", getpid());
+ ret = readlink(proc, buf, sizeof(buf));
+ if (ret <= 0)
+ return -1;
+
+ /* readlink() does not add null termination */
+ buf[ret] = '\0';
+
+ /* account for binary path */
+ n = header_argc + 1;
+
+ ret = do_write(fd, &n, sizeof(n));
+ if (ret < 0)
+ return ret;
+
+ ret = do_write_string(fd, buf);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0 ; i < header_argc; i++) {
+ ret = do_write_string(fd, header_argv[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+#define CORE_SIB_FMT \
+ "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
+#define THRD_SIB_FMT \
+ "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
+
+struct cpu_topo {
+ u32 core_sib;
+ u32 thread_sib;
+ char **core_siblings;
+ char **thread_siblings;
+};
+
+static int build_cpu_topo(struct cpu_topo *tp, int cpu)
+{
+ FILE *fp;
+ char filename[MAXPATHLEN];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ u32 i = 0;
+ int ret = -1;
+
+ sprintf(filename, CORE_SIB_FMT, cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ return -1;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto done;
+
+ fclose(fp);
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ for (i = 0; i < tp->core_sib; i++) {
+ if (!strcmp(buf, tp->core_siblings[i]))
+ break;
+ }
+ if (i == tp->core_sib) {
+ tp->core_siblings[i] = buf;
+ tp->core_sib++;
+ buf = NULL;
+ len = 0;
+ }
+
+ sprintf(filename, THRD_SIB_FMT, cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ goto done;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto done;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ for (i = 0; i < tp->thread_sib; i++) {
+ if (!strcmp(buf, tp->thread_siblings[i]))
+ break;
+ }
+ if (i == tp->thread_sib) {
+ tp->thread_siblings[i] = buf;
+ tp->thread_sib++;
+ buf = NULL;
+ }
+ ret = 0;
+done:
+	if (fp)
+ fclose(fp);
+ free(buf);
+ return ret;
+}
+
+static void free_cpu_topo(struct cpu_topo *tp)
+{
+ u32 i;
+
+ if (!tp)
+ return;
+
+ for (i = 0 ; i < tp->core_sib; i++)
+ free(tp->core_siblings[i]);
+
+ for (i = 0 ; i < tp->thread_sib; i++)
+ free(tp->thread_siblings[i]);
+
+ free(tp);
+}
+
+static struct cpu_topo *build_cpu_topology(void)
+{
+ struct cpu_topo *tp;
+ void *addr;
+ u32 nr, i;
+ size_t sz;
+ long ncpus;
+ int ret = -1;
+
+ ncpus = sysconf(_SC_NPROCESSORS_CONF);
+ if (ncpus < 0)
+ return NULL;
+
+ nr = (u32)(ncpus & UINT_MAX);
+
+ sz = nr * sizeof(char *);
+
+ addr = calloc(1, sizeof(*tp) + 2 * sz);
+ if (!addr)
+ return NULL;
+
+ tp = addr;
+
+ addr += sizeof(*tp);
+ tp->core_siblings = addr;
+ addr += sz;
+ tp->thread_siblings = addr;
+
+ for (i = 0; i < nr; i++) {
+ ret = build_cpu_topo(tp, i);
+ if (ret < 0)
+ break;
+ }
+ if (ret) {
+ free_cpu_topo(tp);
+ tp = NULL;
+ }
+ return tp;
+}
+
+static int write_cpu_topology(int fd, struct perf_header *h __used,
+ struct perf_evlist *evlist __used)
+{
+ struct cpu_topo *tp;
+ u32 i;
+ int ret;
+
+ tp = build_cpu_topology();
+ if (!tp)
+ return -1;
+
+ ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
+ if (ret < 0)
+ goto done;
+
+ for (i = 0; i < tp->core_sib; i++) {
+ ret = do_write_string(fd, tp->core_siblings[i]);
+ if (ret < 0)
+ goto done;
+ }
+ ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
+ if (ret < 0)
+ goto done;
+
+ for (i = 0; i < tp->thread_sib; i++) {
+ ret = do_write_string(fd, tp->thread_siblings[i]);
+ if (ret < 0)
+ break;
+ }
+done:
+ free_cpu_topo(tp);
+ return ret;
+}
+
+
+
+static int write_total_mem(int fd, struct perf_header *h __used,
+ struct perf_evlist *evlist __used)
+{
+ char *buf = NULL;
+ FILE *fp;
+ size_t len = 0;
+ int ret = -1, n;
+ uint64_t mem;
+
+ fp = fopen("/proc/meminfo", "r");
+ if (!fp)
+ return -1;
+
+ while (getline(&buf, &len, fp) > 0) {
+ ret = strncmp(buf, "MemTotal:", 9);
+ if (!ret)
+ break;
+ }
+ if (!ret) {
+ n = sscanf(buf, "%*s %"PRIu64, &mem);
+ if (n == 1)
+ ret = do_write(fd, &mem, sizeof(mem));
+ }
+ free(buf);
+ fclose(fp);
+ return ret;
+}
+
+static int write_topo_node(int fd, int node)
+{
+ char str[MAXPATHLEN];
+ char field[32];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ FILE *fp;
+ u64 mem_total, mem_free, mem;
+ int ret = -1;
+
+ sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
+ fp = fopen(str, "r");
+ if (!fp)
+ return -1;
+
+ while (getline(&buf, &len, fp) > 0) {
+ /* skip over invalid lines */
+ if (!strchr(buf, ':'))
+ continue;
+ if (sscanf(buf, "%*s %*d %s %"PRIu64, field, &mem) != 2)
+ goto done;
+ if (!strcmp(field, "MemTotal:"))
+ mem_total = mem;
+ if (!strcmp(field, "MemFree:"))
+ mem_free = mem;
+ }
+
+ fclose(fp);
+
+ ret = do_write(fd, &mem_total, sizeof(u64));
+ if (ret)
+ goto done;
+
+ ret = do_write(fd, &mem_free, sizeof(u64));
+ if (ret)
+ goto done;
+
+ ret = -1;
+ sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
+
+ fp = fopen(str, "r");
+ if (!fp)
+ goto done;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto done;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ ret = do_write_string(fd, buf);
+done:
+ free(buf);
+ fclose(fp);
+ return ret;
+}
+
+static int write_numa_topology(int fd, struct perf_header *h __used,
+ struct perf_evlist *evlist __used)
+{
+ char *buf = NULL;
+ size_t len = 0;
+ FILE *fp;
+ struct cpu_map *node_map = NULL;
+ char *c;
+ u32 nr, i, j;
+ int ret = -1;
+
+ fp = fopen("/sys/devices/system/node/online", "r");
+ if (!fp)
+ return -1;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto done;
+
+ c = strchr(buf, '\n');
+ if (c)
+ *c = '\0';
+
+ node_map = cpu_map__new(buf);
+ if (!node_map)
+ goto done;
+
+ nr = (u32)node_map->nr;
+
+ ret = do_write(fd, &nr, sizeof(nr));
+ if (ret < 0)
+ goto done;
+
+ for (i = 0; i < nr; i++) {
+ j = (u32)node_map->map[i];
+ ret = do_write(fd, &j, sizeof(j));
+ if (ret < 0)
+ break;
+
+ ret = write_topo_node(fd, i);
+ if (ret < 0)
+ break;
+ }
+done:
+ free(buf);
+ fclose(fp);
+ free(node_map);
+ return ret;
+}
+
+/*
+ * default get_cpuid(): nothing gets recorded
+ * actual implementation must be in arch/$(ARCH)/util/header.c
+ */
+int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used)
+{
+ return -1;
+}
+
+static int write_cpuid(int fd, struct perf_header *h __used,
+ struct perf_evlist *evlist __used)
+{
+ char buffer[64];
+ int ret;
+
+ ret = get_cpuid(buffer, sizeof(buffer));
+ if (!ret)
+ goto write_it;
+
+ return -1;
+write_it:
+ return do_write_string(fd, buffer);
+}
+
+static int write_branch_stack(int fd __used, struct perf_header *h __used,
+ struct perf_evlist *evlist __used)
+{
+ return 0;
+}
+
+static void print_hostname(struct perf_header *ph, int fd, FILE *fp)
+{
+ char *str = do_read_string(fd, ph);
+ fprintf(fp, "# hostname : %s\n", str);
+ free(str);
+}
+
+static void print_osrelease(struct perf_header *ph, int fd, FILE *fp)
+{
+ char *str = do_read_string(fd, ph);
+ fprintf(fp, "# os release : %s\n", str);
+ free(str);
+}
+
+static void print_arch(struct perf_header *ph, int fd, FILE *fp)
+{
+ char *str = do_read_string(fd, ph);
+ fprintf(fp, "# arch : %s\n", str);
+ free(str);
+}
+
+static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp)
+{
+ char *str = do_read_string(fd, ph);
+ fprintf(fp, "# cpudesc : %s\n", str);
+ free(str);
+}
+
+static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp)
+{
+ ssize_t ret;
+ u32 nr;
+
+ ret = read(fd, &nr, sizeof(nr));
+ if (ret != (ssize_t)sizeof(nr))
+ nr = -1; /* interpreted as error */
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ fprintf(fp, "# nrcpus online : %u\n", nr);
+
+ ret = read(fd, &nr, sizeof(nr));
+ if (ret != (ssize_t)sizeof(nr))
+ nr = -1; /* interpreted as error */
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ fprintf(fp, "# nrcpus avail : %u\n", nr);
+}
+
+static void print_version(struct perf_header *ph, int fd, FILE *fp)
+{
+ char *str = do_read_string(fd, ph);
+ fprintf(fp, "# perf version : %s\n", str);
+ free(str);
+}
+
+static void print_cmdline(struct perf_header *ph, int fd, FILE *fp)
+{
+ ssize_t ret;
+ char *str;
+ u32 nr, i;
+
+ ret = read(fd, &nr, sizeof(nr));
+ if (ret != (ssize_t)sizeof(nr))
+ return;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ fprintf(fp, "# cmdline : ");
+
+ for (i = 0; i < nr; i++) {
+ str = do_read_string(fd, ph);
+ fprintf(fp, "%s ", str);
+ free(str);
+ }
+ fputc('\n', fp);
+}
+
+static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp)
+{
+ ssize_t ret;
+ u32 nr, i;
+ char *str;
+
+ ret = read(fd, &nr, sizeof(nr));
+ if (ret != (ssize_t)sizeof(nr))
+ return;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ for (i = 0; i < nr; i++) {
+ str = do_read_string(fd, ph);
+ fprintf(fp, "# sibling cores : %s\n", str);
+ free(str);
+ }
+
+ ret = read(fd, &nr, sizeof(nr));
+ if (ret != (ssize_t)sizeof(nr))
+ return;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ for (i = 0; i < nr; i++) {
+ str = do_read_string(fd, ph);
+ fprintf(fp, "# sibling threads : %s\n", str);
+ free(str);
+ }
+}
+
+static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
+{
+ struct perf_event_attr attr;
+ uint64_t id;
+ void *buf = NULL;
+ char *str;
+ u32 nre, sz, nr, i, j;
+ ssize_t ret;
+ size_t msz;
+
+ /* number of events */
+ ret = read(fd, &nre, sizeof(nre));
+ if (ret != (ssize_t)sizeof(nre))
+ goto error;
+
+ if (ph->needs_swap)
+ nre = bswap_32(nre);
+
+ ret = read(fd, &sz, sizeof(sz));
+ if (ret != (ssize_t)sizeof(sz))
+ goto error;
+
+ if (ph->needs_swap)
+ sz = bswap_32(sz);
+
+ memset(&attr, 0, sizeof(attr));
+
+	/* buffer to hold the on-file attr struct */
+ buf = malloc(sz);
+ if (!buf)
+ goto error;
+
+ msz = sizeof(attr);
+ if (sz < msz)
+ msz = sz;
+
+ for (i = 0 ; i < nre; i++) {
+
+ /*
+ * must read entire on-file attr struct to
+ * sync up with layout.
+ */
+ ret = read(fd, buf, sz);
+ if (ret != (ssize_t)sz)
+ goto error;
+
+ if (ph->needs_swap)
+ perf_event__attr_swap(buf);
+
+ memcpy(&attr, buf, msz);
+
+ ret = read(fd, &nr, sizeof(nr));
+ if (ret != (ssize_t)sizeof(nr))
+ goto error;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ str = do_read_string(fd, ph);
+ fprintf(fp, "# event : name = %s, ", str);
+ free(str);
+
+ fprintf(fp, "type = %d, config = 0x%"PRIx64
+ ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,
+ attr.type,
+ (u64)attr.config,
+ (u64)attr.config1,
+ (u64)attr.config2);
+
+ fprintf(fp, ", excl_usr = %d, excl_kern = %d",
+ attr.exclude_user,
+ attr.exclude_kernel);
+
+ if (nr)
+ fprintf(fp, ", id = {");
+
+ for (j = 0 ; j < nr; j++) {
+ ret = read(fd, &id, sizeof(id));
+ if (ret != (ssize_t)sizeof(id))
+ goto error;
+
+ if (ph->needs_swap)
+ id = bswap_64(id);
+
+ if (j)
+ fputc(',', fp);
+
+ fprintf(fp, " %"PRIu64, id);
+ }
+ if (nr && j == nr)
+ fprintf(fp, " }");
+ fputc('\n', fp);
+ }
+ free(buf);
+ return;
+error:
+ fprintf(fp, "# event desc: not available or unable to read\n");
+}
+
+static void print_total_mem(struct perf_header *h, int fd, FILE *fp)
+{
+ uint64_t mem;
+ ssize_t ret;
+
+ ret = read(fd, &mem, sizeof(mem));
+ if (ret != sizeof(mem))
+ goto error;
+
+ if (h->needs_swap)
+ mem = bswap_64(mem);
+
+ fprintf(fp, "# total memory : %"PRIu64" kB\n", mem);
+ return;
+error:
+ fprintf(fp, "# total memory : unknown\n");
+}
+
+static void print_numa_topology(struct perf_header *h, int fd, FILE *fp)
+{
+ ssize_t ret;
+ u32 nr, c, i;
+ char *str;
+ uint64_t mem_total, mem_free;
+
+ /* nr nodes */
+ ret = read(fd, &nr, sizeof(nr));
+ if (ret != (ssize_t)sizeof(nr))
+ goto error;
+
+ if (h->needs_swap)
+ nr = bswap_32(nr);
+
+ for (i = 0; i < nr; i++) {
+
+ /* node number */
+ ret = read(fd, &c, sizeof(c));
+ if (ret != (ssize_t)sizeof(c))
+ goto error;
+
+ if (h->needs_swap)
+ c = bswap_32(c);
+
+ ret = read(fd, &mem_total, sizeof(u64));
+ if (ret != sizeof(u64))
+ goto error;
+
+ ret = read(fd, &mem_free, sizeof(u64));
+ if (ret != sizeof(u64))
+ goto error;
+
+ if (h->needs_swap) {
+ mem_total = bswap_64(mem_total);
+ mem_free = bswap_64(mem_free);
+ }
+
+ fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
+ " free = %"PRIu64" kB\n",
+ c,
+ mem_total,
+ mem_free);
+
+ str = do_read_string(fd, h);
+ fprintf(fp, "# node%u cpu list : %s\n", c, str);
+ free(str);
+ }
+ return;
+error:
+ fprintf(fp, "# numa topology : not available\n");
+}
+
+static void print_cpuid(struct perf_header *ph, int fd, FILE *fp)
+{
+ char *str = do_read_string(fd, ph);
+ fprintf(fp, "# cpuid : %s\n", str);
+ free(str);
+}
+
+static void print_branch_stack(struct perf_header *ph __used, int fd __used,
+ FILE *fp)
+{
+ fprintf(fp, "# contains samples with branch stack\n");
+}
+
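+/*
+ * Attach the build id carried by 'bev' to the DSO named 'filename': look
+ * up (or create) the machine for bev->pid, pick the kernel or user DSO
+ * list from the cpumode bits in header.misc, then find or create the DSO
+ * and record its build id.
+ */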
+static int __event_process_build_id(struct build_id_event *bev,
+ char *filename,
+ struct perf_session *session)
+{
+ int err = -1;
+ struct list_head *head;
+ struct machine *machine;
+ u16 misc;
+ struct dso *dso;
+ enum dso_kernel_type dso_type;
+
+ machine = perf_session__findnew_machine(session, bev->pid);
+ if (!machine)
+ goto out;
+
+ misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+ switch (misc) {
+ case PERF_RECORD_MISC_KERNEL:
+ dso_type = DSO_TYPE_KERNEL;
+ head = &machine->kernel_dsos;
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ dso_type = DSO_TYPE_GUEST_KERNEL;
+ head = &machine->kernel_dsos;
+ break;
+ case PERF_RECORD_MISC_USER:
+ case PERF_RECORD_MISC_GUEST_USER:
+ dso_type = DSO_TYPE_USER;
+ head = &machine->user_dsos;
+ break;
+ default:
+ goto out;
+ }
+
+ dso = __dsos__findnew(head, filename);
+ if (dso != NULL) {
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ dso__set_build_id(dso, &bev->build_id);
+
+ if (filename[0] == '[')
+ dso->kernel = dso_type;
+
+ build_id__sprintf(dso->build_id, sizeof(dso->build_id),
+ sbuild_id);
+ pr_debug("build id event received for %s: %s\n",
+ dso->long_name, sbuild_id);
+ }
+
+ err = 0;
+out:
+ return err;
+}
+
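+/*
+ * Parse a build-id table written in the pre-a1645ce1 layout, i.e. without
+ * the pid field, synthesizing the pid from header.misc (host kernel by
+ * default, guest kernel for guest records).
+ */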
+static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
+ int input, u64 offset, u64 size)
+{
+ struct perf_session *session = container_of(header, struct perf_session, header);
+ struct {
+ struct perf_event_header header;
+ u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
+ char filename[0];
+ } old_bev;
+ struct build_id_event bev;
+ char filename[PATH_MAX];
+ u64 limit = offset + size;
+
+ while (offset < limit) {
+ ssize_t len;
+
+ if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
+ return -1;
+
+ if (header->needs_swap)
+ perf_event_header__bswap(&old_bev.header);
+
+ len = old_bev.header.size - sizeof(old_bev);
+ if (read(input, filename, len) != len)
+ return -1;
+
+ bev.header = old_bev.header;
+
+ /*
+ * As the pid is the missing value, we need to fill
+ * it in properly. The header.misc value gives us a nice hint.
+ */
+ bev.pid = HOST_KERNEL_ID;
+ if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
+ bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
+ bev.pid = DEFAULT_GUEST_KERNEL_ID;
+
+ memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
+ __event_process_build_id(&bev, filename, session);
+
+ offset += bev.header.size;
+ }
+
+ return 0;
+}
+
+static int perf_header__read_build_ids(struct perf_header *header,
+ int input, u64 offset, u64 size)
+{
+ struct perf_session *session = container_of(header, struct perf_session, header);
+ struct build_id_event bev;
+ char filename[PATH_MAX];
+ u64 limit = offset + size, orig_offset = offset;
+ int err = -1;
+
+ while (offset < limit) {
+ ssize_t len;
+
+ if (read(input, &bev, sizeof(bev)) != sizeof(bev))
+ goto out;
+
+ if (header->needs_swap)
+ perf_event_header__bswap(&bev.header);
+
+ len = bev.header.size - sizeof(bev);
+ if (read(input, filename, len) != len)
+ goto out;
+ /*
+ * The a1645ce1 changeset:
+ *
+ * "perf: 'perf kvm' tool for monitoring guest performance from host"
+ *
+ * Added a field to struct build_id_event that broke the file
+ * format.
+ *
+ * Since the kernel build-id is the first entry, process the
+ * table using the old format if the well known
+ * '[kernel.kallsyms]' string for the kernel build-id has the
+ * first 4 characters chopped off (where the pid_t sits).
+ */
+ if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
+ if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
+ return -1;
+ return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
+ }
+
+ __event_process_build_id(&bev, filename, session);
+
+ offset += bev.header.size;
+ }
+ err = 0;
+out:
+ return err;
+}
+
+static int process_trace_info(struct perf_file_section *section __unused,
+ struct perf_header *ph __unused,
+ int feat __unused, int fd)
+{
+ trace_report(fd, false);
+ return 0;
+}
+
+static int process_build_id(struct perf_file_section *section,
+ struct perf_header *ph,
+ int feat __unused, int fd)
+{
+ if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
+ pr_debug("Failed to read buildids, continuing...\n");
+ return 0;
+}
+
+struct feature_ops {
+ int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
+ void (*print)(struct perf_header *h, int fd, FILE *fp);
+ int (*process)(struct perf_file_section *section,
+ struct perf_header *h, int feat, int fd);
+ const char *name;
+ bool full_only;
+};
+
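+/*
+ * FEAT_OPA: feature with write and print handlers only.
+ * FEAT_OPP: feature that also has a process handler, used when reading
+ * the file back.
+ * FEAT_OPF: feature printed only when the full header listing is
+ * requested (full_only).
+ */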
+#define FEAT_OPA(n, func) \
+ [n] = { .name = #n, .write = write_##func, .print = print_##func }
+#define FEAT_OPP(n, func) \
+ [n] = { .name = #n, .write = write_##func, .print = print_##func, \
+ .process = process_##func }
+#define FEAT_OPF(n, func) \
+ [n] = { .name = #n, .write = write_##func, .print = print_##func, \
+ .full_only = true }
+
+/* feature_ops not implemented: */
+#define print_trace_info NULL
+#define print_build_id NULL
+
+static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
+ FEAT_OPP(HEADER_TRACE_INFO, trace_info),
+ FEAT_OPP(HEADER_BUILD_ID, build_id),
+ FEAT_OPA(HEADER_HOSTNAME, hostname),
+ FEAT_OPA(HEADER_OSRELEASE, osrelease),
+ FEAT_OPA(HEADER_VERSION, version),
+ FEAT_OPA(HEADER_ARCH, arch),
+ FEAT_OPA(HEADER_NRCPUS, nrcpus),
+ FEAT_OPA(HEADER_CPUDESC, cpudesc),
+ FEAT_OPA(HEADER_CPUID, cpuid),
+ FEAT_OPA(HEADER_TOTAL_MEM, total_mem),
+ FEAT_OPA(HEADER_EVENT_DESC, event_desc),
+ FEAT_OPA(HEADER_CMDLINE, cmdline),
+ FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
+ FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
+ FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
+};
+
+struct header_print_data {
+ FILE *fp;
+ bool full; /* extended list of headers */
+};
+
+static int perf_file_section__fprintf_info(struct perf_file_section *section,
+ struct perf_header *ph,
+ int feat, int fd, void *data)
+{
+ struct header_print_data *hd = data;
+
+ if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
+ pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
+ "%d, continuing...\n", section->offset, feat);
+ return 0;
+ }
+ if (feat >= HEADER_LAST_FEATURE) {
+ pr_warning("unknown feature %d\n", feat);
+ return 0;
+ }
+ if (!feat_ops[feat].print)
+ return 0;
+
+ if (!feat_ops[feat].full_only || hd->full)
+ feat_ops[feat].print(ph, fd, hd->fp);
+ else
+ fprintf(hd->fp, "# %s info available, use -I to display\n",
+ feat_ops[feat].name);
+
+ return 0;
+}
+
+int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
+{
+ struct header_print_data hd;
+ struct perf_header *header = &session->header;
+ int fd = session->fd;
+ hd.fp = fp;
+ hd.full = full;
+
+ perf_header__process_sections(header, fd, &hd,
+ perf_file_section__fprintf_info);
+ return 0;
+}
+
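+/*
+ * If the feature bit is set, remember the current file offset, call the
+ * feature's write handler and record the resulting offset/size in the
+ * section entry, advancing *p; on failure rewind the file and report an
+ * error.
+ */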
+static int do_write_feat(int fd, struct perf_header *h, int type,
+ struct perf_file_section **p,
+ struct perf_evlist *evlist)
+{
+ int err;
+ int ret = 0;
+
+ if (perf_header__has_feat(h, type)) {
+ if (!feat_ops[type].write)
+ return -1;
+
+ (*p)->offset = lseek(fd, 0, SEEK_CUR);
+
+ err = feat_ops[type].write(fd, h, evlist);
+ if (err < 0) {
+ pr_debug("failed to write feature %d\n", type);
+
+ /* undo anything written */
+ lseek(fd, (*p)->offset, SEEK_SET);
+
+ return -1;
+ }
+ (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
+ (*p)++;
+ }
+ return ret;
+}
+
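+/*
+ * Write the optional feature sections: leave room for the section table
+ * right after the data area, write each enabled feature's payload
+ * (clearing the feature bit on failure), then seek back and write the
+ * table itself.
+ */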
+static int perf_header__adds_write(struct perf_header *header,
+ struct perf_evlist *evlist, int fd)
+{
+ int nr_sections;
+ struct perf_file_section *feat_sec, *p;
+ int sec_size;
+ u64 sec_start;
+ int feat;
+ int err;
+
+ nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
+ if (!nr_sections)
+ return 0;
+
+ feat_sec = p = calloc(sizeof(*feat_sec), nr_sections);
+ if (feat_sec == NULL)
+ return -ENOMEM;
+
+ sec_size = sizeof(*feat_sec) * nr_sections;
+
+ sec_start = header->data_offset + header->data_size;
+ lseek(fd, sec_start + sec_size, SEEK_SET);
+
+ for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
+ if (do_write_feat(fd, header, feat, &p, evlist))
+ perf_header__clear_feat(header, feat);
+ }
+
+ lseek(fd, sec_start, SEEK_SET);
+ /*
+ * we may write more than needed due to a dropped feature, but
+ * this is okay: the reader will skip the missing entries
+ */
+ err = do_write(fd, feat_sec, sec_size);
+ if (err < 0)
+ pr_debug("failed to write feature section\n");
+ free(feat_sec);
+ return err;
+}
+
+int perf_header__write_pipe(int fd)
+{
+ struct perf_pipe_file_header f_header;
+ int err;
+
+ f_header = (struct perf_pipe_file_header){
+ .magic = PERF_MAGIC,
+ .size = sizeof(f_header),
+ };
+
+ err = do_write(fd, &f_header, sizeof(f_header));
+ if (err < 0) {
+ pr_debug("failed to write perf pipe header\n");
+ return err;
+ }
+
+ return 0;
+}
+
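+/*
+ * On-disk layout written here: the per-evsel id arrays, the attribute
+ * table, the optional event type table, then the sample data area; the
+ * feature sections are appended at exit and the file header is finally
+ * written at offset 0.
+ */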
+int perf_session__write_header(struct perf_session *session,
+ struct perf_evlist *evlist,
+ int fd, bool at_exit)
+{
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ struct perf_header *header = &session->header;
+ struct perf_evsel *attr, *pair = NULL;
+ int err;
+
+ lseek(fd, sizeof(f_header), SEEK_SET);
+
+ if (session->evlist != evlist)
+ pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);
+
+ list_for_each_entry(attr, &evlist->entries, node) {
+ attr->id_offset = lseek(fd, 0, SEEK_CUR);
+ err = do_write(fd, attr->id, attr->ids * sizeof(u64));
+ if (err < 0) {
+out_err_write:
+ pr_debug("failed to write perf header\n");
+ return err;
+ }
+ if (session->evlist != evlist) {
+ err = do_write(fd, pair->id, pair->ids * sizeof(u64));
+ if (err < 0)
+ goto out_err_write;
+ attr->ids += pair->ids;
+ pair = list_entry(pair->node.next, struct perf_evsel, node);
+ }
+ }
+
+ header->attr_offset = lseek(fd, 0, SEEK_CUR);
+
+ list_for_each_entry(attr, &evlist->entries, node) {
+ f_attr = (struct perf_file_attr){
+ .attr = attr->attr,
+ .ids = {
+ .offset = attr->id_offset,
+ .size = attr->ids * sizeof(u64),
+ }
+ };
+ err = do_write(fd, &f_attr, sizeof(f_attr));
+ if (err < 0) {
+ pr_debug("failed to write perf header attribute\n");
+ return err;
+ }
+ }
+
+ header->event_offset = lseek(fd, 0, SEEK_CUR);
+ header->event_size = event_count * sizeof(struct perf_trace_event_type);
+ if (events) {
+ err = do_write(fd, events, header->event_size);
+ if (err < 0) {
+ pr_debug("failed to write perf header events\n");
+ return err;
+ }
+ }
+
+ header->data_offset = lseek(fd, 0, SEEK_CUR);
+
+ if (at_exit) {
+ err = perf_header__adds_write(header, evlist, fd);
+ if (err < 0)
+ return err;
+ }
+
+ f_header = (struct perf_file_header){
+ .magic = PERF_MAGIC,
+ .size = sizeof(f_header),
+ .attr_size = sizeof(f_attr),
+ .attrs = {
+ .offset = header->attr_offset,
+ .size = evlist->nr_entries * sizeof(f_attr),
+ },
+ .data = {
+ .offset = header->data_offset,
+ .size = header->data_size,
+ },
+ .event_types = {
+ .offset = header->event_offset,
+ .size = header->event_size,
+ },
+ };
+
+ memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
+
+ lseek(fd, 0, SEEK_SET);
+ err = do_write(fd, &f_header, sizeof(f_header));
+ if (err < 0) {
+ pr_debug("failed to write perf header\n");
+ return err;
+ }
+ lseek(fd, header->data_offset + header->data_size, SEEK_SET);
+
+ header->frozen = 1;
+ return 0;
+}
+
+static int perf_header__getbuffer64(struct perf_header *header,
+ int fd, void *buf, size_t size)
+{
+ if (readn(fd, buf, size) <= 0)
+ return -1;
+
+ if (header->needs_swap)
+ mem_bswap_64(buf, size);
+
+ return 0;
+}
+
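+/*
+ * Read the feature section table located right after the data area and
+ * call 'process' once for each feature bit set in adds_features.
+ */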
+int perf_header__process_sections(struct perf_header *header, int fd,
+ void *data,
+ int (*process)(struct perf_file_section *section,
+ struct perf_header *ph,
+ int feat, int fd, void *data))
+{
+ struct perf_file_section *feat_sec, *sec;
+ int nr_sections;
+ int sec_size;
+ int feat;
+ int err;
+
+ nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
+ if (!nr_sections)
+ return 0;
+
+ feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections);
+ if (!feat_sec)
+ return -1;
+
+ sec_size = sizeof(*feat_sec) * nr_sections;
+
+ lseek(fd, header->data_offset + header->data_size, SEEK_SET);
+
+ err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
+ if (err < 0)
+ goto out_free;
+
+ for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
+ err = process(sec++, header, feat, fd, data);
+ if (err < 0)
+ goto out_free;
+ }
+ err = 0;
+out_free:
+ free(feat_sec);
+ return err;
+}
+
+static const int attr_file_abi_sizes[] = {
+ [0] = PERF_ATTR_SIZE_VER0,
+ [1] = PERF_ATTR_SIZE_VER1,
+ 0,
+};
+
+/*
+ * In the legacy file format, the magic number does not encode endianness;
+ * hdr_sz was used for that instead. But given that hdr_sz can vary based
+ * on ABI revisions, we need to try all known sizes in both byte orders to
+ * detect the endianness.
+ */
+static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
+{
+ uint64_t ref_size, attr_size;
+ int i;
+
+ for (i = 0 ; attr_file_abi_sizes[i]; i++) {
+ ref_size = attr_file_abi_sizes[i]
+ + sizeof(struct perf_file_section);
+ if (hdr_sz != ref_size) {
+ attr_size = bswap_64(hdr_sz);
+ if (attr_size != ref_size)
+ continue;
+
+ ph->needs_swap = true;
+ }
+ pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
+ i,
+ ph->needs_swap);
+ return 0;
+ }
+ /* could not determine endianness */
+ return -1;
+}
+
+#define PERF_PIPE_HDR_VER0 16
+
+static const size_t attr_pipe_abi_sizes[] = {
+ [0] = PERF_PIPE_HDR_VER0,
+ 0,
+};
+
+/*
+ * The legacy pipe format implicitly assumes that the host recording the
+ * samples and the host parsing them share the same endianness. This is
+ * not always the case, since the pipe output can be redirected into a
+ * file and analyzed on a different machine, possibly with a different
+ * endianness and perf_event ABI revision in the perf tool itself.
+ */
+static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
+{
+ u64 attr_size;
+ int i;
+
+ for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
+ if (hdr_sz != attr_pipe_abi_sizes[i]) {
+ attr_size = bswap_64(hdr_sz);
+ if (attr_size != hdr_sz)
+ continue;
+
+ ph->needs_swap = true;
+ }
+ pr_debug("Pipe ABI%d perf.data file detected\n", i);
+ return 0;
+ }
+ return -1;
+}
+
+static int check_magic_endian(u64 magic, uint64_t hdr_sz,
+ bool is_pipe, struct perf_header *ph)
+{
+ int ret;
+
+ /* check for legacy format */
+ ret = memcmp(&magic, __perf_magic1, sizeof(magic));
+ if (ret == 0) {
+ pr_debug("legacy perf.data format\n");
+ if (is_pipe)
+ return try_all_pipe_abis(hdr_sz, ph);
+
+ return try_all_file_abis(hdr_sz, ph);
+ }
+ /*
+ * the new magic number serves two purposes:
+ * - unique number to identify actual perf.data files
+ * - encode endianness of file
+ */
+
+ /* check magic number with one endianness */
+ if (magic == __perf_magic2)
+ return 0;
+
+ /* check magic number with opposite endianness */
+ if (magic != __perf_magic2_sw)
+ return -1;
+
+ ph->needs_swap = true;
+
+ return 0;
+}
+
+int perf_file_header__read(struct perf_file_header *header,
+ struct perf_header *ph, int fd)
+{
+ int ret;
+
+ lseek(fd, 0, SEEK_SET);
+
+ ret = readn(fd, header, sizeof(*header));
+ if (ret <= 0)
+ return -1;
+
+ if (check_magic_endian(header->magic,
+ header->attr_size, false, ph) < 0) {
+ pr_debug("magic/endian check failed\n");
+ return -1;
+ }
+
+ if (ph->needs_swap) {
+ mem_bswap_64(header, offsetof(struct perf_file_header,
+ adds_features));
+ }
+
+ if (header->size != sizeof(*header)) {
+ /* Support the previous format */
+ if (header->size == offsetof(typeof(*header), adds_features))
+ bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
+ else
+ return -1;
+ } else if (ph->needs_swap) {
+ unsigned int i;
+ /*
+ * feature bitmap is declared as an array of unsigned longs --
+ * not good since its size can differ between the host that
+ * generated the data file and the host analyzing the file.
+ *
+ * We need to handle endianness, but we don't know the size of
+ * the unsigned long where the file was generated. Take a best
+ * guess at determining it: try a 64-bit swap first (i.e., file
+ * created on a 64-bit host), and check if the hostname feature
+ * bit is set (this feature bit is forced on as of fbe96f2).
+ * If the bit is not set, undo the 64-bit swap and try a 32-bit
+ * swap. If the hostname bit is still not set (e.g., older data
+ * file), punt and fall back to the original behavior --
+ * clearing all feature bits and setting buildid.
+ */
+ for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i)
+ header->adds_features[i] = bswap_64(header->adds_features[i]);
+
+ if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
+ for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) {
+ header->adds_features[i] = bswap_64(header->adds_features[i]);
+ header->adds_features[i] = bswap_32(header->adds_features[i]);
+ }
+ }
+
+ if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
+ bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
+ set_bit(HEADER_BUILD_ID, header->adds_features);
+ }
+ }
+
+ memcpy(&ph->adds_features, &header->adds_features,
+ sizeof(ph->adds_features));
+
+ ph->event_offset = header->event_types.offset;
+ ph->event_size = header->event_types.size;
+ ph->data_offset = header->data.offset;
+ ph->data_size = header->data.size;
+ return 0;
+}
+
+static int perf_file_section__process(struct perf_file_section *section,
+ struct perf_header *ph,
+ int feat, int fd, void *data __used)
+{
+ if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
+ pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
+ "%d, continuing...\n", section->offset, feat);
+ return 0;
+ }
+
+ if (feat >= HEADER_LAST_FEATURE) {
+ pr_debug("unknown feature %d, continuing...\n", feat);
+ return 0;
+ }
+
+ if (!feat_ops[feat].process)
+ return 0;
+
+ return feat_ops[feat].process(section, ph, feat, fd);
+}
+
+static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
+ struct perf_header *ph, int fd,
+ bool repipe)
+{
+ int ret;
+
+ ret = readn(fd, header, sizeof(*header));
+ if (ret <= 0)
+ return -1;
+
+ if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
+ pr_debug("endian/magic failed\n");
+ return -1;
+ }
+
+ if (ph->needs_swap)
+ header->size = bswap_64(header->size);
+
+ if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int perf_header__read_pipe(struct perf_session *session, int fd)
+{
+ struct perf_header *header = &session->header;
+ struct perf_pipe_file_header f_header;
+
+ if (perf_file_header__read_pipe(&f_header, header, fd,
+ session->repipe) < 0) {
+ pr_debug("incompatible file format\n");
+ return -EINVAL;
+ }
+
+ session->fd = fd;
+
+ return 0;
+}
+
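+/*
+ * Read one on-file perf_event_attr: first the guaranteed ABI0-sized part,
+ * then any extra bytes the file's attr->size declares (rejecting attrs
+ * newer than this tool knows about), and finally the perf_file_section
+ * describing where the ids for this attr live.
+ */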
+static int read_attr(int fd, struct perf_header *ph,
+ struct perf_file_attr *f_attr)
+{
+ struct perf_event_attr *attr = &f_attr->attr;
+ size_t sz, left;
+ size_t our_sz = sizeof(f_attr->attr);
+ int ret;
+
+ memset(f_attr, 0, sizeof(*f_attr));
+
+ /* read minimal guaranteed structure */
+ ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
+ if (ret <= 0) {
+ pr_debug("cannot read %d bytes of header attr\n",
+ PERF_ATTR_SIZE_VER0);
+ return -1;
+ }
+
+ /* on-file perf_event_attr size */
+ sz = attr->size;
+
+ if (ph->needs_swap)
+ sz = bswap_32(sz);
+
+ if (sz == 0) {
+ /* assume ABI0 */
+ sz = PERF_ATTR_SIZE_VER0;
+ } else if (sz > our_sz) {
+ pr_debug("file uses a more recent and unsupported ABI"
+ " (%zu bytes extra)\n", sz - our_sz);
+ return -1;
+ }
+ /* what we have not yet read and that we know about */
+ left = sz - PERF_ATTR_SIZE_VER0;
+ if (left) {
+ void *ptr = attr;
+ ptr += PERF_ATTR_SIZE_VER0;
+
+ ret = readn(fd, ptr, left);
+ }
+ /* read perf_file_section, ids are read in caller */
+ ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
+
+ return ret <= 0 ? -1 : 0;
+}
+
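+/*
+ * Read the perf.data header: parse the file header, create one evsel per
+ * on-file attribute (attaching its sample ids), load the optional event
+ * type table and then process the feature sections.
+ */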
+int perf_session__read_header(struct perf_session *session, int fd)
+{
+ struct perf_header *header = &session->header;
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ u64 f_id;
+ int nr_attrs, nr_ids, i, j;
+
+ session->evlist = perf_evlist__new(NULL, NULL);
+ if (session->evlist == NULL)
+ return -ENOMEM;
+
+ if (session->fd_pipe)
+ return perf_header__read_pipe(session, fd);
+
+ if (perf_file_header__read(&f_header, header, fd) < 0)
+ return -EINVAL;
+
+ nr_attrs = f_header.attrs.size / f_header.attr_size;
+ lseek(fd, f_header.attrs.offset, SEEK_SET);
+
+ for (i = 0; i < nr_attrs; i++) {
+ struct perf_evsel *evsel;
+ off_t tmp;
+
+ if (read_attr(fd, header, &f_attr) < 0)
+ goto out_errno;
+
+ if (header->needs_swap)
+ perf_event__attr_swap(&f_attr.attr);
+
+ tmp = lseek(fd, 0, SEEK_CUR);
+ evsel = perf_evsel__new(&f_attr.attr, i);
+
+ if (evsel == NULL)
+ goto out_delete_evlist;
+ /*
+ * Do it before so that if perf_evsel__alloc_id fails, this
+ * entry gets purged too at perf_evlist__delete().
+ */
+ perf_evlist__add(session->evlist, evsel);
+
+ nr_ids = f_attr.ids.size / sizeof(u64);
+ /*
+ * We don't have the cpu and thread maps in the header, so
+ * for allocating the perf_sample_id table we fake 1 cpu and
+ * nr_ids threads.
+ */
+ if (perf_evsel__alloc_id(evsel, 1, nr_ids))
+ goto out_delete_evlist;
+
+ lseek(fd, f_attr.ids.offset, SEEK_SET);
+
+ for (j = 0; j < nr_ids; j++) {
+ if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
+ goto out_errno;
+
+ perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
+ }
+
+ lseek(fd, tmp, SEEK_SET);
+ }
+
+ symbol_conf.nr_events = nr_attrs;
+
+ if (f_header.event_types.size) {
+ lseek(fd, f_header.event_types.offset, SEEK_SET);
+ events = malloc(f_header.event_types.size);
+ if (events == NULL)
+ return -ENOMEM;
+ if (perf_header__getbuffer64(header, fd, events,
+ f_header.event_types.size))
+ goto out_errno;
+ event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
+ }
+
+ perf_header__process_sections(header, fd, NULL,
+ perf_file_section__process);
+
+ lseek(fd, header->data_offset, SEEK_SET);
+
+ header->frozen = 1;
+ return 0;
+out_errno:
+ return -errno;
+
+out_delete_evlist:
+ perf_evlist__delete(session->evlist);
+ session->evlist = NULL;
+ return -ENOMEM;
+}
+
+int perf_event__synthesize_attr(struct perf_tool *tool,
+ struct perf_event_attr *attr, u16 ids, u64 *id,
+ perf_event__handler_t process)
+{
+ union perf_event *ev;
+ size_t size;
+ int err;
+
+ size = sizeof(struct perf_event_attr);
+ size = ALIGN(size, sizeof(u64));
+ size += sizeof(struct perf_event_header);
+ size += ids * sizeof(u64);
+
+ ev = malloc(size);
+
+ if (ev == NULL)
+ return -ENOMEM;
+
+ ev->attr.attr = *attr;
+ memcpy(ev->attr.id, id, ids * sizeof(u64));
+
+ ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
+ ev->attr.header.size = size;
+
+ err = process(tool, ev, NULL, NULL);
+
+ free(ev);
+
+ return err;
+}
+
+int perf_event__synthesize_attrs(struct perf_tool *tool,
+ struct perf_session *session,
+ perf_event__handler_t process)
+{
+ struct perf_evsel *attr;
+ int err = 0;
+
+ list_for_each_entry(attr, &session->evlist->entries, node) {
+ err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids,
+ attr->id, process);
+ if (err) {
+ pr_debug("failed to create perf header attribute\n");
+ return err;
+ }
+ }
+
+ return err;
+}
+
+int perf_event__process_attr(union perf_event *event,
+ struct perf_evlist **pevlist)
+{
+ unsigned int i, ids, n_ids;
+ struct perf_evsel *evsel;
+ struct perf_evlist *evlist = *pevlist;
+
+ if (evlist == NULL) {
+ *pevlist = evlist = perf_evlist__new(NULL, NULL);
+ if (evlist == NULL)
+ return -ENOMEM;
+ }
+
+ evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries);
+ if (evsel == NULL)
+ return -ENOMEM;
+
+ perf_evlist__add(evlist, evsel);
+
+ ids = event->header.size;
+ ids -= (void *)&event->attr.id - (void *)event;
+ n_ids = ids / sizeof(u64);
+ /*
+ * We don't have the cpu and thread maps in the header, so
+ * for allocating the perf_sample_id table we fake 1 cpu and
+ * n_ids threads.
+ */
+ if (perf_evsel__alloc_id(evsel, 1, n_ids))
+ return -ENOMEM;
+
+ for (i = 0; i < n_ids; i++) {
+ perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
+ }
+
+ return 0;
+}
+
+int perf_event__synthesize_event_type(struct perf_tool *tool,
+ u64 event_id, char *name,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ union perf_event ev;
+ size_t size = 0;
+ int err = 0;
+
+ memset(&ev, 0, sizeof(ev));
+
+ ev.event_type.event_type.event_id = event_id;
+ memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
+ strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
+
+ ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
+ size = strlen(ev.event_type.event_type.name);
+ size = ALIGN(size, sizeof(u64));
+ ev.event_type.header.size = sizeof(ev.event_type) -
+ (sizeof(ev.event_type.event_type.name) - size);
+
+ err = process(tool, &ev, NULL, machine);
+
+ return err;
+}
+
+int perf_event__synthesize_event_types(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ struct perf_trace_event_type *type;
+ int i, err = 0;
+
+ for (i = 0; i < event_count; i++) {
+ type = &events[i];
+
+ err = perf_event__synthesize_event_type(tool, type->event_id,
+ type->name, process,
+ machine);
+ if (err) {
+ pr_debug("failed to create perf header event type\n");
+ return err;
+ }
+ }
+
+ return err;
+}
+
+int perf_event__process_event_type(struct perf_tool *tool __unused,
+ union perf_event *event)
+{
+ if (perf_header__push_event(event->event_type.event_type.event_id,
+ event->event_type.event_type.name) < 0)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
+ struct perf_evlist *evlist,
+ perf_event__handler_t process)
+{
+ union perf_event ev;
+ struct tracing_data *tdata;
+ ssize_t size = 0, aligned_size = 0, padding;
+ int err __used = 0;
+
+ /*
+ * We are going to store the size of the data followed
+ * by the data contents. Since the fd descriptor is a pipe,
+ * we cannot seek back to store the size of the data once
+ * we know it. Instead we:
+ *
+ * - write the tracing data to the temp file
+ * - get/write the data size to pipe
+ * - write the tracing data from the temp file
+ * to the pipe
+ */
+ tdata = tracing_data_get(&evlist->entries, fd, true);
+ if (!tdata)
+ return -1;
+
+ memset(&ev, 0, sizeof(ev));
+
+ ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
+ size = tdata->size;
+ aligned_size = ALIGN(size, sizeof(u64));
+ padding = aligned_size - size;
+ ev.tracing_data.header.size = sizeof(ev.tracing_data);
+ ev.tracing_data.size = aligned_size;
+
+ process(tool, &ev, NULL, NULL);
+
+ /*
+ * The put function will copy all the tracing data
+ * stored in the temp file to the pipe.
+ */
+ tracing_data_put(tdata);
+
+ write_padded(fd, NULL, 0, padding);
+
+ return aligned_size;
+}
+
+int perf_event__process_tracing_data(union perf_event *event,
+ struct perf_session *session)
+{
+ ssize_t size_read, padding, size = event->tracing_data.size;
+ off_t offset = lseek(session->fd, 0, SEEK_CUR);
+ char buf[BUFSIZ];
+
+ /* setup for reading amidst mmap */
+ lseek(session->fd, offset + sizeof(struct tracing_data_event),
+ SEEK_SET);
+
+ size_read = trace_report(session->fd, session->repipe);
+
+ padding = ALIGN(size_read, sizeof(u64)) - size_read;
+
+ if (read(session->fd, buf, padding) < 0)
+ die("reading input file");
+ if (session->repipe) {
+ int retw = write(STDOUT_FILENO, buf, padding);
+ if (retw <= 0 || retw != padding)
+ die("repiping tracing data padding");
+ }
+
+ if (size_read + padding != size)
+ die("tracing data size mismatch");
+
+ return size_read + padding;
+}
+
+int perf_event__synthesize_build_id(struct perf_tool *tool,
+ struct dso *pos, u16 misc,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ union perf_event ev;
+ size_t len;
+ int err = 0;
+
+ if (!pos->hit)
+ return err;
+
+ memset(&ev, 0, sizeof(ev));
+
+ len = pos->long_name_len + 1;
+ len = ALIGN(len, NAME_ALIGN);
+ memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
+ ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
+ ev.build_id.header.misc = misc;
+ ev.build_id.pid = machine->pid;
+ ev.build_id.header.size = sizeof(ev.build_id) + len;
+ memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
+
+ err = process(tool, &ev, NULL, machine);
+
+ return err;
+}
+
+int perf_event__process_build_id(struct perf_tool *tool __used,
+ union perf_event *event,
+ struct perf_session *session)
+{
+ __event_process_build_id(&event->build_id,
+ event->build_id.filename,
+ session);
+ return 0;
+}
+
+void disable_buildid_cache(void)
+{
+ no_buildid_cache = true;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/header.h b/ANDROID_3.4.5/tools/perf/util/header.h
new file mode 100644
index 00000000..21a6be09
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/header.h
@@ -0,0 +1,138 @@
+#ifndef __PERF_HEADER_H
+#define __PERF_HEADER_H
+
+#include "../../../include/linux/perf_event.h"
+#include <sys/types.h>
+#include <stdbool.h>
+#include "types.h"
+#include "event.h"
+
+#include <linux/bitmap.h>
+
+enum {
+ HEADER_RESERVED = 0, /* always cleared */
+ HEADER_FIRST_FEATURE = 1,
+ HEADER_TRACE_INFO = 1,
+ HEADER_BUILD_ID,
+
+ HEADER_HOSTNAME,
+ HEADER_OSRELEASE,
+ HEADER_VERSION,
+ HEADER_ARCH,
+ HEADER_NRCPUS,
+ HEADER_CPUDESC,
+ HEADER_CPUID,
+ HEADER_TOTAL_MEM,
+ HEADER_CMDLINE,
+ HEADER_EVENT_DESC,
+ HEADER_CPU_TOPOLOGY,
+ HEADER_NUMA_TOPOLOGY,
+ HEADER_BRANCH_STACK,
+ HEADER_LAST_FEATURE,
+ HEADER_FEAT_BITS = 256,
+};
+
+struct perf_file_section {
+ u64 offset;
+ u64 size;
+};
+
+struct perf_file_header {
+ u64 magic;
+ u64 size;
+ u64 attr_size;
+ struct perf_file_section attrs;
+ struct perf_file_section data;
+ struct perf_file_section event_types;
+ DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
+};
+
+struct perf_pipe_file_header {
+ u64 magic;
+ u64 size;
+};
+
+struct perf_header;
+
+int perf_file_header__read(struct perf_file_header *header,
+ struct perf_header *ph, int fd);
+
+struct perf_header {
+ int frozen;
+ bool needs_swap;
+ s64 attr_offset;
+ u64 data_offset;
+ u64 data_size;
+ u64 event_offset;
+ u64 event_size;
+ DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
+};
+
+struct perf_evlist;
+struct perf_session;
+
+int perf_session__read_header(struct perf_session *session, int fd);
+int perf_session__write_header(struct perf_session *session,
+ struct perf_evlist *evlist,
+ int fd, bool at_exit);
+int perf_header__write_pipe(int fd);
+
+int perf_header__push_event(u64 id, const char *name);
+char *perf_header__find_event(u64 id);
+
+void perf_header__set_feat(struct perf_header *header, int feat);
+void perf_header__clear_feat(struct perf_header *header, int feat);
+bool perf_header__has_feat(const struct perf_header *header, int feat);
+
+int perf_header__set_cmdline(int argc, const char **argv);
+
+int perf_header__process_sections(struct perf_header *header, int fd,
+ void *data,
+ int (*process)(struct perf_file_section *section,
+ struct perf_header *ph,
+ int feat, int fd, void *data));
+
+int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);
+
+int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+ const char *name, bool is_kallsyms);
+int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
+
+int perf_event__synthesize_attr(struct perf_tool *tool,
+ struct perf_event_attr *attr, u16 ids, u64 *id,
+ perf_event__handler_t process);
+int perf_event__synthesize_attrs(struct perf_tool *tool,
+ struct perf_session *session,
+ perf_event__handler_t process);
+int perf_event__process_attr(union perf_event *event, struct perf_evlist **pevlist);
+
+int perf_event__synthesize_event_type(struct perf_tool *tool,
+ u64 event_id, char *name,
+ perf_event__handler_t process,
+ struct machine *machine);
+int perf_event__synthesize_event_types(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine);
+int perf_event__process_event_type(struct perf_tool *tool,
+ union perf_event *event);
+
+int perf_event__synthesize_tracing_data(struct perf_tool *tool,
+ int fd, struct perf_evlist *evlist,
+ perf_event__handler_t process);
+int perf_event__process_tracing_data(union perf_event *event,
+ struct perf_session *session);
+
+int perf_event__synthesize_build_id(struct perf_tool *tool,
+ struct dso *pos, u16 misc,
+ perf_event__handler_t process,
+ struct machine *machine);
+int perf_event__process_build_id(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session);
+
+/*
+ * arch specific callback
+ */
+int get_cpuid(char *buffer, size_t sz);
+
+#endif /* __PERF_HEADER_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/help.c b/ANDROID_3.4.5/tools/perf/util/help.c
new file mode 100644
index 00000000..6f2975a0
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/help.c
@@ -0,0 +1,338 @@
+#include "cache.h"
+#include "../builtin.h"
+#include "exec_cmd.h"
+#include "levenshtein.h"
+#include "help.h"
+
+void add_cmdname(struct cmdnames *cmds, const char *name, size_t len)
+{
+ struct cmdname *ent = malloc(sizeof(*ent) + len + 1);
+
+ ent->len = len;
+ memcpy(ent->name, name, len);
+ ent->name[len] = 0;
+
+ ALLOC_GROW(cmds->names, cmds->cnt + 1, cmds->alloc);
+ cmds->names[cmds->cnt++] = ent;
+}
+
+static void clean_cmdnames(struct cmdnames *cmds)
+{
+ unsigned int i;
+
+ for (i = 0; i < cmds->cnt; ++i)
+ free(cmds->names[i]);
+ free(cmds->names);
+ cmds->cnt = 0;
+ cmds->alloc = 0;
+}
+
+static int cmdname_compare(const void *a_, const void *b_)
+{
+ struct cmdname *a = *(struct cmdname **)a_;
+ struct cmdname *b = *(struct cmdname **)b_;
+ return strcmp(a->name, b->name);
+}
+
+static void uniq(struct cmdnames *cmds)
+{
+ unsigned int i, j;
+
+ if (!cmds->cnt)
+ return;
+
+ for (i = j = 1; i < cmds->cnt; i++)
+ if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name))
+ cmds->names[j++] = cmds->names[i];
+
+ cmds->cnt = j;
+}
+
+void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
+{
+ size_t ci, cj, ei;
+ int cmp;
+
+ ci = cj = ei = 0;
+ while (ci < cmds->cnt && ei < excludes->cnt) {
+ cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name);
+ if (cmp < 0)
+ cmds->names[cj++] = cmds->names[ci++];
+ else if (cmp == 0)
+ ci++, ei++;
+ else if (cmp > 0)
+ ei++;
+ }
+
+ while (ci < cmds->cnt)
+ cmds->names[cj++] = cmds->names[ci++];
+
+ cmds->cnt = cj;
+}
+
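+/*
+ * Print the command names in columns sized to the longest name, fitting
+ * as many columns as the terminal width allows.
+ */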
+static void pretty_print_string_list(struct cmdnames *cmds, int longest)
+{
+ int cols = 1, rows;
+ int space = longest + 1; /* min 1 SP between words */
+ struct winsize win;
+ int max_cols;
+ int i, j;
+
+ get_term_dimensions(&win);
+ max_cols = win.ws_col - 1; /* don't print *on* the edge */
+
+ if (space < max_cols)
+ cols = max_cols / space;
+ rows = (cmds->cnt + cols - 1) / cols;
+
+ for (i = 0; i < rows; i++) {
+ printf(" ");
+
+ for (j = 0; j < cols; j++) {
+ unsigned int n = j * rows + i;
+ unsigned int size = space;
+
+ if (n >= cmds->cnt)
+ break;
+ if (j == cols-1 || n + rows >= cmds->cnt)
+ size = 1;
+ printf("%-*s", size, cmds->names[n]->name);
+ }
+ putchar('\n');
+ }
+}
+
+static int is_executable(const char *name)
+{
+ struct stat st;
+
+ if (stat(name, &st) || /* stat, not lstat */
+ !S_ISREG(st.st_mode))
+ return 0;
+
+ return st.st_mode & S_IXUSR;
+}
+
+static void list_commands_in_dir(struct cmdnames *cmds,
+ const char *path,
+ const char *prefix)
+{
+ int prefix_len;
+ DIR *dir = opendir(path);
+ struct dirent *de;
+ struct strbuf buf = STRBUF_INIT;
+ int len;
+
+ if (!dir)
+ return;
+ if (!prefix)
+ prefix = "perf-";
+ prefix_len = strlen(prefix);
+
+ strbuf_addf(&buf, "%s/", path);
+ len = buf.len;
+
+ while ((de = readdir(dir)) != NULL) {
+ int entlen;
+
+ if (prefixcmp(de->d_name, prefix))
+ continue;
+
+ strbuf_setlen(&buf, len);
+ strbuf_addstr(&buf, de->d_name);
+ if (!is_executable(buf.buf))
+ continue;
+
+ entlen = strlen(de->d_name) - prefix_len;
+ if (has_extension(de->d_name, ".exe"))
+ entlen -= 4;
+
+ add_cmdname(cmds, de->d_name + prefix_len, entlen);
+ }
+ closedir(dir);
+ strbuf_release(&buf);
+}
+
+void load_command_list(const char *prefix,
+ struct cmdnames *main_cmds,
+ struct cmdnames *other_cmds)
+{
+ const char *env_path = getenv("PATH");
+ const char *exec_path = perf_exec_path();
+
+ if (exec_path) {
+ list_commands_in_dir(main_cmds, exec_path, prefix);
+ qsort(main_cmds->names, main_cmds->cnt,
+ sizeof(*main_cmds->names), cmdname_compare);
+ uniq(main_cmds);
+ }
+
+ if (env_path) {
+ char *paths, *path, *colon;
+ path = paths = strdup(env_path);
+ while (1) {
+ if ((colon = strchr(path, PATH_SEP)))
+ *colon = 0;
+ if (!exec_path || strcmp(path, exec_path))
+ list_commands_in_dir(other_cmds, path, prefix);
+
+ if (!colon)
+ break;
+ path = colon + 1;
+ }
+ free(paths);
+
+ qsort(other_cmds->names, other_cmds->cnt,
+ sizeof(*other_cmds->names), cmdname_compare);
+ uniq(other_cmds);
+ }
+ exclude_cmds(other_cmds, main_cmds);
+}
+
+void list_commands(const char *title, struct cmdnames *main_cmds,
+ struct cmdnames *other_cmds)
+{
+ unsigned int i, longest = 0;
+
+ for (i = 0; i < main_cmds->cnt; i++)
+ if (longest < main_cmds->names[i]->len)
+ longest = main_cmds->names[i]->len;
+ for (i = 0; i < other_cmds->cnt; i++)
+ if (longest < other_cmds->names[i]->len)
+ longest = other_cmds->names[i]->len;
+
+ if (main_cmds->cnt) {
+ const char *exec_path = perf_exec_path();
+ printf("available %s in '%s'\n", title, exec_path);
+ printf("----------------");
+ mput_char('-', strlen(title) + strlen(exec_path));
+ putchar('\n');
+ pretty_print_string_list(main_cmds, longest);
+ putchar('\n');
+ }
+
+ if (other_cmds->cnt) {
+ printf("%s available from elsewhere on your $PATH\n", title);
+ printf("---------------------------------------");
+ mput_char('-', strlen(title));
+ putchar('\n');
+ pretty_print_string_list(other_cmds, longest);
+ putchar('\n');
+ }
+}
+
+int is_in_cmdlist(struct cmdnames *c, const char *s)
+{
+ unsigned int i;
+
+ for (i = 0; i < c->cnt; i++)
+ if (!strcmp(s, c->names[i]->name))
+ return 1;
+ return 0;
+}
+
+static int autocorrect;
+static struct cmdnames aliases;
+
+static int perf_unknown_cmd_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "help.autocorrect"))
+ autocorrect = perf_config_int(var, value);
+ /* Also use aliases for command lookup */
+ if (!prefixcmp(var, "alias."))
+ add_cmdname(&aliases, var + 6, strlen(var + 6));
+
+ return perf_default_config(var, value, cb);
+}
+
+static int levenshtein_compare(const void *p1, const void *p2)
+{
+ const struct cmdname *const *c1 = p1, *const *c2 = p2;
+ const char *s1 = (*c1)->name, *s2 = (*c2)->name;
+ int l1 = (*c1)->len;
+ int l2 = (*c2)->len;
+ return l1 != l2 ? l1 - l2 : strcmp(s1, s2);
+}
+
+static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
+{
+ unsigned int i;
+
+ ALLOC_GROW(cmds->names, cmds->cnt + old->cnt, cmds->alloc);
+
+ for (i = 0; i < old->cnt; i++)
+ cmds->names[cmds->cnt++] = old->names[i];
+ free(old->names);
+ old->cnt = 0;
+ old->names = NULL;
+}
+
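+/*
+ * Suggest what the user probably meant: score every known command and
+ * alias against 'cmd' with a weighted levenshtein distance, then either
+ * auto-correct to the single best match or print the closest candidates.
+ */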
+const char *help_unknown_cmd(const char *cmd)
+{
+ unsigned int i, n = 0, best_similarity = 0;
+ struct cmdnames main_cmds, other_cmds;
+
+ memset(&main_cmds, 0, sizeof(main_cmds));
+ memset(&other_cmds, 0, sizeof(main_cmds));
+ memset(&aliases, 0, sizeof(aliases));
+
+ perf_config(perf_unknown_cmd_config, NULL);
+
+ load_command_list("perf-", &main_cmds, &other_cmds);
+
+ add_cmd_list(&main_cmds, &aliases);
+ add_cmd_list(&main_cmds, &other_cmds);
+ qsort(main_cmds.names, main_cmds.cnt,
+ sizeof(main_cmds.names), cmdname_compare);
+ uniq(&main_cmds);
+
+ if (main_cmds.cnt) {
+ /* This reuses cmdname->len for similarity index */
+ for (i = 0; i < main_cmds.cnt; ++i)
+ main_cmds.names[i]->len =
+ levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4);
+
+ qsort(main_cmds.names, main_cmds.cnt,
+ sizeof(*main_cmds.names), levenshtein_compare);
+
+ best_similarity = main_cmds.names[0]->len;
+ n = 1;
+ while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len)
+ ++n;
+ }
+
+ if (autocorrect && n == 1) {
+ const char *assumed = main_cmds.names[0]->name;
+
+ main_cmds.names[0] = NULL;
+ clean_cmdnames(&main_cmds);
+ fprintf(stderr, "WARNING: You called a perf program named '%s', "
+ "which does not exist.\n"
+ "Continuing under the assumption that you meant '%s'\n",
+ cmd, assumed);
+ if (autocorrect > 0) {
+ fprintf(stderr, "in %0.1f seconds automatically...\n",
+ (float)autocorrect/10.0);
+ poll(NULL, 0, autocorrect * 100);
+ }
+ return assumed;
+ }
+
+ fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd);
+
+ if (main_cmds.cnt && best_similarity < 6) {
+ fprintf(stderr, "\nDid you mean %s?\n",
+ n < 2 ? "this": "one of these");
+
+ for (i = 0; i < n; i++)
+ fprintf(stderr, "\t%s\n", main_cmds.names[i]->name);
+ }
+
+ exit(1);
+}
+
+int cmd_version(int argc __used, const char **argv __used, const char *prefix __used)
+{
+ printf("perf version %s\n", perf_version_string);
+ return 0;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/help.h b/ANDROID_3.4.5/tools/perf/util/help.h
new file mode 100644
index 00000000..7f5c6ded
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/help.h
@@ -0,0 +1,29 @@
+#ifndef __PERF_HELP_H
+#define __PERF_HELP_H
+
+struct cmdnames {
+ size_t alloc;
+ size_t cnt;
+ struct cmdname {
+ size_t len; /* also used for similarity index in help.c */
+ char name[FLEX_ARRAY];
+ } **names;
+};
+
+static inline void mput_char(char c, unsigned int num)
+{
+ while(num--)
+ putchar(c);
+}
+
+void load_command_list(const char *prefix,
+ struct cmdnames *main_cmds,
+ struct cmdnames *other_cmds);
+void add_cmdname(struct cmdnames *cmds, const char *name, size_t len);
+/* Here we require that excludes is a sorted list. */
+void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes);
+int is_in_cmdlist(struct cmdnames *c, const char *s);
+void list_commands(const char *title, struct cmdnames *main_cmds,
+ struct cmdnames *other_cmds);
+
+#endif /* __PERF_HELP_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/hist.c b/ANDROID_3.4.5/tools/perf/util/hist.c
new file mode 100644
index 00000000..9f6d630d
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/hist.c
@@ -0,0 +1,1362 @@
+#include "annotate.h"
+#include "util.h"
+#include "build-id.h"
+#include "hist.h"
+#include "session.h"
+#include "sort.h"
+#include <math.h>
+
+static bool hists__filter_entry_by_dso(struct hists *hists,
+ struct hist_entry *he);
+static bool hists__filter_entry_by_thread(struct hists *hists,
+ struct hist_entry *he);
+static bool hists__filter_entry_by_symbol(struct hists *hists,
+ struct hist_entry *he);
+
+enum hist_filter {
+ HIST_FILTER__DSO,
+ HIST_FILTER__THREAD,
+ HIST_FILTER__PARENT,
+ HIST_FILTER__SYMBOL,
+};
+
+struct callchain_param callchain_param = {
+ .mode = CHAIN_GRAPH_REL,
+ .min_percent = 0.5,
+ .order = ORDER_CALLEE
+};
+
+u16 hists__col_len(struct hists *hists, enum hist_column col)
+{
+ return hists->col_len[col];
+}
+
+void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
+{
+ hists->col_len[col] = len;
+}
+
+bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
+{
+ if (len > hists__col_len(hists, col)) {
+ hists__set_col_len(hists, col, len);
+ return true;
+ }
+ return false;
+}
+
+static void hists__reset_col_len(struct hists *hists)
+{
+ enum hist_column col;
+
+ for (col = 0; col < HISTC_NR_COLS; ++col)
+ hists__set_col_len(hists, col, 0);
+}
+
+static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
+{
+ const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
+
+ if (hists__col_len(hists, dso) < unresolved_col_width &&
+ !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
+ !symbol_conf.dso_list)
+ hists__set_col_len(hists, dso, unresolved_col_width);
+}
+
+static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
+{
+ const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
+ u16 len;
+
+ if (h->ms.sym)
+ hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
+ else
+ hists__set_unres_dso_col_len(hists, HISTC_DSO);
+
+ len = thread__comm_len(h->thread);
+ if (hists__new_col_len(hists, HISTC_COMM, len))
+ hists__set_col_len(hists, HISTC_THREAD, len + 6);
+
+ if (h->ms.map) {
+ len = dso__name_len(h->ms.map->dso);
+ hists__new_col_len(hists, HISTC_DSO, len);
+ }
+
+ if (h->branch_info) {
+ int symlen;
+ /*
+ * +4 accounts for '[x] ' priv level info
+ * +2 accounts for the 0x prefix on raw addresses
+ */
+ if (h->branch_info->from.sym) {
+ symlen = (int)h->branch_info->from.sym->namelen + 4;
+ hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
+
+ symlen = dso__name_len(h->branch_info->from.map->dso);
+ hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
+ } else {
+ symlen = unresolved_col_width + 4 + 2;
+ hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
+ hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
+ }
+
+ if (h->branch_info->to.sym) {
+ symlen = (int)h->branch_info->to.sym->namelen + 4;
+ hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
+
+ symlen = dso__name_len(h->branch_info->to.map->dso);
+ hists__new_col_len(hists, HISTC_DSO_TO, symlen);
+ } else {
+ symlen = unresolved_col_width + 4 + 2;
+ hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
+ hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
+ }
+ }
+}
+
+static void hist_entry__add_cpumode_period(struct hist_entry *he,
+ unsigned int cpumode, u64 period)
+{
+ switch (cpumode) {
+ case PERF_RECORD_MISC_KERNEL:
+ he->period_sys += period;
+ break;
+ case PERF_RECORD_MISC_USER:
+ he->period_us += period;
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ he->period_guest_sys += period;
+ break;
+ case PERF_RECORD_MISC_GUEST_USER:
+ he->period_guest_us += period;
+ break;
+ default:
+ break;
+ }
+}
+
+static void hist_entry__decay(struct hist_entry *he)
+{
+ he->period = (he->period * 7) / 8;
+ he->nr_events = (he->nr_events * 7) / 8;
+}
+
+static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
+{
+ u64 prev_period = he->period;
+
+ if (prev_period == 0)
+ return true;
+
+ hist_entry__decay(he);
+
+ if (!he->filtered)
+ hists->stats.total_period -= prev_period - he->period;
+
+ return he->period == 0;
+}
+
+static void __hists__decay_entries(struct hists *hists, bool zap_user,
+ bool zap_kernel, bool threaded)
+{
+ struct rb_node *next = rb_first(&hists->entries);
+ struct hist_entry *n;
+
+ while (next) {
+ n = rb_entry(next, struct hist_entry, rb_node);
+ next = rb_next(&n->rb_node);
+ /*
+ * We may be annotating this, for instance, so keep it here in
+ * case it gets new samples; we'll eventually free it when
+ * the user stops browsing and it again gets fully decayed.
+ */
+ if (((zap_user && n->level == '.') ||
+ (zap_kernel && n->level != '.') ||
+ hists__decay_entry(hists, n)) &&
+ !n->used) {
+ rb_erase(&n->rb_node, &hists->entries);
+
+ if (sort__need_collapse || threaded)
+ rb_erase(&n->rb_node_in, &hists->entries_collapsed);
+
+ hist_entry__free(n);
+ --hists->nr_entries;
+ }
+ }
+}
+
+void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
+{
+ return __hists__decay_entries(hists, zap_user, zap_kernel, false);
+}
+
+void hists__decay_entries_threaded(struct hists *hists,
+ bool zap_user, bool zap_kernel)
+{
+ return __hists__decay_entries(hists, zap_user, zap_kernel, true);
+}
+
+/*
+ * histogram, sorted on item, collects periods
+ */
+
+static struct hist_entry *hist_entry__new(struct hist_entry *template)
+{
+ size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
+ struct hist_entry *he = malloc(sizeof(*he) + callchain_size);
+
+ if (he != NULL) {
+ *he = *template;
+ he->nr_events = 1;
+ if (he->ms.map)
+ he->ms.map->referenced = true;
+ if (symbol_conf.use_callchain)
+ callchain_init(he->callchain);
+ }
+
+ return he;
+}
+
+static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
+{
+ if (!h->filtered) {
+ hists__calc_col_len(hists, h);
+ ++hists->nr_entries;
+ hists->stats.total_period += h->period;
+ }
+}
+
+static u8 symbol__parent_filter(const struct symbol *parent)
+{
+ if (symbol_conf.exclude_other && parent == NULL)
+ return 1 << HIST_FILTER__PARENT;
+ return 0;
+}
+
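+/*
+ * Look up 'entry' in the current entries_in tree using hist_entry__cmp:
+ * on a match accumulate the period into the existing entry, otherwise
+ * allocate and insert a new hist_entry. hists->lock protects entries_in
+ * against rotation by hists__get_rotate_entries_in().
+ */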
+static struct hist_entry *add_hist_entry(struct hists *hists,
+ struct hist_entry *entry,
+ struct addr_location *al,
+ u64 period)
+{
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct hist_entry *he;
+ int cmp;
+
+ pthread_mutex_lock(&hists->lock);
+
+ p = &hists->entries_in->rb_node;
+
+ while (*p != NULL) {
+ parent = *p;
+ he = rb_entry(parent, struct hist_entry, rb_node_in);
+
+ cmp = hist_entry__cmp(entry, he);
+
+ if (!cmp) {
+ he->period += period;
+ ++he->nr_events;
+
+ /* If the map of an existing hist_entry has
+ * become out-of-date due to an exec() or
+ * similar, update it. Otherwise we will
+ * mis-adjust symbol addresses when computing
+ * the history counter to increment.
+ */
+ if (he->ms.map != entry->ms.map) {
+ he->ms.map = entry->ms.map;
+ if (he->ms.map)
+ he->ms.map->referenced = true;
+ }
+ goto out;
+ }
+
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ he = hist_entry__new(entry);
+ if (!he)
+ goto out_unlock;
+
+ rb_link_node(&he->rb_node_in, parent, p);
+ rb_insert_color(&he->rb_node_in, hists->entries_in);
+out:
+ hist_entry__add_cpumode_period(he, al->cpumode, period);
+out_unlock:
+ pthread_mutex_unlock(&hists->lock);
+ return he;
+}
+
+struct hist_entry *__hists__add_branch_entry(struct hists *self,
+ struct addr_location *al,
+ struct symbol *sym_parent,
+ struct branch_info *bi,
+ u64 period)
+{
+ struct hist_entry entry = {
+ .thread = al->thread,
+ .ms = {
+ .map = bi->to.map,
+ .sym = bi->to.sym,
+ },
+ .cpu = al->cpu,
+ .ip = bi->to.addr,
+ .level = al->level,
+ .period = period,
+ .parent = sym_parent,
+ .filtered = symbol__parent_filter(sym_parent),
+ .branch_info = bi,
+ };
+
+ return add_hist_entry(self, &entry, al, period);
+}
+
+struct hist_entry *__hists__add_entry(struct hists *self,
+ struct addr_location *al,
+ struct symbol *sym_parent, u64 period)
+{
+ struct hist_entry entry = {
+ .thread = al->thread,
+ .ms = {
+ .map = al->map,
+ .sym = al->sym,
+ },
+ .cpu = al->cpu,
+ .ip = al->addr,
+ .level = al->level,
+ .period = period,
+ .parent = sym_parent,
+ .filtered = symbol__parent_filter(sym_parent),
+ };
+
+ return add_hist_entry(self, &entry, al, period);
+}
+
+int64_t
+hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct sort_entry *se;
+ int64_t cmp = 0;
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ cmp = se->se_cmp(left, right);
+ if (cmp)
+ break;
+ }
+
+ return cmp;
+}
+
+int64_t
+hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
+{
+ struct sort_entry *se;
+ int64_t cmp = 0;
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ int64_t (*f)(struct hist_entry *, struct hist_entry *);
+
+ f = se->se_collapse ?: se->se_cmp;
+
+ cmp = f(left, right);
+ if (cmp)
+ break;
+ }
+
+ return cmp;
+}
+
+void hist_entry__free(struct hist_entry *he)
+{
+ free(he);
+}
+
+/*
+ * collapse the histogram
+ */
+
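+/*
+ * Merge 'he' into the collapsed tree: if an equivalent entry already
+ * exists, its period, event count and callchain absorb those of 'he',
+ * which is then freed (returns false); otherwise 'he' is linked into the
+ * tree (returns true).
+ */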
+static bool hists__collapse_insert_entry(struct hists *hists,
+ struct rb_root *root,
+ struct hist_entry *he)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+ int64_t cmp;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node_in);
+
+ cmp = hist_entry__collapse(iter, he);
+
+ if (!cmp) {
+ iter->period += he->period;
+ iter->nr_events += he->nr_events;
+ if (symbol_conf.use_callchain) {
+ callchain_cursor_reset(&hists->callchain_cursor);
+ callchain_merge(&hists->callchain_cursor, iter->callchain,
+ he->callchain);
+ }
+ hist_entry__free(he);
+ return false;
+ }
+
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&he->rb_node_in, parent, p);
+ rb_insert_color(&he->rb_node_in, root);
+ return true;
+}
+
+static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
+{
+ struct rb_root *root;
+
+ pthread_mutex_lock(&hists->lock);
+
+ root = hists->entries_in;
+ if (++hists->entries_in > &hists->entries_in_array[1])
+ hists->entries_in = &hists->entries_in_array[0];
+
+ pthread_mutex_unlock(&hists->lock);
+
+ return root;
+}
+
+static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
+{
+ hists__filter_entry_by_dso(hists, he);
+ hists__filter_entry_by_thread(hists, he);
+ hists__filter_entry_by_symbol(hists, he);
+}
+
+static void __hists__collapse_resort(struct hists *hists, bool threaded)
+{
+ struct rb_root *root;
+ struct rb_node *next;
+ struct hist_entry *n;
+
+ if (!sort__need_collapse && !threaded)
+ return;
+
+ root = hists__get_rotate_entries_in(hists);
+ next = rb_first(root);
+
+ while (next) {
+ n = rb_entry(next, struct hist_entry, rb_node_in);
+ next = rb_next(&n->rb_node_in);
+
+ rb_erase(&n->rb_node_in, root);
+ if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
+ /*
+ * If it wasn't combined with one of the entries already
+ * collapsed, we need to apply the filters that may have
+ * been set by, say, the hist_browser.
+ */
+ hists__apply_filters(hists, n);
+ }
+ }
+}
+
+void hists__collapse_resort(struct hists *hists)
+{
+ return __hists__collapse_resort(hists, false);
+}
+
+void hists__collapse_resort_threaded(struct hists *hists)
+{
+ return __hists__collapse_resort(hists, true);
+}
+
+/*
+ * reverse the map, sort on period.
+ */
+
+static void __hists__insert_output_entry(struct rb_root *entries,
+ struct hist_entry *he,
+ u64 min_callchain_hits)
+{
+ struct rb_node **p = &entries->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+
+ if (symbol_conf.use_callchain)
+ callchain_param.sort(&he->sorted_chain, he->callchain,
+ min_callchain_hits, &callchain_param);
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node);
+
+ if (he->period > iter->period)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, entries);
+}
+
+static void __hists__output_resort(struct hists *hists, bool threaded)
+{
+ struct rb_root *root;
+ struct rb_node *next;
+ struct hist_entry *n;
+ u64 min_callchain_hits;
+
+ min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
+
+ if (sort__need_collapse || threaded)
+ root = &hists->entries_collapsed;
+ else
+ root = hists->entries_in;
+
+ next = rb_first(root);
+ hists->entries = RB_ROOT;
+
+ hists->nr_entries = 0;
+ hists->stats.total_period = 0;
+ hists__reset_col_len(hists);
+
+ while (next) {
+ n = rb_entry(next, struct hist_entry, rb_node_in);
+ next = rb_next(&n->rb_node_in);
+
+ __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
+ hists__inc_nr_entries(hists, n);
+ }
+}
+
+void hists__output_resort(struct hists *hists)
+{
+ return __hists__output_resort(hists, false);
+}
+
+void hists__output_resort_threaded(struct hists *hists)
+{
+ return __hists__output_resort(hists, true);
+}
+
+static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
+{
+ int i;
+ int ret = fprintf(fp, " ");
+
+ for (i = 0; i < left_margin; i++)
+ ret += fprintf(fp, " ");
+
+ return ret;
+}
+
+static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
+ int left_margin)
+{
+ int i;
+ size_t ret = callchain__fprintf_left_margin(fp, left_margin);
+
+ for (i = 0; i < depth; i++)
+ if (depth_mask & (1 << i))
+ ret += fprintf(fp, "| ");
+ else
+ ret += fprintf(fp, " ");
+
+ ret += fprintf(fp, "\n");
+
+ return ret;
+}
+
+static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
+ int depth, int depth_mask, int period,
+ u64 total_samples, u64 hits,
+ int left_margin)
+{
+ int i;
+ size_t ret = 0;
+
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ for (i = 0; i < depth; i++) {
+ if (depth_mask & (1 << i))
+ ret += fprintf(fp, "|");
+ else
+ ret += fprintf(fp, " ");
+ if (!period && i == depth - 1) {
+ double percent;
+
+ percent = hits * 100.0 / total_samples;
+ ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
+ } else
+ ret += fprintf(fp, "%s", " ");
+ }
+ if (chain->ms.sym)
+ ret += fprintf(fp, "%s\n", chain->ms.sym->name);
+ else
+ ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
+
+ return ret;
+}
+
+static struct symbol *rem_sq_bracket;
+static struct callchain_list rem_hits;
+
+static void init_rem_hits(void)
+{
+ rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
+ if (!rem_sq_bracket) {
+ fprintf(stderr, "Not enough memory to display remaining hits\n");
+ return;
+ }
+
+ strcpy(rem_sq_bracket->name, "[...]");
+ rem_hits.ms.sym = rem_sq_bracket;
+}
+
+static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
+ u64 total_samples, int depth,
+ int depth_mask, int left_margin)
+{
+ struct rb_node *node, *next;
+ struct callchain_node *child;
+ struct callchain_list *chain;
+ int new_depth_mask = depth_mask;
+ u64 remaining;
+ size_t ret = 0;
+ int i;
+ uint entries_printed = 0;
+
+ remaining = total_samples;
+
+ node = rb_first(root);
+ while (node) {
+ u64 new_total;
+ u64 cumul;
+
+ child = rb_entry(node, struct callchain_node, rb_node);
+ cumul = callchain_cumul_hits(child);
+ remaining -= cumul;
+
+ /*
+ * The depth mask manages the output of the pipes that show
+ * the depth. We don't want to keep the pipes of the current
+ * level for the last child of this depth, except if we have
+ * remaining filtered hits: those will supersede the last child
+ * (see the illustrative sketch after this function).
+ */
+ next = rb_next(node);
+ if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
+ new_depth_mask &= ~(1 << (depth - 1));
+
+ /*
+ * But we keep the older depth mask for the line separator,
+ * so that the level link is preserved until we reach the
+ * last child.
+ */
+ ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
+ left_margin);
+ i = 0;
+ list_for_each_entry(chain, &child->val, list) {
+ ret += ipchain__fprintf_graph(fp, chain, depth,
+ new_depth_mask, i++,
+ total_samples,
+ cumul,
+ left_margin);
+ }
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ new_total = child->children_hit;
+ else
+ new_total = total_samples;
+
+ ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
+ depth + 1,
+ new_depth_mask | (1 << depth),
+ left_margin);
+ node = next;
+ if (++entries_printed == callchain_param.print_limit)
+ break;
+ }
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL &&
+ remaining && remaining != total_samples) {
+
+ if (!rem_sq_bracket)
+ return ret;
+
+ new_depth_mask &= ~(1 << (depth - 1));
+ ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
+ new_depth_mask, 0, total_samples,
+ remaining, left_margin);
+ }
+
+ return ret;
+}
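+
+/*
+ * Editor's illustrative sketch, not part of the original patch: the
+ * depth_mask above is just a bitmap of which ancestor levels still need a
+ * '|' column. ipchain__fprintf_graph_line() prints a '|' column for every
+ * set bit and blanks for every cleared one, so clearing bit (depth - 1)
+ * for the last child is what visually terminates that branch of the graph.
+ * The helper name below is hypothetical.
+ */
+#if 0 /* example only, never compiled */
+static void depth_mask_example(FILE *fp) /* hypothetical example helper */
+{
+ /* levels 1 and 3 still open, level 2 already closed */
+ int depth = 3, depth_mask = (1 << 0) | (1 << 2);
+
+ ipchain__fprintf_graph_line(fp, depth, depth_mask, 0);
+}
+#endif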
+
+static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
+ u64 total_samples, int left_margin)
+{
+ struct callchain_node *cnode;
+ struct callchain_list *chain;
+ u32 entries_printed = 0;
+ bool printed = false;
+ struct rb_node *node;
+ int i = 0;
+ int ret = 0;
+
+ /*
+ * If we have a single callchain root, don't bother printing
+ * its percentage (100% in fractal mode and the same percentage
+ * as the hist entry in graph mode). This also avoids one level
+ * of columns.
+ */
+ node = rb_first(root);
+ if (node && !rb_next(node)) {
+ cnode = rb_entry(node, struct callchain_node, rb_node);
+ list_for_each_entry(chain, &cnode->val, list) {
+ /*
+ * If we sort by symbol, the first entry is the same as
+ * the symbol itself. No need to print it, otherwise it
+ * would appear twice.
+ */
+ if (!i++ && sort__first_dimension == SORT_SYM)
+ continue;
+ if (!printed) {
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ ret += fprintf(fp, "|\n");
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ ret += fprintf(fp, "---");
+ left_margin += 3;
+ printed = true;
+ } else
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+
+ if (chain->ms.sym)
+ ret += fprintf(fp, " %s\n", chain->ms.sym->name);
+ else
+ ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
+
+ if (++entries_printed == callchain_param.print_limit)
+ break;
+ }
+ root = &cnode->rb_root;
+ }
+
+ return ret + __callchain__fprintf_graph(fp, root, total_samples,
+ 1, 1, left_margin);
+}
+
+static size_t __callchain__fprintf_flat(FILE *fp,
+ struct callchain_node *self,
+ u64 total_samples)
+{
+ struct callchain_list *chain;
+ size_t ret = 0;
+
+ if (!self)
+ return 0;
+
+ ret += __callchain__fprintf_flat(fp, self->parent, total_samples);
+
+
+ list_for_each_entry(chain, &self->val, list) {
+ if (chain->ip >= PERF_CONTEXT_MAX)
+ continue;
+ if (chain->ms.sym)
+ ret += fprintf(fp, " %s\n", chain->ms.sym->name);
+ else
+ ret += fprintf(fp, " %p\n",
+ (void *)(long)chain->ip);
+ }
+
+ return ret;
+}
+
+static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
+ u64 total_samples)
+{
+ size_t ret = 0;
+ u32 entries_printed = 0;
+ struct rb_node *rb_node;
+ struct callchain_node *chain;
+
+ rb_node = rb_first(self);
+ while (rb_node) {
+ double percent;
+
+ chain = rb_entry(rb_node, struct callchain_node, rb_node);
+ percent = chain->hit * 100.0 / total_samples;
+
+ ret += percent_color_fprintf(fp, " %6.2f%%\n", percent);
+ ret += __callchain__fprintf_flat(fp, chain, total_samples);
+ ret += fprintf(fp, "\n");
+ if (++entries_printed == callchain_param.print_limit)
+ break;
+
+ rb_node = rb_next(rb_node);
+ }
+
+ return ret;
+}
+
+static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
+ u64 total_samples, int left_margin,
+ FILE *fp)
+{
+ switch (callchain_param.mode) {
+ case CHAIN_GRAPH_REL:
+ return callchain__fprintf_graph(fp, &he->sorted_chain, he->period,
+ left_margin);
+ break;
+ case CHAIN_GRAPH_ABS:
+ return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
+ left_margin);
+ break;
+ case CHAIN_FLAT:
+ return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
+ break;
+ case CHAIN_NONE:
+ break;
+ default:
+ pr_err("Bad callchain mode\n");
+ }
+
+ return 0;
+}
+
+void hists__output_recalc_col_len(struct hists *hists, int max_rows)
+{
+ struct rb_node *next = rb_first(&hists->entries);
+ struct hist_entry *n;
+ int row = 0;
+
+ hists__reset_col_len(hists);
+
+ while (next && row++ < max_rows) {
+ n = rb_entry(next, struct hist_entry, rb_node);
+ if (!n->filtered)
+ hists__calc_col_len(hists, n);
+ next = rb_next(&n->rb_node);
+ }
+}
+
+static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
+ size_t size, struct hists *pair_hists,
+ bool show_displacement, long displacement,
+ bool color, u64 total_period)
+{
+ u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
+ u64 nr_events;
+ const char *sep = symbol_conf.field_sep;
+ int ret;
+
+ if (symbol_conf.exclude_other && !he->parent)
+ return 0;
+
+ if (pair_hists) {
+ period = he->pair ? he->pair->period : 0;
+ nr_events = he->pair ? he->pair->nr_events : 0;
+ total = pair_hists->stats.total_period;
+ period_sys = he->pair ? he->pair->period_sys : 0;
+ period_us = he->pair ? he->pair->period_us : 0;
+ period_guest_sys = he->pair ? he->pair->period_guest_sys : 0;
+ period_guest_us = he->pair ? he->pair->period_guest_us : 0;
+ } else {
+ period = he->period;
+ nr_events = he->nr_events;
+ total = total_period;
+ period_sys = he->period_sys;
+ period_us = he->period_us;
+ period_guest_sys = he->period_guest_sys;
+ period_guest_us = he->period_guest_us;
+ }
+
+ if (total) {
+ if (color)
+ ret = percent_color_snprintf(s, size,
+ sep ? "%.2f" : " %6.2f%%",
+ (period * 100.0) / total);
+ else
+ ret = scnprintf(s, size, sep ? "%.2f" : " %6.2f%%",
+ (period * 100.0) / total);
+ if (symbol_conf.show_cpu_utilization) {
+ ret += percent_color_snprintf(s + ret, size - ret,
+ sep ? "%.2f" : " %6.2f%%",
+ (period_sys * 100.0) / total);
+ ret += percent_color_snprintf(s + ret, size - ret,
+ sep ? "%.2f" : " %6.2f%%",
+ (period_us * 100.0) / total);
+ if (perf_guest) {
+ ret += percent_color_snprintf(s + ret,
+ size - ret,
+ sep ? "%.2f" : " %6.2f%%",
+ (period_guest_sys * 100.0) /
+ total);
+ ret += percent_color_snprintf(s + ret,
+ size - ret,
+ sep ? "%.2f" : " %6.2f%%",
+ (period_guest_us * 100.0) /
+ total);
+ }
+ }
+ } else
+ ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);
+
+ if (symbol_conf.show_nr_samples) {
+ if (sep)
+ ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
+ else
+ ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
+ }
+
+ if (symbol_conf.show_total_period) {
+ if (sep)
+ ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
+ else
+ ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period);
+ }
+
+ if (pair_hists) {
+ char bf[32];
+ double old_percent = 0, new_percent = 0, diff;
+
+ if (total > 0)
+ old_percent = (period * 100.0) / total;
+ if (total_period > 0)
+ new_percent = (he->period * 100.0) / total_period;
+
+ diff = new_percent - old_percent;
+
+ if (fabs(diff) >= 0.01)
+ scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
+ else
+ scnprintf(bf, sizeof(bf), " ");
+
+ if (sep)
+ ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
+ else
+ ret += scnprintf(s + ret, size - ret, "%11.11s", bf);
+
+ if (show_displacement) {
+ if (displacement)
+ scnprintf(bf, sizeof(bf), "%+4ld", displacement);
+ else
+ scnprintf(bf, sizeof(bf), " ");
+
+ if (sep)
+ ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
+ else
+ ret += scnprintf(s + ret, size - ret, "%6.6s", bf);
+ }
+ }
+
+ return ret;
+}
+
+int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size,
+ struct hists *hists)
+{
+ const char *sep = symbol_conf.field_sep;
+ struct sort_entry *se;
+ int ret = 0;
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+
+ ret += scnprintf(s + ret, size - ret, "%s", sep ?: " ");
+ ret += se->se_snprintf(he, s + ret, size - ret,
+ hists__col_len(hists, se->se_width_idx));
+ }
+
+ return ret;
+}
+
+static int hist_entry__fprintf(struct hist_entry *he, size_t size,
+ struct hists *hists, struct hists *pair_hists,
+ bool show_displacement, long displacement,
+ u64 total_period, FILE *fp)
+{
+ char bf[512];
+ int ret;
+
+ if (size == 0 || size > sizeof(bf))
+ size = sizeof(bf);
+
+ ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists,
+ show_displacement, displacement,
+ true, total_period);
+ hist_entry__snprintf(he, bf + ret, size - ret, hists);
+ return fprintf(fp, "%s\n", bf);
+}
+
+static size_t hist_entry__fprintf_callchain(struct hist_entry *he,
+ struct hists *hists,
+ u64 total_period, FILE *fp)
+{
+ int left_margin = 0;
+
+ if (sort__first_dimension == SORT_COMM) {
+ struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
+ typeof(*se), list);
+ left_margin = hists__col_len(hists, se->se_width_idx);
+ left_margin -= thread__comm_len(he->thread);
+ }
+
+ return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
+}
+
+size_t hists__fprintf(struct hists *hists, struct hists *pair,
+ bool show_displacement, bool show_header, int max_rows,
+ int max_cols, FILE *fp)
+{
+ struct sort_entry *se;
+ struct rb_node *nd;
+ size_t ret = 0;
+ u64 total_period;
+ unsigned long position = 1;
+ long displacement = 0;
+ unsigned int width;
+ const char *sep = symbol_conf.field_sep;
+ const char *col_width = symbol_conf.col_width_list_str;
+ int nr_rows = 0;
+
+ init_rem_hits();
+
+ if (!show_header)
+ goto print_entries;
+
+ fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");
+
+ if (symbol_conf.show_cpu_utilization) {
+ if (sep) {
+ ret += fprintf(fp, "%csys", *sep);
+ ret += fprintf(fp, "%cus", *sep);
+ if (perf_guest) {
+ ret += fprintf(fp, "%cguest sys", *sep);
+ ret += fprintf(fp, "%cguest us", *sep);
+ }
+ } else {
+ ret += fprintf(fp, " sys ");
+ ret += fprintf(fp, " us ");
+ if (perf_guest) {
+ ret += fprintf(fp, " guest sys ");
+ ret += fprintf(fp, " guest us ");
+ }
+ }
+ }
+
+ if (symbol_conf.show_nr_samples) {
+ if (sep)
+ fprintf(fp, "%cSamples", *sep);
+ else
+ fputs(" Samples ", fp);
+ }
+
+ if (symbol_conf.show_total_period) {
+ if (sep)
+ ret += fprintf(fp, "%cPeriod", *sep);
+ else
+ ret += fprintf(fp, " Period ");
+ }
+
+ if (pair) {
+ if (sep)
+ ret += fprintf(fp, "%cDelta", *sep);
+ else
+ ret += fprintf(fp, " Delta ");
+
+ if (show_displacement) {
+ if (sep)
+ ret += fprintf(fp, "%cDisplacement", *sep);
+ else
+ ret += fprintf(fp, " Displ");
+ }
+ }
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+ if (sep) {
+ fprintf(fp, "%c%s", *sep, se->se_header);
+ continue;
+ }
+ width = strlen(se->se_header);
+ if (symbol_conf.col_width_list_str) {
+ if (col_width) {
+ hists__set_col_len(hists, se->se_width_idx,
+ atoi(col_width));
+ col_width = strchr(col_width, ',');
+ if (col_width)
+ ++col_width;
+ }
+ }
+ if (!hists__new_col_len(hists, se->se_width_idx, width))
+ width = hists__col_len(hists, se->se_width_idx);
+ fprintf(fp, " %*s", width, se->se_header);
+ }
+
+ fprintf(fp, "\n");
+ if (max_rows && ++nr_rows >= max_rows)
+ goto out;
+
+ if (sep)
+ goto print_entries;
+
+ fprintf(fp, "# ........");
+ if (symbol_conf.show_cpu_utilization)
+ fprintf(fp, " ....... .......");
+ if (symbol_conf.show_nr_samples)
+ fprintf(fp, " ..........");
+ if (symbol_conf.show_total_period)
+ fprintf(fp, " ............");
+ if (pair) {
+ fprintf(fp, " ..........");
+ if (show_displacement)
+ fprintf(fp, " .....");
+ }
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ unsigned int i;
+
+ if (se->elide)
+ continue;
+
+ fprintf(fp, " ");
+ width = hists__col_len(hists, se->se_width_idx);
+ if (width == 0)
+ width = strlen(se->se_header);
+ for (i = 0; i < width; i++)
+ fprintf(fp, ".");
+ }
+
+ fprintf(fp, "\n");
+ if (max_rows && ++nr_rows >= max_rows)
+ goto out;
+
+ fprintf(fp, "#\n");
+ if (max_rows && ++nr_rows >= max_rows)
+ goto out;
+
+print_entries:
+ total_period = hists->stats.total_period;
+
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (h->filtered)
+ continue;
+
+ if (show_displacement) {
+ if (h->pair != NULL)
+ displacement = ((long)h->pair->position -
+ (long)position);
+ else
+ displacement = 0;
+ ++position;
+ }
+ ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
+ displacement, total_period, fp);
+
+ if (symbol_conf.use_callchain)
+ ret += hist_entry__fprintf_callchain(h, hists, total_period, fp);
+ if (max_rows && ++nr_rows >= max_rows)
+ goto out;
+
+ if (h->ms.map == NULL && verbose > 1) {
+ __map_groups__fprintf_maps(&h->thread->mg,
+ MAP__FUNCTION, verbose, fp);
+ fprintf(fp, "%.10s end\n", graph_dotted_line);
+ }
+ }
+out:
+ free(rem_sq_bracket);
+
+ return ret;
+}
+
+/*
+ * See hists__fprintf to match the column widths
+ */
+unsigned int hists__sort_list_width(struct hists *hists)
+{
+ struct sort_entry *se;
+ int ret = 9; /* total % */
+
+ if (symbol_conf.show_cpu_utilization) {
+ ret += 7; /* count_sys % */
+ ret += 6; /* count_us % */
+ if (perf_guest) {
+ ret += 13; /* count_guest_sys % */
+ ret += 12; /* count_guest_us % */
+ }
+ }
+
+ if (symbol_conf.show_nr_samples)
+ ret += 11;
+
+ if (symbol_conf.show_total_period)
+ ret += 13;
+
+ list_for_each_entry(se, &hist_entry__sort_list, list)
+ if (!se->elide)
+ ret += 2 + hists__col_len(hists, se->se_width_idx);
+
+ if (verbose) /* Addr + origin */
+ ret += 3 + BITS_PER_LONG / 4;
+
+ return ret;
+}
+
+static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
+ enum hist_filter filter)
+{
+ h->filtered &= ~(1 << filter);
+ if (h->filtered)
+ return;
+
+ ++hists->nr_entries;
+ if (h->ms.unfolded)
+ hists->nr_entries += h->nr_rows;
+ h->row_offset = 0;
+ hists->stats.total_period += h->period;
+ hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
+
+ hists__calc_col_len(hists, h);
+}
+
+
+static bool hists__filter_entry_by_dso(struct hists *hists,
+ struct hist_entry *he)
+{
+ if (hists->dso_filter != NULL &&
+ (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
+ he->filtered |= (1 << HIST_FILTER__DSO);
+ return true;
+ }
+
+ return false;
+}
+
+void hists__filter_by_dso(struct hists *hists)
+{
+ struct rb_node *nd;
+
+ hists->nr_entries = hists->stats.total_period = 0;
+ hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
+ hists__reset_col_len(hists);
+
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (symbol_conf.exclude_other && !h->parent)
+ continue;
+
+ if (hists__filter_entry_by_dso(hists, h))
+ continue;
+
+ hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
+ }
+}
+
+static bool hists__filter_entry_by_thread(struct hists *hists,
+ struct hist_entry *he)
+{
+ if (hists->thread_filter != NULL &&
+ he->thread != hists->thread_filter) {
+ he->filtered |= (1 << HIST_FILTER__THREAD);
+ return true;
+ }
+
+ return false;
+}
+
+void hists__filter_by_thread(struct hists *hists)
+{
+ struct rb_node *nd;
+
+ hists->nr_entries = hists->stats.total_period = 0;
+ hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
+ hists__reset_col_len(hists);
+
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (hists__filter_entry_by_thread(hists, h))
+ continue;
+
+ hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
+ }
+}
+
+static bool hists__filter_entry_by_symbol(struct hists *hists,
+ struct hist_entry *he)
+{
+ if (hists->symbol_filter_str != NULL &&
+ (!he->ms.sym || strstr(he->ms.sym->name,
+ hists->symbol_filter_str) == NULL)) {
+ he->filtered |= (1 << HIST_FILTER__SYMBOL);
+ return true;
+ }
+
+ return false;
+}
+
+void hists__filter_by_symbol(struct hists *hists)
+{
+ struct rb_node *nd;
+
+ hists->nr_entries = hists->stats.total_period = 0;
+ hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
+ hists__reset_col_len(hists);
+
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (hists__filter_entry_by_symbol(hists, h))
+ continue;
+
+ hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
+ }
+}
+
+int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
+{
+ return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
+}
+
+int hist_entry__annotate(struct hist_entry *he, size_t privsize)
+{
+ return symbol__annotate(he->ms.sym, he->ms.map, privsize);
+}
+
+void hists__inc_nr_events(struct hists *hists, u32 type)
+{
+ ++hists->stats.nr_events[0];
+ ++hists->stats.nr_events[type];
+}
+
+size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
+{
+ int i;
+ size_t ret = 0;
+
+ for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
+ const char *name;
+
+ if (hists->stats.nr_events[i] == 0)
+ continue;
+
+ name = perf_event__name(i);
+ if (!strcmp(name, "UNKNOWN"))
+ continue;
+
+ ret += fprintf(fp, "%16s events: %10d\n", name,
+ hists->stats.nr_events[i]);
+ }
+
+ return ret;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/hist.h b/ANDROID_3.4.5/tools/perf/util/hist.h
new file mode 100644
index 00000000..2cae9df4
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/hist.h
@@ -0,0 +1,169 @@
+#ifndef __PERF_HIST_H
+#define __PERF_HIST_H
+
+#include <linux/types.h>
+#include <pthread.h>
+#include "callchain.h"
+
+extern struct callchain_param callchain_param;
+
+struct hist_entry;
+struct addr_location;
+struct symbol;
+
+/*
+ * The kernel collects the number of events it couldn't send in a stretch and
+ * when possible sends this number in a PERF_RECORD_LOST event. The number of
+ * such "chunks" of lost events is stored in .nr_events[PERF_RECORD_LOST] while
+ * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
+ * the sum of all struct lost_event.lost fields reported.
+ *
+ * The total_period is needed because by default auto-freq is used, so
+ * multiplying nr_events[PERF_RECORD_SAMPLE] by a fixed frequency can't give
+ * the total number of low level events; instead it is necessary to sum all
+ * the struct sample_event.period fields and stash the result in total_period.
+ */
+struct events_stats {
+ u64 total_period;
+ u64 total_lost;
+ u64 total_invalid_chains;
+ u32 nr_events[PERF_RECORD_HEADER_MAX];
+ u32 nr_lost_warned;
+ u32 nr_unknown_events;
+ u32 nr_invalid_chains;
+ u32 nr_unknown_id;
+ u32 nr_unprocessable_samples;
+};
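+
+/*
+ * Editor's illustrative sketch, not part of the original patch: per-sample
+ * accounting under auto-freq boils down to bumping the event counters and
+ * accumulating the period carried by each sample. The helper name below is
+ * hypothetical.
+ */
+#if 0 /* example only, never compiled */
+static void events_stats__account_sample(struct events_stats *stats, u64 period)
+{
+ /* hypothetical example helper */
+ stats->nr_events[0]++; /* total events */
+ stats->nr_events[PERF_RECORD_SAMPLE]++;
+ stats->total_period += period;
+}
+#endif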
+
+enum hist_column {
+ HISTC_SYMBOL,
+ HISTC_DSO,
+ HISTC_THREAD,
+ HISTC_COMM,
+ HISTC_PARENT,
+ HISTC_CPU,
+ HISTC_MISPREDICT,
+ HISTC_SYMBOL_FROM,
+ HISTC_SYMBOL_TO,
+ HISTC_DSO_FROM,
+ HISTC_DSO_TO,
+ HISTC_NR_COLS, /* Last entry */
+};
+
+struct thread;
+struct dso;
+
+struct hists {
+ struct rb_root entries_in_array[2];
+ struct rb_root *entries_in;
+ struct rb_root entries;
+ struct rb_root entries_collapsed;
+ u64 nr_entries;
+ const struct thread *thread_filter;
+ const struct dso *dso_filter;
+ const char *uid_filter_str;
+ const char *symbol_filter_str;
+ pthread_mutex_t lock;
+ struct events_stats stats;
+ u64 event_stream;
+ u16 col_len[HISTC_NR_COLS];
+ /* Best would be to reuse the session callchain cursor */
+ struct callchain_cursor callchain_cursor;
+};
+
+struct hist_entry *__hists__add_entry(struct hists *self,
+ struct addr_location *al,
+ struct symbol *parent, u64 period);
+int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
+int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
+int hist_entry__snprintf(struct hist_entry *self, char *bf, size_t size,
+ struct hists *hists);
+void hist_entry__free(struct hist_entry *);
+
+struct hist_entry *__hists__add_branch_entry(struct hists *self,
+ struct addr_location *al,
+ struct symbol *sym_parent,
+ struct branch_info *bi,
+ u64 period);
+
+void hists__output_resort(struct hists *self);
+void hists__output_resort_threaded(struct hists *hists);
+void hists__collapse_resort(struct hists *self);
+void hists__collapse_resort_threaded(struct hists *hists);
+
+void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
+void hists__decay_entries_threaded(struct hists *hists, bool zap_user,
+ bool zap_kernel);
+void hists__output_recalc_col_len(struct hists *hists, int max_rows);
+
+void hists__inc_nr_events(struct hists *self, u32 type);
+size_t hists__fprintf_nr_events(struct hists *self, FILE *fp);
+
+size_t hists__fprintf(struct hists *self, struct hists *pair,
+ bool show_displacement, bool show_header,
+ int max_rows, int max_cols, FILE *fp);
+
+int hist_entry__inc_addr_samples(struct hist_entry *self, int evidx, u64 addr);
+int hist_entry__annotate(struct hist_entry *self, size_t privsize);
+
+void hists__filter_by_dso(struct hists *hists);
+void hists__filter_by_thread(struct hists *hists);
+void hists__filter_by_symbol(struct hists *hists);
+
+u16 hists__col_len(struct hists *self, enum hist_column col);
+void hists__set_col_len(struct hists *self, enum hist_column col, u16 len);
+bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len);
+
+struct perf_evlist;
+
+#ifdef NO_NEWT_SUPPORT
+static inline
+int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used,
+ const char *help __used,
+ void(*timer)(void *arg) __used,
+ void *arg __used,
+ int refresh __used)
+{
+ return 0;
+}
+
+static inline int hist_entry__tui_annotate(struct hist_entry *self __used,
+ int evidx __used,
+ void(*timer)(void *arg) __used,
+ void *arg __used,
+ int delay_secs __used)
+{
+ return 0;
+}
+#define K_LEFT -1
+#define K_RIGHT -2
+#else
+#include "ui/keysyms.h"
+int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
+ void(*timer)(void *arg), void *arg, int delay_secs);
+
+int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
+ void(*timer)(void *arg), void *arg,
+ int refresh);
+#endif
+
+#ifdef NO_GTK2_SUPPORT
+static inline
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __used,
+ const char *help __used,
+ void(*timer)(void *arg) __used,
+ void *arg __used,
+ int refresh __used)
+{
+ return 0;
+}
+
+#else
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help,
+ void(*timer)(void *arg), void *arg,
+ int refresh);
+#endif
+
+unsigned int hists__sort_list_width(struct hists *self);
+
+#endif /* __PERF_HIST_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/hweight.c b/ANDROID_3.4.5/tools/perf/util/hweight.c
new file mode 100644
index 00000000..5c1d0d09
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/hweight.c
@@ -0,0 +1,31 @@
+#include <linux/bitops.h>
+
+/**
+ * hweightN - returns the Hamming weight of an N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+
+unsigned int hweight32(unsigned int w)
+{
+ unsigned int res = w - ((w >> 1) & 0x55555555);
+ res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+ res = (res + (res >> 4)) & 0x0F0F0F0F;
+ res = res + (res >> 8);
+ return (res + (res >> 16)) & 0x000000FF;
+}
+
+unsigned long hweight64(__u64 w)
+{
+#if BITS_PER_LONG == 32
+ return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+#elif BITS_PER_LONG == 64
+ __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
+ res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+ res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+ res = res + (res >> 8);
+ res = res + (res >> 16);
+ return (res + (res >> 32)) & 0x00000000000000FFul;
+#endif
+}
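+
+/*
+ * Editor's illustrative sketch, not part of the original patch: both helpers
+ * just count set bits, e.g. 0xF0F0F0F0 has 16 of its 32 bits set. The
+ * helper name below is hypothetical.
+ */
+#if 0 /* example only, never compiled */
+static void hweight_example(void) /* hypothetical example helper */
+{
+ unsigned int a = hweight32(0xF0F0F0F0u); /* 16 */
+ unsigned long b = hweight64(0xFFFFFFFFFFFFFFFFull); /* 64 */
+
+ (void)a;
+ (void)b;
+}
+#endif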
diff --git a/ANDROID_3.4.5/tools/perf/util/include/asm/alternative-asm.h b/ANDROID_3.4.5/tools/perf/util/include/asm/alternative-asm.h
new file mode 100644
index 00000000..6789d788
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/asm/alternative-asm.h
@@ -0,0 +1,8 @@
+#ifndef _PERF_ASM_ALTERNATIVE_ASM_H
+#define _PERF_ASM_ALTERNATIVE_ASM_H
+
+/* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */
+
+#define altinstruction_entry #
+
+#endif
diff --git a/ANDROID_3.4.5/tools/perf/util/include/asm/asm-offsets.h b/ANDROID_3.4.5/tools/perf/util/include/asm/asm-offsets.h
new file mode 100644
index 00000000..ed538942
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+/* stub */
diff --git a/ANDROID_3.4.5/tools/perf/util/include/asm/bug.h b/ANDROID_3.4.5/tools/perf/util/include/asm/bug.h
new file mode 100644
index 00000000..7fcc6810
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/asm/bug.h
@@ -0,0 +1,22 @@
+#ifndef _PERF_ASM_GENERIC_BUG_H
+#define _PERF_ASM_GENERIC_BUG_H
+
+#define __WARN_printf(arg...) do { fprintf(stderr, arg); } while (0)
+
+#define WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_printf(format); \
+ unlikely(__ret_warn_on); \
+})
+
+#define WARN_ONCE(condition, format...) ({ \
+ static int __warned; \
+ int __ret_warn_once = !!(condition); \
+ \
+ if (unlikely(__ret_warn_once)) \
+ if (WARN(!__warned, format)) \
+ __warned = 1; \
+ unlikely(__ret_warn_once); \
+})
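+
+/*
+ * Editor's illustrative sketch, not part of the original patch: WARN() and
+ * WARN_ONCE() evaluate to the truth value of the condition, so they can gate
+ * an early return while printing at most one diagnostic. The helper name
+ * below is hypothetical.
+ */
+#if 0 /* example only, never compiled */
+static int parse_thing(const char *s) /* hypothetical example helper */
+{
+ if (WARN_ONCE(s == NULL, "parse_thing: NULL input\n"))
+ return -1;
+ return 0;
+}
+#endif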
+#endif
diff --git a/ANDROID_3.4.5/tools/perf/util/include/asm/byteorder.h b/ANDROID_3.4.5/tools/perf/util/include/asm/byteorder.h
new file mode 100644
index 00000000..b722abe3
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/asm/byteorder.h
@@ -0,0 +1,2 @@
+#include <asm/types.h>
+#include "../../../../include/linux/swab.h"
diff --git a/ANDROID_3.4.5/tools/perf/util/include/asm/cpufeature.h b/ANDROID_3.4.5/tools/perf/util/include/asm/cpufeature.h
new file mode 100644
index 00000000..acffd5e4
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/asm/cpufeature.h
@@ -0,0 +1,9 @@
+
+#ifndef PERF_CPUFEATURE_H
+#define PERF_CPUFEATURE_H
+
+/* cpufeature.h ... dummy header file for including arch/x86/lib/memcpy_64.S */
+
+#define X86_FEATURE_REP_GOOD 0
+
+#endif /* PERF_CPUFEATURE_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/include/asm/dwarf2.h b/ANDROID_3.4.5/tools/perf/util/include/asm/dwarf2.h
new file mode 100644
index 00000000..afe38199
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/asm/dwarf2.h
@@ -0,0 +1,13 @@
+
+#ifndef PERF_DWARF2_H
+#define PERF_DWARF2_H
+
+/* dwarf2.h ... dummy header file for including arch/x86/lib/mem{cpy,set}_64.S */
+
+#define CFI_STARTPROC
+#define CFI_ENDPROC
+#define CFI_REMEMBER_STATE
+#define CFI_RESTORE_STATE
+
+#endif /* PERF_DWARF2_H */
+
diff --git a/ANDROID_3.4.5/tools/perf/util/include/asm/hweight.h b/ANDROID_3.4.5/tools/perf/util/include/asm/hweight.h
new file mode 100644
index 00000000..36cf26d4
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/asm/hweight.h
@@ -0,0 +1,8 @@
+#ifndef PERF_HWEIGHT_H
+#define PERF_HWEIGHT_H
+
+#include <linux/types.h>
+unsigned int hweight32(unsigned int w);
+unsigned long hweight64(__u64 w);
+
+#endif /* PERF_HWEIGHT_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/include/asm/swab.h b/ANDROID_3.4.5/tools/perf/util/include/asm/swab.h
new file mode 100644
index 00000000..ed538942
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/asm/swab.h
@@ -0,0 +1 @@
+/* stub */
diff --git a/ANDROID_3.4.5/tools/perf/util/include/asm/system.h b/ANDROID_3.4.5/tools/perf/util/include/asm/system.h
new file mode 100644
index 00000000..710cecca
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/asm/system.h
@@ -0,0 +1 @@
+/* Empty */
diff --git a/ANDROID_3.4.5/tools/perf/util/include/asm/uaccess.h b/ANDROID_3.4.5/tools/perf/util/include/asm/uaccess.h
new file mode 100644
index 00000000..d0f72b8f
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/asm/uaccess.h
@@ -0,0 +1,14 @@
+#ifndef _PERF_ASM_UACCESS_H_
+#define _PERF_ASM_UACCESS_H_
+
+#define __get_user(src, dest) \
+({ \
+ (src) = *dest; \
+ 0; \
+})
+
+#define get_user __get_user
+
+#define access_ok(type, addr, size) 1
+
+#endif
diff --git a/ANDROID_3.4.5/tools/perf/util/include/asm/unistd_32.h b/ANDROID_3.4.5/tools/perf/util/include/asm/unistd_32.h
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/asm/unistd_32.h
@@ -0,0 +1 @@
+
diff --git a/ANDROID_3.4.5/tools/perf/util/include/asm/unistd_64.h b/ANDROID_3.4.5/tools/perf/util/include/asm/unistd_64.h
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/asm/unistd_64.h
@@ -0,0 +1 @@
+
diff --git a/ANDROID_3.4.5/tools/perf/util/include/dwarf-regs.h b/ANDROID_3.4.5/tools/perf/util/include/dwarf-regs.h
new file mode 100644
index 00000000..cf6727e9
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/dwarf-regs.h
@@ -0,0 +1,8 @@
+#ifndef _PERF_DWARF_REGS_H_
+#define _PERF_DWARF_REGS_H_
+
+#ifdef DWARF_SUPPORT
+const char *get_arch_regstr(unsigned int n);
+#endif
+
+#endif
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/bitmap.h b/ANDROID_3.4.5/tools/perf/util/include/linux/bitmap.h
new file mode 100644
index 00000000..bb162e40
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/bitmap.h
@@ -0,0 +1,46 @@
+#ifndef _PERF_BITOPS_H
+#define _PERF_BITOPS_H
+
+#include <string.h>
+#include <linux/bitops.h>
+
+int __bitmap_weight(const unsigned long *bitmap, int bits);
+void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+
+#define BITMAP_LAST_WORD_MASK(nbits) \
+( \
+ ((nbits) % BITS_PER_LONG) ? \
+ (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
+)
+
+#define small_const_nbits(nbits) \
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+
+static inline void bitmap_zero(unsigned long *dst, int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = 0UL;
+ else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
+ }
+}
+
+static inline int bitmap_weight(const unsigned long *src, int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight(src, nbits);
+}
+
+static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = *src1 | *src2;
+ else
+ __bitmap_or(dst, src1, src2, nbits);
+}
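+
+/*
+ * Editor's illustrative sketch, not part of the original patch: a typical
+ * use is a DECLARE_BITMAP() (from linux/types.h) that is zeroed, populated
+ * with set_bit() and then summarised with bitmap_weight(). The helper name
+ * below is hypothetical.
+ */
+#if 0 /* example only, never compiled */
+static int count_bits_example(void) /* hypothetical example helper */
+{
+ DECLARE_BITMAP(cpus, 64);
+
+ bitmap_zero(cpus, 64);
+ set_bit(3, cpus);
+ set_bit(17, cpus);
+
+ return bitmap_weight(cpus, 64); /* 2 */
+}
+#endif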
+
+#endif /* _PERF_BITOPS_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/bitops.h b/ANDROID_3.4.5/tools/perf/util/include/linux/bitops.h
new file mode 100644
index 00000000..f1584833
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/bitops.h
@@ -0,0 +1,151 @@
+#ifndef _PERF_LINUX_BITOPS_H_
+#define _PERF_LINUX_BITOPS_H_
+
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <asm/hweight.h>
+
+#define BITS_PER_LONG __WORDSIZE
+#define BITS_PER_BYTE 8
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+static inline void set_bit(int nr, unsigned long *addr)
+{
+ addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
+}
+
+static inline void clear_bit(int nr, unsigned long *addr)
+{
+ addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
+}
+
+static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
+{
+ return ((1UL << (nr % BITS_PER_LONG)) &
+ (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
+}
+
+static inline unsigned long hweight_long(unsigned long w)
+{
+ return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+}
+
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+ int num = 0;
+
+#if BITS_PER_LONG == 64
+ if ((word & 0xffffffff) == 0) {
+ num += 32;
+ word >>= 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0)
+ num += 1;
+ return num;
+}
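+
+/*
+ * Editor's illustrative sketch, not part of the original patch: __ffs()
+ * returns the index of the lowest set bit, so __ffs(0x10) == 4; the result
+ * is undefined for 0, which is why callers test the word first. The helper
+ * name below is hypothetical.
+ */
+#if 0 /* example only, never compiled */
+static unsigned long ffs_example(void) /* hypothetical example helper */
+{
+ unsigned long word = 0x10;
+
+ return word ? __ffs(word) : 0; /* 4 */
+}
+#endif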
+
+/*
+ * Find the first set bit in a memory region.
+ */
+static inline unsigned long
+find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+
+ tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found:
+ return result + __ffs(tmp);
+}
+
+/*
+ * Find the next set bit in a memory region.
+ */
+static inline unsigned long
+find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+}
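+
+/*
+ * Editor's illustrative sketch, not part of the original patch:
+ * find_first_bit()/find_next_bit() are what back the for_each_set_bit()
+ * iterator declared above. The helper name below is hypothetical.
+ */
+#if 0 /* example only, never compiled */
+static void walk_bits_example(const unsigned long *mask, unsigned long nbits)
+{
+ /* hypothetical example helper */
+ unsigned long bit;
+
+ for_each_set_bit(bit, mask, nbits)
+ printf("bit %lu is set\n", bit);
+}
+#endif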
+
+#endif
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/compiler.h b/ANDROID_3.4.5/tools/perf/util/include/linux/compiler.h
new file mode 100644
index 00000000..547628e9
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/compiler.h
@@ -0,0 +1,14 @@
+#ifndef _PERF_LINUX_COMPILER_H_
+#define _PERF_LINUX_COMPILER_H_
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif
+#define __user
+#ifndef __attribute_const__
+#define __attribute_const__
+#endif
+
+#define __used __attribute__((__unused__))
+
+#endif
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/const.h b/ANDROID_3.4.5/tools/perf/util/include/linux/const.h
new file mode 100644
index 00000000..1b476c9a
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/const.h
@@ -0,0 +1 @@
+#include "../../../../include/linux/const.h"
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/ctype.h b/ANDROID_3.4.5/tools/perf/util/include/linux/ctype.h
new file mode 100644
index 00000000..a53d4ee1
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/ctype.h
@@ -0,0 +1 @@
+#include "../util.h"
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/export.h b/ANDROID_3.4.5/tools/perf/util/include/linux/export.h
new file mode 100644
index 00000000..b43e2dc2
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/export.h
@@ -0,0 +1,6 @@
+#ifndef PERF_LINUX_MODULE_H
+#define PERF_LINUX_MODULE_H
+
+#define EXPORT_SYMBOL(name)
+
+#endif
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/hash.h b/ANDROID_3.4.5/tools/perf/util/include/linux/hash.h
new file mode 100644
index 00000000..201f5739
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/hash.h
@@ -0,0 +1,5 @@
+#include "../../../../include/linux/hash.h"
+
+#ifndef PERF_HASH_H
+#define PERF_HASH_H
+#endif
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/kernel.h b/ANDROID_3.4.5/tools/perf/util/include/linux/kernel.h
new file mode 100644
index 00000000..1eb804fd
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/kernel.h
@@ -0,0 +1,111 @@
+#ifndef PERF_LINUX_KERNEL_H_
+#define PERF_LINUX_KERNEL_H_
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
+#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
+
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+
+#ifndef container_of
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+ const typeof(((type *)0)->member) * __mptr = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+#endif
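+
+/*
+ * Editor's illustrative sketch, not part of the original patch:
+ * container_of() recovers the enclosing object from a pointer to one of its
+ * members, which is how list_head based lists get back to their payload.
+ * The type and helper names below are hypothetical.
+ */
+#if 0 /* example only, never compiled */
+struct sample_node { /* hypothetical payload type */
+ int value;
+ struct list_head node;
+};
+
+static int node_value(struct list_head *pos) /* hypothetical example helper */
+{
+ struct sample_node *s = container_of(pos, struct sample_node, node);
+
+ return s->value;
+}
+#endif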
+
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
+
+#ifndef max
+#define max(x, y) ({ \
+ typeof(x) _max1 = (x); \
+ typeof(y) _max2 = (y); \
+ (void) (&_max1 == &_max2); \
+ _max1 > _max2 ? _max1 : _max2; })
+#endif
+
+#ifndef min
+#define min(x, y) ({ \
+ typeof(x) _min1 = (x); \
+ typeof(y) _min2 = (y); \
+ (void) (&_min1 == &_min2); \
+ _min1 < _min2 ? _min1 : _min2; })
+#endif
+
+#ifndef BUG_ON
+#define BUG_ON(cond) assert(!(cond))
+#endif
+
+/*
+ * Both need more care to handle endianness
+ * (Don't use bitmap_copy_le() for now)
+ */
+#define cpu_to_le64(x) (x)
+#define cpu_to_le32(x) (x)
+
+static inline int
+vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+ int i;
+ ssize_t ssize = size;
+
+ i = vsnprintf(buf, size, fmt, args);
+
+ return (i >= ssize) ? (ssize - 1) : i;
+}
+
+static inline int scnprintf(char * buf, size_t size, const char * fmt, ...)
+{
+ va_list args;
+ ssize_t ssize = size;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ return (i >= ssize) ? (ssize - 1) : i;
+}
+
+static inline unsigned long
+simple_strtoul(const char *nptr, char **endptr, int base)
+{
+ return strtoul(nptr, endptr, base);
+}
+
+int eprintf(int level,
+ const char *fmt, ...) __attribute__((format(printf, 2, 3)));
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+#define pr_err(fmt, ...) \
+ eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+ eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+ eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug(fmt, ...) \
+ eprintf(1, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debugN(n, fmt, ...) \
+ eprintf(n, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
+
+#endif
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/linkage.h b/ANDROID_3.4.5/tools/perf/util/include/linux/linkage.h
new file mode 100644
index 00000000..06387cff
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/linkage.h
@@ -0,0 +1,13 @@
+
+#ifndef PERF_LINUX_LINKAGE_H_
+#define PERF_LINUX_LINKAGE_H_
+
+/* linkage.h ... for including arch/x86/lib/memcpy_64.S */
+
+#define ENTRY(name) \
+ .globl name; \
+ name:
+
+#define ENDPROC(name)
+
+#endif /* PERF_LINUX_LINKAGE_H_ */
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/list.h b/ANDROID_3.4.5/tools/perf/util/include/linux/list.h
new file mode 100644
index 00000000..1d928a0c
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/list.h
@@ -0,0 +1,29 @@
+#include <linux/kernel.h>
+#include <linux/prefetch.h>
+
+#include "../../../../include/linux/list.h"
+
+#ifndef PERF_LIST_H
+#define PERF_LIST_H
+/**
+ * list_del_range - deletes range of entries from list.
+ * @begin: first element in the range to delete from the list.
+ * @end: last element in the range to delete from the list.
+ * Note: list_empty on the range of entries does not return true after this;
+ * the entries are in an undefined state.
+ */
+static inline void list_del_range(struct list_head *begin,
+ struct list_head *end)
+{
+ begin->prev->next = end->next;
+ end->next->prev = begin->prev;
+}
+
+/**
+ * list_for_each_from - iterate over a list from one of its nodes
+ * @pos: the &struct list_head to use as a loop cursor, from where to start
+ * @head: the head for your list.
+ */
+#define list_for_each_from(pos, head) \
+ for (; pos != (head); pos = pos->next)
+#endif
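+
+/*
+ * Editor's illustrative sketch, not part of the original patch:
+ * list_del_range() unlinks a whole [begin, end] span in O(1), e.g. dropping
+ * everything from some node to the tail of the list. The helper name below
+ * is hypothetical.
+ */
+#if 0 /* example only, never compiled */
+static void drop_tail_example(struct list_head *head, struct list_head *begin)
+{
+ /* hypothetical example helper: unlink from 'begin' to the last element */
+ list_del_range(begin, head->prev);
+}
+#endif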
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/poison.h b/ANDROID_3.4.5/tools/perf/util/include/linux/poison.h
new file mode 100644
index 00000000..fef6dbc9
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/poison.h
@@ -0,0 +1 @@
+#include "../../../../include/linux/poison.h"
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/prefetch.h b/ANDROID_3.4.5/tools/perf/util/include/linux/prefetch.h
new file mode 100644
index 00000000..7841e485
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/prefetch.h
@@ -0,0 +1,6 @@
+#ifndef PERF_LINUX_PREFETCH_H
+#define PERF_LINUX_PREFETCH_H
+
+static inline void prefetch(void *a __attribute__((unused))) { }
+
+#endif
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/rbtree.h b/ANDROID_3.4.5/tools/perf/util/include/linux/rbtree.h
new file mode 100644
index 00000000..7a243a14
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/rbtree.h
@@ -0,0 +1 @@
+#include "../../../../include/linux/rbtree.h"
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/string.h b/ANDROID_3.4.5/tools/perf/util/include/linux/string.h
new file mode 100644
index 00000000..3b2f5900
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/string.h
@@ -0,0 +1 @@
+#include <string.h>
diff --git a/ANDROID_3.4.5/tools/perf/util/include/linux/types.h b/ANDROID_3.4.5/tools/perf/util/include/linux/types.h
new file mode 100644
index 00000000..12de3b81
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/include/linux/types.h
@@ -0,0 +1,21 @@
+#ifndef _PERF_LINUX_TYPES_H_
+#define _PERF_LINUX_TYPES_H_
+
+#include <asm/types.h>
+
+#define DECLARE_BITMAP(name,bits) \
+ unsigned long name[BITS_TO_LONGS(bits)]
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+#endif
diff --git a/ANDROID_3.4.5/tools/perf/util/levenshtein.c b/ANDROID_3.4.5/tools/perf/util/levenshtein.c
new file mode 100644
index 00000000..e521d151
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/levenshtein.c
@@ -0,0 +1,84 @@
+#include "cache.h"
+#include "levenshtein.h"
+
+/*
+ * This function implements the Damerau-Levenshtein algorithm to
+ * calculate a distance between strings.
+ *
+ * Basically, it says how many letters need to be swapped, substituted,
+ * deleted from, or added to string1, at least, to get string2.
+ *
+ * The idea is to build a distance matrix for the substrings of both
+ * strings. To avoid a large space complexity, only the last three rows
+ * are kept in memory (if swaps had a cost equal to or higher than one
+ * deletion plus one insertion, only two rows would be needed).
+ *
+ * At any stage, "i + 1" denotes the length of the current substring of
+ * string1 that the distance is calculated for.
+ *
+ * row2 holds the current row, row1 the previous row (i.e. for the substring
+ * of string1 of length "i"), and row0 the row before that.
+ *
+ * In other words, at the start of the big loop, row2[j + 1] contains the
+ * Damerau-Levenshtein distance between the substring of string1 of length
+ * "i" and the substring of string2 of length "j + 1".
+ *
+ * All the big loop does is determine the partial minimum-cost paths.
+ *
+ * It does so by calculating the costs of the path ending in characters
+ * i (in string1) and j (in string2), respectively, given that the last
+ * operation is a substitution, a swap, a deletion, or an insertion.
+ *
+ * This implementation allows the costs to be weighted:
+ *
+ * - w (as in "sWap")
+ * - s (as in "Substitution")
+ * - a (for insertion, AKA "Add")
+ * - d (as in "Deletion")
+ *
+ * Note that this algorithm calculates a distance _iff_ d == a.
+ */
+int levenshtein(const char *string1, const char *string2,
+ int w, int s, int a, int d)
+{
+ int len1 = strlen(string1), len2 = strlen(string2);
+ int *row0 = malloc(sizeof(int) * (len2 + 1));
+ int *row1 = malloc(sizeof(int) * (len2 + 1));
+ int *row2 = malloc(sizeof(int) * (len2 + 1));
+ int i, j;
+
+ for (j = 0; j <= len2; j++)
+ row1[j] = j * a;
+ for (i = 0; i < len1; i++) {
+ int *dummy;
+
+ row2[0] = (i + 1) * d;
+ for (j = 0; j < len2; j++) {
+ /* substitution */
+ row2[j + 1] = row1[j] + s * (string1[i] != string2[j]);
+ /* swap */
+ if (i > 0 && j > 0 && string1[i - 1] == string2[j] &&
+ string1[i] == string2[j - 1] &&
+ row2[j + 1] > row0[j - 1] + w)
+ row2[j + 1] = row0[j - 1] + w;
+ /* deletion */
+ if (row2[j + 1] > row1[j + 1] + d)
+ row2[j + 1] = row1[j + 1] + d;
+ /* insertion */
+ if (row2[j + 1] > row2[j] + a)
+ row2[j + 1] = row2[j] + a;
+ }
+
+ dummy = row0;
+ row0 = row1;
+ row1 = row2;
+ row2 = dummy;
+ }
+
+ i = row1[len2];
+ free(row0);
+ free(row1);
+ free(row2);
+
+ return i;
+}
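+
+/*
+ * Editor's illustrative sketch, not part of the original patch: with unit
+ * costs this degenerates to the classic edit distance, e.g.
+ * levenshtein("kitten", "sitting", 1, 1, 1, 1) == 3 (two substitutions plus
+ * one insertion). The helper name below is hypothetical.
+ */
+#if 0 /* example only, never compiled */
+static int edit_distance_example(void) /* hypothetical example helper */
+{
+ return levenshtein("kitten", "sitting", 1, 1, 1, 1); /* 3 */
+}
+#endif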
diff --git a/ANDROID_3.4.5/tools/perf/util/levenshtein.h b/ANDROID_3.4.5/tools/perf/util/levenshtein.h
new file mode 100644
index 00000000..b0fcb6d8
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/levenshtein.h
@@ -0,0 +1,8 @@
+#ifndef __PERF_LEVENSHTEIN_H
+#define __PERF_LEVENSHTEIN_H
+
+int levenshtein(const char *string1, const char *string2,
+ int swap_penalty, int substition_penalty,
+ int insertion_penalty, int deletion_penalty);
+
+#endif /* __PERF_LEVENSHTEIN_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/map.c b/ANDROID_3.4.5/tools/perf/util/map.c
new file mode 100644
index 00000000..35ae5686
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/map.c
@@ -0,0 +1,715 @@
+#include "symbol.h"
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include "map.h"
+
+const char *map_type__name[MAP__NR_TYPES] = {
+ [MAP__FUNCTION] = "Functions",
+ [MAP__VARIABLE] = "Variables",
+};
+
+static inline int is_anon_memory(const char *filename)
+{
+ return strcmp(filename, "//anon") == 0;
+}
+
+static inline int is_no_dso_memory(const char *filename)
+{
+ return !strcmp(filename, "[stack]") ||
+ !strcmp(filename, "[vdso]") ||
+ !strcmp(filename, "[heap]");
+}
+
+void map__init(struct map *self, enum map_type type,
+ u64 start, u64 end, u64 pgoff, struct dso *dso)
+{
+ self->type = type;
+ self->start = start;
+ self->end = end;
+ self->pgoff = pgoff;
+ self->dso = dso;
+ self->map_ip = map__map_ip;
+ self->unmap_ip = map__unmap_ip;
+ RB_CLEAR_NODE(&self->rb_node);
+ self->groups = NULL;
+ self->referenced = false;
+ self->erange_warned = false;
+}
+
+struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
+ u64 pgoff, u32 pid, char *filename,
+ enum map_type type)
+{
+ struct map *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ char newfilename[PATH_MAX];
+ struct dso *dso;
+ int anon, no_dso;
+
+ anon = is_anon_memory(filename);
+ no_dso = is_no_dso_memory(filename);
+
+ if (anon) {
+ snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
+ filename = newfilename;
+ }
+
+ dso = __dsos__findnew(dsos__list, filename);
+ if (dso == NULL)
+ goto out_delete;
+
+ map__init(self, type, start, start + len, pgoff, dso);
+
+ if (anon || no_dso) {
+ self->map_ip = self->unmap_ip = identity__map_ip;
+
+ /*
+ * Set memory without DSO as loaded. All map__find_*
+ * functions still return NULL, and we avoid the
+ * unnecessary map__load warning.
+ */
+ if (no_dso)
+ dso__set_loaded(dso, self->type);
+ }
+ }
+ return self;
+out_delete:
+ free(self);
+ return NULL;
+}
+
+void map__delete(struct map *self)
+{
+ free(self);
+}
+
+void map__fixup_start(struct map *self)
+{
+ struct rb_root *symbols = &self->dso->symbols[self->type];
+ struct rb_node *nd = rb_first(symbols);
+ if (nd != NULL) {
+ struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
+ self->start = sym->start;
+ }
+}
+
+void map__fixup_end(struct map *self)
+{
+ struct rb_root *symbols = &self->dso->symbols[self->type];
+ struct rb_node *nd = rb_last(symbols);
+ if (nd != NULL) {
+ struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
+ self->end = sym->end;
+ }
+}
+
+#define DSO__DELETED "(deleted)"
+
+int map__load(struct map *self, symbol_filter_t filter)
+{
+ const char *name = self->dso->long_name;
+ int nr;
+
+ if (dso__loaded(self->dso, self->type))
+ return 0;
+
+ nr = dso__load(self->dso, self, filter);
+ if (nr < 0) {
+ if (self->dso->has_build_id) {
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ build_id__sprintf(self->dso->build_id,
+ sizeof(self->dso->build_id),
+ sbuild_id);
+ pr_warning("%s with build id %s not found",
+ name, sbuild_id);
+ } else
+ pr_warning("Failed to open %s", name);
+
+ pr_warning(", continuing without symbols\n");
+ return -1;
+ } else if (nr == 0) {
+ const size_t len = strlen(name);
+ const size_t real_len = len - sizeof(DSO__DELETED);
+
+ if (len > sizeof(DSO__DELETED) &&
+ strcmp(name + real_len + 1, DSO__DELETED) == 0) {
+ pr_warning("%.*s was updated (is prelink enabled?). "
+ "Restart the long running apps that use it!\n",
+ (int)real_len, name);
+ } else {
+ pr_warning("no symbols found in %s, maybe install "
+ "a debug package?\n", name);
+ }
+
+ return -1;
+ }
+ /*
+ * Only applies to the kernel, as its symtabs aren't relative like the
+ * module ones.
+ */
+ if (self->dso->kernel)
+ map__reloc_vmlinux(self);
+
+ return 0;
+}
+
+struct symbol *map__find_symbol(struct map *self, u64 addr,
+ symbol_filter_t filter)
+{
+ if (map__load(self, filter) < 0)
+ return NULL;
+
+ return dso__find_symbol(self->dso, self->type, addr);
+}
+
+struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+ symbol_filter_t filter)
+{
+ if (map__load(self, filter) < 0)
+ return NULL;
+
+ if (!dso__sorted_by_name(self->dso, self->type))
+ dso__sort_by_name(self->dso, self->type);
+
+ return dso__find_symbol_by_name(self->dso, self->type, name);
+}
+
+struct map *map__clone(struct map *self)
+{
+ struct map *map = malloc(sizeof(*self));
+
+ if (!map)
+ return NULL;
+
+ memcpy(map, self, sizeof(*self));
+
+ return map;
+}
+
+int map__overlap(struct map *l, struct map *r)
+{
+ if (l->start > r->start) {
+ struct map *t = l;
+ l = r;
+ r = t;
+ }
+
+ if (l->end > r->start)
+ return 1;
+
+ return 0;
+}
+
+size_t map__fprintf(struct map *self, FILE *fp)
+{
+ return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
+ self->start, self->end, self->pgoff, self->dso->name);
+}
+
+size_t map__fprintf_dsoname(struct map *map, FILE *fp)
+{
+ const char *dsoname = "[unknown]"; /* fallback if no usable dso name */
+
+ if (map && map->dso && (map->dso->name || map->dso->long_name)) {
+ if (symbol_conf.show_kernel_path && map->dso->long_name)
+ dsoname = map->dso->long_name;
+ else if (map->dso->name)
+ dsoname = map->dso->name;
+ } else
+ dsoname = "[unknown]";
+
+ return fprintf(fp, "%s", dsoname);
+}
+
+/*
+ * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
+ * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
+ */
+u64 map__rip_2objdump(struct map *map, u64 rip)
+{
+ u64 addr = map->dso->adjust_symbols ?
+ map->unmap_ip(map, rip) : /* RIP -> IP */
+ rip;
+ return addr;
+}
+
+u64 map__objdump_2ip(struct map *map, u64 addr)
+{
+ u64 ip = map->dso->adjust_symbols ?
+ addr :
+ map->unmap_ip(map, addr); /* RIP -> IP */
+ return ip;
+}
+
+void map_groups__init(struct map_groups *mg)
+{
+ int i;
+ for (i = 0; i < MAP__NR_TYPES; ++i) {
+ mg->maps[i] = RB_ROOT;
+ INIT_LIST_HEAD(&mg->removed_maps[i]);
+ }
+ mg->machine = NULL;
+}
+
+static void maps__delete(struct rb_root *maps)
+{
+ struct rb_node *next = rb_first(maps);
+
+ while (next) {
+ struct map *pos = rb_entry(next, struct map, rb_node);
+
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, maps);
+ map__delete(pos);
+ }
+}
+
+static void maps__delete_removed(struct list_head *maps)
+{
+ struct map *pos, *n;
+
+ list_for_each_entry_safe(pos, n, maps, node) {
+ list_del(&pos->node);
+ map__delete(pos);
+ }
+}
+
+void map_groups__exit(struct map_groups *mg)
+{
+ int i;
+
+ for (i = 0; i < MAP__NR_TYPES; ++i) {
+ maps__delete(&mg->maps[i]);
+ maps__delete_removed(&mg->removed_maps[i]);
+ }
+}
+
+void map_groups__flush(struct map_groups *mg)
+{
+ int type;
+
+ for (type = 0; type < MAP__NR_TYPES; type++) {
+ struct rb_root *root = &mg->maps[type];
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ struct map *pos = rb_entry(next, struct map, rb_node);
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, root);
+ /*
+ * We may have references to this map, for
+ * instance in some hist_entry instances, so
+ * just move them to a separate list.
+ */
+ list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
+ }
+ }
+}
+
+struct symbol *map_groups__find_symbol(struct map_groups *mg,
+ enum map_type type, u64 addr,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ struct map *map = map_groups__find(mg, type, addr);
+
+ if (map != NULL) {
+ if (mapp != NULL)
+ *mapp = map;
+ return map__find_symbol(map, map->map_ip(map, addr), filter);
+ }
+
+ return NULL;
+}
+
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
+ enum map_type type,
+ const char *name,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+ struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
+
+ if (sym == NULL)
+ continue;
+ if (mapp != NULL)
+ *mapp = pos;
+ return sym;
+ }
+
+ return NULL;
+}
+
+size_t __map_groups__fprintf_maps(struct map_groups *mg,
+ enum map_type type, int verbose, FILE *fp)
+{
+ size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
+ struct rb_node *nd;
+
+ for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+ printed += fprintf(fp, "Map:");
+ printed += map__fprintf(pos, fp);
+ if (verbose > 2) {
+ printed += dso__fprintf(pos->dso, type, fp);
+ printed += fprintf(fp, "--\n");
+ }
+ }
+
+ return printed;
+}
+
+size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
+{
+ size_t printed = 0, i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
+ return printed;
+}
+
+static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
+ enum map_type type,
+ int verbose, FILE *fp)
+{
+ struct map *pos;
+ size_t printed = 0;
+
+ list_for_each_entry(pos, &mg->removed_maps[type], node) {
+ printed += fprintf(fp, "Map:");
+ printed += map__fprintf(pos, fp);
+ if (verbose > 1) {
+ printed += dso__fprintf(pos->dso, type, fp);
+ printed += fprintf(fp, "--\n");
+ }
+ }
+ return printed;
+}
+
+static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
+ int verbose, FILE *fp)
+{
+ size_t printed = 0, i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
+ return printed;
+}
+
+size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
+{
+ size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
+ printed += fprintf(fp, "Removed maps:\n");
+ return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
+}
+
+int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
+ int verbose, FILE *fp)
+{
+ struct rb_root *root = &mg->maps[map->type];
+ struct rb_node *next = rb_first(root);
+ int err = 0;
+
+ while (next) {
+ struct map *pos = rb_entry(next, struct map, rb_node);
+ next = rb_next(&pos->rb_node);
+
+ if (!map__overlap(pos, map))
+ continue;
+
+ if (verbose >= 2) {
+ fputs("overlapping maps:\n", fp);
+ map__fprintf(map, fp);
+ map__fprintf(pos, fp);
+ }
+
+ rb_erase(&pos->rb_node, root);
+ /*
+ * Now check if we need to create new maps for areas not
+ * overlapped by the new map:
+ */
+ if (map->start > pos->start) {
+ struct map *before = map__clone(pos);
+
+ if (before == NULL) {
+ err = -ENOMEM;
+ goto move_map;
+ }
+
+ before->end = map->start - 1;
+ map_groups__insert(mg, before);
+ if (verbose >= 2)
+ map__fprintf(before, fp);
+ }
+
+ if (map->end < pos->end) {
+ struct map *after = map__clone(pos);
+
+ if (after == NULL) {
+ err = -ENOMEM;
+ goto move_map;
+ }
+
+ after->start = map->end + 1;
+ map_groups__insert(mg, after);
+ if (verbose >= 2)
+ map__fprintf(after, fp);
+ }
+move_map:
+ /*
+ * If we have references, just move them to a separate list.
+ */
+ if (pos->referenced)
+ list_add_tail(&pos->node, &mg->removed_maps[map->type]);
+ else
+ map__delete(pos);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
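/*
 * Minimal standalone sketch (illustrative only, not part of the patch above):
 * the interval arithmetic map_groups__fixup_overlappings() performs when a new
 * map overlaps an existing one -- keep a "before" piece ending just under the
 * new start and an "after" piece starting just past the new end.  'struct
 * range' is a stand-in used only for this example, not struct map.
 */
#include <assert.h>
#include <stdint.h>

struct range { uint64_t start, end; };		/* inclusive [start, end] */

static int split_overlap(struct range old, struct range new,
			 struct range *before, struct range *after)
{
	int pieces = 0;

	if (new.start > old.start) {		/* area below the new map survives */
		before->start = old.start;
		before->end = new.start - 1;
		pieces++;
	}
	if (new.end < old.end) {		/* area above the new map survives */
		after->start = new.end + 1;
		after->end = old.end;
		pieces++;
	}
	return pieces;
}

int main(void)
{
	struct range old = { 0x1000, 0x4fff }, new = { 0x2000, 0x2fff };
	struct range before, after;

	assert(split_overlap(old, new, &before, &after) == 2);
	assert(before.end == 0x1fff && after.start == 0x3000);
	return 0;
}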
+
+/*
+ * XXX This should not really _copy_ the maps, but refcount them.
+ */
+int map_groups__clone(struct map_groups *mg,
+ struct map_groups *parent, enum map_type type)
+{
+ struct rb_node *nd;
+ for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
+ struct map *map = rb_entry(nd, struct map, rb_node);
+ struct map *new = map__clone(map);
+ if (new == NULL)
+ return -ENOMEM;
+ map_groups__insert(mg, new);
+ }
+ return 0;
+}
+
+static u64 map__reloc_map_ip(struct map *map, u64 ip)
+{
+ return ip + (s64)map->pgoff;
+}
+
+static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
+{
+ return ip - (s64)map->pgoff;
+}
+
+void map__reloc_vmlinux(struct map *self)
+{
+ struct kmap *kmap = map__kmap(self);
+ s64 reloc;
+
+ if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
+ return;
+
+ reloc = (kmap->ref_reloc_sym->unrelocated_addr -
+ kmap->ref_reloc_sym->addr);
+
+ if (!reloc)
+ return;
+
+ self->map_ip = map__reloc_map_ip;
+ self->unmap_ip = map__reloc_unmap_ip;
+ self->pgoff = reloc;
+}
+
+void maps__insert(struct rb_root *maps, struct map *map)
+{
+ struct rb_node **p = &maps->rb_node;
+ struct rb_node *parent = NULL;
+ const u64 ip = map->start;
+ struct map *m;
+
+ while (*p != NULL) {
+ parent = *p;
+ m = rb_entry(parent, struct map, rb_node);
+ if (ip < m->start)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&map->rb_node, parent, p);
+ rb_insert_color(&map->rb_node, maps);
+}
+
+void maps__remove(struct rb_root *self, struct map *map)
+{
+ rb_erase(&map->rb_node, self);
+}
+
+struct map *maps__find(struct rb_root *maps, u64 ip)
+{
+ struct rb_node **p = &maps->rb_node;
+ struct rb_node *parent = NULL;
+ struct map *m;
+
+ while (*p != NULL) {
+ parent = *p;
+ m = rb_entry(parent, struct map, rb_node);
+ if (ip < m->start)
+ p = &(*p)->rb_left;
+ else if (ip > m->end)
+ p = &(*p)->rb_right;
+ else
+ return m;
+ }
+
+ return NULL;
+}
+
+int machine__init(struct machine *self, const char *root_dir, pid_t pid)
+{
+ map_groups__init(&self->kmaps);
+ RB_CLEAR_NODE(&self->rb_node);
+ INIT_LIST_HEAD(&self->user_dsos);
+ INIT_LIST_HEAD(&self->kernel_dsos);
+
+ self->threads = RB_ROOT;
+ INIT_LIST_HEAD(&self->dead_threads);
+ self->last_match = NULL;
+
+ self->kmaps.machine = self;
+ self->pid = pid;
+ self->root_dir = strdup(root_dir);
+ return self->root_dir == NULL ? -ENOMEM : 0;
+}
+
+static void dsos__delete(struct list_head *self)
+{
+ struct dso *pos, *n;
+
+ list_for_each_entry_safe(pos, n, self, node) {
+ list_del(&pos->node);
+ dso__delete(pos);
+ }
+}
+
+void machine__exit(struct machine *self)
+{
+ map_groups__exit(&self->kmaps);
+ dsos__delete(&self->user_dsos);
+ dsos__delete(&self->kernel_dsos);
+ free(self->root_dir);
+ self->root_dir = NULL;
+}
+
+void machine__delete(struct machine *self)
+{
+ machine__exit(self);
+ free(self);
+}
+
+struct machine *machines__add(struct rb_root *self, pid_t pid,
+ const char *root_dir)
+{
+ struct rb_node **p = &self->rb_node;
+ struct rb_node *parent = NULL;
+ struct machine *pos, *machine = malloc(sizeof(*machine));
+
+ if (!machine)
+ return NULL;
+
+ if (machine__init(machine, root_dir, pid) != 0) {
+ free(machine);
+ return NULL;
+ }
+
+ while (*p != NULL) {
+ parent = *p;
+ pos = rb_entry(parent, struct machine, rb_node);
+ if (pid < pos->pid)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&machine->rb_node, parent, p);
+ rb_insert_color(&machine->rb_node, self);
+
+ return machine;
+}
+
+struct machine *machines__find(struct rb_root *self, pid_t pid)
+{
+ struct rb_node **p = &self->rb_node;
+ struct rb_node *parent = NULL;
+ struct machine *machine;
+ struct machine *default_machine = NULL;
+
+ while (*p != NULL) {
+ parent = *p;
+ machine = rb_entry(parent, struct machine, rb_node);
+ if (pid < machine->pid)
+ p = &(*p)->rb_left;
+ else if (pid > machine->pid)
+ p = &(*p)->rb_right;
+ else
+ return machine;
+ if (!machine->pid)
+ default_machine = machine;
+ }
+
+ return default_machine;
+}
+
+struct machine *machines__findnew(struct rb_root *self, pid_t pid)
+{
+ char path[PATH_MAX];
+ const char *root_dir;
+ struct machine *machine = machines__find(self, pid);
+
+ if (!machine || machine->pid != pid) {
+ if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
+ root_dir = "";
+ else {
+ if (!symbol_conf.guestmount)
+ goto out;
+ sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
+ if (access(path, R_OK)) {
+ pr_err("Can't access file %s\n", path);
+ goto out;
+ }
+ root_dir = path;
+ }
+ machine = machines__add(self, pid, root_dir);
+ }
+
+out:
+ return machine;
+}
+
+void machines__process(struct rb_root *self, machine__process_t process, void *data)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ process(pos, data);
+ }
+}
+
+char *machine__mmap_name(struct machine *self, char *bf, size_t size)
+{
+ if (machine__is_host(self))
+ snprintf(bf, size, "[%s]", "kernel.kallsyms");
+ else if (machine__is_default_guest(self))
+ snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
+ else
+ snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);
+
+ return bf;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/map.h b/ANDROID_3.4.5/tools/perf/util/map.h
new file mode 100644
index 00000000..81371bad
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/map.h
@@ -0,0 +1,258 @@
+#ifndef __PERF_MAP_H
+#define __PERF_MAP_H
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include "types.h"
+
+enum map_type {
+ MAP__FUNCTION = 0,
+ MAP__VARIABLE,
+};
+
+#define MAP__NR_TYPES (MAP__VARIABLE + 1)
+
+extern const char *map_type__name[MAP__NR_TYPES];
+
+struct dso;
+struct ip_callchain;
+struct ref_reloc_sym;
+struct map_groups;
+struct machine;
+struct perf_evsel;
+
+struct map {
+ union {
+ struct rb_node rb_node;
+ struct list_head node;
+ };
+ u64 start;
+ u64 end;
+ u8 /* enum map_type */ type;
+ bool referenced;
+ bool erange_warned;
+ u32 priv;
+ u64 pgoff;
+
+ /* ip -> dso rip */
+ u64 (*map_ip)(struct map *, u64);
+ /* dso rip -> ip */
+ u64 (*unmap_ip)(struct map *, u64);
+
+ struct dso *dso;
+ struct map_groups *groups;
+};
+
+struct kmap {
+ struct ref_reloc_sym *ref_reloc_sym;
+ struct map_groups *kmaps;
+};
+
+struct map_groups {
+ struct rb_root maps[MAP__NR_TYPES];
+ struct list_head removed_maps[MAP__NR_TYPES];
+ struct machine *machine;
+};
+
+/* The native host kernel uses -1 as the pid in struct machine */
+#define HOST_KERNEL_ID (-1)
+#define DEFAULT_GUEST_KERNEL_ID (0)
+
+struct machine {
+ struct rb_node rb_node;
+ pid_t pid;
+ u16 id_hdr_size;
+ char *root_dir;
+ struct rb_root threads;
+ struct list_head dead_threads;
+ struct thread *last_match;
+ struct list_head user_dsos;
+ struct list_head kernel_dsos;
+ struct map_groups kmaps;
+ struct map *vmlinux_maps[MAP__NR_TYPES];
+};
+
+static inline
+struct map *machine__kernel_map(struct machine *self, enum map_type type)
+{
+ return self->vmlinux_maps[type];
+}
+
+static inline struct kmap *map__kmap(struct map *self)
+{
+ return (struct kmap *)(self + 1);
+}
+
+static inline u64 map__map_ip(struct map *map, u64 ip)
+{
+ return ip - map->start + map->pgoff;
+}
+
+static inline u64 map__unmap_ip(struct map *map, u64 ip)
+{
+ return ip + map->start - map->pgoff;
+}
+
+static inline u64 identity__map_ip(struct map *map __used, u64 ip)
+{
+ return ip;
+}
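/*
 * Minimal standalone sketch (illustrative only, not part of the patch above):
 * the ip <-> dso-relative address arithmetic implemented by map__map_ip() and
 * map__unmap_ip() above.  'struct mini_map' is a stand-in for this example,
 * not the real struct map.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

struct mini_map { uint64_t start, pgoff; };

static uint64_t mini_map_ip(const struct mini_map *m, uint64_t ip)
{
	return ip - m->start + m->pgoff;	/* absolute ip -> dso rip */
}

static uint64_t mini_unmap_ip(const struct mini_map *m, uint64_t rip)
{
	return rip + m->start - m->pgoff;	/* dso rip -> absolute ip */
}

int main(void)
{
	struct mini_map m = { .start = 0x400000, .pgoff = 0x1000 };
	uint64_t ip = 0x401234;
	uint64_t rip = mini_map_ip(&m, ip);	/* 0x2234 */

	assert(mini_unmap_ip(&m, rip) == ip);	/* the two helpers are inverses */
	printf("ip 0x%" PRIx64 " -> dso rip 0x%" PRIx64 "\n", ip, rip);
	return 0;
}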
+
+
+/* rip/ip <-> addr suitable for passing to `objdump --start-address=` */
+u64 map__rip_2objdump(struct map *map, u64 rip);
+u64 map__objdump_2ip(struct map *map, u64 addr);
+
+struct symbol;
+
+typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
+
+void map__init(struct map *self, enum map_type type,
+ u64 start, u64 end, u64 pgoff, struct dso *dso);
+struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
+ u64 pgoff, u32 pid, char *filename,
+ enum map_type type);
+void map__delete(struct map *self);
+struct map *map__clone(struct map *self);
+int map__overlap(struct map *l, struct map *r);
+size_t map__fprintf(struct map *self, FILE *fp);
+size_t map__fprintf_dsoname(struct map *map, FILE *fp);
+
+int map__load(struct map *self, symbol_filter_t filter);
+struct symbol *map__find_symbol(struct map *self,
+ u64 addr, symbol_filter_t filter);
+struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+ symbol_filter_t filter);
+void map__fixup_start(struct map *self);
+void map__fixup_end(struct map *self);
+
+void map__reloc_vmlinux(struct map *self);
+
+size_t __map_groups__fprintf_maps(struct map_groups *mg,
+ enum map_type type, int verbose, FILE *fp);
+void maps__insert(struct rb_root *maps, struct map *map);
+void maps__remove(struct rb_root *maps, struct map *map);
+struct map *maps__find(struct rb_root *maps, u64 addr);
+void map_groups__init(struct map_groups *mg);
+void map_groups__exit(struct map_groups *mg);
+int map_groups__clone(struct map_groups *mg,
+ struct map_groups *parent, enum map_type type);
+size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp);
+size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp);
+
+typedef void (*machine__process_t)(struct machine *self, void *data);
+
+void machines__process(struct rb_root *self, machine__process_t process, void *data);
+struct machine *machines__add(struct rb_root *self, pid_t pid,
+ const char *root_dir);
+struct machine *machines__find_host(struct rb_root *self);
+struct machine *machines__find(struct rb_root *self, pid_t pid);
+struct machine *machines__findnew(struct rb_root *self, pid_t pid);
+char *machine__mmap_name(struct machine *self, char *bf, size_t size);
+int machine__init(struct machine *self, const char *root_dir, pid_t pid);
+void machine__exit(struct machine *self);
+void machine__delete(struct machine *self);
+
+int machine__resolve_callchain(struct machine *machine,
+ struct perf_evsel *evsel, struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent);
+int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name,
+ u64 addr);
+
+/*
+ * The default guest kernel is defined by the --guestkallsyms
+ * and --guestmodules parameters.
+ */
+static inline bool machine__is_default_guest(struct machine *self)
+{
+ return self ? self->pid == DEFAULT_GUEST_KERNEL_ID : false;
+}
+
+static inline bool machine__is_host(struct machine *self)
+{
+ return self ? self->pid == HOST_KERNEL_ID : false;
+}
+
+static inline void map_groups__insert(struct map_groups *mg, struct map *map)
+{
+ maps__insert(&mg->maps[map->type], map);
+ map->groups = mg;
+}
+
+static inline void map_groups__remove(struct map_groups *mg, struct map *map)
+{
+ maps__remove(&mg->maps[map->type], map);
+}
+
+static inline struct map *map_groups__find(struct map_groups *mg,
+ enum map_type type, u64 addr)
+{
+ return maps__find(&mg->maps[type], addr);
+}
+
+struct symbol *map_groups__find_symbol(struct map_groups *mg,
+ enum map_type type, u64 addr,
+ struct map **mapp,
+ symbol_filter_t filter);
+
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
+ enum map_type type,
+ const char *name,
+ struct map **mapp,
+ symbol_filter_t filter);
+
+
+struct thread *machine__findnew_thread(struct machine *machine, pid_t pid);
+void machine__remove_thread(struct machine *machine, struct thread *th);
+
+size_t machine__fprintf(struct machine *machine, FILE *fp);
+
+static inline
+struct symbol *machine__find_kernel_symbol(struct machine *self,
+ enum map_type type, u64 addr,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ return map_groups__find_symbol(&self->kmaps, type, addr, mapp, filter);
+}
+
+static inline
+struct symbol *machine__find_kernel_function(struct machine *self, u64 addr,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ return machine__find_kernel_symbol(self, MAP__FUNCTION, addr, mapp, filter);
+}
+
+static inline
+struct symbol *map_groups__find_function_by_name(struct map_groups *mg,
+ const char *name, struct map **mapp,
+ symbol_filter_t filter)
+{
+ return map_groups__find_symbol_by_name(mg, MAP__FUNCTION, name, mapp, filter);
+}
+
+static inline
+struct symbol *machine__find_kernel_function_by_name(struct machine *self,
+ const char *name,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ return map_groups__find_function_by_name(&self->kmaps, name, mapp,
+ filter);
+}
+
+int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
+ int verbose, FILE *fp);
+
+struct map *map_groups__find_by_name(struct map_groups *mg,
+ enum map_type type, const char *name);
+struct map *machine__new_module(struct machine *self, u64 start, const char *filename);
+
+void map_groups__flush(struct map_groups *mg);
+
+#endif /* __PERF_MAP_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/pager.c b/ANDROID_3.4.5/tools/perf/util/pager.c
new file mode 100644
index 00000000..1915de20
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/pager.c
@@ -0,0 +1,96 @@
+#include "cache.h"
+#include "run-command.h"
+#include "sigchain.h"
+
+/*
+ * This is split up from the rest of git so that we can do
+ * something different on Windows.
+ */
+
+static int spawned_pager;
+
+static void pager_preexec(void)
+{
+ /*
+ * Work around bug in "less" by not starting it until we
+ * have real input
+ */
+ fd_set in;
+
+ FD_ZERO(&in);
+ FD_SET(0, &in);
+ select(1, &in, NULL, &in, NULL);
+
+ setenv("LESS", "FRSX", 0);
+}
+
+static const char *pager_argv[] = { "sh", "-c", NULL, NULL };
+static struct child_process pager_process;
+
+static void wait_for_pager(void)
+{
+ fflush(stdout);
+ fflush(stderr);
+ /* signal EOF to pager */
+ close(1);
+ close(2);
+ finish_command(&pager_process);
+}
+
+static void wait_for_pager_signal(int signo)
+{
+ wait_for_pager();
+ sigchain_pop(signo);
+ raise(signo);
+}
+
+void setup_pager(void)
+{
+ const char *pager = getenv("PERF_PAGER");
+
+ if (!isatty(1))
+ return;
+ if (!pager) {
+ if (!pager_program)
+ perf_config(perf_default_config, NULL);
+ pager = pager_program;
+ }
+ if (!pager)
+ pager = getenv("PAGER");
+ if (!pager)
+ pager = "less";
+ else if (!*pager || !strcmp(pager, "cat"))
+ return;
+
+ spawned_pager = 1; /* means we are emitting to terminal */
+
+ /* spawn the pager */
+ pager_argv[2] = pager;
+ pager_process.argv = pager_argv;
+ pager_process.in = -1;
+ pager_process.preexec_cb = pager_preexec;
+
+ if (start_command(&pager_process))
+ return;
+
+ /* original process continues, but writes to the pipe */
+ dup2(pager_process.in, 1);
+ if (isatty(2))
+ dup2(pager_process.in, 2);
+ close(pager_process.in);
+
+ /* this makes sure that the parent terminates after the pager */
+ sigchain_push_common(wait_for_pager_signal);
+ atexit(wait_for_pager);
+}
+
+int pager_in_use(void)
+{
+ const char *env;
+
+ if (spawned_pager)
+ return 1;
+
+ env = getenv("PERF_PAGER_IN_USE");
+ return env ? perf_config_bool("PERF_PAGER_IN_USE", env) : 0;
+}
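/*
 * Minimal standalone sketch (illustrative only, not part of the patch above):
 * the core idea of setup_pager()/wait_for_pager() -- if stdout is a terminal,
 * start a pager, point stdout at its stdin, and make sure we outlive it.
 * This version uses popen() instead of perf's run-command helpers.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	FILE *pager;
	int i;

	if (!isatty(STDOUT_FILENO))	/* not a terminal: print directly */
		return 0;

	pager = popen("less -FRSX", "w");
	if (!pager)
		return 1;

	/* from now on, everything written to stdout goes to the pager */
	dup2(fileno(pager), STDOUT_FILENO);

	for (i = 0; i < 200; i++)
		printf("line %d\n", i);

	fflush(stdout);
	close(STDOUT_FILENO);	/* signal EOF to the pager ... */
	pclose(pager);		/* ... and wait for it to exit */
	return 0;
}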
diff --git a/ANDROID_3.4.5/tools/perf/util/parse-events.c b/ANDROID_3.4.5/tools/perf/util/parse-events.c
new file mode 100644
index 00000000..5b3a0ef4
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/parse-events.c
@@ -0,0 +1,1062 @@
+#include "../../../include/linux/hw_breakpoint.h"
+#include "util.h"
+#include "../perf.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "parse-options.h"
+#include "parse-events.h"
+#include "exec_cmd.h"
+#include "string.h"
+#include "symbol.h"
+#include "cache.h"
+#include "header.h"
+#include "debugfs.h"
+#include "parse-events-flex.h"
+#include "pmu.h"
+
+#define MAX_NAME_LEN 100
+
+struct event_symbol {
+ u8 type;
+ u64 config;
+ const char *symbol;
+ const char *alias;
+};
+
+int parse_events_parse(struct list_head *list, struct list_head *list_tmp,
+ int *idx);
+
+#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
+#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
+
+static struct event_symbol event_symbols[] = {
+ { CHW(CPU_CYCLES), "cpu-cycles", "cycles" },
+ { CHW(STALLED_CYCLES_FRONTEND), "stalled-cycles-frontend", "idle-cycles-frontend" },
+ { CHW(STALLED_CYCLES_BACKEND), "stalled-cycles-backend", "idle-cycles-backend" },
+ { CHW(INSTRUCTIONS), "instructions", "" },
+ { CHW(CACHE_REFERENCES), "cache-references", "" },
+ { CHW(CACHE_MISSES), "cache-misses", "" },
+ { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
+ { CHW(BRANCH_MISSES), "branch-misses", "" },
+ { CHW(BUS_CYCLES), "bus-cycles", "" },
+ { CHW(REF_CPU_CYCLES), "ref-cycles", "" },
+
+ { CSW(CPU_CLOCK), "cpu-clock", "" },
+ { CSW(TASK_CLOCK), "task-clock", "" },
+ { CSW(PAGE_FAULTS), "page-faults", "faults" },
+ { CSW(PAGE_FAULTS_MIN), "minor-faults", "" },
+ { CSW(PAGE_FAULTS_MAJ), "major-faults", "" },
+ { CSW(CONTEXT_SWITCHES), "context-switches", "cs" },
+ { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
+ { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" },
+ { CSW(EMULATION_FAULTS), "emulation-faults", "" },
+};
+
+#define __PERF_EVENT_FIELD(config, name) \
+ ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
+
+#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
+#define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
+#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
+#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
+
+static const char *hw_event_names[PERF_COUNT_HW_MAX] = {
+ "cycles",
+ "instructions",
+ "cache-references",
+ "cache-misses",
+ "branches",
+ "branch-misses",
+ "bus-cycles",
+ "stalled-cycles-frontend",
+ "stalled-cycles-backend",
+ "ref-cycles",
+};
+
+static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
+ "cpu-clock",
+ "task-clock",
+ "page-faults",
+ "context-switches",
+ "CPU-migrations",
+ "minor-faults",
+ "major-faults",
+ "alignment-faults",
+ "emulation-faults",
+};
+
+#define MAX_ALIASES 8
+
+static const char *hw_cache[PERF_COUNT_HW_CACHE_MAX][MAX_ALIASES] = {
+ { "L1-dcache", "l1-d", "l1d", "L1-data", },
+ { "L1-icache", "l1-i", "l1i", "L1-instruction", },
+ { "LLC", "L2", },
+ { "dTLB", "d-tlb", "Data-TLB", },
+ { "iTLB", "i-tlb", "Instruction-TLB", },
+ { "branch", "branches", "bpu", "btb", "bpc", },
+ { "node", },
+};
+
+static const char *hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][MAX_ALIASES] = {
+ { "load", "loads", "read", },
+ { "store", "stores", "write", },
+ { "prefetch", "prefetches", "speculative-read", "speculative-load", },
+};
+
+static const char *hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
+ [MAX_ALIASES] = {
+ { "refs", "Reference", "ops", "access", },
+ { "misses", "miss", },
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+#define CACHE_READ (1 << C(OP_READ))
+#define CACHE_WRITE (1 << C(OP_WRITE))
+#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
+#define COP(x) (1 << x)
+
+/*
+ * cache operation stats
+ * L1I : Read and prefetch only
+ * ITLB and BPU : Read-only
+ */
+static unsigned long hw_cache_stat[C(MAX)] = {
+ [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
+ [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(ITLB)] = (CACHE_READ),
+ [C(BPU)] = (CACHE_READ),
+ [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+};
+
+#define for_each_subsystem(sys_dir, sys_dirent, sys_next) \
+ while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \
+ if (sys_dirent.d_type == DT_DIR && \
+ (strcmp(sys_dirent.d_name, ".")) && \
+ (strcmp(sys_dirent.d_name, "..")))
+
+static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
+{
+ char evt_path[MAXPATHLEN];
+ int fd;
+
+ snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
+ sys_dir->d_name, evt_dir->d_name);
+ fd = open(evt_path, O_RDONLY);
+ if (fd < 0)
+ return -EINVAL;
+ close(fd);
+
+ return 0;
+}
+
+#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \
+ while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \
+ if (evt_dirent.d_type == DT_DIR && \
+ (strcmp(evt_dirent.d_name, ".")) && \
+ (strcmp(evt_dirent.d_name, "..")) && \
+ (!tp_event_has_id(&sys_dirent, &evt_dirent)))
+
+#define MAX_EVENT_LENGTH 512
+
+
+struct tracepoint_path *tracepoint_id_to_path(u64 config)
+{
+ struct tracepoint_path *path = NULL;
+ DIR *sys_dir, *evt_dir;
+ struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+ char id_buf[24];
+ int fd;
+ u64 id;
+ char evt_path[MAXPATHLEN];
+ char dir_path[MAXPATHLEN];
+
+ if (debugfs_valid_mountpoint(tracing_events_path))
+ return NULL;
+
+ sys_dir = opendir(tracing_events_path);
+ if (!sys_dir)
+ return NULL;
+
+ for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
+ sys_dirent.d_name);
+ evt_dir = opendir(dir_path);
+ if (!evt_dir)
+ continue;
+
+ for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+
+ snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
+ evt_dirent.d_name);
+ fd = open(evt_path, O_RDONLY);
+ if (fd < 0)
+ continue;
+ if (read(fd, id_buf, sizeof(id_buf)) < 0) {
+ close(fd);
+ continue;
+ }
+ close(fd);
+ id = atoll(id_buf);
+ if (id == config) {
+ closedir(evt_dir);
+ closedir(sys_dir);
+ path = zalloc(sizeof(*path));
+ if (!path)
+ return NULL;
+ path->system = malloc(MAX_EVENT_LENGTH);
+ if (!path->system) {
+ free(path);
+ return NULL;
+ }
+ path->name = malloc(MAX_EVENT_LENGTH);
+ if (!path->name) {
+ free(path->system);
+ free(path);
+ return NULL;
+ }
+ strncpy(path->system, sys_dirent.d_name,
+ MAX_EVENT_LENGTH);
+ strncpy(path->name, evt_dirent.d_name,
+ MAX_EVENT_LENGTH);
+ return path;
+ }
+ }
+ closedir(evt_dir);
+ }
+
+ closedir(sys_dir);
+ return NULL;
+}
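/*
 * Minimal standalone sketch (illustrative only, not part of the patch above):
 * how a tracepoint id is read from the tracing events directory, as
 * tracepoint_id_to_path() does.  The mount point below is only the usual
 * default; perf resolves the real one at runtime via tracing_events_path,
 * and "sched:sched_switch" is just an example event.
 */
#include <stdio.h>

int main(void)
{
	const char *events_dir = "/sys/kernel/debug/tracing/events";
	char path[512];
	unsigned long long id = 0;
	FILE *f;

	/* <events_dir>/<subsystem>/<event>/id holds the numeric tracepoint id */
	snprintf(path, sizeof(path), "%s/%s/%s/id",
		 events_dir, "sched", "sched_switch");

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%llu", &id) != 1)
		id = 0;
	fclose(f);

	printf("sched:sched_switch -> id %llu\n", id);
	return 0;
}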
+
+#define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
+static const char *tracepoint_id_to_name(u64 config)
+{
+ static char buf[TP_PATH_LEN];
+ struct tracepoint_path *path;
+
+ path = tracepoint_id_to_path(config);
+ if (path) {
+ snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
+ free(path->name);
+ free(path->system);
+ free(path);
+ } else
+ snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");
+
+ return buf;
+}
+
+static int is_cache_op_valid(u8 cache_type, u8 cache_op)
+{
+ if (hw_cache_stat[cache_type] & COP(cache_op))
+ return 1; /* valid */
+ else
+ return 0; /* invalid */
+}
+
+static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
+{
+ static char name[50];
+
+ if (cache_result) {
+ sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
+ hw_cache_op[cache_op][0],
+ hw_cache_result[cache_result][0]);
+ } else {
+ sprintf(name, "%s-%s", hw_cache[cache_type][0],
+ hw_cache_op[cache_op][1]);
+ }
+
+ return name;
+}
+
+const char *event_type(int type)
+{
+ switch (type) {
+ case PERF_TYPE_HARDWARE:
+ return "hardware";
+
+ case PERF_TYPE_SOFTWARE:
+ return "software";
+
+ case PERF_TYPE_TRACEPOINT:
+ return "tracepoint";
+
+ case PERF_TYPE_HW_CACHE:
+ return "hardware-cache";
+
+ default:
+ break;
+ }
+
+ return "unknown";
+}
+
+const char *event_name(struct perf_evsel *evsel)
+{
+ u64 config = evsel->attr.config;
+ int type = evsel->attr.type;
+
+ if (evsel->name)
+ return evsel->name;
+
+ return __event_name(type, config);
+}
+
+const char *__event_name(int type, u64 config)
+{
+ static char buf[32];
+
+ if (type == PERF_TYPE_RAW) {
+ sprintf(buf, "raw 0x%" PRIx64, config);
+ return buf;
+ }
+
+ switch (type) {
+ case PERF_TYPE_HARDWARE:
+ if (config < PERF_COUNT_HW_MAX && hw_event_names[config])
+ return hw_event_names[config];
+ return "unknown-hardware";
+
+ case PERF_TYPE_HW_CACHE: {
+ u8 cache_type, cache_op, cache_result;
+
+ cache_type = (config >> 0) & 0xff;
+ if (cache_type > PERF_COUNT_HW_CACHE_MAX)
+ return "unknown-ext-hardware-cache-type";
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
+ return "unknown-ext-hardware-cache-op";
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return "unknown-ext-hardware-cache-result";
+
+ if (!is_cache_op_valid(cache_type, cache_op))
+ return "invalid-cache";
+
+ return event_cache_name(cache_type, cache_op, cache_result);
+ }
+
+ case PERF_TYPE_SOFTWARE:
+ if (config < PERF_COUNT_SW_MAX && sw_event_names[config])
+ return sw_event_names[config];
+ return "unknown-software";
+
+ case PERF_TYPE_TRACEPOINT:
+ return tracepoint_id_to_name(config);
+
+ default:
+ break;
+ }
+
+ return "unknown";
+}
+
+static int add_event(struct list_head *list, int *idx,
+ struct perf_event_attr *attr, char *name)
+{
+ struct perf_evsel *evsel;
+
+ event_attr_init(attr);
+
+ evsel = perf_evsel__new(attr, (*idx)++);
+ if (!evsel)
+ return -ENOMEM;
+
+ list_add_tail(&evsel->node, list);
+
+ evsel->name = strdup(name);
+ return 0;
+}
+
+static int parse_aliases(char *str, const char *names[][MAX_ALIASES], int size)
+{
+ int i, j;
+ int n, longest = -1;
+
+ for (i = 0; i < size; i++) {
+ for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
+ n = strlen(names[i][j]);
+ if (n > longest && !strncasecmp(str, names[i][j], n))
+ longest = n;
+ }
+ if (longest > 0)
+ return i;
+ }
+
+ return -1;
+}
+
+int parse_events_add_cache(struct list_head *list, int *idx,
+ char *type, char *op_result1, char *op_result2)
+{
+ struct perf_event_attr attr;
+ char name[MAX_NAME_LEN];
+ int cache_type = -1, cache_op = -1, cache_result = -1;
+ char *op_result[2] = { op_result1, op_result2 };
+ int i, n;
+
+ /*
+ * No fallback - if we cannot get a clear cache type
+ * then bail out:
+ */
+ cache_type = parse_aliases(type, hw_cache,
+ PERF_COUNT_HW_CACHE_MAX);
+ if (cache_type == -1)
+ return -EINVAL;
+
+ n = snprintf(name, MAX_NAME_LEN, "%s", type);
+
+ for (i = 0; (i < 2) && (op_result[i]); i++) {
+ char *str = op_result[i];
+
+ snprintf(name + n, MAX_NAME_LEN - n, "-%s\n", str);
+
+ if (cache_op == -1) {
+ cache_op = parse_aliases(str, hw_cache_op,
+ PERF_COUNT_HW_CACHE_OP_MAX);
+ if (cache_op >= 0) {
+ if (!is_cache_op_valid(cache_type, cache_op))
+ return -EINVAL;
+ continue;
+ }
+ }
+
+ if (cache_result == -1) {
+ cache_result = parse_aliases(str, hw_cache_result,
+ PERF_COUNT_HW_CACHE_RESULT_MAX);
+ if (cache_result >= 0)
+ continue;
+ }
+ }
+
+ /*
+ * Fall back to reads:
+ */
+ if (cache_op == -1)
+ cache_op = PERF_COUNT_HW_CACHE_OP_READ;
+
+ /*
+ * Fall back to accesses:
+ */
+ if (cache_result == -1)
+ cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
+ attr.type = PERF_TYPE_HW_CACHE;
+ return add_event(list, idx, &attr, name);
+}
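/*
 * Minimal standalone sketch (illustrative only, not part of the patch above):
 * the PERF_TYPE_HW_CACHE config layout built by parse_events_add_cache() --
 * cache type in bits 0-7, op in bits 8-15, result in bits 16-23.  The enum
 * values below are the generic perf_event ones (L1D = 0, OP_READ = 0,
 * RESULT_MISS = 1).
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	unsigned int cache_type = 0;	/* PERF_COUNT_HW_CACHE_L1D */
	unsigned int cache_op = 0;	/* PERF_COUNT_HW_CACHE_OP_READ */
	unsigned int cache_result = 1;	/* PERF_COUNT_HW_CACHE_RESULT_MISS */
	uint64_t config = cache_type | (cache_op << 8) | (cache_result << 16);

	/* "L1-dcache-load-misses" encodes to 0x10000 */
	printf("config = 0x%" PRIx64 "\n", config);
	return 0;
}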
+
+static int add_tracepoint(struct list_head *list, int *idx,
+ char *sys_name, char *evt_name)
+{
+ struct perf_event_attr attr;
+ char name[MAX_NAME_LEN];
+ char evt_path[MAXPATHLEN];
+ char id_buf[4];
+ u64 id;
+ int fd;
+
+ snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
+ sys_name, evt_name);
+
+ fd = open(evt_path, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ if (read(fd, id_buf, sizeof(id_buf)) < 0) {
+ close(fd);
+ return -1;
+ }
+
+ close(fd);
+ id = atoll(id_buf);
+
+ memset(&attr, 0, sizeof(attr));
+ attr.config = id;
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type |= PERF_SAMPLE_RAW;
+ attr.sample_type |= PERF_SAMPLE_TIME;
+ attr.sample_type |= PERF_SAMPLE_CPU;
+ attr.sample_period = 1;
+
+ snprintf(name, MAX_NAME_LEN, "%s:%s", sys_name, evt_name);
+ return add_event(list, idx, &attr, name);
+}
+
+static int add_tracepoint_multi(struct list_head *list, int *idx,
+ char *sys_name, char *evt_name)
+{
+ char evt_path[MAXPATHLEN];
+ struct dirent *evt_ent;
+ DIR *evt_dir;
+ int ret = 0;
+
+ snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
+ evt_dir = opendir(evt_path);
+ if (!evt_dir) {
+ perror("Can't open event dir");
+ return -1;
+ }
+
+ while (!ret && (evt_ent = readdir(evt_dir))) {
+ if (!strcmp(evt_ent->d_name, ".")
+ || !strcmp(evt_ent->d_name, "..")
+ || !strcmp(evt_ent->d_name, "enable")
+ || !strcmp(evt_ent->d_name, "filter"))
+ continue;
+
+ if (!strglobmatch(evt_ent->d_name, evt_name))
+ continue;
+
+ ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
+ }
+
+ return ret;
+}
+
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
+ char *sys, char *event)
+{
+ int ret;
+
+ ret = debugfs_valid_mountpoint(tracing_events_path);
+ if (ret)
+ return ret;
+
+ return strpbrk(event, "*?") ?
+ add_tracepoint_multi(list, idx, sys, event) :
+ add_tracepoint(list, idx, sys, event);
+}
+
+static int
+parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (!type || !type[i])
+ break;
+
+ switch (type[i]) {
+ case 'r':
+ attr->bp_type |= HW_BREAKPOINT_R;
+ break;
+ case 'w':
+ attr->bp_type |= HW_BREAKPOINT_W;
+ break;
+ case 'x':
+ attr->bp_type |= HW_BREAKPOINT_X;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (!attr->bp_type) /* Default */
+ attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
+
+ return 0;
+}
+
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
+ void *ptr, char *type)
+{
+ struct perf_event_attr attr;
+ char name[MAX_NAME_LEN];
+
+ memset(&attr, 0, sizeof(attr));
+ attr.bp_addr = (unsigned long) ptr;
+
+ if (parse_breakpoint_type(type, &attr))
+ return -EINVAL;
+
+ /*
+ * We should find a nice way to override the access length;
+ * provide some defaults for now.
+ */
+ if (attr.bp_type == HW_BREAKPOINT_X)
+ attr.bp_len = sizeof(long);
+ else
+ attr.bp_len = HW_BREAKPOINT_LEN_4;
+
+ attr.type = PERF_TYPE_BREAKPOINT;
+
+ snprintf(name, MAX_NAME_LEN, "mem:%p:%s", ptr, type ? type : "rw");
+ return add_event(list, idx, &attr, name);
+}
+
+static int config_term(struct perf_event_attr *attr,
+ struct parse_events__term *term)
+{
+ switch (term->type) {
+ case PARSE_EVENTS__TERM_TYPE_CONFIG:
+ attr->config = term->val.num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_CONFIG1:
+ attr->config1 = term->val.num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_CONFIG2:
+ attr->config2 = term->val.num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+ attr->sample_period = term->val.num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
+ /*
+ * TODO uncomment when the field is available
+ * attr->branch_sample_type = term->val.num;
+ */
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int config_attr(struct perf_event_attr *attr,
+ struct list_head *head, int fail)
+{
+ struct parse_events__term *term;
+
+ list_for_each_entry(term, head, list)
+ if (config_term(attr, term) && fail)
+ return -EINVAL;
+
+ return 0;
+}
+
+int parse_events_add_numeric(struct list_head *list, int *idx,
+ unsigned long type, unsigned long config,
+ struct list_head *head_config)
+{
+ struct perf_event_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.type = type;
+ attr.config = config;
+
+ if (head_config &&
+ config_attr(&attr, head_config, 1))
+ return -EINVAL;
+
+ return add_event(list, idx, &attr,
+ (char *) __event_name(type, config));
+}
+
+int parse_events_add_pmu(struct list_head *list, int *idx,
+ char *name, struct list_head *head_config)
+{
+ struct perf_event_attr attr;
+ struct perf_pmu *pmu;
+
+ pmu = perf_pmu__find(name);
+ if (!pmu)
+ return -EINVAL;
+
+ memset(&attr, 0, sizeof(attr));
+
+ /*
+ * Configure hardcoded terms first, no need to check
+ * return value when called with fail == 0 ;)
+ */
+ config_attr(&attr, head_config, 0);
+
+ if (perf_pmu__config(pmu, &attr, head_config))
+ return -EINVAL;
+
+ return add_event(list, idx, &attr, (char *) "pmu");
+}
+
+void parse_events_update_lists(struct list_head *list_event,
+ struct list_head *list_all)
+{
+ /*
+ * Called for single event definition. Update the
+ * 'all event' list, and reinit the 'single event'
+ * list for the next event definition.
+ */
+ list_splice_tail(list_event, list_all);
+ INIT_LIST_HEAD(list_event);
+}
+
+int parse_events_modifier(struct list_head *list, char *str)
+{
+ struct perf_evsel *evsel;
+ int exclude = 0, exclude_GH = 0;
+ int eu = 0, ek = 0, eh = 0, eH = 0, eG = 0, precise = 0;
+
+ if (str == NULL)
+ return 0;
+
+ while (*str) {
+ if (*str == 'u') {
+ if (!exclude)
+ exclude = eu = ek = eh = 1;
+ eu = 0;
+ } else if (*str == 'k') {
+ if (!exclude)
+ exclude = eu = ek = eh = 1;
+ ek = 0;
+ } else if (*str == 'h') {
+ if (!exclude)
+ exclude = eu = ek = eh = 1;
+ eh = 0;
+ } else if (*str == 'G') {
+ if (!exclude_GH)
+ exclude_GH = eG = eH = 1;
+ eG = 0;
+ } else if (*str == 'H') {
+ if (!exclude_GH)
+ exclude_GH = eG = eH = 1;
+ eH = 0;
+ } else if (*str == 'p') {
+ precise++;
+ } else
+ break;
+
+ ++str;
+ }
+
+ /*
+ * precise ip:
+ *
+ * 0 - SAMPLE_IP can have arbitrary skid
+ * 1 - SAMPLE_IP must have constant skid
+ * 2 - SAMPLE_IP requested to have 0 skid
+ * 3 - SAMPLE_IP must have 0 skid
+ *
+ * See also PERF_RECORD_MISC_EXACT_IP
+ */
+ if (precise > 3)
+ return -EINVAL;
+
+ list_for_each_entry(evsel, list, node) {
+ evsel->attr.exclude_user = eu;
+ evsel->attr.exclude_kernel = ek;
+ evsel->attr.exclude_hv = eh;
+ evsel->attr.precise_ip = precise;
+ evsel->attr.exclude_host = eH;
+ evsel->attr.exclude_guest = eG;
+ }
+
+ return 0;
+}
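/*
 * Minimal standalone sketch (illustrative only, not part of the patch above):
 * what a modifier string such as "kpp" means after parse_events_modifier()
 * has run -- the first privilege letter excludes all levels, each letter then
 * re-enables its own level, and every 'p' bumps the requested precision (the
 * real parser rejects more than three).  'struct attr_bits' is a stand-in for
 * the relevant perf_event_attr fields.
 */
#include <stdio.h>

struct attr_bits { int exclude_user, exclude_kernel, exclude_hv, precise_ip; };

static struct attr_bits decode_modifier(const char *mod)
{
	struct attr_bits a = { 0, 0, 0, 0 };
	int exclude = 0;

	for (; *mod; mod++) {
		if (*mod == 'u' || *mod == 'k' || *mod == 'h') {
			if (!exclude) {		/* first privilege letter excludes everything */
				exclude = 1;
				a.exclude_user = a.exclude_kernel = a.exclude_hv = 1;
			}
			if (*mod == 'u')
				a.exclude_user = 0;
			else if (*mod == 'k')
				a.exclude_kernel = 0;
			else
				a.exclude_hv = 0;
		} else if (*mod == 'p') {
			a.precise_ip++;
		}
	}
	return a;
}

int main(void)
{
	/* "kpp": count in kernel mode only, request near-zero-skid samples */
	struct attr_bits a = decode_modifier("kpp");

	printf("exclude_user=%d exclude_kernel=%d exclude_hv=%d precise_ip=%d\n",
	       a.exclude_user, a.exclude_kernel, a.exclude_hv, a.precise_ip);
	return 0;
}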
+
+int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
+{
+ LIST_HEAD(list);
+ LIST_HEAD(list_tmp);
+ YY_BUFFER_STATE buffer;
+ int ret, idx = evlist->nr_entries;
+
+ buffer = parse_events__scan_string(str);
+
+ ret = parse_events_parse(&list, &list_tmp, &idx);
+
+ parse_events__flush_buffer(buffer);
+ parse_events__delete_buffer(buffer);
+
+ if (!ret) {
+ int entries = idx - evlist->nr_entries;
+ perf_evlist__splice_list_tail(evlist, &list, entries);
+ return 0;
+ }
+
+ /*
+ * There are 2 users - builtin-record and builtin-test objects.
+ * Both call perf_evlist__delete in case of error, so we don't
+ * need to bother.
+ */
+ fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
+ fprintf(stderr, "Run 'perf list' for a list of valid events\n");
+ return ret;
+}
+
+int parse_events_option(const struct option *opt, const char *str,
+ int unset __used)
+{
+ struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
+ return parse_events(evlist, str, unset);
+}
+
+int parse_filter(const struct option *opt, const char *str,
+ int unset __used)
+{
+ struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
+ struct perf_evsel *last = NULL;
+
+ if (evlist->nr_entries > 0)
+ last = list_entry(evlist->entries.prev, struct perf_evsel, node);
+
+ if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
+ fprintf(stderr,
+ "-F option should follow a -e tracepoint option\n");
+ return -1;
+ }
+
+ last->filter = strdup(str);
+ if (last->filter == NULL) {
+ fprintf(stderr, "not enough memory to hold filter string\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static const char * const event_type_descriptors[] = {
+ "Hardware event",
+ "Software event",
+ "Tracepoint event",
+ "Hardware cache event",
+ "Raw hardware event descriptor",
+ "Hardware breakpoint",
+};
+
+/*
+ * Print the events from <debugfs_mount_point>/tracing/events
+ */
+
+void print_tracepoint_events(const char *subsys_glob, const char *event_glob)
+{
+ DIR *sys_dir, *evt_dir;
+ struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+ char evt_path[MAXPATHLEN];
+ char dir_path[MAXPATHLEN];
+
+ if (debugfs_valid_mountpoint(tracing_events_path))
+ return;
+
+ sys_dir = opendir(tracing_events_path);
+ if (!sys_dir)
+ return;
+
+ for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+ if (subsys_glob != NULL &&
+ !strglobmatch(sys_dirent.d_name, subsys_glob))
+ continue;
+
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
+ sys_dirent.d_name);
+ evt_dir = opendir(dir_path);
+ if (!evt_dir)
+ continue;
+
+ for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+ if (event_glob != NULL &&
+ !strglobmatch(evt_dirent.d_name, event_glob))
+ continue;
+
+ snprintf(evt_path, MAXPATHLEN, "%s:%s",
+ sys_dirent.d_name, evt_dirent.d_name);
+ printf(" %-50s [%s]\n", evt_path,
+ event_type_descriptors[PERF_TYPE_TRACEPOINT]);
+ }
+ closedir(evt_dir);
+ }
+ closedir(sys_dir);
+}
+
+/*
+ * Check whether event is in <debugfs_mount_point>/tracing/events
+ */
+
+int is_valid_tracepoint(const char *event_string)
+{
+ DIR *sys_dir, *evt_dir;
+ struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+ char evt_path[MAXPATHLEN];
+ char dir_path[MAXPATHLEN];
+
+ if (debugfs_valid_mountpoint(tracing_events_path))
+ return 0;
+
+ sys_dir = opendir(tracing_events_path);
+ if (!sys_dir)
+ return 0;
+
+ for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
+ sys_dirent.d_name);
+ evt_dir = opendir(dir_path);
+ if (!evt_dir)
+ continue;
+
+ for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+ snprintf(evt_path, MAXPATHLEN, "%s:%s",
+ sys_dirent.d_name, evt_dirent.d_name);
+ if (!strcmp(evt_path, event_string)) {
+ closedir(evt_dir);
+ closedir(sys_dir);
+ return 1;
+ }
+ }
+ closedir(evt_dir);
+ }
+ closedir(sys_dir);
+ return 0;
+}
+
+void print_events_type(u8 type)
+{
+ struct event_symbol *syms = event_symbols;
+ unsigned int i;
+ char name[64];
+
+ for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
+ if (type != syms->type)
+ continue;
+
+ if (strlen(syms->alias))
+ snprintf(name, sizeof(name), "%s OR %s",
+ syms->symbol, syms->alias);
+ else
+ snprintf(name, sizeof(name), "%s", syms->symbol);
+
+ printf(" %-50s [%s]\n", name,
+ event_type_descriptors[type]);
+ }
+}
+
+int print_hwcache_events(const char *event_glob)
+{
+ unsigned int type, op, i, printed = 0;
+
+ for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
+ for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
+ /* skip invalid cache type/op combinations */
+ if (!is_cache_op_valid(type, op))
+ continue;
+
+ for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
+ char *name = event_cache_name(type, op, i);
+
+ if (event_glob != NULL && !strglobmatch(name, event_glob))
+ continue;
+
+ printf(" %-50s [%s]\n", name,
+ event_type_descriptors[PERF_TYPE_HW_CACHE]);
+ ++printed;
+ }
+ }
+ }
+
+ return printed;
+}
+
+/*
+ * Print the help text for the event symbols:
+ */
+void print_events(const char *event_glob)
+{
+ unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0;
+ struct event_symbol *syms = event_symbols;
+ char name[MAX_NAME_LEN];
+
+ printf("\n");
+ printf("List of pre-defined events (to be used in -e):\n");
+
+ for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
+ type = syms->type;
+
+ if (type != prev_type && printed) {
+ printf("\n");
+ printed = 0;
+ ntypes_printed++;
+ }
+
+ if (event_glob != NULL &&
+ !(strglobmatch(syms->symbol, event_glob) ||
+ (syms->alias && strglobmatch(syms->alias, event_glob))))
+ continue;
+
+ if (strlen(syms->alias))
+ snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
+ else
+ strncpy(name, syms->symbol, MAX_NAME_LEN);
+ printf(" %-50s [%s]\n", name,
+ event_type_descriptors[type]);
+
+ prev_type = type;
+ ++printed;
+ }
+
+ if (ntypes_printed) {
+ printed = 0;
+ printf("\n");
+ }
+ print_hwcache_events(event_glob);
+
+ if (event_glob != NULL)
+ return;
+
+ printf("\n");
+ printf(" %-50s [%s]\n",
+ "rNNN",
+ event_type_descriptors[PERF_TYPE_RAW]);
+ printf(" %-50s [%s]\n",
+ "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
+ event_type_descriptors[PERF_TYPE_RAW]);
+ printf(" (see 'perf list --help' on how to encode it)\n");
+ printf("\n");
+
+ printf(" %-50s [%s]\n",
+ "mem:<addr>[:access]",
+ event_type_descriptors[PERF_TYPE_BREAKPOINT]);
+ printf("\n");
+
+ print_tracepoint_events(NULL, NULL);
+}
+
+int parse_events__is_hardcoded_term(struct parse_events__term *term)
+{
+ return term->type <= PARSE_EVENTS__TERM_TYPE_HARDCODED_MAX;
+}
+
+int parse_events__new_term(struct parse_events__term **_term, int type,
+ char *config, char *str, long num)
+{
+ struct parse_events__term *term;
+
+ term = zalloc(sizeof(*term));
+ if (!term)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&term->list);
+ term->type = type;
+ term->config = config;
+
+ switch (type) {
+ case PARSE_EVENTS__TERM_TYPE_CONFIG:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG1:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG2:
+ case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+ case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
+ case PARSE_EVENTS__TERM_TYPE_NUM:
+ term->val.num = num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_STR:
+ term->val.str = str;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *_term = term;
+ return 0;
+}
+
+void parse_events__free_terms(struct list_head *terms)
+{
+ struct parse_events__term *term, *h;
+
+ list_for_each_entry_safe(term, h, terms, list)
+ free(term);
+
+ free(terms);
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/parse-events.h b/ANDROID_3.4.5/tools/perf/util/parse-events.h
new file mode 100644
index 00000000..ca069f89
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/parse-events.h
@@ -0,0 +1,93 @@
+#ifndef __PERF_PARSE_EVENTS_H
+#define __PERF_PARSE_EVENTS_H
+/*
+ * Parse symbolic events/counts passed in as options:
+ */
+
+#include "../../../include/linux/perf_event.h"
+
+struct list_head;
+struct perf_evsel;
+struct perf_evlist;
+
+struct option;
+
+struct tracepoint_path {
+ char *system;
+ char *name;
+ struct tracepoint_path *next;
+};
+
+extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
+extern bool have_tracepoints(struct list_head *evlist);
+
+const char *event_type(int type);
+const char *event_name(struct perf_evsel *event);
+extern const char *__event_name(int type, u64 config);
+
+extern int parse_events_option(const struct option *opt, const char *str,
+ int unset);
+extern int parse_events(struct perf_evlist *evlist, const char *str,
+ int unset);
+extern int parse_filter(const struct option *opt, const char *str, int unset);
+
+#define EVENTS_HELP_MAX (128*1024)
+
+enum {
+ PARSE_EVENTS__TERM_TYPE_CONFIG,
+ PARSE_EVENTS__TERM_TYPE_CONFIG1,
+ PARSE_EVENTS__TERM_TYPE_CONFIG2,
+ PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD,
+ PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
+ PARSE_EVENTS__TERM_TYPE_NUM,
+ PARSE_EVENTS__TERM_TYPE_STR,
+
+ PARSE_EVENTS__TERM_TYPE_HARDCODED_MAX =
+ PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
+};
+
+struct parse_events__term {
+ char *config;
+ union {
+ char *str;
+ long num;
+ } val;
+ int type;
+
+ struct list_head list;
+};
+
+int parse_events__is_hardcoded_term(struct parse_events__term *term);
+int parse_events__new_term(struct parse_events__term **term, int type,
+ char *config, char *str, long num);
+void parse_events__free_terms(struct list_head *terms);
+int parse_events_modifier(struct list_head *list __used, char *str __used);
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
+ char *sys, char *event);
+int parse_events_add_raw(struct perf_evlist *evlist, unsigned long config,
+ unsigned long config1, unsigned long config2,
+ char *mod);
+int parse_events_add_numeric(struct list_head *list, int *idx,
+ unsigned long type, unsigned long config,
+ struct list_head *head_config);
+int parse_events_add_cache(struct list_head *list, int *idx,
+ char *type, char *op_result1, char *op_result2);
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
+ void *ptr, char *type);
+int parse_events_add_pmu(struct list_head *list, int *idx,
+ char *pmu , struct list_head *head_config);
+void parse_events_update_lists(struct list_head *list_event,
+ struct list_head *list_all);
+void parse_events_error(struct list_head *list_all,
+ struct list_head *list_event,
+ int *idx, char const *msg);
+
+void print_events(const char *event_glob);
+void print_events_type(u8 type);
+void print_tracepoint_events(const char *subsys_glob, const char *event_glob);
+int print_hwcache_events(const char *event_glob);
+extern int is_valid_tracepoint(const char *event_string);
+
+extern int valid_debugfs_mount(const char *debugfs);
+
+#endif /* __PERF_PARSE_EVENTS_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/parse-events.l b/ANDROID_3.4.5/tools/perf/util/parse-events.l
new file mode 100644
index 00000000..1fcf1bbc
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/parse-events.l
@@ -0,0 +1,127 @@
+
+%option prefix="parse_events_"
+
+%{
+#include <errno.h>
+#include "../perf.h"
+#include "parse-events-bison.h"
+#include "parse-events.h"
+
+static int __value(char *str, int base, int token)
+{
+ long num;
+
+ errno = 0;
+ num = strtoul(str, NULL, base);
+ if (errno)
+ return PE_ERROR;
+
+ parse_events_lval.num = num;
+ return token;
+}
+
+static int value(int base)
+{
+ return __value(parse_events_text, base, PE_VALUE);
+}
+
+static int raw(void)
+{
+ return __value(parse_events_text + 1, 16, PE_RAW);
+}
+
+static int str(int token)
+{
+ parse_events_lval.str = strdup(parse_events_text);
+ return token;
+}
+
+static int sym(int type, int config)
+{
+ parse_events_lval.num = (type << 16) + config;
+ return PE_VALUE_SYM;
+}
+
+static int term(int type)
+{
+ parse_events_lval.num = type;
+ return PE_TERM;
+}
+
+%}
+
+num_dec [0-9]+
+num_hex 0x[a-fA-F0-9]+
+num_raw_hex [a-fA-F0-9]+
+name [a-zA-Z_*?][a-zA-Z0-9_*?]*
+modifier_event [ukhpGH]{1,8}
+modifier_bp [rwx]
+
+%%
+cpu-cycles|cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
+stalled-cycles-frontend|idle-cycles-frontend { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
+stalled-cycles-backend|idle-cycles-backend { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
+instructions { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); }
+cache-references { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); }
+cache-misses { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); }
+branch-instructions|branches { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
+branch-misses { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); }
+bus-cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); }
+ref-cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
+cpu-clock { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); }
+task-clock { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); }
+page-faults|faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); }
+minor-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); }
+major-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); }
+context-switches|cs { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); }
+cpu-migrations|migrations { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
+alignment-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
+emulation-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
+
+L1-dcache|l1-d|l1d|L1-data |
+L1-icache|l1-i|l1i|L1-instruction |
+LLC|L2 |
+dTLB|d-tlb|Data-TLB |
+iTLB|i-tlb|Instruction-TLB |
+branch|branches|bpu|btb|bpc |
+node { return str(PE_NAME_CACHE_TYPE); }
+
+load|loads|read |
+store|stores|write |
+prefetch|prefetches |
+speculative-read|speculative-load |
+refs|Reference|ops|access |
+misses|miss { return str(PE_NAME_CACHE_OP_RESULT); }
+
+ /*
+ * These are event config hardcoded term names to be specified
+ * within xxx/.../ syntax. So far we don't clash with other names,
+ * so we can put them here directly. In case we have a conflict
+ * in the future, this needs to go into a '//' condition block.
+ */
+config { return term(PARSE_EVENTS__TERM_TYPE_CONFIG); }
+config1 { return term(PARSE_EVENTS__TERM_TYPE_CONFIG1); }
+config2 { return term(PARSE_EVENTS__TERM_TYPE_CONFIG2); }
+period { return term(PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
+branch_type { return term(PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
+
+mem: { return PE_PREFIX_MEM; }
+r{num_raw_hex} { return raw(); }
+{num_dec} { return value(10); }
+{num_hex} { return value(16); }
+
+{modifier_event} { return str(PE_MODIFIER_EVENT); }
+{modifier_bp} { return str(PE_MODIFIER_BP); }
+{name} { return str(PE_NAME); }
+"/" { return '/'; }
+- { return '-'; }
+, { return ','; }
+: { return ':'; }
+= { return '='; }
+
+%%
+
+int parse_events_wrap(void)
+{
+ return 1;
+}
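/*
 * Minimal standalone sketch (illustrative only, not part of the patch above):
 * a few event specifications this lexer and the parse-events.y grammar accept,
 * grouped by the rule that matches them.  "cpu" and "sched:sched_switch" are
 * just typical examples of a PMU name and a tracepoint.
 */
#include <stdio.h>

static const char * const example_event_specs[] = {
	"cycles:u",			/* symbolic event + event modifier     */
	"L1-dcache-load-misses",	/* legacy hardware cache event         */
	"r1a8",				/* raw hardware event                  */
	"mem:0x1000:w",			/* hardware breakpoint                 */
	"sched:sched_switch",		/* tracepoint                          */
	"cpu/config=0x1,period=1000/",	/* pmu event with config terms         */
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(example_event_specs) / sizeof(example_event_specs[0]); i++)
		puts(example_event_specs[i]);
	return 0;
}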
diff --git a/ANDROID_3.4.5/tools/perf/util/parse-events.y b/ANDROID_3.4.5/tools/perf/util/parse-events.y
new file mode 100644
index 00000000..d9637da7
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/parse-events.y
@@ -0,0 +1,229 @@
+
+%name-prefix "parse_events_"
+%parse-param {struct list_head *list_all}
+%parse-param {struct list_head *list_event}
+%parse-param {int *idx}
+
+%{
+
+#define YYDEBUG 1
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include "types.h"
+#include "util.h"
+#include "parse-events.h"
+
+extern int parse_events_lex (void);
+
+#define ABORT_ON(val) \
+do { \
+ if (val) \
+ YYABORT; \
+} while (0)
+
+%}
+
+%token PE_VALUE PE_VALUE_SYM PE_RAW PE_TERM
+%token PE_NAME
+%token PE_MODIFIER_EVENT PE_MODIFIER_BP
+%token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
+%token PE_PREFIX_MEM PE_PREFIX_RAW
+%token PE_ERROR
+%type <num> PE_VALUE
+%type <num> PE_VALUE_SYM
+%type <num> PE_RAW
+%type <num> PE_TERM
+%type <str> PE_NAME
+%type <str> PE_NAME_CACHE_TYPE
+%type <str> PE_NAME_CACHE_OP_RESULT
+%type <str> PE_MODIFIER_EVENT
+%type <str> PE_MODIFIER_BP
+%type <head> event_config
+%type <term> event_term
+
+%union
+{
+ char *str;
+ unsigned long num;
+ struct list_head *head;
+ struct parse_events__term *term;
+}
+%%
+
+events:
+events ',' event | event
+
+event:
+event_def PE_MODIFIER_EVENT
+{
+ /*
+ * Apply the modifier to all events added by a single event definition
+ * (there could be more events added for multiple tracepoint
+ * definitions via '*?').
+ */
+ ABORT_ON(parse_events_modifier(list_event, $2));
+ parse_events_update_lists(list_event, list_all);
+}
+|
+event_def
+{
+ parse_events_update_lists(list_event, list_all);
+}
+
+event_def: event_pmu |
+ event_legacy_symbol |
+ event_legacy_cache sep_dc |
+ event_legacy_mem |
+ event_legacy_tracepoint sep_dc |
+ event_legacy_numeric sep_dc |
+ event_legacy_raw sep_dc
+
+event_pmu:
+PE_NAME '/' event_config '/'
+{
+ ABORT_ON(parse_events_add_pmu(list_event, idx, $1, $3));
+ parse_events__free_terms($3);
+}
+
+event_legacy_symbol:
+PE_VALUE_SYM '/' event_config '/'
+{
+ int type = $1 >> 16;
+ int config = $1 & 255;
+
+ ABORT_ON(parse_events_add_numeric(list_event, idx, type, config, $3));
+ parse_events__free_terms($3);
+}
+|
+PE_VALUE_SYM sep_slash_dc
+{
+ int type = $1 >> 16;
+ int config = $1 & 255;
+
+ ABORT_ON(parse_events_add_numeric(list_event, idx, type, config, NULL));
+}
+
+event_legacy_cache:
+PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
+{
+ ABORT_ON(parse_events_add_cache(list_event, idx, $1, $3, $5));
+}
+|
+PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
+{
+ ABORT_ON(parse_events_add_cache(list_event, idx, $1, $3, NULL));
+}
+|
+PE_NAME_CACHE_TYPE
+{
+ ABORT_ON(parse_events_add_cache(list_event, idx, $1, NULL, NULL));
+}
+
+event_legacy_mem:
+PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
+{
+ ABORT_ON(parse_events_add_breakpoint(list_event, idx, (void *) $2, $4));
+}
+|
+PE_PREFIX_MEM PE_VALUE sep_dc
+{
+ ABORT_ON(parse_events_add_breakpoint(list_event, idx, (void *) $2, NULL));
+}
+
+event_legacy_tracepoint:
+PE_NAME ':' PE_NAME
+{
+ ABORT_ON(parse_events_add_tracepoint(list_event, idx, $1, $3));
+}
+
+event_legacy_numeric:
+PE_VALUE ':' PE_VALUE
+{
+ ABORT_ON(parse_events_add_numeric(list_event, idx, $1, $3, NULL));
+}
+
+event_legacy_raw:
+PE_RAW
+{
+ ABORT_ON(parse_events_add_numeric(list_event, idx, PERF_TYPE_RAW, $1, NULL));
+}
+
+event_config:
+event_config ',' event_term
+{
+ struct list_head *head = $1;
+ struct parse_events__term *term = $3;
+
+ ABORT_ON(!head);
+ list_add_tail(&term->list, head);
+ $$ = $1;
+}
+|
+event_term
+{
+ struct list_head *head = malloc(sizeof(*head));
+ struct parse_events__term *term = $1;
+
+ ABORT_ON(!head);
+ INIT_LIST_HEAD(head);
+ list_add_tail(&term->list, head);
+ $$ = head;
+}
+
+event_term:
+PE_NAME '=' PE_NAME
+{
+ struct parse_events__term *term;
+
+ ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_STR,
+ $1, $3, 0));
+ $$ = term;
+}
+|
+PE_NAME '=' PE_VALUE
+{
+ struct parse_events__term *term;
+
+ ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_NUM,
+ $1, NULL, $3));
+ $$ = term;
+}
+|
+PE_NAME
+{
+ struct parse_events__term *term;
+
+ ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_NUM,
+ $1, NULL, 1));
+ $$ = term;
+}
+|
+PE_TERM '=' PE_VALUE
+{
+ struct parse_events__term *term;
+
+ ABORT_ON(parse_events__new_term(&term, $1, NULL, NULL, $3));
+ $$ = term;
+}
+|
+PE_TERM
+{
+ struct parse_events__term *term;
+
+ ABORT_ON(parse_events__new_term(&term, $1, NULL, NULL, 1));
+ $$ = term;
+}
+
+sep_dc: ':' |
+
+sep_slash_dc: '/' | ':' |
+
+%%
+
+void parse_events_error(struct list_head *list_all __used,
+ struct list_head *list_event __used,
+ int *idx __used,
+ char const *msg __used)
+{
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/parse-options.c b/ANDROID_3.4.5/tools/perf/util/parse-options.c
new file mode 100644
index 00000000..99d02aa5
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/parse-options.c
@@ -0,0 +1,577 @@
+#include "util.h"
+#include "parse-options.h"
+#include "cache.h"
+
+#define OPT_SHORT 1
+#define OPT_UNSET 2
+
+static int opterror(const struct option *opt, const char *reason, int flags)
+{
+ if (flags & OPT_SHORT)
+ return error("switch `%c' %s", opt->short_name, reason);
+ if (flags & OPT_UNSET)
+ return error("option `no-%s' %s", opt->long_name, reason);
+ return error("option `%s' %s", opt->long_name, reason);
+}
+
+static int get_arg(struct parse_opt_ctx_t *p, const struct option *opt,
+ int flags, const char **arg)
+{
+ if (p->opt) {
+ *arg = p->opt;
+ p->opt = NULL;
+ } else if ((opt->flags & PARSE_OPT_LASTARG_DEFAULT) && (p->argc == 1 ||
+ **(p->argv + 1) == '-')) {
+ *arg = (const char *)opt->defval;
+ } else if (p->argc > 1) {
+ p->argc--;
+ *arg = *++p->argv;
+ } else
+ return opterror(opt, "requires a value", flags);
+ return 0;
+}
+
+static int get_value(struct parse_opt_ctx_t *p,
+ const struct option *opt, int flags)
+{
+ const char *s, *arg = NULL;
+ const int unset = flags & OPT_UNSET;
+
+ if (unset && p->opt)
+ return opterror(opt, "takes no value", flags);
+ if (unset && (opt->flags & PARSE_OPT_NONEG))
+ return opterror(opt, "isn't available", flags);
+
+ if (!(flags & OPT_SHORT) && p->opt) {
+ switch (opt->type) {
+ case OPTION_CALLBACK:
+ if (!(opt->flags & PARSE_OPT_NOARG))
+ break;
+ /* FALLTHROUGH */
+ case OPTION_BOOLEAN:
+ case OPTION_INCR:
+ case OPTION_BIT:
+ case OPTION_SET_UINT:
+ case OPTION_SET_PTR:
+ return opterror(opt, "takes no value", flags);
+ case OPTION_END:
+ case OPTION_ARGUMENT:
+ case OPTION_GROUP:
+ case OPTION_STRING:
+ case OPTION_INTEGER:
+ case OPTION_UINTEGER:
+ case OPTION_LONG:
+ case OPTION_U64:
+ default:
+ break;
+ }
+ }
+
+ switch (opt->type) {
+ case OPTION_BIT:
+ if (unset)
+ *(int *)opt->value &= ~opt->defval;
+ else
+ *(int *)opt->value |= opt->defval;
+ return 0;
+
+ case OPTION_BOOLEAN:
+ *(bool *)opt->value = unset ? false : true;
+ return 0;
+
+ case OPTION_INCR:
+ *(int *)opt->value = unset ? 0 : *(int *)opt->value + 1;
+ return 0;
+
+ case OPTION_SET_UINT:
+ *(unsigned int *)opt->value = unset ? 0 : opt->defval;
+ return 0;
+
+ case OPTION_SET_PTR:
+ *(void **)opt->value = unset ? NULL : (void *)opt->defval;
+ return 0;
+
+ case OPTION_STRING:
+ if (unset)
+ *(const char **)opt->value = NULL;
+ else if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
+ *(const char **)opt->value = (const char *)opt->defval;
+ else
+ return get_arg(p, opt, flags, (const char **)opt->value);
+ return 0;
+
+ case OPTION_CALLBACK:
+ if (unset)
+ return (*opt->callback)(opt, NULL, 1) ? (-1) : 0;
+ if (opt->flags & PARSE_OPT_NOARG)
+ return (*opt->callback)(opt, NULL, 0) ? (-1) : 0;
+ if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
+ return (*opt->callback)(opt, NULL, 0) ? (-1) : 0;
+ if (get_arg(p, opt, flags, &arg))
+ return -1;
+ return (*opt->callback)(opt, arg, 0) ? (-1) : 0;
+
+ case OPTION_INTEGER:
+ if (unset) {
+ *(int *)opt->value = 0;
+ return 0;
+ }
+ if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
+ *(int *)opt->value = opt->defval;
+ return 0;
+ }
+ if (get_arg(p, opt, flags, &arg))
+ return -1;
+ *(int *)opt->value = strtol(arg, (char **)&s, 10);
+ if (*s)
+ return opterror(opt, "expects a numerical value", flags);
+ return 0;
+
+ case OPTION_UINTEGER:
+ if (unset) {
+ *(unsigned int *)opt->value = 0;
+ return 0;
+ }
+ if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
+ *(unsigned int *)opt->value = opt->defval;
+ return 0;
+ }
+ if (get_arg(p, opt, flags, &arg))
+ return -1;
+ *(unsigned int *)opt->value = strtol(arg, (char **)&s, 10);
+ if (*s)
+ return opterror(opt, "expects a numerical value", flags);
+ return 0;
+
+ case OPTION_LONG:
+ if (unset) {
+ *(long *)opt->value = 0;
+ return 0;
+ }
+ if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
+ *(long *)opt->value = opt->defval;
+ return 0;
+ }
+ if (get_arg(p, opt, flags, &arg))
+ return -1;
+ *(long *)opt->value = strtol(arg, (char **)&s, 10);
+ if (*s)
+ return opterror(opt, "expects a numerical value", flags);
+ return 0;
+
+ case OPTION_U64:
+ if (unset) {
+ *(u64 *)opt->value = 0;
+ return 0;
+ }
+ if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
+ *(u64 *)opt->value = opt->defval;
+ return 0;
+ }
+ if (get_arg(p, opt, flags, &arg))
+ return -1;
+ *(u64 *)opt->value = strtoull(arg, (char **)&s, 10);
+ if (*s)
+ return opterror(opt, "expects a numerical value", flags);
+ return 0;
+
+ case OPTION_END:
+ case OPTION_ARGUMENT:
+ case OPTION_GROUP:
+ default:
+ die("should not happen, someone must be hit on the forehead");
+ }
+}
+
+static int parse_short_opt(struct parse_opt_ctx_t *p, const struct option *options)
+{
+ for (; options->type != OPTION_END; options++) {
+ if (options->short_name == *p->opt) {
+ p->opt = p->opt[1] ? p->opt + 1 : NULL;
+ return get_value(p, options, OPT_SHORT);
+ }
+ }
+ return -2;
+}
+
+static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg,
+ const struct option *options)
+{
+ const char *arg_end = strchr(arg, '=');
+ const struct option *abbrev_option = NULL, *ambiguous_option = NULL;
+ int abbrev_flags = 0, ambiguous_flags = 0;
+
+ if (!arg_end)
+ arg_end = arg + strlen(arg);
+
+ for (; options->type != OPTION_END; options++) {
+ const char *rest;
+ int flags = 0;
+
+ if (!options->long_name)
+ continue;
+
+ rest = skip_prefix(arg, options->long_name);
+ if (options->type == OPTION_ARGUMENT) {
+ if (!rest)
+ continue;
+ if (*rest == '=')
+ return opterror(options, "takes no value", flags);
+ if (*rest)
+ continue;
+ p->out[p->cpidx++] = arg - 2;
+ return 0;
+ }
+ if (!rest) {
+ /* abbreviated? */
+ if (!strncmp(options->long_name, arg, arg_end - arg)) {
+is_abbreviated:
+ if (abbrev_option) {
+ /*
+ * If this is abbreviated, it is
+ * ambiguous. So when there is no
+ * exact match later, we need to
+ * error out.
+ */
+ ambiguous_option = abbrev_option;
+ ambiguous_flags = abbrev_flags;
+ }
+ if (!(flags & OPT_UNSET) && *arg_end)
+ p->opt = arg_end + 1;
+ abbrev_option = options;
+ abbrev_flags = flags;
+ continue;
+ }
+ /* negated and abbreviated very much? */
+ if (!prefixcmp("no-", arg)) {
+ flags |= OPT_UNSET;
+ goto is_abbreviated;
+ }
+ /* negated? */
+ if (strncmp(arg, "no-", 3))
+ continue;
+ flags |= OPT_UNSET;
+ rest = skip_prefix(arg + 3, options->long_name);
+ /* abbreviated and negated? */
+ if (!rest && !prefixcmp(options->long_name, arg + 3))
+ goto is_abbreviated;
+ if (!rest)
+ continue;
+ }
+ if (*rest) {
+ if (*rest != '=')
+ continue;
+ p->opt = rest + 1;
+ }
+ return get_value(p, options, flags);
+ }
+
+ if (ambiguous_option)
+ return error("Ambiguous option: %s "
+ "(could be --%s%s or --%s%s)",
+ arg,
+ (ambiguous_flags & OPT_UNSET) ? "no-" : "",
+ ambiguous_option->long_name,
+ (abbrev_flags & OPT_UNSET) ? "no-" : "",
+ abbrev_option->long_name);
+ if (abbrev_option)
+ return get_value(p, abbrev_option, abbrev_flags);
+ return -2;
+}
+
+static void check_typos(const char *arg, const struct option *options)
+{
+ if (strlen(arg) < 3)
+ return;
+
+ if (!prefixcmp(arg, "no-")) {
+ error ("did you mean `--%s` (with two dashes ?)", arg);
+ exit(129);
+ }
+
+ for (; options->type != OPTION_END; options++) {
+ if (!options->long_name)
+ continue;
+ if (!prefixcmp(options->long_name, arg)) {
+ error ("did you mean `--%s` (with two dashes ?)", arg);
+ exit(129);
+ }
+ }
+}
+
+void parse_options_start(struct parse_opt_ctx_t *ctx,
+ int argc, const char **argv, int flags)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->argc = argc - 1;
+ ctx->argv = argv + 1;
+ ctx->out = argv;
+ ctx->cpidx = ((flags & PARSE_OPT_KEEP_ARGV0) != 0);
+ ctx->flags = flags;
+ if ((flags & PARSE_OPT_KEEP_UNKNOWN) &&
+ (flags & PARSE_OPT_STOP_AT_NON_OPTION))
+ die("STOP_AT_NON_OPTION and KEEP_UNKNOWN don't go together");
+}
+
+static int usage_with_options_internal(const char * const *,
+ const struct option *, int);
+
+int parse_options_step(struct parse_opt_ctx_t *ctx,
+ const struct option *options,
+ const char * const usagestr[])
+{
+ int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP);
+
+ /* we must reset ->opt, as an unknown short option leaves it dangling */
+ ctx->opt = NULL;
+
+ for (; ctx->argc; ctx->argc--, ctx->argv++) {
+ const char *arg = ctx->argv[0];
+
+ if (*arg != '-' || !arg[1]) {
+ if (ctx->flags & PARSE_OPT_STOP_AT_NON_OPTION)
+ break;
+ ctx->out[ctx->cpidx++] = ctx->argv[0];
+ continue;
+ }
+
+ if (arg[1] != '-') {
+ ctx->opt = arg + 1;
+ if (internal_help && *ctx->opt == 'h')
+ return parse_options_usage(usagestr, options);
+ switch (parse_short_opt(ctx, options)) {
+ case -1:
+ return parse_options_usage(usagestr, options);
+ case -2:
+ goto unknown;
+ default:
+ break;
+ }
+ if (ctx->opt)
+ check_typos(arg + 1, options);
+ while (ctx->opt) {
+ if (internal_help && *ctx->opt == 'h')
+ return parse_options_usage(usagestr, options);
+ switch (parse_short_opt(ctx, options)) {
+ case -1:
+ return parse_options_usage(usagestr, options);
+ case -2:
+ /* fake a short option thing to hide the fact that we may have
+ * started to parse aggregated stuff
+ *
+ * This is leaky, too bad.
+ */
+ ctx->argv[0] = strdup(ctx->opt - 1);
+ *(char *)ctx->argv[0] = '-';
+ goto unknown;
+ default:
+ break;
+ }
+ }
+ continue;
+ }
+
+ if (!arg[2]) { /* "--" */
+ if (!(ctx->flags & PARSE_OPT_KEEP_DASHDASH)) {
+ ctx->argc--;
+ ctx->argv++;
+ }
+ break;
+ }
+
+ if (internal_help && !strcmp(arg + 2, "help-all"))
+ return usage_with_options_internal(usagestr, options, 1);
+ if (internal_help && !strcmp(arg + 2, "help"))
+ return parse_options_usage(usagestr, options);
+ switch (parse_long_opt(ctx, arg + 2, options)) {
+ case -1:
+ return parse_options_usage(usagestr, options);
+ case -2:
+ goto unknown;
+ default:
+ break;
+ }
+ continue;
+unknown:
+ if (!(ctx->flags & PARSE_OPT_KEEP_UNKNOWN))
+ return PARSE_OPT_UNKNOWN;
+ ctx->out[ctx->cpidx++] = ctx->argv[0];
+ ctx->opt = NULL;
+ }
+ return PARSE_OPT_DONE;
+}
+
+int parse_options_end(struct parse_opt_ctx_t *ctx)
+{
+ memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out));
+ ctx->out[ctx->cpidx + ctx->argc] = NULL;
+ return ctx->cpidx + ctx->argc;
+}
+
+int parse_options(int argc, const char **argv, const struct option *options,
+ const char * const usagestr[], int flags)
+{
+ struct parse_opt_ctx_t ctx;
+
+ parse_options_start(&ctx, argc, argv, flags);
+ switch (parse_options_step(&ctx, options, usagestr)) {
+ case PARSE_OPT_HELP:
+ exit(129);
+ case PARSE_OPT_DONE:
+ break;
+ default: /* PARSE_OPT_UNKNOWN */
+ if (ctx.argv[0][1] == '-') {
+ error("unknown option `%s'", ctx.argv[0] + 2);
+ } else {
+ error("unknown switch `%c'", *ctx.opt);
+ }
+ usage_with_options(usagestr, options);
+ }
+
+ return parse_options_end(&ctx);
+}
+
+#define USAGE_OPTS_WIDTH 24
+#define USAGE_GAP 2
+
+int usage_with_options_internal(const char * const *usagestr,
+ const struct option *opts, int full)
+{
+ if (!usagestr)
+ return PARSE_OPT_HELP;
+
+ fprintf(stderr, "\n usage: %s\n", *usagestr++);
+ while (*usagestr && **usagestr)
+ fprintf(stderr, " or: %s\n", *usagestr++);
+ while (*usagestr) {
+ fprintf(stderr, "%s%s\n",
+ **usagestr ? " " : "",
+ *usagestr);
+ usagestr++;
+ }
+
+ if (opts->type != OPTION_GROUP)
+ fputc('\n', stderr);
+
+ for (; opts->type != OPTION_END; opts++) {
+ size_t pos;
+ int pad;
+
+ if (opts->type == OPTION_GROUP) {
+ fputc('\n', stderr);
+ if (*opts->help)
+ fprintf(stderr, "%s\n", opts->help);
+ continue;
+ }
+ if (!full && (opts->flags & PARSE_OPT_HIDDEN))
+ continue;
+
+ pos = fprintf(stderr, " ");
+ if (opts->short_name)
+ pos += fprintf(stderr, "-%c", opts->short_name);
+ else
+ pos += fprintf(stderr, " ");
+
+ if (opts->long_name && opts->short_name)
+ pos += fprintf(stderr, ", ");
+ if (opts->long_name)
+ pos += fprintf(stderr, "--%s", opts->long_name);
+
+ switch (opts->type) {
+ case OPTION_ARGUMENT:
+ break;
+ case OPTION_LONG:
+ case OPTION_U64:
+ case OPTION_INTEGER:
+ case OPTION_UINTEGER:
+ if (opts->flags & PARSE_OPT_OPTARG)
+ if (opts->long_name)
+ pos += fprintf(stderr, "[=<n>]");
+ else
+ pos += fprintf(stderr, "[<n>]");
+ else
+ pos += fprintf(stderr, " <n>");
+ break;
+ case OPTION_CALLBACK:
+ if (opts->flags & PARSE_OPT_NOARG)
+ break;
+ /* FALLTHROUGH */
+ case OPTION_STRING:
+ if (opts->argh) {
+ if (opts->flags & PARSE_OPT_OPTARG)
+ if (opts->long_name)
+ pos += fprintf(stderr, "[=<%s>]", opts->argh);
+ else
+ pos += fprintf(stderr, "[<%s>]", opts->argh);
+ else
+ pos += fprintf(stderr, " <%s>", opts->argh);
+ } else {
+ if (opts->flags & PARSE_OPT_OPTARG)
+ if (opts->long_name)
+ pos += fprintf(stderr, "[=...]");
+ else
+ pos += fprintf(stderr, "[...]");
+ else
+ pos += fprintf(stderr, " ...");
+ }
+ break;
+ default: /* OPTION_{BIT,BOOLEAN,SET_UINT,SET_PTR} */
+ case OPTION_END:
+ case OPTION_GROUP:
+ case OPTION_BIT:
+ case OPTION_BOOLEAN:
+ case OPTION_INCR:
+ case OPTION_SET_UINT:
+ case OPTION_SET_PTR:
+ break;
+ }
+
+ if (pos <= USAGE_OPTS_WIDTH)
+ pad = USAGE_OPTS_WIDTH - pos;
+ else {
+ fputc('\n', stderr);
+ pad = USAGE_OPTS_WIDTH;
+ }
+ fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help);
+ }
+ fputc('\n', stderr);
+
+ return PARSE_OPT_HELP;
+}
+
+void usage_with_options(const char * const *usagestr,
+ const struct option *opts)
+{
+ exit_browser(false);
+ usage_with_options_internal(usagestr, opts, 0);
+ exit(129);
+}
+
+int parse_options_usage(const char * const *usagestr,
+ const struct option *opts)
+{
+ return usage_with_options_internal(usagestr, opts, 0);
+}
+
+
+int parse_opt_verbosity_cb(const struct option *opt, const char *arg __used,
+ int unset)
+{
+ int *target = opt->value;
+
+ if (unset)
+ /* --no-quiet, --no-verbose */
+ *target = 0;
+ else if (opt->short_name == 'v') {
+ if (*target >= 0)
+ (*target)++;
+ else
+ *target = 1;
+ } else {
+ if (*target <= 0)
+ (*target)--;
+ else
+ *target = -1;
+ }
+ return 0;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/parse-options.h b/ANDROID_3.4.5/tools/perf/util/parse-options.h
new file mode 100644
index 00000000..abc31a1d
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/parse-options.h
@@ -0,0 +1,192 @@
+#ifndef __PERF_PARSE_OPTIONS_H
+#define __PERF_PARSE_OPTIONS_H
+
+#include <linux/kernel.h>
+#include <stdbool.h>
+
+enum parse_opt_type {
+ /* special types */
+ OPTION_END,
+ OPTION_ARGUMENT,
+ OPTION_GROUP,
+ /* options with no arguments */
+ OPTION_BIT,
+ OPTION_BOOLEAN,
+ OPTION_INCR,
+ OPTION_SET_UINT,
+ OPTION_SET_PTR,
+ /* options with arguments (usually) */
+ OPTION_STRING,
+ OPTION_INTEGER,
+ OPTION_LONG,
+ OPTION_CALLBACK,
+ OPTION_U64,
+ OPTION_UINTEGER,
+};
+
+enum parse_opt_flags {
+ PARSE_OPT_KEEP_DASHDASH = 1,
+ PARSE_OPT_STOP_AT_NON_OPTION = 2,
+ PARSE_OPT_KEEP_ARGV0 = 4,
+ PARSE_OPT_KEEP_UNKNOWN = 8,
+ PARSE_OPT_NO_INTERNAL_HELP = 16,
+};
+
+enum parse_opt_option_flags {
+ PARSE_OPT_OPTARG = 1,
+ PARSE_OPT_NOARG = 2,
+ PARSE_OPT_NONEG = 4,
+ PARSE_OPT_HIDDEN = 8,
+ PARSE_OPT_LASTARG_DEFAULT = 16,
+};
+
+struct option;
+typedef int parse_opt_cb(const struct option *, const char *arg, int unset);
+
+/*
+ * `type`::
+ * holds the type of the option; you must have an OPTION_END last in your
+ * array.
+ *
+ * `short_name`::
+ * the character to use as a short option name, '\0' if none.
+ *
+ * `long_name`::
+ * the long option name, without the leading dashes, NULL if none.
+ *
+ * `value`::
+ * stores pointers to the values to be filled.
+ *
+ * `argh`::
+ * token to explain the kind of argument this option wants. Keep it
+ * homogeneous across the repository.
+ *
+ * `help`::
+ * the short help associated with what the option does.
+ * Must never be NULL (except for OPTION_END).
+ * OPTION_GROUP uses this pointer to store the group header.
+ *
+ * `flags`::
+ * mask of parse_opt_option_flags.
+ * PARSE_OPT_OPTARG: says that the argument is optional (not for BOOLEANs)
+ * PARSE_OPT_NOARG: says that this option takes no argument, for CALLBACKs
+ * PARSE_OPT_NONEG: says that this option cannot be negated
+ * PARSE_OPT_HIDDEN: this option is skipped in the default usage, shown in
+ * the long one.
+ *
+ * `callback`::
+ * pointer to the callback to use for OPTION_CALLBACK.
+ *
+ * `defval`::
+ * default value to fill (*->value) with for PARSE_OPT_OPTARG.
+ * OPTION_{BIT,SET_UINT,SET_PTR} store the {mask,integer,pointer} to put in
+ * the value when met.
+ * CALLBACKS can use it as they want.
+ */
+struct option {
+ enum parse_opt_type type;
+ int short_name;
+ const char *long_name;
+ void *value;
+ const char *argh;
+ const char *help;
+
+ int flags;
+ parse_opt_cb *callback;
+ intptr_t defval;
+};
+
+#define check_vtype(v, type) ( BUILD_BUG_ON_ZERO(!__builtin_types_compatible_p(typeof(v), type)) + v )
+
+#define OPT_END() { .type = OPTION_END }
+#define OPT_ARGUMENT(l, h) { .type = OPTION_ARGUMENT, .long_name = (l), .help = (h) }
+#define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) }
+#define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h), .defval = (b) }
+#define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h) }
+#define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) }
+#define OPT_SET_UINT(s, l, v, h, i) { .type = OPTION_SET_UINT, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h), .defval = (i) }
+#define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) }
+#define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) }
+#define OPT_UINTEGER(s, l, v, h) { .type = OPTION_UINTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h) }
+#define OPT_LONG(s, l, v, h) { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, long *), .help = (h) }
+#define OPT_U64(s, l, v, h) { .type = OPTION_U64, .short_name = (s), .long_name = (l), .value = check_vtype(v, u64 *), .help = (h) }
+#define OPT_STRING(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h) }
+#define OPT_DATE(s, l, v, h) \
+ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
+#define OPT_CALLBACK(s, l, v, a, h, f) \
+ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f) }
+#define OPT_CALLBACK_NOOPT(s, l, v, a, h, f) \
+ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG }
+#define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \
+ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT }
+#define OPT_CALLBACK_DEFAULT_NOOPT(s, l, v, a, h, f, d) \
+ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l),\
+ .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d,\
+ .flags = PARSE_OPT_LASTARG_DEFAULT | PARSE_OPT_NOARG}
+
+/* parse_options() will filter out the processed options and leave the
+ * non-option arguments in argv[].
+ * Returns the number of arguments left in argv[].
+ */
+extern int parse_options(int argc, const char **argv,
+ const struct option *options,
+ const char * const usagestr[], int flags);
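
Illustrative sketch, not from the patch: a minimal example of how a perf builtin would typically use this API. The command, variables and help strings below are hypothetical; only the OPT_* macros and parse_options() come from this header.

static bool force;
static const char *input_name = "perf.data";
static int verbose;

static const char * const example_usage[] = {
	"perf example [<options>] [<file>...]",
	NULL
};

static const struct option example_options[] = {
	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
	OPT_END()
};

static int cmd_example(int argc, const char **argv)
{
	argc = parse_options(argc, argv, example_options, example_usage, 0);
	/* argv[0..argc-1] now holds only the non-option arguments. */
	return 0;
}

Flags such as PARSE_OPT_STOP_AT_NON_OPTION make parsing stop at the first non-option, which is handy when that argument introduces a sub-command.
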
+
+extern NORETURN void usage_with_options(const char * const *usagestr,
+ const struct option *options);
+
+/*----- incremental advanced APIs -----*/
+
+enum {
+ PARSE_OPT_HELP = -1,
+ PARSE_OPT_DONE,
+ PARSE_OPT_UNKNOWN,
+};
+
+/*
+ * It's okay for the caller to consume argv/argc in the usual way.
+ * Other fields of that structure are private to parse-options and should not
+ * be modified in any way.
+ */
+struct parse_opt_ctx_t {
+ const char **argv;
+ const char **out;
+ int argc, cpidx;
+ const char *opt;
+ int flags;
+};
+
+extern int parse_options_usage(const char * const *usagestr,
+ const struct option *opts);
+
+extern void parse_options_start(struct parse_opt_ctx_t *ctx,
+ int argc, const char **argv, int flags);
+
+extern int parse_options_step(struct parse_opt_ctx_t *ctx,
+ const struct option *options,
+ const char * const usagestr[]);
+
+extern int parse_options_end(struct parse_opt_ctx_t *ctx);
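
Illustrative sketch, not from the patch: the incremental API above used by a hypothetical caller that wants to skip options it does not recognise instead of aborting; subcmd_options and subcmd_usage are assumed to be defined elsewhere.

static int parse_known_options(int argc, const char **argv,
			       const struct option *subcmd_options,
			       const char * const subcmd_usage[])
{
	struct parse_opt_ctx_t ctx;
	int ret;

	parse_options_start(&ctx, argc, argv, 0);
	for (;;) {
		ret = parse_options_step(&ctx, subcmd_options, subcmd_usage);
		if (ret == PARSE_OPT_DONE)
			break;
		if (ret == PARSE_OPT_HELP)
			exit(129);
		/*
		 * PARSE_OPT_UNKNOWN: ctx.argv[0] is the option nobody
		 * claimed.  Consuming argv/argc is allowed, so step over it.
		 */
		ctx.argv++;
		ctx.argc--;
	}
	/* Compact argv and return the number of arguments left in it. */
	return parse_options_end(&ctx);
}
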
+
+
+/*----- some often used options -----*/
+extern int parse_opt_abbrev_cb(const struct option *, const char *, int);
+extern int parse_opt_approxidate_cb(const struct option *, const char *, int);
+extern int parse_opt_verbosity_cb(const struct option *, const char *, int);
+
+#define OPT__VERBOSE(var) OPT_BOOLEAN('v', "verbose", (var), "be verbose")
+#define OPT__QUIET(var) OPT_BOOLEAN('q', "quiet", (var), "be quiet")
+#define OPT__VERBOSITY(var) \
+ { OPTION_CALLBACK, 'v', "verbose", (var), NULL, "be more verbose", \
+ PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 }, \
+ { OPTION_CALLBACK, 'q', "quiet", (var), NULL, "be more quiet", \
+ PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 }
+#define OPT__DRY_RUN(var) OPT_BOOLEAN('n', "dry-run", (var), "dry run")
+#define OPT__ABBREV(var) \
+ { OPTION_CALLBACK, 0, "abbrev", (var), "n", \
+ "use <n> digits to display SHA-1s", \
+ PARSE_OPT_OPTARG, &parse_opt_abbrev_cb, 0 }
+
+extern const char *parse_options_fix_filename(const char *prefix, const char *file);
+
+#endif /* __PERF_PARSE_OPTIONS_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/path.c b/ANDROID_3.4.5/tools/perf/util/path.c
new file mode 100644
index 00000000..bd749771
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/path.c
@@ -0,0 +1,157 @@
+/*
+ * I'm tired of doing "vsnprintf()" etc just to open a
+ * file, so here's a "return static buffer with printf"
+ * interface for paths.
+ *
+ * It's obviously not thread-safe. Sue me. But it's quite
+ * useful for doing things like
+ *
+ * f = open(mkpath("%s/%s.perf", base, name), O_RDONLY);
+ *
+ * which is what it's designed for.
+ */
+#include "cache.h"
+
+static char bad_path[] = "/bad-path/";
+/*
+ * Two hacks:
+ */
+
+static const char *get_perf_dir(void)
+{
+ return ".";
+}
+
+#ifdef NO_STRLCPY
+size_t strlcpy(char *dest, const char *src, size_t size)
+{
+ size_t ret = strlen(src);
+
+ if (size) {
+ size_t len = (ret >= size) ? size - 1 : ret;
+ memcpy(dest, src, len);
+ dest[len] = '\0';
+ }
+ return ret;
+}
+#endif
+
+static char *get_pathname(void)
+{
+ static char pathname_array[4][PATH_MAX];
+ static int idx;
+
+ return pathname_array[3 & ++idx];
+}
+
+static char *cleanup_path(char *path)
+{
+ /* Clean it up */
+ if (!memcmp(path, "./", 2)) {
+ path += 2;
+ while (*path == '/')
+ path++;
+ }
+ return path;
+}
+
+static char *perf_vsnpath(char *buf, size_t n, const char *fmt, va_list args)
+{
+ const char *perf_dir = get_perf_dir();
+ size_t len;
+
+ len = strlen(perf_dir);
+ if (n < len + 1)
+ goto bad;
+ memcpy(buf, perf_dir, len);
+ if (len && !is_dir_sep(perf_dir[len-1]))
+ buf[len++] = '/';
+ len += vsnprintf(buf + len, n - len, fmt, args);
+ if (len >= n)
+ goto bad;
+ return cleanup_path(buf);
+bad:
+ strlcpy(buf, bad_path, n);
+ return buf;
+}
+
+char *perf_pathdup(const char *fmt, ...)
+{
+ char path[PATH_MAX];
+ va_list args;
+ va_start(args, fmt);
+ (void)perf_vsnpath(path, sizeof(path), fmt, args);
+ va_end(args);
+ return xstrdup(path);
+}
+
+char *mkpath(const char *fmt, ...)
+{
+ va_list args;
+ unsigned len;
+ char *pathname = get_pathname();
+
+ va_start(args, fmt);
+ len = vsnprintf(pathname, PATH_MAX, fmt, args);
+ va_end(args);
+ if (len >= PATH_MAX)
+ return bad_path;
+ return cleanup_path(pathname);
+}
+
+char *perf_path(const char *fmt, ...)
+{
+ const char *perf_dir = get_perf_dir();
+ char *pathname = get_pathname();
+ va_list args;
+ unsigned len;
+
+ len = strlen(perf_dir);
+ if (len > PATH_MAX-100)
+ return bad_path;
+ memcpy(pathname, perf_dir, len);
+ if (len && perf_dir[len-1] != '/')
+ pathname[len++] = '/';
+ va_start(args, fmt);
+ len += vsnprintf(pathname + len, PATH_MAX - len, fmt, args);
+ va_end(args);
+ if (len >= PATH_MAX)
+ return bad_path;
+ return cleanup_path(pathname);
+}
+
+/* strip arbitrary amount of directory separators at end of path */
+static inline int chomp_trailing_dir_sep(const char *path, int len)
+{
+ while (len && is_dir_sep(path[len - 1]))
+ len--;
+ return len;
+}
+
+/*
+ * If path ends with suffix (complete path components), returns the
+ * part before suffix (sans trailing directory separators).
+ * Otherwise returns NULL.
+ */
+char *strip_path_suffix(const char *path, const char *suffix)
+{
+ int path_len = strlen(path), suffix_len = strlen(suffix);
+
+ while (suffix_len) {
+ if (!path_len)
+ return NULL;
+
+ if (is_dir_sep(path[path_len - 1])) {
+ if (!is_dir_sep(suffix[suffix_len - 1]))
+ return NULL;
+ path_len = chomp_trailing_dir_sep(path, path_len);
+ suffix_len = chomp_trailing_dir_sep(suffix, suffix_len);
+ }
+ else if (path[--path_len] != suffix[--suffix_len])
+ return NULL;
+ }
+
+ if (path_len && !is_dir_sep(path[path_len - 1]))
+ return NULL;
+ return strndup(path, chomp_trailing_dir_sep(path, path_len));
+}
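
Illustrative sketch, not from the patch: what the helpers above return for a few inputs. The values follow from the code; the main() wrapper exists only for demonstration.

#include <stdio.h>

int main(void)
{
	/* mkpath() formats into one of four rotating static buffers;
	 * cleanup_path() strips a leading "./". */
	printf("%s\n", mkpath("%s/%s.perf", "./base", "name"));
	/* prints: base/name.perf */

	/* strip_path_suffix() only strips complete trailing components. */
	printf("%s\n", strip_path_suffix("/usr/libexec/perf-core", "perf-core"));
	/* prints: /usr/libexec */

	if (!strip_path_suffix("/usr/libexec/perf-core", "core"))
		printf("\"core\" is not a complete trailing component\n");

	return 0;
}
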
diff --git a/ANDROID_3.4.5/tools/perf/util/pmu.c b/ANDROID_3.4.5/tools/perf/util/pmu.c
new file mode 100644
index 00000000..cb08a118
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/pmu.c
@@ -0,0 +1,469 @@
+
+#include <linux/list.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <dirent.h>
+#include "sysfs.h"
+#include "util.h"
+#include "pmu.h"
+#include "parse-events.h"
+
+int perf_pmu_parse(struct list_head *list, char *name);
+extern FILE *perf_pmu_in;
+
+static LIST_HEAD(pmus);
+
+/*
+ * Parse & process all the sysfs attributes located under
+ * the directory specified in the 'dir' parameter.
+ */
+static int pmu_format_parse(char *dir, struct list_head *head)
+{
+ struct dirent *evt_ent;
+ DIR *format_dir;
+ int ret = 0;
+
+ format_dir = opendir(dir);
+ if (!format_dir)
+ return -EINVAL;
+
+ while (!ret && (evt_ent = readdir(format_dir))) {
+ char path[PATH_MAX];
+ char *name = evt_ent->d_name;
+ FILE *file;
+
+ if (!strcmp(name, ".") || !strcmp(name, ".."))
+ continue;
+
+ snprintf(path, PATH_MAX, "%s/%s", dir, name);
+
+ ret = -EINVAL;
+ file = fopen(path, "r");
+ if (!file)
+ break;
+
+ perf_pmu_in = file;
+ ret = perf_pmu_parse(head, name);
+ fclose(file);
+ }
+
+ closedir(format_dir);
+ return ret;
+}
+
+/*
+ * Reading/parsing the default pmu format definition, which should be
+ * located at:
+ * /sys/bus/event_source/devices/<dev>/format as sysfs group attributes.
+ */
+static int pmu_format(char *name, struct list_head *format)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ const char *sysfs;
+
+ sysfs = sysfs_find_mountpoint();
+ if (!sysfs)
+ return -1;
+
+ snprintf(path, PATH_MAX,
+ "%s/bus/event_source/devices/%s/format", sysfs, name);
+
+ if (stat(path, &st) < 0)
+ return -1;
+
+ if (pmu_format_parse(path, format))
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Reading/parsing the default pmu type value, which should be
+ * located at:
+ * /sys/bus/event_source/devices/<dev>/type as sysfs attribute.
+ */
+static int pmu_type(char *name, __u32 *type)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ const char *sysfs;
+ FILE *file;
+ int ret = 0;
+
+ sysfs = sysfs_find_mountpoint();
+ if (!sysfs)
+ return -1;
+
+ snprintf(path, PATH_MAX,
+ "%s/bus/event_source/devices/%s/type", sysfs, name);
+
+ if (stat(path, &st) < 0)
+ return -1;
+
+ file = fopen(path, "r");
+ if (!file)
+ return -EINVAL;
+
+ if (1 != fscanf(file, "%u", type))
+ ret = -1;
+
+ fclose(file);
+ return ret;
+}
+
+static struct perf_pmu *pmu_lookup(char *name)
+{
+ struct perf_pmu *pmu;
+ LIST_HEAD(format);
+ __u32 type;
+
+ /*
+ * The pmu data we store & need consists of the pmu
+ * type value and format definitions. Load both right
+ * now.
+ */
+ if (pmu_format(name, &format))
+ return NULL;
+
+ if (pmu_type(name, &type))
+ return NULL;
+
+ pmu = zalloc(sizeof(*pmu));
+ if (!pmu)
+ return NULL;
+
+ INIT_LIST_HEAD(&pmu->format);
+ list_splice(&format, &pmu->format);
+ pmu->name = strdup(name);
+ pmu->type = type;
+ return pmu;
+}
+
+static struct perf_pmu *pmu_find(char *name)
+{
+ struct perf_pmu *pmu;
+
+ list_for_each_entry(pmu, &pmus, list)
+ if (!strcmp(pmu->name, name))
+ return pmu;
+
+ return NULL;
+}
+
+struct perf_pmu *perf_pmu__find(char *name)
+{
+ struct perf_pmu *pmu;
+
+ /*
+ * Once a PMU is loaded it stays in the list,
+ * so we avoid reading/parsing the pmu format
+ * definitions multiple times.
+ */
+ pmu = pmu_find(name);
+ if (pmu)
+ return pmu;
+
+ return pmu_lookup(name);
+}
+
+static struct perf_pmu__format*
+pmu_find_format(struct list_head *formats, char *name)
+{
+ struct perf_pmu__format *format;
+
+ list_for_each_entry(format, formats, list)
+ if (!strcmp(format->name, name))
+ return format;
+
+ return NULL;
+}
+
+/*
+ * Returns value based on the format definition (format parameter)
+ * and unformatted value (value parameter).
+ *
+ * TODO maybe optimize a little ;)
+ */
+static __u64 pmu_format_value(unsigned long *format, __u64 value)
+{
+ unsigned long fbit, vbit;
+ __u64 v = 0;
+
+ for (fbit = 0, vbit = 0; fbit < PERF_PMU_FORMAT_BITS; fbit++) {
+
+ if (!test_bit(fbit, format))
+ continue;
+
+ if (!(value & (1llu << vbit++)))
+ continue;
+
+ v |= (1llu << fbit);
+ }
+
+ return v;
+}
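
Illustrative worked example, not from the patch: take the "config:0-1,62-63" format that perf_pmu__test() exercises at the end of this file.

	format bitmap     : bits {0, 1, 62, 63}
	unformatted value : 0xf = value bits {0, 1, 2, 3}
	scattering        : value bit 0 -> config bit 0
	                    value bit 1 -> config bit 1
	                    value bit 2 -> config bit 62
	                    value bit 3 -> config bit 63
	result            : 0xc000000000000003

This is exactly the krava01 contribution to the expected attr.config value 0xc00000000002a823 checked by perf_pmu__test() below.
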
+
+/*
+ * Set up one of the config[12] attr members based on the
+ * user input data - the term parameter.
+ */
+static int pmu_config_term(struct list_head *formats,
+ struct perf_event_attr *attr,
+ struct parse_events__term *term)
+{
+ struct perf_pmu__format *format;
+ __u64 *vp;
+
+ /*
+ * Only hardcoded and numerical terms are supported.
+ * Hardcoded terms should already be in place, so nothing
+ * needs to be done for them.
+ */
+ if (parse_events__is_hardcoded_term(term))
+ return 0;
+
+ if (term->type != PARSE_EVENTS__TERM_TYPE_NUM)
+ return -EINVAL;
+
+ format = pmu_find_format(formats, term->config);
+ if (!format)
+ return -EINVAL;
+
+ switch (format->value) {
+ case PERF_PMU_FORMAT_VALUE_CONFIG:
+ vp = &attr->config;
+ break;
+ case PERF_PMU_FORMAT_VALUE_CONFIG1:
+ vp = &attr->config1;
+ break;
+ case PERF_PMU_FORMAT_VALUE_CONFIG2:
+ vp = &attr->config2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *vp |= pmu_format_value(format->bits, term->val.num);
+ return 0;
+}
+
+static int pmu_config(struct list_head *formats, struct perf_event_attr *attr,
+ struct list_head *head_terms)
+{
+ struct parse_events__term *term, *h;
+
+ list_for_each_entry_safe(term, h, head_terms, list)
+ if (pmu_config_term(formats, attr, term))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Configures the event's 'attr' parameter based on:
+ * 1) the user's input - specified in the head_terms parameter
+ * 2) the pmu format definitions - specified by the pmu parameter
+ */
+int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
+ struct list_head *head_terms)
+{
+ attr->type = pmu->type;
+ return pmu_config(&pmu->format, attr, head_terms);
+}
+
+int perf_pmu__new_format(struct list_head *list, char *name,
+ int config, unsigned long *bits)
+{
+ struct perf_pmu__format *format;
+
+ format = zalloc(sizeof(*format));
+ if (!format)
+ return -ENOMEM;
+
+ format->name = strdup(name);
+ format->value = config;
+ memcpy(format->bits, bits, sizeof(format->bits));
+
+ list_add_tail(&format->list, list);
+ return 0;
+}
+
+void perf_pmu__set_format(unsigned long *bits, long from, long to)
+{
+ long b;
+
+ if (!to)
+ to = from;
+
+ memset(bits, 0, BITS_TO_LONGS(PERF_PMU_FORMAT_BITS) * sizeof(*bits));
+ for (b = from; b <= to; b++)
+ set_bit(b, bits);
+}
+
+/* Simulated format definitions. */
+static struct test_format {
+ const char *name;
+ const char *value;
+} test_formats[] = {
+ { "krava01", "config:0-1,62-63\n", },
+ { "krava02", "config:10-17\n", },
+ { "krava03", "config:5\n", },
+ { "krava11", "config1:0,2,4,6,8,20-28\n", },
+ { "krava12", "config1:63\n", },
+ { "krava13", "config1:45-47\n", },
+ { "krava21", "config2:0-3,10-13,20-23,30-33,40-43,50-53,60-63\n", },
+ { "krava22", "config2:8,18,48,58\n", },
+ { "krava23", "config2:28-29,38\n", },
+};
+
+#define TEST_FORMATS_CNT (sizeof(test_formats) / sizeof(struct test_format))
+
+/* Simulated users input. */
+static struct parse_events__term test_terms[] = {
+ {
+ .config = (char *) "krava01",
+ .val.num = 15,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava02",
+ .val.num = 170,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava03",
+ .val.num = 1,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava11",
+ .val.num = 27,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava12",
+ .val.num = 1,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava13",
+ .val.num = 2,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava21",
+ .val.num = 119,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava22",
+ .val.num = 11,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+ {
+ .config = (char *) "krava23",
+ .val.num = 2,
+ .type = PARSE_EVENTS__TERM_TYPE_NUM,
+ },
+};
+#define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term))
+
+/*
+ * Prepare format directory data, exported by kernel
+ * at /sys/bus/event_source/devices/<dev>/format.
+ */
+static char *test_format_dir_get(void)
+{
+ static char dir[PATH_MAX];
+ unsigned int i;
+
+ snprintf(dir, PATH_MAX, "/tmp/perf-pmu-test-format-XXXXXX");
+ if (!mkdtemp(dir))
+ return NULL;
+
+ for (i = 0; i < TEST_FORMATS_CNT; i++) {
+ static char name[PATH_MAX];
+ struct test_format *format = &test_formats[i];
+ FILE *file;
+
+ snprintf(name, PATH_MAX, "%s/%s", dir, format->name);
+
+ file = fopen(name, "w");
+ if (!file)
+ return NULL;
+
+ if (1 != fwrite(format->value, strlen(format->value), 1, file))
+ break;
+
+ fclose(file);
+ }
+
+ return dir;
+}
+
+/* Cleanup format directory. */
+static int test_format_dir_put(char *dir)
+{
+ char buf[PATH_MAX];
+ snprintf(buf, PATH_MAX, "rm -f %s/*\n", dir);
+ if (system(buf))
+ return -1;
+
+ snprintf(buf, PATH_MAX, "rmdir %s\n", dir);
+ return system(buf);
+}
+
+static struct list_head *test_terms_list(void)
+{
+ static LIST_HEAD(terms);
+ unsigned int i;
+
+ for (i = 0; i < TERMS_CNT; i++)
+ list_add_tail(&test_terms[i].list, &terms);
+
+ return &terms;
+}
+
+#undef TERMS_CNT
+
+int perf_pmu__test(void)
+{
+ char *format = test_format_dir_get();
+ LIST_HEAD(formats);
+ struct list_head *terms = test_terms_list();
+ int ret;
+
+ if (!format)
+ return -EINVAL;
+
+ do {
+ struct perf_event_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+
+ ret = pmu_format_parse(format, &formats);
+ if (ret)
+ break;
+
+ ret = pmu_config(&formats, &attr, terms);
+ if (ret)
+ break;
+
+ ret = -EINVAL;
+
+ if (attr.config != 0xc00000000002a823)
+ break;
+ if (attr.config1 != 0x8000400000000145)
+ break;
+ if (attr.config2 != 0x0400000020041d07)
+ break;
+
+ ret = 0;
+ } while (0);
+
+ test_format_dir_put(format);
+ return ret;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/pmu.h b/ANDROID_3.4.5/tools/perf/util/pmu.h
new file mode 100644
index 00000000..68c0db96
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/pmu.h
@@ -0,0 +1,41 @@
+#ifndef __PMU_H
+#define __PMU_H
+
+#include <linux/bitops.h>
+#include "../../../include/linux/perf_event.h"
+
+enum {
+ PERF_PMU_FORMAT_VALUE_CONFIG,
+ PERF_PMU_FORMAT_VALUE_CONFIG1,
+ PERF_PMU_FORMAT_VALUE_CONFIG2,
+};
+
+#define PERF_PMU_FORMAT_BITS 64
+
+struct perf_pmu__format {
+ char *name;
+ int value;
+ DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
+ struct list_head list;
+};
+
+struct perf_pmu {
+ char *name;
+ __u32 type;
+ struct list_head format;
+ struct list_head list;
+};
+
+struct perf_pmu *perf_pmu__find(char *name);
+int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
+ struct list_head *head_terms);
+
+int perf_pmu_wrap(void);
+void perf_pmu_error(struct list_head *list, char *name, char const *msg);
+
+int perf_pmu__new_format(struct list_head *list, char *name,
+ int config, unsigned long *bits);
+void perf_pmu__set_format(unsigned long *bits, long from, long to);
+
+int perf_pmu__test(void);
+#endif /* __PMU_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/pmu.l b/ANDROID_3.4.5/tools/perf/util/pmu.l
new file mode 100644
index 00000000..a15d9fbd
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/pmu.l
@@ -0,0 +1,43 @@
+%option prefix="perf_pmu_"
+
+%{
+#include <stdlib.h>
+#include <linux/bitops.h>
+#include "pmu.h"
+#include "pmu-bison.h"
+
+static int value(int base)
+{
+ long num;
+
+ errno = 0;
+ num = strtoul(perf_pmu_text, NULL, base);
+ if (errno)
+ return PP_ERROR;
+
+ perf_pmu_lval.num = num;
+ return PP_VALUE;
+}
+
+%}
+
+num_dec [0-9]+
+
+%%
+
+{num_dec} { return value(10); }
+config { return PP_CONFIG; }
+config1 { return PP_CONFIG1; }
+config2 { return PP_CONFIG2; }
+- { return '-'; }
+: { return ':'; }
+, { return ','; }
+. { ; }
+\n { ; }
+
+%%
+
+int perf_pmu_wrap(void)
+{
+ return 1;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/pmu.y b/ANDROID_3.4.5/tools/perf/util/pmu.y
new file mode 100644
index 00000000..20ea77e9
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/pmu.y
@@ -0,0 +1,93 @@
+
+%name-prefix "perf_pmu_"
+%parse-param {struct list_head *format}
+%parse-param {char *name}
+
+%{
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/bitmap.h>
+#include <string.h>
+#include "pmu.h"
+
+extern int perf_pmu_lex (void);
+
+#define ABORT_ON(val) \
+do { \
+ if (val) \
+ YYABORT; \
+} while (0)
+
+%}
+
+%token PP_CONFIG PP_CONFIG1 PP_CONFIG2
+%token PP_VALUE PP_ERROR
+%type <num> PP_VALUE
+%type <bits> bit_term
+%type <bits> bits
+
+%union
+{
+ unsigned long num;
+ DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
+}
+
+%%
+
+format:
+format format_term
+|
+format_term
+
+format_term:
+PP_CONFIG ':' bits
+{
+ ABORT_ON(perf_pmu__new_format(format, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG,
+ $3));
+}
+|
+PP_CONFIG1 ':' bits
+{
+ ABORT_ON(perf_pmu__new_format(format, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG1,
+ $3));
+}
+|
+PP_CONFIG2 ':' bits
+{
+ ABORT_ON(perf_pmu__new_format(format, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG2,
+ $3));
+}
+
+bits:
+bits ',' bit_term
+{
+ bitmap_or($$, $1, $3, 64);
+}
+|
+bit_term
+{
+ memcpy($$, $1, sizeof($1));
+}
+
+bit_term:
+PP_VALUE '-' PP_VALUE
+{
+ perf_pmu__set_format($$, $1, $3);
+}
+|
+PP_VALUE
+{
+ perf_pmu__set_format($$, $1, 0);
+}
+
+%%
+
+void perf_pmu_error(struct list_head *list __used,
+ char *name __used,
+ char const *msg __used)
+{
+}
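
Illustrative walkthrough, not from the patch: for a sysfs format file named krava11 containing "config1:0,2,4,6,8,20-28" (one of the simulated formats in pmu.c), the lexer emits PP_CONFIG1, ':' and a comma-separated sequence of bit_terms. Each bit_term fills a bitmap via perf_pmu__set_format(), the 'bits' rule ORs them together with bitmap_or(), and the format_term action calls perf_pmu__new_format(), so the resulting perf_pmu__format entry is named "krava11", carries PERF_PMU_FORMAT_VALUE_CONFIG1 and has bits {0, 2, 4, 6, 8, 20-28} set.
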
diff --git a/ANDROID_3.4.5/tools/perf/util/probe-event.c b/ANDROID_3.4.5/tools/perf/util/probe-event.c
new file mode 100644
index 00000000..8a8ee64e
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/probe-event.c
@@ -0,0 +1,2097 @@
+/*
+ * probe-event.c : perf-probe definition to probe_events format converter
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <sys/utsname.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <limits.h>
+#include <elf.h>
+
+#include "util.h"
+#include "event.h"
+#include "strlist.h"
+#include "debug.h"
+#include "cache.h"
+#include "color.h"
+#include "symbol.h"
+#include "thread.h"
+#include "debugfs.h"
+#include "trace-event.h" /* For __unused */
+#include "probe-event.h"
+#include "probe-finder.h"
+
+#define MAX_CMDLEN 256
+#define MAX_PROBE_ARGS 128
+#define PERFPROBE_GROUP "probe"
+
+bool probe_event_dry_run; /* Dry run flag */
+
+#define semantic_error(msg ...) pr_err("Semantic error :" msg)
+
+/* If there is no space to write, returns -E2BIG. */
+static int e_snprintf(char *str, size_t size, const char *format, ...)
+ __attribute__((format(printf, 3, 4)));
+
+static int e_snprintf(char *str, size_t size, const char *format, ...)
+{
+ int ret;
+ va_list ap;
+ va_start(ap, format);
+ ret = vsnprintf(str, size, format, ap);
+ va_end(ap);
+ if (ret >= (int)size)
+ ret = -E2BIG;
+ return ret;
+}
+
+static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
+static struct machine machine;
+
+/* Initialize symbol maps and path of vmlinux/modules */
+static int init_vmlinux(void)
+{
+ int ret;
+
+ symbol_conf.sort_by_name = true;
+ if (symbol_conf.vmlinux_name == NULL)
+ symbol_conf.try_vmlinux_path = true;
+ else
+ pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
+ ret = symbol__init();
+ if (ret < 0) {
+ pr_debug("Failed to init symbol map.\n");
+ goto out;
+ }
+
+ ret = machine__init(&machine, "", HOST_KERNEL_ID);
+ if (ret < 0)
+ goto out;
+
+ if (machine__create_kernel_maps(&machine) < 0) {
+ pr_debug("machine__create_kernel_maps() failed.\n");
+ goto out;
+ }
+out:
+ if (ret < 0)
+ pr_warning("Failed to init vmlinux path.\n");
+ return ret;
+}
+
+static struct symbol *__find_kernel_function_by_name(const char *name,
+ struct map **mapp)
+{
+ return machine__find_kernel_function_by_name(&machine, name, mapp,
+ NULL);
+}
+
+static struct map *kernel_get_module_map(const char *module)
+{
+ struct rb_node *nd;
+ struct map_groups *grp = &machine.kmaps;
+
+ /* A file path -- this is an offline module */
+ if (module && strchr(module, '/'))
+ return machine__new_module(&machine, 0, module);
+
+ if (!module)
+ module = "kernel";
+
+ for (nd = rb_first(&grp->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+ if (strncmp(pos->dso->short_name + 1, module,
+ pos->dso->short_name_len - 2) == 0) {
+ return pos;
+ }
+ }
+ return NULL;
+}
+
+static struct dso *kernel_get_module_dso(const char *module)
+{
+ struct dso *dso;
+ struct map *map;
+ const char *vmlinux_name;
+
+ if (module) {
+ list_for_each_entry(dso, &machine.kernel_dsos, node) {
+ if (strncmp(dso->short_name + 1, module,
+ dso->short_name_len - 2) == 0)
+ goto found;
+ }
+ pr_debug("Failed to find module %s.\n", module);
+ return NULL;
+ }
+
+ map = machine.vmlinux_maps[MAP__FUNCTION];
+ dso = map->dso;
+
+ vmlinux_name = symbol_conf.vmlinux_name;
+ if (vmlinux_name) {
+ if (dso__load_vmlinux(dso, map, vmlinux_name, NULL) <= 0)
+ return NULL;
+ } else {
+ if (dso__load_vmlinux_path(dso, map, NULL) <= 0) {
+ pr_debug("Failed to load kernel map.\n");
+ return NULL;
+ }
+ }
+found:
+ return dso;
+}
+
+const char *kernel_get_module_path(const char *module)
+{
+ struct dso *dso = kernel_get_module_dso(module);
+ return (dso) ? dso->long_name : NULL;
+}
+
+#ifdef DWARF_SUPPORT
+/* Open new debuginfo of given module */
+static struct debuginfo *open_debuginfo(const char *module)
+{
+ const char *path;
+
+ /* A file path -- this is an offline module */
+ if (module && strchr(module, '/'))
+ path = module;
+ else {
+ path = kernel_get_module_path(module);
+
+ if (!path) {
+ pr_err("Failed to find path of %s module.\n",
+ module ?: "kernel");
+ return NULL;
+ }
+ }
+ return debuginfo__new(path);
+}
+
+/*
+ * Convert trace point to probe point with debuginfo
+ * Currently only handles kprobes.
+ */
+static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
+ struct perf_probe_point *pp)
+{
+ struct symbol *sym;
+ struct map *map;
+ u64 addr;
+ int ret = -ENOENT;
+ struct debuginfo *dinfo;
+
+ sym = __find_kernel_function_by_name(tp->symbol, &map);
+ if (sym) {
+ addr = map->unmap_ip(map, sym->start + tp->offset);
+ pr_debug("try to find %s+%ld@%" PRIx64 "\n", tp->symbol,
+ tp->offset, addr);
+
+ dinfo = debuginfo__new_online_kernel(addr);
+ if (dinfo) {
+ ret = debuginfo__find_probe_point(dinfo,
+ (unsigned long)addr, pp);
+ debuginfo__delete(dinfo);
+ } else {
+ pr_debug("Failed to open debuginfo at 0x%" PRIx64 "\n",
+ addr);
+ ret = -ENOENT;
+ }
+ }
+ if (ret <= 0) {
+ pr_debug("Failed to find corresponding probes from "
+ "debuginfo. Use kprobe event information.\n");
+ pp->function = strdup(tp->symbol);
+ if (pp->function == NULL)
+ return -ENOMEM;
+ pp->offset = tp->offset;
+ }
+ pp->retprobe = tp->retprobe;
+
+ return 0;
+}
+
+static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
+ int ntevs, const char *module)
+{
+ int i, ret = 0;
+ char *tmp;
+
+ if (!module)
+ return 0;
+
+ tmp = strrchr(module, '/');
+ if (tmp) {
+ /* This is a module path -- get the module name */
+ module = strdup(tmp + 1);
+ if (!module)
+ return -ENOMEM;
+ tmp = strchr(module, '.');
+ if (tmp)
+ *tmp = '\0';
+ tmp = (char *)module; /* For free() */
+ }
+
+ for (i = 0; i < ntevs; i++) {
+ tevs[i].point.module = strdup(module);
+ if (!tevs[i].point.module) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ if (tmp)
+ free(tmp);
+
+ return ret;
+}
+
+/* Try to find perf_probe_event with debuginfo */
+static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
+ struct probe_trace_event **tevs,
+ int max_tevs, const char *target)
+{
+ bool need_dwarf = perf_probe_event_need_dwarf(pev);
+ struct debuginfo *dinfo = open_debuginfo(target);
+ int ntevs, ret = 0;
+
+ if (!dinfo) {
+ if (need_dwarf) {
+ pr_warning("Failed to open debuginfo file.\n");
+ return -ENOENT;
+ }
+ pr_debug("Could not open debuginfo. Try to use symbols.\n");
+ return 0;
+ }
+
+ /* Searching trace events corresponding to a probe event */
+ ntevs = debuginfo__find_trace_events(dinfo, pev, tevs, max_tevs);
+
+ debuginfo__delete(dinfo);
+
+ if (ntevs > 0) { /* Succeeded to find trace events */
+ pr_debug("find %d probe_trace_events.\n", ntevs);
+ if (target)
+ ret = add_module_to_probe_trace_events(*tevs, ntevs,
+ target);
+ return ret < 0 ? ret : ntevs;
+ }
+
+ if (ntevs == 0) { /* No error but failed to find probe point. */
+ pr_warning("Probe point '%s' not found.\n",
+ synthesize_perf_probe_point(&pev->point));
+ return -ENOENT;
+ }
+ /* Error path : ntevs < 0 */
+ pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
+ if (ntevs == -EBADF) {
+ pr_warning("Warning: No dwarf info found in the vmlinux - "
+ "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
+ if (!need_dwarf) {
+ pr_debug("Trying to use symbols.\n");
+ return 0;
+ }
+ }
+ return ntevs;
+}
+
+/*
+ * Find a src file from a DWARF tag path. Prepend optional source path prefix
+ * and chop off leading directories that do not exist. Result is passed back as
+ * a newly allocated path on success.
+ * Return 0 if file was found and readable, -errno otherwise.
+ */
+static int get_real_path(const char *raw_path, const char *comp_dir,
+ char **new_path)
+{
+ const char *prefix = symbol_conf.source_prefix;
+
+ if (!prefix) {
+ if (raw_path[0] != '/' && comp_dir)
+ /* If not an absolute path, try to use comp_dir */
+ prefix = comp_dir;
+ else {
+ if (access(raw_path, R_OK) == 0) {
+ *new_path = strdup(raw_path);
+ return 0;
+ } else
+ return -errno;
+ }
+ }
+
+ *new_path = malloc((strlen(prefix) + strlen(raw_path) + 2));
+ if (!*new_path)
+ return -ENOMEM;
+
+ for (;;) {
+ sprintf(*new_path, "%s/%s", prefix, raw_path);
+
+ if (access(*new_path, R_OK) == 0)
+ return 0;
+
+ if (!symbol_conf.source_prefix)
+ /* In case of searching comp_dir, don't retry */
+ return -errno;
+
+ switch (errno) {
+ case ENAMETOOLONG:
+ case ENOENT:
+ case EROFS:
+ case EFAULT:
+ raw_path = strchr(++raw_path, '/');
+ if (!raw_path) {
+ free(*new_path);
+ *new_path = NULL;
+ return -ENOENT;
+ }
+ continue;
+
+ default:
+ free(*new_path);
+ *new_path = NULL;
+ return -errno;
+ }
+ }
+}
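
Illustrative sketch, not from the patch: the calling convention for get_real_path(); the raw path and comp_dir below are hypothetical. When symbol_conf.source_prefix is set and the file is missing, the loop above retries with the leading component of raw_path chopped off (e.g. "kernel/fork.c", then "/fork.c") and gives up with -ENOENT once no '/' remains.

static void resolve_source_example(void)
{
	char *real_path;

	/* hypothetical inputs: raw path and comp_dir taken from DWARF */
	if (get_real_path("kernel/fork.c", "/usr/src/linux", &real_path) == 0) {
		pr_debug("source resolved to %s\n", real_path);
		free(real_path);
	}
}
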
+
+#define LINEBUF_SIZE 256
+#define NR_ADDITIONAL_LINES 2
+
+static int __show_one_line(FILE *fp, int l, bool skip, bool show_num)
+{
+ char buf[LINEBUF_SIZE];
+ const char *color = show_num ? "" : PERF_COLOR_BLUE;
+ const char *prefix = NULL;
+
+ do {
+ if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
+ goto error;
+ if (skip)
+ continue;
+ if (!prefix) {
+ prefix = show_num ? "%7d " : " ";
+ color_fprintf(stdout, color, prefix, l);
+ }
+ color_fprintf(stdout, color, "%s", buf);
+
+ } while (strchr(buf, '\n') == NULL);
+
+ return 1;
+error:
+ if (ferror(fp)) {
+ pr_warning("File read error: %s\n", strerror(errno));
+ return -1;
+ }
+ return 0;
+}
+
+static int _show_one_line(FILE *fp, int l, bool skip, bool show_num)
+{
+ int rv = __show_one_line(fp, l, skip, show_num);
+ if (rv == 0) {
+ pr_warning("Source file is shorter than expected.\n");
+ rv = -1;
+ }
+ return rv;
+}
+
+#define show_one_line_with_num(f,l) _show_one_line(f,l,false,true)
+#define show_one_line(f,l) _show_one_line(f,l,false,false)
+#define skip_one_line(f,l) _show_one_line(f,l,true,false)
+#define show_one_line_or_eof(f,l) __show_one_line(f,l,false,false)
+
+/*
+ * Showing a line range always requires debuginfo to find the source file
+ * and line number.
+ */
+int show_line_range(struct line_range *lr, const char *module)
+{
+ int l = 1;
+ struct line_node *ln;
+ struct debuginfo *dinfo;
+ FILE *fp;
+ int ret;
+ char *tmp;
+
+ /* Search a line range */
+ ret = init_vmlinux();
+ if (ret < 0)
+ return ret;
+
+ dinfo = open_debuginfo(module);
+ if (!dinfo) {
+ pr_warning("Failed to open debuginfo file.\n");
+ return -ENOENT;
+ }
+
+ ret = debuginfo__find_line_range(dinfo, lr);
+ debuginfo__delete(dinfo);
+ if (ret == 0) {
+ pr_warning("Specified source line is not found.\n");
+ return -ENOENT;
+ } else if (ret < 0) {
+ pr_warning("Debuginfo analysis failed. (%d)\n", ret);
+ return ret;
+ }
+
+ /* Convert source file path */
+ tmp = lr->path;
+ ret = get_real_path(tmp, lr->comp_dir, &lr->path);
+ free(tmp); /* Free old path */
+ if (ret < 0) {
+ pr_warning("Failed to find source file. (%d)\n", ret);
+ return ret;
+ }
+
+ setup_pager();
+
+ if (lr->function)
+ fprintf(stdout, "<%s@%s:%d>\n", lr->function, lr->path,
+ lr->start - lr->offset);
+ else
+ fprintf(stdout, "<%s:%d>\n", lr->path, lr->start);
+
+ fp = fopen(lr->path, "r");
+ if (fp == NULL) {
+ pr_warning("Failed to open %s: %s\n", lr->path,
+ strerror(errno));
+ return -errno;
+ }
+ /* Skip to starting line number */
+ while (l < lr->start) {
+ ret = skip_one_line(fp, l++);
+ if (ret < 0)
+ goto end;
+ }
+
+ list_for_each_entry(ln, &lr->line_list, list) {
+ for (; ln->line > l; l++) {
+ ret = show_one_line(fp, l - lr->offset);
+ if (ret < 0)
+ goto end;
+ }
+ ret = show_one_line_with_num(fp, l++ - lr->offset);
+ if (ret < 0)
+ goto end;
+ }
+
+ if (lr->end == INT_MAX)
+ lr->end = l + NR_ADDITIONAL_LINES;
+ while (l <= lr->end) {
+ ret = show_one_line_or_eof(fp, l++ - lr->offset);
+ if (ret <= 0)
+ break;
+ }
+end:
+ fclose(fp);
+ return ret;
+}
+
+static int show_available_vars_at(struct debuginfo *dinfo,
+ struct perf_probe_event *pev,
+ int max_vls, struct strfilter *_filter,
+ bool externs)
+{
+ char *buf;
+ int ret, i, nvars;
+ struct str_node *node;
+ struct variable_list *vls = NULL, *vl;
+ const char *var;
+
+ buf = synthesize_perf_probe_point(&pev->point);
+ if (!buf)
+ return -EINVAL;
+ pr_debug("Searching variables at %s\n", buf);
+
+ ret = debuginfo__find_available_vars_at(dinfo, pev, &vls,
+ max_vls, externs);
+ if (ret <= 0) {
+ pr_err("Failed to find variables at %s (%d)\n", buf, ret);
+ goto end;
+ }
+ /* Some variables are found */
+ fprintf(stdout, "Available variables at %s\n", buf);
+ for (i = 0; i < ret; i++) {
+ vl = &vls[i];
+ /*
+ * A probe point might be converted to
+ * several trace points.
+ */
+ fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol,
+ vl->point.offset);
+ free(vl->point.symbol);
+ nvars = 0;
+ if (vl->vars) {
+ strlist__for_each(node, vl->vars) {
+ var = strchr(node->s, '\t') + 1;
+ if (strfilter__compare(_filter, var)) {
+ fprintf(stdout, "\t\t%s\n", node->s);
+ nvars++;
+ }
+ }
+ strlist__delete(vl->vars);
+ }
+ if (nvars == 0)
+ fprintf(stdout, "\t\t(No matched variables)\n");
+ }
+ free(vls);
+end:
+ free(buf);
+ return ret;
+}
+
+/* Show available variables on given probe point */
+int show_available_vars(struct perf_probe_event *pevs, int npevs,
+ int max_vls, const char *module,
+ struct strfilter *_filter, bool externs)
+{
+ int i, ret = 0;
+ struct debuginfo *dinfo;
+
+ ret = init_vmlinux();
+ if (ret < 0)
+ return ret;
+
+ dinfo = open_debuginfo(module);
+ if (!dinfo) {
+ pr_warning("Failed to open debuginfo file.\n");
+ return -ENOENT;
+ }
+
+ setup_pager();
+
+ for (i = 0; i < npevs && ret >= 0; i++)
+ ret = show_available_vars_at(dinfo, &pevs[i], max_vls, _filter,
+ externs);
+
+ debuginfo__delete(dinfo);
+ return ret;
+}
+
+#else /* !DWARF_SUPPORT */
+
+static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
+ struct perf_probe_point *pp)
+{
+ struct symbol *sym;
+
+ sym = __find_kernel_function_by_name(tp->symbol, NULL);
+ if (!sym) {
+ pr_err("Failed to find symbol %s in kernel.\n", tp->symbol);
+ return -ENOENT;
+ }
+ pp->function = strdup(tp->symbol);
+ if (pp->function == NULL)
+ return -ENOMEM;
+ pp->offset = tp->offset;
+ pp->retprobe = tp->retprobe;
+
+ return 0;
+}
+
+static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
+ struct probe_trace_event **tevs __unused,
+ int max_tevs __unused, const char *mod __unused)
+{
+ if (perf_probe_event_need_dwarf(pev)) {
+ pr_warning("Debuginfo-analysis is not supported.\n");
+ return -ENOSYS;
+ }
+ return 0;
+}
+
+int show_line_range(struct line_range *lr __unused, const char *module __unused)
+{
+ pr_warning("Debuginfo-analysis is not supported.\n");
+ return -ENOSYS;
+}
+
+int show_available_vars(struct perf_probe_event *pevs __unused,
+ int npevs __unused, int max_vls __unused,
+ const char *module __unused,
+ struct strfilter *filter __unused,
+ bool externs __unused)
+{
+ pr_warning("Debuginfo-analysis is not supported.\n");
+ return -ENOSYS;
+}
+#endif
+
+static int parse_line_num(char **ptr, int *val, const char *what)
+{
+ const char *start = *ptr;
+
+ errno = 0;
+ *val = strtol(*ptr, ptr, 0);
+ if (errno || *ptr == start) {
+ semantic_error("'%s' is not a valid number.\n", what);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Stuff 'lr' according to the line range described by 'arg'.
+ * The line range syntax is described by:
+ *
+ * SRC[:SLN[+NUM|-ELN]]
+ * FNC[@SRC][:SLN[+NUM|-ELN]]
+ */
+int parse_line_range_desc(const char *arg, struct line_range *lr)
+{
+ char *range, *file, *name = strdup(arg);
+ int err;
+
+ if (!name)
+ return -ENOMEM;
+
+ lr->start = 0;
+ lr->end = INT_MAX;
+
+ range = strchr(name, ':');
+ if (range) {
+ *range++ = '\0';
+
+ err = parse_line_num(&range, &lr->start, "start line");
+ if (err)
+ goto err;
+
+ if (*range == '+' || *range == '-') {
+ const char c = *range++;
+
+ err = parse_line_num(&range, &lr->end, "end line");
+ if (err)
+ goto err;
+
+ if (c == '+') {
+ lr->end += lr->start;
+ /*
+ * Adjust the number of lines here.
+ * If the number of lines == 1, the
+ * end line should be equal to the
+ * start line.
+ */
+ lr->end--;
+ }
+ }
+
+ pr_debug("Line range is %d to %d\n", lr->start, lr->end);
+
+ err = -EINVAL;
+ if (lr->start > lr->end) {
+ semantic_error("Start line must be smaller"
+ " than end line.\n");
+ goto err;
+ }
+ if (*range != '\0') {
+ semantic_error("Tailing with invalid str '%s'.\n", range);
+ goto err;
+ }
+ }
+
+ file = strchr(name, '@');
+ if (file) {
+ *file = '\0';
+ lr->file = strdup(++file);
+ if (lr->file == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+ lr->function = name;
+ } else if (strchr(name, '.'))
+ lr->file = name;
+ else
+ lr->function = name;
+
+ return 0;
+err:
+ free(name);
+ return err;
+}
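
Illustrative worked examples, not from the patch: a few inputs and the struct line_range fields they produce (the values follow from the parser above).

	"schedule:12+3"            -> function "schedule", start 12, end 14
	"schedule@kernel/sched.c"  -> function "schedule", file "kernel/sched.c"
	"kernel/sched.c:100"       -> file "kernel/sched.c", start 100,
	                              end left at INT_MAX (show_line_range()
	                              then prints a few extra context lines)
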
+
+/* Check the name is good for event/group */
+static bool check_event_name(const char *name)
+{
+ if (!isalpha(*name) && *name != '_')
+ return false;
+ while (*++name != '\0') {
+ if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+ return false;
+ }
+ return true;
+}
+
+/* Parse probe point definition. */
+static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
+{
+ struct perf_probe_point *pp = &pev->point;
+ char *ptr, *tmp;
+ char c, nc = 0;
+ /*
+ * <Syntax>
+ * perf probe [EVENT=]SRC[:LN|;PTN]
+ * perf probe [EVENT=]FUNC[@SRC][+OFFS|%return|:LN|;PAT]
+ *
+ * TODO:Group name support
+ */
+
+ ptr = strpbrk(arg, ";=@+%");
+ if (ptr && *ptr == '=') { /* Event name */
+ *ptr = '\0';
+ tmp = ptr + 1;
+ if (strchr(arg, ':')) {
+ semantic_error("Group name is not supported yet.\n");
+ return -ENOTSUP;
+ }
+ if (!check_event_name(arg)) {
+ semantic_error("%s is bad for event name -it must "
+ "follow C symbol-naming rule.\n", arg);
+ return -EINVAL;
+ }
+ pev->event = strdup(arg);
+ if (pev->event == NULL)
+ return -ENOMEM;
+ pev->group = NULL;
+ arg = tmp;
+ }
+
+ ptr = strpbrk(arg, ";:+@%");
+ if (ptr) {
+ nc = *ptr;
+ *ptr++ = '\0';
+ }
+
+ tmp = strdup(arg);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ /* Check arg is function or file and copy it */
+ if (strchr(tmp, '.')) /* File */
+ pp->file = tmp;
+ else /* Function */
+ pp->function = tmp;
+
+ /* Parse other options */
+ while (ptr) {
+ arg = ptr;
+ c = nc;
+ if (c == ';') { /* Lazy pattern must be the last part */
+ pp->lazy_line = strdup(arg);
+ if (pp->lazy_line == NULL)
+ return -ENOMEM;
+ break;
+ }
+ ptr = strpbrk(arg, ";:+@%");
+ if (ptr) {
+ nc = *ptr;
+ *ptr++ = '\0';
+ }
+ switch (c) {
+ case ':': /* Line number */
+ pp->line = strtoul(arg, &tmp, 0);
+ if (*tmp != '\0') {
+ semantic_error("There is non-digit char"
+ " in line number.\n");
+ return -EINVAL;
+ }
+ break;
+ case '+': /* Byte offset from a symbol */
+ pp->offset = strtoul(arg, &tmp, 0);
+ if (*tmp != '\0') {
+ semantic_error("There is non-digit character"
+ " in offset.\n");
+ return -EINVAL;
+ }
+ break;
+ case '@': /* File name */
+ if (pp->file) {
+ semantic_error("SRC@SRC is not allowed.\n");
+ return -EINVAL;
+ }
+ pp->file = strdup(arg);
+ if (pp->file == NULL)
+ return -ENOMEM;
+ break;
+ case '%': /* Probe places */
+ if (strcmp(arg, "return") == 0) {
+ pp->retprobe = 1;
+ } else { /* Others not supported yet */
+ semantic_error("%%%s is not supported.\n", arg);
+ return -ENOTSUP;
+ }
+ break;
+ default: /* Buggy case */
+ pr_err("This program has a bug at %s:%d.\n",
+ __FILE__, __LINE__);
+ return -ENOTSUP;
+ break;
+ }
+ }
+
+ /* Exclusion check */
+ if (pp->lazy_line && pp->line) {
+ semantic_error("Lazy pattern can't be used with"
+ " line number.\n");
+ return -EINVAL;
+ }
+
+ if (pp->lazy_line && pp->offset) {
+ semantic_error("Lazy pattern can't be used with offset.\n");
+ return -EINVAL;
+ }
+
+ if (pp->line && pp->offset) {
+ semantic_error("Offset can't be used with line number.\n");
+ return -EINVAL;
+ }
+
+ if (!pp->line && !pp->lazy_line && pp->file && !pp->function) {
+ semantic_error("File always requires line number or "
+ "lazy pattern.\n");
+ return -EINVAL;
+ }
+
+ if (pp->offset && !pp->function) {
+ semantic_error("Offset requires an entry function.\n");
+ return -EINVAL;
+ }
+
+ if (pp->retprobe && !pp->function) {
+ semantic_error("Return probe requires an entry function.\n");
+ return -EINVAL;
+ }
+
+ if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe) {
+ semantic_error("Offset/Line/Lazy pattern can't be used with "
+ "return probe.\n");
+ return -EINVAL;
+ }
+
+ pr_debug("symbol:%s file:%s line:%d offset:%lu return:%d lazy:%s\n",
+ pp->function, pp->file, pp->line, pp->offset, pp->retprobe,
+ pp->lazy_line);
+ return 0;
+}
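
Illustrative worked examples, not from the patch: a few probe point strings and the perf_probe_point fields the parser above fills in.

	"myopen=do_sys_open+8"  -> event "myopen", function "do_sys_open", offset 8
	"vfs_read:11"           -> function "vfs_read", line 11
	"do_fork%return"        -> function "do_fork", retprobe set
	"fs/super.c:10"         -> file "fs/super.c", line 10
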
+
+/* Parse perf-probe event argument */
+static int parse_perf_probe_arg(char *str, struct perf_probe_arg *arg)
+{
+ char *tmp, *goodname;
+ struct perf_probe_arg_field **fieldp;
+
+ pr_debug("parsing arg: %s into ", str);
+
+ tmp = strchr(str, '=');
+ if (tmp) {
+ arg->name = strndup(str, tmp - str);
+ if (arg->name == NULL)
+ return -ENOMEM;
+ pr_debug("name:%s ", arg->name);
+ str = tmp + 1;
+ }
+
+ tmp = strchr(str, ':');
+ if (tmp) { /* Type setting */
+ *tmp = '\0';
+ arg->type = strdup(tmp + 1);
+ if (arg->type == NULL)
+ return -ENOMEM;
+ pr_debug("type:%s ", arg->type);
+ }
+
+ tmp = strpbrk(str, "-.[");
+ if (!is_c_varname(str) || !tmp) {
+ /* A variable, register, symbol or special value */
+ arg->var = strdup(str);
+ if (arg->var == NULL)
+ return -ENOMEM;
+ pr_debug("%s\n", arg->var);
+ return 0;
+ }
+
+ /* Structure fields or array element */
+ arg->var = strndup(str, tmp - str);
+ if (arg->var == NULL)
+ return -ENOMEM;
+ goodname = arg->var;
+ pr_debug("%s, ", arg->var);
+ fieldp = &arg->field;
+
+ do {
+ *fieldp = zalloc(sizeof(struct perf_probe_arg_field));
+ if (*fieldp == NULL)
+ return -ENOMEM;
+ if (*tmp == '[') { /* Array */
+ str = tmp;
+ (*fieldp)->index = strtol(str + 1, &tmp, 0);
+ (*fieldp)->ref = true;
+ if (*tmp != ']' || tmp == str + 1) {
+ semantic_error("Array index must be a"
+ " number.\n");
+ return -EINVAL;
+ }
+ tmp++;
+ if (*tmp == '\0')
+ tmp = NULL;
+ } else { /* Structure */
+ if (*tmp == '.') {
+ str = tmp + 1;
+ (*fieldp)->ref = false;
+ } else if (tmp[1] == '>') {
+ str = tmp + 2;
+ (*fieldp)->ref = true;
+ } else {
+ semantic_error("Argument parse error: %s\n",
+ str);
+ return -EINVAL;
+ }
+ tmp = strpbrk(str, "-.[");
+ }
+ if (tmp) {
+ (*fieldp)->name = strndup(str, tmp - str);
+ if ((*fieldp)->name == NULL)
+ return -ENOMEM;
+ if (*str != '[')
+ goodname = (*fieldp)->name;
+ pr_debug("%s(%d), ", (*fieldp)->name, (*fieldp)->ref);
+ fieldp = &(*fieldp)->next;
+ }
+ } while (tmp);
+ (*fieldp)->name = strdup(str);
+ if ((*fieldp)->name == NULL)
+ return -ENOMEM;
+ if (*str != '[')
+ goodname = (*fieldp)->name;
+ pr_debug("%s(%d)\n", (*fieldp)->name, (*fieldp)->ref);
+
+ /* If no name is specified, use the last field name (not an array index) */
+ if (!arg->name) {
+ arg->name = strdup(goodname);
+ if (arg->name == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
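+/*
+ * A command is one probe point followed by optional arguments, e.g.
+ * (hypothetical): "myopen=do_sys_open+8 filename mode". argv[0] is
+ * handled by parse_perf_probe_point(), the rest by
+ * parse_perf_probe_arg().
+ */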
+/* Parse perf-probe event command */
+int parse_perf_probe_command(const char *cmd, struct perf_probe_event *pev)
+{
+ char **argv;
+ int argc, i, ret = 0;
+
+ argv = argv_split(cmd, &argc);
+ if (!argv) {
+ pr_debug("Failed to split arguments.\n");
+ return -ENOMEM;
+ }
+ if (argc - 1 > MAX_PROBE_ARGS) {
+ semantic_error("Too many probe arguments (%d).\n", argc - 1);
+ ret = -ERANGE;
+ goto out;
+ }
+ /* Parse probe point */
+ ret = parse_perf_probe_point(argv[0], pev);
+ if (ret < 0)
+ goto out;
+
+ /* Copy arguments and ensure return probe has no C argument */
+ pev->nargs = argc - 1;
+ pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs);
+ if (pev->args == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < pev->nargs && ret >= 0; i++) {
+ ret = parse_perf_probe_arg(argv[i + 1], &pev->args[i]);
+ if (ret >= 0 &&
+ is_c_varname(pev->args[i].var) && pev->point.retprobe) {
+ semantic_error("You can't specify local variable for"
+ " kretprobe.\n");
+ ret = -EINVAL;
+ }
+ }
+out:
+ argv_free(argv);
+
+ return ret;
+}
+
+/* Return true if this perf_probe_event requires debuginfo */
+bool perf_probe_event_need_dwarf(struct perf_probe_event *pev)
+{
+ int i;
+
+ if (pev->point.file || pev->point.line || pev->point.lazy_line)
+ return true;
+
+ for (i = 0; i < pev->nargs; i++)
+ if (is_c_varname(pev->args[i].var))
+ return true;
+
+ return false;
+}
+
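+/*
+ * A probe_events line looks like (illustrative):
+ *   "p:GROUP/EVENT [MODULE:]SYMBOL+OFFS [NAME=]VALUE ..."
+ * with 'p' for kprobes and 'r' for return probes, as scanned below.
+ */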
+/* Parse a probe_events line into struct probe_trace_event */
+static int parse_probe_trace_command(const char *cmd,
+ struct probe_trace_event *tev)
+{
+ struct probe_trace_point *tp = &tev->point;
+ char pr;
+ char *p;
+ int ret, i, argc;
+ char **argv;
+
+ pr_debug("Parsing probe_events: %s\n", cmd);
+ argv = argv_split(cmd, &argc);
+ if (!argv) {
+ pr_debug("Failed to split arguments.\n");
+ return -ENOMEM;
+ }
+ if (argc < 2) {
+ semantic_error("Too few probe arguments.\n");
+ ret = -ERANGE;
+ goto out;
+ }
+
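+ /*
+ * Note: the "%a" conversions in this function use the GNU
+ * allocation modifier (sscanf allocates the strings); the
+ * (float *)(void *) casts only quiet the compiler's format
+ * checker, which treats %a as a hex float.
+ */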
+ /* Scan event and group name. */
+ ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]",
+ &pr, (float *)(void *)&tev->group,
+ (float *)(void *)&tev->event);
+ if (ret != 3) {
+ semantic_error("Failed to parse event name: %s\n", argv[0]);
+ ret = -EINVAL;
+ goto out;
+ }
+ pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr);
+
+ tp->retprobe = (pr == 'r');
+
+ /* Scan module name (if present), function name and offset */
+ p = strchr(argv[1], ':');
+ if (p) {
+ tp->module = strndup(argv[1], p - argv[1]);
+ p++;
+ } else
+ p = argv[1];
+ ret = sscanf(p, "%a[^+]+%lu", (float *)(void *)&tp->symbol,
+ &tp->offset);
+ if (ret == 1)
+ tp->offset = 0;
+
+ tev->nargs = argc - 2;
+ tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
+ if (tev->args == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < tev->nargs; i++) {
+ p = strchr(argv[i + 2], '=');
+ if (p) /* We don't need to know which register is assigned. */
+ *p++ = '\0';
+ else
+ p = argv[i + 2];
+ tev->args[i].name = strdup(argv[i + 2]);
+ /* TODO: parse regs and offset */
+ tev->args[i].value = strdup(p);
+ if (tev->args[i].name == NULL || tev->args[i].value == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ ret = 0;
+out:
+ argv_free(argv);
+ return ret;
+}
+
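+/*
+ * Produces e.g. (hypothetical) "fname=file->f_path.dentry:string",
+ * i.e. the same syntax that parse_perf_probe_arg() accepts.
+ */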
+/* Compose only probe arg */
+int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, size_t len)
+{
+ struct perf_probe_arg_field *field = pa->field;
+ int ret;
+ char *tmp = buf;
+
+ if (pa->name && pa->var)
+ ret = e_snprintf(tmp, len, "%s=%s", pa->name, pa->var);
+ else
+ ret = e_snprintf(tmp, len, "%s", pa->name ? pa->name : pa->var);
+ if (ret <= 0)
+ goto error;
+ tmp += ret;
+ len -= ret;
+
+ while (field) {
+ if (field->name[0] == '[')
+ ret = e_snprintf(tmp, len, "%s", field->name);
+ else
+ ret = e_snprintf(tmp, len, "%s%s",
+ field->ref ? "->" : ".", field->name);
+ if (ret <= 0)
+ goto error;
+ tmp += ret;
+ len -= ret;
+ field = field->next;
+ }
+
+ if (pa->type) {
+ ret = e_snprintf(tmp, len, ":%s", pa->type);
+ if (ret <= 0)
+ goto error;
+ tmp += ret;
+ len -= ret;
+ }
+
+ return tmp - buf;
+error:
+ pr_debug("Failed to synthesize perf probe argument: %s\n",
+ strerror(-ret));
+ return ret;
+}
+
+/* Compose only probe point (not argument) */
+static char *synthesize_perf_probe_point(struct perf_probe_point *pp)
+{
+ char *buf, *tmp;
+ char offs[32] = "", line[32] = "", file[32] = "";
+ int ret, len;
+
+ buf = zalloc(MAX_CMDLEN);
+ if (buf == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ if (pp->offset) {
+ ret = e_snprintf(offs, 32, "+%lu", pp->offset);
+ if (ret <= 0)
+ goto error;
+ }
+ if (pp->line) {
+ ret = e_snprintf(line, 32, ":%d", pp->line);
+ if (ret <= 0)
+ goto error;
+ }
+ if (pp->file) {
+ tmp = pp->file;
+ len = strlen(tmp);
+ if (len > 30) {
+ tmp = strchr(pp->file + len - 30, '/');
+ tmp = tmp ? tmp + 1 : pp->file + len - 30;
+ }
+ ret = e_snprintf(file, 32, "@%s", tmp);
+ if (ret <= 0)
+ goto error;
+ }
+
+ if (pp->function)
+ ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s%s", pp->function,
+ offs, pp->retprobe ? "%return" : "", line,
+ file);
+ else
+ ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", file, line);
+ if (ret <= 0)
+ goto error;
+
+ return buf;
+error:
+ pr_debug("Failed to synthesize perf probe point: %s\n",
+ strerror(-ret));
+ if (buf)
+ free(buf);
+ return NULL;
+}
+
+#if 0
+char *synthesize_perf_probe_command(struct perf_probe_event *pev)
+{
+ char *buf;
+ int i, len, ret;
+
+ buf = synthesize_perf_probe_point(&pev->point);
+ if (!buf)
+ return NULL;
+
+ len = strlen(buf);
+ for (i = 0; i < pev->nargs; i++) {
+ ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s",
+ pev->args[i].name);
+ if (ret <= 0) {
+ free(buf);
+ return NULL;
+ }
+ len += ret;
+ }
+
+ return buf;
+}
+#endif
+
+static int __synthesize_probe_trace_arg_ref(struct probe_trace_arg_ref *ref,
+ char **buf, size_t *buflen,
+ int depth)
+{
+ int ret;
+ if (ref->next) {
+ depth = __synthesize_probe_trace_arg_ref(ref->next, buf,
+ buflen, depth + 1);
+ if (depth < 0)
+ goto out;
+ }
+
+ ret = e_snprintf(*buf, *buflen, "%+ld(", ref->offset);
+ if (ret < 0)
+ depth = ret;
+ else {
+ *buf += ret;
+ *buflen -= ret;
+ }
+out:
+ return depth;
+
+}
+
+static int synthesize_probe_trace_arg(struct probe_trace_arg *arg,
+ char *buf, size_t buflen)
+{
+ struct probe_trace_arg_ref *ref = arg->ref;
+ int ret, depth = 0;
+ char *tmp = buf;
+
+ /* Argument name or separator */
+ if (arg->name)
+ ret = e_snprintf(buf, buflen, " %s=", arg->name);
+ else
+ ret = e_snprintf(buf, buflen, " ");
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ buflen -= ret;
+
+ /* Special case: @XXX */
+ if (arg->value[0] == '@' && arg->ref)
+ ref = ref->next;
+
+ /* Dereferencing arguments */
+ if (ref) {
+ depth = __synthesize_probe_trace_arg_ref(ref, &buf,
+ &buflen, 1);
+ if (depth < 0)
+ return depth;
+ }
+
+ /* Print argument value */
+ if (arg->value[0] == '@' && arg->ref)
+ ret = e_snprintf(buf, buflen, "%s%+ld", arg->value,
+ arg->ref->offset);
+ else
+ ret = e_snprintf(buf, buflen, "%s", arg->value);
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ buflen -= ret;
+
+ /* Closing */
+ while (depth--) {
+ ret = e_snprintf(buf, buflen, ")");
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ buflen -= ret;
+ }
+ /* Print argument type */
+ if (arg->type) {
+ ret = e_snprintf(buf, buflen, ":%s", arg->type);
+ if (ret <= 0)
+ return ret;
+ buf += ret;
+ }
+
+ return buf - tmp;
+}
+
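+/*
+ * Builds the string written to kprobe_events, e.g. (hypothetical):
+ *   "p:probe/myevent do_sys_open+8 fname=%di"
+ * following the "%c:GROUP/EVENT [MODULE:]SYMBOL+OFFS ARGS" layout of
+ * the e_snprintf() call below.
+ */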
+char *synthesize_probe_trace_command(struct probe_trace_event *tev)
+{
+ struct probe_trace_point *tp = &tev->point;
+ char *buf;
+ int i, len, ret;
+
+ buf = zalloc(MAX_CMDLEN);
+ if (buf == NULL)
+ return NULL;
+
+ len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s%s%s+%lu",
+ tp->retprobe ? 'r' : 'p',
+ tev->group, tev->event,
+ tp->module ?: "", tp->module ? ":" : "",
+ tp->symbol, tp->offset);
+ if (len <= 0)
+ goto error;
+
+ for (i = 0; i < tev->nargs; i++) {
+ ret = synthesize_probe_trace_arg(&tev->args[i], buf + len,
+ MAX_CMDLEN - len);
+ if (ret <= 0)
+ goto error;
+ len += ret;
+ }
+
+ return buf;
+error:
+ free(buf);
+ return NULL;
+}
+
+static int convert_to_perf_probe_event(struct probe_trace_event *tev,
+ struct perf_probe_event *pev)
+{
+ char buf[64] = "";
+ int i, ret;
+
+ /* Convert event/group name */
+ pev->event = strdup(tev->event);
+ pev->group = strdup(tev->group);
+ if (pev->event == NULL || pev->group == NULL)
+ return -ENOMEM;
+
+ /* Convert trace_point to probe_point */
+ ret = kprobe_convert_to_perf_probe(&tev->point, &pev->point);
+ if (ret < 0)
+ return ret;
+
+ /* Convert trace_arg to probe_arg */
+ pev->nargs = tev->nargs;
+ pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs);
+ if (pev->args == NULL)
+ return -ENOMEM;
+ for (i = 0; i < tev->nargs && ret >= 0; i++) {
+ if (tev->args[i].name)
+ pev->args[i].name = strdup(tev->args[i].name);
+ else {
+ ret = synthesize_probe_trace_arg(&tev->args[i],
+ buf, 64);
+ pev->args[i].name = strdup(buf);
+ }
+ if (pev->args[i].name == NULL && ret >= 0)
+ ret = -ENOMEM;
+ }
+
+ if (ret < 0)
+ clear_perf_probe_event(pev);
+
+ return ret;
+}
+
+void clear_perf_probe_event(struct perf_probe_event *pev)
+{
+ struct perf_probe_point *pp = &pev->point;
+ struct perf_probe_arg_field *field, *next;
+ int i;
+
+ if (pev->event)
+ free(pev->event);
+ if (pev->group)
+ free(pev->group);
+ if (pp->file)
+ free(pp->file);
+ if (pp->function)
+ free(pp->function);
+ if (pp->lazy_line)
+ free(pp->lazy_line);
+ for (i = 0; i < pev->nargs; i++) {
+ if (pev->args[i].name)
+ free(pev->args[i].name);
+ if (pev->args[i].var)
+ free(pev->args[i].var);
+ if (pev->args[i].type)
+ free(pev->args[i].type);
+ field = pev->args[i].field;
+ while (field) {
+ next = field->next;
+ if (field->name)
+ free(field->name);
+ free(field);
+ field = next;
+ }
+ }
+ if (pev->args)
+ free(pev->args);
+ memset(pev, 0, sizeof(*pev));
+}
+
+static void clear_probe_trace_event(struct probe_trace_event *tev)
+{
+ struct probe_trace_arg_ref *ref, *next;
+ int i;
+
+ if (tev->event)
+ free(tev->event);
+ if (tev->group)
+ free(tev->group);
+ if (tev->point.symbol)
+ free(tev->point.symbol);
+ if (tev->point.module)
+ free(tev->point.module);
+ for (i = 0; i < tev->nargs; i++) {
+ if (tev->args[i].name)
+ free(tev->args[i].name);
+ if (tev->args[i].value)
+ free(tev->args[i].value);
+ if (tev->args[i].type)
+ free(tev->args[i].type);
+ ref = tev->args[i].ref;
+ while (ref) {
+ next = ref->next;
+ free(ref);
+ ref = next;
+ }
+ }
+ if (tev->args)
+ free(tev->args);
+ memset(tev, 0, sizeof(*tev));
+}
+
+static int open_kprobe_events(bool readwrite)
+{
+ char buf[PATH_MAX];
+ const char *__debugfs;
+ int ret;
+
+ __debugfs = debugfs_find_mountpoint();
+ if (__debugfs == NULL) {
+ pr_warning("Debugfs is not mounted.\n");
+ return -ENOENT;
+ }
+
+ ret = e_snprintf(buf, PATH_MAX, "%stracing/kprobe_events", __debugfs);
+ if (ret >= 0) {
+ pr_debug("Opening %s write=%d\n", buf, readwrite);
+ if (readwrite && !probe_event_dry_run)
+ ret = open(buf, O_RDWR | O_APPEND);
+ else
+ ret = open(buf, O_RDONLY, 0);
+ }
+
+ if (ret < 0) {
+ if (errno == ENOENT)
+ pr_warning("kprobe_events file does not exist - please"
+ " rebuild kernel with CONFIG_KPROBE_EVENT.\n");
+ else
+ pr_warning("Failed to open kprobe_events file: %s\n",
+ strerror(errno));
+ }
+ return ret;
+}
+
+/* Get raw string list of current kprobe_events */
+static struct strlist *get_probe_trace_command_rawlist(int fd)
+{
+ int ret, idx;
+ FILE *fp;
+ char buf[MAX_CMDLEN];
+ char *p;
+ struct strlist *sl;
+
+ sl = strlist__new(true, NULL);
+
+ fp = fdopen(dup(fd), "r");
+ while (!feof(fp)) {
+ p = fgets(buf, MAX_CMDLEN, fp);
+ if (!p)
+ break;
+
+ idx = strlen(p) - 1;
+ if (p[idx] == '\n')
+ p[idx] = '\0';
+ ret = strlist__add(sl, buf);
+ if (ret < 0) {
+ pr_debug("strlist__add failed: %s\n", strerror(-ret));
+ strlist__delete(sl);
+ return NULL;
+ }
+ }
+ fclose(fp);
+
+ return sl;
+}
+
+/* Show an event */
+static int show_perf_probe_event(struct perf_probe_event *pev)
+{
+ int i, ret;
+ char buf[128];
+ char *place;
+
+ /* Synthesize only event probe point */
+ place = synthesize_perf_probe_point(&pev->point);
+ if (!place)
+ return -EINVAL;
+
+ ret = e_snprintf(buf, 128, "%s:%s", pev->group, pev->event);
+ if (ret < 0)
+ return ret;
+
+ printf(" %-20s (on %s", buf, place);
+
+ if (pev->nargs > 0) {
+ printf(" with");
+ for (i = 0; i < pev->nargs; i++) {
+ ret = synthesize_perf_probe_arg(&pev->args[i],
+ buf, 128);
+ if (ret < 0)
+ break;
+ printf(" %s", buf);
+ }
+ }
+ printf(")\n");
+ free(place);
+ return ret;
+}
+
+/* List the current perf-probe events */
+int show_perf_probe_events(void)
+{
+ int fd, ret;
+ struct probe_trace_event tev;
+ struct perf_probe_event pev;
+ struct strlist *rawlist;
+ struct str_node *ent;
+
+ setup_pager();
+ ret = init_vmlinux();
+ if (ret < 0)
+ return ret;
+
+ memset(&tev, 0, sizeof(tev));
+ memset(&pev, 0, sizeof(pev));
+
+ fd = open_kprobe_events(false);
+ if (fd < 0)
+ return fd;
+
+ rawlist = get_probe_trace_command_rawlist(fd);
+ close(fd);
+ if (!rawlist)
+ return -ENOENT;
+
+ strlist__for_each(ent, rawlist) {
+ ret = parse_probe_trace_command(ent->s, &tev);
+ if (ret >= 0) {
+ ret = convert_to_perf_probe_event(&tev, &pev);
+ if (ret >= 0)
+ ret = show_perf_probe_event(&pev);
+ }
+ clear_perf_probe_event(&pev);
+ clear_probe_trace_event(&tev);
+ if (ret < 0)
+ break;
+ }
+ strlist__delete(rawlist);
+
+ return ret;
+}
+
+/* Get current perf-probe event names */
+static struct strlist *get_probe_trace_event_names(int fd, bool include_group)
+{
+ char buf[128];
+ struct strlist *sl, *rawlist;
+ struct str_node *ent;
+ struct probe_trace_event tev;
+ int ret = 0;
+
+ memset(&tev, 0, sizeof(tev));
+ rawlist = get_probe_trace_command_rawlist(fd);
+ sl = strlist__new(true, NULL);
+ strlist__for_each(ent, rawlist) {
+ ret = parse_probe_trace_command(ent->s, &tev);
+ if (ret < 0)
+ break;
+ if (include_group) {
+ ret = e_snprintf(buf, 128, "%s:%s", tev.group,
+ tev.event);
+ if (ret >= 0)
+ ret = strlist__add(sl, buf);
+ } else
+ ret = strlist__add(sl, tev.event);
+ clear_probe_trace_event(&tev);
+ if (ret < 0)
+ break;
+ }
+ strlist__delete(rawlist);
+
+ if (ret < 0) {
+ strlist__delete(sl);
+ return NULL;
+ }
+ return sl;
+}
+
+static int write_probe_trace_event(int fd, struct probe_trace_event *tev)
+{
+ int ret = 0;
+ char *buf = synthesize_probe_trace_command(tev);
+
+ if (!buf) {
+ pr_debug("Failed to synthesize probe trace event.\n");
+ return -EINVAL;
+ }
+
+ pr_debug("Writing event: %s\n", buf);
+ if (!probe_event_dry_run) {
+ ret = write(fd, buf, strlen(buf));
+ if (ret <= 0)
+ pr_warning("Failed to write event: %s\n",
+ strerror(errno));
+ }
+ free(buf);
+ return ret;
+}
+
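+/*
+ * Pick an event name not already in 'namelist': try "base" first,
+ * then, when a suffix is allowed (-f), "base_1", "base_2", ... up to
+ * MAX_EVENT_INDEX.
+ */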
+static int get_new_event_name(char *buf, size_t len, const char *base,
+ struct strlist *namelist, bool allow_suffix)
+{
+ int i, ret;
+
+ /* Try no suffix */
+ ret = e_snprintf(buf, len, "%s", base);
+ if (ret < 0) {
+ pr_debug("snprintf() failed: %s\n", strerror(-ret));
+ return ret;
+ }
+ if (!strlist__has_entry(namelist, buf))
+ return 0;
+
+ if (!allow_suffix) {
+ pr_warning("Error: event \"%s\" already exists. "
+ "(Use -f to force duplicates.)\n", base);
+ return -EEXIST;
+ }
+
+ /* Try to add suffix */
+ for (i = 1; i < MAX_EVENT_INDEX; i++) {
+ ret = e_snprintf(buf, len, "%s_%d", base, i);
+ if (ret < 0) {
+ pr_debug("snprintf() failed: %s\n", strerror(-ret));
+ return ret;
+ }
+ if (!strlist__has_entry(namelist, buf))
+ break;
+ }
+ if (i == MAX_EVENT_INDEX) {
+ pr_warning("Too many events are on the same function.\n");
+ ret = -ERANGE;
+ }
+
+ return ret;
+}
+
+static int __add_probe_trace_events(struct perf_probe_event *pev,
+ struct probe_trace_event *tevs,
+ int ntevs, bool allow_suffix)
+{
+ int i, fd, ret;
+ struct probe_trace_event *tev = NULL;
+ char buf[64];
+ const char *event, *group;
+ struct strlist *namelist;
+
+ fd = open_kprobe_events(true);
+ if (fd < 0)
+ return fd;
+ /* Get current event names */
+ namelist = get_probe_trace_event_names(fd, false);
+ if (!namelist) {
+ pr_debug("Failed to get current event list.\n");
+ return -EIO;
+ }
+
+ ret = 0;
+ printf("Added new event%s\n", (ntevs > 1) ? "s:" : ":");
+ for (i = 0; i < ntevs; i++) {
+ tev = &tevs[i];
+ if (pev->event)
+ event = pev->event;
+ else
+ if (pev->point.function)
+ event = pev->point.function;
+ else
+ event = tev->point.symbol;
+ if (pev->group)
+ group = pev->group;
+ else
+ group = PERFPROBE_GROUP;
+
+ /* Get an unused new event name */
+ ret = get_new_event_name(buf, 64, event,
+ namelist, allow_suffix);
+ if (ret < 0)
+ break;
+ event = buf;
+
+ tev->event = strdup(event);
+ tev->group = strdup(group);
+ if (tev->event == NULL || tev->group == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+ ret = write_probe_trace_event(fd, tev);
+ if (ret < 0)
+ break;
+ /* Add added event name to namelist */
+ strlist__add(namelist, event);
+
+ /* Trick here - save current event/group */
+ event = pev->event;
+ group = pev->group;
+ pev->event = tev->event;
+ pev->group = tev->group;
+ show_perf_probe_event(pev);
+ /* Trick here - restore current event/group */
+ pev->event = (char *)event;
+ pev->group = (char *)group;
+
+ /*
+ * Probes after the first one from the same user input
+ * are always allowed to add a suffix, because one code
+ * line may correspond to several addresses.
+ */
+ allow_suffix = true;
+ }
+
+ if (ret >= 0) {
+ /* Show how to use the event. */
+ printf("\nYou can now use it in all perf tools, such as:\n\n");
+ printf("\tperf record -e %s:%s -aR sleep 1\n\n", tev->group,
+ tev->event);
+ }
+
+ strlist__delete(namelist);
+ close(fd);
+ return ret;
+}
+
+static int convert_to_probe_trace_events(struct perf_probe_event *pev,
+ struct probe_trace_event **tevs,
+ int max_tevs, const char *target)
+{
+ struct symbol *sym;
+ int ret = 0, i;
+ struct probe_trace_event *tev;
+
+ /* Convert perf_probe_event with debuginfo */
+ ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, target);
+ if (ret != 0)
+ return ret; /* Found in debuginfo or got an error */
+
+ /* Allocate trace event buffer */
+ tev = *tevs = zalloc(sizeof(struct probe_trace_event));
+ if (tev == NULL)
+ return -ENOMEM;
+
+ /* Copy parameters */
+ tev->point.symbol = strdup(pev->point.function);
+ if (tev->point.symbol == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (target) {
+ tev->point.module = strdup(target);
+ if (tev->point.module == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+
+ tev->point.offset = pev->point.offset;
+ tev->point.retprobe = pev->point.retprobe;
+ tev->nargs = pev->nargs;
+ if (tev->nargs) {
+ tev->args = zalloc(sizeof(struct probe_trace_arg)
+ * tev->nargs);
+ if (tev->args == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ for (i = 0; i < tev->nargs; i++) {
+ if (pev->args[i].name) {
+ tev->args[i].name = strdup(pev->args[i].name);
+ if (tev->args[i].name == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+ tev->args[i].value = strdup(pev->args[i].var);
+ if (tev->args[i].value == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ if (pev->args[i].type) {
+ tev->args[i].type = strdup(pev->args[i].type);
+ if (tev->args[i].type == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+ }
+ }
+
+ /* Currently just checking function name from symbol map */
+ sym = __find_kernel_function_by_name(tev->point.symbol, NULL);
+ if (!sym) {
+ pr_warning("Kernel symbol \'%s\' not found.\n",
+ tev->point.symbol);
+ ret = -ENOENT;
+ goto error;
+ } else if (tev->point.offset > sym->end - sym->start) {
+ pr_warning("Offset specified is greater than size of %s\n",
+ tev->point.symbol);
+ ret = -ENOENT;
+ goto error;
+
+ }
+
+ return 1;
+error:
+ clear_probe_trace_event(tev);
+ free(tev);
+ *tevs = NULL;
+ return ret;
+}
+
+struct __event_package {
+ struct perf_probe_event *pev;
+ struct probe_trace_event *tevs;
+ int ntevs;
+};
+
+int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
+ int max_tevs, const char *target, bool force_add)
+{
+ int i, j, ret;
+ struct __event_package *pkgs;
+
+ pkgs = zalloc(sizeof(struct __event_package) * npevs);
+ if (pkgs == NULL)
+ return -ENOMEM;
+
+ /* Init vmlinux path */
+ ret = init_vmlinux();
+ if (ret < 0) {
+ free(pkgs);
+ return ret;
+ }
+
+ /* Loop 1: convert all events */
+ for (i = 0; i < npevs; i++) {
+ pkgs[i].pev = &pevs[i];
+ /* Convert with or without debuginfo */
+ ret = convert_to_probe_trace_events(pkgs[i].pev,
+ &pkgs[i].tevs,
+ max_tevs,
+ target);
+ if (ret < 0)
+ goto end;
+ pkgs[i].ntevs = ret;
+ }
+
+ /* Loop 2: add all events */
+ for (i = 0; i < npevs; i++) {
+ ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs,
+ pkgs[i].ntevs, force_add);
+ if (ret < 0)
+ break;
+ }
+end:
+ /* Loop 3: cleanup and free trace events */
+ for (i = 0; i < npevs; i++) {
+ for (j = 0; j < pkgs[i].ntevs; j++)
+ clear_probe_trace_event(&pkgs[i].tevs[j]);
+ free(pkgs[i].tevs);
+ }
+ free(pkgs);
+
+ return ret;
+}
+
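+/*
+ * Events are removed by writing "-:GROUP/EVENT" to kprobe_events;
+ * ent->s holds "GROUP:EVENT", so the ':' is rewritten to '/' below.
+ */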
+static int __del_trace_probe_event(int fd, struct str_node *ent)
+{
+ char *p;
+ char buf[128];
+ int ret;
+
+ /* Convert from perf-probe event to trace-probe event */
+ ret = e_snprintf(buf, 128, "-:%s", ent->s);
+ if (ret < 0)
+ goto error;
+
+ p = strchr(buf + 2, ':');
+ if (!p) {
+ pr_debug("Internal error: %s should have ':' but not.\n",
+ ent->s);
+ ret = -ENOTSUP;
+ goto error;
+ }
+ *p = '/';
+
+ pr_debug("Writing event: %s\n", buf);
+ ret = write(fd, buf, strlen(buf));
+ if (ret < 0) {
+ ret = -errno;
+ goto error;
+ }
+
+ printf("Removed event: %s\n", ent->s);
+ return 0;
+error:
+ pr_warning("Failed to delete event: %s\n", strerror(-ret));
+ return ret;
+}
+
+static int del_trace_probe_event(int fd, const char *group,
+ const char *event, struct strlist *namelist)
+{
+ char buf[128];
+ struct str_node *ent, *n;
+ int found = 0, ret = 0;
+
+ ret = e_snprintf(buf, 128, "%s:%s", group, event);
+ if (ret < 0) {
+ pr_err("Failed to copy event.\n");
+ return ret;
+ }
+
+ if (strpbrk(buf, "*?")) { /* Glob-exp */
+ strlist__for_each_safe(ent, n, namelist)
+ if (strglobmatch(ent->s, buf)) {
+ found++;
+ ret = __del_trace_probe_event(fd, ent);
+ if (ret < 0)
+ break;
+ strlist__remove(namelist, ent);
+ }
+ } else {
+ ent = strlist__find(namelist, buf);
+ if (ent) {
+ found++;
+ ret = __del_trace_probe_event(fd, ent);
+ if (ret >= 0)
+ strlist__remove(namelist, ent);
+ }
+ }
+ if (found == 0 && ret >= 0)
+ pr_info("Info: Event \"%s\" does not exist.\n", buf);
+
+ return ret;
+}
+
+int del_perf_probe_events(struct strlist *dellist)
+{
+ int fd, ret = 0;
+ const char *group, *event;
+ char *p, *str;
+ struct str_node *ent;
+ struct strlist *namelist;
+
+ fd = open_kprobe_events(true);
+ if (fd < 0)
+ return fd;
+
+ /* Get current event names */
+ namelist = get_probe_trace_event_names(fd, true);
+ if (namelist == NULL)
+ return -EINVAL;
+
+ strlist__for_each(ent, dellist) {
+ str = strdup(ent->s);
+ if (str == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+ pr_debug("Parsing: %s\n", str);
+ p = strchr(str, ':');
+ if (p) {
+ group = str;
+ *p = '\0';
+ event = p + 1;
+ } else {
+ group = "*";
+ event = str;
+ }
+ pr_debug("Group: %s, Event: %s\n", group, event);
+ ret = del_trace_probe_event(fd, group, event, namelist);
+ free(str);
+ if (ret < 0)
+ break;
+ }
+ strlist__delete(namelist);
+ close(fd);
+
+ return ret;
+}
+
+/* TODO: don't use a global variable for filter ... */
+static struct strfilter *available_func_filter;
+
+/*
+ * If a symbol corresponds to a function with global binding and
+ * matches the filter, return 0. For all others, return 1.
+ */
+static int filter_available_functions(struct map *map __unused,
+ struct symbol *sym)
+{
+ if (sym->binding == STB_GLOBAL &&
+ strfilter__compare(available_func_filter, sym->name))
+ return 0;
+ return 1;
+}
+
+int show_available_funcs(const char *target, struct strfilter *_filter)
+{
+ struct map *map;
+ int ret;
+
+ setup_pager();
+
+ ret = init_vmlinux();
+ if (ret < 0)
+ return ret;
+
+ map = kernel_get_module_map(target);
+ if (!map) {
+ pr_err("Failed to find %s map.\n", (target) ? : "kernel");
+ return -EINVAL;
+ }
+ available_func_filter = _filter;
+ if (map__load(map, filter_available_functions)) {
+ pr_err("Failed to load map.\n");
+ return -EINVAL;
+ }
+ if (!dso__sorted_by_name(map->dso, map->type))
+ dso__sort_by_name(map->dso, map->type);
+
+ dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
+ return 0;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/probe-event.h b/ANDROID_3.4.5/tools/perf/util/probe-event.h
new file mode 100644
index 00000000..a7dee835
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/probe-event.h
@@ -0,0 +1,138 @@
+#ifndef _PROBE_EVENT_H
+#define _PROBE_EVENT_H
+
+#include <stdbool.h>
+#include "strlist.h"
+#include "strfilter.h"
+
+extern bool probe_event_dry_run;
+
+/* kprobe-tracer tracing point */
+struct probe_trace_point {
+ char *symbol; /* Base symbol */
+ char *module; /* Module name */
+ unsigned long offset; /* Offset from symbol */
+ bool retprobe; /* Return probe flag */
+};
+
+/* probe-tracer tracing argument referencing offset */
+struct probe_trace_arg_ref {
+ struct probe_trace_arg_ref *next; /* Next reference */
+ long offset; /* Offset value */
+};
+
+/* kprobe-tracer tracing argument */
+struct probe_trace_arg {
+ char *name; /* Argument name */
+ char *value; /* Base value */
+ char *type; /* Type name */
+ struct probe_trace_arg_ref *ref; /* Referencing offset */
+};
+
+/* kprobe-tracer tracing event (point + arg) */
+struct probe_trace_event {
+ char *event; /* Event name */
+ char *group; /* Group name */
+ struct probe_trace_point point; /* Trace point */
+ int nargs; /* Number of args */
+ struct probe_trace_arg *args; /* Arguments */
+};
+
+/* Perf probe probing point */
+struct perf_probe_point {
+ char *file; /* File path */
+ char *function; /* Function name */
+ int line; /* Line number */
+ bool retprobe; /* Return probe flag */
+ char *lazy_line; /* Lazy matching pattern */
+ unsigned long offset; /* Offset from function entry */
+};
+
+/* Perf probe probing argument field chain */
+struct perf_probe_arg_field {
+ struct perf_probe_arg_field *next; /* Next field */
+ char *name; /* Name of the field */
+ long index; /* Array index number */
+ bool ref; /* Referencing flag */
+};
+
+/* Perf probe probing argument */
+struct perf_probe_arg {
+ char *name; /* Argument name */
+ char *var; /* Variable name */
+ char *type; /* Type name */
+ struct perf_probe_arg_field *field; /* Structure fields */
+};
+
+/* Perf probe probing event (point + arg) */
+struct perf_probe_event {
+ char *event; /* Event name */
+ char *group; /* Group name */
+ struct perf_probe_point point; /* Probe point */
+ int nargs; /* Number of arguments */
+ struct perf_probe_arg *args; /* Arguments */
+};
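+
+/*
+ * For example (hypothetical), the command
+ *   "myevent=do_sys_open+8 filename:string"
+ * parses into: event = "myevent", point.function = "do_sys_open",
+ * point.offset = 8, args[0].var = "filename",
+ * args[0].type = "string".
+ */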
+
+
+/* Line number container */
+struct line_node {
+ struct list_head list;
+ int line;
+};
+
+/* Line range */
+struct line_range {
+ char *file; /* File name */
+ char *function; /* Function name */
+ int start; /* Start line number */
+ int end; /* End line number */
+ int offset; /* Start line offset */
+ char *path; /* Real path name */
+ char *comp_dir; /* Compile directory */
+ struct list_head line_list; /* Visible lines */
+};
+
+/* List of variables */
+struct variable_list {
+ struct probe_trace_point point; /* Actual probepoint */
+ struct strlist *vars; /* Available variables */
+};
+
+/* Command string to events */
+extern int parse_perf_probe_command(const char *cmd,
+ struct perf_probe_event *pev);
+
+/* Events to command string */
+extern char *synthesize_perf_probe_command(struct perf_probe_event *pev);
+extern char *synthesize_probe_trace_command(struct probe_trace_event *tev);
+extern int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf,
+ size_t len);
+
+/* Check the perf_probe_event needs debuginfo */
+extern bool perf_probe_event_need_dwarf(struct perf_probe_event *pev);
+
+/* Release event contents */
+extern void clear_perf_probe_event(struct perf_probe_event *pev);
+
+/* Command string to line-range */
+extern int parse_line_range_desc(const char *cmd, struct line_range *lr);
+
+/* Internal use: Return kernel/module path */
+extern const char *kernel_get_module_path(const char *module);
+
+extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
+ int max_probe_points, const char *module,
+ bool force_add);
+extern int del_perf_probe_events(struct strlist *dellist);
+extern int show_perf_probe_events(void);
+extern int show_line_range(struct line_range *lr, const char *module);
+extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
+ int max_probe_points, const char *module,
+ struct strfilter *filter, bool externs);
+extern int show_available_funcs(const char *module, struct strfilter *filter);
+
+
+/* Maximum index number of event-name postfix */
+#define MAX_EVENT_INDEX 1024
+
+#endif /*_PROBE_EVENT_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/probe-finder.c b/ANDROID_3.4.5/tools/perf/util/probe-finder.c
new file mode 100644
index 00000000..d448984e
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/probe-finder.c
@@ -0,0 +1,1580 @@
+/*
+ * probe-finder.c : C expression to kprobe event converter
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <sys/utsname.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <dwarf-regs.h>
+
+#include <linux/bitops.h>
+#include "event.h"
+#include "debug.h"
+#include "util.h"
+#include "symbol.h"
+#include "probe-finder.h"
+
+/* Kprobe tracer basic type is up to u64 */
+#define MAX_BASIC_TYPE_BITS 64
+
+/* Line number list operations */
+
+/* Add a line to line number list */
+static int line_list__add_line(struct list_head *head, int line)
+{
+ struct line_node *ln;
+ struct list_head *p;
+
+ /* Reverse search, because new line will be the last one */
+ list_for_each_entry_reverse(ln, head, list) {
+ if (ln->line < line) {
+ p = &ln->list;
+ goto found;
+ } else if (ln->line == line) /* Already exists */
+ return 1;
+ }
+ /* List is empty, or the smallest entry */
+ p = head;
+found:
+ pr_debug("line list: add a line %u\n", line);
+ ln = zalloc(sizeof(struct line_node));
+ if (ln == NULL)
+ return -ENOMEM;
+ ln->line = line;
+ INIT_LIST_HEAD(&ln->list);
+ list_add(&ln->list, p);
+ return 0;
+}
+
+/* Check if the line is in the line number list */
+static int line_list__has_line(struct list_head *head, int line)
+{
+ struct line_node *ln;
+
+ /* Simple linear search through the sorted list */
+ list_for_each_entry(ln, head, list)
+ if (ln->line == line)
+ return 1;
+
+ return 0;
+}
+
+/* Init line number list */
+static void line_list__init(struct list_head *head)
+{
+ INIT_LIST_HEAD(head);
+}
+
+/* Free line number list */
+static void line_list__free(struct list_head *head)
+{
+ struct line_node *ln;
+ while (!list_empty(head)) {
+ ln = list_first_entry(head, struct line_node, list);
+ list_del(&ln->list);
+ free(ln);
+ }
+}
+
+/* Dwarf FL wrappers */
+static char *debuginfo_path; /* Currently dummy */
+
+static const Dwfl_Callbacks offline_callbacks = {
+ .find_debuginfo = dwfl_standard_find_debuginfo,
+ .debuginfo_path = &debuginfo_path,
+
+ .section_address = dwfl_offline_section_address,
+
+ /* We use this table for core files too. */
+ .find_elf = dwfl_build_id_find_elf,
+};
+
+/* Get a Dwarf from offline image */
+static int debuginfo__init_offline_dwarf(struct debuginfo *self,
+ const char *path)
+{
+ Dwfl_Module *mod;
+ int fd;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+ self->dwfl = dwfl_begin(&offline_callbacks);
+ if (!self->dwfl)
+ goto error;
+
+ mod = dwfl_report_offline(self->dwfl, "", "", fd);
+ if (!mod)
+ goto error;
+
+ self->dbg = dwfl_module_getdwarf(mod, &self->bias);
+ if (!self->dbg)
+ goto error;
+
+ return 0;
+error:
+ if (self->dwfl)
+ dwfl_end(self->dwfl);
+ else
+ close(fd);
+ memset(self, 0, sizeof(*self));
+
+ return -ENOENT;
+}
+
+#if _ELFUTILS_PREREQ(0, 148)
+/* This method is buggy if elfutils is older than 0.148 */
+static int __linux_kernel_find_elf(Dwfl_Module *mod,
+ void **userdata,
+ const char *module_name,
+ Dwarf_Addr base,
+ char **file_name, Elf **elfp)
+{
+ int fd;
+ const char *path = kernel_get_module_path(module_name);
+
+ pr_debug2("Use file %s for %s\n", path, module_name);
+ if (path) {
+ fd = open(path, O_RDONLY);
+ if (fd >= 0) {
+ *file_name = strdup(path);
+ return fd;
+ }
+ }
+ /* If failed, try to call standard method */
+ return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base,
+ file_name, elfp);
+}
+
+static const Dwfl_Callbacks kernel_callbacks = {
+ .find_debuginfo = dwfl_standard_find_debuginfo,
+ .debuginfo_path = &debuginfo_path,
+
+ .find_elf = __linux_kernel_find_elf,
+ .section_address = dwfl_linux_kernel_module_section_address,
+};
+
+/* Get a Dwarf from live kernel image */
+static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self,
+ Dwarf_Addr addr)
+{
+ self->dwfl = dwfl_begin(&kernel_callbacks);
+ if (!self->dwfl)
+ return -EINVAL;
+
+ /* Load the kernel dwarves: don't care about the result here */
+ dwfl_linux_kernel_report_kernel(self->dwfl);
+ dwfl_linux_kernel_report_modules(self->dwfl);
+
+ self->dbg = dwfl_addrdwarf(self->dwfl, addr, &self->bias);
+ /* Here, check whether we could get a real dwarf */
+ if (!self->dbg) {
+ pr_debug("Failed to find kernel dwarf at %lx\n",
+ (unsigned long)addr);
+ dwfl_end(self->dwfl);
+ memset(self, 0, sizeof(*self));
+ return -ENOENT;
+ }
+
+ return 0;
+}
+#else
+/* With older elfutils, this only supports the kernel image... */
+static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self,
+ Dwarf_Addr addr __used)
+{
+ const char *path = kernel_get_module_path("kernel");
+
+ if (!path) {
+ pr_err("Failed to find vmlinux path\n");
+ return -ENOENT;
+ }
+
+ pr_debug2("Use file %s for debuginfo\n", path);
+ return debuginfo__init_offline_dwarf(self, path);
+}
+#endif
+
+struct debuginfo *debuginfo__new(const char *path)
+{
+ struct debuginfo *self = zalloc(sizeof(struct debuginfo));
+ if (!self)
+ return NULL;
+
+ if (debuginfo__init_offline_dwarf(self, path) < 0) {
+ free(self);
+ self = NULL;
+ }
+
+ return self;
+}
+
+struct debuginfo *debuginfo__new_online_kernel(unsigned long addr)
+{
+ struct debuginfo *self = zalloc(sizeof(struct debuginfo));
+ if (!self)
+ return NULL;
+
+ if (debuginfo__init_online_kernel_dwarf(self, (Dwarf_Addr)addr) < 0) {
+ free(self);
+ self = NULL;
+ }
+
+ return self;
+}
+
+void debuginfo__delete(struct debuginfo *self)
+{
+ if (self) {
+ if (self->dwfl)
+ dwfl_end(self->dwfl);
+ free(self);
+ }
+}
+
+/*
+ * Probe finder related functions
+ */
+
+static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs)
+{
+ struct probe_trace_arg_ref *ref;
+ ref = zalloc(sizeof(struct probe_trace_arg_ref));
+ if (ref != NULL)
+ ref->offset = offs;
+ return ref;
+}
+
+/*
+ * Convert a location into trace_arg.
+ * If tvar == NULL, this just checks whether the variable can be converted.
+ */
+static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
+ Dwarf_Op *fb_ops,
+ struct probe_trace_arg *tvar)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Op *op;
+ size_t nops;
+ unsigned int regn;
+ Dwarf_Word offs = 0;
+ bool ref = false;
+ const char *regs;
+ int ret;
+
+ if (dwarf_attr(vr_die, DW_AT_external, &attr) != NULL)
+ goto static_var;
+
+ /* TODO: handle more than one expression */
+ if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL ||
+ dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0 ||
+ nops == 0) {
+ /* TODO: Support const_value */
+ return -ENOENT;
+ }
+
+ if (op->atom == DW_OP_addr) {
+static_var:
+ if (!tvar)
+ return 0;
+ /* Static variables live in memory (not on the stack): use @varname */
+ ret = strlen(dwarf_diename(vr_die));
+ tvar->value = zalloc(ret + 2);
+ if (tvar->value == NULL)
+ return -ENOMEM;
+ snprintf(tvar->value, ret + 2, "@%s", dwarf_diename(vr_die));
+ tvar->ref = alloc_trace_arg_ref((long)offs);
+ if (tvar->ref == NULL)
+ return -ENOMEM;
+ return 0;
+ }
+
+ /* If this is relative to the frame base, set the offset */
+ if (op->atom == DW_OP_fbreg) {
+ if (fb_ops == NULL)
+ return -ENOTSUP;
+ ref = true;
+ offs = op->number;
+ op = &fb_ops[0];
+ }
+
+ if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) {
+ regn = op->atom - DW_OP_breg0;
+ offs += op->number;
+ ref = true;
+ } else if (op->atom >= DW_OP_reg0 && op->atom <= DW_OP_reg31) {
+ regn = op->atom - DW_OP_reg0;
+ } else if (op->atom == DW_OP_bregx) {
+ regn = op->number;
+ offs += op->number2;
+ ref = true;
+ } else if (op->atom == DW_OP_regx) {
+ regn = op->number;
+ } else {
+ pr_debug("DW_OP %x is not supported.\n", op->atom);
+ return -ENOTSUP;
+ }
+
+ if (!tvar)
+ return 0;
+
+ regs = get_arch_regstr(regn);
+ if (!regs) {
+ /* This should be a bug in DWARF or this tool */
+ pr_warning("Mapping for the register number %u "
+ "missing on this architecture.\n", regn);
+ return -ERANGE;
+ }
+
+ tvar->value = strdup(regs);
+ if (tvar->value == NULL)
+ return -ENOMEM;
+
+ if (ref) {
+ tvar->ref = alloc_trace_arg_ref((long)offs);
+ if (tvar->ref == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long))
+
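+/*
+ * Pick the kprobe-tracer fetch type string, e.g. (illustrative)
+ * "s32"/"u64" for basic types, "b3@5/32" for a bitfield, or
+ * "string" when that cast was requested.
+ */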
+static int convert_variable_type(Dwarf_Die *vr_die,
+ struct probe_trace_arg *tvar,
+ const char *cast)
+{
+ struct probe_trace_arg_ref **ref_ptr = &tvar->ref;
+ Dwarf_Die type;
+ char buf[16];
+ int bsize, boffs, total;
+ int ret;
+
+ /* TODO: check all types */
+ if (cast && strcmp(cast, "string") != 0) {
+ /* Non-string types are OK as-is */
+ tvar->type = strdup(cast);
+ return (tvar->type == NULL) ? -ENOMEM : 0;
+ }
+
+ bsize = dwarf_bitsize(vr_die);
+ if (bsize > 0) {
+ /* This is a bitfield */
+ boffs = dwarf_bitoffset(vr_die);
+ total = dwarf_bytesize(vr_die);
+ if (boffs < 0 || total < 0)
+ return -ENOENT;
+ ret = snprintf(buf, 16, "b%d@%d/%zd", bsize, boffs,
+ BYTES_TO_BITS(total));
+ goto formatted;
+ }
+
+ if (die_get_real_type(vr_die, &type) == NULL) {
+ pr_warning("Failed to get a type information of %s.\n",
+ dwarf_diename(vr_die));
+ return -ENOENT;
+ }
+
+ pr_debug("%s type is %s.\n",
+ dwarf_diename(vr_die), dwarf_diename(&type));
+
+ if (cast && strcmp(cast, "string") == 0) { /* String type */
+ ret = dwarf_tag(&type);
+ if (ret != DW_TAG_pointer_type &&
+ ret != DW_TAG_array_type) {
+ pr_warning("Failed to cast into string: "
+ "%s(%s) is not a pointer nor array.\n",
+ dwarf_diename(vr_die), dwarf_diename(&type));
+ return -EINVAL;
+ }
+ if (ret == DW_TAG_pointer_type) {
+ if (die_get_real_type(&type, &type) == NULL) {
+ pr_warning("Failed to get a type"
+ " information.\n");
+ return -ENOENT;
+ }
+ while (*ref_ptr)
+ ref_ptr = &(*ref_ptr)->next;
+ /* Add new reference with offset +0 */
+ *ref_ptr = zalloc(sizeof(struct probe_trace_arg_ref));
+ if (*ref_ptr == NULL) {
+ pr_warning("Out of memory error\n");
+ return -ENOMEM;
+ }
+ }
+ if (!die_compare_name(&type, "char") &&
+ !die_compare_name(&type, "unsigned char")) {
+ pr_warning("Failed to cast into string: "
+ "%s is not (unsigned) char *.\n",
+ dwarf_diename(vr_die));
+ return -EINVAL;
+ }
+ tvar->type = strdup(cast);
+ return (tvar->type == NULL) ? -ENOMEM : 0;
+ }
+
+ ret = dwarf_bytesize(&type);
+ if (ret <= 0)
+ /* No size ... try to use default type */
+ return 0;
+ ret = BYTES_TO_BITS(ret);
+
+ /* Check the bitwidth */
+ if (ret > MAX_BASIC_TYPE_BITS) {
+ pr_info("%s exceeds max-bitwidth. Cut down to %d bits.\n",
+ dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
+ ret = MAX_BASIC_TYPE_BITS;
+ }
+ ret = snprintf(buf, 16, "%c%d",
+ die_is_signed_type(&type) ? 's' : 'u', ret);
+
+formatted:
+ if (ret < 0 || ret >= 16) {
+ if (ret >= 16)
+ ret = -E2BIG;
+ pr_warning("Failed to convert variable type: %s\n",
+ strerror(-ret));
+ return ret;
+ }
+ tvar->type = strdup(buf);
+ if (tvar->type == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
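+/*
+ * Walk an argument's field chain (e.g., hypothetically, "->f_path"
+ * then ".dentry", or an array index like "[2]") and accumulate the
+ * dereference offsets into the probe_trace_arg_ref list.
+ */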
+static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
+ struct perf_probe_arg_field *field,
+ struct probe_trace_arg_ref **ref_ptr,
+ Dwarf_Die *die_mem)
+{
+ struct probe_trace_arg_ref *ref = *ref_ptr;
+ Dwarf_Die type;
+ Dwarf_Word offs;
+ int ret, tag;
+
+ pr_debug("converting %s in %s\n", field->name, varname);
+ if (die_get_real_type(vr_die, &type) == NULL) {
+ pr_warning("Failed to get the type of %s.\n", varname);
+ return -ENOENT;
+ }
+ pr_debug2("Var real type: (%x)\n", (unsigned)dwarf_dieoffset(&type));
+ tag = dwarf_tag(&type);
+
+ if (field->name[0] == '[' &&
+ (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)) {
+ if (field->next)
+ /* Save original type for next field */
+ memcpy(die_mem, &type, sizeof(*die_mem));
+ /* Get the type of this array */
+ if (die_get_real_type(&type, &type) == NULL) {
+ pr_warning("Failed to get the type of %s.\n", varname);
+ return -ENOENT;
+ }
+ pr_debug2("Array real type: (%x)\n",
+ (unsigned)dwarf_dieoffset(&type));
+ if (tag == DW_TAG_pointer_type) {
+ ref = zalloc(sizeof(struct probe_trace_arg_ref));
+ if (ref == NULL)
+ return -ENOMEM;
+ if (*ref_ptr)
+ (*ref_ptr)->next = ref;
+ else
+ *ref_ptr = ref;
+ }
+ ref->offset += dwarf_bytesize(&type) * field->index;
+ if (!field->next)
+ /* Save vr_die for converting types */
+ memcpy(die_mem, vr_die, sizeof(*die_mem));
+ goto next;
+ } else if (tag == DW_TAG_pointer_type) {
+ /* Check the pointer and dereference */
+ if (!field->ref) {
+ pr_err("Semantic error: %s must be referred by '->'\n",
+ field->name);
+ return -EINVAL;
+ }
+ /* Get the type pointed by this pointer */
+ if (die_get_real_type(&type, &type) == NULL) {
+ pr_warning("Failed to get the type of %s.\n", varname);
+ return -ENOENT;
+ }
+ /* Verify it is a data structure */
+ if (dwarf_tag(&type) != DW_TAG_structure_type) {
+ pr_warning("%s is not a data structure.\n", varname);
+ return -EINVAL;
+ }
+
+ ref = zalloc(sizeof(struct probe_trace_arg_ref));
+ if (ref == NULL)
+ return -ENOMEM;
+ if (*ref_ptr)
+ (*ref_ptr)->next = ref;
+ else
+ *ref_ptr = ref;
+ } else {
+ /* Verify it is a data structure */
+ if (tag != DW_TAG_structure_type) {
+ pr_warning("%s is not a data structure.\n", varname);
+ return -EINVAL;
+ }
+ if (field->name[0] == '[') {
+ pr_err("Semantic error: %s is not a pointor"
+ " nor array.\n", varname);
+ return -EINVAL;
+ }
+ if (field->ref) {
+ pr_err("Semantic error: %s must be referred by '.'\n",
+ field->name);
+ return -EINVAL;
+ }
+ if (!ref) {
+ pr_warning("Structure on a register is not "
+ "supported yet.\n");
+ return -ENOTSUP;
+ }
+ }
+
+ if (die_find_member(&type, field->name, die_mem) == NULL) {
+ pr_warning("%s(tyep:%s) has no member %s.\n", varname,
+ dwarf_diename(&type), field->name);
+ return -EINVAL;
+ }
+
+ /* Get the offset of the field */
+ ret = die_get_data_member_location(die_mem, &offs);
+ if (ret < 0) {
+ pr_warning("Failed to get the offset of %s.\n", field->name);
+ return ret;
+ }
+ ref->offset += (long)offs;
+
+next:
+ /* Converting next field */
+ if (field->next)
+ return convert_variable_fields(die_mem, field->name,
+ field->next, &ref, die_mem);
+ else
+ return 0;
+}
+
+/* Show a variable in kprobe event format */
+static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
+{
+ Dwarf_Die die_mem;
+ int ret;
+
+ pr_debug("Converting variable %s into trace event.\n",
+ dwarf_diename(vr_die));
+
+ ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops,
+ pf->tvar);
+ if (ret == -ENOENT)
+ pr_err("Failed to find the location of %s at this address.\n"
+ " Perhaps, it has been optimized out.\n", pf->pvar->var);
+ else if (ret == -ENOTSUP)
+ pr_err("Sorry, we don't support this variable location yet.\n");
+ else if (pf->pvar->field) {
+ ret = convert_variable_fields(vr_die, pf->pvar->var,
+ pf->pvar->field, &pf->tvar->ref,
+ &die_mem);
+ vr_die = &die_mem;
+ }
+ if (ret == 0)
+ ret = convert_variable_type(vr_die, pf->tvar, pf->pvar->type);
+ /* *expr will be cached in libdw. Don't free it. */
+ return ret;
+}
+
+/* Find a variable in a scope DIE */
+static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
+{
+ Dwarf_Die vr_die;
+ char buf[32], *ptr;
+ int ret = 0;
+
+ if (!is_c_varname(pf->pvar->var)) {
+ /* Copy raw parameters */
+ pf->tvar->value = strdup(pf->pvar->var);
+ if (pf->tvar->value == NULL)
+ return -ENOMEM;
+ if (pf->pvar->type) {
+ pf->tvar->type = strdup(pf->pvar->type);
+ if (pf->tvar->type == NULL)
+ return -ENOMEM;
+ }
+ if (pf->pvar->name) {
+ pf->tvar->name = strdup(pf->pvar->name);
+ if (pf->tvar->name == NULL)
+ return -ENOMEM;
+ } else
+ pf->tvar->name = NULL;
+ return 0;
+ }
+
+ if (pf->pvar->name)
+ pf->tvar->name = strdup(pf->pvar->name);
+ else {
+ ret = synthesize_perf_probe_arg(pf->pvar, buf, 32);
+ if (ret < 0)
+ return ret;
+ ptr = strchr(buf, ':'); /* Change type separator to _ */
+ if (ptr)
+ *ptr = '_';
+ pf->tvar->name = strdup(buf);
+ }
+ if (pf->tvar->name == NULL)
+ return -ENOMEM;
+
+ pr_debug("Searching '%s' variable in context.\n", pf->pvar->var);
+ /* Search child die for local variables and parameters. */
+ if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) {
+ /* Search again in global variables */
+ if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die))
+ ret = -ENOENT;
+ }
+ if (ret >= 0)
+ ret = convert_variable(&vr_die, pf);
+
+ if (ret < 0)
+ pr_warning("Failed to find '%s' in this function.\n",
+ pf->pvar->var);
+ return ret;
+}
+
+/* Convert subprogram DIE to trace point */
+static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr,
+ bool retprobe, struct probe_trace_point *tp)
+{
+ Dwarf_Addr eaddr, highaddr;
+ const char *name;
+
+ /* Copy the name of probe point */
+ name = dwarf_diename(sp_die);
+ if (name) {
+ if (dwarf_entrypc(sp_die, &eaddr) != 0) {
+ pr_warning("Failed to get entry address of %s\n",
+ dwarf_diename(sp_die));
+ return -ENOENT;
+ }
+ if (dwarf_highpc(sp_die, &highaddr) != 0) {
+ pr_warning("Failed to get end address of %s\n",
+ dwarf_diename(sp_die));
+ return -ENOENT;
+ }
+ if (paddr > highaddr) {
+ pr_warning("Offset specified is greater than size of %s\n",
+ dwarf_diename(sp_die));
+ return -EINVAL;
+ }
+ tp->symbol = strdup(name);
+ if (tp->symbol == NULL)
+ return -ENOMEM;
+ tp->offset = (unsigned long)(paddr - eaddr);
+ } else
+ /* This function has no name. */
+ tp->offset = (unsigned long)paddr;
+
+ /* Return probe must be on the head of a subprogram */
+ if (retprobe) {
+ if (eaddr != paddr) {
+ pr_warning("Return probe must be on the head of"
+ " a real function.\n");
+ return -EINVAL;
+ }
+ tp->retprobe = true;
+ }
+
+ return 0;
+}
+
+/* Call probe_finder callback with scope DIE */
+static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
+{
+ Dwarf_Attribute fb_attr;
+ size_t nops;
+ int ret;
+
+ if (!sc_die) {
+ pr_err("Caller must pass a scope DIE. Program error.\n");
+ return -EINVAL;
+ }
+
+ /* If not a real subprogram, find a real one */
+ if (dwarf_tag(sc_die) != DW_TAG_subprogram) {
+ if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
+ pr_warning("Failed to find probe point in any "
+ "functions.\n");
+ return -ENOENT;
+ }
+ } else
+ memcpy(&pf->sp_die, sc_die, sizeof(Dwarf_Die));
+
+ /* Get the frame base attribute/ops from subprogram */
+ dwarf_attr(&pf->sp_die, DW_AT_frame_base, &fb_attr);
+ ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1);
+ if (ret <= 0 || nops == 0) {
+ pf->fb_ops = NULL;
+#if _ELFUTILS_PREREQ(0, 142)
+ } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
+ pf->cfi != NULL) {
+ Dwarf_Frame *frame;
+ if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 ||
+ dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) {
+ pr_warning("Failed to get call frame on 0x%jx\n",
+ (uintmax_t)pf->addr);
+ return -ENOENT;
+ }
+#endif
+ }
+
+ /* Call finder's callback handler */
+ ret = pf->callback(sc_die, pf);
+
+ /* *pf->fb_ops will be cached in libdw. Don't free it. */
+ pf->fb_ops = NULL;
+
+ return ret;
+}
+
+struct find_scope_param {
+ const char *function;
+ const char *file;
+ int line;
+ int diff;
+ Dwarf_Die *die_mem;
+ bool found;
+};
+
+static int find_best_scope_cb(Dwarf_Die *fn_die, void *data)
+{
+ struct find_scope_param *fsp = data;
+ const char *file;
+ int lno;
+
+ /* Skip if declared file name does not match */
+ if (fsp->file) {
+ file = dwarf_decl_file(fn_die);
+ if (!file || strcmp(fsp->file, file) != 0)
+ return 0;
+ }
+ /* If the function name is given, that's what user expects */
+ if (fsp->function) {
+ if (die_compare_name(fn_die, fsp->function)) {
+ memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
+ fsp->found = true;
+ return 1;
+ }
+ } else {
+ /* With the line number, find the nearest declared DIE */
+ dwarf_decl_line(fn_die, &lno);
+ if (lno < fsp->line && fsp->diff > fsp->line - lno) {
+ /* Keep a candidate and continue */
+ fsp->diff = fsp->line - lno;
+ memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
+ fsp->found = true;
+ }
+ }
+ return 0;
+}
+
+/* Find an appropriate scope that fits the given conditions */
+static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
+{
+ struct find_scope_param fsp = {
+ .function = pf->pev->point.function,
+ .file = pf->fname,
+ .line = pf->lno,
+ .diff = INT_MAX,
+ .die_mem = die_mem,
+ .found = false,
+ };
+
+ cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, &fsp);
+
+ return fsp.found ? die_mem : NULL;
+}
+
+static int probe_point_line_walker(const char *fname, int lineno,
+ Dwarf_Addr addr, void *data)
+{
+ struct probe_finder *pf = data;
+ Dwarf_Die *sc_die, die_mem;
+ int ret;
+
+ if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0)
+ return 0;
+
+ pf->addr = addr;
+ sc_die = find_best_scope(pf, &die_mem);
+ if (!sc_die) {
+ pr_warning("Failed to find scope of probe point.\n");
+ return -ENOENT;
+ }
+
+ ret = call_probe_finder(sc_die, pf);
+
+ /* Continue if no error, because the line may be in an inline function */
+ return ret < 0 ? ret : 0;
+}
+
+/* Find probe point from its line number */
+static int find_probe_point_by_line(struct probe_finder *pf)
+{
+ return die_walk_lines(&pf->cu_die, probe_point_line_walker, pf);
+}
+
+/* Find lines which match lazy pattern */
+static int find_lazy_match_lines(struct list_head *head,
+ const char *fname, const char *pat)
+{
+ FILE *fp;
+ char *line = NULL;
+ size_t line_len;
+ ssize_t len;
+ int count = 0, linenum = 1;
+
+ fp = fopen(fname, "r");
+ if (!fp) {
+ pr_warning("Failed to open %s: %s\n", fname, strerror(errno));
+ return -errno;
+ }
+
+ while ((len = getline(&line, &line_len, fp)) > 0) {
+
+ if (line[len - 1] == '\n')
+ line[len - 1] = '\0';
+
+ if (strlazymatch(line, pat)) {
+ line_list__add_line(head, linenum);
+ count++;
+ }
+ linenum++;
+ }
+
+ if (ferror(fp))
+ count = -errno;
+ free(line);
+ fclose(fp);
+
+ if (count == 0)
+ pr_debug("No matched lines found in %s.\n", fname);
+ return count;
+}
+
+static int probe_point_lazy_walker(const char *fname, int lineno,
+ Dwarf_Addr addr, void *data)
+{
+ struct probe_finder *pf = data;
+ Dwarf_Die *sc_die, die_mem;
+ int ret;
+
+ if (!line_list__has_line(&pf->lcache, lineno) ||
+ strtailcmp(fname, pf->fname) != 0)
+ return 0;
+
+ pr_debug("Probe line found: line:%d addr:0x%llx\n",
+ lineno, (unsigned long long)addr);
+ pf->addr = addr;
+ pf->lno = lineno;
+ sc_die = find_best_scope(pf, &die_mem);
+ if (!sc_die) {
+ pr_warning("Failed to find scope of probe point.\n");
+ return -ENOENT;
+ }
+
+ ret = call_probe_finder(sc_die, pf);
+
+ /*
+ * Continue if no error, because the lazy pattern may match
+ * other lines
+ */
+ return ret < 0 ? ret : 0;
+}
+
+/* Find probe points from lazy pattern */
+static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+ int ret = 0;
+
+ if (list_empty(&pf->lcache)) {
+ /* Matching lazy line pattern */
+ ret = find_lazy_match_lines(&pf->lcache, pf->fname,
+ pf->pev->point.lazy_line);
+ if (ret <= 0)
+ return ret;
+ }
+
+ return die_walk_lines(sp_die, probe_point_lazy_walker, pf);
+}
+
+static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
+{
+ struct probe_finder *pf = data;
+ struct perf_probe_point *pp = &pf->pev->point;
+ Dwarf_Addr addr;
+ int ret;
+
+ if (pp->lazy_line)
+ ret = find_probe_point_lazy(in_die, pf);
+ else {
+ /* Get probe address */
+ if (dwarf_entrypc(in_die, &addr) != 0) {
+ pr_warning("Failed to get entry address of %s.\n",
+ dwarf_diename(in_die));
+ return -ENOENT;
+ }
+ pf->addr = addr;
+ pf->addr += pp->offset;
+ pr_debug("found inline addr: 0x%jx\n",
+ (uintmax_t)pf->addr);
+
+ ret = call_probe_finder(in_die, pf);
+ }
+
+ return ret;
+}
+
+/* Callback parameter with return value for libdw */
+struct dwarf_callback_param {
+ void *data;
+ int retval;
+};
+
+/* Search function from function name */
+static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
+{
+ struct dwarf_callback_param *param = data;
+ struct probe_finder *pf = param->data;
+ struct perf_probe_point *pp = &pf->pev->point;
+ Dwarf_Attribute attr;
+
+ /* Check tag and diename */
+ if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
+ !die_compare_name(sp_die, pp->function) ||
+ dwarf_attr(sp_die, DW_AT_declaration, &attr))
+ return DWARF_CB_OK;
+
+ /* Check declared file */
+ if (pp->file && strtailcmp(pp->file, dwarf_decl_file(sp_die)))
+ return DWARF_CB_OK;
+
+ pf->fname = dwarf_decl_file(sp_die);
+ if (pp->line) { /* Function relative line */
+ dwarf_decl_line(sp_die, &pf->lno);
+ pf->lno += pp->line;
+ param->retval = find_probe_point_by_line(pf);
+ } else if (!dwarf_func_inline(sp_die)) {
+ /* Real function */
+ if (pp->lazy_line)
+ param->retval = find_probe_point_lazy(sp_die, pf);
+ else {
+ if (dwarf_entrypc(sp_die, &pf->addr) != 0) {
+ pr_warning("Failed to get entry address of "
+ "%s.\n", dwarf_diename(sp_die));
+ param->retval = -ENOENT;
+ return DWARF_CB_ABORT;
+ }
+ pf->addr += pp->offset;
+ /* TODO: Check the address in this function */
+ param->retval = call_probe_finder(sp_die, pf);
+ }
+ } else
+ /* Inlined function: search instances */
+ param->retval = die_walk_instances(sp_die,
+ probe_point_inline_cb, (void *)pf);
+
+ return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */
+}
+
+static int find_probe_point_by_func(struct probe_finder *pf)
+{
+ struct dwarf_callback_param _param = {.data = (void *)pf,
+ .retval = 0};
+ dwarf_getfuncs(&pf->cu_die, probe_point_search_cb, &_param, 0);
+ return _param.retval;
+}
+
+struct pubname_callback_param {
+ char *function;
+ char *file;
+ Dwarf_Die *cu_die;
+ Dwarf_Die *sp_die;
+ int found;
+};
+
+static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data)
+{
+ struct pubname_callback_param *param = data;
+
+ if (dwarf_offdie(dbg, gl->die_offset, param->sp_die)) {
+ if (dwarf_tag(param->sp_die) != DW_TAG_subprogram)
+ return DWARF_CB_OK;
+
+ if (die_compare_name(param->sp_die, param->function)) {
+ if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die))
+ return DWARF_CB_OK;
+
+ if (param->file &&
+ strtailcmp(param->file, dwarf_decl_file(param->sp_die)))
+ return DWARF_CB_OK;
+
+ param->found = 1;
+ return DWARF_CB_ABORT;
+ }
+ }
+
+ return DWARF_CB_OK;
+}
+
+/* Find probe points from debuginfo */
+static int debuginfo__find_probes(struct debuginfo *self,
+ struct probe_finder *pf)
+{
+ struct perf_probe_point *pp = &pf->pev->point;
+ Dwarf_Off off, noff;
+ size_t cuhl;
+ Dwarf_Die *diep;
+ int ret = 0;
+
+#if _ELFUTILS_PREREQ(0, 142)
+ /* Get the call frame information from this dwarf */
+ pf->cfi = dwarf_getcfi(self->dbg);
+#endif
+
+ off = 0;
+ line_list__init(&pf->lcache);
+
+ /* Fastpath: lookup by function name from .debug_pubnames section */
+ if (pp->function) {
+ struct pubname_callback_param pubname_param = {
+ .function = pp->function,
+ .file = pp->file,
+ .cu_die = &pf->cu_die,
+ .sp_die = &pf->sp_die,
+ .found = 0,
+ };
+ struct dwarf_callback_param probe_param = {
+ .data = pf,
+ };
+
+ dwarf_getpubnames(self->dbg, pubname_search_cb,
+ &pubname_param, 0);
+ if (pubname_param.found) {
+ ret = probe_point_search_cb(&pf->sp_die, &probe_param);
+ if (ret)
+ goto found;
+ }
+ }
+
+ /* Loop on CUs (Compilation Units) */
+ while (!dwarf_nextcu(self->dbg, off, &noff, &cuhl, NULL, NULL, NULL)) {
+ /* Get the DIE (Debugging Information Entry) of this CU */
+ diep = dwarf_offdie(self->dbg, off + cuhl, &pf->cu_die);
+ if (!diep)
+ continue;
+
+ /* Check if target file is included. */
+ if (pp->file)
+ pf->fname = cu_find_realpath(&pf->cu_die, pp->file);
+ else
+ pf->fname = NULL;
+
+ if (!pp->file || pf->fname) {
+ if (pp->function)
+ ret = find_probe_point_by_func(pf);
+ else if (pp->lazy_line)
+ ret = find_probe_point_lazy(NULL, pf);
+ else {
+ pf->lno = pp->line;
+ ret = find_probe_point_by_line(pf);
+ }
+ if (ret < 0)
+ break;
+ }
+ off = noff;
+ }
+
+found:
+ line_list__free(&pf->lcache);
+
+ return ret;
+}
+
+/* Add a found probe point into trace event list */
+static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
+{
+ struct trace_event_finder *tf =
+ container_of(pf, struct trace_event_finder, pf);
+ struct probe_trace_event *tev;
+ int ret, i;
+
+ /* Check number of tevs */
+ if (tf->ntevs == tf->max_tevs) {
+ pr_warning("Too many( > %d) probe point found.\n",
+ tf->max_tevs);
+ return -ERANGE;
+ }
+ tev = &tf->tevs[tf->ntevs++];
+
+ /* Trace point should be converted from subprogram DIE */
+ ret = convert_to_trace_point(&pf->sp_die, pf->addr,
+ pf->pev->point.retprobe, &tev->point);
+ if (ret < 0)
+ return ret;
+
+ pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
+ tev->point.offset);
+
+ /* Find each argument */
+ tev->nargs = pf->pev->nargs;
+ tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
+ if (tev->args == NULL)
+ return -ENOMEM;
+ for (i = 0; i < pf->pev->nargs; i++) {
+ pf->pvar = &pf->pev->args[i];
+ pf->tvar = &tev->args[i];
+ /* Variable should be found from scope DIE */
+ ret = find_variable(sc_die, pf);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Find probe_trace_events specified by perf_probe_event from debuginfo */
+int debuginfo__find_trace_events(struct debuginfo *self,
+ struct perf_probe_event *pev,
+ struct probe_trace_event **tevs, int max_tevs)
+{
+ struct trace_event_finder tf = {
+ .pf = {.pev = pev, .callback = add_probe_trace_event},
+ .max_tevs = max_tevs};
+ int ret;
+
+ /* Allocate result tevs array */
+ *tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs);
+ if (*tevs == NULL)
+ return -ENOMEM;
+
+ tf.tevs = *tevs;
+ tf.ntevs = 0;
+
+ ret = debuginfo__find_probes(self, &tf.pf);
+ if (ret < 0) {
+ free(*tevs);
+ *tevs = NULL;
+ return ret;
+ }
+
+ return (ret < 0) ? ret : tf.ntevs;
+}
+
+#define MAX_VAR_LEN 64
+
+/* Collect available variables in this scope */
+static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
+{
+ struct available_var_finder *af = data;
+ struct variable_list *vl;
+ char buf[MAX_VAR_LEN];
+ int tag, ret;
+
+ vl = &af->vls[af->nvls - 1];
+
+ tag = dwarf_tag(die_mem);
+ if (tag == DW_TAG_formal_parameter ||
+ tag == DW_TAG_variable) {
+ ret = convert_variable_location(die_mem, af->pf.addr,
+ af->pf.fb_ops, NULL);
+ if (ret == 0) {
+ ret = die_get_varname(die_mem, buf, MAX_VAR_LEN);
+ pr_debug2("Add new var: %s\n", buf);
+ if (ret > 0)
+ strlist__add(vl->vars, buf);
+ }
+ }
+
+ if (af->child && dwarf_haspc(die_mem, af->pf.addr))
+ return DIE_FIND_CB_CONTINUE;
+ else
+ return DIE_FIND_CB_SIBLING;
+}
+
+/* Add found variables into the available variables list */
+static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
+{
+ struct available_var_finder *af =
+ container_of(pf, struct available_var_finder, pf);
+ struct variable_list *vl;
+ Dwarf_Die die_mem;
+ int ret;
+
+ /* Check number of variable lists */
+ if (af->nvls == af->max_vls) {
+ pr_warning("Too many (> %d) probe points found.\n", af->max_vls);
+ return -ERANGE;
+ }
+ vl = &af->vls[af->nvls++];
+
+ /* Trace point should be converted from subprogram DIE */
+ ret = convert_to_trace_point(&pf->sp_die, pf->addr,
+ pf->pev->point.retprobe, &vl->point);
+ if (ret < 0)
+ return ret;
+
+ pr_debug("Probe point found: %s+%lu\n", vl->point.symbol,
+ vl->point.offset);
+
+ /* Find local variables */
+ vl->vars = strlist__new(true, NULL);
+ if (vl->vars == NULL)
+ return -ENOMEM;
+ af->child = true;
+ die_find_child(sc_die, collect_variables_cb, (void *)af, &die_mem);
+
+ /* Find external variables */
+ if (!af->externs)
+ goto out;
+ /* Don't need to search child DIE for externs. */
+ af->child = false;
+ die_find_child(&pf->cu_die, collect_variables_cb, (void *)af, &die_mem);
+
+out:
+ if (strlist__empty(vl->vars)) {
+ strlist__delete(vl->vars);
+ vl->vars = NULL;
+ }
+
+ return ret;
+}
+
+/* Find available variables at given probe point */
+int debuginfo__find_available_vars_at(struct debuginfo *self,
+ struct perf_probe_event *pev,
+ struct variable_list **vls,
+ int max_vls, bool externs)
+{
+ struct available_var_finder af = {
+ .pf = {.pev = pev, .callback = add_available_vars},
+ .max_vls = max_vls, .externs = externs};
+ int ret;
+
+ /* Allocate result vls array */
+ *vls = zalloc(sizeof(struct variable_list) * max_vls);
+ if (*vls == NULL)
+ return -ENOMEM;
+
+ af.vls = *vls;
+ af.nvls = 0;
+
+ ret = debuginfo__find_probes(self, &af.pf);
+ if (ret < 0) {
+ /* Free vlist for error */
+ while (af.nvls--) {
+ if (af.vls[af.nvls].point.symbol)
+ free(af.vls[af.nvls].point.symbol);
+ if (af.vls[af.nvls].vars)
+ strlist__delete(af.vls[af.nvls].vars);
+ }
+ free(af.vls);
+ *vls = NULL;
+ return ret;
+ }
+
+ return (ret < 0) ? ret : af.nvls;
+}
+
+/* Reverse search */
+int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
+ struct perf_probe_point *ppt)
+{
+ Dwarf_Die cudie, spdie, indie;
+ Dwarf_Addr _addr, baseaddr;
+ const char *fname = NULL, *func = NULL, *tmp;
+ int baseline = 0, lineno = 0, ret = 0;
+
+ /* Adjust address with bias */
+ addr += self->bias;
+
+ /* Find cu die */
+ if (!dwarf_addrdie(self->dbg, (Dwarf_Addr)addr - self->bias, &cudie)) {
+ pr_warning("Failed to find debug information for address %lx\n",
+ addr);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Find a corresponding line (filename and lineno) */
+ cu_find_lineinfo(&cudie, addr, &fname, &lineno);
+ /* Don't care whether it failed or not */
+
+ /* Find a corresponding function (name, baseline and baseaddr) */
+ if (die_find_realfunc(&cudie, (Dwarf_Addr)addr, &spdie)) {
+ /* Get function entry information */
+ tmp = dwarf_diename(&spdie);
+ if (!tmp ||
+ dwarf_entrypc(&spdie, &baseaddr) != 0 ||
+ dwarf_decl_line(&spdie, &baseline) != 0)
+ goto post;
+ func = tmp;
+
+ if (addr == (unsigned long)baseaddr)
+ /* Function entry - Relative line number is 0 */
+ lineno = baseline;
+ else if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr,
+ &indie)) {
+ if (dwarf_entrypc(&indie, &_addr) == 0 &&
+ _addr == addr)
+ /*
+ * addr is at an inline function entry.
+ * In this case, lineno should be the call-site
+ * line number.
+ */
+ lineno = die_get_call_lineno(&indie);
+ else {
+ /*
+ * addr is in an inline function body.
+ * Since lineno points one of the lines
+ * of the inline function, baseline should
+ * be the entry line of the inline function.
+ */
+ tmp = dwarf_diename(&indie);
+ if (tmp &&
+ dwarf_decl_line(&spdie, &baseline) == 0)
+ func = tmp;
+ }
+ }
+ }
+
+post:
+ /* Make a relative line number or an offset */
+ if (lineno)
+ ppt->line = lineno - baseline;
+ else if (func)
+ ppt->offset = addr - (unsigned long)baseaddr;
+
+ /* Duplicate strings */
+ if (func) {
+ ppt->function = strdup(func);
+ if (ppt->function == NULL) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ }
+ if (fname) {
+ ppt->file = strdup(fname);
+ if (ppt->file == NULL) {
+ if (ppt->function) {
+ free(ppt->function);
+ ppt->function = NULL;
+ }
+ ret = -ENOMEM;
+ goto end;
+ }
+ }
+end:
+ if (ret == 0 && (fname || func))
+ ret = 1; /* Found a point */
+ return ret;
+}
+
+/* Add a line and store the src path */
+static int line_range_add_line(const char *src, unsigned int lineno,
+ struct line_range *lr)
+{
+ /* Copy source path */
+ if (!lr->path) {
+ lr->path = strdup(src);
+ if (lr->path == NULL)
+ return -ENOMEM;
+ }
+ return line_list__add_line(&lr->line_list, lineno);
+}
+
+static int line_range_walk_cb(const char *fname, int lineno,
+ Dwarf_Addr addr __used,
+ void *data)
+{
+ struct line_finder *lf = data;
+
+ if ((strtailcmp(fname, lf->fname) != 0) ||
+ (lf->lno_s > lineno || lf->lno_e < lineno))
+ return 0;
+
+ if (line_range_add_line(fname, lineno, lf->lr) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Find line range from its line number */
+static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
+{
+ int ret;
+
+ ret = die_walk_lines(sp_die ?: &lf->cu_die, line_range_walk_cb, lf);
+
+ /* Update status */
+ if (ret >= 0)
+ if (!list_empty(&lf->lr->line_list))
+ ret = lf->found = 1;
+ else
+ ret = 0; /* Lines are not found */
+ else {
+ free(lf->lr->path);
+ lf->lr->path = NULL;
+ }
+ return ret;
+}
+
+static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
+{
+ find_line_range_by_line(in_die, data);
+
+ /*
+ * We have to check all instances of an inlined function, because
+ * some execution paths can be optimized out depending on the
+ * function arguments of each instance
+ */
+ return 0;
+}
+
+/* Search for the function by name */
+static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
+{
+ struct dwarf_callback_param *param = data;
+ struct line_finder *lf = param->data;
+ struct line_range *lr = lf->lr;
+
+ /* Check declared file */
+ if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die)))
+ return DWARF_CB_OK;
+
+ if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
+ die_compare_name(sp_die, lr->function)) {
+ lf->fname = dwarf_decl_file(sp_die);
+ dwarf_decl_line(sp_die, &lr->offset);
+ pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset);
+ lf->lno_s = lr->offset + lr->start;
+ if (lf->lno_s < 0) /* Overflow */
+ lf->lno_s = INT_MAX;
+ lf->lno_e = lr->offset + lr->end;
+ if (lf->lno_e < 0) /* Overflow */
+ lf->lno_e = INT_MAX;
+ pr_debug("New line range: %d to %d\n", lf->lno_s, lf->lno_e);
+ lr->start = lf->lno_s;
+ lr->end = lf->lno_e;
+ if (dwarf_func_inline(sp_die))
+ param->retval = die_walk_instances(sp_die,
+ line_range_inline_cb, lf);
+ else
+ param->retval = find_line_range_by_line(sp_die, lf);
+ return DWARF_CB_ABORT;
+ }
+ return DWARF_CB_OK;
+}
+
+static int find_line_range_by_func(struct line_finder *lf)
+{
+ struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0};
+ dwarf_getfuncs(&lf->cu_die, line_range_search_cb, &param, 0);
+ return param.retval;
+}
+
+int debuginfo__find_line_range(struct debuginfo *self, struct line_range *lr)
+{
+ struct line_finder lf = {.lr = lr, .found = 0};
+ int ret = 0;
+ Dwarf_Off off = 0, noff;
+ size_t cuhl;
+ Dwarf_Die *diep;
+ const char *comp_dir;
+
+ /* Fastpath: lookup by function name from .debug_pubnames section */
+ if (lr->function) {
+ struct pubname_callback_param pubname_param = {
+ .function = lr->function, .file = lr->file,
+ .cu_die = &lf.cu_die, .sp_die = &lf.sp_die, .found = 0};
+ struct dwarf_callback_param line_range_param = {
+ .data = (void *)&lf, .retval = 0};
+
+ dwarf_getpubnames(self->dbg, pubname_search_cb,
+ &pubname_param, 0);
+ if (pubname_param.found) {
+ line_range_search_cb(&lf.sp_die, &line_range_param);
+ if (lf.found)
+ goto found;
+ }
+ }
+
+ /* Loop on CUs (Compilation Units) */
+ while (!lf.found && ret >= 0) {
+ if (dwarf_nextcu(self->dbg, off, &noff, &cuhl,
+ NULL, NULL, NULL) != 0)
+ break;
+
+ /* Get the DIE (Debugging Information Entry) of this CU */
+ diep = dwarf_offdie(self->dbg, off + cuhl, &lf.cu_die);
+ if (!diep)
+ continue;
+
+ /* Check if target file is included. */
+ if (lr->file)
+ lf.fname = cu_find_realpath(&lf.cu_die, lr->file);
+ else
+ lf.fname = 0;
+
+ if (!lr->file || lf.fname) {
+ if (lr->function)
+ ret = find_line_range_by_func(&lf);
+ else {
+ lf.lno_s = lr->start;
+ lf.lno_e = lr->end;
+ ret = find_line_range_by_line(NULL, &lf);
+ }
+ }
+ off = noff;
+ }
+
+found:
+ /* Store comp_dir */
+ if (lf.found) {
+ comp_dir = cu_get_comp_dir(&lf.cu_die);
+ if (comp_dir) {
+ lr->comp_dir = strdup(comp_dir);
+ if (!lr->comp_dir)
+ ret = -ENOMEM;
+ }
+ }
+
+ pr_debug("path: %s\n", lr->path);
+ return (ret < 0) ? ret : lf.found;
+}
+
diff --git a/ANDROID_3.4.5/tools/perf/util/probe-finder.h b/ANDROID_3.4.5/tools/perf/util/probe-finder.h
new file mode 100644
index 00000000..17e94d0c
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/probe-finder.h
@@ -0,0 +1,107 @@
+#ifndef _PROBE_FINDER_H
+#define _PROBE_FINDER_H
+
+#include <stdbool.h>
+#include "util.h"
+#include "probe-event.h"
+
+#define MAX_PROBE_BUFFER 1024
+#define MAX_PROBES 128
+
+static inline int is_c_varname(const char *name)
+{
+ /* TODO */
+ return isalpha(name[0]) || name[0] == '_';
+}
+
+#ifdef DWARF_SUPPORT
+
+#include "dwarf-aux.h"
+
+/* TODO: export debuginfo data structure even if no dwarf support */
+
+/* debug information structure */
+struct debuginfo {
+ Dwarf *dbg;
+ Dwfl *dwfl;
+ Dwarf_Addr bias;
+};
+
+extern struct debuginfo *debuginfo__new(const char *path);
+extern struct debuginfo *debuginfo__new_online_kernel(unsigned long addr);
+extern void debuginfo__delete(struct debuginfo *self);
+
+/* Find probe_trace_events specified by perf_probe_event from debuginfo */
+extern int debuginfo__find_trace_events(struct debuginfo *self,
+ struct perf_probe_event *pev,
+ struct probe_trace_event **tevs,
+ int max_tevs);
+
+/* Find a perf_probe_point from debuginfo */
+extern int debuginfo__find_probe_point(struct debuginfo *self,
+ unsigned long addr,
+ struct perf_probe_point *ppt);
+
+/* Find a line range */
+extern int debuginfo__find_line_range(struct debuginfo *self,
+ struct line_range *lr);
+
+/* Find available variables */
+extern int debuginfo__find_available_vars_at(struct debuginfo *self,
+ struct perf_probe_event *pev,
+ struct variable_list **vls,
+ int max_points, bool externs);
+
+struct probe_finder {
+ struct perf_probe_event *pev; /* Target probe event */
+
+ /* Callback when a probe point is found */
+ int (*callback)(Dwarf_Die *sc_die, struct probe_finder *pf);
+
+ /* For function searching */
+ int lno; /* Line number */
+ Dwarf_Addr addr; /* Address */
+ const char *fname; /* Real file name */
+ Dwarf_Die cu_die; /* Current CU */
+ Dwarf_Die sp_die;
+ struct list_head lcache; /* Line cache for lazy match */
+
+ /* For variable searching */
+#if _ELFUTILS_PREREQ(0, 142)
+ Dwarf_CFI *cfi; /* Call Frame Information */
+#endif
+ Dwarf_Op *fb_ops; /* Frame base attribute */
+ struct perf_probe_arg *pvar; /* Current target variable */
+ struct probe_trace_arg *tvar; /* Current result variable */
+};
+
+struct trace_event_finder {
+ struct probe_finder pf;
+ struct probe_trace_event *tevs; /* Found trace events */
+ int ntevs; /* Number of trace events */
+ int max_tevs; /* Max number of trace events */
+};
+
+struct available_var_finder {
+ struct probe_finder pf;
+ struct variable_list *vls; /* Found variable lists */
+ int nvls; /* Number of variable lists */
+ int max_vls; /* Max no. of variable lists */
+ bool externs; /* Find external vars too */
+ bool child; /* Search child scopes */
+};
+
+struct line_finder {
+ struct line_range *lr; /* Target line range */
+
+ const char *fname; /* File name */
+ int lno_s; /* Start line number */
+ int lno_e; /* End line number */
+ Dwarf_Die cu_die; /* Current CU */
+ Dwarf_Die sp_die;
+ int found;
+};
+
+#endif /* DWARF_SUPPORT */
+
+#endif /*_PROBE_FINDER_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/pstack.c b/ANDROID_3.4.5/tools/perf/util/pstack.c
new file mode 100644
index 00000000..13d36faf
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/pstack.c
@@ -0,0 +1,75 @@
+/*
+ * Simple pointer stack
+ *
+ * (c) 2010 Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+
+#include "util.h"
+#include "pstack.h"
+#include <linux/kernel.h>
+#include <stdlib.h>
+
+struct pstack {
+ unsigned short top;
+ unsigned short max_nr_entries;
+ void *entries[0];
+};
+
+struct pstack *pstack__new(unsigned short max_nr_entries)
+{
+ struct pstack *self = zalloc((sizeof(*self) +
+ max_nr_entries * sizeof(void *)));
+ if (self != NULL)
+ self->max_nr_entries = max_nr_entries;
+ return self;
+}
+
+void pstack__delete(struct pstack *self)
+{
+ free(self);
+}
+
+bool pstack__empty(const struct pstack *self)
+{
+ return self->top == 0;
+}
+
+void pstack__remove(struct pstack *self, void *key)
+{
+ unsigned short i = self->top, last_index = self->top - 1;
+
+ while (i-- != 0) {
+ if (self->entries[i] == key) {
+ if (i < last_index)
+ memmove(self->entries + i,
+ self->entries + i + 1,
+ (last_index - i) * sizeof(void *));
+ --self->top;
+ return;
+ }
+ }
+ pr_err("%s: %p not on the pstack!\n", __func__, key);
+}
+
+void pstack__push(struct pstack *self, void *key)
+{
+ if (self->top == self->max_nr_entries) {
+ pr_err("%s: top=%d, overflow!\n", __func__, self->top);
+ return;
+ }
+ self->entries[self->top++] = key;
+}
+
+void *pstack__pop(struct pstack *self)
+{
+ void *ret;
+
+ if (self->top == 0) {
+ pr_err("%s: underflow!\n", __func__);
+ return NULL;
+ }
+
+ ret = self->entries[--self->top];
+ self->entries[self->top] = NULL;
+ return ret;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/pstack.h b/ANDROID_3.4.5/tools/perf/util/pstack.h
new file mode 100644
index 00000000..4cedea59
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/pstack.h
@@ -0,0 +1,14 @@
+#ifndef _PERF_PSTACK_
+#define _PERF_PSTACK_
+
+#include <stdbool.h>
+
+struct pstack;
+struct pstack *pstack__new(unsigned short max_nr_entries);
+void pstack__delete(struct pstack *self);
+bool pstack__empty(const struct pstack *self);
+void pstack__remove(struct pstack *self, void *key);
+void pstack__push(struct pstack *self, void *key);
+void *pstack__pop(struct pstack *self);
+
+#endif /* _PERF_PSTACK_ */
diff --git a/ANDROID_3.4.5/tools/perf/util/python-ext-sources b/ANDROID_3.4.5/tools/perf/util/python-ext-sources
new file mode 100644
index 00000000..2884e67e
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/python-ext-sources
@@ -0,0 +1,19 @@
+#
+# List of files needed by perf python extension
+#
+# Each source file must be placed on its own line so that it can be
+# processed by Makefile and util/setup.py accordingly.
+#
+
+util/python.c
+util/ctype.c
+util/evlist.c
+util/evsel.c
+util/cpumap.c
+util/thread_map.c
+util/util.c
+util/xyarray.c
+util/cgroup.c
+util/debugfs.c
+util/strlist.c
+../../lib/rbtree.c
diff --git a/ANDROID_3.4.5/tools/perf/util/python.c b/ANDROID_3.4.5/tools/perf/util/python.c
new file mode 100644
index 00000000..e03b58a4
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/python.c
@@ -0,0 +1,1044 @@
+#include <Python.h>
+#include <structmember.h>
+#include <inttypes.h>
+#include <poll.h>
+#include "evlist.h"
+#include "evsel.h"
+#include "event.h"
+#include "cpumap.h"
+#include "thread_map.h"
+
+/* Define PyVarObject_HEAD_INIT for python 2.5 */
+#ifndef PyVarObject_HEAD_INIT
+# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
+#endif
+
+struct throttle_event {
+ struct perf_event_header header;
+ u64 time;
+ u64 id;
+ u64 stream_id;
+};
+
+PyMODINIT_FUNC initperf(void);
+
+#define member_def(type, member, ptype, help) \
+ { #member, ptype, \
+ offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
+ 0, help }
+
+#define sample_member_def(name, member, ptype, help) \
+ { #name, ptype, \
+ offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
+ 0, help }
+
+struct pyrf_event {
+ PyObject_HEAD
+ struct perf_sample sample;
+ union perf_event event;
+};
+
+#define sample_members \
+ sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"), \
+ sample_member_def(sample_pid, pid, T_INT, "event pid"), \
+ sample_member_def(sample_tid, tid, T_INT, "event tid"), \
+ sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
+ sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
+ sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
+ sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
+ sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
+ sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
+
+static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
+
+static PyMemberDef pyrf_mmap_event__members[] = {
+ sample_members
+ member_def(perf_event_header, type, T_UINT, "event type"),
+ member_def(mmap_event, pid, T_UINT, "event pid"),
+ member_def(mmap_event, tid, T_UINT, "event tid"),
+ member_def(mmap_event, start, T_ULONGLONG, "start of the map"),
+ member_def(mmap_event, len, T_ULONGLONG, "map length"),
+ member_def(mmap_event, pgoff, T_ULONGLONG, "page offset"),
+ member_def(mmap_event, filename, T_STRING_INPLACE, "backing store"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
+{
+ PyObject *ret;
+ char *s;
+
+ if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRIx64 ", "
+ "length: %#" PRIx64 ", offset: %#" PRIx64 ", "
+ "filename: %s }",
+ pevent->event.mmap.pid, pevent->event.mmap.tid,
+ pevent->event.mmap.start, pevent->event.mmap.len,
+ pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
+ ret = PyErr_NoMemory();
+ } else {
+ ret = PyString_FromString(s);
+ free(s);
+ }
+ return ret;
+}
+
+static PyTypeObject pyrf_mmap_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.mmap_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_mmap_event__doc,
+ .tp_members = pyrf_mmap_event__members,
+ .tp_repr = (reprfunc)pyrf_mmap_event__repr,
+};
+
+static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
+
+static PyMemberDef pyrf_task_event__members[] = {
+ sample_members
+ member_def(perf_event_header, type, T_UINT, "event type"),
+ member_def(fork_event, pid, T_UINT, "event pid"),
+ member_def(fork_event, ppid, T_UINT, "event ppid"),
+ member_def(fork_event, tid, T_UINT, "event tid"),
+ member_def(fork_event, ptid, T_UINT, "event ptid"),
+ member_def(fork_event, time, T_ULONGLONG, "timestamp"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
+{
+ return PyString_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
+ "ptid: %u, time: %" PRIu64 "}",
+ pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
+ pevent->event.fork.pid,
+ pevent->event.fork.ppid,
+ pevent->event.fork.tid,
+ pevent->event.fork.ptid,
+ pevent->event.fork.time);
+}
+
+static PyTypeObject pyrf_task_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.task_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_task_event__doc,
+ .tp_members = pyrf_task_event__members,
+ .tp_repr = (reprfunc)pyrf_task_event__repr,
+};
+
+static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
+
+static PyMemberDef pyrf_comm_event__members[] = {
+ sample_members
+ member_def(perf_event_header, type, T_UINT, "event type"),
+ member_def(comm_event, pid, T_UINT, "event pid"),
+ member_def(comm_event, tid, T_UINT, "event tid"),
+ member_def(comm_event, comm, T_STRING_INPLACE, "process name"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
+{
+ return PyString_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
+ pevent->event.comm.pid,
+ pevent->event.comm.tid,
+ pevent->event.comm.comm);
+}
+
+static PyTypeObject pyrf_comm_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.comm_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_comm_event__doc,
+ .tp_members = pyrf_comm_event__members,
+ .tp_repr = (reprfunc)pyrf_comm_event__repr,
+};
+
+static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
+
+static PyMemberDef pyrf_throttle_event__members[] = {
+ sample_members
+ member_def(perf_event_header, type, T_UINT, "event type"),
+ member_def(throttle_event, time, T_ULONGLONG, "timestamp"),
+ member_def(throttle_event, id, T_ULONGLONG, "event id"),
+ member_def(throttle_event, stream_id, T_ULONGLONG, "event stream id"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
+{
+ struct throttle_event *te = (struct throttle_event *)(&pevent->event.header + 1);
+
+ return PyString_FromFormat("{ type: %sthrottle, time: %" PRIu64 ", id: %" PRIu64
+ ", stream_id: %" PRIu64 " }",
+ pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
+ te->time, te->id, te->stream_id);
+}
+
+static PyTypeObject pyrf_throttle_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.throttle_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_throttle_event__doc,
+ .tp_members = pyrf_throttle_event__members,
+ .tp_repr = (reprfunc)pyrf_throttle_event__repr,
+};
+
+static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");
+
+static PyMemberDef pyrf_lost_event__members[] = {
+ sample_members
+ member_def(lost_event, id, T_ULONGLONG, "event id"),
+ member_def(lost_event, lost, T_ULONGLONG, "number of lost events"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
+{
+ PyObject *ret;
+ char *s;
+
+ if (asprintf(&s, "{ type: lost, id: %#" PRIx64 ", "
+ "lost: %#" PRIx64 " }",
+ pevent->event.lost.id, pevent->event.lost.lost) < 0) {
+ ret = PyErr_NoMemory();
+ } else {
+ ret = PyString_FromString(s);
+ free(s);
+ }
+ return ret;
+}
+
+static PyTypeObject pyrf_lost_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.lost_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_lost_event__doc,
+ .tp_members = pyrf_lost_event__members,
+ .tp_repr = (reprfunc)pyrf_lost_event__repr,
+};
+
+static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");
+
+static PyMemberDef pyrf_read_event__members[] = {
+ sample_members
+ member_def(read_event, pid, T_UINT, "event pid"),
+ member_def(read_event, tid, T_UINT, "event tid"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
+{
+ return PyString_FromFormat("{ type: read, pid: %u, tid: %u }",
+ pevent->event.read.pid,
+ pevent->event.read.tid);
+ /*
+ * FIXME: return the array of read values,
+ * making this method useful ;-)
+ */
+}
+
+static PyTypeObject pyrf_read_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.read_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_read_event__doc,
+ .tp_members = pyrf_read_event__members,
+ .tp_repr = (reprfunc)pyrf_read_event__repr,
+};
+
+static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");
+
+static PyMemberDef pyrf_sample_event__members[] = {
+ sample_members
+ member_def(perf_event_header, type, T_UINT, "event type"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
+{
+ PyObject *ret;
+ char *s;
+
+ if (asprintf(&s, "{ type: sample }") < 0) {
+ ret = PyErr_NoMemory();
+ } else {
+ ret = PyString_FromString(s);
+ free(s);
+ }
+ return ret;
+}
+
+static PyTypeObject pyrf_sample_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.sample_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_sample_event__doc,
+ .tp_members = pyrf_sample_event__members,
+ .tp_repr = (reprfunc)pyrf_sample_event__repr,
+};
+
+static int pyrf_event__setup_types(void)
+{
+ int err;
+ pyrf_mmap_event__type.tp_new =
+ pyrf_task_event__type.tp_new =
+ pyrf_comm_event__type.tp_new =
+ pyrf_lost_event__type.tp_new =
+ pyrf_read_event__type.tp_new =
+ pyrf_sample_event__type.tp_new =
+ pyrf_throttle_event__type.tp_new = PyType_GenericNew;
+ err = PyType_Ready(&pyrf_mmap_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_lost_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_task_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_comm_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_throttle_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_read_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_sample_event__type);
+ if (err < 0)
+ goto out;
+out:
+ return err;
+}
+
+static PyTypeObject *pyrf_event__type[] = {
+ [PERF_RECORD_MMAP] = &pyrf_mmap_event__type,
+ [PERF_RECORD_LOST] = &pyrf_lost_event__type,
+ [PERF_RECORD_COMM] = &pyrf_comm_event__type,
+ [PERF_RECORD_EXIT] = &pyrf_task_event__type,
+ [PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type,
+ [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
+ [PERF_RECORD_FORK] = &pyrf_task_event__type,
+ [PERF_RECORD_READ] = &pyrf_read_event__type,
+ [PERF_RECORD_SAMPLE] = &pyrf_sample_event__type,
+};
+
+static PyObject *pyrf_event__new(union perf_event *event)
+{
+ struct pyrf_event *pevent;
+ PyTypeObject *ptype;
+
+ if (event->header.type < PERF_RECORD_MMAP ||
+ event->header.type > PERF_RECORD_SAMPLE)
+ return NULL;
+
+ ptype = pyrf_event__type[event->header.type];
+ pevent = PyObject_New(struct pyrf_event, ptype);
+ if (pevent != NULL)
+ memcpy(&pevent->event, event, event->header.size);
+ return (PyObject *)pevent;
+}
+
+struct pyrf_cpu_map {
+ PyObject_HEAD
+
+ struct cpu_map *cpus;
+};
+
+static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
+ PyObject *args, PyObject *kwargs)
+{
+ static char *kwlist[] = { "cpustr", NULL };
+ char *cpustr = NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
+ kwlist, &cpustr))
+ return -1;
+
+ pcpus->cpus = cpu_map__new(cpustr);
+ if (pcpus->cpus == NULL)
+ return -1;
+ return 0;
+}
+
+static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
+{
+ cpu_map__delete(pcpus->cpus);
+ pcpus->ob_type->tp_free((PyObject*)pcpus);
+}
+
+static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
+{
+ struct pyrf_cpu_map *pcpus = (void *)obj;
+
+ return pcpus->cpus->nr;
+}
+
+static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
+{
+ struct pyrf_cpu_map *pcpus = (void *)obj;
+
+ if (i >= pcpus->cpus->nr)
+ return NULL;
+
+ return Py_BuildValue("i", pcpus->cpus->map[i]);
+}
+
+static PySequenceMethods pyrf_cpu_map__sequence_methods = {
+ .sq_length = pyrf_cpu_map__length,
+ .sq_item = pyrf_cpu_map__item,
+};
+
+static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
+
+static PyTypeObject pyrf_cpu_map__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.cpu_map",
+ .tp_basicsize = sizeof(struct pyrf_cpu_map),
+ .tp_dealloc = (destructor)pyrf_cpu_map__delete,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_cpu_map__doc,
+ .tp_as_sequence = &pyrf_cpu_map__sequence_methods,
+ .tp_init = (initproc)pyrf_cpu_map__init,
+};
+
+static int pyrf_cpu_map__setup_types(void)
+{
+ pyrf_cpu_map__type.tp_new = PyType_GenericNew;
+ return PyType_Ready(&pyrf_cpu_map__type);
+}
+
+struct pyrf_thread_map {
+ PyObject_HEAD
+
+ struct thread_map *threads;
+};
+
+static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
+ PyObject *args, PyObject *kwargs)
+{
+ static char *kwlist[] = { "pid", "tid", "uid", NULL };
+ int pid = -1, tid = -1, uid = UINT_MAX;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
+ kwlist, &pid, &tid, &uid))
+ return -1;
+
+ pthreads->threads = thread_map__new(pid, tid, uid);
+ if (pthreads->threads == NULL)
+ return -1;
+ return 0;
+}
+
+static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
+{
+ thread_map__delete(pthreads->threads);
+ pthreads->ob_type->tp_free((PyObject*)pthreads);
+}
+
+static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
+{
+ struct pyrf_thread_map *pthreads = (void *)obj;
+
+ return pthreads->threads->nr;
+}
+
+static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
+{
+ struct pyrf_thread_map *pthreads = (void *)obj;
+
+ if (i >= pthreads->threads->nr)
+ return NULL;
+
+ return Py_BuildValue("i", pthreads->threads->map[i]);
+}
+
+static PySequenceMethods pyrf_thread_map__sequence_methods = {
+ .sq_length = pyrf_thread_map__length,
+ .sq_item = pyrf_thread_map__item,
+};
+
+static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
+
+static PyTypeObject pyrf_thread_map__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.thread_map",
+ .tp_basicsize = sizeof(struct pyrf_thread_map),
+ .tp_dealloc = (destructor)pyrf_thread_map__delete,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_thread_map__doc,
+ .tp_as_sequence = &pyrf_thread_map__sequence_methods,
+ .tp_init = (initproc)pyrf_thread_map__init,
+};
+
+static int pyrf_thread_map__setup_types(void)
+{
+ pyrf_thread_map__type.tp_new = PyType_GenericNew;
+ return PyType_Ready(&pyrf_thread_map__type);
+}
+
+struct pyrf_evsel {
+ PyObject_HEAD
+
+ struct perf_evsel evsel;
+};
+
+static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
+ };
+ static char *kwlist[] = {
+ "type",
+ "config",
+ "sample_freq",
+ "sample_period",
+ "sample_type",
+ "read_format",
+ "disabled",
+ "inherit",
+ "pinned",
+ "exclusive",
+ "exclude_user",
+ "exclude_kernel",
+ "exclude_hv",
+ "exclude_idle",
+ "mmap",
+ "comm",
+ "freq",
+ "inherit_stat",
+ "enable_on_exec",
+ "task",
+ "watermark",
+ "precise_ip",
+ "mmap_data",
+ "sample_id_all",
+ "wakeup_events",
+ "bp_type",
+ "bp_addr",
+ "bp_len",
+ NULL
+ };
+ u64 sample_period = 0;
+ u32 disabled = 0,
+ inherit = 0,
+ pinned = 0,
+ exclusive = 0,
+ exclude_user = 0,
+ exclude_kernel = 0,
+ exclude_hv = 0,
+ exclude_idle = 0,
+ mmap = 0,
+ comm = 0,
+ freq = 1,
+ inherit_stat = 0,
+ enable_on_exec = 0,
+ task = 0,
+ watermark = 0,
+ precise_ip = 0,
+ mmap_data = 0,
+ sample_id_all = 1;
+ int idx = 0;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs,
+ "|iKiKKiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
+ &attr.type, &attr.config, &attr.sample_freq,
+ &sample_period, &attr.sample_type,
+ &attr.read_format, &disabled, &inherit,
+ &pinned, &exclusive, &exclude_user,
+ &exclude_kernel, &exclude_hv, &exclude_idle,
+ &mmap, &comm, &freq, &inherit_stat,
+ &enable_on_exec, &task, &watermark,
+ &precise_ip, &mmap_data, &sample_id_all,
+ &attr.wakeup_events, &attr.bp_type,
+ &attr.bp_addr, &attr.bp_len, &idx))
+ return -1;
+
+ /* union... */
+ if (sample_period != 0) {
+ if (attr.sample_freq != 0)
+ return -1; /* FIXME: throw right exception */
+ attr.sample_period = sample_period;
+ }
+
+ /* Bitfields */
+ attr.disabled = disabled;
+ attr.inherit = inherit;
+ attr.pinned = pinned;
+ attr.exclusive = exclusive;
+ attr.exclude_user = exclude_user;
+ attr.exclude_kernel = exclude_kernel;
+ attr.exclude_hv = exclude_hv;
+ attr.exclude_idle = exclude_idle;
+ attr.mmap = mmap;
+ attr.comm = comm;
+ attr.freq = freq;
+ attr.inherit_stat = inherit_stat;
+ attr.enable_on_exec = enable_on_exec;
+ attr.task = task;
+ attr.watermark = watermark;
+ attr.precise_ip = precise_ip;
+ attr.mmap_data = mmap_data;
+ attr.sample_id_all = sample_id_all;
+
+ perf_evsel__init(&pevsel->evsel, &attr, idx);
+ return 0;
+}
+
+static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
+{
+ perf_evsel__exit(&pevsel->evsel);
+ pevsel->ob_type->tp_free((PyObject*)pevsel);
+}
+
+static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_evsel *evsel = &pevsel->evsel;
+ struct cpu_map *cpus = NULL;
+ struct thread_map *threads = NULL;
+ PyObject *pcpus = NULL, *pthreads = NULL;
+ int group = 0, inherit = 0;
+ static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
+ &pcpus, &pthreads, &group, &inherit))
+ return NULL;
+
+ if (pthreads != NULL)
+ threads = ((struct pyrf_thread_map *)pthreads)->threads;
+
+ if (pcpus != NULL)
+ cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
+
+ evsel->attr.inherit = inherit;
+ /*
+ * This will group just the fds for this single evsel; to group
+ * multiple events, use evlist.open().
+ */
+ if (perf_evsel__open(evsel, cpus, threads, group, NULL) < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyMethodDef pyrf_evsel__methods[] = {
+ {
+ .ml_name = "open",
+ .ml_meth = (PyCFunction)pyrf_evsel__open,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("open the event selector file descriptor table.")
+ },
+ { .ml_name = NULL, }
+};
+
+static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");
+
+static PyTypeObject pyrf_evsel__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.evsel",
+ .tp_basicsize = sizeof(struct pyrf_evsel),
+ .tp_dealloc = (destructor)pyrf_evsel__delete,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_evsel__doc,
+ .tp_methods = pyrf_evsel__methods,
+ .tp_init = (initproc)pyrf_evsel__init,
+};
+
+static int pyrf_evsel__setup_types(void)
+{
+ pyrf_evsel__type.tp_new = PyType_GenericNew;
+ return PyType_Ready(&pyrf_evsel__type);
+}
+
+struct pyrf_evlist {
+ PyObject_HEAD
+
+ struct perf_evlist evlist;
+};
+
+static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs __used)
+{
+ PyObject *pcpus = NULL, *pthreads = NULL;
+ struct cpu_map *cpus;
+ struct thread_map *threads;
+
+ if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
+ return -1;
+
+ threads = ((struct pyrf_thread_map *)pthreads)->threads;
+ cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
+ perf_evlist__init(&pevlist->evlist, cpus, threads);
+ return 0;
+}
+
+static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
+{
+ perf_evlist__exit(&pevlist->evlist);
+ pevlist->ob_type->tp_free((PyObject*)pevlist);
+}
+
+static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ static char *kwlist[] = { "pages", "overwrite", NULL };
+ int pages = 128, overwrite = false;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
+ &pages, &overwrite))
+ return NULL;
+
+ if (perf_evlist__mmap(evlist, pages, overwrite) < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ static char *kwlist[] = { "timeout", NULL };
+ int timeout = -1, n;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
+ return NULL;
+
+ n = poll(evlist->pollfd, evlist->nr_fds, timeout);
+ if (n < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+
+ return Py_BuildValue("i", n);
+}
+
+static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
+ PyObject *args __used, PyObject *kwargs __used)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ PyObject *list = PyList_New(0);
+ int i;
+
+ for (i = 0; i < evlist->nr_fds; ++i) {
+ PyObject *file;
+ FILE *fp = fdopen(evlist->pollfd[i].fd, "r");
+
+ if (fp == NULL)
+ goto free_list;
+
+ file = PyFile_FromFile(fp, "perf", "r", NULL);
+ if (file == NULL)
+ goto free_list;
+
+ if (PyList_Append(list, file) != 0) {
+ Py_DECREF(file);
+ goto free_list;
+ }
+
+ Py_DECREF(file);
+ }
+
+ return list;
+free_list:
+ return PyErr_NoMemory();
+}
+
+
+static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs __used)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ PyObject *pevsel;
+ struct perf_evsel *evsel;
+
+ if (!PyArg_ParseTuple(args, "O", &pevsel))
+ return NULL;
+
+ Py_INCREF(pevsel);
+ evsel = &((struct pyrf_evsel *)pevsel)->evsel;
+ evsel->idx = evlist->nr_entries;
+ perf_evlist__add(evlist, evsel);
+
+ return Py_BuildValue("i", evlist->nr_entries);
+}
+
+static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ union perf_event *event;
+ int sample_id_all = 1, cpu;
+ static char *kwlist[] = { "cpu", "sample_id_all", NULL };
+ int err;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
+ &cpu, &sample_id_all))
+ return NULL;
+
+ event = perf_evlist__mmap_read(evlist, cpu);
+ if (event != NULL) {
+ struct perf_evsel *first;
+ PyObject *pyevent = pyrf_event__new(event);
+ struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
+
+ if (pyevent == NULL)
+ return PyErr_NoMemory();
+
+ first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ err = perf_event__parse_sample(event, first->attr.sample_type,
+ perf_evsel__sample_size(first),
+ sample_id_all, &pevent->sample, false);
+ if (err)
+ return PyErr_Format(PyExc_OSError,
+ "perf: can't parse sample, err=%d", err);
+ return pyevent;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ int group = 0;
+ static char *kwlist[] = { "group", NULL };
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, &group))
+ return NULL;
+
+ if (perf_evlist__open(evlist, group) < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyMethodDef pyrf_evlist__methods[] = {
+ {
+ .ml_name = "mmap",
+ .ml_meth = (PyCFunction)pyrf_evlist__mmap,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("mmap the file descriptor table.")
+ },
+ {
+ .ml_name = "open",
+ .ml_meth = (PyCFunction)pyrf_evlist__open,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("open the file descriptors.")
+ },
+ {
+ .ml_name = "poll",
+ .ml_meth = (PyCFunction)pyrf_evlist__poll,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("poll the file descriptor table.")
+ },
+ {
+ .ml_name = "get_pollfd",
+ .ml_meth = (PyCFunction)pyrf_evlist__get_pollfd,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("get the poll file descriptor table.")
+ },
+ {
+ .ml_name = "add",
+ .ml_meth = (PyCFunction)pyrf_evlist__add,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("adds an event selector to the list.")
+ },
+ {
+ .ml_name = "read_on_cpu",
+ .ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("reads an event.")
+ },
+ { .ml_name = NULL, }
+};
+
+static Py_ssize_t pyrf_evlist__length(PyObject *obj)
+{
+ struct pyrf_evlist *pevlist = (void *)obj;
+
+ return pevlist->evlist.nr_entries;
+}
+
+static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
+{
+ struct pyrf_evlist *pevlist = (void *)obj;
+ struct perf_evsel *pos;
+
+ if (i >= pevlist->evlist.nr_entries)
+ return NULL;
+
+ list_for_each_entry(pos, &pevlist->evlist.entries, node)
+ if (i-- == 0)
+ break;
+
+ return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
+}
+
+static PySequenceMethods pyrf_evlist__sequence_methods = {
+ .sq_length = pyrf_evlist__length,
+ .sq_item = pyrf_evlist__item,
+};
+
+static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
+
+static PyTypeObject pyrf_evlist__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.evlist",
+ .tp_basicsize = sizeof(struct pyrf_evlist),
+ .tp_dealloc = (destructor)pyrf_evlist__delete,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_as_sequence = &pyrf_evlist__sequence_methods,
+ .tp_doc = pyrf_evlist__doc,
+ .tp_methods = pyrf_evlist__methods,
+ .tp_init = (initproc)pyrf_evlist__init,
+};
+
+static int pyrf_evlist__setup_types(void)
+{
+ pyrf_evlist__type.tp_new = PyType_GenericNew;
+ return PyType_Ready(&pyrf_evlist__type);
+}
+
+static struct {
+ const char *name;
+ int value;
+} perf__constants[] = {
+ { "TYPE_HARDWARE", PERF_TYPE_HARDWARE },
+ { "TYPE_SOFTWARE", PERF_TYPE_SOFTWARE },
+ { "TYPE_TRACEPOINT", PERF_TYPE_TRACEPOINT },
+ { "TYPE_HW_CACHE", PERF_TYPE_HW_CACHE },
+ { "TYPE_RAW", PERF_TYPE_RAW },
+ { "TYPE_BREAKPOINT", PERF_TYPE_BREAKPOINT },
+
+ { "COUNT_HW_CPU_CYCLES", PERF_COUNT_HW_CPU_CYCLES },
+ { "COUNT_HW_INSTRUCTIONS", PERF_COUNT_HW_INSTRUCTIONS },
+ { "COUNT_HW_CACHE_REFERENCES", PERF_COUNT_HW_CACHE_REFERENCES },
+ { "COUNT_HW_CACHE_MISSES", PERF_COUNT_HW_CACHE_MISSES },
+ { "COUNT_HW_BRANCH_INSTRUCTIONS", PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+ { "COUNT_HW_BRANCH_MISSES", PERF_COUNT_HW_BRANCH_MISSES },
+ { "COUNT_HW_BUS_CYCLES", PERF_COUNT_HW_BUS_CYCLES },
+ { "COUNT_HW_CACHE_L1D", PERF_COUNT_HW_CACHE_L1D },
+ { "COUNT_HW_CACHE_L1I", PERF_COUNT_HW_CACHE_L1I },
+ { "COUNT_HW_CACHE_LL", PERF_COUNT_HW_CACHE_LL },
+ { "COUNT_HW_CACHE_DTLB", PERF_COUNT_HW_CACHE_DTLB },
+ { "COUNT_HW_CACHE_ITLB", PERF_COUNT_HW_CACHE_ITLB },
+ { "COUNT_HW_CACHE_BPU", PERF_COUNT_HW_CACHE_BPU },
+ { "COUNT_HW_CACHE_OP_READ", PERF_COUNT_HW_CACHE_OP_READ },
+ { "COUNT_HW_CACHE_OP_WRITE", PERF_COUNT_HW_CACHE_OP_WRITE },
+ { "COUNT_HW_CACHE_OP_PREFETCH", PERF_COUNT_HW_CACHE_OP_PREFETCH },
+ { "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS },
+ { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS },
+
+ { "COUNT_HW_STALLED_CYCLES_FRONTEND", PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
+ { "COUNT_HW_STALLED_CYCLES_BACKEND", PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
+
+ { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK },
+ { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK },
+ { "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS },
+ { "COUNT_SW_CONTEXT_SWITCHES", PERF_COUNT_SW_CONTEXT_SWITCHES },
+ { "COUNT_SW_CPU_MIGRATIONS", PERF_COUNT_SW_CPU_MIGRATIONS },
+ { "COUNT_SW_PAGE_FAULTS_MIN", PERF_COUNT_SW_PAGE_FAULTS_MIN },
+ { "COUNT_SW_PAGE_FAULTS_MAJ", PERF_COUNT_SW_PAGE_FAULTS_MAJ },
+ { "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS },
+ { "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS },
+
+ { "SAMPLE_IP", PERF_SAMPLE_IP },
+ { "SAMPLE_TID", PERF_SAMPLE_TID },
+ { "SAMPLE_TIME", PERF_SAMPLE_TIME },
+ { "SAMPLE_ADDR", PERF_SAMPLE_ADDR },
+ { "SAMPLE_READ", PERF_SAMPLE_READ },
+ { "SAMPLE_CALLCHAIN", PERF_SAMPLE_CALLCHAIN },
+ { "SAMPLE_ID", PERF_SAMPLE_ID },
+ { "SAMPLE_CPU", PERF_SAMPLE_CPU },
+ { "SAMPLE_PERIOD", PERF_SAMPLE_PERIOD },
+ { "SAMPLE_STREAM_ID", PERF_SAMPLE_STREAM_ID },
+ { "SAMPLE_RAW", PERF_SAMPLE_RAW },
+
+ { "FORMAT_TOTAL_TIME_ENABLED", PERF_FORMAT_TOTAL_TIME_ENABLED },
+ { "FORMAT_TOTAL_TIME_RUNNING", PERF_FORMAT_TOTAL_TIME_RUNNING },
+ { "FORMAT_ID", PERF_FORMAT_ID },
+ { "FORMAT_GROUP", PERF_FORMAT_GROUP },
+
+ { "RECORD_MMAP", PERF_RECORD_MMAP },
+ { "RECORD_LOST", PERF_RECORD_LOST },
+ { "RECORD_COMM", PERF_RECORD_COMM },
+ { "RECORD_EXIT", PERF_RECORD_EXIT },
+ { "RECORD_THROTTLE", PERF_RECORD_THROTTLE },
+ { "RECORD_UNTHROTTLE", PERF_RECORD_UNTHROTTLE },
+ { "RECORD_FORK", PERF_RECORD_FORK },
+ { "RECORD_READ", PERF_RECORD_READ },
+ { "RECORD_SAMPLE", PERF_RECORD_SAMPLE },
+ { .name = NULL, },
+};
+
+static PyMethodDef perf__methods[] = {
+ { .ml_name = NULL, }
+};
+
+PyMODINIT_FUNC initperf(void)
+{
+ PyObject *obj;
+ int i;
+ PyObject *dict, *module = Py_InitModule("perf", perf__methods);
+
+ if (module == NULL ||
+ pyrf_event__setup_types() < 0 ||
+ pyrf_evlist__setup_types() < 0 ||
+ pyrf_evsel__setup_types() < 0 ||
+ pyrf_thread_map__setup_types() < 0 ||
+ pyrf_cpu_map__setup_types() < 0)
+ return;
+
+ Py_INCREF(&pyrf_evlist__type);
+ PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
+
+ Py_INCREF(&pyrf_evsel__type);
+ PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
+
+ Py_INCREF(&pyrf_thread_map__type);
+ PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
+
+ Py_INCREF(&pyrf_cpu_map__type);
+ PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
+
+ dict = PyModule_GetDict(module);
+ if (dict == NULL)
+ goto error;
+
+ for (i = 0; perf__constants[i].name != NULL; i++) {
+ obj = PyInt_FromLong(perf__constants[i].value);
+ if (obj == NULL)
+ goto error;
+ PyDict_SetItemString(dict, perf__constants[i].name, obj);
+ Py_DECREF(obj);
+ }
+
+error:
+ if (PyErr_Occurred())
+ PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/quote.c b/ANDROID_3.4.5/tools/perf/util/quote.c
new file mode 100644
index 00000000..01f03242
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/quote.c
@@ -0,0 +1,54 @@
+#include "cache.h"
+#include "quote.h"
+
+/* Help to copy the thing properly quoted for shell safety:
+ * any single quote is replaced with '\'', any exclamation point
+ * is replaced with '\!', and the whole thing is enclosed in a
+ * single quote pair.
+ *
+ * E.g.
+ * original sq_quote result
+ * name ==> name ==> 'name'
+ * a b ==> a b ==> 'a b'
+ * a'b ==> a'\''b ==> 'a'\''b'
+ * a!b ==> a'\!'b ==> 'a'\!'b'
+ */
+static inline int need_bs_quote(char c)
+{
+ return (c == '\'' || c == '!');
+}
+
+static void sq_quote_buf(struct strbuf *dst, const char *src)
+{
+ char *to_free = NULL;
+
+ if (dst->buf == src)
+ to_free = strbuf_detach(dst, NULL);
+
+ strbuf_addch(dst, '\'');
+ while (*src) {
+ size_t len = strcspn(src, "'!");
+ strbuf_add(dst, src, len);
+ src += len;
+ while (need_bs_quote(*src)) {
+ strbuf_addstr(dst, "'\\");
+ strbuf_addch(dst, *src++);
+ strbuf_addch(dst, '\'');
+ }
+ }
+ strbuf_addch(dst, '\'');
+ free(to_free);
+}
+
+void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
+{
+ int i;
+
+ /* Copy into destination buffer. */
+ strbuf_grow(dst, 255);
+ for (i = 0; argv[i]; ++i) {
+ strbuf_addch(dst, ' ');
+ sq_quote_buf(dst, argv[i]);
+ if (maxlen && dst->len > maxlen)
+ die("Too many or long arguments");
+ }
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/quote.h b/ANDROID_3.4.5/tools/perf/util/quote.h
new file mode 100644
index 00000000..172889ea
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/quote.h
@@ -0,0 +1,29 @@
+#ifndef __PERF_QUOTE_H
+#define __PERF_QUOTE_H
+
+#include <stddef.h>
+#include <stdio.h>
+
+/* Help to copy the thing properly quoted for shell safety:
+ * any single quote is replaced with '\'', any exclamation point
+ * is replaced with '\!', and the whole thing is enclosed in a
+ * single quote pair.
+ *
+ * For example, if you are passing the result to system() as an
+ * argument:
+ *
+ * sprintf(cmd, "foobar %s %s", sq_quote(arg0), sq_quote(arg1))
+ *
+ * would be appropriate. If the system() is going to call ssh to
+ * run the command on the other side:
+ *
+ * sprintf(cmd, "git-diff-tree %s %s", sq_quote(arg0), sq_quote(arg1));
+ * sprintf(rcmd, "ssh %s %s", sq_util/quote.host), sq_quote(cmd));
+ *
+ * Note that the above examples leak memory! Remember to free result from
+ * sq_quote() in a real application.
+ */
+
+extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
+
+#endif /* __PERF_QUOTE_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/run-command.c b/ANDROID_3.4.5/tools/perf/util/run-command.c
new file mode 100644
index 00000000..da8e9b28
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/run-command.c
@@ -0,0 +1,214 @@
+#include "cache.h"
+#include "run-command.h"
+#include "exec_cmd.h"
+
+static inline void close_pair(int fd[2])
+{
+ close(fd[0]);
+ close(fd[1]);
+}
+
+static inline void dup_devnull(int to)
+{
+ int fd = open("/dev/null", O_RDWR);
+ dup2(fd, to);
+ close(fd);
+}
+
+int start_command(struct child_process *cmd)
+{
+ int need_in, need_out, need_err;
+ int fdin[2], fdout[2], fderr[2];
+
+ /*
+ * In case of errors we must keep the promise to close FDs
+ * that have been passed in via ->in and ->out.
+ */
+
+ need_in = !cmd->no_stdin && cmd->in < 0;
+ if (need_in) {
+ if (pipe(fdin) < 0) {
+ if (cmd->out > 0)
+ close(cmd->out);
+ return -ERR_RUN_COMMAND_PIPE;
+ }
+ cmd->in = fdin[1];
+ }
+
+ need_out = !cmd->no_stdout
+ && !cmd->stdout_to_stderr
+ && cmd->out < 0;
+ if (need_out) {
+ if (pipe(fdout) < 0) {
+ if (need_in)
+ close_pair(fdin);
+ else if (cmd->in)
+ close(cmd->in);
+ return -ERR_RUN_COMMAND_PIPE;
+ }
+ cmd->out = fdout[0];
+ }
+
+ need_err = !cmd->no_stderr && cmd->err < 0;
+ if (need_err) {
+ if (pipe(fderr) < 0) {
+ if (need_in)
+ close_pair(fdin);
+ else if (cmd->in)
+ close(cmd->in);
+ if (need_out)
+ close_pair(fdout);
+ else if (cmd->out)
+ close(cmd->out);
+ return -ERR_RUN_COMMAND_PIPE;
+ }
+ cmd->err = fderr[0];
+ }
+
+ fflush(NULL);
+ cmd->pid = fork();
+ if (!cmd->pid) {
+ if (cmd->no_stdin)
+ dup_devnull(0);
+ else if (need_in) {
+ dup2(fdin[0], 0);
+ close_pair(fdin);
+ } else if (cmd->in) {
+ dup2(cmd->in, 0);
+ close(cmd->in);
+ }
+
+ if (cmd->no_stderr)
+ dup_devnull(2);
+ else if (need_err) {
+ dup2(fderr[1], 2);
+ close_pair(fderr);
+ }
+
+ if (cmd->no_stdout)
+ dup_devnull(1);
+ else if (cmd->stdout_to_stderr)
+ dup2(2, 1);
+ else if (need_out) {
+ dup2(fdout[1], 1);
+ close_pair(fdout);
+ } else if (cmd->out > 1) {
+ dup2(cmd->out, 1);
+ close(cmd->out);
+ }
+
+ if (cmd->dir && chdir(cmd->dir))
+ die("exec %s: cd to %s failed (%s)", cmd->argv[0],
+ cmd->dir, strerror(errno));
+ if (cmd->env) {
+ for (; *cmd->env; cmd->env++) {
+ if (strchr(*cmd->env, '='))
+ putenv((char*)*cmd->env);
+ else
+ unsetenv(*cmd->env);
+ }
+ }
+ if (cmd->preexec_cb)
+ cmd->preexec_cb();
+ if (cmd->perf_cmd) {
+ execv_perf_cmd(cmd->argv);
+ } else {
+ execvp(cmd->argv[0], (char *const*) cmd->argv);
+ }
+ exit(127);
+ }
+
+ if (cmd->pid < 0) {
+ int err = errno;
+ if (need_in)
+ close_pair(fdin);
+ else if (cmd->in)
+ close(cmd->in);
+ if (need_out)
+ close_pair(fdout);
+ else if (cmd->out)
+ close(cmd->out);
+ if (need_err)
+ close_pair(fderr);
+ return err == ENOENT ?
+ -ERR_RUN_COMMAND_EXEC :
+ -ERR_RUN_COMMAND_FORK;
+ }
+
+ if (need_in)
+ close(fdin[0]);
+ else if (cmd->in)
+ close(cmd->in);
+
+ if (need_out)
+ close(fdout[1]);
+ else if (cmd->out)
+ close(cmd->out);
+
+ if (need_err)
+ close(fderr[1]);
+
+ return 0;
+}
+
+static int wait_or_whine(pid_t pid)
+{
+ for (;;) {
+ int status, code;
+ pid_t waiting = waitpid(pid, &status, 0);
+
+ if (waiting < 0) {
+ if (errno == EINTR)
+ continue;
+ error("waitpid failed (%s)", strerror(errno));
+ return -ERR_RUN_COMMAND_WAITPID;
+ }
+ if (waiting != pid)
+ return -ERR_RUN_COMMAND_WAITPID_WRONG_PID;
+ if (WIFSIGNALED(status))
+ return -ERR_RUN_COMMAND_WAITPID_SIGNAL;
+
+ if (!WIFEXITED(status))
+ return -ERR_RUN_COMMAND_WAITPID_NOEXIT;
+ code = WEXITSTATUS(status);
+ switch (code) {
+ case 127:
+ return -ERR_RUN_COMMAND_EXEC;
+ case 0:
+ return 0;
+ default:
+ return -code;
+ }
+ }
+}
+
+int finish_command(struct child_process *cmd)
+{
+ return wait_or_whine(cmd->pid);
+}
+
+int run_command(struct child_process *cmd)
+{
+ int code = start_command(cmd);
+ if (code)
+ return code;
+ return finish_command(cmd);
+}
+
+static void prepare_run_command_v_opt(struct child_process *cmd,
+ const char **argv,
+ int opt)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->argv = argv;
+ cmd->no_stdin = opt & RUN_COMMAND_NO_STDIN ? 1 : 0;
+ cmd->perf_cmd = opt & RUN_PERF_CMD ? 1 : 0;
+ cmd->stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 1 : 0;
+}
+
+int run_command_v_opt(const char **argv, int opt)
+{
+ struct child_process cmd;
+ prepare_run_command_v_opt(&cmd, argv, opt);
+ return run_command(&cmd);
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/run-command.h b/ANDROID_3.4.5/tools/perf/util/run-command.h
new file mode 100644
index 00000000..1ef264d5
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/run-command.h
@@ -0,0 +1,58 @@
+#ifndef __PERF_RUN_COMMAND_H
+#define __PERF_RUN_COMMAND_H
+
+enum {
+ ERR_RUN_COMMAND_FORK = 10000,
+ ERR_RUN_COMMAND_EXEC,
+ ERR_RUN_COMMAND_PIPE,
+ ERR_RUN_COMMAND_WAITPID,
+ ERR_RUN_COMMAND_WAITPID_WRONG_PID,
+ ERR_RUN_COMMAND_WAITPID_SIGNAL,
+ ERR_RUN_COMMAND_WAITPID_NOEXIT,
+};
+#define IS_RUN_COMMAND_ERR(x) (-(x) >= ERR_RUN_COMMAND_FORK)
+
+struct child_process {
+ const char **argv;
+ pid_t pid;
+ /*
+ * Using .in, .out, .err:
+ * - Specify 0 for no redirections (child inherits stdin, stdout,
+ * stderr from parent).
+ * - Specify -1 to have a pipe allocated as follows:
+ * .in: returns the writable pipe end; parent writes to it,
+ * the readable pipe end becomes child's stdin
+ * .out, .err: returns the readable pipe end; parent reads from
+ * it, the writable pipe end becomes child's stdout/stderr
+ * The caller of start_command() must close the returned FDs
+ * after it has completed reading from/writing to them!
+ * - Specify > 0 to set a channel to a particular FD as follows:
+ * .in: a readable FD, becomes child's stdin
+ * .out: a writable FD, becomes child's stdout/stderr
+ * .err > 0 not supported
+ * The specified FD is closed by start_command(), even in case
+ * of errors!
+ */
+ int in;
+ int out;
+ int err;
+ const char *dir;
+ const char *const *env;
+ unsigned no_stdin:1;
+ unsigned no_stdout:1;
+ unsigned no_stderr:1;
+ unsigned perf_cmd:1; /* if this is to be a perf sub-command */
+ unsigned stdout_to_stderr:1;
+ void (*preexec_cb)(void);
+};
+
+int start_command(struct child_process *);
+int finish_command(struct child_process *);
+int run_command(struct child_process *);
+
+#define RUN_COMMAND_NO_STDIN 1
+#define RUN_PERF_CMD 2 /* If this is to be a perf sub-command */
+#define RUN_COMMAND_STDOUT_TO_STDERR 4
+int run_command_v_opt(const char **argv, int opt);
+
+#endif /* __PERF_RUN_COMMAND_H */
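
The comment in struct child_process above documents three redirection modes for .in, .out and .err. Below is a minimal sketch of the pipe-capture mode, assuming a hypothetical caller that runs "ls -l" and reads the child's stdout through the pipe that start_command() allocates (illustrative only, not part of this commit):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "run-command.h"

static int capture_ls_output(void)	/* hypothetical helper */
{
	struct child_process cmd;
	const char *argv[] = { "ls", "-l", NULL };
	char buf[256];
	ssize_t n;

	memset(&cmd, 0, sizeof(cmd));
	cmd.argv = argv;
	cmd.no_stdin = 1;	/* child's stdin is redirected to /dev/null */
	cmd.out = -1;		/* ask start_command() for a pipe; cmd.out becomes
				 * the readable end handed back to the parent */

	if (start_command(&cmd))
		return -1;

	while ((n = read(cmd.out, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(cmd.out);			/* the caller must close the returned fd */
	return finish_command(&cmd);	/* 0 on success, negative error code otherwise */
}

For fire-and-forget invocations with no redirection, run_command_v_opt(argv, RUN_COMMAND_NO_STDIN) wraps the same start/finish sequence.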
diff --git a/ANDROID_3.4.5/tools/perf/util/scripting-engines/trace-event-perl.c b/ANDROID_3.4.5/tools/perf/util/scripting-engines/trace-event-perl.c
new file mode 100644
index 00000000..e30749e3
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -0,0 +1,634 @@
+/*
+ * trace-event-perl. Feed perf script events to an embedded Perl interpreter.
+ *
+ * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
+#include "../../perf.h"
+#include "../util.h"
+#include "../thread.h"
+#include "../event.h"
+#include "../trace-event.h"
+#include "../evsel.h"
+
+#include <EXTERN.h>
+#include <perl.h>
+
+void boot_Perf__Trace__Context(pTHX_ CV *cv);
+void boot_DynaLoader(pTHX_ CV *cv);
+typedef PerlInterpreter * INTERP;
+
+void xs_init(pTHX);
+
+void xs_init(pTHX)
+{
+ const char *file = __FILE__;
+ dXSUB_SYS;
+
+ newXS("Perf::Trace::Context::bootstrap", boot_Perf__Trace__Context,
+ file);
+ newXS("DynaLoader::boot_DynaLoader", boot_DynaLoader, file);
+}
+
+INTERP my_perl;
+
+#define FTRACE_MAX_EVENT \
+ ((1 << (sizeof(unsigned short) * 8)) - 1)
+
+struct event *events[FTRACE_MAX_EVENT];
+
+extern struct scripting_context *scripting_context;
+
+static char *cur_field_name;
+static int zero_flag_atom;
+
+static void define_symbolic_value(const char *ev_name,
+ const char *field_name,
+ const char *field_value,
+ const char *field_str)
+{
+ unsigned long long value;
+ dSP;
+
+ value = eval_flag(field_value);
+
+ ENTER;
+ SAVETMPS;
+ PUSHMARK(SP);
+
+ XPUSHs(sv_2mortal(newSVpv(ev_name, 0)));
+ XPUSHs(sv_2mortal(newSVpv(field_name, 0)));
+ XPUSHs(sv_2mortal(newSVuv(value)));
+ XPUSHs(sv_2mortal(newSVpv(field_str, 0)));
+
+ PUTBACK;
+ if (get_cv("main::define_symbolic_value", 0))
+ call_pv("main::define_symbolic_value", G_SCALAR);
+ SPAGAIN;
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+}
+
+static void define_symbolic_values(struct print_flag_sym *field,
+ const char *ev_name,
+ const char *field_name)
+{
+ define_symbolic_value(ev_name, field_name, field->value, field->str);
+ if (field->next)
+ define_symbolic_values(field->next, ev_name, field_name);
+}
+
+static void define_symbolic_field(const char *ev_name,
+ const char *field_name)
+{
+ dSP;
+
+ ENTER;
+ SAVETMPS;
+ PUSHMARK(SP);
+
+ XPUSHs(sv_2mortal(newSVpv(ev_name, 0)));
+ XPUSHs(sv_2mortal(newSVpv(field_name, 0)));
+
+ PUTBACK;
+ if (get_cv("main::define_symbolic_field", 0))
+ call_pv("main::define_symbolic_field", G_SCALAR);
+ SPAGAIN;
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+}
+
+static void define_flag_value(const char *ev_name,
+ const char *field_name,
+ const char *field_value,
+ const char *field_str)
+{
+ unsigned long long value;
+ dSP;
+
+ value = eval_flag(field_value);
+
+ ENTER;
+ SAVETMPS;
+ PUSHMARK(SP);
+
+ XPUSHs(sv_2mortal(newSVpv(ev_name, 0)));
+ XPUSHs(sv_2mortal(newSVpv(field_name, 0)));
+ XPUSHs(sv_2mortal(newSVuv(value)));
+ XPUSHs(sv_2mortal(newSVpv(field_str, 0)));
+
+ PUTBACK;
+ if (get_cv("main::define_flag_value", 0))
+ call_pv("main::define_flag_value", G_SCALAR);
+ SPAGAIN;
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+}
+
+static void define_flag_values(struct print_flag_sym *field,
+ const char *ev_name,
+ const char *field_name)
+{
+ define_flag_value(ev_name, field_name, field->value, field->str);
+ if (field->next)
+ define_flag_values(field->next, ev_name, field_name);
+}
+
+static void define_flag_field(const char *ev_name,
+ const char *field_name,
+ const char *delim)
+{
+ dSP;
+
+ ENTER;
+ SAVETMPS;
+ PUSHMARK(SP);
+
+ XPUSHs(sv_2mortal(newSVpv(ev_name, 0)));
+ XPUSHs(sv_2mortal(newSVpv(field_name, 0)));
+ XPUSHs(sv_2mortal(newSVpv(delim, 0)));
+
+ PUTBACK;
+ if (get_cv("main::define_flag_field", 0))
+ call_pv("main::define_flag_field", G_SCALAR);
+ SPAGAIN;
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+}
+
+static void define_event_symbols(struct event *event,
+ const char *ev_name,
+ struct print_arg *args)
+{
+ switch (args->type) {
+ case PRINT_NULL:
+ break;
+ case PRINT_ATOM:
+ define_flag_value(ev_name, cur_field_name, "0",
+ args->atom.atom);
+ zero_flag_atom = 0;
+ break;
+ case PRINT_FIELD:
+ if (cur_field_name)
+ free(cur_field_name);
+ cur_field_name = strdup(args->field.name);
+ break;
+ case PRINT_FLAGS:
+ define_event_symbols(event, ev_name, args->flags.field);
+ define_flag_field(ev_name, cur_field_name, args->flags.delim);
+ define_flag_values(args->flags.flags, ev_name, cur_field_name);
+ break;
+ case PRINT_SYMBOL:
+ define_event_symbols(event, ev_name, args->symbol.field);
+ define_symbolic_field(ev_name, cur_field_name);
+ define_symbolic_values(args->symbol.symbols, ev_name,
+ cur_field_name);
+ break;
+ case PRINT_STRING:
+ break;
+ case PRINT_TYPE:
+ define_event_symbols(event, ev_name, args->typecast.item);
+ break;
+ case PRINT_OP:
+ if (strcmp(args->op.op, ":") == 0)
+ zero_flag_atom = 1;
+ define_event_symbols(event, ev_name, args->op.left);
+ define_event_symbols(event, ev_name, args->op.right);
+ break;
+ default:
+ /* we should warn... */
+ return;
+ }
+
+ if (args->next)
+ define_event_symbols(event, ev_name, args->next);
+}
+
+static inline struct event *find_cache_event(int type)
+{
+ static char ev_name[256];
+ struct event *event;
+
+ if (events[type])
+ return events[type];
+
+ events[type] = event = trace_find_event(type);
+ if (!event)
+ return NULL;
+
+ sprintf(ev_name, "%s::%s", event->system, event->name);
+
+ define_event_symbols(event, ev_name, event->print_fmt.args);
+
+ return event;
+}
+
+static void perl_process_tracepoint(union perf_event *pevent __unused,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine __unused,
+ struct thread *thread)
+{
+ struct format_field *field;
+ static char handler[256];
+ unsigned long long val;
+ unsigned long s, ns;
+ struct event *event;
+ int type;
+ int pid;
+ int cpu = sample->cpu;
+ void *data = sample->raw_data;
+ unsigned long long nsecs = sample->time;
+ char *comm = thread->comm;
+
+ dSP;
+
+ if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
+ return;
+
+ type = trace_parse_common_type(data);
+
+ event = find_cache_event(type);
+ if (!event)
+ die("ug! no event found for type %d", type);
+
+ pid = trace_parse_common_pid(data);
+
+ sprintf(handler, "%s::%s", event->system, event->name);
+
+ s = nsecs / NSECS_PER_SEC;
+ ns = nsecs - s * NSECS_PER_SEC;
+
+ scripting_context->event_data = data;
+
+ ENTER;
+ SAVETMPS;
+ PUSHMARK(SP);
+
+ XPUSHs(sv_2mortal(newSVpv(handler, 0)));
+ XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context))));
+ XPUSHs(sv_2mortal(newSVuv(cpu)));
+ XPUSHs(sv_2mortal(newSVuv(s)));
+ XPUSHs(sv_2mortal(newSVuv(ns)));
+ XPUSHs(sv_2mortal(newSViv(pid)));
+ XPUSHs(sv_2mortal(newSVpv(comm, 0)));
+
+ /* common fields other than pid can be accessed via xsub fns */
+
+ for (field = event->format.fields; field; field = field->next) {
+ if (field->flags & FIELD_IS_STRING) {
+ int offset;
+ if (field->flags & FIELD_IS_DYNAMIC) {
+ offset = *(int *)(data + field->offset);
+ offset &= 0xffff;
+ } else
+ offset = field->offset;
+ XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0)));
+ } else { /* FIELD_IS_NUMERIC */
+ val = read_size(data + field->offset, field->size);
+ if (field->flags & FIELD_IS_SIGNED) {
+ XPUSHs(sv_2mortal(newSViv(val)));
+ } else {
+ XPUSHs(sv_2mortal(newSVuv(val)));
+ }
+ }
+ }
+
+ PUTBACK;
+
+ if (get_cv(handler, 0))
+ call_pv(handler, G_SCALAR);
+ else if (get_cv("main::trace_unhandled", 0)) {
+ XPUSHs(sv_2mortal(newSVpv(handler, 0)));
+ XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context))));
+ XPUSHs(sv_2mortal(newSVuv(cpu)));
+ XPUSHs(sv_2mortal(newSVuv(nsecs)));
+ XPUSHs(sv_2mortal(newSViv(pid)));
+ XPUSHs(sv_2mortal(newSVpv(comm, 0)));
+ call_pv("main::trace_unhandled", G_SCALAR);
+ }
+ SPAGAIN;
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+}
+
+static void perl_process_event_generic(union perf_event *pevent,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine __unused,
+ struct thread *thread __unused)
+{
+ dSP;
+
+ if (!get_cv("process_event", 0))
+ return;
+
+ ENTER;
+ SAVETMPS;
+ PUSHMARK(SP);
+ XPUSHs(sv_2mortal(newSVpvn((const char *)pevent, pevent->header.size)));
+ XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr))));
+ XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample))));
+ XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size)));
+ PUTBACK;
+ call_pv("process_event", G_SCALAR);
+ SPAGAIN;
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+}
+
+static void perl_process_event(union perf_event *pevent,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine,
+ struct thread *thread)
+{
+ perl_process_tracepoint(pevent, sample, evsel, machine, thread);
+ perl_process_event_generic(pevent, sample, evsel, machine, thread);
+}
+
+static void run_start_sub(void)
+{
+ dSP; /* access to Perl stack */
+ PUSHMARK(SP);
+
+ if (get_cv("main::trace_begin", 0))
+ call_pv("main::trace_begin", G_DISCARD | G_NOARGS);
+}
+
+/*
+ * Start trace script
+ */
+static int perl_start_script(const char *script, int argc, const char **argv)
+{
+ const char **command_line;
+ int i, err = 0;
+
+ command_line = malloc((argc + 2) * sizeof(const char *));
+ command_line[0] = "";
+ command_line[1] = script;
+ for (i = 2; i < argc + 2; i++)
+ command_line[i] = argv[i - 2];
+
+ my_perl = perl_alloc();
+ perl_construct(my_perl);
+
+ if (perl_parse(my_perl, xs_init, argc + 2, (char **)command_line,
+ (char **)NULL)) {
+ err = -1;
+ goto error;
+ }
+
+ if (perl_run(my_perl)) {
+ err = -1;
+ goto error;
+ }
+
+ if (SvTRUE(ERRSV)) {
+ err = -1;
+ goto error;
+ }
+
+ run_start_sub();
+
+ free(command_line);
+ return 0;
+error:
+ perl_free(my_perl);
+ free(command_line);
+
+ return err;
+}
+
+/*
+ * Stop trace script
+ */
+static int perl_stop_script(void)
+{
+ dSP; /* access to Perl stack */
+ PUSHMARK(SP);
+
+ if (get_cv("main::trace_end", 0))
+ call_pv("main::trace_end", G_DISCARD | G_NOARGS);
+
+ perl_destruct(my_perl);
+ perl_free(my_perl);
+
+ return 0;
+}
+
+static int perl_generate_script(const char *outfile)
+{
+ struct event *event = NULL;
+ struct format_field *f;
+ char fname[PATH_MAX];
+ int not_first, count;
+ FILE *ofp;
+
+ sprintf(fname, "%s.pl", outfile);
+ ofp = fopen(fname, "w");
+ if (ofp == NULL) {
+ fprintf(stderr, "couldn't open %s\n", fname);
+ return -1;
+ }
+
+ fprintf(ofp, "# perf script event handlers, "
+ "generated by perf script -g perl\n");
+
+ fprintf(ofp, "# Licensed under the terms of the GNU GPL"
+ " License version 2\n\n");
+
+ fprintf(ofp, "# The common_* event handler fields are the most useful "
+ "fields common to\n");
+
+ fprintf(ofp, "# all events. They don't necessarily correspond to "
+ "the 'common_*' fields\n");
+
+ fprintf(ofp, "# in the format files. Those fields not available as "
+ "handler params can\n");
+
+ fprintf(ofp, "# be retrieved using Perl functions of the form "
+ "common_*($context).\n");
+
+ fprintf(ofp, "# See Context.pm for the list of available "
+ "functions.\n\n");
+
+ fprintf(ofp, "use lib \"$ENV{'PERF_EXEC_PATH'}/scripts/perl/"
+ "Perf-Trace-Util/lib\";\n");
+
+ fprintf(ofp, "use lib \"./Perf-Trace-Util/lib\";\n");
+ fprintf(ofp, "use Perf::Trace::Core;\n");
+ fprintf(ofp, "use Perf::Trace::Context;\n");
+ fprintf(ofp, "use Perf::Trace::Util;\n\n");
+
+ fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n");
+ fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n");
+
+ while ((event = trace_find_next_event(event))) {
+ fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name);
+ fprintf(ofp, "\tmy (");
+
+ fprintf(ofp, "$event_name, ");
+ fprintf(ofp, "$context, ");
+ fprintf(ofp, "$common_cpu, ");
+ fprintf(ofp, "$common_secs, ");
+ fprintf(ofp, "$common_nsecs,\n");
+ fprintf(ofp, "\t $common_pid, ");
+ fprintf(ofp, "$common_comm,\n\t ");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+ if (++count % 5 == 0)
+ fprintf(ofp, "\n\t ");
+
+ fprintf(ofp, "$%s", f->name);
+ }
+ fprintf(ofp, ") = @_;\n\n");
+
+ fprintf(ofp, "\tprint_header($event_name, $common_cpu, "
+ "$common_secs, $common_nsecs,\n\t "
+ "$common_pid, $common_comm);\n\n");
+
+ fprintf(ofp, "\tprintf(\"");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+ if (count && count % 4 == 0) {
+ fprintf(ofp, "\".\n\t \"");
+ }
+ count++;
+
+ fprintf(ofp, "%s=", f->name);
+ if (f->flags & FIELD_IS_STRING ||
+ f->flags & FIELD_IS_FLAG ||
+ f->flags & FIELD_IS_SYMBOLIC)
+ fprintf(ofp, "%%s");
+ else if (f->flags & FIELD_IS_SIGNED)
+ fprintf(ofp, "%%d");
+ else
+ fprintf(ofp, "%%u");
+ }
+
+ fprintf(ofp, "\\n\",\n\t ");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+
+ if (++count % 5 == 0)
+ fprintf(ofp, "\n\t ");
+
+ if (f->flags & FIELD_IS_FLAG) {
+ if ((count - 1) % 5 != 0) {
+ fprintf(ofp, "\n\t ");
+ count = 4;
+ }
+ fprintf(ofp, "flag_str(\"");
+ fprintf(ofp, "%s::%s\", ", event->system,
+ event->name);
+ fprintf(ofp, "\"%s\", $%s)", f->name,
+ f->name);
+ } else if (f->flags & FIELD_IS_SYMBOLIC) {
+ if ((count - 1) % 5 != 0) {
+ fprintf(ofp, "\n\t ");
+ count = 4;
+ }
+ fprintf(ofp, "symbol_str(\"");
+ fprintf(ofp, "%s::%s\", ", event->system,
+ event->name);
+ fprintf(ofp, "\"%s\", $%s)", f->name,
+ f->name);
+ } else
+ fprintf(ofp, "$%s", f->name);
+ }
+
+ fprintf(ofp, ");\n");
+ fprintf(ofp, "}\n\n");
+ }
+
+ fprintf(ofp, "sub trace_unhandled\n{\n\tmy ($event_name, $context, "
+ "$common_cpu, $common_secs, $common_nsecs,\n\t "
+ "$common_pid, $common_comm) = @_;\n\n");
+
+ fprintf(ofp, "\tprint_header($event_name, $common_cpu, "
+ "$common_secs, $common_nsecs,\n\t $common_pid, "
+ "$common_comm);\n}\n\n");
+
+ fprintf(ofp, "sub print_header\n{\n"
+ "\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n"
+ "\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t "
+ "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}\n");
+
+ fprintf(ofp,
+ "\n# Packed byte string args of process_event():\n"
+ "#\n"
+ "# $event:\tunion perf_event\tutil/event.h\n"
+ "# $attr:\tstruct perf_event_attr\tlinux/perf_event.h\n"
+ "# $sample:\tstruct perf_sample\tutil/event.h\n"
+ "# $raw_data:\tperf_sample->raw_data\tutil/event.h\n"
+ "\n"
+ "sub process_event\n"
+ "{\n"
+ "\tmy ($event, $attr, $sample, $raw_data) = @_;\n"
+ "\n"
+ "\tmy @event\t= unpack(\"LSS\", $event);\n"
+ "\tmy @attr\t= unpack(\"LLQQQQQLLQQ\", $attr);\n"
+ "\tmy @sample\t= unpack(\"QLLQQQQQLL\", $sample);\n"
+ "\tmy @raw_data\t= unpack(\"C*\", $raw_data);\n"
+ "\n"
+ "\tuse Data::Dumper;\n"
+ "\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n"
+ "}\n");
+
+ fclose(ofp);
+
+ fprintf(stderr, "generated Perl script: %s\n", fname);
+
+ return 0;
+}
+
+struct scripting_ops perl_scripting_ops = {
+ .name = "Perl",
+ .start_script = perl_start_script,
+ .stop_script = perl_stop_script,
+ .process_event = perl_process_event,
+ .generate_script = perl_generate_script,
+};
diff --git a/ANDROID_3.4.5/tools/perf/util/scripting-engines/trace-event-python.c b/ANDROID_3.4.5/tools/perf/util/scripting-engines/trace-event-python.c
new file mode 100644
index 00000000..c2623c6f
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/scripting-engines/trace-event-python.c
@@ -0,0 +1,600 @@
+/*
+ * trace-event-python. Feed trace events to an embedded Python interpreter.
+ *
+ * Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <Python.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "../../perf.h"
+#include "../util.h"
+#include "../event.h"
+#include "../thread.h"
+#include "../trace-event.h"
+
+PyMODINIT_FUNC initperf_trace_context(void);
+
+#define FTRACE_MAX_EVENT \
+ ((1 << (sizeof(unsigned short) * 8)) - 1)
+
+struct event *events[FTRACE_MAX_EVENT];
+
+#define MAX_FIELDS 64
+#define N_COMMON_FIELDS 7
+
+extern struct scripting_context *scripting_context;
+
+static char *cur_field_name;
+static int zero_flag_atom;
+
+static PyObject *main_module, *main_dict;
+
+static void handler_call_die(const char *handler_name)
+{
+ PyErr_Print();
+ Py_FatalError("problem in Python trace event handler");
+}
+
+static void define_value(enum print_arg_type field_type,
+ const char *ev_name,
+ const char *field_name,
+ const char *field_value,
+ const char *field_str)
+{
+ const char *handler_name = "define_flag_value";
+ PyObject *handler, *t, *retval;
+ unsigned long long value;
+ unsigned n = 0;
+
+ if (field_type == PRINT_SYMBOL)
+ handler_name = "define_symbolic_value";
+
+ t = PyTuple_New(4);
+ if (!t)
+ Py_FatalError("couldn't create Python tuple");
+
+ value = eval_flag(field_value);
+
+ PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
+ PyTuple_SetItem(t, n++, PyString_FromString(field_name));
+ PyTuple_SetItem(t, n++, PyInt_FromLong(value));
+ PyTuple_SetItem(t, n++, PyString_FromString(field_str));
+
+ handler = PyDict_GetItemString(main_dict, handler_name);
+ if (handler && PyCallable_Check(handler)) {
+ retval = PyObject_CallObject(handler, t);
+ if (retval == NULL)
+ handler_call_die(handler_name);
+ }
+
+ Py_DECREF(t);
+}
+
+static void define_values(enum print_arg_type field_type,
+ struct print_flag_sym *field,
+ const char *ev_name,
+ const char *field_name)
+{
+ define_value(field_type, ev_name, field_name, field->value,
+ field->str);
+
+ if (field->next)
+ define_values(field_type, field->next, ev_name, field_name);
+}
+
+static void define_field(enum print_arg_type field_type,
+ const char *ev_name,
+ const char *field_name,
+ const char *delim)
+{
+ const char *handler_name = "define_flag_field";
+ PyObject *handler, *t, *retval;
+ unsigned n = 0;
+
+ if (field_type == PRINT_SYMBOL)
+ handler_name = "define_symbolic_field";
+
+ if (field_type == PRINT_FLAGS)
+ t = PyTuple_New(3);
+ else
+ t = PyTuple_New(2);
+ if (!t)
+ Py_FatalError("couldn't create Python tuple");
+
+ PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
+ PyTuple_SetItem(t, n++, PyString_FromString(field_name));
+ if (field_type == PRINT_FLAGS)
+ PyTuple_SetItem(t, n++, PyString_FromString(delim));
+
+ handler = PyDict_GetItemString(main_dict, handler_name);
+ if (handler && PyCallable_Check(handler)) {
+ retval = PyObject_CallObject(handler, t);
+ if (retval == NULL)
+ handler_call_die(handler_name);
+ }
+
+ Py_DECREF(t);
+}
+
+static void define_event_symbols(struct event *event,
+ const char *ev_name,
+ struct print_arg *args)
+{
+ switch (args->type) {
+ case PRINT_NULL:
+ break;
+ case PRINT_ATOM:
+ define_value(PRINT_FLAGS, ev_name, cur_field_name, "0",
+ args->atom.atom);
+ zero_flag_atom = 0;
+ break;
+ case PRINT_FIELD:
+ if (cur_field_name)
+ free(cur_field_name);
+ cur_field_name = strdup(args->field.name);
+ break;
+ case PRINT_FLAGS:
+ define_event_symbols(event, ev_name, args->flags.field);
+ define_field(PRINT_FLAGS, ev_name, cur_field_name,
+ args->flags.delim);
+ define_values(PRINT_FLAGS, args->flags.flags, ev_name,
+ cur_field_name);
+ break;
+ case PRINT_SYMBOL:
+ define_event_symbols(event, ev_name, args->symbol.field);
+ define_field(PRINT_SYMBOL, ev_name, cur_field_name, NULL);
+ define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name,
+ cur_field_name);
+ break;
+ case PRINT_STRING:
+ break;
+ case PRINT_TYPE:
+ define_event_symbols(event, ev_name, args->typecast.item);
+ break;
+ case PRINT_OP:
+ if (strcmp(args->op.op, ":") == 0)
+ zero_flag_atom = 1;
+ define_event_symbols(event, ev_name, args->op.left);
+ define_event_symbols(event, ev_name, args->op.right);
+ break;
+ default:
+ /* we should warn... */
+ return;
+ }
+
+ if (args->next)
+ define_event_symbols(event, ev_name, args->next);
+}
+
+static inline struct event *find_cache_event(int type)
+{
+ static char ev_name[256];
+ struct event *event;
+
+ if (events[type])
+ return events[type];
+
+ events[type] = event = trace_find_event(type);
+ if (!event)
+ return NULL;
+
+ sprintf(ev_name, "%s__%s", event->system, event->name);
+
+ define_event_symbols(event, ev_name, event->print_fmt.args);
+
+ return event;
+}
+
+static void python_process_event(union perf_event *pevent __unused,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel __unused,
+ struct machine *machine __unused,
+ struct thread *thread)
+{
+ PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
+ static char handler_name[256];
+ struct format_field *field;
+ unsigned long long val;
+ unsigned long s, ns;
+ struct event *event;
+ unsigned n = 0;
+ int type;
+ int pid;
+ int cpu = sample->cpu;
+ void *data = sample->raw_data;
+ unsigned long long nsecs = sample->time;
+ char *comm = thread->comm;
+
+ t = PyTuple_New(MAX_FIELDS);
+ if (!t)
+ Py_FatalError("couldn't create Python tuple");
+
+ type = trace_parse_common_type(data);
+
+ event = find_cache_event(type);
+ if (!event)
+ die("ug! no event found for type %d", type);
+
+ pid = trace_parse_common_pid(data);
+
+ sprintf(handler_name, "%s__%s", event->system, event->name);
+
+ handler = PyDict_GetItemString(main_dict, handler_name);
+ if (handler && !PyCallable_Check(handler))
+ handler = NULL;
+ if (!handler) {
+ dict = PyDict_New();
+ if (!dict)
+ Py_FatalError("couldn't create Python dict");
+ }
+ s = nsecs / NSECS_PER_SEC;
+ ns = nsecs - s * NSECS_PER_SEC;
+
+ scripting_context->event_data = data;
+
+ context = PyCObject_FromVoidPtr(scripting_context, NULL);
+
+ PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
+ PyTuple_SetItem(t, n++, context);
+
+ if (handler) {
+ PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
+ PyTuple_SetItem(t, n++, PyInt_FromLong(s));
+ PyTuple_SetItem(t, n++, PyInt_FromLong(ns));
+ PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
+ PyTuple_SetItem(t, n++, PyString_FromString(comm));
+ } else {
+ PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu));
+ PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s));
+ PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns));
+ PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid));
+ PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm));
+ }
+ for (field = event->format.fields; field; field = field->next) {
+ if (field->flags & FIELD_IS_STRING) {
+ int offset;
+ if (field->flags & FIELD_IS_DYNAMIC) {
+ offset = *(int *)(data + field->offset);
+ offset &= 0xffff;
+ } else
+ offset = field->offset;
+ obj = PyString_FromString((char *)data + offset);
+ } else { /* FIELD_IS_NUMERIC */
+ val = read_size(data + field->offset, field->size);
+ if (field->flags & FIELD_IS_SIGNED) {
+ if ((long long)val >= LONG_MIN &&
+ (long long)val <= LONG_MAX)
+ obj = PyInt_FromLong(val);
+ else
+ obj = PyLong_FromLongLong(val);
+ } else {
+ if (val <= LONG_MAX)
+ obj = PyInt_FromLong(val);
+ else
+ obj = PyLong_FromUnsignedLongLong(val);
+ }
+ }
+ if (handler)
+ PyTuple_SetItem(t, n++, obj);
+ else
+ PyDict_SetItemString(dict, field->name, obj);
+
+ }
+ if (!handler)
+ PyTuple_SetItem(t, n++, dict);
+
+ if (_PyTuple_Resize(&t, n) == -1)
+ Py_FatalError("error resizing Python tuple");
+
+ if (handler) {
+ retval = PyObject_CallObject(handler, t);
+ if (retval == NULL)
+ handler_call_die(handler_name);
+ } else {
+ handler = PyDict_GetItemString(main_dict, "trace_unhandled");
+ if (handler && PyCallable_Check(handler)) {
+
+ retval = PyObject_CallObject(handler, t);
+ if (retval == NULL)
+ handler_call_die("trace_unhandled");
+ }
+ Py_DECREF(dict);
+ }
+
+ Py_DECREF(t);
+}
+
+static int run_start_sub(void)
+{
+ PyObject *handler, *retval;
+ int err = 0;
+
+ main_module = PyImport_AddModule("__main__");
+ if (main_module == NULL)
+ return -1;
+ Py_INCREF(main_module);
+
+ main_dict = PyModule_GetDict(main_module);
+ if (main_dict == NULL) {
+ err = -1;
+ goto error;
+ }
+ Py_INCREF(main_dict);
+
+ handler = PyDict_GetItemString(main_dict, "trace_begin");
+ if (handler == NULL || !PyCallable_Check(handler))
+ goto out;
+
+ retval = PyObject_CallObject(handler, NULL);
+ if (retval == NULL)
+ handler_call_die("trace_begin");
+
+ Py_DECREF(retval);
+ return err;
+error:
+ Py_XDECREF(main_dict);
+ Py_XDECREF(main_module);
+out:
+ return err;
+}
+
+/*
+ * Start trace script
+ */
+static int python_start_script(const char *script, int argc, const char **argv)
+{
+ const char **command_line;
+ char buf[PATH_MAX];
+ int i, err = 0;
+ FILE *fp;
+
+ command_line = malloc((argc + 1) * sizeof(const char *));
+ command_line[0] = script;
+ for (i = 1; i < argc + 1; i++)
+ command_line[i] = argv[i - 1];
+
+ Py_Initialize();
+
+ initperf_trace_context();
+
+ PySys_SetArgv(argc + 1, (char **)command_line);
+
+ fp = fopen(script, "r");
+ if (!fp) {
+ sprintf(buf, "Can't open python script \"%s\"", script);
+ perror(buf);
+ err = -1;
+ goto error;
+ }
+
+ err = PyRun_SimpleFile(fp, script);
+ if (err) {
+ fprintf(stderr, "Error running python script %s\n", script);
+ goto error;
+ }
+
+ err = run_start_sub();
+ if (err) {
+ fprintf(stderr, "Error starting python script %s\n", script);
+ goto error;
+ }
+
+ free(command_line);
+
+ return err;
+error:
+ Py_Finalize();
+ free(command_line);
+
+ return err;
+}
+
+/*
+ * Stop trace script
+ */
+static int python_stop_script(void)
+{
+ PyObject *handler, *retval;
+ int err = 0;
+
+ handler = PyDict_GetItemString(main_dict, "trace_end");
+ if (handler == NULL || !PyCallable_Check(handler))
+ goto out;
+
+ retval = PyObject_CallObject(handler, NULL);
+ if (retval == NULL)
+ handler_call_die("trace_end");
+ else
+ Py_DECREF(retval);
+out:
+ Py_XDECREF(main_dict);
+ Py_XDECREF(main_module);
+ Py_Finalize();
+
+ return err;
+}
+
+static int python_generate_script(const char *outfile)
+{
+ struct event *event = NULL;
+ struct format_field *f;
+ char fname[PATH_MAX];
+ int not_first, count;
+ FILE *ofp;
+
+ sprintf(fname, "%s.py", outfile);
+ ofp = fopen(fname, "w");
+ if (ofp == NULL) {
+ fprintf(stderr, "couldn't open %s\n", fname);
+ return -1;
+ }
+ fprintf(ofp, "# perf script event handlers, "
+ "generated by perf script -g python\n");
+
+ fprintf(ofp, "# Licensed under the terms of the GNU GPL"
+ " License version 2\n\n");
+
+ fprintf(ofp, "# The common_* event handler fields are the most useful "
+ "fields common to\n");
+
+ fprintf(ofp, "# all events. They don't necessarily correspond to "
+ "the 'common_*' fields\n");
+
+ fprintf(ofp, "# in the format files. Those fields not available as "
+ "handler params can\n");
+
+ fprintf(ofp, "# be retrieved using Python functions of the form "
+ "common_*(context).\n");
+
+ fprintf(ofp, "# See the perf-trace-python Documentation for the list "
+ "of available functions.\n\n");
+
+ fprintf(ofp, "import os\n");
+ fprintf(ofp, "import sys\n\n");
+
+ fprintf(ofp, "sys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n");
+ fprintf(ofp, "\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n");
+ fprintf(ofp, "\nfrom perf_trace_context import *\n");
+ fprintf(ofp, "from Core import *\n\n\n");
+
+ fprintf(ofp, "def trace_begin():\n");
+ fprintf(ofp, "\tprint \"in trace_begin\"\n\n");
+
+ fprintf(ofp, "def trace_end():\n");
+ fprintf(ofp, "\tprint \"in trace_end\"\n\n");
+
+ while ((event = trace_find_next_event(event))) {
+ fprintf(ofp, "def %s__%s(", event->system, event->name);
+ fprintf(ofp, "event_name, ");
+ fprintf(ofp, "context, ");
+ fprintf(ofp, "common_cpu,\n");
+ fprintf(ofp, "\tcommon_secs, ");
+ fprintf(ofp, "common_nsecs, ");
+ fprintf(ofp, "common_pid, ");
+ fprintf(ofp, "common_comm,\n\t");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+ if (++count % 5 == 0)
+ fprintf(ofp, "\n\t");
+
+ fprintf(ofp, "%s", f->name);
+ }
+ fprintf(ofp, "):\n");
+
+ fprintf(ofp, "\t\tprint_header(event_name, common_cpu, "
+ "common_secs, common_nsecs,\n\t\t\t"
+ "common_pid, common_comm)\n\n");
+
+ fprintf(ofp, "\t\tprint \"");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+ if (count && count % 3 == 0) {
+ fprintf(ofp, "\" \\\n\t\t\"");
+ }
+ count++;
+
+ fprintf(ofp, "%s=", f->name);
+ if (f->flags & FIELD_IS_STRING ||
+ f->flags & FIELD_IS_FLAG ||
+ f->flags & FIELD_IS_SYMBOLIC)
+ fprintf(ofp, "%%s");
+ else if (f->flags & FIELD_IS_SIGNED)
+ fprintf(ofp, "%%d");
+ else
+ fprintf(ofp, "%%u");
+ }
+
+ fprintf(ofp, "\\n\" %% \\\n\t\t(");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+
+ if (++count % 5 == 0)
+ fprintf(ofp, "\n\t\t");
+
+ if (f->flags & FIELD_IS_FLAG) {
+ if ((count - 1) % 5 != 0) {
+ fprintf(ofp, "\n\t\t");
+ count = 4;
+ }
+ fprintf(ofp, "flag_str(\"");
+ fprintf(ofp, "%s__%s\", ", event->system,
+ event->name);
+ fprintf(ofp, "\"%s\", %s)", f->name,
+ f->name);
+ } else if (f->flags & FIELD_IS_SYMBOLIC) {
+ if ((count - 1) % 5 != 0) {
+ fprintf(ofp, "\n\t\t");
+ count = 4;
+ }
+ fprintf(ofp, "symbol_str(\"");
+ fprintf(ofp, "%s__%s\", ", event->system,
+ event->name);
+ fprintf(ofp, "\"%s\", %s)", f->name,
+ f->name);
+ } else
+ fprintf(ofp, "%s", f->name);
+ }
+
+ fprintf(ofp, "),\n\n");
+ }
+
+ fprintf(ofp, "def trace_unhandled(event_name, context, "
+ "event_fields_dict):\n");
+
+ fprintf(ofp, "\t\tprint ' '.join(['%%s=%%s'%%(k,str(v))"
+ "for k,v in sorted(event_fields_dict.items())])\n\n");
+
+ fprintf(ofp, "def print_header("
+ "event_name, cpu, secs, nsecs, pid, comm):\n"
+ "\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
+ "(event_name, cpu, secs, nsecs, pid, comm),\n");
+
+ fclose(ofp);
+
+ fprintf(stderr, "generated Python script: %s\n", fname);
+
+ return 0;
+}
+
+struct scripting_ops python_scripting_ops = {
+ .name = "Python",
+ .start_script = python_start_script,
+ .stop_script = python_stop_script,
+ .process_event = python_process_event,
+ .generate_script = python_generate_script,
+};
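
Both engines expose their hooks through the same struct scripting_ops table (perl_scripting_ops above, python_scripting_ops here). A rough sketch of how one of these tables might be driven follows; the helper name is invented for illustration, and in perf the engine is actually selected and called through the generic scripting layer:

/* illustrative driver, not part of this commit */
static int run_script_engine(struct scripting_ops *ops, const char *script,
			     int argc, const char **argv)
{
	int err;

	err = ops->start_script(script, argc, argv);	/* boot the interpreter */
	if (err)
		return err;

	/*
	 * For every PERF_RECORD_SAMPLE the session layer would then call
	 * ops->process_event(event, sample, evsel, machine, thread), which
	 * dispatches to the per-event handler defined in the script.
	 */

	return ops->stop_script();	/* run trace_end and tear the interpreter down */
}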
diff --git a/ANDROID_3.4.5/tools/perf/util/session.c b/ANDROID_3.4.5/tools/perf/util/session.c
new file mode 100644
index 00000000..1efd3bee
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/session.c
@@ -0,0 +1,1508 @@
+#define _FILE_OFFSET_BITS 64
+
+#include <linux/kernel.h>
+
+#include <byteswap.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+
+#include "evlist.h"
+#include "evsel.h"
+#include "session.h"
+#include "tool.h"
+#include "sort.h"
+#include "util.h"
+#include "cpumap.h"
+
+static int perf_session__open(struct perf_session *self, bool force)
+{
+ struct stat input_stat;
+
+ if (!strcmp(self->filename, "-")) {
+ self->fd_pipe = true;
+ self->fd = STDIN_FILENO;
+
+ if (perf_session__read_header(self, self->fd) < 0)
+ pr_err("incompatible file format (rerun with -v to learn more)");
+
+ return 0;
+ }
+
+ self->fd = open(self->filename, O_RDONLY);
+ if (self->fd < 0) {
+ int err = errno;
+
+ pr_err("failed to open %s: %s", self->filename, strerror(err));
+ if (err == ENOENT && !strcmp(self->filename, "perf.data"))
+ pr_err(" (try 'perf record' first)");
+ pr_err("\n");
+ return -err; /* errno may have been clobbered by pr_err() */
+ }
+
+ if (fstat(self->fd, &input_stat) < 0)
+ goto out_close;
+
+ if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
+ pr_err("file %s not owned by current user or root\n",
+ self->filename);
+ goto out_close;
+ }
+
+ if (!input_stat.st_size) {
+ pr_info("zero-sized file (%s), nothing to do!\n",
+ self->filename);
+ goto out_close;
+ }
+
+ if (perf_session__read_header(self, self->fd) < 0) {
+ pr_err("incompatible file format (rerun with -v to learn more)");
+ goto out_close;
+ }
+
+ if (!perf_evlist__valid_sample_type(self->evlist)) {
+ pr_err("non matching sample_type");
+ goto out_close;
+ }
+
+ if (!perf_evlist__valid_sample_id_all(self->evlist)) {
+ pr_err("non matching sample_id_all");
+ goto out_close;
+ }
+
+ self->size = input_stat.st_size;
+ return 0;
+
+out_close:
+ close(self->fd);
+ self->fd = -1;
+ return -1;
+}
+
+void perf_session__update_sample_type(struct perf_session *self)
+{
+ self->sample_type = perf_evlist__sample_type(self->evlist);
+ self->sample_size = __perf_evsel__sample_size(self->sample_type);
+ self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
+ self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
+ self->host_machine.id_hdr_size = self->id_hdr_size;
+}
+
+int perf_session__create_kernel_maps(struct perf_session *self)
+{
+ int ret = machine__create_kernel_maps(&self->host_machine);
+
+ if (ret >= 0)
+ ret = machines__create_guest_kernel_maps(&self->machines);
+ return ret;
+}
+
+static void perf_session__destroy_kernel_maps(struct perf_session *self)
+{
+ machine__destroy_kernel_maps(&self->host_machine);
+ machines__destroy_guest_kernel_maps(&self->machines);
+}
+
+struct perf_session *perf_session__new(const char *filename, int mode,
+ bool force, bool repipe,
+ struct perf_tool *tool)
+{
+ struct perf_session *self;
+ struct stat st;
+ size_t len;
+
+ if (!filename || !strlen(filename)) {
+ if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
+ filename = "-";
+ else
+ filename = "perf.data";
+ }
+
+ len = strlen(filename);
+ self = zalloc(sizeof(*self) + len);
+
+ if (self == NULL)
+ goto out;
+
+ memcpy(self->filename, filename, len);
+ /*
+ * On 64bit we can mmap the data file in one go. No need for tiny mmap
+ * slices. On 32bit we use 32MB.
+ */
+#if BITS_PER_LONG == 64
+ self->mmap_window = ULLONG_MAX;
+#else
+ self->mmap_window = 32 * 1024 * 1024ULL;
+#endif
+ self->machines = RB_ROOT;
+ self->repipe = repipe;
+ INIT_LIST_HEAD(&self->ordered_samples.samples);
+ INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
+ INIT_LIST_HEAD(&self->ordered_samples.to_free);
+ machine__init(&self->host_machine, "", HOST_KERNEL_ID);
+ hists__init(&self->hists);
+
+ if (mode == O_RDONLY) {
+ if (perf_session__open(self, force) < 0)
+ goto out_delete;
+ perf_session__update_sample_type(self);
+ } else if (mode == O_WRONLY) {
+ /*
+ * In O_RDONLY mode this will be performed when reading the
+ * kernel MMAP event, in perf_event__process_mmap().
+ */
+ if (perf_session__create_kernel_maps(self) < 0)
+ goto out_delete;
+ }
+
+ if (tool && tool->ordering_requires_timestamps &&
+ tool->ordered_samples && !self->sample_id_all) {
+ dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
+ tool->ordered_samples = false;
+ }
+
+out:
+ return self;
+out_delete:
+ perf_session__delete(self);
+ return NULL;
+}
+
+static void machine__delete_dead_threads(struct machine *machine)
+{
+ struct thread *n, *t;
+
+ list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
+ list_del(&t->node);
+ thread__delete(t);
+ }
+}
+
+static void perf_session__delete_dead_threads(struct perf_session *session)
+{
+ machine__delete_dead_threads(&session->host_machine);
+}
+
+static void machine__delete_threads(struct machine *self)
+{
+ struct rb_node *nd = rb_first(&self->threads);
+
+ while (nd) {
+ struct thread *t = rb_entry(nd, struct thread, rb_node);
+
+ rb_erase(&t->rb_node, &self->threads);
+ nd = rb_next(nd);
+ thread__delete(t);
+ }
+}
+
+static void perf_session__delete_threads(struct perf_session *session)
+{
+ machine__delete_threads(&session->host_machine);
+}
+
+void perf_session__delete(struct perf_session *self)
+{
+ perf_session__destroy_kernel_maps(self);
+ perf_session__delete_dead_threads(self);
+ perf_session__delete_threads(self);
+ machine__exit(&self->host_machine);
+ close(self->fd);
+ free(self);
+}
+
+void machine__remove_thread(struct machine *self, struct thread *th)
+{
+ self->last_match = NULL;
+ rb_erase(&th->rb_node, &self->threads);
+ /*
+ * We may have references to this thread, for instance in some hist_entry
+ * instances, so just move them to a separate list.
+ */
+ list_add_tail(&th->node, &self->dead_threads);
+}
+
+static bool symbol__match_parent_regex(struct symbol *sym)
+{
+ if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
+ return 1;
+
+ return 0;
+}
+
+static const u8 cpumodes[] = {
+ PERF_RECORD_MISC_USER,
+ PERF_RECORD_MISC_KERNEL,
+ PERF_RECORD_MISC_GUEST_USER,
+ PERF_RECORD_MISC_GUEST_KERNEL
+};
+#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
+
+static void ip__resolve_ams(struct machine *self, struct thread *thread,
+ struct addr_map_symbol *ams,
+ u64 ip)
+{
+ struct addr_location al;
+ size_t i;
+ u8 m;
+
+ memset(&al, 0, sizeof(al));
+
+ for (i = 0; i < NCPUMODES; i++) {
+ m = cpumodes[i];
+ /*
+ * We cannot use the header.misc hint to determine whether a
+ * branch stack address is user, kernel, guest, hypervisor.
+ * Branches may straddle the kernel/user/hypervisor boundaries.
+ * Thus, we have to try each cpumode in turn until we find a match;
+ * otherwise the symbol remains unknown.
+ */
+ thread__find_addr_location(thread, self, m, MAP__FUNCTION,
+ ip, &al, NULL);
+ if (al.sym)
+ goto found;
+ }
+found:
+ ams->addr = ip;
+ ams->al_addr = al.addr;
+ ams->sym = al.sym;
+ ams->map = al.map;
+}
+
+struct branch_info *machine__resolve_bstack(struct machine *self,
+ struct thread *thr,
+ struct branch_stack *bs)
+{
+ struct branch_info *bi;
+ unsigned int i;
+
+ bi = calloc(bs->nr, sizeof(struct branch_info));
+ if (!bi)
+ return NULL;
+
+ for (i = 0; i < bs->nr; i++) {
+ ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
+ ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
+ bi[i].flags = bs->entries[i].flags;
+ }
+ return bi;
+}
+
+int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent)
+{
+ u8 cpumode = PERF_RECORD_MISC_USER;
+ unsigned int i;
+ int err;
+
+ callchain_cursor_reset(&evsel->hists.callchain_cursor);
+
+ for (i = 0; i < chain->nr; i++) {
+ u64 ip;
+ struct addr_location al;
+
+ if (callchain_param.order == ORDER_CALLEE)
+ ip = chain->ips[i];
+ else
+ ip = chain->ips[chain->nr - i - 1];
+
+ if (ip >= PERF_CONTEXT_MAX) {
+ switch (ip) {
+ case PERF_CONTEXT_HV:
+ cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
+ case PERF_CONTEXT_KERNEL:
+ cpumode = PERF_RECORD_MISC_KERNEL; break;
+ case PERF_CONTEXT_USER:
+ cpumode = PERF_RECORD_MISC_USER; break;
+ default:
+ break;
+ }
+ continue;
+ }
+
+ al.filtered = false;
+ thread__find_addr_location(thread, self, cpumode,
+ MAP__FUNCTION, ip, &al, NULL);
+ if (al.sym != NULL) {
+ if (sort__has_parent && !*parent &&
+ symbol__match_parent_regex(al.sym))
+ *parent = al.sym;
+ if (!symbol_conf.use_callchain)
+ break;
+ }
+
+ err = callchain_cursor_append(&evsel->hists.callchain_cursor,
+ ip, al.map, al.sym);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int process_event_synth_tracing_data_stub(union perf_event *event __used,
+ struct perf_session *session __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_event_synth_attr_stub(union perf_event *event __used,
+ struct perf_evlist **pevlist __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_event_sample_stub(struct perf_tool *tool __used,
+ union perf_event *event __used,
+ struct perf_sample *sample __used,
+ struct perf_evsel *evsel __used,
+ struct machine *machine __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_event_stub(struct perf_tool *tool __used,
+ union perf_event *event __used,
+ struct perf_sample *sample __used,
+ struct machine *machine __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_finished_round_stub(struct perf_tool *tool __used,
+ union perf_event *event __used,
+ struct perf_session *perf_session __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_event_type_stub(struct perf_tool *tool __used,
+ union perf_event *event __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_finished_round(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session);
+
+static void perf_tool__fill_defaults(struct perf_tool *tool)
+{
+ if (tool->sample == NULL)
+ tool->sample = process_event_sample_stub;
+ if (tool->mmap == NULL)
+ tool->mmap = process_event_stub;
+ if (tool->comm == NULL)
+ tool->comm = process_event_stub;
+ if (tool->fork == NULL)
+ tool->fork = process_event_stub;
+ if (tool->exit == NULL)
+ tool->exit = process_event_stub;
+ if (tool->lost == NULL)
+ tool->lost = perf_event__process_lost;
+ if (tool->read == NULL)
+ tool->read = process_event_sample_stub;
+ if (tool->throttle == NULL)
+ tool->throttle = process_event_stub;
+ if (tool->unthrottle == NULL)
+ tool->unthrottle = process_event_stub;
+ if (tool->attr == NULL)
+ tool->attr = process_event_synth_attr_stub;
+ if (tool->event_type == NULL)
+ tool->event_type = process_event_type_stub;
+ if (tool->tracing_data == NULL)
+ tool->tracing_data = process_event_synth_tracing_data_stub;
+ if (tool->build_id == NULL)
+ tool->build_id = process_finished_round_stub;
+ if (tool->finished_round == NULL) {
+ if (tool->ordered_samples)
+ tool->finished_round = process_finished_round;
+ else
+ tool->finished_round = process_finished_round_stub;
+ }
+}
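
perf_tool__fill_defaults() lets a tool supply only the callbacks it needs and have everything else stubbed out. Here is a sketch of a minimal sample-counting tool built on the functions added in this file; perf_session__process_events() is assumed to exist further down in session.c, and the field names come from the stubs and checks above (illustrative only):

static int nr_samples;

static int count_sample(struct perf_tool *tool __used,
			union perf_event *event __used,
			struct perf_sample *sample __used,
			struct perf_evsel *evsel __used,
			struct machine *machine __used)
{
	nr_samples++;		/* every delivered PERF_RECORD_SAMPLE lands here */
	return 0;
}

static int count_samples_in(const char *filename)
{
	struct perf_tool tool = {
		.sample		 = count_sample,
		.ordered_samples = true,	/* deliver samples in time order */
	};
	struct perf_session *session;

	session = perf_session__new(filename, O_RDONLY, false, false, &tool);
	if (session == NULL)
		return -1;

	/* assumed entry point, defined later in this file */
	perf_session__process_events(session, &tool);

	perf_session__delete(session);
	return nr_samples;
}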
+
+void mem_bswap_64(void *src, int byte_size)
+{
+ u64 *m = src;
+
+ while (byte_size > 0) {
+ *m = bswap_64(*m);
+ byte_size -= sizeof(u64);
+ ++m;
+ }
+}
+
+static void perf_event__all64_swap(union perf_event *event)
+{
+ struct perf_event_header *hdr = &event->header;
+ mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
+}
+
+static void perf_event__comm_swap(union perf_event *event)
+{
+ event->comm.pid = bswap_32(event->comm.pid);
+ event->comm.tid = bswap_32(event->comm.tid);
+}
+
+static void perf_event__mmap_swap(union perf_event *event)
+{
+ event->mmap.pid = bswap_32(event->mmap.pid);
+ event->mmap.tid = bswap_32(event->mmap.tid);
+ event->mmap.start = bswap_64(event->mmap.start);
+ event->mmap.len = bswap_64(event->mmap.len);
+ event->mmap.pgoff = bswap_64(event->mmap.pgoff);
+}
+
+static void perf_event__task_swap(union perf_event *event)
+{
+ event->fork.pid = bswap_32(event->fork.pid);
+ event->fork.tid = bswap_32(event->fork.tid);
+ event->fork.ppid = bswap_32(event->fork.ppid);
+ event->fork.ptid = bswap_32(event->fork.ptid);
+ event->fork.time = bswap_64(event->fork.time);
+}
+
+static void perf_event__read_swap(union perf_event *event)
+{
+ event->read.pid = bswap_32(event->read.pid);
+ event->read.tid = bswap_32(event->read.tid);
+ event->read.value = bswap_64(event->read.value);
+ event->read.time_enabled = bswap_64(event->read.time_enabled);
+ event->read.time_running = bswap_64(event->read.time_running);
+ event->read.id = bswap_64(event->read.id);
+}
+
+/* exported for swapping attributes in file header */
+void perf_event__attr_swap(struct perf_event_attr *attr)
+{
+ attr->type = bswap_32(attr->type);
+ attr->size = bswap_32(attr->size);
+ attr->config = bswap_64(attr->config);
+ attr->sample_period = bswap_64(attr->sample_period);
+ attr->sample_type = bswap_64(attr->sample_type);
+ attr->read_format = bswap_64(attr->read_format);
+ attr->wakeup_events = bswap_32(attr->wakeup_events);
+ attr->bp_type = bswap_32(attr->bp_type);
+ attr->bp_addr = bswap_64(attr->bp_addr);
+ attr->bp_len = bswap_64(attr->bp_len);
+}
+
+static void perf_event__hdr_attr_swap(union perf_event *event)
+{
+ size_t size;
+
+ perf_event__attr_swap(&event->attr.attr);
+
+ size = event->header.size;
+ size -= (void *)&event->attr.id - (void *)event;
+ mem_bswap_64(event->attr.id, size);
+}
+
+static void perf_event__event_type_swap(union perf_event *event)
+{
+ event->event_type.event_type.event_id =
+ bswap_64(event->event_type.event_type.event_id);
+}
+
+static void perf_event__tracing_data_swap(union perf_event *event)
+{
+ event->tracing_data.size = bswap_32(event->tracing_data.size);
+}
+
+typedef void (*perf_event__swap_op)(union perf_event *event);
+
+static perf_event__swap_op perf_event__swap_ops[] = {
+ [PERF_RECORD_MMAP] = perf_event__mmap_swap,
+ [PERF_RECORD_COMM] = perf_event__comm_swap,
+ [PERF_RECORD_FORK] = perf_event__task_swap,
+ [PERF_RECORD_EXIT] = perf_event__task_swap,
+ [PERF_RECORD_LOST] = perf_event__all64_swap,
+ [PERF_RECORD_READ] = perf_event__read_swap,
+ [PERF_RECORD_SAMPLE] = perf_event__all64_swap,
+ [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
+ [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
+ [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
+ [PERF_RECORD_HEADER_BUILD_ID] = NULL,
+ [PERF_RECORD_HEADER_MAX] = NULL,
+};
+
+struct sample_queue {
+ u64 timestamp;
+ u64 file_offset;
+ union perf_event *event;
+ struct list_head list;
+};
+
+static void perf_session_free_sample_buffers(struct perf_session *session)
+{
+ struct ordered_samples *os = &session->ordered_samples;
+
+ while (!list_empty(&os->to_free)) {
+ struct sample_queue *sq;
+
+ sq = list_entry(os->to_free.next, struct sample_queue, list);
+ list_del(&sq->list);
+ free(sq);
+ }
+}
+
+static int perf_session_deliver_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_tool *tool,
+ u64 file_offset);
+
+static void flush_sample_queue(struct perf_session *s,
+ struct perf_tool *tool)
+{
+ struct ordered_samples *os = &s->ordered_samples;
+ struct list_head *head = &os->samples;
+ struct sample_queue *tmp, *iter;
+ struct perf_sample sample;
+ u64 limit = os->next_flush;
+ u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
+ unsigned idx = 0, progress_next = os->nr_samples / 16;
+ int ret;
+
+ if (!tool->ordered_samples || !limit)
+ return;
+
+ list_for_each_entry_safe(iter, tmp, head, list) {
+ if (iter->timestamp > limit)
+ break;
+
+ ret = perf_session__parse_sample(s, iter->event, &sample);
+ if (ret)
+ pr_err("Can't parse sample, err = %d\n", ret);
+ else
+ perf_session_deliver_event(s, iter->event, &sample, tool,
+ iter->file_offset);
+
+ os->last_flush = iter->timestamp;
+ list_del(&iter->list);
+ list_add(&iter->list, &os->sample_cache);
+ if (++idx >= progress_next) {
+ progress_next += os->nr_samples / 16;
+ ui_progress__update(idx, os->nr_samples,
+ "Processing time ordered events...");
+ }
+ }
+
+ if (list_empty(head)) {
+ os->last_sample = NULL;
+ } else if (last_ts <= limit) {
+ os->last_sample =
+ list_entry(head->prev, struct sample_queue, list);
+ }
+
+ os->nr_samples = 0;
+}
+
+/*
+ * When perf record finishes a pass on every buffer, it records this pseudo
+ * event.
+ * We record the max timestamp t found in pass n.
+ * Assuming these timestamps are monotonic across cpus, we know that if
+ * a buffer still has events with timestamps below t, they will all be
+ * available and read in pass n + 1.
+ * Hence, when we start to read pass n + 2, we can safely flush every
+ * event with a timestamp below t.
+ *
+ * ============ PASS n =================
+ * CPU 0 | CPU 1
+ * |
+ * cnt1 timestamps | cnt2 timestamps
+ * 1 | 2
+ * 2 | 3
+ * - | 4 <--- max recorded
+ *
+ * ============ PASS n + 1 ==============
+ * CPU 0 | CPU 1
+ * |
+ * cnt1 timestamps | cnt2 timestamps
+ * 3 | 5
+ * 4 | 6
+ * 5 | 7 <---- max recorded
+ *
+ * Flush all events below timestamp 4
+ *
+ * ============ PASS n + 2 ==============
+ * CPU 0 | CPU 1
+ * |
+ * cnt1 timestamps | cnt2 timestamps
+ * 6 | 8
+ * 7 | 9
+ * - | 10
+ *
+ * Flush all events below timestamp 7
+ * etc...
+ */
+static int process_finished_round(struct perf_tool *tool,
+ union perf_event *event __used,
+ struct perf_session *session)
+{
+ flush_sample_queue(session, tool);
+ session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
+
+ return 0;
+}
+
+/* The queue is ordered by time */
+static void __queue_event(struct sample_queue *new, struct perf_session *s)
+{
+ struct ordered_samples *os = &s->ordered_samples;
+ struct sample_queue *sample = os->last_sample;
+ u64 timestamp = new->timestamp;
+ struct list_head *p;
+
+ ++os->nr_samples;
+ os->last_sample = new;
+
+ if (!sample) {
+ list_add(&new->list, &os->samples);
+ os->max_timestamp = timestamp;
+ return;
+ }
+
+ /*
+ * last_sample might point to some random place in the list as it's
+ * the last queued event. We expect that the new event is close to
+ * this.
+ */
+ if (sample->timestamp <= timestamp) {
+ while (sample->timestamp <= timestamp) {
+ p = sample->list.next;
+ if (p == &os->samples) {
+ list_add_tail(&new->list, &os->samples);
+ os->max_timestamp = timestamp;
+ return;
+ }
+ sample = list_entry(p, struct sample_queue, list);
+ }
+ list_add_tail(&new->list, &sample->list);
+ } else {
+ while (sample->timestamp > timestamp) {
+ p = sample->list.prev;
+ if (p == &os->samples) {
+ list_add(&new->list, &os->samples);
+ return;
+ }
+ sample = list_entry(p, struct sample_queue, list);
+ }
+ list_add(&new->list, &sample->list);
+ }
+}
+
+#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue))
+
+static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
+ struct perf_sample *sample, u64 file_offset)
+{
+ struct ordered_samples *os = &s->ordered_samples;
+ struct list_head *sc = &os->sample_cache;
+ u64 timestamp = sample->time;
+ struct sample_queue *new;
+
+ if (!timestamp || timestamp == ~0ULL)
+ return -ETIME;
+
+ if (timestamp < s->ordered_samples.last_flush) {
+ printf("Warning: Timestamp below last timeslice flush\n");
+ return -EINVAL;
+ }
+
+ if (!list_empty(sc)) {
+ new = list_entry(sc->next, struct sample_queue, list);
+ list_del(&new->list);
+ } else if (os->sample_buffer) {
+ new = os->sample_buffer + os->sample_buffer_idx;
+ if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
+ os->sample_buffer = NULL;
+ } else {
+ os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
+ if (!os->sample_buffer)
+ return -ENOMEM;
+ list_add(&os->sample_buffer->list, &os->to_free);
+ os->sample_buffer_idx = 2;
+ new = os->sample_buffer + 1;
+ }
+
+ new->timestamp = timestamp;
+ new->file_offset = file_offset;
+ new->event = event;
+
+ __queue_event(new, s);
+
+ return 0;
+}
+
+static void callchain__printf(struct perf_sample *sample)
+{
+ unsigned int i;
+
+ printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);
+
+ for (i = 0; i < sample->callchain->nr; i++)
+ printf("..... %2d: %016" PRIx64 "\n",
+ i, sample->callchain->ips[i]);
+}
+
+static void branch_stack__printf(struct perf_sample *sample)
+{
+ uint64_t i;
+
+ printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);
+
+ for (i = 0; i < sample->branch_stack->nr; i++)
+ printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
+ i, sample->branch_stack->entries[i].from,
+ sample->branch_stack->entries[i].to);
+}
+
+static void perf_session__print_tstamp(struct perf_session *session,
+ union perf_event *event,
+ struct perf_sample *sample)
+{
+ if (event->header.type != PERF_RECORD_SAMPLE &&
+ !session->sample_id_all) {
+ fputs("-1 -1 ", stdout);
+ return;
+ }
+
+ if ((session->sample_type & PERF_SAMPLE_CPU))
+ printf("%u ", sample->cpu);
+
+ if (session->sample_type & PERF_SAMPLE_TIME)
+ printf("%" PRIu64 " ", sample->time);
+}
+
+static void dump_event(struct perf_session *session, union perf_event *event,
+ u64 file_offset, struct perf_sample *sample)
+{
+ if (!dump_trace)
+ return;
+
+ printf("\n%#" PRIx64 " [%#x]: event: %d\n",
+ file_offset, event->header.size, event->header.type);
+
+ trace_event(event);
+
+ if (sample)
+ perf_session__print_tstamp(session, event, sample);
+
+ printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
+ event->header.size, perf_event__name(event->header.type));
+}
+
+static void dump_sample(struct perf_session *session, union perf_event *event,
+ struct perf_sample *sample)
+{
+ if (!dump_trace)
+ return;
+
+ printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
+ event->header.misc, sample->pid, sample->tid, sample->ip,
+ sample->period, sample->addr);
+
+ if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
+ callchain__printf(sample);
+
+ if (session->sample_type & PERF_SAMPLE_BRANCH_STACK)
+ branch_stack__printf(sample);
+}
+
+static struct machine *
+ perf_session__find_machine_for_cpumode(struct perf_session *session,
+ union perf_event *event)
+{
+ const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+ if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
+ u32 pid;
+
+ if (event->header.type == PERF_RECORD_MMAP)
+ pid = event->mmap.pid;
+ else
+ pid = event->ip.pid;
+
+ return perf_session__find_machine(session, pid);
+ }
+
+ return perf_session__find_host_machine(session);
+}
+
+static int perf_session_deliver_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_tool *tool,
+ u64 file_offset)
+{
+ struct perf_evsel *evsel;
+ struct machine *machine;
+
+ dump_event(session, event, file_offset, sample);
+
+ evsel = perf_evlist__id2evsel(session->evlist, sample->id);
+ if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
+ /*
+ * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
+ * because the tools right now may apply filters, discarding
+ * some of the samples. For consistency, in the future we
+ * should have something like nr_filtered_samples and remove
+ * the sample->period from total_sample_period, etc, KISS for
+ * now tho.
+ *
+ * Also testing against NULL allows us to handle files without
+ * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
+ * future probably it'll be a good idea to restrict event
+ * processing via perf_session to files with both set.
+ */
+ hists__inc_nr_events(&evsel->hists, event->header.type);
+ }
+
+ machine = perf_session__find_machine_for_cpumode(session, event);
+
+ switch (event->header.type) {
+ case PERF_RECORD_SAMPLE:
+ dump_sample(session, event, sample);
+ if (evsel == NULL) {
+ ++session->hists.stats.nr_unknown_id;
+ return 0;
+ }
+ if (machine == NULL) {
+ ++session->hists.stats.nr_unprocessable_samples;
+ return 0;
+ }
+ return tool->sample(tool, event, sample, evsel, machine);
+ case PERF_RECORD_MMAP:
+ return tool->mmap(tool, event, sample, machine);
+ case PERF_RECORD_COMM:
+ return tool->comm(tool, event, sample, machine);
+ case PERF_RECORD_FORK:
+ return tool->fork(tool, event, sample, machine);
+ case PERF_RECORD_EXIT:
+ return tool->exit(tool, event, sample, machine);
+ case PERF_RECORD_LOST:
+ if (tool->lost == perf_event__process_lost)
+ session->hists.stats.total_lost += event->lost.lost;
+ return tool->lost(tool, event, sample, machine);
+ case PERF_RECORD_READ:
+ return tool->read(tool, event, sample, evsel, machine);
+ case PERF_RECORD_THROTTLE:
+ return tool->throttle(tool, event, sample, machine);
+ case PERF_RECORD_UNTHROTTLE:
+ return tool->unthrottle(tool, event, sample, machine);
+ default:
+ ++session->hists.stats.nr_unknown_events;
+ return -1;
+ }
+}
+
+static int perf_session__preprocess_sample(struct perf_session *session,
+ union perf_event *event, struct perf_sample *sample)
+{
+ if (event->header.type != PERF_RECORD_SAMPLE ||
+ !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
+ return 0;
+
+ if (!ip_callchain__valid(sample->callchain, event)) {
+ pr_debug("call-chain problem with event, skipping it.\n");
+ ++session->hists.stats.nr_invalid_chains;
+ session->hists.stats.total_invalid_chains += sample->period;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
+ struct perf_tool *tool, u64 file_offset)
+{
+ int err;
+
+ dump_event(session, event, file_offset, NULL);
+
+ /* These events are processed right away */
+ switch (event->header.type) {
+ case PERF_RECORD_HEADER_ATTR:
+ err = tool->attr(event, &session->evlist);
+ if (err == 0)
+ perf_session__update_sample_type(session);
+ return err;
+ case PERF_RECORD_HEADER_EVENT_TYPE:
+ return tool->event_type(tool, event);
+ case PERF_RECORD_HEADER_TRACING_DATA:
+ /* setup for reading amidst mmap */
+ lseek(session->fd, file_offset, SEEK_SET);
+ return tool->tracing_data(event, session);
+ case PERF_RECORD_HEADER_BUILD_ID:
+ return tool->build_id(tool, event, session);
+ case PERF_RECORD_FINISHED_ROUND:
+ return tool->finished_round(tool, event, session);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int perf_session__process_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_tool *tool,
+ u64 file_offset)
+{
+ struct perf_sample sample;
+ int ret;
+
+ if (session->header.needs_swap &&
+ perf_event__swap_ops[event->header.type])
+ perf_event__swap_ops[event->header.type](event);
+
+ if (event->header.type >= PERF_RECORD_HEADER_MAX)
+ return -EINVAL;
+
+ hists__inc_nr_events(&session->hists, event->header.type);
+
+ if (event->header.type >= PERF_RECORD_USER_TYPE_START)
+ return perf_session__process_user_event(session, event, tool, file_offset);
+
+ /*
+ * For all kernel events we get the sample data
+ */
+ ret = perf_session__parse_sample(session, event, &sample);
+ if (ret)
+ return ret;
+
+ /* Preprocess sample records - precheck callchains */
+ if (perf_session__preprocess_sample(session, event, &sample))
+ return 0;
+
+ if (tool->ordered_samples) {
+ ret = perf_session_queue_event(session, event, &sample,
+ file_offset);
+ if (ret != -ETIME)
+ return ret;
+ }
+
+ return perf_session_deliver_event(session, event, &sample, tool,
+ file_offset);
+}
+
+void perf_event_header__bswap(struct perf_event_header *self)
+{
+ self->type = bswap_32(self->type);
+ self->misc = bswap_16(self->misc);
+ self->size = bswap_16(self->size);
+}
+
+struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
+{
+ return machine__findnew_thread(&session->host_machine, pid);
+}
+
+static struct thread *perf_session__register_idle_thread(struct perf_session *self)
+{
+ struct thread *thread = perf_session__findnew(self, 0);
+
+ if (thread == NULL || thread__set_comm(thread, "swapper")) {
+ pr_err("problem inserting idle task.\n");
+ thread = NULL;
+ }
+
+ return thread;
+}
+
+static void perf_session__warn_about_errors(const struct perf_session *session,
+ const struct perf_tool *tool)
+{
+ if (tool->lost == perf_event__process_lost &&
+ session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
+ ui__warning("Processed %d events and lost %d chunks!\n\n"
+ "Check IO/CPU overload!\n\n",
+ session->hists.stats.nr_events[0],
+ session->hists.stats.nr_events[PERF_RECORD_LOST]);
+ }
+
+ if (session->hists.stats.nr_unknown_events != 0) {
+ ui__warning("Found %u unknown events!\n\n"
+ "Is this an older tool processing a perf.data "
+ "file generated by a more recent tool?\n\n"
+ "If that is not the case, consider "
+ "reporting to linux-kernel@vger.kernel.org.\n\n",
+ session->hists.stats.nr_unknown_events);
+ }
+
+ if (session->hists.stats.nr_unknown_id != 0) {
+ ui__warning("%u samples with id not present in the header\n",
+ session->hists.stats.nr_unknown_id);
+ }
+
+ if (session->hists.stats.nr_invalid_chains != 0) {
+ ui__warning("Found invalid callchains!\n\n"
+ "%u out of %u events were discarded for this reason.\n\n"
+ "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
+ session->hists.stats.nr_invalid_chains,
+ session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
+ }
+
+ if (session->hists.stats.nr_unprocessable_samples != 0) {
+ ui__warning("%u unprocessable samples recorded.\n"
+ "Do you have a KVM guest running and not using 'perf kvm'?\n",
+ session->hists.stats.nr_unprocessable_samples);
+ }
+}
+
+#define session_done() (*(volatile int *)(&session_done))
+volatile int session_done;
+
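+/*
+ * Live/pipe mode: read one event header at a time from the pipe, then the
+ * remaining payload, and push each event through the normal processing path
+ * until the stream ends or session_done() is set.
+ */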
+static int __perf_session__process_pipe_events(struct perf_session *self,
+ struct perf_tool *tool)
+{
+ union perf_event event;
+ uint32_t size;
+ int skip = 0;
+ u64 head;
+ int err;
+ void *p;
+
+ perf_tool__fill_defaults(tool);
+
+ head = 0;
+more:
+ err = readn(self->fd, &event, sizeof(struct perf_event_header));
+ if (err <= 0) {
+ if (err == 0)
+ goto done;
+
+ pr_err("failed to read event header\n");
+ goto out_err;
+ }
+
+ if (self->header.needs_swap)
+ perf_event_header__bswap(&event.header);
+
+ size = event.header.size;
+ if (size == 0)
+ size = 8;
+
+ p = &event;
+ p += sizeof(struct perf_event_header);
+
+ if (size - sizeof(struct perf_event_header)) {
+ err = readn(self->fd, p, size - sizeof(struct perf_event_header));
+ if (err <= 0) {
+ if (err == 0) {
+ pr_err("unexpected end of event stream\n");
+ goto done;
+ }
+
+ pr_err("failed to read event data\n");
+ goto out_err;
+ }
+ }
+
+ if ((skip = perf_session__process_event(self, &event, tool, head)) < 0) {
+ dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
+ head, event.header.size, event.header.type);
+ /*
+ * Assume we lost track of the stream, check alignment, and
+ * increment a single u64 in the hope of catching on again 'soon'.
+ */
+ if (unlikely(head & 7))
+ head &= ~7ULL;
+
+ size = 8;
+ }
+
+ head += size;
+
+ if (skip > 0)
+ head += skip;
+
+ if (!session_done())
+ goto more;
+done:
+ err = 0;
+out_err:
+ perf_session__warn_about_errors(self, tool);
+ perf_session_free_sample_buffers(self);
+ return err;
+}
+
+static union perf_event *
+fetch_mmaped_event(struct perf_session *session,
+ u64 head, size_t mmap_size, char *buf)
+{
+ union perf_event *event;
+
+ /*
+ * Ensure we have enough space remaining to read
+ * the size of the event in the headers.
+ */
+ if (head + sizeof(event->header) > mmap_size)
+ return NULL;
+
+ event = (union perf_event *)(buf + head);
+
+ if (session->header.needs_swap)
+ perf_event_header__bswap(&event->header);
+
+ if (head + event->header.size > mmap_size)
+ return NULL;
+
+ return event;
+}
+
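+/*
+ * File mode: mmap the perf.data file in session->mmap_window sized chunks
+ * and walk the events in each chunk, remapping whenever the next event
+ * would extend past the end of the current window.
+ */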
+int __perf_session__process_events(struct perf_session *session,
+ u64 data_offset, u64 data_size,
+ u64 file_size, struct perf_tool *tool)
+{
+ u64 head, page_offset, file_offset, file_pos, progress_next;
+ int err, mmap_prot, mmap_flags, map_idx = 0;
+ size_t page_size, mmap_size;
+ char *buf, *mmaps[8];
+ union perf_event *event;
+ uint32_t size;
+
+ perf_tool__fill_defaults(tool);
+
+ page_size = sysconf(_SC_PAGESIZE);
+
+ page_offset = page_size * (data_offset / page_size);
+ file_offset = page_offset;
+ head = data_offset - page_offset;
+
+ if (data_offset + data_size < file_size)
+ file_size = data_offset + data_size;
+
+ progress_next = file_size / 16;
+
+ mmap_size = session->mmap_window;
+ if (mmap_size > file_size)
+ mmap_size = file_size;
+
+ memset(mmaps, 0, sizeof(mmaps));
+
+ mmap_prot = PROT_READ;
+ mmap_flags = MAP_SHARED;
+
+ if (session->header.needs_swap) {
+ mmap_prot |= PROT_WRITE;
+ mmap_flags = MAP_PRIVATE;
+ }
+remap:
+ buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
+ file_offset);
+ if (buf == MAP_FAILED) {
+ pr_err("failed to mmap file\n");
+ err = -errno;
+ goto out_err;
+ }
+ mmaps[map_idx] = buf;
+ map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
+ file_pos = file_offset + head;
+
+more:
+ event = fetch_mmaped_event(session, head, mmap_size, buf);
+ if (!event) {
+ if (mmaps[map_idx]) {
+ munmap(mmaps[map_idx], mmap_size);
+ mmaps[map_idx] = NULL;
+ }
+
+ page_offset = page_size * (head / page_size);
+ file_offset += page_offset;
+ head -= page_offset;
+ goto remap;
+ }
+
+ size = event->header.size;
+
+ if (size == 0 ||
+ perf_session__process_event(session, event, tool, file_pos) < 0) {
+ dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
+ file_offset + head, event->header.size,
+ event->header.type);
+ /*
+ * Assume we lost track of the stream, check alignment, and
+ * increment a single u64 in the hope of catching on again 'soon'.
+ */
+ if (unlikely(head & 7))
+ head &= ~7ULL;
+
+ size = 8;
+ }
+
+ head += size;
+ file_pos += size;
+
+ if (file_pos >= progress_next) {
+ progress_next += file_size / 16;
+ ui_progress__update(file_pos, file_size,
+ "Processing events...");
+ }
+
+ if (file_pos < file_size)
+ goto more;
+
+ err = 0;
+ /* do the final flush for ordered samples */
+ session->ordered_samples.next_flush = ULLONG_MAX;
+ flush_sample_queue(session, tool);
+out_err:
+ perf_session__warn_about_errors(session, tool);
+ perf_session_free_sample_buffers(session);
+ return err;
+}
+
+int perf_session__process_events(struct perf_session *self,
+ struct perf_tool *tool)
+{
+ int err;
+
+ if (perf_session__register_idle_thread(self) == NULL)
+ return -ENOMEM;
+
+ if (!self->fd_pipe)
+ err = __perf_session__process_events(self,
+ self->header.data_offset,
+ self->header.data_size,
+ self->size, tool);
+ else
+ err = __perf_session__process_pipe_events(self, tool);
+
+ return err;
+}
+
+bool perf_session__has_traces(struct perf_session *self, const char *msg)
+{
+ if (!(self->sample_type & PERF_SAMPLE_RAW)) {
+ pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
+ return false;
+ }
+
+ return true;
+}
+
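+/*
+ * Record the kallsyms reference/relocation symbol, name (truncated at the
+ * first ']', if any) and address, on the kmap of every kernel map type.
+ */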
+int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
+ const char *symbol_name, u64 addr)
+{
+ char *bracket;
+ enum map_type i;
+ struct ref_reloc_sym *ref;
+
+ ref = zalloc(sizeof(struct ref_reloc_sym));
+ if (ref == NULL)
+ return -ENOMEM;
+
+ ref->name = strdup(symbol_name);
+ if (ref->name == NULL) {
+ free(ref);
+ return -ENOMEM;
+ }
+
+ bracket = strchr(ref->name, ']');
+ if (bracket)
+ *bracket = '\0';
+
+ ref->addr = addr;
+
+ for (i = 0; i < MAP__NR_TYPES; ++i) {
+ struct kmap *kmap = map__kmap(maps[i]);
+ kmap->ref_reloc_sym = ref;
+ }
+
+ return 0;
+}
+
+size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
+{
+ return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
+ __dsos__fprintf(&self->host_machine.user_dsos, fp) +
+ machines__fprintf_dsos(&self->machines, fp);
+}
+
+size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
+ bool with_hits)
+{
+ size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
+ return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
+}
+
+size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
+{
+ struct perf_evsel *pos;
+ size_t ret = fprintf(fp, "Aggregated stats:\n");
+
+ ret += hists__fprintf_nr_events(&session->hists, fp);
+
+ list_for_each_entry(pos, &session->evlist->entries, node) {
+ ret += fprintf(fp, "%s stats:\n", event_name(pos));
+ ret += hists__fprintf_nr_events(&pos->hists, fp);
+ }
+
+ return ret;
+}
+
+size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
+{
+ /*
+ * FIXME: Here we have to actually print all the machines in this
+ * session, not just the host...
+ */
+ return machine__fprintf(&session->host_machine, fp);
+}
+
+void perf_session__remove_thread(struct perf_session *session,
+ struct thread *th)
+{
+ /*
+ * FIXME: This one makes no sense, we need to remove the thread from
+ * the machine it belongs to, perf_session can have many machines, so
+ * doing it always on ->host_machine is wrong. Fix when auditing all
+ * the 'perf kvm' code.
+ */
+ machine__remove_thread(&session->host_machine, th);
+}
+
+struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
+ unsigned int type)
+{
+ struct perf_evsel *pos;
+
+ list_for_each_entry(pos, &session->evlist->entries, node) {
+ if (pos->attr.type == type)
+ return pos;
+ }
+ return NULL;
+}
+
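+/*
+ * Print the resolved IP for a sample, or the whole callchain when callchains
+ * are in use, optionally decorated with symbol names and DSO names.
+ */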
+void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
+ struct machine *machine, struct perf_evsel *evsel,
+ int print_sym, int print_dso, int print_symoffset)
+{
+ struct addr_location al;
+ struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
+ struct callchain_cursor_node *node;
+
+ if (perf_event__preprocess_sample(event, machine, &al, sample,
+ NULL) < 0) {
+ error("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return;
+ }
+
+ if (symbol_conf.use_callchain && sample->callchain) {
+
+ if (machine__resolve_callchain(machine, evsel, al.thread,
+ sample->callchain, NULL) != 0) {
+ if (verbose)
+ error("Failed to resolve callchain. Skipping\n");
+ return;
+ }
+ callchain_cursor_commit(cursor);
+
+ while (1) {
+ node = callchain_cursor_current(cursor);
+ if (!node)
+ break;
+
+ printf("\t%16" PRIx64, node->ip);
+ if (print_sym) {
+ printf(" ");
+ symbol__fprintf_symname(node->sym, stdout);
+ }
+ if (print_dso) {
+ printf(" (");
+ map__fprintf_dsoname(al.map, stdout);
+ printf(")");
+ }
+ printf("\n");
+
+ callchain_cursor_advance(cursor);
+ }
+
+ } else {
+ printf("%16" PRIx64, sample->ip);
+ if (print_sym) {
+ printf(" ");
+ if (print_symoffset)
+ symbol__fprintf_symname_offs(al.sym, &al,
+ stdout);
+ else
+ symbol__fprintf_symname(al.sym, stdout);
+ }
+
+ if (print_dso) {
+ printf(" (");
+ map__fprintf_dsoname(al.map, stdout);
+ printf(")");
+ }
+ }
+}
+
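+/*
+ * Build a CPU bitmap from a user supplied cpu list, after verifying that the
+ * recorded events actually carry PERF_SAMPLE_CPU information and that every
+ * requested CPU fits below MAX_NR_CPUS.
+ */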
+int perf_session__cpu_bitmap(struct perf_session *session,
+ const char *cpu_list, unsigned long *cpu_bitmap)
+{
+ int i;
+ struct cpu_map *map;
+
+ for (i = 0; i < PERF_TYPE_MAX; ++i) {
+ struct perf_evsel *evsel;
+
+ evsel = perf_session__find_first_evtype(session, i);
+ if (!evsel)
+ continue;
+
+ if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
+ pr_err("File does not contain CPU events. "
+ "Remove -c option to proceed.\n");
+ return -1;
+ }
+ }
+
+ map = cpu_map__new(cpu_list);
+ if (map == NULL) {
+ pr_err("Invalid cpu_list\n");
+ return -1;
+ }
+
+ for (i = 0; i < map->nr; i++) {
+ int cpu = map->map[i];
+
+ if (cpu >= MAX_NR_CPUS) {
+ pr_err("Requested CPU %d too large. "
+ "Consider raising MAX_NR_CPUS\n", cpu);
+ return -1;
+ }
+
+ set_bit(cpu, cpu_bitmap);
+ }
+
+ return 0;
+}
+
+void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
+ bool full)
+{
+ struct stat st;
+ int ret;
+
+ if (session == NULL || fp == NULL)
+ return;
+
+ ret = fstat(session->fd, &st);
+ if (ret == -1)
+ return;
+
+ fprintf(fp, "# ========\n");
+ fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
+ perf_header__fprintf_info(session, fp, full);
+ fprintf(fp, "# ========\n#\n");
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/session.h b/ANDROID_3.4.5/tools/perf/util/session.h
new file mode 100644
index 00000000..7a5434c0
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/session.h
@@ -0,0 +1,160 @@
+#ifndef __PERF_SESSION_H
+#define __PERF_SESSION_H
+
+#include "hist.h"
+#include "event.h"
+#include "header.h"
+#include "symbol.h"
+#include "thread.h"
+#include <linux/rbtree.h>
+#include "../../../include/linux/perf_event.h"
+
+struct sample_queue;
+struct ip_callchain;
+struct thread;
+
+struct ordered_samples {
+ u64 last_flush;
+ u64 next_flush;
+ u64 max_timestamp;
+ struct list_head samples;
+ struct list_head sample_cache;
+ struct list_head to_free;
+ struct sample_queue *sample_buffer;
+ struct sample_queue *last_sample;
+ int sample_buffer_idx;
+ unsigned int nr_samples;
+};
+
+struct perf_session {
+ struct perf_header header;
+ unsigned long size;
+ unsigned long mmap_window;
+ struct machine host_machine;
+ struct rb_root machines;
+ struct perf_evlist *evlist;
+ /*
+ * FIXME: Need to split this up further, we need global
+ * stats + per event stats. 'perf diff' also needs
+ * to properly support multiple events in a single
+ * perf.data file.
+ */
+ struct hists hists;
+ u64 sample_type;
+ int sample_size;
+ int fd;
+ bool fd_pipe;
+ bool repipe;
+ bool sample_id_all;
+ u16 id_hdr_size;
+ int cwdlen;
+ char *cwd;
+ struct ordered_samples ordered_samples;
+ char filename[1];
+};
+
+struct perf_tool;
+
+struct perf_session *perf_session__new(const char *filename, int mode,
+ bool force, bool repipe,
+ struct perf_tool *tool);
+void perf_session__delete(struct perf_session *self);
+
+void perf_event_header__bswap(struct perf_event_header *self);
+
+int __perf_session__process_events(struct perf_session *self,
+ u64 data_offset, u64 data_size, u64 size,
+ struct perf_tool *tool);
+int perf_session__process_events(struct perf_session *self,
+ struct perf_tool *tool);
+
+int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel *evsel,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent);
+
+struct branch_info *machine__resolve_bstack(struct machine *self,
+ struct thread *thread,
+ struct branch_stack *bs);
+
+bool perf_session__has_traces(struct perf_session *self, const char *msg);
+
+void mem_bswap_64(void *src, int byte_size);
+void perf_event__attr_swap(struct perf_event_attr *attr);
+
+int perf_session__create_kernel_maps(struct perf_session *self);
+
+void perf_session__update_sample_type(struct perf_session *self);
+void perf_session__remove_thread(struct perf_session *self, struct thread *th);
+
+static inline
+struct machine *perf_session__find_host_machine(struct perf_session *self)
+{
+ return &self->host_machine;
+}
+
+static inline
+struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid)
+{
+ if (pid == HOST_KERNEL_ID)
+ return &self->host_machine;
+ return machines__find(&self->machines, pid);
+}
+
+static inline
+struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t pid)
+{
+ if (pid == HOST_KERNEL_ID)
+ return &self->host_machine;
+ return machines__findnew(&self->machines, pid);
+}
+
+static inline
+void perf_session__process_machines(struct perf_session *self,
+ struct perf_tool *tool,
+ machine__process_t process)
+{
+ process(&self->host_machine, tool);
+ return machines__process(&self->machines, process, tool);
+}
+
+struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
+size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
+
+size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
+
+size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
+ FILE *fp, bool with_hits);
+
+size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
+
+static inline int perf_session__parse_sample(struct perf_session *session,
+ const union perf_event *event,
+ struct perf_sample *sample)
+{
+ return perf_event__parse_sample(event, session->sample_type,
+ session->sample_size,
+ session->sample_id_all, sample,
+ session->header.needs_swap);
+}
+
+static inline int perf_session__synthesize_sample(struct perf_session *session,
+ union perf_event *event,
+ const struct perf_sample *sample)
+{
+ return perf_event__synthesize_sample(event, session->sample_type,
+ sample, session->header.needs_swap);
+}
+
+struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
+ unsigned int type);
+
+void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
+ struct machine *machine, struct perf_evsel *evsel,
+ int print_sym, int print_dso, int print_symoffset);
+
+int perf_session__cpu_bitmap(struct perf_session *session,
+ const char *cpu_list, unsigned long *cpu_bitmap);
+
+void perf_session__fprintf_info(struct perf_session *s, FILE *fp, bool full);
+#endif /* __PERF_SESSION_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/setup.py b/ANDROID_3.4.5/tools/perf/util/setup.py
new file mode 100644
index 00000000..d0f9f29c
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/setup.py
@@ -0,0 +1,44 @@
+#!/usr/bin/python2
+
+from distutils.core import setup, Extension
+from os import getenv
+
+from distutils.command.build_ext import build_ext as _build_ext
+from distutils.command.install_lib import install_lib as _install_lib
+
+class build_ext(_build_ext):
+ def finalize_options(self):
+ _build_ext.finalize_options(self)
+ self.build_lib = build_lib
+ self.build_temp = build_tmp
+
+class install_lib(_install_lib):
+ def finalize_options(self):
+ _install_lib.finalize_options(self)
+ self.build_dir = build_lib
+
+
+cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
+cflags += getenv('CFLAGS', '').split()
+
+build_lib = getenv('PYTHON_EXTBUILD_LIB')
+build_tmp = getenv('PYTHON_EXTBUILD_TMP')
+
+ext_sources = [f.strip() for f in file('util/python-ext-sources')
+ if len(f.strip()) > 0 and f[0] != '#']
+
+perf = Extension('perf',
+ sources = ext_sources,
+ include_dirs = ['util/include'],
+ extra_compile_args = cflags,
+ )
+
+setup(name='perf',
+ version='0.1',
+ description='Interface with the Linux profiling infrastructure',
+ author='Arnaldo Carvalho de Melo',
+ author_email='acme@redhat.com',
+ license='GPLv2',
+ url='http://perf.wiki.kernel.org',
+ ext_modules=[perf],
+ cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
diff --git a/ANDROID_3.4.5/tools/perf/util/sigchain.c b/ANDROID_3.4.5/tools/perf/util/sigchain.c
new file mode 100644
index 00000000..ba785e9b
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/sigchain.c
@@ -0,0 +1,52 @@
+#include "sigchain.h"
+#include "cache.h"
+
+#define SIGCHAIN_MAX_SIGNALS 32
+
+struct sigchain_signal {
+ sigchain_fun *old;
+ int n;
+ int alloc;
+};
+static struct sigchain_signal signals[SIGCHAIN_MAX_SIGNALS];
+
+static void check_signum(int sig)
+{
+ if (sig < 1 || sig >= SIGCHAIN_MAX_SIGNALS)
+ die("BUG: signal out of range: %d", sig);
+}
+
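+/*
+ * Install @f as the handler for @sig, saving the previous handler so that
+ * sigchain_pop() can restore it later.
+ */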
+static int sigchain_push(int sig, sigchain_fun f)
+{
+ struct sigchain_signal *s = signals + sig;
+ check_signum(sig);
+
+ ALLOC_GROW(s->old, s->n + 1, s->alloc);
+ s->old[s->n] = signal(sig, f);
+ if (s->old[s->n] == SIG_ERR)
+ return -1;
+ s->n++;
+ return 0;
+}
+
+int sigchain_pop(int sig)
+{
+ struct sigchain_signal *s = signals + sig;
+ check_signum(sig);
+ if (s->n < 1)
+ return 0;
+
+ if (signal(sig, s->old[s->n - 1]) == SIG_ERR)
+ return -1;
+ s->n--;
+ return 0;
+}
+
+void sigchain_push_common(sigchain_fun f)
+{
+ sigchain_push(SIGINT, f);
+ sigchain_push(SIGHUP, f);
+ sigchain_push(SIGTERM, f);
+ sigchain_push(SIGQUIT, f);
+ sigchain_push(SIGPIPE, f);
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/sigchain.h b/ANDROID_3.4.5/tools/perf/util/sigchain.h
new file mode 100644
index 00000000..959d64eb
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/sigchain.h
@@ -0,0 +1,10 @@
+#ifndef __PERF_SIGCHAIN_H
+#define __PERF_SIGCHAIN_H
+
+typedef void (*sigchain_fun)(int);
+
+int sigchain_pop(int sig);
+
+void sigchain_push_common(sigchain_fun f);
+
+#endif /* __PERF_SIGCHAIN_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/sort.c b/ANDROID_3.4.5/tools/perf/util/sort.c
new file mode 100644
index 00000000..a2723743
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/sort.c
@@ -0,0 +1,528 @@
+#include "sort.h"
+#include "hist.h"
+
+regex_t parent_regex;
+const char default_parent_pattern[] = "^sys_|^do_page_fault";
+const char *parent_pattern = default_parent_pattern;
+const char default_sort_order[] = "comm,dso,symbol";
+const char *sort_order = default_sort_order;
+int sort__need_collapse = 0;
+int sort__has_parent = 0;
+int sort__branch_mode = -1; /* -1 means not set */
+
+enum sort_type sort__first_dimension;
+
+char *field_sep;
+
+LIST_HEAD(hist_entry__sort_list);
+
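+/*
+ * snprintf() wrapper: when field_sep is set, any occurrence of the separator
+ * character in the formatted output is replaced with '.' so that column
+ * separators stay unambiguous.
+ */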
+static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
+{
+ int n;
+ va_list ap;
+
+ va_start(ap, fmt);
+ n = vsnprintf(bf, size, fmt, ap);
+ if (field_sep && n > 0) {
+ char *sep = bf;
+
+ while (1) {
+ sep = strchr(sep, *field_sep);
+ if (sep == NULL)
+ break;
+ *sep = '.';
+ }
+ }
+ va_end(ap);
+
+ if (n >= (int)size)
+ return size - 1;
+ return n;
+}
+
+static int64_t cmp_null(void *l, void *r)
+{
+ if (!l && !r)
+ return 0;
+ else if (!l)
+ return -1;
+ else
+ return 1;
+}
+
+/* --sort pid */
+
+static int64_t
+sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->thread->pid - left->thread->pid;
+}
+
+static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%*s:%5d", width,
+ self->thread->comm ?: "", self->thread->pid);
+}
+
+struct sort_entry sort_thread = {
+ .se_header = "Command: Pid",
+ .se_cmp = sort__thread_cmp,
+ .se_snprintf = hist_entry__thread_snprintf,
+ .se_width_idx = HISTC_THREAD,
+};
+
+/* --sort comm */
+
+static int64_t
+sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->thread->pid - left->thread->pid;
+}
+
+static int64_t
+sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
+{
+ char *comm_l = left->thread->comm;
+ char *comm_r = right->thread->comm;
+
+ if (!comm_l || !comm_r)
+ return cmp_null(comm_l, comm_r);
+
+ return strcmp(comm_l, comm_r);
+}
+
+static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%*s", width, self->thread->comm);
+}
+
+static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
+{
+ struct dso *dso_l = map_l ? map_l->dso : NULL;
+ struct dso *dso_r = map_r ? map_r->dso : NULL;
+ const char *dso_name_l, *dso_name_r;
+
+ if (!dso_l || !dso_r)
+ return cmp_null(dso_l, dso_r);
+
+ if (verbose) {
+ dso_name_l = dso_l->long_name;
+ dso_name_r = dso_r->long_name;
+ } else {
+ dso_name_l = dso_l->short_name;
+ dso_name_r = dso_r->short_name;
+ }
+
+ return strcmp(dso_name_l, dso_name_r);
+}
+
+struct sort_entry sort_comm = {
+ .se_header = "Command",
+ .se_cmp = sort__comm_cmp,
+ .se_collapse = sort__comm_collapse,
+ .se_snprintf = hist_entry__comm_snprintf,
+ .se_width_idx = HISTC_COMM,
+};
+
+/* --sort dso */
+
+static int64_t
+sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return _sort__dso_cmp(left->ms.map, right->ms.map);
+}
+
+
+static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r,
+ u64 ip_l, u64 ip_r)
+{
+ if (!sym_l || !sym_r)
+ return cmp_null(sym_l, sym_r);
+
+ if (sym_l == sym_r)
+ return 0;
+
+ if (sym_l)
+ ip_l = sym_l->start;
+ if (sym_r)
+ ip_r = sym_r->start;
+
+ return (int64_t)(ip_r - ip_l);
+}
+
+static int _hist_entry__dso_snprintf(struct map *map, char *bf,
+ size_t size, unsigned int width)
+{
+ if (map && map->dso) {
+ const char *dso_name = !verbose ? map->dso->short_name :
+ map->dso->long_name;
+ return repsep_snprintf(bf, size, "%-*s", width, dso_name);
+ }
+
+ return repsep_snprintf(bf, size, "%-*s", width, "[unknown]");
+}
+
+static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
+{
+ return _hist_entry__dso_snprintf(self->ms.map, bf, size, width);
+}
+
+static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
+ u64 ip, char level, char *bf, size_t size,
+ unsigned int width __used)
+{
+ size_t ret = 0;
+
+ if (verbose) {
+ char o = map ? dso__symtab_origin(map->dso) : '!';
+ ret += repsep_snprintf(bf, size, "%-#*llx %c ",
+ BITS_PER_LONG / 4, ip, o);
+ }
+
+ ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
+ if (sym)
+ ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
+ width - ret,
+ sym->name);
+ else {
+ size_t len = BITS_PER_LONG / 4;
+ ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
+ len, ip);
+ ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
+ width - ret, "");
+ }
+
+ return ret;
+}
+
+
+struct sort_entry sort_dso = {
+ .se_header = "Shared Object",
+ .se_cmp = sort__dso_cmp,
+ .se_snprintf = hist_entry__dso_snprintf,
+ .se_width_idx = HISTC_DSO,
+};
+
+static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width __used)
+{
+ return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip,
+ self->level, bf, size, width);
+}
+
+/* --sort symbol */
+static int64_t
+sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ u64 ip_l, ip_r;
+
+ if (!left->ms.sym && !right->ms.sym)
+ return right->level - left->level;
+
+ if (!left->ms.sym || !right->ms.sym)
+ return cmp_null(left->ms.sym, right->ms.sym);
+
+ if (left->ms.sym == right->ms.sym)
+ return 0;
+
+ ip_l = left->ms.sym->start;
+ ip_r = right->ms.sym->start;
+
+ return _sort__sym_cmp(left->ms.sym, right->ms.sym, ip_l, ip_r);
+}
+
+struct sort_entry sort_sym = {
+ .se_header = "Symbol",
+ .se_cmp = sort__sym_cmp,
+ .se_snprintf = hist_entry__sym_snprintf,
+ .se_width_idx = HISTC_SYMBOL,
+};
+
+/* --sort parent */
+
+static int64_t
+sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct symbol *sym_l = left->parent;
+ struct symbol *sym_r = right->parent;
+
+ if (!sym_l || !sym_r)
+ return cmp_null(sym_l, sym_r);
+
+ return strcmp(sym_l->name, sym_r->name);
+}
+
+static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%-*s", width,
+ self->parent ? self->parent->name : "[other]");
+}
+
+struct sort_entry sort_parent = {
+ .se_header = "Parent symbol",
+ .se_cmp = sort__parent_cmp,
+ .se_snprintf = hist_entry__parent_snprintf,
+ .se_width_idx = HISTC_PARENT,
+};
+
+/* --sort cpu */
+
+static int64_t
+sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->cpu - left->cpu;
+}
+
+static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%-*d", width, self->cpu);
+}
+
+struct sort_entry sort_cpu = {
+ .se_header = "CPU",
+ .se_cmp = sort__cpu_cmp,
+ .se_snprintf = hist_entry__cpu_snprintf,
+ .se_width_idx = HISTC_CPU,
+};
+
+static int64_t
+sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return _sort__dso_cmp(left->branch_info->from.map,
+ right->branch_info->from.map);
+}
+
+static int hist_entry__dso_from_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
+{
+ return _hist_entry__dso_snprintf(self->branch_info->from.map,
+ bf, size, width);
+}
+
+struct sort_entry sort_dso_from = {
+ .se_header = "Source Shared Object",
+ .se_cmp = sort__dso_from_cmp,
+ .se_snprintf = hist_entry__dso_from_snprintf,
+ .se_width_idx = HISTC_DSO_FROM,
+};
+
+static int64_t
+sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return _sort__dso_cmp(left->branch_info->to.map,
+ right->branch_info->to.map);
+}
+
+static int hist_entry__dso_to_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
+{
+ return _hist_entry__dso_snprintf(self->branch_info->to.map,
+ bf, size, width);
+}
+
+static int64_t
+sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct addr_map_symbol *from_l = &left->branch_info->from;
+ struct addr_map_symbol *from_r = &right->branch_info->from;
+
+ if (!from_l->sym && !from_r->sym)
+ return right->level - left->level;
+
+ return _sort__sym_cmp(from_l->sym, from_r->sym, from_l->addr,
+ from_r->addr);
+}
+
+static int64_t
+sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct addr_map_symbol *to_l = &left->branch_info->to;
+ struct addr_map_symbol *to_r = &right->branch_info->to;
+
+ if (!to_l->sym && !to_r->sym)
+ return right->level - left->level;
+
+ return _sort__sym_cmp(to_l->sym, to_r->sym, to_l->addr, to_r->addr);
+}
+
+static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width __used)
+{
+ struct addr_map_symbol *from = &self->branch_info->from;
+ return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
+ self->level, bf, size, width);
+
+}
+
+static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width __used)
+{
+ struct addr_map_symbol *to = &self->branch_info->to;
+ return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
+ self->level, bf, size, width);
+
+}
+
+struct sort_entry sort_dso_to = {
+ .se_header = "Target Shared Object",
+ .se_cmp = sort__dso_to_cmp,
+ .se_snprintf = hist_entry__dso_to_snprintf,
+ .se_width_idx = HISTC_DSO_TO,
+};
+
+struct sort_entry sort_sym_from = {
+ .se_header = "Source Symbol",
+ .se_cmp = sort__sym_from_cmp,
+ .se_snprintf = hist_entry__sym_from_snprintf,
+ .se_width_idx = HISTC_SYMBOL_FROM,
+};
+
+struct sort_entry sort_sym_to = {
+ .se_header = "Target Symbol",
+ .se_cmp = sort__sym_to_cmp,
+ .se_snprintf = hist_entry__sym_to_snprintf,
+ .se_width_idx = HISTC_SYMBOL_TO,
+};
+
+static int64_t
+sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ const unsigned char mp = left->branch_info->flags.mispred !=
+ right->branch_info->flags.mispred;
+ const unsigned char p = left->branch_info->flags.predicted !=
+ right->branch_info->flags.predicted;
+
+ return mp || p;
+}
+
+static int hist_entry__mispredict_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
+{
+ static const char *out = "N/A";
+
+ if (self->branch_info->flags.predicted)
+ out = "N";
+ else if (self->branch_info->flags.mispred)
+ out = "Y";
+
+ return repsep_snprintf(bf, size, "%-*s", width, out);
+}
+
+struct sort_entry sort_mispredict = {
+ .se_header = "Branch Mispredicted",
+ .se_cmp = sort__mispredict_cmp,
+ .se_snprintf = hist_entry__mispredict_snprintf,
+ .se_width_idx = HISTC_MISPREDICT,
+};
+
+struct sort_dimension {
+ const char *name;
+ struct sort_entry *entry;
+ int taken;
+};
+
+#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
+
+static struct sort_dimension sort_dimensions[] = {
+ DIM(SORT_PID, "pid", sort_thread),
+ DIM(SORT_COMM, "comm", sort_comm),
+ DIM(SORT_DSO, "dso", sort_dso),
+ DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
+ DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
+ DIM(SORT_SYM, "symbol", sort_sym),
+ DIM(SORT_SYM_TO, "symbol_from", sort_sym_from),
+ DIM(SORT_SYM_FROM, "symbol_to", sort_sym_to),
+ DIM(SORT_PARENT, "parent", sort_parent),
+ DIM(SORT_CPU, "cpu", sort_cpu),
+ DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
+};
+
+int sort_dimension__add(const char *tok)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
+ struct sort_dimension *sd = &sort_dimensions[i];
+
+ if (strncasecmp(tok, sd->name, strlen(tok)))
+ continue;
+ if (sd->entry == &sort_parent) {
+ int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
+ if (ret) {
+ char err[BUFSIZ];
+
+ regerror(ret, &parent_regex, err, sizeof(err));
+ pr_err("Invalid regex: %s\n%s", parent_pattern, err);
+ return -EINVAL;
+ }
+ sort__has_parent = 1;
+ }
+
+ if (sd->taken)
+ return 0;
+
+ if (sd->entry->se_collapse)
+ sort__need_collapse = 1;
+
+ if (list_empty(&hist_entry__sort_list)) {
+ if (!strcmp(sd->name, "pid"))
+ sort__first_dimension = SORT_PID;
+ else if (!strcmp(sd->name, "comm"))
+ sort__first_dimension = SORT_COMM;
+ else if (!strcmp(sd->name, "dso"))
+ sort__first_dimension = SORT_DSO;
+ else if (!strcmp(sd->name, "symbol"))
+ sort__first_dimension = SORT_SYM;
+ else if (!strcmp(sd->name, "parent"))
+ sort__first_dimension = SORT_PARENT;
+ else if (!strcmp(sd->name, "cpu"))
+ sort__first_dimension = SORT_CPU;
+ else if (!strcmp(sd->name, "symbol_from"))
+ sort__first_dimension = SORT_SYM_FROM;
+ else if (!strcmp(sd->name, "symbol_to"))
+ sort__first_dimension = SORT_SYM_TO;
+ else if (!strcmp(sd->name, "dso_from"))
+ sort__first_dimension = SORT_DSO_FROM;
+ else if (!strcmp(sd->name, "dso_to"))
+ sort__first_dimension = SORT_DSO_TO;
+ else if (!strcmp(sd->name, "mispredict"))
+ sort__first_dimension = SORT_MISPREDICT;
+ }
+
+ list_add_tail(&sd->entry->list, &hist_entry__sort_list);
+ sd->taken = 1;
+
+ return 0;
+ }
+ return -ESRCH;
+}
+
+void setup_sorting(const char * const usagestr[], const struct option *opts)
+{
+ char *tmp, *tok, *str = strdup(sort_order);
+
+ for (tok = strtok_r(str, ", ", &tmp);
+ tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ if (sort_dimension__add(tok) < 0) {
+ error("Unknown --sort key: `%s'", tok);
+ usage_with_options(usagestr, opts);
+ }
+ }
+
+ free(str);
+}
+
+void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
+ const char *list_name, FILE *fp)
+{
+ if (list && strlist__nr_entries(list) == 1) {
+ if (fp != NULL)
+ fprintf(fp, "# %s: %s\n", list_name,
+ strlist__entry(list, 0)->s);
+ self->elide = true;
+ }
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/sort.h b/ANDROID_3.4.5/tools/perf/util/sort.h
new file mode 100644
index 00000000..472aa5a6
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/sort.h
@@ -0,0 +1,123 @@
+#ifndef __PERF_SORT_H
+#define __PERF_SORT_H
+#include "../builtin.h"
+
+#include "util.h"
+
+#include "color.h"
+#include <linux/list.h>
+#include "cache.h"
+#include <linux/rbtree.h>
+#include "symbol.h"
+#include "string.h"
+#include "callchain.h"
+#include "strlist.h"
+#include "values.h"
+
+#include "../perf.h"
+#include "debug.h"
+#include "header.h"
+
+#include "parse-options.h"
+#include "parse-events.h"
+
+#include "thread.h"
+#include "sort.h"
+
+extern regex_t parent_regex;
+extern const char *sort_order;
+extern const char default_parent_pattern[];
+extern const char *parent_pattern;
+extern const char default_sort_order[];
+extern int sort__need_collapse;
+extern int sort__has_parent;
+extern int sort__branch_mode;
+extern char *field_sep;
+extern struct sort_entry sort_comm;
+extern struct sort_entry sort_dso;
+extern struct sort_entry sort_sym;
+extern struct sort_entry sort_parent;
+extern struct sort_entry sort_dso_from;
+extern struct sort_entry sort_dso_to;
+extern struct sort_entry sort_sym_from;
+extern struct sort_entry sort_sym_to;
+extern enum sort_type sort__first_dimension;
+
+/**
+ * struct hist_entry - histogram entry
+ *
+ * @row_offset - offset from the first callchain expanded to appear on screen
+ * @nr_rows - rows expanded in callchain, recalculated on folding/unfolding
+ */
+struct hist_entry {
+ struct rb_node rb_node_in;
+ struct rb_node rb_node;
+ u64 period;
+ u64 period_sys;
+ u64 period_us;
+ u64 period_guest_sys;
+ u64 period_guest_us;
+ struct map_symbol ms;
+ struct thread *thread;
+ u64 ip;
+ s32 cpu;
+ u32 nr_events;
+
+ /* XXX These two should move to some tree widget lib */
+ u16 row_offset;
+ u16 nr_rows;
+
+ bool init_have_children;
+ char level;
+ bool used;
+ u8 filtered;
+ struct symbol *parent;
+ union {
+ unsigned long position;
+ struct hist_entry *pair;
+ struct rb_root sorted_chain;
+ };
+ struct branch_info *branch_info;
+ struct callchain_root callchain[0];
+};
+
+enum sort_type {
+ SORT_PID,
+ SORT_COMM,
+ SORT_DSO,
+ SORT_SYM,
+ SORT_PARENT,
+ SORT_CPU,
+ SORT_DSO_FROM,
+ SORT_DSO_TO,
+ SORT_SYM_FROM,
+ SORT_SYM_TO,
+ SORT_MISPREDICT,
+};
+
+/*
+ * configurable sorting bits
+ */
+
+struct sort_entry {
+ struct list_head list;
+
+ const char *se_header;
+
+ int64_t (*se_cmp)(struct hist_entry *, struct hist_entry *);
+ int64_t (*se_collapse)(struct hist_entry *, struct hist_entry *);
+ int (*se_snprintf)(struct hist_entry *self, char *bf, size_t size,
+ unsigned int width);
+ u8 se_width_idx;
+ bool elide;
+};
+
+extern struct sort_entry sort_thread;
+extern struct list_head hist_entry__sort_list;
+
+void setup_sorting(const char * const usagestr[], const struct option *opts);
+extern int sort_dimension__add(const char *);
+void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
+ const char *list_name, FILE *fp);
+
+#endif /* __PERF_SORT_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/strbuf.c b/ANDROID_3.4.5/tools/perf/util/strbuf.c
new file mode 100644
index 00000000..2eeb51ba
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/strbuf.c
@@ -0,0 +1,134 @@
+#include "cache.h"
+#include <linux/kernel.h>
+
+int prefixcmp(const char *str, const char *prefix)
+{
+ for (; ; str++, prefix++)
+ if (!*prefix)
+ return 0;
+ else if (*str != *prefix)
+ return (unsigned char)*prefix - (unsigned char)*str;
+}
+
+/*
+ * Used as the default ->buf value, so that people can always assume
+ * buf is non NULL and ->buf is NUL terminated even for a freshly
+ * initialized strbuf.
+ */
+char strbuf_slopbuf[1];
+
+void strbuf_init(struct strbuf *sb, ssize_t hint)
+{
+ sb->alloc = sb->len = 0;
+ sb->buf = strbuf_slopbuf;
+ if (hint)
+ strbuf_grow(sb, hint);
+}
+
+void strbuf_release(struct strbuf *sb)
+{
+ if (sb->alloc) {
+ free(sb->buf);
+ strbuf_init(sb, 0);
+ }
+}
+
+char *strbuf_detach(struct strbuf *sb, size_t *sz)
+{
+ char *res = sb->alloc ? sb->buf : NULL;
+ if (sz)
+ *sz = sb->len;
+ strbuf_init(sb, 0);
+ return res;
+}
+
+void strbuf_grow(struct strbuf *sb, size_t extra)
+{
+ if (sb->len + extra + 1 <= sb->len)
+ die("you want to use way too much memory");
+ if (!sb->alloc)
+ sb->buf = NULL;
+ ALLOC_GROW(sb->buf, sb->len + extra + 1, sb->alloc);
+}
+
+static void strbuf_splice(struct strbuf *sb, size_t pos, size_t len,
+ const void *data, size_t dlen)
+{
+ if (pos + len < pos)
+ die("you want to use way too much memory");
+ if (pos > sb->len)
+ die("`pos' is too far after the end of the buffer");
+ if (pos + len > sb->len)
+ die("`pos + len' is too far after the end of the buffer");
+
+ if (dlen >= len)
+ strbuf_grow(sb, dlen - len);
+ memmove(sb->buf + pos + dlen,
+ sb->buf + pos + len,
+ sb->len - pos - len);
+ memcpy(sb->buf + pos, data, dlen);
+ strbuf_setlen(sb, sb->len + dlen - len);
+}
+
+void strbuf_remove(struct strbuf *sb, size_t pos, size_t len)
+{
+ strbuf_splice(sb, pos, len, NULL, 0);
+}
+
+void strbuf_add(struct strbuf *sb, const void *data, size_t len)
+{
+ strbuf_grow(sb, len);
+ memcpy(sb->buf + sb->len, data, len);
+ strbuf_setlen(sb, sb->len + len);
+}
+
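+/*
+ * Append a formatted string to the buffer, growing it and retrying the
+ * vscnprintf() once if the first attempt did not fit.
+ */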
+void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
+{
+ int len;
+ va_list ap;
+
+ if (!strbuf_avail(sb))
+ strbuf_grow(sb, 64);
+ va_start(ap, fmt);
+ len = vscnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+ va_end(ap);
+ if (len < 0)
+ die("your vscnprintf is broken");
+ if (len > strbuf_avail(sb)) {
+ strbuf_grow(sb, len);
+ va_start(ap, fmt);
+ len = vscnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+ va_end(ap);
+ if (len > strbuf_avail(sb)) {
+ die("this should not happen, your snprintf is broken");
+ }
+ }
+ strbuf_setlen(sb, sb->len + len);
+}
+
+ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint)
+{
+ size_t oldlen = sb->len;
+ size_t oldalloc = sb->alloc;
+
+ strbuf_grow(sb, hint ? hint : 8192);
+ for (;;) {
+ ssize_t cnt;
+
+ cnt = read(fd, sb->buf + sb->len, sb->alloc - sb->len - 1);
+ if (cnt < 0) {
+ if (oldalloc == 0)
+ strbuf_release(sb);
+ else
+ strbuf_setlen(sb, oldlen);
+ return -1;
+ }
+ if (!cnt)
+ break;
+ sb->len += cnt;
+ strbuf_grow(sb, 8192);
+ }
+
+ sb->buf[sb->len] = '\0';
+ return sb->len - oldlen;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/strbuf.h b/ANDROID_3.4.5/tools/perf/util/strbuf.h
new file mode 100644
index 00000000..436ac319
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/strbuf.h
@@ -0,0 +1,92 @@
+#ifndef __PERF_STRBUF_H
+#define __PERF_STRBUF_H
+
+/*
+ * Strbufs can be used in many ways: as a byte array, or to store arbitrarily
+ * long, overflow-safe strings.
+ *
+ * Strbufs have some invariants that are very important to keep in mind:
+ *
+ * 1. the ->buf member is always malloc-ed, hence strbufs can be used to
+ * build complex strings/buffers whose final size isn't easily known.
+ *
+ * It is NOT legal to copy the ->buf pointer away.
+ * `strbuf_detach' is the operation that detaches a buffer from its shell
+ * while keeping the shell valid with respect to its invariants.
+ *
+ * 2. the ->buf member is a byte array that has at least ->len + 1 bytes
+ * allocated. The extra byte is used to store a '\0', allowing the ->buf
+ * member to be a valid C-string. Every strbuf function ensures this
+ * invariant is preserved.
+ *
+ * Note that it is OK to "play" with the buffer directly if you work it
+ * that way:
+ *
+ * strbuf_grow(sb, SOME_SIZE);
+ * ... Here, the memory array starting at sb->buf, and of length
+ * ... strbuf_avail(sb) is all yours, and you are sure that
+ * ... strbuf_avail(sb) is at least SOME_SIZE.
+ * strbuf_setlen(sb, sb->len + SOME_OTHER_SIZE);
+ *
+ * Of course, SOME_OTHER_SIZE must be smaller than or equal to strbuf_avail(sb).
+ *
+ * Doing so is safe, though if it has to be done in many places, adding the
+ * missing API to the strbuf module is the way to go.
+ *
+ * XXX: do _not_ assume that the area that is yours is of size ->alloc - 1
+ * even if it's true in the current implementation. Alloc is essentially a
+ * "private" member that should not be messed with.
+ */
+
+#include <assert.h>
+
+extern char strbuf_slopbuf[];
+struct strbuf {
+ size_t alloc;
+ size_t len;
+ char *buf;
+};
+
+#define STRBUF_INIT { 0, 0, strbuf_slopbuf }
+
+/*----- strbuf life cycle -----*/
+extern void strbuf_init(struct strbuf *buf, ssize_t hint);
+extern void strbuf_release(struct strbuf *);
+extern char *strbuf_detach(struct strbuf *, size_t *);
+
+/*----- strbuf size related -----*/
+static inline ssize_t strbuf_avail(const struct strbuf *sb) {
+ return sb->alloc ? sb->alloc - sb->len - 1 : 0;
+}
+
+extern void strbuf_grow(struct strbuf *, size_t);
+
+static inline void strbuf_setlen(struct strbuf *sb, size_t len) {
+ if (!sb->alloc)
+ strbuf_grow(sb, 0);
+ assert(len < sb->alloc);
+ sb->len = len;
+ sb->buf[len] = '\0';
+}
+
+/*----- add data in your buffer -----*/
+static inline void strbuf_addch(struct strbuf *sb, int c) {
+ strbuf_grow(sb, 1);
+ sb->buf[sb->len++] = c;
+ sb->buf[sb->len] = '\0';
+}
+
+extern void strbuf_remove(struct strbuf *, size_t pos, size_t len);
+
+extern void strbuf_add(struct strbuf *, const void *, size_t);
+static inline void strbuf_addstr(struct strbuf *sb, const char *s) {
+ strbuf_add(sb, s, strlen(s));
+}
+
+__attribute__((format(printf,2,3)))
+extern void strbuf_addf(struct strbuf *sb, const char *fmt, ...);
+
+/* XXX: if read fails, any partial read is undone */
+extern ssize_t strbuf_read(struct strbuf *, int fd, ssize_t hint);
+
+#endif /* __PERF_STRBUF_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/strfilter.c b/ANDROID_3.4.5/tools/perf/util/strfilter.c
new file mode 100644
index 00000000..834c8ebf
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/strfilter.c
@@ -0,0 +1,199 @@
+#include "util.h"
+#include "string.h"
+#include "strfilter.h"
+
+/* Operators */
+static const char *OP_and = "&"; /* Logical AND */
+static const char *OP_or = "|"; /* Logical OR */
+static const char *OP_not = "!"; /* Logical NOT */
+
+#define is_operator(c) ((c) == '|' || (c) == '&' || (c) == '!')
+#define is_separator(c) (is_operator(c) || (c) == '(' || (c) == ')')
+
+static void strfilter_node__delete(struct strfilter_node *self)
+{
+ if (self) {
+ if (self->p && !is_operator(*self->p))
+ free((char *)self->p);
+ strfilter_node__delete(self->l);
+ strfilter_node__delete(self->r);
+ free(self);
+ }
+}
+
+void strfilter__delete(struct strfilter *self)
+{
+ if (self) {
+ strfilter_node__delete(self->root);
+ free(self);
+ }
+}
+
+static const char *get_token(const char *s, const char **e)
+{
+ const char *p;
+
+ while (isspace(*s)) /* Skip spaces */
+ s++;
+
+ if (*s == '\0') {
+ p = s;
+ goto end;
+ }
+
+ p = s + 1;
+ if (!is_separator(*s)) {
+ /* Search for the end of this token */
+retry:
+ while (*p && !is_separator(*p) && !isspace(*p))
+ p++;
+ /* Escape and special case: '!' is also used in glob pattern */
+ if (*(p - 1) == '\\' || (*p == '!' && *(p - 1) == '[')) {
+ p++;
+ goto retry;
+ }
+ }
+end:
+ *e = p;
+ return s;
+}
+
+static struct strfilter_node *strfilter_node__alloc(const char *op,
+ struct strfilter_node *l,
+ struct strfilter_node *r)
+{
+ struct strfilter_node *ret = zalloc(sizeof(struct strfilter_node));
+
+ if (ret) {
+ ret->p = op;
+ ret->l = l;
+ ret->r = r;
+ }
+
+ return ret;
+}
+
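+/*
+ * Recursive descent parser for filter rules: builds a binary tree where
+ * operators ('&', '|', '!') are inner nodes and glob patterns are leaves.
+ * On return *ep points at where parsing stopped.
+ */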
+static struct strfilter_node *strfilter_node__new(const char *s,
+ const char **ep)
+{
+ struct strfilter_node root, *cur, *last_op;
+ const char *e;
+
+ if (!s)
+ return NULL;
+
+ memset(&root, 0, sizeof(root));
+ last_op = cur = &root;
+
+ s = get_token(s, &e);
+ while (*s != '\0' && *s != ')') {
+ switch (*s) {
+ case '&': /* Exchange last OP->r with AND */
+ if (!cur->r || !last_op->r)
+ goto error;
+ cur = strfilter_node__alloc(OP_and, last_op->r, NULL);
+ if (!cur)
+ goto nomem;
+ last_op->r = cur;
+ last_op = cur;
+ break;
+ case '|': /* Exchange the root with OR */
+ if (!cur->r || !root.r)
+ goto error;
+ cur = strfilter_node__alloc(OP_or, root.r, NULL);
+ if (!cur)
+ goto nomem;
+ root.r = cur;
+ last_op = cur;
+ break;
+ case '!': /* Add NOT as a leaf node */
+ if (cur->r)
+ goto error;
+ cur->r = strfilter_node__alloc(OP_not, NULL, NULL);
+ if (!cur->r)
+ goto nomem;
+ cur = cur->r;
+ break;
+ case '(': /* Recursively parse inside the parentheses */
+ if (cur->r)
+ goto error;
+ cur->r = strfilter_node__new(s + 1, &s);
+ if (!s)
+ goto nomem;
+ if (!cur->r || *s != ')')
+ goto error;
+ e = s + 1;
+ break;
+ default:
+ if (cur->r)
+ goto error;
+ cur->r = strfilter_node__alloc(NULL, NULL, NULL);
+ if (!cur->r)
+ goto nomem;
+ cur->r->p = strndup(s, e - s);
+ if (!cur->r->p)
+ goto nomem;
+ }
+ s = get_token(e, &e);
+ }
+ if (!cur->r)
+ goto error;
+ *ep = s;
+ return root.r;
+nomem:
+ s = NULL;
+error:
+ *ep = s;
+ strfilter_node__delete(root.r);
+ return NULL;
+}
+
+/*
+ * Parse a filter rule and return a new strfilter.
+ * Return NULL on failure; in that case *err is NULL if memory allocation failed.
+ */
+struct strfilter *strfilter__new(const char *rules, const char **err)
+{
+ struct strfilter *ret = zalloc(sizeof(struct strfilter));
+ const char *ep = NULL;
+
+ if (ret)
+ ret->root = strfilter_node__new(rules, &ep);
+
+ if (!ret || !ret->root || *ep != '\0') {
+ if (err)
+ *err = ep;
+ strfilter__delete(ret);
+ ret = NULL;
+ }
+
+ return ret;
+}
+
+static bool strfilter_node__compare(struct strfilter_node *self,
+ const char *str)
+{
+ if (!self || !self->p)
+ return false;
+
+ switch (*self->p) {
+ case '|': /* OR */
+ return strfilter_node__compare(self->l, str) ||
+ strfilter_node__compare(self->r, str);
+ case '&': /* AND */
+ return strfilter_node__compare(self->l, str) &&
+ strfilter_node__compare(self->r, str);
+ case '!': /* NOT */
+ return !strfilter_node__compare(self->r, str);
+ default:
+ return strglobmatch(str, self->p);
+ }
+}
+
+/* Return true if STR matches the filter rules */
+bool strfilter__compare(struct strfilter *self, const char *str)
+{
+ if (!self)
+ return false;
+ return strfilter_node__compare(self->root, str);
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/strfilter.h b/ANDROID_3.4.5/tools/perf/util/strfilter.h
new file mode 100644
index 00000000..00f58a75
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/strfilter.h
@@ -0,0 +1,48 @@
+#ifndef __PERF_STRFILTER_H
+#define __PERF_STRFILTER_H
+/* General purpose glob matching filter */
+
+#include <linux/list.h>
+#include <stdbool.h>
+
+/* A node of string filter */
+struct strfilter_node {
+ struct strfilter_node *l; /* Tree left branch (for &,|) */
+ struct strfilter_node *r; /* Tree right branch (for !,&,|) */
+ const char *p; /* Operator or rule */
+};
+
+/* String filter */
+struct strfilter {
+ struct strfilter_node *root;
+};
+
+/**
+ * strfilter__new - Create a new string filter
+ * @rules: Filter rule, which is a combination of glob expressions.
+ * @err: Pointer to where an error was detected in @rules
+ *
+ * Parse @rules and return a new strfilter. Return NULL if an error is
+ * detected. In that case, *@err indicates where the error was detected, and
+ * *@err is NULL if memory allocation failed.
+ */
+struct strfilter *strfilter__new(const char *rules, const char **err);
+
+/**
+ * strfilter__compare - compare given string and a string filter
+ * @self: String filter
+ * @str: target string
+ *
+ * Compare @str and @self. Return true if @str matches the rule.
+ */
+bool strfilter__compare(struct strfilter *self, const char *str);
+
+/**
+ * strfilter__delete - delete a string filter
+ * @self: String filter to delete
+ *
+ * Delete @self.
+ */
+void strfilter__delete(struct strfilter *self);
+
+#endif
diff --git a/ANDROID_3.4.5/tools/perf/util/string.c b/ANDROID_3.4.5/tools/perf/util/string.c
new file mode 100644
index 00000000..d5836382
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/string.c
@@ -0,0 +1,315 @@
+#include "util.h"
+#include "string.h"
+
+#define K 1024LL
+/*
+ * perf_atoll()
+ * Parse (\d+)(b|B|kb|KB|mb|MB|gb|GB|tb|TB) (e.g. "256MB")
+ * and return its numeric value
+ */
+s64 perf_atoll(const char *str)
+{
+ unsigned int i;
+ s64 length = -1, unit = 1;
+
+ if (!isdigit(str[0]))
+ goto out_err;
+
+ for (i = 1; i < strlen(str); i++) {
+ switch (str[i]) {
+ case 'B':
+ case 'b':
+ break;
+ case 'K':
+ if (str[i + 1] != 'B')
+ goto out_err;
+ else
+ goto kilo;
+ case 'k':
+ if (str[i + 1] != 'b')
+ goto out_err;
+kilo:
+ unit = K;
+ break;
+ case 'M':
+ if (str[i + 1] != 'B')
+ goto out_err;
+ else
+ goto mega;
+ case 'm':
+ if (str[i + 1] != 'b')
+ goto out_err;
+mega:
+ unit = K * K;
+ break;
+ case 'G':
+ if (str[i + 1] != 'B')
+ goto out_err;
+ else
+ goto giga;
+ case 'g':
+ if (str[i + 1] != 'b')
+ goto out_err;
+giga:
+ unit = K * K * K;
+ break;
+ case 'T':
+ if (str[i + 1] != 'B')
+ goto out_err;
+ else
+ goto tera;
+ case 't':
+ if (str[i + 1] != 'b')
+ goto out_err;
+tera:
+ unit = K * K * K * K;
+ break;
+ case '\0': /* only digits were specified */
+ unit = 1;
+ break;
+ default:
+ if (!isdigit(str[i]))
+ goto out_err;
+ break;
+ }
+ }
+
+ length = atoll(str) * unit;
+ goto out;
+
+out_err:
+ length = -1;
+out:
+ return length;
+}
+
+/*
+ * Helper function for splitting a string into an argv-like array.
+ * Originally copied from lib/argv_split.c.
+ */
+static const char *skip_sep(const char *cp)
+{
+ while (*cp && isspace(*cp))
+ cp++;
+
+ return cp;
+}
+
+static const char *skip_arg(const char *cp)
+{
+ while (*cp && !isspace(*cp))
+ cp++;
+
+ return cp;
+}
+
+static int count_argc(const char *str)
+{
+ int count = 0;
+
+ while (*str) {
+ str = skip_sep(str);
+ if (*str) {
+ count++;
+ str = skip_arg(str);
+ }
+ }
+
+ return count;
+}
+
+/**
+ * argv_free - free an argv
+ * @argv: the argument vector to be freed
+ *
+ * Frees an argv and the strings it points to.
+ */
+void argv_free(char **argv)
+{
+ char **p;
+ for (p = argv; *p; p++)
+ free(*p);
+
+ free(argv);
+}
+
+/**
+ * argv_split - split a string at whitespace, returning an argv
+ * @str: the string to be split
+ * @argcp: returned argument count
+ *
+ * Returns an array of pointers to strings which are split out from
+ * @str. This is performed by strictly splitting on white-space; no
+ * quote processing is performed. Multiple whitespace characters are
+ * considered to be a single argument separator. The returned array
+ * is always NULL-terminated. Returns NULL on memory allocation
+ * failure.
+ */
+char **argv_split(const char *str, int *argcp)
+{
+ int argc = count_argc(str);
+ char **argv = zalloc(sizeof(*argv) * (argc+1));
+ char **argvp;
+
+ if (argv == NULL)
+ goto out;
+
+ if (argcp)
+ *argcp = argc;
+
+ argvp = argv;
+
+ while (*str) {
+ str = skip_sep(str);
+
+ if (*str) {
+ const char *p = str;
+ char *t;
+
+ str = skip_arg(str);
+
+ t = strndup(p, str-p);
+ if (t == NULL)
+ goto fail;
+ *argvp++ = t;
+ }
+ }
+ *argvp = NULL;
+
+out:
+ return argv;
+
+fail:
+ argv_free(argv);
+ return NULL;
+}
+
+/* Character class matching */
+static bool __match_charclass(const char *pat, char c, const char **npat)
+{
+ bool complement = false, ret = true;
+
+ if (*pat == '!') {
+ complement = true;
+ pat++;
+ }
+ if (*pat++ == c) /* First character is special */
+ goto end;
+
+ while (*pat && *pat != ']') { /* Matching */
+ if (*pat == '-' && *(pat + 1) != ']') { /* Range */
+ if (*(pat - 1) <= c && c <= *(pat + 1))
+ goto end;
+ if (*(pat - 1) > *(pat + 1))
+ goto error;
+ pat += 2;
+ } else if (*pat++ == c)
+ goto end;
+ }
+ if (!*pat)
+ goto error;
+ ret = false;
+
+end:
+ while (*pat && *pat != ']') /* Searching closing */
+ pat++;
+ if (!*pat)
+ goto error;
+ *npat = pat + 1;
+ return complement ? !ret : ret;
+
+error:
+ return false;
+}
+
+/* Glob/lazy pattern matching */
+static bool __match_glob(const char *str, const char *pat, bool ignore_space)
+{
+ while (*str && *pat && *pat != '*') {
+ if (ignore_space) {
+ /* Ignore spaces for lazy matching */
+ if (isspace(*str)) {
+ str++;
+ continue;
+ }
+ if (isspace(*pat)) {
+ pat++;
+ continue;
+ }
+ }
+ if (*pat == '?') { /* Matches any single character */
+ str++;
+ pat++;
+ continue;
+ } else if (*pat == '[') /* Character classes/Ranges */
+ if (__match_charclass(pat + 1, *str, &pat)) {
+ str++;
+ continue;
+ } else
+ return false;
+ else if (*pat == '\\') /* Escaped char match as normal char */
+ pat++;
+ if (*str++ != *pat++)
+ return false;
+ }
+ /* Check wild card */
+ if (*pat == '*') {
+ while (*pat == '*')
+ pat++;
+ if (!*pat) /* Tail wild card matches all */
+ return true;
+ while (*str)
+ if (__match_glob(str++, pat, ignore_space))
+ return true;
+ }
+ return !*str && !*pat;
+}
+
+/**
+ * strglobmatch - glob expression pattern matching
+ * @str: the target string to match
+ * @pat: the pattern string to match
+ *
+ * This returns true if @str matches @pat. @pat can include wildcards
+ * ('*','?') and character classes ([CHARS]; complementation and ranges are
+ * also supported). It also supports the escape character ('\') so that a
+ * special character can be matched as a normal one.
+ *
+ * Note: if @pat syntax is broken, this always returns false.
+ */
+bool strglobmatch(const char *str, const char *pat)
+{
+ return __match_glob(str, pat, false);
+}
+
+/**
+ * strlazymatch - matching pattern strings lazily with glob pattern
+ * @str: the target string to match
+ * @pat: the pattern string to match
+ *
+ * This is similar to strglobmatch, except this ignores spaces in
+ * the target string.
+ */
+bool strlazymatch(const char *str, const char *pat)
+{
+ return __match_glob(str, pat, true);
+}
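
A few concrete patterns make the wildcard, class, and escape rules above easier
to verify (an illustrative sketch; it assumes <assert.h> and only the two
functions defined in this file):

	/* Sketch: expected outcomes of the glob helpers. */
	static void glob_examples(void)
	{
		assert(strglobmatch("schedule_timeout", "sched*"));       /* trailing '*' */
		assert(strglobmatch("sys_open", "sys_?pen"));              /* '?' matches one char */
		assert(strglobmatch("irq_exit", "[ist]rq_*"));             /* character class */
		assert(!strglobmatch("kmalloc", "[!k]*"));                 /* complemented class */
		assert(strglobmatch("a*b", "a\\*b"));                      /* escaped '*' */
		assert(strlazymatch("long  sys_open(void)", "long sys_open*")); /* target spaces ignored */
	}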
+
+/**
+ * strtailcmp - Compare the tail of two strings
+ * @s1: 1st string to be compared
+ * @s2: 2nd string to be compared
+ *
+ * Return 0 if the whole of either string is the same as the tail of the other.
+ */
+int strtailcmp(const char *s1, const char *s2)
+{
+ int i1 = strlen(s1);
+ int i2 = strlen(s2);
+ while (--i1 >= 0 && --i2 >= 0) {
+ if (s1[i1] != s2[i2])
+ return s1[i1] - s2[i2];
+ }
+ return 0;
+}
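
strtailcmp() is the helper that lets a bare file name compare equal to a full
path ending in it; a small illustration (the paths are invented for the example,
and <assert.h> is again assumed):

	/* Sketch: tail comparison ignores whichever prefix is longer. */
	static void strtailcmp_example(void)
	{
		assert(strtailcmp("kernel/sched.c", "sched.c") == 0);  /* tails match */
		assert(strtailcmp("sched.c", "kernel/sched.c") == 0);  /* symmetric */
		assert(strtailcmp("kernel/sched.c", "fork.c") != 0);
	}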
+
diff --git a/ANDROID_3.4.5/tools/perf/util/strlist.c b/ANDROID_3.4.5/tools/perf/util/strlist.c
new file mode 100644
index 00000000..6783a204
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/strlist.c
@@ -0,0 +1,200 @@
+/*
+ * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include "strlist.h"
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+static struct str_node *str_node__new(const char *s, bool dupstr)
+{
+ struct str_node *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ if (dupstr) {
+ s = strdup(s);
+ if (s == NULL)
+ goto out_delete;
+ }
+ self->s = s;
+ }
+
+ return self;
+
+out_delete:
+ free(self);
+ return NULL;
+}
+
+static void str_node__delete(struct str_node *self, bool dupstr)
+{
+ if (dupstr)
+ free((void *)self->s);
+ free(self);
+}
+
+int strlist__add(struct strlist *self, const char *new_entry)
+{
+ struct rb_node **p = &self->entries.rb_node;
+ struct rb_node *parent = NULL;
+ struct str_node *sn;
+
+ while (*p != NULL) {
+ int rc;
+
+ parent = *p;
+ sn = rb_entry(parent, struct str_node, rb_node);
+ rc = strcmp(sn->s, new_entry);
+
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ sn = str_node__new(new_entry, self->dupstr);
+ if (sn == NULL)
+ return -ENOMEM;
+
+ rb_link_node(&sn->rb_node, parent, p);
+ rb_insert_color(&sn->rb_node, &self->entries);
+ ++self->nr_entries;
+
+ return 0;
+}
+
+int strlist__load(struct strlist *self, const char *filename)
+{
+ char entry[1024];
+ int err;
+ FILE *fp = fopen(filename, "r");
+
+ if (fp == NULL)
+ return errno;
+
+ while (fgets(entry, sizeof(entry), fp) != NULL) {
+ const size_t len = strlen(entry);
+
+ if (len == 0)
+ continue;
+ entry[len - 1] = '\0';
+
+ err = strlist__add(self, entry);
+ if (err != 0)
+ goto out;
+ }
+
+ err = 0;
+out:
+ fclose(fp);
+ return err;
+}
+
+void strlist__remove(struct strlist *self, struct str_node *sn)
+{
+ rb_erase(&sn->rb_node, &self->entries);
+ str_node__delete(sn, self->dupstr);
+}
+
+struct str_node *strlist__find(struct strlist *self, const char *entry)
+{
+ struct rb_node **p = &self->entries.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p != NULL) {
+ struct str_node *sn;
+ int rc;
+
+ parent = *p;
+ sn = rb_entry(parent, struct str_node, rb_node);
+ rc = strcmp(sn->s, entry);
+
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return sn;
+ }
+
+ return NULL;
+}
+
+static int strlist__parse_list_entry(struct strlist *self, const char *s)
+{
+ if (strncmp(s, "file://", 7) == 0)
+ return strlist__load(self, s + 7);
+
+ return strlist__add(self, s);
+}
+
+int strlist__parse_list(struct strlist *self, const char *s)
+{
+ char *sep;
+ int err;
+
+ while ((sep = strchr(s, ',')) != NULL) {
+ *sep = '\0';
+ err = strlist__parse_list_entry(self, s);
+ *sep = ',';
+ if (err != 0)
+ return err;
+ s = sep + 1;
+ }
+
+ return *s ? strlist__parse_list_entry(self, s) : 0;
+}
+
+struct strlist *strlist__new(bool dupstr, const char *slist)
+{
+ struct strlist *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ self->entries = RB_ROOT;
+ self->dupstr = dupstr;
+ self->nr_entries = 0;
+ if (slist && strlist__parse_list(self, slist) != 0)
+ goto out_error;
+ }
+
+ return self;
+out_error:
+ free(self);
+ return NULL;
+}
+
+void strlist__delete(struct strlist *self)
+{
+ if (self != NULL) {
+ struct str_node *pos;
+ struct rb_node *next = rb_first(&self->entries);
+
+ while (next) {
+ pos = rb_entry(next, struct str_node, rb_node);
+ next = rb_next(&pos->rb_node);
+ strlist__remove(self, pos);
+ }
+ self->entries = RB_ROOT;
+ free(self);
+ }
+}
+
+struct str_node *strlist__entry(const struct strlist *self, unsigned int idx)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+ struct str_node *pos = rb_entry(nd, struct str_node, rb_node);
+
+ if (!idx--)
+ return pos;
+ }
+
+ return NULL;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/strlist.h b/ANDROID_3.4.5/tools/perf/util/strlist.h
new file mode 100644
index 00000000..3ba83900
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/strlist.h
@@ -0,0 +1,78 @@
+#ifndef __PERF_STRLIST_H
+#define __PERF_STRLIST_H
+
+#include <linux/rbtree.h>
+#include <stdbool.h>
+
+struct str_node {
+ struct rb_node rb_node;
+ const char *s;
+};
+
+struct strlist {
+ struct rb_root entries;
+ unsigned int nr_entries;
+ bool dupstr;
+};
+
+struct strlist *strlist__new(bool dupstr, const char *slist);
+void strlist__delete(struct strlist *self);
+
+void strlist__remove(struct strlist *self, struct str_node *sn);
+int strlist__load(struct strlist *self, const char *filename);
+int strlist__add(struct strlist *self, const char *str);
+
+struct str_node *strlist__entry(const struct strlist *self, unsigned int idx);
+struct str_node *strlist__find(struct strlist *self, const char *entry);
+
+static inline bool strlist__has_entry(struct strlist *self, const char *entry)
+{
+ return strlist__find(self, entry) != NULL;
+}
+
+static inline bool strlist__empty(const struct strlist *self)
+{
+ return self->nr_entries == 0;
+}
+
+static inline unsigned int strlist__nr_entries(const struct strlist *self)
+{
+ return self->nr_entries;
+}
+
+/* For strlist iteration */
+static inline struct str_node *strlist__first(struct strlist *self)
+{
+ struct rb_node *rn = rb_first(&self->entries);
+ return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
+}
+static inline struct str_node *strlist__next(struct str_node *sn)
+{
+ struct rb_node *rn;
+ if (!sn)
+ return NULL;
+ rn = rb_next(&sn->rb_node);
+ return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
+}
+
+/**
+ * strlist__for_each - iterate over a strlist
+ * @pos: the &struct str_node to use as a loop cursor.
+ * @self: the &struct strlist to iterate over.
+ */
+#define strlist__for_each(pos, self) \
+ for (pos = strlist__first(self); pos; pos = strlist__next(pos))
+
+/**
+ * strlist__for_each_safe - iterate over a strlist, safe against removal of
+ * the current str_node
+ * @pos: the &struct str_node to use as a loop cursor.
+ * @n: another &struct str_node to use as temporary storage.
+ * @self: the &struct strlist to iterate over.
+ */
+#define strlist__for_each_safe(pos, n, self) \
+ for (pos = strlist__first(self), n = strlist__next(pos); pos;\
+ pos = n, n = strlist__next(n))
+
+int strlist__parse_list(struct strlist *self, const char *s);
+#endif /* __PERF_STRLIST_H */
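
Putting this header together with strlist.c above, a typical lifecycle looks
like the sketch below (editorial illustration only; the entry names are made up
and printf is assumed to be available to the caller):

	/* Sketch: build a list from a comma-separated string, walk it, free it. */
	static void strlist_example(void)
	{
		struct str_node *pos;
		struct strlist *slist = strlist__new(true, "kmalloc,kfree,schedule");

		if (slist == NULL)
			return;

		strlist__for_each(pos, slist)   /* rb-tree (strcmp) order: kfree, kmalloc, schedule */
			printf("%s\n", pos->s);

		if (strlist__has_entry(slist, "kfree"))
			printf("%u entries\n", strlist__nr_entries(slist)); /* 3 */

		strlist__delete(slist);
	}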
diff --git a/ANDROID_3.4.5/tools/perf/util/svghelper.c b/ANDROID_3.4.5/tools/perf/util/svghelper.c
new file mode 100644
index 00000000..96c86604
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/svghelper.c
@@ -0,0 +1,501 @@
+/*
+ * svghelper.c - helper functions for outputting svg
+ *
+ * (C) Copyright 2009 Intel Corporation
+ *
+ * Authors:
+ * Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+
+#include "svghelper.h"
+
+static u64 first_time, last_time;
+static u64 turbo_frequency, max_freq;
+
+
+#define SLOT_MULT 30.0
+#define SLOT_HEIGHT 25.0
+
+int svg_page_width = 1000;
+
+#define MIN_TEXT_SIZE 0.01
+
+static u64 total_height;
+static FILE *svgfile;
+
+static double cpu2slot(int cpu)
+{
+ return 2 * cpu + 1;
+}
+
+static double cpu2y(int cpu)
+{
+ return cpu2slot(cpu) * SLOT_MULT;
+}
+
+static double time2pixels(u64 __time)
+{
+ double X;
+
+ X = 1.0 * svg_page_width * (__time - first_time) / (last_time - first_time);
+ return X;
+}
+
+/*
+ * Round text sizes so that the svg viewer only needs a discrete
+ * number of renderings of the font
+ */
+static double round_text_size(double size)
+{
+ int loop = 100;
+ double target = 10.0;
+
+ if (size >= 10.0)
+ return size;
+ while (loop--) {
+ if (size >= target)
+ return target;
+ target = target / 2.0;
+ }
+ return size;
+}
+
+void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
+{
+ int new_width;
+
+ svgfile = fopen(filename, "w");
+ if (!svgfile) {
+ fprintf(stderr, "Cannot open %s for output\n", filename);
+ return;
+ }
+ first_time = start;
+ first_time = first_time / 100000000 * 100000000;
+ last_time = end;
+
+ /*
+ * if the recording is short, we default to a width of 1000, but
+ * for longer recordings we want at least 200 units of width per second
+ */
+ new_width = (last_time - first_time) / 5000000;
+
+ if (new_width > svg_page_width)
+ svg_page_width = new_width;
+
+ total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT;
+ fprintf(svgfile, "<?xml version=\"1.0\" standalone=\"no\"?> \n");
+ fprintf(svgfile, "<svg width=\"%i\" height=\"%" PRIu64 "\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
+
+ fprintf(svgfile, "<defs>\n <style type=\"text/css\">\n <![CDATA[\n");
+
+ fprintf(svgfile, " rect { stroke-width: 1; }\n");
+ fprintf(svgfile, " rect.process { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:1; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.process2 { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.sample { fill:rgb( 0, 0,255); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.blocked { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.waiting { fill:rgb(224,214, 0); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.WAITING { fill:rgb(255,214, 48); fill-opacity:0.6; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.cpu { fill:rgb(192,192,192); fill-opacity:0.2; stroke-width:0.5; stroke:rgb(128,128,128); } \n");
+ fprintf(svgfile, " rect.pstate { fill:rgb(128,128,128); fill-opacity:0.8; stroke-width:0; } \n");
+ fprintf(svgfile, " rect.c1 { fill:rgb(255,214,214); fill-opacity:0.5; stroke-width:0; } \n");
+ fprintf(svgfile, " rect.c2 { fill:rgb(255,172,172); fill-opacity:0.5; stroke-width:0; } \n");
+ fprintf(svgfile, " rect.c3 { fill:rgb(255,130,130); fill-opacity:0.5; stroke-width:0; } \n");
+ fprintf(svgfile, " rect.c4 { fill:rgb(255, 88, 88); fill-opacity:0.5; stroke-width:0; } \n");
+ fprintf(svgfile, " rect.c5 { fill:rgb(255, 44, 44); fill-opacity:0.5; stroke-width:0; } \n");
+ fprintf(svgfile, " rect.c6 { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; } \n");
+ fprintf(svgfile, " line.pstate { stroke:rgb(255,255, 0); stroke-opacity:0.8; stroke-width:2; } \n");
+
+ fprintf(svgfile, " ]]>\n </style>\n</defs>\n");
+}
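
The width heuristic above amounts to (end - start) / 5,000,000, i.e. 200 SVG
units per second of trace, with 1000 as the floor. A minimal driver for these
helpers might look like the following sketch (timestamps in nanoseconds, slot
numbers, and the output file name are invented for the illustration; it also
relies on the svg_* functions defined later in this file):

	/* Sketch: emit a tiny timechart-style SVG with the helpers in this file. */
	static void svg_example(void)
	{
		u64 start = 0, end = 2000000000ULL; /* 2s -> 2e9/5e6 = 400, below the 1000 floor */

		open_svg("example.svg", 1 /* cpus */, 2 /* rows */, start, end);
		svg_box(0, start, end, "process");
		svg_sample(1, 0, 100000000ULL, 900000000ULL);
		svg_waiting(1, 900000000ULL, 1200000000ULL);
		svg_time_grid();
		svg_legenda();
		svg_close();
	}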
+
+void svg_box(int Yslot, u64 start, u64 end, const char *type)
+{
+ if (!svgfile)
+ return;
+
+ fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"%s\"/>\n",
+ time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT, type);
+}
+
+void svg_sample(int Yslot, int cpu, u64 start, u64 end)
+{
+ double text_size;
+ if (!svgfile)
+ return;
+
+ fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"sample\"/>\n",
+ time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT);
+
+ text_size = (time2pixels(end)-time2pixels(start));
+ if (cpu > 9)
+ text_size = text_size/2;
+ if (text_size > 1.25)
+ text_size = 1.25;
+ text_size = round_text_size(text_size);
+
+ if (text_size > MIN_TEXT_SIZE)
+ fprintf(svgfile, "<text x=\"%1.8f\" y=\"%1.8f\" font-size=\"%1.8fpt\">%i</text>\n",
+ time2pixels(start), Yslot * SLOT_MULT + SLOT_HEIGHT - 1, text_size, cpu + 1);
+
+}
+
+static char *time_to_string(u64 duration)
+{
+ static char text[80];
+
+ text[0] = 0;
+
+ if (duration < 1000) /* less than 1 usec */
+ return text;
+
+ if (duration < 1000 * 1000) { /* less than 1 msec */
+ sprintf(text, "%4.1f us", duration / 1000.0);
+ return text;
+ }
+ sprintf(text, "%4.1f ms", duration / 1000.0 / 1000);
+
+ return text;
+}
+
+void svg_waiting(int Yslot, u64 start, u64 end)
+{
+ char *text;
+ const char *style;
+ double font_size;
+
+ if (!svgfile)
+ return;
+
+ style = "waiting";
+
+ if (end-start > 10 * 1000000) /* 10 msec */
+ style = "WAITING";
+
+ text = time_to_string(end-start);
+
+ font_size = 1.0 * (time2pixels(end)-time2pixels(start));
+
+ if (font_size > 3)
+ font_size = 3;
+
+ font_size = round_text_size(font_size);
+
+ fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), Yslot * SLOT_MULT);
+ fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
+ time2pixels(end)-time2pixels(start), SLOT_HEIGHT, style);
+ if (font_size > MIN_TEXT_SIZE)
+ fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%1.8fpt\"> %s</text>\n",
+ font_size, text);
+ fprintf(svgfile, "</g>\n");
+}
+
+static char *cpu_model(void)
+{
+ static char cpu_m[255];
+ char buf[256];
+ FILE *file;
+
+ cpu_m[0] = 0;
+ /* CPU type */
+ file = fopen("/proc/cpuinfo", "r");
+ if (file) {
+ while (fgets(buf, 255, file)) {
+ if (strstr(buf, "model name")) {
+ strncpy(cpu_m, &buf[13], 255);
+ break;
+ }
+ }
+ fclose(file);
+ }
+
+	/* Highest available CPU frequency */
+ file = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", "r");
+ if (file) {
+ while (fgets(buf, 255, file)) {
+ unsigned int freq;
+ freq = strtoull(buf, NULL, 10);
+ if (freq > max_freq)
+ max_freq = freq;
+ }
+ fclose(file);
+ }
+ return cpu_m;
+}
+
+void svg_cpu_box(int cpu, u64 __max_freq, u64 __turbo_freq)
+{
+ char cpu_string[80];
+ if (!svgfile)
+ return;
+
+ max_freq = __max_freq;
+ turbo_frequency = __turbo_freq;
+
+ fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"cpu\"/>\n",
+ time2pixels(first_time),
+ time2pixels(last_time)-time2pixels(first_time),
+ cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
+
+ sprintf(cpu_string, "CPU %i", (int)cpu+1);
+ fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\">%s</text>\n",
+ 10+time2pixels(first_time), cpu2y(cpu) + SLOT_HEIGHT/2, cpu_string);
+
+ fprintf(svgfile, "<text transform=\"translate(%4.8f,%4.8f)\" font-size=\"1.25pt\">%s</text>\n",
+ 10+time2pixels(first_time), cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - 4, cpu_model());
+}
+
+void svg_process(int cpu, u64 start, u64 end, const char *type, const char *name)
+{
+ double width;
+
+ if (!svgfile)
+ return;
+
+
+ fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), cpu2y(cpu));
+ fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
+ time2pixels(end)-time2pixels(start), SLOT_MULT+SLOT_HEIGHT, type);
+ width = time2pixels(end)-time2pixels(start);
+ if (width > 6)
+ width = 6;
+
+ width = round_text_size(width);
+
+ if (width > MIN_TEXT_SIZE)
+ fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%3.8fpt\">%s</text>\n",
+ width, name);
+
+ fprintf(svgfile, "</g>\n");
+}
+
+void svg_cstate(int cpu, u64 start, u64 end, int type)
+{
+ double width;
+ char style[128];
+
+ if (!svgfile)
+ return;
+
+
+ if (type > 6)
+ type = 6;
+ sprintf(style, "c%i", type);
+
+ fprintf(svgfile, "<rect class=\"%s\" x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\"/>\n",
+ style,
+ time2pixels(start), time2pixels(end)-time2pixels(start),
+ cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
+
+ width = (time2pixels(end)-time2pixels(start))/2.0;
+ if (width > 6)
+ width = 6;
+
+ width = round_text_size(width);
+
+ if (width > MIN_TEXT_SIZE)
+ fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"%3.8fpt\">C%i</text>\n",
+ time2pixels(start), cpu2y(cpu)+width, width, type);
+}
+
+static char *HzToHuman(unsigned long hz)
+{
+ static char buffer[1024];
+ unsigned long long Hz;
+
+ memset(buffer, 0, 1024);
+
+ Hz = hz;
+
+	/* default: just put the number in */
+	sprintf(buffer, "%9lli", Hz);
+
+	if (Hz > 1000)
+		sprintf(buffer, " %6lli MHz", (Hz+500)/1000);
+
+	if (Hz > 1500000)
+		sprintf(buffer, " %6.2f GHz", (Hz+5000.0)/1000000);
+
+ if (Hz == turbo_frequency)
+ sprintf(buffer, "Turbo");
+
+ return buffer;
+}
+
+void svg_pstate(int cpu, u64 start, u64 end, u64 freq)
+{
+ double height = 0;
+
+ if (!svgfile)
+ return;
+
+ if (max_freq)
+ height = freq * 1.0 / max_freq * (SLOT_HEIGHT + SLOT_MULT);
+ height = 1 + cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - height;
+ fprintf(svgfile, "<line x1=\"%4.8f\" x2=\"%4.8f\" y1=\"%4.1f\" y2=\"%4.1f\" class=\"pstate\"/>\n",
+ time2pixels(start), time2pixels(end), height, height);
+ fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"0.25pt\">%s</text>\n",
+ time2pixels(start), height+0.9, HzToHuman(freq));
+
+}
+
+
+void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2)
+{
+ double height;
+
+ if (!svgfile)
+ return;
+
+
+ if (row1 < row2) {
+ if (row1) {
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+ time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32);
+ if (desc2)
+ fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &gt;</text></g>\n",
+ time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_HEIGHT/48, desc2);
+ }
+ if (row2) {
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+ time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32, time2pixels(start), row2 * SLOT_MULT);
+ if (desc1)
+ fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &gt;</text></g>\n",
+ time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32, desc1);
+ }
+ } else {
+ if (row2) {
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+ time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32);
+ if (desc1)
+ fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &lt;</text></g>\n",
+ time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/48, desc1);
+ }
+ if (row1) {
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+ time2pixels(start), row1 * SLOT_MULT - SLOT_MULT/32, time2pixels(start), row1 * SLOT_MULT);
+ if (desc2)
+ fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &lt;</text></g>\n",
+ time2pixels(start), row1 * SLOT_MULT - SLOT_HEIGHT/32, desc2);
+ }
+ }
+ height = row1 * SLOT_MULT;
+ if (row2 > row1)
+ height += SLOT_HEIGHT;
+ if (row1)
+ fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(32,255,32)\"/>\n",
+ time2pixels(start), height);
+}
+
+void svg_wakeline(u64 start, int row1, int row2)
+{
+ double height;
+
+ if (!svgfile)
+ return;
+
+
+ if (row1 < row2)
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+ time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT);
+ else
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+ time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row1 * SLOT_MULT);
+
+ height = row1 * SLOT_MULT;
+ if (row2 > row1)
+ height += SLOT_HEIGHT;
+ fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(32,255,32)\"/>\n",
+ time2pixels(start), height);
+}
+
+void svg_interrupt(u64 start, int row)
+{
+ if (!svgfile)
+ return;
+
+ fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(255,128,128)\"/>\n",
+ time2pixels(start), row * SLOT_MULT);
+ fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(255,128,128)\"/>\n",
+ time2pixels(start), row * SLOT_MULT + SLOT_HEIGHT);
+}
+
+void svg_text(int Yslot, u64 start, const char *text)
+{
+ if (!svgfile)
+ return;
+
+ fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\">%s</text>\n",
+ time2pixels(start), Yslot * SLOT_MULT+SLOT_HEIGHT/2, text);
+}
+
+static void svg_legenda_box(int X, const char *text, const char *style)
+{
+ double boxsize;
+ boxsize = SLOT_HEIGHT / 2;
+
+ fprintf(svgfile, "<rect x=\"%i\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
+ X, boxsize, boxsize, style);
+ fprintf(svgfile, "<text transform=\"translate(%4.8f, %4.8f)\" font-size=\"%4.8fpt\">%s</text>\n",
+ X + boxsize + 5, boxsize, 0.8 * boxsize, text);
+}
+
+void svg_legenda(void)
+{
+ if (!svgfile)
+ return;
+
+ svg_legenda_box(0, "Running", "sample");
+ svg_legenda_box(100, "Idle","c1");
+ svg_legenda_box(200, "Deeper Idle", "c3");
+ svg_legenda_box(350, "Deepest Idle", "c6");
+ svg_legenda_box(550, "Sleeping", "process2");
+ svg_legenda_box(650, "Waiting for cpu", "waiting");
+ svg_legenda_box(800, "Blocked on IO", "blocked");
+}
+
+void svg_time_grid(void)
+{
+ u64 i;
+
+ if (!svgfile)
+ return;
+
+ i = first_time;
+ while (i < last_time) {
+ int color = 220;
+ double thickness = 0.075;
+ if ((i % 100000000) == 0) {
+ thickness = 0.5;
+ color = 192;
+ }
+ if ((i % 1000000000) == 0) {
+ thickness = 2.0;
+ color = 128;
+ }
+
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%" PRIu64 "\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
+ time2pixels(i), SLOT_MULT/2, time2pixels(i), total_height, color, color, color, thickness);
+
+ i += 10000000;
+ }
+}
+
+void svg_close(void)
+{
+ if (svgfile) {
+ fprintf(svgfile, "</svg>\n");
+ fclose(svgfile);
+ svgfile = NULL;
+ }
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/svghelper.h b/ANDROID_3.4.5/tools/perf/util/svghelper.h
new file mode 100644
index 00000000..e0781989
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/svghelper.h
@@ -0,0 +1,28 @@
+#ifndef __PERF_SVGHELPER_H
+#define __PERF_SVGHELPER_H
+
+#include "types.h"
+
+extern void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end);
+extern void svg_box(int Yslot, u64 start, u64 end, const char *type);
+extern void svg_sample(int Yslot, int cpu, u64 start, u64 end);
+extern void svg_waiting(int Yslot, u64 start, u64 end);
+extern void svg_cpu_box(int cpu, u64 max_frequency, u64 turbo_frequency);
+
+
+extern void svg_process(int cpu, u64 start, u64 end, const char *type, const char *name);
+extern void svg_cstate(int cpu, u64 start, u64 end, int type);
+extern void svg_pstate(int cpu, u64 start, u64 end, u64 freq);
+
+
+extern void svg_time_grid(void);
+extern void svg_legenda(void);
+extern void svg_wakeline(u64 start, int row1, int row2);
+extern void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2);
+extern void svg_interrupt(u64 start, int row);
+extern void svg_text(int Yslot, u64 start, const char *text);
+extern void svg_close(void);
+
+extern int svg_page_width;
+
+#endif /* __PERF_SVGHELPER_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/symbol.c b/ANDROID_3.4.5/tools/perf/util/symbol.c
new file mode 100644
index 00000000..ab9867b2
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/symbol.c
@@ -0,0 +1,2785 @@
+#include <dirent.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/param.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include "build-id.h"
+#include "util.h"
+#include "debug.h"
+#include "symbol.h"
+#include "strlist.h"
+
+#include <libelf.h>
+#include <gelf.h>
+#include <elf.h>
+#include <limits.h>
+#include <sys/utsname.h>
+
+#ifndef KSYM_NAME_LEN
+#define KSYM_NAME_LEN 256
+#endif
+
+#ifndef NT_GNU_BUILD_ID
+#define NT_GNU_BUILD_ID 3
+#endif
+
+static bool dso__build_id_equal(const struct dso *dso, u8 *build_id);
+static int elf_read_build_id(Elf *elf, void *bf, size_t size);
+static void dsos__add(struct list_head *head, struct dso *dso);
+static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
+static int dso__load_kernel_sym(struct dso *dso, struct map *map,
+ symbol_filter_t filter);
+static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
+ symbol_filter_t filter);
+static int vmlinux_path__nr_entries;
+static char **vmlinux_path;
+
+struct symbol_conf symbol_conf = {
+ .exclude_other = true,
+ .use_modules = true,
+ .try_vmlinux_path = true,
+ .annotate_src = true,
+ .symfs = "",
+};
+
+int dso__name_len(const struct dso *dso)
+{
+ if (!dso)
+ return strlen("[unknown]");
+ if (verbose)
+ return dso->long_name_len;
+
+ return dso->short_name_len;
+}
+
+bool dso__loaded(const struct dso *dso, enum map_type type)
+{
+ return dso->loaded & (1 << type);
+}
+
+bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
+{
+ return dso->sorted_by_name & (1 << type);
+}
+
+static void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
+{
+ dso->sorted_by_name |= (1 << type);
+}
+
+bool symbol_type__is_a(char symbol_type, enum map_type map_type)
+{
+ symbol_type = toupper(symbol_type);
+
+ switch (map_type) {
+ case MAP__FUNCTION:
+ return symbol_type == 'T' || symbol_type == 'W';
+ case MAP__VARIABLE:
+ return symbol_type == 'D';
+ default:
+ return false;
+ }
+}
+
+static int prefix_underscores_count(const char *str)
+{
+ const char *tail = str;
+
+ while (*tail == '_')
+ tail++;
+
+ return tail - str;
+}
+
+#define SYMBOL_A 0
+#define SYMBOL_B 1
+
+static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
+{
+ s64 a;
+ s64 b;
+
+ /* Prefer a symbol with non zero length */
+ a = syma->end - syma->start;
+ b = symb->end - symb->start;
+ if ((b == 0) && (a > 0))
+ return SYMBOL_A;
+ else if ((a == 0) && (b > 0))
+ return SYMBOL_B;
+
+ /* Prefer a non weak symbol over a weak one */
+ a = syma->binding == STB_WEAK;
+ b = symb->binding == STB_WEAK;
+ if (b && !a)
+ return SYMBOL_A;
+ if (a && !b)
+ return SYMBOL_B;
+
+ /* Prefer a global symbol over a non global one */
+ a = syma->binding == STB_GLOBAL;
+ b = symb->binding == STB_GLOBAL;
+ if (a && !b)
+ return SYMBOL_A;
+ if (b && !a)
+ return SYMBOL_B;
+
+ /* Prefer a symbol with less underscores */
+ a = prefix_underscores_count(syma->name);
+ b = prefix_underscores_count(symb->name);
+ if (b > a)
+ return SYMBOL_A;
+ else if (a > b)
+ return SYMBOL_B;
+
+ /* If all else fails, choose the symbol with the longest name */
+ if (strlen(syma->name) >= strlen(symb->name))
+ return SYMBOL_A;
+ else
+ return SYMBOL_B;
+}
+
+static void symbols__fixup_duplicate(struct rb_root *symbols)
+{
+ struct rb_node *nd;
+ struct symbol *curr, *next;
+
+ nd = rb_first(symbols);
+
+ while (nd) {
+ curr = rb_entry(nd, struct symbol, rb_node);
+again:
+		nd = rb_next(&curr->rb_node);
+		if (!nd)
+			break;
+
+		next = rb_entry(nd, struct symbol, rb_node);
+
+ if (curr->start != next->start)
+ continue;
+
+ if (choose_best_symbol(curr, next) == SYMBOL_A) {
+ rb_erase(&next->rb_node, symbols);
+ goto again;
+ } else {
+ nd = rb_next(&curr->rb_node);
+ rb_erase(&curr->rb_node, symbols);
+ }
+ }
+}
+
+static void symbols__fixup_end(struct rb_root *symbols)
+{
+ struct rb_node *nd, *prevnd = rb_first(symbols);
+ struct symbol *curr, *prev;
+
+ if (prevnd == NULL)
+ return;
+
+ curr = rb_entry(prevnd, struct symbol, rb_node);
+
+ for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
+ prev = curr;
+ curr = rb_entry(nd, struct symbol, rb_node);
+
+ if (prev->end == prev->start && prev->end != curr->start)
+ prev->end = curr->start - 1;
+ }
+
+ /* Last entry */
+ if (curr->end == curr->start)
+ curr->end = roundup(curr->start, 4096);
+}
+
+static void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
+{
+ struct map *prev, *curr;
+ struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]);
+
+ if (prevnd == NULL)
+ return;
+
+ curr = rb_entry(prevnd, struct map, rb_node);
+
+ for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
+ prev = curr;
+ curr = rb_entry(nd, struct map, rb_node);
+ prev->end = curr->start - 1;
+ }
+
+ /*
+	 * We still don't have the actual symbols, so guess the
+	 * last map's final address.
+ */
+ curr->end = ~0ULL;
+}
+
+static void map_groups__fixup_end(struct map_groups *mg)
+{
+ int i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ __map_groups__fixup_end(mg, i);
+}
+
+static struct symbol *symbol__new(u64 start, u64 len, u8 binding,
+ const char *name)
+{
+ size_t namelen = strlen(name) + 1;
+ struct symbol *sym = calloc(1, (symbol_conf.priv_size +
+ sizeof(*sym) + namelen));
+ if (sym == NULL)
+ return NULL;
+
+ if (symbol_conf.priv_size)
+ sym = ((void *)sym) + symbol_conf.priv_size;
+
+ sym->start = start;
+ sym->end = len ? start + len - 1 : start;
+ sym->binding = binding;
+ sym->namelen = namelen - 1;
+
+ pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
+ __func__, name, start, sym->end);
+ memcpy(sym->name, name, namelen);
+
+ return sym;
+}
+
+void symbol__delete(struct symbol *sym)
+{
+ free(((void *)sym) - symbol_conf.priv_size);
+}
+
+static size_t symbol__fprintf(struct symbol *sym, FILE *fp)
+{
+ return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
+ sym->start, sym->end,
+ sym->binding == STB_GLOBAL ? 'g' :
+ sym->binding == STB_LOCAL ? 'l' : 'w',
+ sym->name);
+}
+
+size_t symbol__fprintf_symname_offs(const struct symbol *sym,
+ const struct addr_location *al, FILE *fp)
+{
+ unsigned long offset;
+ size_t length;
+
+ if (sym && sym->name) {
+ length = fprintf(fp, "%s", sym->name);
+ if (al) {
+ offset = al->addr - sym->start;
+ length += fprintf(fp, "+0x%lx", offset);
+ }
+ return length;
+ } else
+ return fprintf(fp, "[unknown]");
+}
+
+size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
+{
+ return symbol__fprintf_symname_offs(sym, NULL, fp);
+}
+
+void dso__set_long_name(struct dso *dso, char *name)
+{
+ if (name == NULL)
+ return;
+ dso->long_name = name;
+ dso->long_name_len = strlen(name);
+}
+
+static void dso__set_short_name(struct dso *dso, const char *name)
+{
+ if (name == NULL)
+ return;
+ dso->short_name = name;
+ dso->short_name_len = strlen(name);
+}
+
+static void dso__set_basename(struct dso *dso)
+{
+ dso__set_short_name(dso, basename(dso->long_name));
+}
+
+struct dso *dso__new(const char *name)
+{
+ struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
+
+ if (dso != NULL) {
+ int i;
+ strcpy(dso->name, name);
+ dso__set_long_name(dso, dso->name);
+ dso__set_short_name(dso, dso->name);
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
+ dso->symtab_type = SYMTAB__NOT_FOUND;
+ dso->loaded = 0;
+ dso->sorted_by_name = 0;
+ dso->has_build_id = 0;
+ dso->kernel = DSO_TYPE_USER;
+ INIT_LIST_HEAD(&dso->node);
+ }
+
+ return dso;
+}
+
+static void symbols__delete(struct rb_root *symbols)
+{
+ struct symbol *pos;
+ struct rb_node *next = rb_first(symbols);
+
+ while (next) {
+ pos = rb_entry(next, struct symbol, rb_node);
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, symbols);
+ symbol__delete(pos);
+ }
+}
+
+void dso__delete(struct dso *dso)
+{
+ int i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ symbols__delete(&dso->symbols[i]);
+ if (dso->sname_alloc)
+ free((char *)dso->short_name);
+ if (dso->lname_alloc)
+ free(dso->long_name);
+ free(dso);
+}
+
+void dso__set_build_id(struct dso *dso, void *build_id)
+{
+ memcpy(dso->build_id, build_id, sizeof(dso->build_id));
+ dso->has_build_id = 1;
+}
+
+static void symbols__insert(struct rb_root *symbols, struct symbol *sym)
+{
+ struct rb_node **p = &symbols->rb_node;
+ struct rb_node *parent = NULL;
+ const u64 ip = sym->start;
+ struct symbol *s;
+
+ while (*p != NULL) {
+ parent = *p;
+ s = rb_entry(parent, struct symbol, rb_node);
+ if (ip < s->start)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&sym->rb_node, parent, p);
+ rb_insert_color(&sym->rb_node, symbols);
+}
+
+static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
+{
+ struct rb_node *n;
+
+ if (symbols == NULL)
+ return NULL;
+
+ n = symbols->rb_node;
+
+ while (n) {
+ struct symbol *s = rb_entry(n, struct symbol, rb_node);
+
+ if (ip < s->start)
+ n = n->rb_left;
+ else if (ip > s->end)
+ n = n->rb_right;
+ else
+ return s;
+ }
+
+ return NULL;
+}
+
+struct symbol_name_rb_node {
+ struct rb_node rb_node;
+ struct symbol sym;
+};
+
+static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
+{
+ struct rb_node **p = &symbols->rb_node;
+ struct rb_node *parent = NULL;
+ struct symbol_name_rb_node *symn, *s;
+
+ symn = container_of(sym, struct symbol_name_rb_node, sym);
+
+ while (*p != NULL) {
+ parent = *p;
+ s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
+ if (strcmp(sym->name, s->sym.name) < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&symn->rb_node, parent, p);
+ rb_insert_color(&symn->rb_node, symbols);
+}
+
+static void symbols__sort_by_name(struct rb_root *symbols,
+ struct rb_root *source)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(source); nd; nd = rb_next(nd)) {
+ struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+ symbols__insert_by_name(symbols, pos);
+ }
+}
+
+static struct symbol *symbols__find_by_name(struct rb_root *symbols,
+ const char *name)
+{
+ struct rb_node *n;
+
+ if (symbols == NULL)
+ return NULL;
+
+ n = symbols->rb_node;
+
+ while (n) {
+ struct symbol_name_rb_node *s;
+ int cmp;
+
+ s = rb_entry(n, struct symbol_name_rb_node, rb_node);
+ cmp = strcmp(name, s->sym.name);
+
+ if (cmp < 0)
+ n = n->rb_left;
+ else if (cmp > 0)
+ n = n->rb_right;
+ else
+ return &s->sym;
+ }
+
+ return NULL;
+}
+
+struct symbol *dso__find_symbol(struct dso *dso,
+ enum map_type type, u64 addr)
+{
+ return symbols__find(&dso->symbols[type], addr);
+}
+
+struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
+ const char *name)
+{
+ return symbols__find_by_name(&dso->symbol_names[type], name);
+}
+
+void dso__sort_by_name(struct dso *dso, enum map_type type)
+{
+ dso__set_sorted_by_name(dso, type);
+ return symbols__sort_by_name(&dso->symbol_names[type],
+ &dso->symbols[type]);
+}
+
+int build_id__sprintf(const u8 *build_id, int len, char *bf)
+{
+ char *bid = bf;
+ const u8 *raw = build_id;
+ int i;
+
+ for (i = 0; i < len; ++i) {
+ sprintf(bid, "%02x", *raw);
+ ++raw;
+ bid += 2;
+ }
+
+ return raw - build_id;
+}
+
+size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
+{
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
+ return fprintf(fp, "%s", sbuild_id);
+}
+
+size_t dso__fprintf_symbols_by_name(struct dso *dso,
+ enum map_type type, FILE *fp)
+{
+ size_t ret = 0;
+ struct rb_node *nd;
+ struct symbol_name_rb_node *pos;
+
+ for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
+ pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
+		ret += fprintf(fp, "%s\n", pos->sym.name);
+ }
+
+ return ret;
+}
+
+size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
+{
+ struct rb_node *nd;
+ size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
+
+ if (dso->short_name != dso->long_name)
+ ret += fprintf(fp, "%s, ", dso->long_name);
+ ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
+ dso->loaded ? "" : "NOT ");
+ ret += dso__fprintf_buildid(dso, fp);
+ ret += fprintf(fp, ")\n");
+ for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
+ struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+ ret += symbol__fprintf(pos, fp);
+ }
+
+ return ret;
+}
+
+int kallsyms__parse(const char *filename, void *arg,
+ int (*process_symbol)(void *arg, const char *name,
+ char type, u64 start, u64 end))
+{
+ char *line = NULL;
+ size_t n;
+ int err = -1;
+ FILE *file = fopen(filename, "r");
+
+ if (file == NULL)
+ goto out_failure;
+
+ err = 0;
+
+ while (!feof(file)) {
+ u64 start;
+ int line_len, len;
+ char symbol_type;
+ char *symbol_name;
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0 || !line)
+ break;
+
+ line[--line_len] = '\0'; /* \n */
+
+ len = hex2u64(line, &start);
+
+ len++;
+ if (len + 2 >= line_len)
+ continue;
+
+ symbol_type = line[len];
+ len += 2;
+ symbol_name = line + len;
+ len = line_len - len;
+
+ if (len >= KSYM_NAME_LEN) {
+ err = -1;
+ break;
+ }
+
+ /*
+ * module symbols are not sorted so we add all
+ * symbols with zero length and rely on
+ * symbols__fixup_end() to fix it up.
+ */
+ err = process_symbol(arg, symbol_name,
+ symbol_type, start, start);
+ if (err)
+ break;
+ }
+
+ free(line);
+ fclose(file);
+ return err;
+
+out_failure:
+ return -1;
+}
+
+struct process_kallsyms_args {
+ struct map *map;
+ struct dso *dso;
+};
+
+static u8 kallsyms2elf_type(char type)
+{
+ if (type == 'W')
+ return STB_WEAK;
+
+ return isupper(type) ? STB_GLOBAL : STB_LOCAL;
+}
+
+static int map__process_kallsym_symbol(void *arg, const char *name,
+ char type, u64 start, u64 end)
+{
+ struct symbol *sym;
+ struct process_kallsyms_args *a = arg;
+ struct rb_root *root = &a->dso->symbols[a->map->type];
+
+ if (!symbol_type__is_a(type, a->map->type))
+ return 0;
+
+ sym = symbol__new(start, end - start + 1,
+ kallsyms2elf_type(type), name);
+ if (sym == NULL)
+ return -ENOMEM;
+ /*
+	 * We will pass the symbols to the filter later, in
+	 * dso__split_kallsyms, when we have split the maps per module
+ */
+ symbols__insert(root, sym);
+
+ return 0;
+}
+
+/*
+ * Loads the function entries in /proc/kallsyms into kernel_map->dso,
+ * so that in the next step we can set the symbol ->end addresses and then
+ * call dso__split_kallsyms().
+ */
+static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
+ struct map *map)
+{
+ struct process_kallsyms_args args = { .map = map, .dso = dso, };
+ return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
+}
+
+/*
+ * Split the symbols into maps, making sure there are no overlaps, i.e. the
+ * kernel range is broken into several maps, named [kernel].N, as we don't have
+ * the original ELF section names that vmlinux has.
+ */
+static int dso__split_kallsyms(struct dso *dso, struct map *map,
+ symbol_filter_t filter)
+{
+ struct map_groups *kmaps = map__kmap(map)->kmaps;
+ struct machine *machine = kmaps->machine;
+ struct map *curr_map = map;
+ struct symbol *pos;
+ int count = 0, moved = 0;
+ struct rb_root *root = &dso->symbols[map->type];
+ struct rb_node *next = rb_first(root);
+ int kernel_range = 0;
+
+ while (next) {
+ char *module;
+
+ pos = rb_entry(next, struct symbol, rb_node);
+ next = rb_next(&pos->rb_node);
+
+ module = strchr(pos->name, '\t');
+ if (module) {
+ if (!symbol_conf.use_modules)
+ goto discard_symbol;
+
+ *module++ = '\0';
+
+ if (strcmp(curr_map->dso->short_name, module)) {
+ if (curr_map != map &&
+ dso->kernel == DSO_TYPE_GUEST_KERNEL &&
+ machine__is_default_guest(machine)) {
+ /*
+ * We assume all symbols of a module are
+					 * contiguous in kallsyms, so curr_map
+ * points to a module and all its
+ * symbols are in its kmap. Mark it as
+ * loaded.
+ */
+ dso__set_loaded(curr_map->dso,
+ curr_map->type);
+ }
+
+ curr_map = map_groups__find_by_name(kmaps,
+ map->type, module);
+ if (curr_map == NULL) {
+ pr_debug("%s/proc/{kallsyms,modules} "
+ "inconsistency while looking "
+ "for \"%s\" module!\n",
+ machine->root_dir, module);
+ curr_map = map;
+ goto discard_symbol;
+ }
+
+ if (curr_map->dso->loaded &&
+ !machine__is_default_guest(machine))
+ goto discard_symbol;
+ }
+ /*
+ * So that we look just like we get from .ko files,
+ * i.e. not prelinked, relative to map->start.
+ */
+ pos->start = curr_map->map_ip(curr_map, pos->start);
+ pos->end = curr_map->map_ip(curr_map, pos->end);
+ } else if (curr_map != map) {
+ char dso_name[PATH_MAX];
+ struct dso *ndso;
+
+ if (count == 0) {
+ curr_map = map;
+ goto filter_symbol;
+ }
+
+ if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ snprintf(dso_name, sizeof(dso_name),
+ "[guest.kernel].%d",
+ kernel_range++);
+ else
+ snprintf(dso_name, sizeof(dso_name),
+ "[kernel].%d",
+ kernel_range++);
+
+ ndso = dso__new(dso_name);
+ if (ndso == NULL)
+ return -1;
+
+ ndso->kernel = dso->kernel;
+
+ curr_map = map__new2(pos->start, ndso, map->type);
+ if (curr_map == NULL) {
+ dso__delete(ndso);
+ return -1;
+ }
+
+ curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
+ map_groups__insert(kmaps, curr_map);
+ ++kernel_range;
+ }
+filter_symbol:
+ if (filter && filter(curr_map, pos)) {
+discard_symbol: rb_erase(&pos->rb_node, root);
+ symbol__delete(pos);
+ } else {
+ if (curr_map != map) {
+ rb_erase(&pos->rb_node, root);
+ symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
+ ++moved;
+ } else
+ ++count;
+ }
+ }
+
+ if (curr_map != map &&
+ dso->kernel == DSO_TYPE_GUEST_KERNEL &&
+ machine__is_default_guest(kmaps->machine)) {
+ dso__set_loaded(curr_map->dso, curr_map->type);
+ }
+
+ return count + moved;
+}
+
+static bool symbol__restricted_filename(const char *filename,
+ const char *restricted_filename)
+{
+ bool restricted = false;
+
+ if (symbol_conf.kptr_restrict) {
+ char *r = realpath(filename, NULL);
+
+ if (r != NULL) {
+ restricted = strcmp(r, restricted_filename) == 0;
+ free(r);
+ return restricted;
+ }
+ }
+
+ return restricted;
+}
+
+int dso__load_kallsyms(struct dso *dso, const char *filename,
+ struct map *map, symbol_filter_t filter)
+{
+ if (symbol__restricted_filename(filename, "/proc/kallsyms"))
+ return -1;
+
+ if (dso__load_all_kallsyms(dso, filename, map) < 0)
+ return -1;
+
+ symbols__fixup_duplicate(&dso->symbols[map->type]);
+ symbols__fixup_end(&dso->symbols[map->type]);
+
+ if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ dso->symtab_type = SYMTAB__GUEST_KALLSYMS;
+ else
+ dso->symtab_type = SYMTAB__KALLSYMS;
+
+ return dso__split_kallsyms(dso, map, filter);
+}
+
+static int dso__load_perf_map(struct dso *dso, struct map *map,
+ symbol_filter_t filter)
+{
+ char *line = NULL;
+ size_t n;
+ FILE *file;
+ int nr_syms = 0;
+
+ file = fopen(dso->long_name, "r");
+ if (file == NULL)
+ goto out_failure;
+
+ while (!feof(file)) {
+ u64 start, size;
+ struct symbol *sym;
+ int line_len, len;
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0)
+ break;
+
+ if (!line)
+ goto out_failure;
+
+ line[--line_len] = '\0'; /* \n */
+
+ len = hex2u64(line, &start);
+
+ len++;
+ if (len + 2 >= line_len)
+ continue;
+
+ len += hex2u64(line + len, &size);
+
+ len++;
+ if (len + 2 >= line_len)
+ continue;
+
+ sym = symbol__new(start, size, STB_GLOBAL, line + len);
+
+ if (sym == NULL)
+ goto out_delete_line;
+
+ if (filter && filter(map, sym))
+ symbol__delete(sym);
+ else {
+ symbols__insert(&dso->symbols[map->type], sym);
+ nr_syms++;
+ }
+ }
+
+ free(line);
+ fclose(file);
+
+ return nr_syms;
+
+out_delete_line:
+ free(line);
+out_failure:
+ return -1;
+}
+
+/**
+ * elf_symtab__for_each_symbol - iterate through all the symbols
+ *
+ * @syms: Elf_Data of the symbol table to iterate over
+ * @nr_syms: number of symbols in @syms
+ * @idx: uint32_t index cursor
+ * @sym: GElf_Sym iterator
+ */
+#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
+ for (idx = 0, gelf_getsym(syms, idx, &sym);\
+ idx < nr_syms; \
+ idx++, gelf_getsym(syms, idx, &sym))
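
Spelled out, the iterator above is driven the way dso__load_sym() below drives
it; a compressed sketch (syms, symstrs and nr_syms are assumed to have been
obtained from .symtab exactly as in that function, and the elf_sym__* helpers
are the ones defined just below):

	/* Sketch: print every function symbol of an already-located symtab. */
	static void dump_functions(Elf_Data *syms, Elf_Data *symstrs, uint32_t nr_syms)
	{
		uint32_t idx;
		GElf_Sym sym;

		elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
			if (elf_sym__is_function(&sym))
				printf("%#" PRIx64 " %s\n", (u64)sym.st_value,
				       elf_sym__name(&sym, symstrs));
		}
	}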
+
+static inline uint8_t elf_sym__type(const GElf_Sym *sym)
+{
+ return GELF_ST_TYPE(sym->st_info);
+}
+
+static inline int elf_sym__is_function(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_FUNC &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF;
+}
+
+static inline bool elf_sym__is_object(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_OBJECT &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF;
+}
+
+static inline int elf_sym__is_label(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_NOTYPE &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF &&
+ sym->st_shndx != SHN_ABS;
+}
+
+static inline const char *elf_sec__name(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return secstrs->d_buf + shdr->sh_name;
+}
+
+static inline int elf_sec__is_text(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
+}
+
+static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
+}
+
+static inline const char *elf_sym__name(const GElf_Sym *sym,
+ const Elf_Data *symstrs)
+{
+ return symstrs->d_buf + sym->st_name;
+}
+
+static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
+ GElf_Shdr *shp, const char *name,
+ size_t *idx)
+{
+ Elf_Scn *sec = NULL;
+ size_t cnt = 1;
+
+ while ((sec = elf_nextscn(elf, sec)) != NULL) {
+ char *str;
+
+ gelf_getshdr(sec, shp);
+ str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
+ if (!strcmp(name, str)) {
+ if (idx)
+ *idx = cnt;
+ break;
+ }
+ ++cnt;
+ }
+
+ return sec;
+}
+
+#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
+ for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
+ idx < nr_entries; \
+ ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
+
+#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
+ for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
+ idx < nr_entries; \
+ ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
+
+/*
+ * We need to check if we have a .dynsym, so that we can handle the
+ * .plt, synthesizing its symbols, which aren't in the symtabs (be it
+ * .dynsym or .symtab).
+ * Always look at the original dso, not at debuginfo packages, which
+ * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
+ */
+static int
+dso__synthesize_plt_symbols(struct dso *dso, char *name, struct map *map,
+ symbol_filter_t filter)
+{
+ uint32_t nr_rel_entries, idx;
+ GElf_Sym sym;
+ u64 plt_offset;
+ GElf_Shdr shdr_plt;
+ struct symbol *f;
+ GElf_Shdr shdr_rel_plt, shdr_dynsym;
+ Elf_Data *reldata, *syms, *symstrs;
+ Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
+ size_t dynsym_idx;
+ GElf_Ehdr ehdr;
+ char sympltname[1024];
+ Elf *elf;
+ int nr = 0, symidx, fd, err = 0;
+
+ fd = open(name, O_RDONLY);
+ if (fd < 0)
+ goto out;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL)
+ goto out_close;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ goto out_elf_end;
+
+ scn_dynsym = elf_section_by_name(elf, &ehdr, &shdr_dynsym,
+ ".dynsym", &dynsym_idx);
+ if (scn_dynsym == NULL)
+ goto out_elf_end;
+
+ scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
+ ".rela.plt", NULL);
+ if (scn_plt_rel == NULL) {
+ scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
+ ".rel.plt", NULL);
+ if (scn_plt_rel == NULL)
+ goto out_elf_end;
+ }
+
+ err = -1;
+
+ if (shdr_rel_plt.sh_link != dynsym_idx)
+ goto out_elf_end;
+
+ if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
+ goto out_elf_end;
+
+ /*
+	 * Fetch the relocation section to find the indexes to the GOT
+ * and the symbols in the .dynsym they refer to.
+ */
+ reldata = elf_getdata(scn_plt_rel, NULL);
+ if (reldata == NULL)
+ goto out_elf_end;
+
+ syms = elf_getdata(scn_dynsym, NULL);
+ if (syms == NULL)
+ goto out_elf_end;
+
+ scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
+ if (scn_symstrs == NULL)
+ goto out_elf_end;
+
+ symstrs = elf_getdata(scn_symstrs, NULL);
+ if (symstrs == NULL)
+ goto out_elf_end;
+
+ nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
+ plt_offset = shdr_plt.sh_offset;
+
+ if (shdr_rel_plt.sh_type == SHT_RELA) {
+ GElf_Rela pos_mem, *pos;
+
+ elf_section__for_each_rela(reldata, pos, pos_mem, idx,
+ nr_rel_entries) {
+ symidx = GELF_R_SYM(pos->r_info);
+ plt_offset += shdr_plt.sh_entsize;
+ gelf_getsym(syms, symidx, &sym);
+ snprintf(sympltname, sizeof(sympltname),
+ "%s@plt", elf_sym__name(&sym, symstrs));
+
+ f = symbol__new(plt_offset, shdr_plt.sh_entsize,
+ STB_GLOBAL, sympltname);
+ if (!f)
+ goto out_elf_end;
+
+ if (filter && filter(map, f))
+ symbol__delete(f);
+ else {
+ symbols__insert(&dso->symbols[map->type], f);
+ ++nr;
+ }
+ }
+ } else if (shdr_rel_plt.sh_type == SHT_REL) {
+ GElf_Rel pos_mem, *pos;
+ elf_section__for_each_rel(reldata, pos, pos_mem, idx,
+ nr_rel_entries) {
+ symidx = GELF_R_SYM(pos->r_info);
+ plt_offset += shdr_plt.sh_entsize;
+ gelf_getsym(syms, symidx, &sym);
+ snprintf(sympltname, sizeof(sympltname),
+ "%s@plt", elf_sym__name(&sym, symstrs));
+
+ f = symbol__new(plt_offset, shdr_plt.sh_entsize,
+ STB_GLOBAL, sympltname);
+ if (!f)
+ goto out_elf_end;
+
+ if (filter && filter(map, f))
+ symbol__delete(f);
+ else {
+ symbols__insert(&dso->symbols[map->type], f);
+ ++nr;
+ }
+ }
+ }
+
+ err = 0;
+out_elf_end:
+ elf_end(elf);
+out_close:
+ close(fd);
+
+ if (err == 0)
+ return nr;
+out:
+ pr_debug("%s: problems reading %s PLT info.\n",
+ __func__, dso->long_name);
+ return 0;
+}
+
+static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
+{
+ switch (type) {
+ case MAP__FUNCTION:
+ return elf_sym__is_function(sym);
+ case MAP__VARIABLE:
+ return elf_sym__is_object(sym);
+ default:
+ return false;
+ }
+}
+
+static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
+ enum map_type type)
+{
+ switch (type) {
+ case MAP__FUNCTION:
+ return elf_sec__is_text(shdr, secstrs);
+ case MAP__VARIABLE:
+ return elf_sec__is_data(shdr, secstrs);
+ default:
+ return false;
+ }
+}
+
+static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
+{
+ Elf_Scn *sec = NULL;
+ GElf_Shdr shdr;
+ size_t cnt = 1;
+
+ while ((sec = elf_nextscn(elf, sec)) != NULL) {
+ gelf_getshdr(sec, &shdr);
+
+ if ((addr >= shdr.sh_addr) &&
+ (addr < (shdr.sh_addr + shdr.sh_size)))
+ return cnt;
+
+ ++cnt;
+ }
+
+ return -1;
+}
+
+static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
+ int fd, symbol_filter_t filter, int kmodule,
+ int want_symtab)
+{
+ struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
+ struct map *curr_map = map;
+ struct dso *curr_dso = dso;
+ Elf_Data *symstrs, *secstrs;
+ uint32_t nr_syms;
+ int err = -1;
+ uint32_t idx;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr, opdshdr;
+ Elf_Data *syms, *opddata = NULL;
+ GElf_Sym sym;
+ Elf_Scn *sec, *sec_strndx, *opdsec;
+ Elf *elf;
+ int nr = 0;
+ size_t opdidx = 0;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL) {
+ pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
+ goto out_close;
+ }
+
+ if (gelf_getehdr(elf, &ehdr) == NULL) {
+ pr_debug("%s: cannot get elf header.\n", __func__);
+ goto out_elf_end;
+ }
+
+ /* Always reject images with a mismatched build-id: */
+ if (dso->has_build_id) {
+ u8 build_id[BUILD_ID_SIZE];
+
+ if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
+ goto out_elf_end;
+
+ if (!dso__build_id_equal(dso, build_id))
+ goto out_elf_end;
+ }
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL);
+ if (sec == NULL) {
+ if (want_symtab)
+ goto out_elf_end;
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL);
+ if (sec == NULL)
+ goto out_elf_end;
+ }
+
+ opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx);
+ if (opdshdr.sh_type != SHT_PROGBITS)
+ opdsec = NULL;
+ if (opdsec)
+ opddata = elf_rawdata(opdsec, NULL);
+
+ syms = elf_getdata(sec, NULL);
+ if (syms == NULL)
+ goto out_elf_end;
+
+ sec = elf_getscn(elf, shdr.sh_link);
+ if (sec == NULL)
+ goto out_elf_end;
+
+ symstrs = elf_getdata(sec, NULL);
+ if (symstrs == NULL)
+ goto out_elf_end;
+
+ sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
+ if (sec_strndx == NULL)
+ goto out_elf_end;
+
+ secstrs = elf_getdata(sec_strndx, NULL);
+ if (secstrs == NULL)
+ goto out_elf_end;
+
+ nr_syms = shdr.sh_size / shdr.sh_entsize;
+
+ memset(&sym, 0, sizeof(sym));
+ if (dso->kernel == DSO_TYPE_USER) {
+ dso->adjust_symbols = (ehdr.e_type == ET_EXEC ||
+ elf_section_by_name(elf, &ehdr, &shdr,
+ ".gnu.prelink_undo",
+ NULL) != NULL);
+ } else {
+ dso->adjust_symbols = 0;
+ }
+ elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
+ struct symbol *f;
+ const char *elf_name = elf_sym__name(&sym, symstrs);
+ char *demangled = NULL;
+ int is_label = elf_sym__is_label(&sym);
+ const char *section_name;
+
+ if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
+ strcmp(elf_name, kmap->ref_reloc_sym->name) == 0)
+ kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+
+ if (!is_label && !elf_sym__is_a(&sym, map->type))
+ continue;
+
+ /* Reject ARM ELF "mapping symbols": these aren't unique and
+		 * don't identify functions, so they will confuse the profile
+ * output: */
+ if (ehdr.e_machine == EM_ARM) {
+ if (!strcmp(elf_name, "$a") ||
+ !strcmp(elf_name, "$d") ||
+ !strcmp(elf_name, "$t"))
+ continue;
+ }
+
+ if (opdsec && sym.st_shndx == opdidx) {
+ u32 offset = sym.st_value - opdshdr.sh_addr;
+ u64 *opd = opddata->d_buf + offset;
+ sym.st_value = *opd;
+ sym.st_shndx = elf_addr_to_index(elf, sym.st_value);
+ }
+
+ sec = elf_getscn(elf, sym.st_shndx);
+ if (!sec)
+ goto out_elf_end;
+
+ gelf_getshdr(sec, &shdr);
+
+ if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
+ continue;
+
+ section_name = elf_sec__name(&shdr, secstrs);
+
+ /* On ARM, symbols for thumb functions have 1 added to
+ * the symbol address as a flag - remove it */
+ if ((ehdr.e_machine == EM_ARM) &&
+ (map->type == MAP__FUNCTION) &&
+ (sym.st_value & 1))
+ --sym.st_value;
+
+ if (dso->kernel != DSO_TYPE_USER || kmodule) {
+ char dso_name[PATH_MAX];
+
+ if (strcmp(section_name,
+ (curr_dso->short_name +
+ dso->short_name_len)) == 0)
+ goto new_symbol;
+
+ if (strcmp(section_name, ".text") == 0) {
+ curr_map = map;
+ curr_dso = dso;
+ goto new_symbol;
+ }
+
+ snprintf(dso_name, sizeof(dso_name),
+ "%s%s", dso->short_name, section_name);
+
+ curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
+ if (curr_map == NULL) {
+ u64 start = sym.st_value;
+
+ if (kmodule)
+ start += map->start + shdr.sh_offset;
+
+ curr_dso = dso__new(dso_name);
+ if (curr_dso == NULL)
+ goto out_elf_end;
+ curr_dso->kernel = dso->kernel;
+ curr_dso->long_name = dso->long_name;
+ curr_dso->long_name_len = dso->long_name_len;
+ curr_map = map__new2(start, curr_dso,
+ map->type);
+ if (curr_map == NULL) {
+ dso__delete(curr_dso);
+ goto out_elf_end;
+ }
+ curr_map->map_ip = identity__map_ip;
+ curr_map->unmap_ip = identity__map_ip;
+ curr_dso->symtab_type = dso->symtab_type;
+ map_groups__insert(kmap->kmaps, curr_map);
+ dsos__add(&dso->node, curr_dso);
+ dso__set_loaded(curr_dso, map->type);
+ } else
+ curr_dso = curr_map->dso;
+
+ goto new_symbol;
+ }
+
+ if (curr_dso->adjust_symbols) {
+ pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
+ "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
+ (u64)sym.st_value, (u64)shdr.sh_addr,
+ (u64)shdr.sh_offset);
+ sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+ }
+ /*
+		 * We need to figure out if the object was created from C++ sources;
+		 * DWARF DW_compile_unit has this, but we don't always have access
+ * to it...
+ */
+ demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI);
+ if (demangled != NULL)
+ elf_name = demangled;
+new_symbol:
+ f = symbol__new(sym.st_value, sym.st_size,
+ GELF_ST_BIND(sym.st_info), elf_name);
+ free(demangled);
+ if (!f)
+ goto out_elf_end;
+
+ if (filter && filter(curr_map, f))
+ symbol__delete(f);
+ else {
+ symbols__insert(&curr_dso->symbols[curr_map->type], f);
+ nr++;
+ }
+ }
+
+ /*
+ * For misannotated, zeroed, ASM function sizes.
+ */
+ if (nr > 0) {
+ symbols__fixup_duplicate(&dso->symbols[map->type]);
+ symbols__fixup_end(&dso->symbols[map->type]);
+ if (kmap) {
+ /*
+ * We need to fixup this here too because we create new
+ * maps here, for things like vsyscall sections.
+ */
+ __map_groups__fixup_end(kmap->kmaps, map->type);
+ }
+ }
+ err = nr;
+out_elf_end:
+ elf_end(elf);
+out_close:
+ return err;
+}
+
+static bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
+{
+ return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
+}
+
+bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
+{
+ bool have_build_id = false;
+ struct dso *pos;
+
+ list_for_each_entry(pos, head, node) {
+ if (with_hits && !pos->hit)
+ continue;
+ if (pos->has_build_id) {
+ have_build_id = true;
+ continue;
+ }
+ if (filename__read_build_id(pos->long_name, pos->build_id,
+ sizeof(pos->build_id)) > 0) {
+ have_build_id = true;
+ pos->has_build_id = true;
+ }
+ }
+
+ return have_build_id;
+}
+
+/*
+ * Align offset to 4 bytes as needed for note name and descriptor data.
+ */
+#define NOTE_ALIGN(n) (((n) + 3) & -4U)
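+/* e.g. NOTE_ALIGN(4) == 4, NOTE_ALIGN(5) == 8 */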
+
+static int elf_read_build_id(Elf *elf, void *bf, size_t size)
+{
+ int err = -1;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+ Elf_Data *data;
+ Elf_Scn *sec;
+ Elf_Kind ek;
+ void *ptr;
+
+ if (size < BUILD_ID_SIZE)
+ goto out;
+
+ ek = elf_kind(elf);
+ if (ek != ELF_K_ELF)
+ goto out;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL) {
+ pr_err("%s: cannot get elf header.\n", __func__);
+ goto out;
+ }
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".note.gnu.build-id", NULL);
+ if (sec == NULL) {
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".notes", NULL);
+ if (sec == NULL)
+ goto out;
+ }
+
+ data = elf_getdata(sec, NULL);
+ if (data == NULL)
+ goto out;
+
+ ptr = data->d_buf;
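+	/*
+	 * Walk the ELF note records: each is a GElf_Nhdr followed by the
+	 * 4-byte padded name ("GNU" for build-ids) and the padded descriptor
+	 * holding the id bytes.
+	 */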
+ while (ptr < (data->d_buf + data->d_size)) {
+ GElf_Nhdr *nhdr = ptr;
+ size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
+ descsz = NOTE_ALIGN(nhdr->n_descsz);
+ const char *name;
+
+ ptr += sizeof(*nhdr);
+ name = ptr;
+ ptr += namesz;
+ if (nhdr->n_type == NT_GNU_BUILD_ID &&
+ nhdr->n_namesz == sizeof("GNU")) {
+ if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
+ size_t sz = min(size, descsz);
+ memcpy(bf, ptr, sz);
+ memset(bf + sz, 0, size - sz);
+ err = descsz;
+ break;
+ }
+ }
+ ptr += descsz;
+ }
+
+out:
+ return err;
+}
+
+int filename__read_build_id(const char *filename, void *bf, size_t size)
+{
+ int fd, err = -1;
+ Elf *elf;
+
+ if (size < BUILD_ID_SIZE)
+ goto out;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ goto out;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL) {
+ pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
+ goto out_close;
+ }
+
+ err = elf_read_build_id(elf, bf, size);
+
+ elf_end(elf);
+out_close:
+ close(fd);
+out:
+ return err;
+}
+
+int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
+{
+ int fd, err = -1;
+
+ if (size < BUILD_ID_SIZE)
+ goto out;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ goto out;
+
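+	/*
+	 * The sysfs notes file is a raw stream of ELF note records with no
+	 * ELF header, so parse GElf_Nhdr entries straight off the file.
+	 */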
+ while (1) {
+ char bf[BUFSIZ];
+ GElf_Nhdr nhdr;
+ size_t namesz, descsz;
+
+ if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
+ break;
+
+ namesz = NOTE_ALIGN(nhdr.n_namesz);
+ descsz = NOTE_ALIGN(nhdr.n_descsz);
+ if (nhdr.n_type == NT_GNU_BUILD_ID &&
+ nhdr.n_namesz == sizeof("GNU")) {
+ if (read(fd, bf, namesz) != (ssize_t)namesz)
+ break;
+ if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
+ size_t sz = min(descsz, size);
+ if (read(fd, build_id, sz) == (ssize_t)sz) {
+ memset(build_id + sz, 0, size - sz);
+ err = 0;
+ break;
+ }
+ } else if (read(fd, bf, descsz) != (ssize_t)descsz)
+ break;
+ } else {
+ int n = namesz + descsz;
+ if (read(fd, bf, n) != n)
+ break;
+ }
+ }
+ close(fd);
+out:
+ return err;
+}
+
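+/*
+ * Single-character code describing where this dso's symbol table came from;
+ * '!' means no symtab was found at all.
+ */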
+char dso__symtab_origin(const struct dso *dso)
+{
+ static const char origin[] = {
+ [SYMTAB__KALLSYMS] = 'k',
+ [SYMTAB__JAVA_JIT] = 'j',
+ [SYMTAB__BUILD_ID_CACHE] = 'B',
+ [SYMTAB__FEDORA_DEBUGINFO] = 'f',
+ [SYMTAB__UBUNTU_DEBUGINFO] = 'u',
+ [SYMTAB__BUILDID_DEBUGINFO] = 'b',
+ [SYMTAB__SYSTEM_PATH_DSO] = 'd',
+ [SYMTAB__SYSTEM_PATH_KMODULE] = 'K',
+ [SYMTAB__GUEST_KALLSYMS] = 'g',
+ [SYMTAB__GUEST_KMODULE] = 'G',
+ };
+
+ if (dso == NULL || dso->symtab_type == SYMTAB__NOT_FOUND)
+ return '!';
+ return origin[dso->symtab_type];
+}
+
+int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
+{
+ int size = PATH_MAX;
+ char *name;
+ int ret = -1;
+ int fd;
+ struct machine *machine;
+ const char *root_dir;
+ int want_symtab;
+
+ dso__set_loaded(dso, map->type);
+
+ if (dso->kernel == DSO_TYPE_KERNEL)
+ return dso__load_kernel_sym(dso, map, filter);
+ else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ return dso__load_guest_kernel_sym(dso, map, filter);
+
+ if (map->groups && map->groups->machine)
+ machine = map->groups->machine;
+ else
+ machine = NULL;
+
+ name = malloc(size);
+ if (!name)
+ return -1;
+
+ dso->adjust_symbols = 0;
+
+ if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
+ struct stat st;
+
+ if (lstat(dso->name, &st) < 0)
+ return -1;
+
+ if (st.st_uid && (st.st_uid != geteuid())) {
+ pr_warning("File %s not owned by current user or root, "
+ "ignoring it.\n", dso->name);
+ return -1;
+ }
+
+ ret = dso__load_perf_map(dso, map, filter);
+ dso->symtab_type = ret > 0 ? SYMTAB__JAVA_JIT :
+ SYMTAB__NOT_FOUND;
+ return ret;
+ }
+
+ /* Iterate over candidate debug images.
+ * On the first pass, only load images if they have a full symtab.
+ * Failing that, do a second pass where we accept .dynsym also
+ */
+ want_symtab = 1;
+restart:
+ for (dso->symtab_type = SYMTAB__BUILD_ID_CACHE;
+ dso->symtab_type != SYMTAB__NOT_FOUND;
+ dso->symtab_type++) {
+ switch (dso->symtab_type) {
+ case SYMTAB__BUILD_ID_CACHE:
+ /* skip the locally configured cache if a symfs is given */
+ if (symbol_conf.symfs[0] ||
+ (dso__build_id_filename(dso, name, size) == NULL)) {
+ continue;
+ }
+ break;
+ case SYMTAB__FEDORA_DEBUGINFO:
+ snprintf(name, size, "%s/usr/lib/debug%s.debug",
+ symbol_conf.symfs, dso->long_name);
+ break;
+ case SYMTAB__UBUNTU_DEBUGINFO:
+ snprintf(name, size, "%s/usr/lib/debug%s",
+ symbol_conf.symfs, dso->long_name);
+ break;
+ case SYMTAB__BUILDID_DEBUGINFO: {
+ char build_id_hex[BUILD_ID_SIZE * 2 + 1];
+
+ if (!dso->has_build_id)
+ continue;
+
+ build_id__sprintf(dso->build_id,
+ sizeof(dso->build_id),
+ build_id_hex);
+ snprintf(name, size,
+ "%s/usr/lib/debug/.build-id/%.2s/%s.debug",
+ symbol_conf.symfs, build_id_hex, build_id_hex + 2);
+ }
+ break;
+ case SYMTAB__SYSTEM_PATH_DSO:
+ snprintf(name, size, "%s%s",
+ symbol_conf.symfs, dso->long_name);
+ break;
+ case SYMTAB__GUEST_KMODULE:
+ if (map->groups && machine)
+ root_dir = machine->root_dir;
+ else
+ root_dir = "";
+ snprintf(name, size, "%s%s%s", symbol_conf.symfs,
+ root_dir, dso->long_name);
+ break;
+
+ case SYMTAB__SYSTEM_PATH_KMODULE:
+ snprintf(name, size, "%s%s", symbol_conf.symfs,
+ dso->long_name);
+ break;
+ default:;
+ }
+
+ /* Name is now the name of the next image to try */
+ fd = open(name, O_RDONLY);
+ if (fd < 0)
+ continue;
+
+ ret = dso__load_sym(dso, map, name, fd, filter, 0,
+ want_symtab);
+ close(fd);
+
+ /*
+ * Some people seem to have debuginfo files _WITHOUT_ debug
+ * info!?!?
+ */
+ if (!ret)
+ continue;
+
+ if (ret > 0) {
+ int nr_plt;
+
+ nr_plt = dso__synthesize_plt_symbols(dso, name, map, filter);
+ if (nr_plt > 0)
+ ret += nr_plt;
+ break;
+ }
+ }
+
+ /*
+ * If we wanted a full symtab but no image had one,
+ * relax our requirements and repeat the search.
+ */
+ if (ret <= 0 && want_symtab) {
+ want_symtab = 0;
+ goto restart;
+ }
+
+ free(name);
+ if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
+ return 0;
+ return ret;
+}
+
+struct map *map_groups__find_by_name(struct map_groups *mg,
+ enum map_type type, const char *name)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
+ struct map *map = rb_entry(nd, struct map, rb_node);
+
+ if (map->dso && strcmp(map->dso->short_name, name) == 0)
+ return map;
+ }
+
+ return NULL;
+}
+
+static int dso__kernel_module_get_build_id(struct dso *dso,
+ const char *root_dir)
+{
+ char filename[PATH_MAX];
+ /*
+ * kernel module short names are of the form "[module]" and
+ * we need just "module" here.
+ */
+ const char *name = dso->short_name + 1;
+
+ snprintf(filename, sizeof(filename),
+ "%s/sys/module/%.*s/notes/.note.gnu.build-id",
+ root_dir, (int)strlen(name) - 1, name);
+
+ if (sysfs__read_build_id(filename, dso->build_id,
+ sizeof(dso->build_id)) == 0)
+ dso->has_build_id = true;
+
+ return 0;
+}
+
+static int map_groups__set_modules_path_dir(struct map_groups *mg,
+ const char *dir_name)
+{
+ struct dirent *dent;
+ DIR *dir = opendir(dir_name);
+ int ret = 0;
+
+ if (!dir) {
+ pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
+ return -1;
+ }
+
+ while ((dent = readdir(dir)) != NULL) {
+ char path[PATH_MAX];
+ struct stat st;
+
+		/* sshfs might return bad dent->d_type, so we have to stat */
+ snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
+ if (stat(path, &st))
+ continue;
+
+ if (S_ISDIR(st.st_mode)) {
+ if (!strcmp(dent->d_name, ".") ||
+ !strcmp(dent->d_name, ".."))
+ continue;
+
+ ret = map_groups__set_modules_path_dir(mg, path);
+ if (ret < 0)
+ goto out;
+ } else {
+ char *dot = strrchr(dent->d_name, '.'),
+ dso_name[PATH_MAX];
+ struct map *map;
+ char *long_name;
+
+ if (dot == NULL || strcmp(dot, ".ko"))
+ continue;
+ snprintf(dso_name, sizeof(dso_name), "[%.*s]",
+ (int)(dot - dent->d_name), dent->d_name);
+
+ strxfrchar(dso_name, '-', '_');
+ map = map_groups__find_by_name(mg, MAP__FUNCTION,
+ dso_name);
+ if (map == NULL)
+ continue;
+
+ long_name = strdup(path);
+ if (long_name == NULL) {
+ ret = -1;
+ goto out;
+ }
+ dso__set_long_name(map->dso, long_name);
+ map->dso->lname_alloc = 1;
+ dso__kernel_module_get_build_id(map->dso, "");
+ }
+ }
+
+out:
+ closedir(dir);
+ return ret;
+}
+
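+/*
+ * /proc/version starts with "Linux version <release> ..."; return a
+ * strdup()'ed copy of just <release>, or NULL on failure.
+ */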
+static char *get_kernel_version(const char *root_dir)
+{
+ char version[PATH_MAX];
+ FILE *file;
+ char *name, *tmp;
+ const char *prefix = "Linux version ";
+
+ sprintf(version, "%s/proc/version", root_dir);
+ file = fopen(version, "r");
+ if (!file)
+ return NULL;
+
+ version[0] = '\0';
+ tmp = fgets(version, sizeof(version), file);
+ fclose(file);
+
+ name = strstr(version, prefix);
+ if (!name)
+ return NULL;
+ name += strlen(prefix);
+ tmp = strchr(name, ' ');
+ if (tmp)
+ *tmp = '\0';
+
+ return strdup(name);
+}
+
+static int machine__set_modules_path(struct machine *machine)
+{
+ char *version;
+ char modules_path[PATH_MAX];
+
+ version = get_kernel_version(machine->root_dir);
+ if (!version)
+ return -1;
+
+ snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
+ machine->root_dir, version);
+ free(version);
+
+ return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
+}
+
+/*
+ * Constructor variant for modules (where we know from /proc/modules where
+ * they are loaded) and for vmlinux, where only after we load all the
+ * symbols we'll know where it starts and ends.
+ */
+static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
+{
+ struct map *map = calloc(1, (sizeof(*map) +
+ (dso->kernel ? sizeof(struct kmap) : 0)));
+ if (map != NULL) {
+ /*
+ * ->end will be filled after we load all the symbols
+ */
+ map__init(map, type, start, 0, 0, dso);
+ }
+
+ return map;
+}
+
+struct map *machine__new_module(struct machine *machine, u64 start,
+ const char *filename)
+{
+ struct map *map;
+ struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
+
+ if (dso == NULL)
+ return NULL;
+
+ map = map__new2(start, dso, MAP__FUNCTION);
+ if (map == NULL)
+ return NULL;
+
+ if (machine__is_host(machine))
+ dso->symtab_type = SYMTAB__SYSTEM_PATH_KMODULE;
+ else
+ dso->symtab_type = SYMTAB__GUEST_KMODULE;
+ map_groups__insert(&machine->kmaps, map);
+ return map;
+}
+
+static int machine__create_modules(struct machine *machine)
+{
+ char *line = NULL;
+ size_t n;
+ FILE *file;
+ struct map *map;
+ const char *modules;
+ char path[PATH_MAX];
+
+ if (machine__is_default_guest(machine))
+ modules = symbol_conf.default_guest_modules;
+ else {
+ sprintf(path, "%s/proc/modules", machine->root_dir);
+ modules = path;
+ }
+
+	if (symbol__restricted_filename(modules, "/proc/modules"))
+ return -1;
+
+ file = fopen(modules, "r");
+ if (file == NULL)
+ return -1;
+
+ while (!feof(file)) {
+ char name[PATH_MAX];
+ u64 start;
+ char *sep;
+ int line_len;
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0)
+ break;
+
+ if (!line)
+ goto out_failure;
+
+ line[--line_len] = '\0'; /* \n */
+
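+		/*
+		 * /proc/modules lines look like "name size refcnt deps state 0xaddr":
+		 * the load address follows the last 'x', the module name ends at
+		 * the first space.
+		 */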
+ sep = strrchr(line, 'x');
+ if (sep == NULL)
+ continue;
+
+ hex2u64(sep + 1, &start);
+
+ sep = strchr(line, ' ');
+ if (sep == NULL)
+ continue;
+
+ *sep = '\0';
+
+ snprintf(name, sizeof(name), "[%s]", line);
+ map = machine__new_module(machine, start, name);
+ if (map == NULL)
+ goto out_delete_line;
+ dso__kernel_module_get_build_id(map->dso, machine->root_dir);
+ }
+
+ free(line);
+ fclose(file);
+
+ return machine__set_modules_path(machine);
+
+out_delete_line:
+ free(line);
+out_failure:
+ return -1;
+}
+
+int dso__load_vmlinux(struct dso *dso, struct map *map,
+ const char *vmlinux, symbol_filter_t filter)
+{
+ int err = -1, fd;
+ char symfs_vmlinux[PATH_MAX];
+
+ snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
+ symbol_conf.symfs, vmlinux);
+ fd = open(symfs_vmlinux, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ dso__set_long_name(dso, (char *)vmlinux);
+ dso__set_loaded(dso, map->type);
+ err = dso__load_sym(dso, map, symfs_vmlinux, fd, filter, 0, 0);
+ close(fd);
+
+ if (err > 0)
+ pr_debug("Using %s for symbols\n", symfs_vmlinux);
+
+ return err;
+}
+
+int dso__load_vmlinux_path(struct dso *dso, struct map *map,
+ symbol_filter_t filter)
+{
+ int i, err = 0;
+ char *filename;
+
+ pr_debug("Looking at the vmlinux_path (%d entries long)\n",
+ vmlinux_path__nr_entries + 1);
+
+ filename = dso__build_id_filename(dso, NULL, 0);
+ if (filename != NULL) {
+ err = dso__load_vmlinux(dso, map, filename, filter);
+ if (err > 0) {
+ dso__set_long_name(dso, filename);
+ goto out;
+ }
+ free(filename);
+ }
+
+ for (i = 0; i < vmlinux_path__nr_entries; ++i) {
+ err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter);
+ if (err > 0) {
+ dso__set_long_name(dso, strdup(vmlinux_path[i]));
+ break;
+ }
+ }
+out:
+ return err;
+}
+
+static int dso__load_kernel_sym(struct dso *dso, struct map *map,
+ symbol_filter_t filter)
+{
+ int err;
+ const char *kallsyms_filename = NULL;
+ char *kallsyms_allocated_filename = NULL;
+ /*
+ * Step 1: if the user specified a kallsyms or vmlinux filename, use
+ * it and only it, reporting errors to the user if it cannot be used.
+ *
+	 * For instance, try to analyse an ARM perf.data file _without_ a
+	 * build-id, or if the user specifies the wrong path to the right
+	 * vmlinux file, obviously we can't fall back to another vmlinux (an
+	 * x86_64 one, on the machine where analysis is being performed, say),
+	 * or worse, /proc/kallsyms.
+ *
+ * If the specified file _has_ a build-id and there is a build-id
+ * section in the perf.data file, we will still do the expected
+ * validation in dso__load_vmlinux and will bail out if they don't
+ * match.
+ */
+ if (symbol_conf.kallsyms_name != NULL) {
+ kallsyms_filename = symbol_conf.kallsyms_name;
+ goto do_kallsyms;
+ }
+
+ if (symbol_conf.vmlinux_name != NULL) {
+ err = dso__load_vmlinux(dso, map,
+ symbol_conf.vmlinux_name, filter);
+ if (err > 0) {
+ dso__set_long_name(dso,
+ strdup(symbol_conf.vmlinux_name));
+ goto out_fixup;
+ }
+ return err;
+ }
+
+ if (vmlinux_path != NULL) {
+ err = dso__load_vmlinux_path(dso, map, filter);
+ if (err > 0)
+ goto out_fixup;
+ }
+
+ /* do not try local files if a symfs was given */
+ if (symbol_conf.symfs[0] != 0)
+ return -1;
+
+ /*
+	 * If the kernel DSO was created when processing the build-id header
+	 * table, we have a build-id, so check whether it matches the running
+	 * kernel, and use /proc/kallsyms if it does.
+ */
+ if (dso->has_build_id) {
+ u8 kallsyms_build_id[BUILD_ID_SIZE];
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id,
+ sizeof(kallsyms_build_id)) == 0) {
+ if (dso__build_id_equal(dso, kallsyms_build_id)) {
+ kallsyms_filename = "/proc/kallsyms";
+ goto do_kallsyms;
+ }
+ }
+ /*
+	 * Now check whether we have it in the build-id cache in
+ * $HOME/.debug/[kernel.kallsyms].
+ */
+ build_id__sprintf(dso->build_id, sizeof(dso->build_id),
+ sbuild_id);
+
+ if (asprintf(&kallsyms_allocated_filename,
+ "%s/.debug/[kernel.kallsyms]/%s",
+ getenv("HOME"), sbuild_id) == -1) {
+ pr_err("Not enough memory for kallsyms file lookup\n");
+ return -1;
+ }
+
+ kallsyms_filename = kallsyms_allocated_filename;
+
+ if (access(kallsyms_filename, F_OK)) {
+ pr_err("No kallsyms or vmlinux with build-id %s "
+ "was found\n", sbuild_id);
+ free(kallsyms_allocated_filename);
+ return -1;
+ }
+ } else {
+ /*
+ * Last resort, if we don't have a build-id and couldn't find
+ * any vmlinux file, try the running kernel kallsyms table.
+ */
+ kallsyms_filename = "/proc/kallsyms";
+ }
+
+do_kallsyms:
+ err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
+ if (err > 0)
+ pr_debug("Using %s for symbols\n", kallsyms_filename);
+ free(kallsyms_allocated_filename);
+
+ if (err > 0) {
+out_fixup:
+ if (kallsyms_filename != NULL)
+ dso__set_long_name(dso, strdup("[kernel.kallsyms]"));
+ map__fixup_start(map);
+ map__fixup_end(map);
+ }
+
+ return err;
+}
+
+static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
+ symbol_filter_t filter)
+{
+ int err;
+ const char *kallsyms_filename = NULL;
+ struct machine *machine;
+ char path[PATH_MAX];
+
+ if (!map->groups) {
+		pr_debug("Guest kernel map has no pointer to groups\n");
+ return -1;
+ }
+ machine = map->groups->machine;
+
+ if (machine__is_default_guest(machine)) {
+ /*
+		 * If the user specified a vmlinux filename, use it and only it,
+		 * reporting errors to the user if it cannot be used. Otherwise
+		 * fall back to the guest kallsyms file given on the command line.
+ */
+ if (symbol_conf.default_guest_vmlinux_name != NULL) {
+ err = dso__load_vmlinux(dso, map,
+ symbol_conf.default_guest_vmlinux_name, filter);
+ goto out_try_fixup;
+ }
+
+ kallsyms_filename = symbol_conf.default_guest_kallsyms;
+ if (!kallsyms_filename)
+ return -1;
+ } else {
+ sprintf(path, "%s/proc/kallsyms", machine->root_dir);
+ kallsyms_filename = path;
+ }
+
+ err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
+ if (err > 0)
+ pr_debug("Using %s for symbols\n", kallsyms_filename);
+
+out_try_fixup:
+ if (err > 0) {
+ if (kallsyms_filename != NULL) {
+ machine__mmap_name(machine, path, sizeof(path));
+ dso__set_long_name(dso, strdup(path));
+ }
+ map__fixup_start(map);
+ map__fixup_end(map);
+ }
+
+ return err;
+}
+
+static void dsos__add(struct list_head *head, struct dso *dso)
+{
+ list_add_tail(&dso->node, head);
+}
+
+static struct dso *dsos__find(struct list_head *head, const char *name)
+{
+ struct dso *pos;
+
+ list_for_each_entry(pos, head, node)
+ if (strcmp(pos->long_name, name) == 0)
+ return pos;
+ return NULL;
+}
+
+struct dso *__dsos__findnew(struct list_head *head, const char *name)
+{
+ struct dso *dso = dsos__find(head, name);
+
+ if (!dso) {
+ dso = dso__new(name);
+ if (dso != NULL) {
+ dsos__add(head, dso);
+ dso__set_basename(dso);
+ }
+ }
+
+ return dso;
+}
+
+size_t __dsos__fprintf(struct list_head *head, FILE *fp)
+{
+ struct dso *pos;
+ size_t ret = 0;
+
+ list_for_each_entry(pos, head, node) {
+ int i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ ret += dso__fprintf(pos, i, fp);
+ }
+
+ return ret;
+}
+
+size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp)
+{
+ struct rb_node *nd;
+ size_t ret = 0;
+
+ for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret += __dsos__fprintf(&pos->kernel_dsos, fp);
+ ret += __dsos__fprintf(&pos->user_dsos, fp);
+ }
+
+ return ret;
+}
+
+static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
+ bool with_hits)
+{
+ struct dso *pos;
+ size_t ret = 0;
+
+ list_for_each_entry(pos, head, node) {
+ if (with_hits && !pos->hit)
+ continue;
+ ret += dso__fprintf_buildid(pos, fp);
+ ret += fprintf(fp, " %s\n", pos->long_name);
+ }
+ return ret;
+}
+
+size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
+ bool with_hits)
+{
+ return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, with_hits) +
+ __dsos__fprintf_buildid(&machine->user_dsos, fp, with_hits);
+}
+
+size_t machines__fprintf_dsos_buildid(struct rb_root *machines,
+ FILE *fp, bool with_hits)
+{
+ struct rb_node *nd;
+ size_t ret = 0;
+
+ for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret += machine__fprintf_dsos_buildid(pos, fp, with_hits);
+ }
+ return ret;
+}
+
+static struct dso*
+dso__kernel_findnew(struct machine *machine, const char *name,
+ const char *short_name, int dso_type)
+{
+ /*
+ * The kernel dso could be created by build_id processing.
+ */
+ struct dso *dso = __dsos__findnew(&machine->kernel_dsos, name);
+
+ /*
+ * We need to run this in all cases, since during the build_id
+ * processing we had no idea this was the kernel dso.
+ */
+ if (dso != NULL) {
+ dso__set_short_name(dso, short_name);
+ dso->kernel = dso_type;
+ }
+
+ return dso;
+}
+
+void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
+{
+ char path[PATH_MAX];
+
+ if (machine__is_default_guest(machine))
+ return;
+ sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
+ if (sysfs__read_build_id(path, dso->build_id,
+ sizeof(dso->build_id)) == 0)
+ dso->has_build_id = true;
+}
+
+static struct dso *machine__get_kernel(struct machine *machine)
+{
+ const char *vmlinux_name = NULL;
+ struct dso *kernel;
+
+ if (machine__is_host(machine)) {
+ vmlinux_name = symbol_conf.vmlinux_name;
+ if (!vmlinux_name)
+ vmlinux_name = "[kernel.kallsyms]";
+
+ kernel = dso__kernel_findnew(machine, vmlinux_name,
+ "[kernel]",
+ DSO_TYPE_KERNEL);
+ } else {
+ char bf[PATH_MAX];
+
+ if (machine__is_default_guest(machine))
+ vmlinux_name = symbol_conf.default_guest_vmlinux_name;
+ if (!vmlinux_name)
+ vmlinux_name = machine__mmap_name(machine, bf,
+ sizeof(bf));
+
+ kernel = dso__kernel_findnew(machine, vmlinux_name,
+ "[guest.kernel]",
+ DSO_TYPE_GUEST_KERNEL);
+ }
+
+ if (kernel != NULL && (!kernel->has_build_id))
+ dso__read_running_kernel_build_id(kernel, machine);
+
+ return kernel;
+}
+
+struct process_args {
+ u64 start;
+};
+
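+/*
+ * kallsyms__parse() callback: stop at the first symbol whose name has no
+ * "[module]" annotation, i.e. the first symbol that belongs to the kernel
+ * image itself.
+ */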
+static int symbol__in_kernel(void *arg, const char *name,
+ char type __used, u64 start, u64 end __used)
+{
+ struct process_args *args = arg;
+
+ if (strchr(name, '['))
+ return 0;
+
+ args->start = start;
+ return 1;
+}
+
+/* Figure out the start address of kernel map from /proc/kallsyms */
+static u64 machine__get_kernel_start_addr(struct machine *machine)
+{
+ const char *filename;
+ char path[PATH_MAX];
+ struct process_args args;
+
+ if (machine__is_host(machine)) {
+ filename = "/proc/kallsyms";
+ } else {
+ if (machine__is_default_guest(machine))
+ filename = (char *)symbol_conf.default_guest_kallsyms;
+ else {
+ sprintf(path, "%s/proc/kallsyms", machine->root_dir);
+ filename = path;
+ }
+ }
+
+ if (symbol__restricted_filename(filename, "/proc/kallsyms"))
+ return 0;
+
+ if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
+ return 0;
+
+ return args.start;
+}
+
+int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
+{
+ enum map_type type;
+ u64 start = machine__get_kernel_start_addr(machine);
+
+ for (type = 0; type < MAP__NR_TYPES; ++type) {
+ struct kmap *kmap;
+
+ machine->vmlinux_maps[type] = map__new2(start, kernel, type);
+ if (machine->vmlinux_maps[type] == NULL)
+ return -1;
+
+ machine->vmlinux_maps[type]->map_ip =
+ machine->vmlinux_maps[type]->unmap_ip =
+ identity__map_ip;
+ kmap = map__kmap(machine->vmlinux_maps[type]);
+ kmap->kmaps = &machine->kmaps;
+ map_groups__insert(&machine->kmaps,
+ machine->vmlinux_maps[type]);
+ }
+
+ return 0;
+}
+
+void machine__destroy_kernel_maps(struct machine *machine)
+{
+ enum map_type type;
+
+ for (type = 0; type < MAP__NR_TYPES; ++type) {
+ struct kmap *kmap;
+
+ if (machine->vmlinux_maps[type] == NULL)
+ continue;
+
+ kmap = map__kmap(machine->vmlinux_maps[type]);
+ map_groups__remove(&machine->kmaps,
+ machine->vmlinux_maps[type]);
+ if (kmap->ref_reloc_sym) {
+ /*
+ * ref_reloc_sym is shared among all maps, so free just
+ * on one of them.
+ */
+ if (type == MAP__FUNCTION) {
+ free((char *)kmap->ref_reloc_sym->name);
+ kmap->ref_reloc_sym->name = NULL;
+ free(kmap->ref_reloc_sym);
+ }
+ kmap->ref_reloc_sym = NULL;
+ }
+
+ map__delete(machine->vmlinux_maps[type]);
+ machine->vmlinux_maps[type] = NULL;
+ }
+}
+
+int machine__create_kernel_maps(struct machine *machine)
+{
+ struct dso *kernel = machine__get_kernel(machine);
+
+ if (kernel == NULL ||
+ __machine__create_kernel_maps(machine, kernel) < 0)
+ return -1;
+
+ if (symbol_conf.use_modules && machine__create_modules(machine) < 0)
+ pr_debug("Problems creating module maps, continuing anyway...\n");
+ /*
+ * Now that we have all the maps created, just set the ->end of them:
+ */
+ map_groups__fixup_end(&machine->kmaps);
+ return 0;
+}
+
+static void vmlinux_path__exit(void)
+{
+ while (--vmlinux_path__nr_entries >= 0) {
+ free(vmlinux_path[vmlinux_path__nr_entries]);
+ vmlinux_path[vmlinux_path__nr_entries] = NULL;
+ }
+
+ free(vmlinux_path);
+ vmlinux_path = NULL;
+}
+
+static int vmlinux_path__init(void)
+{
+ struct utsname uts;
+ char bf[PATH_MAX];
+
+ vmlinux_path = malloc(sizeof(char *) * 5);
+ if (vmlinux_path == NULL)
+ return -1;
+
+ vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
+ if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
+ goto out_fail;
+ ++vmlinux_path__nr_entries;
+ vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
+ if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
+ goto out_fail;
+ ++vmlinux_path__nr_entries;
+
+ /* only try running kernel version if no symfs was given */
+ if (symbol_conf.symfs[0] != 0)
+ return 0;
+
+ if (uname(&uts) < 0)
+ return -1;
+
+ snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release);
+ vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
+ if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
+ goto out_fail;
+ ++vmlinux_path__nr_entries;
+ snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release);
+ vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
+ if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
+ goto out_fail;
+ ++vmlinux_path__nr_entries;
+ snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
+ uts.release);
+ vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
+ if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
+ goto out_fail;
+ ++vmlinux_path__nr_entries;
+
+ return 0;
+
+out_fail:
+ vmlinux_path__exit();
+ return -1;
+}
+
+size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
+{
+ int i;
+ size_t printed = 0;
+ struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
+
+ if (kdso->has_build_id) {
+ char filename[PATH_MAX];
+ if (dso__build_id_filename(kdso, filename, sizeof(filename)))
+ printed += fprintf(fp, "[0] %s\n", filename);
+ }
+
+ for (i = 0; i < vmlinux_path__nr_entries; ++i)
+ printed += fprintf(fp, "[%d] %s\n",
+ i + kdso->has_build_id, vmlinux_path[i]);
+
+ return printed;
+}
+
+static int setup_list(struct strlist **list, const char *list_str,
+ const char *list_name)
+{
+ if (list_str == NULL)
+ return 0;
+
+ *list = strlist__new(true, list_str);
+ if (!*list) {
+ pr_err("problems parsing %s list\n", list_name);
+ return -1;
+ }
+ return 0;
+}
+
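+/*
+ * A non-zero /proc/sys/kernel/kptr_restrict hides kernel addresses from
+ * unprivileged users, so it only needs to be read when not running as root.
+ */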
+static bool symbol__read_kptr_restrict(void)
+{
+ bool value = false;
+
+ if (geteuid() != 0) {
+ FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
+ if (fp != NULL) {
+ char line[8];
+
+ if (fgets(line, sizeof(line), fp) != NULL)
+ value = atoi(line) != 0;
+
+ fclose(fp);
+ }
+ }
+
+ return value;
+}
+
+int symbol__init(void)
+{
+ const char *symfs;
+
+ if (symbol_conf.initialized)
+ return 0;
+
+ symbol_conf.priv_size = ALIGN(symbol_conf.priv_size, sizeof(u64));
+
+ elf_version(EV_CURRENT);
+ if (symbol_conf.sort_by_name)
+ symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
+ sizeof(struct symbol));
+
+ if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0)
+ return -1;
+
+ if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
+		pr_err("'.' is the only invalid --field-separator argument\n");
+ return -1;
+ }
+
+ if (setup_list(&symbol_conf.dso_list,
+ symbol_conf.dso_list_str, "dso") < 0)
+ return -1;
+
+ if (setup_list(&symbol_conf.comm_list,
+ symbol_conf.comm_list_str, "comm") < 0)
+ goto out_free_dso_list;
+
+ if (setup_list(&symbol_conf.sym_list,
+ symbol_conf.sym_list_str, "symbol") < 0)
+ goto out_free_comm_list;
+
+ /*
+	 * A path to symbols of "/" is identical to "",
+	 * so reset it here for simplicity.
+ */
+ symfs = realpath(symbol_conf.symfs, NULL);
+ if (symfs == NULL)
+ symfs = symbol_conf.symfs;
+ if (strcmp(symfs, "/") == 0)
+ symbol_conf.symfs = "";
+ if (symfs != symbol_conf.symfs)
+ free((void *)symfs);
+
+ symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
+
+ symbol_conf.initialized = true;
+ return 0;
+
+out_free_comm_list:
+ strlist__delete(symbol_conf.comm_list);
+out_free_dso_list:
+ strlist__delete(symbol_conf.dso_list);
+ return -1;
+}
+
+void symbol__exit(void)
+{
+ if (!symbol_conf.initialized)
+ return;
+ strlist__delete(symbol_conf.sym_list);
+ strlist__delete(symbol_conf.dso_list);
+ strlist__delete(symbol_conf.comm_list);
+ vmlinux_path__exit();
+ symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
+ symbol_conf.initialized = false;
+}
+
+int machines__create_kernel_maps(struct rb_root *machines, pid_t pid)
+{
+ struct machine *machine = machines__findnew(machines, pid);
+
+ if (machine == NULL)
+ return -1;
+
+ return machine__create_kernel_maps(machine);
+}
+
+static int hex(char ch)
+{
+ if ((ch >= '0') && (ch <= '9'))
+ return ch - '0';
+ if ((ch >= 'a') && (ch <= 'f'))
+ return ch - 'a' + 10;
+ if ((ch >= 'A') && (ch <= 'F'))
+ return ch - 'A' + 10;
+ return -1;
+}
+
+/*
+ * While we find nice hex chars, build a long_val.
+ * Return number of chars processed.
+ */
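+/* e.g. hex2u64("c0ffee mod", &val) sets val to 0xc0ffee and returns 6. */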
+int hex2u64(const char *ptr, u64 *long_val)
+{
+ const char *p = ptr;
+ *long_val = 0;
+
+ while (*p) {
+ const int hex_val = hex(*p);
+
+ if (hex_val < 0)
+ break;
+
+ *long_val = (*long_val << 4) | hex_val;
+ p++;
+ }
+
+ return p - ptr;
+}
+
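+/*
+ * Replace every occurrence of 'from' with 'to' in place, e.g. turning the
+ * module name "i2c-core" into "i2c_core" before map lookups.
+ */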
+char *strxfrchar(char *s, char from, char to)
+{
+ char *p = s;
+
+ while ((p = strchr(p, from)) != NULL)
+ *p++ = to;
+
+ return s;
+}
+
+int machines__create_guest_kernel_maps(struct rb_root *machines)
+{
+ int ret = 0;
+ struct dirent **namelist = NULL;
+ int i, items = 0;
+ char path[PATH_MAX];
+ pid_t pid;
+
+ if (symbol_conf.default_guest_vmlinux_name ||
+ symbol_conf.default_guest_modules ||
+ symbol_conf.default_guest_kallsyms) {
+ machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
+ }
+
+ if (symbol_conf.guestmount) {
+ items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
+ if (items <= 0)
+ return -ENOENT;
+ for (i = 0; i < items; i++) {
+ if (!isdigit(namelist[i]->d_name[0])) {
+ /* Filter out . and .. */
+ continue;
+ }
+ pid = atoi(namelist[i]->d_name);
+ sprintf(path, "%s/%s/proc/kallsyms",
+ symbol_conf.guestmount,
+ namelist[i]->d_name);
+ ret = access(path, R_OK);
+ if (ret) {
+ pr_debug("Can't access file %s\n", path);
+ goto failure;
+ }
+ machines__create_kernel_maps(machines, pid);
+ }
+failure:
+ free(namelist);
+ }
+
+ return ret;
+}
+
+void machines__destroy_guest_kernel_maps(struct rb_root *machines)
+{
+ struct rb_node *next = rb_first(machines);
+
+ while (next) {
+ struct machine *pos = rb_entry(next, struct machine, rb_node);
+
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, machines);
+ machine__delete(pos);
+ }
+}
+
+int machine__load_kallsyms(struct machine *machine, const char *filename,
+ enum map_type type, symbol_filter_t filter)
+{
+ struct map *map = machine->vmlinux_maps[type];
+ int ret = dso__load_kallsyms(map->dso, filename, map, filter);
+
+ if (ret > 0) {
+ dso__set_loaded(map->dso, type);
+ /*
+		 * Since /proc/kallsyms will have multiple sections for the
+ * kernel, with modules between them, fixup the end of all
+ * sections.
+ */
+ __map_groups__fixup_end(&machine->kmaps, type);
+ }
+
+ return ret;
+}
+
+int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
+ symbol_filter_t filter)
+{
+ struct map *map = machine->vmlinux_maps[type];
+ int ret = dso__load_vmlinux_path(map->dso, map, filter);
+
+ if (ret > 0) {
+ dso__set_loaded(map->dso, type);
+ map__reloc_vmlinux(map);
+ }
+
+ return ret;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/symbol.h b/ANDROID_3.4.5/tools/perf/util/symbol.h
new file mode 100644
index 00000000..ac49ef20
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/symbol.h
@@ -0,0 +1,270 @@
+#ifndef __PERF_SYMBOL
+#define __PERF_SYMBOL 1
+
+#include <linux/types.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include "map.h"
+#include "../perf.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <stdio.h>
+
+#ifdef HAVE_CPLUS_DEMANGLE
+extern char *cplus_demangle(const char *, int);
+
+static inline char *bfd_demangle(void __used *v, const char *c, int i)
+{
+ return cplus_demangle(c, i);
+}
+#else
+#ifdef NO_DEMANGLE
+static inline char *bfd_demangle(void __used *v, const char __used *c,
+ int __used i)
+{
+ return NULL;
+}
+#else
+#include <bfd.h>
+#endif
+#endif
+
+int hex2u64(const char *ptr, u64 *val);
+char *strxfrchar(char *s, char from, char to);
+
+/*
+ * libelf 0.8.x and earlier do not support ELF_C_READ_MMAP;
+ * for newer versions we can use mmap to reduce memory usage:
+ */
+#ifdef LIBELF_NO_MMAP
+# define PERF_ELF_C_READ_MMAP ELF_C_READ
+#else
+# define PERF_ELF_C_READ_MMAP ELF_C_READ_MMAP
+#endif
+
+#ifndef DMGL_PARAMS
+#define DMGL_PARAMS (1 << 0) /* Include function args */
+#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
+#endif
+
+#define BUILD_ID_SIZE 20
+
+/** struct symbol - symtab entry
+ *
+ * @ignore - resolvable but tools ignore it (e.g. idle routines)
+ */
+struct symbol {
+ struct rb_node rb_node;
+ u64 start;
+ u64 end;
+ u16 namelen;
+ u8 binding;
+ bool ignore;
+ char name[0];
+};
+
+void symbol__delete(struct symbol *sym);
+
+struct strlist;
+
+struct symbol_conf {
+ unsigned short priv_size;
+ unsigned short nr_events;
+ bool try_vmlinux_path,
+ show_kernel_path,
+ use_modules,
+ sort_by_name,
+ show_nr_samples,
+ show_total_period,
+ use_callchain,
+ exclude_other,
+ show_cpu_utilization,
+ initialized,
+ kptr_restrict,
+ annotate_asm_raw,
+ annotate_src;
+ const char *vmlinux_name,
+ *kallsyms_name,
+ *source_prefix,
+ *field_sep;
+ const char *default_guest_vmlinux_name,
+ *default_guest_kallsyms,
+ *default_guest_modules;
+ const char *guestmount;
+ const char *dso_list_str,
+ *comm_list_str,
+ *sym_list_str,
+ *col_width_list_str;
+ struct strlist *dso_list,
+ *comm_list,
+ *sym_list,
+ *dso_from_list,
+ *dso_to_list,
+ *sym_from_list,
+ *sym_to_list;
+ const char *symfs;
+};
+
+extern struct symbol_conf symbol_conf;
+
+static inline void *symbol__priv(struct symbol *sym)
+{
+ return ((void *)sym) - symbol_conf.priv_size;
+}
+
+struct ref_reloc_sym {
+ const char *name;
+ u64 addr;
+ u64 unrelocated_addr;
+};
+
+struct map_symbol {
+ struct map *map;
+ struct symbol *sym;
+ bool unfolded;
+ bool has_children;
+};
+
+struct addr_map_symbol {
+ struct map *map;
+ struct symbol *sym;
+ u64 addr;
+ u64 al_addr;
+};
+
+struct branch_info {
+ struct addr_map_symbol from;
+ struct addr_map_symbol to;
+ struct branch_flags flags;
+};
+
+struct addr_location {
+ struct thread *thread;
+ struct map *map;
+ struct symbol *sym;
+ u64 addr;
+ char level;
+ bool filtered;
+ u8 cpumode;
+ s32 cpu;
+};
+
+enum dso_kernel_type {
+ DSO_TYPE_USER = 0,
+ DSO_TYPE_KERNEL,
+ DSO_TYPE_GUEST_KERNEL
+};
+
+struct dso {
+ struct list_head node;
+ struct rb_root symbols[MAP__NR_TYPES];
+ struct rb_root symbol_names[MAP__NR_TYPES];
+ enum dso_kernel_type kernel;
+ u8 adjust_symbols:1;
+ u8 has_build_id:1;
+ u8 hit:1;
+ u8 annotate_warned:1;
+ u8 sname_alloc:1;
+ u8 lname_alloc:1;
+ unsigned char symtab_type;
+ u8 sorted_by_name;
+ u8 loaded;
+ u8 build_id[BUILD_ID_SIZE];
+ const char *short_name;
+ char *long_name;
+ u16 long_name_len;
+ u16 short_name_len;
+ char name[0];
+};
+
+struct dso *dso__new(const char *name);
+void dso__delete(struct dso *dso);
+
+int dso__name_len(const struct dso *dso);
+
+bool dso__loaded(const struct dso *dso, enum map_type type);
+bool dso__sorted_by_name(const struct dso *dso, enum map_type type);
+
+static inline void dso__set_loaded(struct dso *dso, enum map_type type)
+{
+ dso->loaded |= (1 << type);
+}
+
+void dso__sort_by_name(struct dso *dso, enum map_type type);
+
+struct dso *__dsos__findnew(struct list_head *head, const char *name);
+
+int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter);
+int dso__load_vmlinux(struct dso *dso, struct map *map,
+ const char *vmlinux, symbol_filter_t filter);
+int dso__load_vmlinux_path(struct dso *dso, struct map *map,
+ symbol_filter_t filter);
+int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map,
+ symbol_filter_t filter);
+int machine__load_kallsyms(struct machine *machine, const char *filename,
+ enum map_type type, symbol_filter_t filter);
+int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
+ symbol_filter_t filter);
+
+size_t __dsos__fprintf(struct list_head *head, FILE *fp);
+
+size_t machine__fprintf_dsos_buildid(struct machine *machine,
+ FILE *fp, bool with_hits);
+size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp);
+size_t machines__fprintf_dsos_buildid(struct rb_root *machines,
+ FILE *fp, bool with_hits);
+size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
+size_t dso__fprintf_symbols_by_name(struct dso *dso,
+ enum map_type type, FILE *fp);
+size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp);
+
+enum symtab_type {
+ SYMTAB__KALLSYMS = 0,
+ SYMTAB__GUEST_KALLSYMS,
+ SYMTAB__JAVA_JIT,
+ SYMTAB__BUILD_ID_CACHE,
+ SYMTAB__FEDORA_DEBUGINFO,
+ SYMTAB__UBUNTU_DEBUGINFO,
+ SYMTAB__BUILDID_DEBUGINFO,
+ SYMTAB__SYSTEM_PATH_DSO,
+ SYMTAB__GUEST_KMODULE,
+ SYMTAB__SYSTEM_PATH_KMODULE,
+ SYMTAB__NOT_FOUND,
+};
+
+char dso__symtab_origin(const struct dso *dso);
+void dso__set_long_name(struct dso *dso, char *name);
+void dso__set_build_id(struct dso *dso, void *build_id);
+void dso__read_running_kernel_build_id(struct dso *dso,
+ struct machine *machine);
+struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
+ u64 addr);
+struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
+ const char *name);
+
+int filename__read_build_id(const char *filename, void *bf, size_t size);
+int sysfs__read_build_id(const char *filename, void *bf, size_t size);
+bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
+int build_id__sprintf(const u8 *build_id, int len, char *bf);
+int kallsyms__parse(const char *filename, void *arg,
+ int (*process_symbol)(void *arg, const char *name,
+ char type, u64 start, u64 end));
+
+void machine__destroy_kernel_maps(struct machine *machine);
+int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel);
+int machine__create_kernel_maps(struct machine *machine);
+
+int machines__create_kernel_maps(struct rb_root *machines, pid_t pid);
+int machines__create_guest_kernel_maps(struct rb_root *machines);
+void machines__destroy_guest_kernel_maps(struct rb_root *machines);
+
+int symbol__init(void);
+void symbol__exit(void);
+size_t symbol__fprintf_symname_offs(const struct symbol *sym,
+ const struct addr_location *al, FILE *fp);
+size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp);
+bool symbol_type__is_a(char symbol_type, enum map_type map_type);
+
+size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp);
+
+#endif /* __PERF_SYMBOL */
diff --git a/ANDROID_3.4.5/tools/perf/util/sysfs.c b/ANDROID_3.4.5/tools/perf/util/sysfs.c
new file mode 100644
index 00000000..48c6902e
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/sysfs.c
@@ -0,0 +1,60 @@
+
+#include "util.h"
+#include "sysfs.h"
+
+static const char * const sysfs_known_mountpoints[] = {
+ "/sys",
+ 0,
+};
+
+static int sysfs_found;
+char sysfs_mountpoint[PATH_MAX];
+
+static int sysfs_valid_mountpoint(const char *sysfs)
+{
+ struct statfs st_fs;
+
+ if (statfs(sysfs, &st_fs) < 0)
+ return -ENOENT;
+ else if (st_fs.f_type != (long) SYSFS_MAGIC)
+ return -ENOENT;
+
+ return 0;
+}
+
+const char *sysfs_find_mountpoint(void)
+{
+ const char * const *ptr;
+ char type[100];
+ FILE *fp;
+
+ if (sysfs_found)
+ return (const char *) sysfs_mountpoint;
+
+ ptr = sysfs_known_mountpoints;
+ while (*ptr) {
+ if (sysfs_valid_mountpoint(*ptr) == 0) {
+ sysfs_found = 1;
+ strcpy(sysfs_mountpoint, *ptr);
+ return sysfs_mountpoint;
+ }
+ ptr++;
+ }
+
+ /* give up and parse /proc/mounts */
+ fp = fopen("/proc/mounts", "r");
+ if (fp == NULL)
+ return NULL;
+
+ while (!sysfs_found &&
+ fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n",
+ sysfs_mountpoint, type) == 2) {
+
+ if (strcmp(type, "sysfs") == 0)
+ sysfs_found = 1;
+ }
+
+ fclose(fp);
+
+ return sysfs_found ? sysfs_mountpoint : NULL;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/sysfs.h b/ANDROID_3.4.5/tools/perf/util/sysfs.h
new file mode 100644
index 00000000..a813b720
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/sysfs.h
@@ -0,0 +1,6 @@
+#ifndef __SYSFS_H__
+#define __SYSFS_H__
+
+const char *sysfs_find_mountpoint(void);
+
+#endif /* __SYSFS_H__ */
diff --git a/ANDROID_3.4.5/tools/perf/util/thread.c b/ANDROID_3.4.5/tools/perf/util/thread.c
new file mode 100644
index 00000000..fb4b7ea6
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/thread.c
@@ -0,0 +1,140 @@
+#include "../perf.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "session.h"
+#include "thread.h"
+#include "util.h"
+#include "debug.h"
+
+static struct thread *thread__new(pid_t pid)
+{
+ struct thread *self = zalloc(sizeof(*self));
+
+ if (self != NULL) {
+ map_groups__init(&self->mg);
+ self->pid = pid;
+ self->comm = malloc(32);
+ if (self->comm)
+ snprintf(self->comm, 32, ":%d", self->pid);
+ }
+
+ return self;
+}
+
+void thread__delete(struct thread *self)
+{
+ map_groups__exit(&self->mg);
+ free(self->comm);
+ free(self);
+}
+
+int thread__set_comm(struct thread *self, const char *comm)
+{
+ int err;
+
+ if (self->comm)
+ free(self->comm);
+ self->comm = strdup(comm);
+ err = self->comm == NULL ? -ENOMEM : 0;
+ if (!err) {
+ self->comm_set = true;
+ map_groups__flush(&self->mg);
+ }
+ return err;
+}
+
+int thread__comm_len(struct thread *self)
+{
+ if (!self->comm_len) {
+ if (!self->comm)
+ return 0;
+ self->comm_len = strlen(self->comm);
+ }
+
+ return self->comm_len;
+}
+
+static size_t thread__fprintf(struct thread *self, FILE *fp)
+{
+ return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
+ map_groups__fprintf(&self->mg, verbose, fp);
+}
+
+struct thread *machine__findnew_thread(struct machine *self, pid_t pid)
+{
+ struct rb_node **p = &self->threads.rb_node;
+ struct rb_node *parent = NULL;
+ struct thread *th;
+
+ /*
+	 * Front-end cache - PID lookups come in blocks,
+	 * so most of the time we don't have to look up
+ * the full rbtree:
+ */
+ if (self->last_match && self->last_match->pid == pid)
+ return self->last_match;
+
+ while (*p != NULL) {
+ parent = *p;
+ th = rb_entry(parent, struct thread, rb_node);
+
+ if (th->pid == pid) {
+ self->last_match = th;
+ return th;
+ }
+
+ if (pid < th->pid)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ th = thread__new(pid);
+ if (th != NULL) {
+ rb_link_node(&th->rb_node, parent, p);
+ rb_insert_color(&th->rb_node, &self->threads);
+ self->last_match = th;
+ }
+
+ return th;
+}
+
+void thread__insert_map(struct thread *self, struct map *map)
+{
+ map_groups__fixup_overlappings(&self->mg, map, verbose, stderr);
+ map_groups__insert(&self->mg, map);
+}
+
+int thread__fork(struct thread *self, struct thread *parent)
+{
+ int i;
+
+ if (parent->comm_set) {
+ if (self->comm)
+ free(self->comm);
+ self->comm = strdup(parent->comm);
+ if (!self->comm)
+ return -ENOMEM;
+ self->comm_set = true;
+ }
+
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
+ return -ENOMEM;
+ return 0;
+}
+
+size_t machine__fprintf(struct machine *machine, FILE *fp)
+{
+ size_t ret = 0;
+ struct rb_node *nd;
+
+ for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
+ struct thread *pos = rb_entry(nd, struct thread, rb_node);
+
+ ret += thread__fprintf(pos, fp);
+ }
+
+ return ret;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/thread.h b/ANDROID_3.4.5/tools/perf/util/thread.h
new file mode 100644
index 00000000..70c2c13f
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/thread.h
@@ -0,0 +1,44 @@
+#ifndef __PERF_THREAD_H
+#define __PERF_THREAD_H
+
+#include <linux/rbtree.h>
+#include <unistd.h>
+#include "symbol.h"
+
+struct thread {
+ union {
+ struct rb_node rb_node;
+ struct list_head node;
+ };
+ struct map_groups mg;
+ pid_t pid;
+ char shortname[3];
+ bool comm_set;
+ char *comm;
+ int comm_len;
+};
+
+struct machine;
+
+void thread__delete(struct thread *self);
+
+int thread__set_comm(struct thread *self, const char *comm);
+int thread__comm_len(struct thread *self);
+void thread__insert_map(struct thread *self, struct map *map);
+int thread__fork(struct thread *self, struct thread *parent);
+
+static inline struct map *thread__find_map(struct thread *self,
+ enum map_type type, u64 addr)
+{
+ return self ? map_groups__find(&self->mg, type, addr) : NULL;
+}
+
+void thread__find_addr_map(struct thread *thread, struct machine *machine,
+ u8 cpumode, enum map_type type, u64 addr,
+ struct addr_location *al);
+
+void thread__find_addr_location(struct thread *thread, struct machine *machine,
+ u8 cpumode, enum map_type type, u64 addr,
+ struct addr_location *al,
+ symbol_filter_t filter);
+#endif /* __PERF_THREAD_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/thread_map.c b/ANDROID_3.4.5/tools/perf/util/thread_map.c
new file mode 100644
index 00000000..84d9bd78
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/thread_map.c
@@ -0,0 +1,297 @@
+#include <dirent.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include "strlist.h"
+#include <string.h>
+#include "thread_map.h"
+
+/* Skip "." and ".." directories */
+static int filter(const struct dirent *dir)
+{
+ if (dir->d_name[0] == '.')
+ return 0;
+ else
+ return 1;
+}
+
+struct thread_map *thread_map__new_by_pid(pid_t pid)
+{
+ struct thread_map *threads;
+ char name[256];
+ int items;
+ struct dirent **namelist = NULL;
+ int i;
+
+ sprintf(name, "/proc/%d/task", pid);
+ items = scandir(name, &namelist, filter, NULL);
+ if (items <= 0)
+ return NULL;
+
+ threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
+ if (threads != NULL) {
+ for (i = 0; i < items; i++)
+ threads->map[i] = atoi(namelist[i]->d_name);
+ threads->nr = items;
+ }
+
+	for (i = 0; i < items; i++)
+ free(namelist[i]);
+ free(namelist);
+
+ return threads;
+}
+
+struct thread_map *thread_map__new_by_tid(pid_t tid)
+{
+ struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
+
+ if (threads != NULL) {
+ threads->map[0] = tid;
+ threads->nr = 1;
+ }
+
+ return threads;
+}
+
+struct thread_map *thread_map__new_by_uid(uid_t uid)
+{
+ DIR *proc;
+ int max_threads = 32, items, i;
+ char path[256];
+ struct dirent dirent, *next, **namelist = NULL;
+ struct thread_map *threads = malloc(sizeof(*threads) +
+ max_threads * sizeof(pid_t));
+ if (threads == NULL)
+ goto out;
+
+ proc = opendir("/proc");
+ if (proc == NULL)
+ goto out_free_threads;
+
+ threads->nr = 0;
+
+ while (!readdir_r(proc, &dirent, &next) && next) {
+ char *end;
+ bool grow = false;
+ struct stat st;
+ pid_t pid = strtol(dirent.d_name, &end, 10);
+
+ if (*end) /* only interested in proper numerical dirents */
+ continue;
+
+ snprintf(path, sizeof(path), "/proc/%s", dirent.d_name);
+
+ if (stat(path, &st) != 0)
+ continue;
+
+ if (st.st_uid != uid)
+ continue;
+
+ snprintf(path, sizeof(path), "/proc/%d/task", pid);
+ items = scandir(path, &namelist, filter, NULL);
+ if (items <= 0)
+ goto out_free_closedir;
+
+ while (threads->nr + items >= max_threads) {
+ max_threads *= 2;
+ grow = true;
+ }
+
+ if (grow) {
+ struct thread_map *tmp;
+
+ tmp = realloc(threads, (sizeof(*threads) +
+ max_threads * sizeof(pid_t)));
+ if (tmp == NULL)
+ goto out_free_namelist;
+
+ threads = tmp;
+ }
+
+ for (i = 0; i < items; i++)
+ threads->map[threads->nr + i] = atoi(namelist[i]->d_name);
+
+ for (i = 0; i < items; i++)
+ free(namelist[i]);
+ free(namelist);
+
+ threads->nr += items;
+ }
+
+out_closedir:
+ closedir(proc);
+out:
+ return threads;
+
+out_free_threads:
+ free(threads);
+ return NULL;
+
+out_free_namelist:
+ for (i = 0; i < items; i++)
+ free(namelist[i]);
+ free(namelist);
+
+out_free_closedir:
+ free(threads);
+ threads = NULL;
+ goto out_closedir;
+}
+
+struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
+{
+ if (pid != -1)
+ return thread_map__new_by_pid(pid);
+
+ if (tid == -1 && uid != UINT_MAX)
+ return thread_map__new_by_uid(uid);
+
+ return thread_map__new_by_tid(tid);
+}
+
+static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
+{
+ struct thread_map *threads = NULL, *nt;
+ char name[256];
+ int items, total_tasks = 0;
+ struct dirent **namelist = NULL;
+ int i, j = 0;
+ pid_t pid, prev_pid = INT_MAX;
+ char *end_ptr;
+ struct str_node *pos;
+ struct strlist *slist = strlist__new(false, pid_str);
+
+ if (!slist)
+ return NULL;
+
+ strlist__for_each(pos, slist) {
+ pid = strtol(pos->s, &end_ptr, 10);
+
+ if (pid == INT_MIN || pid == INT_MAX ||
+ (*end_ptr != '\0' && *end_ptr != ','))
+ goto out_free_threads;
+
+ if (pid == prev_pid)
+ continue;
+
+ sprintf(name, "/proc/%d/task", pid);
+ items = scandir(name, &namelist, filter, NULL);
+ if (items <= 0)
+ goto out_free_threads;
+
+ total_tasks += items;
+ nt = realloc(threads, (sizeof(*threads) +
+ sizeof(pid_t) * total_tasks));
+ if (nt == NULL)
+ goto out_free_threads;
+
+ threads = nt;
+
+ if (threads) {
+ for (i = 0; i < items; i++)
+ threads->map[j++] = atoi(namelist[i]->d_name);
+ threads->nr = total_tasks;
+ }
+
+ for (i = 0; i < items; i++)
+ free(namelist[i]);
+ free(namelist);
+
+ if (!threads)
+ break;
+ }
+
+out:
+ strlist__delete(slist);
+ return threads;
+
+out_free_threads:
+ free(threads);
+ threads = NULL;
+ goto out;
+}
+
+static struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
+{
+ struct thread_map *threads = NULL, *nt;
+ int ntasks = 0;
+ pid_t tid, prev_tid = INT_MAX;
+ char *end_ptr;
+ struct str_node *pos;
+ struct strlist *slist;
+
+ /* perf-stat expects threads to be generated even if tid not given */
+ if (!tid_str) {
+ threads = malloc(sizeof(*threads) + sizeof(pid_t));
+ if (threads != NULL) {
+ threads->map[0] = -1;
+ threads->nr = 1;
+ }
+ return threads;
+ }
+
+ slist = strlist__new(false, tid_str);
+ if (!slist)
+ return NULL;
+
+ strlist__for_each(pos, slist) {
+ tid = strtol(pos->s, &end_ptr, 10);
+
+ if (tid == INT_MIN || tid == INT_MAX ||
+ (*end_ptr != '\0' && *end_ptr != ','))
+ goto out_free_threads;
+
+ if (tid == prev_tid)
+ continue;
+
+ ntasks++;
+ nt = realloc(threads, sizeof(*threads) + sizeof(pid_t) * ntasks);
+
+ if (nt == NULL)
+ goto out_free_threads;
+
+ threads = nt;
+ threads->map[ntasks - 1] = tid;
+ threads->nr = ntasks;
+ }
+out:
+ return threads;
+
+out_free_threads:
+ free(threads);
+ threads = NULL;
+ goto out;
+}
+
+struct thread_map *thread_map__new_str(const char *pid, const char *tid,
+ uid_t uid)
+{
+ if (pid)
+ return thread_map__new_by_pid_str(pid);
+
+ if (!tid && uid != UINT_MAX)
+ return thread_map__new_by_uid(uid);
+
+ return thread_map__new_by_tid_str(tid);
+}
+
+void thread_map__delete(struct thread_map *threads)
+{
+ free(threads);
+}
+
+size_t thread_map__fprintf(struct thread_map *threads, FILE *fp)
+{
+ int i;
+ size_t printed = fprintf(fp, "%d thread%s: ",
+ threads->nr, threads->nr > 1 ? "s" : "");
+ for (i = 0; i < threads->nr; ++i)
+ printed += fprintf(fp, "%s%d", i ? ", " : "", threads->map[i]);
+
+ return printed + fprintf(fp, "\n");
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/thread_map.h b/ANDROID_3.4.5/tools/perf/util/thread_map.h
new file mode 100644
index 00000000..7da80f14
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/thread_map.h
@@ -0,0 +1,24 @@
+#ifndef __PERF_THREAD_MAP_H
+#define __PERF_THREAD_MAP_H
+
+#include <sys/types.h>
+#include <stdio.h>
+
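+/* Flexible-size list of thread ids: nr entries stored in map[]. */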
+struct thread_map {
+ int nr;
+ int map[];
+};
+
+struct thread_map *thread_map__new_by_pid(pid_t pid);
+struct thread_map *thread_map__new_by_tid(pid_t tid);
+struct thread_map *thread_map__new_by_uid(uid_t uid);
+struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid);
+
+struct thread_map *thread_map__new_str(const char *pid,
+ const char *tid, uid_t uid);
+
+void thread_map__delete(struct thread_map *threads);
+
+size_t thread_map__fprintf(struct thread_map *threads, FILE *fp);
+
+#endif /* __PERF_THREAD_MAP_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/tool.h b/ANDROID_3.4.5/tools/perf/util/tool.h
new file mode 100644
index 00000000..b0e1aadb
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/tool.h
@@ -0,0 +1,50 @@
+#ifndef __PERF_TOOL_H
+#define __PERF_TOOL_H
+
+#include <stdbool.h>
+
+struct perf_session;
+union perf_event;
+struct perf_evlist;
+struct perf_evsel;
+struct perf_sample;
+struct perf_tool;
+struct machine;
+
+typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel, struct machine *machine);
+
+typedef int (*event_op)(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine);
+
+typedef int (*event_attr_op)(union perf_event *event,
+ struct perf_evlist **pevlist);
+typedef int (*event_simple_op)(struct perf_tool *tool, union perf_event *event);
+
+typedef int (*event_synth_op)(union perf_event *event,
+ struct perf_session *session);
+
+typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event,
+ struct perf_session *session);
+
+struct perf_tool {
+ event_sample sample,
+ read;
+ event_op mmap,
+ comm,
+ fork,
+ exit,
+ lost,
+ throttle,
+ unthrottle;
+ event_attr_op attr;
+ event_synth_op tracing_data;
+ event_simple_op event_type;
+ event_op2 finished_round,
+ build_id;
+ bool ordered_samples;
+ bool ordering_requires_timestamps;
+};
+
+#endif /* __PERF_TOOL_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/top.c b/ANDROID_3.4.5/tools/perf/util/top.c
new file mode 100644
index 00000000..09fe579c
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/top.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Refactored from builtin-top.c, see that file for further copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include "cpumap.h"
+#include "event.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "parse-events.h"
+#include "symbol.h"
+#include "top.h"
+#include <inttypes.h>
+
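+/*
+ * snprintf() may return more than 'size'; clamp the result so callers can
+ * keep accumulating offsets into the buffer safely.
+ */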
+#define SNPRINTF(buf, size, fmt, args...) \
+({ \
+ size_t r = snprintf(buf, size, fmt, ## args); \
+ r > size ? size : r; \
+})
+
+size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
+{
+ float samples_per_sec = top->samples / top->delay_secs;
+ float ksamples_per_sec = top->kernel_samples / top->delay_secs;
+ float esamples_percent = (100.0 * top->exact_samples) / top->samples;
+ size_t ret = 0;
+
+ if (!perf_guest) {
+ ret = SNPRINTF(bf, size,
+ " PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
+ " exact: %4.1f%% [", samples_per_sec,
+ 100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
+ samples_per_sec)),
+ esamples_percent);
+ } else {
+ float us_samples_per_sec = top->us_samples / top->delay_secs;
+ float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
+ float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs;
+
+ ret = SNPRINTF(bf, size,
+ " PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
+ " guest kernel:%4.1f%% guest us:%4.1f%%"
+ " exact: %4.1f%% [", samples_per_sec,
+ 100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec - us_samples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec -
+ guest_kernel_samples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec -
+ guest_us_samples_per_sec) /
+ samples_per_sec)),
+ esamples_percent);
+ }
+
+ if (top->evlist->nr_entries == 1) {
+ struct perf_evsel *first;
+ first = list_entry(top->evlist->entries.next, struct perf_evsel, node);
+ ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
+ (uint64_t)first->attr.sample_period,
+ top->freq ? "Hz" : "");
+ }
+
+ ret += SNPRINTF(bf + ret, size - ret, "%s", event_name(top->sym_evsel));
+
+ ret += SNPRINTF(bf + ret, size - ret, "], ");
+
+ if (top->target_pid)
+ ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s",
+ top->target_pid);
+ else if (top->target_tid)
+ ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s",
+ top->target_tid);
+ else if (top->uid_str != NULL)
+ ret += SNPRINTF(bf + ret, size - ret, " (uid: %s",
+ top->uid_str);
+ else
+ ret += SNPRINTF(bf + ret, size - ret, " (all");
+
+ if (top->cpu_list)
+ ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
+ top->evlist->cpus->nr > 1 ? "s" : "", top->cpu_list);
+ else {
+ if (top->target_tid)
+ ret += SNPRINTF(bf + ret, size - ret, ")");
+ else
+ ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
+ top->evlist->cpus->nr,
+ top->evlist->cpus->nr > 1 ? "s" : "");
+ }
+
+ return ret;
+}
+
+void perf_top__reset_sample_counters(struct perf_top *top)
+{
+ top->samples = top->us_samples = top->kernel_samples =
+ top->exact_samples = top->guest_kernel_samples =
+ top->guest_us_samples = 0;
+}
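A side note on the arithmetic in perf_top__header_snprintf(): the kernel-percentage expression 100.0 - 100.0 * ((samples_per_sec - ksamples_per_sec) / samples_per_sec) reduces algebraically to 100.0 * kernel_samples / samples, because the delay_secs factor cancels. The small standalone check below (illustrative only, with made-up sample counts) confirms the equivalence.

/* Sketch: verify that the percentage formula in top.c equals the ratio form. */
#include <stdio.h>

int main(void)
{
	double samples = 1234.0, kernel_samples = 456.0, delay_secs = 2.0;
	double sps = samples / delay_secs;
	double ksps = kernel_samples / delay_secs;

	double as_in_top_c = 100.0 - (100.0 * ((sps - ksps) / sps));
	double simplified  = 100.0 * kernel_samples / samples;

	printf("%.6f == %.6f\n", as_in_top_c, simplified);
	return 0;
}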
diff --git a/ANDROID_3.4.5/tools/perf/util/top.h b/ANDROID_3.4.5/tools/perf/util/top.h
new file mode 100644
index 00000000..ce61cb2d
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/top.h
@@ -0,0 +1,55 @@
+#ifndef __PERF_TOP_H
+#define __PERF_TOP_H 1
+
+#include "tool.h"
+#include "types.h"
+#include <stddef.h>
+#include <stdbool.h>
+
+struct perf_evlist;
+struct perf_evsel;
+struct perf_session;
+
+struct perf_top {
+ struct perf_tool tool;
+ struct perf_evlist *evlist;
+ /*
+ * Symbols are added here in perf_event__process_sample and are
+ * removed after they decay.
+ */
+ u64 samples;
+ u64 kernel_samples, us_samples;
+ u64 exact_samples;
+ u64 guest_us_samples, guest_kernel_samples;
+ int print_entries, count_filter, delay_secs;
+ int freq;
+ const char *target_pid, *target_tid;
+ uid_t uid;
+ bool hide_kernel_symbols, hide_user_symbols, zero;
+ bool system_wide;
+ bool use_tui, use_stdio;
+ bool sort_has_symbols;
+ bool dont_use_callchains;
+ bool kptr_restrict_warned;
+ bool vmlinux_warned;
+ bool inherit;
+ bool group;
+ bool sample_id_all_missing;
+ bool exclude_guest_missing;
+ bool dump_symtab;
+ const char *cpu_list;
+ struct hist_entry *sym_filter_entry;
+ struct perf_evsel *sym_evsel;
+ struct perf_session *session;
+ struct winsize winsize;
+ unsigned int mmap_pages;
+ int default_interval;
+ int realtime_prio;
+ int sym_pcnt_filter;
+ const char *sym_filter;
+ const char *uid_str;
+};
+
+size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size);
+void perf_top__reset_sample_counters(struct perf_top *top);
+#endif /* __PERF_TOP_H */
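A hedged sketch of initializing a struct perf_top and resetting its per-interval counters, in the spirit of what builtin-top.c does. The default values are assumptions, and the extra system headers are included only because top.h leans on its includer for uid_t and struct winsize, and on the perf util tree for types.h.

/* Hypothetical sketch (not part of this patch): minimal perf_top setup. */
#include <sys/types.h>
#include <sys/ioctl.h>
#include "top.h"

static struct perf_top top = {
	.count_filter	 = 5,		/* illustrative defaults */
	.delay_secs	 = 2,
	.freq		 = 1000,
	.mmap_pages	 = 128,
	.sym_pcnt_filter = 5,
};

void example_refresh(void)
{
	/* zero samples/us_samples/kernel_samples/... before the next pass */
	perf_top__reset_sample_counters(&top);
}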
diff --git a/ANDROID_3.4.5/tools/perf/util/trace-event-info.c b/ANDROID_3.4.5/tools/perf/util/trace-event-info.c
new file mode 100644
index 00000000..fc22cf5c
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/trace-event-info.c
@@ -0,0 +1,542 @@
+/*
+ * Copyright (C) 2008,2009, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include "util.h"
+#include <dirent.h>
+#include <mntent.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+
+#include "../perf.h"
+#include "trace-event.h"
+#include "debugfs.h"
+#include "evsel.h"
+
+#define VERSION "0.5"
+
+#define TRACE_CTRL "tracing_on"
+#define TRACE "trace"
+#define AVAILABLE "available_tracers"
+#define CURRENT "current_tracer"
+#define ITER_CTRL "trace_options"
+#define MAX_LATENCY "tracing_max_latency"
+
+unsigned int page_size;
+
+static const char *output_file = "trace.info";
+static int output_fd;
+
+struct event_list {
+ struct event_list *next;
+ const char *event;
+};
+
+struct events {
+ struct events *sibling;
+ struct events *children;
+ struct events *next;
+ char *name;
+};
+
+
+void *malloc_or_die(unsigned int size)
+{
+ void *data;
+
+ data = malloc(size);
+ if (!data)
+ die("malloc");
+ return data;
+}
+
+static const char *find_debugfs(void)
+{
+ const char *path = debugfs_mount(NULL);
+
+ if (!path)
+ die("Your kernel not support debugfs filesystem");
+
+ return path;
+}
+
+/*
+ * Finds the path to the debugfs/tracing
+ * Allocates the string and stores it.
+ */
+static const char *find_tracing_dir(void)
+{
+ static char *tracing;
+ static int tracing_found;
+ const char *debugfs;
+
+ if (tracing_found)
+ return tracing;
+
+ debugfs = find_debugfs();
+
+ tracing = malloc_or_die(strlen(debugfs) + 9);
+
+ sprintf(tracing, "%s/tracing", debugfs);
+
+ tracing_found = 1;
+ return tracing;
+}
+
+static char *get_tracing_file(const char *name)
+{
+ const char *tracing;
+ char *file;
+
+ tracing = find_tracing_dir();
+ if (!tracing)
+ return NULL;
+
+ file = malloc_or_die(strlen(tracing) + strlen(name) + 2);
+
+ sprintf(file, "%s/%s", tracing, name);
+ return file;
+}
+
+static void put_tracing_file(char *file)
+{
+ free(file);
+}
+
+static ssize_t calc_data_size;
+
+static ssize_t write_or_die(const void *buf, size_t len)
+{
+ int ret;
+
+ if (calc_data_size) {
+ calc_data_size += len;
+ return len;
+ }
+
+ ret = write(output_fd, buf, len);
+ if (ret < 0)
+ die("writing to '%s'", output_file);
+
+ return ret;
+}
+
+int bigendian(void)
+{
+ unsigned char str[] = { 0x1, 0x2, 0x3, 0x4, 0x0, 0x0, 0x0, 0x0};
+ unsigned int *ptr;
+
+ ptr = (unsigned int *)(void *)str;
+ return *ptr == 0x01020304;
+}
+
+/* unfortunately, you can not stat debugfs or proc files for size */
+static void record_file(const char *file, size_t hdr_sz)
+{
+ unsigned long long size = 0;
+ char buf[BUFSIZ], *sizep;
+ off_t hdr_pos = lseek(output_fd, 0, SEEK_CUR);
+ int r, fd;
+
+ fd = open(file, O_RDONLY);
+ if (fd < 0)
+ die("Can't read '%s'", file);
+
+ /* put in zeros for file size, then fill true size later */
+ if (hdr_sz)
+ write_or_die(&size, hdr_sz);
+
+ do {
+ r = read(fd, buf, BUFSIZ);
+ if (r > 0) {
+ size += r;
+ write_or_die(buf, r);
+ }
+ } while (r > 0);
+ close(fd);
+
+ /* ugh, handle big-endian hdr_size == 4 */
+ sizep = (char*)&size;
+ if (bigendian())
+ sizep += sizeof(u64) - hdr_sz;
+
+ if (hdr_sz && pwrite(output_fd, sizep, hdr_sz, hdr_pos) < 0)
+ die("writing to %s", output_file);
+}
+
+static void read_header_files(void)
+{
+ char *path;
+ struct stat st;
+
+ path = get_tracing_file("events/header_page");
+ if (stat(path, &st) < 0)
+ die("can't read '%s'", path);
+
+ write_or_die("header_page", 12);
+ record_file(path, 8);
+ put_tracing_file(path);
+
+ path = get_tracing_file("events/header_event");
+ if (stat(path, &st) < 0)
+ die("can't read '%s'", path);
+
+ write_or_die("header_event", 13);
+ record_file(path, 8);
+ put_tracing_file(path);
+}
+
+static bool name_in_tp_list(char *sys, struct tracepoint_path *tps)
+{
+ while (tps) {
+ if (!strcmp(sys, tps->name))
+ return true;
+ tps = tps->next;
+ }
+
+ return false;
+}
+
+static void copy_event_system(const char *sys, struct tracepoint_path *tps)
+{
+ struct dirent *dent;
+ struct stat st;
+ char *format;
+ DIR *dir;
+ int count = 0;
+ int ret;
+
+ dir = opendir(sys);
+ if (!dir)
+ die("can't read directory '%s'", sys);
+
+ while ((dent = readdir(dir))) {
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
+ strcmp(dent->d_name, "..") == 0 ||
+ !name_in_tp_list(dent->d_name, tps))
+ continue;
+ format = malloc_or_die(strlen(sys) + strlen(dent->d_name) + 10);
+ sprintf(format, "%s/%s/format", sys, dent->d_name);
+ ret = stat(format, &st);
+ free(format);
+ if (ret < 0)
+ continue;
+ count++;
+ }
+
+ write_or_die(&count, 4);
+
+ rewinddir(dir);
+ while ((dent = readdir(dir))) {
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
+ strcmp(dent->d_name, "..") == 0 ||
+ !name_in_tp_list(dent->d_name, tps))
+ continue;
+ format = malloc_or_die(strlen(sys) + strlen(dent->d_name) + 10);
+ sprintf(format, "%s/%s/format", sys, dent->d_name);
+ ret = stat(format, &st);
+
+ if (ret >= 0)
+ record_file(format, 8);
+
+ free(format);
+ }
+ closedir(dir);
+}
+
+static void read_ftrace_files(struct tracepoint_path *tps)
+{
+ char *path;
+
+ path = get_tracing_file("events/ftrace");
+
+ copy_event_system(path, tps);
+
+ put_tracing_file(path);
+}
+
+static bool system_in_tp_list(char *sys, struct tracepoint_path *tps)
+{
+ while (tps) {
+ if (!strcmp(sys, tps->system))
+ return true;
+ tps = tps->next;
+ }
+
+ return false;
+}
+
+static void read_event_files(struct tracepoint_path *tps)
+{
+ struct dirent *dent;
+ struct stat st;
+ char *path;
+ char *sys;
+ DIR *dir;
+ int count = 0;
+ int ret;
+
+ path = get_tracing_file("events");
+
+ dir = opendir(path);
+ if (!dir)
+ die("can't read directory '%s'", path);
+
+ while ((dent = readdir(dir))) {
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
+ strcmp(dent->d_name, "..") == 0 ||
+ strcmp(dent->d_name, "ftrace") == 0 ||
+ !system_in_tp_list(dent->d_name, tps))
+ continue;
+ count++;
+ }
+
+ write_or_die(&count, 4);
+
+ rewinddir(dir);
+ while ((dent = readdir(dir))) {
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
+ strcmp(dent->d_name, "..") == 0 ||
+ strcmp(dent->d_name, "ftrace") == 0 ||
+ !system_in_tp_list(dent->d_name, tps))
+ continue;
+ sys = malloc_or_die(strlen(path) + strlen(dent->d_name) + 2);
+ sprintf(sys, "%s/%s", path, dent->d_name);
+ ret = stat(sys, &st);
+ if (ret >= 0) {
+ write_or_die(dent->d_name, strlen(dent->d_name) + 1);
+ copy_event_system(sys, tps);
+ }
+ free(sys);
+ }
+
+ closedir(dir);
+ put_tracing_file(path);
+}
+
+static void read_proc_kallsyms(void)
+{
+ unsigned int size;
+ const char *path = "/proc/kallsyms";
+ struct stat st;
+ int ret;
+
+ ret = stat(path, &st);
+ if (ret < 0) {
+ /* not found */
+ size = 0;
+ write_or_die(&size, 4);
+ return;
+ }
+ record_file(path, 4);
+}
+
+static void read_ftrace_printk(void)
+{
+ unsigned int size;
+ char *path;
+ struct stat st;
+ int ret;
+
+ path = get_tracing_file("printk_formats");
+ ret = stat(path, &st);
+ if (ret < 0) {
+ /* not found */
+ size = 0;
+ write_or_die(&size, 4);
+ goto out;
+ }
+ record_file(path, 4);
+
+out:
+ put_tracing_file(path);
+}
+
+static struct tracepoint_path *
+get_tracepoints_path(struct list_head *pattrs)
+{
+ struct tracepoint_path path, *ppath = &path;
+ struct perf_evsel *pos;
+ int nr_tracepoints = 0;
+
+ list_for_each_entry(pos, pattrs, node) {
+ if (pos->attr.type != PERF_TYPE_TRACEPOINT)
+ continue;
+ ++nr_tracepoints;
+ ppath->next = tracepoint_id_to_path(pos->attr.config);
+ if (!ppath->next)
+ die("%s\n", "No memory to alloc tracepoints list");
+ ppath = ppath->next;
+ }
+
+ return nr_tracepoints > 0 ? path.next : NULL;
+}
+
+static void
+put_tracepoints_path(struct tracepoint_path *tps)
+{
+ while (tps) {
+ struct tracepoint_path *t = tps;
+
+ tps = tps->next;
+ free(t->name);
+ free(t->system);
+ free(t);
+ }
+}
+
+bool have_tracepoints(struct list_head *pattrs)
+{
+ struct perf_evsel *pos;
+
+ list_for_each_entry(pos, pattrs, node)
+ if (pos->attr.type == PERF_TYPE_TRACEPOINT)
+ return true;
+
+ return false;
+}
+
+static void tracing_data_header(void)
+{
+ char buf[20];
+
+ /* just guessing this is someone's birthday.. ;) */
+ buf[0] = 23;
+ buf[1] = 8;
+ buf[2] = 68;
+ memcpy(buf + 3, "tracing", 7);
+
+ write_or_die(buf, 10);
+
+ write_or_die(VERSION, strlen(VERSION) + 1);
+
+ /* save endian */
+ if (bigendian())
+ buf[0] = 1;
+ else
+ buf[0] = 0;
+
+ write_or_die(buf, 1);
+
+ /* save size of long */
+ buf[0] = sizeof(long);
+ write_or_die(buf, 1);
+
+ /* save page_size */
+ page_size = sysconf(_SC_PAGESIZE);
+ write_or_die(&page_size, 4);
+}
+
+struct tracing_data *tracing_data_get(struct list_head *pattrs,
+ int fd, bool temp)
+{
+ struct tracepoint_path *tps;
+ struct tracing_data *tdata;
+
+ output_fd = fd;
+
+ tps = get_tracepoints_path(pattrs);
+ if (!tps)
+ return NULL;
+
+ tdata = malloc_or_die(sizeof(*tdata));
+ tdata->temp = temp;
+ tdata->size = 0;
+
+ if (temp) {
+ int temp_fd;
+
+ snprintf(tdata->temp_file, sizeof(tdata->temp_file),
+ "/tmp/perf-XXXXXX");
+ if (mkstemp(tdata->temp_file) < 0)
+ die("Can't make temp file");
+
+ temp_fd = open(tdata->temp_file, O_RDWR);
+ if (temp_fd < 0)
+ die("Can't read '%s'", tdata->temp_file);
+
+ /*
+ * Set the temp file as the default output, so all the
+ * tracing data is stored into it.
+ */
+ output_fd = temp_fd;
+ }
+
+ tracing_data_header();
+ read_header_files();
+ read_ftrace_files(tps);
+ read_event_files(tps);
+ read_proc_kallsyms();
+ read_ftrace_printk();
+
+ /*
+ * All tracing data is stored by now; we can restore
+ * the default output file in case we used a temp file.
+ */
+ if (temp) {
+ tdata->size = lseek(output_fd, 0, SEEK_CUR);
+ close(output_fd);
+ output_fd = fd;
+ }
+
+ put_tracepoints_path(tps);
+ return tdata;
+}
+
+void tracing_data_put(struct tracing_data *tdata)
+{
+ if (tdata->temp) {
+ record_file(tdata->temp_file, 0);
+ unlink(tdata->temp_file);
+ }
+
+ free(tdata);
+}
+
+int read_tracing_data(int fd, struct list_head *pattrs)
+{
+ struct tracing_data *tdata;
+
+ /*
+ * We work over the real file, so we can write data
+ * directly, no temp file is needed.
+ */
+ tdata = tracing_data_get(pattrs, fd, false);
+ if (!tdata)
+ return -ENOMEM;
+
+ tracing_data_put(tdata);
+ return 0;
+}
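The header laid down by tracing_data_header() above is: three magic bytes {23, 8, 68}, the seven characters "tracing" (no NUL), a NUL-terminated version string, one endianness byte, one sizeof(long) byte, and a 4-byte page size; each later section is preceded by a small count or size word written via write_or_die() and record_file(). Below is a hypothetical reader sketch, not part of the patch, that only validates the leading 10-byte magic.

/* Sketch: check whether a file starts with the tracing-data magic above. */
#include <stdio.h>
#include <string.h>

static int check_tracing_magic(FILE *fp)
{
	unsigned char buf[10];

	if (fread(buf, 1, sizeof(buf), fp) != sizeof(buf))
		return -1;

	if (buf[0] != 23 || buf[1] != 8 || buf[2] != 68)
		return -1;

	return memcmp(buf + 3, "tracing", 7) == 0 ? 0 : -1;
}

int main(int argc, char **argv)
{
	FILE *fp;

	if (argc < 2)
		return 1;
	fp = fopen(argv[1], "rb");
	if (!fp)
		return 1;
	printf("%s: %s\n", argv[1],
	       check_tracing_magic(fp) == 0 ? "tracing data header found"
					    : "not tracing data");
	fclose(fp);
	return 0;
}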
diff --git a/ANDROID_3.4.5/tools/perf/util/trace-event-parse.c b/ANDROID_3.4.5/tools/perf/util/trace-event-parse.c
new file mode 100644
index 00000000..dfd1bd83
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/trace-event-parse.c
@@ -0,0 +1,3153 @@
+/*
+ * Copyright (C) 2009, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The parts for function graph printing was taken and modified from the
+ * Linux Kernel that were written by Frederic Weisbecker.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "../perf.h"
+#include "util.h"
+#include "trace-event.h"
+
+int header_page_ts_offset;
+int header_page_ts_size;
+int header_page_size_offset;
+int header_page_size_size;
+int header_page_overwrite_offset;
+int header_page_overwrite_size;
+int header_page_data_offset;
+int header_page_data_size;
+
+bool latency_format;
+
+static char *input_buf;
+static unsigned long long input_buf_ptr;
+static unsigned long long input_buf_siz;
+
+static int cpus;
+static int long_size;
+static int is_flag_field;
+static int is_symbolic_field;
+
+static struct format_field *
+find_any_field(struct event *event, const char *name);
+
+static void init_input_buf(char *buf, unsigned long long size)
+{
+ input_buf = buf;
+ input_buf_siz = size;
+ input_buf_ptr = 0;
+}
+
+struct cmdline {
+ char *comm;
+ int pid;
+};
+
+static struct cmdline *cmdlines;
+static int cmdline_count;
+
+static int cmdline_cmp(const void *a, const void *b)
+{
+ const struct cmdline *ca = a;
+ const struct cmdline *cb = b;
+
+ if (ca->pid < cb->pid)
+ return -1;
+ if (ca->pid > cb->pid)
+ return 1;
+
+ return 0;
+}
+
+void parse_cmdlines(char *file, int size __unused)
+{
+ struct cmdline_list {
+ struct cmdline_list *next;
+ char *comm;
+ int pid;
+ } *list = NULL, *item;
+ char *line;
+ char *next = NULL;
+ int i;
+
+ line = strtok_r(file, "\n", &next);
+ while (line) {
+ item = malloc_or_die(sizeof(*item));
+ sscanf(line, "%d %as", &item->pid,
+ (float *)(void *)&item->comm); /* workaround gcc warning */
+ item->next = list;
+ list = item;
+ line = strtok_r(NULL, "\n", &next);
+ cmdline_count++;
+ }
+
+ cmdlines = malloc_or_die(sizeof(*cmdlines) * cmdline_count);
+
+ i = 0;
+ while (list) {
+ cmdlines[i].pid = list->pid;
+ cmdlines[i].comm = list->comm;
+ i++;
+ item = list;
+ list = list->next;
+ free(item);
+ }
+
+ qsort(cmdlines, cmdline_count, sizeof(*cmdlines), cmdline_cmp);
+}
+
+static struct func_map {
+ unsigned long long addr;
+ char *func;
+ char *mod;
+} *func_list;
+static unsigned int func_count;
+
+static int func_cmp(const void *a, const void *b)
+{
+ const struct func_map *fa = a;
+ const struct func_map *fb = b;
+
+ if (fa->addr < fb->addr)
+ return -1;
+ if (fa->addr > fb->addr)
+ return 1;
+
+ return 0;
+}
+
+void parse_proc_kallsyms(char *file, unsigned int size __unused)
+{
+ struct func_list {
+ struct func_list *next;
+ unsigned long long addr;
+ char *func;
+ char *mod;
+ } *list = NULL, *item;
+ char *line;
+ char *next = NULL;
+ char *addr_str;
+ char ch;
+ int ret __used;
+ int i;
+
+ line = strtok_r(file, "\n", &next);
+ while (line) {
+ item = malloc_or_die(sizeof(*item));
+ item->mod = NULL;
+ ret = sscanf(line, "%as %c %as\t[%as",
+ (float *)(void *)&addr_str, /* workaround gcc warning */
+ &ch,
+ (float *)(void *)&item->func,
+ (float *)(void *)&item->mod);
+ item->addr = strtoull(addr_str, NULL, 16);
+ free(addr_str);
+
+ /* truncate the extra ']' */
+ if (item->mod)
+ item->mod[strlen(item->mod) - 1] = 0;
+
+
+ item->next = list;
+ list = item;
+ line = strtok_r(NULL, "\n", &next);
+ func_count++;
+ }
+
+ func_list = malloc_or_die(sizeof(*func_list) * (func_count + 1));
+
+ i = 0;
+ while (list) {
+ func_list[i].func = list->func;
+ func_list[i].addr = list->addr;
+ func_list[i].mod = list->mod;
+ i++;
+ item = list;
+ list = list->next;
+ free(item);
+ }
+
+ qsort(func_list, func_count, sizeof(*func_list), func_cmp);
+
+ /*
+ * Add a special record at the end.
+ */
+ func_list[func_count].func = NULL;
+ func_list[func_count].addr = 0;
+ func_list[func_count].mod = NULL;
+}
+
+/*
+ * We are searching for a record in between, not an exact
+ * match.
+ */
+static int func_bcmp(const void *a, const void *b)
+{
+ const struct func_map *fa = a;
+ const struct func_map *fb = b;
+
+ if ((fa->addr == fb->addr) ||
+
+ (fa->addr > fb->addr &&
+ fa->addr < (fb+1)->addr))
+ return 0;
+
+ if (fa->addr < fb->addr)
+ return -1;
+
+ return 1;
+}
+
+static struct func_map *find_func(unsigned long long addr)
+{
+ struct func_map *func;
+ struct func_map key;
+
+ key.addr = addr;
+
+ func = bsearch(&key, func_list, func_count, sizeof(*func_list),
+ func_bcmp);
+
+ return func;
+}
+
+void print_funcs(void)
+{
+ int i;
+
+ for (i = 0; i < (int)func_count; i++) {
+ printf("%016llx %s",
+ func_list[i].addr,
+ func_list[i].func);
+ if (func_list[i].mod)
+ printf(" [%s]\n", func_list[i].mod);
+ else
+ printf("\n");
+ }
+}
+
+static struct printk_map {
+ unsigned long long addr;
+ char *printk;
+} *printk_list;
+static unsigned int printk_count;
+
+static int printk_cmp(const void *a, const void *b)
+{
+ const struct func_map *fa = a;
+ const struct func_map *fb = b;
+
+ if (fa->addr < fb->addr)
+ return -1;
+ if (fa->addr > fb->addr)
+ return 1;
+
+ return 0;
+}
+
+static struct printk_map *find_printk(unsigned long long addr)
+{
+ struct printk_map *printk;
+ struct printk_map key;
+
+ key.addr = addr;
+
+ printk = bsearch(&key, printk_list, printk_count, sizeof(*printk_list),
+ printk_cmp);
+
+ return printk;
+}
+
+void parse_ftrace_printk(char *file, unsigned int size __unused)
+{
+ struct printk_list {
+ struct printk_list *next;
+ unsigned long long addr;
+ char *printk;
+ } *list = NULL, *item;
+ char *line;
+ char *next = NULL;
+ char *addr_str;
+ int i;
+
+ line = strtok_r(file, "\n", &next);
+ while (line) {
+ addr_str = strsep(&line, ":");
+ if (!line) {
+ warning("error parsing print strings");
+ break;
+ }
+ item = malloc_or_die(sizeof(*item));
+ item->addr = strtoull(addr_str, NULL, 16);
+ /* fmt still has a space, skip it */
+ item->printk = strdup(line+1);
+ item->next = list;
+ list = item;
+ line = strtok_r(NULL, "\n", &next);
+ printk_count++;
+ }
+
+ printk_list = malloc_or_die(sizeof(*printk_list) * printk_count + 1);
+
+ i = 0;
+ while (list) {
+ printk_list[i].printk = list->printk;
+ printk_list[i].addr = list->addr;
+ i++;
+ item = list;
+ list = list->next;
+ free(item);
+ }
+
+ qsort(printk_list, printk_count, sizeof(*printk_list), printk_cmp);
+}
+
+void print_printk(void)
+{
+ int i;
+
+ for (i = 0; i < (int)printk_count; i++) {
+ printf("%016llx %s\n",
+ printk_list[i].addr,
+ printk_list[i].printk);
+ }
+}
+
+static struct event *alloc_event(void)
+{
+ struct event *event;
+
+ event = malloc_or_die(sizeof(*event));
+ memset(event, 0, sizeof(*event));
+
+ return event;
+}
+
+enum event_type {
+ EVENT_ERROR,
+ EVENT_NONE,
+ EVENT_SPACE,
+ EVENT_NEWLINE,
+ EVENT_OP,
+ EVENT_DELIM,
+ EVENT_ITEM,
+ EVENT_DQUOTE,
+ EVENT_SQUOTE,
+};
+
+static struct event *event_list;
+
+static void add_event(struct event *event)
+{
+ event->next = event_list;
+ event_list = event;
+}
+
+static int event_item_type(enum event_type type)
+{
+ switch (type) {
+ case EVENT_ITEM ... EVENT_SQUOTE:
+ return 1;
+ case EVENT_ERROR ... EVENT_DELIM:
+ default:
+ return 0;
+ }
+}
+
+static void free_arg(struct print_arg *arg)
+{
+ if (!arg)
+ return;
+
+ switch (arg->type) {
+ case PRINT_ATOM:
+ if (arg->atom.atom)
+ free(arg->atom.atom);
+ break;
+ case PRINT_NULL:
+ case PRINT_FIELD ... PRINT_OP:
+ default:
+ /* todo */
+ break;
+ }
+
+ free(arg);
+}
+
+static enum event_type get_type(int ch)
+{
+ if (ch == '\n')
+ return EVENT_NEWLINE;
+ if (isspace(ch))
+ return EVENT_SPACE;
+ if (isalnum(ch) || ch == '_')
+ return EVENT_ITEM;
+ if (ch == '\'')
+ return EVENT_SQUOTE;
+ if (ch == '"')
+ return EVENT_DQUOTE;
+ if (!isprint(ch))
+ return EVENT_NONE;
+ if (ch == '(' || ch == ')' || ch == ',')
+ return EVENT_DELIM;
+
+ return EVENT_OP;
+}
+
+static int __read_char(void)
+{
+ if (input_buf_ptr >= input_buf_siz)
+ return -1;
+
+ return input_buf[input_buf_ptr++];
+}
+
+static int __peek_char(void)
+{
+ if (input_buf_ptr >= input_buf_siz)
+ return -1;
+
+ return input_buf[input_buf_ptr];
+}
+
+static enum event_type __read_token(char **tok)
+{
+ char buf[BUFSIZ];
+ int ch, last_ch, quote_ch, next_ch;
+ int i = 0;
+ int tok_size = 0;
+ enum event_type type;
+
+ *tok = NULL;
+
+
+ ch = __read_char();
+ if (ch < 0)
+ return EVENT_NONE;
+
+ type = get_type(ch);
+ if (type == EVENT_NONE)
+ return type;
+
+ buf[i++] = ch;
+
+ switch (type) {
+ case EVENT_NEWLINE:
+ case EVENT_DELIM:
+ *tok = malloc_or_die(2);
+ (*tok)[0] = ch;
+ (*tok)[1] = 0;
+ return type;
+
+ case EVENT_OP:
+ switch (ch) {
+ case '-':
+ next_ch = __peek_char();
+ if (next_ch == '>') {
+ buf[i++] = __read_char();
+ break;
+ }
+ /* fall through */
+ case '+':
+ case '|':
+ case '&':
+ case '>':
+ case '<':
+ last_ch = ch;
+ ch = __peek_char();
+ if (ch != last_ch)
+ goto test_equal;
+ buf[i++] = __read_char();
+ switch (last_ch) {
+ case '>':
+ case '<':
+ goto test_equal;
+ default:
+ break;
+ }
+ break;
+ case '!':
+ case '=':
+ goto test_equal;
+ default: /* what should we do instead? */
+ break;
+ }
+ buf[i] = 0;
+ *tok = strdup(buf);
+ return type;
+
+ test_equal:
+ ch = __peek_char();
+ if (ch == '=')
+ buf[i++] = __read_char();
+ break;
+
+ case EVENT_DQUOTE:
+ case EVENT_SQUOTE:
+ /* don't keep quotes */
+ i--;
+ quote_ch = ch;
+ last_ch = 0;
+ do {
+ if (i == (BUFSIZ - 1)) {
+ buf[i] = 0;
+ if (*tok) {
+ *tok = realloc(*tok, tok_size + BUFSIZ);
+ if (!*tok)
+ return EVENT_NONE;
+ strcat(*tok, buf);
+ } else
+ *tok = strdup(buf);
+
+ if (!*tok)
+ return EVENT_NONE;
+ tok_size += BUFSIZ;
+ i = 0;
+ }
+ last_ch = ch;
+ ch = __read_char();
+ buf[i++] = ch;
+ /* two '\' in a row cancel each other (escaped backslash) */
+ if (ch == '\\' && last_ch == '\\')
+ last_ch = 0;
+ } while (ch != quote_ch || last_ch == '\\');
+ /* remove the last quote */
+ i--;
+ goto out;
+
+ case EVENT_ERROR ... EVENT_SPACE:
+ case EVENT_ITEM:
+ default:
+ break;
+ }
+
+ while (get_type(__peek_char()) == type) {
+ if (i == (BUFSIZ - 1)) {
+ buf[i] = 0;
+ if (*tok) {
+ *tok = realloc(*tok, tok_size + BUFSIZ);
+ if (!*tok)
+ return EVENT_NONE;
+ strcat(*tok, buf);
+ } else
+ *tok = strdup(buf);
+
+ if (!*tok)
+ return EVENT_NONE;
+ tok_size += BUFSIZ;
+ i = 0;
+ }
+ ch = __read_char();
+ buf[i++] = ch;
+ }
+
+ out:
+ buf[i] = 0;
+ if (*tok) {
+ *tok = realloc(*tok, tok_size + i);
+ if (!*tok)
+ return EVENT_NONE;
+ strcat(*tok, buf);
+ } else
+ *tok = strdup(buf);
+ if (!*tok)
+ return EVENT_NONE;
+
+ return type;
+}
+
+static void free_token(char *tok)
+{
+ if (tok)
+ free(tok);
+}
+
+static enum event_type read_token(char **tok)
+{
+ enum event_type type;
+
+ for (;;) {
+ type = __read_token(tok);
+ if (type != EVENT_SPACE)
+ return type;
+
+ free_token(*tok);
+ }
+
+ /* not reached */
+ return EVENT_NONE;
+}
+
+/* no newline */
+static enum event_type read_token_item(char **tok)
+{
+ enum event_type type;
+
+ for (;;) {
+ type = __read_token(tok);
+ if (type != EVENT_SPACE && type != EVENT_NEWLINE)
+ return type;
+
+ free_token(*tok);
+ }
+
+ /* not reached */
+ return EVENT_NONE;
+}
+
+static int test_type(enum event_type type, enum event_type expect)
+{
+ if (type != expect) {
+ warning("Error: expected type %d but read %d",
+ expect, type);
+ return -1;
+ }
+ return 0;
+}
+
+static int __test_type_token(enum event_type type, char *token,
+ enum event_type expect, const char *expect_tok,
+ bool warn)
+{
+ if (type != expect) {
+ if (warn)
+ warning("Error: expected type %d but read %d",
+ expect, type);
+ return -1;
+ }
+
+ if (strcmp(token, expect_tok) != 0) {
+ if (warn)
+ warning("Error: expected '%s' but read '%s'",
+ expect_tok, token);
+ return -1;
+ }
+ return 0;
+}
+
+static int test_type_token(enum event_type type, char *token,
+ enum event_type expect, const char *expect_tok)
+{
+ return __test_type_token(type, token, expect, expect_tok, true);
+}
+
+static int __read_expect_type(enum event_type expect, char **tok, int newline_ok)
+{
+ enum event_type type;
+
+ if (newline_ok)
+ type = read_token(tok);
+ else
+ type = read_token_item(tok);
+ return test_type(type, expect);
+}
+
+static int read_expect_type(enum event_type expect, char **tok)
+{
+ return __read_expect_type(expect, tok, 1);
+}
+
+static int __read_expected(enum event_type expect, const char *str,
+ int newline_ok, bool warn)
+{
+ enum event_type type;
+ char *token;
+ int ret;
+
+ if (newline_ok)
+ type = read_token(&token);
+ else
+ type = read_token_item(&token);
+
+ ret = __test_type_token(type, token, expect, str, warn);
+
+ free_token(token);
+
+ return ret;
+}
+
+static int read_expected(enum event_type expect, const char *str)
+{
+ return __read_expected(expect, str, 1, true);
+}
+
+static int read_expected_item(enum event_type expect, const char *str)
+{
+ return __read_expected(expect, str, 0, true);
+}
+
+static char *event_read_name(void)
+{
+ char *token;
+
+ if (read_expected(EVENT_ITEM, "name") < 0)
+ return NULL;
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ return NULL;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto fail;
+
+ return token;
+
+ fail:
+ free_token(token);
+ return NULL;
+}
+
+static int event_read_id(void)
+{
+ char *token;
+ int id = -1;
+
+ if (read_expected_item(EVENT_ITEM, "ID") < 0)
+ return -1;
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ return -1;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto free;
+
+ id = strtoul(token, NULL, 0);
+
+ free:
+ free_token(token);
+ return id;
+}
+
+static int field_is_string(struct format_field *field)
+{
+ if ((field->flags & FIELD_IS_ARRAY) &&
+ (strstr(field->type, "char") || strstr(field->type, "u8") ||
+ strstr(field->type, "s8")))
+ return 1;
+
+ return 0;
+}
+
+static int field_is_dynamic(struct format_field *field)
+{
+ if (!strncmp(field->type, "__data_loc", 10))
+ return 1;
+
+ return 0;
+}
+
+static int event_read_fields(struct event *event, struct format_field **fields)
+{
+ struct format_field *field = NULL;
+ enum event_type type;
+ char *token;
+ char *last_token;
+ int count = 0;
+
+ do {
+ type = read_token(&token);
+ if (type == EVENT_NEWLINE) {
+ free_token(token);
+ return count;
+ }
+
+ count++;
+
+ if (test_type_token(type, token, EVENT_ITEM, "field"))
+ goto fail;
+ free_token(token);
+
+ type = read_token(&token);
+ /*
+ * The ftrace fields may still use the "special" name.
+ * Just ignore it.
+ */
+ if (event->flags & EVENT_FL_ISFTRACE &&
+ type == EVENT_ITEM && strcmp(token, "special") == 0) {
+ free_token(token);
+ type = read_token(&token);
+ }
+
+ if (test_type_token(type, token, EVENT_OP, ":") < 0)
+ return -1;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto fail;
+
+ last_token = token;
+
+ field = malloc_or_die(sizeof(*field));
+ memset(field, 0, sizeof(*field));
+
+ /* read the rest of the type */
+ for (;;) {
+ type = read_token(&token);
+ if (type == EVENT_ITEM ||
+ (type == EVENT_OP && strcmp(token, "*") == 0) ||
+ /*
+ * Some of the ftrace fields are broken and have
+ * an illegal "." in them.
+ */
+ (event->flags & EVENT_FL_ISFTRACE &&
+ type == EVENT_OP && strcmp(token, ".") == 0)) {
+
+ if (strcmp(token, "*") == 0)
+ field->flags |= FIELD_IS_POINTER;
+
+ if (field->type) {
+ field->type = realloc(field->type,
+ strlen(field->type) +
+ strlen(last_token) + 2);
+ strcat(field->type, " ");
+ strcat(field->type, last_token);
+ } else
+ field->type = last_token;
+ last_token = token;
+ continue;
+ }
+
+ break;
+ }
+
+ if (!field->type) {
+ die("no type found");
+ goto fail;
+ }
+ field->name = last_token;
+
+ if (test_type(type, EVENT_OP))
+ goto fail;
+
+ if (strcmp(token, "[") == 0) {
+ enum event_type last_type = type;
+ char *brackets = token;
+ int len;
+
+ field->flags |= FIELD_IS_ARRAY;
+
+ type = read_token(&token);
+ while (strcmp(token, "]") != 0) {
+ if (last_type == EVENT_ITEM &&
+ type == EVENT_ITEM)
+ len = 2;
+ else
+ len = 1;
+ last_type = type;
+
+ brackets = realloc(brackets,
+ strlen(brackets) +
+ strlen(token) + len);
+ if (len == 2)
+ strcat(brackets, " ");
+ strcat(brackets, token);
+ free_token(token);
+ type = read_token(&token);
+ if (type == EVENT_NONE) {
+ die("failed to find token");
+ goto fail;
+ }
+ }
+
+ free_token(token);
+
+ brackets = realloc(brackets, strlen(brackets) + 2);
+ strcat(brackets, "]");
+
+ /* add brackets to type */
+
+ type = read_token(&token);
+ /*
+ * If the next token is not an OP, then it is of
+ * the format: type [] item;
+ */
+ if (type == EVENT_ITEM) {
+ field->type = realloc(field->type,
+ strlen(field->type) +
+ strlen(field->name) +
+ strlen(brackets) + 2);
+ strcat(field->type, " ");
+ strcat(field->type, field->name);
+ free_token(field->name);
+ strcat(field->type, brackets);
+ field->name = token;
+ type = read_token(&token);
+ } else {
+ field->type = realloc(field->type,
+ strlen(field->type) +
+ strlen(brackets) + 1);
+ strcat(field->type, brackets);
+ }
+ free(brackets);
+ }
+
+ if (field_is_string(field)) {
+ field->flags |= FIELD_IS_STRING;
+ if (field_is_dynamic(field))
+ field->flags |= FIELD_IS_DYNAMIC;
+ }
+
+ if (test_type_token(type, token, EVENT_OP, ";"))
+ goto fail;
+ free_token(token);
+
+ if (read_expected(EVENT_ITEM, "offset") < 0)
+ goto fail_expect;
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ goto fail_expect;
+
+ if (read_expect_type(EVENT_ITEM, &token))
+ goto fail;
+ field->offset = strtoul(token, NULL, 0);
+ free_token(token);
+
+ if (read_expected(EVENT_OP, ";") < 0)
+ goto fail_expect;
+
+ if (read_expected(EVENT_ITEM, "size") < 0)
+ goto fail_expect;
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ goto fail_expect;
+
+ if (read_expect_type(EVENT_ITEM, &token))
+ goto fail;
+ field->size = strtoul(token, NULL, 0);
+ free_token(token);
+
+ if (read_expected(EVENT_OP, ";") < 0)
+ goto fail_expect;
+
+ type = read_token(&token);
+ if (type != EVENT_NEWLINE) {
+ /* newer versions of the kernel have a "signed" type */
+ if (test_type_token(type, token, EVENT_ITEM, "signed"))
+ goto fail;
+
+ free_token(token);
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ goto fail_expect;
+
+ if (read_expect_type(EVENT_ITEM, &token))
+ goto fail;
+
+ if (strtoul(token, NULL, 0))
+ field->flags |= FIELD_IS_SIGNED;
+
+ free_token(token);
+ if (read_expected(EVENT_OP, ";") < 0)
+ goto fail_expect;
+
+ if (read_expect_type(EVENT_NEWLINE, &token))
+ goto fail;
+ }
+
+ free_token(token);
+
+ *fields = field;
+ fields = &field->next;
+
+ } while (1);
+
+ return 0;
+
+fail:
+ free_token(token);
+fail_expect:
+ if (field)
+ free(field);
+ return -1;
+}
+
+static int event_read_format(struct event *event)
+{
+ char *token;
+ int ret;
+
+ if (read_expected_item(EVENT_ITEM, "format") < 0)
+ return -1;
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ return -1;
+
+ if (read_expect_type(EVENT_NEWLINE, &token))
+ goto fail;
+ free_token(token);
+
+ ret = event_read_fields(event, &event->format.common_fields);
+ if (ret < 0)
+ return ret;
+ event->format.nr_common = ret;
+
+ ret = event_read_fields(event, &event->format.fields);
+ if (ret < 0)
+ return ret;
+ event->format.nr_fields = ret;
+
+ return 0;
+
+ fail:
+ free_token(token);
+ return -1;
+}
+
+enum event_type
+process_arg_token(struct event *event, struct print_arg *arg,
+ char **tok, enum event_type type);
+
+static enum event_type
+process_arg(struct event *event, struct print_arg *arg, char **tok)
+{
+ enum event_type type;
+ char *token;
+
+ type = read_token(&token);
+ *tok = token;
+
+ return process_arg_token(event, arg, tok, type);
+}
+
+static enum event_type
+process_cond(struct event *event, struct print_arg *top, char **tok)
+{
+ struct print_arg *arg, *left, *right;
+ enum event_type type;
+ char *token = NULL;
+
+ arg = malloc_or_die(sizeof(*arg));
+ memset(arg, 0, sizeof(*arg));
+
+ left = malloc_or_die(sizeof(*left));
+
+ right = malloc_or_die(sizeof(*right));
+
+ arg->type = PRINT_OP;
+ arg->op.left = left;
+ arg->op.right = right;
+
+ *tok = NULL;
+ type = process_arg(event, left, &token);
+ if (test_type_token(type, token, EVENT_OP, ":"))
+ goto out_free;
+
+ arg->op.op = token;
+
+ type = process_arg(event, right, &token);
+
+ top->op.right = arg;
+
+ *tok = token;
+ return type;
+
+out_free:
+ free_token(*tok);
+ free(right);
+ free(left);
+ free_arg(arg);
+ return EVENT_ERROR;
+}
+
+static enum event_type
+process_array(struct event *event, struct print_arg *top, char **tok)
+{
+ struct print_arg *arg;
+ enum event_type type;
+ char *token = NULL;
+
+ arg = malloc_or_die(sizeof(*arg));
+ memset(arg, 0, sizeof(*arg));
+
+ *tok = NULL;
+ type = process_arg(event, arg, &token);
+ if (test_type_token(type, token, EVENT_OP, "]"))
+ goto out_free;
+
+ top->op.right = arg;
+
+ free_token(token);
+ type = read_token_item(&token);
+ *tok = token;
+
+ return type;
+
+out_free:
+ free_token(*tok);
+ free_arg(arg);
+ return EVENT_ERROR;
+}
+
+static int get_op_prio(char *op)
+{
+ if (!op[1]) {
+ switch (op[0]) {
+ case '*':
+ case '/':
+ case '%':
+ return 6;
+ case '+':
+ case '-':
+ return 7;
+ /* '>>' and '<<' are 8 */
+ case '<':
+ case '>':
+ return 9;
+ /* '==' and '!=' are 10 */
+ case '&':
+ return 11;
+ case '^':
+ return 12;
+ case '|':
+ return 13;
+ case '?':
+ return 16;
+ default:
+ die("unknown op '%c'", op[0]);
+ return -1;
+ }
+ } else {
+ if (strcmp(op, "++") == 0 ||
+ strcmp(op, "--") == 0) {
+ return 3;
+ } else if (strcmp(op, ">>") == 0 ||
+ strcmp(op, "<<") == 0) {
+ return 8;
+ } else if (strcmp(op, ">=") == 0 ||
+ strcmp(op, "<=") == 0) {
+ return 9;
+ } else if (strcmp(op, "==") == 0 ||
+ strcmp(op, "!=") == 0) {
+ return 10;
+ } else if (strcmp(op, "&&") == 0) {
+ return 14;
+ } else if (strcmp(op, "||") == 0) {
+ return 15;
+ } else {
+ die("unknown op '%s'", op);
+ return -1;
+ }
+ }
+}
+
+static void set_op_prio(struct print_arg *arg)
+{
+
+ /* single ops are the greatest */
+ if (!arg->op.left || arg->op.left->type == PRINT_NULL) {
+ arg->op.prio = 0;
+ return;
+ }
+
+ arg->op.prio = get_op_prio(arg->op.op);
+}
+
+static enum event_type
+process_op(struct event *event, struct print_arg *arg, char **tok)
+{
+ struct print_arg *left, *right = NULL;
+ enum event_type type;
+ char *token;
+
+ /* the op is passed in via tok */
+ token = *tok;
+
+ if (arg->type == PRINT_OP && !arg->op.left) {
+ /* handle single op */
+ if (token[1]) {
+ die("bad op token %s", token);
+ return EVENT_ERROR;
+ }
+ switch (token[0]) {
+ case '!':
+ case '+':
+ case '-':
+ break;
+ default:
+ die("bad op token %s", token);
+ return EVENT_ERROR;
+ }
+
+ /* make an empty left */
+ left = malloc_or_die(sizeof(*left));
+ left->type = PRINT_NULL;
+ arg->op.left = left;
+
+ right = malloc_or_die(sizeof(*right));
+ arg->op.right = right;
+
+ type = process_arg(event, right, tok);
+
+ } else if (strcmp(token, "?") == 0) {
+
+ left = malloc_or_die(sizeof(*left));
+ /* copy the top arg to the left */
+ *left = *arg;
+
+ arg->type = PRINT_OP;
+ arg->op.op = token;
+ arg->op.left = left;
+ arg->op.prio = 0;
+
+ type = process_cond(event, arg, tok);
+
+ } else if (strcmp(token, ">>") == 0 ||
+ strcmp(token, "<<") == 0 ||
+ strcmp(token, "&") == 0 ||
+ strcmp(token, "|") == 0 ||
+ strcmp(token, "&&") == 0 ||
+ strcmp(token, "||") == 0 ||
+ strcmp(token, "-") == 0 ||
+ strcmp(token, "+") == 0 ||
+ strcmp(token, "*") == 0 ||
+ strcmp(token, "^") == 0 ||
+ strcmp(token, "/") == 0 ||
+ strcmp(token, "<") == 0 ||
+ strcmp(token, ">") == 0 ||
+ strcmp(token, "==") == 0 ||
+ strcmp(token, "!=") == 0) {
+
+ left = malloc_or_die(sizeof(*left));
+
+ /* copy the top arg to the left */
+ *left = *arg;
+
+ arg->type = PRINT_OP;
+ arg->op.op = token;
+ arg->op.left = left;
+
+ set_op_prio(arg);
+
+ right = malloc_or_die(sizeof(*right));
+
+ type = read_token_item(&token);
+ *tok = token;
+
+ /* could just be a type pointer */
+ if ((strcmp(arg->op.op, "*") == 0) &&
+ type == EVENT_DELIM && (strcmp(token, ")") == 0)) {
+ if (left->type != PRINT_ATOM)
+ die("bad pointer type");
+ left->atom.atom = realloc(left->atom.atom,
+ strlen(left->atom.atom) + 3);
+ strcat(left->atom.atom, " *");
+ *arg = *left;
+ free(left);
+
+ return type;
+ }
+
+ type = process_arg_token(event, right, tok, type);
+
+ arg->op.right = right;
+
+ } else if (strcmp(token, "[") == 0) {
+
+ left = malloc_or_die(sizeof(*left));
+ *left = *arg;
+
+ arg->type = PRINT_OP;
+ arg->op.op = token;
+ arg->op.left = left;
+
+ arg->op.prio = 0;
+ type = process_array(event, arg, tok);
+
+ } else {
+ warning("unknown op '%s'", token);
+ event->flags |= EVENT_FL_FAILED;
+ /* the arg is now the left side */
+ return EVENT_NONE;
+ }
+
+ if (type == EVENT_OP) {
+ int prio;
+
+ /* higher prios need to be closer to the root */
+ prio = get_op_prio(*tok);
+
+ if (prio > arg->op.prio)
+ return process_op(event, arg, tok);
+
+ return process_op(event, right, tok);
+ }
+
+ return type;
+}
+
+static enum event_type
+process_entry(struct event *event __unused, struct print_arg *arg,
+ char **tok)
+{
+ enum event_type type;
+ char *field;
+ char *token;
+
+ if (read_expected(EVENT_OP, "->") < 0)
+ return EVENT_ERROR;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto fail;
+ field = token;
+
+ arg->type = PRINT_FIELD;
+ arg->field.name = field;
+
+ if (is_flag_field) {
+ arg->field.field = find_any_field(event, arg->field.name);
+ arg->field.field->flags |= FIELD_IS_FLAG;
+ is_flag_field = 0;
+ } else if (is_symbolic_field) {
+ arg->field.field = find_any_field(event, arg->field.name);
+ arg->field.field->flags |= FIELD_IS_SYMBOLIC;
+ is_symbolic_field = 0;
+ }
+
+ type = read_token(&token);
+ *tok = token;
+
+ return type;
+
+fail:
+ free_token(token);
+ return EVENT_ERROR;
+}
+
+static char *arg_eval (struct print_arg *arg);
+
+static long long arg_num_eval(struct print_arg *arg)
+{
+ long long left, right;
+ long long val = 0;
+
+ switch (arg->type) {
+ case PRINT_ATOM:
+ val = strtoll(arg->atom.atom, NULL, 0);
+ break;
+ case PRINT_TYPE:
+ val = arg_num_eval(arg->typecast.item);
+ break;
+ case PRINT_OP:
+ switch (arg->op.op[0]) {
+ case '|':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+ if (arg->op.op[1])
+ val = left || right;
+ else
+ val = left | right;
+ break;
+ case '&':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+ if (arg->op.op[1])
+ val = left && right;
+ else
+ val = left & right;
+ break;
+ case '<':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+ switch (arg->op.op[1]) {
+ case 0:
+ val = left < right;
+ break;
+ case '<':
+ val = left << right;
+ break;
+ case '=':
+ val = left <= right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ case '>':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+ switch (arg->op.op[1]) {
+ case 0:
+ val = left > right;
+ break;
+ case '>':
+ val = left >> right;
+ break;
+ case '=':
+ val = left >= right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ case '=':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+
+ if (arg->op.op[1] != '=')
+ die("unknown op '%s'", arg->op.op);
+
+ val = left == right;
+ break;
+ case '!':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+
+ switch (arg->op.op[1]) {
+ case '=':
+ val = left != right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ case '+':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+ val = left + right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+
+ case PRINT_NULL:
+ case PRINT_FIELD ... PRINT_SYMBOL:
+ case PRINT_STRING:
+ default:
+ die("invalid eval type %d", arg->type);
+
+ }
+ return val;
+}
+
+static char *arg_eval (struct print_arg *arg)
+{
+ long long val;
+ static char buf[20];
+
+ switch (arg->type) {
+ case PRINT_ATOM:
+ return arg->atom.atom;
+ case PRINT_TYPE:
+ return arg_eval(arg->typecast.item);
+ case PRINT_OP:
+ val = arg_num_eval(arg);
+ sprintf(buf, "%lld", val);
+ return buf;
+
+ case PRINT_NULL:
+ case PRINT_FIELD ... PRINT_SYMBOL:
+ case PRINT_STRING:
+ default:
+ die("invalid eval type %d", arg->type);
+ break;
+ }
+
+ return NULL;
+}
+
+static enum event_type
+process_fields(struct event *event, struct print_flag_sym **list, char **tok)
+{
+ enum event_type type;
+ struct print_arg *arg = NULL;
+ struct print_flag_sym *field;
+ char *token = NULL;
+ char *value;
+
+ do {
+ free_token(token);
+ type = read_token_item(&token);
+ if (test_type_token(type, token, EVENT_OP, "{"))
+ break;
+
+ arg = malloc_or_die(sizeof(*arg));
+
+ free_token(token);
+ type = process_arg(event, arg, &token);
+
+ if (type == EVENT_OP)
+ type = process_op(event, arg, &token);
+
+ if (type == EVENT_ERROR)
+ goto out_free;
+
+ if (test_type_token(type, token, EVENT_DELIM, ","))
+ goto out_free;
+
+ field = malloc_or_die(sizeof(*field));
+ memset(field, 0, sizeof(*field));
+
+ value = arg_eval(arg);
+ field->value = strdup(value);
+
+ free_token(token);
+ type = process_arg(event, arg, &token);
+ if (test_type_token(type, token, EVENT_OP, "}"))
+ goto out_free;
+
+ value = arg_eval(arg);
+ field->str = strdup(value);
+ free_arg(arg);
+ arg = NULL;
+
+ *list = field;
+ list = &field->next;
+
+ free_token(token);
+ type = read_token_item(&token);
+ } while (type == EVENT_DELIM && strcmp(token, ",") == 0);
+
+ *tok = token;
+ return type;
+
+out_free:
+ free_arg(arg);
+ free_token(token);
+
+ return EVENT_ERROR;
+}
+
+static enum event_type
+process_flags(struct event *event, struct print_arg *arg, char **tok)
+{
+ struct print_arg *field;
+ enum event_type type;
+ char *token;
+
+ memset(arg, 0, sizeof(*arg));
+ arg->type = PRINT_FLAGS;
+
+ if (read_expected_item(EVENT_DELIM, "(") < 0)
+ return EVENT_ERROR;
+
+ field = malloc_or_die(sizeof(*field));
+
+ type = process_arg(event, field, &token);
+ while (type == EVENT_OP)
+ type = process_op(event, field, &token);
+ if (test_type_token(type, token, EVENT_DELIM, ","))
+ goto out_free;
+
+ arg->flags.field = field;
+
+ type = read_token_item(&token);
+ if (event_item_type(type)) {
+ arg->flags.delim = token;
+ type = read_token_item(&token);
+ }
+
+ if (test_type_token(type, token, EVENT_DELIM, ","))
+ goto out_free;
+
+ type = process_fields(event, &arg->flags.flags, &token);
+ if (test_type_token(type, token, EVENT_DELIM, ")"))
+ goto out_free;
+
+ free_token(token);
+ type = read_token_item(tok);
+ return type;
+
+out_free:
+ free_token(token);
+ return EVENT_ERROR;
+}
+
+static enum event_type
+process_symbols(struct event *event, struct print_arg *arg, char **tok)
+{
+ struct print_arg *field;
+ enum event_type type;
+ char *token;
+
+ memset(arg, 0, sizeof(*arg));
+ arg->type = PRINT_SYMBOL;
+
+ if (read_expected_item(EVENT_DELIM, "(") < 0)
+ return EVENT_ERROR;
+
+ field = malloc_or_die(sizeof(*field));
+
+ type = process_arg(event, field, &token);
+ if (test_type_token(type, token, EVENT_DELIM, ","))
+ goto out_free;
+
+ arg->symbol.field = field;
+
+ type = process_fields(event, &arg->symbol.symbols, &token);
+ if (test_type_token(type, token, EVENT_DELIM, ")"))
+ goto out_free;
+
+ free_token(token);
+ type = read_token_item(tok);
+ return type;
+
+out_free:
+ free_token(token);
+ return EVENT_ERROR;
+}
+
+static enum event_type
+process_paren(struct event *event, struct print_arg *arg, char **tok)
+{
+ struct print_arg *item_arg;
+ enum event_type type;
+ char *token;
+
+ type = process_arg(event, arg, &token);
+
+ if (type == EVENT_ERROR)
+ return EVENT_ERROR;
+
+ if (type == EVENT_OP)
+ type = process_op(event, arg, &token);
+
+ if (type == EVENT_ERROR)
+ return EVENT_ERROR;
+
+ if (test_type_token(type, token, EVENT_DELIM, ")")) {
+ free_token(token);
+ return EVENT_ERROR;
+ }
+
+ free_token(token);
+ type = read_token_item(&token);
+
+ /*
+ * If the next token is an item or another open paren, then
+ * this was a typecast.
+ */
+ if (event_item_type(type) ||
+ (type == EVENT_DELIM && strcmp(token, "(") == 0)) {
+
+ /* make this a typecast and continue */
+
+ /* previous must be an atom */
+ if (arg->type != PRINT_ATOM)
+ die("previous needed to be PRINT_ATOM");
+
+ item_arg = malloc_or_die(sizeof(*item_arg));
+
+ arg->type = PRINT_TYPE;
+ arg->typecast.type = arg->atom.atom;
+ arg->typecast.item = item_arg;
+ type = process_arg_token(event, item_arg, &token, type);
+
+ }
+
+ *tok = token;
+ return type;
+}
+
+
+static enum event_type
+process_str(struct event *event __unused, struct print_arg *arg, char **tok)
+{
+ enum event_type type;
+ char *token;
+
+ if (read_expected(EVENT_DELIM, "(") < 0)
+ return EVENT_ERROR;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto fail;
+
+ arg->type = PRINT_STRING;
+ arg->string.string = token;
+ arg->string.offset = -1;
+
+ if (read_expected(EVENT_DELIM, ")") < 0)
+ return EVENT_ERROR;
+
+ type = read_token(&token);
+ *tok = token;
+
+ return type;
+fail:
+ free_token(token);
+ return EVENT_ERROR;
+}
+
+enum event_type
+process_arg_token(struct event *event, struct print_arg *arg,
+ char **tok, enum event_type type)
+{
+ char *token;
+ char *atom;
+
+ token = *tok;
+
+ switch (type) {
+ case EVENT_ITEM:
+ if (strcmp(token, "REC") == 0) {
+ free_token(token);
+ type = process_entry(event, arg, &token);
+ } else if (strcmp(token, "__print_flags") == 0) {
+ free_token(token);
+ is_flag_field = 1;
+ type = process_flags(event, arg, &token);
+ } else if (strcmp(token, "__print_symbolic") == 0) {
+ free_token(token);
+ is_symbolic_field = 1;
+ type = process_symbols(event, arg, &token);
+ } else if (strcmp(token, "__get_str") == 0) {
+ free_token(token);
+ type = process_str(event, arg, &token);
+ } else {
+ atom = token;
+ /* test the next token */
+ type = read_token_item(&token);
+
+ /* atoms can be more than one token long */
+ while (type == EVENT_ITEM) {
+ atom = realloc(atom, strlen(atom) + strlen(token) + 2);
+ strcat(atom, " ");
+ strcat(atom, token);
+ free_token(token);
+ type = read_token_item(&token);
+ }
+
+ /* todo, test for function */
+
+ arg->type = PRINT_ATOM;
+ arg->atom.atom = atom;
+ }
+ break;
+ case EVENT_DQUOTE:
+ case EVENT_SQUOTE:
+ arg->type = PRINT_ATOM;
+ arg->atom.atom = token;
+ type = read_token_item(&token);
+ break;
+ case EVENT_DELIM:
+ if (strcmp(token, "(") == 0) {
+ free_token(token);
+ type = process_paren(event, arg, &token);
+ break;
+ }
+ case EVENT_OP:
+ /* handle single ops */
+ arg->type = PRINT_OP;
+ arg->op.op = token;
+ arg->op.left = NULL;
+ type = process_op(event, arg, &token);
+
+ break;
+
+ case EVENT_ERROR ... EVENT_NEWLINE:
+ default:
+ die("unexpected type %d", type);
+ }
+ *tok = token;
+
+ return type;
+}
+
+static int event_read_print_args(struct event *event, struct print_arg **list)
+{
+ enum event_type type = EVENT_ERROR;
+ struct print_arg *arg;
+ char *token;
+ int args = 0;
+
+ do {
+ if (type == EVENT_NEWLINE) {
+ free_token(token);
+ type = read_token_item(&token);
+ continue;
+ }
+
+ arg = malloc_or_die(sizeof(*arg));
+ memset(arg, 0, sizeof(*arg));
+
+ type = process_arg(event, arg, &token);
+
+ if (type == EVENT_ERROR) {
+ free_arg(arg);
+ return -1;
+ }
+
+ *list = arg;
+ args++;
+
+ if (type == EVENT_OP) {
+ type = process_op(event, arg, &token);
+ list = &arg->next;
+ continue;
+ }
+
+ if (type == EVENT_DELIM && strcmp(token, ",") == 0) {
+ free_token(token);
+ *list = arg;
+ list = &arg->next;
+ continue;
+ }
+ break;
+ } while (type != EVENT_NONE);
+
+ if (type != EVENT_NONE)
+ free_token(token);
+
+ return args;
+}
+
+static int event_read_print(struct event *event)
+{
+ enum event_type type;
+ char *token;
+ int ret;
+
+ if (read_expected_item(EVENT_ITEM, "print") < 0)
+ return -1;
+
+ if (read_expected(EVENT_ITEM, "fmt") < 0)
+ return -1;
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ return -1;
+
+ if (read_expect_type(EVENT_DQUOTE, &token) < 0)
+ goto fail;
+
+ concat:
+ event->print_fmt.format = token;
+ event->print_fmt.args = NULL;
+
+ /* ok to have no arg */
+ type = read_token_item(&token);
+
+ if (type == EVENT_NONE)
+ return 0;
+
+ /* Handle concatenation of print lines */
+ if (type == EVENT_DQUOTE) {
+ char *cat;
+
+ cat = malloc_or_die(strlen(event->print_fmt.format) +
+ strlen(token) + 1);
+ strcpy(cat, event->print_fmt.format);
+ strcat(cat, token);
+ free_token(token);
+ free_token(event->print_fmt.format);
+ event->print_fmt.format = NULL;
+ token = cat;
+ goto concat;
+ }
+
+ if (test_type_token(type, token, EVENT_DELIM, ","))
+ goto fail;
+
+ free_token(token);
+
+ ret = event_read_print_args(event, &event->print_fmt.args);
+ if (ret < 0)
+ return -1;
+
+ return ret;
+
+ fail:
+ free_token(token);
+ return -1;
+}
+
+static struct format_field *
+find_common_field(struct event *event, const char *name)
+{
+ struct format_field *format;
+
+ for (format = event->format.common_fields;
+ format; format = format->next) {
+ if (strcmp(format->name, name) == 0)
+ break;
+ }
+
+ return format;
+}
+
+static struct format_field *
+find_field(struct event *event, const char *name)
+{
+ struct format_field *format;
+
+ for (format = event->format.fields;
+ format; format = format->next) {
+ if (strcmp(format->name, name) == 0)
+ break;
+ }
+
+ return format;
+}
+
+static struct format_field *
+find_any_field(struct event *event, const char *name)
+{
+ struct format_field *format;
+
+ format = find_common_field(event, name);
+ if (format)
+ return format;
+ return find_field(event, name);
+}
+
+unsigned long long read_size(void *ptr, int size)
+{
+ switch (size) {
+ case 1:
+ return *(unsigned char *)ptr;
+ case 2:
+ return data2host2(ptr);
+ case 4:
+ return data2host4(ptr);
+ case 8:
+ return data2host8(ptr);
+ default:
+ /* BUG! */
+ return 0;
+ }
+}
+
+unsigned long long
+raw_field_value(struct event *event, const char *name, void *data)
+{
+ struct format_field *field;
+
+ field = find_any_field(event, name);
+ if (!field)
+ return 0ULL;
+
+ return read_size(data + field->offset, field->size);
+}
+
+void *raw_field_ptr(struct event *event, const char *name, void *data)
+{
+ struct format_field *field;
+
+ field = find_any_field(event, name);
+ if (!field)
+ return NULL;
+
+ if (field->flags & FIELD_IS_DYNAMIC) {
+ int offset;
+
+ offset = *(int *)(data + field->offset);
+ offset &= 0xffff;
+
+ return data + offset;
+ }
+
+ return data + field->offset;
+}
+
+static int get_common_info(const char *type, int *offset, int *size)
+{
+ struct event *event;
+ struct format_field *field;
+
+ /*
+ * All events should have the same common elements.
+ * Pick any event to find where the type is;
+ */
+ if (!event_list)
+ die("no event_list!");
+
+ event = event_list;
+ field = find_common_field(event, type);
+ if (!field)
+ die("field '%s' not found", type);
+
+ *offset = field->offset;
+ *size = field->size;
+
+ return 0;
+}
+
+static int __parse_common(void *data, int *size, int *offset,
+ const char *name)
+{
+ int ret;
+
+ if (!*size) {
+ ret = get_common_info(name, offset, size);
+ if (ret < 0)
+ return ret;
+ }
+ return read_size(data + *offset, *size);
+}
+
+int trace_parse_common_type(void *data)
+{
+ static int type_offset;
+ static int type_size;
+
+ return __parse_common(data, &type_size, &type_offset,
+ "common_type");
+}
+
+int trace_parse_common_pid(void *data)
+{
+ static int pid_offset;
+ static int pid_size;
+
+ return __parse_common(data, &pid_size, &pid_offset,
+ "common_pid");
+}
+
+int parse_common_pc(void *data)
+{
+ static int pc_offset;
+ static int pc_size;
+
+ return __parse_common(data, &pc_size, &pc_offset,
+ "common_preempt_count");
+}
+
+int parse_common_flags(void *data)
+{
+ static int flags_offset;
+ static int flags_size;
+
+ return __parse_common(data, &flags_size, &flags_offset,
+ "common_flags");
+}
+
+int parse_common_lock_depth(void *data)
+{
+ static int ld_offset;
+ static int ld_size;
+ int ret;
+
+ ret = __parse_common(data, &ld_size, &ld_offset,
+ "common_lock_depth");
+ if (ret < 0)
+ return -1;
+
+ return ret;
+}
+
+struct event *trace_find_event(int id)
+{
+ struct event *event;
+
+ for (event = event_list; event; event = event->next) {
+ if (event->id == id)
+ break;
+ }
+ return event;
+}
+
+struct event *trace_find_next_event(struct event *event)
+{
+ if (!event)
+ return event_list;
+
+ return event->next;
+}
+
+static unsigned long long eval_num_arg(void *data, int size,
+ struct event *event, struct print_arg *arg)
+{
+ unsigned long long val = 0;
+ unsigned long long left, right;
+ struct print_arg *larg;
+
+ switch (arg->type) {
+ case PRINT_NULL:
+ /* ?? */
+ return 0;
+ case PRINT_ATOM:
+ return strtoull(arg->atom.atom, NULL, 0);
+ case PRINT_FIELD:
+ if (!arg->field.field) {
+ arg->field.field = find_any_field(event, arg->field.name);
+ if (!arg->field.field)
+ die("field %s not found", arg->field.name);
+ }
+ /* must be a number */
+ val = read_size(data + arg->field.field->offset,
+ arg->field.field->size);
+ break;
+ case PRINT_FLAGS:
+ case PRINT_SYMBOL:
+ break;
+ case PRINT_TYPE:
+ return eval_num_arg(data, size, event, arg->typecast.item);
+ case PRINT_STRING:
+ return 0;
+ case PRINT_OP:
+ if (strcmp(arg->op.op, "[") == 0) {
+ /*
+ * Arrays are special, since we don't want
+ * to read the arg as is.
+ */
+ if (arg->op.left->type != PRINT_FIELD)
+ goto default_op; /* oops, all bets off */
+ larg = arg->op.left;
+ if (!larg->field.field) {
+ larg->field.field =
+ find_any_field(event, larg->field.name);
+ if (!larg->field.field)
+ die("field %s not found", larg->field.name);
+ }
+ right = eval_num_arg(data, size, event, arg->op.right);
+ val = read_size(data + larg->field.field->offset +
+ right * long_size, long_size);
+ break;
+ }
+ default_op:
+ left = eval_num_arg(data, size, event, arg->op.left);
+ right = eval_num_arg(data, size, event, arg->op.right);
+ switch (arg->op.op[0]) {
+ case '|':
+ if (arg->op.op[1])
+ val = left || right;
+ else
+ val = left | right;
+ break;
+ case '&':
+ if (arg->op.op[1])
+ val = left && right;
+ else
+ val = left & right;
+ break;
+ case '<':
+ switch (arg->op.op[1]) {
+ case 0:
+ val = left < right;
+ break;
+ case '<':
+ val = left << right;
+ break;
+ case '=':
+ val = left <= right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ case '>':
+ switch (arg->op.op[1]) {
+ case 0:
+ val = left > right;
+ break;
+ case '>':
+ val = left >> right;
+ break;
+ case '=':
+ val = left >= right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ case '=':
+ if (arg->op.op[1] != '=')
+ die("unknown op '%s'", arg->op.op);
+ val = left == right;
+ break;
+ case '-':
+ val = left - right;
+ break;
+ case '+':
+ val = left + right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ default: /* not sure what to do there */
+ return 0;
+ }
+ return val;
+}
+
+struct flag {
+ const char *name;
+ unsigned long long value;
+};
+
+static const struct flag flags[] = {
+ { "HI_SOFTIRQ", 0 },
+ { "TIMER_SOFTIRQ", 1 },
+ { "NET_TX_SOFTIRQ", 2 },
+ { "NET_RX_SOFTIRQ", 3 },
+ { "BLOCK_SOFTIRQ", 4 },
+ { "BLOCK_IOPOLL_SOFTIRQ", 5 },
+ { "TASKLET_SOFTIRQ", 6 },
+ { "SCHED_SOFTIRQ", 7 },
+ { "HRTIMER_SOFTIRQ", 8 },
+ { "RCU_SOFTIRQ", 9 },
+
+ { "HRTIMER_NORESTART", 0 },
+ { "HRTIMER_RESTART", 1 },
+};
+
+unsigned long long eval_flag(const char *flag)
+{
+ int i;
+
+ /*
+ * Some flags in the format files do not get converted.
+ * If the flag is not numeric, see if it is something that
+ * we already know about.
+ */
+ if (isdigit(flag[0]))
+ return strtoull(flag, NULL, 0);
+
+ for (i = 0; i < (int)(sizeof(flags)/sizeof(flags[0])); i++)
+ if (strcmp(flags[i].name, flag) == 0)
+ return flags[i].value;
+
+ return 0;
+}
+
+static void print_str_arg(void *data, int size,
+ struct event *event, struct print_arg *arg)
+{
+ struct print_flag_sym *flag;
+ unsigned long long val, fval;
+ char *str;
+ int print;
+
+ switch (arg->type) {
+ case PRINT_NULL:
+ /* ?? */
+ return;
+ case PRINT_ATOM:
+ printf("%s", arg->atom.atom);
+ return;
+ case PRINT_FIELD:
+ if (!arg->field.field) {
+ arg->field.field = find_any_field(event, arg->field.name);
+ if (!arg->field.field)
+ die("field %s not found", arg->field.name);
+ }
+ str = malloc_or_die(arg->field.field->size + 1);
+ memcpy(str, data + arg->field.field->offset,
+ arg->field.field->size);
+ str[arg->field.field->size] = 0;
+ printf("%s", str);
+ free(str);
+ break;
+ case PRINT_FLAGS:
+ val = eval_num_arg(data, size, event, arg->flags.field);
+ print = 0;
+ for (flag = arg->flags.flags; flag; flag = flag->next) {
+ fval = eval_flag(flag->value);
+ if (!val && !fval) {
+ printf("%s", flag->str);
+ break;
+ }
+ if (fval && (val & fval) == fval) {
+ if (print && arg->flags.delim)
+ printf("%s", arg->flags.delim);
+ printf("%s", flag->str);
+ print = 1;
+ val &= ~fval;
+ }
+ }
+ break;
+ case PRINT_SYMBOL:
+ val = eval_num_arg(data, size, event, arg->symbol.field);
+ for (flag = arg->symbol.symbols; flag; flag = flag->next) {
+ fval = eval_flag(flag->value);
+ if (val == fval) {
+ printf("%s", flag->str);
+ break;
+ }
+ }
+ break;
+
+ case PRINT_TYPE:
+ break;
+ case PRINT_STRING: {
+ int str_offset;
+
+ if (arg->string.offset == -1) {
+ struct format_field *f;
+
+ f = find_any_field(event, arg->string.string);
+ arg->string.offset = f->offset;
+ }
+ str_offset = *(int *)(data + arg->string.offset);
+ str_offset &= 0xffff;
+ printf("%s", ((char *)data) + str_offset);
+ break;
+ }
+ case PRINT_OP:
+ /*
+ * The only op for string should be ? :
+ */
+ if (arg->op.op[0] != '?')
+ return;
+ val = eval_num_arg(data, size, event, arg->op.left);
+ if (val)
+ print_str_arg(data, size, event, arg->op.right->op.left);
+ else
+ print_str_arg(data, size, event, arg->op.right->op.right);
+ break;
+ default:
+ /* well... */
+ break;
+ }
+}
+
+static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event *event)
+{
+ static struct format_field *field, *ip_field;
+ struct print_arg *args, *arg, **next;
+ unsigned long long ip, val;
+ char *ptr;
+ void *bptr;
+
+ if (!field) {
+ field = find_field(event, "buf");
+ if (!field)
+ die("can't find buffer field for binary printk");
+ ip_field = find_field(event, "ip");
+ if (!ip_field)
+ die("can't find ip field for binary printk");
+ }
+
+ ip = read_size(data + ip_field->offset, ip_field->size);
+
+ /*
+ * The first arg is the IP pointer.
+ */
+ args = malloc_or_die(sizeof(*args));
+ arg = args;
+ arg->next = NULL;
+ next = &arg->next;
+
+ arg->type = PRINT_ATOM;
+ arg->atom.atom = malloc_or_die(32);
+ sprintf(arg->atom.atom, "%lld", ip);
+
+ /* skip the first "%pf : " */
+ for (ptr = fmt + 6, bptr = data + field->offset;
+ bptr < data + size && *ptr; ptr++) {
+ int ls = 0;
+
+ if (*ptr == '%') {
+ process_again:
+ ptr++;
+ switch (*ptr) {
+ case '%':
+ break;
+ case 'l':
+ ls++;
+ goto process_again;
+ case 'L':
+ ls = 2;
+ goto process_again;
+ case '0' ... '9':
+ goto process_again;
+ case 'p':
+ ls = 1;
+ /* fall through */
+ case 'd':
+ case 'u':
+ case 'x':
+ case 'i':
+ /* the pointers are always 4-byte aligned */
+ bptr = (void *)(((unsigned long)bptr + 3) &
+ ~3);
+ switch (ls) {
+ case 0:
+ case 1:
+ ls = long_size;
+ break;
+ case 2:
+ ls = 8;
+ default:
+ break;
+ }
+ val = read_size(bptr, ls);
+ bptr += ls;
+ arg = malloc_or_die(sizeof(*arg));
+ arg->next = NULL;
+ arg->type = PRINT_ATOM;
+ arg->atom.atom = malloc_or_die(32);
+ sprintf(arg->atom.atom, "%lld", val);
+ *next = arg;
+ next = &arg->next;
+ break;
+ case 's':
+ arg = malloc_or_die(sizeof(*arg));
+ arg->next = NULL;
+ arg->type = PRINT_STRING;
+ arg->string.string = strdup(bptr);
+ bptr += strlen(bptr) + 1;
+ *next = arg;
+ next = &arg->next;
+ default:
+ break;
+ }
+ }
+ }
+
+ return args;
+}
+
+static void free_args(struct print_arg *args)
+{
+ struct print_arg *next;
+
+ while (args) {
+ next = args->next;
+
+ if (args->type == PRINT_ATOM)
+ free(args->atom.atom);
+ else
+ free(args->string.string);
+ free(args);
+ args = next;
+ }
+}
+
+static char *get_bprint_format(void *data, int size __unused, struct event *event)
+{
+ unsigned long long addr;
+ static struct format_field *field;
+ struct printk_map *printk;
+ char *format;
+ char *p;
+
+ if (!field) {
+ field = find_field(event, "fmt");
+ if (!field)
+ die("can't find format field for binary printk");
+ printf("field->offset = %d size=%d\n", field->offset, field->size);
+ }
+
+ addr = read_size(data + field->offset, field->size);
+
+ printk = find_printk(addr);
+ if (!printk) {
+ format = malloc_or_die(45);
+ sprintf(format, "%%pf : (NO FORMAT FOUND at %llx)\n",
+ addr);
+ return format;
+ }
+
+ p = printk->printk;
+ /* Remove any quotes. */
+ if (*p == '"')
+ p++;
+ format = malloc_or_die(strlen(p) + 10);
+ sprintf(format, "%s : %s", "%pf", p);
+ /* remove ending quotes and new line since we will add one too */
+ p = format + strlen(format) - 1;
+ if (*p == '"')
+ *p = 0;
+
+ p -= 2;
+ if (strcmp(p, "\\n") == 0)
+ *p = 0;
+
+ return format;
+}
+
+static void pretty_print(void *data, int size, struct event *event)
+{
+ struct print_fmt *print_fmt = &event->print_fmt;
+ struct print_arg *arg = print_fmt->args;
+ struct print_arg *args = NULL;
+ const char *ptr = print_fmt->format;
+ unsigned long long val;
+ struct func_map *func;
+ const char *saveptr;
+ char *bprint_fmt = NULL;
+ char format[32];
+ int show_func;
+ int len;
+ int ls;
+
+ if (event->flags & EVENT_FL_ISFUNC)
+ ptr = " %pF <-- %pF";
+
+ if (event->flags & EVENT_FL_ISBPRINT) {
+ bprint_fmt = get_bprint_format(data, size, event);
+ args = make_bprint_args(bprint_fmt, data, size, event);
+ arg = args;
+ ptr = bprint_fmt;
+ }
+
+ for (; *ptr; ptr++) {
+ ls = 0;
+ if (*ptr == '\\') {
+ ptr++;
+ switch (*ptr) {
+ case 'n':
+ printf("\n");
+ break;
+ case 't':
+ printf("\t");
+ break;
+ case 'r':
+ printf("\r");
+ break;
+ case '\\':
+ printf("\\");
+ break;
+ default:
+ printf("%c", *ptr);
+ break;
+ }
+
+ } else if (*ptr == '%') {
+ saveptr = ptr;
+ show_func = 0;
+ cont_process:
+ ptr++;
+ switch (*ptr) {
+ case '%':
+ printf("%%");
+ break;
+ case 'l':
+ ls++;
+ goto cont_process;
+ case 'L':
+ ls = 2;
+ goto cont_process;
+ case 'z':
+ case 'Z':
+ case '0' ... '9':
+ goto cont_process;
+ case 'p':
+ if (long_size == 4)
+ ls = 1;
+ else
+ ls = 2;
+
+ if (*(ptr+1) == 'F' ||
+ *(ptr+1) == 'f') {
+ ptr++;
+ show_func = *ptr;
+ }
+
+ /* fall through */
+ case 'd':
+ case 'i':
+ case 'x':
+ case 'X':
+ case 'u':
+ if (!arg)
+ die("no argument match");
+
+ len = ((unsigned long)ptr + 1) -
+ (unsigned long)saveptr;
+
+ /* should never happen */
+ if (len > 32)
+ die("bad format!");
+
+ memcpy(format, saveptr, len);
+ format[len] = 0;
+
+ val = eval_num_arg(data, size, event, arg);
+ arg = arg->next;
+
+ if (show_func) {
+ func = find_func(val);
+ if (func) {
+ printf("%s", func->func);
+ if (show_func == 'F')
+ printf("+0x%llx",
+ val - func->addr);
+ break;
+ }
+ }
+ switch (ls) {
+ case 0:
+ printf(format, (int)val);
+ break;
+ case 1:
+ printf(format, (long)val);
+ break;
+ case 2:
+ printf(format, (long long)val);
+ break;
+ default:
+ die("bad count (%d)", ls);
+ }
+ break;
+ case 's':
+ if (!arg)
+ die("no matching argument");
+
+ print_str_arg(data, size, event, arg);
+ arg = arg->next;
+ break;
+ default:
+ printf(">%c<", *ptr);
+
+ }
+ } else
+ printf("%c", *ptr);
+ }
+
+ if (args) {
+ free_args(args);
+ free(bprint_fmt);
+ }
+}
+
+static inline int log10_cpu(int nb)
+{
+ if (nb / 100)
+ return 3;
+ if (nb / 10)
+ return 2;
+ return 1;
+}
+
+static void print_lat_fmt(void *data, int size __unused)
+{
+ unsigned int lat_flags;
+ unsigned int pc;
+ int lock_depth;
+ int hardirq;
+ int softirq;
+
+ lat_flags = parse_common_flags(data);
+ pc = parse_common_pc(data);
+ lock_depth = parse_common_lock_depth(data);
+
+ hardirq = lat_flags & TRACE_FLAG_HARDIRQ;
+ softirq = lat_flags & TRACE_FLAG_SOFTIRQ;
+
+ printf("%c%c%c",
+ (lat_flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
+ (lat_flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
+ 'X' : '.',
+ (lat_flags & TRACE_FLAG_NEED_RESCHED) ?
+ 'N' : '.',
+ (hardirq && softirq) ? 'H' :
+ hardirq ? 'h' : softirq ? 's' : '.');
+
+ if (pc)
+ printf("%x", pc);
+ else
+ printf(".");
+
+ if (lock_depth < 0)
+ printf(". ");
+ else
+ printf("%d ", lock_depth);
+}
+
+#define TRACE_GRAPH_INDENT 2
+
+static struct record *
+get_return_for_leaf(int cpu, int cur_pid, unsigned long long cur_func,
+ struct record *next)
+{
+ struct format_field *field;
+ struct event *event;
+ unsigned long val;
+ int type;
+ int pid;
+
+ type = trace_parse_common_type(next->data);
+ event = trace_find_event(type);
+ if (!event)
+ return NULL;
+
+ if (!(event->flags & EVENT_FL_ISFUNCRET))
+ return NULL;
+
+ pid = trace_parse_common_pid(next->data);
+ field = find_field(event, "func");
+ if (!field)
+ die("function return does not have field func");
+
+ val = read_size(next->data + field->offset, field->size);
+
+ if (cur_pid != pid || cur_func != val)
+ return NULL;
+
+ /* this is a leaf, now advance the iterator */
+ return trace_read_data(cpu);
+}
+
+/* Signal an execution time overhead marker in the output */
+static void print_graph_overhead(unsigned long long duration)
+{
+ /* Non nested entry or return */
+ if (duration == ~0ULL)
+ return (void)printf(" ");
+
+ /* Duration exceeded 100 usecs */
+ if (duration > 100000ULL)
+ return (void)printf("! ");
+
+ /* Duration exceeded 10 usecs */
+ if (duration > 10000ULL)
+ return (void)printf("+ ");
+
+ printf(" ");
+}
+
+static void print_graph_duration(unsigned long long duration)
+{
+ unsigned long usecs = duration / 1000;
+ unsigned long nsecs_rem = duration % 1000;
+ /* log10(ULONG_MAX) + '\0' */
+ char msecs_str[21];
+ char nsecs_str[5];
+ int len;
+ int i;
+
+ sprintf(msecs_str, "%lu", usecs);
+
+ /* Print usecs */
+ len = printf("%lu", usecs);
+
+ /* Print nsecs (we don't want to exceed 7 digits) */
+ if (len < 7) {
+ snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
+ len += printf(".%s", nsecs_str);
+ }
+
+ printf(" us ");
+
+ /* Print remaining spaces to fit the row's width */
+ for (i = len; i < 7; i++)
+ printf(" ");
+
+ printf("| ");
+}
+
+static void
+print_graph_entry_leaf(struct event *event, void *data, struct record *ret_rec)
+{
+ unsigned long long rettime, calltime;
+ unsigned long long duration, depth;
+ unsigned long long val;
+ struct format_field *field;
+ struct func_map *func;
+ struct event *ret_event;
+ int type;
+ int i;
+
+ type = trace_parse_common_type(ret_rec->data);
+ ret_event = trace_find_event(type);
+
+ field = find_field(ret_event, "rettime");
+ if (!field)
+ die("can't find rettime in return graph");
+ rettime = read_size(ret_rec->data + field->offset, field->size);
+
+ field = find_field(ret_event, "calltime");
+ if (!field)
+ die("can't find rettime in return graph");
+ calltime = read_size(ret_rec->data + field->offset, field->size);
+
+ duration = rettime - calltime;
+
+ /* Overhead */
+ print_graph_overhead(duration);
+
+ /* Duration */
+ print_graph_duration(duration);
+
+ field = find_field(event, "depth");
+ if (!field)
+ die("can't find depth in entry graph");
+ depth = read_size(data + field->offset, field->size);
+
+ /* Function */
+ for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
+ printf(" ");
+
+ field = find_field(event, "func");
+ if (!field)
+ die("can't find func in entry graph");
+ val = read_size(data + field->offset, field->size);
+ func = find_func(val);
+
+ if (func)
+ printf("%s();", func->func);
+ else
+ printf("%llx();", val);
+}
+
+static void print_graph_nested(struct event *event, void *data)
+{
+ struct format_field *field;
+ unsigned long long depth;
+ unsigned long long val;
+ struct func_map *func;
+ int i;
+
+ /* No overhead */
+ print_graph_overhead(-1);
+
+ /* No time */
+ printf(" | ");
+
+ field = find_field(event, "depth");
+ if (!field)
+ die("can't find depth in entry graph");
+ depth = read_size(data + field->offset, field->size);
+
+ /* Function */
+ for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
+ printf(" ");
+
+ field = find_field(event, "func");
+ if (!field)
+ die("can't find func in entry graph");
+ val = read_size(data + field->offset, field->size);
+ func = find_func(val);
+
+ if (func)
+ printf("%s() {", func->func);
+ else
+ printf("%llx() {", val);
+}
+
+static void
+pretty_print_func_ent(void *data, int size, struct event *event,
+ int cpu, int pid)
+{
+ struct format_field *field;
+ struct record *rec;
+ void *copy_data;
+ unsigned long val;
+
+ if (latency_format) {
+ print_lat_fmt(data, size);
+ printf(" | ");
+ }
+
+ field = find_field(event, "func");
+ if (!field)
+ die("function entry does not have func field");
+
+ val = read_size(data + field->offset, field->size);
+
+ /*
+ * peek_data may unmap the data pointer. Copy it first.
+ */
+ copy_data = malloc_or_die(size);
+ memcpy(copy_data, data, size);
+ data = copy_data;
+
+ rec = trace_peek_data(cpu);
+ if (rec) {
+ rec = get_return_for_leaf(cpu, pid, val, rec);
+ if (rec) {
+ print_graph_entry_leaf(event, data, rec);
+ goto out_free;
+ }
+ }
+ print_graph_nested(event, data);
+out_free:
+ free(data);
+}
+
+static void
+pretty_print_func_ret(void *data, int size __unused, struct event *event)
+{
+ unsigned long long rettime, calltime;
+ unsigned long long duration, depth;
+ struct format_field *field;
+ int i;
+
+ if (latency_format) {
+ print_lat_fmt(data, size);
+ printf(" | ");
+ }
+
+ field = find_field(event, "rettime");
+ if (!field)
+ die("can't find rettime in return graph");
+ rettime = read_size(data + field->offset, field->size);
+
+ field = find_field(event, "calltime");
+ if (!field)
+ die("can't find calltime in return graph");
+ calltime = read_size(data + field->offset, field->size);
+
+ duration = rettime - calltime;
+
+ /* Overhead */
+ print_graph_overhead(duration);
+
+ /* Duration */
+ print_graph_duration(duration);
+
+ field = find_field(event, "depth");
+ if (!field)
+ die("can't find depth in entry graph");
+ depth = read_size(data + field->offset, field->size);
+
+ /* Function */
+ for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
+ printf(" ");
+
+ printf("}");
+}
+
+static void
+pretty_print_func_graph(void *data, int size, struct event *event,
+ int cpu, int pid)
+{
+ if (event->flags & EVENT_FL_ISFUNCENT)
+ pretty_print_func_ent(data, size, event, cpu, pid);
+ else if (event->flags & EVENT_FL_ISFUNCRET)
+ pretty_print_func_ret(data, size, event);
+ printf("\n");
+}
+
+void print_trace_event(int cpu, void *data, int size)
+{
+ struct event *event;
+ int type;
+ int pid;
+
+ type = trace_parse_common_type(data);
+
+ event = trace_find_event(type);
+ if (!event) {
+ warning("ug! no event found for type %d", type);
+ return;
+ }
+
+ pid = trace_parse_common_pid(data);
+
+ if (event->flags & (EVENT_FL_ISFUNCENT | EVENT_FL_ISFUNCRET))
+ return pretty_print_func_graph(data, size, event, cpu, pid);
+
+ if (latency_format)
+ print_lat_fmt(data, size);
+
+ if (event->flags & EVENT_FL_FAILED) {
+ printf("EVENT '%s' FAILED TO PARSE\n",
+ event->name);
+ return;
+ }
+
+ pretty_print(data, size, event);
+}
+
+static void print_fields(struct print_flag_sym *field)
+{
+ printf("{ %s, %s }", field->value, field->str);
+ if (field->next) {
+ printf(", ");
+ print_fields(field->next);
+ }
+}
+
+static void print_args(struct print_arg *args)
+{
+ int print_paren = 1;
+
+ switch (args->type) {
+ case PRINT_NULL:
+ printf("null");
+ break;
+ case PRINT_ATOM:
+ printf("%s", args->atom.atom);
+ break;
+ case PRINT_FIELD:
+ printf("REC->%s", args->field.name);
+ break;
+ case PRINT_FLAGS:
+ printf("__print_flags(");
+ print_args(args->flags.field);
+ printf(", %s, ", args->flags.delim);
+ print_fields(args->flags.flags);
+ printf(")");
+ break;
+ case PRINT_SYMBOL:
+ printf("__print_symbolic(");
+ print_args(args->symbol.field);
+ printf(", ");
+ print_fields(args->symbol.symbols);
+ printf(")");
+ break;
+ case PRINT_STRING:
+ printf("__get_str(%s)", args->string.string);
+ break;
+ case PRINT_TYPE:
+ printf("(%s)", args->typecast.type);
+ print_args(args->typecast.item);
+ break;
+ case PRINT_OP:
+ if (strcmp(args->op.op, ":") == 0)
+ print_paren = 0;
+ if (print_paren)
+ printf("(");
+ print_args(args->op.left);
+ printf(" %s ", args->op.op);
+ print_args(args->op.right);
+ if (print_paren)
+ printf(")");
+ break;
+ default:
+ /* we should warn... */
+ return;
+ }
+ if (args->next) {
+ printf("\n");
+ print_args(args->next);
+ }
+}
+
+int parse_ftrace_file(char *buf, unsigned long size)
+{
+ struct format_field *field;
+ struct print_arg *arg, **list;
+ struct event *event;
+ int ret;
+
+ init_input_buf(buf, size);
+
+ event = alloc_event();
+ if (!event)
+ return -ENOMEM;
+
+ event->flags |= EVENT_FL_ISFTRACE;
+
+ event->name = event_read_name();
+ if (!event->name)
+ die("failed to read ftrace event name");
+
+ if (strcmp(event->name, "function") == 0)
+ event->flags |= EVENT_FL_ISFUNC;
+
+ else if (strcmp(event->name, "funcgraph_entry") == 0)
+ event->flags |= EVENT_FL_ISFUNCENT;
+
+ else if (strcmp(event->name, "funcgraph_exit") == 0)
+ event->flags |= EVENT_FL_ISFUNCRET;
+
+ else if (strcmp(event->name, "bprint") == 0)
+ event->flags |= EVENT_FL_ISBPRINT;
+
+ event->id = event_read_id();
+ if (event->id < 0)
+ die("failed to read ftrace event id");
+
+ add_event(event);
+
+ ret = event_read_format(event);
+ if (ret < 0)
+ die("failed to read ftrace event format");
+
+ ret = event_read_print(event);
+ if (ret < 0)
+ die("failed to read ftrace event print fmt");
+
+ /* New ftrace handles args */
+ if (ret > 0)
+ return 0;
+ /*
+ * The arguments for ftrace files are parsed by the fields.
+ * Set up the fields as their arguments.
+ */
+ list = &event->print_fmt.args;
+ for (field = event->format.fields; field; field = field->next) {
+ arg = malloc_or_die(sizeof(*arg));
+ memset(arg, 0, sizeof(*arg));
+ *list = arg;
+ list = &arg->next;
+ arg->type = PRINT_FIELD;
+ arg->field.name = field->name;
+ arg->field.field = field;
+ }
+ return 0;
+}
+
+int parse_event_file(char *buf, unsigned long size, char *sys)
+{
+ struct event *event;
+ int ret;
+
+ init_input_buf(buf, size);
+
+ event = alloc_event();
+ if (!event)
+ return -ENOMEM;
+
+ event->name = event_read_name();
+ if (!event->name)
+ die("failed to read event name");
+
+ event->id = event_read_id();
+ if (event->id < 0)
+ die("failed to read event id");
+
+ ret = event_read_format(event);
+ if (ret < 0) {
+ warning("failed to read event format for %s", event->name);
+ goto event_failed;
+ }
+
+ ret = event_read_print(event);
+ if (ret < 0) {
+ warning("failed to read event print fmt for %s", event->name);
+ goto event_failed;
+ }
+
+ event->system = strdup(sys);
+
+#define PRINT_ARGS 0
+ if (PRINT_ARGS && event->print_fmt.args)
+ print_args(event->print_fmt.args);
+
+ add_event(event);
+ return 0;
+
+ event_failed:
+ event->flags |= EVENT_FL_FAILED;
+ /* still add it even if it failed */
+ add_event(event);
+ return -1;
+}
+
+void parse_set_info(int nr_cpus, int long_sz)
+{
+ cpus = nr_cpus;
+ long_size = long_sz;
+}
+
+int common_pc(struct scripting_context *context)
+{
+ return parse_common_pc(context->event_data);
+}
+
+int common_flags(struct scripting_context *context)
+{
+ return parse_common_flags(context->event_data);
+}
+
+int common_lock_depth(struct scripting_context *context)
+{
+ return parse_common_lock_depth(context->event_data);
+}
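A brief usage sketch of the accessors defined above (trace_parse_common_type(), trace_find_event(), raw_field_value(), raw_field_ptr(), print_trace_event()). The helper name dump_record() and the field names "pid", "prio" and "comm" are hypothetical, chosen for a sched-style event; the data/size pair is assumed to be a raw record payload such as the one returned by trace_read_data().

#include <stdio.h>
#include "trace-event.h"

/* Hypothetical consumer of the parse API above; not part of this patch. */
static void dump_record(int cpu, void *data, int size)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);	/* read the common_type field */
	event = trace_find_event(type);		/* look up the parsed format */
	if (!event)
		return;

	/* Numeric fields are read by offset/size from the format description. */
	printf("%s: pid=%llu prio=%llu\n", event->name,
	       raw_field_value(event, "pid", data),
	       raw_field_value(event, "prio", data));

	/* Dynamic/string fields resolve through raw_field_ptr(). */
	printf("comm=%s\n", (char *)raw_field_ptr(event, "comm", data));

	/* Or hand the whole record to the pretty printer. */
	print_trace_event(cpu, data, size);
}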
diff --git a/ANDROID_3.4.5/tools/perf/util/trace-event-read.c b/ANDROID_3.4.5/tools/perf/util/trace-event-read.c
new file mode 100644
index 00000000..b9592e0d
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/trace-event-read.c
@@ -0,0 +1,538 @@
+/*
+ * Copyright (C) 2009, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define _FILE_OFFSET_BITS 64
+
+#include <dirent.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include "../perf.h"
+#include "util.h"
+#include "trace-event.h"
+
+static int input_fd;
+
+static int read_page;
+
+int file_bigendian;
+int host_bigendian;
+static int long_size;
+
+static unsigned long page_size;
+
+static ssize_t calc_data_size;
+static bool repipe;
+
+static int do_read(int fd, void *buf, int size)
+{
+ int rsize = size;
+
+ while (size) {
+ int ret = read(fd, buf, size);
+
+ if (ret <= 0)
+ return -1;
+
+ if (repipe) {
+ int retw = write(STDOUT_FILENO, buf, ret);
+
+ if (retw <= 0 || retw != ret)
+ die("repiping input file");
+ }
+
+ size -= ret;
+ buf += ret;
+ }
+
+ return rsize;
+}
+
+static int read_or_die(void *data, int size)
+{
+ int r;
+
+ r = do_read(input_fd, data, size);
+ if (r <= 0)
+ die("reading input file (size expected=%d received=%d)",
+ size, r);
+
+ if (calc_data_size)
+ calc_data_size += r;
+
+ return r;
+}
+
+/* If it fails, the next read will report it */
+static void skip(int size)
+{
+ char buf[BUFSIZ];
+ int r;
+
+ while (size) {
+ r = size > BUFSIZ ? BUFSIZ : size;
+ read_or_die(buf, r);
+ size -= r;
+ }
+}
+
+static unsigned int read4(void)
+{
+ unsigned int data;
+
+ read_or_die(&data, 4);
+ return __data2host4(data);
+}
+
+static unsigned long long read8(void)
+{
+ unsigned long long data;
+
+ read_or_die(&data, 8);
+ return __data2host8(data);
+}
+
+static char *read_string(void)
+{
+ char buf[BUFSIZ];
+ char *str = NULL;
+ int size = 0;
+ off_t r;
+ char c;
+
+ for (;;) {
+ r = read(input_fd, &c, 1);
+ if (r < 0)
+ die("reading input file");
+
+ if (!r)
+ die("no data");
+
+ if (repipe) {
+ int retw = write(STDOUT_FILENO, &c, 1);
+
+ if (retw <= 0 || retw != r)
+ die("repiping input file string");
+ }
+
+ buf[size++] = c;
+
+ if (!c)
+ break;
+ }
+
+ if (calc_data_size)
+ calc_data_size += size;
+
+ str = malloc_or_die(size);
+ memcpy(str, buf, size);
+
+ return str;
+}
+
+static void read_proc_kallsyms(void)
+{
+ unsigned int size;
+ char *buf;
+
+ size = read4();
+ if (!size)
+ return;
+
+ buf = malloc_or_die(size + 1);
+ read_or_die(buf, size);
+ buf[size] = '\0';
+
+ parse_proc_kallsyms(buf, size);
+
+ free(buf);
+}
+
+static void read_ftrace_printk(void)
+{
+ unsigned int size;
+ char *buf;
+
+ size = read4();
+ if (!size)
+ return;
+
+ buf = malloc_or_die(size);
+ read_or_die(buf, size);
+
+ parse_ftrace_printk(buf, size);
+
+ free(buf);
+}
+
+static void read_header_files(void)
+{
+ unsigned long long size;
+ char *header_event;
+ char buf[BUFSIZ];
+
+ read_or_die(buf, 12);
+
+ if (memcmp(buf, "header_page", 12) != 0)
+ die("did not read header page");
+
+ size = read8();
+ skip(size);
+
+ /*
+ * The size field in the page header is a kernel long;
+ * use its size here, since it reflects the kernel's long size.
+ */
+ long_size = header_page_size_size;
+
+ read_or_die(buf, 13);
+ if (memcmp(buf, "header_event", 13) != 0)
+ die("did not read header event");
+
+ size = read8();
+ header_event = malloc_or_die(size);
+ read_or_die(header_event, size);
+ free(header_event);
+}
+
+static void read_ftrace_file(unsigned long long size)
+{
+ char *buf;
+
+ buf = malloc_or_die(size);
+ read_or_die(buf, size);
+ parse_ftrace_file(buf, size);
+ free(buf);
+}
+
+static void read_event_file(char *sys, unsigned long long size)
+{
+ char *buf;
+
+ buf = malloc_or_die(size);
+ read_or_die(buf, size);
+ parse_event_file(buf, size, sys);
+ free(buf);
+}
+
+static void read_ftrace_files(void)
+{
+ unsigned long long size;
+ int count;
+ int i;
+
+ count = read4();
+
+ for (i = 0; i < count; i++) {
+ size = read8();
+ read_ftrace_file(size);
+ }
+}
+
+static void read_event_files(void)
+{
+ unsigned long long size;
+ char *sys;
+ int systems;
+ int count;
+ int i, x;
+
+ systems = read4();
+
+ for (i = 0; i < systems; i++) {
+ sys = read_string();
+
+ count = read4();
+ for (x = 0; x < count; x++) {
+ size = read8();
+ read_event_file(sys, size);
+ }
+ }
+}
+
+struct cpu_data {
+ unsigned long long offset;
+ unsigned long long size;
+ unsigned long long timestamp;
+ struct record *next;
+ char *page;
+ int cpu;
+ int index;
+ int page_size;
+};
+
+static struct cpu_data *cpu_data;
+
+static void update_cpu_data_index(int cpu)
+{
+ cpu_data[cpu].offset += page_size;
+ cpu_data[cpu].size -= page_size;
+ cpu_data[cpu].index = 0;
+}
+
+static void get_next_page(int cpu)
+{
+ off_t save_seek;
+ off_t ret;
+
+ if (!cpu_data[cpu].page)
+ return;
+
+ if (read_page) {
+ if (cpu_data[cpu].size <= page_size) {
+ free(cpu_data[cpu].page);
+ cpu_data[cpu].page = NULL;
+ return;
+ }
+
+ update_cpu_data_index(cpu);
+
+ /* other parts of the code may expect the pointer to not move */
+ save_seek = lseek(input_fd, 0, SEEK_CUR);
+
+ ret = lseek(input_fd, cpu_data[cpu].offset, SEEK_SET);
+ if (ret == (off_t)-1)
+ die("failed to lseek");
+ ret = read(input_fd, cpu_data[cpu].page, page_size);
+ if (ret < 0)
+ die("failed to read page");
+
+ /* reset the file pointer back */
+ lseek(input_fd, save_seek, SEEK_SET);
+
+ return;
+ }
+
+ munmap(cpu_data[cpu].page, page_size);
+ cpu_data[cpu].page = NULL;
+
+ if (cpu_data[cpu].size <= page_size)
+ return;
+
+ update_cpu_data_index(cpu);
+
+ cpu_data[cpu].page = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE,
+ input_fd, cpu_data[cpu].offset);
+ if (cpu_data[cpu].page == MAP_FAILED)
+ die("failed to mmap cpu %d at offset 0x%llx",
+ cpu, cpu_data[cpu].offset);
+}
+
+static unsigned int type_len4host(unsigned int type_len_ts)
+{
+ if (file_bigendian)
+ return (type_len_ts >> 27) & ((1 << 5) - 1);
+ else
+ return type_len_ts & ((1 << 5) - 1);
+}
+
+static unsigned int ts4host(unsigned int type_len_ts)
+{
+ if (file_bigendian)
+ return type_len_ts & ((1 << 27) - 1);
+ else
+ return type_len_ts >> 5;
+}
+
+static int calc_index(void *ptr, int cpu)
+{
+ return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page;
+}
+
+struct record *trace_peek_data(int cpu)
+{
+ struct record *data;
+ void *page = cpu_data[cpu].page;
+ int idx = cpu_data[cpu].index;
+ void *ptr = page + idx;
+ unsigned long long extend;
+ unsigned int type_len_ts;
+ unsigned int type_len;
+ unsigned int delta;
+ unsigned int length = 0;
+
+ if (cpu_data[cpu].next)
+ return cpu_data[cpu].next;
+
+ if (!page)
+ return NULL;
+
+ if (!idx) {
+ /* FIXME: handle header page */
+ if (header_page_ts_size != 8)
+ die("expected a long long type for timestamp");
+ cpu_data[cpu].timestamp = data2host8(ptr);
+ ptr += 8;
+ switch (header_page_size_size) {
+ case 4:
+ cpu_data[cpu].page_size = data2host4(ptr);
+ ptr += 4;
+ break;
+ case 8:
+ cpu_data[cpu].page_size = data2host8(ptr);
+ ptr += 8;
+ break;
+ default:
+ die("bad long size");
+ }
+ ptr = cpu_data[cpu].page + header_page_data_offset;
+ }
+
+read_again:
+ idx = calc_index(ptr, cpu);
+
+ if (idx >= cpu_data[cpu].page_size) {
+ get_next_page(cpu);
+ return trace_peek_data(cpu);
+ }
+
+ type_len_ts = data2host4(ptr);
+ ptr += 4;
+
+ type_len = type_len4host(type_len_ts);
+ delta = ts4host(type_len_ts);
+
+ switch (type_len) {
+ case RINGBUF_TYPE_PADDING:
+ if (!delta)
+ die("error, hit unexpected end of page");
+ length = data2host4(ptr);
+ ptr += 4;
+ length *= 4;
+ ptr += length;
+ goto read_again;
+
+ case RINGBUF_TYPE_TIME_EXTEND:
+ extend = data2host4(ptr);
+ ptr += 4;
+ extend <<= TS_SHIFT;
+ extend += delta;
+ cpu_data[cpu].timestamp += extend;
+ goto read_again;
+
+ case RINGBUF_TYPE_TIME_STAMP:
+ ptr += 12;
+ break;
+ case 0:
+ length = data2host4(ptr);
+ ptr += 4;
+ die("here! length=%d", length);
+ break;
+ default:
+ length = type_len * 4;
+ break;
+ }
+
+ cpu_data[cpu].timestamp += delta;
+
+ data = malloc_or_die(sizeof(*data));
+ memset(data, 0, sizeof(*data));
+
+ data->ts = cpu_data[cpu].timestamp;
+ data->size = length;
+ data->data = ptr;
+ ptr += length;
+
+ cpu_data[cpu].index = calc_index(ptr, cpu);
+ cpu_data[cpu].next = data;
+
+ return data;
+}
+
+struct record *trace_read_data(int cpu)
+{
+ struct record *data;
+
+ data = trace_peek_data(cpu);
+ cpu_data[cpu].next = NULL;
+
+ return data;
+}
+
+ssize_t trace_report(int fd, bool __repipe)
+{
+ char buf[BUFSIZ];
+ char test[] = { 23, 8, 68 };
+ char *version;
+ int show_version = 0;
+ int show_funcs = 0;
+ int show_printk = 0;
+ ssize_t size;
+
+ calc_data_size = 1;
+ repipe = __repipe;
+
+ input_fd = fd;
+
+ read_or_die(buf, 3);
+ if (memcmp(buf, test, 3) != 0)
+ die("no trace data in the file");
+
+ read_or_die(buf, 7);
+ if (memcmp(buf, "tracing", 7) != 0)
+ die("not a trace file (missing 'tracing' tag)");
+
+ version = read_string();
+ if (show_version)
+ printf("version = %s\n", version);
+ free(version);
+
+ read_or_die(buf, 1);
+ file_bigendian = buf[0];
+ host_bigendian = bigendian();
+
+ read_or_die(buf, 1);
+ long_size = buf[0];
+
+ page_size = read4();
+
+ read_header_files();
+
+ read_ftrace_files();
+ read_event_files();
+ read_proc_kallsyms();
+ read_ftrace_printk();
+
+ size = calc_data_size - 1;
+ calc_data_size = 0;
+ repipe = false;
+
+ if (show_funcs) {
+ print_funcs();
+ return size;
+ }
+ if (show_printk) {
+ print_printk();
+ return size;
+ }
+
+ return size;
+}
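A sketch of the intended calling sequence for this reader: trace_report() is invoked once with the file descriptor positioned at the tracing-data section (loading event formats, kallsyms and the printk table), after which raw sample payloads can be handed to print_trace_event(). The wrapper names below are hypothetical, not perf's actual call sites.

#include <sys/types.h>
#include "trace-event.h"

/* Hypothetical wrappers showing the intended call order. */
static ssize_t load_tracing_data(int fd)
{
	/* fd is assumed to be positioned at the tracing data section */
	return trace_report(fd, false /* do not repipe to stdout */);
}

static void handle_raw_sample(int cpu, void *raw_data, int raw_size)
{
	/* valid only after load_tracing_data() has parsed the formats */
	print_trace_event(cpu, raw_data, raw_size);
}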
diff --git a/ANDROID_3.4.5/tools/perf/util/trace-event-scripting.c b/ANDROID_3.4.5/tools/perf/util/trace-event-scripting.c
new file mode 100644
index 00000000..18ae6c18
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/trace-event-scripting.c
@@ -0,0 +1,166 @@
+/*
+ * trace-event-scripting. Scripting engine common and initialization code.
+ *
+ * Copyright (C) 2009-2010 Tom Zanussi <tzanussi@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "../perf.h"
+#include "util.h"
+#include "trace-event.h"
+
+struct scripting_context *scripting_context;
+
+static int stop_script_unsupported(void)
+{
+ return 0;
+}
+
+static void process_event_unsupported(union perf_event *event __unused,
+ struct perf_sample *sample __unused,
+ struct perf_evsel *evsel __unused,
+ struct machine *machine __unused,
+ struct thread *thread __unused)
+{
+}
+
+static void print_python_unsupported_msg(void)
+{
+ fprintf(stderr, "Python scripting not supported."
+ " Install libpython and rebuild perf to enable it.\n"
+ "For example:\n # apt-get install python-dev (ubuntu)"
+ "\n # yum install python-devel (Fedora)"
+ "\n etc.\n");
+}
+
+static int python_start_script_unsupported(const char *script __unused,
+ int argc __unused,
+ const char **argv __unused)
+{
+ print_python_unsupported_msg();
+
+ return -1;
+}
+
+static int python_generate_script_unsupported(const char *outfile __unused)
+{
+ print_python_unsupported_msg();
+
+ return -1;
+}
+
+struct scripting_ops python_scripting_unsupported_ops = {
+ .name = "Python",
+ .start_script = python_start_script_unsupported,
+ .stop_script = stop_script_unsupported,
+ .process_event = process_event_unsupported,
+ .generate_script = python_generate_script_unsupported,
+};
+
+static void register_python_scripting(struct scripting_ops *scripting_ops)
+{
+ int err;
+ err = script_spec_register("Python", scripting_ops);
+ if (err)
+ die("error registering Python script extension");
+
+ err = script_spec_register("py", scripting_ops);
+ if (err)
+ die("error registering py script extension");
+
+ scripting_context = malloc(sizeof(struct scripting_context));
+}
+
+#ifdef NO_LIBPYTHON
+void setup_python_scripting(void)
+{
+ register_python_scripting(&python_scripting_unsupported_ops);
+}
+#else
+extern struct scripting_ops python_scripting_ops;
+
+void setup_python_scripting(void)
+{
+ register_python_scripting(&python_scripting_ops);
+}
+#endif
+
+static void print_perl_unsupported_msg(void)
+{
+ fprintf(stderr, "Perl scripting not supported."
+ " Install libperl and rebuild perf to enable it.\n"
+ "For example:\n # apt-get install libperl-dev (ubuntu)"
+ "\n # yum install 'perl(ExtUtils::Embed)' (Fedora)"
+ "\n etc.\n");
+}
+
+static int perl_start_script_unsupported(const char *script __unused,
+ int argc __unused,
+ const char **argv __unused)
+{
+ print_perl_unsupported_msg();
+
+ return -1;
+}
+
+static int perl_generate_script_unsupported(const char *outfile __unused)
+{
+ print_perl_unsupported_msg();
+
+ return -1;
+}
+
+struct scripting_ops perl_scripting_unsupported_ops = {
+ .name = "Perl",
+ .start_script = perl_start_script_unsupported,
+ .stop_script = stop_script_unsupported,
+ .process_event = process_event_unsupported,
+ .generate_script = perl_generate_script_unsupported,
+};
+
+static void register_perl_scripting(struct scripting_ops *scripting_ops)
+{
+ int err;
+ err = script_spec_register("Perl", scripting_ops);
+ if (err)
+ die("error registering Perl script extension");
+
+ err = script_spec_register("pl", scripting_ops);
+ if (err)
+ die("error registering pl script extension");
+
+ scripting_context = malloc(sizeof(struct scripting_context));
+}
+
+#ifdef NO_LIBPERL
+void setup_perl_scripting(void)
+{
+ register_perl_scripting(&perl_scripting_unsupported_ops);
+}
+#else
+extern struct scripting_ops perl_scripting_ops;
+
+void setup_perl_scripting(void)
+{
+ register_perl_scripting(&perl_scripting_ops);
+}
+#endif
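The registration pattern above generalizes: a scripting backend is just a struct scripting_ops registered under one or more spec names via script_spec_register(). The sketch below shows how a hypothetical third backend would plug in; all "lua" names are invented for illustration and do not exist in perf, and the includes assume the file sits next to trace-event-scripting.c.

#include "../perf.h"
#include "util.h"
#include "trace-event.h"

static int lua_start_script(const char *script __unused,
			    int argc __unused, const char **argv __unused)
{
	return -1;	/* a real backend would boot its interpreter here */
}

static int lua_stop_script(void)
{
	return 0;
}

static void lua_process_event(union perf_event *event __unused,
			      struct perf_sample *sample __unused,
			      struct perf_evsel *evsel __unused,
			      struct machine *machine __unused,
			      struct thread *thread __unused)
{
	/* a real backend would hand the sample to its script here */
}

static int lua_generate_script_stub(const char *outfile __unused)
{
	return -1;
}

static struct scripting_ops lua_scripting_ops = {
	.name		 = "Lua",
	.start_script	 = lua_start_script,
	.stop_script	 = lua_stop_script,
	.process_event	 = lua_process_event,
	.generate_script = lua_generate_script_stub,
};

void setup_lua_scripting(void)
{
	if (script_spec_register("lua", &lua_scripting_ops))
		die("error registering lua script extension");
}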
diff --git a/ANDROID_3.4.5/tools/perf/util/trace-event.h b/ANDROID_3.4.5/tools/perf/util/trace-event.h
new file mode 100644
index 00000000..58ae14c5
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/trace-event.h
@@ -0,0 +1,317 @@
+#ifndef __PERF_TRACE_EVENTS_H
+#define __PERF_TRACE_EVENTS_H
+
+#include <stdbool.h>
+#include "parse-events.h"
+
+struct machine;
+struct perf_sample;
+union perf_event;
+struct thread;
+
+#define __unused __attribute__((unused))
+
+
+#ifndef PAGE_MASK
+#define PAGE_MASK (page_size - 1)
+#endif
+
+enum {
+ RINGBUF_TYPE_PADDING = 29,
+ RINGBUF_TYPE_TIME_EXTEND = 30,
+ RINGBUF_TYPE_TIME_STAMP = 31,
+};
+
+#ifndef TS_SHIFT
+#define TS_SHIFT 27
+#endif
+
+#define NSECS_PER_SEC 1000000000ULL
+#define NSECS_PER_USEC 1000ULL
+
+enum format_flags {
+ FIELD_IS_ARRAY = 1,
+ FIELD_IS_POINTER = 2,
+ FIELD_IS_SIGNED = 4,
+ FIELD_IS_STRING = 8,
+ FIELD_IS_DYNAMIC = 16,
+ FIELD_IS_FLAG = 32,
+ FIELD_IS_SYMBOLIC = 64,
+};
+
+struct format_field {
+ struct format_field *next;
+ char *type;
+ char *name;
+ int offset;
+ int size;
+ unsigned long flags;
+};
+
+struct format {
+ int nr_common;
+ int nr_fields;
+ struct format_field *common_fields;
+ struct format_field *fields;
+};
+
+struct print_arg_atom {
+ char *atom;
+};
+
+struct print_arg_string {
+ char *string;
+ int offset;
+};
+
+struct print_arg_field {
+ char *name;
+ struct format_field *field;
+};
+
+struct print_flag_sym {
+ struct print_flag_sym *next;
+ char *value;
+ char *str;
+};
+
+struct print_arg_typecast {
+ char *type;
+ struct print_arg *item;
+};
+
+struct print_arg_flags {
+ struct print_arg *field;
+ char *delim;
+ struct print_flag_sym *flags;
+};
+
+struct print_arg_symbol {
+ struct print_arg *field;
+ struct print_flag_sym *symbols;
+};
+
+struct print_arg;
+
+struct print_arg_op {
+ char *op;
+ int prio;
+ struct print_arg *left;
+ struct print_arg *right;
+};
+
+struct print_arg_func {
+ char *name;
+ struct print_arg *args;
+};
+
+enum print_arg_type {
+ PRINT_NULL,
+ PRINT_ATOM,
+ PRINT_FIELD,
+ PRINT_FLAGS,
+ PRINT_SYMBOL,
+ PRINT_TYPE,
+ PRINT_STRING,
+ PRINT_OP,
+};
+
+struct print_arg {
+ struct print_arg *next;
+ enum print_arg_type type;
+ union {
+ struct print_arg_atom atom;
+ struct print_arg_field field;
+ struct print_arg_typecast typecast;
+ struct print_arg_flags flags;
+ struct print_arg_symbol symbol;
+ struct print_arg_func func;
+ struct print_arg_string string;
+ struct print_arg_op op;
+ };
+};
+
+struct print_fmt {
+ char *format;
+ struct print_arg *args;
+};
+
+struct event {
+ struct event *next;
+ char *name;
+ int id;
+ int flags;
+ struct format format;
+ struct print_fmt print_fmt;
+ char *system;
+};
+
+enum {
+ EVENT_FL_ISFTRACE = 0x01,
+ EVENT_FL_ISPRINT = 0x02,
+ EVENT_FL_ISBPRINT = 0x04,
+ EVENT_FL_ISFUNC = 0x08,
+ EVENT_FL_ISFUNCENT = 0x10,
+ EVENT_FL_ISFUNCRET = 0x20,
+
+ EVENT_FL_FAILED = 0x80000000
+};
+
+struct record {
+ unsigned long long ts;
+ int size;
+ void *data;
+};
+
+struct record *trace_peek_data(int cpu);
+struct record *trace_read_data(int cpu);
+
+void parse_set_info(int nr_cpus, int long_sz);
+
+ssize_t trace_report(int fd, bool repipe);
+
+void *malloc_or_die(unsigned int size);
+
+void parse_cmdlines(char *file, int size);
+void parse_proc_kallsyms(char *file, unsigned int size);
+void parse_ftrace_printk(char *file, unsigned int size);
+
+void print_funcs(void);
+void print_printk(void);
+
+int parse_ftrace_file(char *buf, unsigned long size);
+int parse_event_file(char *buf, unsigned long size, char *sys);
+void print_trace_event(int cpu, void *data, int size);
+
+extern int file_bigendian;
+extern int host_bigendian;
+
+int bigendian(void);
+
+static inline unsigned short __data2host2(unsigned short data)
+{
+ unsigned short swap;
+
+ if (host_bigendian == file_bigendian)
+ return data;
+
+ swap = ((data & 0xffULL) << 8) |
+ ((data & (0xffULL << 8)) >> 8);
+
+ return swap;
+}
+
+static inline unsigned int __data2host4(unsigned int data)
+{
+ unsigned int swap;
+
+ if (host_bigendian == file_bigendian)
+ return data;
+
+ swap = ((data & 0xffULL) << 24) |
+ ((data & (0xffULL << 8)) << 8) |
+ ((data & (0xffULL << 16)) >> 8) |
+ ((data & (0xffULL << 24)) >> 24);
+
+ return swap;
+}
+
+static inline unsigned long long __data2host8(unsigned long long data)
+{
+ unsigned long long swap;
+
+ if (host_bigendian == file_bigendian)
+ return data;
+
+ swap = ((data & 0xffULL) << 56) |
+ ((data & (0xffULL << 8)) << 40) |
+ ((data & (0xffULL << 16)) << 24) |
+ ((data & (0xffULL << 24)) << 8) |
+ ((data & (0xffULL << 32)) >> 8) |
+ ((data & (0xffULL << 40)) >> 24) |
+ ((data & (0xffULL << 48)) >> 40) |
+ ((data & (0xffULL << 56)) >> 56);
+
+ return swap;
+}
+
+#define data2host2(ptr) __data2host2(*(unsigned short *)ptr)
+#define data2host4(ptr) __data2host4(*(unsigned int *)ptr)
+#define data2host8(ptr) ({ \
+ unsigned long long __val; \
+ \
+ memcpy(&__val, (ptr), sizeof(unsigned long long)); \
+ __data2host8(__val); \
+})
+
+extern int header_page_ts_offset;
+extern int header_page_ts_size;
+extern int header_page_size_offset;
+extern int header_page_size_size;
+extern int header_page_data_offset;
+extern int header_page_data_size;
+
+extern bool latency_format;
+
+int trace_parse_common_type(void *data);
+int trace_parse_common_pid(void *data);
+int parse_common_pc(void *data);
+int parse_common_flags(void *data);
+int parse_common_lock_depth(void *data);
+struct event *trace_find_event(int id);
+struct event *trace_find_next_event(struct event *event);
+unsigned long long read_size(void *ptr, int size);
+unsigned long long
+raw_field_value(struct event *event, const char *name, void *data);
+void *raw_field_ptr(struct event *event, const char *name, void *data);
+unsigned long long eval_flag(const char *flag);
+
+int read_tracing_data(int fd, struct list_head *pattrs);
+
+struct tracing_data {
+ /* size is only valid if temp is 'true' */
+ ssize_t size;
+ bool temp;
+ char temp_file[50];
+};
+
+struct tracing_data *tracing_data_get(struct list_head *pattrs,
+ int fd, bool temp);
+void tracing_data_put(struct tracing_data *tdata);
+
+
+/* taken from kernel/trace/trace.h */
+enum trace_flag_type {
+ TRACE_FLAG_IRQS_OFF = 0x01,
+ TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
+ TRACE_FLAG_NEED_RESCHED = 0x04,
+ TRACE_FLAG_HARDIRQ = 0x08,
+ TRACE_FLAG_SOFTIRQ = 0x10,
+};
+
+struct scripting_ops {
+ const char *name;
+ int (*start_script) (const char *script, int argc, const char **argv);
+ int (*stop_script) (void);
+ void (*process_event) (union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine,
+ struct thread *thread);
+ int (*generate_script) (const char *outfile);
+};
+
+int script_spec_register(const char *spec, struct scripting_ops *ops);
+
+void setup_perl_scripting(void);
+void setup_python_scripting(void);
+
+struct scripting_context {
+ void *event_data;
+};
+
+int common_pc(struct scripting_context *context);
+int common_flags(struct scripting_context *context);
+int common_lock_depth(struct scripting_context *context);
+
+#endif /* __PERF_TRACE_EVENTS_H */
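The __data2host*() helpers above only swap bytes when file_bigendian and host_bigendian disagree, so parsing code can be written once for both byte orders. A small hypothetical helper, mirroring what read_size() in trace-event-parse.c does, shows the intended use of the data2host*() macros:

#include "trace-event.h"

/* Illustrative only: read an integer field of the given size from a raw
 * record, converting from file to host byte order.
 */
static unsigned long long field_to_host(void *rec, int offset, int size)
{
	void *ptr = rec + offset;	/* void * arithmetic, as used throughout */

	switch (size) {
	case 1:
		return *(unsigned char *)ptr;
	case 2:
		return data2host2(ptr);
	case 4:
		return data2host4(ptr);
	default:
		return data2host8(ptr);
	}
}

Keeping the conversion in macros lets callers pass a raw pointer into the record without copying, except for the 8-byte case, which goes through memcpy() to avoid unaligned loads.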
diff --git a/ANDROID_3.4.5/tools/perf/util/types.h b/ANDROID_3.4.5/tools/perf/util/types.h
new file mode 100644
index 00000000..5f3689a3
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/types.h
@@ -0,0 +1,19 @@
+#ifndef __PERF_TYPES_H
+#define __PERF_TYPES_H
+
+#include <stdint.h>
+
+/*
+ * We define u64 as uint64_t for every architecture
+ * so that we can print it with "%"PRIx64 without getting warnings.
+ */
+typedef uint64_t u64;
+typedef int64_t s64;
+typedef unsigned int u32;
+typedef signed int s32;
+typedef unsigned short u16;
+typedef signed short s16;
+typedef unsigned char u8;
+typedef signed char s8;
+
+#endif /* __PERF_TYPES_H */
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/browser.c b/ANDROID_3.4.5/tools/perf/util/ui/browser.c
new file mode 100644
index 00000000..55682912
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/browser.c
@@ -0,0 +1,597 @@
+#include "../util.h"
+#include "../cache.h"
+#include "../../perf.h"
+#include "libslang.h"
+#include <newt.h>
+#include "ui.h"
+#include "util.h"
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <stdlib.h>
+#include <sys/ttydefaults.h>
+#include "browser.h"
+#include "helpline.h"
+#include "keysyms.h"
+#include "../color.h"
+
+static int ui_browser__percent_color(struct ui_browser *browser,
+ double percent, bool current)
+{
+ if (current && (!browser->use_navkeypressed || browser->navkeypressed))
+ return HE_COLORSET_SELECTED;
+ if (percent >= MIN_RED)
+ return HE_COLORSET_TOP;
+ if (percent >= MIN_GREEN)
+ return HE_COLORSET_MEDIUM;
+ return HE_COLORSET_NORMAL;
+}
+
+void ui_browser__set_color(struct ui_browser *self __used, int color)
+{
+ SLsmg_set_color(color);
+}
+
+void ui_browser__set_percent_color(struct ui_browser *self,
+ double percent, bool current)
+{
+ int color = ui_browser__percent_color(self, percent, current);
+ ui_browser__set_color(self, color);
+}
+
+void ui_browser__gotorc(struct ui_browser *self, int y, int x)
+{
+ SLsmg_gotorc(self->y + y, self->x + x);
+}
+
+static struct list_head *
+ui_browser__list_head_filter_entries(struct ui_browser *browser,
+ struct list_head *pos)
+{
+ do {
+ if (!browser->filter || !browser->filter(browser, pos))
+ return pos;
+ pos = pos->next;
+ } while (pos != browser->entries);
+
+ return NULL;
+}
+
+static struct list_head *
+ui_browser__list_head_filter_prev_entries(struct ui_browser *browser,
+ struct list_head *pos)
+{
+ do {
+ if (!browser->filter || !browser->filter(browser, pos))
+ return pos;
+ pos = pos->prev;
+ } while (pos != browser->entries);
+
+ return NULL;
+}
+
+void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence)
+{
+ struct list_head *head = self->entries;
+ struct list_head *pos;
+
+ if (self->nr_entries == 0)
+ return;
+
+ switch (whence) {
+ case SEEK_SET:
+ pos = ui_browser__list_head_filter_entries(self, head->next);
+ break;
+ case SEEK_CUR:
+ pos = self->top;
+ break;
+ case SEEK_END:
+ pos = ui_browser__list_head_filter_prev_entries(self, head->prev);
+ break;
+ default:
+ return;
+ }
+
+ assert(pos != NULL);
+
+ if (offset > 0) {
+ while (offset-- != 0)
+ pos = ui_browser__list_head_filter_entries(self, pos->next);
+ } else {
+ while (offset++ != 0)
+ pos = ui_browser__list_head_filter_prev_entries(self, pos->prev);
+ }
+
+ self->top = pos;
+}
+
+void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
+{
+ struct rb_root *root = self->entries;
+ struct rb_node *nd;
+
+ switch (whence) {
+ case SEEK_SET:
+ nd = rb_first(root);
+ break;
+ case SEEK_CUR:
+ nd = self->top;
+ break;
+ case SEEK_END:
+ nd = rb_last(root);
+ break;
+ default:
+ return;
+ }
+
+ if (offset > 0) {
+ while (offset-- != 0)
+ nd = rb_next(nd);
+ } else {
+ while (offset++ != 0)
+ nd = rb_prev(nd);
+ }
+
+ self->top = nd;
+}
+
+unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
+{
+ struct rb_node *nd;
+ int row = 0;
+
+ if (self->top == NULL)
+ self->top = rb_first(self->entries);
+
+ nd = self->top;
+
+ while (nd != NULL) {
+ ui_browser__gotorc(self, row, 0);
+ self->write(self, nd, row);
+ if (++row == self->height)
+ break;
+ nd = rb_next(nd);
+ }
+
+ return row;
+}
+
+bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row)
+{
+ return self->top_idx + row == self->index;
+}
+
+void ui_browser__refresh_dimensions(struct ui_browser *self)
+{
+ self->width = SLtt_Screen_Cols - 1;
+ self->height = SLtt_Screen_Rows - 2;
+ self->y = 1;
+ self->x = 0;
+}
+
+void ui_browser__handle_resize(struct ui_browser *browser)
+{
+ ui__refresh_dimensions(false);
+ ui_browser__show(browser, browser->title, ui_helpline__current);
+ ui_browser__refresh(browser);
+}
+
+int ui_browser__warning(struct ui_browser *browser, int timeout,
+ const char *format, ...)
+{
+ va_list args;
+ char *text;
+ int key = 0, err;
+
+ va_start(args, format);
+ err = vasprintf(&text, format, args);
+ va_end(args);
+
+ if (err < 0) {
+ va_start(args, format);
+ ui_helpline__vpush(format, args);
+ va_end(args);
+ } else {
+ while ((key = ui__question_window("Warning!", text,
+ "Press any key...",
+ timeout)) == K_RESIZE)
+ ui_browser__handle_resize(browser);
+ free(text);
+ }
+
+ return key;
+}
+
+int ui_browser__help_window(struct ui_browser *browser, const char *text)
+{
+ int key;
+
+ while ((key = ui__help_window(text)) == K_RESIZE)
+ ui_browser__handle_resize(browser);
+
+ return key;
+}
+
+bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text)
+{
+ int key;
+
+ while ((key = ui__dialog_yesno(text)) == K_RESIZE)
+ ui_browser__handle_resize(browser);
+
+ return key == K_ENTER || toupper(key) == 'Y';
+}
+
+void ui_browser__reset_index(struct ui_browser *self)
+{
+ self->index = self->top_idx = 0;
+ self->seek(self, 0, SEEK_SET);
+}
+
+void __ui_browser__show_title(struct ui_browser *browser, const char *title)
+{
+ SLsmg_gotorc(0, 0);
+ ui_browser__set_color(browser, NEWT_COLORSET_ROOT);
+ slsmg_write_nstring(title, browser->width + 1);
+}
+
+void ui_browser__show_title(struct ui_browser *browser, const char *title)
+{
+ pthread_mutex_lock(&ui__lock);
+ __ui_browser__show_title(browser, title);
+ pthread_mutex_unlock(&ui__lock);
+}
+
+int ui_browser__show(struct ui_browser *self, const char *title,
+ const char *helpline, ...)
+{
+ int err;
+ va_list ap;
+
+ ui_browser__refresh_dimensions(self);
+
+ pthread_mutex_lock(&ui__lock);
+ __ui_browser__show_title(self, title);
+
+ self->title = title;
+ free(self->helpline);
+ self->helpline = NULL;
+
+ va_start(ap, helpline);
+ err = vasprintf(&self->helpline, helpline, ap);
+ va_end(ap);
+ if (err > 0)
+ ui_helpline__push(self->helpline);
+ pthread_mutex_unlock(&ui__lock);
+ return err > 0 ? 0 : -1;
+}
+
+void ui_browser__hide(struct ui_browser *browser __used)
+{
+ pthread_mutex_lock(&ui__lock);
+ ui_helpline__pop();
+ pthread_mutex_unlock(&ui__lock);
+}
+
+static void ui_browser__scrollbar_set(struct ui_browser *browser)
+{
+ int height = browser->height, h = 0, pct = 0,
+ col = browser->width,
+ row = browser->y - 1;
+
+ if (browser->nr_entries > 1) {
+ pct = ((browser->index * (browser->height - 1)) /
+ (browser->nr_entries - 1));
+ }
+
+ SLsmg_set_char_set(1);
+
+ while (h < height) {
+ ui_browser__gotorc(browser, row++, col);
+ SLsmg_write_char(h == pct ? SLSMG_DIAMOND_CHAR : SLSMG_CKBRD_CHAR);
+ ++h;
+ }
+
+ SLsmg_set_char_set(0);
+}
+
+static int __ui_browser__refresh(struct ui_browser *browser)
+{
+ int row;
+ int width = browser->width;
+
+ row = browser->refresh(browser);
+ ui_browser__set_color(browser, HE_COLORSET_NORMAL);
+
+ if (!browser->use_navkeypressed || browser->navkeypressed)
+ ui_browser__scrollbar_set(browser);
+ else
+ width += 1;
+
+ SLsmg_fill_region(browser->y + row, browser->x,
+ browser->height - row, width, ' ');
+
+ return 0;
+}
+
+int ui_browser__refresh(struct ui_browser *browser)
+{
+ pthread_mutex_lock(&ui__lock);
+ __ui_browser__refresh(browser);
+ pthread_mutex_unlock(&ui__lock);
+
+ return 0;
+}
+
+/*
+ * Here we're updating nr_entries _after_ we started browsing, i.e. we have to
+ * forget about any reference to any entry in the underlying data structure,
+ * that is why we do a SEEK_SET. Think about 'perf top' in the hists browser
+ * after an output_resort and hist decay.
+ */
+void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries)
+{
+ off_t offset = nr_entries - browser->nr_entries;
+
+ browser->nr_entries = nr_entries;
+
+ if (offset < 0) {
+ if (browser->top_idx < (u64)-offset)
+ offset = -browser->top_idx;
+
+ browser->index += offset;
+ browser->top_idx += offset;
+ }
+
+ browser->top = NULL;
+ browser->seek(browser, browser->top_idx, SEEK_SET);
+}
+
+int ui_browser__run(struct ui_browser *self, int delay_secs)
+{
+ int err, key;
+
+ while (1) {
+ off_t offset;
+
+ pthread_mutex_lock(&ui__lock);
+ err = __ui_browser__refresh(self);
+ SLsmg_refresh();
+ pthread_mutex_unlock(&ui__lock);
+ if (err < 0)
+ break;
+
+ key = ui__getch(delay_secs);
+
+ if (key == K_RESIZE) {
+ ui__refresh_dimensions(false);
+ ui_browser__refresh_dimensions(self);
+ __ui_browser__show_title(self, self->title);
+ ui_helpline__puts(self->helpline);
+ continue;
+ }
+
+ if (self->use_navkeypressed && !self->navkeypressed) {
+ if (key == K_DOWN || key == K_UP ||
+ key == K_PGDN || key == K_PGUP ||
+ key == K_HOME || key == K_END ||
+ key == ' ') {
+ self->navkeypressed = true;
+ continue;
+ } else
+ return key;
+ }
+
+ switch (key) {
+ case K_DOWN:
+ if (self->index == self->nr_entries - 1)
+ break;
+ ++self->index;
+ if (self->index == self->top_idx + self->height) {
+ ++self->top_idx;
+ self->seek(self, +1, SEEK_CUR);
+ }
+ break;
+ case K_UP:
+ if (self->index == 0)
+ break;
+ --self->index;
+ if (self->index < self->top_idx) {
+ --self->top_idx;
+ self->seek(self, -1, SEEK_CUR);
+ }
+ break;
+ case K_PGDN:
+ case ' ':
+ if (self->top_idx + self->height > self->nr_entries - 1)
+ break;
+
+ offset = self->height;
+ if (self->index + offset > self->nr_entries - 1)
+ offset = self->nr_entries - 1 - self->index;
+ self->index += offset;
+ self->top_idx += offset;
+ self->seek(self, +offset, SEEK_CUR);
+ break;
+ case K_PGUP:
+ if (self->top_idx == 0)
+ break;
+
+ if (self->top_idx < self->height)
+ offset = self->top_idx;
+ else
+ offset = self->height;
+
+ self->index -= offset;
+ self->top_idx -= offset;
+ self->seek(self, -offset, SEEK_CUR);
+ break;
+ case K_HOME:
+ ui_browser__reset_index(self);
+ break;
+ case K_END:
+ offset = self->height - 1;
+ if (offset >= self->nr_entries)
+ offset = self->nr_entries - 1;
+
+ self->index = self->nr_entries - 1;
+ self->top_idx = self->index - offset;
+ self->seek(self, -offset, SEEK_END);
+ break;
+ default:
+ return key;
+ }
+ }
+ return -1;
+}
+
+unsigned int ui_browser__list_head_refresh(struct ui_browser *self)
+{
+ struct list_head *pos;
+ struct list_head *head = self->entries;
+ int row = 0;
+
+ if (self->top == NULL || self->top == self->entries)
+ self->top = ui_browser__list_head_filter_entries(self, head->next);
+
+ pos = self->top;
+
+ list_for_each_from(pos, head) {
+ if (!self->filter || !self->filter(self, pos)) {
+ ui_browser__gotorc(self, row, 0);
+ self->write(self, pos, row);
+ if (++row == self->height)
+ break;
+ }
+ }
+
+ return row;
+}
+
+static struct ui_browser__colorset {
+ const char *name, *fg, *bg;
+ int colorset;
+} ui_browser__colorsets[] = {
+ {
+ .colorset = HE_COLORSET_TOP,
+ .name = "top",
+ .fg = "red",
+ .bg = "default",
+ },
+ {
+ .colorset = HE_COLORSET_MEDIUM,
+ .name = "medium",
+ .fg = "green",
+ .bg = "default",
+ },
+ {
+ .colorset = HE_COLORSET_NORMAL,
+ .name = "normal",
+ .fg = "default",
+ .bg = "default",
+ },
+ {
+ .colorset = HE_COLORSET_SELECTED,
+ .name = "selected",
+ .fg = "black",
+ .bg = "lightgray",
+ },
+ {
+ .colorset = HE_COLORSET_CODE,
+ .name = "code",
+ .fg = "blue",
+ .bg = "default",
+ },
+ {
+ .name = NULL,
+ }
+};
+
+
+static int ui_browser__color_config(const char *var, const char *value,
+ void *data __used)
+{
+ char *fg = NULL, *bg;
+ int i;
+
+	/* all color settings live under the "colors." prefix */
+ if (prefixcmp(var, "colors.") != 0)
+ return 0;
+
+ for (i = 0; ui_browser__colorsets[i].name != NULL; ++i) {
+ const char *name = var + 7;
+
+ if (strcmp(ui_browser__colorsets[i].name, name) != 0)
+ continue;
+
+ fg = strdup(value);
+ if (fg == NULL)
+ break;
+
+ bg = strchr(fg, ',');
+ if (bg == NULL)
+ break;
+
+ *bg = '\0';
+ while (isspace(*++bg));
+ ui_browser__colorsets[i].bg = bg;
+ ui_browser__colorsets[i].fg = fg;
+ return 0;
+ }
+
+ free(fg);
+ return -1;
+}
+
+void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence)
+{
+ switch (whence) {
+ case SEEK_SET:
+ browser->top = browser->entries;
+ break;
+ case SEEK_CUR:
+ browser->top = browser->top + browser->top_idx + offset;
+ break;
+ case SEEK_END:
+ browser->top = browser->top + browser->nr_entries + offset;
+ break;
+ default:
+ return;
+ }
+}
+
+unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
+{
+ unsigned int row = 0, idx = browser->top_idx;
+ char **pos;
+
+ if (browser->top == NULL)
+ browser->top = browser->entries;
+
+ pos = (char **)browser->top;
+ while (idx < browser->nr_entries) {
+ if (!browser->filter || !browser->filter(browser, *pos)) {
+ ui_browser__gotorc(browser, row, 0);
+ browser->write(browser, pos, row);
+ if (++row == browser->height)
+ break;
+ }
+
+ ++idx;
+ ++pos;
+ }
+
+ return row;
+}
+
+void ui_browser__init(void)
+{
+ int i = 0;
+
+ perf_config(ui_browser__color_config, NULL);
+
+ while (ui_browser__colorsets[i].name) {
+ struct ui_browser__colorset *c = &ui_browser__colorsets[i++];
+ sltt_set_color(c->colorset, c->name, c->fg, c->bg);
+ }
+}
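
A note on the color hook above: ui_browser__color_config() only acts on variables under the "colors." prefix and expects the value to be a foreground and a background separated by a comma, matched against the names in ui_browser__colorsets[]. Assuming the usual git-style "[section] key = value" syntax that perf_config() parses (config.c appears in this same series), a user override could look roughly like the sketch below; the color names are simply whatever libslang's sltt_set_color() accepts:

    [colors]
            top = yellow, default
            selected = black, lightgray
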
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/browser.h b/ANDROID_3.4.5/tools/perf/util/ui/browser.h
new file mode 100644
index 00000000..6ee82f60
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/browser.h
@@ -0,0 +1,65 @@
+#ifndef _PERF_UI_BROWSER_H_
+#define _PERF_UI_BROWSER_H_ 1
+
+#include <stdbool.h>
+#include <sys/types.h>
+#include "../types.h"
+
+#define HE_COLORSET_TOP 50
+#define HE_COLORSET_MEDIUM 51
+#define HE_COLORSET_NORMAL 52
+#define HE_COLORSET_SELECTED 53
+#define HE_COLORSET_CODE 54
+
+struct ui_browser {
+ u64 index, top_idx;
+ void *top, *entries;
+ u16 y, x, width, height;
+ void *priv;
+ const char *title;
+ char *helpline;
+ unsigned int (*refresh)(struct ui_browser *self);
+ void (*write)(struct ui_browser *self, void *entry, int row);
+ void (*seek)(struct ui_browser *self, off_t offset, int whence);
+ bool (*filter)(struct ui_browser *self, void *entry);
+ u32 nr_entries;
+ bool navkeypressed;
+ bool use_navkeypressed;
+};
+
+void ui_browser__set_color(struct ui_browser *self, int color);
+void ui_browser__set_percent_color(struct ui_browser *self,
+ double percent, bool current);
+bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row);
+void ui_browser__refresh_dimensions(struct ui_browser *self);
+void ui_browser__reset_index(struct ui_browser *self);
+
+void ui_browser__gotorc(struct ui_browser *self, int y, int x);
+void __ui_browser__show_title(struct ui_browser *browser, const char *title);
+void ui_browser__show_title(struct ui_browser *browser, const char *title);
+int ui_browser__show(struct ui_browser *self, const char *title,
+ const char *helpline, ...);
+void ui_browser__hide(struct ui_browser *self);
+int ui_browser__refresh(struct ui_browser *self);
+int ui_browser__run(struct ui_browser *browser, int delay_secs);
+void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries);
+void ui_browser__handle_resize(struct ui_browser *browser);
+
+int ui_browser__warning(struct ui_browser *browser, int timeout,
+ const char *format, ...);
+int ui_browser__help_window(struct ui_browser *browser, const char *text);
+bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text);
+int ui_browser__input_window(const char *title, const char *text, char *input,
+ const char *exit_msg, int delay_sec);
+
+void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence);
+unsigned int ui_browser__argv_refresh(struct ui_browser *browser);
+
+void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence);
+unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self);
+
+void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence);
+unsigned int ui_browser__list_head_refresh(struct ui_browser *self);
+
+void ui_browser__init(void);
+#endif /* _PERF_UI_BROWSER_H_ */
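
The struct above is deliberately generic: a client supplies the entries pointer plus the refresh/seek/write callbacks, and scrolling, key handling and resize are done by ui_browser__run(). A minimal, hypothetical client built on the argv helpers declared above could look like this sketch (menu__write and menu__run are illustrative names, not part of this patch):

    #include "browser.h"
    #include "libslang.h"

    /* render one entry; entries are plain C strings (char **) */
    static void menu__write(struct ui_browser *browser, void *entry, int row)
    {
            char **line = entry;
            bool current = ui_browser__is_current_entry(browser, row);

            ui_browser__set_percent_color(browser, 0, current);
            slsmg_write_nstring(*line, browser->width);
    }

    static int menu__run(char **argv, int nr_entries)
    {
            struct ui_browser menu = {
                    .entries    = argv,
                    .refresh    = ui_browser__argv_refresh,
                    .seek       = ui_browser__argv_seek,
                    .write      = menu__write,
                    .nr_entries = nr_entries,
            };
            int key;

            if (ui_browser__show(&menu, "Pick an entry",
                                 "ENTER: select, ESC: exit") < 0)
                    return -1;

            key = ui_browser__run(&menu, 0);
            ui_browser__hide(&menu);
            return key;
    }
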
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/browsers/annotate.c b/ANDROID_3.4.5/tools/perf/util/ui/browsers/annotate.c
new file mode 100644
index 00000000..57a4c6ef
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/browsers/annotate.c
@@ -0,0 +1,433 @@
+#include "../../util.h"
+#include "../browser.h"
+#include "../helpline.h"
+#include "../libslang.h"
+#include "../ui.h"
+#include "../util.h"
+#include "../../annotate.h"
+#include "../../hist.h"
+#include "../../sort.h"
+#include "../../symbol.h"
+#include <pthread.h>
+#include <newt.h>
+
+struct annotate_browser {
+ struct ui_browser b;
+ struct rb_root entries;
+ struct rb_node *curr_hot;
+ struct objdump_line *selection;
+ int nr_asm_entries;
+ int nr_entries;
+ bool hide_src_code;
+};
+
+struct objdump_line_rb_node {
+ struct rb_node rb_node;
+ double percent;
+ u32 idx;
+ int idx_asm;
+};
+
+static inline
+struct objdump_line_rb_node *objdump_line__rb(struct objdump_line *self)
+{
+ return (struct objdump_line_rb_node *)(self + 1);
+}
+
+static bool objdump_line__filter(struct ui_browser *browser, void *entry)
+{
+ struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
+
+ if (ab->hide_src_code) {
+ struct objdump_line *ol = list_entry(entry, struct objdump_line, node);
+ return ol->offset == -1;
+ }
+
+ return false;
+}
+
+static void annotate_browser__write(struct ui_browser *self, void *entry, int row)
+{
+ struct annotate_browser *ab = container_of(self, struct annotate_browser, b);
+ struct objdump_line *ol = list_entry(entry, struct objdump_line, node);
+ bool current_entry = ui_browser__is_current_entry(self, row);
+ int width = self->width;
+
+ if (ol->offset != -1) {
+ struct objdump_line_rb_node *olrb = objdump_line__rb(ol);
+ ui_browser__set_percent_color(self, olrb->percent, current_entry);
+ slsmg_printf(" %7.2f ", olrb->percent);
+ } else {
+ ui_browser__set_percent_color(self, 0, current_entry);
+ slsmg_write_nstring(" ", 9);
+ }
+
+ SLsmg_write_char(':');
+ slsmg_write_nstring(" ", 8);
+
+ /* The scroll bar isn't being used */
+ if (!self->navkeypressed)
+ width += 1;
+
+ if (!ab->hide_src_code && ol->offset != -1)
+ if (!current_entry || (self->use_navkeypressed &&
+ !self->navkeypressed))
+ ui_browser__set_color(self, HE_COLORSET_CODE);
+
+ if (!*ol->line)
+ slsmg_write_nstring(" ", width - 18);
+ else
+ slsmg_write_nstring(ol->line, width - 18);
+
+ if (current_entry)
+ ab->selection = ol;
+}
+
+static double objdump_line__calc_percent(struct objdump_line *self,
+ struct symbol *sym, int evidx)
+{
+ double percent = 0.0;
+
+ if (self->offset != -1) {
+ int len = sym->end - sym->start;
+ unsigned int hits = 0;
+ struct annotation *notes = symbol__annotation(sym);
+ struct source_line *src_line = notes->src->lines;
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+ s64 offset = self->offset;
+ struct objdump_line *next;
+
+ next = objdump__get_next_ip_line(&notes->src->source, self);
+ while (offset < (s64)len &&
+ (next == NULL || offset < next->offset)) {
+ if (src_line) {
+ percent += src_line[offset].percent;
+ } else
+ hits += h->addr[offset];
+
+ ++offset;
+ }
+ /*
+ * If the percentage wasn't already calculated in
+ * symbol__get_source_line, do it now:
+ */
+ if (src_line == NULL && h->sum)
+ percent = 100.0 * hits / h->sum;
+ }
+
+ return percent;
+}
+
+static void objdump__insert_line(struct rb_root *self,
+ struct objdump_line_rb_node *line)
+{
+ struct rb_node **p = &self->rb_node;
+ struct rb_node *parent = NULL;
+ struct objdump_line_rb_node *l;
+
+ while (*p != NULL) {
+ parent = *p;
+ l = rb_entry(parent, struct objdump_line_rb_node, rb_node);
+ if (line->percent < l->percent)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&line->rb_node, parent, p);
+ rb_insert_color(&line->rb_node, self);
+}
+
+static void annotate_browser__set_top(struct annotate_browser *self,
+ struct rb_node *nd)
+{
+ struct objdump_line_rb_node *rbpos;
+ struct objdump_line *pos;
+ unsigned back;
+
+ ui_browser__refresh_dimensions(&self->b);
+ back = self->b.height / 2;
+ rbpos = rb_entry(nd, struct objdump_line_rb_node, rb_node);
+ pos = ((struct objdump_line *)rbpos) - 1;
+ self->b.top_idx = self->b.index = rbpos->idx;
+
+ while (self->b.top_idx != 0 && back != 0) {
+ pos = list_entry(pos->node.prev, struct objdump_line, node);
+
+ --self->b.top_idx;
+ --back;
+ }
+
+ self->b.top = pos;
+ self->curr_hot = nd;
+}
+
+static void annotate_browser__calc_percent(struct annotate_browser *browser,
+ int evidx)
+{
+ struct map_symbol *ms = browser->b.priv;
+ struct symbol *sym = ms->sym;
+ struct annotation *notes = symbol__annotation(sym);
+ struct objdump_line *pos;
+
+ browser->entries = RB_ROOT;
+
+ pthread_mutex_lock(&notes->lock);
+
+ list_for_each_entry(pos, &notes->src->source, node) {
+ struct objdump_line_rb_node *rbpos = objdump_line__rb(pos);
+ rbpos->percent = objdump_line__calc_percent(pos, sym, evidx);
+ if (rbpos->percent < 0.01) {
+ RB_CLEAR_NODE(&rbpos->rb_node);
+ continue;
+ }
+ objdump__insert_line(&browser->entries, rbpos);
+ }
+ pthread_mutex_unlock(&notes->lock);
+
+ browser->curr_hot = rb_last(&browser->entries);
+}
+
+static bool annotate_browser__toggle_source(struct annotate_browser *browser)
+{
+ struct objdump_line *ol;
+ struct objdump_line_rb_node *olrb;
+ off_t offset = browser->b.index - browser->b.top_idx;
+
+ browser->b.seek(&browser->b, offset, SEEK_CUR);
+ ol = list_entry(browser->b.top, struct objdump_line, node);
+ olrb = objdump_line__rb(ol);
+
+ if (browser->hide_src_code) {
+ if (olrb->idx_asm < offset)
+ offset = olrb->idx;
+
+ browser->b.nr_entries = browser->nr_entries;
+ browser->hide_src_code = false;
+ browser->b.seek(&browser->b, -offset, SEEK_CUR);
+ browser->b.top_idx = olrb->idx - offset;
+ browser->b.index = olrb->idx;
+ } else {
+ if (olrb->idx_asm < 0) {
+ ui_helpline__puts("Only available for assembly lines.");
+ browser->b.seek(&browser->b, -offset, SEEK_CUR);
+ return false;
+ }
+
+ if (olrb->idx_asm < offset)
+ offset = olrb->idx_asm;
+
+ browser->b.nr_entries = browser->nr_asm_entries;
+ browser->hide_src_code = true;
+ browser->b.seek(&browser->b, -offset, SEEK_CUR);
+ browser->b.top_idx = olrb->idx_asm - offset;
+ browser->b.index = olrb->idx_asm;
+ }
+
+ return true;
+}
+
+static int annotate_browser__run(struct annotate_browser *self, int evidx,
+ void(*timer)(void *arg),
+ void *arg, int delay_secs)
+{
+ struct rb_node *nd = NULL;
+ struct map_symbol *ms = self->b.priv;
+ struct symbol *sym = ms->sym;
+ const char *help = "<-/ESC: Exit, TAB/shift+TAB: Cycle hot lines, "
+ "H: Go to hottest line, ->/ENTER: Line action, "
+ "S: Toggle source code view";
+ int key;
+
+ if (ui_browser__show(&self->b, sym->name, help) < 0)
+ return -1;
+
+ annotate_browser__calc_percent(self, evidx);
+
+ if (self->curr_hot)
+ annotate_browser__set_top(self, self->curr_hot);
+
+ nd = self->curr_hot;
+
+ while (1) {
+ key = ui_browser__run(&self->b, delay_secs);
+
+ if (delay_secs != 0) {
+ annotate_browser__calc_percent(self, evidx);
+ /*
+ * Current line focus got out of the list of most active
+ * lines, NULL it so that if TAB|UNTAB is pressed, we
+ * move to curr_hot (current hottest line).
+ */
+ if (nd != NULL && RB_EMPTY_NODE(nd))
+ nd = NULL;
+ }
+
+ switch (key) {
+ case K_TIMER:
+ if (timer != NULL)
+ timer(arg);
+
+ if (delay_secs != 0)
+ symbol__annotate_decay_histogram(sym, evidx);
+ continue;
+ case K_TAB:
+ if (nd != NULL) {
+ nd = rb_prev(nd);
+ if (nd == NULL)
+ nd = rb_last(&self->entries);
+ } else
+ nd = self->curr_hot;
+ break;
+ case K_UNTAB:
+ if (nd != NULL) {
+ nd = rb_next(nd);
+ if (nd == NULL)
+ nd = rb_first(&self->entries);
+ } else
+ nd = self->curr_hot;
+ break;
+ case 'H':
+ case 'h':
+ nd = self->curr_hot;
+ break;
+ case 'S':
+ case 's':
+ if (annotate_browser__toggle_source(self))
+ ui_helpline__puts(help);
+ continue;
+ case K_ENTER:
+ case K_RIGHT:
+ if (self->selection == NULL) {
+ ui_helpline__puts("Huh? No selection. Report to linux-kernel@vger.kernel.org");
+ continue;
+ }
+
+ if (self->selection->offset == -1) {
+ ui_helpline__puts("Actions are only available for assembly lines.");
+ continue;
+ } else {
+ char *s = strstr(self->selection->line, "callq ");
+ struct annotation *notes;
+ struct symbol *target;
+ u64 ip;
+
+ if (s == NULL) {
+ ui_helpline__puts("Actions are only available for the 'callq' instruction.");
+ continue;
+ }
+
+ s = strchr(s, ' ');
+ if (s++ == NULL) {
+ ui_helpline__puts("Invallid callq instruction.");
+ continue;
+ }
+
+ ip = strtoull(s, NULL, 16);
+ ip = ms->map->map_ip(ms->map, ip);
+ target = map__find_symbol(ms->map, ip, NULL);
+ if (target == NULL) {
+ ui_helpline__puts("The called function was not found.");
+ continue;
+ }
+
+ notes = symbol__annotation(target);
+ pthread_mutex_lock(&notes->lock);
+
+ if (notes->src == NULL && symbol__alloc_hist(target) < 0) {
+ pthread_mutex_unlock(&notes->lock);
+ ui__warning("Not enough memory for annotating '%s' symbol!\n",
+ target->name);
+ continue;
+ }
+
+ pthread_mutex_unlock(&notes->lock);
+ symbol__tui_annotate(target, ms->map, evidx,
+ timer, arg, delay_secs);
+ ui_browser__show_title(&self->b, sym->name);
+ }
+ continue;
+ case K_LEFT:
+ case K_ESC:
+ case 'q':
+ case CTRL('c'):
+ goto out;
+ default:
+ continue;
+ }
+
+ if (nd != NULL)
+ annotate_browser__set_top(self, nd);
+ }
+out:
+ ui_browser__hide(&self->b);
+ return key;
+}
+
+int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
+ void(*timer)(void *arg), void *arg, int delay_secs)
+{
+ return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx,
+ timer, arg, delay_secs);
+}
+
+int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
+ void(*timer)(void *arg), void *arg,
+ int delay_secs)
+{
+ struct objdump_line *pos, *n;
+ struct annotation *notes;
+ struct map_symbol ms = {
+ .map = map,
+ .sym = sym,
+ };
+ struct annotate_browser browser = {
+ .b = {
+ .refresh = ui_browser__list_head_refresh,
+ .seek = ui_browser__list_head_seek,
+ .write = annotate_browser__write,
+ .filter = objdump_line__filter,
+ .priv = &ms,
+ .use_navkeypressed = true,
+ },
+ };
+ int ret;
+
+ if (sym == NULL)
+ return -1;
+
+ if (map->dso->annotate_warned)
+ return -1;
+
+ if (symbol__annotate(sym, map, sizeof(struct objdump_line_rb_node)) < 0) {
+ ui__error("%s", ui_helpline__last_msg);
+ return -1;
+ }
+
+ ui_helpline__push("Press <- or ESC to exit");
+
+ notes = symbol__annotation(sym);
+
+ list_for_each_entry(pos, &notes->src->source, node) {
+ struct objdump_line_rb_node *rbpos;
+ size_t line_len = strlen(pos->line);
+
+ if (browser.b.width < line_len)
+ browser.b.width = line_len;
+ rbpos = objdump_line__rb(pos);
+ rbpos->idx = browser.nr_entries++;
+ if (pos->offset != -1)
+ rbpos->idx_asm = browser.nr_asm_entries++;
+ else
+ rbpos->idx_asm = -1;
+ }
+
+ browser.b.nr_entries = browser.nr_entries;
+ browser.b.entries = &notes->src->source;
+ browser.b.width += 18; /* Percentage */
+ ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs);
+ list_for_each_entry_safe(pos, n, &notes->src->source, node) {
+ list_del(&pos->node);
+ objdump_line__free(pos);
+ }
+ return ret;
+}
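
One layout detail worth spelling out: symbol__tui_annotate() passes sizeof(struct objdump_line_rb_node) as extra per-line space to symbol__annotate(), and objdump_line__rb() simply returns the address just past the struct objdump_line, so each disassembly line and its browser-private rb node share a single allocation. A stand-alone toy version of the same trick, with purely hypothetical names, would be:

    #include <stdlib.h>
    #include <string.h>

    struct line { char *text; };              /* the generic part */
    struct line_priv { double percent; };     /* the browser-private part */

    /* one allocation: the private data lives right after the line */
    static struct line *line__new(const char *text, size_t privsize)
    {
            struct line *l = calloc(1, sizeof(*l) + privsize);

            if (l != NULL)
                    l->text = strdup(text);
            return l;
    }

    /* same idea as objdump_line__rb(): the private part is at (l + 1) */
    static struct line_priv *line__priv(struct line *l)
    {
            return (struct line_priv *)(l + 1);
    }
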
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/browsers/hists.c b/ANDROID_3.4.5/tools/perf/util/ui/browsers/hists.c
new file mode 100644
index 00000000..2f83e5dc
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/browsers/hists.c
@@ -0,0 +1,1341 @@
+#include <stdio.h>
+#include "../libslang.h"
+#include <stdlib.h>
+#include <string.h>
+#include <newt.h>
+#include <linux/rbtree.h>
+
+#include "../../evsel.h"
+#include "../../evlist.h"
+#include "../../hist.h"
+#include "../../pstack.h"
+#include "../../sort.h"
+#include "../../util.h"
+
+#include "../browser.h"
+#include "../helpline.h"
+#include "../util.h"
+#include "../ui.h"
+#include "map.h"
+
+struct hist_browser {
+ struct ui_browser b;
+ struct hists *hists;
+ struct hist_entry *he_selection;
+ struct map_symbol *selection;
+ bool has_symbols;
+};
+
+static int hists__browser_title(struct hists *self, char *bf, size_t size,
+ const char *ev_name);
+
+static void hist_browser__refresh_dimensions(struct hist_browser *self)
+{
+ /* 3 == +/- toggle symbol before actual hist_entry rendering */
+ self->b.width = 3 + (hists__sort_list_width(self->hists) +
+ sizeof("[k]"));
+}
+
+static void hist_browser__reset(struct hist_browser *self)
+{
+ self->b.nr_entries = self->hists->nr_entries;
+ hist_browser__refresh_dimensions(self);
+ ui_browser__reset_index(&self->b);
+}
+
+static char tree__folded_sign(bool unfolded)
+{
+ return unfolded ? '-' : '+';
+}
+
+static char map_symbol__folded(const struct map_symbol *self)
+{
+ return self->has_children ? tree__folded_sign(self->unfolded) : ' ';
+}
+
+static char hist_entry__folded(const struct hist_entry *self)
+{
+ return map_symbol__folded(&self->ms);
+}
+
+static char callchain_list__folded(const struct callchain_list *self)
+{
+ return map_symbol__folded(&self->ms);
+}
+
+static void map_symbol__set_folding(struct map_symbol *self, bool unfold)
+{
+ self->unfolded = unfold ? self->has_children : false;
+}
+
+static int callchain_node__count_rows_rb_tree(struct callchain_node *self)
+{
+ int n = 0;
+ struct rb_node *nd;
+
+ for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+ struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
+ struct callchain_list *chain;
+ char folded_sign = ' '; /* No children */
+
+ list_for_each_entry(chain, &child->val, list) {
+ ++n;
+ /* We need this because we may not have children */
+ folded_sign = callchain_list__folded(chain);
+ if (folded_sign == '+')
+ break;
+ }
+
+ if (folded_sign == '-') /* Have children and they're unfolded */
+ n += callchain_node__count_rows_rb_tree(child);
+ }
+
+ return n;
+}
+
+static int callchain_node__count_rows(struct callchain_node *node)
+{
+ struct callchain_list *chain;
+ bool unfolded = false;
+ int n = 0;
+
+ list_for_each_entry(chain, &node->val, list) {
+ ++n;
+ unfolded = chain->ms.unfolded;
+ }
+
+ if (unfolded)
+ n += callchain_node__count_rows_rb_tree(node);
+
+ return n;
+}
+
+static int callchain__count_rows(struct rb_root *chain)
+{
+ struct rb_node *nd;
+ int n = 0;
+
+ for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
+ struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
+ n += callchain_node__count_rows(node);
+ }
+
+ return n;
+}
+
+static bool map_symbol__toggle_fold(struct map_symbol *self)
+{
+ if (!self)
+ return false;
+
+ if (!self->has_children)
+ return false;
+
+ self->unfolded = !self->unfolded;
+ return true;
+}
+
+static void callchain_node__init_have_children_rb_tree(struct callchain_node *self)
+{
+ struct rb_node *nd = rb_first(&self->rb_root);
+
+ for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+ struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
+ struct callchain_list *chain;
+ bool first = true;
+
+ list_for_each_entry(chain, &child->val, list) {
+ if (first) {
+ first = false;
+ chain->ms.has_children = chain->list.next != &child->val ||
+ !RB_EMPTY_ROOT(&child->rb_root);
+ } else
+ chain->ms.has_children = chain->list.next == &child->val &&
+ !RB_EMPTY_ROOT(&child->rb_root);
+ }
+
+ callchain_node__init_have_children_rb_tree(child);
+ }
+}
+
+static void callchain_node__init_have_children(struct callchain_node *self)
+{
+ struct callchain_list *chain;
+
+ list_for_each_entry(chain, &self->val, list)
+ chain->ms.has_children = !RB_EMPTY_ROOT(&self->rb_root);
+
+ callchain_node__init_have_children_rb_tree(self);
+}
+
+static void callchain__init_have_children(struct rb_root *self)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+ struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
+ callchain_node__init_have_children(node);
+ }
+}
+
+static void hist_entry__init_have_children(struct hist_entry *self)
+{
+ if (!self->init_have_children) {
+ self->ms.has_children = !RB_EMPTY_ROOT(&self->sorted_chain);
+ callchain__init_have_children(&self->sorted_chain);
+ self->init_have_children = true;
+ }
+}
+
+static bool hist_browser__toggle_fold(struct hist_browser *self)
+{
+ if (map_symbol__toggle_fold(self->selection)) {
+ struct hist_entry *he = self->he_selection;
+
+ hist_entry__init_have_children(he);
+ self->hists->nr_entries -= he->nr_rows;
+
+ if (he->ms.unfolded)
+ he->nr_rows = callchain__count_rows(&he->sorted_chain);
+ else
+ he->nr_rows = 0;
+ self->hists->nr_entries += he->nr_rows;
+ self->b.nr_entries = self->hists->nr_entries;
+
+ return true;
+ }
+
+ /* If it doesn't have children, no toggling performed */
+ return false;
+}
+
+static int callchain_node__set_folding_rb_tree(struct callchain_node *self, bool unfold)
+{
+ int n = 0;
+ struct rb_node *nd;
+
+ for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+ struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
+ struct callchain_list *chain;
+ bool has_children = false;
+
+ list_for_each_entry(chain, &child->val, list) {
+ ++n;
+ map_symbol__set_folding(&chain->ms, unfold);
+ has_children = chain->ms.has_children;
+ }
+
+ if (has_children)
+ n += callchain_node__set_folding_rb_tree(child, unfold);
+ }
+
+ return n;
+}
+
+static int callchain_node__set_folding(struct callchain_node *node, bool unfold)
+{
+ struct callchain_list *chain;
+ bool has_children = false;
+ int n = 0;
+
+ list_for_each_entry(chain, &node->val, list) {
+ ++n;
+ map_symbol__set_folding(&chain->ms, unfold);
+ has_children = chain->ms.has_children;
+ }
+
+ if (has_children)
+ n += callchain_node__set_folding_rb_tree(node, unfold);
+
+ return n;
+}
+
+static int callchain__set_folding(struct rb_root *chain, bool unfold)
+{
+ struct rb_node *nd;
+ int n = 0;
+
+ for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
+ struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
+ n += callchain_node__set_folding(node, unfold);
+ }
+
+ return n;
+}
+
+static void hist_entry__set_folding(struct hist_entry *self, bool unfold)
+{
+ hist_entry__init_have_children(self);
+ map_symbol__set_folding(&self->ms, unfold);
+
+ if (self->ms.has_children) {
+ int n = callchain__set_folding(&self->sorted_chain, unfold);
+ self->nr_rows = unfold ? n : 0;
+ } else
+ self->nr_rows = 0;
+}
+
+static void hists__set_folding(struct hists *self, bool unfold)
+{
+ struct rb_node *nd;
+
+ self->nr_entries = 0;
+
+ for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
+ hist_entry__set_folding(he, unfold);
+ self->nr_entries += 1 + he->nr_rows;
+ }
+}
+
+static void hist_browser__set_folding(struct hist_browser *self, bool unfold)
+{
+ hists__set_folding(self->hists, unfold);
+ self->b.nr_entries = self->hists->nr_entries;
+ /* Go to the start, we may be way after valid entries after a collapse */
+ ui_browser__reset_index(&self->b);
+}
+
+static void ui_browser__warn_lost_events(struct ui_browser *browser)
+{
+ ui_browser__warning(browser, 4,
+ "Events are being lost, check IO/CPU overload!\n\n"
+ "You may want to run 'perf' using a RT scheduler policy:\n\n"
+ " perf top -r 80\n\n"
+ "Or reduce the sampling frequency.");
+}
+
+static int hist_browser__run(struct hist_browser *self, const char *ev_name,
+ void(*timer)(void *arg), void *arg, int delay_secs)
+{
+ int key;
+ char title[160];
+
+ self->b.entries = &self->hists->entries;
+ self->b.nr_entries = self->hists->nr_entries;
+
+ hist_browser__refresh_dimensions(self);
+ hists__browser_title(self->hists, title, sizeof(title), ev_name);
+
+ if (ui_browser__show(&self->b, title,
+ "Press '?' for help on key bindings") < 0)
+ return -1;
+
+ while (1) {
+ key = ui_browser__run(&self->b, delay_secs);
+
+ switch (key) {
+ case K_TIMER:
+ timer(arg);
+ ui_browser__update_nr_entries(&self->b, self->hists->nr_entries);
+
+ if (self->hists->stats.nr_lost_warned !=
+ self->hists->stats.nr_events[PERF_RECORD_LOST]) {
+ self->hists->stats.nr_lost_warned =
+ self->hists->stats.nr_events[PERF_RECORD_LOST];
+ ui_browser__warn_lost_events(&self->b);
+ }
+
+ hists__browser_title(self->hists, title, sizeof(title), ev_name);
+ ui_browser__show_title(&self->b, title);
+ continue;
+ case 'D': { /* Debug */
+ static int seq;
+ struct hist_entry *h = rb_entry(self->b.top,
+ struct hist_entry, rb_node);
+ ui_helpline__pop();
+ ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
+ seq++, self->b.nr_entries,
+ self->hists->nr_entries,
+ self->b.height,
+ self->b.index,
+ self->b.top_idx,
+ h->row_offset, h->nr_rows);
+ }
+ break;
+ case 'C':
+ /* Collapse the whole world. */
+ hist_browser__set_folding(self, false);
+ break;
+ case 'E':
+ /* Expand the whole world. */
+ hist_browser__set_folding(self, true);
+ break;
+ case K_ENTER:
+ if (hist_browser__toggle_fold(self))
+ break;
+ /* fall thru */
+ default:
+ goto out;
+ }
+ }
+out:
+ ui_browser__hide(&self->b);
+ return key;
+}
+
+static char *callchain_list__sym_name(struct callchain_list *self,
+ char *bf, size_t bfsize)
+{
+ if (self->ms.sym)
+ return self->ms.sym->name;
+
+ snprintf(bf, bfsize, "%#" PRIx64, self->ip);
+ return bf;
+}
+
+#define LEVEL_OFFSET_STEP 3
+
+static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
+ struct callchain_node *chain_node,
+ u64 total, int level,
+ unsigned short row,
+ off_t *row_offset,
+ bool *is_current_entry)
+{
+ struct rb_node *node;
+ int first_row = row, width, offset = level * LEVEL_OFFSET_STEP;
+ u64 new_total, remaining;
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ new_total = chain_node->children_hit;
+ else
+ new_total = total;
+
+ remaining = new_total;
+ node = rb_first(&chain_node->rb_root);
+ while (node) {
+ struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
+ struct rb_node *next = rb_next(node);
+ u64 cumul = callchain_cumul_hits(child);
+ struct callchain_list *chain;
+ char folded_sign = ' ';
+ bool first = true;
+ int extra_offset = 0;
+
+ remaining -= cumul;
+
+ list_for_each_entry(chain, &child->val, list) {
+ char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str;
+ const char *str;
+ int color;
+ bool was_first = first;
+
+ if (first)
+ first = false;
+ else
+ extra_offset = LEVEL_OFFSET_STEP;
+
+ folded_sign = callchain_list__folded(chain);
+ if (*row_offset != 0) {
+ --*row_offset;
+ goto do_next;
+ }
+
+ alloc_str = NULL;
+ str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ if (was_first) {
+ double percent = cumul * 100.0 / new_total;
+
+ if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0)
+ str = "Not enough memory!";
+ else
+ str = alloc_str;
+ }
+
+ color = HE_COLORSET_NORMAL;
+ width = self->b.width - (offset + extra_offset + 2);
+ if (ui_browser__is_current_entry(&self->b, row)) {
+ self->selection = &chain->ms;
+ color = HE_COLORSET_SELECTED;
+ *is_current_entry = true;
+ }
+
+ ui_browser__set_color(&self->b, color);
+ ui_browser__gotorc(&self->b, row, 0);
+ slsmg_write_nstring(" ", offset + extra_offset);
+ slsmg_printf("%c ", folded_sign);
+ slsmg_write_nstring(str, width);
+ free(alloc_str);
+
+ if (++row == self->b.height)
+ goto out;
+do_next:
+ if (folded_sign == '+')
+ break;
+ }
+
+ if (folded_sign == '-') {
+ const int new_level = level + (extra_offset ? 2 : 1);
+ row += hist_browser__show_callchain_node_rb_tree(self, child, new_total,
+ new_level, row, row_offset,
+ is_current_entry);
+ }
+ if (row == self->b.height)
+ goto out;
+ node = next;
+ }
+out:
+ return row - first_row;
+}
+
+static int hist_browser__show_callchain_node(struct hist_browser *self,
+ struct callchain_node *node,
+ int level, unsigned short row,
+ off_t *row_offset,
+ bool *is_current_entry)
+{
+ struct callchain_list *chain;
+ int first_row = row,
+ offset = level * LEVEL_OFFSET_STEP,
+ width = self->b.width - offset;
+ char folded_sign = ' ';
+
+ list_for_each_entry(chain, &node->val, list) {
+ char ipstr[BITS_PER_LONG / 4 + 1], *s;
+ int color;
+
+ folded_sign = callchain_list__folded(chain);
+
+ if (*row_offset != 0) {
+ --*row_offset;
+ continue;
+ }
+
+ color = HE_COLORSET_NORMAL;
+ if (ui_browser__is_current_entry(&self->b, row)) {
+ self->selection = &chain->ms;
+ color = HE_COLORSET_SELECTED;
+ *is_current_entry = true;
+ }
+
+ s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ ui_browser__gotorc(&self->b, row, 0);
+ ui_browser__set_color(&self->b, color);
+ slsmg_write_nstring(" ", offset);
+ slsmg_printf("%c ", folded_sign);
+ slsmg_write_nstring(s, width - 2);
+
+ if (++row == self->b.height)
+ goto out;
+ }
+
+ if (folded_sign == '-')
+ row += hist_browser__show_callchain_node_rb_tree(self, node,
+ self->hists->stats.total_period,
+ level + 1, row,
+ row_offset,
+ is_current_entry);
+out:
+ return row - first_row;
+}
+
+static int hist_browser__show_callchain(struct hist_browser *self,
+ struct rb_root *chain,
+ int level, unsigned short row,
+ off_t *row_offset,
+ bool *is_current_entry)
+{
+ struct rb_node *nd;
+ int first_row = row;
+
+ for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
+ struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
+
+ row += hist_browser__show_callchain_node(self, node, level,
+ row, row_offset,
+ is_current_entry);
+ if (row == self->b.height)
+ break;
+ }
+
+ return row - first_row;
+}
+
+static int hist_browser__show_entry(struct hist_browser *self,
+ struct hist_entry *entry,
+ unsigned short row)
+{
+ char s[256];
+ double percent;
+ int printed = 0;
+ int width = self->b.width - 6; /* The percentage */
+ char folded_sign = ' ';
+ bool current_entry = ui_browser__is_current_entry(&self->b, row);
+ off_t row_offset = entry->row_offset;
+
+ if (current_entry) {
+ self->he_selection = entry;
+ self->selection = &entry->ms;
+ }
+
+ if (symbol_conf.use_callchain) {
+ hist_entry__init_have_children(entry);
+ folded_sign = hist_entry__folded(entry);
+ }
+
+ if (row_offset == 0) {
+ hist_entry__snprintf(entry, s, sizeof(s), self->hists);
+ percent = (entry->period * 100.0) / self->hists->stats.total_period;
+
+ ui_browser__set_percent_color(&self->b, percent, current_entry);
+ ui_browser__gotorc(&self->b, row, 0);
+ if (symbol_conf.use_callchain) {
+ slsmg_printf("%c ", folded_sign);
+ width -= 2;
+ }
+
+ slsmg_printf(" %5.2f%%", percent);
+
+ /* The scroll bar isn't being used */
+ if (!self->b.navkeypressed)
+ width += 1;
+
+ if (!current_entry || !self->b.navkeypressed)
+ ui_browser__set_color(&self->b, HE_COLORSET_NORMAL);
+
+ if (symbol_conf.show_nr_samples) {
+ slsmg_printf(" %11u", entry->nr_events);
+ width -= 12;
+ }
+
+ if (symbol_conf.show_total_period) {
+ slsmg_printf(" %12" PRIu64, entry->period);
+ width -= 13;
+ }
+
+ slsmg_write_nstring(s, width);
+ ++row;
+ ++printed;
+ } else
+ --row_offset;
+
+ if (folded_sign == '-' && row != self->b.height) {
+ printed += hist_browser__show_callchain(self, &entry->sorted_chain,
+ 1, row, &row_offset,
+ &current_entry);
+ if (current_entry)
+ self->he_selection = entry;
+ }
+
+ return printed;
+}
+
+static void ui_browser__hists_init_top(struct ui_browser *browser)
+{
+ if (browser->top == NULL) {
+ struct hist_browser *hb;
+
+ hb = container_of(browser, struct hist_browser, b);
+ browser->top = rb_first(&hb->hists->entries);
+ }
+}
+
+static unsigned int hist_browser__refresh(struct ui_browser *self)
+{
+ unsigned row = 0;
+ struct rb_node *nd;
+ struct hist_browser *hb = container_of(self, struct hist_browser, b);
+
+ ui_browser__hists_init_top(self);
+
+ for (nd = self->top; nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (h->filtered)
+ continue;
+
+ row += hist_browser__show_entry(hb, h, row);
+ if (row == self->height)
+ break;
+ }
+
+ return row;
+}
+
+static struct rb_node *hists__filter_entries(struct rb_node *nd)
+{
+ while (nd != NULL) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+ if (!h->filtered)
+ return nd;
+
+ nd = rb_next(nd);
+ }
+
+ return NULL;
+}
+
+static struct rb_node *hists__filter_prev_entries(struct rb_node *nd)
+{
+ while (nd != NULL) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+ if (!h->filtered)
+ return nd;
+
+ nd = rb_prev(nd);
+ }
+
+ return NULL;
+}
+
+static void ui_browser__hists_seek(struct ui_browser *self,
+ off_t offset, int whence)
+{
+ struct hist_entry *h;
+ struct rb_node *nd;
+ bool first = true;
+
+ if (self->nr_entries == 0)
+ return;
+
+ ui_browser__hists_init_top(self);
+
+ switch (whence) {
+ case SEEK_SET:
+ nd = hists__filter_entries(rb_first(self->entries));
+ break;
+ case SEEK_CUR:
+ nd = self->top;
+ goto do_offset;
+ case SEEK_END:
+ nd = hists__filter_prev_entries(rb_last(self->entries));
+ first = false;
+ break;
+ default:
+ return;
+ }
+
+ /*
+ * Moves not relative to the first visible entry invalidate its
+ * row_offset:
+ */
+ h = rb_entry(self->top, struct hist_entry, rb_node);
+ h->row_offset = 0;
+
+ /*
+ * Here we have to check if nd is expanded (+); if it is, we can't go to
+ * the next top level hist_entry, instead we must compute an offset of
+ * what _not_ to show and not change the first visible entry.
+ *
+ * This offset increases when we are going from top to bottom and
+ * decreases when we're going from bottom to top.
+ *
+ * As we don't have backpointers to the top level in the callchains
+ * structure, we need to always print the whole hist_entry callchain,
+ * skipping the first ones that are before the first visible entry
+ * and stop when we printed enough lines to fill the screen.
+ */
+do_offset:
+ if (offset > 0) {
+ do {
+ h = rb_entry(nd, struct hist_entry, rb_node);
+ if (h->ms.unfolded) {
+ u16 remaining = h->nr_rows - h->row_offset;
+ if (offset > remaining) {
+ offset -= remaining;
+ h->row_offset = 0;
+ } else {
+ h->row_offset += offset;
+ offset = 0;
+ self->top = nd;
+ break;
+ }
+ }
+ nd = hists__filter_entries(rb_next(nd));
+ if (nd == NULL)
+ break;
+ --offset;
+ self->top = nd;
+ } while (offset != 0);
+ } else if (offset < 0) {
+ while (1) {
+ h = rb_entry(nd, struct hist_entry, rb_node);
+ if (h->ms.unfolded) {
+ if (first) {
+ if (-offset > h->row_offset) {
+ offset += h->row_offset;
+ h->row_offset = 0;
+ } else {
+ h->row_offset += offset;
+ offset = 0;
+ self->top = nd;
+ break;
+ }
+ } else {
+ if (-offset > h->nr_rows) {
+ offset += h->nr_rows;
+ h->row_offset = 0;
+ } else {
+ h->row_offset = h->nr_rows + offset;
+ offset = 0;
+ self->top = nd;
+ break;
+ }
+ }
+ }
+
+ nd = hists__filter_prev_entries(rb_prev(nd));
+ if (nd == NULL)
+ break;
+ ++offset;
+ self->top = nd;
+ if (offset == 0) {
+ /*
+ * Last unfiltered hist_entry, check if it is
+ * unfolded, if it is then we should have
+ * row_offset at its last entry.
+ */
+ h = rb_entry(nd, struct hist_entry, rb_node);
+ if (h->ms.unfolded)
+ h->row_offset = h->nr_rows;
+ break;
+ }
+ first = false;
+ }
+ } else {
+ self->top = nd;
+ h = rb_entry(nd, struct hist_entry, rb_node);
+ h->row_offset = 0;
+ }
+}
+
+static struct hist_browser *hist_browser__new(struct hists *hists)
+{
+ struct hist_browser *self = zalloc(sizeof(*self));
+
+ if (self) {
+ self->hists = hists;
+ self->b.refresh = hist_browser__refresh;
+ self->b.seek = ui_browser__hists_seek;
+ self->b.use_navkeypressed = true;
+ if (sort__branch_mode == 1)
+ self->has_symbols = sort_sym_from.list.next != NULL;
+ else
+ self->has_symbols = sort_sym.list.next != NULL;
+ }
+
+ return self;
+}
+
+static void hist_browser__delete(struct hist_browser *self)
+{
+ free(self);
+}
+
+static struct hist_entry *hist_browser__selected_entry(struct hist_browser *self)
+{
+ return self->he_selection;
+}
+
+static struct thread *hist_browser__selected_thread(struct hist_browser *self)
+{
+ return self->he_selection->thread;
+}
+
+static int hists__browser_title(struct hists *self, char *bf, size_t size,
+ const char *ev_name)
+{
+ char unit;
+ int printed;
+ const struct dso *dso = self->dso_filter;
+ const struct thread *thread = self->thread_filter;
+ unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE];
+
+ nr_events = convert_unit(nr_events, &unit);
+ printed = scnprintf(bf, size, "Events: %lu%c %s", nr_events, unit, ev_name);
+
+ if (self->uid_filter_str)
+ printed += scnprintf(bf + printed, size - printed,
+ ", UID: %s", self->uid_filter_str);
+ if (thread)
+ printed += scnprintf(bf + printed, size - printed,
+ ", Thread: %s(%d)",
+ (thread->comm_set ? thread->comm : ""),
+ thread->pid);
+ if (dso)
+ printed += scnprintf(bf + printed, size - printed,
+ ", DSO: %s", dso->short_name);
+ return printed;
+}
+
+static inline void free_popup_options(char **options, int n)
+{
+ int i;
+
+ for (i = 0; i < n; ++i) {
+ free(options[i]);
+ options[i] = NULL;
+ }
+}
+
+static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
+ const char *helpline, const char *ev_name,
+ bool left_exits,
+ void(*timer)(void *arg), void *arg,
+ int delay_secs)
+{
+ struct hists *self = &evsel->hists;
+ struct hist_browser *browser = hist_browser__new(self);
+ struct branch_info *bi;
+ struct pstack *fstack;
+ char *options[16];
+ int nr_options = 0;
+ int key = -1;
+ char buf[64];
+
+ if (browser == NULL)
+ return -1;
+
+ fstack = pstack__new(2);
+ if (fstack == NULL)
+ goto out;
+
+ ui_helpline__push(helpline);
+
+ memset(options, 0, sizeof(options));
+
+ while (1) {
+ const struct thread *thread = NULL;
+ const struct dso *dso = NULL;
+ int choice = 0,
+ annotate = -2, zoom_dso = -2, zoom_thread = -2,
+ annotate_f = -2, annotate_t = -2, browse_map = -2;
+
+ nr_options = 0;
+
+ key = hist_browser__run(browser, ev_name, timer, arg, delay_secs);
+
+ if (browser->he_selection != NULL) {
+ thread = hist_browser__selected_thread(browser);
+ dso = browser->selection->map ? browser->selection->map->dso : NULL;
+ }
+ switch (key) {
+ case K_TAB:
+ case K_UNTAB:
+ if (nr_events == 1)
+ continue;
+ /*
+ * Exit the browser, let hists__browser_tree
+ * go to the next or previous event
+ */
+ goto out_free_stack;
+ case 'a':
+ if (!browser->has_symbols) {
+ ui_browser__warning(&browser->b, delay_secs * 2,
+ "Annotation is only available for symbolic views, "
+ "include \"sym*\" in --sort to use it.");
+ continue;
+ }
+
+ if (browser->selection == NULL ||
+ browser->selection->sym == NULL ||
+ browser->selection->map->dso->annotate_warned)
+ continue;
+ goto do_annotate;
+ case 'd':
+ goto zoom_dso;
+ case 't':
+ goto zoom_thread;
+ case 's':
+ if (ui_browser__input_window("Symbol to show",
+ "Please enter the name of symbol you want to see",
+ buf, "ENTER: OK, ESC: Cancel",
+ delay_secs * 2) == K_ENTER) {
+ self->symbol_filter_str = *buf ? buf : NULL;
+ hists__filter_by_symbol(self);
+ hist_browser__reset(browser);
+ }
+ continue;
+ case K_F1:
+ case 'h':
+ case '?':
+ ui_browser__help_window(&browser->b,
+ "h/?/F1 Show this window\n"
+ "UP/DOWN/PGUP\n"
+ "PGDN/SPACE Navigate\n"
+ "q/ESC/CTRL+C Exit browser\n\n"
+ "For multiple event sessions:\n\n"
+ "TAB/UNTAB Switch events\n\n"
+ "For symbolic views (--sort has sym):\n\n"
+ "-> Zoom into DSO/Threads & Annotate current symbol\n"
+ "<- Zoom out\n"
+ "a Annotate current symbol\n"
+ "C Collapse all callchains\n"
+ "E Expand all callchains\n"
+ "d Zoom into current DSO\n"
+ "t Zoom into current Thread\n"
+ "s Filter symbol by name");
+ continue;
+ case K_ENTER:
+ case K_RIGHT:
+ /* menu */
+ break;
+ case K_LEFT: {
+ const void *top;
+
+ if (pstack__empty(fstack)) {
+ /*
+ * Go back to the perf_evsel_menu__run or other user
+ */
+ if (left_exits)
+ goto out_free_stack;
+ continue;
+ }
+ top = pstack__pop(fstack);
+ if (top == &browser->hists->dso_filter)
+ goto zoom_out_dso;
+ if (top == &browser->hists->thread_filter)
+ goto zoom_out_thread;
+ continue;
+ }
+ case K_ESC:
+ if (!left_exits &&
+ !ui_browser__dialog_yesno(&browser->b,
+ "Do you really want to exit?"))
+ continue;
+ /* Fall thru */
+ case 'q':
+ case CTRL('c'):
+ goto out_free_stack;
+ default:
+ continue;
+ }
+
+ if (!browser->has_symbols)
+ goto add_exit_option;
+
+ if (sort__branch_mode == 1) {
+ bi = browser->he_selection->branch_info;
+ if (browser->selection != NULL &&
+ bi &&
+ bi->from.sym != NULL &&
+ !bi->from.map->dso->annotate_warned &&
+ asprintf(&options[nr_options], "Annotate %s",
+ bi->from.sym->name) > 0)
+ annotate_f = nr_options++;
+
+ if (browser->selection != NULL &&
+ bi &&
+ bi->to.sym != NULL &&
+ !bi->to.map->dso->annotate_warned &&
+ (bi->to.sym != bi->from.sym ||
+ bi->to.map->dso != bi->from.map->dso) &&
+ asprintf(&options[nr_options], "Annotate %s",
+ bi->to.sym->name) > 0)
+ annotate_t = nr_options++;
+ } else {
+
+ if (browser->selection != NULL &&
+ browser->selection->sym != NULL &&
+ !browser->selection->map->dso->annotate_warned &&
+ asprintf(&options[nr_options], "Annotate %s",
+ browser->selection->sym->name) > 0)
+ annotate = nr_options++;
+ }
+
+ if (thread != NULL &&
+ asprintf(&options[nr_options], "Zoom %s %s(%d) thread",
+ (browser->hists->thread_filter ? "out of" : "into"),
+ (thread->comm_set ? thread->comm : ""),
+ thread->pid) > 0)
+ zoom_thread = nr_options++;
+
+ if (dso != NULL &&
+ asprintf(&options[nr_options], "Zoom %s %s DSO",
+ (browser->hists->dso_filter ? "out of" : "into"),
+ (dso->kernel ? "the Kernel" : dso->short_name)) > 0)
+ zoom_dso = nr_options++;
+
+ if (browser->selection != NULL &&
+ browser->selection->map != NULL &&
+ asprintf(&options[nr_options], "Browse map details") > 0)
+ browse_map = nr_options++;
+add_exit_option:
+ options[nr_options++] = (char *)"Exit";
+retry_popup_menu:
+ choice = ui__popup_menu(nr_options, options);
+
+ if (choice == nr_options - 1)
+ break;
+
+ if (choice == -1) {
+ free_popup_options(options, nr_options - 1);
+ continue;
+ }
+
+ if (choice == annotate || choice == annotate_t || choice == annotate_f) {
+ struct hist_entry *he;
+ int err;
+do_annotate:
+ he = hist_browser__selected_entry(browser);
+ if (he == NULL)
+ continue;
+
+ /*
+ * we stash the branch_info symbol + map into the ms so we don't
+ * have to rewrite all the annotation code to use branch_info.
+ * In branch mode, the ms struct is not used.
+ */
+ if (choice == annotate_f) {
+ he->ms.sym = he->branch_info->from.sym;
+ he->ms.map = he->branch_info->from.map;
+ } else if (choice == annotate_t) {
+ he->ms.sym = he->branch_info->to.sym;
+ he->ms.map = he->branch_info->to.map;
+ }
+
+ /*
+ * Don't let this be freed, say, by hists__decay_entry.
+ */
+ he->used = true;
+ err = hist_entry__tui_annotate(he, evsel->idx,
+ timer, arg, delay_secs);
+ he->used = false;
+ /*
+ * offer the option to annotate the other branch source or target
+ * (if they exist) when returning from annotate
+ */
+ if ((err == 'q' || err == CTRL('c'))
+ && annotate_t != -2 && annotate_f != -2)
+ goto retry_popup_menu;
+
+ ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
+ if (err)
+ ui_browser__handle_resize(&browser->b);
+
+ } else if (choice == browse_map)
+ map__browse(browser->selection->map);
+ else if (choice == zoom_dso) {
+zoom_dso:
+ if (browser->hists->dso_filter) {
+ pstack__remove(fstack, &browser->hists->dso_filter);
+zoom_out_dso:
+ ui_helpline__pop();
+ browser->hists->dso_filter = NULL;
+ sort_dso.elide = false;
+ } else {
+ if (dso == NULL)
+ continue;
+ ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"",
+ dso->kernel ? "the Kernel" : dso->short_name);
+ browser->hists->dso_filter = dso;
+ sort_dso.elide = true;
+ pstack__push(fstack, &browser->hists->dso_filter);
+ }
+ hists__filter_by_dso(self);
+ hist_browser__reset(browser);
+ } else if (choice == zoom_thread) {
+zoom_thread:
+ if (browser->hists->thread_filter) {
+ pstack__remove(fstack, &browser->hists->thread_filter);
+zoom_out_thread:
+ ui_helpline__pop();
+ browser->hists->thread_filter = NULL;
+ sort_thread.elide = false;
+ } else {
+ ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"",
+ thread->comm_set ? thread->comm : "",
+ thread->pid);
+ browser->hists->thread_filter = thread;
+ sort_thread.elide = true;
+ pstack__push(fstack, &browser->hists->thread_filter);
+ }
+ hists__filter_by_thread(self);
+ hist_browser__reset(browser);
+ }
+ }
+out_free_stack:
+ pstack__delete(fstack);
+out:
+ hist_browser__delete(browser);
+ free_popup_options(options, nr_options - 1);
+ return key;
+}
+
+struct perf_evsel_menu {
+ struct ui_browser b;
+ struct perf_evsel *selection;
+ bool lost_events, lost_events_warned;
+};
+
+static void perf_evsel_menu__write(struct ui_browser *browser,
+ void *entry, int row)
+{
+ struct perf_evsel_menu *menu = container_of(browser,
+ struct perf_evsel_menu, b);
+ struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node);
+ bool current_entry = ui_browser__is_current_entry(browser, row);
+ unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE];
+ const char *ev_name = event_name(evsel);
+ char bf[256], unit;
+ const char *warn = " ";
+ size_t printed;
+
+ ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
+ HE_COLORSET_NORMAL);
+
+ nr_events = convert_unit(nr_events, &unit);
+ printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
+ unit, unit == ' ' ? "" : " ", ev_name);
+ slsmg_printf("%s", bf);
+
+ nr_events = evsel->hists.stats.nr_events[PERF_RECORD_LOST];
+ if (nr_events != 0) {
+ menu->lost_events = true;
+ if (!current_entry)
+ ui_browser__set_color(browser, HE_COLORSET_TOP);
+ nr_events = convert_unit(nr_events, &unit);
+ printed += scnprintf(bf, sizeof(bf), ": %ld%c%schunks LOST!",
+ nr_events, unit, unit == ' ' ? "" : " ");
+ warn = bf;
+ }
+
+ slsmg_write_nstring(warn, browser->width - printed);
+
+ if (current_entry)
+ menu->selection = evsel;
+}
+
+static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
+ int nr_events, const char *help,
+ void(*timer)(void *arg), void *arg, int delay_secs)
+{
+ struct perf_evlist *evlist = menu->b.priv;
+ struct perf_evsel *pos;
+ const char *ev_name, *title = "Available samples";
+ int key;
+
+ if (ui_browser__show(&menu->b, title,
+ "ESC: exit, ENTER|->: Browse histograms") < 0)
+ return -1;
+
+ while (1) {
+ key = ui_browser__run(&menu->b, delay_secs);
+
+ switch (key) {
+ case K_TIMER:
+ timer(arg);
+
+ if (!menu->lost_events_warned && menu->lost_events) {
+ ui_browser__warn_lost_events(&menu->b);
+ menu->lost_events_warned = true;
+ }
+ continue;
+ case K_RIGHT:
+ case K_ENTER:
+ if (!menu->selection)
+ continue;
+ pos = menu->selection;
+browse_hists:
+ perf_evlist__set_selected(evlist, pos);
+ /*
+ * Give the calling tool a chance to populate the resorted
+ * hists tree of the newly selected (non-default) evsel.
+ */
+ if (timer)
+ timer(arg);
+ ev_name = event_name(pos);
+ key = perf_evsel__hists_browse(pos, nr_events, help,
+ ev_name, true, timer,
+ arg, delay_secs);
+ ui_browser__show_title(&menu->b, title);
+ switch (key) {
+ case K_TAB:
+ if (pos->node.next == &evlist->entries)
+ pos = list_entry(evlist->entries.next, struct perf_evsel, node);
+ else
+ pos = list_entry(pos->node.next, struct perf_evsel, node);
+ goto browse_hists;
+ case K_UNTAB:
+ if (pos->node.prev == &evlist->entries)
+ pos = list_entry(evlist->entries.prev, struct perf_evsel, node);
+ else
+ pos = list_entry(pos->node.prev, struct perf_evsel, node);
+ goto browse_hists;
+ case K_ESC:
+ if (!ui_browser__dialog_yesno(&menu->b,
+ "Do you really want to exit?"))
+ continue;
+ /* Fall thru */
+ case 'q':
+ case CTRL('c'):
+ goto out;
+ default:
+ continue;
+ }
+ case K_LEFT:
+ continue;
+ case K_ESC:
+ if (!ui_browser__dialog_yesno(&menu->b,
+ "Do you really want to exit?"))
+ continue;
+ /* Fall thru */
+ case 'q':
+ case CTRL('c'):
+ goto out;
+ default:
+ continue;
+ }
+ }
+
+out:
+ ui_browser__hide(&menu->b);
+ return key;
+}
+
+static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
+ const char *help,
+ void(*timer)(void *arg), void *arg,
+ int delay_secs)
+{
+ struct perf_evsel *pos;
+ struct perf_evsel_menu menu = {
+ .b = {
+ .entries = &evlist->entries,
+ .refresh = ui_browser__list_head_refresh,
+ .seek = ui_browser__list_head_seek,
+ .write = perf_evsel_menu__write,
+ .nr_entries = evlist->nr_entries,
+ .priv = evlist,
+ },
+ };
+
+ ui_helpline__push("Press ESC to exit");
+
+ list_for_each_entry(pos, &evlist->entries, node) {
+ const char *ev_name = event_name(pos);
+ size_t line_len = strlen(ev_name) + 7;
+
+ if (menu.b.width < line_len)
+ menu.b.width = line_len;
+ /*
+ * Cache the evsel name, tracepoints have a _high_ cost per
+ * event_name() call.
+ */
+ if (pos->name == NULL)
+ pos->name = strdup(ev_name);
+ }
+
+ return perf_evsel_menu__run(&menu, evlist->nr_entries, help, timer,
+ arg, delay_secs);
+}
+
+int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
+ void(*timer)(void *arg), void *arg,
+ int delay_secs)
+{
+
+ if (evlist->nr_entries == 1) {
+ struct perf_evsel *first = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+ const char *ev_name = event_name(first);
+ return perf_evsel__hists_browse(first, evlist->nr_entries, help,
+ ev_name, false, timer, arg,
+ delay_secs);
+ }
+
+ return __perf_evlist__tui_browse_hists(evlist, help,
+ timer, arg, delay_secs);
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/browsers/map.c b/ANDROID_3.4.5/tools/perf/util/ui/browsers/map.c
new file mode 100644
index 00000000..eca6575a
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/browsers/map.c
@@ -0,0 +1,154 @@
+#include "../libslang.h"
+#include <elf.h>
+#include <newt.h>
+#include <inttypes.h>
+#include <sys/ttydefaults.h>
+#include <string.h>
+#include <linux/bitops.h>
+#include "../../util.h"
+#include "../../debug.h"
+#include "../../symbol.h"
+#include "../browser.h"
+#include "../helpline.h"
+#include "map.h"
+
+static int ui_entry__read(const char *title, char *bf, size_t size, int width)
+{
+ struct newtExitStruct es;
+ newtComponent form, entry;
+ const char *result;
+ int err = -1;
+
+ newtCenteredWindow(width, 1, title);
+ form = newtForm(NULL, NULL, 0);
+ if (form == NULL)
+ return -1;
+
+ entry = newtEntry(0, 0, "0x", width, &result, NEWT_FLAG_SCROLL);
+ if (entry == NULL)
+ goto out_free_form;
+
+ newtFormAddComponent(form, entry);
+ newtFormAddHotKey(form, NEWT_KEY_ENTER);
+ newtFormAddHotKey(form, NEWT_KEY_ESCAPE);
+ newtFormAddHotKey(form, NEWT_KEY_LEFT);
+ newtFormAddHotKey(form, CTRL('c'));
+ newtFormRun(form, &es);
+
+ if (result != NULL) {
+ strncpy(bf, result, size);
+ err = 0;
+ }
+out_free_form:
+ newtPopWindow();
+ newtFormDestroy(form);
+ return err;
+}
+
+struct map_browser {
+ struct ui_browser b;
+ struct map *map;
+ u8 addrlen;
+};
+
+static void map_browser__write(struct ui_browser *self, void *nd, int row)
+{
+ struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
+ struct map_browser *mb = container_of(self, struct map_browser, b);
+ bool current_entry = ui_browser__is_current_entry(self, row);
+ int width;
+
+ ui_browser__set_percent_color(self, 0, current_entry);
+ slsmg_printf("%*" PRIx64 " %*" PRIx64 " %c ",
+ mb->addrlen, sym->start, mb->addrlen, sym->end,
+ sym->binding == STB_GLOBAL ? 'g' :
+ sym->binding == STB_LOCAL ? 'l' : 'w');
+ width = self->width - ((mb->addrlen * 2) + 4);
+ if (width > 0)
+ slsmg_write_nstring(sym->name, width);
+}
+
+/* FIXME uber-kludgy, see comment on cmd_report... */
+static u32 *symbol__browser_index(struct symbol *self)
+{
+ return ((void *)self) - sizeof(struct rb_node) - sizeof(u32);
+}
+
+static int map_browser__search(struct map_browser *self)
+{
+ char target[512];
+ struct symbol *sym;
+ int err = ui_entry__read("Search by name/addr", target, sizeof(target), 40);
+
+ if (err)
+ return err;
+
+ if (target[0] == '0' && tolower(target[1]) == 'x') {
+ u64 addr = strtoull(target, NULL, 16);
+ sym = map__find_symbol(self->map, addr, NULL);
+ } else
+ sym = map__find_symbol_by_name(self->map, target, NULL);
+
+ if (sym != NULL) {
+ u32 *idx = symbol__browser_index(sym);
+
+ self->b.top = &sym->rb_node;
+ self->b.index = self->b.top_idx = *idx;
+ } else
+ ui_helpline__fpush("%s not found!", target);
+
+ return 0;
+}
+
+static int map_browser__run(struct map_browser *self)
+{
+ int key;
+
+ if (ui_browser__show(&self->b, self->map->dso->long_name,
+ "Press <- or ESC to exit, %s / to search",
+ verbose ? "" : "restart with -v to use") < 0)
+ return -1;
+
+ while (1) {
+ key = ui_browser__run(&self->b, 0);
+
+ if (verbose && key == '/')
+ map_browser__search(self);
+ else
+ break;
+ }
+
+ ui_browser__hide(&self->b);
+ return key;
+}
+
+int map__browse(struct map *self)
+{
+ struct map_browser mb = {
+ .b = {
+ .entries = &self->dso->symbols[self->type],
+ .refresh = ui_browser__rb_tree_refresh,
+ .seek = ui_browser__rb_tree_seek,
+ .write = map_browser__write,
+ },
+ .map = self,
+ };
+ struct rb_node *nd;
+ char tmp[BITS_PER_LONG / 4];
+ u64 maxaddr = 0;
+
+ for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) {
+ struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+
+ if (maxaddr < pos->end)
+ maxaddr = pos->end;
+ if (verbose) {
+ u32 *idx = symbol__browser_index(pos);
+ *idx = mb.b.nr_entries;
+ }
+ ++mb.b.nr_entries;
+ }
+
+ mb.addrlen = snprintf(tmp, sizeof(tmp), "%" PRIx64, maxaddr);
+ return map_browser__run(&mb);
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/browsers/map.h b/ANDROID_3.4.5/tools/perf/util/ui/browsers/map.h
new file mode 100644
index 00000000..df8581a4
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/browsers/map.h
@@ -0,0 +1,6 @@
+#ifndef _PERF_UI_MAP_BROWSER_H_
+#define _PERF_UI_MAP_BROWSER_H_ 1
+struct map;
+
+int map__browse(struct map *self);
+#endif /* _PERF_UI_MAP_BROWSER_H_ */
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/helpline.c b/ANDROID_3.4.5/tools/perf/util/ui/helpline.c
new file mode 100644
index 00000000..2f950c26
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/helpline.c
@@ -0,0 +1,79 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../debug.h"
+#include "helpline.h"
+#include "ui.h"
+#include "libslang.h"
+
+void ui_helpline__pop(void)
+{
+}
+
+char ui_helpline__current[512];
+
+void ui_helpline__push(const char *msg)
+{
+ const size_t sz = sizeof(ui_helpline__current);
+
+ SLsmg_gotorc(SLtt_Screen_Rows - 1, 0);
+ SLsmg_set_color(0);
+ SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols);
+ SLsmg_refresh();
+ strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0';
+}
+
+void ui_helpline__vpush(const char *fmt, va_list ap)
+{
+ char *s;
+
+ if (vasprintf(&s, fmt, ap) < 0)
+ vfprintf(stderr, fmt, ap);
+ else {
+ ui_helpline__push(s);
+ free(s);
+ }
+}
+
+void ui_helpline__fpush(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ ui_helpline__vpush(fmt, ap);
+ va_end(ap);
+}
+
+void ui_helpline__puts(const char *msg)
+{
+ ui_helpline__pop();
+ ui_helpline__push(msg);
+}
+
+void ui_helpline__init(void)
+{
+ ui_helpline__puts(" ");
+}
+
+char ui_helpline__last_msg[1024];
+
+int ui_helpline__show_help(const char *format, va_list ap)
+{
+ int ret;
+ static int backlog;
+
+ pthread_mutex_lock(&ui__lock);
+ ret = vscnprintf(ui_helpline__last_msg + backlog,
+ sizeof(ui_helpline__last_msg) - backlog, format, ap);
+ backlog += ret;
+
+ if (ui_helpline__last_msg[backlog - 1] == '\n') {
+ ui_helpline__puts(ui_helpline__last_msg);
+ SLsmg_refresh();
+ backlog = 0;
+ }
+ pthread_mutex_unlock(&ui__lock);
+
+ return ret;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/helpline.h b/ANDROID_3.4.5/tools/perf/util/ui/helpline.h
new file mode 100644
index 00000000..7bab6b34
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/helpline.h
@@ -0,0 +1,16 @@
+#ifndef _PERF_UI_HELPLINE_H_
+#define _PERF_UI_HELPLINE_H_ 1
+
+#include <stdio.h>
+#include <stdarg.h>
+
+void ui_helpline__init(void);
+void ui_helpline__pop(void);
+void ui_helpline__push(const char *msg);
+void ui_helpline__vpush(const char *fmt, va_list ap);
+void ui_helpline__fpush(const char *fmt, ...);
+void ui_helpline__puts(const char *msg);
+
+extern char ui_helpline__current[];
+
+#endif /* _PERF_UI_HELPLINE_H_ */
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/keysyms.h b/ANDROID_3.4.5/tools/perf/util/ui/keysyms.h
new file mode 100644
index 00000000..809eca57
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/keysyms.h
@@ -0,0 +1,27 @@
+#ifndef _PERF_KEYSYMS_H_
+#define _PERF_KEYSYMS_H_ 1
+
+#include "libslang.h"
+
+#define K_DOWN SL_KEY_DOWN
+#define K_END SL_KEY_END
+#define K_ENTER '\r'
+#define K_ESC 033
+#define K_F1 SL_KEY_F(1)
+#define K_HOME SL_KEY_HOME
+#define K_LEFT SL_KEY_LEFT
+#define K_PGDN SL_KEY_NPAGE
+#define K_PGUP SL_KEY_PPAGE
+#define K_RIGHT SL_KEY_RIGHT
+#define K_TAB '\t'
+#define K_UNTAB SL_KEY_UNTAB
+#define K_UP SL_KEY_UP
+#define K_BKSPC 0x7f
+#define K_DEL SL_KEY_DELETE
+
+/* Not really keys */
+#define K_TIMER -1
+#define K_ERROR -2
+#define K_RESIZE -3
+
+#endif /* _PERF_KEYSYMS_H_ */
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/libslang.h b/ANDROID_3.4.5/tools/perf/util/ui/libslang.h
new file mode 100644
index 00000000..4d54b645
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/libslang.h
@@ -0,0 +1,29 @@
+#ifndef _PERF_UI_SLANG_H_
+#define _PERF_UI_SLANG_H_ 1
+/*
+ * slang versions <= 2.0.6 have a "#if HAVE_LONG_LONG" that breaks
+ * the build if it isn't defined. Use the equivalent one that glibc
+ * has in features.h.
+ */
+#include <features.h>
+#ifndef HAVE_LONG_LONG
+#define HAVE_LONG_LONG __GLIBC_HAVE_LONG_LONG
+#endif
+#include <slang.h>
+
+#if SLANG_VERSION < 20104
+#define slsmg_printf(msg, args...) \
+ SLsmg_printf((char *)(msg), ##args)
+#define slsmg_write_nstring(msg, len) \
+ SLsmg_write_nstring((char *)(msg), len)
+#define sltt_set_color(obj, name, fg, bg) \
+ SLtt_set_color(obj,(char *)(name), (char *)(fg), (char *)(bg))
+#else
+#define slsmg_printf SLsmg_printf
+#define slsmg_write_nstring SLsmg_write_nstring
+#define sltt_set_color SLtt_set_color
+#endif
+
+#define SL_KEY_UNTAB 0x1000
+
+#endif /* _PERF_UI_SLANG_H_ */
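Older slang releases (before 2.1.4) declare these entry points with plain char * arguments, so the lower-case wrapper macros cast string literals on the caller's behalf, while newer releases pass straight through. A small sketch of the intended call pattern, assuming an initialized slang screen; write_status() is a hypothetical helper:

    #include "util/ui/libslang.h"

    static void write_status(int row, const char *msg, int width)
    {
        SLsmg_gotorc(row, 0);
        slsmg_write_nstring(msg, width);  /* casts to (char *) on old slang */
        SLsmg_refresh();
    }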
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/progress.c b/ANDROID_3.4.5/tools/perf/util/ui/progress.c
new file mode 100644
index 00000000..13aa64e5
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/progress.c
@@ -0,0 +1,32 @@
+#include "../cache.h"
+#include "progress.h"
+#include "libslang.h"
+#include "ui.h"
+#include "browser.h"
+
+void ui_progress__update(u64 curr, u64 total, const char *title)
+{
+ int bar, y;
+ /*
+ * FIXME: We should have a per-UI-backend way of showing progress;
+ * stdio will just show a percentage as NN%, etc.
+ */
+ if (use_browser <= 0)
+ return;
+
+ if (total == 0)
+ return;
+
+ ui__refresh_dimensions(true);
+ pthread_mutex_lock(&ui__lock);
+ y = SLtt_Screen_Rows / 2 - 2;
+ SLsmg_set_color(0);
+ SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols);
+ SLsmg_gotorc(y++, 1);
+ SLsmg_write_string((char *)title);
+ SLsmg_set_color(HE_COLORSET_SELECTED);
+ bar = ((SLtt_Screen_Cols - 2) * curr) / total;
+ SLsmg_fill_region(y, 1, 1, bar, ' ');
+ SLsmg_refresh();
+ pthread_mutex_unlock(&ui__lock);
+}
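ui_progress__update() draws a boxed bar in the middle of the screen and is meant to be called repeatedly as curr approaches total; it silently returns when the TUI is inactive or total is zero, so callers need no guard. A minimal sketch of such a loop, assuming it is built inside this tree; the event count and loop body are hypothetical:

    #include "util/ui/progress.h"

    static void process_events(u64 nr_events)
    {
        u64 i;

        for (i = 0; i < nr_events; i++) {
            /* ... handle one event ... */
            ui_progress__update(i, nr_events, "Processing events...");
        }
    }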
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/progress.h b/ANDROID_3.4.5/tools/perf/util/ui/progress.h
new file mode 100644
index 00000000..d9c205b5
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/progress.h
@@ -0,0 +1,8 @@
+#ifndef _PERF_UI_PROGRESS_H_
+#define _PERF_UI_PROGRESS_H_ 1
+
+#include <../types.h>
+
+void ui_progress__update(u64 curr, u64 total, const char *title);
+
+#endif
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/setup.c b/ANDROID_3.4.5/tools/perf/util/ui/setup.c
new file mode 100644
index 00000000..85a69faa
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/setup.c
@@ -0,0 +1,155 @@
+#include <newt.h>
+#include <signal.h>
+#include <stdbool.h>
+
+#include "../cache.h"
+#include "../debug.h"
+#include "browser.h"
+#include "helpline.h"
+#include "ui.h"
+#include "util.h"
+#include "libslang.h"
+#include "keysyms.h"
+
+pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
+
+static volatile int ui__need_resize;
+
+void ui__refresh_dimensions(bool force)
+{
+ if (force || ui__need_resize) {
+ ui__need_resize = 0;
+ pthread_mutex_lock(&ui__lock);
+ SLtt_get_screen_size();
+ SLsmg_reinit_smg();
+ pthread_mutex_unlock(&ui__lock);
+ }
+}
+
+static void ui__sigwinch(int sig __used)
+{
+ ui__need_resize = 1;
+}
+
+static void ui__setup_sigwinch(void)
+{
+ static bool done;
+
+ if (done)
+ return;
+
+ done = true;
+ pthread__unblock_sigwinch();
+ signal(SIGWINCH, ui__sigwinch);
+}
+
+int ui__getch(int delay_secs)
+{
+ struct timeval timeout, *ptimeout = delay_secs ? &timeout : NULL;
+ fd_set read_set;
+ int err, key;
+
+ ui__setup_sigwinch();
+
+ FD_ZERO(&read_set);
+ FD_SET(0, &read_set);
+
+ if (delay_secs) {
+ timeout.tv_sec = delay_secs;
+ timeout.tv_usec = 0;
+ }
+
+ err = select(1, &read_set, NULL, NULL, ptimeout);
+
+ if (err == 0)
+ return K_TIMER;
+
+ if (err == -1) {
+ if (errno == EINTR)
+ return K_RESIZE;
+ return K_ERROR;
+ }
+
+ key = SLang_getkey();
+ if (key != K_ESC)
+ return key;
+
+ FD_ZERO(&read_set);
+ FD_SET(0, &read_set);
+ timeout.tv_sec = 0;
+ timeout.tv_usec = 20;
+ err = select(1, &read_set, NULL, NULL, &timeout);
+ if (err == 0)
+ return K_ESC;
+
+ SLang_ungetkey(key);
+ return SLkp_getkey();
+}
+
+static void newt_suspend(void *d __used)
+{
+ newtSuspend();
+ raise(SIGTSTP);
+ newtResume();
+}
+
+static int ui__init(void)
+{
+ int err = SLkp_init();
+
+ if (err < 0)
+ goto out;
+
+ SLkp_define_keysym((char *)"^(kB)", SL_KEY_UNTAB);
+out:
+ return err;
+}
+
+static void ui__exit(void)
+{
+ SLtt_set_cursor_visibility(1);
+ SLsmg_refresh();
+ SLsmg_reset_smg();
+ SLang_reset_tty();
+}
+
+static void ui__signal(int sig)
+{
+ ui__exit();
+ psignal(sig, "perf");
+ exit(0);
+}
+
+void setup_browser(bool fallback_to_pager)
+{
+ if (!isatty(1) || !use_browser || dump_trace) {
+ use_browser = 0;
+ if (fallback_to_pager)
+ setup_pager();
+ return;
+ }
+
+ use_browser = 1;
+ newtInit();
+ ui__init();
+ newtSetSuspendCallback(newt_suspend, NULL);
+ ui_helpline__init();
+ ui_browser__init();
+
+ signal(SIGSEGV, ui__signal);
+ signal(SIGFPE, ui__signal);
+ signal(SIGINT, ui__signal);
+ signal(SIGQUIT, ui__signal);
+ signal(SIGTERM, ui__signal);
+}
+
+void exit_browser(bool wait_for_ok)
+{
+ if (use_browser > 0) {
+ if (wait_for_ok)
+ ui__question_window("Fatal Error",
+ ui_helpline__last_msg,
+ "Press any key...", 0);
+ ui__exit();
+ }
+}
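The intended lifecycle is: call setup_browser() once at tool start (it drops back to the pager when stdout is not a tty, use_browser is off, or dump_trace is set), run the TUI, then call exit_browser() so the terminal is restored; exit_browser(true) additionally shows ui_helpline__last_msg in a "Fatal Error" window. A hedged sketch of a tool's main path, assuming the prototypes come from the tree's cache.h as elsewhere in perf; run_tui_tool() is hypothetical:

    #include "util/cache.h"    /* assumed to declare setup_browser()/exit_browser() */

    static int run_tui_tool(void)
    {
        setup_browser(true);   /* fall back to the pager when no TUI is possible */

        /* ... run the browser-based UI here ... */

        exit_browser(false);   /* restore the terminal; no confirmation window */
        return 0;
    }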
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/ui.h b/ANDROID_3.4.5/tools/perf/util/ui/ui.h
new file mode 100644
index 00000000..7b670454
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/ui.h
@@ -0,0 +1,11 @@
+#ifndef _PERF_UI_H_
+#define _PERF_UI_H_ 1
+
+#include <pthread.h>
+#include <stdbool.h>
+
+extern pthread_mutex_t ui__lock;
+
+void ui__refresh_dimensions(bool force);
+
+#endif /* _PERF_UI_H_ */
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/util.c b/ANDROID_3.4.5/tools/perf/util/ui/util.c
new file mode 100644
index 00000000..ad4374a1
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/util.c
@@ -0,0 +1,250 @@
+#include "../util.h"
+#include <signal.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/ttydefaults.h>
+
+#include "../cache.h"
+#include "../debug.h"
+#include "browser.h"
+#include "keysyms.h"
+#include "helpline.h"
+#include "ui.h"
+#include "util.h"
+#include "libslang.h"
+
+static void ui_browser__argv_write(struct ui_browser *browser,
+ void *entry, int row)
+{
+ char **arg = entry;
+ bool current_entry = ui_browser__is_current_entry(browser, row);
+
+ ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
+ HE_COLORSET_NORMAL);
+ slsmg_write_nstring(*arg, browser->width);
+}
+
+static int popup_menu__run(struct ui_browser *menu)
+{
+ int key;
+
+ if (ui_browser__show(menu, " ", "ESC: exit, ENTER|->: Select option") < 0)
+ return -1;
+
+ while (1) {
+ key = ui_browser__run(menu, 0);
+
+ switch (key) {
+ case K_RIGHT:
+ case K_ENTER:
+ key = menu->index;
+ break;
+ case K_LEFT:
+ case K_ESC:
+ case 'q':
+ case CTRL('c'):
+ key = -1;
+ break;
+ default:
+ continue;
+ }
+
+ break;
+ }
+
+ ui_browser__hide(menu);
+ return key;
+}
+
+int ui__popup_menu(int argc, char * const argv[])
+{
+ struct ui_browser menu = {
+ .entries = (void *)argv,
+ .refresh = ui_browser__argv_refresh,
+ .seek = ui_browser__argv_seek,
+ .write = ui_browser__argv_write,
+ .nr_entries = argc,
+ };
+
+ return popup_menu__run(&menu);
+}
+
+int ui_browser__input_window(const char *title, const char *text, char *input,
+ const char *exit_msg, int delay_secs)
+{
+ int x, y, len, key;
+ int max_len = 60, nr_lines = 0;
+ static char buf[50];
+ const char *t;
+
+ t = text;
+ while (1) {
+ const char *sep = strchr(t, '\n');
+
+ if (sep == NULL)
+ sep = strchr(t, '\0');
+ len = sep - t;
+ if (max_len < len)
+ max_len = len;
+ ++nr_lines;
+ if (*sep == '\0')
+ break;
+ t = sep + 1;
+ }
+
+ max_len += 2;
+ nr_lines += 8;
+ y = SLtt_Screen_Rows / 2 - nr_lines / 2;
+ x = SLtt_Screen_Cols / 2 - max_len / 2;
+
+ SLsmg_set_color(0);
+ SLsmg_draw_box(y, x++, nr_lines, max_len);
+ if (title) {
+ SLsmg_gotorc(y, x + 1);
+ SLsmg_write_string((char *)title);
+ }
+ SLsmg_gotorc(++y, x);
+ nr_lines -= 7;
+ max_len -= 2;
+ SLsmg_write_wrapped_string((unsigned char *)text, y, x,
+ nr_lines, max_len, 1);
+ y += nr_lines;
+ len = 5;
+ while (len--) {
+ SLsmg_gotorc(y + len - 1, x);
+ SLsmg_write_nstring((char *)" ", max_len);
+ }
+ SLsmg_draw_box(y++, x + 1, 3, max_len - 2);
+
+ SLsmg_gotorc(y + 3, x);
+ SLsmg_write_nstring((char *)exit_msg, max_len);
+ SLsmg_refresh();
+
+ x += 2;
+ len = 0;
+ key = ui__getch(delay_secs);
+ while (key != K_TIMER && key != K_ENTER && key != K_ESC) {
+ if (key == K_BKSPC) {
+ if (len == 0)
+ goto next_key;
+ SLsmg_gotorc(y, x + --len);
+ SLsmg_write_char(' ');
+ } else {
+ buf[len] = key;
+ SLsmg_gotorc(y, x + len++);
+ SLsmg_write_char(key);
+ }
+ SLsmg_refresh();
+
+ /* XXX more graceful overflow handling needed */
+ if (len == sizeof(buf) - 1) {
+ ui_helpline__push("maximum size of symbol name reached!");
+ key = K_ENTER;
+ break;
+ }
+next_key:
+ key = ui__getch(delay_secs);
+ }
+
+ buf[len] = '\0';
+ strncpy(input, buf, len+1);
+ return key;
+}
+
+int ui__question_window(const char *title, const char *text,
+ const char *exit_msg, int delay_secs)
+{
+ int x, y;
+ int max_len = 0, nr_lines = 0;
+ const char *t;
+
+ t = text;
+ while (1) {
+ const char *sep = strchr(t, '\n');
+ int len;
+
+ if (sep == NULL)
+ sep = strchr(t, '\0');
+ len = sep - t;
+ if (max_len < len)
+ max_len = len;
+ ++nr_lines;
+ if (*sep == '\0')
+ break;
+ t = sep + 1;
+ }
+
+ max_len += 2;
+ nr_lines += 4;
+ y = SLtt_Screen_Rows / 2 - nr_lines / 2;
+ x = SLtt_Screen_Cols / 2 - max_len / 2;
+
+ SLsmg_set_color(0);
+ SLsmg_draw_box(y, x++, nr_lines, max_len);
+ if (title) {
+ SLsmg_gotorc(y, x + 1);
+ SLsmg_write_string((char *)title);
+ }
+ SLsmg_gotorc(++y, x);
+ nr_lines -= 2;
+ max_len -= 2;
+ SLsmg_write_wrapped_string((unsigned char *)text, y, x,
+ nr_lines, max_len, 1);
+ SLsmg_gotorc(y + nr_lines - 2, x);
+ SLsmg_write_nstring((char *)" ", max_len);
+ SLsmg_gotorc(y + nr_lines - 1, x);
+ SLsmg_write_nstring((char *)exit_msg, max_len);
+ SLsmg_refresh();
+ return ui__getch(delay_secs);
+}
+
+int ui__help_window(const char *text)
+{
+ return ui__question_window("Help", text, "Press any key...", 0);
+}
+
+int ui__dialog_yesno(const char *msg)
+{
+ return ui__question_window(NULL, msg, "Enter: Yes, ESC: No", 0);
+}
+
+int __ui__warning(const char *title, const char *format, va_list args)
+{
+ char *s;
+
+ if (use_browser > 0 && vasprintf(&s, format, args) > 0) {
+ int key;
+
+ pthread_mutex_lock(&ui__lock);
+ key = ui__question_window(title, s, "Press any key...", 0);
+ pthread_mutex_unlock(&ui__lock);
+ free(s);
+ return key;
+ }
+
+ fprintf(stderr, "%s:\n", title);
+ vfprintf(stderr, format, args);
+ return K_ESC;
+}
+
+int ui__warning(const char *format, ...)
+{
+ int key;
+ va_list args;
+
+ va_start(args, format);
+ key = __ui__warning("Warning", format, args);
+ va_end(args);
+ return key;
+}
+
+int ui__error(const char *format, ...)
+{
+ int key;
+ va_list args;
+
+ va_start(args, format);
+ key = __ui__warning("Error", format, args);
+ va_end(args);
+ return key;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/ui/util.h b/ANDROID_3.4.5/tools/perf/util/ui/util.h
new file mode 100644
index 00000000..2d1738bd
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/ui/util.h
@@ -0,0 +1,14 @@
+#ifndef _PERF_UI_UTIL_H_
+#define _PERF_UI_UTIL_H_ 1
+
+#include <stdarg.h>
+
+int ui__getch(int delay_secs);
+int ui__popup_menu(int argc, char * const argv[]);
+int ui__help_window(const char *text);
+int ui__dialog_yesno(const char *msg);
+int ui__question_window(const char *title, const char *text,
+ const char *exit_msg, int delay_secs);
+int __ui__warning(const char *title, const char *format, va_list args);
+
+#endif /* _PERF_UI_UTIL_H_ */
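These helpers cover the common modal interactions: ui__popup_menu() returns the index of the chosen entry or -1 when the user backs out, and ui__dialog_yesno() returns the raw key, so K_ENTER means "yes". A short sketch of that pattern, assuming it is built inside this tree; the option strings and choose_action() are hypothetical:

    #include "util/ui/util.h"
    #include "util/ui/keysyms.h"

    static int choose_action(void)
    {
        char * const options[] = { "Annotate", "Zoom into thread", "Exit" };
        int choice = ui__popup_menu(3, options);

        if (choice == 2 && ui__dialog_yesno("Really exit?") != K_ENTER)
            return -1;    /* user backed out of exiting */

        return choice;
    }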
diff --git a/ANDROID_3.4.5/tools/perf/util/usage.c b/ANDROID_3.4.5/tools/perf/util/usage.c
new file mode 100644
index 00000000..52bb07c6
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/usage.c
@@ -0,0 +1,122 @@
+/*
+ * usage.c
+ *
+ * Various reporting routines.
+ * Originally copied from GIT source.
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ */
+#include "util.h"
+#include "debug.h"
+
+static void report(const char *prefix, const char *err, va_list params)
+{
+ char msg[1024];
+ vsnprintf(msg, sizeof(msg), err, params);
+ fprintf(stderr, " %s%s\n", prefix, msg);
+}
+
+static NORETURN void usage_builtin(const char *err)
+{
+ fprintf(stderr, "\n Usage: %s\n", err);
+ exit(129);
+}
+
+static NORETURN void die_builtin(const char *err, va_list params)
+{
+ report(" Fatal: ", err, params);
+ exit(128);
+}
+
+static void error_builtin(const char *err, va_list params)
+{
+ report(" Error: ", err, params);
+}
+
+static void warn_builtin(const char *warn, va_list params)
+{
+ report(" Warning: ", warn, params);
+}
+
+/* If we are in a dlopen()ed .so, a write to a global variable would segfault
+ * (ugh), so keep things static. */
+static void (*usage_routine)(const char *err) NORETURN = usage_builtin;
+static void (*die_routine)(const char *err, va_list params) NORETURN = die_builtin;
+static void (*error_routine)(const char *err, va_list params) = error_builtin;
+static void (*warn_routine)(const char *err, va_list params) = warn_builtin;
+
+void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN)
+{
+ die_routine = routine;
+}
+
+void usage(const char *err)
+{
+ usage_routine(err);
+}
+
+void die(const char *err, ...)
+{
+ va_list params;
+
+ va_start(params, err);
+ die_routine(err, params);
+ va_end(params);
+}
+
+int error(const char *err, ...)
+{
+ va_list params;
+
+ va_start(params, err);
+ error_routine(err, params);
+ va_end(params);
+ return -1;
+}
+
+void warning(const char *warn, ...)
+{
+ va_list params;
+
+ va_start(params, warn);
+ warn_routine(warn, params);
+ va_end(params);
+}
+
+uid_t parse_target_uid(const char *str, const char *tid, const char *pid)
+{
+ struct passwd pwd, *result;
+ char buf[1024];
+
+ if (str == NULL)
+ return UINT_MAX;
+
+ /* UID and PID are mutually exclusive */
+ if (tid || pid) {
+ ui__warning("PID/TID switch overriding UID\n");
+ sleep(1);
+ return UINT_MAX;
+ }
+
+ getpwnam_r(str, &pwd, buf, sizeof(buf), &result);
+
+ if (result == NULL) {
+ char *endptr;
+ int uid = strtol(str, &endptr, 10);
+
+ if (*endptr != '\0') {
+ ui__error("Invalid user %s\n", str);
+ return UINT_MAX - 1;
+ }
+
+ getpwuid_r(uid, &pwd, buf, sizeof(buf), &result);
+
+ if (result == NULL) {
+ ui__error("Problems obtaining information for user %s\n",
+ str);
+ return UINT_MAX - 1;
+ }
+ }
+
+ return result->pw_uid;
+}
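parse_target_uid() returns UINT_MAX when no user was requested or when a PID/TID switch overrides the UID, and UINT_MAX - 1 when the lookup itself failed (an error has already been printed), so callers must distinguish those two sentinels. A hedged sketch; the option variables and resolve_target() are hypothetical stand-ins for whatever the option parser fills in:

    #include "util/util.h"

    /* Hypothetical option values, e.g. filled in by the option parser. */
    static const char *uid_str, *target_tid, *target_pid;

    static int resolve_target(uid_t *uid)
    {
        *uid = parse_target_uid(uid_str, target_tid, target_pid);
        if (uid_str && *uid == UINT_MAX - 1)
            return -1;    /* lookup failed, error already reported */
        return 0;
    }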
diff --git a/ANDROID_3.4.5/tools/perf/util/util.c b/ANDROID_3.4.5/tools/perf/util/util.c
new file mode 100644
index 00000000..8109a907
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/util.c
@@ -0,0 +1,150 @@
+#include "../perf.h"
+#include "util.h"
+#include <sys/mman.h>
+
+/*
+ * XXX We need to find a better place for these things...
+ */
+bool perf_host = true;
+bool perf_guest = false;
+
+void event_attr_init(struct perf_event_attr *attr)
+{
+ if (!perf_host)
+ attr->exclude_host = 1;
+ if (!perf_guest)
+ attr->exclude_guest = 1;
+ /* to capture ABI version */
+ attr->size = sizeof(*attr);
+}
+
+int mkdir_p(char *path, mode_t mode)
+{
+ struct stat st;
+ int err;
+ char *d = path;
+
+ if (*d != '/')
+ return -1;
+
+ if (stat(path, &st) == 0)
+ return 0;
+
+ while (*++d == '/');
+
+ while ((d = strchr(d, '/'))) {
+ *d = '\0';
+ err = stat(path, &st) && mkdir(path, mode);
+ *d++ = '/';
+ if (err)
+ return -1;
+ while (*d == '/')
+ ++d;
+ }
+ return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
+}
+
+static int slow_copyfile(const char *from, const char *to)
+{
+ int err = -1;
+ char *line = NULL;
+ size_t n;
+ FILE *from_fp = fopen(from, "r"), *to_fp;
+
+ if (from_fp == NULL)
+ goto out;
+
+ to_fp = fopen(to, "w");
+ if (to_fp == NULL)
+ goto out_fclose_from;
+
+ while (getline(&line, &n, from_fp) > 0)
+ if (fputs(line, to_fp) == EOF)
+ goto out_fclose_to;
+ err = 0;
+out_fclose_to:
+ fclose(to_fp);
+ free(line);
+out_fclose_from:
+ fclose(from_fp);
+out:
+ return err;
+}
+
+int copyfile(const char *from, const char *to)
+{
+ int fromfd, tofd;
+ struct stat st;
+ void *addr;
+ int err = -1;
+
+ if (stat(from, &st))
+ goto out;
+
+ if (st.st_size == 0) /* /proc? do it slowly... */
+ return slow_copyfile(from, to);
+
+ fromfd = open(from, O_RDONLY);
+ if (fromfd < 0)
+ goto out;
+
+ tofd = creat(to, 0755);
+ if (tofd < 0)
+ goto out_close_from;
+
+ addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fromfd, 0);
+ if (addr == MAP_FAILED)
+ goto out_close_to;
+
+ if (write(tofd, addr, st.st_size) == st.st_size)
+ err = 0;
+
+ munmap(addr, st.st_size);
+out_close_to:
+ close(tofd);
+ if (err)
+ unlink(to);
+out_close_from:
+ close(fromfd);
+out:
+ return err;
+}
+
+unsigned long convert_unit(unsigned long value, char *unit)
+{
+ *unit = ' ';
+
+ if (value > 1000) {
+ value /= 1000;
+ *unit = 'K';
+ }
+
+ if (value > 1000) {
+ value /= 1000;
+ *unit = 'M';
+ }
+
+ if (value > 1000) {
+ value /= 1000;
+ *unit = 'G';
+ }
+
+ return value;
+}
+
+int readn(int fd, void *buf, size_t n)
+{
+ void *buf_start = buf;
+
+ while (n) {
+ int ret = read(fd, buf, n);
+
+ if (ret <= 0)
+ return ret;
+
+ n -= ret;
+ buf += ret;
+ }
+
+ return buf - buf_start;
+}
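mkdir_p() needs a writable, absolute path because it temporarily terminates the string at each '/' while creating intermediate directories, and copyfile() falls back to slow_copyfile() for zero-sized files such as /proc entries. A small sketch combining the two, assuming it is built inside this tree; the cache path and stash_file() are hypothetical:

    #include "util/util.h"

    static int stash_file(const char *from)
    {
        char dir[] = "/tmp/perf-cache/objects";   /* writable copy for mkdir_p() */

        if (mkdir_p(dir, 0755) < 0)
            return -1;

        return copyfile(from, "/tmp/perf-cache/objects/copy");
    }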
diff --git a/ANDROID_3.4.5/tools/perf/util/util.h b/ANDROID_3.4.5/tools/perf/util/util.h
new file mode 100644
index 00000000..0f99f394
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/util.h
@@ -0,0 +1,268 @@
+#ifndef GIT_COMPAT_UTIL_H
+#define GIT_COMPAT_UTIL_H
+
+#define _FILE_OFFSET_BITS 64
+
+#ifndef FLEX_ARRAY
+/*
+ * See if our compiler is known to support flexible array members.
+ */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
+# define FLEX_ARRAY /* empty */
+#elif defined(__GNUC__)
+# if (__GNUC__ >= 3)
+# define FLEX_ARRAY /* empty */
+# else
+# define FLEX_ARRAY 0 /* older GNU extension */
+# endif
+#endif
+
+/*
+ * Otherwise, default to the safer but somewhat wasteful traditional style
+ */
+#ifndef FLEX_ARRAY
+# define FLEX_ARRAY 1
+#endif
+#endif
+
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
+
+#ifdef __GNUC__
+#define TYPEOF(x) (__typeof__(x))
+#else
+#define TYPEOF(x)
+#endif
+
+#define MSB(x, bits) ((x) & TYPEOF(x)(~0ULL << (sizeof(x) * 8 - (bits))))
+#define HAS_MULTI_BITS(i) ((i) & ((i) - 1)) /* checks if an integer has more than 1 bit set */
+
+/* Approximation of the length of the decimal representation of this type. */
+#define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1)
+
+#define _ALL_SOURCE 1
+#define _BSD_SOURCE 1
+#define HAS_BOOL
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/statfs.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+#include <limits.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <sys/time.h>
+#include <time.h>
+#include <signal.h>
+#include <fnmatch.h>
+#include <assert.h>
+#include <regex.h>
+#include <utime.h>
+#include <sys/wait.h>
+#include <sys/poll.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <sys/select.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <pwd.h>
+#include <inttypes.h>
+#include "../../../include/linux/magic.h"
+#include "types.h"
+#include <sys/ttydefaults.h>
+
+extern const char *graph_line;
+extern const char *graph_dotted_line;
+extern char buildid_dir[];
+
+/* On most systems <limits.h> would have given us this, but
+ * not on some systems (e.g. GNU/Hurd).
+ */
+#ifndef PATH_MAX
+#define PATH_MAX 4096
+#endif
+
+#ifndef PRIuMAX
+#define PRIuMAX "llu"
+#endif
+
+#ifndef PRIu32
+#define PRIu32 "u"
+#endif
+
+#ifndef PRIx32
+#define PRIx32 "x"
+#endif
+
+#ifndef PATH_SEP
+#define PATH_SEP ':'
+#endif
+
+#ifndef STRIP_EXTENSION
+#define STRIP_EXTENSION ""
+#endif
+
+#ifndef has_dos_drive_prefix
+#define has_dos_drive_prefix(path) 0
+#endif
+
+#ifndef is_dir_sep
+#define is_dir_sep(c) ((c) == '/')
+#endif
+
+#ifdef __GNUC__
+#define NORETURN __attribute__((__noreturn__))
+#else
+#define NORETURN
+#ifndef __attribute__
+#define __attribute__(x)
+#endif
+#endif
+
+/* General helper functions */
+extern void usage(const char *err) NORETURN;
+extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2)));
+extern int error(const char *err, ...) __attribute__((format (printf, 1, 2)));
+extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2)));
+
+#include "../../../include/linux/stringify.h"
+
+#define DIE_IF(cnd) \
+ do { if (cnd) \
+ die(" at (" __FILE__ ":" __stringify(__LINE__) "): " \
+ __stringify(cnd) "\n"); \
+ } while (0)
+
+
+extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN);
+
+extern int prefixcmp(const char *str, const char *prefix);
+extern void set_buildid_dir(void);
+extern void disable_buildid_cache(void);
+
+static inline const char *skip_prefix(const char *str, const char *prefix)
+{
+ size_t len = strlen(prefix);
+ return strncmp(str, prefix, len) ? NULL : str + len;
+}
+
+#ifdef __GLIBC_PREREQ
+#if __GLIBC_PREREQ(2, 1)
+#define HAVE_STRCHRNUL
+#endif
+#endif
+
+#ifndef HAVE_STRCHRNUL
+#define strchrnul gitstrchrnul
+static inline char *gitstrchrnul(const char *s, int c)
+{
+ while (*s && *s != c)
+ s++;
+ return (char *)s;
+}
+#endif
+
+/*
+ * Wrappers:
+ */
+extern char *xstrdup(const char *str);
+extern void *xrealloc(void *ptr, size_t size) __attribute__((weak));
+
+
+static inline void *zalloc(size_t size)
+{
+ return calloc(1, size);
+}
+
+static inline int has_extension(const char *filename, const char *ext)
+{
+ size_t len = strlen(filename);
+ size_t extlen = strlen(ext);
+
+ return len > extlen && !memcmp(filename + len - extlen, ext, extlen);
+}
+
+/* Sane ctype - no locale, and works with signed chars */
+#undef isascii
+#undef isspace
+#undef isdigit
+#undef isxdigit
+#undef isalpha
+#undef isprint
+#undef isalnum
+#undef islower
+#undef isupper
+#undef tolower
+#undef toupper
+
+extern unsigned char sane_ctype[256];
+#define GIT_SPACE 0x01
+#define GIT_DIGIT 0x02
+#define GIT_ALPHA 0x04
+#define GIT_GLOB_SPECIAL 0x08
+#define GIT_REGEX_SPECIAL 0x10
+#define GIT_PRINT_EXTRA 0x20
+#define GIT_PRINT 0x3E
+#define sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0)
+#define isascii(x) (((x) & ~0x7f) == 0)
+#define isspace(x) sane_istest(x,GIT_SPACE)
+#define isdigit(x) sane_istest(x,GIT_DIGIT)
+#define isxdigit(x) \
+ (sane_istest(toupper(x), GIT_ALPHA | GIT_DIGIT) && toupper(x) < 'G')
+#define isalpha(x) sane_istest(x,GIT_ALPHA)
+#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT)
+#define isprint(x) sane_istest(x,GIT_PRINT)
+#define islower(x) (sane_istest(x,GIT_ALPHA) && sane_istest(x,0x20))
+#define isupper(x) (sane_istest(x,GIT_ALPHA) && !sane_istest(x,0x20))
+#define tolower(x) sane_case((unsigned char)(x), 0x20)
+#define toupper(x) sane_case((unsigned char)(x), 0)
+
+static inline int sane_case(int x, int high)
+{
+ if (sane_istest(x, GIT_ALPHA))
+ x = (x & ~0x20) | high;
+ return x;
+}
+
+int mkdir_p(char *path, mode_t mode);
+int copyfile(const char *from, const char *to);
+
+s64 perf_atoll(const char *str);
+char **argv_split(const char *str, int *argcp);
+void argv_free(char **argv);
+bool strglobmatch(const char *str, const char *pat);
+bool strlazymatch(const char *str, const char *pat);
+int strtailcmp(const char *s1, const char *s2);
+unsigned long convert_unit(unsigned long value, char *unit);
+int readn(int fd, void *buf, size_t size);
+
+struct perf_event_attr;
+
+void event_attr_init(struct perf_event_attr *attr);
+
+uid_t parse_target_uid(const char *str, const char *tid, const char *pid);
+
+#define _STR(x) #x
+#define STR(x) _STR(x)
+
+/*
+ * Determine whether some value is a power of two, where zero is
+ * *not* considered a power of two.
+ */
+
+static inline __attribute__((const))
+bool is_power_of_2(unsigned long n)
+{
+ return (n != 0 && ((n & (n - 1)) == 0));
+}
+
+#endif
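The "sane ctype" macros deliberately ignore the locale and accept signed chars, is_power_of_2() treats zero as not a power of two, and convert_unit() divides by 1000 until the value fits and reports the unit letter through its second argument. A quick sketch exercising the helpers; print_count() is hypothetical:

    #include "util/util.h"

    static void print_count(unsigned long samples)
    {
        char unit;
        unsigned long scaled = convert_unit(samples, &unit);

        printf("%lu%c samples (%s a power of two)\n",
               scaled, unit, is_power_of_2(samples) ? "is" : "is not");
    }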
diff --git a/ANDROID_3.4.5/tools/perf/util/values.c b/ANDROID_3.4.5/tools/perf/util/values.c
new file mode 100644
index 00000000..697c8b4e
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/values.c
@@ -0,0 +1,232 @@
+#include <stdlib.h>
+
+#include "util.h"
+#include "values.h"
+
+void perf_read_values_init(struct perf_read_values *values)
+{
+ values->threads_max = 16;
+ values->pid = malloc(values->threads_max * sizeof(*values->pid));
+ values->tid = malloc(values->threads_max * sizeof(*values->tid));
+ values->value = malloc(values->threads_max * sizeof(*values->value));
+ if (!values->pid || !values->tid || !values->value)
+ die("failed to allocate read_values threads arrays");
+ values->threads = 0;
+
+ values->counters_max = 16;
+ values->counterrawid = malloc(values->counters_max
+ * sizeof(*values->counterrawid));
+ values->countername = malloc(values->counters_max
+ * sizeof(*values->countername));
+ if (!values->counterrawid || !values->countername)
+ die("failed to allocate read_values counters arrays");
+ values->counters = 0;
+}
+
+void perf_read_values_destroy(struct perf_read_values *values)
+{
+ int i;
+
+ if (!values->threads_max || !values->counters_max)
+ return;
+
+ for (i = 0; i < values->threads; i++)
+ free(values->value[i]);
+ free(values->value);
+ free(values->pid);
+ free(values->tid);
+ free(values->counterrawid);
+ for (i = 0; i < values->counters; i++)
+ free(values->countername[i]);
+ free(values->countername);
+}
+
+static void perf_read_values__enlarge_threads(struct perf_read_values *values)
+{
+ values->threads_max *= 2;
+ values->pid = realloc(values->pid,
+ values->threads_max * sizeof(*values->pid));
+ values->tid = realloc(values->tid,
+ values->threads_max * sizeof(*values->tid));
+ values->value = realloc(values->value,
+ values->threads_max * sizeof(*values->value));
+ if (!values->pid || !values->tid || !values->value)
+ die("failed to enlarge read_values threads arrays");
+}
+
+static int perf_read_values__findnew_thread(struct perf_read_values *values,
+ u32 pid, u32 tid)
+{
+ int i;
+
+ for (i = 0; i < values->threads; i++)
+ if (values->pid[i] == pid && values->tid[i] == tid)
+ return i;
+
+ if (values->threads == values->threads_max)
+ perf_read_values__enlarge_threads(values);
+
+ i = values->threads++;
+ values->pid[i] = pid;
+ values->tid[i] = tid;
+ values->value[i] = malloc(values->counters_max * sizeof(**values->value));
+ if (!values->value[i])
+ die("failed to allocate read_values counters array");
+
+ return i;
+}
+
+static void perf_read_values__enlarge_counters(struct perf_read_values *values)
+{
+ int i;
+
+ values->counters_max *= 2;
+ values->counterrawid = realloc(values->counterrawid,
+ values->counters_max * sizeof(*values->counterrawid));
+ values->countername = realloc(values->countername,
+ values->counters_max * sizeof(*values->countername));
+ if (!values->counterrawid || !values->countername)
+ die("failed to enlarge read_values counters arrays");
+
+ for (i = 0; i < values->threads; i++) {
+ values->value[i] = realloc(values->value[i],
+ values->counters_max * sizeof(**values->value));
+ if (!values->value[i])
+ die("failed to enlarge read_values counters arrays");
+ }
+}
+
+static int perf_read_values__findnew_counter(struct perf_read_values *values,
+ u64 rawid, const char *name)
+{
+ int i;
+
+ for (i = 0; i < values->counters; i++)
+ if (values->counterrawid[i] == rawid)
+ return i;
+
+ if (values->counters == values->counters_max)
+ perf_read_values__enlarge_counters(values);
+
+ i = values->counters++;
+ values->counterrawid[i] = rawid;
+ values->countername[i] = strdup(name);
+
+ return i;
+}
+
+void perf_read_values_add_value(struct perf_read_values *values,
+ u32 pid, u32 tid,
+ u64 rawid, const char *name, u64 value)
+{
+ int tindex, cindex;
+
+ tindex = perf_read_values__findnew_thread(values, pid, tid);
+ cindex = perf_read_values__findnew_counter(values, rawid, name);
+
+ values->value[tindex][cindex] = value;
+}
+
+static void perf_read_values__display_pretty(FILE *fp,
+ struct perf_read_values *values)
+{
+ int i, j;
+ int pidwidth, tidwidth;
+ int *counterwidth;
+
+ counterwidth = malloc(values->counters * sizeof(*counterwidth));
+ if (!counterwidth)
+ die("failed to allocate counterwidth array");
+ tidwidth = 3;
+ pidwidth = 3;
+ for (j = 0; j < values->counters; j++)
+ counterwidth[j] = strlen(values->countername[j]);
+ for (i = 0; i < values->threads; i++) {
+ int width;
+
+ width = snprintf(NULL, 0, "%d", values->pid[i]);
+ if (width > pidwidth)
+ pidwidth = width;
+ width = snprintf(NULL, 0, "%d", values->tid[i]);
+ if (width > tidwidth)
+ tidwidth = width;
+ for (j = 0; j < values->counters; j++) {
+ width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
+ if (width > counterwidth[j])
+ counterwidth[j] = width;
+ }
+ }
+
+ fprintf(fp, "# %*s %*s", pidwidth, "PID", tidwidth, "TID");
+ for (j = 0; j < values->counters; j++)
+ fprintf(fp, " %*s", counterwidth[j], values->countername[j]);
+ fprintf(fp, "\n");
+
+ for (i = 0; i < values->threads; i++) {
+ fprintf(fp, " %*d %*d", pidwidth, values->pid[i],
+ tidwidth, values->tid[i]);
+ for (j = 0; j < values->counters; j++)
+ fprintf(fp, " %*" PRIu64,
+ counterwidth[j], values->value[i][j]);
+ fprintf(fp, "\n");
+ }
+ free(counterwidth);
+}
+
+static void perf_read_values__display_raw(FILE *fp,
+ struct perf_read_values *values)
+{
+ int width, pidwidth, tidwidth, namewidth, rawwidth, countwidth;
+ int i, j;
+
+ tidwidth = 3; /* TID */
+ pidwidth = 3; /* PID */
+ namewidth = 4; /* "Name" */
+ rawwidth = 3; /* "Raw" */
+ countwidth = 5; /* "Count" */
+
+ for (i = 0; i < values->threads; i++) {
+ width = snprintf(NULL, 0, "%d", values->pid[i]);
+ if (width > pidwidth)
+ pidwidth = width;
+ width = snprintf(NULL, 0, "%d", values->tid[i]);
+ if (width > tidwidth)
+ tidwidth = width;
+ }
+ for (j = 0; j < values->counters; j++) {
+ width = strlen(values->countername[j]);
+ if (width > namewidth)
+ namewidth = width;
+ width = snprintf(NULL, 0, "%" PRIx64, values->counterrawid[j]);
+ if (width > rawwidth)
+ rawwidth = width;
+ }
+ for (i = 0; i < values->threads; i++) {
+ for (j = 0; j < values->counters; j++) {
+ width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
+ if (width > countwidth)
+ countwidth = width;
+ }
+ }
+
+ fprintf(fp, "# %*s %*s %*s %*s %*s\n",
+ pidwidth, "PID", tidwidth, "TID",
+ namewidth, "Name", rawwidth, "Raw",
+ countwidth, "Count");
+ for (i = 0; i < values->threads; i++)
+ for (j = 0; j < values->counters; j++)
+ fprintf(fp, " %*d %*d %*s %*" PRIx64 " %*" PRIu64,
+ pidwidth, values->pid[i],
+ tidwidth, values->tid[i],
+ namewidth, values->countername[j],
+ rawwidth, values->counterrawid[j],
+ countwidth, values->value[i][j]);
+}
+
+void perf_read_values_display(FILE *fp, struct perf_read_values *values, int raw)
+{
+ if (raw)
+ perf_read_values__display_raw(fp, values);
+ else
+ perf_read_values__display_pretty(fp, values);
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/values.h b/ANDROID_3.4.5/tools/perf/util/values.h
new file mode 100644
index 00000000..2fa967e1
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/values.h
@@ -0,0 +1,27 @@
+#ifndef __PERF_VALUES_H
+#define __PERF_VALUES_H
+
+#include "types.h"
+
+struct perf_read_values {
+ int threads;
+ int threads_max;
+ u32 *pid, *tid;
+ int counters;
+ int counters_max;
+ u64 *counterrawid;
+ char **countername;
+ u64 **value;
+};
+
+void perf_read_values_init(struct perf_read_values *values);
+void perf_read_values_destroy(struct perf_read_values *values);
+
+void perf_read_values_add_value(struct perf_read_values *values,
+ u32 pid, u32 tid,
+ u64 rawid, const char *name, u64 value);
+
+void perf_read_values_display(FILE *fp, struct perf_read_values *values,
+ int raw);
+
+#endif /* __PERF_VALUES_H */
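The perf_read_values table grows its thread and counter arrays on demand; the usual flow is init, one add_value() call per (pid, tid, counter, value) sample, one display() call, then destroy. A minimal sketch with made-up sample values; dump_demo_values() is hypothetical:

    #include <stdio.h>
    #include "util/values.h"

    static void dump_demo_values(void)
    {
        struct perf_read_values values;

        perf_read_values_init(&values);
        perf_read_values_add_value(&values, 1234, 1234, 0, "cycles", 1000000);
        perf_read_values_add_value(&values, 1234, 1235, 0, "cycles", 500000);
        perf_read_values_display(stdout, &values, 0);  /* 0 = pretty, 1 = raw */
        perf_read_values_destroy(&values);
    }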
diff --git a/ANDROID_3.4.5/tools/perf/util/wrapper.c b/ANDROID_3.4.5/tools/perf/util/wrapper.c
new file mode 100644
index 00000000..73e900ed
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/wrapper.c
@@ -0,0 +1,40 @@
+/*
+ * Various trivial helper wrappers around standard functions
+ */
+#include "cache.h"
+
+/*
+ * There's no pack memory to release - but stay close to the Git
+ * version so wrap this away:
+ */
+static inline void release_pack_memory(size_t size __used, int flag __used)
+{
+}
+
+char *xstrdup(const char *str)
+{
+ char *ret = strdup(str);
+ if (!ret) {
+ release_pack_memory(strlen(str) + 1, -1);
+ ret = strdup(str);
+ if (!ret)
+ die("Out of memory, strdup failed");
+ }
+ return ret;
+}
+
+void *xrealloc(void *ptr, size_t size)
+{
+ void *ret = realloc(ptr, size);
+ if (!ret && !size)
+ ret = realloc(ptr, 1);
+ if (!ret) {
+ release_pack_memory(size, -1);
+ ret = realloc(ptr, size);
+ if (!ret && !size)
+ ret = realloc(ptr, 1);
+ if (!ret)
+ die("Out of memory, realloc failed");
+ }
+ return ret;
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/xyarray.c b/ANDROID_3.4.5/tools/perf/util/xyarray.c
new file mode 100644
index 00000000..22afbf6c
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/xyarray.c
@@ -0,0 +1,20 @@
+#include "xyarray.h"
+#include "util.h"
+
+struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size)
+{
+ size_t row_size = ylen * entry_size;
+ struct xyarray *xy = zalloc(sizeof(*xy) + xlen * row_size);
+
+ if (xy != NULL) {
+ xy->entry_size = entry_size;
+ xy->row_size = row_size;
+ }
+
+ return xy;
+}
+
+void xyarray__delete(struct xyarray *xy)
+{
+ free(xy);
+}
diff --git a/ANDROID_3.4.5/tools/perf/util/xyarray.h b/ANDROID_3.4.5/tools/perf/util/xyarray.h
new file mode 100644
index 00000000..c488a072
--- /dev/null
+++ b/ANDROID_3.4.5/tools/perf/util/xyarray.h
@@ -0,0 +1,20 @@
+#ifndef _PERF_XYARRAY_H_
+#define _PERF_XYARRAY_H_ 1
+
+#include <sys/types.h>
+
+struct xyarray {
+ size_t row_size;
+ size_t entry_size;
+ char contents[];
+};
+
+struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
+void xyarray__delete(struct xyarray *xy);
+
+static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
+{
+ return &xy->contents[x * xy->row_size + y * xy->entry_size];
+}
+
+#endif /* _PERF_XYARRAY_H_ */
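xyarray packs an xlen-by-ylen grid of fixed-size entries into a single allocation, with xyarray__entry() doing the row/column arithmetic; elsewhere in this tree it backs the per-cpu, per-thread fd table in evsel.c. A self-contained sketch; fill_grid() and its dimensions are hypothetical:

    #include "util/xyarray.h"

    static int fill_grid(int nr_cpus, int nr_threads)
    {
        struct xyarray *fds = xyarray__new(nr_cpus, nr_threads, sizeof(int));
        int cpu, thread;

        if (fds == NULL)
            return -1;

        for (cpu = 0; cpu < nr_cpus; cpu++)
            for (thread = 0; thread < nr_threads; thread++)
                *(int *)xyarray__entry(fds, cpu, thread) = -1;

        xyarray__delete(fds);
        return 0;
    }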